may be connected to in shared mode. If this bit is set, SCARD_STATE_PRESENT will also be set
SCARD_STATE_MUTE There is an unresponsive card in the reader
from smartcard.scard import *
hresult, hcontext = SCardEstablishContext(SCARD_SCOPE_USER)
hresult, readers = SCardListReaders(hcontext, [])
readerstates = []
cards = [ 'Schlumberger Cryptoflex 4k', 'Schlumberger Cryptoflex 8k', 'Schlumberger Cryptoflex 8k v2' ]
for i in xrange(len(readers)):
readerstates += [ (readers[i], SCARD_STATE_UNAWARE) ]
hresult, newstates = SCardLocateCards(hcontext, cards, readerstates)
print '----- Please insert or remove a card ------------'
hresult, newstates = SCardGetStatusChange(hcontext, INFINITE, newstates)
for i in newstates:
reader, eventstate, atr = i
if eventstate & SCARD_STATE_ATRMATCH:
print ' Card found'
if eventstate & SCARD_STATE_EMPTY:
print ' Reader empty'
"""
return _scard.SCardGetStatusChange(hcontext, dwTimeout, readerstatelist)
def SCardListReaders(hcontext, readergroups):
r"""
SCardListReaders( hcontext, [] readergroups) -> SCARDRETCODE
Parameters
----------
hcontext: context handle returned from SCardEstablishContext()
readergroups: a list of reader groups to search for readers
This function returns a list of currently available readers on the system.
A list of groups can be provided as input to list only the readers in the
given groups.
from smartcard.scard import *
hresult, hcontext = SCardEstablishContext(SCARD_SCOPE_USER)
hresult, readers = SCardListReaders(hcontext, [])
if hresult != SCARD_S_SUCCESS:
raise error, 'Failed to list readers: ' + SCardGetErrorMessage(hresult)
print 'PCSC Readers: ', readers
hresult, readers = SCardListReaders(hcontext, ['SCard$T1ProtocolReaders', 'SCard$MyOwnGroup'])
...
"""
return _scard.SCardListReaders(hcontext, readergroups)
def SCardListReaderGroups(hcontext):
r"""
SCardListReaderGroups( hcontext) -> SCARDRETCODE
Parameters
----------
hcontext: context handle returned from SCardEstablishContext()
This function returns a list of currently available reader groups on the
system.
from smartcard.scard import *
hresult, hcontext = SCardEstablishContext(SCARD_SCOPE_USER)
hresult, readerGroups = SCardListReaderGroups(hcontext)
if hresult != SCARD_S_SUCCESS:
raise error, 'Unable to list reader groups: ' + SCardGetErrorMessage(hresult)
print 'PCSC Reader groups: ', readerGroups
"""
return _scard.SCardListReaderGroups(hcontext)
def SCardReconnect(hcard, dwShareMode, dwPreferredProtocols, dwInitialization):
r"""
SCardReconnect( hcard, dwShareMode, dwPreferredProtocols, dwInitialization) -> SCARDRETCODE
Parameters
----------
hcard: card handle returned from SCardConnect()
dwShareMode: share mode
dwPreferredProtocols: preferred protocols
dwInitialization: the type of initialization that should be performed on the card
This function reestablishes a connection to a reader that was previously
connected to using SCardConnect(). In a multi-application environment it
is possible for an application to reset the card in shared mode. When
this occurs, any other application trying to access certain commands will
be returned the value SCARD_W_RESET_CARD. When this occurs,
SCardReconnect() must be called in order to acknowledge that the card was
reset and allow it to change its state accordingly.
Value of dwShareMode Meaning
SCARD_SHARE_SHARED This application will allow others to share the reader
SCARD_SHARE_EXCLUSIVE This application will NOT allow others to share the reader
Value of dwPreferredProtocols Meaning
SCARD_PROTOCOL_T0 Use the T=0 protocol
SCARD_PROTOCOL_T1 Use the T=1 protocol
SCARD_PROTOCOL_RAW Use with memory type cards
dwPreferredProtocols is a bit mask of acceptable protocols for the connection. You can use (SCARD_PROTOCOL_T0 | SCARD_PROTOCOL_T1) if you do not have a preferred protocol.
Value of dwInitialization Meaning
SCARD_LEAVE_CARD Do nothing
SCARD_RESET_CARD Reset the card (warm reset)
SCARD_UNPOWER_CARD Unpower the card (cold reset)
SCARD_EJECT_CARD Eject the card
from smartcard.scard import *
hresult, hcontext = SCardEstablishContext(SCARD_SCOPE_USER)
hresult, hcard, dwActiveProtocol = SCardConnect(
hcontext, 'SchlumbergerSema Reflex USB v.2 0', SCARD_SHARE_SHARED, SCARD_PROTOCOL_T0)
hresult, activeProtocol = SCardReconnect(hcard, SCARD_SHARE_EXCLUSIVE,
SCARD_PROTOCOL_T0, SCARD_RESET_CARD)
...
"""
return _scard.SCardReconnect(hcard, dwShareMode, dwPreferredProtocols, dwInitialization)
def SCardReleaseContext(hcontext):
r"""
SCardReleaseContext( hcontext) -> SCARDRETCODE
Parameters
----------
hcontext: context handle returned from SCardEstablishContext()
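This function releases the context acquired with SCardEstablishContext().
A minimal usage sketch in the style of the other examples in this module
(the error message text is illustrative):
from smartcard.scard import *
hresult, hcontext = SCardEstablishContext(SCARD_SCOPE_USER)
...
hresult = SCardReleaseContext(hcontext)
if hresult != SCARD_S_SUCCESS:
raise error, 'Failed to release context: ' + SCardGetErrorMessage(hresult)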
"""
return _scard.SCardReleaseContext(hcontext)
def SCardStatus(hcard):
r"""
SCardStatus( hcard) -> SCARDRETCODE
Parameters
----------
hcard: card handle returned from SCardConnect()
This function returns the current status of the reader connected to by
hcard. The reader friendly name is returned, as well as the state,
protocol and ATR. The state is a DWORD possibly OR'd with the following
values:
Value of pdwState Meaning
SCARD_ABSENT There is no card in the reader
SCARD_PRESENT There is a card in the reader, but it has not been moved into position for use
SCARD_SWALLOWED There is a card in the reader in position for use. The card is not powered
SCARD_POWERED Power is being provided to the card, but the reader driver is unaware of the mode of the card
SCARD_NEGOTIABLE The card has been reset and is awaiting PTS negotiation
SCARD_SPECIFIC The card has been reset and specific communication protocols have been established
Value of pdwProtocol Meaning
SCARD_PROTOCOL_T0 Use the T=0 protocol
SCARD_PROTOCOL_T1 Use the T=1 protocol
from smartcard.scard import *
hresult, hcontext = SCardEstablishContext(SCARD_SCOPE_USER)
hresult, hcard, dwActiveProtocol = SCardConnect(
hcontext, 'SchlumbergerSema Reflex USB v.2 0', SCARD_SHARE_SHARED, SCARD_PROTOCOL_T0)
hresult, reader, state, protocol, atr = SCardStatus(hcard)
if hresult != SCARD_S_SUCCESS:
raise error, 'failed to get status: ' + SCardGetErrorMessage(hresult)
print 'Reader: ', reader
print 'State: ', state
print 'Protocol: ', protocol
print 'ATR: ',
for i in xrange(len(atr)):
print '0x%.2X' % atr[i],
print
...
"""
return _scard.SCardStatus(hcard)
def SCardTransmit(hcard, pioSendPci, apducommand):
r"""
SCardTransmit( hcard, unsigned long pioSendPci, byte[] apducommand) -> SCARDRETCODE
Parameters
----------
hcard: card handle returned from SCardConnect()
pioSendPci: unsigned long
apducommand: list of APDU bytes to transmit
This function sends an APDU to the smart card contained in the reader
connected to by SCardConnect().
It returns a result and the card APDU response.
Value of pioSendPci Meaning
SCARD_PCI_T0 Pre-defined T=0 PCI structure
SCARD_PCI_T1 Pre-defined T=1 PCI structure
from smartcard.scard import *
hresult, hcontext = SCardEstablishContext(SCARD_SCOPE_USER)
hresult, hcard, dwActiveProtocol = SCardConnect(
hcontext, 'SchlumbergerSema Reflex USB v.2 0', SCARD_SHARE_SHARED, SCARD_PROTOCOL_T0)
SELECT = [0xA0, 0xA4, 0x00, 0x00, 0x02]
DF_TELECOM = [0x7F, 0x10]
hresult, response = SCardTransmit(hcard, SCARD_PCI_T0, SELECT + DF_TELECOM)
if hresult != SCARD_S_SUCCESS:
raise error, 'Failed to transmit: ' + SCardGetErrorMessage(hresult)
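The response is returned as a list of bytes; for a completed exchange the
last two bytes are the status words SW1 and SW2. An illustrative (sketch
only) continuation of the example above:
if len(response) >= 2:
print 'SW1: 0x%.2X SW2: 0x%.2X' % (response[-2], response[-1])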
"""
return _scard.SCardTransmit(hcard, pioSendPci, apducommand)
def SCARD_CTL_CODE(code):
r"""
SCARD_CTL_CODE(long code) -> long
Parameters
----------
code: long
This function returns the value of a control code
from smartcard.scard import *
...
CM_IOCTL_GET_FEATURE_REQUEST = SCARD_CTL_CODE(3400)
...
"""
return _scard.SCARD_CTL_CODE(code)
def SCardGetErrorMessage(lErrCode):
r"""
SCardGetErrorMessage(long lErrCode) -> ERRORSTRING *
Parameters
----------
lErrCode: long
This function returns a human readable text for the given PC/SC error code.
from smartcard.scard import *
...
hresult, response = SCardTransmit(hcard, SCARD_PCI_T0, SELECT + DF_TELECOM)
if hresult != SCARD_S_SUCCESS:
raise error, 'Failed to transmit: ' + SCardGetErrorMessage(hresult)
...
"""
return _scard.SCardGetErrorMessage(lErrCode)
error = _scard.error
SCARD_SCOPE_USER = _scard.SCARD_SCOPE_USER
r"""
This function return a human readable text for the given PC/SC error code.
from smartcard.scard import *
...
hresult, response = SCardTransmit(hcard, SCARD_PCI_T0, SELECT + DF_TELECOM)
if hresult != SCARD_S_SUCCESS:
raise error, 'Failed to transmit: ' + SCardGetErrorMessage(hresult)
...
"""
SCARD_SCOPE_TERMINAL = _scard.SCARD_SCOPE_TERMINAL
r"""
This function return a human readable text for the given PC/SC error code.
from smartcard.scard import *
...
hresult, response = SCardTransmit(hcard, SCARD_PCI_T0, SELECT + DF_TELECOM)
if hresult != SCARD_S_SUCCESS:
raise error, 'Failed to transmit: ' + SCardGetErrorMessage(hresult)
...
"""
SCARD_SCOPE_SYSTEM = _scard.SCARD_SCOPE_SYSTEM
r"""
This function return a human readable text for the given PC/SC error code.
from smartcard.scard import *
...
hresult, response = SCardTransmit(hcard, SCARD_PCI_T0, SELECT + DF_TELECOM)
if hresult != SCARD_S_SUCCESS:
raise error, 'Failed to transmit: ' + SCardGetErrorMessage(hresult)
...
"""
SCARD_SHARE_SHARED = _scard.SCARD_SHARE_SHARED
r"""
This function return a human readable text for the given PC/SC error code.
from smartcard.scard import *
...
hresult, response = SCardTransmit(hcard, SCARD_PCI_T0, SELECT + DF_TELECOM)
if hresult != SCARD_S_SUCCESS:
raise error, 'Failed to transmit: ' + SCardGetErrorMessage(hresult)
...
"""
SCARD_SHARE_EXCLUSIVE = _scard.SCARD_SHARE_EXCLUSIVE
r"""
This function return a human readable text for the given PC/SC error code.
from smartcard.scard import *
...
hresult, response = SCardTransmit(hcard, SCARD_PCI_T0, SELECT + DF_TELECOM)
if hresult != SCARD_S_SUCCESS:
raise error, 'Failed to transmit: ' + SCardGetErrorMessage(hresult)
...
"""
SCARD_SHARE_DIRECT = _scard.SCARD_SHARE_DIRECT
r"""
This function return a human readable text for the given PC/SC error code.
from smartcard.scard import *
...
hresult, response = SCardTransmit(hcard, SCARD_PCI_T0, SELECT + DF_TELECOM)
if hresult != SCARD_S_SUCCESS:
raise error, 'Failed to transmit: ' + SCardGetErrorMessage(hresult)
...
"""
SCARD_LEAVE_CARD = _scard.SCARD_LEAVE_CARD
r"""
This function return a human readable text for the given PC/SC error code.
from smartcard.scard import *
...
hresult, response = SCardTransmit(hcard, SCARD_PCI_T0, SELECT + DF_TELECOM)
if hresult != SCARD_S_SUCCESS:
raise error, 'Failed to transmit: ' + SCardGetErrorMessage(hresult)
...
"""
SCARD_RESET_CARD = _scard.SCARD_RESET_CARD
r"""
This function return a human readable text for the given PC/SC | |
"""
This module provides ways of systematically combining operations, sets of
operations, etc., and executing such sequences of operations.
"""
import copy
from collections import Iterable
import functools
import itertools
import operator
import threading
import traceback
from six import string_types
from combtest.action import SerialAction, Action, CancelWalk
import combtest.central_logger as central_logger
from combtest.central_logger import logger
import combtest.encode as encode
from combtest.utils import RangeTree
class WalkFailedError(RuntimeError):
"""
Raised when a :class:`combtest.walk.Walk` failed to execute to completion
for any reason (e.g. one of its operations raised an ``Exception``).
"""
pass
class Walk(object):
"""
A Walk, named after the graph theory concept, is a list of
:class:`combtest.action.Action` to execute, together with a piece of
state that the Actions can manipulate. The notion here is that each
``Action`` performs an operation that takes us through a transition
in state/action space.
:param iterable elems: An iterable of :class:`combtest.action.Action`
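Example (a sketch only; ``MyAction`` stands for any concrete
:class:`combtest.action.Action` subclass supplied by the user):
    walk = Walk(MyAction(), MyAction())
    state = {}
    cancelled = walk.execute(state)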
"""
def __init__(self, *elems):
_elems = []
if len(elems) == 1 and isinstance(elems[0], Iterable):
for elem in elems[0]:
_elems.append(elem)
else:
_elems = []
for elem in elems:
if not isinstance(elem, Action):
raise ValueError("Walks must contain only Actions; "
"got: %s" % str(type(elem)))
_elems.append(elem)
self._elems = _elems
def append(self, elem):
"""
Logically equivalent to list.append
"""
self._elems.append(elem)
def __len__(self):
return len(self._elems)
def __eq__(self, other):
if not isinstance(other, Walk):
return False
return other._elems == self._elems
def __add__(self, other):
"""
:return: concatenation of this ``Walk`` and another
"""
if not isinstance(other, Walk):
raise TypeError("Cannot add a Walk with object of type %s" %
str(type(other)))
elems = self._elems + other._elems
return self.__class__(*elems)
def __iter__(self):
return iter(self._elems)
def execute(self, state, log_errors=True):
"""
Execute the ``Actions`` in order. If an Action raises
:class:`CancelWalk`, we will stop executing immediately.
:return: True if a `CancelWalk` was raised (execution stopped early), False if all Actions ran to completion.
"""
try:
for op in self:
# I will leave it up to the user to decide to log or not, and
# the appropriate verbosity.
op(state=state)
except CancelWalk as e:
# Likewise here: let the user decide to log or not in their layer
return True
except Exception as e:
msg_state = "Walk was: %s\nstate: %s" % (repr(self),
encode.encode(state))
exc = type(e)(str(e) + "\n" + msg_state)
if log_errors:
logger.exception(exc)
logger.error(msg_state)
new_msg = "Walk failed:\n"
new_msg += traceback.format_exc()
new_msg += "\n" + msg_state
wfe = WalkFailedError(new_msg)
if hasattr(e, 'errno'):
wfe.errno = e.errno
raise wfe
return False
def __repr__(self):
return self.as_json()
def as_json(self):
encoded = encode.encode(self)
return encoded
def to_json(self):
return list(self._elems)
@classmethod
def from_json(cls, obj):
assert not isinstance(obj, string_types), str(obj)
out = cls()
for act in obj:
out.append(act)
return out
class Segment(object):
"""
This represents a collection of ``Walk`` portions to run. Example: if you
have 300 Walks made of: [Action1, Action2, SerialAction1, Action3], the first
Segment will be the [Action1, Action2] portion of every one of those 300
walks. After running that Segment, we would then run the SerialAction.
A Segment is: (serial_action_instance, [list, of, Action option sets, ...])
Its iterator produces the Walk portions + consistent indexing of those
portions so that a later portion can reclaim the earlier portion's state.
:param int walk_count: the total number of Walks this Segment will produce
portions for.
:param iterable options: An iterable of Action sets, as you'd get from
MyActionType.get_option_set()
:param SerialAction serial_action_instance: SerialAction instance to
run before the Walk portions.
:param int parent_period: how many options our parent segment is tracking
:param int level: Effectively: how many segments came before this segment
"""
# Presume not threadsafe
def __init__(self, walk_count, options=(),
serial_action_instance=None,
parent_period=1,
level=0):
assert options or serial_action_instance
assert serial_action_instance is None or isinstance(
serial_action_instance, SerialAction)
self._walk_count = walk_count
self._count_walks_produced = 0
self._level = level
# Run *after* the SerialAction, if there is one. That means that if this
# Segment represents the leaf of the tree and we have a SerialAction,
# then options is None.
# tuple([option1_class1, option2_class1, ...],
# [option1_class2, option2_class2, ...],
# ...
# )
self._options = tuple(options)
self._current_repeat_count = 0
self._held_walk = None
self._parent_period = parent_period
# Run *before* the segment; thus it is None if e.g. we have no
# SerialActions at all.
self._serial_action = serial_action_instance
# Created after initialization because of our construction method
# below.
self._children = []
# Used to produce segments of walks
self._state_combinator = None
self._refresh_state_combinator()
def _refresh_state_combinator(self):
self._state_combinator = StateCombinator(*self._options)
def add_child(self, child):
self._children.append(child)
@property
def children(self):
return copy.copy(self._children)
@property
def serial_action(self):
return self._serial_action
@property
def walk_count(self):
return self._walk_count
@property
def level(self):
return self._level
def reset(self):
self._count_walks_produced = 0
self._current_repeat_count = 0
self._held_walk = None
self._refresh_state_combinator()
def __iter__(self):
return self
def next(self):
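# Hand out the currently held walk portion self._parent_period times in a
# row (so this segment's portions line up with the portions produced by
# its parent), then advance the state combinator to the next combination,
# refreshing it once it is exhausted.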
if self._count_walks_produced == self._walk_count:
raise StopIteration()
if self._held_walk is None:
self._held_walk = self._state_combinator.next()
walk = self._held_walk
self._count_walks_produced += 1
self._current_repeat_count += 1
if self._current_repeat_count == self._parent_period:
self._current_repeat_count = 0
check = self._count_walks_produced == self._walk_count
try:
self._held_walk = self._state_combinator.next()
except StopIteration:
self._refresh_state_combinator()
self._held_walk = self._state_combinator.next()
else:
if check:
raise RuntimeError("Should have exhausted iterator")
return walk
def __next__(self):
return self.next()
class Epoch(object):
"""
An Epoch wraps a bunch of Walk portions that can run in parallel, and
provides them consistent walk_id and branch_id.
"""
# Not threadsafe
def __init__(self, walk_idx_start, walk_idx_end, range_tree,
serial_action=None, walks=None, child=None, level=0):
"""
Represents a set of stuff (e.g. walk segments) that can run
in parallel to other such sets of stuff. The sync point will be
executed before the walks. 'child' points to a segment we can run
once this Epoch finishes.
"""
self.serial_action = serial_action
self.walks = tuple(walks)
self.child = child
self.walk_idx_start = walk_idx_start
self.walk_idx_end = walk_idx_end
self.range_tree = range_tree
self._level = level
# This means we calculate once here and once in next(), which is 2 *
# O(N). I've gone this route instead of saving a map of
# walk_idx->branch_id since that would be O(N) in mem, which is worse
# in my opinion. This is all cheap in-memory operation.
self.branch_ids = set([self.range_tree.provide(idx) for idx in
range(walk_idx_start, walk_idx_end)])
self._current_walk_idx = self.walk_idx_start
@property
def level(self):
return self._level
def __iter__(self):
return self
def next(self):
if self._current_walk_idx == self.walk_idx_end:
raise StopIteration()
idx = self._current_walk_idx - self.walk_idx_start
walk = self.walks[idx]
walk_idx = self._current_walk_idx
branch_id = self.range_tree.provide(walk_idx)
self.branch_ids.add(branch_id)
self._current_walk_idx += 1
return walk_idx, branch_id, walk
def __next__(self):
return self.next()
class WalkOptions(object):
"""
A WalkOptions accepts a set of :class:`Action` and :class:`SerialAction`
options and produces an iterable of :class:`Epoch`. Each `Epoch` represents
a set of ``Walk`` segments which can run in parallel, and an optional
``SerialAction`` which should be run *before* the ``Walk`` portions.
Not threadsafe.
:param iterable walk_order: An iterable of :class:`Action` types
"""
def __init__(self, walk_order):
self._serial_action_idxs = []
self._sizes = []
# Calculated on first use, cached here; safe since we are
# intended to be immutable.
self._tree = None
# Maps walk_id->branch_id
self._branch_ids = []
# [ [options1, options2, ...], syncpoint1, [options3, options4, ...], ...]
# And each 'optionsX' is itself an iterable of options as you'd see
# from SomeActionClass.get_option_set().
self._segment_options = []
seg_start_idx = 0
# ('declare' here to cover the 'not walk_order' case for our tail
# flush below)
action_class = None
idx = 0
for idx, action_class in enumerate(walk_order):
if issubclass(action_class, SerialAction):
self._serial_action_idxs.append(idx)
# Append any action-ish segments, and the sync point
actions = walk_order[seg_start_idx:idx]
if actions:
actions = tuple([tuple(ac.get_option_set()) for ac in
actions])
self._segment_options.append(actions)
# else e.g. idx = 0: the first action_class is a SerialAction
self._segment_options.append(action_class)
seg_start_idx = idx + 1
option_set = tuple(action_class.get_option_set())
self._sizes.append(len(option_set))
# Tail flush, if we haven't already.
if not issubclass(action_class, SerialAction):
actions = walk_order[seg_start_idx:]
if actions:
actions = tuple([tuple(ac.get_option_set()) for ac in
actions])
self._segment_options.append(actions)
self.walk_count = functools.reduce(operator.mul, self._sizes)
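# For example, a walk_order of three Action classes whose get_option_set()
# yields 2, 3 and 4 options respectively gives 2 * 3 * 4 = 24 total walks.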
# During iteration, set to a list of:
# [(segment, start_idx, end_idx)]
self._frontier = None
@property
def sizes(self):
"""
:return: A tuple of the size of the option sets for each
:class:`Action` type in this ``WalkOptions``.
"""
return tuple(self._sizes)
def _get_next_options(self, segment_options, start_idx=0):
walk_options = []
idx = start_idx
count = 0
if idx < len(segment_options) and \
isinstance(segment_options[start_idx], tuple):
idx += 1
current_options = segment_options[start_idx]
walk_options.extend(current_options)
count = functools.reduce(operator.mul, [len(wo) for wo in current_options])
% 1 == 0, "1/azimuth_res must be an integer."
assert not 360 % match, "360/match must be an integer."
assert match / azimuth_res % 1 == 0, "match/azimuth_res must be an \
integer."
elevation = np.atleast_1d(np.asarray(elevation))
# calculate delta azimuth to meet the desired great circle distance.
# (according to Bovbjerg et al. 2000: Measuring the head related transfer
# functions of an artificial head with a high directional resolution)
d_az = 2 * np.arcsin(np.clip(
np.sin(gcd / 360 * np.pi) / np.cos(elevation / 180 * np.pi), -1, 1))
d_az = d_az / np.pi * 180
# correct values at the poles
d_az[np.abs(elevation) == 90] = 360
# next smallest value in desired angular azimuth_res
d_az = d_az // azimuth_res * azimuth_res
# adjust d_az to make sure that: match % d_az == 0
for nn in range(d_az.size):
if abs(elevation[nn]) != 90:
while match % d_az[nn] > 1e-15:
# round to precision of azimuth_res to avoid numerical errors
d_az[nn] = np.round((d_az[nn] - azimuth_res)/azimuth_res) \
* azimuth_res
# construct the full sampling grid
azim = np.empty(0)
elev = np.empty(0)
for nn in range(elevation.size):
azim = np.append(azim, np.arange(0, 360, d_az[nn]))
elev = np.append(elev, np.full(int(360 / d_az[nn]), elevation[nn]))
# round to precision of azimuth_res to avoid numerical errors
azim = np.round(azim/azimuth_res) * azimuth_res
# make Coordinates object
sampling = pyfar.Coordinates(
azim, elev, radius, 'sph', 'top_elev', 'deg',
comment='spherical great circle sampling grid')
return sampling
def sph_lebedev(n_points=None, sh_order=None, radius=1.):
"""
Return Lebedev spherical sampling grid.
For detailed information, see [#]_. For a list of available values
for `n_points` and `sh_order` call :py:func:`sph_lebedev`.
Parameters
----------
n_points : int, optional
Number of sampling points in the grid. Related to the spherical
harmonic order by ``n_points = (sh_order + 1)**2``. Either `n_points`
or `sh_order` must be provided. The default is ``None``.
sh_order : int, optional
Maximum applicable spherical harmonic order. Related to the number of
points by ``sh_order = np.sqrt(n_points) - 1``. Either `n_points` or
`sh_order` must be provided. The default is ``None``.
radius : number, optional
Radius of the sampling grid in meters. The default is ``1``.
Returns
-------
sampling : Coordinates
Sampling positions including sampling weights.
Notes
-----
This is a Python port of the Matlab Code written by <NAME> [#]_.
References
----------
.. [#] <NAME>, and <NAME>
"A quadrature formula for the sphere of the 131st
algebraic order of accuracy"
Doklady Mathematics, Vol. 59, No. 3, 1999, pp. 477-481.
.. [#] https://de.mathworks.com/matlabcentral/fileexchange/27097-\
getlebedevsphere
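Examples
--------
A usage sketch (not part of the original documentation): calling the
function without arguments prints the available grids; ``sh_order=10``
should correspond to a 170-point grid.
>>> sph_lebedev()
>>> sampling = sph_lebedev(sh_order=10, radius=1.)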
"""
# possible degrees
degrees = np.array([6, 14, 26, 38, 50, 74, 86, 110, 146, 170, 194, 230,
266, 302, 350, 434, 590, 770, 974, 1202, 1454, 1730,
2030, 2354, 2702, 3074, 3470, 3890, 4334, 4802, 5294,
5810], dtype=int)
# corresponding spherical harmonic orders
orders = np.array((np.floor(np.sqrt(degrees / 1.3) - 1)), dtype=int)
# list possible sh orders and degrees
if n_points is None and sh_order is None:
print('Possible input values:')
for o, d in zip(orders, degrees):
print(f"SH order {o}, number of points {d}")
return None
# check input
if n_points is not None and sh_order is not None:
raise ValueError("Either n_points or sh_order must be None.")
# check if the order is available
if sh_order is not None:
if sh_order not in orders:
str_orders = [f"{o}" for o in orders]
raise ValueError("Invalid spherical harmonic order 'sh_order'. \
Valid orders are: {}.".format(
', '.join(str_orders)))
n_points = int(degrees[orders == sh_order])
# check if n_points is available
if n_points not in degrees:
str_degrees = [f"{d}" for d in degrees]
raise ValueError("Invalid number of points n_points. Valid degrees \
are: {}.".format(', '.join(str_degrees)))
# calculate sh_order
sh_order = int(orders[degrees == n_points])
# get the sampling
leb = external.lebedev_sphere(n_points)
# normalize the weights
weights = leb["w"] / (4 * np.pi)
# generate Coordinates object
sampling = pyfar.Coordinates(
leb["x"] * radius,
leb["y"] * radius,
leb["z"] * radius,
sh_order=sh_order, weights=weights,
comment='spherical Lebedev sampling grid')
return sampling
def sph_fliege(n_points=None, sh_order=None, radius=1.):
"""
Return Fliege-Maier spherical sampling grid.
For detailed information, see [#]_. Call :py:func:`sph_fliege`
for a list of possible values for `n_points` and `sh_order`.
Parameters
----------
n_points : int, optional
Number of sampling points in the grid. Related to the spherical
harmonic order by ``n_points = (sh_order + 1)**2``. Either `n_points`
or `sh_order` must be provided. The default is ``None``.
sh_order : int, optional
Maximum applicable spherical harmonic order. Related to the number of
points by ``sh_order = np.sqrt(n_points) - 1``. Either `n_points` or
`sh_order` must be provided. The default is ``None``.
radius : number, optional
Radius of the sampling grid in meters. The default is ``1``.
Returns
-------
sampling : Coordinates
Sampling positions including sampling weights.
Notes
-----
This implementation uses pre-calculated points from the SOFiA
toolbox [#]_. Possible combinations of `n_points` and `sh_order` are:
+------------+------------+
| `n_points` | `sh_order` |
+============+============+
| 4 | 1 |
+------------+------------+
| 9 | 2 |
+------------+------------+
| 16 | 3 |
+------------+------------+
| 25 | 4 |
+------------+------------+
| 36 | 5 |
+------------+------------+
| 49 | 6 |
+------------+------------+
| 64 | 7 |
+------------+------------+
| 81 | 8 |
+------------+------------+
| 100 | 9 |
+------------+------------+
| 121 | 10 |
+------------+------------+
| 144 | 11 |
+------------+------------+
| 169 | 12 |
+------------+------------+
| 196 | 13 |
+------------+------------+
| 225 | 14 |
+------------+------------+
| 256 | 15 |
+------------+------------+
| 289 | 16 |
+------------+------------+
| 324 | 17 |
+------------+------------+
| 361 | 18 |
+------------+------------+
| 400 | 19 |
+------------+------------+
| 441 | 20 |
+------------+------------+
| 484 | 21 |
+------------+------------+
| 529 | 22 |
+------------+------------+
| 576 | 23 |
+------------+------------+
| 625 | 24 |
+------------+------------+
| 676 | 25 |
+------------+------------+
| 729 | 26 |
+------------+------------+
| 784 | 27 |
+------------+------------+
| 841 | 28 |
+------------+------------+
| 900 | 29 |
+------------+------------+
References
----------
.. [#] <NAME> and <NAME>, "The distribution of points on the sphere
and corresponding cubature formulae," IMA J. Numerical Analysis,
Vol. 19, pp. 317–334, Apr. 1999, doi: 10.1093/imanum/19.2.317.
.. [#] https://audiogroup.web.th-koeln.de/SOFiA_wiki/DOWNLOAD.html
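Examples
--------
A usage sketch (not part of the original documentation); per the table
above, ``n_points=16`` corresponds to ``sh_order=3``.
>>> sampling = sph_fliege(n_points=16)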
"""
# possible values for n_points and sh_order
points = np.array([4, 9, 16, 25, 36, 49, 64, 81, 100, 121, 144, 169, 196,
225, 256, 289, 324, 361, 400, 441, 484, 529, 576, 625,
676, 729, 784, 841, 900], dtype=int)
orders = np.array(np.floor(np.sqrt(points) - 1), dtype=int)
# list possible sh orders and number of points
if n_points is None and sh_order is None:
for o, d in zip(orders, points):
print(f"SH order {o}, number of points {d}")
return None
# check input
if n_points is not None and sh_order is not None:
raise ValueError("Either n_points or sh_order must be None.")
if sh_order is not None:
# check if the order is available
if sh_order not in orders:
str_orders = [f"{o}" for o in orders]
raise ValueError("Invalid spherical harmonic order 'sh_order'. \
Valid orders are: {}.".format(
', '.join(str_orders)))
# assign n_points
n_points = int(points[orders == sh_order])
else:
# check if n_points is available
if n_points not in points:
str_points = [f"{d}" for d in points]
raise ValueError("Invalid number of points n_points. Valid points \
are: {}.".format(', '.join(str_points)))
# assign sh_order
sh_order = int(orders[points == n_points])
# get the sampling points
fliege = sio.loadmat(os.path.join(
os.path.dirname(__file__), "external", "samplings_fliege.mat"),
variable_names=f"Fliege_{int(n_points)}")
fliege = fliege[f"Fliege_{int(n_points)}"]
# generate Coordinates object
sampling = pyfar.Coordinates(
fliege[:, 0],
fliege[:, 1],
radius,
domain='sph', convention='top_colat', unit='rad',
sh_order=sh_order, weights=fliege[:, 2],
comment='spherical Fliege sampling grid')
# switch and invert coordinates in Cartesian representation to be
# consistent with [1]
xyz = sampling.get_cart(convention='right')
sampling.set_cart(xyz[:, 1], xyz[:, 0], -xyz[:, 2])
return sampling
def sph_equal_area(n_points, radius=1.):
"""
Sampling based on partitioning into faces with equal area.
For detailed information, see [#]_.
Parameters
----------
n_points : int
Number of points corresponding to the number of partitions of the
sphere.
radius : number, optional
Radius of the sampling grid in meters. The default is ``1``.
Returns
-------
sampling : Coordinates
Sampling positions. Sampling weights can
"""
Filter rows by subject, predicate, object values.
"""
from argparse import Namespace, SUPPRESS
import typing
from kgtk.cli_argparse import KGTKArgumentParser, KGTKFiles
def parser():
return {
'help': 'Filter rows by subject, predicate, object values.',
'description': 'Filter KGTK file based on values in the node1 (subject), ' +
'label (predicate), and node2 (object) fields.'
}
def add_arguments_extended(parser: KGTKArgumentParser, parsed_shared_args: Namespace):
"""
Parse arguments
Args:
parser (argparse.ArgumentParser)
"""
from kgtk.io.kgtkreader import KgtkReader, KgtkReaderOptions
from kgtk.utils.argparsehelpers import optional_bool
from kgtk.value.kgtkvalueoptions import KgtkValueOptions
_expert: bool = parsed_shared_args._expert
# '$label == "/r/DefinedAs" && $node2=="/c/en/number_zero"'
parser.add_input_file(positional=True)
parser.add_output_file(who="The KGTK output file for records that pass the filter.")
parser.add_output_file(who="The KGTK reject file for records that fail the filter.",
dest="reject_file",
options=["--reject-file"],
metavar="REJECT_FILE",
optional=True)
# parser.add_argument('-dt', "--datatype", action="store", type=str, dest="datatype", help="Datatype of the input file, e.g., tsv or csv.", default="tsv")
parser.add_argument('-p', '--pattern', action="store", type=str, dest="pattern", help="Pattern to filter on, for instance, \" ; P154 ; \" ", required=True)
parser.add_argument('--subj', action="store", type=str, dest='subj_col', help="Subject column, default is node1")
parser.add_argument('--pred', action="store", type=str, dest='pred_col', help="Predicate column, default is label")
parser.add_argument('--obj', action="store", type=str, dest='obj_col', help="Object column, default is node2")
parser.add_argument( "--or", dest="or_pattern", metavar="True|False",
help="'Or' the clauses of the pattern. (default=%(default)s).",
type=optional_bool, nargs='?', const=True, default=False)
parser.add_argument( "--invert", dest="invert", metavar="True|False",
help="Invert the result of applying the pattern. (default=%(default)s).",
type=optional_bool, nargs='?', const=True, default=False)
parser.add_argument( "--show-version", dest="show_version", type=optional_bool, nargs='?', const=True, default=False,
help="Print the version of this program. (default=%(default)s).", metavar="True/False")
KgtkReader.add_debug_arguments(parser, expert=_expert)
KgtkReaderOptions.add_arguments(parser, mode_options=True, expert=_expert)
KgtkValueOptions.add_arguments(parser, expert=_expert)
def run(input_file: KGTKFiles,
output_file: KGTKFiles,
reject_file: KGTKFiles,
pattern: str,
subj_col: typing.Optional[str],
pred_col: typing.Optional[str],
obj_col: typing.Optional[str],
or_pattern: bool,
invert: bool,
show_version: bool,
errors_to_stdout: bool = False,
errors_to_stderr: bool = True,
show_options: bool = False,
verbose: bool = False,
very_verbose: bool = False,
**kwargs # Whatever KgtkFileOptions and KgtkValueOptions want.
)->int:
# import modules locally
from pathlib import Path
import sys
from kgtk.exceptions import kgtk_exception_auto_handler, KGTKException
from kgtk.io.kgtkreader import KgtkReader, KgtkReaderOptions
from kgtk.io.kgtkwriter import KgtkWriter
from kgtk.value.kgtkvalueoptions import KgtkValueOptions
input_kgtk_file: Path = KGTKArgumentParser.get_input_file(input_file)
output_kgtk_file: Path = KGTKArgumentParser.get_output_file(output_file)
reject_kgtk_file: typing.Optional[Path] = KGTKArgumentParser.get_optional_output_file(reject_file, who="KGTK reject file")
# Select where to send error messages, defaulting to stderr.
error_file: typing.TextIO = sys.stdout if errors_to_stdout else sys.stderr
# Build the option structures.
reader_options: KgtkReaderOptions = KgtkReaderOptions.from_dict(kwargs)
value_options: KgtkValueOptions = KgtkValueOptions.from_dict(kwargs)
UPDATE_VERSION: str = "2020-08-06T17:06:06.829542+00:00#Mu9vz3KEPh+beQeSwZ8qGMKrTJzHWZFfZFXY6UrYXJAnNpPSin+5NvkSfxKLMkyJtGyeavgGAz8+74bup7eYaQ=="
if show_version or verbose:
print("kgtk filter version: %s" % UPDATE_VERSION, file=error_file, flush=True)
# Show the final option structures for debugging and documentation.
if show_options:
print("--input-file=%s" % str(input_kgtk_file), file=error_file)
print("--output-file=%s" % str(output_kgtk_file), file=error_file)
if reject_kgtk_file is not None:
print("--reject-file=%s" % str(reject_kgtk_file), file=error_file)
print("--pattern=%s" % str(pattern), file=error_file)
if subj_col is not None:
print("--subj=%s" % str(subj_col), file=error_file)
if pred_col is not None:
print("--pred=%s" % str(pred_col), file=error_file)
if obj_col is not None:
print("--obj=%s" % str(obj_col), file=error_file)
print("--or=%s" % str(or_pattern), file=error_file)
print("--invert=%s" % str(invert), file=error_file)
reader_options.show(out=error_file)
value_options.show(out=error_file)
print("=======", file=error_file, flush=True)
def prepare_filter(pattern: str)->typing.Set[str]:
filt: typing.Set[str] = set()
pattern = pattern.strip()
if len(pattern) == 0:
return filt
target: str
for target in pattern.split(","):
target=target.strip()
if len(target) > 0:
filt.add(target)
return filt
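# Illustrative examples (not from the original source): prepare_filter(" P154, P31 ")
# returns {"P154", "P31"}, while prepare_filter("") returns an empty set,
# which disables that clause of the pattern.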
def single_predicate_filter(kr: KgtkReader,
kw: KgtkWriter,
rw: typing.Optional[KgtkWriter],
pred_idx: int,
pred_filter: typing.Set[str],
):
if verbose:
print("Applying a single predicate filter", file=error_file, flush=True)
pred_filter_value: str = list(pred_filter)[0]
input_line_count: int = 0
reject_line_count: int = 0
output_line_count: int = 0
row: typing.List[str]
for row in kr:
input_line_count += 1
if row[pred_idx] == pred_filter_value:
kw.write(row)
output_line_count += 1
else:
if rw is not None:
rw.write(row)
reject_line_count += 1
if verbose:
print("Read %d rows, rejected %d rows, wrote %d rows." % (input_line_count, reject_line_count, output_line_count))
def single_predicate_filter_inverted(kr: KgtkReader,
kw: KgtkWriter,
rw: typing.Optional[KgtkWriter],
pred_idx: int,
pred_filter: typing.Set[str],
):
if verbose:
print("Applying a single predicate filter inverted", file=error_file, flush=True)
pred_filter_value: str = list(pred_filter)[0]
input_line_count: int = 0
reject_line_count: int = 0
output_line_count: int = 0
row: typing.List[str]
for row in kr:
input_line_count += 1
if row[pred_idx] != pred_filter_value:
kw.write(row)
output_line_count += 1
else:
if rw is not None:
rw.write(row)
reject_line_count += 1
if verbose:
print("Read %d rows, rejected %d rows, wrote %d rows." % (input_line_count, reject_line_count, output_line_count))
def single_object_filter(kr: KgtkReader,
kw: KgtkWriter,
rw: typing.Optional[KgtkWriter],
obj_idx: int,
obj_filter: typing.Set[str],
):
if verbose:
print("Applying a single object filter", file=error_file, flush=True)
obj_filter_value: str = list(obj_filter)[0]
input_line_count: int = 0
reject_line_count: int = 0
output_line_count: int = 0
row: typing.List[str]
for row in kr:
input_line_count += 1
if row[obj_idx] == obj_filter_value:
kw.write(row)
output_line_count += 1
else:
if rw is not None:
rw.write(row)
reject_line_count += 1
if verbose:
print("Read %d rows, rejected %d rows, wrote %d rows." % (input_line_count, reject_line_count, output_line_count))
def single_object_filter_inverted(kr: KgtkReader,
kw: KgtkWriter,
rw: typing.Optional[KgtkWriter],
obj_idx: int,
obj_filter: typing.Set[str],
):
if verbose:
print("Applying a single object filter inverted", file=error_file, flush=True)
obj_filter_value: str = list(obj_filter)[0]
input_line_count: int = 0
reject_line_count: int = 0
output_line_count: int = 0
row: typing.List[str]
for row in kr:
input_line_count += 1
if row[obj_idx] != obj_filter_value:
kw.write(row)
output_line_count += 1
else:
if rw is not None:
rw.write(row)
reject_line_count += 1
if verbose:
print("Read %d rows, rejected %d rows, wrote %d rows." % (input_line_count, reject_line_count, output_line_count))
def general_filter(kr: KgtkReader,
kw: KgtkWriter,
rw: typing.Optional[KgtkWriter],
subj_idx: int,
subj_filter: typing.Set[str],
pred_idx: int,
pred_filter: typing.Set[str],
obj_idx: int,
obj_filter: typing.Set[str]):
if verbose:
print("Applying a general filter", file=error_file, flush=True)
apply_subj_filter: bool = len(subj_filter) > 0
apply_pred_filter: bool = len(pred_filter) > 0
apply_obj_filter: bool = len(obj_filter) > 0
input_line_count: int = 0
reject_line_count: int = 0
output_line_count: int = 0
subj_filter_keep_count: int = 0
pred_filter_keep_count: int = 0
obj_filter_keep_count: int = 0
subj_filter_reject_count: int = 0
pred_filter_reject_count: int = 0
obj_filter_reject_count: int = 0
row: typing.List[str]
for row in kr:
input_line_count += 1
keep: bool = False
reject: bool = False
if apply_subj_filter:
if row[subj_idx] in subj_filter:
keep = True
subj_filter_keep_count += 1
else:
reject = True
subj_filter_reject_count += 1
if apply_pred_filter:
if row[pred_idx] in pred_filter:
keep = True
pred_filter_keep_count += 1
else:
reject = True
pred_filter_reject_count += 1
if apply_obj_filter:
if row[obj_idx] in obj_filter:
keep = True
obj_filter_keep_count += 1
else:
reject = True
obj_filter_reject_count += 1
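# Decide the row's fate: with --or a row is rejected when no clause
# matched (not keep); otherwise (AND semantics) it is rejected when any
# clause failed (reject). XOR-ing with --invert flips the decision.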
if (not keep ^ invert) if or_pattern else (reject ^ invert):
if rw is not None:
rw.write(row)
reject_line_count += 1
else:
kw.write(row)
output_line_count += 1
if verbose:
print("Read %d rows, rejected %d rows, wrote %d rows." % (input_line_count, reject_line_count, output_line_count))
print("Keep counts: subject=%d, predicate=%d, object=%d." % (subj_filter_keep_count, pred_filter_keep_count, obj_filter_keep_count))
print("Reject counts: subject=%d, predicate=%d, object=%d." % (subj_filter_reject_count, pred_filter_reject_count, obj_filter_reject_count))
try:
patterns: typing.List[str] = pattern.split(";")
if len(patterns) != 3:
print("Error: The pattern must have three sections separated by semicolons (two semicolons total).", file=error_file, flush=True)
raise KGTKException("Bad pattern")
subj_filter: typing.Set[str] = prepare_filter(patterns[0])
pred_filter: typing.Set[str] = prepare_filter(patterns[1])
obj_filter: typing.Set[str] = prepare_filter(patterns[2])
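# Illustrative example (not from the original source): the pattern
# " ; P154,P31 ; Q5 " yields subj_filter=set(), pred_filter={"P154", "P31"}
# and obj_filter={"Q5"}.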
if verbose and len(subj_filter) == 0 and len(pred_filter) == 0 and len(obj_filter) == 0:
print("Warning: the filter is empty.", file=error_file, flush=True)
if verbose:
print("Opening the input file: %s" % str(input_kgtk_file), file=error_file, flush=True)
kr: KgtkReader = KgtkReader.open(input_kgtk_file,
options=reader_options,
value_options = value_options,
error_file=error_file,
verbose=verbose,
very_verbose=very_verbose,
)
subj_idx: int = kr.get_node1_column_index(subj_col)
pred_idx: int = kr.get_label_column_index(pred_col)
obj_idx: int = kr.get_node2_column_index(obj_col)
# Complain about a missing column only when it is needed by the pattern.
trouble: bool = False
if subj_idx < 0 and len(subj_filter) > 0:
trouble = True
print("Error: Cannot find the subject column '%s'." % kr.get_node1_canonical_name(subj_col), file=error_file, flush=True)
if pred_idx < 0 and len(pred_filter) > 0:
trouble = True
print("Error: Cannot find the predicate column '%s'." % kr.get_label_canonical_name(pred_col), file=error_file, flush=True)
if obj_idx < 0 and len(obj_filter) > 0:
trouble = True
print("Error: Cannot find the object column '%s'." % kr.get_node2_canonical_name(obj_col), file=error_file, flush=True)
if trouble:
raise KGTKException("Missing columns.")
if verbose:
print("Opening the output file: %s" % str(output_kgtk_file), file=error_file, flush=True)
kw: KgtkWriter = KgtkWriter.open(kr.column_names,
output_kgtk_file,
mode=KgtkWriter.Mode[kr.mode.name],
verbose=verbose,
very_verbose=very_verbose)
rw: typing.Optional[KgtkWriter] = None
if reject_kgtk_file is not None:
if verbose:
print("Opening the reject file: %s" % str(reject_kgtk_file), file=error_file, flush=True)
rw = KgtkWriter.open(kr.column_names,
reject_kgtk_file,
mode=KgtkWriter.Mode[kr.mode.name],
verbose=verbose,
very_verbose=very_verbose)
if len(subj_filter) == 0 and len(pred_filter) == 1 and len(obj_filter) == 0:
if invert:
single_predicate_filter_inverted(kr, kw, rw, pred_idx, pred_filter)
else:
single_predicate_filter(kr, kw, rw, pred_idx, pred_filter)
elif len(subj_filter) == 0 and len(pred_filter) == 0 and len(obj_filter) == 1:
if invert:
single_object_filter_inverted(kr, kw, rw, obj_idx, obj_filter)
else:
single_object_filter(kr, kw, rw, obj_idx, obj_filter)
else:
general_filter(kr, kw, rw, subj_idx, subj_filter, pred_idx, pred_filter, obj_idx, obj_filter)
self.about:
background = pygame.Surface((self.bg_size_x, self.bg_size_y))
background.fill((0, 0, 0))
background.set_alpha(192)
ds.blit(background, (304, 16))
ds.blit(self.text_render, (320, 32))
ds.blit(self.text_2_render, (320, 96))
ds.blit(self.text_3_render, (320, 128))
def draw_select(self, color):
if self.df_on:
self.array_value = self.size-1-abs(self.draw_index-self.result[self.result_index][self.draw_index])
else:
self.array_value = self.result[self.result_index][self.draw_index]
if self.shap_index == 0:
self.draw_bar(color)
elif self.shap_index == 1:
self.draw_dot(color)
elif self.shap_index == 2:
self.draw_circle_dot(color)
def draw_bar(self, color):
pygame.draw.rect(ds, color, (320+(self.bar_width*self.draw_index), ds_Y - 80, self.bar_width+1, -((ds_Y - 112)/(self.size-1))*(self.array_value)))
def draw_dot(self, color):
pygame.draw.rect(ds, color, (320+(self.bar_width*self.draw_index), ds_Y - 80-((ds_Y - 112)/(self.size-1))*(self.array_value), 4, 4))
def draw_circle_dot(self, color):
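# Map the array value to a radius within self.radius and the element index
# to an angle around the circle (starting at the top), then convert the
# polar coordinates to screen x/y for a small square dot.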
self.dot_radius = self.array_value/self.size*self.radius
self.dot_angle = math.radians(self.draw_index/self.size*360+-90)
self.dot_x = self.center_x+(math.cos(self.dot_angle)*self.dot_radius)
self.dot_y = self.center_y+(math.sin(self.dot_angle)*self.dot_radius)
pygame.draw.rect(ds, color, (self.dot_x, self.dot_y, 4, 4))
def draw_text(self):
self.text = "Sorting Algorithm Visualizer 0.2 (Jan 1, 2018)"
self.text_render = font_list[0].render(self.text, True, (255, 255, 255))
self.text_2 = "Visualizating some sorting algrithms for integers."
self.text_2_render = font_list[1].render(self.text_2, True, (255, 255, 255))
self.text_3 = "Copyright(c) 2017 <NAME>"
self.text_3_render = font_list[1].render(self.text_3, True, (255, 255, 255))
def inplay(self):
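# A non-negative speed waits `speed` frames between steps through the
# recorded results; a negative speed advances (1 - speed) result steps on
# every frame, i.e. playback gets faster the more negative it is.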
if self.playing:
self.draw_index = 0
self.result_size = len(self.result)
if self.speed >= 0:
if self.speed_count > self.speed:
if self.result_index + 1 < self.result_size:
self.result_index += 1
self.speed_count = 0
else:
self.speed_count = 0
self.playing = False
self.played = True
self.speed_count += 1
else:
if self.result_index + 1 < self.result_size:
self.result_index += 1-self.speed
if self.result_index >= self.result_size:
self.result_index = self.result_size - 1
else:
self.playing = False
self.played = True
if self.played:
self.played = False
def init_result(self):
self.result_index = 0
self.draw_index = 0
sorter.result = []
sorter.result_hl = []
sorter.add_result()
sorter.add_result()
class Button:
def __init__(self, pos_X, pos_Y, size_X, size_Y, index, text):
self.pos_X = pos_X
self.pos_Y = pos_Y
self.size_X = size_X
self.size_Y = size_Y
self.clicked = -1
self.index = index
self.text = text
self.text_render = font_list[0].render(self.text, True, (0, 0, 0))
self.text_rect = self.text_render.get_rect(center=(self.pos_X + (self.size_X/2), self.pos_Y + (self.size_Y/2)))
def update(self):
if mouse_pos[0] >= self.pos_X and mouse_pos[0] < self.pos_X+self.size_X and mouse_pos[1] >= self.pos_Y and mouse_pos[1] < self.pos_Y+self.size_Y:
self.color = (192, 192, 192)
self.text_color = (64, 64, 64)
if clicked:
if not self.pos_out:
self.color = (128, 128, 128)
self.text_color = (0, 0, 0)
self.clicked += 1
else:
self.pos_out = False
self.clicked = -1
else:
self.color = (64, 64, 64)
self.text_color = (255, 255, 255)
self.clicked = -1
if clicked:
self.pos_out = True
else:
self.pos_out = False
if self.clicked == 0:
self.action()
self.clicked = 1
def draw(self):
pygame.draw.rect(ds, self.color, (self.pos_X, self.pos_Y, self.size_X, self.size_Y))
self.text_render = font_list[0].render(self.text, True, self.text_color)
ds.blit(self.text_render, self.text_rect)
def action(self):
if self.index == 1:
if sorter.size != sorter.newsize:
sorter.size = sorter.newsize
sorter.gene_array()
sorter.init_array()
sorter.sort_array()
sorter.result_index = 0
sorter.draw_index = 0
sorter.playing = True
elif self.index == 2:
sorter.result_index = 0
sorter.draw_index = 0
sorter.playing = True
class Slider:
def __init__(self, pos_X, pos_Y, size_X, size_Y, index, min, max, ini, text):
self.bar_pos_X = pos_X
self.pos_Y = pos_Y
self.pos_X = pos_X
self.bar_size_X = size_X
self.size_Y = size_Y
self.size_X = 32
self.clicked = False
self.bar_clicked = -1
self.bar_input = False
self.bar_input_end = False
self.index = index
self.num = ini
self.min = min
self.max = max
self.text = text
self.num_text = "%d"%ini
self.num_pos_X = self.bar_pos_X + (self.bar_size_X/2)
self.num_pos_Y = self.pos_Y - 24
self.num_size_X = self.bar_pos_X + self.bar_size_X - self.num_pos_X
self.num_size_Y = 24
self.text_render = font_list[1].render(self.text, True, (0, 0, 0))
self.text_color = (255, 255, 255)
self.num_text_color = (255, 255, 255)
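# Linearly map self.num from [min, max] onto the slider track
# [bar_pos_X, bar_pos_X + bar_size_X - size_X] so the handle position
# reflects the current value.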
self.pos_X = int((self.num-self.min+(self.bar_pos_X/(self.bar_size_X-self.size_X)*(self.max-self.min)))/(self.max-self.min)*(self.bar_size_X-self.size_X))
def update(self):
global key
self.pos_in = (mouse_pos[0] >= self.pos_X and mouse_pos[0] < self.pos_X+self.size_X and mouse_pos[1] >= self.pos_Y and mouse_pos[1] < self.pos_Y+self.size_Y)
self.bar_pos_in = (mouse_pos[0] >= self.bar_pos_X and mouse_pos[0] < self.bar_pos_X+self.bar_size_X and mouse_pos[1] >= self.pos_Y and mouse_pos[1] < self.pos_Y+self.size_Y)
self.num_pos_in = (mouse_pos[0] >= self.num_pos_X-4 and mouse_pos[0] < self.num_pos_X+self.num_size_X+4 and mouse_pos[1] >= self.num_pos_Y and mouse_pos[1] < self.num_pos_Y+self.num_size_Y)
if self.clicked or self.pos_in:
self.color = (192, 192, 192)
if clicked:
if not self.pos_out:
self.color = (128, 128, 128)
self.clicked = True
else:
self.pos_out = False
self.clicked = False
else:
self.color = (64, 64, 64)
if clicked:
self.pos_out = True
self.clicked = False
else:
self.pos_out = False
if (self.bar_pos_in or self.num_pos_in) and (not self.pos_in):
if self.bar_input:
self.bar_color = (255, 255, 255)
self.num_text_color = (0, 0, 0)
else:
self.bar_color = (48, 48, 48)
self.num_text_color = (192, 192, 192)
if clicked:
if not self.bar_pos_out:
if self.bar_input:
self.bar_color = (255, 255, 255)
self.num_text_color = (0, 0, 0)
else:
self.bar_color = (16, 16, 16)
self.num_text_color = (255, 255, 255)
self.bar_clicked += 1
else:
self.bar_pos_out = False
self.bar_clicked = -1
else:
if self.bar_input:
self.bar_color = (255, 255, 255)
self.num_text_color = (0, 0, 0)
else:
self.bar_color = (16, 16, 16)
self.num_text_color = (255, 255, 255)
if clicked:
self.bar_pos_out = True
self.bar_input = False
self.bar_input_end = True
else:
self.bar_pos_out = False
if self.clicked:
self.pos_X = mouse_pos[0]-(self.size_X/2)
self.num = int(self.pos_X/(self.bar_size_X-self.size_X)*(self.max-self.min))-int(self.bar_pos_X/(self.bar_size_X-self.size_X)*(self.max-self.min))+self.min
self.num_text = "%d"%self.num
self.action()
if self.pos_X < self.bar_pos_X:
self.pos_X = self.bar_pos_X
if self.pos_X > self.bar_pos_X+self.bar_size_X-self.size_X:
self.pos_X = self.bar_pos_X+self.bar_size_X-self.size_X
if self.bar_clicked == 0:
self.bar_color = (255, 255, 255)
self.num_text_color = (0, 0, 0)
if self.bar_input:
self.bar_input = False
self.bar_input_end = True
else:
self.bar_input = True
self.bar_clicked = 1
if self.bar_input:
for kpn in ('[0]', '[1]', '[2]', '[3]', '[4]', '[5]', '[6]', '[7]', '[8]', '[9]'):
if key == kpn:
key = kpn[1]
break
for kpn in ('0', '1', '2', '3', '4', '5', '6', '7', '8', '9'):
if key == kpn:
self.num_text += key
break
if key == '-' or key == '[-]':
self.num_text += '-'
if key == 'backspace':
self.num_text = self.num_text[:len(self.num_text)-1]
if key == 'return':
self.bar_input = False
self.bar_input_end = True
if self.bar_input_end:
if self.num_text == '' or self.num_text == '-':
self.num = 0
else:
if (self.num_text.count('-') >= 2) or (self.num_text.count('-') == 1 and self.num_text[0] != '-'):
self.num = 0
else:
self.num = int(self.num_text)
if self.num > self.max:
self.num = self.max
if self.num < self.min:
self.num = self.min
self.num_text = "%d"%self.num
self.pos_X = int((self.num-self.min+(self.bar_pos_X/(self.bar_size_X-self.size_X)*(self.max-self.min)))/(self.max-self.min)*(self.bar_size_X-self.size_X))
self.action()
self.bar_input_end = False
def draw(self):
pygame.draw.rect(ds, self.bar_color, (self.bar_pos_X, self.pos_Y, self.bar_size_X, self.size_Y))
pygame.draw.rect(ds, self.color, (self.pos_X, self.pos_Y, self.size_X, self.size_Y))
pygame.draw.rect(ds, self.bar_color, (self.num_pos_X-4, self.num_pos_Y, self.num_size_X+4, self.num_size_Y))
self.text_render = font_list[1].render(self.text, True, self.text_color)
self.num_render = font_list[1].render(self.num_text, True, self.num_text_color)
ds.blit(self.text_render, (self.bar_pos_X, self.num_pos_Y))
ds.blit(self.num_render, (self.num_pos_X, self.num_pos_Y))
def action(self):
if self.index == 1:
sorter.newsize = self.num
if self.index == 2:
sorter.speed = self.num
class ComboBox:
def __init__(self, pos_X, pos_Y, size_X, size_Y, index, list, text):
self.pos_X = pos_X
self.pos_Y = pos_Y
self.size_X = size_X
self.size_Y = size_Y
self.list = list
self.list_pos_Y = self.pos_Y + self.size_Y
self.list_len = len(self.list)
self.list_text_size_Y = 20
self.list_size_Y = self.list_text_size_Y * self.list_len
self.list_text_color = (0, 0, 0)
self.list_show_index = 0
self.list_index = self.list_show_index
self.list_show_text_pos_Y = self.pos_Y + int((self.size_Y - self.list_text_size_Y)*(2/3))
self.text = text
self.text_color = (255, 255, 255)
self.text_pos_Y = self.pos_Y - 20
self.text_render = font_list[1].render(self.text, True, self.text_color)
self.index = index
self.clicked = -1
self.input = False
self.input_end = False
self.list_pos_in = False
def update(self):
self.pos_in = (mouse_pos[0] >= self.pos_X and mouse_pos[0] < self.pos_X+self.size_X and mouse_pos[1] >= self.pos_Y and mouse_pos[1] < self.pos_Y+self.size_Y)
if self.pos_in:
if self.input:
self.color = (16, 16, 16)
self.bar_text_color = (255, 255, 255)
else:
self.color = (48, 48, 48)
self.bar_text_color = (192, 192, 192)
if clicked:
if not self.pos_out:
if self.input:
self.color = (16, 16, 16)
self.bar_text_color = (255, 255, 255)
else:
self.color = (16, 16, 16)
self.bar_text_color = (255, 255, 255)
self.clicked += 1
else:
self.pos_out = False
self.clicked = -1
else:
if self.input:
self.color = (16, 16, 16)
self.bar_text_color = (255, 255, 255)
else:
self.color = (16, 16, 16)
self.bar_text_color = (255, 255, 255)
if clicked:
self.pos_out = True
self.input = False
self.input_end = True
else:
self.pos_out = False
if self.clicked == 0:
self.color = (16, 16, 16)
self.bar_text_color = (255, 255, 255)
if self.input:
self.input = False
self.input_end = True
else:
self.input = True
self.clicked = 1
if self.input:
self.draw_list = True
self.list_pos_in = (mouse_pos[0] >= self.pos_X and mouse_pos[0] < self.pos_X+self.size_X and mouse_pos[1] >= self.list_pos_Y and mouse_pos[1] < self.list_pos_Y+self.list_size_Y)
self.list_index = (mouse_pos[1] - self.list_pos_Y) // self.list_text_size_Y
self.list_text_pos_Y = self.list_pos_Y + (self.list_index*self.list_text_size_Y)
if self.list_pos_in:
self.draw_select = True
else:
self.draw_select = False
else:
self.draw_list = False
if self.input_end:
if self.list_index >= 0 and self.list_index < self.list_len and self.list_pos_in:
self.list_show_index = self.list_index
self.action()
self.input_end = False
def draw(self):
if self.draw_list:
pygame.draw.rect(ds, (255, 255, 255), (self.pos_X, self.list_pos_Y, self.size_X, self.list_size_Y))
if self.draw_select:
pygame.draw.rect(ds, (192, 192, 192), (self.pos_X, self.list_text_pos_Y, self.size_X, self.list_text_size_Y))
for i in range(self.list_len):
self.list_text_render = font_list[1].render(self.list[i], True, self.list_text_color)
ds.blit(self.list_text_render, (self.pos_X+4, self.list_pos_Y + (i*self.list_text_size_Y) + 2))
pygame.draw.rect(ds, self.color, (self.pos_X, self.pos_Y, self.size_X, self.size_Y))
self.list_show_text_render = font_list[1].render(self.list[self.list_show_index], True, self.bar_text_color)
ds.blit(self.text_render, (self.pos_X, self.text_pos_Y))
ds.blit(self.list_show_text_render, (self.pos_X+4, self.list_show_text_pos_Y))
def action(self):
if self.index == 1:
sorter.sort_index = self.list_show_index
elif self.index == 2:
sorter.init_index = self.list_show_index
elif self.index == 3:
sorter.styl_index = self.list_show_index
elif self.index == 4:
sorter.shap_index = self.list_show_index
class CheckBox:
def __init__(self, pos_X, pos_Y, size_X, size_Y, index, text):
self.pos_X = pos_X
self.pos_Y = pos_Y
self.size_X = size_X
self.size_Y = size_Y
self.index = index
self.text = text
self.text_size_Y = 24
self.text_pos_Y = self.pos_Y + int((self.size_Y - self.text_size_Y)*(2/3))
self.clicked = -1
self.checked = False
def update(self):
self.pos_in = mouse_pos[0] >= self.pos_X and mouse_pos[0] < self.pos_X+self.size_X and mouse_pos[1] >= self.pos_Y and mouse_pos[1] < self.pos_Y+self.size_Y
if self.pos_in:
self.color = (48, 48, 48)
self.text_color = (192, 192, 192)
if clicked:
if not self.pos_out:
self.color = (16, 16, 16)
self.text_color = (255, 255, 255)
self.clicked += 1
else:
self.pos_out = False
self.clicked = -1
else:
self.color = (16, 16, 16)
self.text_color = (255, 255, 255)
self.clicked = -1
if clicked:
self.pos_out = True
else:
self.pos_out = False
if self.clicked == 0:
self.checked = not self.checked
self.action()
self.clicked = 1
if self.checked:
self.check_text = "V"
else:
self.check_text = " "
def draw(self):
pygame.draw.rect(ds, self.color, (self.pos_X, self.pos_Y, self.size_X, self.size_Y))
self.text_render = font_list[1].render(self.text, True, self.text_color)
self.check_text_render = font_list[1].render(self.check_text, True, self.text_color)
ds.blit(self.text_render, (self.pos_X+32, self.text_pos_Y))
ds.blit(self.check_text_render, (self.pos_X+8, self.text_pos_Y))
def action(self):
if self.index == 1:
sorter.hl_on = self.checked
if self.index == 2:
sorter.df_on = self.checked
if self.index == 3:
sorter.about = self.checked
class Label:
def __init__(self, pos_X, pos_Y, size_X, size_Y, text):
self.pos_X = pos_X
self.pos_Y = pos_Y
self.size_X = size_X
self.size_Y = size_Y
self.text = text
self.text_color = (255, 255, 255)
self.text_pos_Y = self.pos_Y - 24
self.text_render = font_list[1].render(self.text, True, self.text_color)
def update(self):
return
def draw(self):
ds.blit(self.text_render, (self.pos_X, self.text_pos_Y))
sorter = Sorter()
pygame.init()
sorter.chan = pygame.mixer.Channel(0)
ds_default_X = 1280
ds_default_Y = 720
ds_min_X = 640
ds_min_Y = 720
ds_changed = False
ds_X = ds_default_X
ds_Y = ds_default_Y
user_ds_X = pygame.display.Info().current_w
user_ds_Y = pygame.display.Info().current_h
ds = pygame.display.set_mode([ds_X, ds_Y], pygame.HWSURFACE|pygame.DOUBLEBUF|pygame.RESIZABLE)
pygame.display.set_icon(pygame.image.load('icon.png'))
pygame.display.set_caption("Sorting Algorithm Visualizer 0.2")
font_list = []
font_list.append(pygame.font.SysFont("Arial", 32, bold = True))
font_list.append(pygame.font.SysFont("Arial", 16, bold = True))
gui_list = []
gui_list.append(Button(16, 16, 128, 48, 1, "Sort"))
gui_list.append(Button(160, 16, 128, 48, 2, "Replay"))
gui_list.append(Slider(16, 128, 272, 24, 1, 2, 1024, 16, "Array Size"))
gui_list.append(Slider(16, 192, 272, 24, 2, -50, 50, 0, "Play Speed"))
gui_list.append(Label(160, 616, 128, 32, "Options"))
gui_list.append(CheckBox(160, 616, 128, 32, 1, "Highlight"))
gui_list.append(CheckBox(160, 648, 128, 32, 2, "Difference"))
gui_list.append(CheckBox(160, 680, 128, 32, 3, "About"))
gui_list.append(ComboBox(160, 496, 128, 32, 4, sorter.shap_list, "Shape"))
gui_list.append(ComboBox(160, 376, 128, 32, 3, sorter.styl_list, "Color"))
gui_list.append(ComboBox(16, 256, 128, 32, 1, sorter.sort_list, "Algorithm"))
gui_list.append(ComboBox(160, 256, 128, 32, 2, sorter.init_list, "Input Type"))
sorter.draw_text()
clicked = False
key = ""
while True:
mouse_pos = pygame.mouse.get_pos()
for | |
str or ~azure.mgmt.sql.models.OperationMode
:param storage_key_type: Storage key type. Possible values include: "SharedAccessKey",
"StorageAccessKey".
:type storage_key_type: str or ~azure.mgmt.sql.models.StorageKeyType
:param storage_key: Storage key.
:type storage_key: str
:param storage_uri: Storage Uri.
:type storage_uri: str
"""
_validation = {
'id': {'readonly': True},
'name': {'readonly': True},
'type': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'operation_mode': {'key': 'properties.operationMode', 'type': 'str'},
'storage_key_type': {'key': 'properties.storageKeyType', 'type': 'str'},
'storage_key': {'key': 'properties.storageKey', 'type': 'str'},
'storage_uri': {'key': 'properties.storageUri', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(DatabaseExtensions, self).__init__(**kwargs)
self.operation_mode = kwargs.get('operation_mode', None)
self.storage_key_type = kwargs.get('storage_key_type', None)
self.storage_key = kwargs.get('storage_key', None)
self.storage_uri = kwargs.get('storage_uri', None)
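# Illustrative usage sketch (not part of the generated model code): building a
# DatabaseExtensions request body. Values are placeholders, and the operation_mode
# value is assumed to be a member of the OperationMode enum.
#   extensions = DatabaseExtensions(
#       operation_mode="PolybaseImport",
#       storage_key_type="StorageAccessKey",
#       storage_key="<storage-account-key>",
#       storage_uri="https://myaccount.blob.core.windows.net/backups/db.bacpac",
#   )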
class DatabaseListResult(msrest.serialization.Model):
"""A list of databases.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar value: Array of results.
:vartype value: list[~azure.mgmt.sql.models.Database]
:ivar next_link: Link to retrieve next page of results.
:vartype next_link: str
"""
_validation = {
'value': {'readonly': True},
'next_link': {'readonly': True},
}
_attribute_map = {
'value': {'key': 'value', 'type': '[Database]'},
'next_link': {'key': 'nextLink', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(DatabaseListResult, self).__init__(**kwargs)
self.value = None
self.next_link = None
class DatabaseOperation(ProxyResource):
"""A database operation.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar id: Resource ID.
:vartype id: str
:ivar name: Resource name.
:vartype name: str
:ivar type: Resource type.
:vartype type: str
:ivar database_name: The name of the database the operation is being performed on.
:vartype database_name: str
:ivar operation: The name of operation.
:vartype operation: str
:ivar operation_friendly_name: The friendly name of operation.
:vartype operation_friendly_name: str
:ivar percent_complete: The percentage of the operation completed.
:vartype percent_complete: int
:ivar server_name: The name of the server.
:vartype server_name: str
:ivar start_time: The operation start time.
:vartype start_time: ~datetime.datetime
:ivar state: The operation state. Possible values include: "Pending", "InProgress",
"Succeeded", "Failed", "CancelInProgress", "Cancelled".
:vartype state: str or ~azure.mgmt.sql.models.ManagementOperationState
:ivar error_code: The operation error code.
:vartype error_code: int
:ivar error_description: The operation error description.
:vartype error_description: str
:ivar error_severity: The operation error severity.
:vartype error_severity: int
:ivar is_user_error: Whether or not the error is a user error.
:vartype is_user_error: bool
:ivar estimated_completion_time: The estimated completion time of the operation.
:vartype estimated_completion_time: ~datetime.datetime
:ivar description: The operation description.
:vartype description: str
:ivar is_cancellable: Whether the operation can be cancelled.
:vartype is_cancellable: bool
"""
_validation = {
'id': {'readonly': True},
'name': {'readonly': True},
'type': {'readonly': True},
'database_name': {'readonly': True},
'operation': {'readonly': True},
'operation_friendly_name': {'readonly': True},
'percent_complete': {'readonly': True},
'server_name': {'readonly': True},
'start_time': {'readonly': True},
'state': {'readonly': True},
'error_code': {'readonly': True},
'error_description': {'readonly': True},
'error_severity': {'readonly': True},
'is_user_error': {'readonly': True},
'estimated_completion_time': {'readonly': True},
'description': {'readonly': True},
'is_cancellable': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'database_name': {'key': 'properties.databaseName', 'type': 'str'},
'operation': {'key': 'properties.operation', 'type': 'str'},
'operation_friendly_name': {'key': 'properties.operationFriendlyName', 'type': 'str'},
'percent_complete': {'key': 'properties.percentComplete', 'type': 'int'},
'server_name': {'key': 'properties.serverName', 'type': 'str'},
'start_time': {'key': 'properties.startTime', 'type': 'iso-8601'},
'state': {'key': 'properties.state', 'type': 'str'},
'error_code': {'key': 'properties.errorCode', 'type': 'int'},
'error_description': {'key': 'properties.errorDescription', 'type': 'str'},
'error_severity': {'key': 'properties.errorSeverity', 'type': 'int'},
'is_user_error': {'key': 'properties.isUserError', 'type': 'bool'},
'estimated_completion_time': {'key': 'properties.estimatedCompletionTime', 'type': 'iso-8601'},
'description': {'key': 'properties.description', 'type': 'str'},
'is_cancellable': {'key': 'properties.isCancellable', 'type': 'bool'},
}
def __init__(
self,
**kwargs
):
super(DatabaseOperation, self).__init__(**kwargs)
self.database_name = None
self.operation = None
self.operation_friendly_name = None
self.percent_complete = None
self.server_name = None
self.start_time = None
self.state = None
self.error_code = None
self.error_description = None
self.error_severity = None
self.is_user_error = None
self.estimated_completion_time = None
self.description = None
self.is_cancellable = None
class DatabaseOperationListResult(msrest.serialization.Model):
"""The response to a list database operations request.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar value: Array of results.
:vartype value: list[~azure.mgmt.sql.models.DatabaseOperation]
:ivar next_link: Link to retrieve next page of results.
:vartype next_link: str
"""
_validation = {
'value': {'readonly': True},
'next_link': {'readonly': True},
}
_attribute_map = {
'value': {'key': 'value', 'type': '[DatabaseOperation]'},
'next_link': {'key': 'nextLink', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(DatabaseOperationListResult, self).__init__(**kwargs)
self.value = None
self.next_link = None
class DatabaseSchema(ProxyResource):
"""A database schema resource.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar id: Resource ID.
:vartype id: str
:ivar name: Resource name.
:vartype name: str
:ivar type: Resource type.
:vartype type: str
"""
_validation = {
'id': {'readonly': True},
'name': {'readonly': True},
'type': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(DatabaseSchema, self).__init__(**kwargs)
class DatabaseSchemaListResult(msrest.serialization.Model):
"""A list of database schemas.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar value: Array of results.
:vartype value: list[~azure.mgmt.sql.models.DatabaseSchema]
:ivar next_link: Link to retrieve next page of results.
:vartype next_link: str
"""
_validation = {
'value': {'readonly': True},
'next_link': {'readonly': True},
}
_attribute_map = {
'value': {'key': 'value', 'type': '[DatabaseSchema]'},
'next_link': {'key': 'nextLink', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(DatabaseSchemaListResult, self).__init__(**kwargs)
self.value = None
self.next_link = None
class DatabaseSecurityAlertListResult(msrest.serialization.Model):
"""A list of the database's security alert policies.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar value: Array of results.
:vartype value: list[~azure.mgmt.sql.models.DatabaseSecurityAlertPolicy]
:ivar next_link: Link to retrieve next page of results.
:vartype next_link: str
"""
_validation = {
'value': {'readonly': True},
'next_link': {'readonly': True},
}
_attribute_map = {
'value': {'key': 'value', 'type': '[DatabaseSecurityAlertPolicy]'},
'next_link': {'key': 'nextLink', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(DatabaseSecurityAlertListResult, self).__init__(**kwargs)
self.value = None
self.next_link = None
class DatabaseSecurityAlertPolicy(ProxyResource):
"""A database security alert policy.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar id: Resource ID.
:vartype id: str
:ivar name: Resource name.
:vartype name: str
:ivar type: Resource type.
:vartype type: str
:ivar system_data: SystemData of SecurityAlertPolicyResource.
:vartype system_data: ~azure.mgmt.sql.models.SystemData
:param state: Specifies the state of the policy, whether it is enabled or disabled or a policy
has not been applied yet on the specific database. Possible values include: "Enabled",
"Disabled".
:type state: str or ~azure.mgmt.sql.models.SecurityAlertsPolicyState
:param disabled_alerts: Specifies an array of alerts that are disabled. Allowed values are:
Sql_Injection, Sql_Injection_Vulnerability, Access_Anomaly, Data_Exfiltration, Unsafe_Action,
Brute_Force.
:type disabled_alerts: list[str]
:param email_addresses: Specifies an array of e-mail addresses to which the alert is sent.
:type email_addresses: list[str]
:param email_account_admins: Specifies that the alert is sent to the account administrators.
:type email_account_admins: bool
:param storage_endpoint: Specifies the blob storage endpoint (e.g.
https://MyAccount.blob.core.windows.net). This blob storage will hold all Threat Detection
audit logs.
:type storage_endpoint: str
:param storage_account_access_key: Specifies the identifier key of the Threat Detection audit
storage account.
:type storage_account_access_key: str
:param retention_days: Specifies the number of days to keep in the Threat Detection audit logs.
:type retention_days: int
:ivar creation_time: Specifies the UTC creation time of the policy.
:vartype creation_time: ~datetime.datetime
"""
_validation = {
'id': {'readonly': True},
'name': {'readonly': True},
'type': {'readonly': True},
'system_data': {'readonly': True},
'creation_time': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'system_data': {'key': 'systemData', 'type': 'SystemData'},
'state': {'key': 'properties.state', 'type': 'str'},
'disabled_alerts': {'key': 'properties.disabledAlerts', 'type': '[str]'},
'email_addresses': {'key': 'properties.emailAddresses', 'type': '[str]'},
'email_account_admins': {'key': 'properties.emailAccountAdmins', 'type': 'bool'},
'storage_endpoint': {'key': 'properties.storageEndpoint', 'type': 'str'},
'storage_account_access_key': {'key': 'properties.storageAccountAccessKey', 'type': 'str'},
'retention_days': {'key': 'properties.retentionDays', 'type': 'int'},
'creation_time': {'key': 'properties.creationTime', 'type': 'iso-8601'},
}
def __init__(
self,
**kwargs
):
super(DatabaseSecurityAlertPolicy, self).__init__(**kwargs)
self.system_data = None
self.state = kwargs.get('state', None)
self.disabled_alerts = kwargs.get('disabled_alerts', None)
self.email_addresses = kwargs.get('email_addresses', None)
self.email_account_admins = kwargs.get('email_account_admins', None)
self.storage_endpoint = kwargs.get('storage_endpoint', None)
self.storage_account_access_key = kwargs.get('storage_account_access_key', None)
self.retention_days = kwargs.get('retention_days', None)
self.creation_time = None
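# Illustrative usage sketch (not part of the generated model code): building a
# database security alert policy payload. All values are placeholders; the alert
# names come from the disabled_alerts documentation above.
#   policy = DatabaseSecurityAlertPolicy(
#       state="Enabled",
#       disabled_alerts=["Sql_Injection", "Brute_Force"],
#       email_addresses=["secops@example.com"],
#       email_account_admins=True,
#       retention_days=30,
#   )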
class DatabaseTable(ProxyResource):
"""A database table resource.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar id: Resource ID.
:vartype id: str
:ivar name: Resource name.
:vartype name: str
:ivar type: Resource type.
:vartype type: str
:param temporal_type: The table temporal | |
await msg.edit(embed=embed)
else:
print(self.scfield_emote)
self.sc_config.update_field(div, name, str(type), self.scfield_emote)
await msg.add_reaction("👍")
#await msg.channel.send(embed=utils.format_embed("Updated field " + name + "!", False))
await self.generate_scoreboard_message(ctx, False) # Update the scoreboard, BOI
# Removes a scoreboard field
@commands.command()
async def scremovefield(self, ctx, division, name):
if not isinstance(ctx.channel, discord.DMChannel) and isinstance(ctx.author, discord.Member) and utils.authorize_admin(ctx.guild, ctx.author): # Prevent this from running outside of a guild or by non-admins:
if not self.sc_config.is_scoreboard_loaded():
await ctx.send(embed=utils.format_embed("Error: No scoreboard is currently loaded! Load one with !scload", True))
return
div = self.sc_config.get_division(division)
if div is None:
await ctx.send(embed=utils.format_embed("Error: Invalid divsion name \"" + division + "\""))
return
fields = self.sc_config.get_fields(div)
if name in fields.keys():
embed = discord.Embed(title="Removing scoreboard field:", color=0x4EDB23)
else:
ctx.send(embed=utils.format_embed("Error: Field " + name + " doesn't exist!", True))
return
msg_text = '\n**Name:** ' + name
msg_text += '\n**Warning: This will permanently delete this field and its scores!**'
msg_text += '\n\nTo confirm deletion: React with "❌"'
embed.description = msg_text
msg = await ctx.send(embed=embed)
await msg.add_reaction("❌")
def reaction_check(reaction, user): # Checks if the emoji reaction to scremovefield is valid or not
return user == ctx.author and reaction.message.id == msg.id and str(reaction.emoji) == '❌' # Only accept 'X' reactions from the command sender on the message we sent
try:
reaction, user = await self.bot.wait_for('reaction_add', timeout=60, check=reaction_check)
except asyncio.TimeoutError:
msg_text += '\n**Waiting for a reaction timed out - run this command again**'
embed.description = msg_text
embed.color = 0xDB2323
await msg.edit(embed=embed)
else:
self.sc_config.remove_field(div, name)
#await msg.channel.send(embed=utils.format_embed("Deleted field " + name, False))
await msg.add_reaction("👍")
await self.generate_scoreboard_message(ctx, False) # Update the scoreboard, BOI
# Submits a score for entry
@commands.command()
async def submit(self, ctx, score=-1):
if not isinstance(ctx.channel, discord.DMChannel) and isinstance(ctx.author, discord.Member): # Prevent this from running outside of a guild:
if not self.sc_config.is_scoreboard_loaded():
await ctx.send(embed=utils.format_embed("Error: No scoreboard is currently loaded!", True))
return
#division = "EZ" # DEBUG: Hard-code a division name, here, until we can grab a player's real division
division = self.sc_config.get_player_division(ctx.author.id)
print("Player " + ctx.author.display_name + "(id: " + str(ctx.author.id) + ") submitting unverified score " + str(score))
if division is None: # Player hasn't joined a division yet, prompt them to join
print("Division check returned None, prompting player for division")
res = await self.division(ctx)
if res is False: # Didn't choose a division lol, bail here
return
division = self.sc_config.get_player_division(ctx.author.id)
else:
print("Division check passed, player division \"" + division + "\"")
div = self.sc_config.get_division(division)
if div is None:
print("Division invalid!")
await ctx.send(embed=utils.format_embed("Error: The division you joined (\"" + division + "\") is invalid... somehow. Please contact an admin :("))
return
# Our logic here changes if there's only one field on the scoreboard
# If there's one field, select it by default and just submit scores to that
# For multiple fields, we need to prompt the user to select a field
fields = self.sc_config.get_fields_emoji(div)
if len(fields) == 1:
sub_field = list(fields.values())[0]
else:
sub_field = None
# Our reaction-based field prompt - Validate input if the submitting user reacted to it, and their reaction is a field emoji
def sub_reaction_check(reaction, user):
if user == ctx.author and reaction.message.id == msg.id and str(reaction.emoji) in fields.keys():
self.scsubmit_field[user.id] = fields[str(reaction.emoji)]
return True
return False
try:
if sub_field is None: # If we haven't set a default field to submit to (above), send the reaction prompt and wait for a reaction
embed = discord.Embed(title="Submitting a score:", color=0x4EDB23)
msg_text = "\n" + ctx.author.display_name + ": " + str(score)
msg_text += "\n\n**React with the field to submit this score to!**\n"
for emote, field in fields.items(): # Display the emotes for all the fields we can submit to
msg_text += emote + " - " + field + "\n"
embed.description = msg_text
msg = await ctx.send(embed=embed)
for emote in fields.keys(): # And react to the message with all the fields we can submit to (two for loops yeehaw)
await msg.add_reaction(emote)
reaction, user = await self.bot.wait_for('reaction_add', timeout=60, check=sub_reaction_check) # Now wait for the user to react
except asyncio.TimeoutError:
msg_text += "\n\n**Waiting for a reaction timed out - run this command again**"
embed.description = msg_text
embed.color = 0xDB2323
await msg.edit(embed=embed)
else: # On reaction (or if we have a field to submit to and never prompted for a reaction), submit the score!
if sub_field is None:
sub_field = self.scsubmit_field[ctx.author.id]
print("Reaction-based field set: " + sub_field)
print("Attempting submission to field " + sub_field)
self.sc_config.update_entry(div, sub_field, ctx.author.id, score, False)
if randrange(0, 100) == 0:
scmsg = "You showed us... your ULTIMATE dance... Thank you very much... I can't stop CRYING, BUCKETS of ***TEARS.....***"
else:
scmsg = Scoreboard.score_messages[randrange(len(Scoreboard.score_messages))]
await self.generate_scoreboard_message(ctx, False) # Update the scoreboard
embedd = discord.Embed(title="Score submitted for verification - " + scmsg, description=sub_field + ": " + str(score), colour=0x16E200)
await ctx.send(embed=embedd)
# Joins a player to a division, returns if the player is currently in a division or not
@commands.command()
async def division(self, ctx):
divisions = self.sc_config.get_division_names() # Get a list of divisions and divisions emotes/descriptions
divisions_emotes = self.sc_config.get_division_emotes()
cur_div = self.sc_config.get_player_division(ctx.author.id)
if cur_div is not None:
await ctx.send(embed=utils.format_embed("You're in division \"" + cur_div + "\"!\n\n (At this time, you cannot switch divisions)", False))
return True
# If there's only one divisions, we want to auto-join that one!
if len(divisions) == 1:
selected_div = list(divisions.keys())[0]
else:
selected_div = None
# Helper function to validate reaction-based input for the below prompt
def div_reaction_check(reaction, user):
if user == ctx.author and reaction.message.id == msg.id and str(reaction.emoji) in divisions_emotes.keys():
self.scdiv_division[user.id] = divisions_emotes[str(reaction.emoji)]
return True
return False
try:
if selected_div is None: # Didn't auto-pick a division? Prompt for one
emb = discord.Embed(title="Pick a division to join:", color=0x4EDB23)
desc = ""
for div_emote, div_name in divisions_emotes.items(): # List the divisions to join, their emotes, and descriptions
desc += div_emote + " **" + div_name + ":** " + divisions[div_name] + "\n"
emb.description = desc
msg = await ctx.send(embed=emb)
for emote in divisions_emotes.keys(): # React to the message with the division emotes we can join
await msg.add_reaction(emote)
reaction, user = await self.bot.wait_for('reaction_add', timeout=60, check=div_reaction_check) # And wait for the user to react to pick a division
except asyncio.TimeoutError: # User didn't react in time
desc += "\n\n**Waiting for a reaction timed out - run this command again**"
emb.description = desc
emb.color = 0xDB2323
await msg.edit(embed=emb)
return False
else: # On reaction (or auto-selected division), set the player's division
if selected_div is None: # Didn't auto-select
selected_div = self.scdiv_division[ctx.author.id]
print("Reaction-based division set: " + selected_div)
print("Attempting to set user division: " + ctx.author.display_name + ", " + selected_div)
res = self.sc_config.set_player_division(ctx.author.id, selected_div)
if res:
await ctx.send(embed=utils.format_embed("You've joined division " + selected_div + "!", False))
return True
else:
await ctx.send(embed=utils.format_embed("Unable to join division " + selected_div + " - Please contact an admin", True))
return False
# Sets a user's verified score entry (to a specified score if specified, else to their unverified score if they have one)
@commands.command()
async def verify(self, ctx, division, member: discord.Member, score=-1):
if not isinstance(ctx.channel, discord.DMChannel) and isinstance(ctx.author, discord.Member) and utils.authorize_admin(ctx.guild, ctx.author): # Prevent this from running outside of a guild or by non-admins:
if not self.sc_config.is_scoreboard_loaded():
await ctx.send(embed=utils.format_embed("Error: No scoreboard is currently loaded! Load one with !scload", True))
return
div = self.sc_config.get_division(division)
if div is None:
await ctx.send(embed=utils.format_embed("Error: Invalid divsion name \"" + division + "\"", True))
return
fields = self.sc_config.get_fields_emoji(div)
if len(fields) == 1: # Only one field, just submit to that one by default
ver_field = list(fields.values())[0]
else: # Multiple fields - figure out which later:tm:
ver_field = None
# Validation method when prompting the user to pick a field to verify scores from
def ver_reaction_check(reaction, user):
if user == ctx.author and reaction.message.id == msg.id and str(reaction.emoji) in fields.keys():
self.scverify_field[user.id] = fields[str(reaction.emoji)]
return True
return False
try:
if ver_field is None: # Still need to prompt the user to choose a field to submit to, do it
embed = discord.Embed(title="Verifying score:", color=0x4EDB23)
msg_text = "\n" + member.display_name
if int(score) == | |
default='new', required=True)
class IrActionsServer(models.Model):
""" Server actions model. Server action work on a base model and offer various
type of actions that can be executed automatically, for example using base
action rules, of manually, by adding the action in the 'More' contextual
menu.
Since Odoo 8.0 a button 'Create Menu Action' button is available on the
action form view. It creates an entry in the More menu of the base model.
This allows to create server actions and run them in mass mode easily through
the interface.
The available actions are :
- 'Execute Python Code': a block of python code that will be executed
- 'Create a new Record': create a new record with new values
- 'Write on a Record': update the values of a record
- 'Execute several actions': define an action that triggers several other
server actions
"""
_name = 'ir.actions.server'
_description = 'Server Actions'
_table = 'ir_act_server'
_inherit = 'ir.actions.actions'
_sequence = 'ir_actions_id_seq'
_order = 'sequence,name'
DEFAULT_PYTHON_CODE = """# Available variables:
# - env: Odoo Environment on which the action is triggered
# - model: Odoo Model of the record on which the action is triggered; is a void recordset
# - record: record on which the action is triggered; may be void
# - records: recordset of all records on which the action is triggered in multi-mode; may be void
# - time, datetime, dateutil, timezone: useful Python libraries
# - log: log(message, level='info'): logging function to record debug information in ir.logging table
# - Warning: Warning Exception to use with raise
# To return an action, assign: action = {...}\n\n\n\n"""
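# For reference, a 'code' action body written against the variables documented in
# DEFAULT_PYTHON_CODE above might look like this (illustrative only; assigning
# 'action' is optional):
#
#   for rec in records:
#       log("processing %s" % rec.display_name, level='info')
#   action = {'type': 'ir.actions.act_window_close'}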
@api.model
def _select_objects(self):
records = self.env['ir.model'].search([])
return [(record.model, record.name) for record in records] + [('', '')]
name = fields.Char(string='Action Name', translate=True)
type = fields.Char(default='ir.actions.server')
usage = fields.Selection([
('ir_actions_server', 'Server Action'),
('ir_cron', 'Scheduled Action')], string='Usage',
default='ir_actions_server', required=True)
state = fields.Selection([
('code', 'Execute Python Code'),
('object_create', 'Create a new Record'),
('object_write', 'Update the Record'),
('multi', 'Execute several actions')], string='Action To Do',
default='object_write', required=True,
help="Type of server action. The following values are available:\n"
"- 'Execute Python Code': a block of python code that will be executed\n"
"- 'Create': create a new record with new values\n"
"- 'Update a Record': update the values of a record\n"
"- 'Execute several actions': define an action that triggers several other server actions\n"
"- 'Send Email': automatically send an email (Discuss)\n"
"- 'Add Followers': add followers to a record (Discuss)\n"
"- 'Create Next Activity': create an activity (Discuss)")
# Generic
sequence = fields.Integer(default=5,
help="When dealing with multiple actions, the execution order is "
"based on the sequence. Low number means high priority.")
model_id = fields.Many2one('ir.model', string='Model', required=True, ondelete='cascade',
help="Model on which the server action runs.")
model_name = fields.Char(related='model_id.model', string='Model Name', readonly=True, store=True)
# Python code
code = fields.Text(string='Python Code', groups='base.group_system',
default=DEFAULT_PYTHON_CODE,
help="Write Python code that the action will execute. Some variables are "
"available for use; help about python expression is given in the help tab.")
# Multi
child_ids = fields.Many2many('ir.actions.server', 'rel_server_actions', 'server_id', 'action_id',
string='Child Actions', help='Child server actions that will be executed. Note that the last returned action value will be used as the global return value.')
# Create
crud_model_id = fields.Many2one('ir.model', string='Create/Write Target Model',
oldname='srcmodel_id', help="Model for record creation / update. Set this field only to specify a different model than the base model.")
crud_model_name = fields.Char(related='crud_model_id.model', string='Target Model', readonly=True)
link_field_id = fields.Many2one('ir.model.fields', string='Link using field',
help="Provide the field used to link the newly created record "
"on the record on used by the server action.")
fields_lines = fields.One2many('ir.server.object.lines', 'server_id', string='Value Mapping', copy=True)
@api.constrains('code')
def _check_python_code(self):
for action in self.sudo().filtered('code'):
msg = test_python_expr(expr=action.code.strip(), mode="exec")
if msg:
raise ValidationError(msg)
@api.constrains('child_ids')
def _check_recursion(self):
if not self._check_m2m_recursion('child_ids'):
raise ValidationError(_('Recursion found in child server actions'))
@api.onchange('crud_model_id')
def _onchange_crud_model_id(self):
self.link_field_id = False
self.crud_model_name = self.crud_model_id.model
@api.onchange('model_id')
def _onchange_model_id(self):
self.model_name = self.model_id.model
@api.multi
def create_action(self):
""" Create a contextual action for each server action. """
for action in self:
action.write({'binding_model_id': action.model_id.id,
'binding_type': 'action'})
return True
@api.multi
def unlink_action(self):
""" Remove the contextual actions created for the server actions. """
self.check_access_rights('write', raise_exception=True)
self.filtered('binding_model_id').write({'binding_model_id': False})
return True
@api.model
def run_action_code_multi(self, action, eval_context=None):
safe_eval(action.sudo().code.strip(), eval_context, mode="exec", nocopy=True) # nocopy allows to return 'action'
if 'action' in eval_context:
return eval_context['action']
@api.model
def run_action_multi(self, action, eval_context=None):
res = False
for act in action.child_ids:
result = act.run()
if result:
res = result
return res
@api.model
def run_action_object_write(self, action, eval_context=None):
"""Apply specified write changes to active_id."""
res = {}
for exp in action.fields_lines:
res[exp.col1.name] = exp.eval_value(eval_context=eval_context)[exp.id]
if self._context.get('onchange_self'):
record_cached = self._context['onchange_self']
for field, new_value in res.items():
record_cached[field] = new_value
else:
self.env[action.model_id.model].browse(self._context.get('active_id')).write(res)
@api.model
def run_action_object_create(self, action, eval_context=None):
"""Create specified model object with specified values.
If applicable, link active_id.<self.link_field_id> to the new record.
"""
res = {}
for exp in action.fields_lines:
res[exp.col1.name] = exp.eval_value(eval_context=eval_context)[exp.id]
res = self.env[action.crud_model_id.model].create(res)
if action.link_field_id:
record = self.env[action.model_id.model].browse(self._context.get('active_id'))
record.write({action.link_field_id.name: res.id})
@api.model
def _get_eval_context(self, action=None):
""" Prepare the context used when evaluating python code, like the
python formulas or code server actions.
:param action: the current server action
:type action: browse record
:returns: dict -- evaluation context given to (safe_)safe_eval """
def log(message, level="info"):
with self.pool.cursor() as cr:
cr.execute("""
INSERT INTO ir_logging(create_date, create_uid, type, dbname, name, level, message, path, line, func)
VALUES (NOW() at time zone 'UTC', %s, %s, %s, %s, %s, %s, %s, %s, %s)
""", (self.env.uid, 'server', self._cr.dbname, __name__, level, message, "action", action.id, action.name))
eval_context = super(IrActionsServer, self)._get_eval_context(action=action)
model_name = action.model_id.sudo().model
model = self.env[model_name]
record = None
records = None
if self._context.get('active_model') == model_name and self._context.get('active_id'):
record = model.browse(self._context['active_id'])
if self._context.get('active_model') == model_name and self._context.get('active_ids'):
records = model.browse(self._context['active_ids'])
if self._context.get('onchange_self'):
record = self._context['onchange_self']
eval_context.update({
# orm
'env': self.env,
'model': model,
# Exceptions
'Warning': odoo.exceptions.Warning,
# record
'record': record,
'records': records,
# helpers
'log': log,
})
return eval_context
@api.multi
def run(self):
""" Runs the server action. For each server action, the
run_action_<STATE> method is called. This allows easy overriding
of the server actions.
:param dict context: context should contain the following keys
- active_id: id of the current object (single mode)
- active_model: current model that should equal the action's model
The following keys are optional:
- active_ids: ids of the current records (mass mode). If active_ids
and active_id are present, active_ids is given precedence.
:return: an action_id to be executed, or False if finished correctly without
return action
"""
res = False
for action in self:
eval_context = self._get_eval_context(action)
if hasattr(self, 'run_action_%s_multi' % action.state):
# call the multi method
run_self = self.with_context(eval_context['env'].context)
func = getattr(run_self, 'run_action_%s_multi' % action.state)
res = func(action, eval_context=eval_context)
elif hasattr(self, 'run_action_%s' % action.state):
active_id = self._context.get('active_id')
if not active_id and self._context.get('onchange_self'):
active_id = self._context['onchange_self']._origin.id
if not active_id: # onchange on new record
func = getattr(self, 'run_action_%s' % action.state)
res = func(action, eval_context=eval_context)
active_ids = self._context.get('active_ids', [active_id] if active_id else [])
for active_id in active_ids:
# run context dedicated to a particular active_id
run_self = self.with_context(active_ids=[active_id], active_id=active_id)
eval_context["env"].context = run_self._context
# call the single method related to the action: run_action_<STATE>
func = getattr(run_self, 'run_action_%s' % action.state)
res = func(action, eval_context=eval_context)
return res
@api.model
def _run_actions(self, ids):
"""
Run server actions with given ids.
Allow crons to run specific server actions
"""
return self.browse(ids).run()
class IrServerObjectLines(models.Model):
_name = 'ir.server.object.lines'
_description = 'Server Action value mapping'
_sequence = 'ir_actions_id_seq'
server_id = fields.Many2one('ir.actions.server', string='Related Server Action', ondelete='cascade')
col1 = fields.Many2one('ir.model.fields', string='Field', required=True)
value = fields.Text(required=True, help="Expression containing a value specification. \n"
"When Formula type is selected, this field may be a Python expression "
" that can use the same values as for the code field on the server action.\n"
"If Value type is selected, the value will be used directly without evaluation.")
type = fields.Selection([
('value', 'Value'),
('reference', 'Reference'),
('equation', 'Python expression')
], 'Evaluation Type', default='value', required=True, change_default=True)
resource_ref = fields.Reference(
string='Record', selection='_selection_target_model',
compute='_compute_resource_ref', inverse='_set_resource_ref')
@api.model
def _selection_target_model(self):
models = self.env['ir.model'].search([])
return [(model.model, model.name) for model in models]
@api.depends('col1.relation', 'value', 'type')
def _compute_resource_ref(self):
for line in self:
if line.type | |
'''
Functions that help visualize results
'''
import os
import matplotlib.pyplot as plt
from matplotlib.patches import Ellipse
import numpy as np
from . import config
__all__ = ['plotter',
'segment_plotter',
'plot_poincare',
'plot_breathing']
def plotter(working_data, measures, show=True, figsize=None,
title='Heart Rate Signal Peak Detection', moving_average=False): # pragma: no cover
'''plots the analysis results.
Function that uses calculated measures and data stored in the working_data{} and measures{}
dict objects to visualise the fitted peak detection solution.
Parameters
----------
working_data : dict
dictionary object that contains all heartpy's working data (temp) objects.
will be created if not passed to function
measures : dict
dictionary object used by heartpy to store computed measures. Will be created
if not passed to function
show : bool
when False, function will return a plot object rather than display the results.
default : True
figsize: tuple
Set dimensions of image in inches like in matplotlib. figsize=(x, y)
default: None => (6.4, 4.8)
title : string
title for the plot.
default : "Heart Rate Signal Peak Detection"
moving_average : bool
whether to display the moving average on the plot.
The moving average is used for peak fitting.
default: False
Returns
-------
out : matplotlib plot object
only returned if show == False.
Examples
--------
First let's load and analyse some data to visualise
>>> import heartpy as hp
>>> data, _ = hp.load_exampledata(0)
>>> wd, m = hp.process(data, 100.0)
Then we can visualise
>>> plot_object = plotter(wd, m, show=False, title='some awesome title')
This returns a matplotlib figure object which can be displayed, further
processed, or saved to a file. See the matplotlib API for more information
on how to do this.
'''
#get color palette
colorpalette = config.get_colorpalette_plotter()
# create plot x-var
fs = working_data['sample_rate']
plotx = np.arange(0, len(working_data['hr'])/fs, 1/fs)
#check if there's a rounding error causing differing lengths of plotx and signal
diff = len(plotx) - len(working_data['hr'])
if diff < 0:
#add to linspace
plotx = np.append(plotx, plotx[-1] + (plotx[-2] - plotx[-1]))
elif diff > 0:
#trim linspace
plotx = plotx[0:-diff]
peaklist = working_data['peaklist']
ybeat = working_data['ybeat']
rejectedpeaks = working_data['removed_beats']
rejectedpeaks_y = working_data['removed_beats_y']
fig, ax = plt.subplots(figsize=figsize)
ax.set_title(title)
ax.plot(plotx, working_data['hr'], color=colorpalette[0], label='heart rate signal', zorder=-10)
ax.set_xlabel('Time (s)')
if moving_average:
ax.plot(plotx, working_data['rolling_mean'], color='gray', alpha=0.5)
ax.scatter(np.asarray(peaklist)/fs, ybeat, color=colorpalette[1], label='BPM:%.2f' %(measures['bpm']))
ax.scatter(rejectedpeaks/fs, rejectedpeaks_y, color=colorpalette[2], label='rejected peaks')
#check if rejected segment detection is on and has rejected segments
try:
if len(working_data['rejected_segments']) >= 1:
for segment in working_data['rejected_segments']:
ax.axvspan(segment[0], segment[1], facecolor='red', alpha=0.5)
except:
pass
ax.legend(loc=4, framealpha=0.6)
if show:
fig.show()
else:
return fig
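# Usage sketch for the non-interactive path described in the plotter() docstring:
# the returned matplotlib figure can be saved instead of shown (filename is a
# placeholder):
#   fig = plotter(wd, m, show=False, title='my recording')
#   fig.savefig('peak_detection.png', dpi=150)
#   plt.close(fig)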
def segment_plotter(working_data, measures, title='Heart Rate Signal Peak Detection',
figsize=(6, 6), path='', start=0, end=None, step=1): # pragma: no cover
'''plots analysis results
Function that plots the results of segmentwise processing of heart rate signal
and writes all results to separate files at the path provided.
Parameters
----------
working_data : dict
dictionary object that contains all heartpy's working data (temp) objects.
will be created if not passed to function
measures : dict
dictionary object used by heartpy to store computed measures. Will be created
if not passed to function
title : str
the title used in the plot
figsize : tuple
figsize tuple to be passed to matplotlib
path : str
the path where the files will be stored, folder must exist.
start : int
what segment to start plotting with
default : 0
end : int
last segment to plot. Must be smaller than total number of segments
default : None, will plot until end
step : int
stepsize used when iterating over segments; every step'th segment will be plotted
default : 1
Returns
-------
None
Examples
--------
This function has no examples. See documentation of heartpy for more info.
'''
#sanity check
assert 0 < step < len(working_data['hr']), 'step must be larger than zero and smaller than total number of segments'
#set endpoint if not explicitly defined
if end == None:
end = len(working_data['hr'])
else:
#make sure it is defined within boundary conditions
assert end <= len(working_data['hr']), 'defined "end" endpoint is larger than number of segments'
#add trailing path slash if user omitted it
if not (path.endswith('/') or path.endswith('\\')) and len(path) > 0:
path += '/'
#create path if it doesn't exist
if not os.path.isdir(path):
os.makedirs(path)
#make plots
filenum = 0
for i in range(start, end, step):
wd_segment = {}
m_segment = {}
#assign values to sub-object for plotting purposes
wd_segment['peaklist'] = working_data['peaklist'][i]
wd_segment['ybeat'] = working_data['ybeat'][i]
wd_segment['removed_beats'] = working_data['removed_beats'][i]
wd_segment['removed_beats_y'] = working_data['removed_beats_y'][i]
wd_segment['hr'] = working_data['hr'][i]
wd_segment['rolling_mean'] = working_data['rolling_mean'][i]
wd_segment['sample_rate'] = working_data['sample_rate'][i]
m_segment['bpm'] = measures['bpm'][i]
try:
wd_segment['rejected_segments'] = working_data['rejected_segments'][i]
except:
pass
#plot it using built-in plotter
plt.figure(figsize = figsize)
p = plotter(wd_segment, m_segment, show=False)
p.savefig('%s%i.png' %(path, filenum))
plt.close('all')
filenum += 1
def plot_poincare(working_data, measures, show = True, figsize=None,
title='Poincare plot'): # pragma: no cover
'''visualize poincare plot
function that visualises poincare plot.
Parameters
----------
working_data : dict
dictionary object that contains all heartpy's working data (temp) objects.
will be created if not passed to function
measures : dict
dictionary object used by heartpy to store computed measures. Will be created
if not passed to function
show : bool
whether to show the plot right away, or return a matplotlib object for
further manipulation
figsize: tuple
Set dimensions of image in inches like in matplotlib. figsize=(x, y)
default: None => (6.4, 4.8)
title : str
the title used in the plot
Returns
-------
out : matplotlib plot object
only returned if show == False.
Examples
--------
This function has no examples. See documentation of heartpy for more info.
'''
#get color palette
colorpalette = config.get_colorpalette_poincare()
#get values from dict
x_plus = working_data['poincare']['x_plus']
x_minus = working_data['poincare']['x_minus']
sd1 = measures['sd1']
sd2 = measures['sd2']
#define figure
fig, ax = plt.subplots(subplot_kw={'aspect': 'equal'}, figsize=figsize)
#plot scatter
ax.scatter(x_plus, x_minus, color = colorpalette[0],
alpha = 0.75, label = 'peak-peak intervals')
#plot identity line
mins = np.min([x_plus, x_minus])
maxs = np.max([x_plus, x_minus])
identity_line = np.linspace(np.min(mins), np.max(maxs))
ax.plot(identity_line, identity_line, color='black', alpha=0.5,
label = 'identity line')
#rotate SD1, SD2 vectors 45 degrees counterclockwise
sd1_xrot, sd1_yrot = rotate_vec(0, sd1, 45)
sd2_xrot, sd2_yrot = rotate_vec(0, sd2, 45)
#plot rotated SD1, SD2 lines
ax.plot([np.mean(x_plus), np.mean(x_plus) + sd1_xrot],
[np.mean(x_minus), np.mean(x_minus) + sd1_yrot],
color = colorpalette[1], label = 'SD1')
ax.plot([np.mean(x_plus), np.mean(x_plus) - sd2_xrot],
[np.mean(x_minus), np.mean(x_minus) + sd2_yrot],
color = colorpalette[2], label = 'SD2')
#plot ellipse
xmn = np.mean(x_plus)
ymn = np.mean(x_minus)
el = Ellipse((xmn, ymn), width = sd2 * 2, height = sd1 * 2, angle = 45.0)
ax.add_artist(el)
el.set_edgecolor((0,0,0))
el.fill = False
ax.set_xlabel(r'RRi$_n$ (ms)')
ax.set_ylabel(r'RRi$_{n+1}$ (ms)')
ax.legend(loc=4, framealpha=0.6)
ax.set_title(title)
if show:
fig.show()
else:
return fig
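# Usage sketch (assumes hp.process() populates working_data['poincare'] and the
# sd1/sd2 measures, as in recent heartpy releases):
#   import heartpy as hp
#   data, _ = hp.load_exampledata(0)
#   wd, m = hp.process(data, 100.0)
#   fig = plot_poincare(wd, m, show=False, title='Poincare plot')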
def rotate_vec(x, y, angle):
'''rotates vector around origin point
Function that takes vector and angle, and rotates around origin point
with given amount of degrees.
Helper function for poincare plotting
Parameters
----------
x : int or float
vector x coordinate
y : int or float
vector y coordinate
angle: int or float
the angle of rotation applied to the vector
Returns
-------
x_rot : float
new x coordinate with rotation applied
y_rot : float
new y coordinate with rotation applied
Examples
--------
Given a vector (0,1), if we apply a rotation of 90 degrees clockwise
we expect to get (1,0). Let's test
>>> x_new, y_new = rotate_vec(0, 1, -90)
>>> print('%.3f, %.3f' %(x_new, y_new))
1.000, 0.000
'''
theta = np.radians(angle)
cs = np.cos(theta)
sn = np.sin(theta)
x_rot = (x * cs) - (y * sn)
y_rot = (x * sn) + (y * cs)
return x_rot, y_rot
def plot_breathing(working_data, measures, show=True, figsize=None): # pragma: no cover
'''plots extracted breathing signal and spectrogram
Function that plots the breathing signal extracted from RR-intervals alongside
its computed spectrogram representation.
Parameters
----------
working_data : dict
dictionary object that contains all heartpy's working data (temp) objects.
will be created if not passed to function
measures : dict
dictionary object used by heartpy to store computed measures. Will be created
if not passed to function
show : bool
whether to show | |
<reponame>Vgr255/Lexive<filename>cmds.py
import argparse
import random
import os
from typing import List, Tuple, Optional, Iterable
import discord
from discord.ext.commands.context import Context
from loader import (
casefold,
load,
mechanics,
player_cards,
player_mats,
nemesis_cards,
nemesis_mats,
waves,
treasure_values,
cards_num,
ctypes,
assets,
)
_owner_cmds = ("eval", "reload")
import config
cmds = {}
content_dicts = []
def command(name=None):
def wrapper(func):
nonlocal name
if name is None:
name = func.__name__
cmds[name] = func
return func
return wrapper
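# Registration sketch: the decorator stores each handler in `cmds`, keyed by the
# given name or the function's own name. A hypothetical command would look like:
#   @command("ping")
#   async def ping_cmd(ctx: Context):
#       await ctx.send("pong")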
def get_card(name: str) -> Tuple[Optional[List[str]], Optional[List[str]]]:
mention = None # Optional
if "<@!" in name and ">" in name: # mentioning someone else
index = name.index("<@!")
name, mention = name[:index], name[index:]
for x in ("@", "#"): # ignore what's after
if x in name:
name = name[:name.index(x)]
ass = []
arg = casefold(name)
possible = set()
for func, mapping in content_dicts:
possible.update(mapping.keys())
matches = complete_match(arg, possible)
values = []
if len(matches) > config.max_dupes:
values.append(None)
for x in matches:
for func, d in content_dicts:
if x in d:
for n in d[x]:
if n["name"] not in values:
values.append(n["name"])
return values, ass
for x in matches:
for func, mapping in content_dicts:
if x in mapping:
values.append(func(x))
if x in assets:
ass.append(assets[x])
if not values:
return None, ass
ret = []
if mention is not None:
ret.append(mention)
for x in values:
if ret:
ret.append(r"\NEWLINE/")
ret.extend(x)
return ret, ass
def complete_match(string: str, matches: Iterable[str]) -> list:
possible_matches = set()
for possible in matches:
if string == possible:
return [string]
if possible.startswith(string) or string in possible:
possible_matches.add(possible)
return sorted(possible_matches)
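# Example behaviour of complete_match (hypothetical inputs): an exact match wins
# outright, otherwise every prefix/substring match is returned sorted:
#   complete_match("fire", {"fireball", "fire shard", "frost"})   # -> ["fire shard", "fireball"]
#   complete_match("frost", {"fireball", "frost"})                # -> ["frost"]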
# Create the randomizer and its parser
class ArgParser(argparse.ArgumentParser):
def print_usage(self, file=None) -> None:
super().print_usage(HelperFile())
def print_help(self, file=None):
super().print_help(HelperFile())
def exit(self, status=0, message=None):
raise RuntimeError(message)
class HelperFile:
def write(self, content):
raise RuntimeError(f"```\n{content}\n```")
_randomizer_args = ArgParser(prog="random", description="Generate a random market, mages and nemesis", add_help=False)
_randomizer_args.add_argument("--help", "-h", action="help", default=argparse.SUPPRESS, help="Prints this help message")
_randomizer_args.add_argument("--player-count", "-p", type=int, default=2, choices=range(1, 5), help="How many mages are going to play")
_randomizer_args.add_argument("--gem-count", "-g", type=int, default=3, choices=range(10), help="How many gems to include in the market")
_randomizer_args.add_argument("--force-cheap-gem", "-c", action="store_true", help="If set and --gem-count > 0, forces at least one gem costing at most 3")
_randomizer_args.add_argument("--relic-count", "-r", type=int, default=2, choices=range(10), help="How many relics to include in the market")
_randomizer_args.add_argument("--spell-count", "-s", type=int, default=4, choices=range(10), help="How many spells to include in the market")
_randomizer_args.add_argument("--lowest-difficulty", "-d", type=int, default=1, choices=range(11), help="The lowest nemesis difficulty to allow")
_randomizer_args.add_argument("--highest-difficulty", "-D", type=int, default=10, choices=range(11), help="The highest nemesis difficulty to allow")
_randomizer_args.add_argument("--minimum-rating", "-m", type=int, default=1, choices=range(11), help="The minimum mage complexity rating to allow")
_randomizer_args.add_argument("--maximum-rating", "-M", type=int, default=10, choices=range(11), help="The maximum complexity rating to allow")
#_randomizer_args.add_argument("--expedition", "-e", action="store_true", help="If set, will generate an expedition of length specified in --expedition-length")
#_randomizer_args.add_argument("--expedition-length", "-E", type=int, default=4, choices=range(1, 9), help="How many battles the expedition should be")
#_randomizer_args.add_argument("--boxes", "-b", action="extend", default=waves, choices=waves, help="From which boxes should the content be pulled")
_randomizer_args.add_argument("--verbose", "-v", action="count", default=0, help="Turn on verbose output (up to -vvv)")
def _isin(code: str, *items: str) -> bool:
"""Temporary hack until the parser is functional."""
for item in items:
if f"{item}=" in code:
return True
return False
@command("random")
async def random_cmd(ctx: Context, *args):
# TODO: Add expedition support
try:
namespace = _randomizer_args.parse_args(args)
except (argparse.ArgumentError, RuntimeError) as e:
await ctx.send(str(e))
return
verbose = namespace.verbose
# TODO: Allow users to add and select which boxes they have (probably a SQL db or something)
if verbose >= 1:
await ctx.send(f"Settings: {namespace}")
message = ["Random battle:", ""]
# TODO: Add box handling (and make sure that there's enough mages/markets/etc.)
boxes = list(waves)
message.append("Using ALL released content (currently not configurable, will be in the future)")
message.append("")
nemesis = None
count = 0
while nemesis is None:
count += 1
if count == 1000:
await ctx.send("Could not find a matching nemesis")
return
values = random.choice(list(nemesis_mats.values()))
value = random.choice(values)
if verbose >= 2:
await ctx.send(f"Checking {value['name']}")
if not (namespace.lowest_difficulty <= value["difficulty"] <= namespace.highest_difficulty):
if verbose >= 3:
await ctx.send("Difficulty doesn't match")
continue
if "NOEXP" in value["code"]:
continue
if value["box"] not in boxes:
if verbose >= 3:
await ctx.send("Box doesn't match")
continue
nemesis = value
message.append(f"Fighting {nemesis['name']} (difficulty {nemesis['difficulty']})")
mages = []
count = 0
while len(mages) < namespace.player_count:
count += 1
if count == 1000:
await ctx.send("Could not find enough mages")
return
values = random.choice(list(player_mats.values()))
value = random.choice(values)
if value in mages:
if verbose >= 3:
await ctx.send(f"Found {value['name']} but already in, skipping")
continue
if verbose >= 2:
await ctx.send(f"Checking {value['name']}")
if not (namespace.minimum_rating <= value["rating"] <= namespace.maximum_rating):
if verbose >= 3:
await ctx.send("Complexity rating doesn't match")
continue
if value["box"] not in boxes:
if verbose >= 3:
await ctx.send("Box doesn't match")
continue
mages.append(value)
message.append(f"Using mages {', '.join(m['name'] for m in mages)}")
# Note: this block below checks the code column in a very hacky way
# This is going to be improved when the parser is complete
gems = []
relics = []
spells = []
count = 0
while len(gems) < namespace.gem_count or len(relics) < namespace.relic_count or len(spells) < namespace.spell_count:
count += 1
if count == 5000:
await ctx.send("Could not find enough market cards")
return
for value in random.choice(list(player_cards.values())):
if value["type"] == "G":
if not gems and namespace.force_cheap_gem and value["cost"] > 3:
continue
if len(gems) >= namespace.gem_count:
continue
if value["starter"]:
continue
if _isin(value["code"], "T", "U", "N"):
continue
if value not in gems:
gems.append(value)
if value["type"] == "R":
if len(relics) >= namespace.relic_count:
continue
if value["starter"]:
continue
if _isin(value["code"], "T", "U", "N"):
continue
if value not in relics:
relics.append(value)
if value["type"] == "S":
if len(spells) >= namespace.spell_count:
continue
if value["starter"]:
continue
if _isin(value["code"], "T", "U", "N"):
continue
if value not in spells:
spells.append(value)
gems.sort(key=lambda x: x["cost"])
relics.sort(key=lambda x: x["cost"])
spells.sort(key=lambda x: x["cost"])
for name, container in (("gems", gems), ("relics", relics), ("spells", spells)):
message.append("")
message.append(f"Market {name}:")
message.extend([f"{value['name']} (from {value['box']}, {value['cost']}-cost)" for value in container])
await ctx.send("\n".join(message))
@command()
async def info(ctx: Context, *args):
arg = "".join(args)
if not arg:
await ctx.send("No argument provided.")
return
if not arg.isalpha() and arg.isalnum(): # has numbers and no special characters
await ctx.send(f"Number detected. Did you want `{config.prefix}card` instead?")
return
values, asset = get_card(arg)
if values and values[0] is None: # too many values
to_send = f"Ambiguous value. Possible matches: {', '.join(values[1:])}"
elif not values:
to_send = f"No content found matching {' '.join(args)}"
else:
to_send = "\n".join(values)
for msg in to_send.split(r"\NEWLINE/"):
await ctx.send(msg)
for ass in asset:
with open(os.path.join("assets", ass), mode="rb") as a:
await ctx.send(file=discord.File(a))
@command()
async def card(ctx: Context, *args):
await ctx.send(card_(casefold("".join(args)).upper(), detailed=True))
def card_(arg: str, *, detailed=False) -> str:
if arg.isdigit():
return "No prefix supplied."
index = 0
for i, x in enumerate(arg):
if x.isdigit():
index = i
break
if not index:
return f"No number found. Did you want `{config.prefix}info` instead?"
prefix, num = arg[:index], arg[index:]
deck = None
if ("I" in prefix or prefix == "V") and "T" not in prefix: # Legacy and not Into the Wild
deck = prefix
prefix = None
if not num.isdigit(): # probably has a deck in it, like 1a
if num[0].isdigit() and num[1].isalpha() and num[2:].isdigit():
deck, num = num[:2], num[2:]
if prefix not in cards_num:
return f"Prefix {prefix} is unrecognized"
values = cards_num[prefix]
# this is a hack
if deck and len(deck) == 2 and deck[1] in "ABCD":
deck = deck[0] + deck[1].lower()
if deck not in values:
return f"Deck {deck} not recognized"
num = int(num)
if num not in values[deck]:
return f"Card {num} is unknown"
ctype, name = values[deck][num]
if not detailed:
return name
if ctype == "P":
ctype = "Player card"
elif ctype == "N":
ctype = "Nemesis card"
elif ctype == "T":
ctype = "Treasure card"
elif ctype == "O":
ctype = "Xaxos: Outcast Ability"
else:
ctype = "Unknown card type"
return f"{name} ({ctype})"
@command()
async def box(ctx: Context, *args):
arg = "".join(args)
arg = casefold(arg)
mapping = {casefold(x): x for x in waves}
values = complete_match(arg, mapping)
if len(values) > 1:
await ctx.send(f"Ambiguous value. Possible matches: {', '.join(values)}")
return
if not values:
await ctx.send("No match found")
return
box = mapping[values[0]]
prefix = waves[box][0]
result = ["```", f"Cards from {box}:", ""]
count = len(" ".join(result))
c = {"P": player_cards, "N": nemesis_cards, "T": treasure_values}
for deck in cards_num[prefix]:
if count >= 1800:
result.append("```\\NEWLINE/```")
count = 3
if deck:
result.extend([f"```\\NEWLINE/```", f"Deck: {deck}", ""])
count = len(deck) + | |
#!/usr/bin/python
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# 3. Neither the name of the University nor the names of its contributors
# may be used to endorse or promote products derived from this software
# without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
# OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
# HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
# OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
# SUCH DAMAGE.
import os
import sys
import unittest
try:
from sldr.ldml_exemplars import UCD, Exemplars
except ImportError:
sys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__), '..', 'lib')))
from sldr.ldml_exemplars import UCD, Exemplars
class UCDTests(unittest.TestCase):
def setUp(self):
self.ucd = UCD()
def tearDown(self):
pass
def ignore_findit(self):
from icu import Char, UProperty
maxchar = 0x10ffff
maxchar = 0xffff
for usv in xrange(maxchar):
char = unichr(usv)
# if ((not self.ucd.is_specific_script(char)) and
# (not self.ucd.is_exemplar_wordbreak(char)) and
# (not Char.isUAlphabetic(char))):
if self.ucd.isformat(char) and not Char.hasBinaryProperty(char, UProperty.DEFAULT_IGNORABLE_CODE_POINT):
print '%04X' % usv
self.assertTrue(False)
# marks
def test_mark_true(self):
self.assertTrue(self.ucd.ismark(u'\u0301'))
def test_mark_false(self):
self.assertFalse(self.ucd.ismark(u'e'))
def test_nukta_true(self):
self.assertTrue(self.ucd.isnukta(u'\u093c'))
def test_nukta_false(self):
self.assertFalse(self.ucd.isnukta(u'\u0915'))
# always_combine
def test_nukta_always_combine(self):
self.assertTrue(self.ucd.is_always_combine(u'\u093c'))
def test_diacritic_always_combine(self):
self.assertFalse(self.ucd.is_always_combine(u'\u0300'))
def test_virama_always_combine(self):
self.assertFalse(self.ucd.is_always_combine(u'\u0ccd'))
def test_matra_always_combine(self):
self.assertFalse(self.ucd.is_always_combine(u'\u093e'))
# sometimes_combine
def test_nukta_sometimes_combine(self):
self.assertFalse(self.ucd.is_sometimes_combine(u'\u093c'))
def test_diacritic_sometimes_combine(self):
self.assertTrue(self.ucd.is_sometimes_combine(u'\u0300'))
def test_virama_sometimes_combine(self):
self.assertFalse(self.ucd.is_sometimes_combine(u'\u0ccd'))
def test_matra_sometimes_combine(self):
self.assertFalse(self.ucd.is_sometimes_combine(u'\u093e'))
# never_combine
def test_nukta_never_combine(self):
self.assertFalse(self.ucd.is_never_combine(u'\u093c'))
def test_diacritic_never_combine(self):
self.assertFalse(self.ucd.is_never_combine(u'\u0300'))
def test_virama_never_combine(self):
self.assertTrue(self.ucd.is_never_combine(u'\u0ccd'))
def test_matra_never_combine(self):
self.assertTrue(self.ucd.is_never_combine(u'\u093e'))
# other tests
def test_number_true(self):
self.assertTrue(self.ucd.isnumber(u'1'))
def test_number_false(self):
self.assertFalse(self.ucd.isnumber(u'a'))
def test_format_true(self):
self.assertTrue(self.ucd.isformat(u'\u2060'))
def test_format_false(self):
self.assertFalse(self.ucd.isformat(u'a'))
def test_space_separator_true(self):
self.assertTrue(self.ucd.is_space_separator(u'\u200a'))
def test_space_separator_false(self):
self.assertFalse(self.ucd.is_space_separator(u'a'))
def test_pua_false_bmp(self):
self.assertFalse(self.ucd.is_pua(u'a'))
def test_pua_true_bmp(self):
self.assertTrue(self.ucd.is_pua(u'\ue000'))
def test_pua_false_nonbmp(self):
self.assertFalse(self.ucd.is_pua(u'\U0001D510'))
def test_pua_true_nonbmp_a(self):
self.assertTrue(self.ucd.is_pua(u'\U000fff80'))
def test_pua_true_nonbmp_b(self):
self.assertTrue(self.ucd.is_pua(u'\U000fff80'))
def test_script_specific_true_latin(self):
self.assertTrue(self.ucd.is_specific_script(u'\ua78c'))
def test_script_specific_false_latin(self):
self.assertFalse(self.ucd.is_specific_script(u'\u02bc'))
def test_script_specific_false_chinese(self):
self.assertFalse(self.ucd.is_specific_script(u'\ua700'))
def test_script_specific_false_vedic(self):
self.assertFalse(self.ucd.is_specific_script(u'\u1CD1'))
def test_wordbreak_katakana(self):
self.assertTrue(self.ucd.is_exemplar_wordbreak(u'\u309b'))
def test_wordbreak_aletter(self):
self.assertTrue(self.ucd.is_exemplar_wordbreak(u'\u05f3'))
def test_wordbreak_midletter(self):
self.assertFalse(self.ucd.is_exemplar_wordbreak(u'\u05f4'))
def test_wordbreak_chinese(self):
self.assertFalse(self.ucd.is_exemplar_wordbreak(u'\ua700'))
def test_nfc(self):
text = u'e\u0301'
self.assertEqual(u'\u00e9', self.ucd.normalize('NFC', text))
def test_nfd(self):
text = u'\u00e9'
self.assertEqual(u'e\u0301', self.ucd.normalize('NFD', text))
def test_nfc_tus10(self):
text = u'\u0061\u035C\u0315\u0300\u1DF6\u0062'
self.assertEqual(u'\u00E0\u0315\u1DF6\u035C\u0062', self.ucd.normalize('NFC', text))
def test_nfd_tus10(self):
text = u'\u0061\u035C\u0315\u0300\u1DF6\u0062'
self.assertEqual(u'\u0061\u0300\u0315\u1DF6\u035C\u0062', self.ucd.normalize('NFD', text))
def ignore_nfc_tus11(self):
text = u'\u0061\u0315\u0300\u05AE\u09FE\u0062'
self.assertEqual(u'\u00E0\u05AE\u09FE\u0315\u0062', self.ucd.normalize('NFC', text))
def ignore_nfd_tus11(self):
text = u'\u0061\u0315\u0300\u05AE\u09FE\u0062'
self.assertEqual(u'\u0061\u05AE\u0300\u09FE\u0315\u0062', self.ucd.normalize('NFD', text))
class ExemplarsTests(unittest.TestCase):
def setUp(self):
self.exemplars = Exemplars()
self.exemplars.unittest = True
self.exemplars.frequent = 10
def tearDown(self):
pass
def test_simple_main(self):
"""Simple test for main and digit exemplars.
Also, ignore specified exemplars if they do not occur in the data.
"""
self.exemplars.main = u'z'
self.exemplars.digits = u'0'
self.exemplars.process(u'[{cab.1}]')
self.exemplars.analyze()
self.assertEqual(u'a b c', self.exemplars.main)
self.assertEqual(u'1', self.exemplars.digits)
def test_simple_punctuation(self):
"""Simple test for punctuation and digit exemplars.
Also, ignore specified exemplars if they do not occur in the data.
"""
self.exemplars.punctuation = u','
self.exemplars.digits = u'0'
self.exemplars.process(u'[{cab.1}]')
self.exemplars.analyze()
self.assertEqual(u'. [ ] { }', self.exemplars.punctuation)
self.assertEqual(u'1', self.exemplars.digits)
def test_japanese_katakana(self):
"""Characters with Word_Break property Katakana are letters."""
self.exemplars.process(u'\u307b\u309b')
self.exemplars.analyze()
self.assertEqual(u'\u307b \u309b', self.exemplars.main)
def test_hebrew_aletter(self):
"""Characters with Word_Break property ALetter are not punctuation."""
self.exemplars.process(u'\u05d1\u05f3\u05d2')
self.exemplars.analyze()
self.assertEqual(u'\u05d1 \u05d2 \u05f3', self.exemplars.main)
self.assertEqual(u'', self.exemplars.punctuation)
def test_hebrew_midletter(self):
"""Characters with Word_Break property MidLetter could be classified as non punctuation.
If so, they could be in the main or auxiliary exemplars.
This is allowed by http://unicode.org/reports/tr35/tr35-general.html#Restrictions
However, in most cases, these characters are used as punctuation.
"""
self.exemplars.process(u'\u05f4\u05d0\u05f4')
self.exemplars.analyze()
self.assertEqual(u'\u05d0', self.exemplars.main)
self.assertEqual(u'\u05f4', self.exemplars.punctuation)
def test_chinese(self):
self.exemplars.process(u'\u6606\u660e\ua700')
self.exemplars.analyze()
self.assertEqual(u'\u6606 \u660e', self.exemplars.main)
def test_png(self):
"""Digits are ignored, unless they have diacritics."""
self.exemplars.process(u'1\u0301 2\u0301 3\u0301 4\u0301 5\u0301 6\u0301')
self.exemplars.analyze()
self.assertEqual(u'1 2 3 4 5 6 \u0301', self.exemplars.main)
self.assertEqual(u'', self.exemplars.digits)
def test_not_included(self):
self.exemplars.process(u'\u034f\u00ad\u06dd')
self.exemplars.analyze()
self.assertEqual(u'', self.exemplars.main)
def test_lithuanian_main(self):
self.exemplars.process(u'\u00c1\u0328 \u00e1\u0328 I\u0307\u0301 i\u0307\u0301')
self.exemplars.analyze()
self.assertEqual(u'\u0105 i\u0307 \u0301', self.exemplars.main)
def test_lithuanian_index(self):
self.exemplars.process(u'a \u0105 b c A \u0104 B C Z')
self.exemplars.analyze()
self.assertEqual(u'A \u0104 B C', self.exemplars.index)
def test_english_main(self):
self.exemplars.frequent = 80
self.exemplars.auxiliary = u'\u00e9'
self.exemplars.process(u'r\u00e9sum\u00e9 resume resume')
self.exemplars.analyze()
self.assertEqual(u'e m r s u', self.exemplars.main)
def test_english_auxiliary_nfc(self):
"""Handle exemplars being in NFC.
Also, ignore specified exemplars if they do not occur in the data.
"""
self.exemplars.frequent = 80
self.exemplars.auxiliary = u'\u00e9 \u00fc'
# self.exemplars.auxiliary = u'\u00e9'
self.exemplars.process(u'r\u00e9sum\u00e9 resume resume')
self.exemplars.analyze()
self.assertEqual(u'\u00e9', self.exemplars.auxiliary)
def test_english_auxiliary_nfd(self):
"""Handle exemplars being in NFD."""
self.exemplars.frequent = 80
self.exemplars.auxiliary = u'e\u0301'
self.exemplars.process(u're\u0301sume\u0301 resume resume')
self.exemplars.analyze()
self.assertEqual(u'\u00e9', self.exemplars.auxiliary)
def test_english_index(self):
"""Index set should start with main set, not main set plus auxiliary set."""
self.exemplars.frequent = 80
self.exemplars.auxiliary = u'\u00e9'
self.exemplars.process(u'r\u00e9sum\u00e9 resume resume')
self.exemplars.analyze()
self.assertEqual(u'E M R S U', self.exemplars.index)
def test_spanish(self):
"""Marks occurring on a few bases are not separate."""
self.exemplars.process(u'biling\u00fce')
self.exemplars.analyze()
self.assertEqual(u'b e g i l n \u00fc', self.exemplars.main)
def test_french_main_nfc(self):
"""Marks occurring on many bases are separate.
Even if the characters are combined (NFC).
"""
self.exemplars.many_bases = 4
self.exemplars.process(u'r\u00e9sum\u00e9 \u00e2 \u00ea \u00ee \u00f4 \u00fb')
self.exemplars.analyze()
self.assertEqual(u'a e \u00e9 i m o r s u \u0302', self.exemplars.main)
def test_french_main_nfd(self):
"""Marks occurring on many bases are separate."""
self.exemplars.many_bases = 4
self.exemplars.process(u're\u0301sume\u0301 a\u0302 e\u0302 i\u0302 o\u0302 u\u0302')
self.exemplars.analyze()
self.assertEqual(u'a e \u00e9 i m o r s u \u0302', self.exemplars.main)
def test_french_auxiliary(self):
self.exemplars.process(u'r\u00e9sum\u00e9')
self.exemplars.analyze()
self.assertEqual(u'', self.exemplars.auxiliary)
def test_french_count(self):
"""Infrequently occurring exemplars should go in the auxiliary list, not the main list."""
self.exemplars.many_bases = 4
self.exemplars.frequent = 80
base = u'a e i o u'
grave = u'\u00e0 \u00e8 \u00f9'
circumflex = u'\u00e2 \u00ea \u00ee \u00f4 \u00fb'
self.exemplars.process(base + grave + circumflex)
self.exemplars.analyze()
self.assertEqual(u'a e i o u \u0302', self.exemplars.main)
self.assertEqual(u'\u00e0 \u00e8 \u00f9', self.exemplars.auxiliary)
def test_french_index(self):
self.exemplars.process(u'r\u00e9sum\u00e9')
self.exemplars.analyze()
self.assertEqual(u'\u00c9 M R S U', self.exemplars.index)
    def test_swahili_main(self):
self.exemplars.main = u'ng ng\ua78c'
self.exemplars.process(u'ran rang rang\ua78c')
self.exemplars.analyze()
self.assertEqual(u'a n ng ng\ua78c r', self.exemplars.main)
def test_swahili_index(self):
self.exemplars.main = u'ng ng\ua78c'
self.exemplars.process(u'ran rang rang\ua78c')
self.exemplars.analyze()
self.assertEqual(u'A N NG NG\ua78b R', self.exemplars.index)
def test_swahili_glottal(self):
"""Exemplars have a specific script, unless they have specific Word_Break properties.
The script values of Common or Inherited are not considered to be a specific script.
So U+A78C should be included as it has a specific script,
and U+02BC or U+02C0 should be included as they have the needed Word_Break property.
"""
self.exemplars.process(u'ng\ua78c ng\u02bc ng\u02c0')
self.exemplars.analyze()
self.assertEqual(u'g n \u02bc \u02c0 \ua78c', self.exemplars.main)
def test_devanagari_many(self):
"""Indic matras are always separate."""
self.exemplars.process(u'\u0958\u093e \u0959\u093e \u095a\u093e \u095b\u093e '
u'\u095c\u093e \u095d\u093e \u095e\u093e \u095f\u093e')
self.exemplars.analyze()
self.assertEqual(u'\u0915\u093c \u0916\u093c \u0917\u093c \u091c\u093c '
u'\u0921\u093c \u0922\u093c \u092b\u093c \u092f\u093c '
u'\u093e',
self.exemplars.main)
def test_devanagari_few(self):
"""Indic matras are always separate (even on a few bases).
Even though the matras (Marks) occur on few bases
(which would otherwise classify them as not separate),
they are considered separate.
"""
self.exemplars.process(u'\u0958\u093e \u0959\u093e \u095a\u093e')
self.exemplars.analyze()
self.assertEqual(u'\u0915\u093c \u0916\u093c \u0917\u093c \u093e',
self.exemplars.main)
def test_devanagari_graphemes(self):
"""Graphemes are the found clusters before doing analysis."""
self.exemplars.process(u'\u0958\u093e \u0959\u093e \u0959\u093e '
u'\u095a\u093e \u095a\u093e \u095a\u093e')
self.exemplars.analyze()
self.assertEqual(u'\u0917\u093c\u093e \u0916\u093c\u093e \u0915\u093c\u093e',
self.exemplars.graphemes)
def test_devanagari_frequency(self):
"""For debugging, show the counts of each grapheme."""
self.exemplars.process(u'\u0958\u093e \u0959\u093e \u0959\u093e '
u'\u095a\u093e \u095a\u093e \u095a\u093e')
self.exemplars.analyze()
self.assertEqual(u'\u0917\u093c\u093e:3 \u0916\u093c\u093e:2 \u0915\u093c\u093e:1',
self.exemplars.frequency)
def test_devanagari_index(self):
self.exemplars.many_bases = 1
self.exemplars.process(u'\u0905 \u0906 \u0915 \u0916 '
u'\u0915\u093e \u0916\u093e '
u'\u0958\u093e \u0959\u093e')
self.exemplars.analyze()
self.assertEqual(u'\u0905 \u0906 \u0915 \u0915\u093c \u0916 \u0916\u093c',
self.exemplars.index)
def test_devanagari_vedic(self):
"""Exemplar bases should have a specific script, not the values Common or Inherited.
The character U+1CD1 has a script value of Inherited, but it is a mark, so allow it.
"""
self.exemplars.process(u'\u0915\u1cd1')
self.exemplars.analyze()
self.assertEqual(u'\u0915 \u1cd1', self.exemplars.main)
def test_kannada_main_old(self):
"""Clusters with virama, ZWJ."""
self.exemplars.process(u'\u0cb0\u0ccd\u200d\u0c95 \u0c95\u0ccd\u200d\u0c95')
self.exemplars.analyze()
self.assertEqual(u'\u0c95 \u0cb0 \u0ccd', self.exemplars.main)
def test_kannada_main_new(self):
"""Clusters with ZWJ, virama."""
self.exemplars.process(u'\u0cb0\u200d\u0ccd\u0c95 \u0c95\u200d\u0ccd\u0c95')
self.exemplars.analyze()
self.assertEqual(u'\u0c95 \u0cb0 \u0ccd', self.exemplars.main)
def test_kannada_auxiliary(self):
"""A Default_Ignorable_Code_Point such as ZWJ goes into the auxiliary exemplar."""
self.exemplars.process(u'\u0cb0\u200d\u0ccd\u0c95')
self.exemplars.analyze()
self.assertEqual(u'\u200d', self.exemplars.auxiliary)
def test_kannada_graphemes(self):
"""Clusters are useful for testing rendering."""
self.exemplars.process(u'\u0cb0\u200d\u0ccd\u0c95 \u0cb0\u200d\u0ccd\u0c95 '
u'\u0cb0\u0ccd\u200d\u0c95')
self.exemplars.analyze()
self.assertEqual(u'\u0c95 \u0cb0\u200d\u0ccd \u0cb0\u0ccd\u200d', self.exemplars.graphemes)
<reponame>WolfLink/qsearch<filename>qsearch/gatesets.py
"""
This module defines the Gateset class, which represents the allowed gates and topology for a specific quantum computer.
Several Implementations of Gateset are also defined here.
Several aliases are also defined, for the most common use cases.
Attributes:
ZXZXZCNOTLinear : A Gateset that uses CNOT and the ZXZXZ single qubit parameterization with the linear topology.
U3CNOTLinear : A Gateset that uses CNOT and the U3 single qubit parameterization with the linear topology.
QubitCNOTLinear : A Gateset that uses CNOT and the U3 single qubit parameterization with the linear topology, except it uses an XZXZ instead of a U3 after the control qubit of each CNOT. This results in a gateset that covers the same search space as U3CNOTLinear, but with fewer redundant parameters, and therefore faster runtime.
QubitCNOTRing : Uses U3 and XZXZ like QubitCNOTLinear, but includes a NonadjacentCNOTGate to add a link from the last qubit to the 0th.
    QubitCNOTAdjacencyList : Similar to QubitCNOTLinear and QubitCNOTRing, but takes in an adjacency list and uses NonadjacentCNOTGate to work with a custom topology.
QutritCPIPhaseLinear : A qutrit gateset that uses the CPIPhase gate as its two-qutrit gate, with a linear topology.
QutritCNOTLinear : A qutrit gateset that uses an upgraded version of the CNOT gate as its two-qutrit gate, with a linear topology.
DefaultQubit : The default Gateset for working with qubits. Currently is equivalent to QubitCNOTLinear.
DefaultQutrit : The default Gateset for working with qutrits. Currently is equivalent to QutritCPIPhaseLinear.
Default : The overall default Gateset, which is equivalent to DefaultQubit.
"""
from .gates import *
from .assemblers import flatten_intermediate
import numpy as np
class Gateset():
"""This class defines the supported gates and topology for a specific quantum hardware."""
def __init__(self):
"""Gatesets must set the value of d in their initializer, which represents the size of qudits that are supported (e.g. 2 for qubits or 3 for qutrits)."""
self.d = 0
        raise NotImplementedError("Gatesets must implement their own initializers and must set self.d to reflect the size of the qudits implemented in the gateset")
def initial_layer(self, qudits):
"""
The initial layer in the compilation. Usually a layer of parameterized single-qudit gates.
Args:
qudits : The number of qudits in this circuit.
Returns:
qsearch.gates.Gate : A single Gate representing an initial layer for the circuit
"""
return None # NOTE: Returns A SINGLE gate
# the set of possible multi-qubit gates for searching. Generally a two-qubit gate with single qubit gates after it.
def search_layers(self, qudits):
"""
A set of possible multi-qubit gates for searching. Usually this is a two-qudit gate followed by two single-qudit gates, for every allowed placement of the two-qudit gate. This defines the branching factor of the search tree.
Args:
qudits : The number of qudits in this circuit
Returns:
list : A list of tuples of (gate,weight) where Gate is the Gate representing that possible placement of the two-qudit gate, and weight is the weight or cost of adding that gate in that placement to the final circuit.
"""
return [] # NOTES: Returns a LIST of tuples of (gate, weight)
def branching_factor(self, qudits):
"""
Returns an integer indicating the expected branching factor. Usually this is automatically determined from search_layers, but it may need to be overridden if successors is overridden.
Args:
qudits : The number of qudits in this circuit
Returns:
            int : An integer indicating the expected branching factor
"""
# This is the default implementation, for Gatesets that rely on search_layers
return len(self.search_layers(qudits))
def successors(self, circ, qudits=None):
"""
Returns a list of Gates that are successors in the search tree to the input Gate, circ, representing a current ansatz circuit.
Args:
            circ : The current ansatz Gate.
qudits : The number of qudits in this circuit.
Returns:
list : A list of tuples of (gate, weight) where gate is a Gate that is a successor to circ, and weight is the cost or weight of moving to gate from circ.
"""
# NOTE: Returns a LIST of tuples of (gate, weight)
# NOTE: it is safe to assume that the circuit passed in here was produced by the functions of this class
# This is the default implementation, for Gatesets that rely on search_layers
qudits = int(np.log(circ.matrix([0]*circ.num_inputs).shape[0])/np.log(self.d))
return [(circ.appending(t[0]), t[1]) for t in self.search_layers(qudits)]
def __eq__(self, other):
if self is other:
return True
if self.__module__ == Gateset.__module__:
if type(self) == type(other):
if self.__dict__ == other.__dict__:
return True
return False
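# --- Illustrative sketch, not part of the original module ---
# A minimal custom Gateset, shown here only to make the base-class contract above
# concrete; compare with the bundled gatesets defined below. It assumes the same
# helpers those gatesets use (U3Gate, CZGate, fill_row, linear_topology).
class ExampleCZLinear(Gateset):
    """Hypothetical example: CZ as the two-qubit gate, U3 singles, linear topology."""
    def __init__(self):
        self.single_gate = U3Gate()
        self.two_gate = CZGate()
        self.d = 2  # qubit-sized qudits
    def initial_layer(self, n):
        # one parameterized single-qubit gate per qubit
        return fill_row(self.single_gate, n)
    def search_layers(self, n):
        # list of (gate, weight) tuples, one per linear placement of the CZ;
        # branching_factor() and successors() are inherited from Gateset
        return linear_topology(self.two_gate, self.single_gate, n, self.d)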
class ZXZXZCNOTLinear(Gateset):
"""A Gateset for working with CNOT and single-qubit gates parameterized with ZXZXZGate on the linear topology."""
def __init__(self):
self.single_gate = ZXZXZGate()
self.cnot = CNOTGate()
self.d = 2
def initial_layer(self, n):
return fill_row(self.single_gate, n)
def search_layers(self, n):
return linear_topology(self.cnot, self.single_gate, n, self.d)
class U3CNOTLinear(Gateset):
"""A Gateset for working with CNOT and single-qubit gates parameterized with U3Gate on the linear topology."""
def __init__(self):
self.single_gate = U3Gate()
self.cnot = CNOTGate()
self.d = 2
def initial_layer(self, n):
return fill_row(self.single_gate, n)
def search_layers(self, n):
return linear_topology(self.cnot, self.single_gate, n, self.d)
class QubitCNOTLinear(Gateset):
"""A Gateset for working with CNOT and single-qubit gates parameterized with U3Gate and XZXZGate on the linear topology. This Gateset covers the same search space but uses fewer parameters than ZXZXZCNOTLinear and U3CNOTLinear.
Args:
single_gate: A qsearch.gates.Gate object used as the single-qubit gate placed after the target side of a CNOT.
single_alt: A qsearch.gates.Gate object used as the single-qubit gate placed after the control side of a CNOT.
"""
def __init__(self, single_gate=U3Gate(), single_alt=XZXZGate()):
self.single_gate = single_gate
self.single_alt = single_alt
self.cnot = CNOTGate()
self.d = 2
def initial_layer(self, n):
return fill_row(self.single_gate, n)
def search_layers(self, n):
return linear_topology(self.cnot, self.single_gate, n, self.d, single_alt=self.single_alt)
def branching_factor(self, qudits):
return qudits-1
def successors(self, circ, qudits=None):
if qudits is None:
qudits = int(np.log(circ.matrix([0]*circ.num_inputs).shape[0])/np.log(self.d))
skip_index = find_last_3_cnots_linear(circ)
return [(circ.appending(layer[0]), layer[1]) for layer in linear_topology(self.cnot, self.single_gate, qudits, self.d, single_alt=self.single_alt, skip_index=skip_index)]
class QubitCNOTRing(Gateset):
"""A Gateset for working with CNOT and single-qubit gates parameterized with U3Gate and XZXZGate on the ring topology.
Args:
single_gate: A qsearch.gates.Gate object used as the single-qubit gate placed after the target side of a CNOT.
single_alt: A qsearch.gates.Gate object used as the single-qubit gate placed after the control side of a CNOT.
"""
def __init__(self, single_gate=U3Gate(), single_alt=XZXZGate()):
self.single_gate = single_gate
self.single_alt = single_alt
self.cnot = CNOTGate()
self.d = 2
def initial_layer(self, n):
return fill_row(self.single_gate, n)
def search_layers(self, n):
I = IdentityGate()
gates = linear_topology(self.cnot, self.single_gate, n, self.d, identity_gate=I, single_alt=self.single_alt)
if n == 2:
return gates
finisher = (ProductGate(NonadjacentCNOTGate(n, n-1, 0), KroneckerGate(self.single_gate, *[I]*(n-2), self.single_alt)), 1)
return gates + [finisher]
class QubitCZLinear(Gateset):
"""A Gateset for working with CZ and single-qubit gates parameterized with U3Gate and XZXZGate on the linear topology."""
def __init__(self):
self.single_gate = U3Gate()
self.single_alt = XZXZGate()
self.two_gate = CZGate()
self.d = 2
def initial_layer(self, n):
return fill_row(self.single_gate, n)
def search_layers(self, n):
return linear_topology(self.two_gate, self.single_gate, n, self.d, single_alt=self.single_alt)
def branching_factor(self, qudits):
return qudits-1
def successors(self, circ, qudits=None):
if qudits is None:
qudits = int(np.log(circ.matrix([0]*circ.num_inputs).shape[0])/np.log(self.d))
skip_index = find_last_3_cnots_linear(circ)
return [(circ.appending(layer[0]), layer[1]) for layer in linear_topology(self.two_gate, self.single_gate, qudits, self.d, single_alt=self.single_alt, skip_index=skip_index)]
class QubitISwapLinear(Gateset):
"""A Gateset for working with ISwap and single-qubit gates parameterized with U3Gate and XZXZGate on the linear topology."""
def __init__(self):
self.single_gate = U3Gate()
self.single_alt = XZXZGate()
self.two_gate = ISwapGate()
self.d = 2
def initial_layer(self, n):
return fill_row(self.single_gate, n)
def search_layers(self, n):
return linear_topology(self.two_gate, self.single_gate, n, self.d, single_alt=self.single_alt)
def branching_factor(self, qudits):
return qudits-1
def successors(self, circ, qudits=None):
if qudits is None:
qudits = int(np.log(circ.matrix([0]*circ.num_inputs).shape[0])/np.log(self.d))
skip_index = find_last_3_cnots_linear(circ)
return [(circ.appending(layer[0]), layer[1]) for layer in linear_topology(self.two_gate, self.single_gate, qudits, self.d, single_alt=self.single_alt, skip_index=skip_index)]
class QubitXXLinear(Gateset):
"""A Gateset for working with ISwap and single-qubit gates parameterized with U3Gate and XZXZGate on the linear topology."""
def __init__(self):
self.single_gate = U3Gate()
self.single_alt = XZXZGate()
self.two_gate = XXGate()
self.d = 2
def initial_layer(self, n):
return fill_row(self.single_gate, n)
def search_layers(self, n):
return linear_topology(self.two_gate, self.single_gate, n, self.d, single_alt=self.single_alt)
def branching_factor(self, qudits):
return qudits-1
def successors(self, circ, qudits=None):
if qudits is None:
qudits = int(np.log(circ.matrix([0]*circ.num_inputs).shape[0])/np.log(self.d))
skip_index = find_last_3_cnots_linear(circ)
        return [(circ.appending(layer[0]), layer[1]) for layer in linear_topology(self.two_gate, self.single_gate, qudits, self.d, single_alt=self.single_alt, skip_index=skip_index)]
self.GetBoundaryFaces()
self.GetBoundaryEdges()
def RemoveElements(self, xyz_min_max=None, element_removal_criterion="all", keep_boundary_only=False, return_removed_mesh=False,
compute_edges=True, compute_faces=True, show_plot=False):
"""Removes elements from the mesh given some specified criteria
input:
(x_min,y_min,z_min,x_max,y_max,z_max) [tuple of floats or np.ndarray] of box selection. Deletes all the elements
apart from the ones within this box, either a tuple of 4/6 floats (2D/3D)
or 2D numpy array of shape (2,ndim) where ndim=2,3
element_removal_criterion [str]{"all","any"} the criterion for element removal with box selection.
How many nodes of the element should be within the box in order
not to be removed. Default is "all". "any" implies at least one node
keep_boundary_only [bool] delete all elements apart from the boundary ones
return_removed_mesh [bool] return the removed mesh [inverse of what is selected]
compute_edges [bool] if True also compute new edges
compute_faces [bool] if True also compute new faces (only 3D)
            show_plot                        [bool] if True also plot the new mesh
return:
nodal_map: [1D array] numbering of nodes in old mesh
idx_kept_elements: [1D array] indices of kept element
removed_mesh: [Mesh] an instance of removed mesh, returned only if return_removed_mesh=True
1. Note that this method computes a new mesh without maintaining a copy of the original
2. Different criteria can be mixed for instance removing all elements in the mesh apart from the ones
in the boundary which are within a box
"""
self.__do_memebers_exist__()
if xyz_min_max is None and keep_boundary_only is False:
raise ValueError("Please specify lower and upper bounds of the cut mesh or supply keep_boundary_only=True or both")
ndim = self.InferSpatialDimension()
edim = self.InferElementalDimension()
if keep_boundary_only is True:
if xyz_min_max is None:
# 3D surface meshes are all boundary so just return in case only
# keep_boundary_only==True and xyz_min_max is None
if edim == 2 and ndim == 3:
if return_removed_mesh:
return np.unique(self.elements), np.arange(self.nelem), Mesh()
else:
return np.unique(self.elements), np.arange(self.nelem)
# We need to set this regardless
xyz_min_max = self.Bounds*10.
if isinstance(xyz_min_max,list):
xyz_min_max = np.array(xyz_min_max)
if isinstance(xyz_min_max,tuple):
if ndim==2:
assert len(xyz_min_max)==4
x_min = xyz_min_max[0]
y_min = xyz_min_max[1]
x_max = xyz_min_max[2]
y_max = xyz_min_max[3]
elif ndim == 3:
assert len(xyz_min_max)==6
x_min = xyz_min_max[0]
y_min = xyz_min_max[1]
z_min = xyz_min_max[2]
x_max = xyz_min_max[3]
y_max = xyz_min_max[4]
z_max = xyz_min_max[5]
elif isinstance(xyz_min_max,np.ndarray):
assert xyz_min_max.shape == (2,ndim)
if ndim==2:
x_min = xyz_min_max[0,0]
y_min = xyz_min_max[0,1]
x_max = xyz_min_max[1,0]
y_max = xyz_min_max[1,1]
elif ndim == 3:
x_min = xyz_min_max[0,0]
y_min = xyz_min_max[0,1]
z_min = xyz_min_max[0,2]
x_max = xyz_min_max[1,0]
y_max = xyz_min_max[1,1]
z_max = xyz_min_max[1,2]
if x_min >= x_max:
raise ValueError("Invalid range for mesh removal")
if y_min >= y_max:
raise ValueError("Invalid range for mesh removal")
if ndim == 3:
if z_min >= z_max:
raise ValueError("Invalid range for mesh removal")
all_nelems = self.nelem
if ndim==2:
xe = self.points[self.elements,0]
ye = self.points[self.elements,1]
if element_removal_criterion == "all":
cond = np.logical_and(np.logical_and(np.logical_and(
(xe > x_min).all(axis=1),(ye > y_min).all(axis=1)),
(xe < x_max).all(axis=1)),(ye < y_max).all(axis=1))
elif element_removal_criterion == "any":
cond = np.logical_and(np.logical_and(np.logical_and(
(xe > x_min).any(axis=1),(ye > y_min).any(axis=1)),
(xe < x_max).any(axis=1)),(ye < y_max).any(axis=1))
elif ndim==3:
xe = self.points[self.elements,0]
ye = self.points[self.elements,1]
ze = self.points[self.elements,2]
if element_removal_criterion == "all":
cond = np.logical_and(np.logical_and(np.logical_and(np.logical_and(np.logical_and(
(xe > x_min).all(axis=1),(ye > y_min).all(axis=1)),
(ze > z_min).all(axis=1)),(xe < x_max).all(axis=1)),
(ye < y_max).all(axis=1)), (ze < z_max).all(axis=1))
elif element_removal_criterion == "any":
cond = np.logical_and(np.logical_and(np.logical_and(np.logical_and(np.logical_and(
(xe > x_min).any(axis=1),(ye > y_min).any(axis=1)),
(ze > z_min).any(axis=1)),(xe < x_max).any(axis=1)),
(ye < y_max).any(axis=1)), (ze < z_max).any(axis=1))
boundary_elements = np.arange(self.nelem)
if keep_boundary_only == True:
if edim==2:
boundary_elements = self.GetElementsWithBoundaryEdges()
elif edim==3:
boundary_elements = self.GetElementsWithBoundaryFaces()
cond_boundary = np.zeros(all_nelems,dtype=bool)
cond_boundary[boundary_elements[:,0]] = True
cond = np.logical_and(cond,cond_boundary)
new_elements = self.elements[cond,:]
else:
new_elements = self.elements[cond,:]
new_elements = new_elements.astype(self.elements.dtype)
new_points = np.copy(self.points)
element_type = self.element_type
if return_removed_mesh:
omesh = deepcopy(self)
# RESET FIRST OR MESH WILL CONTAIN INCONSISTENT DATA
self.__reset__()
self.element_type = element_type
self.elements = np.copy(new_elements)
self.nelem = self.elements.shape[0]
nodal_map, inv_elements = np.unique(self.elements,return_inverse=True)
self.points = new_points[nodal_map,:]
self.nnode = self.points.shape[0]
# RE-ORDER ELEMENT CONNECTIVITY
remap_elements = np.arange(self.points.shape[0])
self.elements = remap_elements[inv_elements].reshape(self.nelem,self.elements.shape[1])
# self.edges = None
# self.faces = None
# RECOMPUTE EDGES
if compute_edges == True:
self.GetBoundaryEdges()
# RECOMPUTE FACES
if compute_faces == True:
if self.element_type == "tet" or self.element_type == "hex":
self.GetBoundaryFaces()
if self.edges is not None:
self.GetBoundaryEdges()
if return_removed_mesh:
new_elements = omesh.elements[~cond,:]
new_elements = new_elements.astype(omesh.elements.dtype)
new_points = np.copy(omesh.points)
element_type = omesh.element_type
# RESET FIRST OR MESH WILL CONTAIN INCONSISTENT DATA
mesh = Mesh()
mesh.__reset__()
mesh.element_type = element_type
mesh.elements = np.copy(new_elements)
mesh.nelem = mesh.elements.shape[0]
unique_elements_inv, inv_elements = np.unique(mesh.elements,return_inverse=True)
mesh.points = new_points[unique_elements_inv,:]
mesh.nnode = mesh.points.shape[0]
# RE-ORDER ELEMENT CONNECTIVITY
remap_elements = np.arange(mesh.points.shape[0])
mesh.elements = remap_elements[inv_elements].reshape(mesh.nelem,mesh.elements.shape[1])
# RECOMPUTE EDGES
if compute_edges == True:
mesh.GetBoundaryEdges()
# RECOMPUTE FACES
if compute_faces == True:
if mesh.element_type == "tet" or mesh.element_type == "hex":
mesh.GetBoundaryFaces()
mesh.GetBoundaryEdges()
# PLOT THE NEW MESH
if show_plot == True:
self.SimplePlot()
aranger = np.arange(all_nelems)
idx_kept_elements = aranger[cond]
if return_removed_mesh:
return nodal_map, idx_kept_elements, mesh
else:
return nodal_map, idx_kept_elements
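    # Usage sketch (illustrative; `mesh` is a hypothetical, fully populated Mesh):
    #   box = np.array([[0., 0., 0.], [1., 1., 1.]])        # shape (2, ndim)
    #   nodal_map, kept = mesh.RemoveElements(xyz_min_max=box,
    #                                         element_removal_criterion="any")
    # keeps only the elements with at least one node inside the box and returns the
    # old node numbering plus the indices of the elements that survived.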
def MergeWith(self, mesh, self_solution=None, other_solution=None):
""" Merges self with another mesh:
NOTE: It is the responsibility of the user to ensure that meshes are conforming
"""
self.__do_essential_memebers_exist__()
mesh.__do_essential_memebers_exist__()
if mesh.element_type != self.element_type:
            raise NotImplementedError('Merging two different meshes is not possible yet')
if self.elements.shape[1] != mesh.elements.shape[1]:
            warn('Elements are not of the same order. I am going to modify both meshes to their linear variants')
if self.InferPolynomialDegree() > 1:
dum = self.GetLinearMesh(remap=True)
self.__dict__.update(dum.__dict__)
if mesh.InferPolynomialDegree() > 1:
mesh = mesh.GetLinearMesh(remap=True)
tol = 1e-10
makezero(self.points, tol=tol)
makezero(mesh.points, tol=tol)
from Florence.Tensor import remove_duplicates_2D, unique2d
points = np.concatenate((self.points,mesh.points),axis=0)
rounded_points = np.round(points,decimals=8)
_, idx_mpoints, inv_mpoints = unique2d(rounded_points,order=False,
consider_sort=False,return_index=True,return_inverse=True)
mpoints = points[idx_mpoints,:]
elements = np.concatenate((self.elements, self.elements.max()+1+mesh.elements),axis=0)
nelem = elements.shape[0]
nodeperelem = elements.shape[1]
element_type = self.element_type
unique_elements, inv_elements = np.unique(elements,return_inverse=True)
unique_elements = unique_elements[inv_mpoints]
melements = unique_elements[inv_elements]
melements = melements.reshape(nelem,nodeperelem).astype(np.int64)
self.__reset__()
self.element_type = element_type
self.elements = melements
self.nelem = melements.shape[0]
self.points = mpoints
self.nnode = mpoints.shape[0]
ndim = self.InferSpatialDimension()
if self.element_type == "tet" or self.element_type == "hex":
self.GetBoundaryFaces()
self.GetBoundaryEdges()
elif self.element_type == "tri" or self.element_type == "quad":
self.GetBoundaryEdges()
if self_solution is not None and other_solution is not None:
if isinstance(self_solution,np.ndarray) and isinstance(other_solution,np.ndarray):
if self_solution.ndim == 3 and other_solution.ndim == 3:
solution = np.concatenate((self_solution,other_solution),axis=0)
solution = solution[idx_mpoints,:,:]
elif self_solution.ndim == 2 and other_solution.ndim == 2:
solution = np.concatenate((self_solution,other_solution),axis=0)
solution = solution[idx_mpoints,:]
return solution
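    # Usage sketch (illustrative; mesh_a/mesh_b are hypothetical conforming meshes
    # of the same element type):
    #   mesh_a.MergeWith(mesh_b)                         # mesh_a is updated in place
    #   sol = mesh_a.MergeWith(mesh_b, self_solution=sol_a, other_solution=sol_b)
    # the second form also concatenates and re-maps the nodal solution arrays.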
def Smooth(self, criteria={'aspect_ratio':3}):
"""Performs mesh smoothing based on a given criteria.
input:
criteria [dict] criteria can be either None, {'volume':<number>},
{'area':<number>} or {'aspect_ratio':<number>}. The
number implies that all elements above that number
                        should be refined. Default is {'aspect_ratio':3}
Note that this is a simple mesh smoothing, and does not perform rigorous check, in
particular it does not guarantee mesh conformality
"""
self.__do_essential_memebers_exist__()
if not isinstance(criteria,dict):
raise ValueError("Smoothing criteria should be a dictionry")
if len(criteria.keys()) > 1:
raise ValueError("Smoothing criteria should be a dictionry with only one key")
criterion = list(criteria.keys())[0]
number = list(criteria.values())[0]
if "aspect_ratio" in insensitive(criterion):
quantity = self.AspectRatios()
elif "area" in insensitive(criterion):
quantity = self.Areas()
elif "volume" in insensitive(criterion):
quantity = self.Volumes()
else:
quantity = self.AspectRatios()
non_smooth_elements_idx = np.where(quantity >= number)[0]
if non_smooth_elements_idx.shape[0]==0:
return
if self.element_type == "quad":
refiner_func = self.QuadrilateralProjection
elif self.element_type == "tri":
refiner_func = self.TriangularProjection
else:
raise ValueError("Smoothing of {} elements not supported yet".format(self.element_type))
mesh = refiner_func(points=self.points[self.elements[non_smooth_elements_idx[0],:],:],npoints=2)
for i in range(1,non_smooth_elements_idx.shape[0]):
mesh += refiner_func(points=self.points[self.elements[non_smooth_elements_idx[i],:],:],npoints=2)
smooth_elements_idx = np.where(quantity < number)[0]
if smooth_elements_idx.shape[0]>0:
mesh += self.GetLocalisedMesh(smooth_elements_idx)
self.__update__(mesh)
def LaplacianSmoothing(self, niter=1, algorithm="jacobi", smart=False, quality_assessor=None,
pnodes_movement=None, acceptance_factor=0.2, show_plot=False):
"""Standard Laplacian smoothing
input:
pnodes_movement: [str] either "laplacian" or "interpolation".
For higher order only
algorithm: [str] either "jacobi" or "gauss_seidal"
            smart: [bool] whether to use smart Laplacian or not
quality_assessor: [str] quality evaluator for smart Laplacian.
default is size (area/volume) based
acceptance_factor [float] This is a ratio essentially in that it implies what
percentage of node movement is acceptable
based on quality_assessor. If the quality deteriorates
beyond this ratio the node position won't be updated
"""
self.__do_memebers_exist__()
edim = self.InferElementalDimension()
ndim = self.InferSpatialDimension()
p = self.InferPolynomialDegree()
if p > 1:
if pnodes_movement is None:
raise ValueError("Please specify (pnodes_movement): the strategy for moving high order nodes")
if pnodes_movement != "interpolation" and pnodes_movement != "laplacian":
raise ValueError('pnodes_movement should be either "laplacian" or "interpolation"')
if pnodes_movement == "interpolation":
lmesh = self.GetLinearMesh(remap=True)
else:
lmesh = self
plot_mesh = | |
The URL can be used as a curl command or directly with S3. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_file_with_http_info(file_id, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param str file_id: Unique identifier for the file to retrieve. (required)
:param str tenant_id: Optional parameter to see shared data in another tenant
:param str presigned_url_mode: Optional parameter to specify presigned url's content-disposition. If not specified, the browser will determine the default behavior. Possible values: Attachment, Inline, Browser
:param bool include_volume_metadata: Optional parameter to return volume's metadata
:param str metadata_include: Optional parameter to specify comma separated patterns to include metadata by their field names.
:param str metadata_exclude: Optional parameter to specify comma separated patterns to exclude metadata by their field names.
:param _return_http_data_only: response data without head status code
and headers
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: tuple(FileResponse, status_code(int), headers(HTTPHeaderDict))
If the method is called asynchronously,
returns the request thread.
"""
local_var_params = locals()
all_params = [
'file_id',
'tenant_id',
'presigned_url_mode',
'include_volume_metadata',
'metadata_include',
'metadata_exclude'
]
all_params.extend(
[
'async_req',
'_return_http_data_only',
'_preload_content',
'_request_timeout'
]
)
for key, val in six.iteritems(local_var_params['kwargs']):
if key not in all_params:
raise ApiTypeError(
"Got an unexpected keyword argument '%s'"
" to method get_file" % key
)
local_var_params[key] = val
del local_var_params['kwargs']
# verify the required parameter 'file_id' is set
if self.api_client.client_side_validation and ('file_id' not in local_var_params or # noqa: E501
local_var_params['file_id'] is None): # noqa: E501
raise ApiValueError("Missing the required parameter `file_id` when calling `get_file`") # noqa: E501
collection_formats = {}
path_params = {}
if 'file_id' in local_var_params:
path_params['fileId'] = local_var_params['file_id'] # noqa: E501
query_params = []
if 'tenant_id' in local_var_params and local_var_params['tenant_id'] is not None: # noqa: E501
query_params.append(('tenantId', local_var_params['tenant_id'])) # noqa: E501
if 'presigned_url_mode' in local_var_params and local_var_params['presigned_url_mode'] is not None: # noqa: E501
query_params.append(('presignedUrlMode', local_var_params['presigned_url_mode'])) # noqa: E501
if 'include_volume_metadata' in local_var_params and local_var_params['include_volume_metadata'] is not None: # noqa: E501
query_params.append(('includeVolumeMetadata', local_var_params['include_volume_metadata'])) # noqa: E501
if 'metadata_include' in local_var_params and local_var_params['metadata_include'] is not None: # noqa: E501
query_params.append(('metadata.include', local_var_params['metadata_include'])) # noqa: E501
if 'metadata_exclude' in local_var_params and local_var_params['metadata_exclude'] is not None: # noqa: E501
query_params.append(('metadata.exclude', local_var_params['metadata_exclude'])) # noqa: E501
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['Bearer'] # noqa: E501
return self.api_client.call_api(
'/v1/files/{fileId}', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='FileResponse', # noqa: E501
auth_settings=auth_settings,
async_req=local_var_params.get('async_req'),
_return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
_preload_content=local_var_params.get('_preload_content', True),
_request_timeout=local_var_params.get('_request_timeout'),
collection_formats=collection_formats)
def list_files(self, **kwargs): # noqa: E501
"""Get a list of files # noqa: E501
Given a volumeId or volume name, get a list of files accessible by the JWT. The default sort returned is alphabetical, ascending. The default page size is 10 items # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.list_files(async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param list[str] volume_id: Optional field that specifies comma-separated volume IDs to include in the list
:param list[str] volume_name: Optional field that specifies comma-separated volume names to include in the list
:param list[str] path: Optional field that specifies comma-separated paths to include in the list. Value can use wildcards (e.g. /a/b/c/*) or exact matches (e.g. /a/b/c/d/).
:param bool is_uploaded: Optional field to filter by Uploaded files
:param str archive_status: Optional field that specifies comma-separated Archive Statuses to include in the list
:param bool recursive: Optional field to specify if files should be returned recursively in and under the specified paths, or only directly in the specified paths
:param str presigned_url_mode: Optional parameter to specify presigned url's content-disposition. If not specified, the browser will determine the default behavior. Possible values: Attachment, Inline, Browser
:param str include: Optionally include additional fields in the response. Multiple fields can be included by comma-separation. Possible values: TotalItemCount, PresignedUrl, InheritedAcl
:param int page_size: START_DESC END_DESC
:param str page_token: START_DESC END_DESC
:param str tenant_id: Optional parameter to see shared data in another tenant
:param str metadata_include: Optional parameter to specify comma separated patterns to include metadata by their field names.
:param str metadata_exclude: Optional parameter to specify comma separated patterns to exclude metadata by their field names.
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: FileListResponse
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
return self.list_files_with_http_info(**kwargs) # noqa: E501
def list_files_with_http_info(self, **kwargs): # noqa: E501
"""Get a list of files # noqa: E501
Given a volumeId or volume name, get a list of files accessible by the JWT. The default sort returned is alphabetical, ascending. The default page size is 10 items # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.list_files_with_http_info(async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param list[str] volume_id: Optional field that specifies comma-separated volume IDs to include in the list
:param list[str] volume_name: Optional field that specifies comma-separated volume names to include in the list
:param list[str] path: Optional field that specifies comma-separated paths to include in the list. Value can use wildcards (e.g. /a/b/c/*) or exact matches (e.g. /a/b/c/d/).
:param bool is_uploaded: Optional field to filter by Uploaded files
:param str archive_status: Optional field that specifies comma-separated Archive Statuses to include in the list
:param bool recursive: Optional field to specify if files should be returned recursively in and under the specified paths, or only directly in the specified paths
:param str presigned_url_mode: Optional parameter to specify presigned url's content-disposition. If not specified, the browser will determine the default behavior. Possible values: Attachment, Inline, Browser
:param str include: Optionally include additional fields in the response. Multiple fields can be included by comma-separation. Possible values: TotalItemCount, PresignedUrl, InheritedAcl
:param int page_size: START_DESC END_DESC
:param str page_token: START_DESC END_DESC
:param str tenant_id: Optional parameter to see shared data in another tenant
:param str metadata_include: Optional parameter to specify comma separated patterns to include metadata by their field names.
:param str metadata_exclude: Optional parameter to specify comma separated patterns to exclude metadata by their field names.
:param _return_http_data_only: response data without head status code
and headers
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: tuple(FileListResponse, status_code(int), headers(HTTPHeaderDict))
If the method is called asynchronously,
returns the request thread.
"""
local_var_params = locals()
all_params = [
'volume_id',
'volume_name',
'path',
'is_uploaded',
'archive_status',
'recursive',
'presigned_url_mode',
'include',
'page_size',
'page_token',
'tenant_id',
'metadata_include',
'metadata_exclude'
]
all_params.extend(
[
'async_req',
'_return_http_data_only',
'_preload_content',
'_request_timeout'
]
)
for key, val in six.iteritems(local_var_params['kwargs']):
if key not in all_params:
raise ApiTypeError(
"Got an unexpected keyword argument '%s'"
" to method list_files" % key
)
local_var_params[key] = val
del local_var_params['kwargs']
if self.api_client.client_side_validation and 'page_size' in local_var_params and local_var_params['page_size'] > 10000: # noqa: E501
raise ApiValueError("Invalid value for parameter `page_size` when calling `list_files`, must be a value less than or equal to `10000`") # noqa: E501
if self.api_client.client_side_validation and 'page_size' in local_var_params and local_var_params['page_size'] < 0: # noqa: E501
raise ApiValueError("Invalid value for parameter `page_size` | |
activity_index = "authors_{}".format(search_type)
# Check the database first.
authors_data = database.activity_retrieve(subreddit_name, specific_month, activity_index)
# If we don't have local data, fetch it.
if authors_data is None:
authors_data = {}
api_search_query = (
"https://api.pushshift.io/reddit/search/{}/?subreddit={}"
"&sort_type=score&sort=desc&after={}&before={}&aggs=author&size=50"
)
# Get the data from Pushshift as a dictionary.
retrieved_data = subreddit_pushshift_access(
api_search_query.format(search_type, subreddit_name, start_time, end_time),
stream_possible=True,
)
# If for some reason we encounter an error, we return an error
# string and will re-access next update.
if "aggs" not in retrieved_data:
# Change the header in the error depending on the type.
if search_type == "submission":
error_message = "\n\n**Top Submitters**\n\n"
else:
error_message = "\n\n**Top Commenters**\n\n"
error_message += WIKIPAGE_PS_ERROR.strip()
return error_message
returned_authors = retrieved_data["aggs"]["author"]
# Code to remove bots and [deleted] from the authors list.
# Otherwise, it is very likely that they will end up as some
# of the "top" submitters for comments due to frequency.
excluded_usernames = [
"AutoModerator",
"Decronym",
"[deleted]",
"RemindMeBot",
"TotesMessenger",
"translator-BOT",
]
# Iterate over the data and collect top authors into a
# dictionary that's indexed by key.
for author in returned_authors:
submitter = author["key"]
if submitter not in excluded_usernames:
submit_count = int(author["doc_count"])
authors_data[submitter] = submit_count
# Write to the database if we are not in the current month.
if specific_month != current_month:
database.activity_insert(subreddit_name, specific_month, activity_index, authors_data)
# Get the data formatted.
formatted_data = subreddit_pushshift_time_authors_collater(authors_data, search_type)
return formatted_data
def subreddit_pushshift_time_authors_collater(input_dictionary, search_type):
"""This simple function takes data from its equivalent dictionary
and outputs it as a Markdown segment.
:param input_dictionary: A dictionary containing data on the most
frequent authors during a time period.
:param search_type: Either `submission` or `comment`.
:return: A Markdown segment.
"""
formatted_lines = []
bullet_number = 1
line_template = "{}. {:,} {}s by u/{}"
# Go through the dictionary.
for author in sorted(input_dictionary, key=input_dictionary.get, reverse=True):
num_type = input_dictionary[author]
line = line_template.format(bullet_number, num_type, search_type, author)
formatted_lines.append(line)
bullet_number += 1
# Format everything together and change the header depending on the
# type of item we're processing.
if search_type == "submission":
header = "\n\n**Top Submitters**\n\n"
else:
header = "\n\n**Top Commenters**\n\n"
# If we have entries for this month, format everything together.
# Otherwise, return a section noting there's nothing.
if len(formatted_lines) > 0:
body = header + "\n".join(formatted_lines[: SETTINGS.num_display])
else:
no_section = "* It appears that there were no {}s during this period.".format(search_type)
body = header + no_section
return body
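# Example (illustrative data): subreddit_pushshift_time_authors_collater(
#     {'alice': 12, 'bob': 7}, 'submission') returns a Markdown segment roughly like:
#
#     **Top Submitters**
#
#     1. 12 submissions by u/alice
#     2. 7 submissions by u/bob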
def subreddit_pushshift_activity_retriever(subreddit_name, start_time, end_time, search_type):
"""This function accesses Pushshift to retrieve the activity,
including MOST submissions or comments, on a subreddit for a given
timespan. It also formats it as a bulleted Markdown list.
It also calculates the total AVERAGE over this time period and
includes it at a separate line at the end.
:param subreddit_name: The community we are looking for.
:param start_time: We want to find posts *after* this time,
expressed in string form.
:param end_time: We want to find posts *before* this time,
expressed in string form.
:param search_type: `comment` or `submission`, depending on the type
of top results one wants.
:return: A Markdown list with a header and bulleted list for each
most active day.
"""
# Convert YYYY-MM-DD UTC to Unix time, get the number of days
# in month, and get the number of days in between these two dates.
# When getting the `end_time`, Artemis will get it from literally
# the last second of the day to account for full coverage.
specific_month = start_time.rsplit("-", 1)[0]
start_time = timekeeping.convert_to_unix(start_time)
end_time = timekeeping.convert_to_unix(end_time) + 86399
current_month = timekeeping.month_convert_to_string(time.time())
activity_index = "activity_{}".format(search_type)
# Check the database first.
days_data = database.activity_retrieve(subreddit_name, specific_month, activity_index)
# If we don't have local data, fetch it.
if days_data is None:
days_data = {}
api_search_query = (
"https://api.pushshift.io/reddit/search/{}/?subreddit={}"
"&sort_type=created_utc&after={}&before={}&aggs=created_utc&size=50"
)
# Get the data from Pushshift as a dictionary.
retrieved_data = subreddit_pushshift_access(
api_search_query.format(search_type, subreddit_name, start_time, end_time),
stream_possible=True,
)
# If for some reason we encounter an error, we return an error
# string for inclusion.
if "aggs" not in retrieved_data:
error_message = "\n\n**{}s Activity**\n\n".format(search_type.title())
error_message += WIKIPAGE_PS_ERROR.strip()
return error_message
returned_days = retrieved_data["aggs"]["created_utc"]
# Iterate over the data. If the number of posts in a day is more
# than zero, save it.
for day in returned_days:
day_string = timekeeping.convert_to_string(int(day["key"]))
num_of_posts = int(day["doc_count"])
if num_of_posts != 0:
days_data[day_string] = num_of_posts
# Write to the database if we are not in the current month.
if specific_month != current_month:
database.activity_insert(subreddit_name, specific_month, activity_index, days_data)
# Get the data formatted.
formatted_data = subreddit_pushshift_activity_collater(days_data, search_type)
return formatted_data
def subreddit_pushshift_activity_collater(input_dictionary, search_type):
"""This simple function takes data from its equivalent dictionary
and outputs it as a Markdown segment.
:param input_dictionary: A dictionary containing data on the most
active days during a time period.
:param search_type: Either `submission` or `comment`.
:return: A Markdown segment.
"""
days_highest = []
lines_to_post = []
unavailable = "* It appears that there were no {}s during this period.".format(search_type)
# Define the number of days stored in the dictionary.
num_days = len(input_dictionary)
# Find the average number of the type.
if num_days > 0:
# If we have a time frame of how many days we're
# getting, let's get the average.
num_average = sum(input_dictionary.values()) / num_days
average_line = "\n\n*Average {0}s per day*: **{1:,}** {0}s.".format(
search_type, int(num_average)
)
else:
average_line = str(unavailable)
# Find the busiest days and add those days to a list with the date.
most_posts = sorted(zip(input_dictionary.values()), reverse=True)[: SETTINGS.num_display]
for number in most_posts:
for date, count in input_dictionary.items():
if number[0] == count and date not in str(days_highest): # Get the unique date.
days_highest.append([date, number[0]])
break
# Format the individual lines.
for day in days_highest:
if int(day[1]) != 0:
line = "* **{:,}** {}s on **{}**".format(int(day[1]), search_type, day[0])
lines_to_post.append(line)
# Format the text body. If there are days recorded join up all the
# data. Otherwise, return the `unavailable` message.
header = "\n\n**{}s Activity**\n\n*Most Active Days:*\n\n".format(search_type.title())
if len(lines_to_post) > 0: #
body = header + "\n".join(lines_to_post) + average_line
else:
body = header + unavailable
return body
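# Example (illustrative data): subreddit_pushshift_activity_collater(
#     {'2021-06-01': 5, '2021-06-02': 9}, 'comment') produces a
# "**Comments Activity**" section with bullets such as
# "* **9** comments on **2021-06-02**" followed by the
# "*Average comments per day*: **7** comments." line.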
def subreddit_stream_post_information(subreddit_name, start_time, end_time):
"""This function looks at Stream data and gathers some supplementary
"aggregation" information on the kinds of posts.
:param subreddit_name: The community we are looking for.
:param start_time: We want to find posts *after* this time.
:param end_time: We want to find posts *before* this time.
:return: A Markdown list with a header and bulleted list for each
requested attribute.
"""
attribute_dictionary = {}
formatted_lines = []
specific_month = start_time.rsplit("-", 1)[0]
current_month = timekeeping.month_convert_to_string(time.time())
start_time = timekeeping.convert_to_unix(start_time)
end_time = timekeeping.convert_to_unix(end_time)
# Reject timeframes earlier than 2021-06-01, since there will not
# be any stream data for before that time.
if start_time < 1622505600:
return None
# Conduct the queries on the Stream database. We use a variation of
# the Pushshift syntax for compatibility.
for query in SETTINGS.stream_include_attributes:
# Check first to see if we have local data.
query_data = database.activity_retrieve(subreddit_name, specific_month, query)
# If no results, get data from the Stream database.
if query_data is None:
logger.debug(f"Stream Post Information: Conducting Stream query for `{query}`.")
search_query = "/search/submission/?subreddit={}&after={}&before={}&aggs={}"
search_query = search_query.format(subreddit_name, start_time, end_time, query)
results = stream_query_access(search_query, False)
attribute_dictionary[query] = results
# Write to the database if we are not in the current month.
if specific_month != current_month:
database.activity_insert(subreddit_name, specific_month, query, dict(results))
else:
# We have local data. Load it from the database.
logger.debug(f"Stream Post Information: Loading `{query}` data from database.")
attribute_dictionary[query] = query_data
# Format the results into Markdown lines.
for query in SETTINGS.stream_include_attributes:
query_data = attribute_dictionary[query]
# For now, we only want True/False binary query data.
if len(query_data) <= 2:
total_amount = sum(query_data.values())
try:
percentage = "{:.2%}".format(query_data[True]/total_amount)
except ZeroDivisionError:
percentage = "N/A"
entry_line = ("* `{}` posts: {}/{} ({})".format(query, query_data[True], total_amount,
percentage))
formatted_lines.append(entry_line)
# Pull everything together.
header = "\n\n#### Post Types\n\n{}\n"
body = header.format('\n'.join(formatted_lines))
return body
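# Output sketch (illustrative attribute name): for a binary attribute such as `is_self`,
# each formatted line reads roughly "* `is_self` posts: 40/100 (40.00%)" underneath the
# "#### Post Types" header.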
def subreddit_statistics_earliest_determiner(subreddit_name):
"""This function uses PRAW to fetch the Reddit limit of 1000 posts.
    Then it checks the dates of those posts and returns the earliest day.
    """
<filename>functions/DICTIONARIES_AGNfitter.py
"""%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
DICTIONARIES_AGNFitter.py
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
This script contains all functions which are needed to construct the total model of AGN.
##For constructing a new dictionary
(e.g. in case you 1) need to add a filter which is not included, or
2) need a finer grid for better S/N data)
see DICTIONARIES_AGNfitter.py
"""
import numpy as np
import sys
from collections import defaultdict
import MODEL_AGNfitter as model
from scipy.integrate import trapz
import time
import cPickle
import shelve
from astropy import units as u
class MODELSDICT:
"""
Class MODELSDICT
Builds a dictionary of model templates.
##input:
- filename of the dictionary you want to create
    - the path where it will be located
    - Also the variables self.ebvgal_array, self.ebvbbb_array and self.z_array
    can be changed by the user, for a finer grid in these parameters.
##bugs:
"""
def __init__(self, filename, path, filters):
self.filename = filename
self.path=path
self.ebvgal_array = np.array(np.arange(0.,100.,5.)/100)
self.ebvbbb_array = np.array(np.arange(0.,100.,5.)/100)
self.z_array = filters['dict_zarray']
self.filterset = filters['Bandset']
self.filters = filters
def build(self):
f = open(self.filename, 'wb')
COSMOS_modelsdict = dict()
print 'MODELSDICT.build'
print 'Constructing Dictionary of models.'
print '--------------------------------------'
print 'Make sure the filterset contains all the photometric bands'
print 'needed by your catalog.'
print 'This process might take a while, but you have to do it only once.'
print 'If you interrupt it, please trash the empty file created.'
print ''
i=0
dictionary_progressbar(i, len(self.z_array), prefix = 'Dict:', suffix = 'Complete', barLength = 50)
for z in self.z_array:
i += 1
filterdict = filter_dictionaries(self.filterset, self.path, self.filters)
dict_modelsfiltered = self.construct_dictionaryarray_filtered(z, filterdict, self.path)
COSMOS_modelsdict[str(z)] = dict_modelsfiltered
time.sleep(0.01)
dictionary_progressbar(i, len(self.z_array), prefix = 'Dict:', suffix = 'Complete', barLength = 50)
print 'Dictionary has been created in :', self.filename
cPickle.dump(COSMOS_modelsdict, f, protocol=2)
f.close()
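    # Usage sketch (paths and settings are illustrative, not part of this module):
    # filters = {'dict_zarray': np.arange(0.5, 2.0, 0.5), 'Bandset': 'BANDSET_default', ...}
    # mdict = MODELSDICT('models/MODELSDICT_default.pickle', 'AGNfitter/', filters)
    # mdict.build()   # writes the pickled dictionary of filtered model fluxes to disk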
def construct_dictionaryarray_filtered(self, z, filterdict,path):
"""
Construct the dictionaries of fluxes at bands (to compare to data),
and dictionaries of fluxes over the whole spectrum, for plotting.
"""
GALAXYFdict_filtered = dict()
GALAXY_SFRdict = dict()
STARBURSTFdict_filtered = dict()
BBBFdict_filtered = dict()
TORUSFdict_filtered = dict()
GALAXYFdict_4plot = dict()
STARBURSTFdict_4plot = dict()
BBBFdict_4plot = dict()
TORUSFdict_4plot = dict()
#OPENING TEMPLATES AND BUILDING DICTIONARIES
#Call object containing all galaxy models
galaxy_object = cPickle.load(file(path + 'models/GALAXY/bc03_275templates.pickle', 'rb'))
_, ageidx, tauidx, _, _,_ = np.shape(galaxy_object.SED)
#Construct dictionaries
for taui in range(tauidx):
for agei in range(ageidx):
t1= time.time()
gal_wl, gal_Fwl = galaxy_object.wave, galaxy_object.SED[:,agei,taui,:,:,:].squeeze()
gal_nus= gal_wl.to(u.Hz, equivalencies=u.spectral())[::-1]#invert
gal_Fnu= (gal_Fwl * 3.34e-19 * gal_wl**2.)[::-1]
gal_SFR= galaxy_object.SFR[:,agei,taui,:,:].squeeze()
GALAXY_SFRdict[str(galaxy_object.tau.value[taui]),str(galaxy_object.tg.value[agei])] = gal_SFR
for EBV_gal in self.ebvgal_array:
#Apply reddening
gal_nu, gal_Fnu_red = model.GALAXYred_Calzetti(gal_nus.value[0:len(gal_nus):3], gal_Fnu.value[0:len(gal_nus):3], EBV_gal)
GALAXYFdict_4plot[str(galaxy_object.tau.value[taui]),str(galaxy_object.tg.value[agei]), str(EBV_gal)] = \
np.log10(gal_nu), gal_Fnu_red
#Projection of filter curves on models
bands, gal_Fnu_filtered = model.filters1(np.log10(gal_nu), gal_Fnu_red, filterdict, z)
GALAXYFdict_filtered[str(galaxy_object.tau.value[taui]),str(galaxy_object.tg.value[agei]), str(EBV_gal)] = \
bands, gal_Fnu_filtered
#Call object containing all starburst models
starburst_object = cPickle.load(file(path + 'models/STARBURST/dalehelou_charyelbaz_v1.pickle', 'rb'))
irlumidx = len(starburst_object.SED)
#Construct dictionaries
for irlumi in range(irlumidx):
sb_nu0, sb_Fnu0 = starburst_object.wave[irlumi], starburst_object.SED[irlumi].squeeze()
STARBURSTFdict_4plot[str(starburst_object.irlum[irlumi])] = sb_nu0, sb_Fnu0
bands, sb_Fnu_filtered = model.filters1(sb_nu0, sb_Fnu0, filterdict, z)
STARBURSTFdict_filtered[str(starburst_object.irlum[irlumi])] = bands, sb_Fnu_filtered
if np.amax(sb_Fnu_filtered) == 0:
print 'Error: something is wrong in the calculation of STARBURST flux'
#No object to call since bbb is only one model
bbb_object = cPickle.load(file(path + 'models/BBB/richards.pickle', 'rb'))
bbb_nu, bbb_Fnu = bbb_object.wave, bbb_object.SED.squeeze()
#Construct dictionaries
for EBV_bbb in self.ebvbbb_array:
bbb_nu0, bbb_Fnu_red = model.BBBred_Prevot(bbb_nu, bbb_Fnu, EBV_bbb, z )
BBBFdict_4plot[str(EBV_bbb)] =bbb_nu0, bbb_Fnu_red
bands, bbb_Fnu_filtered = model.filters1(bbb_nu0, bbb_Fnu_red, filterdict,z)
BBBFdict_filtered[str(EBV_bbb)] = bands, bbb_Fnu_filtered
if np.amax(bbb_Fnu_filtered) == 0:
print 'Error: something is wrong in the calculation of BBB flux'
#Call object containing all torus models
torus_object = cPickle.load(file(path + 'models/TORUS/silva_v1.pickle', 'rb'))
nhidx=len(torus_object.SED)
#Construct dictionaries
for nhi in range(nhidx):
tor_nu0, tor_Fnu0 = torus_object.wave[nhi], torus_object.SED[nhi].squeeze()
TORUSFdict_4plot[str(torus_object.nh[nhi])] = tor_nu0, tor_Fnu0
bands, tor_Fnu_filtered = model.filters1(tor_nu0, tor_Fnu0, filterdict, z)
TORUSFdict_filtered[str(torus_object.nh[nhi])] = bands, tor_Fnu_filtered
if np.amax(tor_Fnu_filtered) == 0:
print 'Error: something is wrong in the calculation of TORUS flux'
return STARBURSTFdict_filtered , BBBFdict_filtered, GALAXYFdict_filtered, TORUSFdict_filtered, \
STARBURSTFdict_4plot , BBBFdict_4plot, GALAXYFdict_4plot, TORUSFdict_4plot,GALAXY_SFRdict
def dictkey_arrays(MODELSdict):
"""
    Extract the parameter-key arrays from the model dictionaries, so that sampled
    parameter values can be matched to the nearest tabulated model.
    ##input: MODELSdict, the tuple returned by construct_dictionaryarray_filtered()
    ##output: gal_obj, irlum_dict, nh_dict, ebvb_dict, GALAXY_SFRdict
"""
STARBURSTFdict , BBBFdict, GALAXYFdict, TORUSFdict, _,_,_,_,GALAXY_SFRdict= MODELSdict
tau_dict= np.array(list(GALAXYFdict.keys()))[:,0]
age_dict= np.array(list(GALAXYFdict.keys()))[:,1]
ebvg_dict = np.array(list(GALAXYFdict.keys()))[:,2]
irlum_dict = np.array(list(STARBURSTFdict.keys()))
nh_dict = np.array(list(TORUSFdict.keys()))
ebvb_dict = np.array(list(BBBFdict.keys()))
#For computational reasons (to be used in PARAMETERspace_AGNfitter.py)
class gal_class:
def __init__(self, tau_dict, age_dict, ebvg_dict):
self.tau_dict =tau_dict
self.age_dict= age_dict
self.ebvg_dict = ebvg_dict
self.tau_dict_float =tau_dict.astype(float)
self.age_dict_float= age_dict.astype(float)
self.ebvg_dict_float = ebvg_dict.astype(float)
def nearest_par2dict(self, tau, age, ebvg):
taui =np.abs(self.tau_dict_float-tau).argmin()
agei= np.abs(self.age_dict_float-age).argmin()
ebvgi = np.abs(self.ebvg_dict_float-ebvg).argmin()
self.t = tau_dict[taui]
self.a= age_dict[agei]
self.e= ebvg_dict[ebvgi]
gal_obj = gal_class(tau_dict, age_dict, ebvg_dict)
return gal_obj, irlum_dict, nh_dict, ebvb_dict, GALAXY_SFRdict
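# Usage sketch (MODELSdict is assumed to be the tuple produced by
# MODELSDICT.construct_dictionaryarray_filtered for one redshift), showing roughly
# how the returned objects are used downstream:
# gal_obj, irlum_dict, nh_dict, ebvb_dict, GALAXY_SFRdict = dictkey_arrays(MODELSdict)
# gal_obj.nearest_par2dict(0.5, 1.e9, 0.1)      # snap sampled (tau, age, EBV) to the grid
# key = (gal_obj.t, gal_obj.a, gal_obj.e)       # string key into GALAXYFdict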
def filter_dictionaries(filterset, path, filters):
"""
Constructs the dictionaries of fluxes
    1) specifically for your photometric bands (to compare to data), and
2) dictionaries of fluxes for the whole spectrum, for plotting.
input
-------
- filterset: Here we have two types of filterset:
'BANDSET_default' or 'BANDSET_settings'.
This was specified from the RUN_AGNfitter_multi.py script.
'BANDSET_default' includes bands needed for the example.
'BANDSET_settings' includes all bands you specify in RUN_AGNfitter_multi.py.
dependency
----------
This function is called in the CLASS MODELSDICT
"""
H500band_file = path + 'models/FILTERS/HERSCHEL/SPIRE_500mu.txt'
H500_lambda, H500_factor = np.loadtxt(H500band_file, usecols=(0,1),unpack= True)
H350band_file = path + 'models/FILTERS/HERSCHEL/SPIRE_350mu.txt'
H350_lambda, H350_factor = np.loadtxt(H350band_file, usecols=(0,1),unpack= True)
H250band_file = path + 'models/FILTERS/HERSCHEL/SPIRE_250mu.txt'
H250_lambda, H250_factor = np.loadtxt(H250band_file, usecols=(0,1),unpack= True)
H160band_file = path + 'models/FILTERS/HERSCHEL/PACS_160mu.txt'
H160_lambda, H160_factor = np.loadtxt(H160band_file, usecols=(0,1),unpack= True)
H100band_file =path + 'models/FILTERS/HERSCHEL/PACS_100mu.txt'
H100_lambda, H100_factor = np.loadtxt(H100band_file, usecols=(0,1),unpack= True)
#SPITZER
M160band_file = path + 'models/FILTERS/SPITZER/mips160.res'
M160_lambda, M160_factor = np.loadtxt(M160band_file, usecols=(0,1),unpack= True)
M70band_file = path + 'models/FILTERS/SPITZER/mips70.res'
M70_lambda, M70_factor = np.loadtxt(M70band_file, usecols=(0,1),unpack= True)
M24band_file = path + 'models/FILTERS/SPITZER/mips24.res'
M24_lambda, M24_factor = np.loadtxt(M24band_file, usecols=(0,1),unpack= True)
#IRAC
I4band_file = path + 'models/FILTERS/SPITZER/irac_ch4.res'
I4_lambda, I4_factor = np.loadtxt(I4band_file, usecols=(0,1),unpack= True)
I3band_file = path + 'models/FILTERS/SPITZER/irac_ch3.res'
I3_lambda, I3_factor = np.loadtxt(I3band_file, usecols=(0,1),unpack= True)
I2band_file = path + 'models/FILTERS/SPITZER/irac_ch2.res'
I2_lambda, I2_factor = np.loadtxt(I2band_file, usecols=(0,1),unpack= True)
I1band_file = path + 'models/FILTERS/SPITZER/irac_ch1.res'
I1_lambda, I1_factor = np.loadtxt(I1band_file, usecols=(0,1),unpack= True)
#WISE
W4band_file = path + 'models/FILTERS/WISE/NRSR-W4.txt'
W4_lambda, W4_factor = np.loadtxt(W4band_file, usecols=(0,1),unpack= True)
W3band_file = path + 'models/FILTERS/WISE/NRSR-W3.txt'
W3_lambda, W3_factor = np.loadtxt(W3band_file, usecols=(0,1),unpack= True)
W2band_file = path + 'models/FILTERS/WISE/NRSR-W2.txt'
W2_lambda, W2_factor = np.loadtxt(W2band_file, usecols=(0,1),unpack= True)
W1band_file = path + 'models/FILTERS/WISE/NRSR-W1.txt'
W1_lambda, W1_factor = np.loadtxt(W1band_file, usecols=(0,1),unpack= True)
#2mass
Kband_file = path + 'models/FILTERS/2MASS/Ks_2mass.res'
K_lambda, K_factor = np.loadtxt(Kband_file, usecols=(0,1),unpack= True)
Hband_file = path + 'models/FILTERS/2MASS/H_2mass.res'
H_lambda, H_factor = np.loadtxt(Hband_file, usecols=(0,1),unpack= True)
Jband_file = path + 'models/FILTERS/2MASS/J_2mass.res'
J_lambda, J_factor = np.loadtxt(Jband_file, usecols=(0,1),unpack= True)
#VISTA
Huvband_file = path + 'models/FILTERS/VISTA/H_uv.res'
Huv_lambda, Huv_factor = np.loadtxt(Huvband_file, usecols=(0,1),unpack= True)
Juvband_file = path + 'models/FILTERS/VISTA/J_uv.res'
Juv_lambda, Juv_factor = np.loadtxt(Juvband_file, usecols=(0,1),unpack= True)
Kuvband_file = path + 'models/FILTERS/VISTA/K_uv.res'
Kuv_lambda, Kuv_factor = np.loadtxt(Kuvband_file, usecols=(0,1),unpack= True)
Yuvband_file = path + 'models/FILTERS/VISTA/Y_uv.res'
Yuv_lambda, Yuv_factor = np.loadtxt(Yuvband_file, usecols=(0,1),unpack= True)
#CHFT ugriz
uband_file_CHFT = path + 'models/FILTERS/CHFT/u_megaprime_sagem.res'
u_lambda_CHFT, u_factor_CHFT = np.loadtxt(uband_file_CHFT, usecols=(0,1),unpack= True)
gband_file_CHFT = path + 'models/FILTERS/CHFT/g_megaprime_sagem.res'
g_lambda_CHFT, g_factor_CHFT = np.loadtxt(gband_file_CHFT, usecols=(0,1),unpack= True)
rband_file_CHFT = path + 'models/FILTERS/CHFT/r_megaprime_sagem.res'
r_lambda_CHFT, r_factor_CHFT = np.loadtxt(rband_file_CHFT, usecols=(0,1),unpack= True)
iband_file_CHFT = path + 'models/FILTERS/CHFT/i_megaprime_sagem.res'
i_lambda_CHFT, i_factor_CHFT = np.loadtxt(iband_file_CHFT, usecols=(0,1),unpack= True)
zband_file_CHFT = path + 'models/FILTERS/CHFT/z_megaprime_sagem.res'
z_lambda_CHFT, z_factor_CHFT = np.loadtxt(zband_file_CHFT, usecols=(0,1),unpack= True)
#SDSS ugriz
uband_file_SDSS = path + 'models/FILTERS/SDSS/u_SDSS.res'
u_lambda_SDSS, u_factor_SDSS = np.loadtxt(uband_file_SDSS, usecols=(0,1),unpack= True)
gband_file_SDSS = path + 'models/FILTERS/SDSS/g_SDSS.res'
g_lambda_SDSS, g_factor_SDSS = np.loadtxt(gband_file_SDSS, usecols=(0,1),unpack= True)
rband_file_SDSS = path + 'models/FILTERS/SDSS/r_SDSS.res'
r_lambda_SDSS, r_factor_SDSS = np.loadtxt(rband_file_SDSS, usecols=(0,1),unpack= True)
iband_file_SDSS = path + 'models/FILTERS/SDSS/i_SDSS.res'
i_lambda_SDSS, i_factor_SDSS = np.loadtxt(iband_file_SDSS, usecols=(0,1),unpack= True)
zband_file_SDSS = path + 'models/FILTERS/SDSS/z_SDSS.res'
z_lambda_SDSS, z_factor_SDSS = np.loadtxt(zband_file_SDSS, usecols=(0,1),unpack= True)
#SUBARU
gband_file =path + 'models/FILTERS/SUBARU/g_subaru.res'
g_lambda,g_factor = np.loadtxt(gband_file, usecols=(0,1),unpack= True)
rband_file = path + 'models/FILTERS/SUBARU/r_subaru.res'
r_lambda,r_factor = np.loadtxt(rband_file, usecols=(0,1),unpack= True)
iband_file = path + 'models/FILTERS/SUBARU/i_subaru.res'
i_lambda,i_factor = np.loadtxt(iband_file, usecols=(0,1),unpack= True)
zband_file =path + 'models/FILTERS/SUBARU/z_subaru.res'
z_lambda, z_factor = np.loadtxt(zband_file, usecols=(0,1),unpack= True)
Bband_file = path + 'models/FILTERS/SUBARU/B_subaru.res'
B_lambda, B_factor = np.loadtxt(Bband_file, usecols=(0,1),unpack= True)
Vband_file = path + 'models/FILTERS/SUBARU/V_subaru.res'
V_lambda, V_factor = np.loadtxt(Vband_file, usecols=(0,1),unpack= True)
#GALEX
NUVband_file = path + 'models/FILTERS/GALEX/galex2500.res'
NUV_lambda, NUV_factor = np.loadtxt(NUVband_file, usecols=(0,1),unpack= True)
FUVband_file = path + 'models/FILTERS/GALEX/galex1500.res'
FUV_lambda, FUV_factor = np.loadtxt(FUVband_file, usecols=(0,1),unpack= True)
if filterset == 'BANDSET_default':
#List of file names
files = [ H500band_file, H350band_file, H250band_file, M24band_file, I4band_file ,\
I3band_file, I2band_file, I1band_file, Kband_file, Hband_file, | |
' ' +\
'username: ' + repr(self.username) + ' ' +\
'password: ' + repr(self.password) + ' ' +\
'tls_required: ' + repr(self.tls_required) + ' ' +\
'>'
def to_dict(self):
return {
'id': self.id,
'name': self.name,
'healthy': self.healthy,
'tags': self.tags,
'secret_store_id': self.secret_store_id,
'egress_filter': self.egress_filter,
'hostname': self.hostname,
'port_override': self.port_override,
'port': self.port,
'username': self.username,
'password': self.password,
'tls_required': self.tls_required,
}
@classmethod
def from_dict(cls, d):
return cls(
id=d.get('id'),
name=d.get('name'),
healthy=d.get('healthy'),
tags=d.get('tags'),
secret_store_id=d.get('secret_store_id'),
egress_filter=d.get('egress_filter'),
hostname=d.get('hostname'),
port_override=d.get('port_override'),
port=d.get('port'),
username=d.get('username'),
password=d.get('password'),
tls_required=d.get('tls_required'),
)
class AmazonMQAMQP091:
"""
:param id: Unique identifier of the Resource.
:param name: Unique human-readable name of the Resource.
:param healthy: True if the datasource is reachable and the credentials are valid.
:param tags: Tags is a map of key, value pairs.
:param secret_store_id: ID of the secret store containing credentials for this resource, if any.
:param egress_filter: A filter applied to the routing logic to pin datasource to nodes.
:param hostname:
:param port_override:
:param port:
:param username:
:param password:
:param tls_required:
"""
__slots__ = [
'id',
'name',
'healthy',
'tags',
'secret_store_id',
'egress_filter',
'hostname',
'port_override',
'port',
'username',
'password',
'tls_required',
]
def __init__(
self,
id=None,
name=None,
healthy=None,
tags=None,
secret_store_id=None,
egress_filter=None,
hostname=None,
port_override=None,
port=None,
username=None,
password=None,
tls_required=None,
):
self.id = id
self.name = name
self.healthy = healthy
self.tags = tags
self.secret_store_id = secret_store_id
self.egress_filter = egress_filter
self.hostname = hostname
self.port_override = port_override
self.port = port
self.username = username
self.password = password
self.tls_required = tls_required
def __repr__(self):
return '<sdm.AmazonMQAMQP091 ' + \
'id: ' + repr(self.id) + ' ' +\
'name: ' + repr(self.name) + ' ' +\
'healthy: ' + repr(self.healthy) + ' ' +\
'tags: ' + repr(self.tags) + ' ' +\
'secret_store_id: ' + repr(self.secret_store_id) + ' ' +\
'egress_filter: ' + repr(self.egress_filter) + ' ' +\
'hostname: ' + repr(self.hostname) + ' ' +\
'port_override: ' + repr(self.port_override) + ' ' +\
'port: ' + repr(self.port) + ' ' +\
'username: ' + repr(self.username) + ' ' +\
'password: ' + repr(self.password) + ' ' +\
'tls_required: ' + repr(self.tls_required) + ' ' +\
'>'
def to_dict(self):
return {
'id': self.id,
'name': self.name,
'healthy': self.healthy,
'tags': self.tags,
'secret_store_id': self.secret_store_id,
'egress_filter': self.egress_filter,
'hostname': self.hostname,
'port_override': self.port_override,
'port': self.port,
'username': self.username,
            'password': self.password,
'tls_required': self.tls_required,
}
@classmethod
def from_dict(cls, d):
return cls(
id=d.get('id'),
name=d.get('name'),
healthy=d.get('healthy'),
tags=d.get('tags'),
secret_store_id=d.get('secret_store_id'),
egress_filter=d.get('egress_filter'),
hostname=d.get('hostname'),
port_override=d.get('port_override'),
port=d.get('port'),
username=d.get('username'),
password=d.get('password'),
tls_required=d.get('tls_required'),
)
class Athena:
"""
:param id: Unique identifier of the Resource.
:param name: Unique human-readable name of the Resource.
:param healthy: True if the datasource is reachable and the credentials are valid.
:param tags: Tags is a map of key, value pairs.
:param secret_store_id: ID of the secret store containing credentials for this resource, if any.
:param egress_filter: A filter applied to the routing logic to pin datasource to nodes.
:param access_key:
:param secret_access_key:
:param output:
:param port_override:
:param region:
:param role_arn:
:param role_external_id:
"""
__slots__ = [
'id',
'name',
'healthy',
'tags',
'secret_store_id',
'egress_filter',
'access_key',
'secret_access_key',
'output',
'port_override',
'region',
'role_arn',
'role_external_id',
]
def __init__(
self,
id=None,
name=None,
healthy=None,
tags=None,
secret_store_id=None,
egress_filter=None,
access_key=None,
secret_access_key=None,
output=None,
port_override=None,
region=None,
role_arn=None,
role_external_id=None,
):
self.id = id
self.name = name
self.healthy = healthy
self.tags = tags
self.secret_store_id = secret_store_id
self.egress_filter = egress_filter
self.access_key = access_key
self.secret_access_key = secret_access_key
self.output = output
self.port_override = port_override
self.region = region
self.role_arn = role_arn
self.role_external_id = role_external_id
def __repr__(self):
return '<sdm.Athena ' + \
'id: ' + repr(self.id) + ' ' +\
'name: ' + repr(self.name) + ' ' +\
'healthy: ' + repr(self.healthy) + ' ' +\
'tags: ' + repr(self.tags) + ' ' +\
'secret_store_id: ' + repr(self.secret_store_id) + ' ' +\
'egress_filter: ' + repr(self.egress_filter) + ' ' +\
'access_key: ' + repr(self.access_key) + ' ' +\
'secret_access_key: ' + repr(self.secret_access_key) + ' ' +\
'output: ' + repr(self.output) + ' ' +\
'port_override: ' + repr(self.port_override) + ' ' +\
'region: ' + repr(self.region) + ' ' +\
'role_arn: ' + repr(self.role_arn) + ' ' +\
'role_external_id: ' + repr(self.role_external_id) + ' ' +\
'>'
def to_dict(self):
return {
'id': self.id,
'name': self.name,
'healthy': self.healthy,
'tags': self.tags,
'secret_store_id': self.secret_store_id,
'egress_filter': self.egress_filter,
'access_key': self.access_key,
'secret_access_key': self.secret_access_key,
'output': self.output,
'port_override': self.port_override,
'region': self.region,
'role_arn': self.role_arn,
'role_external_id': self.role_external_id,
}
@classmethod
def from_dict(cls, d):
return cls(
id=d.get('id'),
name=d.get('name'),
healthy=d.get('healthy'),
tags=d.get('tags'),
secret_store_id=d.get('secret_store_id'),
egress_filter=d.get('egress_filter'),
access_key=d.get('access_key'),
secret_access_key=d.get('secret_access_key'),
output=d.get('output'),
port_override=d.get('port_override'),
region=d.get('region'),
role_arn=d.get('role_arn'),
role_external_id=d.get('role_external_id'),
)
class AWS:
"""
:param id: Unique identifier of the Resource.
:param name: Unique human-readable name of the Resource.
:param healthy: True if the datasource is reachable and the credentials are valid.
:param tags: Tags is a map of key, value pairs.
:param secret_store_id: ID of the secret store containing credentials for this resource, if any.
:param egress_filter: A filter applied to the routing logic to pin datasource to nodes.
:param access_key:
:param secret_access_key:
:param healthcheck_region:
:param role_arn:
:param role_external_id:
"""
__slots__ = [
'id',
'name',
'healthy',
'tags',
'secret_store_id',
'egress_filter',
'access_key',
'secret_access_key',
'healthcheck_region',
'role_arn',
'role_external_id',
]
def __init__(
self,
id=None,
name=None,
healthy=None,
tags=None,
secret_store_id=None,
egress_filter=None,
access_key=None,
secret_access_key=None,
healthcheck_region=None,
role_arn=None,
role_external_id=None,
):
self.id = id
self.name = name
self.healthy = healthy
self.tags = tags
self.secret_store_id = secret_store_id
self.egress_filter = egress_filter
self.access_key = access_key
self.secret_access_key = secret_access_key
self.healthcheck_region = healthcheck_region
self.role_arn = role_arn
self.role_external_id = role_external_id
def __repr__(self):
return '<sdm.AWS ' + \
'id: ' + repr(self.id) + ' ' +\
'name: ' + repr(self.name) + ' ' +\
'healthy: ' + repr(self.healthy) + ' ' +\
'tags: ' + repr(self.tags) + ' ' +\
'secret_store_id: ' + repr(self.secret_store_id) + ' ' +\
'egress_filter: ' + repr(self.egress_filter) + ' ' +\
'access_key: ' + repr(self.access_key) + ' ' +\
'secret_access_key: ' + repr(self.secret_access_key) + ' ' +\
'healthcheck_region: ' + repr(self.healthcheck_region) + ' ' +\
'role_arn: ' + repr(self.role_arn) + ' ' +\
'role_external_id: ' + repr(self.role_external_id) + ' ' +\
'>'
def to_dict(self):
return {
'id': self.id,
'name': self.name,
'healthy': self.healthy,
'tags': self.tags,
'secret_store_id': self.secret_store_id,
'egress_filter': self.egress_filter,
'access_key': self.access_key,
'secret_access_key': self.secret_access_key,
'healthcheck_region': self.healthcheck_region,
'role_arn': self.role_arn,
'role_external_id': self.role_external_id,
}
@classmethod
def from_dict(cls, d):
return cls(
id=d.get('id'),
name=d.get('name'),
healthy=d.get('healthy'),
tags=d.get('tags'),
secret_store_id=d.get('secret_store_id'),
egress_filter=d.get('egress_filter'),
access_key=d.get('access_key'),
secret_access_key=d.get('secret_access_key'),
healthcheck_region=d.get('healthcheck_region'),
role_arn=d.get('role_arn'),
role_external_id=d.get('role_external_id'),
)
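# Round-trip sketch (illustrative values): to_dict() and from_dict() are symmetric for
# these resource classes, so serialization preserves every field.
# aws = AWS(name='prod-aws', healthcheck_region='us-east-1')
# assert AWS.from_dict(aws.to_dict()).healthcheck_region == 'us-east-1'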
class BigQuery:
"""
:param id: Unique identifier of the Resource.
:param name: Unique human-readable name of the Resource.
:param healthy: True if the datasource is reachable and the credentials are valid.
:param tags: Tags is a map of key, value pairs.
:param secret_store_id: ID of the secret store containing credentials for this resource, if any.
:param egress_filter: A filter applied to the routing logic to pin datasource to nodes.
:param private_key:
:param project:
:param port_override:
:param endpoint:
:param username:
"""
__slots__ = [
'id',
'name',
'healthy',
'tags',
'secret_store_id',
'egress_filter',
'private_key',
'project',
'port_override',
'endpoint',
'username',
]
def __init__(
self,
id=None,
name=None,
healthy=None,
tags=None,
secret_store_id=None,
egress_filter=None,
private_key=None,
project=None,
port_override=None,
endpoint=None,
username=None,
):
self.id = id
self.name = name
self.healthy = healthy
self.tags = tags
self.secret_store_id = secret_store_id
self.egress_filter = egress_filter
self.private_key = private_key
self.project = project
self.port_override = port_override
self.endpoint = endpoint
self.username = username
def __repr__(self):
return '<sdm.BigQuery ' + \
'id: ' + repr(self.id) + ' ' +\
'name: ' + repr(self.name) + ' ' +\
'healthy: ' + repr(self.healthy) + ' ' +\
'tags: ' + repr(self.tags) + ' ' +\
'secret_store_id: ' + repr(self.secret_store_id) + ' ' +\
'egress_filter: ' + repr(self.egress_filter) + ' ' +\
'private_key: ' + repr(self.private_key) + ' ' +\
'project: ' + repr(self.project) + ' ' +\
'port_override: ' + repr(self.port_override) + ' ' +\
'endpoint: ' + repr(self.endpoint) + ' ' +\
'username: ' + repr(self.username) + ' ' +\
'>'
def to_dict(self):
return {
'id': self.id,
'name': self.name,
'healthy': self.healthy,
'tags': self.tags,
'secret_store_id': self.secret_store_id,
'egress_filter': self.egress_filter,
'private_key': self.private_key,
'project': self.project,
'port_override': self.port_override,
'endpoint': self.endpoint,
'username': self.username,
}
@classmethod
def from_dict(cls, d):
return cls(
id=d.get('id'),
name=d.get('name'),
healthy=d.get('healthy'),
tags=d.get('tags'),
secret_store_id=d.get('secret_store_id'),
egress_filter=d.get('egress_filter'),
            private_key=d.get('private_key'),
            project=d.get('project'),
            port_override=d.get('port_override'),
            endpoint=d.get('endpoint'),
            username=d.get('username'),
        )
<reponame>mynameistechno/aimet
#!/usr/bin/env python3.6
# -*- mode: python -*-
# =============================================================================
# @@-COPYRIGHT-START-@@
#
# Copyright (c) 2021, Qualcomm Innovation Center, Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its contributors
# may be used to endorse or promote products derived from this software
# without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
#
# SPDX-License-Identifier: BSD-3-Clause
#
# @@-COPYRIGHT-END-@@
# =============================================================================
"""
This file demonstrates the use of quantization aware training technique using
AIMET Quantsim.
"""
import os
import argparse
from datetime import datetime
import logging
from typing import List, Callable, Any
import tensorflow as tf
from tensorflow.python.keras.applications.resnet import ResNet50
# imports for AIMET
from aimet_common.defs import QuantScheme
from aimet_tensorflow import batch_norm_fold as aimet_bnf
from aimet_tensorflow.quantsim import QuantizationSimModel
from aimet_tensorflow.utils.graph_saver import save_model_to_meta
# imports for data pipelines
from Examples.common import image_net_config
from Examples.tensorflow.utils.image_net_evaluator import ImageNetEvaluator
from Examples.tensorflow.utils.image_net_trainer import ImageNetTrainer
from Examples.tensorflow.utils.add_computational_nodes_in_graph import add_image_net_computational_nodes_in_graph
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'
tf.compat.v1.logging.set_verbosity(tf.compat.v1.logging.ERROR)
logger = logging.getLogger('TensorFlowQAT')
formatter = logging.Formatter('%(asctime)s : %(name)s - %(levelname)s - %(message)s')
logging.basicConfig(format=formatter)
###
# This script utilizes AIMET to perform Quantization aware training on a resnet50
# pretrained model with the ImageNet data set. This is intended as a working example
# to show how AIMET APIs can be invoked.
# Scenario parameters:
# - AIMET quantization aware training using simulation model
# - Quant Scheme: 'tf'
# - rounding_mode: 'nearest'
# - default_output_bw: 8, default_param_bw: 8
# - Encoding computation using 5 batches of data
# - Input shape: [1, 3, 224, 224]
# - Learning rate: 0.001
# - Decay Steps: 5
###
class ImageNetDataPipeline:
"""
Provides APIs for model evaluation and training using ImageNet TFRecords.
"""
def __init__(self, _config: argparse.Namespace):
"""
Instantiates ImageNetDataPipeline object
:param _config:
"""
self._config = _config
def evaluate(self, sess: tf.Session, iterations: int = None) -> float:
"""
Evaluate the specified session using the specified number of samples from the validation set.
AIMET's QuantizationSimModel.compute_encodings() expects the function with this signature
to its eval_callback parameter.
:param sess: The sess graph to be evaluated.
:param iterations: The number of batches of the dataset.
        :return: The top-1 accuracy over the evaluated samples.
"""
# your code goes here instead of the example from below
evaluator = ImageNetEvaluator(self._config.tfrecord_dir, training_inputs=['keras_learning_phase:0'],
data_inputs=['input_1:0'], validation_inputs=['labels:0'],
image_size=image_net_config.dataset['image_size'],
batch_size=image_net_config.evaluation['batch_size'],
format_bgr=True)
return evaluator.evaluate(sess, iterations)
def train(self, sess: tf.Session, update_ops_name: List[str] = None):
"""
        Trains the session graph. The implementation provided here is just an example;
provide your own implementation if needed.
:param sess: The sess graph to train.
        :param update_ops_name: list of names of update ops (mostly BatchNorms' moving averages).
tf.GraphKeys.UPDATE_OPS collections is always used
in addition to this list
"""
# Your code goes here instead of the example from below
trainer = ImageNetTrainer(self._config.tfrecord_dir, training_inputs=['keras_learning_phase:0'],
data_inputs=['input_1:0'], validation_inputs=['labels:0'],
image_size=image_net_config.dataset['image_size'],
batch_size=image_net_config.train['batch_size'],
num_epochs=self._config.epochs, format_bgr=True)
trainer.train(sess, update_ops_name=update_ops_name, learning_rate=self._config.learning_rate,
decay_steps=self._config.decay_steps)
save_model_to_meta(sess, meta_path=os.path.join(self._config.logdir, 'QAT_model'))
def create_quant_sim_model(sess: tf.Session, start_op_names: List[str], output_op_names: List[str],
use_cuda: bool, parity_config_file: str,
                           evaluator: Callable[[tf.Session, Any], None]) -> QuantizationSimModel:
"""
Apply quantizer simulator on the original model and return its object.
:param sess: The sess with graph.
:param start_op_names: The list of input op names of the sess.graph
:param output_op_names: The list of output op names of the sess.graph
:param use_cuda: If True then use a GPU for QuantizationSimModel
:param parity_config_file: Config file for H/W parity
:param evaluator: A callback function that is expected to run forward passes on a session
:return: QuantizationSimModel object
"""
# Quant scheme can be 'post_training_tf' or 'post_training_tf_enhanced'
quant_scheme = QuantScheme.post_training_tf
# Rounding mode can be 'nearest' or 'stochastic'
rounding_mode = 'nearest'
# Output bit-width for quantization
default_output_bw = 8
# Parameter bit-width for quantization
default_param_bw = 8
quant_sim_model = QuantizationSimModel(session=sess,
starting_op_names=start_op_names,
output_op_names=output_op_names,
quant_scheme=quant_scheme, rounding_mode=rounding_mode,
default_output_bw=default_output_bw,
default_param_bw=default_param_bw,
use_cuda=use_cuda, config_file=parity_config_file)
# Number of batches to use for computing encodings
    # Only 5 batches are used here to speed up the process; the number of
    # images in these 5 batches should be sufficient for computing the
    # encodings.
iterations = 5
# Here evaluator is used for forward_pass_callback as it is available
# from Data Pipeline class. But any forward pass function can be used
# here which doesn't necessarily need to use any labels data or return
# any output. For Example, following snippet of code can be used for
# forward_pass_callback:
# def forward_pass_callback(session: tf.Session, iterations: int):
# input_tensor = <input tensor in session>
# train_tensor = <train tensor in session>
# curr_iter = 1
# for input_data, _ in data_loaders:
# feed_dict = {input_tensor: input_data,
# train_tensor: False}
# session.run([], feed_dict=feed_dict)
# curr_iter += 1
# if curr_iter > iterations:
# break
quant_sim_model.compute_encodings(forward_pass_callback=evaluator,
forward_pass_callback_args=iterations)
return quant_sim_model
def perform_qat(config: argparse.Namespace):
"""
1. Instantiates Data Pipeline for evaluation and training
2. Loads the pretrained resnet50 keras model
3. Calculates floating point accuracy
4. Quantization Sim Model
4.1. Creates Quantization Sim model using AIMET QuantizationSimModel
4.2. Calculates and logs the accuracy of quantizer sim model
5. Quantization Aware Training
5.1. Trains the quantization aware model
5.2. Calculates and logs the accuracy of quantization Aware trained model
5.3. Exports quantization aware model so it is ready to be run on-target
    :param config: This argparse.Namespace config expects the following parameters:
tfrecord_dir: Path to a directory containing ImageNet TFRecords.
This folder should contain files starting with:
'train*': for training records and 'validation*': for validation records
parity_config_file: An optional parity config file, used in Quantizer
                   use_cuda: A boolean flag indicating whether to run the test on GPU.
logdir: Path to a directory for logging.
epochs: Number of epochs (type int) for training.
learning_rate: A float type learning rate for model training
                   decay_steps: A number used to adjust (decay) the learning rate after every decay_steps
epochs in training.
"""
# 1. Instantiates Data Pipeline for evaluation and training
data_pipeline = ImageNetDataPipeline(config)
# 2. Loads the pretrained resnet50 keras model
input_shape = (image_net_config.dataset['image_width'],
image_net_config.dataset['image_height'],
image_net_config.dataset['image_channels'])
tf.keras.backend.clear_session()
model = ResNet50(weights='imagenet', input_shape=input_shape)
sess = tf.keras.backend.get_session()
add_image_net_computational_nodes_in_graph(sess, model.output, image_net_config.dataset['images_classes'])
update_ops_name = [op.name for op in model.updates]
# 3. Calculates floating point accuracy
accuracy = data_pipeline.evaluate(sess)
logger.info("Original Model Top-1 accuracy = %.2f", accuracy)
# 4. Quantization Sim Model
logger.info("Starting Model QuantSim...")
# 4.1. Creates Quantization Sim model using AIMET QuantizationSimModel
# It is recommended to fold Batch-norms before making model runnable on target
BN_folded_sess, _ = aimet_bnf.fold_all_batch_norms(sess, input_op_names=['input_1'],
output_op_names=[model.output.name.split(":")[0]])
quant_sim = create_quant_sim_model(sess=BN_folded_sess,
start_op_names=['input_1'],
output_op_names=[model.output.name.split(":")[0]],
use_cuda=config.use_cuda, parity_config_file=config.parity_config_file,
evaluator=data_pipeline.evaluate)
# 4.2. Calculates and logs the accuracy of quantizer sim model
accuracy = data_pipeline.evaluate(quant_sim.session)
logger.info("Model Top-1 Accuracy on Quant Simulator = %.2f", accuracy)
logger.info("Model QuantSim Done")
# 5. Quantization Aware Training
logger.info("Starting Model QAT")
# 5.1. Trains the quantization aware model
data_pipeline.train(quant_sim.session, update_ops_name=update_ops_name)
# 5.2. Calculates and logs the accuracy of quantization aware trained model
accuracy = data_pipeline.evaluate(quant_sim.session)
logger.info("Quantization aware trained model Top-1 Accuracy on Quant Simulator = %.2f", accuracy)
# 5.3. Exports quantization aware model so it is ready to be run on-target
logger.info("Saving Quantized model graph")
quant_sim.export(path=config.logdir, filename_prefix='quantized_model')
logger.info("Quantized model graph is saved!")
logger.info("Model QAT Done")
if __name__ == '__main__':
default_logdir = os.path.join("benchmark_output", "QAT_1.0_" + datetime.now().strftime("%Y-%m-%d-%H-%M-%S"))
parser = argparse.ArgumentParser(
description='Perform Quantization aware training on pretrained ResNet50 model for ImageNet dataset')
parser.add_argument('--tfrecord_dir', type=str,
required=True,
help="Path to | |
<reponame>edawson/parliament2
"""
Testing for the gradient boosting module (sklearn.ensemble.gradient_boosting).
"""
import numpy as np
import warnings
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_true
from sklearn.metrics import mean_squared_error
from sklearn.utils import check_random_state, tosequence
from sklearn.utils.validation import DataConversionWarning
from sklearn.ensemble import GradientBoostingClassifier
from sklearn.ensemble import GradientBoostingRegressor
from sklearn import datasets
# toy sample
X = [[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1]]
y = [-1, -1, -1, 1, 1, 1]
T = [[-1, -1], [2, 2], [3, 2]]
true_result = [-1, 1, 1]
rng = np.random.RandomState(0)
# also load the boston dataset
# and randomly permute it
boston = datasets.load_boston()
perm = rng.permutation(boston.target.size)
boston.data = boston.data[perm]
boston.target = boston.target[perm]
# also load the iris dataset
# and randomly permute it
iris = datasets.load_iris()
perm = rng.permutation(iris.target.size)
iris.data = iris.data[perm]
iris.target = iris.target[perm]
def test_classification_toy():
"""Check classification on a toy dataset."""
clf = GradientBoostingClassifier(n_estimators=100, random_state=1)
assert_raises(ValueError, clf.predict, T)
clf.fit(X, y)
assert_array_equal(clf.predict(T), true_result)
assert_equal(100, len(clf.estimators_))
deviance_decrease = (clf.train_score_[:-1] - clf.train_score_[1:])
assert np.any(deviance_decrease >= 0.0), \
"Train deviance does not monotonically decrease."
def test_parameter_checks():
"""Check input parameter validation."""
assert_raises(ValueError,
GradientBoostingClassifier(n_estimators=0).fit, X, y)
assert_raises(ValueError,
GradientBoostingClassifier(n_estimators=-1).fit, X, y)
assert_raises(ValueError,
GradientBoostingClassifier(learning_rate=0.0).fit, X, y)
assert_raises(ValueError,
GradientBoostingClassifier(learning_rate=-1.0).fit, X, y)
assert_raises(ValueError,
GradientBoostingClassifier(loss='foobar').fit, X, y)
assert_raises(ValueError,
GradientBoostingClassifier(min_samples_split=0.0).fit, X, y)
assert_raises(ValueError,
GradientBoostingClassifier(min_samples_split=-1.0).fit, X, y)
assert_raises(ValueError,
GradientBoostingClassifier(min_samples_leaf=0).fit, X, y)
assert_raises(ValueError,
GradientBoostingClassifier(min_samples_leaf=-1.).fit, X, y)
assert_raises(ValueError,
GradientBoostingClassifier(subsample=0.0).fit, X, y)
assert_raises(ValueError,
GradientBoostingClassifier(subsample=1.1).fit, X, y)
assert_raises(ValueError,
GradientBoostingClassifier(subsample=-0.1).fit, X, y)
assert_raises(ValueError,
GradientBoostingClassifier(max_depth=-0.1).fit, X, y)
assert_raises(ValueError,
GradientBoostingClassifier(max_depth=0).fit, X, y)
assert_raises(ValueError,
GradientBoostingClassifier(init={}).fit, X, y)
# test fit before feature importance
assert_raises(ValueError,
lambda: GradientBoostingClassifier().feature_importances_)
# binomial deviance requires ``n_classes == 2``.
assert_raises(ValueError,
lambda X, y: GradientBoostingClassifier(
loss='bdeviance').fit(X, y),
X, [0, 0, 1, 1, 2, 2])
# multinomial deviance requires ``n_classes > 2``.
assert_raises(ValueError,
lambda X, y: GradientBoostingClassifier(
loss='mdeviance').fit(X, y),
X, [0, 0, 1, 1, 1, 0])
# deviance requires ``n_classes >= 2``.
assert_raises(ValueError,
lambda X, y: GradientBoostingClassifier(
loss='deviance').fit(X, y),
X, [0, 0, 0, 0])
def test_classification_synthetic():
"""Test GradientBoostingClassifier on synthetic dataset used by
Hastie et al. in ESLII Example 12.7. """
X, y = datasets.make_hastie_10_2(n_samples=12000, random_state=1)
X_train, X_test = X[:2000], X[2000:]
y_train, y_test = y[:2000], y[2000:]
gbrt = GradientBoostingClassifier(n_estimators=100, min_samples_split=1,
max_depth=1,
learning_rate=1.0, random_state=0)
gbrt.fit(X_train, y_train)
error_rate = (1.0 - gbrt.score(X_test, y_test))
assert error_rate < 0.085, \
"GB failed with error %.4f" % error_rate
gbrt = GradientBoostingClassifier(n_estimators=200, min_samples_split=1,
max_depth=1,
learning_rate=1.0, subsample=0.5,
random_state=0)
gbrt.fit(X_train, y_train)
error_rate = (1.0 - gbrt.score(X_test, y_test))
assert error_rate < 0.08, \
"Stochastic GB failed with error %.4f" % error_rate
def test_boston():
"""Check consistency on dataset boston house prices with least squares
and least absolute deviation. """
for loss in ("ls", "lad", "huber"):
clf = GradientBoostingRegressor(n_estimators=100, loss=loss,
max_depth=4,
min_samples_split=1, random_state=1)
assert_raises(ValueError, clf.predict, boston.data)
clf.fit(boston.data, boston.target)
y_pred = clf.predict(boston.data)
mse = mean_squared_error(boston.target, y_pred)
assert mse < 6.0, "Failed with loss %s and mse = %.4f" % (loss, mse)
def test_iris():
"""Check consistency on dataset iris."""
for subsample in (1.0, 0.5):
clf = GradientBoostingClassifier(n_estimators=100, loss='deviance',
random_state=1, subsample=subsample)
clf.fit(iris.data, iris.target)
score = clf.score(iris.data, iris.target)
assert score > 0.9, "Failed with subsample %.1f " \
"and score = %f" % (subsample, score)
def test_regression_synthetic():
"""Test on synthetic regression datasets used in Leo Breiman,
    `Bagging Predictors`. Machine Learning 24(2): 123-140 (1996). """
random_state = check_random_state(1)
regression_params = {'n_estimators': 100, 'max_depth': 4,
'min_samples_split': 1, 'learning_rate': 0.1,
'loss': 'ls'}
# Friedman1
X, y = datasets.make_friedman1(n_samples=1200,
random_state=random_state, noise=1.0)
X_train, y_train = X[:200], y[:200]
X_test, y_test = X[200:], y[200:]
clf = GradientBoostingRegressor()
clf.fit(X_train, y_train)
mse = mean_squared_error(y_test, clf.predict(X_test))
assert mse < 5.0, "Failed on Friedman1 with mse = %.4f" % mse
# Friedman2
X, y = datasets.make_friedman2(n_samples=1200, random_state=random_state)
X_train, y_train = X[:200], y[:200]
X_test, y_test = X[200:], y[200:]
clf = GradientBoostingRegressor(**regression_params)
clf.fit(X_train, y_train)
mse = mean_squared_error(y_test, clf.predict(X_test))
assert mse < 1700.0, "Failed on Friedman2 with mse = %.4f" % mse
# Friedman3
X, y = datasets.make_friedman3(n_samples=1200, random_state=random_state)
X_train, y_train = X[:200], y[:200]
X_test, y_test = X[200:], y[200:]
clf = GradientBoostingRegressor(**regression_params)
clf.fit(X_train, y_train)
mse = mean_squared_error(y_test, clf.predict(X_test))
assert mse < 0.015, "Failed on Friedman3 with mse = %.4f" % mse
def test_feature_importances():
X = np.array(boston.data, dtype=np.float32)
y = np.array(boston.target, dtype=np.float32)
clf = GradientBoostingRegressor(n_estimators=100, max_depth=5,
min_samples_split=1, random_state=1)
clf.fit(X, y)
#feature_importances = clf.feature_importances_
assert_true(hasattr(clf, 'feature_importances_'))
# true feature importance ranking
# true_ranking = np.array([3, 1, 8, 2, 10, 9, 4, 11, 0, 6, 7, 5, 12])
# assert_array_equal(true_ranking, feature_importances.argsort())
def test_probability():
"""Predict probabilities."""
clf = GradientBoostingClassifier(n_estimators=100, random_state=1)
assert_raises(ValueError, clf.predict_proba, T)
clf.fit(X, y)
assert_array_equal(clf.predict(T), true_result)
# check if probabilities are in [0, 1].
y_proba = clf.predict_proba(T)
assert np.all(y_proba >= 0.0)
assert np.all(y_proba <= 1.0)
# derive predictions from probabilities
y_pred = clf.classes_.take(y_proba.argmax(axis=1), axis=0)
assert_array_equal(y_pred, true_result)
def test_check_inputs():
"""Test input checks (shape and type of X and y)."""
clf = GradientBoostingClassifier(n_estimators=100, random_state=1)
assert_raises(ValueError, clf.fit, X, y + [0, 1])
from scipy import sparse
X_sparse = sparse.csr_matrix(X)
clf = GradientBoostingClassifier(n_estimators=100, random_state=1)
assert_raises(TypeError, clf.fit, X_sparse, y)
clf = GradientBoostingClassifier().fit(X, y)
assert_raises(TypeError, clf.predict, X_sparse)
def test_check_inputs_predict():
"""X has wrong shape """
clf = GradientBoostingClassifier(n_estimators=100, random_state=1)
clf.fit(X, y)
x = np.array([1.0, 2.0])[:, np.newaxis]
assert_raises(ValueError, clf.predict, x)
x = np.array([])
assert_raises(ValueError, clf.predict, x)
x = np.array([1.0, 2.0, 3.0])[:, np.newaxis]
assert_raises(ValueError, clf.predict, x)
clf = GradientBoostingRegressor(n_estimators=100, random_state=1)
clf.fit(X, rng.rand(len(X)))
x = np.array([1.0, 2.0])[:, np.newaxis]
assert_raises(ValueError, clf.predict, x)
x = np.array([])
assert_raises(ValueError, clf.predict, x)
x = np.array([1.0, 2.0, 3.0])[:, np.newaxis]
assert_raises(ValueError, clf.predict, x)
def test_check_max_features():
"""test if max_features is valid. """
clf = GradientBoostingRegressor(n_estimators=100, random_state=1,
max_features=0)
assert_raises(ValueError, clf.fit, X, y)
clf = GradientBoostingRegressor(n_estimators=100, random_state=1,
max_features=(len(X[0]) + 1))
assert_raises(ValueError, clf.fit, X, y)
def test_max_feature_regression():
"""Test to make sure random state is set properly. """
X, y = datasets.make_hastie_10_2(n_samples=12000, random_state=1)
X_train, X_test = X[:2000], X[2000:]
y_train, y_test = y[:2000], y[2000:]
gbrt = GradientBoostingClassifier(n_estimators=100, min_samples_split=5,
max_depth=2, learning_rate=.1,
max_features=2, random_state=1)
gbrt.fit(X_train, y_train)
deviance = gbrt.loss_(y_test, gbrt.decision_function(X_test))
assert_true(deviance < 0.5, "GB failed with deviance %.4f" % deviance)
def test_staged_predict():
"""Test whether staged decision function eventually gives
the same prediction.
"""
X, y = datasets.make_friedman1(n_samples=1200,
random_state=1, noise=1.0)
X_train, y_train = X[:200], y[:200]
X_test, y_test = X[200:], y[200:]
clf = GradientBoostingRegressor()
# test raise ValueError if not fitted
assert_raises(ValueError, lambda X: np.fromiter(
clf.staged_predict(X), dtype=np.float64), X_test)
clf.fit(X_train, y_train)
y_pred = clf.predict(X_test)
# test if prediction for last stage equals ``predict``
for y in clf.staged_predict(X_test):
assert_equal(y.shape, y_pred.shape)
assert_array_equal(y_pred, y)
def test_staged_predict_proba():
"""Test whether staged predict proba eventually gives
the same prediction.
"""
X, y = datasets.make_hastie_10_2(n_samples=1200,
random_state=1)
X_train, y_train = X[:200], y[:200]
X_test, y_test = X[200:], y[200:]
clf = GradientBoostingClassifier(n_estimators=20)
# test raise ValueError if not fitted
assert_raises(ValueError, lambda X: np.fromiter(
clf.staged_predict_proba(X), dtype=np.float64), X_test)
clf.fit(X_train, y_train)
# test if prediction for last stage equals ``predict``
for y_pred in clf.staged_predict(X_test):
assert_equal(y_test.shape, y_pred.shape)
assert_array_equal(clf.predict(X_test), y_pred)
# test if prediction for last stage equals ``predict_proba``
for staged_proba in clf.staged_predict_proba(X_test):
assert_equal(y_test.shape[0], staged_proba.shape[0])
assert_equal(2, staged_proba.shape[1])
assert_array_equal(clf.predict_proba(X_test), staged_proba)
def test_serialization():
"""Check model serialization."""
clf = GradientBoostingClassifier(n_estimators=100, random_state=1)
clf.fit(X, y)
assert_array_equal(clf.predict(T), true_result)
assert_equal(100, len(clf.estimators_))
try:
import cPickle as pickle
except ImportError:
import pickle
serialized_clf = pickle.dumps(clf, protocol=pickle.HIGHEST_PROTOCOL)
clf = None
clf = pickle.loads(serialized_clf)
assert_array_equal(clf.predict(T), true_result)
assert_equal(100, len(clf.estimators_))
def test_degenerate_targets():
"""Check if we can fit even though all targets are equal. """
clf = GradientBoostingClassifier(n_estimators=100, random_state=1)
# classifier should raise exception
assert_raises(ValueError, clf.fit, X, np.ones(len(X)))
clf = GradientBoostingRegressor(n_estimators=100, random_state=1)
clf.fit(X, np.ones(len(X)))
clf.predict(rng.rand(2))
assert_array_equal(np.ones((1,), dtype=np.float64),
clf.predict(rng.rand(2)))
def test_quantile_loss():
"""Check if quantile loss with alpha=0.5 equals lad. """
clf_quantile = GradientBoostingRegressor(n_estimators=100, loss='quantile',
max_depth=4, alpha=0.5,
random_state=7)
clf_quantile.fit(boston.data, boston.target)
y_quantile = clf_quantile.predict(boston.data)
clf_lad = GradientBoostingRegressor(n_estimators=100, loss='lad',
max_depth=4, random_state=7)
clf_lad.fit(boston.data, boston.target)
y_lad = clf_lad.predict(boston.data)
assert_array_almost_equal(y_quantile, y_lad, decimal=4)
def test_symbol_labels():
"""Test with non-integer class labels. """
clf = GradientBoostingClassifier(n_estimators=100, random_state=1)
symbol_y = tosequence(map(str, y))
clf.fit(X, symbol_y)
assert_array_equal(clf.predict(T), tosequence(map(str, true_result)))
assert_equal(100, len(clf.estimators_))
def test_float_class_labels():
"""Test with float class labels. """
clf = GradientBoostingClassifier(n_estimators=100, random_state=1)
float_y = np.asarray(y, dtype=np.float32)
clf.fit(X, float_y)
assert_array_equal(clf.predict(T),
np.asarray(true_result, dtype=np.float32))
assert_equal(100, len(clf.estimators_))
def test_shape_y():
"""Test with float class labels. """
clf = GradientBoostingClassifier(n_estimators=100, random_state=1)
y_ = np.asarray(y, dtype=np.int32)
y_ = y_[:, np.newaxis]
with warnings.catch_warnings(record=True):
# This will raise a DataConversionWarning that we want to
# "always" raise, elsewhere the warnings gets ignored in the
# later tests, and the tests that check for this warning fail
warnings.simplefilter("always", DataConversionWarning)
clf.fit(X, y_)
assert_array_equal(clf.predict(T), true_result)
assert_equal(100, len(clf.estimators_))
def test_mem_layout():
"""Test with different memory layouts of X and y"""
X_ = np.asfortranarray(X)
clf = GradientBoostingClassifier(n_estimators=100, random_state=1)
clf.fit(X_, y)
assert_array_equal(clf.predict(T), true_result)
assert_equal(100, len(clf.estimators_))
X_ | |
res = 'ערך תקין עבור בדיקת %s בין %.2f ועד %.2f, ערך מתחת %.2f נחשב חריג' % (
bloodtest_entity, bloodtest_thr2, bloodtest_max, bloodtest_thr1)
elif bloodtest_type == 3:
res = 'ערך תקין עבור בדיקת %s בין %.2f ועד %.2f' % (
bloodtest_entity, bloodtest_thr1, bloodtest_thr2)
res = res_timer(res, tracker)
dispatcher.utter_message(text="%s" % res)
except Exception as e:
res = "אין לי מושג, מצטער!"
res = res_error(res, tracker, e)
dispatcher.utter_message(text=res)
return [SlotSet("x", None), SlotSet("y", None), SlotSet("previous_intent", None)]
# ------------------------------------------------------------------
class ActionBloodtestValueQuestion(Action):
def name(self) -> Text:
return "action_nutrition_bloodtest_value"
def run(self, dispatcher: CollectingDispatcher,
tracker: Tracker,
domain: Dict[Text, Any]) -> List[Dict[Text, Any]]:
try:
tic()
# db_dict = load_db_googleSheet(0x102)
db_dict = get_tables('0x102')
lut_df = db_dict['lut']
bloodtest_df = db_dict['bloodtest_vals']
            user_msg = tracker.latest_message.get('text')
            # Initialize, so the checks below don't fail when an entity is missing
            val = None
            bloodtest_entity = None
for ent in tracker.latest_message.get('entities'):
if ent['entity'] in [x for x in lut_df[self.name()].values if x != 0]:
if ent['entity'] == 'integer':
val = ent['value']
else:
bloodtest_entity = ent['value']
if not val:
regex_res = re.search('האם (.*) הוא .*', user_msg.replace('?', ''))
if regex_res:
val = regex_res.group(1).strip()
if not val:
raise Exception()
feature = db_dict['lut']['Entity'][bloodtest_entity]
gender_str = "Male"
if tracker.get_slot('gender') == "זכר":
gender_str = "Male"
elif tracker.get_slot('gender') == "נקבה":
gender_str = "Female"
age = float(tracker.get_slot('age') if tracker.get_slot('age') else "40")
bloodtest_row = bloodtest_df[(bloodtest_df['Element'] == feature) & \
((bloodtest_df['Gender'] == "ANY") | (
bloodtest_df['Gender'] == gender_str)) & \
((bloodtest_df['Age min'] == "ANY") | (
bloodtest_df['Age min'].replace('ANY', -1).astype(float) <= age)) & \
((bloodtest_df['Age Max'] == "ANY") | (
bloodtest_df['Age Max'].replace('ANY', -1).astype(float) > age))]
bloodtest_type = int(bloodtest_row['Graph type'].values[0])
bloodtest_min = bloodtest_row['Min'].values[0]
bloodtest_thr1 = bloodtest_row['Threshold 1'].values[0]
bloodtest_thr2 = bloodtest_row['Threshold 2'].values[0]
bloodtest_max = bloodtest_row['Max'].values[0]
if bloodtest_type == 1:
if bloodtest_min <= float(val) <= bloodtest_thr1:
res = 'כן, זהו ערך תקין עבור בדיקת %s היות והוא נופל בטווח בין %.2f ועד %.2f. ערך מעל %.2f נחשב לחריג' % (
bloodtest_entity, bloodtest_min, bloodtest_thr1, bloodtest_thr2)
else:
res = 'לא, זהו אינו ערך תקין עבור בדיקת %s. ערך תקין הינו בטווח בין %.2f ועד %.2f. ערך מעל %.2f נחשב לחריג' % (
bloodtest_entity, bloodtest_min, bloodtest_thr1, bloodtest_thr2)
elif bloodtest_type == 2:
if bloodtest_thr2 <= float(val) <= bloodtest_max:
res = 'כן, זהו ערך תקין עבור בדיקת %s היות והוא נופל בטווח בין %.2f ועד %.2f. ערך מתחת %.2f נחשב לחריג' % (
bloodtest_entity, bloodtest_thr2, bloodtest_max, bloodtest_thr1)
else:
res = 'לא, זהו אינו ערך תקין עבור בדיקת %s. ערך תקין הינו בטווח בין %.2f ועד %.2f. ערך מתחת %.2f נחשב לחריג' % (
bloodtest_entity, bloodtest_thr2, bloodtest_max, bloodtest_thr1)
elif bloodtest_type == 3:
if bloodtest_thr1 <= float(val) <= bloodtest_thr2:
res = 'כן, זהו ערך תקין עבור בדיקת %s היות והוא נופל בטווח בין %.2f ועד %.2f' % (
bloodtest_entity, bloodtest_thr1, bloodtest_thr2)
else:
res = 'לא, זהו אינו ערך תקין עבור בדיקת %s. ערך תקין הינו בטווח בין %.2f ועד %.2f.' % (
bloodtest_entity, bloodtest_thr1, bloodtest_thr2)
else:
raise Exception()
res = res_timer(res, tracker)
dispatcher.utter_message(text="%s" % res)
except Exception as e:
res = "אין לי מושג, מצטער!"
res = res_error(res, tracker, e)
dispatcher.utter_message(text=res)
return [SlotSet("x", None), SlotSet("y", None), SlotSet("previous_intent", None)]
# ------------------------------------------------------------------
class ActionFoodSubstituteQuestion(Action):
def name(self) -> Text:
return "action_nutrition_food_substitute"
def run(self, dispatcher: CollectingDispatcher,
tracker: Tracker,
domain: Dict[Text, Any]) -> List[Dict[Text, Any]]:
try:
tic()
# db_dict = load_db_googleSheet(0xc33)
db_dict = get_tables('0xc33')
db_df = db_dict['tzameret']
lut_df = db_dict['lut']
features_df = db_dict['food_units_features']
common_df = db_dict['common_food']
food_ranges_df = db_dict['food_ranges']
subs_tags_alias_df = db_dict['subs_tags_alias']
features_df = features_df.drop(index=0)
user_msg = tracker.latest_message.get('text')
food_entity = ""
for ent in tracker.latest_message.get('entities'):
if ent['entity'] in lut_df[self.name()].values:
food_entity = ent['value']
break
if food_entity == "" or food_entity is None:
prediction = tracker.latest_message
food_entity = prediction['entities'][0]['value']
tzameret_groups_lut = {}
tzameret_groups_lut['1'] = ['1', '4'] # Milk
tzameret_groups_lut['2'] = ['1', '2', '3', '4'] # Meat
tzameret_groups_lut['3'] = ['1', '2', '3', '4'] # Eggs
tzameret_groups_lut['4'] = ['1', '4'] # Dairy
tzameret_groups_lut['5'] = ['5', '6', '7', '9'] # Snacks
tzameret_groups_lut['6'] = ['5', '6', '7', '9'] # Fruits
tzameret_groups_lut['7'] = ['5', '6', '7', '9'] # Vegetables
tzameret_groups_lut['8'] = ['8', '4'] # Fat
tzameret_groups_lut['9'] = ['5', '6', '7', '9'] # Beverages
food_energy_thr = 0.05
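# A candidate substitute must come from a related Tzameret food group (per the lookup above,
# keyed by the first digit of the food code) and have food energy within 5% of the original
# food (food_energy_thr).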
def get_advantages(food):
advantages = []
for idx, row in food_ranges_df.iterrows():
if row["tzameret_name"] and row["tzameret_name"] in food:
if row["good_or_bad"] == "good":
value = float(food[row["tzameret_name"]])
if idx == "Protein":
threshold = 250
else:
threshold = float(row["Medium - threshold per 100gr"])
if value > threshold:
advantages.append(row["hebrew_name"])
return advantages
def get_advantages_score(food):
act = food['advantages']
ref = ast.literal_eval(food['advantages_ref'])
intersection = []
if isinstance(act, list) and isinstance(ref, list):
intersection = list(set(act) & set(ref))
return len(intersection)
food = food_entity
if food in common_df.index:
food = common_df[common_df.index == food]['shmmitzrach'][0]
food_tzameret = db_df[db_df['shmmitzrach'].str.contains(food)].iloc[0, :]
tzameret_code = int(food_tzameret['smlmitzrach'])
tzameret_code_msb = food_tzameret['smlmitzrach'][0]
food_energy = food_tzameret['food_energy']
food_features = features_df[features_df['smlmitzrach'].fillna(0).astype(int) == tzameret_code]
user_msg_feature_v = []
user_msg_feature_k = list(
set(subs_tags_alias_df.index.to_list()) & set(user_msg.replace(',', '').split(" ")))
for tag in user_msg_feature_k:
tag_df = subs_tags_alias_df[subs_tags_alias_df.index == tag]['Entity']
if tag_df.any():
user_msg_feature_v.append(tag_df.values[0])
food_filter_1 = db_df[db_df['smlmitzrach'].str[0].isin(tzameret_groups_lut[tzameret_code_msb])]
food_filter_2 = db_df[abs(db_df['food_energy'] - food_energy) / food_energy < food_energy_thr]
food_filter_1_2 = pd.merge(food_filter_1, food_filter_2, how='inner')
food_filter_1_2['smlmitzrach'] = food_filter_1_2['smlmitzrach'].astype(float)
features_df['smlmitzrach'] = features_df['smlmitzrach'].astype(float)
food_filter = features_df[features_df['smlmitzrach'].isin(food_filter_1_2['smlmitzrach'].to_list())]
food_filter = food_filter[~food_filter['Food_Name'].str.contains(food_entity)]
for tag in user_msg_feature_v:
food_filter = food_filter[food_filter[tag] == 'Yes']
food_filter = food_filter.reset_index(drop=True)
if food_features.empty:
food_filter['features_score'] = 0
else:
food_features_compact = food_features.iloc[:, 5:-4]
food_filter_compact = food_filter.iloc[:, 5:-4].reset_index(drop=True)
food_features_compact_shaped = pd.DataFrame(
np.repeat(food_features_compact.values, len(food_filter_compact), axis=0))
food_features_compact_shaped = food_features_compact_shaped.reset_index(drop=True)
food_features_compact_shaped.columns = food_features_compact.columns
food_features_score_df = (food_filter_compact == food_features_compact_shaped).astype(int)
food_filter['features_score'] = food_features_score_df.sum(axis=1)
food_advantages = get_advantages(food_tzameret)
food_filter['advantages'] = food_filter_1_2.apply(get_advantages, axis=1)
food_filter['advantages_ref'] = str(food_advantages)
food_filter['advantages_score'] = food_filter.apply(get_advantages_score, axis=1)
food_filter = food_filter.sort_values(['features_score', 'advantages_score'], ascending=False)
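# Reply (Hebrew): "Here are the 5 closest substitutes for <food>", followed by the top-5 food names.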
res = "להלן 5 התחליפים הקרובים ביותר עבור %s" % food_entity
res += "\n"
res += '\n'.join(list(food_filter['Food_Name'].values[:5]))
res = res_timer(res, tracker)
dispatcher.utter_message(text="%s" % res)
except Exception as e:
res = "אין לי מושג, מצטער!"
res = res_error(res, tracker, e)
dispatcher.utter_message(text=res)
return [SlotSet("x", None), SlotSet("y", None), SlotSet("previous_intent", None)]
# ------------------------------------------------------------------
class ActionPersonalizationList(Action):
def name(self) -> Text:
return "action_personlization_list"
def run(self, dispatcher: CollectingDispatcher,
tracker: Tracker,
domain: Dict[Text, Any]) -> List[Dict[Text, Any]]:
try:
pkl_db = './persons.pkl'
if path.exists(pkl_db):
df = pd.read_pickle(pkl_db)
dispatcher.utter_message(text="%s" % df.to_string())
except Exception as e:
res = "אין לי מושג, מצטער!"
res = res_error(res, tracker, e)
dispatcher.utter_message(text=res)
return [SlotSet("x", None), SlotSet("y", None), SlotSet("previous_intent", None)]
# ------------------------------------------------------------------
class ActionPersonalizationRemove(Action):
def name(self) -> Text:
return "action_personlization_remove"
def run(self, dispatcher: CollectingDispatcher,
tracker: Tracker,
domain: Dict[Text, Any]) -> List[Dict[Text, Any]]:
try:
pkl_db = './persons.pkl'
if path.exists(pkl_db):
df = pd.read_pickle(pkl_db)
phone_slot = tracker.get_slot("phone")
if phone_slot in df.index:
df = df.drop(tracker.get_slot("phone"))
df.to_pickle(pkl_db)
dispatcher.utter_message(text="רישומך הוסר מן המערכת")
else:
dispatcher.utter_message(text="אינך מופיע במערכת, לכן אין צורך בהסרת רישום")
except Exception as e:
res = "אין לי מושג, מצטער!"
res = res_error(res, tracker, e)
dispatcher.utter_message(text=res)
return [SlotSet("x", None), SlotSet("y", None), SlotSet("previous_intent", None)]
# ------------------------------------------------------------------
class ProfileFormValidator(FormValidationAction):
"""ProfileForm Validator"""
def name(self) -> Text:
return "validate_profile_form"
# -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- --
async def required_slots(
self,
slots_mapped_in_domain: List[Text],
dispatcher: "CollectingDispatcher",
tracker: "Tracker",
domain: "DomainDict",
) -> Optional[List[Text]]:
required_slots = ["phone", "username", "gender", "age", "weight", "height"]
return required_slots
# -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- --
def slot_mappings(self) -> Dict[Text, Union[Dict, List[Dict]]]:
"""A dictionary to map required slots to
- an extracted entity
- intent: value pairs
- a whole message
or a list of them, where a first match will be picked"""
return {
"phone": [
self.from_entity(entity="integer", role="phone"),
self.from_entity(entity="integer"),
self.from_text(),
],
"username": [
self.from_entity(entity="name"),
self.from_text(),
],
"gender": [
self.from_entity(entity="gender"),
],
"age": [
self.from_entity(entity="integer", role="age"),
self.from_entity(entity="integer"),
self.from_text(),
],
"weight": [
self.from_entity(entity="integer", role="weight"),
self.from_entity(entity="integer"),
self.from_text(),
],
"height": [
self.from_entity(entity="integer", role="height"),
self.from_entity(entity="integer"),
self.from_text(),
],
}
# -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- --
def validate_phone(
self,
value: Text,
dispatcher: CollectingDispatcher,
tracker: Tracker,
domain: Dict[Text, Any],
) -> Dict[Text, Any]:
"""Validate phone value."""
requested_slot = tracker.get_slot("requested_slot")
phone_slot = tracker.get_slot("phone")
phone_value = None
if requested_slot == "phone":
phone_value = value.replace('-', '').replace(' ', '')
pkl_db = './persons.pkl'
if path.exists(pkl_db):
df = pd.read_pickle(pkl_db)
if phone_value in df.index:
dispatcher.utter_message(
text="פרטיך נטענו בהצלחה, ברוכים השבים %s" % df.loc[phone_value].username)
return {'phone': phone_value,
'username': df.loc[phone_value].username,
'gender': df.loc[phone_value].gender,
'age': df.loc[phone_value].age,
'weight': df.loc[phone_value].weight,
'height': df.loc[phone_value].height}
else:
df = pd.DataFrame(columns=["username", "gender", "age", "weight", "height"])
df.to_pickle(pkl_db)
elif phone_slot:
phone_value = phone_slot
return {"phone": phone_value}
# -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- --
# Source file: lib/biokbase/probabilistic_annotation/DataExtractor.py
#!/usr/bin/python
#
# Call the CDMI API to get data needed to run the autorecon algorithm
# Needed data includes:
#
# For likelihood computations:
# - List of subsystems and the sequences of their members
# - List of OTU organisms
# - Gene neighborhoods for OTU organisms
#
# For optimization:
# - Reactions [only want mass/charge-balanced ones]
# - Metabolites
# -[Growth data? ]
# The CDMI_API is for "well-trodden paths" functions
# CDMI_EntityAPI is for ER functions (all_entities_..., get_Relationship_....)
from biokbase.cdmi.client import CDMI_API, CDMI_EntityAPI
from biokbase.probabilistic_annotation.Helpers import now
from urllib2 import URLError, HTTPError
import urllib
import sys
import operator #for itemgetter
try:
import json
except ImportError:
sys.path.append('simplejson-2.3.3')
import simplejson as json
def getFieldFromEntity(seedEntity, fieldName):
''' Get a field from a entity returned by a get_entity_XXX() or all_entities_XXX() function.
@param seedEntity Dictionary keyed by ID to a dictionary of key-value pairs
@param fieldName Name of key in dictionary of key-value pairs
@return List of values for the specified field (key) in all of the entities.
'''
if seedEntity is None:
sys.stderr.write("INTERNAL ERROR: Provided seedEntity was None - usually this means you were searching for something that doesnt exist in the database\n")
raise ValueError
# Check for an error I seem to make all the time and yell at me in a USEFUL way
if not isinstance(seedEntity, dict):
sys.stderr.write("INTERNAL ERROR: getFieldFromEntity expects a dictionary - perhaps you meant to call getFieldFromRelationship?\n")
raise ValueError
f = []
for entry in seedEntity:
if fieldName not in seedEntity[entry]:
sys.stderr.write("INTERNAL ERROR: Field name %s not found in provided entity\n" %(fieldName))
raise ValueError
f.append(seedEntity[entry][fieldName])
return f
def getFieldFromRelationship(seedRelationship, fieldName, objtype):
''' Get a field from an object returned by a get_relationship_XXX() function.
The get_relationship_XXX() functions return lists of lists.
The first list is a list of all the links
The second list has three dictionaries in it: the TO dictionary, the REL dictionary
and the FROM dictionary describing properties on either end of the link and of the link itself.
If you want to maintain duplicate relationships (many-to-one, one-to-many, many-to-many),
this function should be called at least twice (once on each end of the relationship, or once
on an end and once in the middle).
@param seedRelationship Output from a get_relationship_XXX() function.
@param fieldName Field to extract from the object
@param objtype Type of object, "TO", "REL", or "FROM"
@return List (in the same order as the list from the get_relationship function)
of the values with the specified field name
'''
if seedRelationship is None:
sys.stderr.write("INTERNAL ERROR: The provided relationship was None - usually this means you were searching for something that doesn't exist in the database.\n")
raise ValueError
objidx = None
if objtype.lower() == "from":
objidx = 0
elif objtype.lower() == "rel":
objidx = 1
elif objtype.lower() == "to":
objidx = 2
else:
sys.stderr.write("INTERNAL ERROR: In getFieldFromRelationship - objtype must be TO, REL, or FROM\n")
raise ValueError
if not isinstance(seedRelationship, list):
sys.stderr.write("INTERNAL ERROR: getFieldFromRelationship expects a list - perhaps you meant to call getFieldFromEntity?\n")
raise ValueError
# Unravel
f = []
for entry in seedRelationship:
# TO CHECK: Is it possible to have one of the links lead to nothing?
# Check field name validity - if it links to something there has to be the data request there
# or else something is wrong.
if fieldName not in entry[objidx]:
sys.stderr.write("INTERNAL ERROR: Field name %s not found in provided relationship\n" %(fieldName))
raise ValueError
f.append(entry[objidx][fieldName])
return f
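# Example: the two helpers are typically used together on the same relationship result,
# as done in filterFidsByOtus below:
#   rel = cdmi_entity.get_relationship_IsOwnedBy(fidlist, [], [], ["id"])
#   fids = getFieldFromRelationship(rel, "from_link", "rel")
#   genomes = getFieldFromRelationship(rel, "id", "to")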
def subsystemFids(count, config):
''' Query the CDMI for a list of feature IDs in the subsystems.
@param count Number of entities to retrieve in each function call
@param config Dictionary of configuration variables
@return List of subsystem feature IDs
'''
cdmi = CDMI_API(config["cdmi_url"])
cdmi_entity = CDMI_EntityAPI(config["cdmi_url"])
# Get the genes that are in subsystems and in OTUs.
ssdict = dict()
start = 0
done = False
while not done:
subdict = cdmi_entity.all_entities_Subsystem(start, count, ["id"])
ssdict.update(subdict)
start += count
if len(subdict) < count:
done = True
ssids = getFieldFromEntity(ssdict, "id")
sys.stderr.write('Found %d subsystems\n' %(len(ssids)))
# Now lets get a list of FIDs within those subsystems
# Break the complete list into smaller sub-lists to avoid timeouts
start = 0
increment = 10
end = start + increment
counter = len(ssids)
ssfids = []
while counter > 0:
try:
ssfiddict = cdmi.subsystems_to_fids(ssids[start:end], [])
except HTTPError as e:
if increment > 1:
increment = increment / 2
end = start + increment
sys.stderr.write("caught '%s' error, increment is now %d\n" %(e.reason, increment))
continue
for key in ssfiddict:
for ssfid in ssfiddict[key]:
ls = ssfiddict[key][ssfid]
for arr in ls:
if len(arr) > 1:
gl = arr[1]
for l in gl:
ssfids.append(l)
# Move to next sub-list
start += increment
end += increment
if end >= len(ssids):
end = len(ssids)
counter -= increment
# Uniquify!
return list(set(ssfids))
def getDlitFids(count, config):
''' Query the CDMI for a list of feature IDs with direct literature evidence (dlits).
@param count Number of entities to retrieve in each function call
@param config Dictionary of configuration variables
@return List of literature feature IDs
'''
cdmi = CDMI_API(config["cdmi_url"])
cdmi_entity = CDMI_EntityAPI(config["cdmi_url"])
pubdict = dict()
start = 0
done = False
while not done:
subdict = cdmi_entity.all_entities_Publication(start, count, ["id"])
pubdict.update(subdict)
start += count
if len(subdict) < count:
done = True
pubids = getFieldFromEntity(pubdict, "id")
sys.stderr.write("Found %d publication IDs\n" %(len(pubids)))
pub2seq = cdmi_entity.get_relationship_Concerns(pubids, [], [], ["id"])
pubseqs = getFieldFromRelationship(pub2seq, "id", "to")
sys.stderr.write("Found %d protein sequences from publications\n" %(len(pubseqs)))
seq2fids = cdmi_entity.get_relationship_IsProteinFor(pubseqs, [], [], ["id"])
fids = getFieldFromRelationship(seq2fids, "id", "to")
return fids
def filterFidsByOtus(fidlist, otus, config):
'''
Obsolete (I think this isn't used any more)
Given a list of representative organism IDs (OTUs) and a list of
FIDs, returns only those FIDs found in an OTU.'''
cdmi_entity = CDMI_EntityAPI(config["cdmi_url"])
# Identify the organism belonging to each fid
# If this fails to find an organism we don't want it anyway...
orgdict = cdmi_entity.get_relationship_IsOwnedBy(fidlist, [], [], ["id"])
flist = getFieldFromRelationship(orgdict, "from_link", "rel")
olist = getFieldFromRelationship(orgdict, "id", "to")
fids = []
for ii in range(len(olist)):
if olist[ii] in otus:
fids.append(flist[ii])
return fids
def filterFidsByOtusBetter(fidsToRoles, rolesToFids, oturepsToMembers, config):
'''Attempt to do a more intelligent filtering of FIDs by OTU.
Given all FIDs attached to a role in the unfiltered set we do the following:
Initialize KEEP
For each OTU and each role:
If role is found in the representative, add to KEEP and continue;
Otherwise, iterate over other genomes.
If role is found in one other genome, add to KEEP and continue;
This process should make our calculation less sensitive to the choice of OTUs...
'''
cdmi_entity = CDMI_EntityAPI(config["cdmi_url"])
# Identify the organism belonging to each fid
# If this fails to find an organism we don't want it anyway...
fidlist = fidsToRoles.keys()
orgdict = []
# Break the complete list into smaller sub-lists to avoid timeouts
start = 0
increment = 5000
end = start + increment
counter = len(fidlist)
while counter > 0:
try:
od = cdmi_entity.get_relationship_IsOwnedBy(fidlist[start:end], [], [], ["id"])
except HTTPError as e:
if increment > 1:
increment = increment / 2
end = start + increment
sys.stderr.write("caught '%s' error, increment is now %d\n" %(e.reason, increment))
continue
orgdict.extend(od)
start += increment
end += increment
if end >= len(fidlist):
end = len(fidlist)
counter -= increment
fidlist = getFieldFromRelationship(orgdict, "from_link", "rel")
orglist = getFieldFromRelationship(orgdict, "id", "to")
fidToOrg = {}
for ii in range(len(fidlist)):
fidToOrg[fidlist[ii]] = orglist[ii]
keptFidsToRoles = {}
keptRolesToFids = {}
# If the OTUs are comprehensive this should be empty.
missingRoles = []
# For each OTU
for oturep in oturepsToMembers:
# for each role
for role in rolesToFids:
fidlist = rolesToFids[role]
keepFid = None
keepRole = None
for fid in fidlist:
# This can happen due to MOL issues
if fid not in fidToOrg:
continue
org = fidToOrg[fid]
# If the organism is the representative we keep it and go to the next role
if org == oturep:
keepFid = fid
keepRole = role
break
# Otherwise look at the rest of the list (note that | |
if type(th_v) is xr.core.dataarray.DataArray:
th_v.name = 'th_v_monc'
return th_v
def q_total(*args):
"""
Specific Total Water Content.
Derived variable name: q_total
Parameters
----------
args : list of numpy array or xarray DataArrays
specific water variables (e.g.[ q_v, q_cl]).
Returns
-------
q_total : numpy array or xarray DataArray
sum of args
"""
qt = None
for a in args:
if a is not None:
if qt is not None:
qt += a
else:
qt = a
if type(qt) is xr.core.dataarray.DataArray:
qt.name = 'q_total'
return qt
def buoyancy(th_v):
"""
Buoyancy from theta_v.
Derived variable name: buoyancy
Parameters
----------
th_v : numpy array or xarray DataArray
Virtual potential temperature.
Returns
-------
buoyancy : numpy array or xarray DataArray
"""
xdname = [a for a in th_v.dims if a.startswith('x')][0]
ydname = [a for a in th_v.dims if a.startswith('y')][0]
mean_thv = th_v.mean(dim=(xdname, ydname))
b = tc.grav * (th_v - mean_thv)/mean_thv
if type(b) is xr.core.dataarray.DataArray:
b.name = 'buoyancy'
return b
def buoyancy_monc(th_v, thref):
"""
Buoyancy from theta_v.
Derived variable name: buoyancy_monc
MONC approximation.
Parameters
----------
th_v : numpy array or xarray DataArray
Virtual potential temperature.
Returns
-------
buoyancy : numpy array or xarray DataArray
"""
b = tc.grav * (th_v - thref)/thref
if type(b) is xr.core.dataarray.DataArray:
b.name = 'buoyancy_monc'
return b
def dbdz(th, p, q_v, q_cl, z, zn):
"""
Vertical Gradient of Buoyancy from theta_v.
Derived variable name: dbdz
Parameters
----------
th : numpy array or xarray DataArray
Potential Temperature. (K)
p : numpy array or xarray DataArray
Pressure (Pa).
q_v : numpy array or xarray DataArray
specific humidity
q_cl : numpy array or xarray DataArray
specific cloud liquid water content.
z : xarray coord.
zn : xarray coord.
Returns
-------
dbuoyancy/dz : numpy array or xarray DataArray
"""
th_v = virtual_potential_temperature(th, q_v, q_cl)
b = buoyancy(th_v)
dbdz = do.d_by_dz_field(b, z, zn, grid= 'w')
if type(dbdz) is xr.core.dataarray.DataArray:
dbdz.name = 'dbdz'
return dbdz
def dbdz_monc(th, thref, p, q_v, q_cl, z, zn):
"""
Vertical Gradient of Buoyancy from theta_v.
Derived variable name: db_moncdz
MONC approximation
Parameters
----------
th : numpy array or xarray DataArray
Potential Temperature. (K)
thref : numpy array or xarray DataArray
Reference Potential Temperature (usually 1D). (K)
p : numpy array or xarray DataArray
Pressure (Pa).
q_v : numpy array or xarray DataArray
specific humidity
q_cl : numpy array or xarray DataArray
specific cloud liquid water content.
z : xarray coord.
zn : xarray coord.
Returns
-------
dbuoyancy/dz : numpy array or xarray DataArray
"""
th_v = virtual_potential_temperature_monc(th, thref, q_v, q_cl)
b = buoyancy_monc(th_v, thref)
dbdz = do.d_by_dz_field(b, z, zn, grid= 'w')
if type(dbdz) is xr.core.dataarray.DataArray:
dbdz.name = 'db_moncdz'
return dbdz
def rh(T, p, q):
"""
Relative Humidity.
Derived variable name: rh
Parameters
----------
T: numpy array or xarray DataArray
Temperature (K).
p: numpy array or xarray DataArray
Pressure (Pa).
q: numpy array or xarray DataArray
specific humidity (kg/kg)
Returns
-------
rh: numpy array or xarray DataArray
Relative Humidity.
"""
rh=T.copy(deep=True)
# calculate vapour pressure.
e = np.clip( q * p/( tc.epsilon + q), 1e-10, None)
es = esat(T)
rh = np.clip(e / es, 0, 1)
if type(rh) is xr.core.dataarray.DataArray:
rh.name = 'rh'
return rh
def rh_ice(T, p, q):
"""
Relative Humidity wrt Ice.
Derived variable name: rh_ice
Parameters
----------
T: numpy array or xarray DataArray
Temperature.
p: numpy array or xarray DataArray
Pressure (Pa).
q: numpy array or xarray DataArray
specific humidity (kg/kg)
Returns
-------
rh: numpy array or xarray DataArray
Relative Humidity.
"""
rh=T.copy(deep=True)
# calculate vapour pressure.
e = np.clip( q * p/( tc.epsilon + q), 1e-10, None)
es = esat_ice(T)
rh = np.clip(e / es, 0, 1)
if type(rh) is xr.core.dataarray.DataArray:
rh.name = 'rh_ice'
return rh
def a_L_monc(T, p):
"""
Cloud Factor.
Derived variable name: a_L
Parameters
----------
T: numpy array or xarray DataArray
Temperature.
p: numpy array or xarray DataArray
Pressure (Pa).
Returns
-------
a_L: numpy array or xarray DataArray
Factor used in calculating liquid water content.
"""
alpha_L = dqsatbydT(T, p)
a_L = 1.0 / (1.0 + tc.L_over_cp * alpha_L)
if type(a_L) is xr.core.dataarray.DataArray:
a_L.name = 'a_L'
return a_L
def cloud_params_monc(th_ref, p_ref):
"""
Cloud Parameters.
MONC Approximation
Parameters
----------
th_ref: numpy array or xarray DataArray
Reference Potential Temperature.
p_ref: numpy array or xarray DataArray
Reference Pressure (Pa).
Returns
-------
dict
Factors used in calculating liquid water content.
"T_ref","pi_ref", "qs_ref", "a_L", "alpha_L".
"""
pi_ref = exner(p_ref)
T_ref = th_ref * pi_ref
qs_ref = qsat(T_ref, p_ref)
alpha_L = dqsatbydT(T_ref, p_ref)
a_L = 1.0 / (1.0 + tc.L_over_cp * alpha_L)
output_dir = {
"T_ref": T_ref,
"pi_ref": pi_ref,
"qs_ref": qs_ref,
"a_L": a_L,
"alpha_L": alpha_L,
}
return output_dir
def betas_monc(th, p):
"""
Beta factors in cloudy buoyancy calculation.
Parameters
----------
th : numpy array or xarray DataArray
Potential Temperature. (K)
p : numpy array or xarray DataArray
Pressure (Pa).
Returns
-------
dict
Cloud parameters from cloud_params_monc plus the buoyancy
coefficients "bt", "bq" and "bc".
"""
betas = cloud_params_monc(th, p)
betas["bt"] = 1/th
betas["bq"] = 1/tc.epsilon -1
betas["bc"] = betas["a_L"] * (latheat(betas["T_ref"], model=1)
/ (tc.cp_air * betas["T_ref"])
- 1/tc.epsilon)
return betas
def buoyancy_moist(th, th_ref, p, q_v, q_cl, thresh = 1.0e-5):
"""
Buoyancy including cloud condensation.
Derived variable name: buoyancy_moist
MONC approximation
Parameters
----------
th : numpy array or xarray DataArray
Potential Temperature. (K)
th_ref : numpy array or xarray DataArray
Reference Potential Temperature (usually 1D). (K)
p : numpy array or xarray DataArray
Pressure (Pa).
q_v : numpy array or xarray DataArray
specific humidity
q_cl : numpy array or xarray DataArray
specific cloud liquid water content.
thresh : (Optional) float. Default is 1E-5.
Threshold for cloud water.
Returns
-------
buoyancy : numpy array or xarray DataArray
"""
betas = betas_monc(th_ref, p)
th_L = liquid_water_potential_temperature(th, q_cl, betas["pi_ref"])
qt = q_total(q_v, q_cl)
delta_q = (qt - betas["qs_ref"] - betas["alpha_L"] * betas["pi_ref"]
* (th_L - th_ref))
b_dry = th_L * betas["bt"] -1 + qt * betas["bq"]
if type(th) is xr.core.dataarray.DataArray:
bc_delta_q = xr.where(delta_q >= thresh, delta_q * betas["bc"], 0)
else:
bc_delta_q = np.zeros_like(delta_q)
iwet = delta_q >= thresh
bc_delta_q[iwet] = ( delta_q * betas["bc"])[iwet]
b_wet = b_dry + bc_delta_q
b = tc.grav * b_wet
if type(b) is xr.core.dataarray.DataArray:
b.name = 'buoyancy_moist'
return b
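# In effect: b = g * (bt*th_L - 1 + bq*q_t + bc*delta_q), where the bc*delta_q term is only
# applied where delta_q >= thresh, i.e. where cloud water is diagnosed.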
def dmoist_bdz(th, th_ref, p, q_v, q_cl, z, zn, thresh = 1.0e-5):
"""
Vertical Gradient of (buoyancy including cloud condensation).
Derived variable name: dmoist_bdz
MONC approximation.
This is db/dz with b = beta_t theta_l + beta_q q_t.
Note - not to be used for vertical buoyancy flux.
Parameters
----------
th : numpy array or xarray DataArray
Potential Temperature. (K)
th_ref : numpy array or xarray DataArray
Reference Potential Temperature (usually 1D). (K)
p : numpy array or xarray DataArray
Pressure (Pa).
q_v : numpy array or xarray DataArray
specific humidity
q_cl : numpy array or xarray DataArray
specific cloud liquid water content.
z : xarray coord.
zn : xarray coord.
thresh : (Optional) float. Default is 1E-5.
Threshold for cloud water.
Returns
-------
dbuoyancy/dz : numpy array or xarray DataArray
"""
b = buoyancy_moist(th, th_ref, p, q_v, q_cl, thresh = thresh)
dbdz = do.d_by_dz_field(b, z, zn, grid= 'w')
if type(dbdz) is xr.core.dataarray.DataArray:
dbdz.name = 'dmoist_bdz'
return dbdz
def moist_dbdz(th, th_ref, p_ref, q_v, q_cl, z, zn, thresh = 1.0e-5):
"""
Vertical Gradient of buoyancy (including cloud condensation).
Derived variable name: moist_dbdz
MONC approximation
This is db/dz = beta_t dtheta_l/dz + beta_q dq_t/dz.
Note - to be used for vertical buoyancy flux.
Parameters
----------
th : numpy array or xarray DataArray
Potential Temperature. (K)
th_ref : numpy array or xarray DataArray
Reference Potential Temperature (usually 1D). (K)
p_ref : numpy array or xarray DataArray
Reference Pressure (Pa).
q_v : numpy array or xarray DataArray
specific humidity
q_cl : numpy array or xarray DataArray
specific cloud liquid water content.
z : xarray coord.
zn : xarray coord.
thresh : (Optional) float. Default is 1E-5.
Threshold for cloud water.
Returns
-------
dbuoyancy/dz : numpy array or xarray DataArray
"""
betas = betas_monc(th_ref, p_ref)
th_L = liquid_water_potential_temperature(th, q_cl, betas["pi_ref"])
qt = q_total(q_v, q_cl)
delta_q | |
# Source repository: Open-Earth-Foundation/traction
"""
Aries Cloud Agent
No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
The version of the OpenAPI document: v0.7.2
Generated by: https://openapi-generator.tech
"""
import re # noqa: F401
import sys # noqa: F401
from acapy_client.model_utils import ( # noqa: F401
ApiTypeError,
ModelComposed,
ModelNormal,
ModelSimple,
cached_property,
change_keys_js_to_python,
convert_js_args_to_python_args,
date,
datetime,
file_type,
none_type,
validate_get_composed_info,
OpenApiModel,
)
from acapy_client.exceptions import ApiAttributeError
def lazy_import():
from acapy_client.model.credential_offer import CredentialOffer
from acapy_client.model.credential_proposal import CredentialProposal
from acapy_client.model.indy_cred_abstract import IndyCredAbstract
from acapy_client.model.indy_cred_info import IndyCredInfo
from acapy_client.model.indy_cred_request import IndyCredRequest
from acapy_client.model.indy_credential import IndyCredential
globals()["CredentialOffer"] = CredentialOffer
globals()["CredentialProposal"] = CredentialProposal
globals()["IndyCredAbstract"] = IndyCredAbstract
globals()["IndyCredInfo"] = IndyCredInfo
globals()["IndyCredRequest"] = IndyCredRequest
globals()["IndyCredential"] = IndyCredential
class V10CredentialExchange(ModelNormal):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
Attributes:
allowed_values (dict): The key is the tuple path to the attribute
and the for var_name this is (var_name,). The value is a dict
with a capitalized key describing the allowed value and an allowed
value. These dicts store the allowed enum values.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
discriminator_value_class_map (dict): A dict to go from the discriminator
variable value to the discriminator class name.
validations (dict): The key is the tuple path to the attribute
and the for var_name this is (var_name,). The value is a dict
that stores validations for max_length, min_length, max_items,
min_items, exclusive_maximum, inclusive_maximum, exclusive_minimum,
inclusive_minimum, and regex.
additional_properties_type (tuple): A tuple of classes accepted
as additional properties values.
"""
allowed_values = {
("initiator",): {
"SELF": "self",
"EXTERNAL": "external",
},
("role",): {
"HOLDER": "holder",
"ISSUER": "issuer",
},
}
validations = {
("created_at",): {
"regex": {
"pattern": r"^\d{4}-\d\d-\d\d[T ]\d\d:\d\d(?:\:(?:\d\d(?:\.\d{1,6})?))?(?:[+-]\d\d:?\d\d|Z|)$", # noqa: E501
},
},
("credential_definition_id",): {
"regex": {
"pattern": r"^([123456789ABCDEFGHJKLMNPQRSTUVWXYZabcdefghijkmnopqrstuvwxyz]{21,22}):3:CL:(([1-9][0-9]*)|([123456789ABCDEFGHJKLMNPQRSTUVWXYZabcdefghijkmnopqrstuvwxyz]{21,22}:2:.+:[0-9.]+)):(.+)?$", # noqa: E501
},
},
("schema_id",): {
"regex": {
"pattern": r"^[123456789ABCDEFGHJKLMNPQRSTUVWXYZabcdefghijkmnopqrstuvwxyz]{21,22}:2:.+:[0-9.]+$", # noqa: E501
},
},
("updated_at",): {
"regex": {
"pattern": r"^\d{4}-\d\d-\d\d[T ]\d\d:\d\d(?:\:(?:\d\d(?:\.\d{1,6})?))?(?:[+-]\d\d:?\d\d|Z|)$", # noqa: E501
},
},
}
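# The patterns above restrict timestamps to ISO-8601-like strings and restrict the credential
# definition / schema identifiers to the Indy format: a 21-22 character base58 issuer DID
# followed by the structured id segments.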
@cached_property
def additional_properties_type():
"""
This must be a method because a model may have properties that are
of type self, this must run after the class is loaded
"""
lazy_import()
return (
bool,
date,
datetime,
dict,
float,
int,
list,
str,
none_type,
) # noqa: E501
_nullable = False
@cached_property
def openapi_types():
"""
This must be a method because a model may have properties that are
of type self, this must run after the class is loaded
Returns
openapi_types (dict): The key is attribute name
and the value is attribute type.
"""
lazy_import()
return {
"auto_issue": (bool,), # noqa: E501
"auto_offer": (bool,), # noqa: E501
"auto_remove": (bool,), # noqa: E501
"connection_id": (str,), # noqa: E501
"created_at": (str,), # noqa: E501
"credential": (
{str: (bool, date, datetime, dict, float, int, list, str, none_type)},
), # noqa: E501
"credential_definition_id": (str,), # noqa: E501
"credential_exchange_id": (str,), # noqa: E501
"credential_id": (str,), # noqa: E501
"credential_offer": (
{str: (bool, date, datetime, dict, float, int, list, str, none_type)},
), # noqa: E501
"credential_offer_dict": (
{str: (bool, date, datetime, dict, float, int, list, str, none_type)},
), # noqa: E501
"credential_proposal_dict": (
{str: (bool, date, datetime, dict, float, int, list, str, none_type)},
), # noqa: E501
"credential_request": (
{str: (bool, date, datetime, dict, float, int, list, str, none_type)},
), # noqa: E501
"credential_request_metadata": (
bool,
date,
datetime,
dict,
float,
int,
list,
str,
none_type,
), # noqa: E501
"error_msg": (str,), # noqa: E501
"initiator": (str,), # noqa: E501
"parent_thread_id": (str,), # noqa: E501
"raw_credential": (
{str: (bool, date, datetime, dict, float, int, list, str, none_type)},
), # noqa: E501
"revoc_reg_id": (str,), # noqa: E501
"revocation_id": (str,), # noqa: E501
"role": (str,), # noqa: E501
"schema_id": (str,), # noqa: E501
"state": (str,), # noqa: E501
"thread_id": (str,), # noqa: E501
"trace": (bool,), # noqa: E501
"updated_at": (str,), # noqa: E501
}
@cached_property
def discriminator():
return None
attribute_map = {
"auto_issue": "auto_issue", # noqa: E501
"auto_offer": "auto_offer", # noqa: E501
"auto_remove": "auto_remove", # noqa: E501
"connection_id": "connection_id", # noqa: E501
"created_at": "created_at", # noqa: E501
"credential": "credential", # noqa: E501
"credential_definition_id": "credential_definition_id", # noqa: E501
"credential_exchange_id": "credential_exchange_id", # noqa: E501
"credential_id": "credential_id", # noqa: E501
"credential_offer": "credential_offer", # noqa: E501
"credential_offer_dict": "credential_offer_dict", # noqa: E501
"credential_proposal_dict": "credential_proposal_dict", # noqa: E501
"credential_request": "credential_request", # noqa: E501
"credential_request_metadata": "credential_request_metadata", # noqa: E501
"error_msg": "error_msg", # noqa: E501
"initiator": "initiator", # noqa: E501
"parent_thread_id": "parent_thread_id", # noqa: E501
"raw_credential": "raw_credential", # noqa: E501
"revoc_reg_id": "revoc_reg_id", # noqa: E501
"revocation_id": "revocation_id", # noqa: E501
"role": "role", # noqa: E501
"schema_id": "schema_id", # noqa: E501
"state": "state", # noqa: E501
"thread_id": "thread_id", # noqa: E501
"trace": "trace", # noqa: E501
"updated_at": "updated_at", # noqa: E501
}
read_only_vars = {}
_composed_schemas = {}
@classmethod
@convert_js_args_to_python_args
def _from_openapi_data(cls, *args, **kwargs): # noqa: E501
"""V10CredentialExchange - a model defined in OpenAPI
Keyword Args:
_check_type (bool): if True, values for parameters in openapi_types
will be type checked and a TypeError will be
raised if the wrong type is input.
Defaults to True
_path_to_item (tuple/list): This is a list of keys or values to
drill down to the model in received_data
when deserializing a response
_spec_property_naming (bool): True if the variable names in the input data
are serialized names, as specified in the OpenAPI document.
False if the variable names in the input data
are pythonic names, e.g. snake case (default)
_configuration (Configuration): the instance to use when
deserializing a file_type parameter.
If passed, type conversion is attempted
If omitted no type conversion is done.
_visited_composed_classes (tuple): This stores a tuple of
classes that we have traveled through so that
if we see that class again we will not use its
discriminator again.
When traveling through a discriminator, the
composed schema that is
is traveled through is added to this set.
For example if Animal has a discriminator
petType and we pass in "Dog", and the class Dog
allOf includes Animal, we move through Animal
once using the discriminator, and pick Dog.
Then in Dog, we will make an instance of the
Animal class but this time we won't travel
through its discriminator because we passed in
_visited_composed_classes = (Animal,)
auto_issue (bool): Issuer choice to issue to request in this credential exchange. [optional] # noqa: E501
auto_offer (bool): Holder choice to accept offer in this credential exchange. [optional] # noqa: E501
auto_remove (bool): Issuer choice to remove this credential exchange record when complete. [optional] # noqa: E501
connection_id (str): Connection identifier. [optional] # noqa: E501
created_at (str): Time of record creation. [optional] # noqa: E501
credential ({str: (bool, date, datetime, dict, float, int, list, str, none_type)}): Credential as stored. [optional] # noqa: E501
credential_definition_id (str): Credential definition identifier. [optional] # noqa: E501
credential_exchange_id (str): Credential exchange identifier. [optional] # noqa: E501
credential_id (str): Credential identifier. [optional] # noqa: E501
credential_offer ({str: (bool, date, datetime, dict, float, int, list, str, none_type)}): (Indy) credential offer. [optional] # noqa: E501
credential_offer_dict ({str: (bool, date, datetime, dict, float, int, list, str, none_type)}): Credential offer message. [optional] # noqa: E501
credential_proposal_dict ({str: (bool, date, datetime, dict, float, int, list, str, none_type)}): Credential proposal message. [optional] # noqa: E501
credential_request ({str: (bool, date, datetime, dict, float, int, list, str, none_type)}): (Indy) credential request. [optional] # noqa: E501
credential_request_metadata (bool, date, datetime, dict, float, int, list, str, none_type): (Indy) credential request metadata. [optional] # noqa: E501
error_msg (str): Error message. [optional] # noqa: E501
initiator (str): Issue-credential exchange initiator: self or external. [optional] # noqa: E501
parent_thread_id (str): Parent thread identifier. [optional] # noqa: E501
raw_credential ({str: (bool, date, datetime, dict, float, int, list, str, none_type)}): Credential as received, prior to storage in holder wallet. [optional] # noqa: E501
revoc_reg_id (str): Revocation registry identifier. [optional] # noqa: E501
revocation_id (str): Credential identifier within revocation registry. [optional] # noqa: E501
role (str): Issue-credential exchange role: holder or issuer. [optional] # noqa: E501
schema_id (str): Schema identifier. [optional] # noqa: E501
state (str): | |
# MIT License
#
# Copyright (c) 2019 LABSS(<NAME>, <NAME>)
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
from __future__ import annotations
import numpy as np
import numba
import typing
from ast import literal_eval
if typing.TYPE_CHECKING:
from .entities import Person
from typing import List, Set, Dict, Union, Callable, Any, Tuple
import pandas as pd
import numpy
free_parameters = ["migration_on", "initial_agents", "num_ticks", "intervention",
"max_accomplice_radius", "number_arrests_per_year",
"number_crimes_yearly_per10k", "ticks_between_intervention",
"intervention_start", "intervention_end","num_oc_persons",
"num_oc_families", "education_modifier", "retirement_age",
"unemployment_multiplier", "nat_propensity_m",
"nat_propensity_sigma", "nat_propensity_threshold",
"facilitator_repression", "facilitator_repression_multiplier",
"likelihood_of_facilitators", "targets_addressed_percent",
"threshold_use_facilitators", "oc_embeddedness_radius",
"oc_boss_repression", "punishment_length", "constant_population"]
interventions_type = ["facilitators-strong", "facilitators", "students-strong", "students",
"disruptive-strong", "disruptive", "preventive-strong", "preventive",
"baseline"]
def find_neighb(netname: str, togo: int, found: Set, border: Set[Person]) -> Union[Set, Any]:
"""
Find the nearest agent in @netname within range @togo, return a Set of agents
:param netname: str, network name to search
:param togo: int, search range
:param found: Set,
:param border: Set[Person]
:return: Set,
"""
found = found | border
if togo == 0:
return found
#print_id(set().union(*[x.neighbors.get(netname) for x in border]))
nextlayer = set().union(*[x.neighbors.get(netname) for x in border]) - found
if not nextlayer:
return found
else:
togo -= 1
return find_neighb(netname, togo, found, nextlayer)
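# find_neighb is a breadth-first expansion over the named network: starting from `border`, it
# follows links for up to `togo` hops and returns the union of all agents reached, including
# the starting set.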
def wedding_proximity_with(ego: Person, pool: List[Person]) -> np.ndarray:
"""
Given an agent and a pool of agents this function returns a list of proximities with ego. Careful not to shuffle it!
:param ego: Person
:param pool: list of Person objects
:return: list, list of proximities with ego.
"""
proximity = np.array([(ego.social_proximity(x) + (4 - abs(x.hobby - ego.hobby)) / 4) / 2
for x in pool])
# fall back to a uniform distribution when no candidate has a positive proximity
if all(n <= 0 for n in proximity):
proximity = np.ones(len(proximity))
proximity /= np.sum(proximity)
return proximity
def at_most(agentset: Union[List[Person], Set[Person]], n: int, rng_istance: numpy.random.default_rng) -> Union[List[Person], Set[Person]]:
"""
Given an @agentset and an integer @n, this function returns the initial @agentset if there are
less than @n agents, a subset of those agents of length @n if there are more than @n agents.
:param agentset: Union[List[Person], Set[Person]]
:param n: int
:param rng_istance: numpy.random.default_rng
:return: Union[List[Person], Set[Person]]
"""
if len(agentset) < n:
return agentset
else:
return list(rng_istance.choice(agentset, n, replace=False))
def weighted_n_of(n: int, agentset: Union[List[Person], Set[Person]],
weight_function: Callable, rng_istance: numpy.random.default_rng) -> List[Person]:
"""
Given a set or List of agents @agentset an integer @n and a lambda function @weight_function.
This function performs a weighted extraction, without replacing based on the lambda function.
This procedure takes into account negative numbers and weights equal to zero.
:param n: int
:param agentset: Union[List[Person], Set[Person]]
:param weight_function: Callable
:param rng_istance: numpy.random.default_rng
:return: List[Person]
"""
p = [float(weight_function(x)) for x in agentset]
for pi in p:
if pi < 0:
min_value = np.min(p)
p = [i - min_value for i in p]
break
sump = sum(p)
#if there are more zeros than n required in p
if np.count_nonzero(p) < n:
n = np.count_nonzero(p)
#If there are only zeros
if sump == 0:
p = None
else:
p = [i/sump for i in p]
#If the type is wrong
if type(agentset) != list:
agentset = list(agentset)
return rng_istance.choice(agentset, int(n), replace=False, p=p)
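# Usage sketch (names are illustrative): draw three agents, weighting by an attribute such as
# criminal_tendency, with a shared numpy Generator:
#   rng = numpy.random.default_rng(seed)
#   picked = weighted_n_of(3, agents, lambda a: a.criminal_tendency, rng)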
def weighted_one_of(agentset: Union[List[Person], Set[Person]],
weight_function: Callable, rng_istance: numpy.random.default_rng) -> Any:
return weighted_n_of(1, agentset, weight_function, rng_istance)[0]
def pick_from_pair_list(a_list_of_pairs: Union[List, np.ndarray],
rng_istance: numpy.random.default_rng) -> Any:
"""
given a list of pairs, containing an object and a probability (e.g. [[object, p],[object, p]])
return an object based on the probability(p)
:param a_list_of_pairs:list, a list of pairs (e.g. [[object, p],[object, p]])
:param rng_istance: numpy.random instance,
:return: object
"""
return weighted_one_of(a_list_of_pairs, lambda x: x[-1], rng_istance)[0]
def df_to_dict(df: pd.DataFrame, extra_depth: bool = False) -> Dict:
"""
Based on the number of pandas DataFrame columns, transforms the dataframe into nested dictionaries as follows:
df-columns = age, sex, education, p --> dict-keys = {age:{sex:[education, p]}}
If extra_depth is True the transformation has an extra level of depth as follows:
df-columns = age, sex, education, p --> dict-keys = {age:{sex:{education: p}}}
This transformation ensures a faster access to the values using the dictionary keys.
:param df: pd.DataFrame, the df to be transformed
:param extra_depth: bool, if True gives an extra level of depth
:return: Dict, a new dictionary
"""
dic = dict()
extra_depth_modifier = 0
if extra_depth:
extra_depth_modifier = 1
if len(df.columns) + extra_depth_modifier == 2:
for col in np.unique(df.iloc[:, 0]):
dic[col] = df[df.iloc[:, 0] == col].iloc[:, 1].values
if len(df.columns) + extra_depth_modifier == 3:
for col in np.unique(df.iloc[:, 0]):
dic[col] = df[df.iloc[:, 0] == col].iloc[:, 1:].values
if len(df.columns) + extra_depth_modifier == 4:
for col in np.unique(df.iloc[:, 0]):
dic[col] = df[df.iloc[:, 0] == col].iloc[:, 1:]
for key in dic:
subdic = dict()
for subcol in np.unique(dic[key].iloc[:, 0]):
if extra_depth:
subdic[subcol] = dic[key][dic[key].iloc[:, 0] == subcol].iloc[:, 1:].values[0][0]
else:
subdic[subcol] = dic[key][dic[key].iloc[:, 0] == subcol].iloc[:, 1:].values
dic[key] = subdic
return dic
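# Example: a df with columns (age, sex, p) and extra_depth=True becomes {age: {sex: p}},
# e.g. rows (25, "male", 0.3) and (25, "female", 0.7) turn into {25: {"male": 0.3, "female": 0.7}}.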
def decide_conn_number(agents: Union[List, Set], max_lim: int, also_me: bool = True) -> int:
"""
Given a set of agents decides the number of connections to be created between them based on a maximum number.
:param agents: Union[List, Set], agents
:param max_lim: int, an arbitrary maximum number
:param also_me: bool, include caller
:return: max_lim if the agents are more than max_lim otherwise returns the number of agents minus one.
"""
if len(agents) <= max_lim:
return len(agents) - 1 if also_me else len(agents)
else:
return max_lim
def df_to_lists(df: pd.DataFrame, split_row: bool = True) -> List:
"""
This function transforms a pandas DataFrame into nested lists as follows:
df-columns = age, sex, education, p --> list = [[age,sex],[education,p]]
This transformation ensures a faster access to the values using the position in the list
:param df: pandas df, the df to be transformed
:param split_row: bool, default = True
:return: list, a new list
"""
output_list = list()
if split_row:
temp_list = df.iloc[:, :2].values.tolist()
for index, row in df.iterrows():
output_list.append([temp_list[index], [row.iloc[2], row.iloc[3]]])
else:
output_list = df.values.tolist()
return output_list
def calculate_oc_status(co_offenders: List[Person]) -> None:
"""
This procedure modify in-place the arrest_weigh attribute of the Person objects passed to co_offenders
:param co_offenders: list, of Person object
:return: None
"""
for agent in co_offenders:
agent.arrest_weight = agent.calculate_oc_member_position()
min_score = np.min([agent.arrest_weight for agent in co_offenders])
divide_score = np.mean([agent.arrest_weight - min_score for agent in co_offenders])
for agent in co_offenders:
if divide_score > 0:
agent.arrest_weight = (agent.arrest_weight - min_score) / divide_score
else:
agent.arrest_weight = 1
def generate_collector_dicts(collect_agents) -> Union[Tuple[Dict, Dict], Dict]:
"""
This returns the data-collector reporter dictionaries, with one entry per name in the
@model_reporters and @agent_reporters lists defined below.
:return: Union[Tuple[Dict, Dict], Dict]
"""
model_reporters = ["seed", "family_intervention", 'social_support', 'welfare_support',
'this_is_a_big_crime', 'good_guy_threshold', 'number_deceased',
'facilitator_fails', 'facilitator_crimes', 'crime_size_fails',
'number_born', 'number_migrants', 'number_weddings',
'number_weddings_mean', 'number_law_interventions_this_tick',
'correction_for_non_facilitators', 'number_protected_recruited_this_tick',
'people_jailed', 'number_offspring_recruited_this_tick', 'number_crimes',
'crime_multiplier', 'kids_intervention_counter', 'big_crime_from_small_fish',
'arrest_rate', 'migration_on', 'initial_agents', 'intervention',
'max_accomplice_radius', 'number_arrests_per_year', 'ticks_per_year',
'num_ticks', 'tick', 'ticks_between_intervention', 'intervention_start',
'intervention_end', 'num_oc_persons', 'num_oc_families',
'education_modifier', 'retirement_age', 'unemployment_multiplier',
'nat_propensity_m', 'nat_propensity_sigma', 'nat_propensity_threshold',
'facilitator_repression', 'facilitator_repression_multiplier',
'percentage_of_facilitators', 'targets_addressed_percent',
'threshold_use_facilitators', 'oc_embeddedness_radius',
'oc_boss_repression', 'punishment_length',
'constant_population', "number_crimes_yearly_per10k",
"number_crimes_committed_of_persons", "current_oc_members",
"current_num_persons", 'criminal_tendency_mean', 'criminal_tencency_sd',
'age_mean', 'age_sd', 'education_level_mean', 'education_level_sd',
'num_crime_committed_mean', 'num_crime_committed_sd',
"crimes_committed_by_oc_this_tick", "current_prisoners", "employed",
"facilitators", "tot_friendship_link", "tot_household_link",
"tot_partner_link", "tot_offspring_link", "tot_criminal_link",
"tot_school_link", "tot_professional_link", "tot_sibling_link",
"tot_parent_link", "number_students", "number_jobs",
"likelihood_of_facilitators"]
agent_reporters = ['unique_id', 'gender_is_male', 'prisoner', 'age', 'sentence_countdown',
'num_crimes_committed', 'num_crimes_committed_this_tick',
'education_level', 'max_education_level', 'wealth_level',
'job_level', 'propensity', 'oc_member', 'retired', 'number_of_children',
'facilitator', 'hobby', 'new_recruit', 'migrant', 'criminal_tendency',
'target_of_intervention', "cached_oc_embeddedness", 'sibling',
'offspring', 'parent', 'partner', 'household', 'friendship',
'criminal', 'professional', | |
value?"):
messagebox.showinfo("Done", "Success")
aai_upperRateLimitEntry = temp
self.aaiUpperRateLimitValue.config(text="Current Value: " + aai_upperRateLimitEntry)
db.execute("UPDATE aai SET aai_upperRateLimitEntry = ? WHERE (user = ?)", (aai_upperRateLimitEntry, currentuser))
db.commit()
except:
messagebox.showinfo("Error","Please enter a valid value")
pass
#aaiAtrialAmplitude
if(value == "aaiAtrialAmplitude"):
temp = self.aaiAtrialAmplitudeEntry.get()
#Try/except to sanitize user input and ask for confirmation if there are no errors
try:
float(temp)
if (temp == '' or float(temp)<0):
messagebox.showinfo("Error","Please enter a valid value")
pass
#Ensure value is in limited range
elif(float(temp) < 0 or float(temp) > 7.0):
messagebox.showinfo("Error","The range is between 0(off) and 7.0")
pass
#If everything is good update current value
else:
if messagebox.askyesno("Confirmation", "Replace current value?"):
messagebox.showinfo("Done", "Success")
aai_atrialAmplitudeEntry = temp
self.aaiAtrialAmplitudeValue.config(text="Current Value: " + aai_atrialAmplitudeEntry)
db.execute("UPDATE aai SET aai_atrialAmplitudeEntry = ? WHERE (user = ?)", (aai_atrialAmplitudeEntry, currentuser))
db.commit()
except:
messagebox.showinfo("Error","Please enter a valid value")
pass
#aaiAtrialPulseWidth
if(value == "aaiAtrialPulseWidth"):
temp = self.aaiAtrialPulseWidthEntry.get()
#Try/except to sanitize user input and ask for confirmation if there are no errors
try:
float(temp)
if (temp == '' or float(temp)<0):
messagebox.showinfo("Error","Please enter a valid value")
pass
#Ensure value is in limited range
elif(float(temp) < 0.05 or float(temp) > 1.9):
messagebox.showinfo("Error","The range is between 0.05 and 1.9")
pass
#If everything is good update current value
else:
if messagebox.askyesno("Confirmation", "Replace current value?"):
messagebox.showinfo("Done", "Success")
aai_atrialPulseWidthEntry = temp
self.aaiAtrialPulseWidthValue.config(text="Current Value: " + aai_atrialPulseWidthEntry)
db.execute("UPDATE aai SET aai_atrialPulseWidthEntry = ? WHERE (user = ?)", (aai_atrialPulseWidthEntry, currentuser))
db.commit()
except:
messagebox.showinfo("Error","Please enter a valid value")
pass
#aaiAtrialSensitivity
if(value == "aaiAtrialSensitivity"):
temp = self.aaiAtrialSensitivityEntry.get()
#Try/except to sanitize user input and ask for confirmation if there are no errors
try:
float(temp)
if (temp == '' or float(temp)<0):
messagebox.showinfo("Error","Please enter a valid value")
pass
#Ensure value is in limited range
elif(float(temp) < 0.25 or float(temp) > 10.0):
messagebox.showinfo("Error","The range is between 0.25 and 10.0")
pass
#If everything is good update current value
else:
if messagebox.askyesno("Confirmation", "Replace current value?"):
messagebox.showinfo("Done", "Success")
aai_atrialSensitivityEntry = temp
self.aaiAtrialSensitivityValue.config(text="Current Value: " + aai_atrialSensitivityEntry)
db.execute("UPDATE aai SET aai_atrialSensitivityEntry = ? WHERE (user = ?)", (aai_atrialSensitivityEntry, currentuser))
db.commit()
except:
messagebox.showinfo("Error","Please enter a valid value")
pass
#aaiARP
if(value == "aaiARP"):
temp = self.aaiARPEntry.get()
#Try/except to sanitize user input and ask for confirmation if there are no errors
try:
int(temp)
if (temp == '' or int(temp)<0):
messagebox.showinfo("Error","Please enter a valid value")
pass
#Ensure value is in limited range
elif(int(temp) < 150 or int(temp) > 500):
messagebox.showinfo("Error","The range is between 150 and 500")
pass
#If everything is good update current value
else:
if messagebox.askyesno("Confirmation", "Replace current value?"):
messagebox.showinfo("Done", "Success")
aai_ARPEntry = temp
self.aaiARPValue.config(text="Current Value: " + aai_ARPEntry)
db.execute("UPDATE aai SET aai_ARPEntry = ? WHERE (user = ?)", (aai_ARPEntry, currentuser))
db.commit()
except:
messagebox.showinfo("Error","Please enter a valid value")
pass
#aaiAPVARP
if(value == "aaiAPVARP"):
temp = self.aaiAPVARPEntry.get()
#Try/except to sanitize user input and ask for confirmation if there are no errors
try:
int(temp)
if (temp == '' or int(temp)<0):
messagebox.showinfo("Error","Please enter a valid value")
pass
#Ensure value is in limited range
elif(int(temp) < 150 or int(temp) > 500):
messagebox.showinfo("Error","The range is between 150 and 500")
pass
#If everything is good update current value
else:
if messagebox.askyesno("Confirmation", "Replace current value?"):
messagebox.showinfo("Done", "Success")
aai_APVARPEntry = temp
self.aaiAPVARPValue.config(text="Current Value: " + aai_APVARPEntry)
db.execute("UPDATE aai SET aai_APVARPEntry = ? WHERE (user = ?)", (aai_APVARPEntry, currentuser))
db.commit()
except:
messagebox.showinfo("Error","Please enter a valid value")
pass
#aaiHysteresis
if(value == "aaiHysteresis"):
temp = self.aaiHysteresisEntry.get()
#Try/except to sanitize user input and ask for confirmation if there are no errors
try:
int(temp)
if (temp == '' or int(temp)<0):
messagebox.showinfo("Error","Please enter a valid value")
pass
#If everything is good update current value
else:
if messagebox.askyesno("Confirmation", "Replace current value?"):
messagebox.showinfo("Done", "Success")
aai_hysteresisEntry = temp
self.aaiHysteresisValue.config(text="Current Value: " + aai_hysteresisEntry)
db.execute("UPDATE aai SET aai_hysteresisEntry = ? WHERE (user = ?)", (aai_hysteresisEntry, currentuser))
db.commit()
except:
messagebox.showinfo("Error","Please enter a valid value")
pass
#aaiRateSmoothing
if(value == "aaiRateSmoothing"):
temp = self.aaiRateSmoothingEntry.get()
#Try/except to sanitize user input and ask for confirmation if there are no errors
try:
int(temp)
if (temp == '' or int(temp)<0):
messagebox.showinfo("Error","Please enter a valid value")
pass
#Ensure value is in limited range
elif(int(temp) < 0 or int(temp) > 25):
messagebox.showinfo("Error","The range is between 0(off) and 25")
pass
#If everything is good update current value
else:
if messagebox.askyesno("Confirmation", "Replace current value?"):
messagebox.showinfo("Done", "Success")
aai_rateSmoothingEntry = temp
self.aaiRateSmoothingValue.config(text="Current Value: " + aai_rateSmoothingEntry)
db.execute("UPDATE aai SET aai_rateSmoothingEntry = ? WHERE (user = ?)", (aai_rateSmoothingEntry, currentuser))
db.commit()
except:
messagebox.showinfo("Error","Please enter a valid value")
pass
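# The VVI handlers below repeat the same pattern as the AAI handlers above: parse the entry,
# range-check it, ask the user to confirm, update the "Current Value" label, then persist the
# setting for the current user through the db handle.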
#VVI
#vviLowerRateLimit
if(value == "vviLowerRateLimit"):
temp = self.vviLowerRateLimitEntry.get()
#Try/except to sanitize user input and ask for confirmation if there are no errors
try:
int(temp)
if (temp == '' or int(temp)<0):
messagebox.showinfo("Error","Please enter a valid value")
pass
#Ensure upper limit is larger than lower limit
elif(int(self.vviLowerRateLimitEntry.get()) >= int(vvi_upperRateLimitEntry) and int(vvi_upperRateLimitEntry) != 0 ):
messagebox.showinfo("Error","Please ensure your lower rate limit is lower than your upper rate limit")
pass
#Ensure value is in limited range
elif(int(temp) < 30 or int(temp) > 175):
messagebox.showinfo("Error","The range is between 30 and 175")
pass
#If everything is good update current value
else:
if messagebox.askyesno("Confirmation", "Replace current value?"):
messagebox.showinfo("Done", "Success")
vvi_lowerRateLimitEntry = temp
self.vviLowerRateLimitValue.config(text="Current Value: " + vvi_lowerRateLimitEntry)
db.execute("UPDATE vvi SET vvi_lowerRateLimitEntry = ? WHERE (user = ?)", (vvi_lowerRateLimitEntry, currentuser))
db.commit()
except:
messagebox.showinfo("Error","Please enter a valid value")
pass
#vviUpperRateLimit
if(value == "vviUpperRateLimit"):
temp = self.vviUpperRateLimitEntry.get()
#Try/except to sanitize user input and ask for confirmation if there are no errors
try:
int(temp)
if (temp == '' or int(temp)<0):
messagebox.showinfo("Error","Please enter a valid value")
pass
#Ensure upper limit is larger than lower limit
elif(int(vvi_lowerRateLimitEntry) >= int(self.vviUpperRateLimitEntry.get()) and int(vvi_lowerRateLimitEntry) != 0 ):
messagebox.showinfo("Error","Please ensure your lower rate limit is lower than your upper rate limit")
pass
#Ensure value is in limited range
elif(int(temp) < 50 or int(temp) > 175):
messagebox.showinfo("Error","The range is between 50 and 175")
pass
#If everything is good update current value
else:
if messagebox.askyesno("Confirmation", "Replace current value?"):
messagebox.showinfo("Done", "Success")
vvi_upperRateLimitEntry = temp
self.vviUpperRateLimitValue.config(text="Current Value: " + vvi_upperRateLimitEntry)
db.execute("UPDATE vvi SET vvi_upperRateLimitEntry = ? WHERE (user = ?)", (vvi_upperRateLimitEntry, currentuser))
db.commit()
except:
messagebox.showinfo("Error","Please enter a valid value")
pass
#vviAtrialAmplitude
if(value == "vviAtrialAmplitude"):
temp = self.vviAtrialAmplitudeEntry.get()
#Try/except to sanitize user input and ask for confirmation if there are no errors
try:
float(temp)
if (temp == '' or float(temp)<0):
messagebox.showinfo("Error","Please enter a valid value")
pass
#Ensure value is in limited range
elif(float(temp) < 0 or float(temp) > 7.0):
messagebox.showinfo("Error","The range is between 0(off) and 7.0")
pass
#If everything is good update current value
else:
if messagebox.askyesno("Confirmation", "Replace current value?"):
messagebox.showinfo("Done", "Success")
vvi_ventricularAmplitudeEntry = temp
self.vviAtrialAmplitudeValue.config(text="Current Value: " + vvi_ventricularAmplitudeEntry)
db.execute("UPDATE vvi SET vvi_ventricularAmplitudeEntry = ? WHERE (user = ?)", (vvi_ventricularAmplitudeEntry, currentuser))
db.commit()
except:
messagebox.showinfo("Error","Please enter a valid value")
pass
#vviAtrialPulseWidth
if(value == "vviAtrialPulseWidth"):
temp = self.vviAtrialPulseWidthEntry.get()
            #Try/except to sanitize user input and ask for confirmation if there are no errors
try:
float(temp)
if (temp == '' or float(temp)<0):
messagebox.showinfo("Error","Please enter a valid value")
pass
#Ensure value is in limited range
elif(float(temp) < 0.05 or float(temp) > 1.9):
messagebox.showinfo("Error","The range is between 0.05 and 1.9")
pass
#If everything is good update current value
else:
if messagebox.askyesno("Confirmation", "Replace current value?"):
messagebox.showinfo("Done", "Success")
vvi_ventricularPulseWidthEntry = temp
self.vviAtrialPulseWidthValue.config(text="Current Value: " + vvi_ventricularPulseWidthEntry)
db.execute("UPDATE vvi SET vvi_ventricularPulseWidthEntry = ? WHERE (user = ?)", (vvi_ventricularPulseWidthEntry, currentuser))
db.commit()
except:
messagebox.showinfo("Error","Please enter a valid value")
pass
#vviAtrialSensitivity
if(value == "vviAtrialSensitivity"):
temp = self.vviAtrialSensitivityEntry.get()
            #Try/except to sanitize user input and ask for confirmation if there are no errors
try:
float(temp)
if (temp == '' or float(temp)<0):
messagebox.showinfo("Error","Please enter a valid value")
pass
#Ensure value is in limited range
elif(float(temp) < 0.25 or float(temp) > 10.0):
messagebox.showinfo("Error","The range is between 0.25 and 10.0")
pass
#If everything is good update current value
else:
if messagebox.askyesno("Confirmation", "Replace current value?"):
messagebox.showinfo("Done", "Success")
vvi_ventricularSensitivityEntry = temp
self.vviAtrialSensitivityValue.config(text="Current Value: " + vvi_ventricularSensitivityEntry)
db.execute("UPDATE vvi SET vvi_ventricularSensitivityEntry = ? WHERE (user = ?)", (vvi_ventricularSensitivityEntry, currentuser))
db.commit()
except:
messagebox.showinfo("Error","Please enter a valid value")
pass
#vviARP
if(value == "vviARP"):
temp = self.vviARPEntry.get()
            #Try/except to sanitize user input and ask for confirmation if there are no errors
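            # Editor's note: the blocks above repeat the same validate-confirm-store
            # pattern. A hedged sketch of a reusable helper (hypothetical name
            # ``validate_and_store``; it assumes the same ``messagebox`` and DB-API
            # ``db`` objects used above, and is not part of the original code):
            #
            #   def validate_and_store(raw, lo, hi, cast, table, column, db, currentuser):
            #       """Return the stored value as a string, or None if validation fails."""
            #       try:
            #           val = cast(raw)                      # e.g. int or float
            #       except ValueError:
            #           messagebox.showinfo("Error", "Please enter a valid value")
            #           return None
            #       if val < lo or val > hi:
            #           messagebox.showinfo("Error", "The range is between %s and %s" % (lo, hi))
            #           return None
            #       if messagebox.askyesno("Confirmation", "Replace current value?"):
            #           db.execute("UPDATE %s SET %s = ? WHERE (user = ?)" % (table, column),
            #                      (raw, currentuser))
            #           db.commit()
            #           messagebox.showinfo("Done", "Success")
            #           return raw
            #       return None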
from __future__ import print_function
import logging
from abc import ABCMeta, abstractmethod
import progressbar
from six import add_metaclass
from toolz import first
import re
logger = logging.getLogger(__name__)
def callback(func):
func._is_callback = True
return func
class TrainingExtension(object):
"""The base class for training extensions.
An extension is a set of callbacks sharing a joint context that are
invoked at certain stages of the training procedure. These callbacks
typically add a certain functionality to the training procedure,
e.g. running validation on auxiliary datasets or early stopping.
Parameters
----------
name : str, optional
The name of the extension. The names are useful in order to
distinguish between several extensions of the same type that
belongs to the same main loop. By default the name is set to
the name of the class.
Attributes
----------
main_loop : :class:`.MainLoop`
The main loop to which the extension belongs.
name : str
The name of the extension.
"""
def __init__(self, name=None):
if not name:
name = self.__class__.__name__
self.name = name
@property
def main_loop(self):
if not hasattr(self, '_main_loop'):
raise ValueError("main loop must be assigned to extension first")
return self._main_loop
@main_loop.setter
def main_loop(self, value):
self._main_loop = value
def dispatch(self, callback_name, *args):
"""Runs callback with the given name.
The reason for having this method is to allow
the descendants of the :class:`TrainingExtension` to intercept
callback invocations and do something with them, e.g. block
        when a certain condition does not hold. The default implementation
simply invokes the callback by its name.
"""
getattr(self, str(callback_name))(*args)
@callback
def on_resumption(self):
"""The callback invoked after training is resumed."""
pass
@callback
def on_error(self):
"""The callback invoked when an error occurs."""
pass
@callback
def before_training(self):
"""The callback invoked before training is started."""
pass
@callback
def before_epoch(self):
"""The callback invoked before starting an epoch."""
pass
@callback
def before_batch(self, batch):
"""The callback invoked before a batch is processed.
Parameters
----------
batch : object
The data batch to be processed.
"""
pass
@callback
def after_batch(self, batch):
"""The callback invoked after a batch is processed.
Parameters
----------
batch : object
The data batch just processed.
"""
pass
@callback
def after_epoch(self):
"""The callback invoked after an epoch is finished."""
pass
@callback
def after_training(self):
"""The callback invoked after training is finished."""
pass
@callback
def on_interrupt(self):
"""The callback invoked when training is interrupted."""
pass
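# A minimal illustrative sketch (not part of the library): a custom extension
# only needs to override the callbacks it cares about, e.g.
#
#     class EpochLogger(TrainingExtension):
#         def after_epoch(self):
#             epochs = self.main_loop.log.status['epochs_done']
#             logger.info("finished epoch %d", epochs)
#
# The main loop invokes ``dispatch('after_epoch')`` on every extension it
# owns, which in turn calls this method.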
class CallbackName(str):
"""A name of a TrainingExtension callback.
Raises
------
:class:`TypeError` on comparison with a string which is not a name of
TrainingExtension callback.
"""
def __eq__(self, other):
callback_names = [key for key, value
in TrainingExtension.__dict__.items()
if getattr(value, '_is_callback', False)]
if other not in callback_names:
raise TypeError("{} is not a valid callback.".format(other))
return str(self) == other
class Predicate(object):
def __init__(self, condition, num):
self.condition = condition
self.num = num
def __call__(self, log):
if self.condition.endswith('epochs'):
entry = log.status['epochs_done']
else:
entry = log.status['iterations_done']
if self.condition.startswith('every'):
return entry % self.num == 0
else:
return entry == self.num
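# Example (sketch): Predicate('every_n_epochs', 5) returns True whenever the
# log reports a multiple of 5 completed epochs, e.g.
#
#     pred = Predicate('every_n_epochs', 5)
#     pred(log)   # True when log.status['epochs_done'] % 5 == 0
#
# while Predicate('after_n_batches', 100) fires only when exactly 100 batches
# have been processed.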
def has_done_epochs(log):
return log.status['epochs_done'] == 0
def always_true(log):
return True
@add_metaclass(ABCMeta)
class SimpleExtension(TrainingExtension):
"""A base class for simple extensions.
All logic of simple extensions is concentrated in the method
:meth:`do`. This method is called when certain conditions are
fulfilled. The user can manage the conditions by calling the
`add_condition` method and by passing arguments to the constructor. In
addition to specifying when :meth:`do` is called, it is possible to
specify additional arguments passed to :meth:`do` under different
conditions.
Parameters
----------
before_training : bool
If ``True``, :meth:`do` is invoked before training.
before_first_epoch : bool
If ``True``, :meth:`do` is invoked before the first epoch.
before_epoch : bool
If ``True``, :meth:`do` is invoked before every epoch.
on_resumption : bool, optional
If ``True``, :meth:`do` is invoked when training is resumed.
on_interrupt : bool, optional
If ``True``, :meth:`do` is invoked when training is interrupted.
after_epoch : bool
If ``True``, :meth:`do` is invoked after every epoch.
after_batch: bool
If ``True``, :meth:`do` is invoked after every batch.
after_training : bool
If ``True``, :meth:`do` is invoked after training.
after_n_epochs : int, optional
If not ``None``, :meth:`do` is invoked when `after_n_epochs`
epochs are done.
every_n_epochs : int, optional
If not ``None``, :meth:`do` is invoked after every n-th epoch.
after_n_batches : int, optional
If not ``None``, :meth:`do` is invoked when `after_n_batches`
batches are processed.
every_n_batches : int, optional
If not ``None``, :meth:`do` is invoked after every n-th batch.
"""
BOOLEAN_TRIGGERS = frozenset(["before_training", "before_first_epoch",
"before_epoch", "on_resumption",
"on_interrupt", "after_epoch",
"after_batch", "after_training"])
INTEGER_TRIGGERS = frozenset(["after_n_epochs", "after_n_batches",
"every_n_epochs", "every_n_batches"])
def __init__(self, **kwargs):
self._conditions = []
super_kwargs = {}
trigger_keywords = self.BOOLEAN_TRIGGERS | self.INTEGER_TRIGGERS
conditions = {}
for key, value in kwargs.items():
if key in trigger_keywords:
conditions[key] = value
else:
super_kwargs[key] = value
self.set_conditions(**conditions)
super(SimpleExtension, self).__init__(**super_kwargs)
def set_conditions(self, **kwargs):
"""Set the conditions for which this extension should be run.
Parameters
----------
See the :class:`SimpleExtension` docstring for a list of
possible parameters.
"""
self._conditions[:] = []
predicates = {'before_first_epoch': has_done_epochs}
conditions = {
'before_first_epoch': 'before_epoch',
'after_epoch': 'after_epoch',
'after_batch': 'after_batch',
'every_n_batches': 'after_batch',
'every_n_epochs': 'after_epoch',
'after_n_batches': 'after_batch',
'after_n_epochs': 'after_epoch'
}
# Freeze the keys as a list so that we can safely modify kwargs.
for key, value in kwargs.items():
if value:
if key in self.BOOLEAN_TRIGGERS:
self.add_condition([conditions.get(key, key)],
predicate=predicates.get(key, None))
elif key in self.INTEGER_TRIGGERS:
predicate = Predicate(key, value)
self.add_condition([conditions.get(key, key)],
predicate=predicate)
else:
raise KeyError("Invalid condition: {}".format(key))
return self # For chaining calls.
def add_condition(self, callbacks_names, predicate=None, arguments=None):
"""Adds a condition under which a :meth:`do` is called.
Parameters
----------
callbacks_names : list of str
            The names of the callbacks in which :meth:`do` should be called.
        predicate : function
            A predicate function taking the main loop's log as its single
            parameter and returning ``True`` when the method should be
            called and ``False`` otherwise. If ``None``, an always ``True``
            predicate is used.
arguments : iterable
Additional arguments to be passed to :meth:`do`. They will
be concatenated with the ones passed from the main loop
            (e.g. the batch in case of the `after_batch` callback).
Returns
-------
The extension object (allow chaining calls)
"""
if not isinstance(callbacks_names, (list, tuple)):
raise ValueError("callbacks_names must be list or tuple.")
for _callback_name in callbacks_names:
if not arguments:
arguments = []
if not predicate:
self._conditions.append((_callback_name, always_true,
arguments))
else:
self._conditions.append((_callback_name, predicate,
arguments))
return self
@abstractmethod
def do(self, which_callback, *args):
r"""Does the job of the training extension.
Parameters
----------
which_callback : str
The name of the callback in the context of which :meth:`do` is
run.
\*args : tuple
The arguments from the main loop concatenated with additional
arguments from user.
Notes
-----
Subclasses *must* accept additional positional arguments in their
call signature for this method, even if they are unused.
"""
pass
def dispatch(self, callback_invoked, *from_main_loop):
"""Check conditions and call the :meth:`do` method.
Also adds additional arguments if specified for a condition.
.. todo::
Add a check for a situation when several conditions are met
at the same time and do something.
"""
for callback_name, predicate, arguments in self._conditions:
if (callback_name == callback_invoked and
predicate(self.main_loop.log)):
self.do(callback_invoked, *(from_main_loop + tuple(arguments)))
@staticmethod
def parse_args(which_callback, args):
"""Separates :meth:`do` arguments coming from different sources.
When a :meth:`do` method receives arguments from both the main
loop (e.g. a batch) and the user, it often has to separate them.
This method is the right tool to use.
Parameters
----------
which_callback : str
The name of the callback.
args : iterable
The arguments.
Returns
-------
from_main_loop : tuple
from_user : tuple
"""
args = tuple(args)
if (which_callback == 'after_batch' or
which_callback == 'before_batch'):
return (args[0],), args[1:]
return (), args
class FinishAfter(SimpleExtension):
"""Finishes the training process when triggered."""
def __init__(self, **kwargs):
super(FinishAfter, self).__init__(**kwargs)
def do(self, which_callback, *args):
self.main_loop.log.current_row['training_finish_requested'] = True
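# Usage sketch: because FinishAfter is a SimpleExtension, its trigger is set
# purely through constructor keywords, e.g.
#
#     finisher = FinishAfter(after_n_epochs=10)
#
# which internally calls ``add_condition(['after_epoch'],
# predicate=Predicate('after_n_epochs', 10))`` so that ``do`` marks
# ``training_finish_requested`` once ten epochs are done.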
class PrintingFilterUnderscored(object):
def __call__(self, attr):
return attr.startswith('_')
class PrintingFilterList(object):
def __init__(self, *args, **kwargs):
"""
        Filters out a given set of names or regexps
Parameters
----------
        \*args : list of str
            Strings (or regexps) to filter
filter_standard_names: bool, default True
If true, a standard list of names will be filtered
filter_underscored: bool, default True
If true, names beginning with an underscore will be filtered
"""
huge_re_parts = []
filter_standard_names = kwargs.pop('filter_standard_names', True)
filter_underscored = kwargs.pop('filter_underscored', True)
super(PrintingFilterList, self).__init__(**kwargs)
if filter_standard_names:
huge_re_parts += ['batch_interrupt_received',
'epoch_interrupt_received',
'epoch_started',
'received_first_batch',
'resumed_from',
'training_started']
if filter_underscored:
LEXICON_REF)
def factory(*args_, **kwargs_):
if CurrentSubclassModule_ is not None:
subclass = getSubclassFromModule_(
CurrentSubclassModule_, lingType)
if subclass is not None:
return subclass(*args_, **kwargs_)
if lingType.subclass:
return lingType.subclass(*args_, **kwargs_)
else:
return lingType(*args_, **kwargs_)
factory = staticmethod(factory)
def get_LINGUISTIC_TYPE_ID(self): return self.LINGUISTIC_TYPE_ID
def set_LINGUISTIC_TYPE_ID(self, LINGUISTIC_TYPE_ID): self.LINGUISTIC_TYPE_ID = LINGUISTIC_TYPE_ID
def get_TIME_ALIGNABLE(self): return self.TIME_ALIGNABLE
def set_TIME_ALIGNABLE(self, TIME_ALIGNABLE): self.TIME_ALIGNABLE = TIME_ALIGNABLE
def get_CONSTRAINTS(self): return self.CONSTRAINTS
def set_CONSTRAINTS(self, CONSTRAINTS): self.CONSTRAINTS = CONSTRAINTS
def get_GRAPHIC_REFERENCES(self): return self.GRAPHIC_REFERENCES
def set_GRAPHIC_REFERENCES(self, GRAPHIC_REFERENCES): self.GRAPHIC_REFERENCES = GRAPHIC_REFERENCES
def get_CONTROLLED_VOCABULARY_REF(self): return self.CONTROLLED_VOCABULARY_REF
def set_CONTROLLED_VOCABULARY_REF(self, CONTROLLED_VOCABULARY_REF): self.CONTROLLED_VOCABULARY_REF = CONTROLLED_VOCABULARY_REF
def get_EXT_REF(self): return self.EXT_REF
def set_EXT_REF(self, EXT_REF): self.EXT_REF = EXT_REF
def get_LEXICON_REF(self): return self.LEXICON_REF
def set_LEXICON_REF(self, LEXICON_REF): self.LEXICON_REF = LEXICON_REF
def hasContent_(self):
if (
):
return True
else:
return False
def export(self, outfile, level, namespace_='', name_='lingType', namespacedef_='', pretty_print=True):
imported_ns_def_ = GenerateDSNamespaceDefs_.get('lingType')
if imported_ns_def_ is not None:
namespacedef_ = imported_ns_def_
if pretty_print:
eol_ = '\n'
else:
eol_ = ''
if self.original_tagname_ is not None:
name_ = self.original_tagname_
showIndent(outfile, level, pretty_print)
outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
already_processed = set()
self.exportAttributes(outfile, level, already_processed, namespace_, name_='lingType')
if self.hasContent_():
outfile.write('>%s' % (eol_, ))
self.exportChildren(outfile, level + 1, namespace_='', name_='lingType', pretty_print=pretty_print)
outfile.write('</%s%s>%s' % (namespace_, name_, eol_))
else:
outfile.write('/>%s' % (eol_, ))
def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='lingType'):
if self.LINGUISTIC_TYPE_ID is not None and 'LINGUISTIC_TYPE_ID' not in already_processed:
already_processed.add('LINGUISTIC_TYPE_ID')
outfile.write(' LINGUISTIC_TYPE_ID=%s' % (self.gds_encode(self.gds_format_string(quote_attrib(self.LINGUISTIC_TYPE_ID), input_name='LINGUISTIC_TYPE_ID')), ))
if self.TIME_ALIGNABLE is not None and 'TIME_ALIGNABLE' not in already_processed:
already_processed.add('TIME_ALIGNABLE')
outfile.write(' TIME_ALIGNABLE="%s"' % self.gds_format_boolean(self.TIME_ALIGNABLE, input_name='TIME_ALIGNABLE'))
if self.CONSTRAINTS is not None and 'CONSTRAINTS' not in already_processed:
already_processed.add('CONSTRAINTS')
outfile.write(' CONSTRAINTS=%s' % (self.gds_encode(self.gds_format_string(quote_attrib(self.CONSTRAINTS), input_name='CONSTRAINTS')), ))
if self.GRAPHIC_REFERENCES is not None and 'GRAPHIC_REFERENCES' not in already_processed:
already_processed.add('GRAPHIC_REFERENCES')
outfile.write(' GRAPHIC_REFERENCES="%s"' % self.gds_format_boolean(self.GRAPHIC_REFERENCES, input_name='GRAPHIC_REFERENCES'))
if self.CONTROLLED_VOCABULARY_REF is not None and 'CONTROLLED_VOCABULARY_REF' not in already_processed:
already_processed.add('CONTROLLED_VOCABULARY_REF')
outfile.write(' CONTROLLED_VOCABULARY_REF=%s' % (self.gds_encode(self.gds_format_string(quote_attrib(self.CONTROLLED_VOCABULARY_REF), input_name='CONTROLLED_VOCABULARY_REF')), ))
if self.EXT_REF is not None and 'EXT_REF' not in already_processed:
already_processed.add('EXT_REF')
outfile.write(' EXT_REF=%s' % (self.gds_encode(self.gds_format_string(quote_attrib(self.EXT_REF), input_name='EXT_REF')), ))
if self.LEXICON_REF is not None and 'LEXICON_REF' not in already_processed:
already_processed.add('LEXICON_REF')
outfile.write(' LEXICON_REF=%s' % (self.gds_encode(self.gds_format_string(quote_attrib(self.LEXICON_REF), input_name='LEXICON_REF')), ))
def exportChildren(self, outfile, level, namespace_='', name_='lingType', fromsubclass_=False, pretty_print=True):
pass
def build(self, node):
already_processed = set()
self.buildAttributes(node, node.attrib, already_processed)
for child in node:
nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
self.buildChildren(child, node, nodeName_)
return self
def buildAttributes(self, node, attrs, already_processed):
value = find_attr_value_('LINGUISTIC_TYPE_ID', node)
if value is not None and 'LINGUISTIC_TYPE_ID' not in already_processed:
already_processed.add('LINGUISTIC_TYPE_ID')
self.LINGUISTIC_TYPE_ID = value
value = find_attr_value_('TIME_ALIGNABLE', node)
if value is not None and 'TIME_ALIGNABLE' not in already_processed:
already_processed.add('TIME_ALIGNABLE')
if value in ('true', '1'):
self.TIME_ALIGNABLE = True
elif value in ('false', '0'):
self.TIME_ALIGNABLE = False
else:
raise_parse_error(node, 'Bad boolean attribute')
value = find_attr_value_('CONSTRAINTS', node)
if value is not None and 'CONSTRAINTS' not in already_processed:
already_processed.add('CONSTRAINTS')
self.CONSTRAINTS = value
value = find_attr_value_('GRAPHIC_REFERENCES', node)
if value is not None and 'GRAPHIC_REFERENCES' not in already_processed:
already_processed.add('GRAPHIC_REFERENCES')
if value in ('true', '1'):
self.GRAPHIC_REFERENCES = True
elif value in ('false', '0'):
self.GRAPHIC_REFERENCES = False
else:
raise_parse_error(node, 'Bad boolean attribute')
value = find_attr_value_('CONTROLLED_VOCABULARY_REF', node)
if value is not None and 'CONTROLLED_VOCABULARY_REF' not in already_processed:
already_processed.add('CONTROLLED_VOCABULARY_REF')
self.CONTROLLED_VOCABULARY_REF = value
value = find_attr_value_('EXT_REF', node)
if value is not None and 'EXT_REF' not in already_processed:
already_processed.add('EXT_REF')
self.EXT_REF = value
value = find_attr_value_('LEXICON_REF', node)
if value is not None and 'LEXICON_REF' not in already_processed:
already_processed.add('LEXICON_REF')
self.LEXICON_REF = value
def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
pass
# end class lingType
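# Usage sketch (illustrative, not generated code): these bindings are normally
# driven from a parsed ElementTree node; assuming ``node`` is a
# LINGUISTIC_TYPE element and ``sys`` has been imported, one could do:
#
#     lt = lingType.factory()
#     lt.build(node)
#     lt.export(sys.stdout, 0, name_='LINGUISTIC_TYPE')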
class localeType(GeneratedsSuper):
subclass = None
superclass = None
def __init__(self, LANGUAGE_CODE=None, COUNTRY_CODE=None, VARIANT=None):
self.original_tagname_ = None
self.LANGUAGE_CODE = _cast(None, LANGUAGE_CODE)
self.COUNTRY_CODE = _cast(None, COUNTRY_CODE)
self.VARIANT = _cast(None, VARIANT)
def factory(*args_, **kwargs_):
if CurrentSubclassModule_ is not None:
subclass = getSubclassFromModule_(
CurrentSubclassModule_, localeType)
if subclass is not None:
return subclass(*args_, **kwargs_)
if localeType.subclass:
return localeType.subclass(*args_, **kwargs_)
else:
return localeType(*args_, **kwargs_)
factory = staticmethod(factory)
def get_LANGUAGE_CODE(self): return self.LANGUAGE_CODE
def set_LANGUAGE_CODE(self, LANGUAGE_CODE): self.LANGUAGE_CODE = LANGUAGE_CODE
def get_COUNTRY_CODE(self): return self.COUNTRY_CODE
def set_COUNTRY_CODE(self, COUNTRY_CODE): self.COUNTRY_CODE = COUNTRY_CODE
def get_VARIANT(self): return self.VARIANT
def set_VARIANT(self, VARIANT): self.VARIANT = VARIANT
def hasContent_(self):
if (
):
return True
else:
return False
def export(self, outfile, level, namespace_='', name_='localeType', namespacedef_='', pretty_print=True):
imported_ns_def_ = GenerateDSNamespaceDefs_.get('localeType')
if imported_ns_def_ is not None:
namespacedef_ = imported_ns_def_
if pretty_print:
eol_ = '\n'
else:
eol_ = ''
if self.original_tagname_ is not None:
name_ = self.original_tagname_
showIndent(outfile, level, pretty_print)
outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
already_processed = set()
self.exportAttributes(outfile, level, already_processed, namespace_, name_='localeType')
if self.hasContent_():
outfile.write('>%s' % (eol_, ))
self.exportChildren(outfile, level + 1, namespace_='', name_='localeType', pretty_print=pretty_print)
outfile.write('</%s%s>%s' % (namespace_, name_, eol_))
else:
outfile.write('/>%s' % (eol_, ))
def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='localeType'):
if self.LANGUAGE_CODE is not None and 'LANGUAGE_CODE' not in already_processed:
already_processed.add('LANGUAGE_CODE')
outfile.write(' LANGUAGE_CODE=%s' % (self.gds_encode(self.gds_format_string(quote_attrib(self.LANGUAGE_CODE), input_name='LANGUAGE_CODE')), ))
if self.COUNTRY_CODE is not None and 'COUNTRY_CODE' not in already_processed:
already_processed.add('COUNTRY_CODE')
outfile.write(' COUNTRY_CODE=%s' % (self.gds_encode(self.gds_format_string(quote_attrib(self.COUNTRY_CODE), input_name='COUNTRY_CODE')), ))
if self.VARIANT is not None and 'VARIANT' not in already_processed:
already_processed.add('VARIANT')
outfile.write(' VARIANT=%s' % (self.gds_encode(self.gds_format_string(quote_attrib(self.VARIANT), input_name='VARIANT')), ))
def exportChildren(self, outfile, level, namespace_='', name_='localeType', fromsubclass_=False, pretty_print=True):
pass
def build(self, node):
already_processed = set()
self.buildAttributes(node, node.attrib, already_processed)
for child in node:
nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
self.buildChildren(child, node, nodeName_)
return self
def buildAttributes(self, node, attrs, already_processed):
value = find_attr_value_('LANGUAGE_CODE', node)
if value is not None and 'LANGUAGE_CODE' not in already_processed:
already_processed.add('LANGUAGE_CODE')
self.LANGUAGE_CODE = value
value = find_attr_value_('COUNTRY_CODE', node)
if value is not None and 'COUNTRY_CODE' not in already_processed:
already_processed.add('COUNTRY_CODE')
self.COUNTRY_CODE = value
value = find_attr_value_('VARIANT', node)
if value is not None and 'VARIANT' not in already_processed:
already_processed.add('VARIANT')
self.VARIANT = value
def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
pass
# end class localeType
class constraintType(GeneratedsSuper):
subclass = None
superclass = None
def __init__(self, STEREOTYPE=None, DESCRIPTION=None):
self.original_tagname_ = None
self.STEREOTYPE = _cast(None, STEREOTYPE)
self.DESCRIPTION = _cast(None, DESCRIPTION)
def factory(*args_, **kwargs_):
if CurrentSubclassModule_ is not None:
subclass = getSubclassFromModule_(
CurrentSubclassModule_, constraintType)
if subclass is not None:
return subclass(*args_, **kwargs_)
if constraintType.subclass:
return constraintType.subclass(*args_, **kwargs_)
else:
return constraintType(*args_, **kwargs_)
factory = staticmethod(factory)
def get_STEREOTYPE(self): return self.STEREOTYPE
def set_STEREOTYPE(self, STEREOTYPE): self.STEREOTYPE = STEREOTYPE
def get_DESCRIPTION(self): return self.DESCRIPTION
def set_DESCRIPTION(self, DESCRIPTION): self.DESCRIPTION = DESCRIPTION
def hasContent_(self):
if (
):
return True
else:
return False
def export(self, outfile, level, namespace_='', name_='constraintType', namespacedef_='', pretty_print=True):
imported_ns_def_ = GenerateDSNamespaceDefs_.get('constraintType')
if imported_ns_def_ is not None:
namespacedef_ = imported_ns_def_
if pretty_print:
eol_ = '\n'
else:
eol_ = ''
if self.original_tagname_ is not None:
name_ = self.original_tagname_
showIndent(outfile, level, pretty_print)
outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
already_processed = set()
self.exportAttributes(outfile, level, already_processed, namespace_, name_='constraintType')
if self.hasContent_():
outfile.write('>%s' % (eol_, ))
self.exportChildren(outfile, level + 1, namespace_='', name_='constraintType', pretty_print=pretty_print)
outfile.write('</%s%s>%s' % (namespace_, name_, eol_))
else:
outfile.write('/>%s' % (eol_, ))
def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='constraintType'):
if self.STEREOTYPE is not None and 'STEREOTYPE' not in already_processed:
already_processed.add('STEREOTYPE')
outfile.write(' STEREOTYPE=%s' % (self.gds_encode(self.gds_format_string(quote_attrib(self.STEREOTYPE), input_name='STEREOTYPE')), ))
if self.DESCRIPTION is not None and 'DESCRIPTION' not in already_processed:
already_processed.add('DESCRIPTION')
outfile.write(' DESCRIPTION=%s' % (self.gds_encode(self.gds_format_string(quote_attrib(self.DESCRIPTION), input_name='DESCRIPTION')), ))
def exportChildren(self, outfile, level, namespace_='', name_='constraintType', fromsubclass_=False, pretty_print=True):
pass
def build(self, node):
already_processed = set()
self.buildAttributes(node, node.attrib, already_processed)
for child in node:
nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
self.buildChildren(child, node, nodeName_)
return self
def buildAttributes(self, node, attrs, already_processed):
value = find_attr_value_('STEREOTYPE', node)
if value is not None and 'STEREOTYPE' not in already_processed:
already_processed.add('STEREOTYPE')
self.STEREOTYPE = value
value = find_attr_value_('DESCRIPTION', node)
if value is not None and 'DESCRIPTION' not in already_processed:
already_processed.add('DESCRIPTION')
self.DESCRIPTION = value
def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
pass
# end class constraintType
class convocType(GeneratedsSuper):
"""A reference to an url of an external Controlled Vocabulary. Is
intended to be mutually exclusive with a sequence of CV_ENTRY_ML
elements."""
subclass = None
superclass = None
def __init__(self, CV_ID=None, EXT_REF=None, DESCRIPTION=None, CV_ENTRY_ML=None):
self.original_tagname_ = None
self.CV_ID = _cast(None, CV_ID)
self.EXT_REF = _cast(None, EXT_REF)
if DESCRIPTION is None:
self.DESCRIPTION = []
else:
self.DESCRIPTION = DESCRIPTION
if CV_ENTRY_ML is None:
self.CV_ENTRY_ML = []
else:
self.CV_ENTRY_ML = CV_ENTRY_ML
def factory(*args_, **kwargs_):
if CurrentSubclassModule_ is not None:
subclass = getSubclassFromModule_(
CurrentSubclassModule_, convocType)
if subclass is not None:
return subclass(*args_, **kwargs_)
if convocType.subclass:
return convocType.subclass(*args_, **kwargs_)
else:
return convocType(*args_, **kwargs_)
factory = staticmethod(factory)
def get_DESCRIPTION(self): return self.DESCRIPTION
def set_DESCRIPTION(self, DESCRIPTION): self.DESCRIPTION = DESCRIPTION
def add_DESCRIPTION(self, value): self.DESCRIPTION.append(value)
def insert_DESCRIPTION_at(self, index, value): self.DESCRIPTION.insert(index, value)
op in [('__radd__', operator.__add__),
('__rsub__', operator.__sub__),
('__rmul__', operator.__mul__),
('__rpow__', operator.__pow__),
('__rtruediv__', operator.__truediv__)]:
setattr(Tensor, meth_name, _make_rhand_array_promote_func(op, meth_name))
# --------------------------------------------------------------------------- #
# Tensor Network Class #
# --------------------------------------------------------------------------- #
class TensorNetwork(object):
r"""A collection of (as yet uncontracted) Tensors.
Parameters
----------
ts : sequence of Tensor or TensorNetwork
The objects to combine. The new network will copy these (but not the
underlying data) by default. For a *view* set ``virtual=True``.
virtual : bool, optional
Whether the TensorNetwork should be a *view* onto the tensors it is
given, or a copy of them. E.g. if a virtual TN is constructed, any
changes to a Tensor's indices or tags will propagate to all TNs viewing
that Tensor.
check_collisions : bool, optional
If True, the default, then ``TensorNetwork`` instances with double
        indices which match another ``TensorNetwork`` instance's double indices
will have those indices' names mangled. Can be explicitly turned off
when it is known that no collisions will take place -- i.e. when not
adding any new tensors.
Attributes
----------
tensor_map : dict
        Mapping of unique ids to tensors, like ``{tensor_id: tensor, ...}``.
I.e. this is where the tensors are 'stored' by the network.
tag_map : dict
Mapping of tags to a set of tensor ids which have those tags. I.e.
``{tag: {tensor_id_1, tensor_id_2, ...}}``. Thus to select those
        tensors one could do: ``map(tensor_map.__getitem__, tag_map[tag])``.
ind_map : dict
        Like ``tag_map`` but for indices. So ``ind_map[ind]`` returns the
tensor ids of those tensors with ``ind``.
exponent : float
A scalar prefactor for the tensor network, stored in base 10 like
``10**exponent``. This is mostly for conditioning purposes and will be
        ``0.0`` unless you use ``equalize_norms(value)`` or
``tn.strip_exponent(tid_or_tensor)``.
"""
_EXTRA_PROPS = ()
_CONTRACT_STRUCTURED = False
def __init__(self, ts, *, virtual=False, check_collisions=True):
# short-circuit for copying TensorNetworks
if isinstance(ts, TensorNetwork):
self.tag_map = valmap(lambda tids: tids.copy(), ts.tag_map)
self.ind_map = valmap(lambda tids: tids.copy(), ts.ind_map)
self.tensor_map = dict()
for tid, t in ts.tensor_map.items():
self.tensor_map[tid] = t if virtual else t.copy()
self.tensor_map[tid].add_owner(self, tid)
self._inner_inds = ts._inner_inds.copy()
self._outer_inds = ts._outer_inds.copy()
self._tid_counter = ts._tid_counter
self.exponent = ts.exponent
for ep in ts.__class__._EXTRA_PROPS:
setattr(self, ep, getattr(ts, ep))
return
# internal structure
self._tid_counter = 0
self.tensor_map = dict()
self.tag_map = dict()
self.ind_map = dict()
self._inner_inds = oset()
self._outer_inds = oset()
self.exponent = 0.0
for t in ts:
self.add(t, virtual=virtual, check_collisions=check_collisions)
def __and__(self, other):
"""Combine this tensor network with more tensors, without contracting.
Copies the tensors.
"""
return TensorNetwork((self, other))
def __or__(self, other):
"""Combine this tensor network with more tensors, without contracting.
Views the constituent tensors.
"""
return TensorNetwork((self, other), virtual=True)
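    # Example (sketch): ``&`` builds a new network that owns copies of the
    # tensors, while ``|`` merely views them, so in-place changes propagate:
    #
    #     tn_copy = tna & tnb   # independent copy of both networks
    #     tn_view = tna | tnb   # virtual view; retagging a tensor in tna
    #                           # is visible through tn_view as well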
@classmethod
def from_TN(cls, tn, like=None, inplace=False, **kwargs):
"""Construct a specific tensor network subclass (i.e. one with some
promise about structure/geometry and tags/inds such as an MPS) from
a generic tensor network which should have that structure already.
Parameters
----------
cls : class
The TensorNetwork subclass to convert ``tn`` to.
tn : TensorNetwork
The TensorNetwork to convert.
like : TensorNetwork, optional
            If specified, try and retrieve the necessary attribute values from
this tensor network.
inplace : bool, optional
Whether to perform the conversion inplace or not.
kwargs
Extra properties of the TN subclass that should be specified.
"""
new_tn = tn if inplace else tn.copy()
for prop in cls._EXTRA_PROPS:
# equate real and private property name
prop_name = prop.lstrip('_')
# get value from kwargs
if prop_name in kwargs:
setattr(new_tn, prop, kwargs.pop(prop_name))
# get value from another manually specified TN
elif (like is not None) and hasattr(like, prop_name):
setattr(new_tn, prop, getattr(like, prop_name))
# get value directly from TN
elif hasattr(tn, prop_name):
setattr(new_tn, prop, getattr(tn, prop_name))
else:
raise ValueError(
f"You need to specify '{prop_name}' for the tensor network"
f" class {cls}, and ensure that it correctly corresponds "
f"to the structure of the tensor network supplied, since "
f"it cannot be found as an attribute on the TN: {tn}.")
if kwargs:
raise ValueError(
f"Options {kwargs} are invalid for the class {cls}.")
new_tn.__class__ = cls
return new_tn
def view_as(self, cls, inplace=False, **kwargs):
"""View this tensor network as subclass ``cls``.
"""
return cls.from_TN(self, inplace=inplace, **kwargs)
view_as_ = functools.partialmethod(view_as, inplace=True)
def view_like(self, like, inplace=False, **kwargs):
"""View this tensor network as the same subclass ``cls`` as ``like``
inheriting its extra properties as well.
"""
return self.view_as(like.__class__, like=like,
inplace=inplace, **kwargs)
view_like_ = functools.partialmethod(view_like, inplace=True)
# ------------------------------- Methods ------------------------------- #
def copy(self, virtual=False, deep=False):
"""Copy this ``TensorNetwork``. If ``deep=False``, (the default), then
everything but the actual numeric data will be copied.
"""
if deep:
return copy.deepcopy(self)
return self.__class__(self, virtual=virtual)
__copy__ = copy
def _link_tags(self, tags, tid):
"""Link ``tid`` to each of ``tags``.
"""
for tag in tags:
if tag in self.tag_map:
self.tag_map[tag].add(tid)
else:
self.tag_map[tag] = oset((tid,))
def _unlink_tags(self, tags, tid):
""""Unlink ``tid`` from each of ``tags``.
"""
for tag in tags:
try:
tids = self.tag_map[tag]
tids.discard(tid)
if not tids:
# tid was last tensor -> delete entry
del self.tag_map[tag]
except KeyError:
                # tid already removed from tag entry - e.g. repeated tag
pass
def _link_inds(self, inds, tid):
"""Link ``tid`` to each of ``inds``.
"""
for ind in inds:
if ind in self.ind_map:
self.ind_map[ind].add(tid)
self._outer_inds.discard(ind)
self._inner_inds.add(ind)
else:
self.ind_map[ind] = oset((tid,))
self._outer_inds.add(ind)
def _unlink_inds(self, inds, tid):
""""Unlink ``tid`` from each of ``inds``.
"""
for ind in inds:
try:
tids = self.ind_map[ind]
tids.discard(tid)
occurences = len(tids)
if occurences == 0:
# tid was last tensor -> delete entry
del self.ind_map[ind]
self._outer_inds.discard(ind)
elif occurences == 1:
self._inner_inds.discard(ind)
self._outer_inds.add(ind)
except KeyError:
                # tid already removed from ind entry - e.g. repeated index
pass
def _reset_inner_outer(self, inds):
for ind in inds:
occurences = len(self.ind_map[ind])
if occurences == 1:
self._inner_inds.discard(ind)
self._outer_inds.add(ind)
else:
self._inner_inds.add(ind)
self._outer_inds.discard(ind)
def _next_tid(self):
# N.B. safer? previous behavior -> return rand_uuid('_T')
while self._tid_counter in self.tensor_map:
self._tid_counter = self._tid_counter + 1
return self._tid_counter
def add_tensor(self, tensor, tid=None, virtual=False):
"""Add a single tensor to this network - mangle its tid if neccessary.
"""
# check for tid conflict
if (tid is None) or (tid in self.tensor_map):
tid = self._next_tid()
# add tensor to the main index
T = tensor if virtual else tensor.copy()
self.tensor_map[tid] = T
T.add_owner(self, tid)
# add its tid to the relevant tag and inds maps, or create new entries
self._link_tags(T.tags, tid)
self._link_inds(T.inds, tid)
def add_tensor_network(self, tn, virtual=False, check_collisions=True):
"""
"""
if check_collisions: # add tensors individually
# check for matching inner_indices -> need to re-index
clash_ix = self._inner_inds & tn._inner_inds
reind = {ix: rand_uuid() for ix in clash_ix}
else:
clash_ix = False
reind = None
# add tensors, reindexing if necessary
for tid, tsr in tn.tensor_map.items():
if clash_ix and any(i in reind for i in tsr.inds):
tsr = tsr.reindex(reind, inplace=virtual)
self.add_tensor(tsr, virtual=virtual, tid=tid)
self.exponent = self.exponent + tn.exponent
def add(self, t, virtual=False, check_collisions=True):
"""Add Tensor, TensorNetwork or sequence thereof to self.
"""
if isinstance(t, (tuple, list)):
for each_t in t:
self.add(each_t, virtual=virtual,
check_collisions=check_collisions)
return
istensor = isinstance(t, Tensor)
istensornetwork = isinstance(t, TensorNetwork)
if not (istensor or istensornetwork):
raise TypeError("TensorNetwork should be called as "
"`TensorNetwork(ts, ...)`, where each "
"object in 'ts' is a Tensor or "
"TensorNetwork.")
if istensor:
self.add_tensor(t, virtual=virtual)
else:
self.add_tensor_network(t, virtual=virtual,
check_collisions=check_collisions)
def make_tids_consecutive(self, tid0=0):
"""Reset the `tids` - node identifies - to be consecutive integers.
"""
tids = tuple(self.tensor_map.keys())
ts = tuple(map(self._pop_tensor, tids))
self._tid_counter = tid0
self.add(ts, virtual=True)
def __iand__(self, tensor):
"""Inplace, but non-virtual, addition of a Tensor or TensorNetwork to
this network. It should not have any conflicting indices.
"""
self.add(tensor, virtual=False)
return self
def __ior__(self, tensor):
"""Inplace, virtual, addition of a Tensor or TensorNetwork to this
network. It should not have any conflicting indices.
"""
self.add(tensor, virtual=True)
return self
def _modify_tensor_tags(self, old, new, tid):
self._unlink_tags(old - new, tid)
self._link_tags(new - old, tid)
def _modify_tensor_inds(self, old, new, tid):
self._unlink_inds(old - new, tid)
self._link_inds(new - old, tid)
@property
def num_tensors(self):
"""The total number of tensors in the tensor network.
"""
return len(self.tensor_map)
@property
def num_indices(self):
"""The total number of indices in the tensor network.
"""
return len(self.ind_map)
def _pop_tensor(self, tid):
"""Remove a tensor from this network, returning said tensor.
# Copyright 2013-2022 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
"""The variant module contains data structures that are needed to manage
variants both in packages and in specs.
"""
import functools
import inspect
import itertools
import re
from six import StringIO
import llnl.util.lang as lang
import llnl.util.tty.color
from llnl.util.compat import Sequence
import spack.directives
import spack.error as error
from spack.util.string import comma_or
special_variant_values = [None, 'none', '*']
class Variant(object):
"""Represents a variant in a package, as declared in the
variant directive.
"""
def __init__(
self,
name,
default,
description,
values=(True, False),
multi=False,
validator=None,
sticky=False
):
"""Initialize a package variant.
Args:
name (str): name of the variant
default (str): default value for the variant in case
nothing has been specified
description (str): purpose of the variant
values (sequence): sequence of allowed values or a callable
accepting a single value as argument and returning True if the
value is good, False otherwise
            multi (bool): whether multiple comma-separated values are allowed
validator (callable): optional callable used to enforce
additional logic on the set of values being validated
sticky (bool): if true the variant is set to the default value at
concretization time
"""
self.name = name
self.default = default
self.description = str(description)
self.values = None
if values == '*':
# wildcard is a special case to make it easy to say any value is ok
self.single_value_validator = lambda x: True
elif isinstance(values, type):
# supplying a type means any value *of that type*
def isa_type(v):
try:
values(v)
return True
except ValueError:
return False
self.single_value_validator = isa_type
if callable(values):
# If 'values' is a callable, assume it is a single value
# validator and reset the values to be explicit during debug
self.single_value_validator = values
else:
# Otherwise assume values is the set of allowed explicit values
self.values = values
self.single_value_validator = lambda x: x in tuple(self.values)
self.multi = multi
self.group_validator = validator
self.sticky = sticky
def validate_or_raise(self, vspec, pkg=None):
"""Validate a variant spec against this package variant. Raises an
exception if any error is found.
Args:
vspec (Variant): instance to be validated
pkg (spack.package.Package): the package that required the validation,
if available
Raises:
InconsistentValidationError: if ``vspec.name != self.name``
MultipleValuesInExclusiveVariantError: if ``vspec`` has
multiple values but ``self.multi == False``
InvalidVariantValueError: if ``vspec.value`` contains
invalid values
"""
# Check the name of the variant
if self.name != vspec.name:
raise InconsistentValidationError(vspec, self)
# Check the values of the variant spec
value = vspec.value
if isinstance(vspec.value, (bool, str)):
value = (vspec.value,)
# If the value is exclusive there must be at most one
if not self.multi and len(value) != 1:
raise MultipleValuesInExclusiveVariantError(vspec, pkg)
# Check and record the values that are not allowed
not_allowed_values = [
x for x in value
if x != '*' and self.single_value_validator(x) is False
]
if not_allowed_values:
raise InvalidVariantValueError(self, not_allowed_values, pkg)
# Validate the group of values if needed
if self.group_validator is not None and value != ('*',):
self.group_validator(pkg.name, self.name, value)
@property
def allowed_values(self):
"""Returns a string representation of the allowed values for
printing purposes
Returns:
str: representation of the allowed values
"""
# Join an explicit set of allowed values
if self.values is not None:
v = tuple(str(x) for x in self.values)
return ', '.join(v)
# In case we were given a single-value validator
# print the docstring
docstring = inspect.getdoc(self.single_value_validator)
v = docstring if docstring else ''
return v
def make_default(self):
"""Factory that creates a variant holding the default value.
Returns:
MultiValuedVariant or SingleValuedVariant or BoolValuedVariant:
instance of the proper variant
"""
return self.make_variant(self.default)
def make_variant(self, value):
"""Factory that creates a variant holding the value passed as
a parameter.
Args:
value: value that will be hold by the variant
Returns:
MultiValuedVariant or SingleValuedVariant or BoolValuedVariant:
instance of the proper variant
"""
return self.variant_cls(self.name, value)
@property
def variant_cls(self):
"""Proper variant class to be used for this configuration."""
if self.multi:
return MultiValuedVariant
elif self.values == (True, False):
return BoolValuedVariant
return SingleValuedVariant
def __eq__(self, other):
return (self.name == other.name and
self.default == other.default and
self.values == other.values and
self.multi == other.multi and
self.single_value_validator == other.single_value_validator and
self.group_validator == other.group_validator)
def __ne__(self, other):
return not self == other
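# Illustrative sketch (not part of Spack itself): how ``variant_cls`` picks the
# concrete spec class from the declaration, using the classes defined later in
# this module:
#
#     bool_v = Variant('shared', default=True, description='build shared libs')
#     bool_v.variant_cls    # BoolValuedVariant, since values == (True, False)
#
#     multi_v = Variant('cuda_arch', default='none', description='...',
#                       values=('none', '70', '80'), multi=True)
#     multi_v.variant_cls   # MultiValuedVariant, since multi is True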
def implicit_variant_conversion(method):
"""Converts other to type(self) and calls method(self, other)
Args:
method: any predicate method that takes another variant as an argument
Returns: decorated method
"""
@functools.wraps(method)
def convert(self, other):
# We don't care if types are different as long as I can convert
# other to type(self)
try:
other = type(self)(other.name, other._original_value)
except (error.SpecError, ValueError):
return False
return method(self, other)
return convert
@lang.lazy_lexicographic_ordering
class AbstractVariant(object):
"""A variant that has not yet decided who it wants to be. It behaves like
a multi valued variant which **could** do things.
This kind of variant is generated during parsing of expressions like
``foo=bar`` and differs from multi valued variants because it will
satisfy any other variant with the same name. This is because it **could**
do it if it grows up to be a multi valued variant with the right set of
values.
"""
def __init__(self, name, value):
self.name = name
# Stores 'value' after a bit of massaging
# done by the property setter
self._value = None
self._original_value = None
# Invokes property setter
self.value = value
@staticmethod
def from_node_dict(name, value):
"""Reconstruct a variant from a node dict."""
if isinstance(value, list):
# read multi-value variants in and be faithful to the YAML
mvar = MultiValuedVariant(name, ())
mvar._value = tuple(value)
mvar._original_value = mvar._value
return mvar
elif str(value).upper() == 'TRUE' or str(value).upper() == 'FALSE':
return BoolValuedVariant(name, value)
return SingleValuedVariant(name, value)
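    # Example (sketch): the node-dict value determines the concrete class:
    #
    #     AbstractVariant.from_node_dict('foo', ['a', 'b'])   # MultiValuedVariant
    #     AbstractVariant.from_node_dict('bar', 'True')       # BoolValuedVariant
    #     AbstractVariant.from_node_dict('baz', 'x86')        # SingleValuedVariant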
def yaml_entry(self):
"""Returns a key, value tuple suitable to be an entry in a yaml dict.
Returns:
tuple: (name, value_representation)
"""
return self.name, list(self.value)
@property
def value(self):
"""Returns a tuple of strings containing the values stored in
the variant.
Returns:
tuple: values stored in the variant
"""
return self._value
@value.setter
def value(self, value):
self._value_setter(value)
def _value_setter(self, value):
# Store the original value
self._original_value = value
if not isinstance(value, (tuple, list)):
# Store a tuple of CSV string representations
# Tuple is necessary here instead of list because the
# values need to be hashed
value = re.split(r'\s*,\s*', str(value))
for val in special_variant_values:
if val in value and len(value) > 1:
msg = "'%s' cannot be combined with other variant" % val
msg += " values."
raise InvalidVariantValueCombinationError(msg)
# With multi-value variants it is necessary
# to remove duplicates and give an order
# to a set
self._value = tuple(sorted(set(value)))
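    # Example (sketch): the setter normalizes a CSV string into a sorted,
    # de-duplicated tuple, so
    #
    #     v = AbstractVariant('foo', 'b, a, a')
    #     v.value   # ('a', 'b')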
def _cmp_iter(self):
yield self.name
value = self._value
if not isinstance(value, tuple):
value = (value,)
value = tuple(str(x) for x in value)
yield value
def copy(self):
"""Returns an instance of a variant equivalent to self
Returns:
AbstractVariant: a copy of self
>>> a = MultiValuedVariant('foo', True)
>>> b = a.copy()
>>> assert a == b
>>> assert a is not b
"""
return type(self)(self.name, self._original_value)
@implicit_variant_conversion
def satisfies(self, other):
"""Returns true if ``other.name == self.name``, because any value that
other holds and is not in self yet **could** be added.
Args:
other: constraint to be met for the method to return True
Returns:
bool: True or False
"""
# If names are different then `self` does not satisfy `other`
# (`foo=bar` will never satisfy `baz=bar`)
return other.name == self.name
@implicit_variant_conversion
def compatible(self, other):
"""Returns True if self and other are compatible, False otherwise.
As there is no semantic check, two VariantSpec are compatible if
either they contain the same value or they are both multi-valued.
Args:
other: instance against which we test compatibility
Returns:
bool: True or False
"""
# If names are different then `self` is not compatible with `other`
# (`foo=bar` is incompatible with `baz=bar`)
return other.name == self.name
@implicit_variant_conversion
def constrain(self, other):
"""Modify self to match all the constraints for other if both
instances are multi-valued. Returns True if self changed,
False otherwise.
Args:
other: instance against which we constrain self
Returns:
bool: True or False
"""
Analyze spike shapes - pulled out of IVCurve 2/6/2016 pbm.
Allows routine to be used to analyze spike trains independent of acq4's data models.
Create instance, then call setup to define the "Clamps" object and the spike threshold.
The Clamps object must have the following variables defined:
commandLevels (current injection levels, list)
time_base (np.array of times corresponding to traces)
data_mode (string, indicating current or voltgae clamp)
tstart (time for start of looking at spikes; ms)
tend (time to stop looking at spikes; ms)
trace (the data trace itself, numpy array records x points)
sample_interval (time between samples, sec)
values (command waveforms; why it is called this in acq4 is a mystery)
Note that most of the results from this module are accessed either
as class variables, or through the class variable analysis_summary,
a dictionary with key analysis results.
IVCurve uses the analysis_summary to post results to an sql database.
<NAME>, Ph.D. 2016-2019
for Acq4 (and beyond)
"""
from collections import OrderedDict
import os
import os.path
from pathlib import Path
import inspect
import sys
import itertools
import functools
import numpy as np
import scipy
from . import Utility # pbm's utilities...
from . import Fitting # pbm's fitting stuff...
import pprint
import time
this_source_file = 'ephysanalysis.SpikeAnalysisrc'
class SpikeAnalysis():
def __init__(self):
        self.threshold = 0.
self.Clamps = None
self.analysis_summary = {}
self.verbose = False
self.FIGrowth = 1 # use function FIGrowth1 (can use simpler version FIGrowth 2 also)
self.analysis_summary['FI_Growth'] = [] # permit analysis of multiple growth functions.
self.detector = 'argrelmax'
def setup(self, clamps=None, threshold=None, refractory:float=0.0007, peakwidth:float=0.001,
verify=False, interpolate=True, verbose=False, mode='peak', min_halfwidth=0.010,
data_time_units:str = 's', data_volt_units:str='V'):
"""
configure the inputs to the SpikeAnalysis class
Parameters
        ----------
clamps : class (default: None)
PatchEphys clamp data holding/accessing all ephys data for this analysis
threshold : float (default: None)
Voltage threshold for spike detection
refractory : float (default 0.0007)
Minimum time between detected spikes, in seconds (or units of the clamp
time base)
peakwidth : float (default: 0.001)
When using "peak" as method in findspikes, this is the peak width maximum in sec
min_halfwidth : float (default: 0.010)
minimum spike half width in seconds. Default value is deliberately large...
verify : boolean (default: False)
interpolate : boolean (default: True)
Use interpolation to get spike threshold time and half-widths
mode : string (default: 'peak')
if using detector "peak", this is mode passed to findspikes
verbose : boolean (default: False)
Set true to get lots of print out while running - used
mostly for debugging.
"""
if clamps is None or threshold is None:
raise ValueError("Spike Analysis requires defined clamps and threshold")
self.Clamps = clamps
assert data_time_units in ['s', 'ms']
assert data_volt_units in ['V', 'mV']
self.time_units = data_time_units
self.volt_units = data_volt_units # needed by spike detector for data conversion
self.threshold = threshold
self.refractory = refractory
self.interpolate = interpolate # use interpolation on spike thresholds...
self.peakwidth = peakwidth
self.min_halfwidth = min_halfwidth
self.verify = verify
self.verbose = verbose
self.mode = mode
self.ar_window = 0.1
self.ar_lastspike = 0.075
self.min_peaktotrough = 0.010 # change in V on falling phase to be considered a spike
self.max_spike_look = 0.010 # msec over which to measure spike widths
def set_detector(self, detector:str='argrelmax'):
assert detector in ['argrelmax', 'threshold', 'Kalluri']
self.detector = detector
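    # Usage sketch (``clamps`` is any object exposing the attributes listed in
    # the module docstring; the names and values here are illustrative only):
    #
    #     spk = SpikeAnalysis()
    #     spk.setup(clamps=clamps, threshold=-0.020)   # threshold in volts
    #     spk.set_detector('argrelmax')
    #     spk.analyzeSpikes()
    #     spk.analysis_summary['FI_Curve']             # command levels vs. spike counts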
def analyzeSpikes(self, reset=True):
"""
analyzeSpikes: Using the threshold set in the control panel, count the
number of spikes in the stimulation window (self.Clamps.tstart, self.Clamps.tend)
Updates the spike plot(s).
The following class variables are modified upon successful analysis and return:
self.spikecount: a 1-D numpy array of spike counts, aligned with the
current (command)
self.adapt_ratio: the adaptation ratio of the spike train
self.fsl: a numpy array of first spike latency for each command level
self.fisi: a numpy array of first interspike intervals for each
command level
self.nospk: the indices of command levels where no spike was detected
self.spk: the indices of command levels were at least one spike
was detected
self.analysis_summary : Dictionary of results.
Parameters
----------
None
Returns
-------
Nothing, but see the list of class variables that are modified
"""
if reset:
self.analysis_summary['FI_Growth'] = [] # permit analysis of multiple growth functions.
twin = self.Clamps.tend - self.Clamps.tstart # measurements window in seconds
maxspkrate = 50 # max rate to count in adaptation is 50 spikes/second
minspk = 4
        maxspk = int(maxspkrate*twin) # scale max count by range of spike counts
#print('max spike rate: ', maxspk)
ntr = len(self.Clamps.traces)
self.spikecount = np.zeros(ntr)
self.fsl = np.zeros(ntr)
self.fisi = np.zeros(ntr)
ar = np.zeros(ntr)
self.allisi = []
self.spikes = [[] for i in range(ntr)]
self.spikeIndices = [[] for i in range(ntr)]
#print 'clamp start/end: ', self.Clamps.tstart, self.Clamps.tend
lastspikecount = 0
U = Utility.Utility()
for i in range(ntr): # this is where we should parallelize the analysis for spikes
spikes = U.findspikes(self.Clamps.time_base, np.array(self.Clamps.traces[i]),
self.threshold, t0=self.Clamps.tstart,
t1=self.Clamps.tend,
dt=self.Clamps.sample_interval,
mode=self.mode, # mode to use for finding spikes
interpolate=self.interpolate,
detector=self.detector,
mindip = 1e-2,
refract=self.refractory,
peakwidth=self.peakwidth,
data_time_units=self.time_units,
data_volt_units=self.volt_units,
verify=self.verify,
debug=False)
# print (ntr, i, self.Clamps.values[i], len(spikes))
if len(spikes) == 0:
# print ('no spikes found')
continue
spikes = np.array(spikes)
self.spikes[i] = spikes
# print 'found %d spikes in trace %d' % (len(spikes), i)
self.spikeIndices[i] = [np.argmin(np.fabs(self.Clamps.time_base-t)) for t in spikes]
self.spikecount[i] = len(spikes)
self.fsl[i] = (spikes[0] - self.Clamps.tstart)*1e3
if len(spikes) > 1:
self.fisi[i] = (spikes[1] - spikes[0])*1e3 # first ISI
self.allisi.append(np.diff(spikes)*1e3)
# for Adaptation ratio analysis: limit spike rate, and also only on monotonic increase in rate
# 8/2018:
# AR needs to be tethered to time into stimulus
# Here we return a standardized ar measured during the first 100 msec
# (standard ar)
if (minspk <= len(spikes)) and (self.spikecount[i] > lastspikecount):
spx = spikes[np.where(spikes-self.Clamps.tstart < self.ar_window)] # default is 100 msec
if len(spx) >= 4: # at least 4 spikes
if spx[-1] > self.ar_lastspike+self.Clamps.tstart: # default 75 msec
misi = np.mean(np.diff(spx[-2:]))*1e3 # last ISIs in the interval
ar[i] = misi / self.fisi[i]
lastspikecount = self.spikecount[i] # update rate (sets max rate)
iAR = np.where(ar > 0) # valid AR and monotonically rising
self.adapt_ratio = np.nan
if len(ar[iAR]) > 0:
self.adapt_ratio = np.mean(ar[iAR]) # only where we made the measurement
self.ar = ar # stores all the ar values
self.analysis_summary['AdaptRatio'] = self.adapt_ratio # only the valid values
self.nospk = np.where(self.spikecount == 0)
self.spk = np.where(self.spikecount > 0)[0]
self.analysis_summary['FI_Curve'] = np.array([self.Clamps.values, self.spikecount])
self.analysis_summary['FiringRate'] = np.max(self.spikecount)/(self.Clamps.tend - self.Clamps.tstart)
self.spikes_counted = True
# self.update_SpikePlots()
def analyzeSpikes_brief(self, mode='baseline'):
"""
analyzeSpikes_brief: Using the threshold set in the control panel, count the
        number of spikes in a window and fill out an analysis summary dict with
the spike latencies in that window (from 0 time)
Parameters
----------
mode: str (default : baseline)
baseline: from 0 to self.Clamps.tstart
poststimulus : from self.Clamps.tend to end of trace
evoked : from self.Clamps.start to self.Clamps.end
Returns:
-------
Nothing, but see the list of class variables that are modified
Class variable modified is the
self.analysis_summary : Dictionary of spike times. Key is
'spikes_baseline'
'spikes_poststimulus'
'spikes_evoked'
according to the mode in the call
"""
if mode == 'baseline':
twin = [0., self.Clamps.tstart]
elif mode == 'evoked':
twin = [self.Clamps.tstart,self.Clamps.tend]
elif mode == 'poststimulus':
twin = [self.Clamps.tend, np.max(self.Clamps.time_base)]
else:
            raise ValueError(f'{this_source_file:s}:: analyzeSpikes_brief requires mode to be "baseline", "evoked", or "poststimulus"')
ntr = len(self.Clamps.traces)
allspikes = [[] for i in range(ntr)]
spikeIndices = [[] for i in range(ntr)]
U = Utility.Utility()
for i in range(ntr):
spikes = U.findspikes(self.Clamps.time_base, np.array(self.Clamps.traces[i]),
self.threshold, t0=twin[0],
t1=twin[1],
dt=self.Clamps.sample_interval,
mode=self.mode, # mode to use for finding spikes
interpolate=self.interpolate,
detector=self.detector,
refract=self.refractory,
peakwidth=self.peakwidth,
verify=self.verify,
debug=False)
if len(spikes) == 0:
#print 'no spikes found'
continue
allspikes[i] = spikes
self.analysis_summary[mode+'_spikes'] = allspikes
def _timeindex(self, t):
"""
Find the index into the time_base of the Clamps structure that
corresponds to the time closest to t
Parameters
----------
t : float (time, no default)
Returns
-------
index : int (index to the closest time)
"""
return np.argmin(self.Clamps.time_base-t)
def _initialize_summarymeasures(self):
self.analysis_summary['AP1_Latency'] = np.inf
self.analysis_summary['AP1_HalfWidth'] = np.inf
self.analysis_summary['AP1_HalfWidth_interpolated'] = np.inf
        self.analysis_summary['AP2_Latency'] = np.inf
"""
This is an example of using the k-nearest-neighbors (KNN) algorithm for face recognition.
When should I use this example?
This example is useful when you wish to recognize a large set of known people,
and make a prediction for an unknown person in a feasible computation time.
Algorithm Description:
The KNN classifier is first trained on a set of labeled (known) faces and can then predict the person
in an unknown image by finding the k most similar faces (images with the closest face features under Euclidean distance)
in its training set, and performing a majority vote (possibly weighted) on their labels.
For example, if k=3 and the three closest face images to the given image in the training set are one image of Biden
and two images of Obama, the result would be 'Obama'.
* This implementation uses a weighted vote, such that the votes of closer neighbors are weighted more heavily.
Usage:
1. Prepare a set of images of the known people you want to recognize. Organize the images in a single directory
with a sub-directory for each known person.
2. Then, call the 'train' function with the appropriate parameters. Make sure to pass in the 'model_save_path' if you
want to save the model to disk so you can re-use the model without having to re-train it.
3. Call 'predict' and pass in your trained model to recognize the people in an unknown image.
NOTE: This example requires scikit-learn to be installed! You can install it with pip:
$ pip3 install scikit-learn
"""
import math
import cv2
import time
from sklearn import neighbors
import os
import os.path
import pickle
from PIL import Image, ImageDraw
import face_recognition
from face_recognition.face_recognition_cli import image_files_in_folder
import numpy as np
ALLOWED_EXTENSIONS = {'png', 'jpg', 'jpeg'}
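# The weighted vote described above can be sketched directly with numpy. This helper is
# illustrative only (it is not used elsewhere in this script) and assumes `known_encodings`
# is a list of face encodings with a parallel list of `labels`.
def weighted_knn_vote_sketch(known_encodings, labels, query_encoding, k=3):
    """Return the label chosen by a distance-weighted vote among the k nearest encodings."""
    distances = np.linalg.norm(np.asarray(known_encodings) - np.asarray(query_encoding), axis=1)
    nearest = np.argsort(distances)[:k]
    votes = {}
    for idx in nearest:
        # Closer neighbors get larger weights; the epsilon avoids division by zero.
        votes[labels[idx]] = votes.get(labels[idx], 0.0) + 1.0 / (distances[idx] + 1e-9)
    return max(votes, key=votes.get)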
def train(train_dir, model_save_path=None, n_neighbors=None, knn_algo='ball_tree', verbose=False):
"""
Trains a k-nearest neighbors classifier for face recognition.
:param train_dir: directory that contains a sub-directory for each known person, with its name.
(View in source code to see train_dir example tree structure)
Structure:
<train_dir>/
├── <person1>/
│ ├── <somename1>.jpeg
│ ├── <somename2>.jpeg
│ ├── ...
├── <person2>/
│ ├── <somename1>.jpeg
│ └── <somename2>.jpeg
└── ...
:param model_save_path: (optional) path where the trained model will be saved on disk
:param n_neighbors: (optional) number of neighbors to use; chosen automatically if not specified
:param knn_algo: (optional) underlying data structure to support knn. default is ball_tree
:param verbose: verbosity of training
:return: knn_clf, the trained KNN classifier
"""
X = []
y = []
# Loop over each person in the training set
for class_dir in os.listdir(train_dir):
if not os.path.isdir(os.path.join(train_dir, class_dir)):
continue
# Loop over each training image for the current person
for img_path in image_files_in_folder(os.path.join(train_dir, class_dir)):
image = face_recognition.load_image_file(img_path)
face_bounding_boxes = face_recognition.face_locations(image, model="cnn") # exactly one box means the image is usable for training
if len(face_bounding_boxes) != 1:
# Skip this image if it contains no face or more than one face
if verbose:
print("Image {} not suitable for training: {}".format(img_path, "Didn't find a face" if len(face_bounding_boxes) < 1 else "Found more than one face"))
else:
# Add the encoding of the face in this image to the training set (train_face_encodings below was meant for later recognition)
# for train_face_encoding in train_face_encodings:
# train_face_encoding=face_recognition.face_encodings(image)
# train_face_encodings.append(train_face_encoding)
X.append(face_recognition.face_encodings(image, known_face_locations=face_bounding_boxes, model="large")[0])
y.append(class_dir)
# Determine the number of neighbors for the KNN classifier
if n_neighbors is None:
n_neighbors = int(round(math.sqrt(len(X)))) # square root of the training-set size, rounded to the nearest integer
if verbose:
print("Chose n_neighbors automatically:", n_neighbors)
# Build and train the KNN classifier
knn_clf = neighbors.KNeighborsClassifier(n_neighbors=n_neighbors, algorithm=knn_algo, weights='distance')
knn_clf.fit(X, y)
# Save the trained KNN classifier
if model_save_path is not None:
with open(model_save_path, 'wb') as f: # open f in binary write mode
pickle.dump(knn_clf, f) # serialize knn_clf and save the model to f
return knn_clf
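# Example call (paths are placeholders, mirroring the commented-out call in __main__ below):
#   knn_clf = train("knn_examples/train", model_save_path="trained_knn_model.clf", n_neighbors=2, verbose=True)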
# Encode the images in the known-faces directory (for_rec)
def encoding_known_face(known_faces_dir):
known_faces = [] # encodings of the images in for_rec
known_locations = [] # face locations of the images in for_rec
#known_face_names = [] # names of the images in for_rec
len_of_for_rec = len(os.listdir(known_faces_dir))
time_load = 0
time_location_known_faces = 0
for class_dir in os.listdir(known_faces_dir):
if not os.path.isdir(os.path.join(known_faces_dir, class_dir)):
continue
for img_path in image_files_in_folder(os.path.join(known_faces_dir, class_dir)):
#start5 = time.clock()
img = face_recognition.load_image_file(img_path)
#end5 = time.clock()
#time5 = end5 - start5
#time_load += time5
#start4 = time.clock()
known_locations.append(face_recognition.face_locations(img,model="cnn")[0])
#end4 = time.clock()
#time4 = end4 - start4
#time_location_known_faces += time4
known_faces.append(face_recognition.face_encodings(img, known_face_locations=[known_locations[-1]])[0]) # encode only the face just located in this image
#known_face_names.append(os.path.basename(os.path.splitext(img_path)[0])) # store the file name in face_names
#print("Loading images took: " + str(time_load) + ' s')
#print("Locating faces took: " + str(time_location_known_faces) + ' s')
#print(known_faces)
#print(face_names)
#print(len_of_for_rec)
return known_faces, len_of_for_rec
name = "name"
def process_frame(known_faces_dir, knn_clf=None, model_path=None, distance_threshold=0.60):
# if not os.path.isfile(X_img_path) or os.path.splitext(X_img_path)[1][1:] not in ALLOWED_EXTENSIONS:
# raise Exception("Invalid image path: {}".format(X_img_path))
if knn_clf is None and model_path is None:
raise Exception("Must supply knn classifier either through knn_clf or model_path")
# Load the KNN model
if knn_clf is None:
with open(model_path, 'rb') as f:
knn_clf = pickle.load(f)
video_capture = cv2.VideoCapture(0, cv2.CAP_DSHOW) # open the default webcam; video_capture is used below, so this can no longer stay commented out
#face_names = [] # recognized names
face_locations = []
#known_locations = []
face_encodings = []
start2 = time.perf_counter() # time.clock() was removed in Python 3.8
known_faces, len_of_for_rec = encoding_known_face(known_faces_dir) # encodings of the database faces and how many there are
end2 = time.perf_counter()
encoding_time = end2 - start2
print("Encoding the known faces took: " + str(encoding_time) + ' s')
#print(len_of_for_rec)
process_this_frame = True
global name
threshold = 1/(0.75 * len_of_for_rec)
#print(threshold)
#print(known_face_names)
#print(known_faces)
while True:
ret, frame = video_capture.read()
small_frame = cv2.resize(frame, (0, 0), fx=0.25, fy=0.25)
if process_this_frame:
#start3 = time.clock()
face_locations = face_recognition.face_locations(small_frame, model="cnn") # locate faces
face_encodings = face_recognition.face_encodings(small_frame, face_locations, model="small") # compute face encodings
face_names = []
max_sims = []
#print(face_encodings)
for face_encoding in face_encodings:
#closest_distances = knn_clf.kneighbors(known_faces, n_neighbors=1)
#print(closest_distances)
#face_names.append(knn_clf.predict([face_encoding])) # store the matched name
#print(knn_clf.predict([face_encoding])) # returns the face label, i.e. the folder name in the train directory
#start3 = time.clock()
if np.max(knn_clf.predict_proba([face_encoding])) > threshold: # 1 > threshold
#print(knn_clf.predict_proba([face_encoding])) #[[0.0.1.0.0....]]
if knn_clf.predict([face_encoding]):
#print(knn_clf.predict([face_encoding])) #['liang junyu']
name = str(knn_clf.predict([face_encoding])) # gives "['*']"; the [ ] and ' characters are stripped before display
else:
name = "unknown"
else:
name = "unknown"
#print(knn_clf.predict_proba([face_encoding])) # probability that the face matches each class in the train folder
face_names.append(name)
similarities = []
#face_distance = face_recognition.face_distance(known_faces, face_encoding)
#print(max(face_distance))
for known_face in known_faces: # cosine similarity
feature_1 = np.array(face_encoding)
feature_2 = np.array(known_face)
similarity = (np.dot(feature_1, feature_2)) / (
np.sqrt(np.dot(feature_1, feature_1)) * np.sqrt(np.dot(feature_2, feature_2)))
similarities.append(similarity)
#print(similarities)
max_sim = str(max(similarities) * 100)[:5] + '%'
#print(max_sim)
max_sims.append(max_sim)
#end3 = time.clock()
#process_time = end3 - start3
#print("人脸识别需要:" + str(process_time) + ' s')
#print(max_sim)
#process_this_frame = not process_this_frame
#print(face_names)
for (top, right, bottom, left), name, sim in zip(face_locations, face_names, max_sims):
top *= 4
right *= 4
bottom *= 4
left *= 4
#name = face_names[-1] # name holds the most recently recognized name, appended at the end of the list
cv2.rectangle(frame, (left, top), (right, bottom), (0, 0, 255), 2)
cv2.rectangle(frame, (left, bottom - 35), (right, bottom), (0, 0, 255), 2)
cv2.rectangle(frame, (left, top - 35), (right + 60, top), (0, 0, 255), 2)
font = cv2.FONT_HERSHEY_DUPLEX
# Draw the name
cv2.putText(frame, name.replace("'", '').replace("[",'').replace("]",'').title(), (left + 6, bottom - 6), font, 1.0, (255, 255, 255), 1)
# Draw the similarity score
if name == "unknown":
cv2.putText(frame, "unrecognized", (left + 6, top - 10), font, 0.5, (255, 255, 255), 1)
else:
cv2.putText(frame, sim, (left + 6, top - 10), font, 0.5, (255, 255, 255), 1)
cv2.imshow('Video', frame)
if cv2.waitKey(1) & 0xFF == ord('q'):
break
video_capture.release()
cv2.destroyAllWindows()
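# The similarity loop above computes plain cosine similarity. The same idea as a
# stand-alone helper (illustrative only; not called by the original code):
def cosine_similarity_sketch(encoding_a, encoding_b):
    """Cosine similarity between two face encodings, in [-1, 1]."""
    a = np.array(encoding_a)
    b = np.array(encoding_b)
    return float(np.dot(a, b) / (np.linalg.norm(a) * np.linalg.norm(b)))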
# def predict(X_img_path, knn_clf=None, model_path=None, distance_threshold=0.6):
# """
# Recognizes faces in given image using a trained KNN classifier
#
# :param X_img_path: path to the test image
# :param knn_clf: a trained KNN classifier
# :param model_path: path to a saved KNN model
# :param distance_threshold: distance threshold between the face encodings in the test image and the model
# :return: list of names and face locations for the recognized faces in the image: [(name, bounding box), ...].
# For faces of unrecognized persons, the name 'unknown' will be returned.
# """
#
#
# if not os.path.isfile(X_img_path) or os.path.splitext(X_img_path)[1][1:] not in ALLOWED_EXTENSIONS:
# raise Exception("Invalid image path: {}".format(X_img_path))
#
# if knn_clf is None and model_path is None:
# raise Exception("Must supply knn classifier either through knn_clf or model_path")
#
# # Load the KNN model
# if knn_clf is None:
# with open(model_path, 'rb') as f:
# knn_clf = pickle.load(f)
#
# # Load the image file and find the face locations
# X_img = face_recognition.load_image_file(X_img_path)
# X_face_locations = face_recognition.face_locations(X_img)
#
# # If no faces are found in the image, return an empty result set
# if len(X_face_locations) == 0:
# return []
#
# # Compute the face encodings in the test image
# faces_encodings = face_recognition.face_encodings(X_img, known_face_locations=X_face_locations)
#
# # Use the KNN model to find the best matches for the test faces
# closest_distances = knn_clf.kneighbors(faces_encodings, n_neighbors=1)
# are_matches = [closest_distances[0][i][0] <= distance_threshold for i in range(len(X_face_locations))]
#
# # Predict the classes and drop matches outside the distance threshold
# return [(pred, loc) if rec else ("unknown", loc) for pred, loc, rec in zip(knn_clf.predict(faces_encodings), X_face_locations, are_matches)]
#
#
# def show_prediction_labels_on_image(img_path, predictions):
# """
# # Label the recognized faces on the image and display it.
#
# :param img_path: path to image to be recognized
# :param predictions: results of the predict function
# :return:
# """
# pil_image = Image.open(img_path).convert("RGB")
# draw = ImageDraw.Draw(pil_image)
#
# for name, (top, right, bottom, left) in predictions:
# # Draw a box around the face using the Pillow module
# draw.rectangle(((left, top), (right, bottom)), outline=(0, 0, 255))
#
# # There's a bug in Pillow where it blows up with non-UTF-8 text
# # when using the default bitmap font
# name = name.encode("UTF-8")
#
# # Draw a label with a name below the face
# text_width, text_height = draw.textsize(name)
# draw.rectangle(((left, bottom - text_height - 10), (right, bottom)), fill=(0, 0, 255), outline=(0, 0, 255))
# draw.text((left + 6, bottom - text_height - 5), name, fill=(255, 255, 255, 255))
#
# # Remove the drawing library from memory as per the Pillow docs
# del draw
#
# # Display the resulting image
# pil_image.show()
if __name__ == "__main__":
# STEP 1: Train the KNN classifier and save it to disk
# Once the model is trained and saved, you can skip this step next time.
print("Training KNN classifier...")
#start1 = time.clock()
#classifier = train("knn_examples/train", model_save_path="trained_knn_model.clf", n_neighbors=2)
#end1 = time.clock()
#train_time = end1 - start1
#print("训练需要:" + str(train_time) + ' s')
print("Training complete!")
#start2 = time.clock()
#encoding_known_face(known_faces_dir="knn_examples\\")
#end2 | |
import argparse
import sys
import os
import time
import csv
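# The branches below repeat two small patterns: pulling the text of the n-th XML tag and
# converting an acquisition date to epoch milliseconds. Hypothetical helpers (not used by
# the original code) that capture those patterns:
def _tag_text(xmldoc, tag, index=0):
    """Return the text content of the index-th occurrence of `tag` in a parsed XML document."""
    return xmldoc.getElementsByTagName(tag)[index].firstChild.data

def _epoch_ms(acquisition):
    """Convert an ISO acquisition timestamp such as '2017-05-01T18:20:00' to epoch milliseconds."""
    date_time = acquisition.split('T')[0]
    return int(time.mktime(time.strptime(date_time, '%Y-%m-%d'))) * 1000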
def metadata(asset,mf,mfile,errorlog):
if asset == 'PSO': # PS OrthoTile Analytic
folder = mf
with open(mfile,'wb') as csvfile:
writer=csv.DictWriter(csvfile,fieldnames=["id_no", "system:time_start", "platform", "satType","satID", "tileID", "numBands", "cloudcover","incAngle","illAzAngle","illElvAngle","azAngle","spcAngle","rsf","refCoeffB1","refCoeffB2","refCoeffB3","refCoeffB4"], delimiter=',')
writer.writeheader()
with open(errorlog,'wb') as csvfile:
writer=csv.DictWriter(csvfile,fieldnames=["id_no"], delimiter=',')
writer.writeheader()
for filename in os.listdir(folder):
infilename = os.path.join(folder,filename)
fsp=filename.split("_x")[0]
try:
from xml.dom import minidom #This gets the main xml parse tree
xmldoc=minidom.parse(infilename)
ps=xmldoc.getElementsByTagName("ps:EarthObservationMetaData")[0]
observation=xmldoc.getElementsByTagName("ps:EarthObservationResult") [0]
eopfilename=xmldoc.getElementsByTagName("eop:fileName")[0].firstChild.data
meta=xmldoc.getElementsByTagName("ps:EarthObservationMetaData")[0]
acquisition= meta.getElementsByTagName("eop:acquisitionDate")[0].firstChild.data
tile=meta.getElementsByTagName("ps:tileId")[0].firstChild.data
equip=xmldoc.getElementsByTagName("eop:Platform")[0]
platform=equip.getElementsByTagName("eop:shortName")[0].firstChild.data
sid=equip.getElementsByTagName("eop:serialIdentifier")[0].firstChild.data
equip=xmldoc.getElementsByTagName("eop:instrument")[0]
sattype=equip.getElementsByTagName("eop:shortName")[0].firstChild.data
bands=xmldoc.getElementsByTagName("ps:numBands")[0].firstChild.data
cloud=xmldoc.getElementsByTagName("opt:cloudCoverPercentage")[0].firstChild.data
psb=xmldoc.getElementsByTagName("ps:bandNumber")[0].firstChild.data
psb1=xmldoc.getElementsByTagName("ps:bandNumber")[1].firstChild.data
psb3=xmldoc.getElementsByTagName("ps:bandNumber")[2].firstChild.data
psb4=xmldoc.getElementsByTagName("ps:bandNumber")[3].firstChild.data
psbrad=xmldoc.getElementsByTagName("ps:radiometricScaleFactor")[0].firstChild.data
psb1ref=xmldoc.getElementsByTagName("ps:reflectanceCoefficient")[0].firstChild.data
psb2ref=xmldoc.getElementsByTagName("ps:reflectanceCoefficient")[1].firstChild.data
psb3ref=xmldoc.getElementsByTagName("ps:reflectanceCoefficient")[2].firstChild.data
psb4ref=xmldoc.getElementsByTagName("ps:reflectanceCoefficient")[3].firstChild.data
psia=xmldoc.getElementsByTagName("eop:incidenceAngle")[0].firstChild.data
psilaz=xmldoc.getElementsByTagName("opt:illuminationAzimuthAngle")[0].firstChild.data
psilelv=xmldoc.getElementsByTagName("opt:illuminationElevationAngle")[0].firstChild.data
psaz=xmldoc.getElementsByTagName("ps:azimuthAngle")[0].firstChild.data
pssca=xmldoc.getElementsByTagName("ps:spaceCraftViewAngle")[0].firstChild.data
print("ID_Name:", eopfilename.split(".")[0])
print("Acquisition Date:", acquisition.split("T")[0])
print("Satellite Type:", platform)
print("ShortName:", sattype)
print("Satellite ID:", str(sid))
print("Tile ID:", tile)
print("Number of Bands:", bands)
print("Cloud Cover:", format(float(cloud),'.2f'))
print("PS Incidence Angle",format(float(psia),'.4f'))
print("PS illumination azimuth angle",format(float(psilaz),'.2f'))
print("PS illumination elevation angle",format(float(psilelv),'.2f'))
print("PS Azimuth angle",format(float(psaz),'.2f'))
print("PS SpaceCraft angle",format(float(pssca),'.4f'))
print("Radiometric Scale Factor",psbrad)
print("ReflectanceCoefficient B1",format(float(psb1ref),'.8f'))
print("ReflectanceCoefficient B2",format(float(psb2ref),'.8f'))
print("ReflectanceCoefficient B3",format(float(psb3ref),'.8f'))
print("ReflectanceCoefficient B4",format(float(psb4ref),'.8f'))
date_time = acquisition.split("T")[0]
pattern = '%Y-%m-%d'
epoch = int(time.mktime(time.strptime(date_time, pattern)))*1000
print("epoch time", epoch)
with open(mfile,'a') as csvfile:
writer=csv.writer(csvfile,delimiter=',',lineterminator='\n')
writer.writerow([fsp,epoch,platform,sattype,str(sid),tile,bands,format(float(cloud),'.2f'),format(float(psia),'.4f'),format(float(psilaz),'.2f'),
format(float(psilelv),'.2f'),format(float(psaz),'.2f'),format(float(pssca),'.4f'),psbrad,format(float(psb1ref),'.8f'),
format(float(psb2ref),'.8f'),format(float(psb3ref),'.8f'),format(float(psb4ref),'.8f')])
csvfile.close()
except Exception:
print(infilename)
with open(errorlog,'a') as csvfile:
writer=csv.writer(csvfile,delimiter=',',lineterminator='\n')
writer.writerow([infilename])
csvfile.close()
if asset == 'PSO_DN': #PS OrthoTile Analytic Derivative DN
folder = mf
with open(mfile,'wb') as csvfile:
writer=csv.DictWriter(csvfile,fieldnames=["id_no", "system:time_start", "platform", "satType","satID", "tileID", "numBands", "cloudcover","incAngle","illAzAngle","illElvAngle","azAngle","spcAngle"], delimiter=',')
writer.writeheader()
with open(errorlog,'wb') as csvfile:
writer=csv.DictWriter(csvfile,fieldnames=["id_no"], delimiter=',')
writer.writeheader()
for filename in os.listdir(folder):
infilename = os.path.join(folder, filename)
fsp = filename.split('_x')[0]
try:
from xml.dom import minidom
xmldoc = minidom.parse(infilename)
ps4band=xmldoc.getElementsByTagName('ps:EarthObservationMetaData')[0]
eopfilename = xmldoc.getElementsByTagName('eop:identifier')[0].firstChild.data
sid=xmldoc.getElementsByTagName("eop:serialIdentifier")[0].firstChild.data
acquisition = xmldoc.getElementsByTagName('eop:acquisitionDate')[0].firstChild.data
platform=xmldoc.getElementsByTagName("eop:shortName")[0].firstChild.data
tile=xmldoc.getElementsByTagName("ps:tileId")[0].firstChild.data
sattype=xmldoc.getElementsByTagName("eop:shortName")[1].firstChild.data
bands=xmldoc.getElementsByTagName("ps:numBands")[0].firstChild.data
cloud=xmldoc.getElementsByTagName("opt:cloudCoverPercentage")[0].firstChild.data
psb=xmldoc.getElementsByTagName("ps:bandNumber")[0].firstChild.data
psb1=xmldoc.getElementsByTagName("ps:bandNumber")[1].firstChild.data
psb3=xmldoc.getElementsByTagName("ps:bandNumber")[2].firstChild.data
psb4=xmldoc.getElementsByTagName("ps:bandNumber")[3].firstChild.data
psia=xmldoc.getElementsByTagName("eop:incidenceAngle")[0].firstChild.data
psilaz=xmldoc.getElementsByTagName("opt:illuminationAzimuthAngle")[0].firstChild.data
psilelv=xmldoc.getElementsByTagName("opt:illuminationElevationAngle")[0].firstChild.data
psaz=xmldoc.getElementsByTagName("ps:azimuthAngle")[0].firstChild.data
pssca=xmldoc.getElementsByTagName("ps:spaceCraftViewAngle")[0].firstChild.data
print ('ID_Name:', eopfilename.split('.')[0])
print ('Acquisition Date:', acquisition.split('T')[0])
print("Acquisition Date:", acquisition.split("T")[0])
print("Satellite Type:", platform)
print("ShortName:", sattype)
print("Satellite ID:", str(sid))
print("Tile ID:",tile)
print("Number of Bands:", bands)
print("Cloud Cover:", format(float(cloud),'.2f'))
print("PS Incidence Angle",format(float(psia),'.4f'))
print("PS illumination azimuth angle",format(float(psilaz),'.2f'))
print("PS illumination elevation angle",format(float(psilelv),'.2f'))
print("PS Azimuth angle",format(float(psaz),'.2f'))
print("PS SpaceCraft angle",format(float(pssca),'.4f'))
date_time = acquisition.split('T')[0]
pattern = '%Y-%m-%d'
epoch = int(time.mktime(time.strptime(date_time, pattern))) * 1000
print ('epoch time', epoch)
with open(mfile,'a') as csvfile:
writer=csv.writer(csvfile,delimiter=',',lineterminator='\n')
writer.writerow([fsp,epoch,platform,sattype,str(sid),tile,bands,format(float(cloud),'.2f'),format(float(psia),'.4f'),format(float(psilaz),'.2f'),
format(float(psilelv),'.2f'),format(float(psaz),'.2f'),format(float(pssca),'.4f')])
csvfile.close()
except Exception:
print(infilename)
with open(errorlog,'a') as csvfile:
writer=csv.writer(csvfile,delimiter=',',lineterminator='\n')
writer.writerow([infilename])
csvfile.close()
if asset == 'PSO_V': #PS OrthoTile Analytic Derivative Visual
folder = mf
with open(mfile,'wb') as csvfile:
writer=csv.DictWriter(csvfile,fieldnames=["id_no", "system:time_start", "platform", "satType","satID", "tileID", "numBands", "cloudcover","incAngle","illAzAngle","illElvAngle","azAngle","spcAngle"], delimiter=',')
writer.writeheader()
with open(errorlog,'wb') as csvfile:
writer=csv.DictWriter(csvfile,fieldnames=["id_no"], delimiter=',')
writer.writeheader()
for filename in os.listdir(folder):
infilename = os.path.join(folder, filename)
fsp = filename.split('_x')[0]
try:
from xml.dom import minidom
xmldoc = minidom.parse(infilename)
eopfilename = xmldoc.getElementsByTagName('eop:identifier')[0].firstChild.data
sid=xmldoc.getElementsByTagName("eop:serialIdentifier")[0].firstChild.data
acquisition = xmldoc.getElementsByTagName('eop:acquisitionDate')[0].firstChild.data
platform=xmldoc.getElementsByTagName("eop:shortName")[0].firstChild.data
tile=xmldoc.getElementsByTagName("ps:tileId")[0].firstChild.data
sattype=xmldoc.getElementsByTagName("eop:shortName")[1].firstChild.data
bands=xmldoc.getElementsByTagName("ps:numBands")[0].firstChild.data
cloud=xmldoc.getElementsByTagName("opt:cloudCoverPercentage")[0].firstChild.data
psia=xmldoc.getElementsByTagName("eop:incidenceAngle")[0].firstChild.data
psilaz=xmldoc.getElementsByTagName("opt:illuminationAzimuthAngle")[0].firstChild.data
psilelv=xmldoc.getElementsByTagName("opt:illuminationElevationAngle")[0].firstChild.data
psaz=xmldoc.getElementsByTagName("ps:azimuthAngle")[0].firstChild.data
pssca=xmldoc.getElementsByTagName("ps:spaceCraftViewAngle")[0].firstChild.data
print ('ID_Name:', eopfilename.split('.')[0])
print ('Acquisition Date:', acquisition.split('T')[0])
print("Acquisition Date:", acquisition.split("T")[0])
print("Satellite Type:", platform)
print("ShortName:", sattype)
print("Satellite ID:", str(sid))
print("Tile ID:",tile)
print("Number of Bands:", bands)
print("Cloud Cover:", format(float(cloud),'.2f'))
print("PS Incidence Angle",format(float(psia),'.4f'))
print("PS illumination azimuth angle",format(float(psilaz),'.2f'))
print("PS illumination elevation angle",format(float(psilelv),'.2f'))
print("PS Azimuth angle",format(float(psaz),'.2f'))
print("PS SpaceCraft angle",format(float(pssca),'.4f'))
date_time = acquisition.split('T')[0]
pattern = '%Y-%m-%d'
epoch = int(time.mktime(time.strptime(date_time, pattern))) * 1000
print ('epoch time', epoch)
with open(mfile,'a') as csvfile:
writer=csv.writer(csvfile,delimiter=',',lineterminator='\n')
writer.writerow([fsp,epoch,platform,sattype,str(sid),tile,bands,format(float(cloud),'.2f'),format(float(psia),'.4f'),format(float(psilaz),'.2f'),
format(float(psilelv),'.2f'),format(float(psaz),'.2f'),format(float(pssca),'.4f')])
csvfile.close()
except Exception:
print(infilename)
with open(errorlog,'a') as csvfile:
writer=csv.writer(csvfile,delimiter=',',lineterminator='\n')
writer.writerow([infilename])
csvfile.close()
if asset == 'PS4B': #PS 4 Band Scene Derivative Analytic
folder = mf
with open(mfile,'wb') as csvfile:
writer=csv.DictWriter(csvfile,fieldnames=["id_no", "system:time_start", "platform", "satType","satID", "numBands", "cloudcover","incAngle","illAzAngle","illElvAngle","azAngle","spcAngle","rsf","refCoeffB1","refCoeffB2","refCoeffB3","refCoeffB4"], delimiter=',')
writer.writeheader()
with open(errorlog,'wb') as csvfile:
writer=csv.DictWriter(csvfile,fieldnames=["id_no"], delimiter=',')
writer.writeheader()
for filename in os.listdir(folder):
infilename = os.path.join(folder, filename)
fsp = filename.split('_x')[0]
try:
from xml.dom import minidom
xmldoc = minidom.parse(infilename)
ps4band=xmldoc.getElementsByTagName('ps:EarthObservationMetaData')[0]
eopfilename = xmldoc.getElementsByTagName('eop:identifier')[0].firstChild.data
acquisition = xmldoc.getElementsByTagName('eop:acquisitionDate')[0].firstChild.data
sid=xmldoc.getElementsByTagName("eop:serialIdentifier")[0].firstChild.data
sattype=xmldoc.getElementsByTagName("eop:shortName")[1].firstChild.data
platform=xmldoc.getElementsByTagName("eop:shortName")[0].firstChild.data
bands=xmldoc.getElementsByTagName("ps:numBands")[0].firstChild.data
cloud=xmldoc.getElementsByTagName("opt:cloudCoverPercentage")[0].firstChild.data
psb=xmldoc.getElementsByTagName("ps:bandNumber")[0].firstChild.data
psb1=xmldoc.getElementsByTagName("ps:bandNumber")[1].firstChild.data
psb3=xmldoc.getElementsByTagName("ps:bandNumber")[2].firstChild.data
psb4=xmldoc.getElementsByTagName("ps:bandNumber")[3].firstChild.data
psbrad=xmldoc.getElementsByTagName("ps:radiometricScaleFactor")[0].firstChild.data
psb1ref=xmldoc.getElementsByTagName("ps:reflectanceCoefficient")[0].firstChild.data
psb2ref=xmldoc.getElementsByTagName("ps:reflectanceCoefficient")[1].firstChild.data
psb3ref=xmldoc.getElementsByTagName("ps:reflectanceCoefficient")[2].firstChild.data
psb4ref=xmldoc.getElementsByTagName("ps:reflectanceCoefficient")[3].firstChild.data
psia=xmldoc.getElementsByTagName("eop:incidenceAngle")[0].firstChild.data
psilaz=xmldoc.getElementsByTagName("opt:illuminationAzimuthAngle")[0].firstChild.data
psilelv=xmldoc.getElementsByTagName("opt:illuminationElevationAngle")[0].firstChild.data
psaz=xmldoc.getElementsByTagName("ps:azimuthAngle")[0].firstChild.data
pssca=xmldoc.getElementsByTagName("ps:spaceCraftViewAngle")[0].firstChild.data
print ('ID_Name:', eopfilename.split('.')[0])
print ('Acquisition Date:', acquisition.split('T')[0])
print("Acquisition Date:", acquisition.split("T")[0])
print("ShortName:", sattype)
print("Satellite ID:", str(sid))
print("Number of Bands:", bands)
print("Cloud Cover:", format(float(cloud),'.2f'))
print("PS Incidence Angle",format(float(psia),'.4f'))
print("PS illumination azimuth angle",format(float(psilaz),'.2f'))
print("PS illumination elevation angle",format(float(psilelv),'.2f'))
print("PS Azimuth angle",format(float(psaz),'.2f'))
print("PS SpaceCraft angle",format(float(pssca),'.4f'))
print("Radiometric Scale Factor",psbrad)
print("ReflectanceCoefficient B1",format(float(psb1ref),'.8f'))
print("ReflectanceCoefficient B2",format(float(psb2ref),'.8f'))
print("ReflectanceCoefficient B3",format(float(psb3ref),'.8f'))
print("ReflectanceCoefficient B4",format(float(psb4ref),'.8f'))
date_time = acquisition.split('T')[0]
pattern = '%Y-%m-%d'
epoch = int(time.mktime(time.strptime(date_time, pattern))) * 1000
with open(mfile,'a') as csvfile:
writer=csv.writer(csvfile,delimiter=',',lineterminator='\n')
writer.writerow([fsp,epoch,platform,sattype,str(sid),bands,format(float(cloud),'.2f'),format(float(psia),'.4f'),format(float(psilaz),'.2f'),
format(float(psilelv),'.2f'),format(float(psaz),'.2f'),format(float(pssca),'.4f'),psbrad,format(float(psb1ref),'.8f'),
format(float(psb2ref),'.8f'),format(float(psb3ref),'.8f'),format(float(psb4ref),'.8f')])
csvfile.close()
except Exception:
print(infilename)
with open(errorlog,'a') as csvfile:
writer=csv.writer(csvfile,delimiter=',',lineterminator='\n')
writer.writerow([infilename])
csvfile.close()
if asset == 'PS4B_DN': #PS 4 Band Scene Derivative DN
folder = mf
with open(mfile,'wb') as csvfile:
writer=csv.DictWriter(csvfile,fieldnames=["id_no", "system:time_start", "platform", "satType","satID", "numBands", "cloudcover","incAngle","illAzAngle","illElvAngle","azAngle","spcAngle"], delimiter=',')
writer.writeheader()
with open(errorlog,'wb') as csvfile:
writer=csv.DictWriter(csvfile,fieldnames=["id_no"], delimiter=',')
writer.writeheader()
for filename in os.listdir(folder):
infilename = os.path.join(folder, filename)
fsp = filename.split('_x')[0]
try:
from xml.dom import minidom
xmldoc = minidom.parse(infilename)
eopfilename = xmldoc.getElementsByTagName('eop:identifier')[0].firstChild.data
acquisition = xmldoc.getElementsByTagName('eop:acquisitionDate')[0].firstChild.data
bands=xmldoc.getElementsByTagName("ps:numBands")[0].firstChild.data
platform=xmldoc.getElementsByTagName("eop:shortName")[0].firstChild.data
sid=xmldoc.getElementsByTagName("eop:serialIdentifier")[0].firstChild.data
sattype=xmldoc.getElementsByTagName("eop:shortName")[1].firstChild.data
cloud=xmldoc.getElementsByTagName("opt:cloudCoverPercentage")[0].firstChild.data
psia=xmldoc.getElementsByTagName("eop:incidenceAngle")[0].firstChild.data
psilaz=xmldoc.getElementsByTagName("opt:illuminationAzimuthAngle")[0].firstChild.data
psilelv=xmldoc.getElementsByTagName("opt:illuminationElevationAngle")[0].firstChild.data
psaz=xmldoc.getElementsByTagName("ps:azimuthAngle")[0].firstChild.data
pssca=xmldoc.getElementsByTagName("ps:spaceCraftViewAngle")[0].firstChild.data
print ('ID_Name:', eopfilename.split('.')[0])
print ('Acquisition Date:', acquisition.split('T')[0])
print("Acquisition Date:", acquisition.split("T")[0])
print("Satellite Type:", platform)
print("ShortName:", sattype)
print("Number of Bands:", bands)
print("Cloud Cover:", format(float(cloud),'.2f'))
print("PS Incidence Angle",format(float(psia),'.4f'))
print("PS illumination azimuth angle",format(float(psilaz),'.2f'))
print("PS illumination elevation angle",format(float(psilelv),'.2f'))
print("PS Azimuth angle",format(float(psaz),'.2f'))
print("PS SpaceCraft angle",format(float(pssca),'.4f'))
date_time = acquisition.split('T')[0]
pattern = '%Y-%m-%d'
epoch = int(time.mktime(time.strptime(date_time, pattern))) * 1000
with open(mfile,'a') as csvfile:
writer=csv.writer(csvfile,delimiter=',',lineterminator='\n')
writer.writerow([fsp,epoch,platform,sattype,str(sid),bands,format(float(cloud),'.2f'),format(float(psia),'.4f'),format(float(psilaz),'.2f'),
format(float(psilelv),'.2f'),format(float(psaz),'.2f'),format(float(pssca),'.4f')])
csvfile.close()
except Exception:
print(infilename)
with open(errorlog,'a') as csvfile:
writer=csv.writer(csvfile,delimiter=',',lineterminator='\n')
writer.writerow([infilename])
csvfile.close()
if asset == 'PS3B': #PS 3 Band Scene Derivative Analytic
folder = mf
with open(mfile,'wb') as csvfile:
writer=csv.DictWriter(csvfile,fieldnames=["id_no", "system:time_start", "platform", "satType","satID", "numBands", "cloudcover","incAngle","illAzAngle","illElvAngle","azAngle","spcAngle","rsf","refCoeffB1","refCoeffB2","refCoeffB3"], delimiter=',')
writer.writeheader()
with open(errorlog,'wb') as csvfile:
writer=csv.DictWriter(csvfile,fieldnames=["id_no"], delimiter=',')
writer.writeheader()
for filename in os.listdir(folder):
infilename = os.path.join(folder, filename)
fsp = filename.split('_x')[0]
print(fsp)
try:
from xml.dom import minidom
xmldoc = minidom.parse(infilename)
eopfilename = xmldoc.getElementsByTagName('eop:identifier')[0].firstChild.data
acquisition = xmldoc.getElementsByTagName('eop:acquisitionDate')[0].firstChild.data
sid=xmldoc.getElementsByTagName("eop:serialIdentifier")[0].firstChild.data
platform=xmldoc.getElementsByTagName("eop:shortName")[0].firstChild.data
sattype=xmldoc.getElementsByTagName("eop:shortName")[1].firstChild.data
bands=xmldoc.getElementsByTagName("ps:numBands")[0].firstChild.data
cloud=xmldoc.getElementsByTagName("opt:cloudCoverPercentage")[0].firstChild.data
psb=xmldoc.getElementsByTagName("ps:bandNumber")[0].firstChild.data
psb1=xmldoc.getElementsByTagName("ps:bandNumber")[1].firstChild.data
psb3=xmldoc.getElementsByTagName("ps:bandNumber")[2].firstChild.data
psbrad=xmldoc.getElementsByTagName("ps:radiometricScaleFactor")[0].firstChild.data
psb1ref=xmldoc.getElementsByTagName("ps:reflectanceCoefficient")[0].firstChild.data
psb2ref=xmldoc.getElementsByTagName("ps:reflectanceCoefficient")[1].firstChild.data
psb3ref=xmldoc.getElementsByTagName("ps:reflectanceCoefficient")[2].firstChild.data
psia=xmldoc.getElementsByTagName("eop:incidenceAngle")[0].firstChild.data
psilaz=xmldoc.getElementsByTagName("opt:illuminationAzimuthAngle")[0].firstChild.data
psilelv=xmldoc.getElementsByTagName("opt:illuminationElevationAngle")[0].firstChild.data
psaz=xmldoc.getElementsByTagName("ps:azimuthAngle")[0].firstChild.data
pssca=xmldoc.getElementsByTagName("ps:spaceCraftViewAngle")[0].firstChild.data
print ('ID_Name:', eopfilename.split('.')[0])
print ('Acquisition Date:', acquisition.split('T')[0])
print("Acquisition Date:", acquisition.split("T")[0])
print("ShortName:", sattype)
print("Satellite ID:", str(sid))
print("Number of Bands:", bands)
print("Cloud Cover:", format(float(cloud),'.2f'))
print("PS Incidence Angle",format(float(psia),'.4f'))
print("PS illumination azimuth angle",format(float(psilaz),'.2f'))
print("PS illumination elevation angle",format(float(psilelv),'.2f'))
print("PS Azimuth angle",format(float(psaz),'.2f'))
print("PS SpaceCraft angle",format(float(pssca),'.4f'))
print("Radiometric Scale Factor",psbrad)
print("ReflectanceCoefficient B1",format(float(psb1ref),'.8f'))
print("ReflectanceCoefficient B2",format(float(psb2ref),'.8f'))
print("ReflectanceCoefficient B3",format(float(psb3ref),'.8f'))
date_time = acquisition.split('T')[0]
pattern = '%Y-%m-%d'
epoch = int(time.mktime(time.strptime(date_time, pattern))) * 1000
with open(mfile,'a') as csvfile:
writer=csv.writer(csvfile,delimiter=',',lineterminator='\n')
writer.writerow([fsp,epoch,platform,sattype,str(sid),bands,format(float(cloud),'.2f'),format(float(psia),'.4f'),format(float(psilaz),'.2f'),
format(float(psilelv),'.2f'),format(float(psaz),'.2f'),format(float(pssca),'.4f'),psbrad,format(float(psb1ref),'.8f'),
format(float(psb2ref),'.8f'),format(float(psb3ref),'.8f')])
csvfile.close()
except Exception:
print(infilename)
with open(errorlog,'a') as csvfile:
writer=csv.writer(csvfile,delimiter=',',lineterminator='\n')
writer.writerow([infilename])
csvfile.close()
if asset == 'PS3B_DN': #PS 3 Band Scene Derivative DN
folder = mf
with open(mfile,'wb') as csvfile:
writer=csv.DictWriter(csvfile,fieldnames=["id_no", "system:time_start", "platform", "satType","satID", "numBands", "cloudcover","incAngle","illAzAngle","illElvAngle","azAngle","spcAngle"], delimiter=',')
writer.writeheader()
with open(errorlog,'wb') as csvfile:
writer=csv.DictWriter(csvfile,fieldnames=["id_no"], delimiter=',')
writer.writeheader()
for filename in os.listdir(folder):
infilename = os.path.join(folder, filename)
fsp = filename.split('_x')[0]
try:
from xml.dom import minidom
xmldoc = minidom.parse(infilename)
eopfilename = xmldoc.getElementsByTagName('eop:identifier')[0].firstChild.data
acquisition = xmldoc.getElementsByTagName('eop:acquisitionDate')[0].firstChild.data
sid=xmldoc.getElementsByTagName("eop:serialIdentifier")[0].firstChild.data
bands=xmldoc.getElementsByTagName("ps:numBands")[0].firstChild.data
platform=xmldoc.getElementsByTagName("eop:shortName")[0].firstChild.data
sattype=xmldoc.getElementsByTagName("eop:shortName")[1].firstChild.data
cloud=xmldoc.getElementsByTagName("opt:cloudCoverPercentage")[0].firstChild.data
psia=xmldoc.getElementsByTagName("eop:incidenceAngle")[0].firstChild.data
psilaz=xmldoc.getElementsByTagName("opt:illuminationAzimuthAngle")[0].firstChild.data
psilelv=xmldoc.getElementsByTagName("opt:illuminationElevationAngle")[0].firstChild.data
psaz=xmldoc.getElementsByTagName("ps:azimuthAngle")[0].firstChild.data
pssca=xmldoc.getElementsByTagName("ps:spaceCraftViewAngle")[0].firstChild.data
print ('ID_Name:', eopfilename.split('.')[0])
print ('Acquisition Date:', acquisition.split('T')[0])
print("Acquisition Date:", acquisition.split("T")[0])
print("Satellite Type:", platform)
print("ShortName:", sattype)
print("Number of Bands:", bands)
print("Cloud Cover:", format(float(cloud),'.2f'))
print("PS Incidence Angle",format(float(psia),'.4f'))
print("PS illumination azimuth angle",format(float(psilaz),'.2f'))
print("PS illumination elevation angle",format(float(psilelv),'.2f'))
print("PS Azimuth angle",format(float(psaz),'.2f'))
print("PS SpaceCraft angle",format(float(pssca),'.4f'))
date_time = acquisition.split('T')[0]
pattern = '%Y-%m-%d'
epoch = int(time.mktime(time.strptime(date_time, pattern))) * 1000
with open(mfile,'a') as csvfile:
writer=csv.writer(csvfile,delimiter=',',lineterminator='\n')
writer.writerow([fsp,epoch,platform,sattype,str(sid),bands,format(float(cloud),'.2f'),format(float(psia),'.4f'),format(float(psilaz),'.2f'),
format(float(psilelv),'.2f'),format(float(psaz),'.2f'),format(float(pssca),'.4f')])
csvfile.close()
except Exception:
print(infilename)
with open(errorlog,'a') as csvfile:
writer=csv.writer(csvfile,delimiter=',',lineterminator='\n')
writer.writerow([infilename])
csvfile.close()
if asset == 'PS3B_V': #PS 3 Band Scene Derivative Visual
folder = mf
with open(mfile,'wb') as csvfile:
writer=csv.DictWriter(csvfile,fieldnames=["id_no", "system:time_start", "platform", "satType","satID", "numBands", "cloudcover","incAngle","illAzAngle","illElvAngle","azAngle","spcAngle"], delimiter=',')
writer.writeheader()
with open(errorlog,'wb') as csvfile:
writer=csv.DictWriter(csvfile,fieldnames=["id_no"], delimiter=',')
writer.writeheader()
for filename in os.listdir(folder):
infilename = os.path.join(folder, filename)
fsp = filename.split('_x')[0]
try:
from xml.dom import minidom
xmldoc = minidom.parse(infilename)
eopfilename = xmldoc.getElementsByTagName('eop:identifier')[0].firstChild.data
acquisition = xmldoc.getElementsByTagName('eop:acquisitionDate')[0].firstChild.data
bands=xmldoc.getElementsByTagName("ps:numBands")[0].firstChild.data
platform=xmldoc.getElementsByTagName("eop:shortName")[0].firstChild.data
sid=xmldoc.getElementsByTagName("eop:serialIdentifier")[0].firstChild.data
sattype=xmldoc.getElementsByTagName("eop:shortName")[1].firstChild.data
cloud=xmldoc.getElementsByTagName("opt:cloudCoverPercentage")[0].firstChild.data
psia=xmldoc.getElementsByTagName("eop:incidenceAngle")[0].firstChild.data
psilaz=xmldoc.getElementsByTagName("opt:illuminationAzimuthAngle")[0].firstChild.data
psilelv=xmldoc.getElementsByTagName("opt:illuminationElevationAngle")[0].firstChild.data
psaz=xmldoc.getElementsByTagName("ps:azimuthAngle")[0].firstChild.data
pssca=xmldoc.getElementsByTagName("ps:spaceCraftViewAngle")[0].firstChild.data
print ('ID_Name:', eopfilename.split('.')[0])
print ('Acquisition Date:', acquisition.split('T')[0])
print("Acquisition Date:", acquisition.split("T")[0])
print("Satellite Type:", platform)
print("ShortName:", sattype)
print("Number of Bands:", bands)
print("Cloud Cover:", format(float(cloud),'.2f'))
print("PS Incidence Angle",format(float(psia),'.4f'))
print("PS illumination azimuth angle",format(float(psilaz),'.2f'))
print("PS illumination elevation angle",format(float(psilelv),'.2f'))
print("PS Azimuth angle",format(float(psaz),'.2f'))
print("PS SpaceCraft angle",format(float(pssca),'.4f'))
date_time = acquisition.split('T')[0]
pattern = '%Y-%m-%d'
epoch = int(time.mktime(time.strptime(date_time, pattern))) * 1000
print ('epoch time', epoch)
with open(mfile,'a') as csvfile:
writer=csv.writer(csvfile,delimiter=',',lineterminator='\n')
writer.writerow([fsp,epoch,platform,sattype,str(sid),bands,format(float(cloud),'.2f'),format(float(psia),'.4f'),format(float(psilaz),'.2f'),
format(float(psilelv),'.2f'),format(float(psaz),'.2f'),format(float(pssca),'.4f')])
csvfile.close()
except Exception:
print(infilename)
with open(errorlog,'a') as csvfile:
writer=csv.writer(csvfile,delimiter=',',lineterminator='\n')
writer.writerow([infilename])
csvfile.close()
if asset == 'REO':
folder = mf
with open(mfile,'wb') as csvfile:
writer=csv.DictWriter(csvfile,fieldnames=["id_no", "system:time_start", "platform", "satID", "tileID", "numBands", "cloudcover","incAngle","illAzAngle","illElvAngle","azAngle","spcAngle","rsf"], delimiter=',')
writer.writeheader()
with open(errorlog,'wb') as csvfile:
writer=csv.DictWriter(csvfile,fieldnames=["id_no"], delimiter=',')
writer.writeheader()
for filename in os.listdir(folder):
print(filename)
infilename = os.path.join(folder,filename)
fsp=filename.split("_x")[0]
try:
from xml.dom import minidom
xmldoc=minidom.parse(infilename)
re=xmldoc.getElementsByTagName("re:EarthObservationMetaData")[0]
eopfilename=xmldoc.getElementsByTagName("eop:identifier")[0].firstChild.data
product=xmldoc.getElementsByTagName("re:EarthObservationResult")[0]
bands=product.getElementsByTagName("re:numBands")[0].firstChild.data
downlink=xmldoc.getElementsByTagName("eop:downlinkedTo")[0]
acquisition= downlink.getElementsByTagName("eop:acquisitionDate")[0].firstChild.data
tile=xmldoc.getElementsByTagName("re:tileId")[0].firstChild.data
equip=xmldoc.getElementsByTagName("eop:EarthObservationEquipment")[0]
platform=equip.getElementsByTagName("eop:shortName")[0].firstChild.data
sid=equip.getElementsByTagName("eop:serialIdentifier")[0].firstChild.data
cloud=xmldoc.getElementsByTagName("opt:cloudCoverPercentage")[0].firstChild.data
date_time = acquisition.split("T")[0]
pattern = '%Y-%m-%d'
epoch = int(time.mktime(time.strptime(date_time, pattern)))*1000
psia=xmldoc.getElementsByTagName("eop:incidenceAngle")[0].firstChild.data
psilaz=xmldoc.getElementsByTagName("opt:illuminationAzimuthAngle")[0].firstChild.data
psilelv=xmldoc.getElementsByTagName("opt:illuminationElevationAngle")[0].firstChild.data
psaz=xmldoc.getElementsByTagName("re:azimuthAngle")[0].firstChild.data
pssca=xmldoc.getElementsByTagName("re:spaceCraftViewAngle")[0].firstChild.data
psrad=xmldoc.getElementsByTagName("re:radiometricScaleFactor")[0].firstChild.data
print("ID_Name:", eopfilename.split(".")[0])
print("Acquisition Date:", acquisition.split("T")[0])
print("Satellite Type:", str(platform))
print("Satellite ID:", str(sid))
print("Tile ID:", tile)
print("Number of Bands:", bands)
print("Cloud Cover:", format(float(cloud),'.2f'))
print("Epoch Time:",epoch)
print("RE Incidence Angle",format(float(psia),'.4f'))
print("RE illumination azimuth angle",format(float(psilaz),'.2f'))
print("RE illumination elevation angle",format(float(psilelv),'.2f'))
print("RE Azimuth angle",format(float(psaz),'.2f'))
print("RE SpaceCraft angle",format(float(pssca),'.4f'))
print("Radiometric Scale Factor", format(float(psrad),'.18f'))
with open(mfile,'a') as csvfile:
writer=csv.writer(csvfile,delimiter=',',lineterminator='\n')
writer.writerow([fsp,epoch,str(platform),str(sid),tile,bands,format(float(cloud),'.2f'),format(float(psia),'.4f'),format(float(psilaz),'.2f'),format(float(psilelv),'.2f'),format(float(psaz),'.2f'),format(float(pssca),'.4f'),format(float(psrad),'.18f')])
csvfile.close()
except Exception:
print(infilename)
with open(errorlog,'a') as | |
# Repository: fpga-open-speech-tools/utils
#!/usr/bin/python3
"""
Download files for an SoC FPGA project from AWS.
This script downloads the bitstream, device tree overlay, and device
drivers located in a user-supplied directory in a user-supplied S3 bucket.
Parameters
----------
s3bucket : str
Name of the S3 bucket
s3directory : str
Name of the S3 directory in `s3bucket` where the desired files are located
driver_path : str
Path prefix where the device drivers will be downloaded to; drivers are
placed in a subdirectory of this path
config_path : str
Where to put the UI.json and Linker.json config files
progress : list of str
How to display download progress; options are 'bar' and 'json'
endpoint : str
HTTP endpoint, specified as http://ip:port, to send download progress to
verbose : bool
Print verbose output
Notes
-----
boto3 and tqdm must be installed on the system in order to run this script;
they can both be installed with pip.
By convention, S3 directories are all lowercase, with words separated by hyphens
when doing so improves readability. For example, the directory for the
Audio Mini sound effects project is audiomini/sound-effects. Additionally,
The bitstream and device tree overlays are named the same as the project, but
with underscores instead of hyphens, e.g. sound_effects.rbf.
The directory name can be given with or without a trailing slash.
The .dtbo and .rbf files need to be on the firmware search path, so they
will always be placed in /lib/firmware. If placing drivers in a non-default
path, users will need to supply that path as an argument to drivermgr.sh.
Displaying download progress as json messages is intended to be read by
another program that can display a progress bar to a user on a web app.
In order for the progress monitor bar to work to stay at the bottom of the
console output, tqdm.write() is used instead of print().
Examples
--------
Download files for the Audio Mini sound effects project
$ ./awsdownloader.py -b nih-demos -d audiomini/sound-effects
Download files for the Audio Mini passthrough project and show a progress bar
# ./awsdownloader.py -b nih-demos -d audiomini/passthrough --progress bar
Copyright
---------
Copyright 2020 Audio Logic
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
<NAME>, <NAME>
Audio Logic
985 Technology Blvd
Bozeman, MT 59718
<EMAIL>
"""
import boto3
import sys
import argparse
import os
import json
import requests
from tqdm import tqdm
from collections import namedtuple
from botocore.client import Config
from botocore import UNSIGNED
FIRMWARE_PATH = '/lib/firmware/'
DEFAULT_DRIVER_PATH = '/lib/modules/'
DEFAULT_CONFIG_PATH = '../config/'
FIRMWARE_EXTENSIONS = ('.rbf', '.dtbo')
DRIVER_EXTENSIONS = ('.ko')
CONFIG_EXTENSIONS = ('.json')
HTTP_HEADERS = {'Content-type': 'application/json', 'Accept': 'text/plain'}
"""
Named tuple to group together info about S3 files.
Parameters
----------
names
A tuple or list of file names
keys
A tuple or list of the S3 keys corresponding to the files
sizes
A tuple or list of the file sizes in bytes
"""
_S3Files = namedtuple('S3Files', ['names', 'keys', 'sizes'])
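# Illustrative construction (values are placeholders, following the naming convention above):
#   files = _S3Files(names=('sound_effects.rbf',),
#                    keys=('audiomini/sound-effects/sound_effects.rbf',),
#                    sizes=(1048576,))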
class _ProgressMonitor(object):
"""
A download progress monitor.
Monitors the download progress of all the S3 files and displays a
progress indicator on stdout. This class is used as the callback
to the boto3 download_files method, which calls the __call__ method.
Parameters
----------
total_download_size
Size of all the S3 files being downloaded, in bytes
show_json : bool
Show download progress as a json message
show_bar : bool
Show download progress as a progress bar
Attributes
----------
status : str
User-definable download status message
bytes_received : int
The number of bytes received from S3 so far
percent_downloaded : int
How much of the files have been downloaded so far
json_status_message : dict
Download status message and progress as JSON
Notes
-----
The JSON status message can be read from stdout and used by other programs
to report the download progress/status. Its format is
{"progress": 42, "status": "downloading file x"}
"""
def __init__(self, total_download_size, show_json=False, show_bar=False,
endpoint=None):
self.total_download_size = total_download_size
self.show_json = show_json
self.show_bar = show_bar
self.status = ""
self.bytes_received = 0
self.percent_downloaded = 0
self.json_status_message = {
"progress": 0,
"status": ""
}
self.endpoint = endpoint
if self.show_bar:
self._progress_bar = tqdm(
bar_format='{l_bar}{bar}| {n_fmt}B/{total_fmt}B [{elapsed}]', desc="Downloading", total=self.total_download_size,
mininterval=0.05, unit_scale=True)
else:
self._progress_bar = None
def __call__(self, bytes_received):
"""
Update and print the download progress.
Download progress is only printed if show_json and/or show_bar are True.
Parameters
----------
bytes_received : int
The number of bytes received since the previous callback from boto3
"""
self.bytes_received += bytes_received
self.percent_downloaded = (
int(self.bytes_received / self.total_download_size * 100)
)
self.json_status_message['progress'] = self.percent_downloaded
self.json_status_message['status'] = self.status
json_str = json.dumps(self.json_status_message)
if self.show_json:
tqdm.write(json_str)
if self.show_bar:
self._progress_bar.update(bytes_received)
if self.endpoint:
try:
requests.put(self.endpoint, data=json_str, headers=HTTP_HEADERS)
except Exception as e:
print(e)
# TODO: real error handling; at least use a more specific exception type once I know what it is
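# Sketch of one plausible way the monitor is wired into boto3 (bucket and key names are
# placeholders; the actual download happens in main() below):
#   s3 = boto3.client('s3', config=Config(signature_version=UNSIGNED))
#   monitor = _ProgressMonitor(total_download_size=total_bytes, show_bar=True)
#   monitor.status = "downloading sound_effects.rbf"
#   s3.download_file('some-bucket', 'audiomini/sound-effects/sound_effects.rbf',
#                    '/lib/firmware/sound_effects.rbf', Callback=monitor)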
def parseargs():
"""
Parse command-line arguments.
Returns
-------
args : Namespace
Object containing the parsed arguments
"""
# Create the argument parser
parser = argparse.ArgumentParser(add_help=False)
# Create a new group for the required arguments
required_args = parser.add_argument_group('required arguments')
# Add arguments for the directory and the bucket name
required_args.add_argument('-d', '--directory', type=str, required=True,
help="S3 directory to download files from")
required_args.add_argument('-b', '--bucket', type=str, required=True,
help="S3 bucket name")
# Create a group for optional arguments so required arguments print first
optional_args = parser.add_argument_group('optional arguments')
# Add optional arguments
optional_args.add_argument('-h', '--help', action='help',
help="show this help message and exit")
optional_args.add_argument('-v', '--verbose', action='store_true',
help="print verbose output", default=False)
optional_args.add_argument(
'--driver-path', type=str, default=DEFAULT_DRIVER_PATH,
help="path prefix where kernel modules folder gets created \
(default: " + DEFAULT_DRIVER_PATH + ")"
)
optional_args.add_argument(
'--config-path', type=str, default=DEFAULT_CONFIG_PATH,
help="where to put the UI.json and Linker.json config files \
(default: " + DEFAULT_CONFIG_PATH + ")"
)
optional_args.add_argument(
'-p', '--progress', action='append', choices=['bar', 'json'],
default=[], help="progress monitoring; 'bar' displays a progress bar, \
and 'json' displays progress in json format; multiple arguments \
can be given",
)
optional_args.add_argument('-e', '--endpoint', type=str,
help="HTTP endpoint to send download progress to; format is http://ip:port"
)
# Parse the arguments
args = parser.parse_args()
# Ensure paths ends in a trailing slash
if args.driver_path[-1] != '/':
args.driver_path += '/'
if args.config_path[-1] != '/':
args.config_path += '/'
return args
def _get_file_info(s3objects, file_extensions):
"""
Get information about files in an s3 objects list.
Given a list of s3 objects, this function extracts the key, filename,
and file size for each file that ends in an extension in `file_extensions`
Parameters
----------
s3objects : list
List of dictionaries returned by boto3's list_objects_v2() under its 'Contents' key
file_extensions : tuple
File extensions to match keys against
Returns
-------
_S3Files
A named tuple containing tuples of file names, keys, and sizes
Notes
-----
boto3's list_objects_v2 returns a dictionary of information about the S3
objects. Within the 'Contents' key, which is what needs to be fed to this
function, each object in the list has 'Key' and 'Size' keys.
The 'Key' attribute is of the form "some/directory/filename.extension".
"""
# Get firmware keys that end with any extension in file_extensions
keys = tuple(obj['Key'] for obj in s3objects
if obj['Key'].endswith(file_extensions))
# If no keys matching file_extensions were found, exit early
if not keys:
return None
# Get firmware filenames (the part of the key after the last slash)
# A typical key will be '<device name>/<project name>/<file name>'
names = tuple(key.split('/')[-1] for key in keys)
# Get file sizes for all keys that end with an extension in file_extensions
sizes = tuple(obj['Size'] for obj in s3objects
if obj['Key'].endswith(file_extensions))
# Pack everything into a _S3Files named tuple
return _S3Files(names=names, keys=keys, sizes=sizes)
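# Example (hypothetical listing): given
#   s3objects = [{'Key': 'audiomini/passthrough/passthrough.rbf', 'Size': 2048},
#                {'Key': 'audiomini/passthrough/notes.txt', 'Size': 10}]
# _get_file_info(s3objects, FIRMWARE_EXTENSIONS) returns
#   S3Files(names=('passthrough.rbf',), keys=('audiomini/passthrough/passthrough.rbf',), sizes=(2048,))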
def main(s3bucket, s3directory, driver_path=DEFAULT_DRIVER_PATH,
config_path=DEFAULT_CONFIG_PATH, progress=[], endpoint=None,
verbose=False):
"""
Download files for an SoC FPGA project from AWS.
This script downloads the bitstream, device tree overlay, and device
drivers located in a user-supplied directory in a user-supplied S3 bucket.
Parameters
----------
s3bucket : str
Name of the S3 bucket
s3directory : str
Name of the S3 directory in `s3bucket` where the desired files are
driver_path : str
Path prefix where the device drivers will be downloaded to; drivers are
placed in | |
factor
tau1 = ToolBox.tauPoint(numDeps, tauRos, 1.0)
#outline = ("tau1 "+ str(tau1) + "\n")
#dbgHandle.write(outline)
#//System.out.println("LINEGRID: Tau1: " + tau1);
#//logA = 2.0 * logLam0 + logGamma - ln4pi - logC - logDopp;
#//a = Math.exp(logA);
#//System.out.println("LINEGRID: logA: " + logE * logA);
#//Set up a half-profile Delta_lambda grid in Doppler width units
#//from line centre to wing
numPoints = len(linePoints[0])
#//System.out.println("LineProf: numPoints: " + numPoints);
#// Return a 2D numPoints X numDeps array of normalized line profile points (phi)
lineProf = [ [ 0.0 for i in range(numDeps) ] for j in range(numPoints) ]
#// Line profiel points in Doppler widths - needed for Voigt function, H(a,v):
v = [0.0 for i in range(numPoints)]
#double logV, ii;
#// lineProf[0][0] = 0.0; v[0] = 0.0; //Line centre - cannot do logaritmically!
#double gamma, logGamma, a, logA, voigt, core, wing, logWing, logVoigt;
Aij = math.pow(10.0, logAij)
il0 = 36
#// For Hjerting function approximation:
#double vSquare, vFourth, vAbs, a2, a3, a4, Hjert0, Hjert1, Hjert2, Hjert3, Hjert4, hjertFn;
#//System.out.println("il0 " + il0 + " temp[il] " + temp[0][il0] + " press[il] " + logE*press[1][il0]);
for id in range(numDeps):
#//Formula from p. 56 of Radiative Transfer in Stellar Atmospheres (Rutten),
#// logarithmically with respect to solar value:
logGamma = pGas[1][id] - pGasSun[1][tau1] + 0.7 * (tempSun[1][tau1] - temp[1][id]) + logGammaSun
#if (id%5 == 1):
# outline = ("id "+ str(id)+ " logGamma "+ str(logGamma) + "\n")
# dbgHandle.write(outline)
#//logGamma = logGamma + logFudge + logGammaCol;
logGamma = logGamma + logGammaCol
#//Add radiation (natural) broadning:
gamma = math.exp(logGamma) + Aij
logGamma = math.log(gamma)
#//
#//if (id == 12){
#//System.out.println("LineGrid: logGamma: " + id + " " + logE * logGamma + " press[1][id] " + press[1][id] + " pressSun[1][tau1] "
#// + pressSun[1][tau1] + " temp[1][id] " + temp[1][id] + " tempSun[1][tau1] " + tempSun[1][tau1]);
#// }
#//Voigt "a" parameter with line centre wavelength:
logA = 2.0 * logLam0 + logGamma - ln4pi - logC - logDopp
a = math.exp(logA)
a2 = math.exp(2.0*logA)
a3 = math.exp(3.0*logA)
a4 = math.exp(4.0*logA)
#// if (id == 12) {
#//System.out.println("LineGrid: lam0: " + lam0 + " logGam " + logE * logGamma + " logA " + logE * logA);
#// }
#//if (id == 30) {
#// //System.out.println("il v[il] iy y logNumerator logDenominator logInteg ");
#// System.out.println("voigt: v logVoigt: ");
#//}
for il in range(numPoints):
v[il] = linePoints[1][il]
vAbs = abs(v[il])
vSquare = vAbs * vAbs
vFourth = vSquare * vSquare
#//System.out.println("LineProf: il, v[il]: " + il + " " + v[il]);
#//Approximate Hjerting fn from tabulated expansion coefficients:
#// Interpolate in Hjerting table to exact "v" value for each expanstion coefficient:
#// Row 0 of Hjerting component table used for tabulated abscissae, Voigt "v" parameter
if (vAbs <= 12.0):
#//we are within abscissa domain of table
Hjert0 = ToolBox.interpol(hjertComp[0], hjertComp[1], vAbs)
Hjert1 = ToolBox.interpol(hjertComp[0], hjertComp[2], vAbs)
Hjert2 = ToolBox.interpol(hjertComp[0], hjertComp[3], vAbs)
Hjert3 = ToolBox.interpol(hjertComp[0], hjertComp[4], vAbs)
Hjert4 = ToolBox.interpol(hjertComp[0], hjertComp[5], vAbs)
else:
#// We use the analytic expansion
Hjert0 = 0.0
Hjert1 = (0.56419 / vSquare) + (0.846 / vFourth)
Hjert2 = 0.0
Hjert3 = -0.56 / vFourth
Hjert4 = 0.0
#//Approximate Hjerting fn with power expansion in Voigt "a" parameter
#// "Observation & Analysis of Stellar Photospeheres" (<NAME>), 3rd Ed., p. 258:
hjertFn = Hjert0 + a*Hjert1 + a2*Hjert2 + a3*Hjert3 + a4*Hjert4
#if ((id%5 == 1) and (il%2 == 0)):
# outline = ("il "+ str(il)+ " hjertFn "+ str(hjertFn) + "\n")
# dbgHandle.write(outline)
"""/* Gaussian + Lorentzian approximation:
//if (il <= numCore) {
if (v[il] <= 2.0 && v[il] >= -2.0) {
// - Gaussian ONLY - at line centre Lorentzian will diverge!
core = Math.exp(-1.0 * (v[il] * v[il]));
voigt = core;
//System.out.println("LINEGRID- CORE: core: " + core);
} else {
logV = Math.log(Math.abs(v[il]));
//Gaussian core:
core = Math.exp(-1.0 * (v[il] * v[il]));
// if (id == 12) {
// System.out.println("LINEGRID- WING: core: " + core);
// }
//Lorentzian wing:
logWing = logA - lnSqRtPi - (2.0 * logV);
wing = Math.exp(logWing);
voigt = core + wing;
// if (id == 12) {
// System.out.println("LINEGRID- WING: wing: " + wing + " logV " + logV);
// }
} // end else
*/"""
#//System.out.println("LINEGRID: il, v[il]: " + il + " " + v[il] + " lineProf[0][il]: " + lineProf[0][il]);
#//System.out.println("LINEGRID: il, Voigt, H(): " + il + " " + voigt);
#//Convert from H(a,v) in dimensionless Voigt units to physical phi((Delta lambda) profile:
#//logVoigt = Math.log(voigt) + 2.0 * logLam0 - lnSqRtPi - logDopp - logC;
#//System.out.println("voigt: Before log... id " + id + " il " + il + " hjertFn " + hjertFn);
#logVoigt = math.log(hjertFn) + 2.0 * logLam0 - lnSqRtPi - logDopp - logC
voigt = hjertFn * math.pow(lam0, 2) / sqRtPi / doppler / c
#logVoigt = math.log(voigt)
#lineProf[il][id] = math.exp(logVoigt)
lineProf[il][id] = voigt
if (lineProf[il][id] <= 0.0):
lineProf[il][id] = 1.0e-49
#// if (id == 12) {
#// System.out.println("il " + il + " linePoints " + 1.0e7 * linePoints[0][il] + " id " + id + " lineProf[il][id] " + lineProf[il][id]);
#// }
#//System.out.println("LineProf: il, id, lineProf[il][id]: " + il + " " + id + " " + lineProf[il][id]);
#} // il lambda loop
#// if (id == 20) {
#// for (int il = 0; il < numPoints; il++) {
#// System.out.format("Voigt: %20.16f %20.16f%n", linePoints[1][il], logE * Math.log(lineProf[il][id]));
#// }
#// }
#} //id loop
return lineProf
#} //end method voigt()
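# Illustrative sketch (not part of the original translation): the large-|v|
# analytic Hjerting expansion used in the wing branch above, packaged as a
# standalone helper. The coefficients (0.56419, 0.846, -0.56) are copied from
# that branch; the helper name and interface are ours, not from the source.
def _hjerting_wing_approx(a, v):
    """Approximate H(a, v) in the far line wing (|v| > ~12) via the asymptotic expansion."""
    vSquare = v * v
    vFourth = vSquare * vSquare
    Hjert0 = 0.0
    Hjert1 = (0.56419 / vSquare) + (0.846 / vFourth)
    Hjert2 = 0.0
    Hjert3 = -0.56 / vFourth
    Hjert4 = 0.0
    # Same power expansion in the Voigt "a" parameter as used above:
    return Hjert0 + a * Hjert1 + a**2 * Hjert2 + a**3 * Hjert3 + a**4 * Hjert4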
def stark(linePoints, lam0In, logAij, logGammaCol,
numDeps, teff, tauRos, temp, pGas, Ne,
tempSun, pGasSun, hjertComp, lineName):
c = Useful.c()
logC = Useful.logC()
#//double k = Useful.k;
logK = Useful.logK()
#//double e = Useful.e;
#//double mE = Useful.mE;
lam0 = lam0In #// * 1.0E-7; //nm to cm
logLam0 = math.log(lam0)
logLam0A = math.log(lam0) + 8.0*math.log(10.0) #//cm to A
ln10 = math.log(10.0)
ln2 = math.log(2.0)
ln4pi = math.log(4.0 * math.pi)
lnSqRtPi = 0.5 * math.log(math.pi)
sqRtPi = math.sqrt(math.pi)
sqPi = math.sqrt(math.pi)
#//double ln100 = 2.0*Math.log(10.0);
logE = math.log10(math.e) #// for debug output
doppler = linePoints[0][1] / linePoints[1][1]
logDopp = math.log(doppler)
#//System.out.println("LineProf: doppler, logDopp: " + doppler + " " + logE*logDopp);
#//Put input parameters into linear cgs units:
#//double gammaCol = Math.pow(10.0, logGammaCol);
#// Lorentzian broadening:
#// Assumes Van der Waals dominates radiative damping
#// log_10 Gamma_6 for van der Waals damping around Tau_Cont = 1 in Sun
#// - p. 57 of Radiative Transfer in Stellar Atmospheres (Rutten)
logGammaSun = 9.0 * ln10 #// Convert to base e
#//double logFudge = Math.log(2.5); // Van der Waals enhancement factor
tau1 = ToolBox.tauPoint(numDeps, tauRos, 1.0)
#//System.out.println("LINEGRID: Tau1: " + tau1);
#//logA = 2.0 * logLam0 + logGamma - ln4pi - logC - logDopp;
#//a = Math.exp(logA);
#//System.out.println("LINEGRID: logA: " + logE * logA);
#//Set up a half-profile Delta_lambda grid in Doppler width units
#//from line centre to wing
numPoints = len(linePoints[0])
#//System.out.println("LineProf: numPoints: " + numPoints);
#// Return a 2D numPoints X numDeps array of normalized line profile points (phi)
lineProf = [ [ 0.0 for i in range(numDeps) ] for j in range(numPoints) ]
    #// Line profile points in Doppler widths - needed for Voigt function, H(a,v):
v = [0.0 for i in range(numPoints)]
#double logV, ii;
    #// lineProf[0][0] = 0.0; v[0] = 0.0; //Line centre - cannot do logarithmically!
#double gamma, logGamma, a, logA, voigt, core, wing, logWing, logVoigt;
Aij = math.pow(10.0, logAij)
il0 = 36
#// For Hjerting function approximation:
#double vSquare, vFourth, vAbs, a2, a3, a4, Hjert0, Hjert1, Hjert2, Hjert3, Hjert4, hjertFn;
#//Parameters for linear Stark broadening:
    #//Asymptotic ("far wing") "K" parameters
#//Stehle & Hutcheon, 1999, A&A Supp Ser, 140, 93 and CDS data table
    #//Assume K has something to do with "S" and proceed as in Observation and Analysis of
    #// Stellar Photospheres, 3rd Ed. (D. Gray), Eq.
else:
self.MathsRegion = MathsRegion
self.MathsRegion_nsprefix_ = None
if NoiseRegion is None:
self.NoiseRegion = []
else:
self.NoiseRegion = NoiseRegion
self.NoiseRegion_nsprefix_ = None
if FrameRegion is None:
self.FrameRegion = []
else:
self.FrameRegion = FrameRegion
self.FrameRegion_nsprefix_ = None
if UnknownRegion is None:
self.UnknownRegion = []
else:
self.UnknownRegion = UnknownRegion
self.UnknownRegion_nsprefix_ = None
def factory(*args_, **kwargs_):
if CurrentSubclassModule_ is not None:
subclass = getSubclassFromModule_(
CurrentSubclassModule_, FrameRegionType)
if subclass is not None:
return subclass(*args_, **kwargs_)
if FrameRegionType.subclass:
return FrameRegionType.subclass(*args_, **kwargs_)
else:
return FrameRegionType(*args_, **kwargs_)
factory = staticmethod(factory)
def get_ns_prefix_(self):
return self.ns_prefix_
def set_ns_prefix_(self, ns_prefix):
self.ns_prefix_ = ns_prefix
def get_Coords(self):
return self.Coords
def set_Coords(self, Coords):
self.Coords = Coords
def get_TextRegion(self):
return self.TextRegion
def set_TextRegion(self, TextRegion):
self.TextRegion = TextRegion
def add_TextRegion(self, value):
self.TextRegion.append(value)
def insert_TextRegion_at(self, index, value):
self.TextRegion.insert(index, value)
def replace_TextRegion_at(self, index, value):
self.TextRegion[index] = value
def get_ImageRegion(self):
return self.ImageRegion
def set_ImageRegion(self, ImageRegion):
self.ImageRegion = ImageRegion
def add_ImageRegion(self, value):
self.ImageRegion.append(value)
def insert_ImageRegion_at(self, index, value):
self.ImageRegion.insert(index, value)
def replace_ImageRegion_at(self, index, value):
self.ImageRegion[index] = value
def get_LineDrawingRegion(self):
return self.LineDrawingRegion
def set_LineDrawingRegion(self, LineDrawingRegion):
self.LineDrawingRegion = LineDrawingRegion
def add_LineDrawingRegion(self, value):
self.LineDrawingRegion.append(value)
def insert_LineDrawingRegion_at(self, index, value):
self.LineDrawingRegion.insert(index, value)
def replace_LineDrawingRegion_at(self, index, value):
self.LineDrawingRegion[index] = value
def get_GraphicRegion(self):
return self.GraphicRegion
def set_GraphicRegion(self, GraphicRegion):
self.GraphicRegion = GraphicRegion
def add_GraphicRegion(self, value):
self.GraphicRegion.append(value)
def insert_GraphicRegion_at(self, index, value):
self.GraphicRegion.insert(index, value)
def replace_GraphicRegion_at(self, index, value):
self.GraphicRegion[index] = value
def get_TableRegion(self):
return self.TableRegion
def set_TableRegion(self, TableRegion):
self.TableRegion = TableRegion
def add_TableRegion(self, value):
self.TableRegion.append(value)
def insert_TableRegion_at(self, index, value):
self.TableRegion.insert(index, value)
def replace_TableRegion_at(self, index, value):
self.TableRegion[index] = value
def get_ChartRegion(self):
return self.ChartRegion
def set_ChartRegion(self, ChartRegion):
self.ChartRegion = ChartRegion
def add_ChartRegion(self, value):
self.ChartRegion.append(value)
def insert_ChartRegion_at(self, index, value):
self.ChartRegion.insert(index, value)
def replace_ChartRegion_at(self, index, value):
self.ChartRegion[index] = value
def get_SeparatorRegion(self):
return self.SeparatorRegion
def set_SeparatorRegion(self, SeparatorRegion):
self.SeparatorRegion = SeparatorRegion
def add_SeparatorRegion(self, value):
self.SeparatorRegion.append(value)
def insert_SeparatorRegion_at(self, index, value):
self.SeparatorRegion.insert(index, value)
def replace_SeparatorRegion_at(self, index, value):
self.SeparatorRegion[index] = value
def get_MathsRegion(self):
return self.MathsRegion
def set_MathsRegion(self, MathsRegion):
self.MathsRegion = MathsRegion
def add_MathsRegion(self, value):
self.MathsRegion.append(value)
def insert_MathsRegion_at(self, index, value):
self.MathsRegion.insert(index, value)
def replace_MathsRegion_at(self, index, value):
self.MathsRegion[index] = value
def get_NoiseRegion(self):
return self.NoiseRegion
def set_NoiseRegion(self, NoiseRegion):
self.NoiseRegion = NoiseRegion
def add_NoiseRegion(self, value):
self.NoiseRegion.append(value)
def insert_NoiseRegion_at(self, index, value):
self.NoiseRegion.insert(index, value)
def replace_NoiseRegion_at(self, index, value):
self.NoiseRegion[index] = value
def get_FrameRegion(self):
return self.FrameRegion
def set_FrameRegion(self, FrameRegion):
self.FrameRegion = FrameRegion
def add_FrameRegion(self, value):
self.FrameRegion.append(value)
def insert_FrameRegion_at(self, index, value):
self.FrameRegion.insert(index, value)
def replace_FrameRegion_at(self, index, value):
self.FrameRegion[index] = value
def get_UnknownRegion(self):
return self.UnknownRegion
def set_UnknownRegion(self, UnknownRegion):
self.UnknownRegion = UnknownRegion
def add_UnknownRegion(self, value):
self.UnknownRegion.append(value)
def insert_UnknownRegion_at(self, index, value):
self.UnknownRegion.insert(index, value)
def replace_UnknownRegion_at(self, index, value):
self.UnknownRegion[index] = value
def get_id(self):
return self.id
def set_id(self, id):
self.id = id
def get_bgColour(self):
return self.bgColour
def set_bgColour(self, bgColour):
self.bgColour = bgColour
def get_borderPresent(self):
return self.borderPresent
def set_borderPresent(self, borderPresent):
self.borderPresent = borderPresent
def validate_ColourSimpleType(self, value):
# Validate type pc:ColourSimpleType, a restriction on string.
if value is not None and Validate_simpletypes_ and self.gds_collector_ is not None:
if not isinstance(value, str):
lineno = self.gds_get_node_lineno_()
self.gds_collector_.add_message('Value "%(value)s"%(lineno)s is not of the correct base simple type (str)' % {"value": value, "lineno": lineno, })
return False
value = value
enumerations = ['black', 'blue', 'brown', 'cyan', 'green', 'grey', 'indigo', 'magenta', 'orange', 'pink', 'red', 'turquoise', 'violet', 'white', 'yellow']
if value not in enumerations:
lineno = self.gds_get_node_lineno_()
self.gds_collector_.add_message('Value "%(value)s"%(lineno)s does not match xsd enumeration restriction on ColourSimpleType' % {"value" : encode_str_2_3(value), "lineno": lineno} )
result = False
def hasContent_(self):
if (
self.Coords is not None or
self.TextRegion or
self.ImageRegion or
self.LineDrawingRegion or
self.GraphicRegion or
self.TableRegion or
self.ChartRegion or
self.SeparatorRegion or
self.MathsRegion or
self.NoiseRegion or
self.FrameRegion or
self.UnknownRegion
):
return True
else:
return False
def export(self, outfile, level, namespaceprefix_='', namespacedef_='xmlns:pc="http://schema.primaresearch.org/PAGE/gts/pagecontent/2010-03-19"', name_='FrameRegionType', pretty_print=True):
imported_ns_def_ = GenerateDSNamespaceDefs_.get('FrameRegionType')
if imported_ns_def_ is not None:
namespacedef_ = imported_ns_def_
if pretty_print:
eol_ = '\n'
else:
eol_ = ''
if self.original_tagname_ is not None and name_ == 'FrameRegionType':
name_ = self.original_tagname_
if UseCapturedNS_ and self.ns_prefix_:
namespaceprefix_ = self.ns_prefix_ + ':'
showIndent(outfile, level, pretty_print)
outfile.write('<%s%s%s' % (namespaceprefix_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
already_processed = set()
self.exportAttributes(outfile, level, already_processed, namespaceprefix_, name_='FrameRegionType')
if self.hasContent_():
outfile.write('>%s' % (eol_, ))
self.exportChildren(outfile, level + 1, namespaceprefix_, namespacedef_, name_='FrameRegionType', pretty_print=pretty_print)
showIndent(outfile, level, pretty_print)
outfile.write('</%s%s>%s' % (namespaceprefix_, name_, eol_))
else:
outfile.write('/>%s' % (eol_, ))
def exportAttributes(self, outfile, level, already_processed, namespaceprefix_='', name_='FrameRegionType'):
if self.id is not None and 'id' not in already_processed:
already_processed.add('id')
outfile.write(' id=%s' % (self.gds_encode(self.gds_format_string(quote_attrib(self.id), input_name='id')), ))
if self.bgColour is not None and 'bgColour' not in already_processed:
already_processed.add('bgColour')
outfile.write(' bgColour=%s' % (self.gds_encode(self.gds_format_string(quote_attrib(self.bgColour), input_name='bgColour')), ))
if self.borderPresent is not None and 'borderPresent' not in already_processed:
already_processed.add('borderPresent')
outfile.write(' borderPresent="%s"' % self.gds_format_boolean(self.borderPresent, input_name='borderPresent'))
def exportChildren(self, outfile, level, namespaceprefix_='', namespacedef_='xmlns:pc="http://schema.primaresearch.org/PAGE/gts/pagecontent/2010-03-19"', name_='FrameRegionType', fromsubclass_=False, pretty_print=True):
if pretty_print:
eol_ = '\n'
else:
eol_ = ''
if self.Coords is not None:
namespaceprefix_ = self.Coords_nsprefix_ + ':' if (UseCapturedNS_ and self.Coords_nsprefix_) else ''
self.Coords.export(outfile, level, namespaceprefix_, namespacedef_='', name_='Coords', pretty_print=pretty_print)
for TextRegion_ in self.TextRegion:
namespaceprefix_ = self.TextRegion_nsprefix_ + ':' if (UseCapturedNS_ and self.TextRegion_nsprefix_) else ''
TextRegion_.export(outfile, level, namespaceprefix_, namespacedef_='', name_='TextRegion', pretty_print=pretty_print)
for ImageRegion_ in self.ImageRegion:
namespaceprefix_ = self.ImageRegion_nsprefix_ + ':' if (UseCapturedNS_ and self.ImageRegion_nsprefix_) else ''
ImageRegion_.export(outfile, level, namespaceprefix_, namespacedef_='', name_='ImageRegion', pretty_print=pretty_print)
for LineDrawingRegion_ in self.LineDrawingRegion:
namespaceprefix_ = self.LineDrawingRegion_nsprefix_ + ':' if (UseCapturedNS_ and self.LineDrawingRegion_nsprefix_) else ''
LineDrawingRegion_.export(outfile, level, namespaceprefix_, namespacedef_='', name_='LineDrawingRegion', pretty_print=pretty_print)
for GraphicRegion_ in self.GraphicRegion:
namespaceprefix_ = self.GraphicRegion_nsprefix_ + ':' if (UseCapturedNS_ and self.GraphicRegion_nsprefix_) else ''
GraphicRegion_.export(outfile, level, namespaceprefix_, namespacedef_='', name_='GraphicRegion', pretty_print=pretty_print)
for TableRegion_ in self.TableRegion:
namespaceprefix_ = self.TableRegion_nsprefix_ + ':' if (UseCapturedNS_ and self.TableRegion_nsprefix_) else ''
TableRegion_.export(outfile, level, namespaceprefix_, namespacedef_='', name_='TableRegion', pretty_print=pretty_print)
for ChartRegion_ in self.ChartRegion:
namespaceprefix_ = self.ChartRegion_nsprefix_ + ':' if (UseCapturedNS_ and self.ChartRegion_nsprefix_) else ''
ChartRegion_.export(outfile, level, namespaceprefix_, namespacedef_='', name_='ChartRegion', pretty_print=pretty_print)
for SeparatorRegion_ in self.SeparatorRegion:
namespaceprefix_ = self.SeparatorRegion_nsprefix_ + ':' if (UseCapturedNS_ and self.SeparatorRegion_nsprefix_) else ''
SeparatorRegion_.export(outfile, level, namespaceprefix_, namespacedef_='', name_='SeparatorRegion', pretty_print=pretty_print)
for MathsRegion_ in self.MathsRegion:
namespaceprefix_ = self.MathsRegion_nsprefix_ + ':' if (UseCapturedNS_ and self.MathsRegion_nsprefix_) else ''
MathsRegion_.export(outfile, level, namespaceprefix_, namespacedef_='', name_='MathsRegion', pretty_print=pretty_print)
for NoiseRegion_ in self.NoiseRegion:
namespaceprefix_ = self.NoiseRegion_nsprefix_ + ':' if (UseCapturedNS_ and self.NoiseRegion_nsprefix_) else ''
NoiseRegion_.export(outfile, level, namespaceprefix_, namespacedef_='', name_='NoiseRegion', pretty_print=pretty_print)
for FrameRegion_ in self.FrameRegion:
namespaceprefix_ = self.FrameRegion_nsprefix_ + ':' if (UseCapturedNS_ and self.FrameRegion_nsprefix_) else ''
FrameRegion_.export(outfile, level, namespaceprefix_, namespacedef_='', name_='FrameRegion', pretty_print=pretty_print)
for UnknownRegion_ in self.UnknownRegion:
namespaceprefix_ = self.UnknownRegion_nsprefix_ + ':' if (UseCapturedNS_ and self.UnknownRegion_nsprefix_) else ''
UnknownRegion_.export(outfile, level, namespaceprefix_, namespacedef_='', name_='UnknownRegion', pretty_print=pretty_print)
def build(self, node, gds_collector_=None):
self.gds_collector_ = gds_collector_
if SaveElementTreeNode:
self.gds_elementtree_node_ = node
already_processed = set()
self.ns_prefix_ = node.prefix
self.buildAttributes(node, node.attrib, already_processed)
for child in node:
nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
self.buildChildren(child, node, nodeName_, gds_collector_=gds_collector_)
return self
def buildAttributes(self, node, attrs, already_processed):
value = find_attr_value_('id', node)
if value is not None and 'id' not in already_processed:
already_processed.add('id')
self.id = value
value = find_attr_value_('bgColour', node)
if value is not None and 'bgColour' not in already_processed:
already_processed.add('bgColour')
self.bgColour = value
self.validate_ColourSimpleType(self.bgColour) # validate type ColourSimpleType
value = find_attr_value_('borderPresent', node)
if value is not None and 'borderPresent' not in already_processed:
already_processed.add('borderPresent')
if value in ('true', '1'):
self.borderPresent = True
elif value in ('false', '0'):
self.borderPresent = False
else:
raise_parse_error(node, 'Bad boolean attribute')
def buildChildren(self, child_, node, nodeName_, fromsubclass_=False, gds_collector_=None):
if nodeName_ == 'Coords':
obj_ = CoordsType.factory(parent_object_=self)
obj_.build(child_, gds_collector_=gds_collector_)
self.Coords = obj_
obj_.original_tagname_ = 'Coords'
elif nodeName_ == 'TextRegion':
obj_ = TextRegionType.factory(parent_object_=self)
obj_.build(child_, gds_collector_=gds_collector_)
self.TextRegion.append(obj_)
obj_.original_tagname_ = 'TextRegion'
elif nodeName_ == 'ImageRegion':
obj_ = ImageRegionType.factory(parent_object_=self)
obj_.build(child_, gds_collector_=gds_collector_)
self.ImageRegion.append(obj_)
obj_.original_tagname_ = 'ImageRegion'
elif nodeName_ == 'LineDrawingRegion':
obj_ = LineDrawingRegionType.factory(parent_object_=self)
obj_.build(child_, gds_collector_=gds_collector_)
self.LineDrawingRegion.append(obj_)
obj_.original_tagname_ = 'LineDrawingRegion'
elif nodeName_ == 'GraphicRegion':
obj_ = GraphicRegionType.factory(parent_object_=self)
obj_.build(child_, gds_collector_=gds_collector_)
self.GraphicRegion.append(obj_)
obj_.original_tagname_ = 'GraphicRegion'
elif nodeName_ == 'TableRegion':
obj_ = TableRegionType.factory(parent_object_=self)
obj_.build(child_, gds_collector_=gds_collector_)
self.TableRegion.append(obj_)
obj_.original_tagname_ = 'TableRegion'
elif nodeName_ == 'ChartRegion':
obj_ = ChartRegionType.factory(parent_object_=self)
obj_.build(child_, gds_collector_=gds_collector_)
self.ChartRegion.append(obj_)
obj_.original_tagname_ = 'ChartRegion'
elif nodeName_ == 'SeparatorRegion':
obj_ = SeparatorRegionType.factory(parent_object_=self)
obj_.build(child_, gds_collector_=gds_collector_)
self.SeparatorRegion.append(obj_)
obj_.original_tagname_ = 'SeparatorRegion'
elif nodeName_ == 'MathsRegion':
obj_ = MathsRegionType.factory(parent_object_=self)
obj_.build(child_, gds_collector_=gds_collector_)
self.MathsRegion.append(obj_)
obj_.original_tagname_ = 'MathsRegion'
elif nodeName_ == 'NoiseRegion':
obj_ = NoiseRegionType.factory(parent_object_=self)
obj_.build(child_, gds_collector_=gds_collector_)
self.NoiseRegion.append(obj_)
obj_.original_tagname_ = 'NoiseRegion'
elif nodeName_ == 'FrameRegion':
obj_ = FrameRegionType.factory(parent_object_=self)
obj_.build(child_, gds_collector_=gds_collector_)
self.FrameRegion.append(obj_)
obj_.original_tagname_ = 'FrameRegion'
elif nodeName_ == 'UnknownRegion':
obj_ = UnknownRegionType.factory(parent_object_=self)
obj_.build(child_, gds_collector_=gds_collector_)
self.UnknownRegion.append(obj_)
obj_.original_tagname_ = 'UnknownRegion'
# end class FrameRegionType
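# Illustrative sketch (not part of the generated module): building a
# FrameRegionType by hand and serialising it with export(). The keyword
# arguments assume the usual generateDS constructor, which accepts the XML
# attribute names (id, bgColour, borderPresent); adjust them if the generated
# __init__ signature differs.
def _example_export_frame_region():
    import sys
    region = FrameRegionType.factory(id="r0", bgColour="white", borderPresent=True)
    # With no Coords and no child regions, hasContent_() is False, so export()
    # emits a single self-closing element.
    region.export(sys.stdout, level=0, name_='FrameRegion')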
class PrintSpaceType(GeneratedsSuper):
"""Determines the effective area on | |
    subset = ds[max(0, min0 - 4 * ksize):min(shape[0], max0 + 4 * ksize), max(0, min1 - 4 * ksize):min(shape[1], max1 + 4 * ksize)]
while subset.max() > maxt:
subset = ndimage.median_filter(subset, ksize)
ds[max(0, min0 - 4 * ksize):min(shape[0], max0 + 4 * ksize), max(0, min1 - 4 * ksize):min(shape[1], max1 + 4 * ksize)] = subset
return ds
def average_dark(lstimg, center_method="mean", cutoff=None, quantiles=(0.5, 0.5)):
"""
    Averages a series of dark (or flat) images.
Centers the result on the mean or the median ...
but averages all frames within cutoff*std
:param lstimg: list of 2D images or a 3D stack
    :param str center_method: method used to compute the center: "mean",
                              "median", "quantile" or "std"
:param cutoff: keep all data where (I-center)/std < cutoff
:type cutoff: float or None
    :param quantiles: 2-tuple of floats; only the data lying between these two
                      quantiles are averaged
:type quantiles: tuple(float, float) or None
:return: 2D image averaged
"""
if "ndim" in dir(lstimg) and lstimg.ndim == 3:
stack = lstimg.astype(numpy.float32)
shape = stack.shape[1:]
length = stack.shape[0]
else:
shape = lstimg[0].shape
length = len(lstimg)
if length == 1:
return lstimg[0].astype(numpy.float32)
stack = numpy.zeros((length, shape[0], shape[1]), dtype=numpy.float32)
for i, img in enumerate(lstimg):
stack[i] = img
if center_method in dir(stack):
center = stack.__getattribute__(center_method)(axis=0)
elif center_method == "median":
logger.info("Filtering data (median)")
center = numpy.median(stack, axis=0)
elif center_method.startswith("quantil"):
logger.info("Filtering data (quantiles: %s)", quantiles)
sorted_ = numpy.sort(stack, axis=0)
lower = max(0, int(numpy.floor(min(quantiles) * length)))
upper = min(length, int(numpy.ceil(max(quantiles) * length)))
if (upper == lower):
if upper < length:
upper += 1
elif lower > 0:
lower -= 1
else:
logger.warning("Empty selection for quantil %s, would keep points from %s to %s", quantiles, lower, upper)
center = sorted_[lower:upper].mean(axis=0)
else:
raise RuntimeError("Cannot understand method: %s in average_dark" % center_method)
if cutoff is None or cutoff <= 0:
output = center
else:
std = stack.std(axis=0)
strides = 0, std.strides[0], std.strides[1]
std.shape = 1, shape[0], shape[1]
std.strides = strides
center.shape = 1, shape[0], shape[1]
center.strides = strides
mask = ((abs(stack - center) / std) > cutoff)
stack[numpy.where(mask)] = 0.0
summed = stack.sum(axis=0)
output = summed / numpy.float32(numpy.maximum(1, (length - mask.sum(axis=0))))
return output
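# Illustrative sketch (not part of the original module): typical calls to
# average_dark() on a small synthetic stack. The keyword arguments match the
# function above; the data are made up for demonstration only.
def _example_average_dark():
    frames = [numpy.random.normal(100.0, 5.0, size=(64, 64)) for _ in range(10)]
    # Plain mean of all frames:
    mean_dark = average_dark(frames, center_method="mean")
    # Median centre, then average only the pixels within 3 sigma of it:
    robust_dark = average_dark(frames, center_method="median", cutoff=3)
    # Average only the frames lying between the 20% and 80% quantiles:
    quantile_dark = average_dark(frames, center_method="quantile", quantiles=(0.2, 0.8))
    return mean_dark, robust_dark, quantile_dark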
def _normalize_image_stack(image_stack):
"""
Convert input data to a list of 2D numpy arrays or a stack
of numpy array (3D array).
:param image_stack: slice of images
:type image_stack: list or numpy.ndarray
:return: A stack of image (list of 2D array or a single 3D array)
:rtype: list or numpy.ndarray
"""
if image_stack is None:
return None
if isinstance(image_stack, numpy.ndarray) and image_stack.ndim == 3:
# numpy image stack (single 3D image)
return image_stack
if isinstance(image_stack, list):
# list of numpy images (multi 2D images)
result = []
for image in image_stack:
if isinstance(image, six.string_types):
data = fabio.open(image).data
elif isinstance(image, numpy.ndarray) and image.ndim == 2:
data = image
else:
raise Exception("Unsupported image type '%s' in image_stack" % type(image))
result.append(data)
return result
raise Exception("Unsupported type '%s' for image_stack" % type(image_stack))
class AverageWriter():
"""Interface for using writer in `Average` process."""
def write_header(self, merged_files, nb_frames, monitor_name):
"""Write the header of the average
:param list merged_files: List of files used to generate this output
:param int nb_frames: Number of frames used
:param str monitor_name: Name of the monitor used. Can be None.
"""
raise NotImplementedError()
def write_reduction(self, algorithm, data):
"""Write one reduction
:param ImageReductionFilter algorithm: Algorithm used
:param object data: Data of this reduction
"""
raise NotImplementedError()
def close(self):
"""Close the writer. Must not be used anymore."""
raise NotImplementedError()
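# Illustrative sketch (not part of the original module): a minimal concrete
# AverageWriter that keeps results in memory instead of writing files. Only
# the three-method interface above is taken from the source; the class name
# and behaviour are ours.
class _InMemoryAverageWriter(AverageWriter):
    def __init__(self):
        self.header = {}
        self.results = {}
    def write_header(self, merged_files, nb_frames, monitor_name):
        self.header = {"nfiles": len(merged_files),
                       "nframes": nb_frames,
                       "monitor_name": monitor_name}
    def write_reduction(self, algorithm, data):
        # Keyed by the algorithm's name attribute, one entry per reduction.
        self.results[algorithm.name] = data
    def close(self):
        self.results = None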
class MultiFilesAverageWriter(AverageWriter):
"""Write reductions into multi files. File headers are duplicated."""
def __init__(self, file_name_pattern, file_format, dry_run=False):
"""
:param str file_name_pattern: File name pattern for the output files.
If it contains "{method_name}", it is updated for each
reduction writing with the name of the reduction.
:param str file_format: File format used. It is the default
extension file.
:param bool dry_run: If dry_run, the file is created on memory but not
saved on the file system at the end
"""
self._file_name_pattern = file_name_pattern
self._global_header = {}
self._fabio_images = weakref.WeakKeyDictionary()
self._dry_run = dry_run
# in case "edf.gz"
if "." in file_format:
file_format = file_format.split(".")[0]
self._fabio_class = fabio.factory(file_format + "image")
def write_header(self, merged_files, nb_frames, monitor_name):
self._global_header["nfiles"] = len(merged_files)
self._global_header["nframes"] = nb_frames
if monitor_name is not None:
self._global_header["monitor_name"] = monitor_name
pattern = "merged_file_%%0%ii" % len(str(len(merged_files)))
for i, f in enumerate(merged_files):
name = pattern % i
self._global_header[name] = f.filename
def _get_file_name(self, reduction_name):
keys = {"method_name": reduction_name}
return stringutil.safe_format(self._file_name_pattern, keys)
def write_reduction(self, algorithm, data):
file_name = self._get_file_name(algorithm.name)
# overwrite the method
header = fabio.fabioimage.OrderedDict()
header["method"] = algorithm.name
for name, value in self._global_header.items():
header[name] = str(value)
filter_parameters = algorithm.get_parameters()
for name, value in filter_parameters.items():
header[name] = str(value)
image = self._fabio_class.__class__(data=data, header=header)
if not self._dry_run:
image.write(file_name)
logger.info("Wrote %s", file_name)
self._fabio_images[algorithm] = image
def get_fabio_image(self, algorithm):
"""Get the constructed fabio image
:rtype: fabio.fabioimage.FabioImage
"""
return self._fabio_images[algorithm]
def close(self):
"""Close the writer. Must not be used anymore."""
        self._global_header = None
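# Illustrative sketch (not part of the original module): a writer whose
# "{method_name}" placeholder yields one EDF file per reduction, e.g.
# "averaged_mean.edf". The pattern and file names here are hypothetical.
def _example_multi_files_writer(dry_run=True):
    writer = MultiFilesAverageWriter("averaged_{method_name}.edf", "edf", dry_run=dry_run)
    # Feeding it through an Average run (or manual write_header()/write_reduction()
    # calls) produces one file per algorithm; with dry_run=True the fabio images
    # stay in memory and can be retrieved with get_fabio_image().
    return writer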
def common_prefix(string_list):
"""Return the common prefix of a list of strings
TODO: move it into utils package
:param list(str) string_list: List of strings
:rtype: str
"""
prefix = ""
    for ch in zip(*string_list):
c = ch[0]
good = True
for i in ch:
if i != c:
good = False
break
if good:
prefix += c
else:
break
return prefix
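# Illustrative sketch (not part of the original module): expected behaviour of
# common_prefix() on a typical list of file names.
def _example_common_prefix():
    assert common_prefix(["dark_0001.edf", "dark_0002.edf"]) == "dark_000"
    assert common_prefix(["abc", "xyz"]) == ""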
class AverageObserver(object):
def image_loaded(self, fabio_image, image_index, images_count):
"""Called when an input image is loaded"""
pass
def process_started(self):
"""Called when the full processing is started"""
pass
def algorithm_started(self, algorithm):
"""Called when an algorithm is started"""
pass
def frame_processed(self, algorithm, frame_index, frames_count):
"""Called after providing a frame to an algorithm"""
pass
def result_processing(self, algorithm):
"""Called before the result of an algorithm is computed"""
pass
def algorithm_finished(self, algorithm):
"""Called when an algorithm is finished"""
pass
def process_finished(self):
"""Called when the full process is finished"""
pass
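# Illustrative sketch (not part of the original module): an observer that logs
# coarse progress. Only the callback names above come from the source; the
# logging behaviour is ours.
class _LoggingAverageObserver(AverageObserver):
    def image_loaded(self, fabio_image, image_index, images_count):
        logger.info("Loaded image %d/%d", image_index + 1, images_count)
    def algorithm_started(self, algorithm):
        logger.info("Reduction '%s' started", algorithm.name)
    def frame_processed(self, algorithm, frame_index, frames_count):
        logger.debug("Frame %d/%d fed to '%s'", frame_index + 1, frames_count, algorithm.name)
    def algorithm_finished(self, algorithm):
        logger.info("Reduction '%s' finished", algorithm.name)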
class Average(object):
"""Process images to generate an average using different algorithms."""
def __init__(self):
"""Constructor"""
self._dark = None
self._raw_flat = None
self._flat = None
self._monitor_key = None
self._threshold = None
self._minimum = None
self._maximum = None
self._fabio_images = []
self._writer = None
self._algorithms = []
self._nb_frames = 0
self._correct_flat_from_dark = False
self._results = weakref.WeakKeyDictionary()
self._observer = None
def set_observer(self, observer):
"""Set an observer to the average process.
:param AverageObserver observer: An observer
"""
self._observer = observer
def set_dark(self, dark_list):
"""Defines images used as dark.
        :param list dark_list: List of dark frames used
"""
if dark_list is None:
self._dark = None
return
darks = _normalize_image_stack(dark_list)
self._dark = average_dark(darks, center_method="mean", cutoff=4)
def set_flat(self, flat_list):
"""Defines images used as flat.
        :param list flat_list: List of flat frames used
"""
if flat_list is None:
self._raw_flat = None
return
flats = _normalize_image_stack(flat_list)
self._raw_flat = average_dark(flats, center_method="mean", cutoff=4)
def set_correct_flat_from_dark(self, correct_flat_from_dark):
"""Defines if the dark must be applied on the flat.
:param bool correct_flat_from_dark: If true, the dark is applied.
"""
self._correct_flat_from_dark = correct_flat_from_dark
def get_counter_frames(self):
"""Returns the number of frames used for the process.
:rtype: int
"""
return self._nb_frames
def get_fabio_images(self):
"""Returns source images as fabio images.
:rtype: list(fabio.fabioimage.FabioImage)"""
return self._fabio_images
def set_images(self, image_list):
"""Defines the set set of source images to used to process an average.
:param list image_list: List of filename, numpy arrays, fabio images
used as source for the computation.
"""
self._fabio_images = []
self._nb_frames = 0
if len(image_list) > 100:
            # if too many files are opened, it may crash. The hard limit is 1024
copy_data = True
else:
copy_data = False
for image_index, image in enumerate(image_list):
if isinstance(image, six.string_types):
logger.info("Reading %s", image)
fabio_image = fabio.open(image)
if copy_data and fabio_image.nframes == 1:
# copy the data so that we can close the file right now.
fimg = fabio_image.convert(fabio_image.__class__)
fimg.filename = image
fabio_image.close()
fabio_image = fimg
elif isinstance(image, fabio.fabioimage.fabioimage):
fabio_image = image
else:
if fabio.hexversion < 262148:
logger.error("Old version of fabio detected, upgrade to 0.4 or newer")
# Assume this is a numpy array like
if not isinstance(image, numpy.ndarray):
raise RuntimeError("Not good type for input, got %s, expected numpy array" % type(image))
fabio_image = fabio.numpyimage.NumpyImage(data=image)
if self._observer:
self._observer.image_loaded(fabio_image, image_index, len(image_list))
self._fabio_images.append(fabio_image)
self._nb_frames += fabio_image.nframes
def set_monitor_name(self, monitor_name):
"""Defines the monitor name | |
SIMX_HEADER_SIZE =18
simx_headeroffset_crc =0 # 1 simxUShort. Generated by the client or server. The CRC for the message
simx_headeroffset_version =2 # 1 byte. Generated by the client or server. The version of the remote API software
simx_headeroffset_message_id =3 # 1 simxInt. Generated by the client (and used in a reply by the server)
simx_headeroffset_client_time =7 # 1 simxInt. Client time stamp generated by the client (and sent back by the server)
simx_headeroffset_server_time =11 # 1 simxInt. Generated by the server when a reply is generated. The server timestamp
simx_headeroffset_scene_id =15 # 1 simxUShort. Generated by the server. A unique ID identifying the scene currently displayed
simx_headeroffset_server_state =17 # 1 byte. Generated by the server. Bit coded 0 set --> simulation not stopped 1 set --> simulation paused 2 set --> real-time switch on 3-5 edit mode type (0=no edit mode 1=triangle 2=vertex 3=edge 4=path 5=UI)
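# Illustrative sketch (not part of the original binding): decoding a raw
# message header with the offsets above. Little-endian packing is an
# assumption of this example, not a statement from the source.
def _example_parse_message_header(raw_bytes):
    import struct
    return {
        'crc': struct.unpack_from('<H', raw_bytes, simx_headeroffset_crc)[0],
        'version': struct.unpack_from('<B', raw_bytes, simx_headeroffset_version)[0],
        'message_id': struct.unpack_from('<i', raw_bytes, simx_headeroffset_message_id)[0],
        'client_time': struct.unpack_from('<i', raw_bytes, simx_headeroffset_client_time)[0],
        'server_time': struct.unpack_from('<i', raw_bytes, simx_headeroffset_server_time)[0],
        'scene_id': struct.unpack_from('<H', raw_bytes, simx_headeroffset_scene_id)[0],
        'server_state': struct.unpack_from('<B', raw_bytes, simx_headeroffset_server_state)[0],
    }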
# Remote API command header
SIMX_SUBHEADER_SIZE =26
simx_cmdheaderoffset_mem_size =0 # 1 simxInt. Generated by the client or server. The buffer size of the command.
simx_cmdheaderoffset_full_mem_size =4 # 1 simxInt. Generated by the client or server. The full buffer size of the command (applies to split chunks).
simx_cmdheaderoffset_pdata_offset0 =8 # 1 simxUShort. Generated by the client or server. The amount of data that is part of the command identification.
simx_cmdheaderoffset_pdata_offset1 =10 # 1 simxInt. Generated by the client or server. The amount of shift of the pure data buffer (applies to split chunks).
simx_cmdheaderoffset_cmd=14 # 1 simxInt. Generated by the client (and used in a reply by the server). The command combined with the operation mode of the command.
simx_cmdheaderoffset_delay_or_split =18 # 1 simxUShort. Generated by the client or server. The amount of delay in ms of a continuous command or the max. pure data size to send at once (applies to split commands).
simx_cmdheaderoffset_sim_time =20 # 1 simxInt. Generated by the server. The simulation time (in ms) when the command was executed (or 0 if simulation is not running)
simx_cmdheaderoffset_status =24 # 1 byte. Generated by the server. (1 bit 0 is set --> error in function execution on server side). The client writes bit 1 if command cannot be overwritten
simx_cmdheaderoffset_reserved =25 # 1 byte. Not yet used
# Regular operation modes
simx_opmode_oneshot =0x000000 # sends command as one chunk. Reply will also come as one chunk. Doesn't wait for the reply.
simx_opmode_blocking =0x010000 # sends command as one chunk. Reply will also come as one chunk. Waits for the reply (_REPLY_WAIT_TIMEOUT_IN_MS is the timeout).
simx_opmode_oneshot_wait =0x010000 # sends command as one chunk. Reply will also come as one chunk. Waits for the reply (_REPLY_WAIT_TIMEOUT_IN_MS is the timeout).
simx_opmode_continuous =0x020000
simx_opmode_streaming =0x020000 # sends command as one chunk. Command will be stored on the server and always executed
#(every x ms (as far as possible) where x can be 0-65535. just add x to opmode_continuous).
# A reply will be sent continuously each time as one chunk. Doesn't wait for the reply.
# Operation modes for heavy data
simx_opmode_oneshot_split =0x030000 # sends command as several chunks (max chunk size is x bytes where x can be _MIN_SPLIT_AMOUNT_IN_BYTES-65535. Just add x to opmode_oneshot_split). Reply will also come as several chunks. Doesn't wait for the reply.
simx_opmode_continuous_split =0x040000
simx_opmode_streaming_split =0x040000 # sends command as several chunks (max chunk size is x bytes where x can be _MIN_SPLIT_AMOUNT_IN_BYTES-65535. Just add x to opmode_continuous_split). Command will be stored on the server and always executed. A reply will be sent continuously each time as several chunks. Doesn't wait for the reply.
# Special operation modes
simx_opmode_discontinue =0x050000 # removes and cancels all commands stored on the client or server side (also continuous commands)
simx_opmode_buffer =0x060000 # doesn't send anything but checks if a reply for the given command is available in the input buffer (i.e. previously received from the server)
simx_opmode_remove =0x070000 # doesn't send anything and doesn't return any specific value. It just erases a similar command reply in the inbox (to free some memory)
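# Illustrative sketch (not part of this constants block): the usual
# streaming/buffer pattern built on the operation modes above. It assumes the
# simxStart, simxGetObjectHandle, simxGetObjectPosition and simxFinish wrappers
# defined later in this module; 'Cuboid' and the connection settings are
# hypothetical.
def _example_streaming_read():
    import time
    clientID = simxStart('127.0.0.1', 19997, True, True, 5000, 5)
    if clientID == -1:
        return
    _, handle = simxGetObjectHandle(clientID, 'Cuboid', simx_opmode_blocking)
    # First call with streaming: asks the server to keep sending the value.
    simxGetObjectPosition(clientID, handle, -1, simx_opmode_streaming)
    for _ in range(10):
        # Subsequent calls with buffer: read the latest value already received.
        ret, pos = simxGetObjectPosition(clientID, handle, -1, simx_opmode_buffer)
        if ret == simx_return_ok:
            print(pos)
        time.sleep(0.05)
    simxFinish(clientID)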
# Command return codes
simx_return_ok =0x000000
simx_return_novalue_flag =0x000001 # input buffer doesn't contain the specified command
simx_return_timeout_flag =0x000002 # command reply not received in time for opmode_oneshot_wait operation mode
simx_return_illegal_opmode_flag =0x000004 # command doesn't support the specified operation mode
simx_return_remote_error_flag =0x000008 # command caused an error on the server side
simx_return_split_progress_flag =0x000010 # previous similar command not yet fully processed (applies to opmode_oneshot_split operation modes)
simx_return_local_error_flag =0x000020 # command caused an error on the client side
simx_return_initialize_error_flag =0x000040 # simxStart was not yet called
# Following for backward compatibility (same as above)
simx_error_noerror =0x000000
simx_error_novalue_flag =0x000001 # input buffer doesn't contain the specified command
simx_error_timeout_flag =0x000002 # command reply not received in time for opmode_oneshot_wait operation mode
simx_error_illegal_opmode_flag =0x000004 # command doesn't support the specified operation mode
simx_error_remote_error_flag =0x000008 # command caused an error on the server side
simx_error_split_progress_flag =0x000010 # previous similar command not yet fully processed (applies to opmode_oneshot_split operation modes)
simx_error_local_error_flag =0x000020 # command caused an error on the client side
simx_error_initialize_error_flag =0x000040 # simxStart was not yet called
# vrepConst.py
#load library
libsimx = None
try:
file_extension = '.so'
if platform.system() =='cli':
file_extension = '.dll'
elif platform.system() =='Windows':
file_extension = '.dll'
elif platform.system() == 'Darwin':
file_extension = '.dylib'
else:
file_extension = '.so'
libfullpath = os.path.join(os.path.dirname(__file__), 'remoteApi' + file_extension)
libsimx = ct.CDLL(libfullpath)
except:
print ('----------------------------------------------------')
print ('The remoteApi library could not be loaded. Make sure')
print ('it is located in the same folder as "vrep.py", or')
print ('appropriately adjust the file "vrep.py"')
print ('----------------------------------------------------')
print ('')
#ctypes wrapper prototypes
c_GetJointPosition = ct.CFUNCTYPE(ct.c_int32,ct.c_int32, ct.c_int32, ct.POINTER(ct.c_float), ct.c_int32)(("simxGetJointPosition", libsimx))
c_SetJointPosition = ct.CFUNCTYPE(ct.c_int32,ct.c_int32, ct.c_int32, ct.c_float, ct.c_int32)(("simxSetJointPosition", libsimx))
c_GetJointMatrix = ct.CFUNCTYPE(ct.c_int32,ct.c_int32, ct.c_int32, ct.POINTER(ct.c_float), ct.c_int32)(("simxGetJointMatrix", libsimx))
c_SetSphericalJointMatrix = ct.CFUNCTYPE(ct.c_int32,ct.c_int32, ct.c_int32, ct.POINTER(ct.c_float), ct.c_int32)(("simxSetSphericalJointMatrix", libsimx))
c_SetJointTargetVelocity = ct.CFUNCTYPE(ct.c_int32,ct.c_int32, ct.c_int32, ct.c_float, ct.c_int32)(("simxSetJointTargetVelocity", libsimx))
c_SetJointTargetPosition = ct.CFUNCTYPE(ct.c_int32,ct.c_int32, ct.c_int32, ct.c_float, ct.c_int32)(("simxSetJointTargetPosition", libsimx))
c_GetJointForce = ct.CFUNCTYPE(ct.c_int32,ct.c_int32, ct.c_int32, ct.POINTER(ct.c_float), ct.c_int32)(("simxGetJointForce", libsimx))
c_SetJointForce = ct.CFUNCTYPE(ct.c_int32,ct.c_int32, ct.c_int32, ct.c_float, ct.c_int32)(("simxSetJointForce", libsimx))
c_ReadForceSensor = ct.CFUNCTYPE(ct.c_int32,ct.c_int32, ct.c_int32, ct.POINTER(ct.c_ubyte), ct.POINTER(ct.c_float), ct.POINTER(ct.c_float), ct.c_int32)(("simxReadForceSensor", libsimx))
c_BreakForceSensor = ct.CFUNCTYPE(ct.c_int32,ct.c_int32, ct.c_int32, ct.c_int32)(("simxBreakForceSensor", libsimx))
c_ReadVisionSensor = ct.CFUNCTYPE(ct.c_int32,ct.c_int32, ct.c_int32, ct.POINTER(ct.c_ubyte), ct.POINTER(ct.POINTER(ct.c_float)), ct.POINTER(ct.POINTER(ct.c_int32)), ct.c_int32)(("simxReadVisionSensor", libsimx))
c_GetObjectHandle = ct.CFUNCTYPE(ct.c_int32,ct.c_int32, ct.POINTER(ct.c_char), ct.POINTER(ct.c_int32), ct.c_int32)(("simxGetObjectHandle", libsimx))
c_GetVisionSensorImage = ct.CFUNCTYPE(ct.c_int32,ct.c_int32, ct.c_int32, ct.POINTER(ct.c_int32), ct.POINTER(ct.POINTER(ct.c_byte)), ct.c_ubyte, ct.c_int32)(("simxGetVisionSensorImage", libsimx))
c_SetVisionSensorImage = ct.CFUNCTYPE(ct.c_int32,ct.c_int32, ct.c_int32, ct.POINTER(ct.c_byte), ct.c_int32, ct.c_ubyte, ct.c_int32)(("simxSetVisionSensorImage", libsimx))
c_GetVisionSensorDepthBuffer= ct.CFUNCTYPE(ct.c_int32,ct.c_int32, ct.c_int32, ct.POINTER(ct.c_int32), ct.POINTER(ct.POINTER(ct.c_float)), ct.c_int32)(("simxGetVisionSensorDepthBuffer", libsimx))
c_GetObjectChild = ct.CFUNCTYPE(ct.c_int32,ct.c_int32, ct.c_int32, ct.c_int32, ct.POINTER(ct.c_int32), ct.c_int32)(("simxGetObjectChild", libsimx))
c_GetObjectParent = ct.CFUNCTYPE(ct.c_int32,ct.c_int32, ct.c_int32, ct.POINTER(ct.c_int32), ct.c_int32)(("simxGetObjectParent", libsimx))
c_ReadProximitySensor = ct.CFUNCTYPE(ct.c_int32,ct.c_int32, ct.c_int32, ct.POINTER(ct.c_ubyte), ct.POINTER(ct.c_float), ct.POINTER(ct.c_int32), ct.POINTER(ct.c_float), ct.c_int32)(("simxReadProximitySensor", libsimx))
c_LoadModel = ct.CFUNCTYPE(ct.c_int32,ct.c_int32, ct.POINTER(ct.c_char), ct.c_ubyte, ct.POINTER(ct.c_int32), ct.c_int32)(("simxLoadModel", libsimx))
c_LoadUI = ct.CFUNCTYPE(ct.c_int32,ct.c_int32, ct.POINTER(ct.c_char), ct.c_ubyte, ct.POINTER(ct.c_int32), ct.POINTER(ct.POINTER(ct.c_int32)), ct.c_int32)(("simxLoadUI", libsimx))
c_LoadScene = ct.CFUNCTYPE(ct.c_int32,ct.c_int32, ct.POINTER(ct.c_char), ct.c_ubyte, ct.c_int32)(("simxLoadScene", libsimx))
c_StartSimulation = ct.CFUNCTYPE(ct.c_int32,ct.c_int32, ct.c_int32)(("simxStartSimulation", libsimx))
c_PauseSimulation = ct.CFUNCTYPE(ct.c_int32,ct.c_int32, ct.c_int32)(("simxPauseSimulation", libsimx))
c_StopSimulation = ct.CFUNCTYPE(ct.c_int32,ct.c_int32, ct.c_int32)(("simxStopSimulation", libsimx))
c_GetUIHandle = ct.CFUNCTYPE(ct.c_int32,ct.c_int32, ct.POINTER(ct.c_char), ct.POINTER(ct.c_int32), ct.c_int32)(("simxGetUIHandle", libsimx))
c_GetUISlider = ct.CFUNCTYPE(ct.c_int32,ct.c_int32, ct.c_int32, ct.c_int32, ct.POINTER(ct.c_int32), ct.c_int32)(("simxGetUISlider", libsimx))
c_SetUISlider = ct.CFUNCTYPE(ct.c_int32,ct.c_int32, ct.c_int32, ct.c_int32, ct.c_int32, ct.c_int32)(("simxSetUISlider", libsimx))
c_GetUIEventButton = ct.CFUNCTYPE(ct.c_int32,ct.c_int32, ct.c_int32, ct.POINTER(ct.c_int32), ct.POINTER(ct.c_int32), ct.c_int32)(("simxGetUIEventButton", libsimx))
c_GetUIButtonProperty = ct.CFUNCTYPE(ct.c_int32,ct.c_int32, ct.c_int32, ct.c_int32, ct.POINTER(ct.c_int32), ct.c_int32)(("simxGetUIButtonProperty", libsimx))
c_SetUIButtonProperty = ct.CFUNCTYPE(ct.c_int32,ct.c_int32, ct.c_int32, ct.c_int32, ct.c_int32, ct.c_int32)(("simxSetUIButtonProperty", libsimx))
c_AddStatusbarMessage = ct.CFUNCTYPE(ct.c_int32,ct.c_int32, ct.POINTER(ct.c_char), ct.c_int32)(("simxAddStatusbarMessage", libsimx))
c_AuxiliaryConsoleOpen = ct.CFUNCTYPE(ct.c_int32,ct.c_int32, ct.POINTER(ct.c_char), ct.c_int32, ct.c_int32, ct.POINTER(ct.c_int32), ct.POINTER(ct.c_int32), ct.POINTER(ct.c_float), ct.POINTER(ct.c_float), ct.POINTER(ct.c_int32), ct.c_int32)(("simxAuxiliaryConsoleOpen", libsimx))
c_AuxiliaryConsoleClose = ct.CFUNCTYPE(ct.c_int32,ct.c_int32, ct.c_int32, ct.c_int32)(("simxAuxiliaryConsoleClose", libsimx))
c_AuxiliaryConsolePrint = ct.CFUNCTYPE(ct.c_int32,ct.c_int32, ct.c_int32, ct.POINTER(ct.c_char), ct.c_int32)(("simxAuxiliaryConsolePrint", libsimx))
c_AuxiliaryConsoleShow = ct.CFUNCTYPE(ct.c_int32,ct.c_int32, ct.c_int32, ct.c_ubyte, ct.c_int32)(("simxAuxiliaryConsoleShow", libsimx))
c_GetObjectOrientation = ct.CFUNCTYPE(ct.c_int32,ct.c_int32, ct.c_int32, ct.c_int32, ct.POINTER(ct.c_float), ct.c_int32)(("simxGetObjectOrientation", libsimx))
c_GetObjectPosition = ct.CFUNCTYPE(ct.c_int32,ct.c_int32, ct.c_int32, ct.c_int32, ct.POINTER(ct.c_float), ct.c_int32)(("simxGetObjectPosition", libsimx))
c_SetObjectOrientation = ct.CFUNCTYPE(ct.c_int32,ct.c_int32, ct.c_int32, ct.c_int32, ct.POINTER(ct.c_float), ct.c_int32)(("simxSetObjectOrientation", libsimx))
c_SetObjectPosition = ct.CFUNCTYPE(ct.c_int32,ct.c_int32, ct.c_int32, ct.c_int32, ct.POINTER(ct.c_float), ct.c_int32)(("simxSetObjectPosition", libsimx))
c_SetObjectParent = ct.CFUNCTYPE(ct.c_int32,ct.c_int32, ct.c_int32, ct.c_int32, ct.c_ubyte, ct.c_int32)(("simxSetObjectParent", libsimx))
c_SetUIButtonLabel = ct.CFUNCTYPE(ct.c_int32,ct.c_int32, ct.c_int32, ct.c_int32, ct.POINTER(ct.c_char), ct.POINTER(ct.c_char), ct.c_int32)(("simxSetUIButtonLabel", libsimx))
c_GetLastErrors = ct.CFUNCTYPE(ct.c_int32,ct.c_int32, ct.POINTER(ct.c_int32), ct.POINTER(ct.POINTER(ct.c_char)), ct.c_int32)(("simxGetLastErrors", libsimx))
c_GetArrayParameter = ct.CFUNCTYPE(ct.c_int32,ct.c_int32, ct.c_int32, ct.POINTER(ct.c_float), ct.c_int32)(("simxGetArrayParameter", libsimx))
c_SetArrayParameter = ct.CFUNCTYPE(ct.c_int32,ct.c_int32, ct.c_int32, ct.POINTER(ct.c_float), ct.c_int32)(("simxSetArrayParameter", libsimx))
c_GetBooleanParameter = ct.CFUNCTYPE(ct.c_int32,ct.c_int32, ct.c_int32, ct.POINTER(ct.c_ubyte), ct.c_int32)(("simxGetBooleanParameter", libsimx))
c_SetBooleanParameter = ct.CFUNCTYPE(ct.c_int32,ct.c_int32, ct.c_int32, ct.c_ubyte, ct.c_int32)(("simxSetBooleanParameter", libsimx))
c_GetIntegerParameter = ct.CFUNCTYPE(ct.c_int32,ct.c_int32, ct.c_int32, ct.POINTER(ct.c_int32), ct.c_int32)(("simxGetIntegerParameter", libsimx))
c_SetIntegerParameter = ct.CFUNCTYPE(ct.c_int32,ct.c_int32, ct.c_int32, ct.c_int32, ct.c_int32)(("simxSetIntegerParameter", libsimx))
c_GetFloatingParameter = ct.CFUNCTYPE(ct.c_int32,ct.c_int32, ct.c_int32, ct.POINTER(ct.c_float), ct.c_int32)(("simxGetFloatingParameter", libsimx))
c_SetFloatingParameter = ct.CFUNCTYPE(ct.c_int32,ct.c_int32, ct.c_int32, ct.c_float, ct.c_int32)(("simxSetFloatingParameter", libsimx))
c_GetStringParameter = ct.CFUNCTYPE(ct.c_int32,ct.c_int32, ct.c_int32, ct.POINTER(ct.POINTER(ct.c_char)), ct.c_int32)(("simxGetStringParameter", libsimx))
c_GetCollisionHandle = ct.CFUNCTYPE(ct.c_int32,ct.c_int32, ct.POINTER(ct.c_char), ct.POINTER(ct.c_int32), ct.c_int32)(("simxGetCollisionHandle", libsimx))
c_GetDistanceHandle = ct.CFUNCTYPE(ct.c_int32,ct.c_int32, ct.POINTER(ct.c_char), ct.POINTER(ct.c_int32), ct.c_int32)(("simxGetDistanceHandle", libsimx))
c_GetCollectionHandle = ct.CFUNCTYPE(ct.c_int32,ct.c_int32, ct.POINTER(ct.c_char), ct.POINTER(ct.c_int32), ct.c_int32)(("simxGetCollectionHandle", libsimx))
c_ReadCollision = ct.CFUNCTYPE(ct.c_int32,ct.c_int32, ct.c_int32, ct.POINTER(ct.c_ubyte), ct.c_int32)(("simxReadCollision", libsimx))
c_ReadDistance = ct.CFUNCTYPE(ct.c_int32,ct.c_int32, ct.c_int32, ct.POINTER(ct.c_float), ct.c_int32)(("simxReadDistance", libsimx))
c_RemoveObject = ct.CFUNCTYPE(ct.c_int32,ct.c_int32, ct.c_int32, ct.c_int32)(("simxRemoveObject", libsimx))
c_RemoveModel = ct.CFUNCTYPE(ct.c_int32,ct.c_int32, ct.c_int32, ct.c_int32)(("simxRemoveModel", libsimx))
c_RemoveUI = ct.CFUNCTYPE(ct.c_int32,ct.c_int32, ct.c_int32, ct.c_int32)(("simxRemoveUI", libsimx))
c_CloseScene = ct.CFUNCTYPE(ct.c_int32,ct.c_int32, ct.c_int32)(("simxCloseScene", libsimx))
c_GetObjects = | |
import unittest
import sys
from robot.variables import variables, is_list_var, is_scalar_var, is_var
from robot.errors import *
from robot import utils
from robot.utils.asserts import *
SCALARS = [ '${var}', '${ v A R }' ]
LISTS = [ '@{var}', '@{ v A R }' ]
NOKS = [ 'var', '$var', '${var', '${va}r', '@{va}r', '@var', '%{var}',
' ${var}', '@{var} ', '\\${var}', '\\\\${var}' ]
# Simple objects needed when testing assigning objects to variables.
# JavaObject lives in '../../acceptance/testdata/libraries'
class PythonObject:
def __init__(self, a, b):
self.a = a
self.b = b
def __str__(self):
return '(%s, %s)' % (self.a, self.b)
__repr__ = __str__
if utils.is_jython:
import JavaObject
class TestIsMethods(unittest.TestCase):
def test_is_var(self):
for ok in SCALARS + LISTS:
assert is_var(ok)
for nok in NOKS:
assert not is_var(nok)
def test_is_scalar_var(self):
for ok in SCALARS:
assert is_scalar_var(ok)
for nok in LISTS + NOKS:
assert not is_scalar_var(nok)
def test_is_list_var(self):
for ok in LISTS:
assert is_list_var(ok)
for nok in SCALARS + NOKS:
assert not is_list_var(nok)
class TestVariables(unittest.TestCase):
def setUp(self):
self.varz = variables.Variables()
def test_set(self):
for var in SCALARS + LISTS:
assert not self.varz.has_key(var)
self.varz[var] = ['value']
assert self.varz.has_key(var), var
assert self.varz.has_key(var.lower().replace(' ',''))
self.varz.clear()
def test_set_invalid(self):
for var in NOKS:
try:
self.varz[var] = ['value']
except DataError:
pass
else:
fail()
def test_set_scalar(self):
for var in SCALARS:
for value in [ 'str', 10, ['hi','u'], ['hi',2], {'a':1,'b':2}, self,
None, unittest.TestCase ]:
self.varz[var] = value
act = self.varz[var]
assert act == value, '%s: %s %s != %s %s' \
% (var, act, type(act), value, type(value))
self.varz.clear()
self.varz['${myvar}'] = ''
assert_equals(self.varz['${myvar}'], '')
def test_update(self):
self.varz['${a}'] = 1
self.varz.update({'${b}':2})
for k, v in [('${a}', 1), ('${b}', 2)]:
assert_true(k in self.varz)
assert_true(k in self.varz.keys())
assert_equals(self.varz[k], v)
def test_update_invalid(self):
self.varz['${a}'] = 1
assert_raises(DataError, self.varz.update, {'invalid variable name':2})
def test_set_list(self):
for var in LISTS:
for value in [ [], [''], ['str'], [10], ['hi','u'], ['hi',2],
[{'a':1,'b':2}, self, None] ]:
self.varz[var] = value
assert_equals(self.varz[var], value)
self.varz.clear()
def test_getitem_invalid(self):
for var in NOKS:
self.assertRaises(DataError, self.varz.__getitem__, var)
def test_has_key(self):
self.varz['${k}'] = 'v'
assert self.varz.has_key('${k}')
assert self.varz.has_key('${1}')
assert self.varz.has_key('${k.upper()}')
assert not self.varz.has_key('${non-existing}')
def test_contains(self):
self.varz['${k}'] = 'v'
assert '${k}' in self.varz
assert '${-3}' in self.varz
assert '${k.upper()}' in self.varz
assert '${nok}' not in self.varz
def test_replace_scalar(self):
self.varz['${foo}'] = 'bar'
self.varz['${a}'] = 'ari'
for inp, exp in [ ('${foo}','bar'), ('${a}','ari'),
('${a','${a'), ('',''), ('hii','hii'),
("Let's go to ${foo}!", "Let's go to bar!"),
('${foo}ba${a}-${a}', 'barbaari-ari') ]:
assert_equals(self.varz.replace_scalar(inp), exp)
def test_replace_list(self):
self.varz['@{L}'] = ['v1','v2']
self.varz['@{E}'] = []
self.varz['@{S}'] = ['1','2','3']
for inp, exp in [ (['@{L}'], ['v1','v2']),
(['@{L}','v3'], ['v1','v2','v3']),
(['v0','@{L}','@{E}','v@{S}[2]'], ['v0','v1','v2','v3']),
([], []), (['hi u','hi 2',3], ['hi u','hi 2',3]) ]:
assert_equals(self.varz.replace_list(inp), exp)
def test_replace_list_in_scalar_context(self):
self.varz['@{list}'] = ['v1','v2']
assert_equals(self.varz.replace_list(['@{list}']), ['v1', 'v2'])
assert_equals(self.varz.replace_list(['-@{list}-']), ["-['v1', 'v2']-"])
def test_replace_list_item(self):
self.varz['@{L}'] = ['v0','v1']
assert_equal(self.varz.replace_list(['@{L}[0]']), ['v0'])
assert_equal(self.varz.replace_scalar('@{L}[1]'), 'v1')
assert_equal(self.varz.replace_scalar('-@{L}[0]@{L}[1]@{L}[0]-'), '-v0v1v0-')
self.varz['@{L2}'] = ['v0',['v11','v12']]
assert_equal(self.varz.replace_list(['@{L2}[0]']), ['v0'])
assert_equal(self.varz.replace_list(['@{L2}[1]']), [['v11','v12']])
assert_equal(self.varz.replace_scalar('@{L2}[0]'), 'v0')
assert_equal(self.varz.replace_scalar('@{L2}[1]'), ['v11','v12'])
assert_equal(self.varz.replace_list(['@{L}[0]','@{L2}[1]']), ['v0',['v11','v12']])
def test_replace_non_strings(self):
self.varz['${d}'] = {'a':1,'b':2}
self.varz['${n}'] = None
assert_equal(self.varz.replace_scalar('${d}'), {'a':1,'b':2})
assert_equal(self.varz.replace_scalar('${n}'), None)
def test_replace_non_strings_inside_string(self):
class Example:
def __str__(self):
return 'Hello'
self.varz['${h}'] = Example()
self.varz['${w}'] = 'world'
res = self.varz.replace_scalar('Another "${h} ${w}" example')
assert_equals(res, 'Another "Hello world" example')
def test_replace_list_item_invalid(self):
self.varz['@{L}'] = ['v0','v1','v3']
for inv in [ '@{L}[3]', '@{NON}[0]', '@{L[2]}' ]:
self.assertRaises(DataError, self.varz.replace_list, [inv])
def test_replace_non_existing_list(self):
self.assertRaises(DataError, self.varz.replace_list, ['${nonexisting}'])
def test_replace_non_existing_scalar(self):
self.assertRaises(DataError, self.varz.replace_scalar, '${nonexisting}')
def test_replace_non_existing_string(self):
self.assertRaises(DataError, self.varz.replace_string, '${nonexisting}')
def test_replace_escaped(self):
self.varz['${foo}'] = 'bar'
for inp, exp in [ (r'\${foo}', r'${foo}'),
(r'\\${foo}', r'\bar'),
(r'\\\${foo}', r'\${foo}'),
(r'\\\\${foo}', r'\\bar'),
(r'\\\\\${foo}', r'\\${foo}') ]:
assert_equals(self.varz.replace_scalar(inp), exp)
def test_variables_in_value(self):
self.varz['${exists}'] = 'Variable exists but is still not replaced'
self.varz['${test}'] = '${exists} & ${does_not_exist}'
assert_equals(self.varz['${test}'], '${exists} & ${does_not_exist}')
self.varz['@{test}'] = ['${exists}', '&', '${does_not_exist}']
assert_equals(self.varz['@{test}'], '${exists} & ${does_not_exist}'.split())
def test_variable_as_object(self):
obj = PythonObject('a', 1)
self.varz['${obj}'] = obj
assert_equals(self.varz['${obj}'], obj)
expected = ['Some text here %s and %s there' % (obj,obj)]
actual = self.varz.replace_list(['Some text here ${obj} and ${obj} there'])
assert_equals(actual, expected)
def test_extended_variables(self):
# Extended variables are vars like ${obj.name} when we have var ${obj}
obj = PythonObject('a', [1,2,3])
dic = { 'a': 1, 'o': obj }
self.varz['${obj}'] = obj
self.varz['${dic}'] = dic
assert_equals(self.varz.replace_scalar('${obj.a}'), 'a')
assert_equals(self.varz.replace_scalar('${obj.b}'), [1,2,3])
assert_equals(self.varz.replace_scalar('${obj.b[0]}-${obj.b[1]}'), '1-2')
assert_equals(self.varz.replace_scalar('${dic["a"]}'), 1)
assert_equals(self.varz.replace_scalar('${dic["o"]}'), obj)
assert_equals(self.varz.replace_scalar('-${dic["o"].b[2]}-'), '-3-')
def test_space_is_not_ignored_after_newline_in_extend_variable_syntax(self):
self.varz['${x}'] = 'test string'
self.varz['${lf}'] = '\\n'
self.varz['${lfs}'] = '\\n '
for inp, exp in [('${x.replace(" ", """\\n""")}', 'test\nstring'),
('${x.replace(" ", """\\n """)}', 'test\n string'),
('${x.replace(" ", """${lf}""")}', 'test\nstring'),
('${x.replace(" ", """${lfs}""")}', 'test\n string')]:
assert_equals(self.varz.replace_scalar(inp), exp)
def test_escaping_with_extended_variable_syntax(self):
self.varz['${p}'] = 'c:\\temp'
assert self.varz['${p}'] == 'c:\\temp'
assert_equals(self.varz.replace_scalar('${p + "\\\\foo.txt"}'),
'c:\\temp\\foo.txt')
def test_internal_variables(self):
# Internal variables are variables like ${my${name}}
self.varz['${name}'] = 'name'
self.varz['${my name}'] = 'value'
assert_equals(self.varz.replace_scalar('${my${name}}'), 'value')
self.varz['${whos name}'] = 'my'
assert_equals(self.varz.replace_scalar('${${whos name} ${name}}'), 'value')
assert_equals(self.varz.replace_scalar('${${whos${name}}${name}}'), 'value')
self.varz['${my name}'] = [1,2,3]
assert_equals(self.varz.replace_scalar('${${whos${name}}${name}}'), [1,2,3])
assert_equals(self.varz.replace_scalar('- ${${whos${name}}${name}} -'), '- [1, 2, 3] -')
def test_math_with_internal_vars(self):
assert_equals(self.varz.replace_scalar('${${1}+${2}}'), 3)
assert_equals(self.varz.replace_scalar('${${1}-${2}}'), -1)
assert_equals(self.varz.replace_scalar('${${1}*${2}}'), 2)
assert_equals(self.varz.replace_scalar('${${1}/${2}}'), 0)
def test_math_with_internal_vars_with_spaces(self):
assert_equals(self.varz.replace_scalar('${${1} + ${2.5}}'), 3.5)
assert_equals(self.varz.replace_scalar('${${1} - ${2} + 1}'), 0)
assert_equals(self.varz.replace_scalar('${${1} * ${2} - 1}'), 1)
assert_equals(self.varz.replace_scalar('${${1} / ${2.0}}'), 0.5)
def test_math_with_internal_vars_does_not_work_if_first_var_is_float(self):
assert_raises(DataError, self.varz.replace_scalar, '${${1.1}+${2}}')
assert_raises(DataError, self.varz.replace_scalar, '${${1.1} - ${2}}')
assert_raises(DataError, self.varz.replace_scalar, '${${1.1} * ${2}}')
assert_raises(DataError, self.varz.replace_scalar, '${${1.1}/${2}}')
def test_list_variable_as_scalar(self):
self.varz['@{name}'] = exp = ['spam', 'eggs']
assert_equals(self.varz.replace_scalar('${name}'), exp)
assert_equals(self.varz.replace_list(['${name}', 42]), [exp, 42])
assert_equals(self.varz.replace_string('${name}'), str(exp))
assert_true(self.varz.has_key('${name}'))
def test_copy(self):
varz = variables.Variables(identifiers=['$'])
varz['${foo}'] = 'bar'
copy = varz.copy()
assert_equals(copy['${foo}'], 'bar')
assert_equals(copy._identifiers, ['$'])
if utils.is_jython:
def test_variable_as_object_in_java(self):
obj = JavaObject('hello')
self.varz['${obj}'] = obj
assert_equals(self.varz['${obj}'], obj)
assert_equals(self.varz.replace_scalar('${obj} world'), 'hello world')
def test_extended_variables_in_java(self):
obj = JavaObject('my name')
self.varz['${obj}'] = obj
assert_equals(self.varz.replace_list(['${obj.name}']), ['my name'])
class TestVariableSplitter(unittest.TestCase):
_identifiers = ['$','@','%','&','*']
def test_empty(self):
self._test('', None)
def test_no_vars(self):
for inp in ['hello world', '$hello', '{hello}', '$\\{hello}',
'${hello', '$hello}' ]:
self._test(inp, None)
def test_backslashes(self):
for inp in ['\\', '\\\\', '\\\\\\\\\\',
'\\hello\\\\world\\\\\\']:
self._test(inp, None)
def test_one_var(self):
self._test('${hello}', '${hello}', 0)
self._test('1 @{hello} more', '@{hello}', 2)
self._test('*{hi}}', '*{hi}', 0)
self._test('{%{{hi}}', '%{{hi}', 1)
self._test('-= ${} =-', '${}', 3)
        # In this case the splitter thinks there are internal variables but there
        # aren't. A better check would probably spend more time than it saves,
        # given that the variable base is processed again in this special case.
self._test('%{hi%{u}', '%{hi%{u}', 0, internal=True)
def test_multiple_vars(self):
self._test('${hello} ${world}', '${hello}', 0)
self._test('hi %{u}2 and @{u2} and also *{us3}', '%{u}', 3)
self._test('0123456789 %{1} and @{2', '%{1}', 11)
def test_escaped_var(self):
self._test('\\${hello}', None)
self._test('hi \\\\\\${hello} moi', None)
def test_not_escaped_var(self):
self._test('\\\\${hello}', '${hello}', 2)
self._test('\\hi \\\\\\\\\\\\${hello} moi', '${hello}',
len('\\hi \\\\\\\\\\\\'))
self._test('\\ ${hello}', '${hello}', 2)
self._test('${hello}\\', '${hello}', 0)
self._test('\\ \\ ${hel\\lo}\\', '${hel\\lo}', 4)
def test_escaped_and_not_escaped_vars(self):
for inp, var, start in [
('\\${esc} ${not}', '${not}', len('\\${esc} ')),
('\\\\\\${esc} \\\\${not}', '${not}',
len('\\\\\\${esc} \\\\')),
('\\${esc}\\\\${not}${n2}', '${not}', len('\\${esc}\\\\')) ]:
self._test(inp, var, start)
def test_internal_vars(self):
for inp, var, start in [
('${hello${hi}}', '${hello${hi}}', 0),
('bef ${${hi}hello} aft', '${${hi}hello}', 4),
('\\${not} ${hel${hi}lo} ', '${hel${hi}lo}', len('\\${not} ')),
('${${hi}${hi}}\\', '${${hi}${hi}}', 0),
('${${hi${hi}}} ${xx}', '${${hi${hi}}}', 0),
('${xx} ${${hi${hi}}}', '${xx}', 0),
('${\\${hi${hi}}}', '${\\${hi${hi}}}', 0),
('\\${${hi${hi}}}', '${hi${hi}}', len('\\${')),
('\\${\\${hi\\\\${hi}}}', '${hi}', len('\\${\\${hi\\\\')) ]:
internal = var.count('{') > 1
self._test(inp, var, start, internal=internal)
def test_index(self):
self._test('@{x}[0]', '@{x}', 0, '0')
self._test('.@{x}[42]..', '@{x}', 1, '42')
self._test('@{x}[${i}] ${xyz}', '@{x}', 0, '${i}')
self._test('@{x}[]', '@{x}', 0, '')
self._test('@{x}[inv]', '@{x}', 0, 'inv')
self._test('@{x}[0', '@{x}', 0, None)
self._test('@{x}}[0]', '@{x}', 0, None)
self._test('${x}[0]', '${x}', 0, None)
self._test('%{x}[0]', '%{x}', 0, None)
self._test('*{x}[0]', '*{x}', 0, None)
self._test('&{x}[0]', '&{x}', 0, None)
def test_custom_identifiers(self):
for inp, start in [ ('@{x}${y}', 4),
('%{x} ${y}', 5),
('*{x}567890${y}', 10),
('&{x}%{x}@{x}\\${x}${y}',
len('&{x}%{x}@{x}\\${x}')) ]:
self._test(inp, '${y}', start, identifiers=['$'])
def test_identifier_as_variable_name(self):
for i in self._identifiers:
for count in 1,2,3,42:
var = '%s{%s}' % (i, i*count)
self._test(var, var)
self._test(var+'spam', var)
self._test('eggs'+var+'spam', var, start=4)
self._test(i+var+i, var, start=1)
def test_identifier_as_variable_name_with_internal_vars(self):
for i in self._identifiers:
for count in 1,2,3,42:
var = '%s{%s{%s}}' % (i, i*count, i)
self._test(var, var, internal=True)
self._test('eggs'+var+'spam', var, start=4, internal=True)
var = '%s{%s{%s}}' % (i, i*count, i*count)
self._test(var, var, internal=True)
self._test('eggs'+var+'spam', var, start=4, internal=True)
def _test(self, inp, variable, start=0, index=None, identifiers=None,
internal=False):
if variable is not None:
identifier = variable[0]
base = variable[2:-1]
end = start + len(variable)
if index is not None:
end += len(index) + 2
else:
identifier = base = None
start = end = -1
if not identifiers:
identifiers = self._identifiers
res = variables.VariableSplitter(inp, identifiers)
assert_equals(res.base, base)
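# ---------------------------------------------------------------------------
# Hypothetical standalone helper (not Robot Framework code) illustrating the
# escaping rule the tests above exercise: a variable marker counts as escaped
# only when it is preceded by an odd number of consecutive backslashes.
# ---------------------------------------------------------------------------
def is_escaped(text, index):
    """Return True if the character at `index` is escaped by backslashes."""
    backslashes = 0
    i = index - 1
    while i >= 0 and text[i] == '\\':
        backslashes += 1
        i -= 1
    return backslashes % 2 == 1

assert is_escaped('\\${hello}', 1)        # '\${hello}'  -> escaped, no variable
assert not is_escaped('\\\\${hello}', 2)  # '\\${hello}' -> variable found at 2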
admin_password,
injected_files)
if rescue:
# NOTE(johannes): Attach root disk to rescue VM now, before
# booting the VM, since we can't hotplug block devices
# on non-PV guests
@step
def attach_root_disk_step(undo_mgr, vm_ref):
orig_vm_ref = vm_utils.lookup(self._session, instance['name'])
vdi_ref = self._find_root_vdi_ref(orig_vm_ref)
vm_utils.create_vbd(self._session, vm_ref, vdi_ref,
DEVICE_RESCUE, bootable=False)
@step
def setup_network_step(undo_mgr, vm_ref, vdis):
self._setup_vm_networking(instance, vm_ref, vdis, network_info,
rescue)
@step
def inject_instance_data_step(undo_mgr, vm_ref):
self.inject_instance_metadata(instance, vm_ref)
self.inject_auto_disk_config(instance, vm_ref)
@step
def prepare_security_group_filters_step(undo_mgr):
try:
self.firewall_driver.setup_basic_filtering(
instance, network_info)
except NotImplementedError:
# NOTE(salvatore-orlando): setup_basic_filtering might be
# empty or not implemented at all, as basic filter could
# be implemented with VIF rules created by xapi plugin
pass
self.firewall_driver.prepare_instance_filter(instance,
network_info)
@step
def boot_instance_step(undo_mgr, vm_ref):
self._boot_new_instance(instance, vm_ref, injected_files,
admin_password)
@step
def apply_security_group_filters_step(undo_mgr):
self.firewall_driver.apply_instance_filter(instance, network_info)
@step
def bdev_set_default_root(undo_mgr):
if block_device_info:
LOG.debug(_("Block device information present: %s")
% block_device_info, instance=instance)
if block_device_info and not block_device_info['root_device_name']:
block_device_info['root_device_name'] = self.default_root_dev
undo_mgr = utils.UndoManager()
try:
# NOTE(sirp): The create_disks() step will potentially take a
# *very* long time to complete since it has to fetch the image
# over the network and images can be several gigs in size. To
# avoid progress remaining at 0% for too long, make sure the
# first step is something that completes rather quickly.
bdev_set_default_root(undo_mgr)
disk_image_type = determine_disk_image_type_step(undo_mgr)
vdis = create_disks_step(undo_mgr, disk_image_type, image_meta)
kernel_file, ramdisk_file = create_kernel_ramdisk_step(undo_mgr)
vm_ref = create_vm_record_step(undo_mgr, vdis, disk_image_type,
kernel_file, ramdisk_file)
attach_disks_step(undo_mgr, vm_ref, vdis, disk_image_type)
setup_network_step(undo_mgr, vm_ref, vdis)
inject_instance_data_step(undo_mgr, vm_ref)
prepare_security_group_filters_step(undo_mgr)
if rescue:
attach_root_disk_step(undo_mgr, vm_ref)
boot_instance_step(undo_mgr, vm_ref)
apply_security_group_filters_step(undo_mgr)
except Exception:
msg = _("Failed to spawn, rolling back")
undo_mgr.rollback_and_reraise(msg=msg, instance=instance)
def _create_vm(self, context, instance, name_label, vdis,
disk_image_type, network_info, kernel_file=None,
ramdisk_file=None, rescue=False):
"""Create VM instance."""
vm_ref = self._create_vm_record(context, instance, name_label,
vdis, disk_image_type, kernel_file, ramdisk_file)
self._attach_disks(instance, vm_ref, name_label, vdis,
disk_image_type)
self._setup_vm_networking(instance, vm_ref, vdis, network_info,
rescue)
self.inject_instance_metadata(instance, vm_ref)
return vm_ref
def _setup_vm_networking(self, instance, vm_ref, vdis, network_info,
rescue):
# Alter the image before VM start for network injection.
if CONF.flat_injected:
vm_utils.preconfigure_instance(self._session, instance,
vdis['root']['ref'], network_info)
self._create_vifs(vm_ref, instance, network_info)
self.inject_network_info(instance, network_info, vm_ref)
hostname = instance['hostname']
if rescue:
hostname = 'RESCUE-%s' % hostname
self.inject_hostname(instance, vm_ref, hostname)
def _create_vm_record(self, context, instance, name_label, vdis,
disk_image_type, kernel_file, ramdisk_file):
"""Create the VM record in Xen, making sure that we do not create
a duplicate name-label. Also do a rough sanity check on memory
to try to short-circuit a potential failure later. (The memory
check only accounts for running VMs, so it can miss other builds
that are in progress.)
"""
vm_ref = vm_utils.lookup(self._session, name_label)
if vm_ref is not None:
raise exception.InstanceExists(name=name_label)
# Ensure enough free memory is available
if not vm_utils.ensure_free_mem(self._session, instance):
raise exception.InsufficientFreeMemory(uuid=instance['uuid'])
mode = self._determine_vm_mode(instance, vdis, disk_image_type)
if instance['vm_mode'] != mode:
# Update database with normalized (or determined) value
self._virtapi.instance_update(context,
instance['uuid'], {'vm_mode': mode})
use_pv_kernel = (mode == vm_mode.XEN)
vm_ref = vm_utils.create_vm(self._session, instance, name_label,
kernel_file, ramdisk_file, use_pv_kernel)
return vm_ref
def _determine_vm_mode(self, instance, vdis, disk_image_type):
current_mode = vm_mode.get_from_instance(instance)
if current_mode == vm_mode.XEN or current_mode == vm_mode.HVM:
return current_mode
is_pv = False
if 'root' in vdis:
os_type = instance['os_type']
vdi_ref = vdis['root']['ref']
is_pv = vm_utils.determine_is_pv(self._session, vdi_ref,
disk_image_type, os_type)
if is_pv:
return vm_mode.XEN
else:
return vm_mode.HVM
def _attach_disks(self, instance, vm_ref, name_label, vdis,
disk_image_type, admin_password=None, files=None):
ctx = nova_context.get_admin_context()
instance_type = flavors.extract_flavor(instance)
# Attach (required) root disk
if disk_image_type == vm_utils.ImageType.DISK_ISO:
# DISK_ISO needs two VBDs: the ISO disk and a blank RW disk
root_disk_size = instance_type['root_gb']
if root_disk_size > 0:
vm_utils.generate_iso_blank_root_disk(self._session, instance,
vm_ref, DEVICE_ROOT, name_label, root_disk_size)
cd_vdi = vdis.pop('iso')
vm_utils.attach_cd(self._session, vm_ref, cd_vdi['ref'],
DEVICE_CD)
else:
root_vdi = vdis['root']
if instance['auto_disk_config']:
LOG.debug(_("Auto configuring disk, attempting to "
"resize partition..."), instance=instance)
vm_utils.try_auto_configure_disk(self._session,
root_vdi['ref'],
instance_type['root_gb'])
vm_utils.create_vbd(self._session, vm_ref, root_vdi['ref'],
DEVICE_ROOT, bootable=True,
osvol=root_vdi.get('osvol'))
# Attach (optional) additional block-devices
for type_, vdi_info in vdis.items():
# Additional block-devices for boot use their device-name as the
# type.
if not type_.startswith('/dev'):
continue
# Convert device name to userdevice number, e.g. /dev/xvdb -> 1
userdevice = ord(block_device.strip_prefix(type_)) - ord('a')
vm_utils.create_vbd(self._session, vm_ref, vdi_info['ref'],
userdevice, bootable=False,
osvol=vdi_info.get('osvol'))
# Attach (optional) swap disk
swap_mb = instance_type['swap']
if swap_mb:
vm_utils.generate_swap(self._session, instance, vm_ref,
DEVICE_SWAP, name_label, swap_mb)
# Attach (optional) ephemeral disk
ephemeral_gb = instance_type['ephemeral_gb']
if ephemeral_gb:
vm_utils.generate_ephemeral(self._session, instance, vm_ref,
DEVICE_EPHEMERAL, name_label,
ephemeral_gb)
# Attach (optional) configdrive v2 disk
if configdrive.required_by(instance):
vm_utils.generate_configdrive(self._session, instance, vm_ref,
DEVICE_CONFIGDRIVE,
admin_password=admin_password,
files=files)
def _boot_new_instance(self, instance, vm_ref, injected_files,
admin_password):
"""Boot a new instance and configure it."""
LOG.debug(_('Starting VM'), instance=instance)
self._start(instance, vm_ref)
ctx = nova_context.get_admin_context()
# Wait for boot to finish
LOG.debug(_('Waiting for instance state to become running'),
instance=instance)
expiration = time.time() + CONF.xenapi_running_timeout
while time.time() < expiration:
state = self.get_info(instance, vm_ref)['state']
if state == power_state.RUNNING:
break
greenthread.sleep(0.5)
if self.agent_enabled(instance):
agent_build = self._virtapi.agent_build_get_by_triple(
ctx, 'xen', instance['os_type'], instance['architecture'])
if agent_build:
LOG.info(_('Latest agent build for %(hypervisor)s/%(os)s'
'/%(architecture)s is %(version)s') % agent_build)
else:
LOG.info(_('No agent build found for %(hypervisor)s/%(os)s'
'/%(architecture)s') % {
'hypervisor': 'xen',
'os': instance['os_type'],
'architecture': instance['architecture']})
# Update agent, if necessary
# This also waits until the agent starts
agent = self._get_agent(instance, vm_ref)
version = agent.get_agent_version()
if version:
LOG.info(_('Instance agent version: %s'), version,
instance=instance)
if (version and agent_build and
cmp_version(version, agent_build['version']) < 0):
agent.agent_update(agent_build)
# if the guest agent is not available, configure the
# instance, but skip the admin password configuration
no_agent = version is None
# Inject ssh key.
agent.inject_ssh_key()
# Inject files, if necessary
if injected_files:
# Inject any files, if specified
agent.inject_files(injected_files)
# Set admin password, if necessary
if admin_password and not no_agent:
agent.set_admin_password(admin_password)
# Reset network config
agent.resetnetwork()
self.remove_hostname(instance, vm_ref)
def _get_vm_opaque_ref(self, instance, check_rescue=False):
"""Get xapi OpaqueRef from a db record.
:param check_rescue: if True will return the 'name'-rescue vm if it
exists, instead of just 'name'
"""
vm_ref = vm_utils.lookup(self._session, instance['name'], check_rescue)
if vm_ref is None:
raise exception.NotFound(_('Could not find VM with name %s') %
instance['name'])
return vm_ref
def _acquire_bootlock(self, vm):
"""Prevent an instance from booting."""
self._session.call_xenapi(
"VM.set_blocked_operations",
vm,
{"start": ""})
def _release_bootlock(self, vm):
"""Allow an instance to boot."""
self._session.call_xenapi(
"VM.remove_from_blocked_operations",
vm,
"start")
def snapshot(self, context, instance, image_id, update_task_state):
"""Create snapshot from a running VM instance.
:param context: request context
:param instance: instance to be snapshotted
:param image_id: id of image to upload to
Steps involved in a XenServer snapshot:
1. XAPI-Snapshot: Snapshotting the instance using XenAPI. This
creates: Snapshot (Template) VM, Snapshot VBD, Snapshot VDI,
Snapshot VHD
2. Wait-for-coalesce: The Snapshot VDI and Instance VDI both point to
a 'base-copy' VDI. The base_copy is immutable and may be chained
with other base_copies. If chained, the base_copies
coalesce together, so, we must wait for this coalescing to occur to
get a stable representation of the data on disk.
3. Push-to-data-store: Once coalesced, we call
'xenapi_image_upload_handler' to upload the images.
"""
vm_ref = self._get_vm_opaque_ref(instance)
label = "%s-snapshot" % instance['name']
with vm_utils.snapshot_attached_here(
self._session, instance, vm_ref, label,
update_task_state) as vdi_uuids:
update_task_state(task_state=task_states.IMAGE_UPLOADING,
expected_state=task_states.IMAGE_PENDING_UPLOAD)
self.image_upload_handler.upload_image(context,
self._session,
instance,
vdi_uuids,
image_id)
LOG.debug(_("Finished snapshot and upload for VM"),
instance=instance)
def _migrate_vhd(self, instance, vdi_uuid, dest, sr_path, seq_num):
LOG.debug(_("Migrating VHD '%(vdi_uuid)s' with seq_num %(seq_num)d"),
{'vdi_uuid': vdi_uuid, 'seq_num': seq_num},
instance=instance)
instance_uuid = instance['uuid']
try:
self._session.call_plugin_serialized('migration', 'transfer_vhd',
instance_uuid=instance_uuid, host=dest, vdi_uuid=vdi_uuid,
sr_path=sr_path, seq_num=seq_num)
except self._session.XenAPI.Failure:
msg = _("Failed to transfer vhd to new host")
raise exception.MigrationError(reason=msg)
def _get_orig_vm_name_label(self, instance):
return instance['name'] + '-orig'
def _update_instance_progress(self, context, instance, step, total_steps):
"""Update instance progress percent to reflect current step number
"""
# FIXME(sirp): for now we're taking a KISS approach to instance
# progress:
# Divide the action's workflow into discrete steps and "bump" the
# instance's progress field as each step is completed.
#
# For a first cut this should be fine, however, for large VM images,
# the _create_disks step begins to dominate the equation. A
# better approximation would use the percentage of the VM image that
# has been streamed to the destination host.
progress = round(float(step) / total_steps * 100)
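# e.g. with 8 total steps (hypothetical values), finishing step 3 gives round(3 / 8.0 * 100) = 38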
LOG.debug(_("Updating progress to %d"), progress,
instance=instance)
self._virtapi.instance_update(context, instance['uuid'],
{'progress': progress})
def _resize_ensure_vm_is_shutdown(self, instance, vm_ref):
if vm_utils.is_vm_shutdown(self._session, vm_ref):
LOG.debug(_("VM was already shutdown."), instance=instance)
return
if not vm_utils.clean_shutdown_vm(self._session, instance, vm_ref):
LOG.debug(_("Clean shutdown did not complete successfully, "
"trying hard shutdown."), instance=instance)
if not vm_utils.hard_shutdown_vm(self._session, instance, vm_ref):
raise exception.ResizeError(
reason=_("Unable to terminate instance."))
def _migrate_disk_resizing_down(self, context, instance, dest,
instance_type, vm_ref, sr_path):
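# ---------------------------------------------------------------------------
# Simplified, self-contained sketch (not Nova code) of the step/undo pattern
# used by spawn() above: each completed step bumps a progress counter and may
# register an undo callback; on failure, every registered undo runs in reverse
# order. All names below (SketchUndoManager, make_step, the step functions)
# are made up for illustration and differ from Nova's real helpers.
# ---------------------------------------------------------------------------
class SketchUndoManager(object):
    """Toy stand-in for the undo manager used by spawn() above."""
    def __init__(self):
        self._undo_funcs = []

    def undo_with(self, undo_func):
        self._undo_funcs.append(undo_func)

    def rollback(self):
        for undo_func in reversed(self._undo_funcs):
            undo_func()


def make_step(total_steps, progress_log):
    """Return a decorator that records progress after each completed step."""
    state = {'completed': 0}

    def step(func):
        def wrapper(undo_mgr, *args, **kwargs):
            result = func(undo_mgr, *args, **kwargs)
            state['completed'] += 1
            progress_log.append(int(round(100.0 * state['completed'] / total_steps)))
            return result
        return wrapper
    return step


progress_log = []
step = make_step(total_steps=2, progress_log=progress_log)


@step
def allocate_resources_step(undo_mgr):
    undo_mgr.undo_with(lambda: progress_log.append('rolled back allocation'))


@step
def boot_step(undo_mgr):
    raise RuntimeError('boot failed')


undo_mgr = SketchUndoManager()
try:
    allocate_resources_step(undo_mgr)
    boot_step(undo_mgr)
except RuntimeError:
    undo_mgr.rollback()

# progress_log is now [50, 'rolled back allocation']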
# topi/tests/python_cpp/test_topi_transform.py
"""Test code for transform operators."""
import numpy as np
import tvm
import topi
def verify_expand_dims(in_shape, out_shape, axis, num_newaxis):
A = tvm.placeholder(shape=in_shape, name="A")
B = topi.cpp.expand_dims(A, axis, num_newaxis)
def check_device(device):
ctx = tvm.context(device, 0)
if not ctx.exist:
print("Skip because %s is not enabled" % device)
return
print("Running on target: %s" % device)
target = topi.cpp.TEST_create_target(device)
if device == "llvm":
s = topi.cpp.generic.schedule_injective(target, [B])
else:
s = topi.cpp.cuda.schedule_injective(target, [B])
foo = tvm.build(s, [A, B], device, name="expand_dims")
data_npy = np.random.uniform(size=in_shape).astype(A.dtype)
out_npy = data_npy.reshape(out_shape)
data_nd = tvm.nd.array(data_npy, ctx)
out_nd = tvm.nd.array(np.empty(out_shape).astype(B.dtype), ctx)
foo(data_nd, out_nd)
tvm.testing.assert_allclose(out_nd.asnumpy(), out_npy)
for device in ["llvm", "nvptx", "cuda", "opencl", "metal", "rocm"]:
check_device(device)
def verify_tranpose(in_shape, axes):
A = tvm.placeholder(shape=in_shape, name="A")
B = topi.cpp.transpose(A, axes)
def check_device(device):
ctx = tvm.context(device, 0)
if not ctx.exist:
print("Skip because %s is not enabled" % device)
return
print("Running on target: %s" % device)
target = topi.cpp.TEST_create_target(device)
if device == "llvm":
s = topi.cpp.generic.schedule_injective(target, [B])
else:
s = topi.cpp.cuda.schedule_injective(target, [B])
ctx = tvm.context(device, 0)
foo = tvm.build(s, [A, B], device, name="tranpose")
data_npy = np.arange(np.prod(in_shape)).reshape(in_shape).astype(A.dtype)
out_npy = data_npy.transpose(axes)
data_nd = tvm.nd.array(data_npy, ctx)
out_nd = tvm.nd.empty(out_npy.shape, ctx=ctx, dtype=B.dtype)
foo(data_nd, out_nd)
tvm.testing.assert_allclose(out_nd.asnumpy(), out_npy)
for device in ["llvm", "nvptx", "cuda", "opencl", "metal", "rocm"]:
check_device(device)
def verify_reshape(src_shape, dst_shape):
A = tvm.placeholder(shape=src_shape, name="A")
B = topi.cpp.reshape(A, dst_shape)
def check_device(device):
ctx = tvm.context(device, 0)
if not ctx.exist:
print("Skip because %s is not enabled" % device)
return
print("Running on target: %s" % device)
target = topi.cpp.TEST_create_target(device)
if device == "llvm":
s = topi.cpp.generic.schedule_injective(target, [B])
else:
s = topi.cpp.cuda.schedule_injective(target, [B])
foo = tvm.build(s, [A, B], device, name="reshape")
data_npy = np.random.normal(size=src_shape).astype(A.dtype)
out_npy = np.reshape(data_npy, newshape=dst_shape)
data_nd = tvm.nd.array(data_npy, ctx)
out_nd = tvm.nd.empty(dst_shape, ctx=ctx, dtype=B.dtype)
foo(data_nd, out_nd)
tvm.testing.assert_allclose(out_nd.asnumpy(), out_npy)
for device in ["llvm", "nvptx", "cuda", "opencl", "metal", "rocm"]:
check_device(device)
def verify_squeeze(src_shape, axis):
A = tvm.placeholder(shape=src_shape, name="A")
B = topi.cpp.squeeze(A, axis)
def check_device(device):
ctx = tvm.context(device, 0)
if not ctx.exist:
print("Skip because %s is not enabled" % device)
return
print("Running on target: %s" % device)
target = topi.cpp.TEST_create_target(device)
if device == "llvm":
s = topi.cpp.generic.schedule_injective(target, [B])
else:
s = topi.cpp.cuda.schedule_injective(target, [B])
foo = tvm.build(s, [A, B], device, name="squeeze")
data_npy = np.random.normal(size=src_shape).astype(A.dtype)
out_npy = np.squeeze(data_npy, axis=axis)
data_nd = tvm.nd.array(data_npy, ctx)
out_nd_shape = out_npy.shape
out_nd = tvm.nd.empty(out_nd_shape, ctx=ctx, dtype=B.dtype)
foo(data_nd, out_nd)
tvm.testing.assert_allclose(out_nd.asnumpy(), out_npy)
for device in ["llvm", "nvptx", "cuda", "opencl", "metal", "rocm"]:
check_device(device)
def verify_concatenate(shapes, axis):
tensor_l = []
for i, shape in enumerate(shapes):
tensor_l.append(tvm.placeholder(shape, name="A" + str(i)))
out_tensor = topi.cpp.concatenate(tensor_l, axis)
def check_device(device):
ctx = tvm.context(device, 0)
if not ctx.exist:
print("Skip because %s is not enabled" % device)
return
print("Running on target: %s" % device)
target = topi.cpp.TEST_create_target(device)
if device == "llvm":
s = topi.cpp.generic.schedule_injective(target, [out_tensor])
else:
s = topi.cpp.cuda.schedule_injective(target, [out_tensor])
foo = tvm.build(s, tensor_l + [out_tensor], device, name="concatenate")
data_npys = [np.random.normal(size=shape).astype(tensor_l[0].dtype) for shape in shapes]
out_npy = np.concatenate(data_npys, axis=axis)
data_nds = [tvm.nd.array(data_npy, ctx) for data_npy in data_npys]
out_nd = tvm.nd.empty(out_npy.shape, ctx=ctx, dtype=out_tensor.dtype)
foo(*(data_nds + [out_nd]))
tvm.testing.assert_allclose(out_nd.asnumpy(), out_npy)
for device in ["llvm", "nvptx", "cuda", "opencl", "metal", "rocm"]:
check_device(device)
def verify_split(src_shape, indices_or_sections, axis):
A = tvm.placeholder(shape=src_shape, name="A")
tensor_l = topi.cpp.split(A, indices_or_sections, axis)
tensor_l = list(tensor_l)
def check_device(device):
ctx = tvm.context(device, 0)
if not ctx.exist:
print("Skip because %s is not enabled" % device)
return
print("Running on target: %s" % device)
target = topi.cpp.TEST_create_target(device)
if device == "llvm":
s = topi.cpp.generic.schedule_injective(target, tensor_l)
else:
s = topi.cpp.cuda.schedule_injective(target, tensor_l)
ctx = tvm.context(device, 0)
foo = tvm.build(s, [A] + tensor_l, device, name="split")
data_npy = np.random.normal(size=src_shape).astype(A.dtype)
out_npys = np.split(data_npy, indices_or_sections, axis=axis)
data_nd = tvm.nd.array(data_npy, ctx)
out_nds = [tvm.nd.empty(out_npy.shape, ctx=ctx, dtype=tensor_l[0].dtype) for out_npy in out_npys]
foo(*([data_nd] + out_nds))
for out_nd, out_npy in zip(out_nds, out_npys):
tvm.testing.assert_allclose(out_nd.asnumpy(), out_npy)
for device in ["llvm", "nvptx", "cuda", "opencl", "metal", "rocm"]:
check_device(device)
def verify_take(src_shape, indices_src, axis=None):
src_dtype = "float32"
indices_dtype = "int32"
indices_src = np.array(indices_src, dtype=indices_dtype)
A = tvm.placeholder(shape=src_shape, dtype=src_dtype, name="A")
indices = tvm.placeholder(shape=indices_src.shape, dtype=indices_dtype, name="indices")
if axis is None:
out_tensor = topi.cpp.take(A, indices)
else:
out_tensor = topi.cpp.take(A, indices, axis)
def check_device(device):
ctx = tvm.context(device, 0)
if not ctx.exist:
print("Skip because %s is not enabled" % device)
return
print("Running on target: %s" % device)
with tvm.target.create(device):
s = topi.generic.schedule_injective(out_tensor)
foo = tvm.build(s, [A] + [indices] + [out_tensor] , device, name="take")
shape_size = 1
for i in range(len(src_shape)):
shape_size = shape_size * src_shape[i]
data_npy = np.arange(shape_size, dtype=src_dtype).reshape((src_shape))
if axis is None:
out_npys = np.take(data_npy, indices_src)
else:
out_npys = np.take(data_npy, indices_src, axis=axis)
data_nd = tvm.nd.array(data_npy, ctx)
indices_nd = tvm.nd.array(indices_src, ctx)
out_nd = tvm.nd.empty(out_npys.shape, ctx=ctx, dtype=src_dtype)
foo(data_nd, indices_nd, out_nd)
tvm.testing.assert_allclose(out_nd.asnumpy(), out_npys)
for device in ["llvm", "opencl"]:
check_device(device)
def verify_where(condition, x, y):
dtype = "float32"
if len(condition.shape) == 1:
np_out = np.array([xv if c else yv for (c,xv,yv) in zip(condition,x,y)])
else:
np_out = np.where(condition, x, y)
A = tvm.placeholder(shape=condition.shape, dtype=dtype, name="condition")
B = tvm.placeholder(shape=x.shape, dtype=dtype, name="x")
C = tvm.placeholder(shape=y.shape, dtype=dtype, name="y")
out_tensor = topi.cpp.where(A, B, C)
def check_device(device):
ctx = tvm.context(device, 0)
if not ctx.exist:
print("Skip because %s is not enabled" % device)
return
print("Running on target: %s" % device)
with tvm.target.create(device):
s = topi.generic.schedule_injective(out_tensor)
foo = tvm.build(s, [A, B, C, out_tensor], device, name="where")
tvm_out = tvm.nd.empty(x.shape, ctx=ctx, dtype=dtype)
foo(tvm.nd.array(condition, ctx), tvm.nd.array(x, ctx),
tvm.nd.array(y, ctx), tvm_out)
tvm.testing.assert_allclose(tvm_out.asnumpy(), np_out)
for device in ["llvm", "nvptx", "cuda", "opencl", "metal", "rocm"]:
check_device(device)
def verify_concatenate_split(shapes, axis, indices_or_sections):
tensor_l_concatenate = []
for i, shape in enumerate(shapes):
tensor_l_concatenate.append(tvm.placeholder(shape, name="A" + str(i)))
out_tensor = topi.cpp.concatenate(tensor_l_concatenate, axis)
tensor_l = topi.cpp.split(out_tensor, indices_or_sections, axis)
tensor_l = list(tensor_l)
def check_device(device):
if not tvm.module.enabled(device):
print("Skip because %s is not enabled" % device)
return
print("Running on target: %s" % device)
target = topi.cpp.TEST_create_target(device)
if device == "llvm":
s = topi.cpp.generic.schedule_injective(target, tensor_l)
else:
s = topi.cpp.cuda.schedule_injective(target, tensor_l)
ctx = tvm.context(device, 0)
foo = tvm.build(s, tensor_l_concatenate + tensor_l, device, name="concatenate_split")
data_npys = [np.random.normal(size=shape).astype(tensor_l_concatenate[0].dtype) for shape in shapes]
out_npy_conc = np.concatenate(data_npys, axis=axis)
out_npys_split = np.split(out_npy_conc, indices_or_sections, axis=axis)
data_nds = [tvm.nd.array(data_npy, ctx) for data_npy in data_npys]
out_nds = [tvm.nd.empty(out_npy.shape, ctx=ctx, dtype=tensor_l[0].dtype) for out_npy in out_npys_split]
foo(*(data_nds + out_nds))
for out_nd, out_npy in zip(out_nds, out_npys_split):
tvm.testing.assert_allclose(out_nd.asnumpy(), out_npy)
for device in ["llvm", "cuda", "opencl", "metal", "rocm"]:
check_device(device)
def verify_concatenate_broadcast(shapes, axis, rhs_shape):
B = tvm.placeholder(shape=rhs_shape, name="B")
tensor_l = []
for i, shape in enumerate(shapes):
tensor_l.append(tvm.placeholder(shape, name="A" + str(i)))
out_tensor = topi.cpp.concatenate(tensor_l, axis)
C = out_tensor + B
def check_device(device):
ctx = tvm.context(device, 0)
if not ctx.exist:
print("Skip because %s is not enabled" % device)
return
print("Running on target: %s" % device)
target = topi.cpp.TEST_create_target(device)
if device == "llvm":
s = topi.cpp.generic.schedule_injective(target, [C])
else:
s = topi.cpp.cuda.schedule_injective(target, [C])
ctx = tvm.context(device, 0)
foo = tvm.build(s, tensor_l + [B, C], device, name="broadcast_binary_add")
data_npys = [np.random.normal(size=shape).astype(tensor_l[0].dtype) for shape in shapes]
lhs_npy = np.concatenate(data_npys, axis=axis)
rhs_npy = np.random.uniform(size=rhs_shape).astype(B.dtype)
out_npy = lhs_npy + rhs_npy
data_nds = [tvm.nd.array(data_npy, ctx) for data_npy in data_npys]
rhs_nd = tvm.nd.array(rhs_npy, ctx)
out_nd = tvm.nd.array(np.empty(out_npy.shape).astype(B.dtype), ctx)
for _ in range(1):
foo(*(data_nds + [rhs_nd] + [out_nd]))
tvm.testing.assert_allclose(out_nd.asnumpy(), out_npy, rtol=1E-4, atol=1E-4)
for device in ["llvm", "cuda", "opencl", "metal", "rocm"]:
check_device(device)
def test_expand_dims():
verify_expand_dims((3, 10), (3, 10, 1, 1), 2, 2)
verify_expand_dims((3, 10), (1, 3, 10), -3, 1)
def test_tranpose():
verify_tranpose((3, 10, 2), (1, 0, 2))
verify_tranpose((3, 10, 5), (2, 0, 1))
verify_tranpose((3, 10), None)
verify_tranpose((3, 10, 5), (2, -3, 1))
def test_reshape():
verify_reshape((1, 2, 3, 4), (2, 3, 4))
verify_reshape((4, 2, 3, 4), (2, 4, 12))
verify_reshape((4, 2, 3, 4), (2, 48))
verify_reshape((16, ), (2, 2, 2, 2))
def test_squeeze():
verify_squeeze((1, 2, 3, 4), 0)
verify_squeeze((1, 2, 1, 4), None)
verify_squeeze((1, 1, 1, 4), (1, 2))
verify_squeeze((1, 1, 1, 1), None)
def test_concatenate():
verify_concatenate([(2,), (2,), (2,)], 0)
verify_concatenate([(2, 3, 4), (2, 2, 4), (2, 5, 4)], 1)
verify_concatenate([(1, 2, 4), (1, 2, 3), (1, 2, 7), (1, 2, 8), (1, 2, 1)], -1)
verify_concatenate([(5, 6, 7, 3),
(16, 6, 7, 3),
(12, 6, 7, 3),
(8, 6, 7, 3),
(2, 6, 7, 3)], 0)
def test_split():
verify_split((2, 12, 3), 3, 1)
verify_split((2, 12, 3), 3, -1)
verify_split((2, 12, 3), [2, 4], 1)
verify_split((10, 12, 24), [5, 7, 9], -1)
def test_take():
verify_take((4,), [1])
verify_take((4,), [[0,1,2,3]])
verify_take((3,3,3), [[11,25]])
verify_take((4,), [[0,1],[2,3]])
verify_take((4,), [1], 0)
verify_take((2,2), [[[1,0],[0,1]]], 0)
verify_take((2,2), [[[1,0],[0,1]]], 1)
verify_take((4,3,5,6), [[2,1,0,0]], -2)
def test_where():
shape = (10, 3, 7, 13)
condition = np.random.uniform(low=-1, high=1, size=shape)
# mdnls/tramp: tramp/channels/linear/conv_channel.py
import numpy as np
from numpy.fft import fftn, ifftn
from ..base_channel import Channel
from tramp.utils.conv_filters import (
gaussian_filter, differential_filter, laplacian_filter
)
from tramp.channels import LinearChannel
from tramp.utils.misc import complex2array, array2complex
import logging
logger = logging.getLogger(__name__)
class ConvChannel(Channel):
"""Conv (complex or real) channel x = w * z.
Parameters
----------
- filter: real or complex array
Filter weights. The conv weights w are given by w[u] = f*[-u].
The conv and filter weights ffts are conjugate.
- real: bool
if True assume x, w, z real
if False assume x, w, z complex
Notes
-----
For message passing it is more convenient to represent a complex array x
as a real array X where X[0] = x.real and X[1] = x.imag
In particular when real=False (x, w, z complex):
- input of sample(): Z real array of shape (2, z.shape)
- output of sample(): X real array of shape (2, x.shape)
- message bz, posterior rz: real arrays of shape (2, z.shape)
- message bx, posterior rx: real arrays of shape (2, x.shape)
"""
def __init__(self, filter, real=True):
self.shape = filter.shape
self.real = real
self.repr_init()
self.filter = filter
# conv weights and filter ffts are conjugate
self.w_fft_bar = fftn(filter)
self.w_fft = np.conjugate(self.w_fft_bar)
self.spectrum = np.absolute(self.w_fft)**2
def convolve(self, z):
"We assume x,z,w complex for complex fft or x,w,z real for real fft"
z_fft = fftn(z)
x_fft = self.w_fft * z_fft
x = ifftn(x_fft)
if self.real:
x = np.real(x)
return x
def sample(self, Z):
"When real=False we assume Z[0] = Z.real and Z[1] = Z.imag"
if not self.real:
Z = array2complex(Z)
X = self.convolve(Z)
if not self.real:
X = complex2array(X)
return X
def math(self):
return r"$\ast$"
def second_moment(self, tau_z):
return tau_z * self.spectrum.mean()
def compute_n_eff(self, az, ax):
"Effective number of parameters = overlap in z"
if ax == 0:
logger.info(f"ax=0 in {self} compute_n_eff")
return 0.
if az / ax == 0:
logger.info(f"az/ax=0 in {self} compute_n_eff")
return 1.
n_eff = np.mean(self.spectrum / (az / ax + self.spectrum))
return n_eff
def compute_backward_mean(self, az, bz, ax, bx, return_fft=False):
# estimate z from x = Wz
if not self.real:
bz = array2complex(bz)
bx = array2complex(bx)
bx_fft = fftn(bx)
bz_fft = fftn(bz)
resolvent = 1 / (az + ax * self.spectrum)
rz_fft = resolvent * (bz_fft + self.w_fft_bar * bx_fft)
if return_fft:
return rz_fft
rz = ifftn(rz_fft)
if self.real:
rz = np.real(rz)
else:
rz = complex2array(rz)
return rz
def compute_forward_mean(self, az, bz, ax, bx):
# estimate x from x = Wz we have rx = W rz
rz_fft = self.compute_backward_mean(az, bz, ax, bx, return_fft=True)
rx_fft = self.w_fft * rz_fft
rx = ifftn(rx_fft)
if self.real:
rx = np.real(rx)
else:
rx = complex2array(rx)
return rx
def compute_backward_variance(self, az, ax):
assert az > 0
n_eff = self.compute_n_eff(az, ax)
vz = (1 - n_eff) / az
return vz
def compute_forward_variance(self, az, ax):
if ax == 0:
s_mean = np.mean(self.spectrum)
return s_mean / az
n_eff = self.compute_n_eff(az, ax)
vx = n_eff / ax
return vx
def compute_backward_posterior(self, az, bz, ax, bx):
# estimate z from x = Wz
rz = self.compute_backward_mean(az, bz, ax, bx)
vz = self.compute_backward_variance(az, ax)
return rz, vz
def compute_forward_posterior(self, az, bz, ax, bx):
# estimate x from x = Wz
rx = self.compute_forward_mean(az, bz, ax, bx)
vx = self.compute_forward_variance(az, ax)
return rx, vx
def compute_backward_error(self, az, ax, tau_z):
vz = self.compute_backward_variance(az, ax)
return vz
def compute_forward_error(self, az, ax, tau_z):
vx = self.compute_forward_variance(az, ax)
return vx
def compute_log_partition(self, az, bz, ax, bx):
rz = self.compute_backward_mean(az, bz, ax, bx)
rx = self.compute_forward_mean(az, bz, ax, bx)
a = az + ax * self.spectrum
coef = 0.5 if self.real else 1
logZ = (
0.5 * np.sum(bz * rz) + 0.5 * np.sum(bx*rx) +
coef * np.sum(np.log(2 * np.pi / a))
)
return logZ
def compute_mutual_information(self, az, ax, tau_z):
I = 0.5*np.log((az + ax * self.spectrum)*tau_z)
I = I.mean()
return I
def compute_free_energy(self, az, ax, tau_z):
tau_x = self.second_moment(tau_z)
I = self.compute_mutual_information(az, ax, tau_z)
A = 0.5*(az*tau_z + ax*tau_x) - I + 0.5*np.log(2*np.pi*tau_z/np.e)
return A
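# ---------------------------------------------------------------------------
# Standalone numpy sketch (not part of tramp's API; the helper name below is
# made up) of the Fourier-domain identities used by ConvChannel above, for a
# real filter:
#   - the conv weights' fft is the conjugate of the filter's fft,
#   - compute_backward_mean is a diagonal resolvent in Fourier space:
#       rz_fft = (bz_fft + conj(w_fft) * bx_fft) / (az + ax * |w_fft|**2)
# It relies only on the numpy and fft imports already at the top of this file.
# ---------------------------------------------------------------------------
def _sketch_conv_channel_backward_mean(filter, bz, bx, az, ax):
    "Recompute ConvChannel.compute_backward_mean directly in Fourier space."
    w_fft = np.conjugate(fftn(filter))        # conv weights fft = conj(filter fft)
    spectrum = np.absolute(w_fft) ** 2
    resolvent = 1 / (az + ax * spectrum)
    rz_fft = resolvent * (fftn(bz) + np.conjugate(w_fft) * fftn(bx))
    return np.real(ifftn(rz_fft))

# For a real filter f and same-shaped messages bz, bx, this should agree with
# ConvChannel(f).compute_backward_mean(az, bz, ax, bx) up to numerical error.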
class DifferentialChannel(ConvChannel):
def __init__(self, D1, D2, shape, real=True):
self.D1 = D1
self.D2 = D2
self.repr_init()
f = differential_filter(shape=shape, D1=D1, D2=D2)
super().__init__(filter=f, real=real)
def math(self):
return r"$\partial$"
class LaplacianChannel(ConvChannel):
def __init__(self, shape, real=True):
self.repr_init()
f = laplacian_filter(shape)
super().__init__(filter=f, real=real)
def math(self):
return r"$\Delta$"
class Blur1DChannel(ConvChannel):
def __init__(self, sigma, N, real=True):
self.sigma = sigma
self.repr_init()
f = gaussian_filter(sigma=sigma, N=N)
super().__init__(filter=f, real=real)
class Blur2DChannel(ConvChannel):
def __init__(self, sigma, shape, real=True):
if len(sigma) != 2:
raise ValueError("sigma must be a length 2 array")
if len(shape) != 2:
raise ValueError("shape must be a length 2 tuple")
self.sigma = sigma
self.repr_init()
f0 = gaussian_filter(sigma=sigma[0], N=shape[0])
f1 = gaussian_filter(sigma=sigma[1], N=shape[1])
f = np.outer(f0, f1)
super().__init__(filter=f, real=real)
class MultiConvChannel(LinearChannel):
"""
Convolution channel for (real valued) multi-color data.
Inputs and outputs x = [x1 ... xn] and z = [z1 ... zm] have block structure with n, m blocks
respectively. If we define the product of two blocks as their convolution xi * zj, then
this layer implements a dense matrix product of blocks:
[x1 x2 ... xm] = [ w11 * z1 + w12 * z2 + ... + w1n * zn,
w21 * z1 + ... + w2n * zn,
...
wm1 * z1 + ... + wmn * zn]
Each block xi or zj is called a 'color' in reference to color channels of RGB images. In
other sources, 'colors' may be called 'channels' instead.
Parameters
---------
- filters: real or complex array
Filter weights of dimensions (m, n, r1, ..., rk).
- block_shape: Dimensions of each block. All blocks are assumed to be (d1, ..., dk) dimensional
tensors.
Notes
-----
This layer implements the common operation of neural networks called 'convolution'
even though, technically, it is a cross correlation.
"""
def __init__(self, filter, block_shape, name="M"):
# This class uses LinearChannel method implementations but will completely overwrite its fields.
super().__init__(W = np.eye(2), precompute_svd=False, name=name)
del self.name, self.Nx, self.Nz, self.precompute_svd, self.W, \
self.rank, self.alpha, self.C, self.spectrum, self.singular
self.name = name
m, n = filter.shape[:2]
filter_shape = filter.shape[2:]
self.macro_shape = (m, n)
self.block_shape = block_shape
self.block_order = len(block_shape)
self.filter_shape = filter_shape
self.Nz = n * np.prod(block_shape)
self.Nx = m * np.prod(block_shape)
self.repr_init()
if(self.block_order > 24):
raise ValueError(f"Input data blocks have tensor order {self.block_order} > 24 which will \
break einstein sums used in this implementation.")
self.filter = filter
U, S, V = self._svd(filter)
self.U = U
self.S = S
self.V = V
# TODO: this code does not currently match dfns of what singular, spectrum should be.
self.singular = np.zeros(block_shape + (m,))
self.spectrum = np.zeros(block_shape + (n,))
self.singular[tuple(slice(0, k) for k in self.S.shape)] = S**2
self.spectrum[tuple(slice(0, k) for k in self.S.shape)] = S**2
self.alpha = self.Nx / self.Nz
self.rank = np.sum(self.S != 0)
def sample(self, Z):
X = self.at(Z)
return X
def second_moment(self, tau_z):
return tau_z * self.spectrum.sum() / self.Nx
def compute_backward_mean(self, az, bz, ax, bx):
bx_svd = self.U.T(bx)
bz_svd = self.V.T(bz)
resolvent = 1/(az + ax * self.spectrum)
rz_svd = resolvent * (bz_svd + self._scale(bx_svd, transpose=True))
rz = self.V(rz_svd)
return rz
def compute_forward_mean(self, az, bz, ax, bx):
rz = self.compute_backward_mean(az, bz, ax, bx)
rx = self.at(rz)
return rx
def compute_forward_variance(self, az, ax):
if ax == 0:
s_mean = np.mean(self.singular[self.singular > 0])
return s_mean * self.rank / (self.Nx * az)
n_eff = self.compute_n_eff(az, ax)
vx = n_eff / (self.alpha * ax)
return vx
def compute_log_partition(self, az, bz, ax, bx):
rz = self.compute_backward_mean(az, bz, ax, bx)
b = bz + self.T(bx)
a = az + ax * self.spectrum
logZ = 0.5 * np.sum(b * rz) + 0.5 * np.sum(np.log(2 * np.pi / a))
return logZ
def compute_free_energy(self, az, ax, tau_z):
tau_x = self.second_moment(tau_z)
I = self.compute_mutual_information(az, ax, tau_z)
A = 0.5*(az*tau_z + self.alpha*ax*tau_x) - I + 0.5*np.log(2*np.pi*tau_z/np.e)
return A
def at(self, z):
''' Right multiply z by this MCC matrix. '''
return self.U(self._scale(self.V.T(z)))
def __tmatmul__(self, z):
return self.V(self._scale(self.U.T(z), transpose=True))
# TODO: add a @property which returns a transposed version of this array.
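# ---------------------------------------------------------------------------
# Standalone numpy sketch (not tramp's implementation, which works through an
# SVD of the filter bank) of the block structure documented above:
#     x[i] = sum_j  w[i, j] * z[j]
# with '*' taken here as circular convolution via FFT. As the class Notes
# point out, the real layer follows the neural-network convention (cross
# correlation), so this sketch only illustrates the block bookkeeping, not the
# exact kernel orientation. The helper name is made up.
# ---------------------------------------------------------------------------
def _sketch_multi_conv(filter, z):
    "filter: (m, n, r1, ..., rk); z: (n, d1, ..., dk); returns (m, d1, ..., dk)."
    m, n = filter.shape[:2]
    block_shape = z.shape[1:]
    x = np.zeros((m,) + block_shape)
    for i in range(m):
        for j in range(n):
            # Zero-pad each filter block to the data block shape, then
            # multiply in Fourier space and accumulate over input colors.
            w_fft = fftn(filter[i, j], s=block_shape)
            x[i] += np.real(ifftn(w_fft * fftn(z[j])))
    return x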
#!/usr/bin/env python
#
# Copyright (C) 2009 Google Inc.
#
# Licensed under the Apache License 2.0;
# This module is used for version 2 of the Google Data APIs.
"""Provides classes and constants for the XML in the Google Spreadsheets API.
Documentation for the raw XML which these classes represent can be found here:
http://code.google.com/apis/spreadsheets/docs/3.0/reference.html#Elements
"""
# __author__ = '<EMAIL> (<NAME>)'
import atom.core
import gdata.data
GS_TEMPLATE = '{http://schemas.google.com/spreadsheets/2006}%s'
GSX_NAMESPACE = 'http://schemas.google.com/spreadsheets/2006/extended'
INSERT_MODE = 'insert'
OVERWRITE_MODE = 'overwrite'
WORKSHEETS_REL = 'http://schemas.google.com/spreadsheets/2006#worksheetsfeed'
BATCH_POST_ID_TEMPLATE = ('https://spreadsheets.google.com/feeds/cells'
'/%s/%s/private/full')
BATCH_ENTRY_ID_TEMPLATE = '%s/R%sC%s'
BATCH_EDIT_LINK_TEMPLATE = '%s/batch'
class Error(Exception):
pass
class FieldMissing(Exception):
pass
class HeaderNotSet(Error):
"""The desired column header had no value for the row in the list feed."""
class Cell(atom.core.XmlElement):
"""The gs:cell element.
A cell in the worksheet. The <gs:cell> element can appear only as a child
of <atom:entry>.
"""
_qname = GS_TEMPLATE % 'cell'
col = 'col'
input_value = 'inputValue'
numeric_value = 'numericValue'
row = 'row'
class ColCount(atom.core.XmlElement):
"""The gs:colCount element.
Indicates the number of columns in the worksheet, including columns that
contain only empty cells. The <gs:colCount> element can appear as a child
of <atom:entry> or <atom:feed>
"""
_qname = GS_TEMPLATE % 'colCount'
class Field(atom.core.XmlElement):
"""The gs:field element.
A single cell field within a record. Contained in an <atom:entry>.
"""
_qname = GS_TEMPLATE % 'field'
index = 'index'
name = 'name'
class Column(Field):
"""The gs:column element."""
_qname = GS_TEMPLATE % 'column'
class Data(atom.core.XmlElement):
"""The gs:data element.
A data region of a table. Contained in an <atom:entry> element.
"""
_qname = GS_TEMPLATE % 'data'
column = [Column]
insertion_mode = 'insertionMode'
num_rows = 'numRows'
start_row = 'startRow'
class Header(atom.core.XmlElement):
"""The gs:header element.
Indicates which row is the header row. Contained in an <atom:entry>.
"""
_qname = GS_TEMPLATE % 'header'
row = 'row'
class RowCount(atom.core.XmlElement):
"""The gs:rowCount element.
Indicates the number of total rows in the worksheet, including rows that
contain only empty cells. The <gs:rowCount> element can appear as a
child of <atom:entry> or <atom:feed>.
"""
_qname = GS_TEMPLATE % 'rowCount'
class Worksheet(atom.core.XmlElement):
"""The gs:worksheet element.
The worksheet where the table lives.Contained in an <atom:entry>.
"""
_qname = GS_TEMPLATE % 'worksheet'
name = 'name'
class Spreadsheet(gdata.data.GDEntry):
"""An Atom entry which represents a Google Spreadsheet."""
def find_worksheets_feed(self):
return self.find_url(WORKSHEETS_REL)
FindWorksheetsFeed = find_worksheets_feed
def get_spreadsheet_key(self):
"""Extracts the spreadsheet key unique to this spreadsheet."""
return self.get_id().split('/')[-1]
GetSpreadsheetKey = get_spreadsheet_key
class SpreadsheetsFeed(gdata.data.GDFeed):
"""An Atom feed listing a user's Google Spreadsheets."""
entry = [Spreadsheet]
class WorksheetEntry(gdata.data.GDEntry):
"""An Atom entry representing a single worksheet in a spreadsheet."""
row_count = RowCount
col_count = ColCount
def get_worksheet_id(self):
"""The worksheet ID identifies this worksheet in its spreadsheet."""
return self.get_id().split('/')[-1]
GetWorksheetId = get_worksheet_id
class WorksheetsFeed(gdata.data.GDFeed):
"""A feed containing the worksheets in a single spreadsheet."""
entry = [WorksheetEntry]
class Table(gdata.data.GDEntry):
"""An Atom entry that represents a subsection of a worksheet.
A table allows you to treat part or all of a worksheet somewhat like a
table in a database, that is, as a set of structured data items. Tables
don't exist until you explicitly create them; before you can use a table
feed, you have to explicitly define where the table data comes from.
"""
data = Data
header = Header
worksheet = Worksheet
def get_table_id(self):
if self.id.text:
return self.id.text.split('/')[-1]
return None
GetTableId = get_table_id
class TablesFeed(gdata.data.GDFeed):
"""An Atom feed containing the tables defined within a worksheet."""
entry = [Table]
class Record(gdata.data.GDEntry):
"""An Atom entry representing a single record in a table.
Note that the order of items in each record is the same as the order of
columns in the table definition, which may not match the order of
columns in the GUI.
"""
field = [Field]
def value_for_index(self, column_index):
for field in self.field:
if field.index == column_index:
return field.text
raise FieldMissing('There is no field for %s' % column_index)
ValueForIndex = value_for_index
def value_for_name(self, name):
for field in self.field:
if field.name == name:
return field.text
raise FieldMissing('There is no field for %s' % name)
ValueForName = value_for_name
def get_record_id(self):
if self.id.text:
return self.id.text.split('/')[-1]
return None
class RecordsFeed(gdata.data.GDFeed):
"""An Atom feed containing the individuals records in a table."""
entry = [Record]
class ListRow(atom.core.XmlElement):
"""A gsx column value within a row.
The local tag in the _qname is blank and must be set to the column
name. For example, when adding to a ListEntry, do:
col_value = ListRow(text='something')
col_value._qname = col_value._qname % 'mycolumnname'
"""
_qname = '{http://schemas.google.com/spreadsheets/2006/extended}%s'
class ListEntry(gdata.data.GDEntry):
"""An Atom entry representing a worksheet row in the list feed.
The values for a particular column can be get and set using
x.get_value('columnheader') and x.set_value('columnheader', 'value').
See also the explanation of column names in the ListFeed class.
"""
def get_value(self, column_name):
"""Returns the displayed text for the desired column in this row.
The formula or input which generated the displayed value is not accessible
through the list feed; to see the user's input, use the cells feed.
If a column is not present in this spreadsheet, or there is no value
for a column in this row, this method will return None.
"""
values = self.get_elements(column_name, GSX_NAMESPACE)
if len(values) == 0:
return None
return values[0].text
def set_value(self, column_name, value):
"""Changes the value of cell in this row under the desired column name.
Warning: if the cell contained a formula, it will be wiped out by setting
the value using the list feed since the list feed only works with
displayed values.
No client side checking is performed on the column_name; you need to
ensure that the column_name is the local tag name in the gsx tag for the
column. For example, the column_name will not contain special characters,
spaces, uppercase letters, etc.
"""
# Try to find the column in this row to change an existing value.
values = self.get_elements(column_name, GSX_NAMESPACE)
if len(values) > 0:
values[0].text = value
else:
# There is no value in this row for the desired column, so add a new
# gsx:column_name element.
new_value = ListRow(text=value)
new_value._qname = new_value._qname % (column_name,)
self._other_elements.append(new_value)
def to_dict(self):
"""Converts this row to a mapping of column names to their values."""
result = {}
values = self.get_elements(namespace=GSX_NAMESPACE)
for item in values:
result[item._get_tag()] = item.text
return result
def from_dict(self, values):
"""Sets values for this row from the dictionary.
Old values which are already in the entry will not be removed unless
they are overwritten with new values from the dict.
"""
for column, value in values.items():
self.set_value(column, value)
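# ---------------------------------------------------------------------------
# Minimal usage sketch for ListEntry (hypothetical column names and values;
# the helper name is made up). Setting a value for a column that has no
# element yet appends a new gsx element, as set_value documents above.
# ---------------------------------------------------------------------------
def _sketch_list_entry_usage():
    row = ListEntry()
    row.set_value('name', 'Ada')
    row.set_value('email', 'ada@example.com')
    assert row.get_value('name') == 'Ada'
    assert row.to_dict() == {'name': 'Ada', 'email': 'ada@example.com'}
    return row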
class ListsFeed(gdata.data.GDFeed):
"""An Atom feed in which each entry represents a row in a worksheet.
The first row in the worksheet is used as the column names for the values
in each row. If a header cell is empty, then a unique column ID is used
for the gsx element name.
Spaces in a column name are removed from the name of the corresponding
gsx element.
Caution: The columnNames are case-insensitive. For example, if you see
a <gsx:e-mail> element in a feed, you can't know whether the column
heading in the original worksheet was "e-mail" or "E-Mail".
Note: If two or more columns have the same name, then subsequent columns
of the same name have _n appended to the columnName. For example, if the
first column name is "e-mail", followed by columns named "E-Mail" and
"E-mail", then the columnNames will be gsx:e-mail, gsx:e-mail_2, and
gsx:e-mail_3 respectively.
"""
entry = [ListEntry]
class CellEntry(gdata.data.BatchEntry):
"""An Atom entry representing a single cell in a worksheet."""
cell = Cell
class CellsFeed(gdata.data.BatchFeed):
"""An Atom feed contains one entry per cell in a worksheet.
The cell feed supports batch operations, you can send multiple cell
operations in one HTTP request.
"""
entry = [CellEntry]
def add_set_cell(self, row, col, input_value):
"""Adds a request to change the contents of a cell to this batch request.
Args:
row: int, The row number for this cell. Numbering starts at 1.
col: int, The column number for this cell. Starts at 1.
input_value: str, The desired formula/content this cell should contain.
"""
self.add_update(CellEntry(
id=atom.data.Id(text=BATCH_ENTRY_ID_TEMPLATE % (
self.id.text, row, col)),
cell=Cell(col=str(col), row=str(row), input_value=input_value)))
return self
AddSetCell = add_set_cell
def build_batch_cells_update(spreadsheet_key, worksheet_id):
"""Creates an empty cells feed for adding batch cell updates to.
Call batch_set_cell on the resulting feed to queue cell updates.
"""
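# ---------------------------------------------------------------------------
# Sketch of how the batch templates above compose (the spreadsheet key and
# worksheet id below are made-up examples, and the helper name is
# hypothetical). A batch request is a CellsFeed whose id comes from
# BATCH_POST_ID_TEMPLATE; add_set_cell then queues update entries whose ids
# are built from BATCH_ENTRY_ID_TEMPLATE.
# ---------------------------------------------------------------------------
def _sketch_batch_cells_update(spreadsheet_key='key123', worksheet_id='od6'):
    feed_id = BATCH_POST_ID_TEMPLATE % (spreadsheet_key, worksheet_id)
    feed = CellsFeed(id=atom.data.Id(text=feed_id))
    feed.add_set_cell(row=1, col=1, input_value='=SUM(B1:B5)')
    feed.add_set_cell(row=1, col=2, input_value='42')
    # Queued entry ids now look like '<feed_id>/R1C1' and '<feed_id>/R1C2'.
    return feed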
# src/mlsquare/adapters/sklearn.py
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import logging
import os
import ray
from ray import tune
from ..optmizers import get_best_model
from ..utils.functions import _parse_params
import pickle
import onnxmltools
import numpy as np
import matplotlib.pyplot as plt
import time
import keras.backend as K
import warnings
warnings.filterwarnings("ignore")
class IrtKerasRegressor():
"""
Adapter to connect IRT models (Rasch one-parameter, two-parameter and Birnbaum's three-parameter) with keras models.
This class is used as an adapter for an IRT model that is initialised with parameters similar to R's
rasch and tpm (3-PL) models from the ltm package, implemented as proxy models in keras.
Parameters
----------
proxy_model : proxy model instance
The proxy model passed from dope.
primal_model : primal model instance
The primal model passed from dope.
params : dict, optional
Additional model params passed by the user.
Methods
-------
fit(X_users, X_questions, y)
Method to train a transpiled model.
plot()
Method to plot model's train-validation loss.
coefficients()
Method to output model coefficients -- Difficulty level,
Discrimination parameter, Guessing params
save(filename)
Method to save a trained model. This method saves
the models in three formals -- pickle, h5 and onnx.
Expects 'filename' as a string.
score(X, y)
Method to score a trained model.
predict(X)
This method returns the predicted values for a
trained model.
explain()
Method to provide model interpretations (yet to be implemented).
"""
def __init__(self, proxy_model, primal_model, **kwargs):
kwargs.setdefault('params', None)
self.primal_model = primal_model
self.proxy_model = proxy_model
self.proxy_model.primal = self.primal_model
self.params = kwargs['params']
def fit(self, x_user, x_questions, y_vals, **kwargs):
kwargs.setdefault('latent_traits', None)
kwargs.setdefault('batch_size', 16)
kwargs.setdefault('epochs', 64)
kwargs.setdefault('validation_split', 0.2)
kwargs.setdefault('params', self.params)
self.proxy_model.l_traits = kwargs['latent_traits']
self.proxy_model.x_train_user = x_user
self.proxy_model.x_train_questions = x_questions
self.proxy_model.y_ = y_vals
self.l_traits = kwargs['latent_traits']
# accept params passed in either the init or the fit method
self.params = self.params or kwargs['params']
if self.params != None: # Validate implementation with different types of tune input
if not isinstance(self.params, dict):
raise TypeError("Params should be of type 'dict'")
self.params = _parse_params(self.params, return_as='flat')
self.proxy_model.update_params(self.params)
# triggers for fourPL model
if self.proxy_model.name == 'tpm' and 'slip_params' in self.params and 'train' in self.params['slip_params'].keys():
if self.params['slip_params']['train']:
self.proxy_model.name = 'fourPL'
ray_verbose = False
_ray_log_level = logging.INFO if ray_verbose else logging.ERROR
ray.init(log_to_driver=False, logging_level=_ray_log_level, ignore_reinit_error=True, redis_max_memory=20*1000*1000*1000, object_store_memory=1000000000,
num_cpus=4)
def train_model(config, reporter):
self.proxy_model.set_params(params=config, set_by='optimizer')
print('\nIntitializing fit for {} model. . .\nBatch_size: {}; epochs: {};'.format(
self.proxy_model.name, kwargs['batch_size'], kwargs['epochs']))
model = self.proxy_model.create_model()
self.history = model.fit(x=[x_user, x_questions], y=y_vals, batch_size=kwargs['batch_size'],
epochs=kwargs['epochs'], verbose=0, validation_split=kwargs['validation_split'])
_, mae, accuracy = model.evaluate(
x=[x_user, x_questions], y=y_vals) # [1]
last_checkpoint = "weights_tune_{}.h5".format(
list(zip(np.random.choice(10, len(config), replace=False), config)))
model.save_weights(last_checkpoint)
reporter(mean_error=mae, mean_accuracy=accuracy,
checkpoint=last_checkpoint)
t1 = time.time()
configuration = tune.Experiment("experiment_name",
run=train_model,
resources_per_trial={"cpu": 4},
stop={"mean_error": 0.15,
"mean_accuracy": 95},
config=self.proxy_model.get_params())
trials = tune.run_experiments(configuration, verbose=0)
self.trials = trials
metric = "mean_error" # "mean_accuracy"
# Restore a model from the best trial.
def get_sorted_trials(trial_list, metric):
return sorted(trial_list, key=lambda trial: trial.last_result.get(metric, 0), reverse=True)
sorted_trials = get_sorted_trials(trials, metric)
for best_trial in sorted_trials:
try:
print("Creating model...")
self.proxy_model.set_params(
params=best_trial.config, set_by='optimizer')
best_model = self.proxy_model.create_model()
weights = os.path.join(
best_trial.logdir, best_trial.last_result["checkpoint"])
print("Loading from", weights)
# TODO Validate this loaded model.
best_model.load_weights(weights)
break
except Exception as e:
print(e)
print("Loading failed. Trying next model")
exe_time = time.time()-t1
self.model = best_model
#self.model = model
#print('\nIntitializing fit for {} model. . .\nBatch_size: {}; epochs: {};'.format(self.proxy_model.name, kwargs['batch_size'], kwargs['epochs']))
#model = self.proxy_model.create_model()
#t1= time.time()
# self.history= model.fit(x=[x_user, x_questions], y=y_vals, batch_size=kwargs['batch_size'], epochs=kwargs['epochs'], verbose=0, validation_split=kwargs['validation_split'])#, callbacks= kwargs['callbacks'])#added callbacks
#exe_time = time.time()-t1
#
#self.model = model
# Following lets user access each coeffs as and when required
self.difficulty = self.coefficients()['difficulty_level']
self.discrimination = self.coefficients()['disc_param']
self.guessing = self.coefficients()['guessing_param']
self.slip = self.coefficients()['slip_param']
num_trainables = np.sum([K.count_params(layer)
for layer in self.model.trainable_weights])
sample_size = y_vals.shape[0]
log_lik, _, _ = self.model.evaluate(x=[x_user, x_questions], y=y_vals)
self.AIC = 2*num_trainables - 2*np.log(log_lik)
self.AICc = self.AIC + (2*np.square(num_trainables) +
2*num_trainables)/(sample_size - num_trainables - 1)
print('\nTraining on : {} samples for : {} epochs has completed in : {} seconds.'.format(
self.proxy_model.x_train_user.shape[0], kwargs['epochs'], np.round(exe_time, decimals=3)))
print('\nAIC value: {} and AICc value: {}'.format(
np.round(self.AIC, 3), np.round(self.AICc, 3)))
print('\nUse `object.plot()` to view train/validation loss curves;\nUse `object.history` to obtain train/validation loss across all the epochs.\nUse `object.coefficients()` to obtain model parameters--Question difficulty, discrimination, guessing & slip')
print('Use `object.AIC` & `object.AICc` to obtain Akaike Information Criterion (AIC & AICc) values.')
return self
def plot(self):
plt.plot(self.history.history['loss'])
plt.plot(self.history.history['val_loss'])
plt.title('Model loss for "{} model" '.format(self.proxy_model.name))
plt.xlabel('epoch')
plt.ylabel('loss')
plt.legend(['train', 'validation'], loc='upper right')
return plt.show()
def coefficients(self):
rel_layers_idx = list()
for idx, layer in enumerate(self.model.layers):
if layer.name in ['latent_trait/ability', 'difficulty_level', 'disc_param', 'guessing_param', 'slip_param']:
rel_layers_idx.append(idx)
coef = {self.model.layers[idx].name: self.model.layers[idx].get_weights()[
0] for idx in rel_layers_idx}
t_4PL = {'tpm': ['guessing_param'], 'fourPL': [
'guessing_param', 'slip_param']}
if self.proxy_model.name in t_4PL.keys(): # reporting guess & slip
for layer in t_4PL[self.proxy_model.name]:
coef.update(
{layer: np.exp(coef[layer])/(1 + np.exp(coef[layer]))})
coef.update({'disc_param': np.exp(coef['disc_param'])})
# if not self.proxy_model.name=='tpm':#for 1PL & 2PL
# coef.update({'disc_param':np.exp(coef['disc_param'])})
# else:
# coef.update({'guessing_param':np.exp(coef['guessing_param'])/(1+ np.exp(coef['guessing_param']))})
# coef.update({'disc_param':np.exp(coef['disc_param'])})
return coef
def predict(self, x_user, x_questions):
if len(x_user.shape) != len(self.proxy_model.x_train_user.shape) or len(x_questions.shape) != len(self.proxy_model.x_train_user.shape):
raise ValueError("While checking User/Question input shape, Expected users to have shape(None,{}) and questions to have shape(None,{})".format(
self.proxy_model.x_train_user.shape[1], self.proxy_model.x_train_questions.shape[1]))
if x_user.shape[1] != self.proxy_model.x_train_user.shape[1] or x_questions.shape[1] != self.proxy_model.x_train_questions.shape[1]:
raise ValueError("User/Question seem to be an anomaly to current training dataset; Expected Users to have shape(None,{}) and Questions to have shape(None,{})".format(
self.proxy_model.x_train_user.shape[1], self.proxy_model.x_train_questions.shape[1]))
pred = self.model.predict([x_user, x_questions])
return pred
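# ---------------------------------------------------------------------------
# Hypothetical input-construction sketch for IrtKerasRegressor. The adapter
# instance itself comes from mlsquare's dope() machinery and is not built
# here; the helper below only illustrates the one-hot user/item matrices and
# binary responses that fit() above expects. All names and sizes are made up.
# ---------------------------------------------------------------------------
def _sketch_irt_inputs(num_users=100, num_items=20, num_responses=500, seed=0):
    rng = np.random.RandomState(seed)
    users = rng.randint(num_users, size=num_responses)
    items = rng.randint(num_items, size=num_responses)
    x_user = np.eye(num_users)[users]           # (num_responses, num_users)
    x_questions = np.eye(num_items)[items]      # (num_responses, num_items)
    y = rng.randint(2, size=num_responses)      # 0/1 correctness
    return x_user, x_questions, y

# With an adapter instance `model` obtained elsewhere (assumed, not shown):
#   x_user, x_questions, y = _sketch_irt_inputs()
#   model.fit(x_user, x_questions, y, epochs=8, batch_size=16)
#   coefs = model.coefficients()   # difficulty, discrimination, guessing, slip
#   preds = model.predict(x_user, x_questions)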
class SklearnTfTransformer():
"""
Adapter to connect sklearn decomposition methods to respective TF implementations.
This class can be used as an adapter for primal decomposition methods that can
utilise TF backend for proxy model.
Parameters
----------
proxy_model : proxy model instance
The proxy model passed from dope.
primal_model : primal model instance
The primal model passed from dope.
params : dict, optional
Additional model params passed by the user.
Methods
-------
fit(X, y)
Method to train a transpiled model
transform(X)
Method to transform the input matrix to the truncated dimensions;
valid only after the decomposition has been computed.
fit_transform(X)
Method to right away transform the input matrix to truncated dimensions.
inverse_transform(X)
This method returns Original values from the resulting decomposed matrices.
"""
def __init__(self, proxy_model, primal_model, **kwargs):
self.primal_model = primal_model
self.proxy_model = proxy_model
self.proxy_model.primal = self.primal_model
self.params = None
def fit(self, X, y=None, **kwargs):
self.proxy_model.X = X
self.proxy_model.y = y
if self.params != None: # Validate implementation with different types of tune input
if not isinstance(self.params, dict):
raise TypeError("Params should be of type 'dict'")
self.params = _parse_params(self.params, return_as='flat')
self.proxy_model.update_params(self.params)
self.fit_transform(X)
# self.proxy_model.fit(X)
#self.params = self.proxy_model.get_params()
# to avoid calling model.fit(X).proxy_model for sigma & Vh
#self.components_= self.params['components_']
#self.singular_values_= self.params['singular_values_']
return self
def transform(self, X):
return self.proxy_model.transform(X)
def fit_transform(self, X, y=None):
x_transformed = self.proxy_model.fit_transform(X)
self.params = self.proxy_model.get_params()
# to avoid calling model.fit(X).proxy_model for sigma & Vh
self.components_ = self.params['components_']
self.singular_values_ = self.params['singular_values_']
return x_transformed
def inverse_transform(self, X):
return self.proxy_model.inverse_transform(X)
class SklearnKerasClassifier():
"""
Adapter to connect sklearn classifier algorithms with keras models.
This class can be used as an adapter for any primal classifier that relies
on Keras as the backend for the proxy model.
Parameters
----------
proxy_model : proxy model instance
The proxy model passed from dope.
primal_model : primal model instance
The primal model passed from dope.
params : dict, optional
Additional model params passed by the user.
Methods
-------
fit(X, y)
Method to train a transpiled model
save(filename)
Method to save a trained model. This method saves
the model in three formats -- pickle, h5 and onnx.
Expects 'filename' as a string.
score(X, y)
Method to score a trained model.
predict(X)
This method returns the predicted values for a
trained model.
explain()
Method to provide model interpretations (yet to be implemented)
"""
def __init__(self, proxy_model, primal_model, **kwargs):
self.primal_model = primal_model
self.params = None # Temporary!
self.proxy_model = proxy_model
def fit(self, X, y, **kwargs):
kwargs.setdefault('cuts_per_feature', None) # Better way to handle?
# For all models?
self.proxy_model.cuts_per_feature = kwargs['cuts_per_feature']
kwargs.setdefault('verbose', 0)
kwargs.setdefault('params', self.params)
kwargs.setdefault('space', False)
kwargs.setdefault('epochs', 250)
kwargs.setdefault('batch_size', 30)
self.params = kwargs['params']
X = np.array(X)
y = np.array(y)
primal_model = self.primal_model
primal_model.fit(X, y)
y_pred = primal_model.predict(X)
X, y, y_pred = self.proxy_model.transform_data(X, y, y_pred)
# This should happen only after transformation.
self.proxy_model.X = X # abstract -> model_skeleton
self.proxy_model.y = y
self.proxy_model.primal = self.primal_model
if self.params is not None:  # Validate implementation with different types of tune input
if not isinstance(self.params, dict):
raise TypeError("Params should be of type 'dict'")
<filename>toontown/ai/ToontownAIRepository.py
import time
from direct.directnotify import DirectNotifyGlobal
from direct.distributed.PyDatagram import *
from panda3d.core import *
from panda3d.toontown import *
from otp.ai.AIZoneData import AIZoneDataStore
from otp.ai.TimeManagerAI import TimeManagerAI
from otp.distributed.OtpDoGlobals import *
from otp.friends.FriendManagerAI import FriendManagerAI
from otp.otpbase import OTPGlobals
from toontown.ai.DistributedPolarPlaceEffectMgrAI import DistributedPolarPlaceEffectMgrAI
from toontown.ai.DistributedResistanceEmoteMgrAI import DistributedResistanceEmoteMgrAI
from toontown.ai.HolidayManagerAI import HolidayManagerAI
from toontown.ai.NewsManagerAI import NewsManagerAI
from toontown.ai.WelcomeValleyManagerAI import WelcomeValleyManagerAI
from toontown.building.DistributedTrophyMgrAI import DistributedTrophyMgrAI
from toontown.catalog.CatalogManagerAI import CatalogManagerAI
from toontown.coghq.CogSuitManagerAI import CogSuitManagerAI
from toontown.coghq.CountryClubManagerAI import CountryClubManagerAI
from toontown.coghq.FactoryManagerAI import FactoryManagerAI
from toontown.coghq.LawOfficeManagerAI import LawOfficeManagerAI
from toontown.coghq.MintManagerAI import MintManagerAI
from toontown.coghq.PromotionManagerAI import PromotionManagerAI
from toontown.distributed.ToontownDistrictAI import ToontownDistrictAI
from toontown.distributed.ToontownDistrictStatsAI import ToontownDistrictStatsAI
from toontown.distributed.ToontownInternalRepository import ToontownInternalRepository
from toontown.estate.EstateManagerAI import EstateManagerAI
from toontown.fishing.FishManagerAI import FishManagerAI
from toontown.hood import ZoneUtil
from toontown.hood.BRHoodDataAI import BRHoodDataAI
from toontown.hood.BossbotHQDataAI import BossbotHQDataAI
from toontown.hood.CSHoodDataAI import CSHoodDataAI
from toontown.hood.CashbotHQDataAI import CashbotHQDataAI
from toontown.hood.DDHoodDataAI import DDHoodDataAI
from toontown.hood.DGHoodDataAI import DGHoodDataAI
from toontown.hood.DLHoodDataAI import DLHoodDataAI
from toontown.hood.GSHoodDataAI import GSHoodDataAI
from toontown.hood.GZHoodDataAI import GZHoodDataAI
from toontown.hood.LawbotHQDataAI import LawbotHQDataAI
from toontown.hood.MMHoodDataAI import MMHoodDataAI
from toontown.hood.OZHoodDataAI import OZHoodDataAI
from toontown.hood.TTHoodDataAI import TTHoodDataAI
from toontown.parties.ToontownTimeManager import ToontownTimeManager
from toontown.pets.PetManagerAI import PetManagerAI
from toontown.quest.QuestManagerAI import QuestManagerAI
from toontown.racing import RaceGlobals
from toontown.racing.DistributedLeaderBoardAI import DistributedLeaderBoardAI
from toontown.racing.DistributedRacePadAI import DistributedRacePadAI
from toontown.racing.DistributedStartingBlockAI import DistributedStartingBlockAI, DistributedViewingBlockAI
from toontown.racing.DistributedViewPadAI import DistributedViewPadAI
from toontown.racing.RaceManagerAI import RaceManagerAI
from toontown.safezone.SafeZoneManagerAI import SafeZoneManagerAI
from toontown.shtiker.CogPageManagerAI import CogPageManagerAI
from toontown.spellbook.TTOffMagicWordManagerAI import TTOffMagicWordManagerAI
from toontown.suit.SuitInvasionManagerAI import SuitInvasionManagerAI
from toontown.toon import NPCToons
from toontown.toonbase import ToontownGlobals, TTLocalizer
from toontown.tutorial.TutorialManagerAI import TutorialManagerAI
from toontown.uberdog.DistributedInGameNewsMgrAI import DistributedInGameNewsMgrAI
from toontown.uberdog.DistributedPartyManagerAI import DistributedPartyManagerAI
class ToontownAIRepository(ToontownInternalRepository):
notify = DirectNotifyGlobal.directNotify.newCategory('ToontownAIRepository')
def __init__(self, baseChannel, serverId, districtName):
ToontownInternalRepository.__init__(self, baseChannel, serverId, dcSuffix='AI')
self.districtName = districtName
self.doLiveUpdates = self.config.GetBool('want-live-updates', True)
self.wantCogdominiums = self.config.GetBool('want-cogdominiums', True)
self.wantEmblems = self.config.GetBool('want-emblems', True)
self.useAllMinigames = self.config.GetBool('want-all-minigames', True)
self.districtId = None
self.district = None
self.districtStats = None
self.timeManager = None
self.newsManager = None
self.holidayManager = None
self.welcomeValleyManager = None
self.catalogManager = None
self.zoneDataStore = None
self.inGameNewsMgr = None
self.trophyMgr = None
self.petMgr = None
self.dnaStoreMap = {}
self.dnaDataMap = {}
self.zoneTable = {}
self.hoods = []
self.buildingManagers = {}
self.suitPlanners = {}
self.suitInvasionManager = None
self.zoneAllocator = None
self.zoneId2owner = {}
self.questManager = None
self.cogPageManager = None
self.fishManager = None
self.factoryMgr = None
self.mintMgr = None
self.lawMgr = None
self.countryClubMgr = None
self.promotionMgr = None
self.cogSuitMgr = None
self.partyManager = None
self.safeZoneManager = None
self.raceMgr = None
self.polarPlaceEffectMgr = None
self.resistanceEmoteMgr = None
self.tutorialManager = None
self.friendManager = None
self.toontownTimeManager = None
self.estateMgr = None
self.magicWordManager = None
self.deliveryManager = None
self.defaultAccessLevel = OTPGlobals.accessLevelValues.get('TTOFF_DEVELOPER')
def getTrackClsends(self):
return False
def handleConnected(self):
ToontownInternalRepository.handleConnected(self)
# Generate our district...
self.districtId = self.allocateChannel()
self.notify.info('Creating district (%d)...' % self.districtId)
self.district = ToontownDistrictAI(self)
self.district.setName(self.districtName)
self.district.generateWithRequiredAndId(self.districtId, self.getGameDoId(), OTP_ZONE_ID_MANAGEMENT)
# Claim ownership of that district...
self.notify.info('Claiming ownership of district (%d)...' % self.districtId)
datagram = PyDatagram()
datagram.addServerHeader(self.districtId, self.ourChannel, STATESERVER_OBJECT_SET_AI)
datagram.addChannel(self.ourChannel)
self.send(datagram)
# Create our local objects.
self.notify.info('Creating local objects...')
self.createLocals()
# Create our global objects.
self.notify.info('Creating global objects...')
self.createGlobals()
# Create our zones.
self.notify.info('Creating zones (Playgrounds and Cog HQs)...')
self.createZones()
# Make our district available, and we're done.
self.notify.info('Making district available...')
self.district.b_setAvailable(1)
self.notify.info('District is now ready. Have fun in Toontown Online!')
def createLocals(self):
"""
Creates "local" objects.
"""
# Create our holiday manager...
self.holidayManager = HolidayManagerAI(self)
# Create our zone data store...
self.zoneDataStore = AIZoneDataStore()
# Create our pet manager...
self.petMgr = PetManagerAI(self)
# Create our suit invasion manager...
self.suitInvasionManager = SuitInvasionManagerAI(self)
# Create our zone allocator...
self.zoneAllocator = UniqueIdAllocator(ToontownGlobals.DynamicZonesBegin, ToontownGlobals.DynamicZonesEnd)
# Create our quest manager...
self.questManager = QuestManagerAI(self)
# Create our Cog page manager...
self.cogPageManager = CogPageManagerAI(self)
# Create our fish manager...
self.fishManager = FishManagerAI(self)
# Create our factory manager...
self.factoryMgr = FactoryManagerAI(self)
# Create our mint manager...
self.mintMgr = MintManagerAI(self)
# Create our law office manager...
self.lawMgr = LawOfficeManagerAI(self)
# Create our country club manager...
self.countryClubMgr = CountryClubManagerAI(self)
# Create our promotion manager...
self.promotionMgr = PromotionManagerAI(self)
# Create our Cog suit manager...
self.cogSuitMgr = CogSuitManagerAI(self)
# Create our race manager...
self.raceMgr = RaceManagerAI(self)
# Create our Toontown time manager...
self.toontownTimeManager = ToontownTimeManager(serverTimeUponLogin=int(time.time()),
globalClockRealTimeUponLogin=globalClock.getRealTime())
def createGlobals(self):
"""
Creates "global" objects.
"""
# Generate our district stats...
districtStatsId = self.allocateChannel()
self.notify.info('Creating district stats AI (%d)...' % districtStatsId)
self.districtStats = ToontownDistrictStatsAI(self)
self.districtStats.settoontownDistrictId(self.districtId)
self.districtStats.generateWithRequiredAndId(districtStatsId, self.getGameDoId(), OTP_ZONE_ID_DISTRICTS)
# Generate our time manager...
self.timeManager = TimeManagerAI(self)
self.timeManager.generateWithRequired(OTP_ZONE_ID_MANAGEMENT)
# Generate our news manager...
self.newsManager = NewsManagerAI(self)
self.newsManager.generateWithRequired(OTP_ZONE_ID_MANAGEMENT)
# Generate our Welcome Valley manager...
self.welcomeValleyManager = WelcomeValleyManagerAI(self)
self.welcomeValleyManager.generateWithRequired(OTP_ZONE_ID_MANAGEMENT)
# Generate our catalog manager...
self.catalogManager = CatalogManagerAI(self)
self.catalogManager.generateWithRequired(OTP_ZONE_ID_MANAGEMENT)
# Generate our in-game news manager...
self.inGameNewsMgr = DistributedInGameNewsMgrAI(self)
self.inGameNewsMgr.setLatestIssueStr('2013-08-22 23:49:46')
self.inGameNewsMgr.generateWithRequired(OTP_ZONE_ID_MANAGEMENT)
# Generate our trophy manager...
self.trophyMgr = DistributedTrophyMgrAI(self)
self.trophyMgr.generateWithRequired(OTP_ZONE_ID_MANAGEMENT)
# Generate our party manager...
self.partyManager = DistributedPartyManagerAI(self)
self.partyManager.generateWithRequired(OTP_ZONE_ID_MANAGEMENT)
# Generate our safezone manager...
self.safeZoneManager = SafeZoneManagerAI(self)
self.safeZoneManager.generateWithRequired(OTP_ZONE_ID_MANAGEMENT)
# Generate our Polar Place effect manager...
self.polarPlaceEffectMgr = DistributedPolarPlaceEffectMgrAI(self)
self.polarPlaceEffectMgr.generateWithRequired(3821)
# Generate our resistance emote manager...
self.resistanceEmoteMgr = DistributedResistanceEmoteMgrAI(self)
self.resistanceEmoteMgr.generateWithRequired(9720)
# Generate our tutorial manager...
self.tutorialManager = TutorialManagerAI(self)
self.tutorialManager.generateWithRequired(OTP_ZONE_ID_MANAGEMENT)
# Generate our friend manager...
self.friendManager = FriendManagerAI(self)
self.friendManager.generateWithRequired(OTP_ZONE_ID_MANAGEMENT)
# Generate our estate manager...
self.estateMgr = EstateManagerAI(self)
self.estateMgr.generateWithRequired(OTP_ZONE_ID_MANAGEMENT)
# Generate our Magic Word manager...
self.magicWordManager = TTOffMagicWordManagerAI(self)
self.magicWordManager.generateWithRequired(OTP_ZONE_ID_MANAGEMENT)
# Generate our delivery manager...
self.deliveryManager = self.generateGlobalObject(OTP_DO_ID_TOONTOWN_DELIVERY_MANAGER,
'DistributedDeliveryManager')
def createHood(self, hoodCtr, zoneId):
# Bossbot HQ doesn't use DNA, so we skip over that.
if zoneId != ToontownGlobals.BossbotHQ:
self.dnaStoreMap[zoneId] = DNAStorage()
self.dnaDataMap[zoneId] = loadDNAFileAI(self.dnaStoreMap[zoneId], self.genDNAFileName(zoneId))
if zoneId in ToontownGlobals.HoodHierarchy:
for streetId in ToontownGlobals.HoodHierarchy[zoneId]:
self.dnaStoreMap[streetId] = DNAStorage()
self.dnaDataMap[streetId] = loadDNAFileAI(self.dnaStoreMap[streetId], self.genDNAFileName(streetId))
hood = hoodCtr(self, zoneId)
hood.startup()
self.hoods.append(hood)
def createZones(self):
# First, generate our zone2NpcDict...
NPCToons.generateZone2NpcDict()
# Toontown Central
self.zoneTable[ToontownGlobals.ToontownCentral] = (
(ToontownGlobals.ToontownCentral, 1, 0), (ToontownGlobals.SillyStreet, 1, 1),
(ToontownGlobals.LoopyLane, 1, 1),
(ToontownGlobals.PunchlinePlace, 1, 1)
)
self.createHood(TTHoodDataAI, ToontownGlobals.ToontownCentral)
# Donald's Dock
self.zoneTable[ToontownGlobals.DonaldsDock] = (
(ToontownGlobals.DonaldsDock, 1, 0), (ToontownGlobals.BarnacleBoulevard, 1, 1),
(ToontownGlobals.SeaweedStreet, 1, 1), (ToontownGlobals.LighthouseLane, 1, 1)
)
self.createHood(DDHoodDataAI, ToontownGlobals.DonaldsDock)
# Daisy Gardens
self.zoneTable[ToontownGlobals.DaisyGardens] = (
(ToontownGlobals.DaisyGardens, 1, 0), (ToontownGlobals.ElmStreet, 1, 1),
(ToontownGlobals.MapleStreet, 1, 1), (ToontownGlobals.OakStreet, 1, 1)
)
self.createHood(DGHoodDataAI, ToontownGlobals.DaisyGardens)
# Minnie's Melodyland
self.zoneTable[ToontownGlobals.MinniesMelodyland] = (
(ToontownGlobals.MinniesMelodyland, 1, 0), (ToontownGlobals.AltoAvenue, 1, 1),
(ToontownGlobals.BaritoneBoulevard, 1, 1), (ToontownGlobals.TenorTerrace, 1, 1)
)
self.createHood(MMHoodDataAI, ToontownGlobals.MinniesMelodyland)
# The Brrrgh
self.zoneTable[ToontownGlobals.TheBrrrgh] = (
(ToontownGlobals.TheBrrrgh, 1, 0), (ToontownGlobals.WalrusWay, 1, 1),
(ToontownGlobals.SleetStreet, 1, 1), (ToontownGlobals.PolarPlace, 1, 1)
)
self.createHood(BRHoodDataAI, ToontownGlobals.TheBrrrgh)
# Donald's Dreamland
self.zoneTable[ToontownGlobals.DonaldsDreamland] = (
(ToontownGlobals.DonaldsDreamland, 1, 0), (ToontownGlobals.LullabyLane, 1, 1),
(ToontownGlobals.PajamaPlace, 1, 1)
)
self.createHood(DLHoodDataAI, ToontownGlobals.DonaldsDreamland)
# Sellbot HQ
self.zoneTable[ToontownGlobals.SellbotHQ] = (
(ToontownGlobals.SellbotHQ, 0, 1), (ToontownGlobals.SellbotFactoryExt, 0, 1)
)
self.createHood(CSHoodDataAI, ToontownGlobals.SellbotHQ)
# Cashbot HQ
self.zoneTable[ToontownGlobals.CashbotHQ] = (
(ToontownGlobals.CashbotHQ, 0, 1),
)
self.createHood(CashbotHQDataAI, ToontownGlobals.CashbotHQ)
# Lawbot HQ
self.zoneTable[ToontownGlobals.LawbotHQ] = (
(ToontownGlobals.LawbotHQ, 0, 1),
)
self.createHood(LawbotHQDataAI, ToontownGlobals.LawbotHQ)
# Bossbot HQ
self.zoneTable[ToontownGlobals.BossbotHQ] = (
(ToontownGlobals.BossbotHQ, 0, 0),
)
self.createHood(BossbotHQDataAI, ToontownGlobals.BossbotHQ)
# Goofy Speedway
self.zoneTable[ToontownGlobals.GoofySpeedway] = (
(ToontownGlobals.GoofySpeedway, 1, 0),
)
self.createHood(GSHoodDataAI, ToontownGlobals.GoofySpeedway)
# Chip 'n Dale's Acorn Acres
self.zoneTable[ToontownGlobals.OutdoorZone] = (
(ToontownGlobals.OutdoorZone, 1, 0),
)
self.createHood(OZHoodDataAI, ToontownGlobals.OutdoorZone)
# Chip 'n Dale's MiniGolf
self.zoneTable[ToontownGlobals.GolfZone] = (
(ToontownGlobals.GolfZone, 1, 0),
)
self.createHood(GZHoodDataAI, ToontownGlobals.GolfZone)
# Welcome Valley hoods (Toontown Central & Goofy Speedway)
self.notify.info('Creating ' + TTLocalizer.WelcomeValley[2] + '...')
self.welcomeValleyManager.createWelcomeValleyHoods()
# Assign the initial suit buildings.
self.notify.info('Assigning initial Cog buildings and Field Offices...')
for suitPlanner in self.suitPlanners.values():
suitPlanner.assignInitialSuitBuildings()
def incrementPopulation(self):
self.districtStats.b_setAvatarCount(self.districtStats.getAvatarCount() + 1)
def decrementPopulation(self):
self.districtStats.b_setAvatarCount(self.districtStats.getAvatarCount() - 1)
def getAvatarExitEvent(self, avId):
return 'distObjDelete-%d' % avId
def getZoneDataStore(self):
return self.zoneDataStore
def genDNAFileName(self, zoneId):
zoneId = ZoneUtil.getCanonicalZoneId(zoneId)
hoodId = ZoneUtil.getCanonicalHoodId(zoneId)
hood = ToontownGlobals.dnaMap[hoodId]
if hoodId == zoneId:
zoneId = 'sz'
phase = ToontownGlobals.phaseMap[hoodId]
else:
phase = ToontownGlobals.streetPhaseMap[hoodId]
if 'outdoor_zone' in hood or 'golf_zone' in hood:
phase = '6'
return 'phase_%s/dna/%s_%s.dna' % (phase, hood, zoneId)
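# Illustration (derived from the mapping above; the concrete hood/phase strings come
# from the ToontownGlobals tables and are not reproduced here): for a playground the
# zoneId collapses to the literal 'sz', while a street keeps its canonical zoneId, e.g.
#   genDNAFileName(<hood zoneId>)   -> 'phase_<phase>/dna/<hood>_sz.dna'
#   genDNAFileName(<street zoneId>) -> 'phase_<streetPhase>/dna/<hood>_<zoneId>.dna'
# with the phase forced to '6' for the outdoor_zone and golf_zone hoods.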
def findFishingPonds(self, dnaData, zoneId, area):
fishingPonds = []
fishingPondGroups = []
if isinstance(dnaData, DNAGroup) and ('fishing_pond' in dnaData.getName()):
fishingPondGroups.append(dnaData)
pond = self.fishManager.generatePond(area, zoneId)
fishingPonds.append(pond)
elif isinstance(dnaData, DNAVisGroup):
zoneId = ZoneUtil.getTrueZoneId(int(dnaData.getName().split(':')[0]), zoneId)
for i in xrange(dnaData.getNumChildren()):
foundFishingPonds, foundFishingPondGroups = self.findFishingPonds(dnaData.at(i), zoneId, area)
fishingPonds.extend(foundFishingPonds)
fishingPondGroups.extend(foundFishingPondGroups)
return fishingPonds, fishingPondGroups
def findFishingSpots(self, dnaData, fishingPond):
fishingSpots = []
if isinstance(dnaData, DNAGroup) and ('fishing_spot' in dnaData.getName()):
spot = self.fishManager.generateSpots(dnaData, fishingPond)
fishingSpots.append(spot)
for i in xrange(dnaData.getNumChildren()):
foundFishingSpots = self.findFishingSpots(dnaData.at(i), fishingPond)
fishingSpots.extend(foundFishingSpots)
return fishingSpots
def findPartyHats(self, dnaData, zoneId):
return []
def loadDNAFileAI(self, dnaStore, dnaFileName):
return loadDNAFileAI(dnaStore, dnaFileName)
def allocateZone(self, owner=None):
zoneId = self.zoneAllocator.allocate()
if owner:
self.zoneId2owner[zoneId] = owner
return zoneId
def deallocateZone(self, zone):
if self.zoneId2owner.get(zone):
del self.zoneId2owner[zone]
self.zoneAllocator.free(zone)
def trueUniqueName(self, idString):
return self.uniqueName(idString)
def findRacingPads(self, dnaData, zoneId, area, type='racing_pad', overrideDNAZone=False):
racingPads, racingPadGroups = [], []
if type in dnaData.getName():
if type == 'racing_pad':
nameSplit = dnaData.getName().split('_')
racePad = DistributedRacePadAI(self)
racePad.setArea(area)
racePad.index = int(nameSplit[2])
racePad.genre = nameSplit[3]
trackInfo = RaceGlobals.getNextRaceInfo(-1, racePad.genre, racePad.index)
racePad.setTrackInfo([trackInfo[0], trackInfo[1]])
racePad.laps = trackInfo[2]
racePad.generateWithRequired(zoneId)
racingPads.append(racePad)
| |
<reponame>BobDenny/alpyca-client<filename>alpaca/switch.py
# -*- coding: utf-8 -*-
# -----------------------------------------------------------------------------
# switch - Implements ASCOM Alpaca Switch device class
#
# Part of the Alpyca Client application interface package
#
# Author: <NAME> <<EMAIL>> (rbd)
#
# Python Compatibility: Requires Python 3.7 or later
# Doc Environment: Sphinx v4.5.0 with autodoc, autosummary, napoleon, and autoenum
# GitHub: https://github.com/BobDenny/alpyca-client
#
# -----------------------------------------------------------------------------
# MIT License
#
# Copyright (c) 2022 <NAME> and <NAME>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# -----------------------------------------------------------------------------
# Edit History:
# 02-May-22 (rbd) Initial Edit
# -----------------------------------------------------------------------------
from alpaca.device import Device
class Switch(Device):
"""ASCOM Standard ISwitch V2 Interface"""
def __init__(
self,
address: str,
device_number: int,
protocol: str = "http"
):
"""Initialize the Switch object.
Args:
address (str): IP address and port of the device (x.x.x.x:pppp)
device_number (int): The index of the device (usually 0)
protocol (str, optional): Only if device needs https. Defaults to "http".
"""
super().__init__(address, "switch", device_number, protocol)
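# Usage sketch (hypothetical address and device number; requires a reachable Alpaca
# Switch device, for example a local Alpaca simulator endpoint):
#
#   sw = Switch('127.0.0.1:11111', 0)   # address:port and device index are examples
#   count = sw.MaxSwitch                # number of switch devices exposed
#   state = sw.GetSwitch(0)             # True/False state of switch 0
#   if sw.CanWrite(0):
#       sw.SetSwitch(0, not state)      # toggle it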
@property
def MaxSwitch(self) -> int:
"""Count of switch devices managed by this driver.
Raises:
NotConnectedException: If the device is not connected
DriverException: An error occurred that is not described by
one of the more specific ASCOM exceptions.
The device did not *successfully* complete the request.
Notes:
* Number of switch devices managed by this driver. Devices are numbered from 0
to MaxSwitch - 1.
"""
return self._get("maxswitch")
def CanWrite(self, Id: int) -> bool:
"""The specified switch device can be written to.
Args:
Id: the specified switch number (see Notes)
Raises:
InvalidValueException: The Id is out of range (see :py:attr:`MaxSwitch`)
NotConnectedException: If the device is not connected
DriverException: An error occurred that is not described by
one of the more specific ASCOM exceptions.
The device did not *successfully* complete the request.
Notes:
* Switch devices are numbered from 0 to :py:attr:`MaxSwitch` - 1.
* Examples of switches that cannot be written to include a
limit switch or a sensor.
"""
return self._get("canwrite", ID=Id)
def GetSwitch(self, Id: int) -> bool:
"""The state of the specified switch device.
Args:
Id: the specified switch number (see Notes)
Raises:
InvalidValueException: The Id is out of range (see :py:attr:`MaxSwitch`)
NotConnectedException: If the device is not connected
DriverException: An error occurred that is not described by
one of the more specific ASCOM exceptions.
The device did not *successfully* complete the request.
Notes:
* Devices are numbered from 0 to :py:attr:`MaxSwitch` - 1.
* On is True, Off is False.
"""
return self._get("getswitch", ID=Id)
def GetSwitchDescription(self, Id: int) -> str:
"""The textual description of the specified switch device.
Args:
Id: the specified switch number (see Notes)
Raises:
InvalidValueException: The Id is out of range (see :py:attr:`MaxSwitch`)
NotConnectedException: If the device is not connected
DriverException: An error occurred that is not described by
one of the more specific ASCOM exceptions.
The device did not *successfully* complete the request.
Notes:
* Devices are numbered from 0 to :py:attr:`MaxSwitch` - 1.
"""
return self._get("getswitchdescription", ID=Id)
def GetSwitchName(self, Id: int) -> str:
"""The textual name of the specified switch device.
Args:
Id: the specified switch number (see Notes)
Raises:
InvalidValueException: The Id is out of range (see :py:attr:`MaxSwitch`)
NotConnectedException: If the device is not connected
DriverException: An error occurred that is not described by
one of the more specific ASCOM exceptions.
The device did not *successfully* complete the request.
Notes:
* Devices are numbered from 0 to :py:attr:`MaxSwitch` - 1.
"""
return self._get("getswitchname", ID=Id)
def GetSwitchValue(self, Id: int) -> float:
"""The value of the specified switch device as a float.
Args:
Id: the specified switch number (see Notes)
Raises:
InvalidValueException: The Id is out of range (see :py:attr:`MaxSwitch`)
NotConnectedException: If the device is not connected
DriverException: An error occurred that is not described by
one of the more specific ASCOM exceptions.
The device did not *successfully* complete the request.
Notes:
* Devices are numbered from 0 to :py:attr:`MaxSwitch` - 1.
"""
return self._get("getswitchvalue", ID=Id)
def MaxSwitchValue(self, Id: int) -> float:
"""The maximum value of the specified switch device as a double.
Args:
Id: the specified switch number (see Notes)
Raises:
InvalidValueException: The Id is out of range (see :py:attr:`MaxSwitch`)
NotConnectedException: If the device is not connected
DriverException: An error occurred that is not described by
one of the more specific ASCOM exceptions.
The device did not *successfully* complete the request.
Notes:
* Devices are numbered from 0 to :py:attr:`MaxSwitch` - 1.
"""
return self._get("maxswitchvalue", ID=Id)
def MinSwitchValue(self, Id: int) -> float:
"""The minimum value of the specified switch device as a double.
Args:
Id: the specified switch number (see Notes)
Raises:
InvalidValueException: The Id is out of range (see :py:attr:`MaxSwitch`)
NotConnectedException: If the device is not connected
DriverException: An error occurred that is not described by
one of the more specific ASCOM exceptions.
The device did not *successfully* complete the request.
Notes:
* Devices are numbered from 0 to :py:attr:`MaxSwitch` - 1.
"""
return self._get("minswitchvalue", ID=Id)
def SetSwitch(self, Id: int, State: bool) -> None:
"""Set a switch device to the specified state
Args:
Id: the specified switch number (see Notes)
State: The required control state
Raises:
InvalidValueException: The Id is out of range (see :py:attr:`MaxSwitch`)
NotConnectedException: If the device is not connected
DriverException: An error occurred that is not described by
one of the more specific ASCOM exceptions.
The device did not *successfully* complete the request.
Notes:
* Devices are numbered from 0 to :py:attr:`MaxSwitch` - 1.
* On is True, Off is False.
"""
self._put("setswitch", ID=Id, State=State)
def SetSwitchName(self, Id: int, Name: str) -> None:
"""Set a switch device name to the specified value.
Args:
Id: the specified switch number (see Notes)
Name: The desired (new) name for the switch
Raises:
InvalidValueException: The Id is out of range (see :py:attr:`MaxSwitch`)
NotConnectedException: If the device is not connected
DriverException: An error occurred that is not described by
one of the more specific ASCOM exceptions.
The device did not *successfully* complete the request.
Notes:
* Devices are numbered from 0 to :py:attr:`MaxSwitch` - 1.
"""
self._put("setswitchname", ID=Id, Name=Name)
def SetSwitchValue(self, Id: int, Value: float) -> None:
"""Set a switch device value to the specified value.
Args:
Id: the specified switch number (see Notes)
Value: Value to be set, between :py:attr:`MinSwitchValue` and
:py:attr:`MaxSwitchValue`.
Raises:
InvalidValueException: The Id is out of range (see :py:attr:`MaxSwitch`), or
the Value is out of range, not between :py:attr:`MinSwitchValue` and
:py:attr:`MaxSwitchValue`.
NotConnectedException: If the device is not connected
DriverException: An error occurred that is not described by
one of the more specific ASCOM exceptions.
The device did not *successfully* complete the request.
Notes:
* Devices are numbered from 0 to :py:attr:`MaxSwitch` - 1.
* On is True, Off is False.
"""
self._put("setswitchvalue", ID=Id, Value=Value)
def SwitchStep(self, Id: int) -> float:
"""The step size of the specified switch device (see Notes).
Args:
Id: the specified switch number (see Notes)
Raises:
InvalidValueException: The Id is out of range (see :py:attr:`MaxSwitch`)
NotConnectedException: If the device is not connected
DriverException: An error occurred that is not described by
one of the more specific ASCOM exceptions.
The device did not *successfully* complete the request.
lactis",
"organellum": "prokaryotic cytosol",
"sequence_modomics": "GGGGCCU4AGCUCAGCUGGGAGAGCGCCUGCUU5GC6CGCAGGAG7UCAGCGGUUCGAUCCCGCUAGGCUCCA",
"sequence_bpforms": "GGGGCCU{74U}AGCUCAGCUGGGAGAGCGCCUGCUU{501U}GC{62A}CGCAGGAG{7G}UCAGCGGUUCGAUCCCGCUAGGCUCCA",
"sequence_iupac": "GGGGCCUUAGCUCAGCUGGGAGAGCGCCUGCUUUGCACGCAGGAGGUCAGCGGUUCGAUCCCGCUAGGCUCCA",
"length": 73,
"number_of_modifications": 4,
"number_of_modified_a": 1,
"number_of_modified_c": 0,
"number_of_modified_g": 1,
"number_of_modified_u": 2,
"formula": "C701H805N281O518P73S",
"molecular_weight": 23747.74463,
"charge": -73,
"canonical_formula": "C694H790N279O515P73",
"canonical_molecular_weight": 23540.47663,
"canonical_charge": -74,
"extra_formula": "C7H15N2O3S",
"extra_molecular_weight": 207.268,
"extra_charge": 1,
"bpforms_errors": null,
"reference": {
"doi": "10.1093/nar/gkx1030"
},
"last_modified": {
"$date": "2020-04-28T23:34:00.788Z"
},
"ncbi_taxonomy_id": 1358
},
{
"anticodon": "GGC",
"organism": "Streptomyces griseus",
"organellum": "prokaryotic cytosol",
"sequence_modomics": "GGGGCUAUAGCUBAGUDGGDAGAGCGCCUGCAUGGCAUGCAGGAG7UCAGGAGUUCA\"UUCUCCUUAGCUCCACAA",
"sequence_bpforms": "GGGGCUAUAGCU{0C}AGU{8U}GG{8U}AGAGCGCCUGCAUGGCAUGCAGGAG{7G}UCAGGAGUUCA{1A}UUCUCCUUAGCUCCACAA",
"sequence_iupac": "GGGGCUAUAGCUCAGUUGGUAGAGCGCCUGCAUGGCAUGCAGGAGGUCAGGAGUUCAAUUCUCCUUAGCUCCACAA",
"length": 76,
"number_of_modifications": 5,
"number_of_modified_a": 1,
"number_of_modified_c": 1,
"number_of_modified_g": 1,
"number_of_modified_u": 2,
"formula": "C727H832N290O534P76",
"molecular_weight": 24530.15491,
"charge": -76,
"canonical_formula": "C724H819N290O534P76",
"canonical_molecular_weight": 24481.01791,
"canonical_charge": -77,
"extra_formula": "C3H13",
"extra_molecular_weight": 49.137,
"extra_charge": 1,
"bpforms_errors": null,
"reference": {
"doi": "10.1093/nar/gkx1030"
},
"last_modified": {
"$date": "2020-04-28T23:34:02.506Z"
},
"ncbi_taxonomy_id": 1911
},
{
"anticodon": "GGC",
"organism": "Streptomyces griseus",
"organellum": "prokaryotic cytosol",
"sequence_modomics": "GGGGCUAUAGCUBAGUDGGDAGAGCGCCUGCAUGGCAUGCAGGAG7UCAGGAGUUCA\"UUCUCCUUAGCUCCA",
"sequence_bpforms": "GGGGCUAUAGCU{0C}AGU{8U}GG{8U}AGAGCGCCUGCAUGGCAUGCAGGAG{7G}UCAGGAGUUCA{1A}UUCUCCUUAGCUCCA",
"sequence_iupac": "GGGGCUAUAGCUCAGUUGGUAGAGCGCCUGCAUGGCAUGCAGGAGGUCAGGAGUUCAAUUCUCCUUAGCUCCA",
"length": 73,
"number_of_modifications": 5,
"number_of_modified_a": 1,
"number_of_modified_c": 1,
"number_of_modified_g": 1,
"number_of_modified_u": 2,
"formula": "C698H799N277O515P73",
"molecular_weight": 23569.57863,
"charge": -73,
"canonical_formula": "C695H786N277O515P73",
"canonical_molecular_weight": 23520.44163,
"canonical_charge": -74,
"extra_formula": "C3H13",
"extra_molecular_weight": 49.137,
"extra_charge": 1,
"bpforms_errors": null,
"reference": {
"doi": "10.1093/nar/gkx1030"
},
"last_modified": {
"$date": "2020-04-28T23:34:02.535Z"
},
"ncbi_taxonomy_id": 1911
},
{
"anticodon": "CGC",
"organism": "Streptomyces griseus",
"organellum": "prokaryotic cytosol",
"sequence_modomics": "GGGCCUGUGGCGCAGUCUGGDAGCGCACCUCGUUCGCAUCGAGGGGGUCUGGGGUUCA\"AUCCCCACAGGUCCA",
"sequence_bpforms": "GGGCCUGUGGCGCAGUCUGG{8U}AGCGCACCUCGUUCGCAUCGAGGGGGUCUGGGGUUCA{1A}AUCCCCACAGGUCCA",
"sequence_iupac": "GGGCCUGUGGCGCAGUCUGGUAGCGCACCUCGUUCGCAUCGAGGGGGUCUGGGGUUCAAAUCCCCACAGGUCCA",
"length": 74,
"number_of_modifications": 2,
"number_of_modified_a": 1,
"number_of_modified_c": 0,
"number_of_modified_g": 0,
"number_of_modified_u": 1,
"formula": "C704H804N281O523P74",
"molecular_weight": 23861.67839,
"charge": -75,
"canonical_formula": "C703H800N281O523P74",
"canonical_molecular_weight": 23845.63539,
"canonical_charge": -75,
"extra_formula": "CH4",
"extra_molecular_weight": 16.043,
"extra_charge": 0,
"bpforms_errors": null,
"reference": {
"doi": "10.1093/nar/gkx1030"
},
"last_modified": {
"$date": "2020-04-28T23:34:02.564Z"
},
"ncbi_taxonomy_id": 1911
},
{
"anticodon": "UGC",
"organism": "Streptomyces griseus",
"organellum": "prokaryotic cytosol",
"sequence_modomics": "GGGGCCUUAGCUBAGUDGGDAGAGCGCUGCCUUUGCAAGGCAGAUGUCAGGAGUUCGAAUCUCCUAGGCUCCA",
"sequence_bpforms": "GGGGCCUUAGCU{0C}AGU{8U}GG{8U}AGAGCGCUGCCUUUGCAAGGCAGAUGUCAGGAGUUCGAAUCUCCUAGGCUCCA",
"sequence_iupac": "GGGGCCUUAGCUCAGUUGGUAGAGCGCUGCCUUUGCAAGGCAGAUGUCAGGAGUUCGAAUCUCCUAGGCUCCA",
"length": 73,
"number_of_modifications": 3,
"number_of_modified_a": 0,
"number_of_modified_c": 1,
"number_of_modified_g": 0,
"number_of_modified_u": 2,
"formula": "C695H792N275O516P73",
"molecular_weight": 23514.47463,
"charge": -74,
"canonical_formula": "C694H786N275O516P73",
"canonical_molecular_weight": 23496.41563,
"canonical_charge": -74,
"extra_formula": "CH6",
"extra_molecular_weight": 18.059,
"extra_charge": 0,
"bpforms_errors": null,
"reference": {
"doi": "10.1093/nar/gkx1030"
},
"last_modified": {
"$date": "2020-04-28T23:34:02.594Z"
},
"ncbi_taxonomy_id": 1911
}
]
}
result = self.src.build_rna_modification_entity(obj)
print(result)
@unittest.skip("passed")
def test_build_rna_modification_observation(self):
null = None
obj = {
"amino_acid": "Ala",
"aa_code": "A",
"aa_name": "Alanine",
"kegg_orthology_id": "K14218",
"kegg_gene_name": "tRNA-Ala",
"definition": "tRNA Ala",
"kegg_pathway_id": "ko00970 ",
"kegg_pathway_name": "Aminoacyl-tRNA biosynthesis",
"modifications": [
{
"anticodon": "VGC",
"organism": "Escherichia coli",
"organellum": "prokaryotic cytosol",
"sequence_modomics": "GGGGCUAUAGCUCAGCDGGGAGAGCGCCUGCUUVGCACGCAGGAG7UCUGCGGTPCGAUCCCGCAUAGCUCCACCA",
"sequence_bpforms": "GGGGCUAUAGCUCAGC{8U}GGGAGAGCGCCUGCUU{502U}GCACGCAGGAG{7G}UCUGCGG{5U}{9U}CGAUCCCGCAUAGCUCCACCA",
"sequence_iupac": "GGGGCUAUAGCUCAGCUGGGAGAGCGCCUGCUUUGCACGCAGGAGGUCUGCGGUUCGAUCCCGCAUAGCUCCACCA",
"length": 76,
"number_of_modifications": 5,
"number_of_modified_a": 0,
"number_of_modified_c": 0,
"number_of_modified_g": 1,
"number_of_modified_u": 4,
"formula": "C726H832N289O538P76",
"molecular_weight": 24568.13291,
"charge": -77,
"canonical_formula": "C722H822N289O535P76",
"canonical_molecular_weight": 24462.01191,
"canonical_charge": -77,
"extra_formula": "C4H10O3",
"extra_molecular_weight": 106.12100000000001,
"extra_charge": 0,
"bpforms_errors": null,
"reference": {
"doi": "10.1093/nar/gkx1030"
},
"last_modified": {
"$date": "2020-04-28T23:31:09.439Z"
},
"ncbi_taxonomy_id": 562
},
{
"anticodon": "GGC",
"organism": "Escherichia coli",
"organellum": "prokaryotic cytosol",
"sequence_modomics": "GGGGCUAUAGCUCAGCDGGGAGAGCGCUUGCAUGGCAUGCAAGAG7UCAGCGGTPCGAUCCCGCUUAGCUCCACCA",
"sequence_bpforms": "GGGGCUAUAGCUCAGC{8U}GGGAGAGCGCUUGCAUGGCAUGCAAGAG{7G}UCAGCGG{5U}{9U}CGAUCCCGCUUAGCUCCACCA",
"sequence_iupac": "GGGGCUAUAGCUCAGCUGGGAGAGCGCUUGCAUGGCAUGCAAGAGGUCAGCGGUUCGAUCCCGCUUAGCUCCACCA",
"length": 76,
"number_of_modifications": 4,
"number_of_modified_a": 0,
"number_of_modified_c": 0,
"number_of_modified_g": 1,
"number_of_modified_u": 3,
"formula": "C726H831N293O533P76",
"molecular_weight": 24543.157909999998,
"charge": -76,
"canonical_formula": "C724H822N293O533P76",
"canonical_molecular_weight": 24510.06391,
"canonical_charge": -77,
"extra_formula": "C2H9",
"extra_molecular_weight": 33.094,
"extra_charge": 1,
"bpforms_errors": null,
"reference": {
"doi": "10.1093/nar/gkx1030"
},
"last_modified": {
"$date": "2020-04-28T23:31:09.559Z"
},
"ncbi_taxonomy_id": 562
},
{
"anticodon": "AGC",
"organism": "Saccharomyces cerevisiae",
"organellum": "cytosolic",
"sequence_modomics": "GGGCGUGUKGCGUAGDCGGDAGCGCRCUCCCUUIGCOPGGGAGAGGDCUCCGGTPCGAUUCCGGACUCGUCCACCA",
"sequence_bpforms": "GGGCGUGU{1G}GCGUAG{8U}CGG{8U}AGCGC{22G}CUCCCUU{9A}GC{19A}{9U}GGGAGAGG{8U}CUCCGG{5U}{9U}CGAUUCCGGACUCGUCCACCA",
"sequence_iupac": "GGGCGUGUGGCGUAGUCGGUAGCGCGCUCCCUUAGCAUGGGAGAGGUCUCCGGUUCGAUUCCGGACUCGUCCACCA",
"length": 76,
"number_of_modifications": 10,
"number_of_modified_a": 2,
"number_of_modified_c": 0,
"number_of_modified_g": 2,
"number_of_modified_u": 6,
"formula": "C726H834N283O542P76",
"molecular_weight": 24550.102909999998,
"charge": -77,
"canonical_formula": "C721H820N285O540P76",
"canonical_molecular_weight": 24471.95191,
"canonical_charge": -77,
"extra_formula": "C5H14N-2O2",
"extra_molecular_weight": 78.15100000000001,
"extra_charge": 0,
"bpforms_errors": null,
"reference": {
"doi": "10.1093/nar/gkx1030"
},
"last_modified": {
"$date": "2020-04-28T23:31:10.684Z"
},
"ncbi_taxonomy_id": 4932
},
{
"anticodon": "UGC",
"organism": "Bacillus subtilis",
"organellum": "prokaryotic cytosol",
"sequence_modomics": "GGAGCCUUAGCUCAGCDGGGAGAGCGCCUGCUU5GC=CGCAGGAG7UCAGCGGTPCGAUCCCGCUAGGCUCCACCA",
"sequence_bpforms": "GGAGCCUUAGCUCAGC{8U}GGGAGAGCGCCUGCUU{501U}GC{6A}CGCAGGAG{7G}UCAGCGG{5U}{9U}CGAUCCCGCUAGGCUCCACCA",
"sequence_iupac": "GGAGCCUUAGCUCAGCUGGGAGAGCGCCUGCUUUGCACGCAGGAGGUCAGCGGUUCGAUCCCGCUAGGCUCCACCA",
"length": 76,
"number_of_modifications": 6,
"number_of_modified_a": 1,
"number_of_modified_c": 0,
"number_of_modified_g": 1,
"number_of_modified_u": 4,
"formula": "C726H836N290O535P76",
"molecular_weight": 24538.174909999998,
"charge": -76,
"canonical_formula": "C722H823N290O534P76",
"canonical_molecular_weight": 24461.02791,
"canonical_charge": -77,
"extra_formula": "C4H13O",
"extra_molecular_weight": 77.14699999999999,
"extra_charge": 1,
"bpforms_errors": null,
"reference": {
"doi": "10.1093/nar/gkx1030"
},
"last_modified": {
"$date": "2020-04-28T23:31:11.889Z"
},
"ncbi_taxonomy_id": 1423
},
{
"anticodon": "UGC",
"organism": "Mycoplasma capricolum",
"organellum": "prokaryotic cytosol",
"sequence_modomics": "GGGCCCU4AGCUCAGCDGGGAGAGCACCUGCCUUGC=CGCAGGGG7UCGACGGUPCGAUCCCGUUAGGGUCCACCA",
"sequence_bpforms": "GGGCCCU{74U}AGCUCAGC{8U}GGGAGAGCACCUGCCUUGC{6A}CGCAGGGG{7G}UCGACGGU{9U}CGAUCCCGUUAGGGUCCACCA",
"sequence_iupac": "GGGCCCUUAGCUCAGCUGGGAGAGCACCUGCCUUGCACGCAGGGGGUCGACGGUUCGAUCCCGUUAGGGUCCACCA",
"length": 76,
"number_of_modifications": 5,
"number_of_modified_a": 1,
"number_of_modified_c": 0,
"number_of_modified_g": 1,
"number_of_modified_u": 3,
"formula": "C724H832N290O534P76S",
"molecular_weight": 24526.18191,
"charge": -76,
"canonical_formula": "C722H823N290O535P76",
"canonical_molecular_weight": 24477.02691,
"canonical_charge": -77,
"extra_formula": "C2H9O-1S",
"extra_molecular_weight": 49.155,
"extra_charge": 1,
"bpforms_errors": null,
"reference": {
"doi": "10.1093/nar/gkx1030"
},
"last_modified": {
"$date": "2020-04-28T23:31:11.918Z"
},
"ncbi_taxonomy_id": 2095
},
{
"anticodon": "GGC",
"organism": "Bacillus subtilis",
"organellum": "prokaryotic cytosol",
"sequence_modomics": "GGGGCCAUAGCUCAGCDGGGAGAGCGCUACGCUGGCAGCGUAGAG7UCAGGGGTPCGAGCCCCCUUGGCUCCACCA",
"sequence_bpforms": "GGGGCCAUAGCUCAGC{8U}GGGAGAGCGCUACGCUGGCAGCGUAGAG{7G}UCAGGGG{5U}{9U}CGAGCCCCCUUGGCUCCACCA",
"sequence_iupac": "GGGGCCAUAGCUCAGCUGGGAGAGCGCUACGCUGGCAGCGUAGAGGUCAGGGGUUCGAGCCCCCUUGGCUCCACCA",
"length": 76,
"number_of_modifications": 4,
"number_of_modified_a": 0,
"number_of_modified_c": 0,
"number_of_modified_g": 1,
"number_of_modified_u": 3,
"formula": "C727H834N298O532P76",
"molecular_weight": 24612.228909999998,
"charge": -76,
"canonical_formula": "C725H825N298O532P76",
"canonical_molecular_weight": 24579.13491,
"canonical_charge": -77,
"extra_formula": "C2H9",
"extra_molecular_weight": 33.094,
"extra_charge": 1,
"bpforms_errors": null,
"reference": {
"doi": "10.1093/nar/gkx1030"
},
"last_modified": {
"$date": "2020-04-28T23:31:11.947Z"
},
"ncbi_taxonomy_id": 1423
},
{
"anticodon": "CGC",
"organism": "Halobacterium salinarum",
"organellum": "prokaryotic cytosol",
"sequence_modomics": "GGGCUCGUAGAUCAGCGGUAGAUCRCUUCCUUCGCAAGGAAGAGGCC?UGGG]PBOAAUCCCAGCGAGUCCACCA",
"sequence_bpforms": "GGGCUCGUAGAUCAGCGGUAGAUC{22G}CUUCCUUCGCAAGGAAGAGGCC{5C}UGGG{19U}{9U}{0C}{19A}AAUCCCAGCGAGUCCACCA",
"sequence_iupac": "GGGCUCGUAGAUCAGCGGUAGAUCGCUUCCUUCGCAAGGAAGAGGCCCUGGGUUCAAAUCCCAGCGAGUCCACCA",
"length": 75,
"number_of_modifications": 6,
"number_of_modified_a": 1,
"number_of_modified_c": 2,
"number_of_modified_g": 1,
"number_of_modified_u": 2,
"formula": "C720H823N288O524P75",
"molecular_weight": 24218.02815,
"charge": -76,
"canonical_formula": "C714H812N289O523P75",
"canonical_molecular_weight": 24132.88215,
"canonical_charge": -76,
"extra_formula": "C6H11N-1O",
"extra_molecular_weight": 85.146,
"extra_charge": 0,
"bpforms_errors": null,
"reference": {
"doi": "10.1093/nar/gkx1030"
},
"last_modified": {
"$date": "2020-04-28T23:31:13.398Z"
},
"ncbi_taxonomy_id": 2242
},
{
"anticodon": "CGC",
"organism": "Haloferax volcanii",
"organellum": "prokaryotic cytosol",
"sequence_modomics": "GGGCUCGUAGAUCAGUGGCAGAUCRCUUCCUUCGCAAGGAAGAGGC??GGGG]PBOAAUCCCCGCGAGUCCACCA",
"sequence_bpforms": "GGGCUCGUAGAUCAGUGGCAGAUC{22G}CUUCCUUCGCAAGGAAGAGGC{5C}{5C}GGGG{19U}{9U}{0C}{19A}AAUCCCCGCGAGUCCACCA",
"sequence_iupac": "GGGCUCGUAGAUCAGUGGCAGAUCGCUUCCUUCGCAAGGAAGAGGCCCGGGGUUCAAAUCCCCGCGAGUCCACCA",
"length": 75,
"number_of_modifications": 7,
"number_of_modified_a": 1,
"number_of_modified_c": 3,
"number_of_modified_g": 1,
"number_of_modified_u": 2,
"formula": "C721H826N289O524P75",
"molecular_weight": 24247.07015,
"charge": -76,
"canonical_formula": "C714H813N290O523P75",
"canonical_molecular_weight": 24147.89715,
"canonical_charge": -76,
"extra_formula": "C7H13N-1O",
"extra_molecular_weight": 99.17299999999999,
"extra_charge": 0,
"bpforms_errors": null,
"reference": {
"doi": "10.1093/nar/gkx1030"
},
"last_modified": {
"$date": "2020-04-28T23:31:13.428Z"
},
"ncbi_taxonomy_id": 2246
},
{
"anticodon": "GGC",
"organism": "Haloferax volcanii",
"organellum": "prokaryotic cytosol",
"sequence_modomics": "GGGCUCGUAGAUCAGGGGUAGAUCACUCCCUUGGCAUGGGAGAGGC??CGGG]PBOAAUCCCGGCGAGUCCACCA",
"sequence_bpforms": "GGGCUCGUAGAUCAGGGGUAGAUCACUCCCUUGGCAUGGGAGAGGC{5C}{5C}CGGG{19U}{9U}{0C}{19A}AAUCCCGGCGAGUCCACCA",
"sequence_iupac": "GGGCUCGUAGAUCAGGGGUAGAUCACUCCCUUGGCAUGGGAGAGGCCCCGGGUUCAAAUCCCGGCGAGUCCACCA",
"length": 75,
"number_of_modifications": 6,
"number_of_modified_a": 1,
"number_of_modified_c": 3,
"number_of_modified_g": 0,
"number_of_modified_u": 2,
"formula": "C720H822N291O525P75",
"molecular_weight": 24275.04015,
"charge": -76,
"canonical_formula": "C715H813N292O524P75",
"canonical_molecular_weight": 24203.92115,
"canonical_charge": -76,
"extra_formula": "C5H9N-1O",
"extra_molecular_weight": 71.119,
"extra_charge": 0,
"bpforms_errors": null,
"reference": {
"doi": "10.1093/nar/gkx1030"
},
"last_modified": {
"$date": "2020-04-28T23:31:13.457Z"
},
"ncbi_taxonomy_id": 2246
},
{
"anticodon": "UGC",
"organism": "Haloferax volcanii",
"organellum": "prokaryotic cytosol",
"sequence_modomics": "GGGCCCAUAGCUCAGUGGUAGAGULCCUCCUUUGCAAGGAGGAUGC??AGGG]PBGAAUCCCUGUGGGUCCACCA",
"sequence_bpforms": "GGGCCCAUAGCUCAGUGGUAGAGU{2G}CCUCCUUUGCAAGGAGGAUGC{5C}{5C}AGGG{19U}{9U}{0C}GAAUCCCUGUGGGUCCACCA",
"sequence_iupac": "GGGCCCAUAGCUCAGUGGUAGAGUGCCUCCUUUGCAAGGAGGAUGCCCAGGGUUCGAAUCCCUGUGGGUCCACCA",
"length": 75,
"number_of_modifications": 6,
"number_of_modified_a": 0,
"number_of_modified_c": 3,
"number_of_modified_g": 1,
"number_of_modified_u": 2,
"formula": "C718H820N285O528P75",
"molecular_weight": 24212.95715,
"charge": -76,
"canonical_formula": "C713H810N285O528P75",
"canonical_molecular_weight": 24142.82215,
"canonical_charge": -76,
"extra_formula": "C5H10",
"extra_molecular_weight": 70.135,
"extra_charge": 0,
"bpforms_errors": null,
"reference": {
"doi": "10.1093/nar/gkx1030"
},
"last_modified": {
"$date": "2020-04-28T23:31:13.483Z"
},
"ncbi_taxonomy_id": 2246
},
{
"anticodon": "UGC",
"organism": "Mycoplasma mycoides",
"organellum": "prokaryotic cytosol",
"sequence_modomics": "GGGCCCUUAGCUCAGCDGGGAGAGCACCUGCCUUGC=CGCAGGGG7UCGACGGUUCGAUCCCGUUAGGGUCCACCA",
"sequence_bpforms": "GGGCCCUUAGCUCAGC{8U}GGGAGAGCACCUGCCUUGC{6A}CGCAGGGG{7G}UCGACGGUUCGAUCCCGUUAGGGUCCACCA",
"sequence_iupac": "GGGCCCUUAGCUCAGCUGGGAGAGCACCUGCCUUGCACGCAGGGGGUCGACGGUUCGAUCCCGUUAGGGUCCACCA",
"length": 76,
"number_of_modifications": 3,
"number_of_modified_a": 1,
"number_of_modified_c": 0,
"number_of_modified_g": 1,
"number_of_modified_u": 1,
"formula": "C724H832N290O535P76",
"molecular_weight": 24510.120909999998,
"charge": -76,
"canonical_formula": "C722H823N290O535P76",
"canonical_molecular_weight": 24477.02691,
"canonical_charge": -77,
"extra_formula": "C2H9",
"extra_molecular_weight": 33.094,
"extra_charge": 1,
"bpforms_errors": null,
"reference": {
"doi": "10.1093/nar/gkx1030"
},
"last_modified": {
"$date": "2020-04-28T23:31:13.512Z"
},
"ncbi_taxonomy_id": 2102
},
{
"anticodon": "UGC",
"organism": "Mycoplasma mycoides",
"organellum": "prokaryotic cytosol",
"sequence_modomics": "GGGCCCUUAGCUCAGCDGGGAGAGCACCUGCCUUGC=CGCAGGGG7UCGACGGUUCGAUCCCGUUAGGGUCCACCA",
"sequence_bpforms": "GGGCCCUUAGCUCAGC{8U}GGGAGAGCACCUGCCUUGC{6A}CGCAGGGG{7G}UCGACGGUUCGAUCCCGUUAGGGUCCACCA",
"sequence_iupac": "GGGCCCUUAGCUCAGCUGGGAGAGCACCUGCCUUGCACGCAGGGGGUCGACGGUUCGAUCCCGUUAGGGUCCACCA",
"length": 76,
"number_of_modifications": 3,
"number_of_modified_a": 1,
"number_of_modified_c": 0,
"number_of_modified_g": 1,
"number_of_modified_u": 1,
"formula": "C724H832N290O535P76",
"molecular_weight": 24510.120909999998,
"charge": -76,
"canonical_formula": "C722H823N290O535P76",
"canonical_molecular_weight": 24477.02691,
"canonical_charge": -77,
"extra_formula": "C2H9",
"extra_molecular_weight": 33.094,
"extra_charge": 1,
"bpforms_errors": null,
"reference": {
"doi": "10.1093/nar/gkx1030"
},
"last_modified": {
"$date": "2020-04-28T23:33:47.158Z"
},
"ncbi_taxonomy_id": 2102
},
{
"anticodon": "IGC",
"organism": "Pichia jadinii",
"organellum": "cytosolic",
"sequence_modomics": "GGGCGUGUKGCGUAGDDGGDAGCGCRPUCGCUUIGCOPGCGAAAGGDCUCCGGTPCG\"CUCCGGACUCGUCCACCA",
"sequence_bpforms": "GGGCGUGU{1G}GCGUAG{8U}{8U}GG{8U}AGCGC{22G}{9U}UCGCUU{9A}GC{19A}{9U}GCGAAAGG{8U}CUCCGG{5U}{9U}CG{1A}CUCCGGACUCGUCCACCA",
"sequence_iupac": "GGGCGUGUGGCGUAGUUGGUAGCGCGUUCGCUUAGCAUGCGAAAGGUCUCCGGUUCGACUCCGGACUCGUCCACCA",
"length": 76,
"number_of_modifications": 13,
"number_of_modified_a": 3,
"number_of_modified_c": 0,
"number_of_modified_g": 2,
"number_of_modified_u": 8,
"formula": "C727H837N282O542P76",
"molecular_weight": 24551.13091,
"charge": -77,
"canonical_formula": "C721H819N284O540P76",
"canonical_molecular_weight": 24456.93691,
"canonical_charge": -77,
"extra_formula": "C6H18N-2O2",
"extra_molecular_weight": 94.194,
"extra_charge": 0,
"bpforms_errors": null,
"reference": {
"doi": "10.1093/nar/gkx1030"
},
"last_modified": {
"$date": "2020-04-28T23:33:47.281Z"
},
"ncbi_taxonomy_id": null
},
{
"anticodon": "IGC",
"organism": "Bombyx mori",
"organellum": "cytosolic",
"sequence_modomics": "GGGGGCGUALCUCAGADGGUAGAGCRCUCGCJUIGCOP#PGAGAG7UA?CGGGAPCG\"UACCCGGCGCCUCCACCA",
"sequence_bpforms": "GGGGGCGUA{2G}CUCAGA{8U}GGUAGAGC{22G}CUCGC{0U}U{9A}GC{19A}{9U}{0G}{9U}GAGAG{7G}UA{5C}CGGGA{9U}CG{1A}UACCCGGCGCCUCCACCA",
"sequence_iupac": "GGGGGCGUAGCUCAGAUGGUAGAGCGCUCGCUUAGCAUGUGAGAGGUACCGGGAUCGAUACCCGGCGCCUCCACCA",
"length": 76,
"number_of_modifications": 13,
"number_of_modified_a": 3,
"number_of_modified_c": 1,
"number_of_modified_g": 4,
"number_of_modified_u": 5,
"formula": "C735H845N297O533P76",
"molecular_weight": 24721.39691,
"charge": -76,
"canonical_formula": "C726H824N299O531P76",
"canonical_molecular_weight": 24588.14591,
"canonical_charge": -77,
"extra_formula": "C9H21N-2O2",
"extra_molecular_weight": 133.251,
"extra_charge": 1,
"bpforms_errors": null,
"reference": {
"doi": "10.1093/nar/gkx1030"
},
"last_modified": {
"$date": "2020-04-28T23:33:47.310Z"
},
"ncbi_taxonomy_id": 7091
},
{
"anticodon": "IGC",
"organism": "Bombyx mori",
"organellum": "cytosolic",
"sequence_modomics": "GGGGGCGUALCUCAGADGGUAGAGCRCUCGCJUIGCOP#PGAGAG7UA?CGGGAPCG\"UACCCGGCGCCUCCACCA",
"sequence_bpforms": "GGGGGCGUA{2G}CUCAGA{8U}GGUAGAGC{22G}CUCGC{0U}U{9A}GC{19A}{9U}{0G}{9U}GAGAG{7G}UA{5C}CGGGA{9U}CG{1A}UACCCGGCGCCUCCACCA",
"sequence_iupac": "GGGGGCGUAGCUCAGAUGGUAGAGCGCUCGCUUAGCAUGUGAGAGGUACCGGGAUCGAUACCCGGCGCCUCCACCA",
"length": 76,
"number_of_modifications": 13,
"number_of_modified_a": 3,
"number_of_modified_c": 1,
"number_of_modified_g": 4,
"number_of_modified_u": 5,
"formula": "C735H845N297O533P76",
"molecular_weight": 24721.39691,
"charge": -76,
"canonical_formula": "C726H824N299O531P76",
"canonical_molecular_weight": 24588.14591,
"canonical_charge": -77,
"extra_formula": "C9H21N-2O2",
"extra_molecular_weight": 133.251,
"extra_charge": 1,
"bpforms_errors": null,
"reference": {
"doi": "10.1093/nar/gkx1030"
},
"last_modified": {
"$date": "2020-04-28T23:33:47.345Z"
},
"ncbi_taxonomy_id": 7091
},
{
"anticodon": "IGC",
"organism": "Homo sapiens",
"organellum": "cytosolic",
"sequence_modomics": "GGGGGAUUALCUCAAADGGDAGAGCRCUCGCJUIGCOP#CGAGAG7UAGCGGGAPCG\"UGCCCGCAUCCUCCACCA",
"sequence_bpforms": "GGGGGAUUA{2G}CUCAAA{8U}GG{8U}AGAGC{22G}CUCGC{0U}U{9A}GC{19A}{9U}{0G}CGAGAG{7G}UAGCGGGA{9U}CG{1A}UGCCCGCAUCCUCCACCA",
"sequence_iupac": "GGGGGAUUAGCUCAAAUGGUAGAGCGCUCGCUUAGCAUGCGAGAGGUAGCGGGAUCGAUGCCCGCAUCCUCCACCA",
"length": 76,
"number_of_modifications": 12,
"number_of_modified_a": 3,
"number_of_modified_c": 0,
"number_of_modified_g": 4,
"number_of_modified_u": 5,
"formula": "C734H844N296O532P76",
"molecular_weight": 24678.371909999998,
"charge": -76,
"canonical_formula": "C726H823N298O530P76",
"canonical_molecular_weight": 24557.13191,
"canonical_charge": -77,
"extra_formula": "C8H21N-2O2",
"extra_molecular_weight": 121.24,
"extra_charge": 1,
"bpforms_errors": null,
"reference": {
"doi": "10.1093/nar/gkx1030"
},
"last_modified": {
| |
128167,
-52755,
837509650,
129316,
-1,
837572577,
128042,
837759472,
837423424,
128009,
-1,
837681614,
128050,
-52751,
837716734,
129345,
-1,
837824871,
128087,
838021624,
837360951,
-1,
-52748,
837974050,
983161,
-52747,
838011779,
983090,
-52746,
838077130,
983088,
-52745,
838142450,
983086,
-1,
838207830,
983084,
-52743,
837979754,
128192,
-52742,
838359987,
129516,
-1,
838422308,
983082,
838611760,
810847797,
-1,
838677245,
838563874,
-1,
838742636,
838601046,
983240,
838808073,
838667910,
983270,
-52736,
838733446,
983450,
-52735,
838798855,
983449,
-52734,
838864225,
983448,
-52733,
838929609,
983447,
-52732,
838994975,
983446,
-52731,
839060355,
983445,
-52730,
839125706,
983444,
-52729,
839191026,
983443,
-52728,
839256406,
983442,
-1,
839321798,
983441,
839528980,
838733319,
983269,
-52725,
839454342,
983440,
-52724,
839519751,
983439,
-52723,
839585121,
983438,
-52722,
839650505,
983437,
-52721,
839715871,
983436,
-52720,
839781251,
983435,
-52719,
839846602,
983434,
-52718,
839911922,
983433,
-52717,
839977302,
983432,
-1,
840042694,
983431,
840249887,
839454049,
983268,
-52714,
840175238,
983430,
-52713,
840240647,
983429,
-52712,
840306017,
983428,
-52711,
840371401,
983427,
-52710,
840436767,
983426,
-52709,
840502147,
983425,
-52708,
840567498,
983424,
-52707,
840632818,
983423,
-52706,
840698198,
983422,
-1,
840763590,
983421,
840970794,
840174793,
983255,
-52703,
840896134,
983420,
-52702,
840961543,
983419,
-52701,
841026913,
983418,
-52700,
841092297,
983417,
-52699,
841157663,
983416,
-52698,
841223043,
983415,
-52697,
841288394,
983414,
-52696,
841353714,
983413,
-52695,
841419094,
983412,
-1,
841484486,
983411,
841691701,
840895519,
983254,
-52692,
841617030,
983410,
-52691,
841682439,
983409,
-52690,
841747809,
983408,
-52689,
841813193,
983407,
-52688,
841878559,
983406,
-52687,
841943939,
983405,
-52686,
842009290,
983404,
-52685,
842074610,
983403,
-52684,
842139990,
983402,
-1,
842205382,
983401,
842412608,
841616259,
983253,
-52681,
842337926,
983400,
-52680,
842403335,
983399,
-52679,
842468705,
983398,
-52678,
842534089,
983397,
-52677,
842599455,
983396,
-52676,
842664835,
983395,
-52675,
842730186,
983394,
-52674,
842795506,
983393,
-52673,
842860886,
983392,
-1,
842926278,
983391,
843133515,
842336970,
983252,
-52670,
843058822,
983390,
-52669,
843124231,
983389,
-52668,
843189601,
983388,
-52667,
843254985,
983387,
-52666,
843320351,
983386,
-52665,
843385731,
983385,
-52664,
843451082,
983384,
-52663,
843516402,
983383,
-52662,
843581782,
983382,
-1,
843647174,
983381,
843854422,
843057650,
983251,
-52659,
843779718,
983380,
-52658,
843845127,
983379,
-52657,
843910497,
983378,
-52656,
843975881,
983377,
-52655,
844041247,
983376,
-52654,
844106627,
983375,
-52653,
844171978,
983374,
-52652,
844237298,
983373,
-52651,
844302678,
983372,
-1,
844368070,
983371,
844575329,
843778390,
983250,
-52648,
844500614,
983370,
-52647,
844566023,
983369,
-52646,
844631393,
983368,
-52645,
844696777,
983367,
-52644,
844762143,
983366,
-52643,
844827523,
983365,
-52642,
844892874,
983364,
-52641,
844958194,
983363,
-52640,
845023574,
983362,
-1,
845088966,
983361,
845348863,
844499142,
983249,
-52637,
845221510,
983360,
-52636,
845286919,
983359,
-52635,
845352289,
983358,
-52634,
845417673,
983357,
-52633,
845483039,
983356,
-52632,
845548419,
983355,
-52631,
845613770,
983354,
-52630,
845679090,
983353,
-52629,
845744470,
983352,
-1,
845809862,
983351,
846017200,
838666738,
983241,
846082680,
845941635,
983275,
-52625,
846007942,
983500,
-52624,
846073351,
983499,
-52623,
846138721,
983498,
-52622,
846204105,
983497,
-52621,
846269471,
983496,
-52620,
846334851,
983495,
-52619,
846400202,
983494,
-52618,
846465522,
983493,
-52617,
846530902,
983492,
-1,
846596294,
983491,
846803587,
846006986,
983274,
-52614,
846728838,
983490,
-52613,
846794247,
983489,
-52612,
846859617,
983488,
-52611,
846925001,
983487,
-52610,
846990367,
983486,
-52609,
847055747,
983485,
-52608,
847121098,
983484,
-52607,
847186418,
983483,
-52606,
847251798,
983482,
-1,
847317190,
983481,
847524494,
846727666,
983273,
-52603,
847449734,
983480,
-52602,
847515143,
983479,
-52601,
847580513,
983478,
-52600,
847645897,
983477,
-52599,
847711263,
983476,
-52598,
847776643,
983475,
-52597,
847841994,
983474,
-52596,
847907314,
983473,
-52595,
847972694,
983472,
-1,
848038086,
983471,
848245401,
847448406,
983272,
-52592,
848170630,
983470,
-52591,
848236039,
983469,
-52590,
848301409,
983468,
-52589,
848366793,
983467,
-52588,
848432159,
983466,
-52587,
848497539,
983465,
-52586,
848562890,
983464,
-52585,
848628210,
983463,
-52584,
848693590,
983462,
-1,
848758982,
983461,
848966308,
848169158,
983271,
-52581,
848891526,
983460,
-52580,
848956935,
983459,
-52579,
849022305,
983458,
-52578,
849087689,
983457,
-52577,
849153055,
983456,
-52576,
849218435,
983455,
-52575,
849283786,
983454,
-52574,
849349106,
983453,
-52573,
849414486,
983452,
-1,
849479878,
983451,
849687212,
848890911,
983276,
-52570,
849611977,
983507,
-52569,
849677343,
983506,
-52568,
849742723,
983505,
-52567,
849808074,
983504,
-52566,
849873394,
983503,
-52565,
849938774,
983502,
-1,
850004166,
983501,
-52563,
849612422,
983280,
-52562,
850136583,
983279,
-52561,
850201953,
983278,
-1,
850267337,
983277,
850473659,
845942406,
983248,
-52558,
850398854,
983350,
-52557,
850464263,
983349,
-52556,
850529633,
983348,
-52555,
850595017,
983347,
-52554,
850660383,
983346,
-52553,
850725763,
983345,
-52552,
850791114,
983344,
-52551,
850856434,
983343,
-52550,
850921814,
983342,
-1,
850987206,
983341,
851194566,
850398727,
983247,
-52547,
851119750,
983340,
-52546,
851185159,
983339,
-52545,
851250529,
983338,
-52544,
851315913,
983337,
-52543,
851381279,
983336,
-52542,
851446659,
983335,
-52541,
851512010,
983334,
-52540,
851577330,
983333,
-52539,
851642710,
983332,
-1,
851708102,
983331,
851915473,
851119457,
983246,
-52536,
851840646,
983330,
-52535,
851906055,
983329,
-52534,
851971425,
983328,
-52533,
852036809,
983327,
-52532,
852102175,
983326,
-52531,
852167555,
983325,
-52530,
852232906,
983324,
-52529,
852298226,
983323,
-52528,
852363606,
983322,
-1,
852428998,
983321,
852636380,
851840201,
983245,
-52525,
852561542,
983320,
-52524,
852626951,
983319,
-52523,
852692321,
983318,
-52522,
852757705,
983317,
-52521,
852823071,
983316,
-52520,
852888451,
983315,
-52519,
852953802,
983314,
-52518,
853019122,
983313,
-52517,
853084502,
983312,
-1,
853149894,
983311,
853357287,
852560927,
983244,
-52514,
853282438,
983310,
-52513,
853347847,
983309,
-52512,
853413217,
983308,
-52511,
853478601,
983307,
-52510,
853543967,
983306,
-52509,
853609347,
983305,
-52508,
853674698,
983304,
-52507,
853740018,
983303,
-52506,
853805398,
983302,
-1,
853870790,
983301,
854078194,
853281667,
983243,
-52503,
854003334,
983300,
-52502,
854068743,
983299,
-52501,
854134113,
983298,
-52500,
854199497,
983297,
-52499,
854264863,
983296,
-52498,
854330243,
983295,
-52497,
854395594,
983294,
-52496,
854460914,
983293,
-52495,
854526294,
983292,
-1,
854591686,
983291,
854851583,
854002378,
983242,
-52492,
854724230,
983290,
-52491,
854789639,
983289,
-52490,
854855009,
983288,
-52489,
854920393,
983287,
-52488,
854985759,
983286,
-52487,
855051139,
983285,
-52486,
855116490,
983284,
-52485,
855181810,
983283,
-52484,
855247190,
983282,
-1,
855312582,
983281,
855520033,
838609759,
-1,
855585559,
855469900,
-1,
855651087,
855545519,
-1,
855716622,
855588109,
-1,
855782149,
855668770,
-1,
-52477,
855728646,
128636,
-52476,
855787915,
128953,
-1,
855839427,
128941,
856044296,
855741109,
-1,
-52473,
855998114,
128914,
-1,
856039470,
128904,
856240907,
855977478,
-1,
-52470,
856194702,
128959,
-1,
856255518,
128976,
-52468,
856191666,
128637,
-52467,
856373847,
128934,
-1,
856438046,
128947,
-1,
855645102,
128933,
856752127,
855606365,
-1,
856765206,
856654158,
-1,
856830741,
856690566,
-1,
856896276,
856787650,
983069,
-1,
856818782,
983143,
-1,
856818277,
11135,
-1,
856780731,
128678,
-1,
856718555,
11837,
857276415,
855517158,
-1,
857289501,
857177168,
-1,
-52454,
857246768,
7414,
-52453,
857306907,
7411,
-52452,
857361005,
7413,
-1,
857413421,
7415,
857669631,
857245058,
-1,
-52449,
857568582,
7416,
-52448,
857614737,
7417,
-1,
857677249,
7412,
857879337,
855457885,
-1,
857944871,
857810707,
-1,
858010406,
857866240,
-1,
-52443,
857946150,
127918,
-1,
858004895,
128249,
-1,
857939536,
128252,
-52440,
857891341,
127931,
-1,
858201161,
128243,
858403627,
857833806,
983070,
-1,
858355746,
983144,
858534702,
858350068,
-1,
-52435,
858477158,
127952,
-1,
858529260,
127755,
-52433,
858491594,
11238,
-1,
858657563,
129499,
858862714,
838547369,
-1,
858928052,
858812236,
-1,
858993559,
858853137,
-1,
859059093,
858939346,
-1,
859124599,
859001257,
-1,
859190079,
859082293,
-1,
859255611,
859136203,
-1,
859321145,
859205452,
70411,
-1,
859270988,
70496,
859504639,
859262199,
70412,
-1,
859393271,
70497,
859583294,
859188496,
-1,
-52419,
859515090,
70495,
-1,
859575433,
70494,
-1,
859508497,
70453,
859845445,
859144526,
-1,
859910979,
859799886,
-1,
-52414,
859847381,
70432,
-1,
859901713,
70431,
-52412,
859847381,
70437,
-1,
860032785,
70436,
860238666,
859790167,
-1,
-52409,
860198664,
70430,
-52408,
860249008,
70435,
-52407,
860305378,
70425,
-1,
860360465,
70440,
860566352,
860169567,
-1,
860631886,
860497247,
-1,
-52403,
860568277,
70434,
-1,
860622609,
70433,
-52401,
860568277,
70439,
-1,
860753681,
70438,
860959572,
860491537,
70405,
-52398,
860915863,
70420,
-52397,
860962909,
70416,
-1,
861015825,
70406,
861221720,
860911650,
-1,
-52394,
861173918,
70455,
-52393,
861223637,
70454,
-1,
861277969,
70456,
861483866,
861178007,
70409,
-1,
861440151,
70410,
861614941,
861432017,
-1,
-52388,
861551317,
70443,
-1,
861605649,
70442,
861811552,
861555959,
-1,
-52385,
861752649,
70451,
-1,
861802257,
70450,
862008163,
861752003,
-1,
-52382,
861944533,
70422,
-1,
861998865,
70421,
862204774,
861948405,
-1,
-52379,
862141141,
70429,
-1,
862195473,
70428,
862401384,
862142557,
70407,
-1,
862339165,
70408,
862532459,
862336937,
-1,
-52374,
862468821,
70424,
-1,
862523153,
70423,
862729070,
862461239,
-1,
-52371,
862665429,
70427,
-1,
862719761,
70426,
862925681,
862656546,
-1,
-52368,
862862037,
70445,
-1,
862916369,
70444,
-52366,
862885640,
70447,
-52365,
863072153,
70448,
-52364,
863134803,
70419,
-52363,
863196766,
70446,
-52362,
863255253,
70457,
-1,
863317282,
70415,
863515527,
859083000,
-1,
863581053,
863473790,
-1,
863646587,
863530828,
70467,
-1,
863596364,
70468,
863830015,
863587575,
70498,
-1,
863718647,
70499,
863908737,
863506193,
-1,
-52353,
863864983,
70476,
-52352,
863912029,
70472,
-1,
863964945,
70462,
864170883,
863864983,
70465,
-1,
864127127,
70466,
864301957,
864108637,
70463,
-1,
864239709,
70464,
-52346,
864248915,
70475,
-1,
864365858,
70471,
864564115,
863468624,
-1,
864629643,
864522241,
-1,
-52342,
864581954,
70403,
-1,
864645183,
70477,
864826254,
864558391,
-1,
-52339,
864772709,
70400,
-1,
864818123,
70401,
865022865,
864751377,
-1,
-52336,
864980554,
70461,
-1,
865034301,
70402,
-52334,
864971782,
70493,
-1,
865165294,
70460,
-52332,
864510517,
70480,
-1,
865277850,
70487,
-52330,
859007741,
127815,
-1,
<reponame>jam1garner/msc-debugger
condition_table_true = ["lt", "gt", "eq"]
condition_table_false = ["ge", "le", "ne"]
trap_condition_table = {
1: "lgt",
2: "llt",
4: "eq",
5: "lge",
8: "gt",
12: "ge",
16: "lt",
20: "le",
31: "u"
}
spr_table = {
8: "lr",
9: "ctr"
}
def decodeI(value):
return (value >> 2) & 0xFFFFFF, (value >> 1) & 1, value & 1
def decodeB(value):
return (value >> 21) & 0x1F, (value >> 16) & 0x1F, (value >> 2) & 0x3FFF, (value >> 1) & 1, value & 1
def decodeD(value):
return (value >> 21) & 0x1F, (value >> 16) & 0x1F, value & 0xFFFF
def decodeX(value):
return (value >> 21) & 0x1F, (value >> 16) & 0x1F, (value >> 11) & 0x1F, (value >> 1) & 0x3FF, value & 1
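# Example (illustrative): 0x7C221A14 encodes "add r1, r2, r3".
# decodeX(0x7C221A14) -> (1, 2, 3, 266, 0), i.e. D=1, A=2, B=3, XO=266, Rc=0.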
def extend_sign(value, bits=16):
if value & 1 << (bits - 1):
value -= 1 << bits
return value
def ihex(value):
return "-" * (value < 0) + "0x" + hex(value).lstrip("-0x").rstrip("L").zfill(1).upper()
def decodeCond(BO, BI):
#TODO: Better condition code
if BO == 20: return ""
if BO & 1: return "?"
if BI > 2: return "?"
if BO == 4: return condition_table_false[BI]
if BO == 12: return condition_table_true[BI]
return "?"
def loadStore(value, regtype="r"):
D, A, d = decodeD(value)
d = extend_sign(d)
return "%s%i, %s(r%i)" %(regtype, D, ihex(d), A)
def loadStoreX(D, A, B, pad):
if pad: return "<invalid>"
return "r%i, %s, r%i" %(D, ("r%i" %A) if A else "0", B)
def add(D, A, B, Rc):
return "add%s" %("." * Rc), "r%i, r%i, r%i" %(D, A, B)
def addi(value, addr):
D, A, SIMM = decodeD(value)
SIMM = extend_sign(SIMM)
if A == 0:
return "li", "r%i, %s" %(D, ihex(SIMM))
return "addi", "r%i, r%i, %s" %(D, A, ihex(SIMM))
def addic(value, addr):
D, A, SIMM = decodeD(value)
SIMM = extend_sign(SIMM)
return "addic", "r%i, r%i, %s" %(D, A, ihex(SIMM))
def addic_(value, addr):
D, A, SIMM = decodeD(value)
SIMM = extend_sign(SIMM)
return "addic.", "r%i, r%i, %s" %(D, A, ihex(SIMM))
def addis(value, addr):
D, A, SIMM = decodeD(value)
SIMM = extend_sign(SIMM)
if A == 0:
return "lis", "r%i, %s" %(D, ihex(SIMM))
return "addis", "r%i, r%i, %s" %(D, A, ihex(SIMM))
def and_(S, A, B, Rc):
return "and%s" % ("." * Rc), "r%i, r%i, r%i" % (A, S, B)
def b(value, addr):
LI, AA, LK = decodeI(value)
LI = extend_sign(LI, 24) * 4
if AA:
dst = LI
else:
dst = addr + LI
return "b%s%s" %("l" * LK, "a" * AA), ihex(dst)
def bc(value, addr):
BO, BI, BD, AA, LK = decodeB(value)
    LI = extend_sign(BD, 14) * 4
instr = "b" + decodeCond(BO, BI)
if LK: instr += "l"
if AA:
instr += "a"
dst = LI
else:
dst = addr + LI
return instr, ihex(dst)
def bcctr(BO, BI, pad, LK):
if pad: return "<invalid>"
instr = "b" + decodeCond(BO, BI) + "ctr"
if LK:
instr += "l"
return instr
def bclr(BO, BI, pad, LK):
if pad: return "<invalid>"
instr = "b" + decodeCond(BO, BI) + "lr"
if LK:
instr += "l"
return instr
def cmp(cr, A, B, pad):
if pad: return "<invalid>"
if cr & 3:
return "<invalid>"
return "cmp", "cr%i, r%i, r%i" %(cr >> 2, A, B)
def cmpi(value, addr):
cr, A, SIMM = decodeD(value)
SIMM = extend_sign(SIMM)
if cr & 3:
return "<invalid>"
return "cmpwi", "cr%i, r%i, %s" %(cr >> 2, A, ihex(SIMM))
def cmpl(cr, A, B, pad):
if pad: return "<invalid>"
if cr & 3:
return "<invalid>"
return "cmplw", "cr%i, r%i, r%i" %(cr >> 2, A, B)
def cmpli(value, addr):
cr, A, UIMM = decodeD(value)
if cr & 3:
return "<invalid>"
return "cmplwi", "cr%i, r%i, %s" %(cr >> 2, A, ihex(UIMM))
def cntlzw(S, A, pad, Rc):
if pad: return "<invalid>"
return "cntlzw%s" %("." * Rc), "r%i, r%i" %(A, S)
def dcbst(pad1, A, B, pad2):
if pad1 or pad2: return "<invalid>"
return "dcbst", "r%i, r%i" %(A, B)
def fmr(D, pad, B, Rc):
if pad: return "<invalid>"
return "fmr%s" %("." * Rc), "f%i, f%i" %(D, B)
def fneg(D, pad, B, Rc):
if pad: return "<invalid>"
return "fneg%s" %("." * Rc), "f%i, f%i" %(D, B)
def mfspr(D, sprLo, sprHi, pad):
if pad: return "<invalid>"
sprnum = (sprHi << 5) | sprLo
if sprnum not in spr_table:
spr = "?"
else:
spr = spr_table[sprnum]
return "mf%s" %spr, "r%i" %D
def mtspr(S, sprLo, sprHi, pad):
if pad: return "<invalid>"
sprnum = (sprHi << 5) | sprLo
if sprnum not in spr_table:
spr = ihex(sprnum)
else:
spr = spr_table[sprnum]
return "mt%s" %spr, "r%i" %S
def lbz(value, addr): return "lbz", loadStore(value)
def lfd(value, addr): return "lfd", loadStore(value, "f")
def lfs(value, addr): return "lfs", loadStore(value, "f")
def lmw(value, addr): return "lmw", loadStore(value)
def lwz(value, addr): return "lwz", loadStore(value)
def lwzu(value, addr): return "lwzu", loadStore(value)
def lwarx(D, A, B, pad): return "lwarx", loadStoreX(D, A, B, pad)
def lwzx(D, A, B, pad): return "lwzx", loadStoreX(D, A, B, pad)
def or_(S, A, B, Rc):
if S == B:
return "mr%s" %("." * Rc), "r%i, r%i" %(A, S)
return "or%s" %("." * Rc), "r%i, r%i, r%i" %(A, S, B)
def ori(value, addr):
S, A, UIMM = decodeD(value)
if UIMM == 0:
return "nop"
return "ori", "r%s, r%s, %s" %(A, S, ihex(UIMM))
def oris(value, addr):
S, A, UIMM = decodeD(value)
return "oris", "r%s, r%s, %s" %(A, S, ihex(UIMM))
def rlwinm(value, addr):
S, A, SH, M, Rc = decodeX(value)
MB = M >> 5
ME = M & 0x1F
dot = "." * Rc
if SH == 0 and MB == 0 and ME == 31:
return "nop"
if MB == 0 and ME == 31 - SH:
return "slwi%s" %dot, "r%i, r%i, %i" %(A, S, SH)
if ME == 31 and SH == 32 - MB:
return "srwi%s" %dot, "r%i, r%i, %i" %(A, S, MB)
if MB == 0 and ME < 31:
return "extlwi%s" %dot, "r%i, r%i, %i,%i" %(A, S, ME + 1, SH)
#extrwi
if MB == 0 and ME == 31:
if SH >= 16:
return "rotlwi%s" %dot, "r%i, r%i, %i" %(A, S, SH)
return "rotrwi%s" %dot, "r%i, r%i, %i" %(A, S, 32 - SH)
if SH == 0 and ME == 31:
return "clrlwi%s" %dot, "r%i, r%i, %i" %(A, S, MB)
if SH == 0 and MB == 0:
return "clrrwi%s" %dot, "r%i, r%i, %i" %(A, S, 31 - ME)
#clrlslwi
return "rlwinm%s" %dot, "r%i, r%i, r%i,r%i,r%i" %(A, S, SH, MB, ME)
def sc(value, addr):
if value & 0x3FFFFFF != 2:
return "<invalid>"
return "sc"
def stb(value, addr): return "stb", loadStore(value)
def stfd(value, addr): return "stfd", loadStore(value, "f")
def stfs(value, addr): return "stfs", loadStore(value, "f")
def stfsu(value, addr): return "stfsu", loadStore(value, "f")
def stmw(value, addr): return "stmw", loadStore(value)
def stw(value, addr): return "stw", loadStore(value)
def stwu(value, addr): return "stwu", loadStore(value)
def stbx(S, A, B, pad): return "stbx", loadStoreX(S, A, B, pad)
def stwx(S, A, B, pad): return "stwx", loadStoreX(S, A, B, pad)
def stwcx(S, A, B, pad): return "stwcx", loadStoreX(S, A, B, pad ^ 1)
def tw(TO, A, B, pad):
if pad: return "<invalid>"
if TO == 31 and A == 0 and B == 0:
return "trap"
if TO not in trap_condition_table:
condition = "?"
else:
condition = trap_condition_table[TO]
return "tw%s" %condition, "r%i, r%i" %(A, B)
opcode_table_ext1 = {
16: bclr,
528: bcctr
}
opcode_table_ext2 = {
0: cmp,
4: tw,
20: lwarx,
23: lwzx,
26: cntlzw,
28: and_,
32: cmpl,
54: dcbst,
150: stwcx,
151: stwx,
215: stbx,
266: add,
339: mfspr,
444: or_,
467: mtspr
}
opcode_table_float_ext1 = {
40: fneg,
72: fmr
}
def ext1(value, addr):
DS, A, B, XO, Rc = decodeX(value)
if not XO in opcode_table_ext1:
return "ext1 - %s" %bin(XO)
return opcode_table_ext1[XO](DS, A, B, Rc)
def ext2(value, addr):
DS, A, B, XO, Rc = decodeX(value)
if not XO in opcode_table_ext2:
return "ext2 - %s" %bin(XO)
return opcode_table_ext2[XO](DS, A, B, Rc)
def float_ext1(value, addr):
D, A, B, XO, Rc = decodeX(value)
if not XO in opcode_table_float_ext1:
return "float_ext1 - %s" %bin(XO)
return opcode_table_float_ext1[XO](D, A, B, Rc)
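# ---------------------------------------------------------------------------
# Hypothetical usage sketch (not part of the original file): one way the
# opcode tables could be consumed. The primary opcode sits in the top 6 bits
# of the 32-bit word; everything else is handled by the per-opcode decoders.
# The function name is an illustrative assumption.
def _sketch_disassemble(value, addr):
    opcode = (value >> 26) & 0x3F
    handler = opcode_table.get(opcode)  # opcode_table is defined just below
    if handler is None:
        return "<unknown opcode %i>" % opcode
    result = handler(value, addr)
    # handlers return either a bare mnemonic string or a (mnemonic, operands) tuple
    if isinstance(result, tuple):
        return "%s %s" % result
    return result
# ---------------------------------------------------------------------------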
opcode_table = {
10: cmpli,
11: cmpi,
12: addic,
13: addic_,
14: addi,
15: addis,
16: bc,
17: sc,
18: b,
19: ext1,
21: rlwinm,
24: ori,
25: oris,
31: ext2,
32: lwz,
33: lwzu,
34: lbz,
36: stw,
37: stwu,
38: stb,
46: lmw,
47: stmw,
48: lfs,
50: lfd,
52: stfs,
53: stfsu,
54: stfd,
<reponame>lamsoa729/FoXlink<filename>foxlink/me_zrl_odes.py
#!/usr/bin/env python
"""@package docstring
File: me_zrl_odes.py
Author: <NAME>
Email: <EMAIL>
Description: Class that contains the all ODEs relevant to solving the moment
expansion formalism of the Fokker-Planck equation for bound crosslinking motors.
"""
from scipy.integrate import dblquad
from numba import njit
from .me_zrl_helpers import (boltz_fact_zrl, weighted_boltz_fact_zrl,
fast_zrl_src_kl)
from .me_helpers import dr_dt, du_dt
@njit
def dui_dt_zrl(r_ij, u_i, u_j, mu10, mu11, a_ij, b, ks, grot_i):
"""!Calculate the time-derivative of rod1's orientation vector with respect
to the current state of the crosslinked rod system when crosslinkers have
zero rest length.
@param r_ij: Vector from rod1's center of mass to rod2's center of mass
@param u_i: Orientation unit vector of rod1
@param u_j: Orientation unit vector of rod2
    @param mu10: First motor moment of s1
    @param mu11: Second motor moment of s1,s2
@param a_ij: dot product of r_ij and u_i
@param b: dot product of u_i and u_j
@param ks: motor spring constant
    @param grot_i: Rotational drag coefficient of rod1
@return: Time-derivative of rod1's orientation vector
"""
return (ks / grot_i) * ((r_ij - (a_ij * u_i))
* mu10 + (u_j - (b * u_i)) * mu11)
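# Hypothetical sanity check (illustrative only; kept as a comment so importing the
# module is unchanged): with zero motor moments the crosslinkers exert no torque,
# so the orientation derivative vanishes.
#   import numpy as np
#   r_ij = np.array([1.0, 0.0, 0.0])
#   u_i = np.array([0.0, 0.0, 1.0])
#   u_j = np.array([0.0, 1.0, 0.0])
#   dui_dt_zrl(r_ij, u_i, u_j, 0.0, 0.0, r_ij.dot(u_i), u_i.dot(u_j), 1.0, 1.0)
#   # -> array([0., 0., 0.])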
def rod_geom_derivs_zrl(f_ij, r_ij, u_i, u_j,
scalar_geom, mu_kl_arr, fric_coeff, ks):
"""!TODO: Docstring for rod_derivs.
@param r_ij: TODO
@param u_i: TODO
@param u_j: TODO
@return: TODO
"""
(mu00, mu10, mu01, mu11, mu20, mu02) = mu_kl_arr
(gpara_i, gperp_i, grot_i, gpara_j, gperp_j, grot_j) = fric_coeff
(rsqr, a_ij, a_ji, b) = scalar_geom
# Evolution of position vectors
dr_i = dr_dt(-1. * f_ij, u_i, gpara_i, gperp_i)
dr_j = dr_dt(f_ij, u_j, gpara_j, gperp_j)
# Evolution of orientation vectors
du_i = dui_dt_zrl(r_ij, u_i, u_j, mu10, mu11, a_ij, b, ks, grot_i)
du_j = dui_dt_zrl(-1. * r_ij, u_j, u_i, mu01, mu11, a_ji, b, ks, grot_j)
return (dr_i, dr_j, du_i, du_j)
def calc_moment_derivs_zrl(mu_kl, scalar_geom, q_arr, params):
(rsqr, a_ij, a_ji, b) = scalar_geom
vo = params['vo']
ks = params['ks']
fs = params['fs']
ko = params['ko']
hL_i = .5 * params['L_i']
hL_j = .5 * params['L_j']
kappa = vo * ks / fs
# Evolution of zeroth moment
dmu00 = dmu00_dt_zrl(mu_kl[0], a_ij, a_ji, b, hL_i, hL_j, ko, vo, kappa,
q_arr[0])
# Evoultion of first moments
dmu10 = dmu10_dt_zrl(mu_kl[0], mu_kl[1], mu_kl[2], a_ij, a_ji, b, hL_i, hL_j,
ko, vo, kappa, q_arr[1])
dmu01 = dmu10_dt_zrl(mu_kl[0], mu_kl[2], mu_kl[1], a_ji, a_ij, b, hL_j, hL_i,
ko, vo, kappa, q_arr[2])
# Evolution of second moments
dmu11 = dmu11_dt_zrl(mu_kl[1], mu_kl[2], mu_kl[3], mu_kl[4], mu_kl[5],
a_ij, a_ji, b, hL_j, hL_i, ko, vo, kappa, q_arr[3])
dmu20 = dmu20_dt_zrl(mu_kl[1], mu_kl[3], mu_kl[4], a_ij, a_ji, b, hL_i, hL_j,
ko, vo, kappa, q_arr[4])
dmu02 = dmu20_dt_zrl(mu_kl[2], mu_kl[3], mu_kl[5], a_ji, a_ij, b, hL_j, hL_i,
ko, vo, kappa, q_arr[5])
return (dmu00, dmu10, dmu01, dmu11, dmu20, dmu02)
def calc_moment_derivs_zrl_B_terms(mu_kl, scalar_geom, q_arr, B_terms, params):
(rsqr, a_ij, a_ji, b) = scalar_geom
vo = params['vo']
ks = params['ks']
fs = params['fs']
ko = params['ko']
hL_i = .5 * params['L_i']
hL_j = .5 * params['L_j']
kappa = vo * ks / fs
mu00, mu10, mu01, mu11, mu20, mu02 = mu_kl
B0_j, B0_i, B1_j, B1_i, B2_j, B2_i, B3_j, B3_i = B_terms
# Evolution of zeroth moment
dmu00 = dmu00_dt_zrl(mu00, a_ij, a_ji, b, hL_i, hL_j, ko, vo, kappa,
q_arr[0], B0_j, B0_i, B1_j, B1_i)
# Evoultion of first moments
dmu10 = dmu10_dt_zrl(mu00, mu10, mu01, a_ij, a_ji, b, hL_i, hL_j,
ko, vo, kappa, q_arr[1], B0_j, B1_j, B1_i, B2_i)
dmu01 = dmu10_dt_zrl(mu00, mu01, mu10, a_ji, a_ij, b, hL_j, hL_i,
ko, vo, kappa, q_arr[2], B0_i, B1_i, B1_j, B2_j)
# Evolution of second moments
dmu11 = dmu11_dt_zrl(mu10, mu01, mu11, mu20, mu02, a_ij, a_ji, b,
hL_j, hL_i, ko, vo, kappa, q_arr[3],
B1_j, B1_i, B2_j, B2_i)
dmu20 = dmu20_dt_zrl(mu10, mu11, mu20, a_ij, a_ji, b, hL_i, hL_j,
ko, vo, kappa, q_arr[4], B0_j, B1_j, B2_i, B3_i)
dmu02 = dmu20_dt_zrl(mu01, mu11, mu02, a_ji, a_ij, b, hL_j, hL_i,
ko, vo, kappa, q_arr[5], B0_i, B1_i, B2_j, B3_j)
return (dmu00, dmu10, dmu01, dmu11, dmu20, dmu02)
def calc_boundary_derivs_zrl(B_terms, scalar_geom, Q_arr, params):
(rsqr, a_ij, a_ji, b) = scalar_geom
B0_j, B0_i, B1_j, B1_i, B2_j, B2_i, B3_j, B3_i = B_terms
Q0_j, Q0_i, Q1_j, Q1_i, Q2_j, Q2_i, Q3_j, Q3_i = Q_arr
vo = params['vo']
ks = params['ks']
fs = params['fs']
ko = params['ko']
hL_i = .5 * params['L_i']
hL_j = .5 * params['L_j']
kappa = vo * ks / fs
dB0_j = dBl_j_dt_zrl(0., 0., B0_j, a_ij, a_ji, b, hL_i, vo, ko, kappa,
Q0_j)
dB0_i = dBl_j_dt_zrl(0., 0., B0_i, a_ji, a_ij, b, hL_j, vo, ko, kappa,
Q0_i)
dB1_j = dBl_j_dt_zrl(1., B0_j, B1_j, a_ij, a_ji, b, hL_i, vo, ko, kappa,
Q1_j)
dB1_i = dBl_j_dt_zrl(1., B0_i, B1_i, a_ji, a_ij, b, hL_j, vo, ko, kappa,
Q1_i)
dB2_j = dBl_j_dt_zrl(2., B1_j, B2_j, a_ij, a_ji, b, hL_i, vo, ko, kappa,
Q2_j)
dB2_i = dBl_j_dt_zrl(2., B1_i, B2_i, a_ji, a_ij, b, hL_j, vo, ko, kappa,
Q2_i)
dB3_j = dBl_j_dt_zrl(3., B2_j, B3_j, a_ij, a_ji, b, hL_i, vo, ko, kappa,
Q3_j)
dB3_i = dBl_j_dt_zrl(3., B2_i, B3_i, a_ji, a_ij, b, hL_j, vo, ko, kappa,
Q3_i)
return [dB0_j, dB0_i, dB1_j, dB1_i, dB2_j, dB2_i, dB3_j, dB3_i]
################################
# Moment evolution functions #
################################
@njit
def dmu00_dt_zrl(mu00, a_ij, a_ji, b, hL_i, hL_j, ko, vo, kappa,
q00=0, B0_j=0., B0_i=0., B1_j=0., B1_i=0.):
"""!Calculate the time-derivative of the zeroth moment of the zero rest
length crosslinkers bound to rods.
@param mu00: Zeroth motor moment
@param ko: Turnover rate of motors
@param q00: Binding source term (i.e. partition function)
@return: Time derivative of the zeroth moment of motors
"""
return ko * (q00 - mu00) + ((-vo + kappa * (hL_i - a_ji)) * B0_j
- (kappa * b * B1_j)
+ (-vo + kappa * (hL_j - a_ij)) * B0_i
- (kappa * b * B1_i))
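# In equation form, the return value above is
#   d(mu00)/dt = ko*(q00 - mu00)
#                + (-vo + kappa*(hL_i - a_ji))*B0_j - kappa*b*B1_j
#                + (-vo + kappa*(hL_j - a_ij))*B0_i - kappa*b*B1_i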
@njit
def dmu10_dt_zrl(mu00, mu10, mu01,
a_ij, a_ji, b, hL_i, hL_j, ko, vo, kappa,
q10=0, B0_j=0, B1_j=0, B1_i=0, B2_i=0):
"""!Calculate the time-derivative of the first moment(s1) of the zero rest
length crosslinkers bound to rods.
@param mu00: Zeroth motor moment
@param mu10: First motor moment of s1
@param mu01: First motor moment of s2
@param a_ij: Dot product of u_i and r_ij
@param b: Dot product of u_i and u_j
@param ko: Turnover rate of motors
@param vo: Velocity of motor when no force is applied
@param q10: Binding source term of first moment
@return: Time derivative of the first(s1) moment of motors
"""
return ((ko * q10) + ((vo + kappa * a_ij) * mu00) - ((ko + kappa) * mu10)
+ (kappa * b * mu01)
+ hL_i * (kappa * (hL_i - a_ij) - vo) * B0_j
- kappa * b * hL_i * B1_j
+ (kappa * (hL_j - a_ji) - vo) * B1_i - kappa * b * B2_i)
@njit
def dmu11_dt_zrl(mu10, mu01, mu11, mu20, mu02,
a_ij, a_ji, b, hL_i, hL_j, ko, vo, kappa,
q11=0, B1_j=0, B1_i=0, B2_j=0, B2_i=0):
"""!Calculate the time-derivative of the second moment(s1,s2) of zero rest
length crosslinkers bound to rods.
@param mu10: First motor moment of s1
@param mu01: First motor moment of s2
@param mu11: Second motor moment of s1 and s2
@param mu20: Second motor moment of s1
@param mu02: Second motor moment of s2
@param a_ij: Dot product of u_i and r_ij
@param a_ji: Dot product of u_j and r_ij
@param b: Dot product of u_i and u_j
@param vo: Velocity of motor when no force is applied
@param fs: Stall force of motor ends
@param ko: Turnover rate of motors
@param c: Effective concentration of motors in solution
@param ks: Motor spring constant
@param beta: 1/(Boltzmann's constant * Temperature)
@param L_i: Length of rod1
@param L_j: Length of rod2
@return: Time derivative of the second(s1,s2) moment of motors
"""
return ((ko * q11) + (vo + kappa * a_ji) * mu10 + (vo + kappa * a_ij) * mu01
- (ko + 2. * kappa) * mu11 + kappa * b * (mu20 + mu02)
+ hL_i * ((kappa * (hL_i - a_ij) - vo) * B1_j - b * kappa * B2_j)
+ hL_j * ((kappa * (hL_j - a_ji) - vo) * B1_i - b * kappa * B2_i))
@njit
def dmu20_dt_zrl(mu10, mu11, mu20, a_ij, a_ji, b, hL_i, hL_j, ko, vo, kappa,
<reponame>ArnavM1499/Hanabi-HumanAI
from common_game_functions import *
from Agents.common_player_functions import *
from Agents.player import *
import time
import copy
import random
def count_card_list(knowledge, ls):
for card in ls:
remove_card(card, knowledge)
def count_board(knowledge, board):
for i in range(len(board)):
for j in range(1, board[i][1]):
remove_card((i, j), knowledge)
def remove_card(card, knowledge):
for slot in knowledge:
slot[card[0]][card[1] - 1] = max(0, slot[card[0]][card[1] - 1] - 1)
def weight_knowledge(knowledge, weights):
new_knowledge = copy.deepcopy(weights)
for slot in range(len(new_knowledge)):
for col in range(5):
for num in range(5):
new_knowledge[slot][col][num] *= knowledge[slot][col][num]
return new_knowledge
def update_weights(weights, weight, board, target):
if target != -1:
for col in range(5):
for nr in range(5):
if card_playable((col, nr + 1), board):
weights[target][col][nr] *= weight
return weights
def slot_pct(knowledge, list):
total_combos = 0.0
satisf_combos = 0.0
# print(len(knowledge))
for col in range(len(knowledge)):
# there are 5 possible numbers
for num in range(5):
total_combos += knowledge[col][num]
if (col, num + 1) in list:
satisf_combos += knowledge[col][num]
    if total_combos < 1:
        # no remaining combinations; avoid dividing by zero
        print("slot_pct error")
        return 0
    return satisf_combos / total_combos
# all info (knowledge, weights, partner info, etc.) is updated every time we are informed of an action
# weights/partner weights are maintained; everything else is just replaced
# this keeps get_action light/fast, but informing takes more time
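# Hypothetical construction sketch (illustrative only; keyword names mirror the
# defaults assigned in __init__ below):
#   p0 = ValuePlayer("p0", 0, hint_direction="left",
#                    play_threshold=0.9, get_action_values=True)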
class ValuePlayer(Player):
def __init__(self, name, pnr, **kwargs):
super().__init__(name, pnr)
self.partner_nr = 1 - self.pnr # hard code for two players
self.turn = 0
# self knowledge
self.knowledge = []
self.weights = [
[[1 for _ in range(5)] for _ in range(5)] for _ in range(5)
]
self.weighted_knowledge = None
self.last_hint = None
# partner knowledge
self.partner_hand = None
self.partner_knowledge = []
self.partner_weights = [
[[1 for _ in range(5)] for _ in range(5)] for _ in range(5)
]
self.partner_weighted_knowledge = None
# state/model information state; maintained by simply copying whenever we receive new ones via inform
self.state = None
self.model = None
self.protect = []
self.discard = []
self.play = []
self.partner_knowledge_index = 0.0
# whether we return a dictionary of all actions/values, or just the best action
self.get_action_values = False
# parameters and default values below
self.hint_weight = 1000.0
self.hint_direction = "right"
# left, right, best
self.play_preference = "best"
self.discard_preference = "best"
# card counting
self.card_count = True
self.card_count_partner = True
# we want to assign a nonzero value to an arbitrary (average) discard:
self.discard_base_value = 0.2
# how much we care about protecting our high cards for future play
self.protect_importance = 0.4
# multiplier for low confidence (i.e. below threshold) plays/discards
# perhaps it would be more appropriate to replace with a continuous map e.g. x^2
self.play_low_multiplier = 0.1
self.discard_low_multiplier = 0.5
self.play_threshold = 0.95
self.discard_threshold = 0.55
# how much we like playing, discarding, hinting in general
self.play_bias = 1.0
self.disc_bias = 0.6
self.hint_bias = 2.5
# if dynamic bias is true, then hint and play biases are further multiplied
# by the following values as the game goes on
self.dynamic_bias = True
# [0 hints left, 1 hint, 2 hint, etc.]
self.hint_biases = [0, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 0.95, 1.0]
# [0 lives left, 1 life, etc.]
self.play_biases = [0, 0.7, 0.9, 1.0]
# TODO: make a function for discard risk
# default when dynamic bias is off
self.hint_risk_weight = 1.0
self.play_risk_weight = 1.0
for k, v in kwargs.items():
setattr(self, k, v)
def _update_protect_discard(self):
self.protect = []
self.discard = []
self.play = []
trash = self.state.get_trash()
# print(trash)
# if trash:
# print(trash[0])
# print(trash.count(trash[0]))
for col in range(5):
nr = self.state.get_board()[col][1]
if nr < 5:
self.play.append((col, nr + 1))
for i in range(1, nr + 1):
self.discard.append((col, i))
trash_mode = False
for j in range(nr + 1, 6):
if trash_mode:
self.discard.append((col, j))
elif trash.count((col, j)) == COUNTS[j - 1] - 1:
self.protect.append((col, j))
elif trash.count((col, j)) == COUNTS[j - 1]:
trash_mode = True
# print(self.play)
# print(self.protect)
# print(self.discard)
def _update_bias(self):
if self.dynamic_bias:
self.hint_risk_weight = self.hint_biases[self.state.get_num_hints()]
self.play_risk_weight = self.play_biases[self.state.get_hits()]
def _update_info(self, state, model):
self.state = state
self.model = model
self.knowledge = copy.deepcopy(model.get_knowledge())
self.partner_hand = state.get_hands()[self.partner_nr]
self.partner_knowledge = state.get_all_knowledge()[self.partner_nr]
self._update_protect_discard()
self._update_bias()
def _count_cards(self):
if self.card_count:
count_card_list(self.knowledge, self.state.get_trash())
# count_card_list(self.knowledge, self.state.get_hands()[self.partner_nr])
count_board(self.knowledge, self.state.get_board())
def _count_partner_cards(self, partner_knowledge):
if self.card_count_partner:
count_card_list(partner_knowledge, self.state.get_trash())
count_board(partner_knowledge, self.state.get_board())
def _eval_play(self, action):
assert(action.type == PLAY)
pct = slot_pct(self.weighted_knowledge[action.cnr], self.play)
# pct = slot_playable_pct(
# self.weighted_knowledge[action.cnr], self.state.get_board()
# )
if self.play_preference == "left":
pct *= ([1.2, 1.1, 1.0, 0.9, 0.8][action.cnr])
elif self.play_preference == "right":
pct *= ([0.8, 0.9, 1.0, 1.1, 1.2][action.cnr])
if pct > self.play_threshold:
return pct
else:
return pct * self.play_low_multiplier
def _eval_discard(self, action):
assert(action.type == DISCARD)
value = self.discard_base_value + slot_pct(self.weighted_knowledge[action.cnr], self.discard)
value -= self.protect_importance * slot_pct(self.weighted_knowledge[action.cnr], self.protect)
# no negatives
value = max(value, 0)
# pct = slot_discardable_pct(
# self.weighted_knowledge[action.cnr], self.state.get_board(), self.state.get_trash()
# )
if self.discard_preference == "left":
value *= ([1.2, 1.1, 1.0, 0.9, 0.8][action.cnr])
elif self.discard_preference == "right":
value *= ([0.8, 0.9, 1.0, 1.1, 1.2][action.cnr])
if value > self.discard_threshold:
return value
else:
return value * self.discard_low_multiplier
def _eval_partner_knowledge(self, knowledge):
diff_score = 0
for i in range(len(knowledge)):
if self.partner_hand[i] in self.play:
variance = (1 - slot_pct(knowledge[i], self.play))
diff_score += variance * variance
elif self.partner_hand[i] in self.discard:
variance = slot_pct(knowledge[i], self.discard)
diff_score += 0.2 * variance * variance
return diff_score
def _eval_hint(self, action):
# if self.last_hint is not None and action == self.last_hint:
# return 0
# assert(action.type in [HINT_COLOR, HINT_NUMBER])
target = get_multi_target(action, self.partner_hand, self.partner_weighted_knowledge,
self.state.get_board(), self.play_threshold, self.hint_direction)
copy_weights = copy.deepcopy(self.partner_weights)
new_partner_weights = update_weights(copy_weights, self.hint_weight, self.state.get_board(), target)
copy_knowledge = copy.deepcopy(self.partner_knowledge)
# update knowledge ourselves as part of simulation
if action.type == HINT_COLOR:
for i in range(len(copy_knowledge)):
if self.partner_hand[i][0] == action.col:
for j in range(0, 5):
if j != action.col:
copy_knowledge[i][j] = [0, 0, 0, 0, 0]
else:
copy_knowledge[i][action.col] = [0, 0, 0, 0, 0]
elif action.type == HINT_NUMBER:
for i in range(len(copy_knowledge)):
if self.partner_hand[i][1] == action.num:
for j in range(0, 5): # number of colors
for k in range(0, 5):
if k != action.num - 1:
copy_knowledge[i][j][k] = 0
else:
for j in range(0, 5):
copy_knowledge[i][j][action.num - 1] = 0
new_weighted_knowledge = weight_knowledge(copy_knowledge, new_partner_weights)
# print(action)
return self.partner_knowledge_index - self._eval_partner_knowledge(new_weighted_knowledge)
#if target == -1:
# return 0.25
#if target_possible(action, target, self.partner_weighted_knowledge, self.state.get_board()):
# if card_playable(self.partner_hand[target], self.state.get_board()):
# # TODO: differentiate between valid hints
# return 0.8
# else:
# return 0.1
#return 0.25
def eval_action(self, action):
if action.type == PLAY:
return self.play_bias * self.play_risk_weight * self._eval_play(action)
elif action.type == DISCARD:
return self.disc_bias * self._eval_discard(action)
return self.hint_bias * self.hint_risk_weight * self._eval_hint(action)
def get_action(self, game_state, player_model):
self.turn += 1
# because of valid_action's implementation we need to update this here as well to get the correct legal moves
self._update_info(game_state, player_model)
# print(self.name)
# for hint in self.model.get_hints():
# print(hint[1])
# count cards
self._count_cards()
# compute weighted knowledge, weighted partner knowledge
self.weighted_knowledge = weight_knowledge(self.knowledge, self.weights)
self.partner_weighted_knowledge = weight_knowledge(self.partner_knowledge, self.partner_weights)
self.partner_knowledge_index = self._eval_partner_knowledge(self.partner_weighted_knowledge)
value_dict = {}
# evaluate all moves and return maximum
best_action = None
# all values are in [0, 1], so this is lower than all possible values
max_value = -1.0
# print(self.name)
# print(self.hint_risk_weight)
for action in self.state.get_valid_actions():
# print(action)
value = self.eval_action(action)
# print(value)
if value > max_value:
best_action = action
max_value = value
if self.get_action_values:
value_dict[action] = value
if self.get_action_values:
return value_dict
return best_action
def inform(self, action, player, new_state, new_model):
self._update_info(new_state, new_model)
self._count_cards()
if player == self.pnr:
if action.type in [PLAY, DISCARD]:
# reset weights for specific slot
del self.weights[action.cnr]
if len(self.knowledge) != len(self.weights):
self.weights.append([
[1 for _ in range(5)] for _ in range(5)
])
else:
self.last_hint = action
# on hint, update partner weights accordingly
target = -1
hint_indices = copy.deepcopy(new_state.get_hinted_indices())
if self.hint_direction == "left":
hint_indices.reverse()
while hint_indices:
potential_target = hint_indices[-1]
if slot_playable_pct(self.partner_weighted_knowledge[potential_target], new_state.get_board()) \
<= self.play_threshold:
target = potential_target
break
del hint_indices[-1]
update_weights(
self.partner_weights, self.hint_weight, new_state.get_board(), target)
self.weighted_knowledge = weight_knowledge(self.knowledge, self.weights)
self.partner_weighted_knowledge = weight_knowledge(self.partner_knowledge, self.partner_weights)
return
# for 2 player games there's only 1 other player
assert player == self.partner_nr
if action.type in [HINT_COLOR, HINT_NUMBER]:
target = -1
self.weighted_knowledge = weight_knowledge(self.knowledge, self.weights)
hint_indices = copy.deepcopy(new_state.get_hinted_indices())
if self.hint_direction == "left":
hint_indices.reverse()
while hint_indices:
potential_target | |
vectors in a document use the suffix (ends with) "_vector_" for the field name. e.g. "product_description_vector_".
- When inserting or specifying chunks in a document the suffix (ends with) "_chunk_" for the field name. e.g. "products_chunk_".
- When inserting or specifying chunk vectors in a document's chunks use the suffix (ends with) "_chunkvector_" for the field name. e.g. "products_chunk_.product_description_chunkvector_".
- Try to keep each batch of documents to insert under 200mb to avoid the insert timing out.
Parameters
----------
dataset_id : string
Unique name of dataset
documents : list
            A list of documents. A document is JSON-like data in which metadata and vectors are stored. Use the field '_id' to specify the document id and the '_vector_' suffix to specify vector fields.
insert_date : bool
Whether to include insert date as a field 'insert_date_'.
overwrite : bool
Whether to overwrite document if it exists.
update_schema : bool
Whether the api should check the documents for vector datatype to update the schema.
include_inserted_ids: bool
Include the inserted IDs in the response
field_transformers: list
An example field_transformers object:
>>> {
>>> "field": "string",
>>> "output_field": "string",
>>> "remove_html": true,
>>> "split_sentences": true
>>> }
"""
field_transformers = [] if field_transformers is None else field_transformers
base_url = self.base_url
if return_documents is False:
return self.make_http_request(
endpoint=f"/datasets/{dataset_id}/documents/bulk_insert",
base_url=base_url,
method="POST",
parameters={
"documents": documents,
"insert_date": insert_date,
"overwrite": overwrite,
"update_schema": update_schema,
"field_transformers": field_transformers,
},
)
else:
response_json = self.make_http_request(
endpoint=f"/datasets/{dataset_id}/documents/bulk_insert",
base_url=base_url,
method="POST",
parameters={
"documents": documents,
"insert_date": insert_date,
"overwrite": overwrite,
"update_schema": update_schema,
"field_transformers": field_transformers,
},
)
try:
status_code = response_json.status_code
except:
status_code = 200
return {
"response_json": response_json,
"documents": documents,
"status_code": status_code,
}
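    # Hypothetical payload sketch for bulk_insert (field names are illustrative;
    # note the "_id" and "_vector_" conventions described in the docstring):
    #   docs = [
    #       {"_id": "prod-1",
    #        "title": "Red shoe",
    #        "product_description_vector_": [0.12, 0.56, 0.98]},
    #   ]
    #   client.bulk_insert("products", docs, overwrite=True)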
async def bulk_insert_async(
self,
dataset_id: str,
documents: list,
insert_date: bool = True,
overwrite: bool = True,
update_schema: bool = True,
field_transformers: Optional[list] = None,
):
"""
Asynchronous version of bulk_insert. See bulk_insert for details.
Parameters
----------
dataset_id: str
Unique name of dataset
documents: list
            A list of documents. A document is JSON-like data in which metadata and vectors are stored. Use the field '_id' to specify the document id and the '_vector_' suffix to specify vector fields.
insert_date: bool
Whether to include insert date as a field 'insert_date_'.
overwrite: bool
Whether to overwrite document if it exists.
update_schema: bool
Whether the api should check the documents for vector datatype to update the schema.
field_transformers: list
"""
field_transformers = [] if field_transformers is None else field_transformers
return await self.make_async_http_request(
base_url=self.base_url,
endpoint=f"/datasets/{dataset_id}/documents/bulk_insert",
method="POST",
parameters={
"documents": documents,
"insert_date": insert_date,
"overwrite": overwrite,
"update_schema": update_schema,
"field_transformers": field_transformers,
},
)
def delete(self, dataset_id: str, confirm: bool = False):
"""
Delete a dataset
Parameters
----------
dataset_id : string
Unique name of dataset
"""
if confirm:
# confirm with the user
self.logger.critical(f"You are about to delete {dataset_id}")
user_input = input("Confirm? [Y/N] ")
else:
user_input = "y"
# input validation
if user_input.lower() in ("y", "yes"):
if "gateway-api-aueast" in self.base_url:
return self.make_http_request(
endpoint=f"/datasets/delete",
method="POST",
parameters={"dataset_id": dataset_id},
raise_error=False,
)
else:
return self.make_http_request(
endpoint=f"/datasets/{dataset_id}/delete",
method="POST",
raise_error=False
# parameters={"dataset_id": dataset_id},
)
elif user_input.lower() in ("n", "no"):
self.logger.critical(f"{dataset_id} not deleted")
return
else:
self.logger.critical(f"Error: Input {user_input} unrecognised.")
return
def clone(
self,
old_dataset: str,
new_dataset: str,
schema: Optional[dict] = None,
rename_fields: Optional[dict] = None,
remove_fields: Optional[list] = None,
filters: Optional[list] = None,
):
"""
Clone a dataset into a new dataset. You can use this to rename fields and change data schemas. This is considered a project job.
Parameters
----------
old_dataset : string
Unique name of old dataset to copy from
new_dataset : string
Unique name of new dataset to copy to
schema : dict
Schema for specifying the field that are vectors and its length
rename_fields : dict
Fields to rename {'old_field': 'new_field'}. Defaults to no renames
remove_fields : list
Fields to remove ['random_field', 'another_random_field']. Defaults to no removes
filters : list
Query for filtering the search results
"""
schema = {} if schema is None else schema
rename_fields = {} if rename_fields is None else rename_fields
remove_fields = [] if remove_fields is None else remove_fields
filters = [] if filters is None else filters
dataset_id = old_dataset
return self.make_http_request(
endpoint=f"/datasets/{dataset_id}/clone",
method="POST",
parameters={
"new_dataset_id": new_dataset,
"schema": schema,
"rename_fields": rename_fields,
"remove_fields": remove_fields,
"filters": filters,
},
)
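    # Hypothetical usage sketch (dataset and field names are illustrative):
    #   client.clone("products_raw", "products_clean",
    #                rename_fields={"desc": "description"},
    #                remove_fields=["tmp_debug_field"])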
def search(
self,
query,
sort_by_created_at_date: bool = False,
asc: bool = False,
):
"""
Search datasets by their names with a traditional keyword search.
Parameters
----------
query : string
Any string that belongs to part of a dataset.
sort_by_created_at_date : bool
Sort by created at date. By default shows the newest datasets. Set asc=False to get oldest dataset.
asc : bool
Whether to sort results by ascending or descending order
"""
return self.make_http_request(
endpoint="/datasets/search",
method="GET",
parameters={
"query": query,
"sort_by_created_at_date": sort_by_created_at_date,
"asc": asc,
},
)
def task_status(self, dataset_id: str, task_id: str):
"""
Check the status of an existing encoding task on the given dataset. \n
The required task_id was returned in the original encoding request such as datasets.vectorize.
Parameters
----------
dataset_id : string
Unique name of dataset
task_id : string
The task ID of the earlier queued vectorize task
"""
return self.make_http_request(
endpoint=f"/datasets/{dataset_id}/task_status",
method="GET",
parameters={"task_id": task_id},
)
def get_file_upload_urls(self, dataset_id: str, files: List):
"""
Specify a list of file paths. For each file path, a url upload_url is returned. files can be POSTed on upload_url to upload them. They can then be accessed on url. Upon dataset deletion, these files will be deleted.
Parameters
-------------
files: list
List of files to be uploaded
dataset_id: str
The dataset
"""
return self.make_http_request(
endpoint=f"/datasets/{dataset_id}/get_file_upload_urls",
method="POST",
parameters={"files": files},
)
def details(
self,
dataset_id: str,
include_schema: bool = True,
include_stats: bool = True,
include_metadata: bool = True,
include_schema_stats: bool = False,
include_vector_health: bool = False,
include_active_jobs: bool = False,
include_settings: bool = False,
):
"""
Get details about your dataset.
"""
return self.make_http_request(
endpoint=f"/datasets/{dataset_id}/details",
method="POST",
parameters={
"include_schema": include_schema,
"include_stats": include_stats,
"include_metadata": include_metadata,
"include_schema_stats": include_schema_stats,
"include_vector_health": include_vector_health,
"include_active_jobs": include_active_jobs,
"include_settings": include_settings,
},
)
def aggregate(
self,
dataset_id: str,
groupby: List = None,
metrics: List = None,
select_fields: List = None,
sort: List[str] = None,
asc: bool = False,
filters: List = None,
page_size: int = 20,
page: int = 1,
aggregation_query: dict = None,
):
"""
Aggregation/Groupby of a collection using an aggregation query. The aggregation query is a json body that follows the schema of:
.. code-block::
{
"groupby" : [
{"name": <alias>, "field": <field in the collection>, "agg": "category"},
{"name": <alias>, "field": <another groupby field in the collection>, "agg": "numeric"}
],
"metrics" : [
{"name": <alias>, "field": <numeric field in the collection>, "agg": "avg"}
{"name": <alias>, "field": <another numeric field in the collection>, "agg": "max"}
]
}
For example, one can use the following aggregations to group score based on region and player name.
{
"groupby" : [
{"name": "region", "field": "player_region", "agg": "category"},
{"name": "player_name", "field": "name", "agg": "category"}
],
"metrics" : [
{"name": "average_score", "field": "final_score", "agg": "avg"},
{"name": "max_score", "field": "final_score", "agg": "max"},
{'name':'total_score','field':"final_score", 'agg':'sum'},
{'name':'average_deaths','field':"final_deaths", 'agg':'avg'},
{'name':'highest_deaths','field':"final_deaths", 'agg':'max'},
]
}
"""
# "https://api-dev.ap-southeast-2.relevance.ai/latest/datasets/{DATASET_ID}/aggregate"
filters = [] if filters is None else filters
if aggregation_query is None:
if metrics is None:
metrics = []
aggregation_query = {"metrics": metrics}
if groupby:
aggregation_query["groupby"] = groupby
if sort:
aggregation_query["sort"] = sort
return self.make_http_request(
endpoint=f"/datasets/{dataset_id}/aggregate",
method="POST",
parameters={
"aggregation_query": aggregation_query,
"filters": filters,
"page_size": page_size,
"page": page,
"asc": asc,
"select_fields": select_fields,
},
)
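    # Hypothetical usage sketch (field names are illustrative and follow the
    # aggregation-query schema described in the docstring):
    #   client.aggregate(
    #       "game_results",
    #       groupby=[{"name": "region", "field": "player_region", "agg": "category"}],
    #       metrics=[{"name": "avg_score", "field": "final_score", "agg": "avg"}],
    #       page_size=20,
    #   )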
def fast_search(
self,
dataset_id: str,
query: str = None,
queryConfig: dict = None,
vectorSearchQuery: dict = None,
instantAnswerQuery: dict = None,
fieldsToSearch: List = None,
page: int = 0,
pageSize: int = 10,
minimumRelevance: int = 0,
includeRelevance: bool = True,
includeDataset: bool = False,
cleanPayloadUsingSchema: bool = True,
sort: dict = None,
includeFields: Optional[List] = None,
excludeFields: Optional[List] = None,
includeVectors: bool = True,
textSort: Optional[dict] = None,
fieldsToAggregate: Optional[List] = None,
fieldsToAggregateStats: Optional[List] = None,
filters: Optional[List] = None,
relevanceBoosters: Optional[List] = None,
afterId: Optional[List] = None,
):
"""
Parameters
------------
query: | |
<gh_stars>0
from .utils import *
class Filter(object):####TODO add logging
def __init__(self, measure, cutting_rule):
"""
Basic univariate filter class with chosen(even custom) measure and cutting rule
:param measure:
Examples
--------
>>> f=Filter("PearsonCorr", GLOB_CR["K best"](6))
"""
        inter_class = 0.0
        intra_class = 0.0
        mu_total = np.mean(row)
        for value in np.unique(y_data):
            index_for_this_value = np.where(y_data == value)[0]
            n = index_for_this_value.shape[0]
            mu = np.mean(row[index_for_this_value])
            var = np.var(row[index_for_this_value])
            # between-class spread is measured against the overall feature mean
            inter_class += n * np.power((mu - mu_total), 2)
            intra_class += (n - 1) * var
f_ratio = inter_class / intra_class
return f_ratio
@classmethod
def __f_ratio_measure(cls, X, y, n):
X, y = _DefaultMeasures.__check_input(X, y)
assert not 1 < X.shape[1] < n, 'incorrect number of features'
f_ratios = []
for feature in X.T:
f_ratio = _DefaultMeasures.__calculate_F_ratio(feature, y.T)
f_ratios.append(f_ratio)
f_ratios = np.array(f_ratios)
return np.argpartition(f_ratios, -n)[-n:]
@staticmethod
def f_ratio_measure(n):
return partial(_DefaultMeasures.__f_ratio_measure, n=n)
@staticmethod
def gini_index(X, y):
X, y = _DefaultMeasures.__check_input(X, y)
cum_x = np.cumsum(X / np.linalg.norm(X, 1, axis=0), axis=0)
cum_y = np.cumsum(y / np.linalg.norm(y, 1))
diff_x = (cum_x[1:] - cum_x[:-1])
diff_y = (cum_y[1:] + cum_y[:-1])
return np.abs(1 - np.sum(np.multiply(diff_x.T, diff_y).T, axis=0))
# Calculate the entropy of y.
@staticmethod
def __calc_entropy(y):
dict_label = dict()
for label in y:
if label not in dict_label:
dict_label.update({label: 1})
else:
dict_label[label] += 1
entropy = 0.0
for i in dict_label.values():
entropy += -i / len(y) * log(i / len(y), 2)
return entropy
@staticmethod
def __calc_conditional_entropy(x_j, y):
dict_i = dict()
for i in range(x_j.shape[0]):
if x_j[i] not in dict_i:
dict_i.update({x_j[i]: [i]})
else:
dict_i[x_j[i]].append(i)
# Conditional entropy of a feature.
con_entropy = 0.0
# get corresponding values in y.
for f in dict_i.values():
# Probability of each class in a feature.
p = len(f) / len(x_j)
# Dictionary of corresponding probability in labels.
dict_y = dict()
for i in f:
if y[i] not in dict_y:
dict_y.update({y[i]: 1})
else:
dict_y[y[i]] += 1
# calculate the probability of corresponding label.
sub_entropy = 0.0
for l in dict_y.values():
sub_entropy += -l / sum(dict_y.values()) * log(l / sum(dict_y.values()), 2)
con_entropy += sub_entropy * p
return con_entropy
# IGFilter = filters.IGFilter() # TODO: unexpected .run() interface; .run() feature_names; no default constructor
@staticmethod
def ig_measure(X, y):
X, y = _DefaultMeasures.__check_input(X, y)
entropy = _DefaultMeasures.__calc_entropy(y)
f_ratios = np.empty(X.shape[1])
for index in range(X.shape[1]):
f_ratios[index] = entropy - _DefaultMeasures.__calc_conditional_entropy(X[:, index], y)
return f_ratios
@staticmethod
def __contingency_matrix(labels_true, labels_pred):
"""Build a contingency matrix describing the relationship between labels.
Parameters
----------
labels_true : int array, shape = [n_samples]
Ground truth class labels to be used as a reference
labels_pred : array, shape = [n_samples]
Cluster labels to evaluate
Returns
-------
contingency : {array-like, sparse}, shape=[n_classes_true, n_classes_pred]
Matrix :math:`C` such that :math:`C_{i, j}` is the number of samples in
true class :math:`i` and in predicted class :math:`j`. If
``eps is None``, the dtype of this array will be integer. If ``eps`` is
given, the dtype will be float.
"""
classes, class_idx = np.unique(labels_true, return_inverse=True)
clusters, cluster_idx = np.unique(labels_pred, return_inverse=True)
n_classes = classes.shape[0]
n_clusters = clusters.shape[0]
# Using coo_matrix to accelerate simple histogram calculation,
# i.e. bins are consecutive integers
# Currently, coo_matrix is faster than histogram2d for simple cases
# TODO redo it with numpy
contingency = sp.csr_matrix((np.ones(class_idx.shape[0]),
(class_idx, cluster_idx)),
shape=(n_classes, n_clusters),
dtype=np.int)
contingency.sum_duplicates()
return contingency
@staticmethod
def __mi(U, V):
contingency = _DefaultMeasures.__contingency_matrix(U, V)
nzx, nzy, nz_val = sp.find(contingency)
contingency_sum = contingency.sum()
pi = np.ravel(contingency.sum(axis=1))
pj = np.ravel(contingency.sum(axis=0))
log_contingency_nm = np.log(nz_val)
contingency_nm = nz_val / contingency_sum
# Don't need to calculate the full outer product, just for non-zeroes
outer = (pi.take(nzx).astype(np.int64, copy=False)
* pj.take(nzy).astype(np.int64, copy=False))
log_outer = -np.log(outer) + log(pi.sum()) + log(pj.sum())
mi = (contingency_nm * (log_contingency_nm - log(contingency_sum)) +
contingency_nm * log_outer)
return mi.sum()
@classmethod
def __mrmr_measure(cls, X, y, n):
assert not 1 < X.shape[1] < n, 'incorrect number of features'
x, y = _DefaultMeasures.__check_input(X, y)
# print([_DefaultMeasures.__mi(X[:, j].reshape(-1, 1), y) for j in range(X.shape[1])])
return [MI(x[:, j].reshape(-1, 1), y) for j in range(x.shape[1])]
@staticmethod
def mrmr_measure(n):
return partial(_DefaultMeasures.__mrmr_measure, n=n)
# RandomFilter = filters.RandomFilter() # TODO: bad .run() interface; .run() feature_names; no default constructor
@staticmethod
def su_measure(X, y):
X, y = _DefaultMeasures.__check_input(X, y)
entropy = _DefaultMeasures.__calc_entropy(y)
f_ratios = np.empty(X.shape[1])
for index in range(X.shape[1]):
entropy_x = _DefaultMeasures.__calc_entropy(X[:, index])
con_entropy = _DefaultMeasures.__calc_conditional_entropy(X[:, index], y)
f_ratios[index] = 2 * (entropy - con_entropy) / (entropy_x + entropy)
return f_ratios
@staticmethod
def spearman_corr(X, y):
X, y = _DefaultMeasures.__check_input(X, y)
        n = X.shape[0]
        # Spearman is Pearson correlation computed on ranks; a double argsort
        # yields each value's rank (ties are not averaged here).
        x_ranks = X.argsort(axis=0).argsort(axis=0)
        y_ranks = y.argsort().argsort()
        c = 6 / (n * (n - 1) * (n + 1))
        dif = x_ranks - y_ranks.reshape(-1, 1)
return 1 - c * np.sum(dif * dif, axis=0)
@staticmethod
def pearson_corr(X, y):
X, y = _DefaultMeasures.__check_input(X, y)
x_dev = X - np.mean(X, axis=0)
y_dev = y - np.mean(y)
sum_dev = y_dev.T.dot(x_dev)
sq_dev_x = x_dev * x_dev
sq_dev_y = y_dev * y_dev
return (sum_dev / np.sqrt(np.sum(sq_dev_y) * np.sum(sq_dev_x))).reshape((-1,))
# TODO concordation coef
@staticmethod
def fechner_corr(X, y):
"""
Sample sign correlation (also known as Fechner correlation)
"""
X, y = _DefaultMeasures.__check_input(X, y)
y_mean = np.mean(y)
n = X.shape[0]
f_ratios = np.zeros(X.shape[1])
        for j in range(X.shape[1]):
            x_j_mean = np.mean(X[:, j])
            for i in range(n):
                x_dev = X[i, j] - x_j_mean
                y_dev = y[i] - y_mean
                # Fechner correlation counts agreement of deviation signs
                if (x_dev >= 0) == (y_dev >= 0):
                    f_ratios[j] += 1
                else:
                    f_ratios[j] -= 1
            f_ratios[j] /= n
return f_ratios
@staticmethod
def __label_binarize(y):
"""
Binarize labels in a one-vs-all fashion
This function makes it possible to compute this transformation for a
fixed set of class labels known ahead of time.
"""
classes = np.unique(y)
n_samples = len(y)
n_classes = len(classes)
row = np.arange(n_samples)
col = [np.where(classes == el)[0][0] for el in y]
data = np.repeat(1, n_samples)
# TODO redo it with numpy
return sp.csr_matrix((data, (row, col)), shape=(n_samples, n_classes)).toarray()
@staticmethod
def __chisquare(f_obs, f_exp):
"""Fast replacement for scipy.stats.chisquare.
Version from https://github.com/scipy/scipy/pull/2525 with additional
optimizations.
"""
f_obs = np.asarray(f_obs, dtype=np.float64)
# Reuse f_obs for chi-squared statistics
chisq = f_obs
chisq -= f_exp
chisq **= 2
with np.errstate(invalid="ignore"):
chisq /= f_exp
chisq = chisq.sum(axis=0)
return chisq
@staticmethod
def chi2_measure(X, y):
"""
This score can be used to select the n_features features with the highest values
for the test chi-squared statistic from X,
which must contain only non-negative features such as booleans or frequencies
(e.g., term counts in document classification), relative to the classes.
"""
X, y = _DefaultMeasures.__check_input(X, y)
if np.any(X < 0):
raise ValueError("Input X must be non-negative.")
Y = _DefaultMeasures.__label_binarize(y)
# If you use sparse input
# you can use sklearn.utils.extmath.safe_sparse_dot instead
observed = np.dot(Y.T, X) # n_classes * n_features
feature_count = X.sum(axis=0).reshape(1, -1)
class_prob = Y.mean(axis=0).reshape(1, -1)
expected = np.dot(class_prob.T, feature_count)
return _DefaultMeasures.__chisquare(observed, expected)
@staticmethod
def __distance_matrix(X, y, n_samples):
dm = np.zeros((n_samples, n_samples), dtype=tuple)
for i in range(n_samples):
for j in range(i, n_samples):
# using the Manhattan (L1) norm rather than
# the Euclidean (L2) norm,
# although the rationale is not specified
value = np.linalg.norm(X[i, :] - X[j, :], 1)
dm[i, j] = (value, j, y[j])
dm[j, i] = (value, i, y[i])
# sort_indices = dm.argsort(1)
# dm.sort(1)
# indices = np.arange(n_samples) #[sort_indices]
# dm = np.dstack((dm, indices))
return dm
# TODO redo with np.where
@staticmethod
def __take_k(dm_i, k, r_index, choice_func):
hits = []
dm_i = sorted(dm_i, key=lambda x: x[0])
for samp in dm_i:
if (samp[1] != r_index) & (k > 0) & (choice_func(samp[2])):
hits.append(samp)
k -= 1
return np.array(hits, int)
@staticmethod
def reliefF_measure(X, y, k_neighbors=1):
"""
Based on the ReliefF algorithm as introduced in:
<NAME> al. Relief-based feature selection: Introduction and review
Journal of Biomedical Informatics 85 (2018) 189–203
Differs with skrebate.ReliefF
Only for complete X
Rather than repeating the algorithm m(TODO Ask Nikita about user defined) times,
implement it exhaustively (i.e. n times, once for each instance)
for relatively small n (up to one thousand).
:param X: array-like {n_samples, n_features}
Training instances to compute the feature importance scores from
:param y: array-like {n_samples}
Training labels
:param k_neighbors: int (default: 1)
The number of neighbors to consider when assigning feature importance scores.
More neighbors | |
    design, per sample, whether fit_weight should be the same as measure_weight or not. For example, to reduce the effect of the majority class, it is good practice to raise the weights of minority-class samples when training the model, while restoring the original weights when measuring performance with an index such as KS or ROC_AUC. The reason is that the loss function of the regression is majority-class sensitive, so the user may need to adjust the fitting weights. Indices such as KS and ROC_AUC are computed in a way that is insensitive to class imbalance, so the measuring weights need not be adjusted unless the user believes the loss penalty should differ between samples.
    note: although the user may set measure='KS' or 'ROC_AUC' to evaluate performance and pick features, MultiProcessMStepRegression.LogisticReg is still majority-class sensitive, because the underlying algorithm is standard logistic regression.
    measure: str, ks (default) | accuracy | roc_auc | balanced_accuracy | average_precision
    Performance evaluation function. y_true, y_hat and measure_weight are passed into the measure function automatically; any other parameters are passed in via kw_measure_args.
measure_weight:Series
    The length of measure_weight is the same as the length of y. It is the weight used by the measuring function. If None (default), every sample has the same measuring weight.
    See also fit_weight
    kw_measure_args: dict | None (default)
    All parameters other than y_true, y_hat and measure_weight that should be delivered to the measure function are put in kw_measure_args.
    None means no extra parameters are delivered to the measure function.
max_pvalue_limit:float
The max P-VALUE limit.
0.05(default)
max_vif_limit:float
The max VIF limit.
3(default)
max_corr_limit:float
The max coefficient of correlation limit.
0.6(default)
    coef_sign: '+', '-', dict, None (default)
    If the user has a priori knowledge of the relation between X and y, such as positive or negative correlation, the signs of the regression coefficients can be constrained with this parameter.
    '+': all regression coefficients must be positive
    '-': all regression coefficients must be negative
    dict: the format is {'x_name1': '+', 'x_name2': '-'}. Put the coefficients whose signs are known a priori into the dict; only those signs are constrained. Coefficients not included in the dict are unconstrained.
    None: no coefficient signs are constrained.
    iter_num: int
    Number of iterations for picking features. Default is 20. np.inf means no limit on the number of iterations; with many features the running time can be long. If all features are already picked into the model, or adding/removing any feature no longer improves performance, the actual number of iterations may be smaller than iter_num. Each iteration consists of two steps: 1. Try adding each feature not yet in the model and pick the one that satisfies the user's settings and improves model performance most. 2. Try removing each feature currently in the model and remove the one that satisfies the user's settings and improves model performance most. Completing steps 1 and 2 finishes one iteration. If neither step picks a feature, the iteration terminates early regardless of whether iter_num has been reached.
    kw_algorithm_class_args: dict
    All parameters other than X, y and fit_weight that should be delivered to the logistic regression algorithm are put in kw_algorithm_class_args.
    Note: y and X are called endog and exog in statsmodels.genmod.generalized_linear_model.GLM
    n_core: int | float | None
    Number of CPU processes to use. If int, the user specifies the count directly. If float, the count is that proportion of all CPU processes, rounded up to an int (ceil). If None (default), all CPU processes minus 1 are used.
    logger_file_CH: str
    A log file name where the step-wise procedure is recorded in Chinese. If None (default), no Chinese log is recorded.
    logger_file_EN: str
    A log file name where the step-wise procedure is recorded in English. If None (default), no English log is recorded.
'''
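    # Hypothetical usage sketch (illustrative only; X is a DataFrame of features
    # and y a Series target, as described above):
    #   lr = LogisticReg(X, y, measure='ks', max_pvalue_limit=0.05,
    #                    coef_sign='+', iter_num=20, n_core=4)
    #   # the step-wise selection itself is started by the class's fit/run entry
    #   # point, which is not shown in this excerpt.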
def __init__(self,X,y,fit_weight=None,measure='ks',measure_weight=None,kw_measure_args=None,max_pvalue_limit=0.05,max_vif_limit=3,max_corr_limit=0.6,coef_sign=None,iter_num=20,kw_algorithm_class_args=None,n_core=None,logger_file_CH=None,logger_file_EN=None):
Regression.__init__(self,X,y,fit_weight,measure,measure_weight,kw_measure_args,max_pvalue_limit,max_vif_limit,max_corr_limit,coef_sign,iter_num,kw_algorithm_class_args,n_core,logger_file_CH,logger_file_EN)
def _regression(self,in_vars):
X = self.X[in_vars]
if self.fit_weight is None:
if self.kw_algorithm_class_args is not None:
glm = GLM(self.y,sm.add_constant(X),family = Binomial(link=logit),**self.kw_algorithm_class_args)
else:
glm = GLM(self.y,sm.add_constant(X),family = Binomial(link=logit))
else:
if self.kw_algorithm_class_args is not None:
glm = GLM(self.y,sm.add_constant(X),family = Binomial(link=logit),freq_weights = self.fit_weight,**self.kw_algorithm_class_args)
else:
glm = GLM(self.y,sm.add_constant(X),family = Binomial(link=logit),freq_weights = self.fit_weight)
clf = glm.fit()
clf.intercept_=[clf.params.const]
clf.coef_=[clf.params[1:]]
return clf
class LinearReg(Regression):
'''
中文版文档(Document in English is in the next.)
MultiProcessMStepRegression.LinearReg:多进程逐步线性回归,其底层的线性回归算法使用的是statsmodels.api.OLS或statsmodels.api.WLS,依据用户是否使用训练样本权重来绝定。
每一次向前添加过程中都会使用多进程来同时遍历多个解释变量,然后选取其中符合使用者设定的条件且能给线性回归带来最大性能提升的解释变量加入到模型中,如果所有变量都不能在满足使用者设置条件的前提下提升模型性能,则此次添加过程不加入任何变量。
每一次的向后删除过程中也使用与向前添加过程同样的原则来决定删除哪个变量。
在添加过程中模型性能有提升,但是部分条件不被满足,此时会额外触发一轮向后删除的过程,如果删除的变量与正要添加的变量为同一个,则此变量不被加入,添加流程结束。如果删除的变量与正要添加的变量不是同一个,则添加当前的变量,并将需要删除的变量从当前选中变量列表中排除。额外触发的向后删除过程与正常的向后删除过程的流程一致。
在建模结束后,会将没有入选的解释变量分别加入到现有模型变量中,通过重新建模,会给出一个准确的没有入选该变量的原因。
支持的功能点如下:
1.支持双向逐步回归(Step_Wise)
2.支持多进程,在每步增加变量或删除变量时,使用多进程来遍历每个候选变量。Windows系统也支持多进程。
3.支持使用者指定的指标来作为变量添加或删除的依据,而不是使用AIC或BIC,在处理不平衡数据时可以让使用者选择衡量不平衡数据的指标
4.支持使用者指定P-VALUE的阈值,如果超过该阈值,即使指标有提升,也不会被加入到变量中
5.支持使用者指定VIF的阈值,如果超过该阈值,即使指标有提升,也不会被加入到变量中
6.支持使用者指定相关系数的阈值,如果超过该阈值,即使指标有提升,也不会被加入到变量中
7.支持使用者指定回归系数的正负号,在某些业务中,有些特征有明显的业务含义,例如WOE转换后的数据,就会要求回归系数均为正或均为负,加入对系数正负号的限制,如果回归系数不满足符号要求,则当前变量不会被加入到变量中
8.上述4,5,6,7均在逐步回归中完成,挑选变量的同时校验各类阈值与符号
9.会给出每一个没有入模变量被剔除的原因,如加入后指标下降,P-VALUE超出指定阈值,正负号与使用者的预期不符等等。
10.支持中英文双语的日志,会将逐步回归中的每一轮迭代的情况记录到中文日志和英文日志中
注意:因为该类会将数据X和y作为该类一个实例的属性,所以实例会比较大,因此非必要时,尽量不要保存MultiProcessMStepRegression.LinearReg的实例。而是保存其返回的模型和删除原因等信息。
Parameters
----------
X:DataFrame
features
y:Series
target
fit_weight:Series
长度与样本量相同,为训练模型时的weight,如果取值为None(默认),则认为各个样本的训练权重相同,选用statsmodels.api.OLS做为底层的实现算法。如果不为空,则会选用statsmodels.api.WLS做为底层的实现算法。在线性回归中设置权重的目的是,在异方差的情况下,训练出稳定的模型。
measure:str r2(默认) | explained_variance_score | max_error
计算线性回归模型性能的函数,y_true,y_hat和measure_weight会被自动传递进指定measure函数中,其余参数会由kw_measure_args传入
measure_weight:Series
长度与样本量相同,为度量模型性能时的weight,如果取值为None(默认),则认为各个样本的度量权重相同。
kw_measure_args:dict | None(默认)
measure函数除y_true,y_hat,measure_weight外,其余需要传入的参数都写入该dict里。None意味着不需要传入额外的参数
max_pvalue_limit:float
允许的P-VALUE的最大值。0.05(默认)
max_vif_limit:float
允许的VIF的最大值。3(默认)
max_corr_limit:float
允许的相关系数的最大值。0.6(默认)
coef_sign:'+','-',dict,None(默认)
如果知道X对y的影响关系--正相关或负相关,则可以对变量的符号进行约束。
'+':所有X的系数都应为正数
'-':所有X的系数都应为负数
dict:格式如{'x_name1':'+','x_name2':'-'},将已知的X的系数符号配置在dict中,以对回归结果中X的系数的正负号进行约束。没有被包含在dict中的变量,不对其系数进行约束
None:所有X的系数的正负号都不被约束
iter_num:int
挑选变量的轮数,默认为20。np.inf表示不限制轮数,当变量很多时,需要较长的运行时间。如果所有的变量都已经被选入到模型,或者不能通过增加或删除变量来进一步提升模型性能,则实际迭代轮数可能小于iter_num。每一轮挑选变量包含如下步骤:1.尝试将每一个还未被加入到模型中的变量加入到当前模型中,选出一个满足使用者设置的条件且使模型性能提升最多的变量加入到模型中。2.在当前模型中的每一个变量尝试删除,选出一个满足使用者设置的条件且使模型性能提升最多的变量移出模型。完成1,2两步即为完成一轮迭代。如果步骤1和2均未能挑选出变量,则迭代提前终止,无论是否达到了iter_num。
kw_algorithm_class_args:dict
除X,y,fit_weight外,其它需要传入线性回归算法(OLS,WLS)的参数。
n_core:int | float | None
CPU的进程数。如果是int类型,则为使用CPU的进程数。如果是float类型,则为CPU全部进程数的百分比所对应的进程数(向上取整)。如果为None,则为使用全部CPU进程数-1
logger_file_CH:str
使用者指定的用于记录逐步回归过程的文件名,日志为中文日志。如果为None(默认)则不记录中文日志
logger_file_EN:str
使用者指定的用于记录逐步回归过程的文件名,日志为英文日志。如果为None(默认)则不记录英文日志
Document in English
MultiProcessMStepRegression.LinearReg: a step-wise linear regression with multi-processing. It is based on statsmodels.api.OLS or statsmodels.api.WLS as the underlying linear regression algorithm; which one is used depends on whether a training sample weight is supplied.
In the feature-adding step, multi-processing is used to evaluate several candidate features concurrently. The feature that satisfies the conditions set by the user and gives the largest lift on the measure index is added to the model. If no feature can improve the performance of the model under the user's conditions, no feature is added in the current iteration.
The feature-removing step uses the same policy as the adding step to decide which feature should be removed.
During the adding step, if model performance improves but some of the user's conditions are violated, an additional removing step is triggered. If the feature to remove is the same as the feature being added, the feature is not added and the adding step ends. If they differ, the feature being added is kept and the feature to remove is excluded from the current list of picked features. The additional removing step follows the same procedure as the normal removing step.
When modeling is completed, each feature that was not picked is added back to the picked feature list one at a time; by rebuilding the model with that feature, an exact reason for its exclusion is returned.
The characteristics are listed below:
1.Supporting forward-backward step-wise selection.
2.Supporting multi-processing. When adding or removing features, multi-processing is used to traverse all candidate features.
3.Supporting a user-specified index instead of AIC/BIC for measuring model performance when adding or removing features, which is useful when the user's data is unbalanced.
4.Supporting a user-specified p-value threshold. If the max p-value exceeds this threshold, the current feature is not added, even if it lifts the performance of the model.
5.Supporting a user-specified VIF threshold. If the max VIF exceeds this threshold, the current feature is not added, even if it lifts the performance of the model.
6.Supporting a user-specified correlation coefficient threshold. If the max correlation coefficient exceeds this threshold, the current feature is not added, even if it lifts the performance of the model.
7.Supporting user-specified signs for the regression coefficients. Some features have a clear business meaning, e.g. WOE-transformed data, which requires all regression coefficients to be positive or all negative. If the sign requirement is not met, the current feature is not added, even if it lifts the performance of the model.
8.Items 4, 5, 6 and 7 above are all carried out inside the step-wise procedure; picking features and verifying the thresholds and signs happen simultaneously.
9.Users get a reason for every feature that is not picked, e.g. performance drops after adding it, its p-value exceeds the threshold, or its sign does not match the user's expectation.
10.Supporting Chinese and English logs in which the user can find a record of every iteration.
Note: as X and y are stored as properties of a MultiProcessMStepRegression.LinearReg instance, the instance can be very large. Saving the instance is not recommended; save the returned model and the removal reasons instead.
Parameters
----------
X:DataFrame
features
y:Series
target
fit_weight:Series
The length of fit_weight is the same as the length of y. fit_weight is the weight used when training the model. If None (default), every sample has the same training weight and statsmodels.api.OLS is used as the underlying linear algorithm. If
<reponame>mariakesa/UdacityMachineLearningEngineerNanoDegree
"""
This work modifies code from the pl_bolts library.
Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
1. Definitions.
"License" shall mean the terms and conditions for use, reproduction,
and distribution as defined by Sections 1 through 9 of this document.
"Licensor" shall mean the copyright owner or entity authorized by
the copyright owner that is granting the License.
"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.
"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.
"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.
"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.
"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).
"Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.
"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."
"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.
2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.
3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.
4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:
(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and
(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and
(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and
(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.
You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.
5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.
6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.
7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.
8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including | |
{
"city": "Chesterfield",
"growth_from_2000_to_2013": "1.9%",
"latitude": 38.6631083,
"longitude": -90.5770675,
"population": "47749",
"rank": "782",
"state": "Missouri",
},
{
"city": "Leesburg",
"growth_from_2000_to_2013": "66.0%",
"latitude": 39.1156615,
"longitude": -77.56360149999999,
"population": "47673",
"rank": "783",
"state": "Virginia",
},
{
"city": "Dunwoody",
"growth_from_2000_to_2013": "",
"latitude": 33.9462125,
"longitude": -84.3346473,
"population": "47591",
"rank": "784",
"state": "Georgia",
},
{
"city": "Hattiesburg",
"growth_from_2000_to_2013": "3.1%",
"latitude": 31.3271189,
"longitude": -89.29033919999999,
"population": "47556",
"rank": "785",
"state": "Mississippi",
},
{
"city": "Roseville",
"growth_from_2000_to_2013": "-1.0%",
"latitude": 42.4972583,
"longitude": -82.9371409,
"population": "47555",
"rank": "786",
"state": "Michigan",
},
{
"city": "Bonita Springs",
"growth_from_2000_to_2013": "43.8%",
"latitude": 26.339806,
"longitude": -81.7786972,
"population": "47547",
"rank": "787",
"state": "Florida",
},
{
"city": "Portage",
"growth_from_2000_to_2013": "5.7%",
"latitude": 42.2011538,
"longitude": -85.5800022,
"population": "47523",
"rank": "788",
"state": "Michigan",
},
{
"city": "St. Louis Park",
"growth_from_2000_to_2013": "7.3%",
"latitude": 44.9597376,
"longitude": -93.3702186,
"population": "47411",
"rank": "789",
"state": "Minnesota",
},
{
"city": "Collierville",
"growth_from_2000_to_2013": "43.4%",
"latitude": 35.042036,
"longitude": -89.6645266,
"population": "47333",
"rank": "790",
"state": "Tennessee",
},
{
"city": "Middletown",
"growth_from_2000_to_2013": "3.6%",
"latitude": 41.5623209,
"longitude": -72.6506488,
"population": "47333",
"rank": "791",
"state": "Connecticut",
},
{
"city": "Stillwater",
"growth_from_2000_to_2013": "20.1%",
"latitude": 36.1156071,
"longitude": -97.0583681,
"population": "47186",
"rank": "792",
"state": "Oklahoma",
},
{
"city": "East Providence",
"growth_from_2000_to_2013": "-3.3%",
"latitude": 41.8137116,
"longitude": -71.3700545,
"population": "47149",
"rank": "793",
"state": "Rhode Island",
},
{
"city": "Lawrence",
"growth_from_2000_to_2013": "20.5%",
"latitude": 39.8386516,
"longitude": -86.0252612,
"population": "47135",
"rank": "794",
"state": "Indiana",
},
{
"city": "Wauwatosa",
"growth_from_2000_to_2013": "0.0%",
"latitude": 43.0494572,
"longitude": -88.0075875,
"population": "47134",
"rank": "795",
"state": "Wisconsin",
},
{
"city": "Mentor",
"growth_from_2000_to_2013": "-6.6%",
"latitude": 41.6661573,
"longitude": -81.339552,
"population": "46979",
"rank": "796",
"state": "Ohio",
},
{
"city": "Ceres",
"growth_from_2000_to_2013": "34.0%",
"latitude": 37.5949316,
"longitude": -120.9577098,
"population": "46714",
"rank": "797",
"state": "California",
},
{
"city": "Cedar Hill",
"growth_from_2000_to_2013": "42.4%",
"latitude": 32.5884689,
"longitude": -96.9561152,
"population": "46663",
"rank": "798",
"state": "Texas",
},
{
"city": "Mansfield",
"growth_from_2000_to_2013": "-10.1%",
"latitude": 40.75839,
"longitude": -82.5154471,
"population": "46454",
"rank": "799",
"state": "Ohio",
},
{
"city": "Binghamton",
"growth_from_2000_to_2013": "-1.7%",
"latitude": 42.09868669999999,
"longitude": -75.91797380000001,
"population": "46444",
"rank": "800",
"state": "New York",
},
{
"city": "Coeur d'Alene",
"growth_from_2000_to_2013": "32.8%",
"latitude": 47.6776832,
"longitude": -116.7804664,
"population": "46402",
"rank": "801",
"state": "Idaho",
},
{
"city": "San Luis Obispo",
"growth_from_2000_to_2013": "4.4%",
"latitude": 35.2827524,
"longitude": -120.6596156,
"population": "46377",
"rank": "802",
"state": "California",
},
{
"city": "Minot",
"growth_from_2000_to_2013": "26.6%",
"latitude": 48.2329668,
"longitude": -101.2922906,
"population": "46321",
"rank": "803",
"state": "North Dakota",
},
{
"city": "Palm Springs",
"growth_from_2000_to_2013": "7.7%",
"latitude": 33.8302961,
"longitude": -116.5452921,
"population": "46281",
"rank": "804",
"state": "California",
},
{
"city": "Pine Bluff",
"growth_from_2000_to_2013": "-16.2%",
"latitude": 34.2284312,
"longitude": -92.00319549999999,
"population": "46094",
"rank": "805",
"state": "Arkansas",
},
{
"city": "Texas City",
"growth_from_2000_to_2013": "10.3%",
"latitude": 29.383845,
"longitude": -94.9027002,
"population": "46081",
"rank": "806",
"state": "Texas",
},
{
"city": "Summerville",
"growth_from_2000_to_2013": "62.9%",
"latitude": 33.0185039,
"longitude": -80.17564809999999,
"population": "46074",
"rank": "807",
"state": "South Carolina",
},
{
"city": "Twin Falls",
"growth_from_2000_to_2013": "31.5%",
"latitude": 42.5629668,
"longitude": -114.4608711,
"population": "45981",
"rank": "808",
"state": "Idaho",
},
{
"city": "Jeffersonville",
"growth_from_2000_to_2013": "53.3%",
"latitude": 38.2775702,
"longitude": -85.7371847,
"population": "45929",
"rank": "809",
"state": "Indiana",
},
{
"city": "San Jacinto",
"growth_from_2000_to_2013": "91.8%",
"latitude": 33.7839084,
"longitude": -116.958635,
"population": "45851",
"rank": "810",
"state": "California",
},
{
"city": "Madison",
"growth_from_2000_to_2013": "53.7%",
"latitude": 34.6992579,
"longitude": -86.74833180000002,
"population": "45799",
"rank": "811",
"state": "Alabama",
},
{
"city": "Altoona",
"growth_from_2000_to_2013": "-7.3%",
"latitude": 40.5186809,
"longitude": -78.3947359,
"population": "45796",
"rank": "812",
"state": "Pennsylvania",
},
{
"city": "Columbus",
"growth_from_2000_to_2013": "16.4%",
"latitude": 39.2014404,
"longitude": -85.9213796,
"population": "45775",
"rank": "813",
"state": "Indiana",
},
{
"city": "Beavercreek",
"growth_from_2000_to_2013": "19.0%",
"latitude": 39.7092262,
"longitude": -84.06326849999999,
"population": "45712",
"rank": "814",
"state": "Ohio",
},
{
"city": "Apopka",
"growth_from_2000_to_2013": "63.9%",
"latitude": 28.6934076,
"longitude": -81.5322149,
"population": "45587",
"rank": "815",
"state": "Florida",
},
{
"city": "Elmhurst",
"growth_from_2000_to_2013": "5.7%",
"latitude": 41.8994744,
"longitude": -87.9403418,
"population": "45556",
"rank": "816",
"state": "Illinois",
},
{
"city": "Maricopa",
"growth_from_2000_to_2013": "2503.4%",
"latitude": 33.0581063,
"longitude": -112.0476423,
"population": "45508",
"rank": "817",
"state": "Arizona",
},
{
"city": "Farmington",
"growth_from_2000_to_2013": "18.1%",
"latitude": 36.72805830000001,
"longitude": -108.2186856,
"population": "45426",
"rank": "818",
"state": "New Mexico",
},
{
"city": "Glenview",
"growth_from_2000_to_2013": "5.2%",
"latitude": 42.0697509,
"longitude": -87.7878408,
"population": "45417",
"rank": "819",
"state": "Illinois",
},
{
"city": "Cleveland Heights",
"growth_from_2000_to_2013": "-10.3%",
"latitude": 41.5200518,
"longitude": -81.556235,
"population": "45394",
"rank": "820",
"state": "Ohio",
},
{
"city": "Draper",
"growth_from_2000_to_2013": "77.4%",
"latitude": 40.5246711,
"longitude": -111.8638226,
"population": "45285",
"rank": "821",
"state": "Utah",
},
{
"city": "Lincoln",
"growth_from_2000_to_2013": "285.2%",
"latitude": 38.891565,
"longitude": -121.2930079,
"population": "45237",
"rank": "822",
"state": "California",
},
{
"city": "Sierra Vista",
"growth_from_2000_to_2013": "19.3%",
"latitude": 31.5455001,
"longitude": -110.2772856,
"population": "45129",
"rank": "823",
"state": "Arizona",
},
{
"city": "Lacey",
"growth_from_2000_to_2013": "41.7%",
"latitude": 47.03426289999999,
"longitude": -122.8231915,
"population": "44919",
"rank": "824",
"state": "Washington",
},
{
"city": "Biloxi",
"growth_from_2000_to_2013": "-11.5%",
"latitude": 30.3960318,
"longitude": -88.88530779999999,
"population": "44820",
"rank": "825",
"state": "Mississippi",
},
{
"city": "Strongsville",
"growth_from_2000_to_2013": "1.9%",
"latitude": 41.3144966,
"longitude": -81.83569,
"population": "44730",
"rank": "826",
"state": "Ohio",
},
{
"city": "Barnstable Town",
"growth_from_2000_to_2013": "-7.1%",
"latitude": 41.7003208,
"longitude": -70.3002024,
"population": "44641",
"rank": "827",
"state": "Massachusetts",
},
{
"city": "Wylie",
"growth_from_2000_to_2013": "185.2%",
"latitude": 33.0151201,
"longitude": -96.5388789,
"population": "44575",
"rank": "828",
"state": "Texas",
},
{
"city": "Sayreville",
"growth_from_2000_to_2013": "9.6%",
"latitude": 40.45940210000001,
"longitude": -74.360846,
"population": "44412",
"rank": "829",
"state": "New Jersey",
},
{
"city": "Kannapolis",
"growth_from_2000_to_2013": "18.6%",
"latitude": 35.4873613,
"longitude": -80.6217341,
"population": "44359",
"rank": "830",
"state": "North Carolina",
},
{
"city": "Charlottesville",
"growth_from_2000_to_2013": "10.5%",
"latitude": 38.0293059,
"longitude": -78.47667810000002,
"population": "44349",
"rank": "831",
"state": "Virginia",
},
{
"city": "Littleton",
"growth_from_2000_to_2013": "9.4%",
"latitude": 39.613321,
"longitude": -105.0166498,
"population": "44275",
"rank": "832",
"state": "Colorado",
},
{
"city": "Titusville",
"growth_from_2000_to_2013": "7.8%",
"latitude": 28.6122187,
"longitude": -80.8075537,
"population": "44206",
"rank": "833",
"state": "Florida",
},
{
"city": "Hackensack",
"growth_from_2000_to_2013": "2.9%",
"latitude": 40.8859325,
"longitude": -74.0434736,
"population": "44113",
"rank": "834",
"state": "New Jersey",
},
{
"city": "Newark",
"growth_from_2000_to_2013": "3.3%",
"latitude": 37.5296593,
"longitude": -122.0402399,
"population": "44096",
"rank": "835",
"state": "California",
},
{
"city": "Pittsfield",
"growth_from_2000_to_2013": "-3.6%",
"latitude": 42.4500845,
"longitude": -73.2453824,
"population": "44057",
"rank": "836",
"state": "Massachusetts",
},
{
"city": "York",
"growth_from_2000_to_2013": "6.4%",
"latitude": 39.9625984,
"longitude": -76.727745,
"population": "43935",
"rank": "837",
"state": "Pennsylvania",
},
{
"city": "Lombard",
"growth_from_2000_to_2013": "2.9%",
"latitude": 41.8800296,
"longitude": -88.00784349999999,
"population": "43907",
"rank": "838",
"state": "Illinois",
},
{
"city": "Attleboro",
"growth_from_2000_to_2013": "4.6%",
"latitude": 41.94454409999999,
"longitude": -71.2856082,
"population": "43886",
"rank": "839",
"state": "Massachusetts",
},
{
"city": "DeKalb",
"growth_from_2000_to_2013": "11.8%",
"latitude": 41.9294736,
"longitude": -88.75036469999999,
"population": "43849",
"rank": "840",
"state": "Illinois",
},
{
"city": "Blacksburg",
"growth_from_2000_to_2013": "9.4%",
"latitude": 37.2295733,
"longitude": -80.4139393,
"population": "43609",
"rank": "841",
"state": "Virginia",
},
{
"city": "Dublin",
"growth_from_2000_to_2013": "37.6%",
"latitude": 40.0992294,
"longitude": -83.1140771,
"population": "43607",
"rank": "842",
"state": "Ohio",
},
{
"city": "Haltom City",
"growth_from_2000_to_2013": "11.4%",
"latitude": 32.7995738,
"longitude": -97.26918169999999,
"population": "43580",
"rank": "843",
"state": "Texas",
},
{
"city": "Lompoc",
"growth_from_2000_to_2013": "5.5%",
"latitude": 34.6391501,
"longitude": -120.4579409,
"population": "43509",
"rank": "844",
"state": "California",
},
{
"city": "El Centro",
"growth_from_2000_to_2013": "13.7%",
"latitude": 32.792,
"longitude": -115.5630514,
"population": "43363",
"rank": "845",
"state": "California",
},
{
"city": "Danville",
"growth_from_2000_to_2013": "3.7%",
"latitude": 37.8215929,
"longitude": -121.9999606,
"population": "43341",
"rank": "846",
"state": "California",
},
{
"city": "Jefferson City",
"growth_from_2000_to_2013": "6.7%",
"latitude": 38.57670170000001,
"longitude": -92.1735164,
"population": "43330",
"rank": "847",
"state": "Missouri",
},
{
"city": "Cutler Bay",
"growth_from_2000_to_2013": "42.9%",
"latitude": 25.5808323,
"longitude": -80.34685929999999,
"population": "43328",
"rank": "848",
"state": "Florida",
},
{
"city": "Oakland Park",
"growth_from_2000_to_2013": "2.7%",
"latitude": 26.1723065,
"longitude": -80.1319893,
"population": "43286",
"rank": "849",
"state": "Florida",
},
{
"city": "North Miami Beach",
"growth_from_2000_to_2013": "3.6%",
"latitude": 25.9331488,
"longitude": -80.1625463,
"population": "43250",
"rank": "850",
"state": "Florida",
},
{
"city": "Freeport",
"growth_from_2000_to_2013": "-1.4%",
"latitude": 40.6576022,
"longitude": -73.58318349999999,
"population": "43167",
"rank": "851",
"state": "New York",
},
{
"city": "Moline",
"growth_from_2000_to_2013": "-1.9%",
"latitude": 41.5067003,
"longitude": -90.51513419999999,
"population": "43116",
"rank": "852",
"state": "Illinois",
},
{
"city": "Coachella",
"growth_from_2000_to_2013": "88.4%",
"latitude": 33.6803003,
"longitude": -116.173894,
"population": "43092",
"rank": "853",
"state": "California",
},
{
"city": "<NAME>",
"growth_from_2000_to_2013": "6.9%",
"latitude": 27.4467056,
"longitude": -80.3256056,
"population": "43074",
"rank": "854",
"state": "Florida",
},
{
"city": "Smyrna",
"growth_from_2000_to_2013": "54.9%",
"latitude": 35.9828412,
"longitude": -86.5186045,
"population": "43060",
"rank": "855",
"state": "Tennessee",
},
{
"city": "Bountiful",
"growth_from_2000_to_2013": "3.9%",
"latitude": 40.8893895,
"longitude": -111.880771,
"population": "43023",
"rank": "856",
"state": "Utah",
},
{
"city": "<NAME>",
"growth_from_2000_to_2013": "1.7%",
"latitude": 43.7730448,
"longitude": -88.4470508,
"population": "42970",
"rank": "857",
"state": "Wisconsin",
},
{
"city": "Everett",
"growth_from_2000_to_2013": "12.1%",
"latitude": 42.40843,
"longitude": -71.0536625,
"population": "42935",
"rank": "858",
"state": "Massachusetts",
},
{
"city": "Danville",
"growth_from_2000_to_2013": "-11.0%",
"latitude": 36.5859718,
"longitude": -79.39502279999999,
"population": "42907",
"rank": "859",
"state": "Virginia",
},
import requests
from requests.utils import quote
import re
import json
from articledownloader import scrapers
from autologging import logged, traced
from csv import reader
from time import sleep
@logged
class ArticleDownloader:
def __init__(self, els_api_key=None, sleep_sec=1, timeout_sec=30):
'''
Initialize and set up API keys
:param els_api_key: API key for Elsevier (for Elsevier's API)
:type els_api_key: str
:param sleep_sec: Sleep time between API calls (default = 1s)
:type sleep_sec: int
:param timeout_sec: Max time before timeout (default = 30s)
:type timeout_sec: int
'''
self.els_api_key = els_api_key
self.sleep_sec = sleep_sec
self.timeout_sec = timeout_sec
@traced
def get_dois_from_search(self, query, rows=500, mailto="<EMAIL>"):
'''
Grabs a set of unique DOIs based on a search query using the CrossRef API
:param query: the search string
:type query: str
:param rows: the maximum number of DOIs to find
:type rows: int
:param mailto: mailto address for API
:type mailto: str
:returns: the unique set of DOIs as a list
:rtype: list
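Example (illustrative; the query string and the mailto address are placeholders):

downloader = ArticleDownloader(sleep_sec=1)
dois = downloader.get_dois_from_search('perovskite solar cells', rows=100, mailto='you@example.com')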
'''
dois = []
base_url = 'https://api.crossref.org/works?query='
max_rows = 1000 #Defined by CrossRef API
headers = {
'Accept': 'application/json',
'User-agent': 'mailto:' + mailto
}
if rows <= max_rows: #No multi-query needed
search_url = base_url + query + '&rows=' + str(rows)
response = requests.get(search_url, headers=headers, timeout=self.timeout_sec).json()
for item in response["message"]["items"]:
dois.append(item["DOI"])
else: #Need to split queries
cursor = "*"
keep_paging = True
while (keep_paging):
sleep(self.sleep_sec)
r = requests.get(base_url + query + "&rows=" + str(max_rows) + "&cursor=" + cursor,
headers=headers, timeout=self.timeout_sec)
cursor = quote(r.json()['message']['next-cursor'], safe='')
if len(r.json()['message']['items']) == 0:
keep_paging = False
for item in r.json()['message']['items']:
dois.append(item['DOI'])
return list(set(dois))
@traced
def get_dois_from_journal_issn(self, issn, rows=500, pub_after=2000, mailto="<EMAIL>"):
'''
Grabs a set of unique DOIs based on a journal ISSN using the CrossRef API
:param issn: The ISSN of the journal
:type issn: str
:param rows: the maximum number of DOIs to find
:type rows: int
:param pub_after: the minimum publication year for DOIs returned
:type pub_after: int
:param mailto: mailto address for API
:type mailto: str
:returns: the unique set of DOIs as a list
:rtype: list
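Example (illustrative; the ISSN and the mailto address are placeholders):

downloader = ArticleDownloader()
dois = downloader.get_dois_from_journal_issn('1234-5678', rows=200, pub_after=2015, mailto='you@example.com')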
'''
dois = []
base_url = 'https://api.crossref.org/journals/' + issn + '/works?filter=from-pub-date:' + str(pub_after)
max_rows = 1000 #Defined by CrossRef API
headers = {
'Accept': 'application/json',
'User-agent': 'mailto:' + mailto
}
if rows <= max_rows: #No multi-query needed
search_url = str(base_url) + '&rows=' + str(rows)
response = requests.get(search_url, headers=headers, timeout=self.timeout_sec).json()
for item in response["message"]["items"]:
dois.append(item["DOI"])
else: #Need to split queries
cursor = "*"
keep_paging = True
while (keep_paging):
sleep(self.sleep_sec)
r = requests.get(base_url + "&rows=" + str(max_rows) + "&cursor=" + cursor,
headers=headers, timeout=self.timeout_sec)
cursor = quote(r.json()['message']['next-cursor'], safe='')
if len(r.json()['message']['items']) == 0:
keep_paging = False
for item in r.json()['message']['items']:
dois.append(item['DOI'])
return list(set(dois))
@traced
def get_metadata_from_doi(self, doi, mailto="<EMAIL>"):
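# Fetches CrossRef metadata for a single DOI and returns a dict with keys doi, issn, title, prefix,
# journal, publisher, volume, issue, page, year, num_references and times_cited, or None if the
# record could not be parsed. Illustrative call (the DOI and mailto address are placeholders):
#   record = ArticleDownloader().get_metadata_from_doi('10.1000/xyz123', mailto='you@example.com')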
base_url = 'https://api.crossref.org/works/' + str(doi)
headers = {
'Accept': 'application/json',
'User-agent': 'mailto:' + mailto
}
search_url = str(base_url)
response = requests.get(search_url, headers=headers, timeout=self.timeout_sec).json()
item = response["message"]
metadata_record = None
try:
if "volume" in item:
volume = item["volume"]
else:
volume = None
if "published-print" in item:
year = item['published-print']['date-parts'][0][0]
else:
year = None
if "issue" in item:
issue = item["issue"]
else:
issue = None
if "page" in item:
page = item["page"]
else:
page = None
metadata_record = {
"doi": item["DOI"],
"issn": item["ISSN"][0],
"title": item["title"][0],
"prefix": item["prefix"],
"journal": item["container-title"][0],
"publisher": item["publisher"],
"volume": volume,
"issue": issue,
"page": page,
"year": year,
"num_references": item['references-count'],
"times_cited": item['is-referenced-by-count']
}
except:
pass
return metadata_record
@traced
def get_metadata_from_journal_issn(self, issn, rows=500, pub_after=2000, mailto="<EMAIL>"):
'''
Grabs metadata based on a journal ISSN using the CrossRef API
:param issn: The ISSN of the journal
:type issn: str
:param rows: the maximum number of DOIs to find
:type rows: int
:param pub_after: the minimum publication year for DOIs returned
:type pub_after: int
:param mailto: mailto address for API
:type mailto: str
:returns: the metadata for the articles according to this ISSN
:rtype: list
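Example (illustrative; the ISSN and the mailto address are placeholders):

downloader = ArticleDownloader()
records = downloader.get_metadata_from_journal_issn('1234-5678', rows=200, pub_after=2015, mailto='you@example.com')
for record in records:
    print(record['doi'], record['year'], record['times_cited'])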
'''
metadata_records = []
base_url = 'https://api.crossref.org/journals/' + issn + '/works?filter=from-pub-date:' + str(pub_after)
max_rows = 1000 #Defined by CrossRef API
headers = {
'Accept': 'application/json',
'User-agent': 'mailto:' + mailto
}
if rows <= max_rows: #No multi-query needed
search_url = str(base_url) + '&rows=' + str(rows)
response = requests.get(search_url, headers=headers, timeout=self.timeout_sec).json()
for item in response["message"]["items"]:
try:
if "volume" in item:
volume = item["volume"]
else:
volume = None
if "published-print" in item:
year = item['published-print']['date-parts'][0][0]
else:
year = None
if "issue" in item:
issue = item["issue"]
else:
issue = None
if "page" in item:
page = item["page"]
else:
page = None
metadata_records.append({
"doi": item["DOI"],
"issn": item["ISSN"][0],
"title": item["title"][0],
"prefix": item["prefix"],
"journal": item["container-title"][0],
"publisher": item["publisher"],
"volume": volume,
"issue": issue,
"page": page,
"year": year,
"num_references": item['references-count'],
"times_cited": item['is-referenced-by-count']
})
except:
pass
else: #Need to split queries
cursor = "*"
keep_paging = True
while (keep_paging):
sleep(self.sleep_sec)
r = requests.get(base_url + "&rows=" + str(max_rows) + "&cursor=" + cursor,
headers=headers, timeout=self.timeout_sec)
cursor = quote(r.json()['message']['next-cursor'], safe='')
if len(r.json()['message']['items']) == 0:
keep_paging = False
for item in r.json()['message']['items']:
try:
if "volume" in item:
volume = item["volume"]
else:
volume = None
if "published-print" in item:
year = item['published-print']['date-parts'][0][0]
else:
year = None
if "issue" in item:
issue = item["issue"]
else:
issue = None
if "page" in item:
page = item["page"]
else:
page = None
metadata_records.append({
"doi": item["DOI"],
"issn": item["ISSN"][0],
"title": item["title"][0],
"prefix": item["prefix"],
"journal": item["container-title"][0],
"publisher": item["publisher"],
"volume": volume,
"issue": issue,
"page": page,
"year": year,
"num_references": item['references-count'],
"times_cited": item['is-referenced-by-count']
})
except:
pass
return metadata_records
@traced
def get_xml_from_doi(self, doi, writefile, mode):
'''
Downloads and writes an XML article to a file, given a DOI and operating mode
:param doi: DOI string for the article we want to download
:type doi: str
:param writefile: file object to write to
:type writefile: file
:param mode: choose from {'elsevier' | 'aps'}, depending on how we wish to access the file
:type mode: str
:returns: True on successful write, False otherwise
:rtype: bool
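Example (illustrative; assumes an Elsevier API key was passed to the constructor, and the DOI and
output file name are placeholders):

downloader = ArticleDownloader(els_api_key='YOUR-ELSEVIER-KEY')
with open('article.xml', 'wb') as f:
    success = downloader.get_xml_from_doi('10.1016/j.example.2020.012345', f, 'elsevier')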
'''
if mode == 'elsevier':
try:
xml_url='https://api.elsevier.com/content/article/doi/' + doi + '?view=FULL'
headers = {
'X-ELS-APIKEY': self.els_api_key,
'Accept': 'text/xml'
}
r = requests.get(xml_url, stream=True, headers=headers, timeout=self.timeout_sec)
if r.status_code == 200:
for chunk in r.iter_content(2048):
writefile.write(chunk)
return True
except:
# API download limit exceeded
return False
return False
if mode == 'aps':
try:
xml_url='http://harvest.aps.org/v2/journals/articles/' + doi
headers = {
'Accept': 'text/xml'
}
r = requests.get(xml_url, stream=True, headers=headers, timeout=self.timeout_sec)
if r.status_code == 200:
for chunk in r.iter_content(2048):
writefile.write(chunk)
return True
except:
# API download limit exceeded
return False
return False
return False
@traced
def get_html_from_doi(self, doi, writefile, mode):
'''
Downloads and writes an HTML article to a file, given a DOI and operating mode
:param doi: DOI string for the article we want to download
:type doi: str
:param writefile: file object to write to
:type writefile: file
:param mode: choose from {'elsevier' | 'springer' | 'acs' | 'ecs' | 'rsc' | 'nature' | 'wiley' | 'aaas' | 'emerald'}, depending on how we wish to access the file
:type mode: str
:returns: True on successful write, False otherwise
:rtype: bool
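Example (illustrative; the DOI and output file name are placeholders):

downloader = ArticleDownloader()
with open('article.html', 'wb') as f:
    success = downloader.get_html_from_doi('10.1007/s00000-000-0000-0', f, 'springer')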
'''
if mode == 'springer':
base_url = 'http://link.springer.com/'
api_url = base_url + doi + '.html'
try:
headers = {
'Accept': 'text/html',
'User-agent': 'Mozilla/5.0'
}
r = requests.get(api_url, stream=True, headers=headers, timeout=self.timeout_sec)
if r.status_code == 200:
for chunk in r.iter_content(2048):
writefile.write(chunk)
return True
except:
return False
return False
if mode == 'wiley':
base_url = 'http://onlinelibrary.wiley.com/doi/'
api_url = base_url + doi + '/full'
try:
headers = {
'Accept': 'text/html',
'User-agent': 'Mozilla/5.0'
}
r = requests.get(api_url, stream=True, headers=headers, timeout=self.timeout_sec)
if r.status_code == 200:
for chunk in r.iter_content(2048):
writefile.write(chunk)
return True
except:
return False
return False
if mode == 'acs':
base_url = 'http://pubs.acs.org/doi/full/'
api_url = base_url + doi
try:
headers = {
'Accept': 'text/html',
'User-agent': 'Mozilla/5.0'
}
r = requests.get(api_url, stream=True, headers=headers, timeout=self.timeout_sec)
if r.status_code == 200:
for chunk in r.iter_content(2048):
writefile.write(chunk)
return True
except:
return False
return False
if mode == 'emerald':
base_url = 'http://www.emeraldinsight.com/doi/full/'
api_url = base_url + doi
try:
headers = {
'Accept': 'text/html',
'User-agent': 'Mozilla/5.0'
}
r = requests.get(api_url, stream=True, headers=headers, timeout=self.timeout_sec)
if r.status_code == 200:
for chunk in r.iter_content(2048):
writefile.write(chunk)
return | |
'folder_index':
newIndexItem = menu.Append(wx.ID_ANY, "Create new index")
self.Bind(wx.EVT_MENU, lambda e: self.onNewIndex(e, dataSourceTreeNode=dataSourceTreeNode, node=item), newIndexItem)
elif dataSourceTreeNode.nodeType in ('folder_column', 'table'):
newColumnItem = menu.Append(wx.ID_ANY, "Add new column")
self.Bind(wx.EVT_MENU, lambda e: self.onNewColumn(e, dataSourceTreeNode=dataSourceTreeNode, node=node), newColumnItem)
elif dataSourceTreeNode.nodeType in ('column') :
copyColumnItem = menu.Append(wx.ID_COPY, "Copy \tCtrl+C")
copyColumnItem.SetBitmap(self.fileOperations.getImageBitmap(imageName="copy_edit_co.png"))
renameColumnItem = menu.Append(wx.ID_ANY, "Rename Column \tF2")
self.Bind(wx.EVT_MENU, lambda e: self.onColumnCopy(e, dataSourceTreeNode=dataSourceTreeNode, node=node), copyColumnItem)
self.Bind(wx.EVT_MENU, lambda e: self.onRenameColumn(e, dataSourceTreeNode=dataSourceTreeNode, node=node), renameColumnItem)
if len(nodes) == 2:
bmp = wx.MenuItem(menu, wx.NewIdRef(), "Compare with each other")
bmp.SetBitmap(wx.Bitmap(self.fileOperations.getImageBitmap(imageName="compare.png")))
compareMenu = menu.Append(bmp)
self.Bind(wx.EVT_MENU, lambda e: self.onCompareDatabase(e, nodes), compareMenu)
refreshBmp = wx.MenuItem(menu, ID_ROOT_REFERESH, "&Refresh \tF5")
refreshBmp.SetBitmap(wx.Bitmap(self.fileOperations.getImageBitmap(imageName="database_refresh.png")))
rootRefresh = menu.Append(refreshBmp)
infoMenuItem = wx.MenuItem(menu, ID_CONNECTION_PROPERTIES, "Properties")
infoBmp = wx.ArtProvider.GetBitmap(wx.ART_INFORMATION, wx.ART_MENU, (16, 16))
infoMenuItem.SetBitmap(infoBmp)
item4 = menu.Append(infoMenuItem)
if dataSourceTreeNode != None and dataSourceTreeNode.nodeType == 'connection':
menu.AppendSeparator()
if self.isAllConnected(nodes=nodes):
def onDisconnectDb(event):
logger.debug('inner onDisconnectDb')
item1 = menu.Append(ID_DISCONNECT_DB, "Disconnect")
self.Bind(wx.EVT_MENU, lambda e: self.onDisconnectDb(e, nodes), item1)
elif self.isAllDisconnected(nodes=nodes):
item2 = menu.Append(ID_CONNECT_DB, "Connect")
self.Bind(wx.EVT_MENU, lambda e: self.onConnectDatabase(e, nodes), item2)
else:
item2 = menu.Append(ID_CONNECT_DB, "Connect")
self.Bind(wx.EVT_MENU, lambda e: self.onConnectDatabase(e, nodes), item2)
item1 = menu.Append(ID_DISCONNECT_DB, "Disconnect")
self.Bind(wx.EVT_MENU, lambda e: self.onDisconnectDb(e, nodes), item1)
deleteMenuItem = wx.MenuItem(menu, wx.ID_DELETE, "Delete reference \t Delete")
delBmp = wx.ArtProvider.GetBitmap(wx.ART_DELETE, wx.ART_MENU, (16, 16))
deleteMenuItem.SetBitmap(delBmp)
delMenu = menu.Append(deleteMenuItem)
deleteWithDatabaseMenuItem = wx.MenuItem(menu, ID_deleteWithDatabase, "Delete with database \t Shift + Delete")
delBmp = wx.ArtProvider.GetBitmap(wx.ART_DELETE, wx.ART_MENU, (16, 16))
deleteWithDatabaseMenuItem.SetBitmap(delBmp)
deleteWithDatabaseMenu = menu.Append(deleteWithDatabaseMenuItem)
self.Bind(wx.EVT_MENU, lambda e: self.onDeleteConnection(e, nodes), delMenu)
self.Bind(wx.EVT_MENU, lambda e: self.onDeleteWithDatabaseTable(e, nodes), deleteWithDatabaseMenu)
self.Bind(wx.EVT_MENU, lambda e: self.onProperties(e, nodes), item4)
self.Bind(wx.EVT_MENU, lambda e: self.onRefresh(e, nodes), rootRefresh)
return menu
def onGenerateSql(self, event, dataSourceTreeNode=None):
logger.debug('onGenerateSql')
sqlText = ''
manageSqliteDatabase = ManageSqliteDatabase(connectionName=dataSourceTreeNode.dataSource.connectionName ,
databaseAbsolutePath=dataSourceTreeNode.dataSource.filePath)
if event.Id == ID_SELECT_SQL:
sqlText = manageSqliteDatabase.getSelectForTable(dataSourceTreeNode.nodeLabel)
if event.Id == ID_INSERT_SQL:
sqlText = manageSqliteDatabase.getInsertForTable(dataSourceTreeNode.nodeLabel)
if event.Id == ID_UPDATE_SQL:
sqlText = manageSqliteDatabase.getUpdateForTable(dataSourceTreeNode.nodeLabel)
if event.Id == ID_DELETE_SQL:
sqlText = manageSqliteDatabase.getDeleteForTable(dataSourceTreeNode.nodeLabel)
logger.debug(f'{sqlText}')
frame = GenerateSqlFrame(self, 'Generate Sql', size=(513, 441), sqlText=sqlText)
frame.Show()
def onDeleteWithDatabaseTable(self, event, nodes=None):
logger.debug('onDeleteWithDatabaseTable')
# self.onDeleteConnection(event)
##################################################################################
# sqlExecuter = SQLExecuter(database='_opal.sqlite')
# selectedItemId = self.tree.GetSelection()
# dbFilePath = sqlExecuter.getDbFilePath(selectedItemText)
# logger.debug("dbFilePath: %s", dbFilePath)
fileOperations = FileOperations()
for node in nodes:
selectedItemText = self.GetItemText(node)
dataSourceTreeNode = self.GetItemData(node)
fileRemoved = fileOperations.removeFile(filename=dataSourceTreeNode.dataSource.filePath)
if selectedItemText and fileRemoved:
self.sqlExecuter.removeConnctionRow(selectedItemText)
self.initialize()
##################################################################################
def onColumnCopy(self, event, dataSourceTreeNode=None, node=None):
logger.debug('onColumnCopy')
logger.debug('onTreeCopy')
nodes = self.GetSelections()
nodeTexts = []
for node in nodes:
nodeTexts.append(self.GetItemText(node))
self.dataObj = wx.TextDataObject()
self.dataObj.SetText("\n".join(nodeTexts))
if wx.TheClipboard.Open():
wx.TheClipboard.SetData(self.dataObj)
wx.TheClipboard.Close()
else:
wx.MessageBox("Unable to open the clipboard", "Error")
def onRenameColumn(self, event, dataSourceTreeNode=None, node=None):
logger.debug('onRenameColumn')
initialColumnName = self.GetItemText(node)
dlg = wx.TextEntryDialog(self, 'Rename column ' + initialColumnName, 'Rename column ' + initialColumnName, 'Python')
dlg.SetValue(initialColumnName)
if dlg.ShowModal() == wx.ID_OK:
logger.info('You entered: %s\n', dlg.GetValue())
if dlg.GetValue() != initialColumnName:
logger.info('update table execute')
if os.path.isfile(dataSourceTreeNode.dataSource.filePath):
'''
First you rename the old table:
ALTER TABLE orig_table_name RENAME TO tmp_table_name;
Then create the new table, based on the old table but with the updated column name:
Then copy the contents across from the original table.
'''
logger.debug("TODO logic for rename column goes here.")
# dbObjects = ManageSqliteDatabase(connectionName=connectionName , databaseAbsolutePath=databaseAbsolutePath).executeText(text)
dlg.Destroy()
def onCreateErDiagramItem(self, event, dataSourceTreeNode=None, node=None):
logger.debug('onCreateErDiagramItem')
dbObjects = ManageSqliteDatabase(connectionName=dataSourceTreeNode.dataSource.connectionName , databaseAbsolutePath=dataSourceTreeNode.dataSource.filePath).getObject()
createErDiagramFrame = CreateErDiagramFrame(None)
createErDiagramFrame.setDbObjects(dbObjects=dbObjects)
createErDiagramFrame.Show()
def onNewColumn(self, event, dataSourceTreeNode=None, node=None):
logger.debug('onNewColumn')
logger.debug("TODO add a new column")
def onDeleteTable(self, event, dataSourceTreeNode=None, node=None):
logger.debug('onDeleteTable')
nodes = self.GetSelections()
for node in nodes:
dataSourceTreeNode = self.GetItemData(node)
text = "DROP TABLE '{}'".format(dataSourceTreeNode.nodeLabel)
dbObjects = ManageSqliteDatabase(connectionName=dataSourceTreeNode.dataSource.connectionName , databaseAbsolutePath=dataSourceTreeNode.dataSource.filePath).executeText(text)
self.Delete(node)
def onEditTable(self, event, dataSourceTreeNode=None, node=None):
logger.debug('onEditTable')
def onRenameConnection(self, event, dataSourceTreeNode=None, node=None):
logger.debug('onRenameConnection')
'''
1. disconnect Connection.
2. fire database conn alter connectionName
3. call init method to load all the connection
'''
def onRenameTable(self, event, dataSourceTreeNode=None, node=None):
logger.debug('onRenameTable')
oldTableName = initialTableName = self.GetItemText(node)
dlg = wx.TextEntryDialog(self, 'Rename table {} to'.format(initialTableName), 'Rename table {} '.format(initialTableName), 'Python')
dlg.SetValue(initialTableName)
if dlg.ShowModal() == wx.ID_OK:
logger.info('You entered: %s\n', dlg.GetValue())
if dlg.GetValue() != initialTableName:
logger.info('update table execute')
newTableName = dlg.GetValue()
if os.path.isfile(dataSourceTreeNode.dataSource.filePath):
'''
First you rename the old table:
'''
logger.debug("TODO logic to rename table should go here.")
# dropTableSql="DROP TABLE '{}'".format()
alterTableSql = f"ALTER TABLE '{oldTableName}' RENAME TO {newTableName}"
db = ManageSqliteDatabase(connectionName=dataSourceTreeNode.dataSource.connectionName , databaseAbsolutePath=dataSourceTreeNode.dataSource.filePath)
try:
db.executeText(alterTableSql)
except Exception as e:
self.consoleOutputLog(e)
self.onRefresh(event, nodes=[node])
dlg.Destroy()
def onCopyCreateTableStatement(self, event, dataSourceTreeNode=None, node=None):
logger.debug('onCopyCreateTableStatement')
def onNewTable(self, event, dataSourceTreeNode=None, node=None):
logger.debug('onNewTable')
connectionName = self.GetItemText(self.GetItemParent(node))
newTableName = SQLUtils().definingTableName(connectionName)
tableFrame = CreateTableFrame(None, Title='Table creation', size=(1000, 600))
# frame = CreateTableFrame(None, 'table creation')
# tableDict = dict()
# tableFrame.setData(tableDict)
tableFrame.Show()
# app.MainLoop()
def onNewView(self, event, dataSourceTreeNode=None, node=None):
logger.debug('onNewView')
# tableFrame = CreateTableFrame(None, 'Table creation')
def onNewIndex(self, event, dataSourceTreeNode=None, node=None):
logger.debug('onNewIndex')
logger.debug("TODO add a new Index")
def isAllConnected(self, nodes=None):
allConnected = True
for node in nodes:
dataSourceTreeNode = self.GetItemData(node)
if not dataSourceTreeNode.dataSource.isConnected and dataSourceTreeNode.nodeType == 'connection':
allConnected = False
break
return allConnected
def isAllDisconnected(self, nodes=None):
allDisconnected = True
for node in nodes:
dataSourceTreeNode = self.GetItemData(node)
if dataSourceTreeNode.dataSource.isConnected and dataSourceTreeNode.nodeType == 'connection':
allDisconnected = False
break
return allDisconnected
def isAllNodeOfGivenDepth(self, depth=None, nodes=None):
allNodeOfGivenDepth = True
for node in nodes:
dataSourceTreeNode = self.GetItemData(node)
logger.debug(dataSourceTreeNode.dataSource.connectionName)
if dataSourceTreeNode.depth != depth:
allNodeOfGivenDepth = False
break
return allNodeOfGivenDepth
def onOpenSqlEditorTab(self, event, nodes):
logger.debug('onOpenSqlEditorTab')
for node in nodes:
dataSourceTreeNode = self.GetItemData(node)
pub.sendMessage('onNewWorksheet', event=event, dataSourceTreeNode=dataSourceTreeNode)
def onProperties(self, event, nodes):
if event.Id == ID_CONNECTION_PROPERTIES:
logger.debug(f'onProperties {nodes}')
frame = None
title=''
for node in nodes:
dataSourceTreeNode = self.GetItemData(node)
logger.debug(dataSourceTreeNode)
if dataSourceTreeNode.depth == 0:
title="Connection properties"
frame = PropertiesFrame(None, title, size=(500, 420), depth=dataSourceTreeNode.depth, dataSource=dataSourceTreeNode.dataSource)
frame.Show()
def onExport(self, event, nodes):
logger.debug('onExport')
def onImport(self, event, nodes):
logger.debug('onImport')
for node in nodes:
dataSourceTreeNode = self.GetItemData(node)
frame = ImportingCsvExcelFrame(None, 'Import CSV Excel', dataSourceTreeNode.dataSource.connectionName)
frame.Show()
def onDeleteConnection(self, event, nodes=None):
logger.debug('onDeleteConnection')
for node in nodes:
dataSourceTreeNode = self.GetItemData(node)
logger.debug(dataSourceTreeNode.dataSource.connectionName)
SQLExecuter().removeConnctionRow(dataSourceTreeNode.dataSource.connectionName)
self.Delete(node)
# self.onRefresh(event, nodes)
def onCompareDatabase(self, event, nodes):
logger.debug('onCompareDatabase')
# asf=self.OnCompareItems(nodes[0], nodes[1])
# print(asf)
def onDisconnectDb(self, event, nodes):
logger.debug('onDisconnectDb')
# selectedItem = self.GetSelections()
for node in nodes:
dataSourceTreeNode = self.GetItemData(node)
dataSourceTreeNode.dataSource.isConnected = False
self.SetItemHasChildren(node, self.hasNodeChildren(dataSourceTreeNode))
self.DeleteChildren(node)
def onConnectDatabase(self, event, nodes):
logger.debug('onConnectDatabase')
self.onConnectDb(event, nodes)
self.connectingDatabase(event, nodes)
def onConnectDb(self, event, nodes):
'''
This method is used to expand the database navigator tree.
'''
logger.debug('onConnectDb')
for node in nodes:
itemId = node
dataSourceTreeNode = self.GetItemData(node)
dataSourceTreeNode.dataSource.isConnected = True
self.SetItemHasChildren(itemId, self.hasNodeChildren(dataSourceTreeNode))
# logic to connect
self.deleteChildren(itemId)
dataSource = dataSourceTreeNode.dataSource
if os.path.isfile(dataSourceTreeNode.dataSource.filePath):
manageSqliteDatabase = ManageSqliteDatabase(connectionName=dataSourceTreeNode.dataSource.connectionName , databaseAbsolutePath=dataSourceTreeNode.dataSource.filePath)
sqlTypeObjectList = manageSqliteDatabase.getSqlObjects()
# group for table , view, index and trigger type
for key, group in itertools.groupby(sqlTypeObjectList, key=lambda sqlTypeObj:sqlTypeObj.type):
# logger.debug(f'{key}:{group}')
groupList = list(group)
nodeLabel = f'{key} ( {len(groupList)})'
imageName = f"folder.png"
if key in ['view', 'table']:
imageName = f"folder_{key}.png"
dataSourceTreeNode = DataSourceTreeNode(dataSource=dataSource, nodeLabel=nodeLabel, imageName=imageName, children=None, nodeType=f'folder_{key}')
tableNode = self.appendNode(targetNode=itemId, nodeLabel=dataSourceTreeNode.nodeLabel , dataSourceTreeNode=dataSourceTreeNode)
for sqlTypeObject in groupList:
dataSourceTreeNode = DataSourceTreeNode(dataSource=dataSource, nodeLabel=f'{sqlTypeObject.name}', imageName=f"{sqlTypeObject.type}.png", children=None, nodeType=f"{sqlTypeObject.type}")
dataSourceTreeNode.setSqlType(sqlTypeObject)
child_itemId_1 = self.appendNode(targetNode=tableNode, nodeLabel=f'{sqlTypeObject.name}' , dataSourceTreeNode=dataSourceTreeNode)
if sqlTypeObject.type == 'table':
dataSourceTreeNode = DataSourceTreeNode(dataSource=dataSource, nodeLabel=nodeLabel, imageName=f"folder.png", children=None, nodeType="folder_column")
dataSourceTreeNode.setSqlType(sqlTypeObject)
child1_1 = self.appendNode(targetNode=child_itemId_1, nodeLabel=f'Columns ({len(sqlTypeObject.columns)})', dataSourceTreeNode=dataSourceTreeNode)
dataSourceTreeNode = DataSourceTreeNode(dataSource=dataSource, nodeLabel=nodeLabel, imageName=f"folder.png", children=None, nodeType="folder_unique_key")
dataSourceTreeNode.setSqlType(sqlTypeObject)
child1_2 = self.appendNode(targetNode=child_itemId_1, nodeLabel='Unique Keys', dataSourceTreeNode=dataSourceTreeNode)
dataSourceTreeNode = DataSourceTreeNode(dataSource=dataSource, nodeLabel=nodeLabel, imageName=f"folder.png", children=None, nodeType="folder_foreign_key")
dataSourceTreeNode.setSqlType(sqlTypeObject)
child1_3 = self.appendNode(targetNode=child_itemId_1, nodeLabel='Foreign Keys', dataSourceTreeNode=dataSourceTreeNode)
dataSourceTreeNode = DataSourceTreeNode(dataSource=dataSource, nodeLabel=nodeLabel, imageName=f"folder.png", children=None, nodeType="folder_references")
dataSourceTreeNode.setSqlType(sqlTypeObject)
child1_4 = self.appendNode(targetNode=child_itemId_1, nodeLabel='References', dataSourceTreeNode=dataSourceTreeNode)
for column in sqlTypeObject.columns:
dataSourceTreeNode = DataSourceTreeNode(dataSource=dataSource, nodeLabel=f'{column.name}', imageName=self.getColumnImageName(column), children=None, nodeType="column")
dataSourceTreeNode.setSqlType(sqlTypeObject)
child_itemId_1_0 = self.appendNode(targetNode=child1_1, nodeLabel=f'{column.name}' , dataSourceTreeNode=dataSourceTreeNode)
else:
updateStatus = f"Unable to connect '{ dataSourceTreeNode.dataSource.filePath } , No such file. "
self.consoleOutputLog(updateStatus)
font = self.GetTopLevelParent().statusbar.GetFont()
font.SetWeight(wx.BOLD)
self.GetTopLevelParent().statusbar.SetFont(font)
self.GetTopLevelParent().statusbar.SetForegroundColour(wx.RED)
self.GetTopLevelParent().statusbar.SetStatusText(updateStatus, 1)
logger.error(updateStatus)
def getColumnImageName(self, column):
imageName = "string.png"
if column.primaryKey == 1:
imageName = 'key.png'
elif column.dataType in ['INTEGER', 'INT']:
imageName = "column.png"
elif column.dataType in ['VARCHAR', 'CHAR', 'REAL', 'TEXT']:
imageName = "textfield.png"
return imageName
def consoleOutputLog(self, exception=None):
now = datetime.datetime.now()
strftime = now.strftime("%Y-%m-%d %H:%M:%S")
newline = "\n"
if self.GetTopLevelParent()._mgr.GetPane("consoleOutput").window.text.Value.strip() == "":
newline = ""
self.GetTopLevelParent()._mgr.GetPane("consoleOutput").window.text.AppendText("{}{} {}".format(newline, strftime, exception))
# def onEditTable(self, event):
# logger.debug('onEditTable')
def deleteChildren(self, itemId):
'''
node: itemId
'''
return TreeCtrl.DeleteChildren(self, itemId)
def _OnBeginEdit(self, evt):
logger.debug('_OnBeginEdit')
if not self._editlabels:
evt.Veto()
else:
item = evt.GetItem()
if self.DoBeginEdit(item):
evt.Skip()
else:
evt.Veto()
def _OnEndEdit(self, evt):
logger.debug('_OnEndEdit')
if self._editlabels:
item = evt.GetItem()
newlabel = evt.GetLabel()
if self.DoEndEdit(item, newlabel):
evt.Skip()
return
evt.Veto()
def doSetupImageList(self):
| |
"""
Copyright (c) 2022 ZOOMi Technologies Inc.
all rights reserved.
This source code is licensed under the license found in the
LICENSE file in the root directory of this source tree.
maintainers : <EMAIL>, <EMAIL>
This file contains the image to video converter.
"""
import os
import cv2
from PIL import Image
import shutil
from mongo_manager import MongoDBmanager
from s3_manager import s3_bucket_manager
from datetime import timedelta
from datetime import datetime
from bson.objectid import ObjectId
from annotation_processor import inference
import random
import math
import configparser
# Read settings from config file
params = configparser.ConfigParser(os.environ)
# logging configuration
from logger import get_debug_logger
dataset_logger = get_debug_logger('image_dataset','./logs/image_processor.log')
CONFIG_FILE_PATH = 'config.cfg'
params.read(CONFIG_FILE_PATH)
MDB_USER = f"{params.get('MongoDB', 'user_name')}"
MDB_PASS = f"{params.get('MongoDB', 'password')}"
MDB_NAME = f"{params.get('MongoDB', 'db_name')}"
S3_REGION = f"{params.get('S3', 'region')}"
S3_ACESS_KEY = f"{params.get('S3', 'access_key_id')}"
S3_SECRET_KEY = f"{params.get('S3', 'secret_access_key')}"
S3_BUCKET = f"{params.get('S3', 'bucket_name')}"
TASK_BASE = f"{params.get('Folders', 'task_base')}"
CLIP_BASE = f"{params.get('Folders', 'clip_base')}"
#number of cpu cores to use
PROCESSES = int(f"{params.get('Cores', 'cpu_cores')}")
# Mongo DB configurations
task_db = MongoDBmanager(MDB_USER, MDB_PASS, MDB_NAME, 'AnnotationTask')
mdb_progress = MongoDBmanager(MDB_USER, MDB_PASS, MDB_NAME,'AnnotationContentUpload')
project_update = MongoDBmanager(MDB_USER, MDB_PASS, MDB_NAME,'AnnotationProject')
frame_db = MongoDBmanager(MDB_USER, MDB_PASS, MDB_NAME,'AnnotationFrame')
# S3 configurations
s3_bucket = s3_bucket_manager(S3_REGION, S3_ACESS_KEY, S3_SECRET_KEY,S3_BUCKET)
# Get the resizing dimension for the image
def get_resized_image_dim(image, max_w, max_h, re_scale_w, re_scale_h):
dim = None
# horizontal_image = False
# vertical_image = False
(h, w) = image.shape[:2]
# R = w/h
# if R > 0:
# horizontal_image = True
# elif R < 0:
# vertical_image = True
# case 1: im_w < re_scale_w and im_h < re_scale_h
if w < re_scale_w and h < re_scale_h:
dim = (re_scale_w, re_scale_h)
# case 2: im_w < re_scale_w and im_h > re_scale_h
elif w < re_scale_w and h > re_scale_h:
dim = (re_scale_w, h)
# case 3: im_w > re_scale_w and im_h < re_scale_h
elif w > re_scale_w and h < re_scale_h:
dim = (w, re_scale_h)
# case 4: im_w > re_scale_w and im_h > re_scale_h
elif w > re_scale_w and h > re_scale_h:
dim = (w, h)
# case 5: im_w = max_w and im_h < re_scale_h
elif w == max_w and h < re_scale_h:
dim = (w, re_scale_h)
# case 6: im_w = max_w and im_h > re_scale_h
elif w == max_w and h > re_scale_h:
dim = (w, h)
# case 7: im_w < re_scale_w and im_h = max_h
elif w < re_scale_w and h == max_h:
dim = (re_scale_w, h)
# case 8: im_w > re_scale_w and im_h = max_h
elif w > re_scale_w and h == max_h:
dim = (w, h)
# case 9: im_w = max_w and im_h = max_h
elif w == max_w and h == max_h:
dim = (w, h)
return dim
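# Example (illustrative sketch, not part of the original pipeline): with a target
# canvas of 1920x1080 and 75% rescale thresholds of 1440x810, a 640x480 frame hits
# case 1 and is scaled up to the thresholds, while a 1600x900 frame hits case 4 and
# keeps its own size. Assumes numpy and the get_resized_image_dim() defined above.
def _example_resized_dim():
    import numpy as np
    small = np.zeros((480, 640, 3), dtype=np.uint8)    # (h, w) = (480, 640)
    large = np.zeros((900, 1600, 3), dtype=np.uint8)   # (h, w) = (900, 1600)
    print(get_resized_image_dim(small, 1920, 1080, 1440, 810))   # -> (1440, 810)
    print(get_resized_image_dim(large, 1920, 1080, 1440, 810))   # -> (1600, 900)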
# resize and center the image
def resize_and_center_image(image_path, max_w, max_h, re_scale_w, re_scale_h):
image = cv2.imread(image_path)
dim = get_resized_image_dim(image, max_w, max_h, re_scale_w, re_scale_h)
if dim is not None:
resized = cv2.resize(image, dim, interpolation = cv2.INTER_AREA)
t, b, l, r = center_image(max_w, max_h, resized)
img_pad = cv2.copyMakeBorder(resized, t, b, l, r, cv2.BORDER_CONSTANT, value=(0, 0, 0))
return img_pad, t, b, l, r
# center the resized image by adding borders (top, bottom, left, right)
def center_image(max_w, max_h, resized_image):
(h, w) = resized_image.shape[:2]
delta_w = max_w - w
delta_h = max_h - h
# top and bottom pad widths (any odd leftover pixel goes to the bottom)
pd_t = delta_h // 2
pd_b = delta_h - pd_t
# left and right pad widths (any odd leftover pixel goes to the right)
pd_l = delta_w // 2
pd_r = delta_w - pd_l
return pd_t, pd_b, pd_l, pd_r
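# Example (illustrative sketch): centring a 1440x810 frame on a 1920x1080 canvas
# needs 135 px of padding above and below and 240 px on each side. Assumes numpy
# and the center_image() defined above.
def _example_center_image():
    import numpy as np
    resized = np.zeros((810, 1440, 3), dtype=np.uint8)
    t, b, l, r = center_image(1920, 1080, resized)
    print(t, b, l, r)   # -> 135 135 240 240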
# pad images to a given size
def pad_image(image_path, max_h, max_w):
frame = cv2.imread(image_path)
h,w,c = frame.shape
pad_w = max_w - w
if pad_w <= 0:
pad_w = 0
pad_h = max_h - h
if pad_h <= 0:
pad_h = 0
img_pad = cv2.copyMakeBorder(frame, 0, pad_h, 0, pad_w, cv2.BORDER_CONSTANT, value=(0, 0, 0))
return img_pad
# Resize images to given resolutions for a task
def reshape_one_task(task_folder, m_width, m_height):
re_scale_w = 0.75*m_width
re_scale_h = 0.75*m_height
border_info = {}
for f_id,file in enumerate(os.listdir(task_folder)):
if file.endswith(".jpg") or file.endswith(".jpeg") or file.endswith("png"):
image_path = os.path.join(task_folder, file)
# Pad images
# paded_image = pad_image(image_path, m_height, m_width)
paded_image, t, b, l, r = resize_and_center_image(image_path, m_width, m_height, re_scale_w, re_scale_h)
border_info[f'{f_id}'] = (t, b, l, r)
os.remove(image_path)
cv2.imwrite(image_path, paded_image)
return border_info
def update_mdb_progress(upload_id, object_update, project_ID):
mdb_progress.delete_one_document(ObjectId(str(upload_id)))
ret_id = mdb_progress.post_one_document(object_update)
project_update.find_one_push({'_id': ObjectId(str(project_ID))}, {'contentUploads': ret_id})
return ret_id
def delete_mdb_tasks(content_path, project_ID, permision_flag):
if permision_flag:
doc = task_db.get_documents({"projectId": ObjectId(project_ID)})
for data in doc:
db_vid = '-'.join(data['taskName'].split('-')[2:])
_id_ = data['_id']
if content_path.split('/')[-1].split('.')[0] == db_vid:
task_db.delete_one_document(ObjectId(str(_id_)))
# Video Generating function
def generate_video(image_folder, video_name, fps):
annotation_object = []
images = [img for img in os.listdir(image_folder)
if img.endswith(".jpg") or
img.endswith(".jpeg") or
img.endswith("png")]
frame = cv2.imread(os.path.join(image_folder, images[0]))
# setting the frame width, height width
# the width, height of first image
height, width, layers = frame.shape
codec = cv2.VideoWriter_fourcc(*'avc1')
out = cv2.VideoWriter(video_name, codec, fps, (width, height))
# Appending the images to the video one by one
for frame_id,image in enumerate(images):
out.write(cv2.imread(os.path.join(image_folder, image)))
anno_object = {
"frameId": frame_id,
"status": 0,
"boxes": [],
"taskId": None,
"commentBoxes": [],
'isEmpty': True
}
annotation_object.append(anno_object)
# Deallocating memories taken for window creation
cv2.destroyAllWindows()
out.release() # releasing the video generated
return annotation_object
# this function select frames from the user dataset
# Parameters :
# input : path : path of the original dataset
# frames_per_task : number of frames to select for a task
# Return: max_h, max_w : maximum height and width of the images
# list of original resolutions of images to put in the Annotation task
def move_taks_images(content_path, task_path, frames_per_task):
# read file list from content_path
file_list = []
for file in os.listdir(content_path):
if file.endswith(".jpg") or file.endswith(".jpeg") or file.endswith("png"):
file_list.append(file)
# select frames_per_task images from list and move to task folder
if len(file_list) > frames_per_task:
task_list = random.sample(file_list, frames_per_task)
else:  # len(file_list) <= frames_per_task, so task_list is always defined
task_list = file_list
for file in task_list:
if file.endswith(".jpg") or file.endswith(".jpeg") or file.endswith("png"):
# print(file)
os.rename(content_path + '/'+file, task_path + file)
# shutil.copyfile(source + f, destination + f)
# calculate the max height and widths per task and create list of original resolution
max_h = 0
max_w = 0
original_resolusion = {}
for f_id,image in enumerate(os.listdir(task_path)):
if image.endswith('.jpg') or image.endswith(".jpeg") or image.endswith("png"):
im = cv2.imread(os.path.join(task_path, image))
h,w,c = im.shape
original_resolusion[f'{f_id}'] = (w, h)
if h >= max_h:
max_h = h
if w >= max_w:
max_w = w
return max_w, max_h, original_resolusion, len(task_list)
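# Example (illustrative sketch, paths are placeholders): select up to 120 frames for
# one task and report the largest frame size, using the move_taks_images() above.
# Note that task_path must end with '/' because the function concatenates strings.
def _example_move_taks_images():
    m_w, m_h, original_res, n_frames = move_taks_images('./contents/upload-1', './contents/task-0-upload-1/', 120)
    print(f'{n_frames} frames moved, max resolution {m_w}x{m_h}')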
"""
inputs:
content_path : path to the folder where images are saved
"""
def content_process_imaged(content_path, frames_per_task, project_ID, upload_id, permision_flag=True):
# response array
res = []
frame_rate = 4
total_frames = 0
# count the number of images in the content path
for file in os.listdir(content_path):
if file.endswith(".jpg") or file.endswith(".jpeg") or file.endswith("png"):
total_frames += 1
print(f' Total frames : {total_frames}')
# calculate task count
if total_frames < frames_per_task:
# raise Exception("Not enough frames")
Total_task_count = 1
else:
Total_full_task_count = math.floor(total_frames / frames_per_task)
print(Total_full_task_count)
if (total_frames % frames_per_task) != 0:
Total_task_count = Total_full_task_count + 1
elif (total_frames % frames_per_task) == 0:
Total_task_count = Total_full_task_count
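# (illustrative example) 250 frames at 120 per task -> floor(250/120) = 2 full tasks
# plus one partial task for the remaining 10 frames, i.e. Total_task_count = 3,
# which matches math.ceil(total_frames / frames_per_task)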
print(f' Total tasks : {Total_task_count}')
# Progress start
create_time = datetime.utcnow()
progress_update = {
"_id": ObjectId(upload_id),
"projectId": ObjectId(project_ID),
"createdAt": create_time,
"finishedAt": "",
"errorMessage": {},
"status": int(0),
"request_type": int(0),
"request_annotation_version": int(0),
"sourceFilePath": content_path,
"progress": 0,
"taskCount": 0,
"frames_per_task": frames_per_task
}
# Progress update to mongo DB
ret_id = update_mdb_progress(upload_id, progress_update, project_ID)
delete_mdb_tasks(content_path, project_ID, permision_flag)
for task_count in range(Total_task_count):
# create a taskID
taskID = 'task-' + str(task_count) + '-' + str(content_path).split('/')[-2]
task_folder = './contents/' + taskID + '/'
# Create a folder for task
if not os.path.exists(task_folder):
os.mkdir(task_folder)
# move frames_per_task images to this folder from content_path
m_w, m_h, original_resolusion, frames_in_task = move_taks_images(content_path, task_folder, frames_per_task)
elif os.path.exists(task_folder):
# first delete it and create a new one
shutil.rmtree(task_folder)
os.makedirs(task_folder)
m_w, m_h, original_resolusion, frames_in_task = move_taks_images(content_path, task_folder, frames_per_task)
out_put_path = './contents/' + taskID + '/' + taskID + '.mp4'
print(out_put_path)
# reshape images
border_info = reshape_one_task(task_folder, m_w, m_h)
# add border_info to the original_resolusion dictionary
for f_id,info in enumerate(border_info):
print("for border info")
# genarate video from images and frame contents for the AnnotationFrame
annotation_object = generate_video(task_folder, out_put_path, frame_rate)
s3_path = s3_bucket.s3_upload(str(content_path).split('/')[-2], out_put_path)
# print(s3_path)
# s3_path = None
task_object = {
"projectId": ObjectId(project_ID),
"taskName": str(taskID),
"frameCount": frames_in_task,
"status": int(0),
"videoPath": str(out_put_path),
"createdAt": create_time,
"S3_url": str(s3_path),
"skipFrameCount": 24.0 / frame_rate,
"frameRate": frame_rate,
"videoResolutionWidth": m_w,
"videoResolutionHeight": | |
321.089, 6035.833, 0.000, 9761.111],
[348.015, 417.919, 0.000, 0.000, 555.556, 0.000, 321.089, 2165.905, 0.000, 9674.821],
[348.015, 417.919, 0.000, 0.000, 555.556, 0.000, 321.089, 2165.905, 0.000, 9712.587],
[348.015, 417.919, 0.000, 0.000, 154.388, 0.000, 321.089, 3762.551, 0.000, 9910.724],
[348.015, 417.919, 0.000, 0.000, 154.388, 0.000, 321.089, 3762.551, 0.000, 9919.378],
[348.015, 417.919, 0.000, 0.000, 154.388, 0.000, 321.089, 3762.551, 0.000, 9793.069],
[348.015, 417.919, 0.000, 0.000, 154.388, 0.000, 321.089, 3762.551, 0.000, 9513.822],
[348.015, 417.919, 0.000, 0.000, 154.388, 0.000, 321.089, 3762.551, 0.000, 9123.593],
[348.015, 417.919, 0.000, 0.000, 154.388, 0.000, 321.089, 3762.551, 0.000, 9000.600],
[348.015, 417.919, 0.000, 0.000, 154.388, 0.000, 321.089, 3762.551, 0.000, 9053.487],
[348.015, 417.919, 0.000, 0.000, 154.388, 0.000, 321.089, 3762.551, 0.000, 9248.714],
[348.015, 417.919, 0.000, 0.000, 154.388, 0.000, 321.089, 3762.551, 0.000, 9161.137],
[348.015, 417.919, 0.000, 0.000, 154.388, 0.000, 321.089, 3762.551, 0.000, 9197.337],
[348.015, 417.919, 0.000, 0.000, 154.388, 0.000, 321.089, 3762.551, 0.000, 9504.698],
[348.015, 417.919, 0.000, 0.000, 154.388, 0.000, 321.089, 3762.551, 0.000, 9875.246],
[348.015, 417.919, 0.000, 0.000, 154.388, 0.000, 321.089, 3762.551, 0.000, 10241.540],
[348.015, 417.919, 0.000, 0.000, 154.388, 0.000, 321.089, 3762.551, 0.000, 10449.240],
[348.015, 417.919, 0.000, 0.000, 154.388, 0.000, 321.089, 3762.551, 0.000, 10628.327],
[348.015, 417.919, 0.000, 0.000, 154.388, 0.000, 321.089, 3762.551, 0.000, 10500.789],
[348.015, 417.919, 0.000, 0.000, 154.388, 0.000, 0.000, 5233.140, 0.000, 10449.278],
[348.015, 417.919, 0.000, 0.000, 459.869, 0.000, 0.000, 3433.855, 0.000, 10338.286],
[348.015, 417.919, 0.000, 0.000, 459.869, 0.000, 0.000, 3433.855, 0.000, 10194.347],
[348.015, 417.919, 0.000, 0.000, 459.869, 0.000, 0.000, 3433.855, 0.000, 10471.001],
[101.498, 417.919, 0.000, 288.667, 459.869, 0.000, 0.000, 3541.085, 0.000, 10411.263],
[101.498, 417.919, 0.000, 288.667, 459.869, 0.000, 0.000, 3541.085, 0.000, 10670.062],
[101.498, 417.919, 0.000, 288.667, 459.869, 0.000, 0.000, 3541.085, 0.000, 10652.480],
[101.498, 417.919, 0.000, 288.667, 459.869, 0.000, 0.000, 3541.085, 0.000, 10526.149],
[101.498, 417.919, 0.000, 288.667, 459.869, 0.000, 0.000, 3541.085, 0.000, 10458.661],
[101.498, 417.919, 821.732, 288.667, 0.000, 2576.128, 0.000, 4487.072, 0.000, 20609.027],
[797.168, 417.919, 821.732, 288.667, 0.000, 2576.128, 0.000, 0.000, 0.000, 21979.497],
[797.168, 417.919, 821.732, 288.667, 0.000, 1649.148, 0.000, 2475.037, 0.000, 21584.441],
[1150.745, 417.919, 821.732, 288.667, 0.000, 1649.148, 0.000, 0.000, 0.000, 21266.406],
[1150.745, 417.919, 821.732, 288.667, 0.000, 1649.148, 0.000, 0.000, 0.000, 20623.683],
[1150.745, 417.919, 821.732, 288.667, 0.000, 1649.148, 0.000, 0.000, 0.000, 21404.957],
[1150.745, 417.919, 503.586, 288.667, 0.000, 1649.148, 0.000, 2230.202, 0.000, 20765.509],
[1150.745, 417.919, 503.586, 288.667, 0.000, 1649.148, 0.000, 2230.202, 0.000, 21248.748],
[1150.745, 417.919, 503.586, 288.667, 0.000, 1649.148, 0.000, 2230.202, 0.000, 21256.041],
[1150.745, 417.919, 503.586, 288.667, 0.000, 1649.148, 0.000, 2230.202, 0.000, 22018.958],
[1150.745, 417.919, 503.586, 288.667, 0.000, 1649.148, 0.000, 2230.202, 0.000, 21764.725],
[1150.745, 417.919, 503.586, 288.667, 0.000, 1649.148, 0.000, 2230.202, 0.000, 21413.241],
[1476.798, 417.919, 503.586, 288.667, 0.000, 1649.148, 0.000, 0.000, 0.000, 22417.021],
[1476.798, 417.919, 503.586, 288.667, 0.000, 1649.148, 0.000, 0.000, 0.000, 22567.685],
[1476.798, 417.919, 503.586, 288.667, 0.000, 1649.148, 0.000, 0.000, 0.000, 22427.699],
[1476.798, 417.919, 503.586, 288.667, 0.000, 1649.148, 0.000, 0.000, 0.000, 21889.359],
[1476.798, 417.919, 503.586, 288.667, 0.000, 1649.148, 0.000, 0.000, 0.000, 22381.938],
[1476.798, 417.919, 503.586, 288.667, 0.000, 1649.148, 0.000, 0.000, 0.000, 21416.358],
[1476.798, 417.919, 503.586, 288.667, 0.000, 1649.148, 0.000, 0.000, 0.000, 22332.786],
[1476.798, 417.919, 503.586, 288.667, 0.000, 761.900, 0.000, 2386.698, 0.000, 23557.595],
[1476.798, 417.919, 503.586, 288.667, 0.000, 761.900, 2209.906, 0.000, 0.000, 23336.992],
[1476.798, 417.919, 503.586, 288.667, 0.000, 761.900, 2209.906, 0.000, 0.000, 22907.742],
[1476.798, 417.919, 503.586, 288.667, 0.000, 761.900, 2209.906, 0.000, 0.000, 24059.201],
[1476.798, 417.919, 503.586, 288.667, 0.000, 761.900, 2209.906, 0.000, 0.000, 24941.902],
[1476.798, 417.919, 503.586, 288.667, 0.000, 761.900, 2209.906, 0.000, 0.000, 25817.514],
[1476.798, 417.919, 503.586, 288.667, 0.000, 761.900, 2209.906, 0.000, 0.000, 24127.939],
[1476.798, 417.919, 503.586, 288.667, 0.000, 761.900, 2209.906, 0.000, 0.000, 25459.688],
[1476.798, 417.919, 503.586, 288.667, 0.000, 761.900, 2209.906, 0.000, 0.000, 25147.370],
[1476.798, 417.919, 503.586, 288.667, 0.000, 761.900, 2209.906, 0.000, 0.000, 25005.842],
[1476.798, 417.919, 503.586, 288.667, 0.000, 761.900, 1086.639, 2752.004, 0.000, 25598.700],
[2138.154, 929.921, 503.586, 288.667, 0.000, 761.900, 1086.639, 4818.835, 0.000, 35944.098],
[661.356, 929.921, 503.586, 553.843, 0.000, 1954.237, 1086.639, 8831.252, 0.000, 35237.243],
[0.000, 929.921, 503.586, 553.843, 0.000, 3613.095, 1086.639, 9460.955, 0.000, 35154.442],
[667.098, 929.921, 503.586, 553.843, 0.000, 3613.095, 1086.639, 5084.792, 0.000, 36166.632],
[667.098, 929.921, 503.586, 553.843, 0.000, 3613.095, 1086.639, 5084.792, 0.000, 34293.883],
[667.098, 929.921, 503.586, 553.843, 0.000, 3613.095, 1086.639, 5084.792, 0.000, 35976.901],
[667.098, 929.921, 503.586, 553.843, 0.000, 3613.095, 1086.639, 5084.792, 0.000, 37848.552],
[667.098, 929.921, 503.586, 553.843, 0.000, 3613.095, 1086.639, 5084.792, 0.000, 39512.574],
[667.098, 929.921, 503.586, 553.843, 0.000, 3613.095, 1086.639, 5084.792, 0.000, 39538.024],
[667.098, 929.921, 503.586, 553.843, 0.000, 3613.095, 1086.639, 5084.792, 0.000, 37652.984],
[667.098, 929.921, 503.586, 553.843, 0.000, 3613.095, 1086.639, 5084.792, 0.000, 36687.909],
[667.098, 1108.871, 745.260, 0.000, 512.148, 0.000, 1086.639, 11861.593, 0.000, 37749.277],
[667.098, 1108.871, 745.260, 0.000, 512.148, 0.000, 1086.639, 11861.593, 0.000, 37865.518],
[667.098, 1108.871, 745.260, 0.000, 512.148, 0.000, 1086.639, 11861.593, 0.000, 38481.190],
[667.098, 1108.871, 745.260, 0.000, 512.148, 0.000, 1086.639, 11861.593, 0.000, 37425.087],
[667.098, 1108.871, 745.260, 0.000, 512.148, 0.000, 1086.639, 11861.593, 0.000, 38051.341],
[667.098, 1108.871, 745.260, 0.000, 512.148, 0.000, 1086.639, 11861.593, 0.000, 38065.478],
[667.098, 1108.871, 745.260, 0.000, 512.148, 0.000, 1086.639, 11861.593, 0.000, 37429.495],
[667.098, 1108.871, 745.260, 0.000, 512.148, 0.000, 1086.639, 11861.593, 0.000, 37154.479],
[667.098, 1600.830, 745.260, 0.000, 512.148, 0.000, 1086.639, 7576.628, 0.000, 36692.717],
[667.098, 1600.830, 745.260, 0.000, 512.148, 0.000, 1086.639, 7576.628, 0.000, 37327.055],
[667.098, 1600.830, 745.260, 0.000, 512.148, 0.000, 1086.639, 7576.628, 0.000, 37937.630],
[667.098, 1600.830, 745.260, 0.000, 512.148, 0.000, 1086.639, 7576.628, 0.000, 38298.645],
[667.098, 1600.830, 745.260, 0.000, 512.148, 0.000, 1086.639, 7576.628, 0.000, 39689.369],
[667.098, 1600.830, 745.260, 0.000, 512.148, 0.000, 1086.639, 7576.628, 0.000, 40992.397],
[667.098, 1600.830, 745.260, 0.000, 512.148, 0.000, 1086.639, 7576.628, 0.000, 41092.265],
[667.098, 1600.830, 745.260, 0.000, 512.148, 0.000, 1086.639, 7576.628, 0.000, 40733.622],
[667.098, 1600.830, 745.260, 0.000, 512.148, 0.000, 3726.579, 0.000, 0.000, 40708.515],
[667.098, 1600.830, 745.260, 0.000, 512.148, 0.000, 3726.579, 0.000, 0.000, 40485.321],
[667.098, 0.000, 745.260, 0.000, 512.148, 0.000, 3726.579, 16888.760, 0.000, 39768.059],
[667.098, 0.000, 745.260, 0.000, 512.148, 0.000, 3726.579, 16888.760, 0.000, 40519.595],
[667.098, 0.000, 745.260, 0.000, 512.148, 0.000, 3726.579, 16888.760, 0.000, 41590.937],
[667.098, 0.000, 1283.484, 0.000, 512.148, 0.000, 3726.579, 12448.413, 0.000, 42354.983],
[667.098, 0.000, 1283.484, 0.000, 512.148, 0.000, 3726.579, 12448.413, 0.000, 41175.149],
[667.098, 0.000, 1283.484, 0.000, 512.148, 0.000, 3726.579, 12448.413, 0.000, 41037.902],
[667.098, 0.000, 1283.484, 0.000, 512.148, 0.000, 3726.579, 12448.413, 0.000, 42706.213],
[667.098, 0.000, 1283.484, 0.000, 512.148, 0.000, 3726.579, 12448.413, 0.000, 40539.205],
[0.000, 0.000, 2384.452, 0.000, 512.148, 0.000, 3726.579, 9293.252, 0.000, 41608.692],
[0.000, 0.000, 2384.452, 0.000, 512.148, 0.000, 3726.579, 9293.252, 0.000, 39992.148],
[0.000, 0.000, 2384.452, 0.000, 512.148, 0.000, 3726.579, 9293.252, 0.000, 39134.828]])
# Simulated PS signal backtest results
# PS signal: sell first, then buy; settlement period is 0
self.ps_res_sb00 = np.array(
[[0.0000, 0.0000, 0.0000, 0.0000, 555.5556, 0.0000, 0.0000, 7500.0000, 0.0000, 10000.0000],
[0.0000, 0.0000, 0.0000, 0.0000, 555.5556, 0.0000, 0.0000, 7500.0000, 0.0000, 9916.6667],
[0.0000, 0.0000, 0.0000, 0.0000, 555.5556, 205.0654, 321.0892, 5059.7222, 0.0000, 9761.1111],
[346.9824, 416.6787, 0.0000, 0.0000, 555.5556, 205.0654, 321.0892, 1201.2775, 0.0000, 9646.1118],
[346.9824, 416.6787, 191.0372, 0.0000, 555.5556, 205.0654, 321.0892, 232.7189, 0.0000, 9685.5858],
[346.9824, 416.6787, 191.0372, 0.0000, 138.8889, 205.0654, 321.0892, 1891.0523, 0.0000, 9813.2184],
[346.9824, 416.6787, 191.0372, 0.0000, 138.8889, 205.0654, 321.0892, 1891.0523, 0.0000, 9803.1288],
[346.9824, 416.6787, 191.0372, 0.0000, 138.8889, 205.0654, 321.0892, 1891.0523, 0.0000, 9608.0198],
[346.9824, 416.6787, 191.0372, 0.0000, 138.8889, 205.0654, 321.0892, 1891.0523, 0.0000, 9311.5727],
[346.9824, 416.6787, 191.0372, 0.0000, 138.8889, 205.0654, 321.0892, 1891.0523, 0.0000, 8883.6246],
[346.9824, 416.6787, 191.0372, 0.0000, 138.8889, 205.0654, 321.0892, 1891.0523, 0.0000, 8751.3900],
[346.9824, 416.6787, 191.0372, 0.0000, 138.8889, 205.0654, 321.0892, 1891.0523, 0.0000, 8794.1811],
[346.9824, 416.6787, 191.0372, 0.0000, 138.8889, 205.0654, 321.0892, 1891.0523, 0.0000, 9136.5704],
[231.4373, 416.6787, 191.0372, 0.0000, 138.8889, 205.0654, 321.0892, 2472.2444, 0.0000, 9209.3588],
[231.4373, 416.6787, 191.0372, 0.0000, 138.8889, 205.0654, 321.0892, 2472.2444, 0.0000, 9093.8294],
[231.4373, 416.6787, 191.0372, 0.0000, 138.8889, 205.0654, 321.0892, 2472.2444, 0.0000, 9387.5537],
[231.4373, 416.6787, 191.0372, 0.0000, 138.8889, 205.0654, 321.0892, 2472.2444, 0.0000, 9585.9589],
[231.4373, 416.6787, 95.5186, 0.0000, 138.8889, 205.0654, 321.0892, 3035.8041, 0.0000, 9928.7771],
[231.4373, 416.6787, 95.5186, 0.0000, 138.8889, 205.0654, 321.0892, 3035.8041, 0.0000, 10060.3806],
[231.4373, 416.6787, 95.5186, 0.0000, 138.8889, 205.0654, 321.0892, 3035.8041, 0.0000, 10281.0021],
[231.4373, 416.6787, 95.5186, 0.0000, 138.8889, 205.0654, 321.0892, 3035.8041, 0.0000, 10095.5613],
[231.4373, 416.6787, 95.5186, 0.0000, 138.8889, 205.0654, 0.0000, 4506.3926, 0.0000, 10029.9571],
[231.4373, 416.6787, 95.5186, 0.0000, 474.2238, 205.0654, 0.0000, 2531.2699, 0.0000, 9875.6133],
[231.4373, 416.6787, 95.5186, 0.0000, 474.2238, 205.0654, 0.0000, 2531.2699, 0.0000, 9614.9463],
[231.4373, 416.6787, 95.5186, 0.0000, 474.2238, 205.0654, 0.0000, 2531.2699, 0.0000, 9824.1722],
[115.7186, 416.6787, 95.5186, 269.8496, 474.2238, 205.0654, 0.0000, 1854.7990, 0.0000, 9732.5743],
[115.7186, 416.6787, 95.5186, 269.8496, 474.2238, 205.0654, 0.0000, 1854.7990, 0.0000, 9968.3391],
[115.7186, 416.6787, 95.5186, 269.8496, 474.2238, 205.0654, 0.0000, 1854.7990, 0.0000, 10056.1579],
[115.7186, 416.6787, 95.5186, 269.8496, 474.2238, 205.0654, 0.0000, 1854.7990, 0.0000, 9921.4925],
[115.7186, 416.6787, 95.5186, 269.8496, 474.2238, 205.0654, 0.0000, 1854.7990, 0.0000,
#!/usr/bin/env python
import ConfigParser
import sys, getopt, os
import numpy as np
import Nio
import time
import re
import json
import random
import asaptools.simplecomm as simplecomm
import fnmatch
import glob
#
# Parse the header of a NetCDF file to get the 3d/2d/1d variable lists
#
def parse_header_file(filename):
command ='ncdump -h ' + filename
print(command)
retvalue=(os.popen(command).readline())
print(retvalue)
#
# Create RMSZ zscores for ensemble file sets
#
def calc_rmsz(o_files,var_name3d,var_name2d,is_SE,opts_dict):
threshold=1e-12
popens = opts_dict['popens']
tslice = opts_dict['tslice']
if 'cumul' in opts_dict:
cumul=opts_dict['cumul']
else:
cumul=False
input_dims = o_files[0].dimensions
if popens:
nbin = opts_dict['nbin']
minrange = opts_dict['minrange']
maxrange = opts_dict['maxrange']
nlev=input_dims['z_t']
else:
nlev=input_dims["lev"]
# Create array variables based on is_SE
if (is_SE == True):
ncol=input_dims["ncol"]
npts2d=ncol
npts3d=nlev*ncol
output3d = np.zeros((len(o_files),nlev,ncol),dtype=np.float32)
output2d = np.zeros((len(o_files),ncol),dtype=np.float32)
ens_avg3d=np.zeros((len(var_name3d),nlev,ncol),dtype=np.float32)
ens_stddev3d=np.zeros((len(var_name3d),nlev,ncol),dtype=np.float32)
ens_avg2d=np.zeros((len(var_name2d),ncol),dtype=np.float32)
ens_stddev2d=np.zeros((len(var_name2d),ncol),dtype=np.float32)
else:
if 'nlon' in input_dims:
nlon = input_dims["nlon"]
nlat = input_dims["nlat"]
elif 'lon' in input_dims:
nlon = input_dims["lon"]
nlat = input_dims["lat"]
npts2d=nlat*nlon
npts3d=nlev*nlat*nlon
output3d = np.zeros((len(o_files),nlev,nlat,nlon),dtype=np.float32)
output2d = np.zeros((len(o_files),nlat,nlon),dtype=np.float32)
ens_avg3d=np.zeros((len(var_name3d),nlev,nlat,nlon),dtype=np.float32)
ens_stddev3d=np.zeros((len(var_name3d),nlev,nlat,nlon),dtype=np.float32)
ens_avg2d=np.zeros((len(var_name2d),nlat,nlon),dtype=np.float32)
ens_stddev2d=np.zeros((len(var_name2d),nlat,nlon),dtype=np.float32)
if popens:
Zscore3d = np.zeros((len(var_name3d),len(o_files),(nbin)),dtype=np.float32)
Zscore2d = np.zeros((len(var_name2d),len(o_files),(nbin)),dtype=np.float32)
else:
Zscore3d = np.zeros((len(var_name3d),len(o_files)),dtype=np.float32)
Zscore2d = np.zeros((len(var_name2d),len(o_files)),dtype=np.float32)
avg3d={}
stddev3d={}
avg2d={}
stddev2d={}
indices = np.arange(0,len(o_files),1)
gm3d=[]
gm2d=[]
if cumul:
temp1,temp2,area_wgt,z_wgt=get_area_wgt(o_files,is_SE,input_dims,nlev,popens)
gm3d = np.zeros((len(var_name3d)),dtype=np.float32)
gm2d = np.zeros((len(var_name2d)),dtype=np.float32)
for vcount,vname in enumerate(var_name3d):
#Read in vname's data of all files
for fcount, this_file in enumerate(o_files):
data=this_file.variables[vname]
if (is_SE == True):
output3d[fcount,:,:]=data[tslice,:,:]
else:
output3d[fcount,:,:,:]=data[tslice,:,:,:]
#Generate ens_avg and ens_stddev to store in the ensemble summary file
if popens:
moutput3d=np.ma.masked_values(output3d,data._FillValue)
ens_avg3d[vcount]=np.ma.average(moutput3d,axis=0)
ens_stddev3d[vcount]=np.ma.std(moutput3d,axis=0,dtype=np.float32)
else:
ens_avg3d[vcount]=np.average(output3d,axis=0).astype(np.float32)
ens_stddev3d[vcount]=np.std(output3d.astype(np.float64),axis=0,dtype=np.float64).astype(np.float32)
if cumul:
gm3d[vcount],temp3=calc_global_mean_for_onefile(this_file,area_wgt,[vname],[],ens_avg3d[vcount],temp2,tslice,is_SE,nlev,opts_dict)
if not cumul:
#Generate avg, stddev and zscore for 3d variable
for fcount,this_file in enumerate(o_files):
data=this_file.variables[vname]
if not popens:
new_index=np.where(indices!=fcount)
ensemble3d = output3d[new_index]
avg3d=np.average(ensemble3d,axis=0)
stddev3d=np.std(ensemble3d,axis=0,dtype=np.float64)
flag3d = False
count3d = 0
count3d,ret_val=calc_Z(output3d[fcount].astype(np.float64),avg3d.astype(np.float64),stddev3d.astype(np.float64),count3d,flag3d)
Zscore=np.sum(np.square(ret_val))
if (count3d < npts3d):
Zscore3d[vcount,fcount]=np.sqrt(Zscore/(npts3d-count3d))
else:
print("WARNING: no variance in "+vname)
else:
rmask=this_file.variables['REGION_MASK']
Zscore=pop_zpdf(output3d[fcount],nbin,(minrange,maxrange),ens_avg3d[vcount],ens_stddev3d[vcount],data._FillValue,threshold,rmask,opts_dict)
Zscore3d[vcount,fcount,:]=Zscore[:]
for vcount,vname in enumerate(var_name2d):
#Read in vname's data of all files
for fcount, this_file in enumerate(o_files):
data=this_file.variables[vname]
if (is_SE == True):
output2d[fcount,:]=data[tslice,:]
else:
output2d[fcount,:,:]=data[tslice,:,:]
#Generate ens_avg and ens_stddev to store in the ensemble summary file
if popens:
moutput2d=np.ma.masked_values(output2d,data._FillValue)
ens_avg2d[vcount]=np.ma.average(moutput2d,axis=0)
ens_stddev2d[vcount]=np.ma.std(moutput2d,axis=0,dtype=np.float32)
else:
ens_avg2d[vcount]=np.average(output2d,axis=0).astype(np.float32)
ens_stddev2d[vcount]=np.std(output2d,axis=0,dtype=np.float64).astype(np.float32)
if cumul:
temp3,gm2d[vcount]=calc_global_mean_for_onefile(this_file,area_wgt,[],[vname],temp1,ens_avg2d[vcount],tslice,is_SE,nlev,opts_dict)
if not cumul:
#Generate avg, stddev and zscore for 2d variable
for fcount,this_file in enumerate(o_files):
data=this_file.variables[vname]
if not popens:
new_index=np.where(indices!=fcount)
ensemble2d = output2d[new_index]
avg2d=np.average(ensemble2d,axis=0)
stddev2d=np.std(ensemble2d,axis=0,dtype=np.float64)
flag2d = False
count2d = 0
#count2d,ret_val=calc_Z(output2d[fcount].astype(np.float64),avg2d.astype(np.float64),stddev2d.astype(np.float64),count2d,flag2d)
count2d,ret_val=calc_Z(output2d[fcount],avg2d,stddev2d.astype(np.float64),count2d,flag2d)
Zscore=np.sum(np.square(ret_val))
if (count2d < npts2d):
Zscore2d[vcount,fcount]=np.sqrt(Zscore/(npts2d-count2d))
else:
print("WARNING: no variance in "+vname)
else:
rmask=this_file.variables['REGION_MASK']
Zscore=pop_zpdf(output2d[fcount],nbin,(minrange,maxrange),ens_avg2d[vcount],ens_stddev2d[vcount],data._FillValue,threshold,rmask,opts_dict)
Zscore2d[vcount,fcount,:]=Zscore[:]
return Zscore3d,Zscore2d,ens_avg3d,ens_stddev3d,ens_avg2d,ens_stddev2d,gm3d,gm2d
#
# Calculate pop zscore pass rate (ZPR) or pop zpdf values
#
def pop_zpdf(input_array,nbin,zrange,ens_avg,ens_stddev,FillValue,threshold,rmask,opts_dict):
if 'test_failure' in opts_dict:
test_failure=opts_dict['test_failure']
else:
test_failure=False
#Masked the missing value
moutput=np.ma.masked_values(input_array,FillValue)
#print 'before count=',moutput.count()
if input_array.ndim==3:
rmask3d=np.zeros(input_array.shape,dtype=np.int32)
for i in rmask3d:
i[:,:]=rmask[:,:]
rmask_array=rmask3d
elif input_array.ndim==2:
rmask_array=np.zeros(input_array.shape,dtype=np.int32)
rmask_array[:,:]=rmask[:,:]
#Masked the rmask<1 or rmask>6
moutput2=np.ma.masked_where((rmask_array<1)|(rmask_array>6),moutput)
#Use the masked array moutput2 to calculate Zscore_temp=(data-avg)/stddev
Zscore_temp=np.fabs((moutput2.astype(np.float64)-ens_avg)/np.where(ens_stddev<=threshold,FillValue,ens_stddev))
#To retrieve only the valid entries of Zscore_temp
Zscore_nomask=Zscore_temp[~Zscore_temp.mask]
#If just test failure, calculate ZPR only
if test_failure:
#Zpr=the count of Zscore_nomask is less than pop_tol (3.0)/ the total count of Zscore_nomask
Zpr=np.where(Zscore_nomask<=opts_dict['pop_tol'])[0].size/float(Zscore_temp.count())
return Zpr
#Else calculate zpdf and return as zscore
#Count the unmasked value
count=Zscore_temp.count()
Zscore,bins = np.histogram(Zscore_temp.compressed(),bins=nbin,range=zrange)
#Normalize the number by dividing the count
if count != 0:
Zscore=Zscore.astype(np.float32)/count
print('sum=',np.sum(Zscore))
else:
print('count=0,sum=',np.sum(Zscore))
return Zscore
#
# Calculate the RMSZ score by comparing the run file with the ensemble summary file
#
def calculate_raw_score(k,v,npts3d,npts2d,ens_avg,ens_stddev,is_SE,opts_dict,FillValue,timeslice,rmask):
count=0
Zscore=0
threshold = 1.0e-12
has_zscore=True
popens=opts_dict['popens']
if popens:
minrange=opts_dict['minrange']
maxrange=opts_dict['maxrange']
Zscore=pop_zpdf(v,opts_dict['nbin'],(minrange,maxrange),ens_avg,ens_stddev,FillValue,threshold,rmask,opts_dict)
else:
if k in ens_avg:
if is_SE:
if ens_avg[k].ndim == 1:
npts=npts2d
else:
npts=npts3d
else:
if ens_avg[k].ndim == 2:
npts=npts2d
else:
npts=npts3d
count,return_val=calc_Z(v,ens_avg[k].astype(np.float64),ens_stddev[k].astype(np.float64),count,False)
Zscore=np.sum(np.square(return_val))
if npts == count:
Zscore=0
else:
Zscore=np.sqrt(Zscore/(npts-count))
else:
has_zscore=False
return Zscore,has_zscore
#
# Create some variables and call a function to calculate PCA
#
def pre_PCA(gm):
threshold= 1.0e-12
FillValue= 1.0e+30
gm_len=gm.shape
nvar=gm_len[0]
nfile=gm_len[1]
mu_gm=np.average(gm,axis=1).astype(np.float64)
sigma_gm=np.std(gm,axis=1,dtype=np.float64)
standardized_global_mean=np.zeros(gm.shape,dtype=np.float64)
scores_gm=np.zeros(gm.shape,dtype=np.float64)
for var in range(nvar):
for file in range(nfile):
standardized_global_mean[var,file]=(gm[var,file]-mu_gm[var])/sigma_gm[var]
loadings_gm=princomp(standardized_global_mean)
#now do coord transformation on the standardized mean to get the scores
scores_gm=np.dot(loadings_gm.T,standardized_global_mean)
sigma_scores_gm =np.std(scores_gm,axis=1,dtype=np.float64)
return mu_gm.astype(np.float32),sigma_gm.astype(np.float32),standardized_global_mean.astype(np.float32),loadings_gm.astype(np.float32),sigma_scores_gm.astype(np.float32)
#
# Performs principal components analysis (PCA) on the p-by-n data matrix A
# rows of A correspond to (p) variables AND cols of A correspond to the (n) tests
# assume already standardized
#
# Returns the loadings: p-by-p matrix, each column containing coefficients
# for one principal component.
#
def princomp(standardized_global_mean):
# find covariance matrix (will be pxp)
co_mat= np.cov(standardized_global_mean)
# Calculate evals and evecs of covariance matrix (evecs are also pxp)
[evals, evecs] = np.linalg.eig(co_mat)
# Above may not be sorted - sort largest first
new_index = np.argsort(evals)[::-1]
evecs = evecs[:,new_index]
evals = evals[new_index]
return evecs
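# Example (illustrative sketch): princomp() above returns the covariance-matrix
# eigenvectors sorted by decreasing eigenvalue, so the loadings should be
# (numerically) orthonormal. The 5x30 random matrix below is made up.
def _example_princomp():
    sgm = np.random.randn(5, 30)           # 5 standardized variables, 30 ensemble members
    loadings = princomp(sgm)               # 5x5 loadings matrix
    print(np.allclose(np.dot(loadings.T, loadings), np.eye(5)))   # -> True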
#
# Calculate (val-avg)/stddev and exclude zero value
#
def calc_Z(val,avg,stddev,count,flag):
return_val=np.empty(val.shape,dtype=np.float64,order='C')
tol =1.0e-12
if stddev[(stddev > tol)].size ==0:
if flag:
print("WARNING: ALL standard dev = 0")
flag = False
count =count + stddev[(stddev <= tol)].size
return_val = 0.
else:
if stddev[(stddev <= tol)].size > 0:
if flag:
print("WARNING: some standard dev = 0")
flag =False
count =count + stddev[(stddev <= tol)].size
return_val[np.where(stddev <= tol)]=0.
return_val[np.where(stddev > tol)]= (val[np.where(stddev> tol)]-avg[np.where(stddev> tol)])/stddev[np.where(stddev>tol)]
else:
return_val=(val-avg)/stddev
return count,return_val
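# Example (illustrative sketch, made-up numbers): the middle point below has zero
# ensemble spread, so calc_Z() zeroes it and reports count = 1 excluded point.
def _example_calc_z():
    val = np.array([1.0, 2.0, 3.0])
    avg = np.array([1.0, 1.0, 1.0])
    std = np.array([0.5, 0.0, 1.0])
    count, z = calc_Z(val, avg, std, 0, False)
    print(count, z)   # -> 1 [0. 0. 2.]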
#
# Read a json file for the excluded list of variables
#
def read_jsonlist(metajson,method_name):
fd=open(metajson)
metainfo = json.load(fd)
if method_name == 'ES':
varList = metainfo['ExcludedVar']
return varList
elif method_name == 'ESP':
var2d = metainfo['Var2d']
var3d = metainfo['Var3d']
return var2d, var3d
#
# Calculate Normalized RMSE metric
#
def calc_nrmse(orig_array,comp_array):
orig_size=orig_array.size
sumsqr=np.sum(np.square(orig_array.astype(np.float64)-comp_array.astype(np.float64)))
rng=np.max(orig_array)-np.min(orig_array)
if abs(rng) < 1e-18:
rmse=0.0
else:
rmse=np.sqrt(sumsqr/orig_size)/rng
return rmse
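# Example (illustrative sketch): shifting [0, 1, 2, 3] by 0.1 gives an RMSE of 0.1
# over a range of 3, so calc_nrmse() returns 0.1/3 ~= 0.0333.
def _example_calc_nrmse():
    orig = np.array([0.0, 1.0, 2.0, 3.0])
    print(calc_nrmse(orig, orig + 0.1))   # -> ~0.0333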
#
# Calculate weighted global mean for one level of CAM output
#
def area_avg(data, weight, is_SE):
#TO DO: take into account missing values
if (is_SE == True):
a = np.average(data, weights=weight)
else: #FV
#a = wgt_areaave(data, weight, 1.0, 1)
#weights are for lat
a_lat = np.average(data,axis=0, weights=weight)
a = np.average(a_lat)
return a
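# Example (illustrative sketch): in the FV branch of area_avg() above, rows are
# first averaged with the latitude weights, then the longitude average is taken.
def _example_area_avg():
    data = np.array([[1.0, 3.0],
                     [5.0, 7.0]])          # 2 latitudes x 2 longitudes
    wgt = np.array([0.25, 0.75])           # latitude weights
    print(area_avg(data, wgt, False))      # -> 5.0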
#
# Calculate weighted global mean for one level of OCN output
#
def pop_area_avg(data, weight):
#Take into account missing values
#a = wgt_areaave(data, weight, 1.0, 1)
#weights are for lat
a = np.ma.average(data, weights=weight)
return a
def get_lev(file_dim_dict,lev_name):
return file_dim_dict[lev_name]
#
# Get dimension 'lev' or 'z_t'
#
def get_nlev(o_files,popens):
#get dimensions and compute area_wgts
input_dims = o_files[0].dimensions
if not popens:
nlev = get_lev(input_dims,'lev')
else:
nlev = get_lev(input_dims,'z_t')
return input_dims,nlev
#
# Calculate area_wgt when processing CAM SE / CAM FV / POP files
#
def get_area_wgt(o_files,is_SE,input_dims,nlev,popens):
z_wgt={}
if (is_SE == True):
ncol = input_dims["ncol"]
output3d = np.zeros((nlev, ncol))
output2d = np.zeros(ncol)
area_wgt = np.zeros(ncol)
area = o_files[0].variables["area"]
area_wgt[:] = area[:]
total = np.sum(area_wgt)
area_wgt[:] /= total
else:
if not popens:
nlon = get_lev(input_dims,'lon')
nlat = get_lev(input_dims,'lat')
gw = o_files[0].variables["gw"]
else:
if 'nlon' in input_dims:
nlon = get_lev(input_dims,'nlon')
nlat = get_lev(input_dims,'nlat')
elif 'lon' in input_dims:
nlon = get_lev(input_dims,'lon')
nlat = get_lev(input_dims,'lat')
gw = o_files[0].variables["TAREA"]
z_wgt = o_files[0].variables["dz"]
output3d = np.zeros((nlev, nlat, nlon))
output2d = np.zeros((nlat, nlon))
#area_wgt = np.zeros(nlat) #note Gauss weights are length nlat
area_wgt = gw
return output3d,output2d,area_wgt,z_wgt
#
# Open input files, compute area_wgts, and then loop through all files to call calc_global_mean_for_onefile
#
def generate_global_mean_for_summary(o_files,var_name3d,var_name2d,is_SE,pepsi_gm,opts_dict):
tslice=opts_dict['tslice']
popens=opts_dict['popens']
#openfile - should have already been opened by Nio.open_file()
n3d = len(var_name3d)
n2d = len(var_name2d)
tot = n3d + n2d
gm3d = np.zeros((n3d,len(o_files)),dtype=np.float32)
gm2d = np.zeros((n2d,len(o_files)),dtype=np.float32)
input_dims,nlev=get_nlev(o_files,popens)
output3d,output2d,area_wgt,z_wgt=get_area_wgt(o_files,is_SE,input_dims,nlev,popens)
#loop through the input file list to calculate global means
#var_name3d=[]
for fcount,fname in enumerate(o_files):
if pepsi_gm:
# Generate global means for the PEPSI challenge daily timeseries files; they are all 2d variables
var_name2d=[]
for k,v in fname.variables.iteritems():
if v.typecode() == 'f':
var_name2d.append(k)
fout = open(k+"_33.txt","w")
if k == 'time':
ntslice=v[:]
for i in np.nditer(ntslice):
temp1,temp2=calc_global_mean_for_onefile(fname,area_wgt,var_name3d,var_name2d,output3d,output2d,int(i),is_SE,nlev,opts_dict)
fout.write(str(temp2[0])+'\n')
elif popens:
gm3d[:,fcount],gm2d[:,fcount]=calc_global_mean_for_onefile_pop(fname,area_wgt,z_wgt,var_name3d,var_name2d,output3d,output2d,tslice,is_SE,nlev,opts_dict)
else:
gm3d[:,fcount],gm2d[:,fcount]=calc_global_mean_for_onefile(fname,area_wgt,var_name3d,var_name2d,output3d,output2d,tslice,is_SE,nlev,opts_dict)
return gm3d,gm2d
#
# Calculate global means for one OCN input file
#
def calc_global_mean_for_onefile_pop(fname, area_wgt,z_wgt,var_name3d, var_name2d,output3d,output2d, tslice, is_SE, nlev,opts_dict):
n3d = len(var_name3d)
n2d = len(var_name2d)
gm3d = np.zeros((n3d),dtype=np.float32)
gm2d = np.zeros((n2d),dtype=np.float32)
#calculate global mean for each 3D variable
for count, vname in enumerate(var_name3d):
#if (verbose == True):
# print "calculating GM for variable ", vname
gm_lev = np.zeros(nlev)
data = fname.variables[vname]
output3d[:,:,:] = data[tslice,:,:,:]
for k in range(nlev):
moutput3d=np.ma.masked_values(output3d[k,:,:],data._FillValue)
gm_lev[k] = pop_area_avg(moutput3d, area_wgt)
#note: averaging over levels should probably be pressure-weighted(TO DO)
gm3d[count] = np.average(gm_lev,weights=z_wgt)
#calculate global mean for each 2D variable
for count, vname in enumerate(var_name2d):
#if (verbose == True):
# print "calculating GM for variable ", vname
data = fname.variables[vname]
output2d[:,:] = data[tslice,:,:]
moutput2d=np.ma.masked_values(output2d[:,:],data._FillValue)
gm2d_mean = pop_area_avg(moutput2d, area_wgt)
gm2d[count]=gm2d_mean
return gm3d,gm2d
#
# Calculate global means for one CAM input file
#
def calc_global_mean_for_onefile(fname, area_wgt,var_name3d, var_name2d,output3d,output2d, tslice, is_SE, nlev,opts_dict):
if 'cumul' in opts_dict:
cumul = opts_dict['cumul']
else:
cumul = False
n3d = len(var_name3d)
n2d = len(var_name2d)
gm3d = np.zeros((n3d),dtype=np.float32)
k - 1) * (m + 1) / n)
)
/ (r ** 2 + 2 * r * s * x * cos(Pi * (2 * k - 1) / n) + s ** 2 * x ** 2),
x,
)
return Simp(
2
* (-1) ** (m / 2)
* r ** (m + 2)
* s ** (-m)
* Int(1 / (r ** 2 + s ** 2 * x ** 2), x)
/ (a * n)
+ Dist(
2 * r ** (m + 1) * s ** (-m) / (a * n),
Sum_doit(u, List(k, 1, n / 4 - 1 / 2)),
x,
),
x,
)
def With790(a, b, m, n, x):
r = Numerator(Rt(-a / b, n))
s = Denominator(Rt(-a / b, n))
k = Symbol("k")
u = Symbol("u")
u = Int(
(r * cos(S(2) * Pi * k * m / n) - s * x * cos(S(2) * Pi * k * (m + S(1)) / n))
/ (
r ** S(2)
- S(2) * r * s * x * cos(S(2) * Pi * k / n)
+ s ** S(2) * x ** S(2)
),
x,
) + Int(
(r * cos(S(2) * Pi * k * m / n) + s * x * cos(S(2) * Pi * k * (m + S(1)) / n))
/ (
r ** S(2)
+ S(2) * r * s * x * cos(S(2) * Pi * k / n)
+ s ** S(2) * x ** S(2)
),
x,
)
u = Int(
(r * cos(2 * Pi * k * m / n) - s * x * cos(2 * Pi * k * (m + 1) / n))
/ (r ** 2 - 2 * r * s * x * cos(2 * Pi * k / n) + s ** 2 * x ** 2),
x,
) + Int(
(r * cos(2 * Pi * k * m / n) + s * x * cos(2 * Pi * k * (m + 1) / n))
/ (r ** 2 + 2 * r * s * x * cos(2 * Pi * k / n) + s ** 2 * x ** 2),
x,
)
return Simp(
Dist(
2 * r ** (m + 1) * s ** (-m) / (a * n),
Sum_doit(u, List(k, 1, n / 4 - 1 / 2)),
x,
)
+ 2
* r ** (m + 2)
* s ** (-m)
* Int(1 / (r ** 2 - s ** 2 * x ** 2), x)
/ (a * n),
x,
)
def With791(a, b, x):
r = Numerator(Rt(a / b, S(2)))
s = Denominator(Rt(a / b, S(2)))
return -Dist(
S(1) / (S(2) * s), Int((r - s * x ** S(2)) / (a + b * x ** S(4)), x), x
) + Dist(S(1) / (S(2) * s), Int((r + s * x ** S(2)) / (a + b * x ** S(4)), x), x)
def With792(a, b, x):
r = Numerator(Rt(-a / b, S(2)))
s = Denominator(Rt(-a / b, S(2)))
return -Dist(s / (S(2) * b), Int(S(1) / (r - s * x ** S(2)), x), x) + Dist(
s / (S(2) * b), Int(S(1) / (r + s * x ** S(2)), x), x
)
def With793(a, b, m, n, x):
r = Numerator(Rt(a / b, S(4)))
s = Denominator(Rt(a / b, S(4)))
return Dist(
sqrt(S(2)) * s ** S(3) / (S(4) * b * r),
Int(
x ** (m - n / S(4))
/ (
r ** S(2)
- sqrt(S(2)) * r * s * x ** (n / S(4))
+ s ** S(2) * x ** (n / S(2))
),
x,
),
x,
) - Dist(
sqrt(S(2)) * s ** S(3) / (S(4) * b * r),
Int(
x ** (m - n / S(4))
/ (
r ** S(2)
+ sqrt(S(2)) * r * s * x ** (n / S(4))
+ s ** S(2) * x ** (n / S(2))
),
x,
),
x,
)
def With794(a, b, m, n, x):
r = Numerator(Rt(-a / b, S(2)))
s = Denominator(Rt(-a / b, S(2)))
return Dist(r / (S(2) * a), Int(x ** m / (r - s * x ** (n / S(2))), x), x) + Dist(
r / (S(2) * a), Int(x ** m / (r + s * x ** (n / S(2))), x), x
)
def With795(a, b, m, n, x):
r = Numerator(Rt(-a / b, S(2)))
s = Denominator(Rt(-a / b, S(2)))
return -Dist(
s / (S(2) * b), Int(x ** (m - n / S(2)) / (r - s * x ** (n / S(2))), x), x
) + Dist(s / (S(2) * b), Int(x ** (m - n / S(2)) / (r + s * x ** (n / S(2))), x), x)
def replacement796(a, b, m, n, x):
return Int(PolynomialDivide(x ** m, a + b * x ** n, x), x)
def With797(a, b, x):
r = Numer(Rt(b / a, S(3)))
s = Denom(Rt(b / a, S(3)))
return Dist(
S(1) / r, Int((r * x + s * (S(1) - sqrt(S(3)))) / sqrt(a + b * x ** S(3)), x), x
) + Dist(
sqrt(S(2)) * s / (r * sqrt(sqrt(S(3)) + S(2))),
Int(S(1) / sqrt(a + b * x ** S(3)), x),
x,
)
def With798(a, b, x):
r = Numer(Rt(b / a, S(3)))
s = Denom(Rt(b / a, S(3)))
return Dist(
S(1) / r, Int((r * x + s * (S(1) + sqrt(S(3)))) / sqrt(a + b * x ** S(3)), x), x
) - Dist(
sqrt(S(2)) * s / (r * sqrt(S(2) - sqrt(S(3)))),
Int(S(1) / sqrt(a + b * x ** S(3)), x),
x,
)
def With799(a, b, x):
q = Rt(b / a, S(2))
return -Dist(
S(1) / q, Int((-q * x ** S(2) + S(1)) / sqrt(a + b * x ** S(4)), x), x
) + Dist(S(1) / q, Int(S(1) / sqrt(a + b * x ** S(4)), x), x)
def With800(a, b, x):
q = Rt(-b / a, S(2))
return -Dist(
S(1) / q, Int((-q * x ** S(2) + S(1)) / sqrt(a + b * x ** S(4)), x), x
) + Dist(S(1) / q, Int(S(1) / sqrt(a + b * x ** S(4)), x), x)
def With801(a, b, x):
q = Rt(-b / a, S(2))
return Dist(
S(1) / q, Int((q * x ** S(2) + S(1)) / sqrt(a + b * x ** S(4)), x), x
) - Dist(S(1) / q, Int(S(1) / sqrt(a + b * x ** S(4)), x), x)
def With802(a, b, x):
r = Numer(Rt(b / a, S(3)))
s = Denom(Rt(b / a, S(3)))
return -Dist(
S(1) / (S(2) * r ** S(2)),
Int(
(-S(2) * r ** S(2) * x ** S(4) + s ** S(2) * (S(-1) + sqrt(S(3))))
/ sqrt(a + b * x ** S(6)),
x,
),
x,
) + Dist(
s ** S(2) * (S(-1) + sqrt(S(3))) / (S(2) * r ** S(2)),
Int(S(1) / sqrt(a + b * x ** S(6)), x),
x,
)
def replacement803(a, b, x):
return -Dist(
S(1) / (S(2) * Rt(b / a, S(4))),
Int((-(x ** S(2)) * Rt(b / a, S(4)) + S(1)) / sqrt(a + b * x ** S(8)), x),
x,
) + Dist(
S(1) / (S(2) * Rt(b / a, S(4))),
Int((x ** S(2) * Rt(b / a, S(4)) + S(1)) / sqrt(a + b * x ** S(8)), x),
x,
)
def replacement804(a, b, x):
return -Dist(
a / S(2), Int(x ** S(2) / (a + b * x ** S(4)) ** (S(5) / 4), x), x
) + Simp(x ** S(3) / (S(2) * (a + b * x ** S(4)) ** (S(1) /
# repo: DerekRein/.nuke
INPUTS = HIDDEN_INPUTS = None
GUI = True
DISABLED = 0x00000080 # DISABLED Set by disable(), cleared by enable().
NO_ANIMATION = 0x00000100 # NO_ANIMATION Prevent the value from being animated. This removes any anymation or view buttons, and it stops tcl expressions from being evaluated in string knobs, and may make it ignore attempts to set expressions or key frames (nyi).
DO_NOT_WRITE = 0x00000200 # DO_NOT_WRITE Don't ever save this knob to a script (including copy & paste!)
INVISIBLE = 0x00000400 # INVISIBLE The knob does not appear in the panels. No widgets are created. This is not the same as hide(), and show() will not undo it!
RESIZABLE = 0x00000800 # RESIZABLE The knob can stretch in the panel so that it fills up all the remaining space in the line. Defaults to true for most of the complex knobs, but off for buttons, checkmarks, and pulldown lists.
STARTLINE = 0x00001000 # STARTLINE This knob starts a new row in the panel. The default is true unless a zero-length (not NULL) string is passed as the label. Currently the default is false for checkmarks and buttons but this may change in future versions.
ENDLINE = 0x00002000 # ENDLINE This knob will end a row, acts exactly like STARTLINE was set on the next knob. Set true for divider lines.
NO_RERENDER = 0x00004000 # NO_RERENDER This knob does not contribute to the hash value for the op. This should be used on knobs that have no effect on the op's output.
NO_HANDLES = 0x00008000 # NO_HANDLES Don't draw anything in the viewer, this is useful if the Op draws it's own indicators.
KNOB_CHANGED_ALWAYS = 0x00010000 # KNOB_CHANGED_ALWAYS will call node()->knob_changed() every time the value of the knob changes. Normally it is only called if the user changes the value with the panel open. This allows you to track all changes to the value. Be careful as knob_changed() will be called without storing the new values into your structure.
NO_KNOB_CHANGED = 0x00020000 # NO_KNOB_CHANGED: Don't bother calling Op::knob_changed() with this knob. This is turned on automatically if the knob_changed() returns false.
KNOB_CHANGED_RECURSIVE = 0x08000000
HIDDEN = 0x00040000 # HIDDEN Set by hide(), cleared by show().
NO_UNDO = 0x00080000 # NO_UNDO Don't undo/redo any changes to this knob. May be replaced with "output knob" in the future.
ALWAYS_SAVE = 0x00100000 # ALWAYS_SAVE save the knob to a script even if not_default() returns false. Deprecated, instead override not_default() and make it return true!
NODE_KNOB = 0x00200000 # NODE_KNOB is used by Nuke internally for controls on the DAG appearance such as xpos and ypos.
HANDLES_ANYWAY = 0x00400000 # HANDLES_ANYWAY makes the handles appear in the viewer when the panel is open even if a different tab is selected.
READ_ONLY = 0x10000000 # knob cannot be modified by UI intervention but can still be copied from etc
# internal use:
INDETERMINATE = 0x00800000
COLOURCHIP_HAS_UNSET = 0x01000000 # < whether a color chip can be in the 'unset' state DEFAULTS TO FALSE
SMALL_UI = 0x02000000
NO_NUMERIC_FIELDS = 0x04000000
NO_CURVE_EDITOR = 0x20000000
NO_MULTIVIEW = 0x40000000
EARLY_STORE = 0x80000000
# Numeric knobs: Values that work for knobs that store numbers:
MAGNITUDE = 0x00000001 # MAGNITUDE If there are several numbers, this enables a button to only show a single number, and all are set equal to this number. Default is true for WH_knob() and Color_knob().
SLIDER = 0x00000002 # SLIDER Turns on the slider. Currently this only works if the size is 1 or MAGNITUDE is enabled and it is set to single numbers. Defaults to on for most non-integer numerical controls.
LOG_SLIDER = 0x00000004 # LOG_SLIDER Tick marks on the slider (if enabled with SLIDER) are spaced logarithmically. This is turned on for WH_knob() and Color_knob(), and if the range has both ends greater than zero. If you turn this on and the range passes through zero, the scale is actually the cube root of the number, not the logarithim.
STORE_INTEGER = 0x00000008 # STORE_INTEGER Only integer values should be displayed or stored.
FORCE_RANGE = 0x00000010 # FORCE_RANGE Clamps the value to the range when storing.
ANGLE = 0x00000020 # ANGLE Turn on a little widget depicting this number as an angle.
NO_PROXYSCALE = 0x00000040 # NO_PROXYSCALE disables proxy scaling for XY or WH knobs. Useful if you just want two numbers called "x" and "y" that are not really a position. You probably also want to do NO_HANDLES.
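# Example (illustrative sketch): the knob flags above are bit masks, so they are
# combined with bitwise OR and queried with bitwise AND.
def _example_knob_flags():
    flags = SLIDER | LOG_SLIDER | FORCE_RANGE
    print(bool(flags & SLIDER), bool(flags & ANGLE))   # -> True False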
# String Knobs
GRANULAR_UNDO = 0x00000001
NO_RECURSIVE_PATHS = 0x00000002
# Enumeration: Values that work for Enumeration_knob():
SAVE_MENU = 0x02000000 # SAVE_MENU writes the contents of the menu to the saved script. Useful if your plugin modifies the list of items.
# BeginGroup: Values that work for BeginGroup():
CLOSED = 0x00000001 # CLOSED True for a BeginGroup knob that is closed
TOOLBAR_GROUP = 0x00000002 # Make the group into a viewer toolbar
TOOLBAR_LEFT = 0x00000000 # Position in the viewer. Only set one of these:
TOOLBAR_TOP = 0x00000010
TOOLBAR_BOTTOM = 0x00000020
TOOLBAR_RIGHT = 0x00000030
TOOLBAR_POSITION = 0x00000030 # A mask for the position part of the flags
# ChannelSet/Channel: Values that work for ChanneSet_knob() and Channel_knob():
NO_CHECKMARKS = 0x00000001 # NO_CHECKMARKS Get rid of the individual channel checkmarks.
NO_ALPHA_PULLDOWN = 0x00000002 # NO_ALPHA_PULLDOWN Get rid of the extra pulldown that lets you set the 4th channel to an arbitrary different layer than the first 3.
TABENDGROUP = 0x00000000
TABBEGINCLOSEDGROUP = 0x00000000
ADD_VIEWS = 0x00000000
AFTER_CONST = 0x00000000
AFTER_LINEAR = 0x00000000
ALL = 0x00000000
BEFORE_CONST = 0x00000000
BEFORE_LINEAR = 0x00000000
BREAK = 0x00000000
CATMULL_ROM = 0x00000000
CONSTANT = 0x00000000
CUBIC = 0x00000000
DONT_CREATE_VIEWS = 0x00000000
DONT_SAVE_TO_NODEPRESET = 0x00000000
EXE_PATH = 0x00000000
EXPAND_TO_WIDTH = 0x00000000
EXPRESSIONS = 0x00000000
FLOAT = 0x00000000
FONT = 0x00000000
GEO = 0x00000000
HORIZONTAL = 0x00000000
IMAGE = 0x00000000
INT16 = 0x00000000
INT8 = 0x00000000
INTERACTIVE = 0x00000000
INVALIDHINT = 0x00000000
LINEAR = 0x00000000
LOG = 0x00000000
MATCH_CLASS = 0x00000000
MATCH_COLOR = 0x00000000
MATCH_LABEL = 0x00000000
MONITOR = 0x00000000
NODIR = 0x00000000
NO_POSTAGESTAMPS = 0x00000000
NUKE_VERSION_DATE = 0x00000000
NUKE_VERSION_MAJOR = 0x00000000
NUKE_VERSION_MINOR = 0x00000000
NUKE_VERSION_PHASE = 0x00000000
NUKE_VERSION_PHASENUMBER = 0x00000000
NUKE_VERSION_RELEASE = 0x00000000
NUKE_VERSION_STRING = 0x00000000
NUM_CPUS = 0x00000000
NUM_INTERPOLATIONS = 0x00000000
PLUGIN_EXT = 0x00000000
PREPEND = 0x00000000
PROFILE_ENGINE = 0x00000000
PROFILE_REQUEST = 0x00000000
PROFILE_STORE = 0x00000000
PROFILE_VALIDATE = 0x00000000
PYTHON = 0x00000000
REPLACE = 0x00000000
REPLACE_VIEWS = 0x00000000
SCRIPT = 0x00000000
SMOOTH = 0x00000000
STRIP_CASCADE_PREFIX = 0x00000000
TABBEGINGROUP = 0x00000000
TABKNOB = 0x00000000
THREADS = 0x00000000
TO_SCRIPT = 0x00000000
TO_VALUE = 0x00000000
USER_SET_SLOPE = 0x00000000
VIEWER = 0x00000000
VIEW_NAMES = 0x00000000
WRITE_ALL = 0x00000000
WRITE_NON_DEFAULT_ONLY = 0x00000000
WRITE_USER_KNOB_DEFS = 0x00000000
class nodes(object):
def S_ConvolveComp(self):
pass
def S_Deband(self):
pass
def S_DefocusPrism(self):
pass
def S_EdgeAwareBlur(self):
pass
def S_EdgeBlur(self):
pass
def S_GrainRemove(self):
pass
def S_Median(self):
pass
def S_RackDefocus(self):
pass
def S_RackDfComp(self):
pass
def S_Sharpen(self):
pass
def S_SoftFocus(self):
pass
def S_ZBlur(self):
pass
def S_ZConvolve(self):
pass
def S_ZDefocus(self):
pass
def S_EdgeFlash(self):
pass
def S_Layer(self):
pass
def S_MathOps(self):
pass
def S_MatteOps(self):
pass
def S_MatteOpsComp(self):
pass
def S_ZComp(self):
pass
def S_Distort(self):
pass
def S_DistortBlur(self):
pass
def S_DistortChroma(self):
pass
def S_DistortRGB(self):
pass
def S_InfiniteZoom(self):
pass
def S_Shake(self):
pass
def S_StretchFrameEdges(self):
pass
def S_WarpBubble(self):
pass
def S_WarpBubble2(self):
pass
def S_WarpChroma(self):
pass
def S_WarpCornerPin(self):
pass
def S_WarpDrops(self):
pass
def S_WarpFishEye(self):
pass
def S_WarpMagnify(self):
pass
def S_WarpPerspective(self):
pass
def S_WarpPolar(self):
pass
def S_WarpPuddle(self):
pass
def S_WarpPuff(self):
pass
def S_WarpRepeat(self):
pass
def S_WarpTransform(self):
pass
def S_WarpVortex(self):
pass
def S_WarpWaves(self):
pass
def S_WarpWaves2(self):
pass
def S_BokehLights(self):
pass
def S_DropShadow(self):
pass
def S_EdgeRays(self):
pass
def S_Flashbulbs(self):
pass
def S_Glare(self):
pass
def S_Glint(self):
pass
def S_GlintRainbow(self):
pass
def S_Glow(self):
pass
def S_GlowAura(self):
pass
def S_GlowDarks(self):
pass
def S_GlowDist(self):
pass
def S_GlowEdges(self):
pass
def S_GlowNoise(self):
pass
def S_GlowOrthicon(self):
pass
def S_GlowRainbow(self):
pass
def S_GlowRings(self):
pass
def S_LensFlare(self):
pass
def S_LensFlareAutoTrack(self):
pass
def S_Light3D(self):
pass
def S_LightLeak(self):
pass
def S_Rays(self):
pass
def S_SpotLight(self):
pass
def S_Streaks(self):
pass
def S_ZGlow(self):
pass
def S_Aurora(self):
pass
def S_Caustics(self):
pass
def S_Clouds(self):
pass
def S_CloudsColorSmooth(self):
pass
def S_CloudsMultColor(self):
pass
def S_CloudsPerspective(self):
pass
def S_CloudsPsyko(self):
pass
def S_CloudsVortex(self):
pass
def S_Gradient(self):
pass
def S_GradientMulti(self):
pass
def S_GradientRadial(self):
pass
def S_Grid(self):
pass
def S_Grunge(self):
pass
def S_LaserBeam(self):
pass
def S_MuzzleFlash(self):
pass
def S_NightSky(self):
pass
def S_Shape(self):
pass
def S_Sparkles(self):
pass
def S_SparklesColor(self):
pass
def S_TextureCells(self):
pass
def S_TextureChromaSpiral(self):
pass
def S_TextureFlux(self):
pass
def S_TextureFolded(self):
pass
def S_TextureLoops(self):
pass
def S_TextureMicro(self):
pass
def S_TextureMoire(self):
pass
def S_TextureNeurons(self):
pass
def S_TextureNoiseEmboss(self):
pass
def S_TextureNoisePaint(self):
pass
def S_TexturePlasma(self):
pass
def S_TextureSpots(self):
pass
def S_TextureTiles(self):
pass
def S_TextureWeave(self):
pass
def S_Zap(self):
pass
def S_ZapFrom(self):
pass
def S_ZapTo(self):
pass
def S_AutoPaint(self):
# repo: shunchaowang/dbmi-annotator | file: translation/mp-evidence-base-ETL/deprecated/mpEvidenceLoad.py | stars: 1-10
# Copyright 2016-2017 University of Pittsburgh
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import psycopg2
import uuid, datetime
curr_date = datetime.datetime.now()
# QUALIFIER ################################################################
def insert_qualifier(conn, qtype, qvalue, concept_code, vocab_id, qtype_concept_code, qtype_vocab_id, claim_body_id, enantiomer, metabolite):
cur = conn.cursor()
s_boo = False; p_boo = False; o_boo = False
if qtype == "subject":
s_boo = True
elif qtype == "predicate":
p_boo = True
elif qtype == "object":
o_boo = True
cur.execute("""INSERT INTO qualifier (urn, claim_body_id, subject, predicate, object, qvalue, concept_code, vocabulary_id, qualifier_type_concept_code, qualifier_type_vocabulary_id, enantiomer, metabolite) VALUES (%s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s)""", (uuid.uuid4().hex, claim_body_id, s_boo, p_boo, o_boo, qvalue, concept_code, vocab_id, qtype_concept_code, qtype_vocab_id, enantiomer, metabolite))
# OPEN ANNOTATION - TARGET AND SELECTOR #############################################
# load table "oa_selector" one row
# input: conn, prefix, exact, suffix
# return: selector id
def insert_oa_selector(conn, prefix, exact, suffix):
cur = conn.cursor()
urn = uuid.uuid4().hex
qry1 = "INSERT INTO oa_selector (urn, selector_type, exact, prefix, suffix) VALUES ('%s', '%s', '%s', '%s', '%s');" % (urn, "oa_selector", exact, prefix, suffix)
cur.execute(qry1)
qry2 = "SELECT * FROM oa_selector WHERE urn = '%s';" % (urn)
cur.execute(qry2)
for row in cur.fetchall():
return row[0]
return None
# load table "oa_target" one row
# input: conn, doc source url, selector id
# return: target id
def insert_oa_target(conn, source, selector_id):
cur = conn.cursor()
urn = uuid.uuid4().hex
qry1 = "INSERT INTO oa_target (urn, has_source, has_selector) VALUES ('%s', '%s', '%s');" % (urn, source, selector_id)
cur.execute(qry1)
qry2 = "SELECT * FROM oa_target WHERE urn = '%s'" % (urn);
cur.execute(qry2)
for row in cur.fetchall():
return row[0]
return None
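# Example (illustrative sketch, connection and literal values are placeholders): a
# target row always points at a selector row, so the two inserts above are chained.
def _example_selector_target(conn):
    selector_id = insert_oa_selector(conn, 'prefix text ', 'exact highlighted span', ' suffix text')
    target_id = insert_oa_target(conn, 'http://example.org/spl/12345', selector_id)
    return target_id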
# MP CLAIM ########################################################
# insert to table "mp_claim_annotation"
# return claim annotation id
def insert_mp_claim_annotation(conn, curr_date, has_body, has_target, creator, annId, negation, rejected, rejected_reason, rejected_comment):
cur = conn.cursor()
urn = annId
qry1 = "INSERT INTO mp_claim_annotation (urn, has_body, has_target, creator, date_created, date_updated, negation, rejected_statement, rejected_statement_reason, rejected_statement_comment)" + "VALUES ('%s','%s','%s','%s','%s','%s','%s','%s','%s','%s');" % (urn, str(has_body), str(has_target), creator, curr_date, curr_date, negation, rejected, rejected_reason, rejected_comment)
cur.execute(qry1)
cur.execute("SELECT * FROM mp_claim_annotation WHERE urn = '" + urn + "';")
for row in cur.fetchall():
return row[0]
return None
# load table "oa_claim_body" one row
# return claim body id
def insert_oa_claim_body(conn, claimlabel, exact):
cur = conn.cursor()
urn = uuid.uuid4().hex
qry1 = "INSERT INTO oa_claim_body (urn, label, claim_text) VALUES ('%s', '%s', '%s');" % (urn, claimlabel, exact)
cur.execute(qry1)
qry2 = "SELECT * FROM oa_claim_body WHERE urn = '%s';" % (urn)
cur.execute(qry2)
for row in cur.fetchall():
return row[0]
return None
def update_oa_claim_body(conn, is_oa_body_of, oa_claim_body_id):
cur = conn.cursor()
cur.execute("UPDATE oa_claim_body SET is_oa_body_of = " + str(is_oa_body_of) +
" WHERE id = " + str(oa_claim_body_id) + ";")
# MP MATERIAL ########################################################
## dose_role: subject or object, drug_idx: drug1 or drug2
def insert_material_dose(conn, row, creator, dose_role, drug_idx, mp_claim_id, mp_data_index):
if row[drug_idx + 'dose']:
exact = row[drug_idx + 'dosetext']
selector_id = insert_oa_selector(conn, '', exact, '')
target_id = insert_oa_target(conn, row["document"], selector_id)
material_body_id = insert_material_annotation(conn, row, mp_claim_id, target_id, creator, dose_role + "_dose", mp_data_index)
insert_material_field(conn, row, material_body_id, drug_idx)
def insert_material_annotation(conn, row, mp_claim_id, has_target, creator, data_type, mp_data_index):
ev_supports = True
if "evRelationship" in row and row['evRelationship']:
if 'refutes' in row['evRelationship']:
ev_supports = False
cur = conn.cursor()
urn = uuid.uuid4().hex
cur.execute("INSERT INTO mp_material_annotation (urn, type, has_target, creator, mp_claim_id, mp_data_index, ev_supports, date_created) VALUES (%s,%s,%s,%s,%s,%s,%s,%s);", (urn, data_type, has_target, creator, mp_claim_id, mp_data_index, ev_supports, curr_date))
cur.execute("SELECT * FROM mp_material_annotation WHERE urn = '" + urn + "';")
for result in cur.fetchall():
material_annotation_id = result[0]
urn = uuid.uuid4().hex
cur.execute("INSERT INTO oa_material_body (urn, material_type, is_oa_body_of)" +
"VALUES ( '" + urn + "', '" + data_type + "', " + str(material_annotation_id) + ");")
cur.execute("SELECT * FROM oa_material_body WHERE urn = '" + urn + "';")
for result in cur.fetchall():
has_body = result[0]
cur.execute("UPDATE mp_material_annotation SET has_body = " + str(has_body) +
" WHERE id = " + str(material_annotation_id) + ";")
return has_body
# load table "material_field" one row
def insert_material_field(conn, row, material_body_id, material_type):
cur = conn.cursor()
if material_type == "participants":
cur.execute("INSERT INTO material_field (urn, material_body_id, material_field_type, value_as_string, value_as_number) VALUES ( '" + uuid.uuid4().hex + "', " + str(material_body_id) + ", 'participants', NULL, " + row['participants'] + ");")
elif material_type in ["drug1","drug2"]:
value = material_type + "dose"
regimens = material_type + "regimens"
formulation = material_type + "formulation"
duration = material_type + "duration"
cur.execute("INSERT INTO material_field (urn, material_body_id, material_field_type, value_as_string, value_as_number) VALUES ( '" + uuid.uuid4().hex + "', " + str(material_body_id) + ", 'value', '" + row[value] + "', NULL);")
if row[regimens]:
cur.execute("INSERT INTO material_field (urn, material_body_id, material_field_type, value_as_string, value_as_number)" +
"VALUES ( '" + uuid.uuid4().hex + "', " + str(material_body_id) + ", 'regimens', '" + row[regimens] + "', NULL);")
if row[formulation]:
cur.execute("INSERT INTO material_field (urn, material_body_id, material_field_type, value_as_string, value_as_number)" +
"VALUES ( '" + uuid.uuid4().hex + "', " + str(material_body_id) + ", 'formulation', '" + row[formulation] + "', NULL);")
if row[duration]:
cur.execute("INSERT INTO material_field (urn, material_body_id, material_field_type, value_as_string, value_as_number)" +
"VALUES ( '" + uuid.uuid4().hex + "', " + str(material_body_id) + ", 'duration', '" + row[duration] + "', NULL);")
elif material_type == "phenotype":
cur.execute("INSERT INTO material_field (urn, material_body_id, material_field_type, value_as_string, value_as_number) VALUES (%s,%s,%s,%s,%s)",(uuid.uuid4().hex, str(material_body_id),'type',row["phenotypetype"],None))
cur.execute("INSERT INTO material_field (urn, material_body_id, material_field_type, value_as_string, value_as_number) VALUES (%s,%s,%s,%s,%s)",(uuid.uuid4().hex, str(material_body_id),'value',row["phenotypevalue"],None))
cur.execute("INSERT INTO material_field (urn, material_body_id, material_field_type, value_as_string, value_as_number) VALUES (%s,%s,%s,%s,%s)",(uuid.uuid4().hex, str(material_body_id),'metabolizer',row["phenotypemetabolizer"],None))
cur.execute("INSERT INTO material_field (urn, material_body_id, material_field_type, value_as_string, value_as_number) VALUES (%s,%s,%s,%s,%s)",(uuid.uuid4().hex, str(material_body_id),'population',row["phenotypepopulation"],None))
else:
print "[ERROR] material_type (%s) invalid!" % material_type
# MP DATA ########################################################
def insert_mp_data_annotation(conn, row, mp_claim_id, has_target, creator, data_type, mp_data_index):
ev_supports = 'true'
if "evRelationship" in row and row["evRelationship"]:
if 'refutes' in row['evRelationship']:
ev_supports = 'false'
cur = conn.cursor()
urn = str(uuid.uuid4().hex)
cur.execute("""INSERT INTO mp_data_annotation (urn, type, has_target, creator, mp_claim_id, mp_data_index, ev_supports, date_created, rejected, rejected_reason, rejected_comment) VALUES (%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s);""",(urn ,data_type, has_target, creator, mp_claim_id, mp_data_index, ev_supports, curr_date, None, None, None))
cur.execute("SELECT * FROM mp_data_annotation WHERE urn = '" + urn + "';")
for result in cur.fetchall():
data_annotation_id = result[0]
urn = uuid.uuid4().hex
cur.execute("INSERT INTO oa_data_body (urn, data_type, is_oa_body_of)" +
"VALUES ( '" + urn + "', '" + data_type + "', " + str(data_annotation_id) + ");")
cur.execute("SELECT * FROM oa_data_body WHERE urn = '" + urn + "';")
for result in cur.fetchall():
has_body = result[0]
cur.execute("UPDATE mp_data_annotation SET has_body = " + str(has_body) +
" WHERE id = " + str(data_annotation_id) + ";")
return has_body
# load table "data_field" one row
def insert_data_field(conn, row, data_body_id, data_type):
cur = conn.cursor()
if data_type in ["auc", "cmax", "clearance", "halflife"]:
value = data_type + "value"
ttype = data_type + "type"
direction = data_type + "direction"
cur.execute("""INSERT INTO data_field (urn, data_body_id, data_field_type, value_as_string, value_as_number) VALUES (%s, %s, %s, %s, %s)""", (uuid.uuid4().hex, data_body_id, "value", row[value], None))
cur.execute("""INSERT INTO data_field (urn, data_body_id, data_field_type, value_as_string, value_as_number) VALUES (%s, %s, %s, %s, %s)""", (uuid.uuid4().hex, data_body_id, "type", row[ttype], None))
cur.execute("""INSERT INTO data_field (urn, data_body_id, data_field_type, value_as_string, value_as_number) VALUES (%s, %s, %s, %s, %s)""", (uuid.uuid4().hex, data_body_id, "direction", row[direction], None))
if data_type == "reviewer":
cur.execute("INSERT INTO data_field (urn, data_body_id, data_field_type, value_as_string, value_as_number) VALUES ( '" + uuid.uuid4().hex + "', " + str(data_body_id) + ", 'reviewer', '" + (row["reviewer"] or "") + "', NULL), ( '" + uuid.uuid4().hex + "', " + str(data_body_id) + ", 'date', '" + (row["reviewerdate"] or "") + "', NULL), ( '" + uuid.uuid4().hex + "', " + str(data_body_id) + ", 'total', '" + (str(row["reviewertotal"]) or "") + "', NULL), ( '" + uuid.uuid4().hex + "', " + str(data_body_id) + ", 'lackinfo', '" + (str(row["reviewerlackinfo"]) or "") + "', NULL);")
if data_type == "dipsquestion" and "|" in row["dipsquestion"]:
dipsQsL = row["dipsquestion"].split('|')
idx = 1
if dipsQsL:
for qs in dipsQsL:
if qs and qs != "":
cur.execute("INSERT INTO data_field (urn, data_body_id, data_field_type, value_as_string, value_as_number) VALUES (%s, %s, %s, %s, %s);",(uuid.uuid4().hex , str(data_body_id), 'q'+str(idx), qs, None))
idx += 1
# HIGHLIGHT ANNOTATION ########################################################
def insert_highlight_annotation(conn, type, has_body, has_target, creator, date_created, date_updated):
urn = uuid.uuid4().hex
cur = conn.cursor()
qry2 = "INSERT INTO highlight_annotation (urn, type, has_body, has_target, creator, date_created, date_updated) VALUES ('%s', '%s', '%s', '%s', '%s', '%s', '%s');" % (urn, "all", str(has_body), str(has_target), creator, date_created, date_updated);
cur.execute(qry2);
qry2 = "SELECT * FROM highlight_annotation WHERE urn = '%s';" % (urn)
cur.execute(qry2)
for row in cur.fetchall():
return row[0]
return None
def insert_oa_highlight_body(conn, drug, url):
urn = uuid.uuid4().hex
cur = conn.cursor()
qry1 = "INSERT INTO oa_highlight_body (urn, drugname, uri) VALUES ('%s', '%s', '%s');" % (urn, drug, url);
cur.execute(qry1);
qry2 = "SELECT * FROM oa_highlight_body WHERE urn = '%s';" % (urn)
cur.execute(qry2)
for row in cur.fetchall():
return row[0]
return None
def update_oa_highlight_body(conn, highlight_annotation_id, oa_highlight_body_id):
cur = conn.cursor()
cur.execute("UPDATE oa_highlight_body SET is_oa_body_of = " + str(highlight_annotation_id) + " WHERE id = " + str(oa_highlight_body_id) + ";")
# MP METHOD ################################################################
def insert_method(conn, row, mp_claim_id, mp_data_index):
cur = conn.cursor()
enteredVal = row['method']
cur.execute("INSERT INTO method (entered_value, inferred_value, mp_claim_id, | |
# -*- coding: utf-8 -*-
# pylint: disable=invalid-name
"""
X-Check Validation Filtering Module for Sparky
@author: <NAME>
"""
import sys
if sys.version_info[0] == 2:
import Tkinter as tk
import tkMessageBox
import tkFont
else:
import tkinter as tk
import tkinter.messagebox as tkMessageBox
import tkinter.font as tkFont
try:
import sparky
from sparky import sputil, tkutil, pyutil
except:
import poky
from poky import sputil, tkutil, pyutil
import peak_list_dialog
from itertools import combinations
import collections
import math
from decimal import Decimal, getcontext
try:
from matplotlib import use as matplotlib_use
matplotlib_use('TkAgg')
except:
print("Exception happened for importing 'matplotlib use'")
from matplotlib.pyplot import subplots, subplots_adjust, ion, show, draw
from matplotlib.backends.backend_tkagg import FigureCanvasTkAgg
try:
from matplotlib.backends.backend_tkagg import NavigationToolbar2TkAgg as NavTB
except:
from matplotlib.backends.backend_tkagg import NavigationToolbar2Tk as NavTB
#try:
# # for Mac support
# from matplotlib.pyplot import pause
#except:
# pass
class xcheck_dialog(tkutil.Dialog, tkutil.Stoppable):
def __init__(self, session):
self.specs_peaks = []
self.session = session
tkutil.Dialog.__init__(self, session.tk, 'Resonance Cross-Validation')
# xcheck_label = tk.Label(self.top, text="X-Check Validation Filtering Module", font=20)
# xcheck_label.pack(side='top', fill='both', expand=1, pady=15)
# separator = tk.Frame(self.top, height=2, bd=1, relief="ridge")
# separator.pack(fill="both", padx=5, pady=5, side ='top')
# frames
# listbox
topfrm = tk.Frame(self.top)
topfrm.pack(side='top', fill='both', expand=0, padx=8)
# update, select all buttons
midfrm = tk.Frame(self.top)
midfrm.pack(fill='both', expand=1, padx=8)
# settings
btmfrm = tk.Frame(self.top)
btmfrm.pack(fill='both', expand=1, padx=8)
# run buttons 1st row
buttonsfrm1 = tk.Frame(self.top)
buttonsfrm1.pack(fill='both', expand=1, padx=(15,0), pady=(10,2))
# run buttons 2nd row
buttonsfrm2 = tk.Frame(self.top)
buttonsfrm2.pack(fill='both', expand=1, padx=(15,0))
# status
statusfrm = tk.Frame(self.top)
statusfrm.pack(fill='both', expand=1, padx=8)
#spectra list
self.spectra_list = tkutil.scrolling_list(topfrm, 'Select the spectra for cross-validation:', 5, True)
self.spectra_list.listbox['selectmode'] = 'extended'
self.spectra_list.listbox.bind('<ButtonRelease-1>', self.spectra_selected)
self.spectra_list.frame.pack(side='top', fill='both', expand=1, pady=(5,5))
tkutil.create_hint(self.spectra_list.frame, 'You can select multiple experiments by holding down the Ctrl key and clicking on the experiments')
# buttons
update_button = tk.Button(midfrm, text='Update List', command=self.update_list)
update_button.pack(side='left', anchor='w', expand=0, pady=(0, 15))
tkutil.create_hint(update_button, 'This will refresh the list in case a new experiment is loaded')
select_all_button = tk.Button(midfrm, text='Select All', command=self.select_all)
select_all_button.pack(side='left', anchor='w', expand=0, pady=(0, 15), padx=1)
tkutil.create_hint(select_all_button, 'This will select all the loaded experiments for the cross validation')
# tolerance frame
tolerance_font = tkFont.Font(size=11)
tolerance_label = tk.Label(btmfrm, text="Tolerances:", font=tolerance_font)
tolerance_label.pack(side='top', anchor='w')
tkutil.create_hint(tolerance_label, 'These tolerances are used in comparing peaks from different experiments')
tol_frm = tk.Frame(btmfrm)
tol_frm.pack(side='top', fill='both', expand=1)
self.tol_H = tkutil.entry_field(tol_frm, '1H: ', width=5, initial='0.05')
self.tol_H.frame.pack(side='left', padx=(20,10))
tkutil.create_hint(self.tol_H.frame, 'Maximum distance for H to be considered the same resonance')
self.tol_N = tkutil.entry_field(tol_frm, '15N:', width=5, initial='0.3')
self.tol_N.frame.pack(side='left', padx=(5,10))
tkutil.create_hint(self.tol_N.frame, 'Maximum distance for N to be considered the same resonance')
self.tol_C = tkutil.entry_field(tol_frm, '13C:', width=5, initial='0.35')
self.tol_C.frame.pack(side='left', padx=(5,10))
tkutil.create_hint(self.tol_C.frame, 'Maximum distance for C to be considered the same resonance')
# exclude frame
exclude_font = tkFont.Font(size=11)
exclude_label = tk.Label(btmfrm, text="Exclude Range:", font=tolerance_font)
exclude_label.pack(side='top', anchor='w', pady=(15,0))
tkutil.create_hint(exclude_label, 'Any peak with their H resonance in this range will be excluded from the cross validation')
exclude_frm = tk.Frame(btmfrm)
exclude_frm.pack(side='top', fill='both', expand=1, pady=(5,0))
self.exclude_from = tkutil.entry_field(exclude_frm, 'From: ', width=5, initial='4.7')
self.exclude_from.frame.pack(side='left', padx=(25,0))
tkutil.create_hint(self.exclude_from.frame, 'Lower bound for the exclusion range. Any peak with their H resonance in this range will be excluded from the cross validation')
self.exclude_to = tkutil.entry_field(exclude_frm, 'To: ', width=5, initial='4.9')
self.exclude_to.frame.pack(side='left', padx=(5,10))
tkutil.create_hint(self.exclude_to.frame, 'Upper bound for the exclusion range. Any peak with their H resonance in this range will be excluded from the cross validation')
# checkbox
checkbox_frm = tk.Frame(btmfrm)
checkbox_frm.pack(side='top', fill='both', expand=1, pady=(15,0))
self.note_append = tk.BooleanVar()
self.note_append.set(True)
checkbox_note_append = tk.Checkbutton(checkbox_frm, highlightthickness=0, text='Append to Note',
variable=self.note_append)
checkbox_note_append.pack(side='top', anchor='w', padx=(0,0))
tkutil.create_hint(checkbox_note_append, 'If checked, the result will be appended to the Note property of each peak. If unchecked, the Note will be OVERWRITTEN!')
# separator
sep = tk.Frame(btmfrm, height=2, bd=1, relief="ridge")
sep.pack(fill="both", padx=5, pady=(10,5), side='top')
# histogram bins
bin_font = tkFont.Font(size=11)
bin_label = tk.Label(btmfrm, text="Histogram Bins:", font=bin_font)
bin_label.pack(side='top', anchor='w', pady=(5,3))
tkutil.create_hint(bin_label, 'These bins are used in generating the histograms')
bins_frm = tk.Frame(btmfrm)
bins_frm.pack(side='top', fill='both', expand=1, pady=(0,10))
self.bin_H = tkutil.entry_field(bins_frm, '1H: ', width=5, initial='0.02')
self.bin_H.frame.pack(side='left', padx=(18,10))
tkutil.create_hint(self.bin_H.frame, 'Bin steps for H histogram')
self.bin_N = tkutil.entry_field(bins_frm, '15N:', width=5, initial='0.2')
self.bin_N.frame.pack(side='left', padx=(3,10))
tkutil.create_hint(self.bin_N.frame, 'Bin steps for N histogram')
self.bin_C = tkutil.entry_field(bins_frm, '13C:', width=5, initial='0.2')
self.bin_C.frame.pack(side='left', padx=(3,10))
tkutil.create_hint(self.bin_C.frame, 'Bin steps for C histogram')
# run button & status
xcheck_button = tk.Button(buttonsfrm1, text='Run Cross-Validation', command=self.run_xcheck_button)
xcheck_button.pack(side='left')
tkutil.create_hint(xcheck_button, 'Runs the cross validation using the settings above')
peaklist_button = tk.Button(buttonsfrm1, text='Peak List', width=10, command=self.show_peak_list)
peaklist_button.pack(side='left')
tkutil.create_hint(peaklist_button, 'Shows the Peak list')
hist_button = tk.Button(buttonsfrm2, text='Peak Histogram', width=16, command=self.run_histogram)
hist_button.pack(side='left')
tkutil.create_hint(hist_button, 'Generates and shows the histograms for the peak resonances.')
stop_button = tk.Button(buttonsfrm2, text='Stop', command=self.stop_cb)
stop_button.pack(side='left')
tkutil.create_hint(stop_button, 'Stops the cross validations process')
#TODO: Add the section for CrossValidation to the extensions.html file
help_button = tk.Button(buttonsfrm2, text='Help', command=sputil.help_cb(session, 'CrossValidation'))
help_button.pack(side='left')
tkutil.create_hint(help_button, 'Opens a help page with more information about this module.')
self.status = tk.Label(statusfrm, text="Status: Ready!")
self.status.pack(side='left', anchor='w', pady=(10,5), padx=(5,0))
progress_label = tk.Label(statusfrm)
progress_label.pack(side='left', anchor='w')
tkutil.Stoppable.__init__(self, progress_label, stop_button)
# fix the fonts
suggested_fonts = ['Arial', 'NotoSans', 'Ubuntu', 'SegoeUI', 'Helvetica',
'Calibri', 'Verdana', 'DejaVuSans', 'FreeSans']
for fnt in suggested_fonts:
if fnt in tkFont.families():
self.spectra_list.listbox['font'] = (fnt, 9)
self.spectra_list.heading['font'] = (fnt, 11)
break
self.update_list()
# ---------------------------------------------------------------------------
# functions
# ---------------------------------------------------------------------------
def select_all(self):
self.spectra_list.listbox.select_set(0, tk.END)
self.spectra_selected()
self.status.config(text="Status: Selection is complete.")
self.status.update()
# ---------------------------------------------------------------------------
def update_list(self):
if self.session.project == None:
tkMessageBox.showwarning(title='Error', message='No spectrum is loaded!')
return
self.spectra_list.clear()
self.spec_list = self.session.project.spectrum_list()
self.spec_list.sort(key=lambda x: x.name, reverse=False)
for spec in self.spec_list:
self.spectra_list.append(spec.name)
# ---------------------------------------------------------------------------
def spectra_selected(self, *args):
data_list = self.spectra_list.selected_line_data()
if len(data_list) < 1:
tkMessageBox.showwarning(title='Error', message='No spectrum was selected!')
return
selected_spec_ids = self.spectra_list.selected_line_numbers()
self.status.config(text="Status: Selection is complete.")
self.status.update()
if selected_spec_ids == None:
tkMessageBox.showwarning(title='Error', message='No spectrum was selected!')
return
self.specs_names = []
self.specs_peaks = []
self.specs_nuclei = []
for spec_id in selected_spec_ids:
self.specs_peaks.append(self.spec_list[spec_id].peak_list())
self.specs_nuclei.append(self.spec_list[spec_id].nuclei)
self.specs_names.append(self.spec_list[spec_id].name)
# ---------------------------------------------------------------------------
def run_xcheck_button(self):
self.stoppable_call(self.run_xcheck)
# ---------------------------------------------------------------------------
def run_xcheck(self, *args):
self.status.config(text="Status: Running ...")
self.status.update()
# specs_peaks[each_selected_spec] --> all peaks in that spec
# specs_nuclei[i] -- > i.e. ('15N', '13C', '1H') or ('13C', '1H', '1H')
num_of_specs = len(self.specs_peaks)
combinations_list = list(combinations(range(num_of_specs), 2))
# list(combinations(range(3), 2)) -- > [(0, 1), (0, 2), (1, 2)]
# 8 spec is 28 combinations!
if num_of_specs == 1:
tkMessageBox.showwarning(title='Error!', message='You need to select at least two experiments to validate against each other')
return
for spec in self.specs_peaks:
for peak in spec:
if self.note_append.get():
# if the user is re-running xcheck, remove the previous results
if len(peak.note) == 0:
peak.note = 'xcheck:'
else:
xcheck_str_start = peak.note.find('xcheck:')
if xcheck_str_start == 0:
peak.note = 'xcheck:'
elif xcheck_str_start > 0:
peak.note = peak.note[ : xcheck_str_start] + 'xcheck:'
else:
peak.note += ';xcheck:'
else:
peak.note = 'xcheck:'
total_peaks = 0
for spec_pair in combinations_list:
s1 = spec_pair[0]
s2 = spec_pair[1]
if (self.specs_nuclei[s1][0] != self.specs_nuclei[s2][0]):
continue
total_peaks += len(self.specs_peaks[s1]) * len(self.specs_peaks[s2])
processed_peaks = 0
for spec_pair in combinations_list:
self.top.update()
s1 = spec_pair[0]
s2 = spec_pair[1]
print("Comparing " + self.specs_names[s1] + " with " + self.specs_names[s2] + ":")
if (self.specs_nuclei[s1][0] != self.specs_nuclei[s2][0]):
print("Will not compare a N-HSQC based experiments with a C-HSQC one")
print('_' * 50)
continue
tol = float(self.tol_C.variable.get())
if self.specs_nuclei[s1][0] == '15N':
tol = float(self.tol_N.variable.get())
tol_H = float(self.tol_H.variable.get())
exclude_from = float(self.exclude_from.variable.get())
exclude_to = float(self.exclude_to.variable.get())
print('Total peaks in the first one: ' + str(len(self.specs_peaks[s1])))
print('Total peaks in the second one: ' + str(len(self.specs_peaks[s2])))
for i, peak1 in enumerate(self.specs_peaks[s1]):
#print('Peak ' + str(i) + ': ', end='')
#print 'Peak1 ' + str(i) + ': '
#print(peak1.frequency)
#if peak1.assignment.find('?') == -1:
if peak1.is_assigned == 1:
continue # skip if already assigned
if ((peak1.frequency[-1] > exclude_from) and (peak1.frequency[-1] < exclude_to)):
continue
match_flag = 0
for j, peak2 in enumerate(self.specs_peaks[s2]):
#print 'Peak2 ' + str(j) + ': '
#print(peak2.frequency)
#if peak2.assignment.find('?') == -1:
if peak2.is_assigned == 1:
continue # skip if already assigned
if ((peak2.frequency[-1] > exclude_from) and (peak2.frequency[-1] < exclude_to)):
continue
if abs(peak1.frequency[0] - peak2.frequency[0]) < tol:
if abs(peak1.frequency[-1] - peak2.frequency[-1]) < tol_H:
print("\nMatch:")
print(peak1.frequency)
print(peak2.frequency)
match_flag = 1
peak1.note += self.specs_names[s2] + ','
peak2.note += self.specs_names[s1] + ','
break
if match_flag == 0:
print('\n' + '*' * 20 + '\nNo match:')
print(peak1.frequency)
processed_peaks += len(self.specs_peaks[s2])
percent = "{:2.0f}".format(100 * processed_peaks / total_peaks)
self.status.config(text="Status: Running ... (" + percent + "%)")
self.status.update()
print('_' * 50)
# Update the Note to count the frequency
for spec in self.specs_peaks:
for peak in spec:
xcheck_str_start = peak.note.find('xcheck:')
main_note = ''
if xcheck_str_start > 0:
main_note = peak.note[ : xcheck_str_start]
xcheck = peak.note[xcheck_str_start + len('xcheck:') : ].strip(',')
canvas1 = FigureCanvasTkAgg(fig, master=frame_Method_Plot)
toolbar1 = NavigationToolbar2Tk(canvas1, frame_Method_Plot)
Plot_Graph(X_time, Y_disp, ax1, canvas1)
#-------------------------------------------------------------------------------------------------------------------------
# Functions to start or stop the measurement when test is started or stopped
#-------------------------------------------------------------------------------------------------------------------------
def start_measuring(tabControl,measuretab):
"""starts the data stream the plotted data depends on whether self.plotforce= true or false
creates a new graphframe on top of the old one"""
measuretab.switchon()
tabControl.select(measuretab)
def stop_measuring(measuretab):
"""stops the datastream and the animation, reads out the buffer and appends its content to sens.emptystreamdict"""
measuretab.switchoff()
savealldata_temp()
print('data saved')
def recorddata1(drec, NUMVALUES=300, RECRATE=100):
"""Set up data recorder to record 2 tables for first axis after next motion command.
@type drec : pipython.datarectools.Datarecorder
"""
#Start_Record = time.time()
drec.numvalues = NUMVALUES
drec.samplefreq = RECRATE
print('data recorder rate: {:.2f} Hz'.format(drec.samplefreq))
drec.options = (datarectools.RecordOptions.ACTUAL_POSITION_2, datarectools.RecordOptions.COMMANDED_POSITION_1)
drec.sources = drec.gcs.axes[0]
drec.trigsources = datarectools.TriggerSources.NEXT_COMMAND_WITH_RESET_2
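# Usage pattern in this module (see Test_Stage_Movement below): recorddata1() configures
# the recorder, drec.arm() starts it just before the first motion command, and
# drec.read(numvalues=..., offset=...) is polled afterwards to drain the recorded
# positions into PosRec.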
#-------------------------------------------------------------------------------------------------------------------------
stop_event = threading.Event() # when the "stop test" button is pressed, the thread will start and the test loop will stop
def Read_method(filename, List_Mot, BAUDRATE, stop_event,tabControl, measuretab):
"""The function Read method reads the method file and calls the appropriate function to execute the successive segments
depending on the type of method (stage movement, displacement control, force control)
# filename: name of file containing the method
# List_Mot: List of motors
# BAUDRATE : baudrate used for the controllers (same for all stages)
# stop_event : thread started when "stop test" button is pressed
# tabControl : tab of measurements
# measuretab :
"""
global Method_Content
global NUMVALUES
global Mot_freq
global RECRATE
global testdata
global testmetadata
global offset_rec
global PosRec
# Make sure the correct beam is selected and distances are entered correctly
forcedist = FC.forcelength
measdist = FC.measurelength
Tare_dist = FC.tare
beam_test = sens.current_beam
answer = popup_beam(beam_test,forcedist,measdist,Tare_dist)
if answer == True:
# Make sure data from previous test is cleared
print("forcelist: ", sens.make_forcelist())
print("timelist: ", sens.timelist)
ans_clear = True
if sens.make_forcelist(): # Check if force array is not empty
ans_clear = popup_clear()
# Read the file and execute the test
if ans_clear:
try:
Current_Segment.set("Reading file")
with open(filename) as file:
# Create dictionary of the method
try:
List = yaml.load(file, Loader = yaml.FullLoader) #Loader does not work here
except:
List = yaml.load(file)
stop_event.clear()
# Calculate the duration of the test and frequency of recording + number of recording values read
RECRATE = Get_Recrate(List,float(entry_freq.get()))
offset_reading = RECRATE # number of recording values read when recording function is called
# Create frame containing progression of the test
Frame_Test_Progression = ttk.LabelFrame(tab1, text = 'Progression', borderwidth=2, relief='ridge')
Frame_Test_Progression.grid(column=0, row=4, sticky="nsew", columnspan=2)
Frame_Test_Progression.columnconfigure(1, weight=1)
Frame_Test_Progression.rowconfigure(1, weight=1)
Label_Test_Progression = tk.Label(Frame_Test_Progression, textvariable=Current_Segment)
Label_Test_Progression.grid(row=0, column=0, padx=5, pady=5, columnspan=8)
# Getting the initial position for the grid and update label in GUI
GetCurPos_x()
PosX_init = CurPos_x.get()
GetCurPos_y()
PosY_init = CurPos_y.get()
GetCurPos_z()
PosZ_init = CurPos_z.get()
Initial_Pos = [PosX_init,PosY_init,PosZ_init]
# Perform test depending on type of control
if any(Combobox_MethodType['values'][0] in d.values() for d in List.values()): # if stage movement
Test_Stage_Movement(List, Initial_Pos, PosZ_init, filename, List_Mot, BAUDRATE, stop_event,tabControl, measuretab)
elif any(Combobox_MethodType['values'][1] in d.values() for d in List.values()): # if displacement (stage displacement - deflection) control
print("Displacement control function not written yet")
elif any(Combobox_MethodType['values'][2] in d.values() for d in List.values()): # if force control
print("Force control function not written yet")
except:
Method_Content.set('Unable to run the test')
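# Hedged sketch of a method file that Read_method/Test_Stage_Movement can parse. The key
# names ('Type', 'Stage', 'Value', 'Velocity', 'Duration', 'Force', 'Pos X', 'Pos Y',
# 'Z up') are taken from the parsing code in this script; the overall YAML layout and the
# numeric values are assumptions, and the control-type string must match an entry of
# Combobox_MethodType['values'].
#
#   Control:
#     Type: "Stage movement"      # Combobox_MethodType['values'][0]
#   Segment 1:
#     Type: "Preload"
#     Stage: "Z"
#     Force: 0.5
#     Velocity: 0.5
#     Duration: 2
#   Segment 2:
#     Type: "Move stage"
#     Stage: "Z"
#     Value: 1.0
#     Velocity: 0.2
#   Segment 3:
#     Type: "Hold"
#     Duration: 5
#   Segment 4:
#     Type: "Go to"
#     Pos X: 2.0
#     Pos Y: 2.0
#     Z up: 1.0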
#-------------------------------------------------------------------------------------------------------------------------
def Test_Stage_Movement(List, Initial_Pos, PosZ_init, filename, List_Mot, BAUDRATE, stop_event,tabControl, measuretab):
""" Function to perform a test in stage movement control
# List: Dictionary of the method
# Initial_Pos: Initial position of all stages when performing the test
# PosZ_Init: Initial position of Z stage
# filemame: Name of the method file
# Lis_Mot: list of motors of the 3 stages
# BAUDRATE: Baudrate of the controller
# stop_event: thread that is started when the test is stopped before the end
# tabControl : tab of measurements
# measuretab :
"""
global Method_Content
global NUMVALUES
global Mot_freq
global RECRATE
global testdata
global testmetadata
global offset_rec
global PosRec
# Check if stage movements in the range of motion and velocity does not exceed maximal velocity
[minZ, maxZ, max_VelZ] = Check_Range_Motion(List,PosZ_init)
if (minZ>PosminZ)&(maxZ<PosmaxZ)&(max_VelZ<VelmaxZ):
# Connexion to the stages
deviceX=GCSDevice(motorX["ctr"])
deviceX.ConnectRS232(motorX["usb"], BAUDRATE)
deviceY=GCSDevice(motorY["ctr"])
deviceY.ConnectRS232(motorY["usb"], BAUDRATE)
deviceZ=GCSDevice(motorZ["ctr"])
deviceZ.ConnectRS232(motorZ["usb"], BAUDRATE)
devices = [deviceX, deviceY, deviceZ]
# Settings for recording stage position
drec = datarectools.Datarecorder(deviceZ)
recorddata1(drec, NUMVALUES, RECRATE)
offset_rec = 1
testdata = []
PosRec = []
Current_Segment.set("test starting")
# Manage the grid
Grid_Point = 0 #Point in the grid
# Start measurements and performing the test
start_measuring(tabControl,measuretab) # Force measurements + record
drec.arm() # Record position
time.sleep(1) # wait for 1s so that force measurements start before the test
Initial_Time = time.time()
testmetadata = Method_to_List(filename, Initial_Pos, [1000,1000,1000], Initial_Time, 1000) #temporary metadata in case test is stopped
# Read dictionary and perform successive segments
for item, doc in List.items():
if stop_event.is_set(): #Will stop the test after the end of the current segment
Current_Segment.set('Test stopped')
break
Type = doc.get('Type')
print("Type", Type)
if (Type == 'Preload'):
Stg = doc.get('Stage')
For = doc.get('Force')
Vel = doc.get('Velocity')
t_hold = doc.get("Duration")
mot = motors[np.where(List_Mot==Stg)[0][0]] # Identify the motor corresponding to the stage
Current_Segment.set('Point {} - Performing segment: {}, Stage: {}, Velocity: {:.2f}, Hold for: {:.2f} seconds'.format(Grid_Point+1, item, Stg, Vel, t_hold))
device = devices[np.where(List_Mot==Stg)[0][0]]
Preload(device, For, t_hold)
if (Type == "Move stage"):
Stg = doc.get('Stage')
print("Stage", Stg)
Dis = doc.get('Value')
print("Disp", Dis)
Vel = doc.get('Velocity')
print("Stage : ", Stg, ", Displacement : ", Dis, ", Velocity : ", Vel)
mot = motors[np.where(List_Mot==Stg)[0][0]] # Identify the motor corresponding to the stage
Current_Segment.set('Point {} - Performing segment: {}, Stage: {}, Displacement: {:.2f}, Velocity: {:.2f}'.format(Grid_Point+1, item, Stg, Dis, Vel))
device = devices[np.where(List_Mot==Stg)[0][0]]
background(deviceRelativePosRec(device, Dis, Vel, True,drec,PosRec))
elif (Type == "Hold"):
t_init = time.time()
t_current = time.time()
t_hold = doc.get("Duration")
Current_Segment.set('Point {} - Performing segment: {}, Hold for {:.2f} seconds'.format(Grid_Point+1, item, t_hold))
while t_current-t_init<t_hold:
t_current = time.time()
if t_hold>1:
try:
Data_Pos = drec.read(numvalues=offset_reading,offset=offset_rec)
offset_rec = offset_rec+offset_reading
print(offset_rec)
for i in (Data_Pos[1][0]):
PosRec.append(i)
except:
t_current = time.time()
elif (Type == "Go to"):
PosX = doc.get("Pos X") + PosX_init
PosY = doc.get("Pos Y") + PosY_init
FC.forcelength = FC.forcelength - doc.get("Pos Y") # update distance to force application
Z_up = doc.get("Z up")
Current_Segment.set('Going to point {} - Moving to position X: {:.2f} Y: {:.2f}'.format(Grid_Point+1, PosX, PosY))
background(deviceRelativePos(deviceZ, -Z_up, Default_Vel_Z, wait_target=True)) #move up by -Z up
#background(Move_Update_Pos(deviceRelativePos,(deviceZ, -Z_up, Default_Vel_Z, True))) #move up by -Z up
pool = mp.Pool(3)
results = pool.map(multi_run_wrapper_Absolute,[(motorX, BAUDRATE, PosX, Default_Vel_X, False, True),(motorY, BAUDRATE, PosY, Default_Vel_Y, False, True)])
background(deviceRelativePos(deviceZ, Z_up, Default_Vel_Z, True)) #move down by Z up
GetCurPos_x() # Update X position
GetCurPos_y() # Update Y position
Grid_Point = Grid_Point+1
Final_Time = time.time()
GetCurPos_z()
Current_Segment.set('Test finished')
for k in range(100):
try:
Data_Pos = drec.read(numvalues=10,offset=offset_rec)
for i in (Data_Pos[1][0]):
PosRec.append(i)
offset_rec = offset_rec+10
except:
break
# Get the final position for the grid and update label in GUI
Final_Pos = Get_FinalPos()
# Create metadata and data
Time = list(np.arange(0,(1/RECRATE)*len(PosRec),(1/RECRATE)))
Disp = PosRec
testmetadata = Method_to_List(filename, Initial_Pos, Final_Pos, Initial_Time, Final_Time)
testdata = StageDisp_To_Dict(Time,Disp)
stop_event.clear()
print('Stopping test')
stop_measuring(measuretab)
else:
if (minZ<PosminZ) or (maxZ>PosmaxZ):
Current_Segment.set("Unable to run the test, position out of range (0 - 150mm)")
print("Unable to run the test, position out of range (0 - 150mm)")
if (max_VelZ>VelmaxZ):
Current_Segment.set("Unable tu run the test, velocity exceeds the maximal velocity allowed (50mm/s)")
print("Unable tu run the test, velocity exceeds the maximal velocity allowed (50mm/s)")
if (max_VelZ>VelmaxZ)&((minZ<PosminZ) or (maxZ>PosmaxZ)):
Current_Segment.set("Unable to run the test, position out of range (0 - 150mm) and velocity exceeds the maximal velocity allowed (50mm/s)")
#-------------------------------------------------------------------------------------------------------------------------
def Preload(device, Force_Target, Time_Preload, Velocity = 0.5):
""" Preload function - the stage moves at a constant velocity until a force threshold is reached.
# device: stage that needs to be moved
# Force_Target: value of the preload
# Time_Preload: Holding time for the preload (as of now, the stage movement is stopped, which does not guarantee that the force will be held)
# Velocity: velocity with which the stage is moved
"""
# Get initial force
Force_Init = sens.forcelist[-1]
Force = Force_Init
# Move the stage until preload is reached
for axis in device.axes:
device.VEL(axis,Velocity)
background(device.MVR(axis,20)) # the value 20 is arbitrary- the movement will actually stop when the force target is reached
while (abs(Force-Force_Init)<Force_Target):
Force = sens.forcelist[-1]
# Stop stage, update current position and hold the preload
device.STP(noraise=True)
GetCurPos_z()
time.sleep(Time_Preload)
#-------------------------------------------------------------------------------------------------------------------------
def Get_FinalPos():
""" The function gets the final position of each stage and updates in the GUI
"""
GetCurPos_x()
PosX_Final = CurPos_x.get()
GetCurPos_y()
PosY_Final = CurPos_y.get()
GetCurPos_z()
PosZ_Final = CurPos_z.get()
Final_Pos = [PosX_Final,PosY_Final,PosZ_Final]
return Final_Pos
#-------------------------------------------------------------------------------------------------------------------------
def Stop_Test():
""" The function creates the test data for the output file, stops force measurementsm saves data, stops the stages
and stops the test
"""
global testdata
global testmetadata
global PosRec
global RECRATE
# Create the data array for the output file (for the stages)
Time = list(np.arange(0,(1/RECRATE)*len(PosRec),(1/RECRATE)))
testdata = StageDisp_To_Dict(Time,PosRec)
# Stop recordings and movements
stop_measuring(measuretab)
StopStages(motors)
stop_event.set()
#-------------------------------------------------------------------------------------------------------------------------
def deviceRelativePosRec(device, disp, vel, wait_target,drec,PosRec):
""" This function moves the stage by a relative dispacemenet and records the position
The way position is recorded could be improved...
"""
global offset_rec
for axis in device.axes:
# Move stage at the desired velocity
InitialPos = device.qPOS(axis)[axis]
print('initial position on axis {} is {:.2f}'.format(axis, InitialPos))
target = InitialPos+disp
print('move axis {} to {:.2f}'.format(axis, target))
device.VEL(axis, vel)
device.MOV(axis, target)
# Wait until the target is reached (function to stop stage movements does not work with waitontarget)
if wait_target:
ret=device.qONT(axis) # Returns if target is reached
ret = ret['1']
while ret == False:
try:
# Query whether the target position has been reached
ret=device.qONT(axis)[axis]
position = device.qPOS(axis)[axis]
# Record position data
try:
Data_Pos = drec.read(numvalues=offset_reading-1,offset=offset_rec)
offset_rec = offset_rec+offset_reading
for i in (Data_Pos[1][0]):
PosRec.append(i)
print("PosRec: ", PosRec)
except:
offset_rec = offset_rec
except:
device.STP(noraise=True) # When the stages are stopped, the current position cannot be obtained
break
FinalPos = device.qPOS(axis)[axis]
print('current position of axis {} is {:.2f}'.format(axis, FinalPos))
print("end moving time: ", time.time())
#--------------------------------------------------------------------------------------------------------------------
def Get_Recrate(List, freq):
"""The function | |
# analysis/utils/pdb.py (from Kortemme-Lab/covariation)
#!/usr/bin/env python2
# encoding: utf-8
# The MIT License (MIT)
#
# Copyright (c) 2015 <NAME>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
import re
import sys
import os
import types
import string
from basics import Residue, PDBResidue, Sequence, SequenceMap, residue_type_3to1_map, protonated_residue_type_3to1_map, non_canonical_amino_acids, protonated_residues_types_3, residue_types_3, Mutation, ChainMutation, SimpleMutation
from basics import dna_nucleotides, rna_nucleotides, dna_nucleotides_3to1_map, dna_nucleotides_2to1_map, non_canonical_dna, non_canonical_rna, all_recognized_dna, all_recognized_rna
from fsio import read_file, write_file
### Residue types
allowed_PDB_residues_types = protonated_residues_types_3.union(residue_types_3)
allowed_PDB_residues_and_nucleotides = allowed_PDB_residues_types.union(dna_nucleotides).union(rna_nucleotides)
### Parsing-related variables
COMPND_field_map = {
'MOL_ID' : 'MoleculeID',
'MOLECULE' : 'Name',
'CHAIN' : 'Chains',
'FRAGMENT' : 'Fragment',
'SYNONYM' : 'Synonym',
'EC' : 'EC',
'ENGINEERED' : 'Engineered',
'MUTATION' : 'Mutation',
'OTHER_DETAILS' : 'OtherDetails',
}
SOURCE_field_map = {
'MOL_ID' : 'MoleculeID',
'SYNTHETIC' : 'Synthetic',
'ORGANISM_SCIENTIFIC' : 'OrganismScientificName',
'ORGANISM_COMMON' : 'OrganismCommonName',
'ORGANISM_TAXID' : 'OrganismNCBITaxonomyID',
}
modified_residues_patch = {
'1A2C' : {
'34H' : 'UNK',
},
'2ATC' : {
'ASX' : 'ASN',
},
'1XY1' : {
'MPT' : 'UNK',
},
'1CVW' : { # Note: more recent versions of this file do not require this patch
'ANS' : 'UNK',
'0QE' : 'UNK',
},
'1FAK' : {
'CGU' : 'GLU', # Gamma-carboxy-glutamic acid
},
'1JXQ' : {
'PHQ' : 'UNK', # benzyl chlorocarbonate
'CF0' : 'UNK', # fluoromethane
},
'1YJ1' : {
'DGN' : 'GLN', # D-glutamine
},
'2CN0' : {
'SIN' : 'UNK', # Succinic acid
},
'2FTL' : {
'IAS' : 'ASP', # Beta-L-aspartic acid/L-isoaspartate. Mismatch to asparagine - "the expected l-Asn residue had been replaced with a non-standard amino acid" (10.1016/j.jmb.2006.11.003).
},
}
### Record types
order_of_records = [
"HEADER","OBSLTE","TITLE","SPLIT","CAVEAT","COMPND","SOURCE","KEYWDS",
"EXPDTA","NUMMDL","MDLTYP","AUTHOR","REVDAT","SPRSDE","JRNL","REMARK",
"DBREF","DBREF1","DBREF2","DBREF1/DBREF2","SEQADV","SEQRES","MODRES",
"HET","HETNAM","HETSYN","FORMUL","HELIX","SHEET","SSBOND","LINK","CISPEP",
"SITE","CRYST1","ORIGX1","ORIGX2","ORIGX3","SCALE1","SCALE2","SCALE3",
"MTRIX1","MTRIX2","MTRIX3","MODEL","ATOM","ANISOU","TER","HETATM",
"ENDMDL","CONECT","MASTER","END"
]
order_of_records = [x.ljust(6) for x in order_of_records]
allowed_record_types = set([
# One time, single line:
'CRYST1', # Unit cell parameters, space group, and Z.
'END ', # Last record in the file.
'HEADER', # First line of the entry, contains PDB ID code, classification, and date of deposition.
'NUMMDL', # Number of models.
'MASTER', # Control record for bookkeeping.
'ORIGXn', # Transformation from orthogonal coordinates to the submitted coordinates (n = 1, 2, or 3).
'SCALEn', # Transformation from orthogonal coordinates to fractional crystallographic coordinates (n = 1, 2, or 3).
# One time, multiple lines:
'AUTHOR', # List of contributors.
'CAVEAT', # Severe error indicator.
'COMPND', # Description of macromolecular contents of the entry.
'EXPDTA', # Experimental technique used for the structure determination.
'MDLTYP', # Contains additional annotation pertinent to the coordinates presented in the entry.
'KEYWDS', # List of keywords describing the macromolecule.
'OBSLTE', # Statement that the entry has been removed from distribution and list of the ID code(s) which replaced it.
'SOURCE', # Biological source of macromolecules in the entry.
'SPLIT ', # List of PDB entries that compose a larger macromolecular complexes.
'SPRSDE', # List of entries obsoleted from public release and replaced by current entry.
'TITLE ', # Description of the experiment represented in the entry.
# Multiple times, one line:
'ANISOU', # Anisotropic temperature factors.
'ATOM ', # Atomic coordinate records for standard groups.
'CISPEP', # Identification of peptide residues in cis conformation.
'CONECT', # Connectivity records.
'DBREF ', # Reference to the entry in the sequence database(s).
'HELIX ', # Identification of helical substructures.
'HET   ', # Identification of non-standard groups (heterogens).
'HETATM', # Atomic coordinate records for heterogens.
'LINK ', # Identification of inter-residue bonds.
'MODRES', # Identification of modifications to standard residues.
'MTRIXn', # Transformations expressing non-crystallographic symmetry (n = 1, 2, or 3). There may be multiple sets of these records.
'REVDAT', # Revision date and related information.
'SEQADV', # Identification of conflicts between PDB and the named sequence database.
'SHEET ', # Identification of sheet substructures.
'SSBOND', # Identification of disulfide bonds.
# Multiple times, multiple lines:
'FORMUL', # Chemical formula of non-standard groups.
'HETNAM', # Compound name of the heterogens.
'HETSYN', # Synonymous compound names for heterogens.
'SEQRES', # Primary sequence of backbone residues.
'SITE ', # Identification of groups comprising important entity sites.
# Grouping:
'ENDMDL', # End-of-model record for multiple structures in a single coordinate entry.
'MODEL ', # Specification of model number for multiple structures in a single coordinate entry.
'TER ', # Chain terminator.
# Other:
'JRNL ', # Literature citation that defines the coordinate set.
'REMARK', # General remarks; they can be structured or free form.
])
# This set is probably safer to use to allow backwards compatibility
all_record_types = allowed_record_types.union(set(order_of_records))
### Exception classes
class PDBParsingException(Exception): pass
class MissingRecordsException(Exception): pass
class NonCanonicalResidueException(Exception): pass
class PDBValidationException(Exception): pass
class PDBMissingMainchainAtomsException(Exception): pass
class PDB:
"""A class to store and manipulate PDB data"""
### Constructor ###
def __init__(self, pdb_content, pdb_id = None, strict = True):
'''Takes either a pdb file, a list of strings = lines of a pdb file, or another object.'''
self.pdb_content = pdb_content
if type(pdb_content) is types.StringType:
self.lines = pdb_content.split("\n")
else:
self.lines = [line.strip() for line in pdb_content]
self.parsed_lines = {}
self.structure_lines = [] # For ATOM and HETATM records
self.journal = None
self.chain_types = {}
self.format_version = None
self.modified_residues = None
self.modified_residue_mapping_3 = {}
self.pdb_id = None
self.strict = strict
self.seqres_chain_order = [] # A list of the PDB chains in document-order of SEQRES records
self.seqres_sequences = {} # A dict mapping chain IDs to SEQRES Sequence objects
self.atom_chain_order = [] # A list of the PDB chains in document-order of ATOM records (not necessarily the same as seqres_chain_order)
self.atom_sequences = {} # A dict mapping chain IDs to ATOM Sequence objects
self.chain_atoms = {} # A dict mapping chain IDs to a set of ATOM types. This is useful to test whether some chains only have CA entries e.g. in 1LRP, 1AIN, 1C53, 1HIO, 1XAS, 2TMA
# PDB deprecation fields
self.deprecation_date = None
self.deprecated = False
self.replacement_pdb_id = None
self.rosetta_to_atom_sequence_maps = {}
self.rosetta_residues = []
self.residue_types = set() # the set of 3-letter residue types present in the file (useful for checking against e.g. CSE, MSE)
self.fix_pdb()
self._split_lines()
self.pdb_id = pdb_id
self.pdb_id = self.get_pdb_id() # parse the PDB ID if it is not passed in
self._get_pdb_format_version()
self._get_modified_residues()
self._get_replacement_pdb_id()
self._get_SEQRES_sequences()
self._get_ATOM_sequences()
def fix_pdb(self):
'''A function to fix fatal errors in PDB files when they can be automatically fixed. At present, this only runs if
self.strict is False. We may want a separate property for this since we may want to keep strict mode but still
allow PDBs to be fixed.
The only fixes at the moment are for missing chain IDs which get filled in with a valid PDB ID, if possible.'''
if self.strict:
return
# Get the list of chains
chains = set()
for l in self.lines:
if l.startswith('ATOM ') or l.startswith('HETATM'):
chains.add(l[21])
# If there is a chain with a blank ID, change that ID to a valid unused ID
if ' ' in chains:
fresh_id = None
allowed_chain_ids = list(string.uppercase) + list(string.lowercase) + map(str, range(10))
for c in chains:
try: allowed_chain_ids.remove(c)
except: pass
if allowed_chain_ids:
fresh_id = allowed_chain_ids[0]
# Rewrite the lines
new_lines = []
if fresh_id:
for l in self.lines:
if (l.startswith('ATOM ') or l.startswith('HETATM')) and l[21] == ' ':
new_lines.append('%s%s%s' % (l[:21], fresh_id, l[22:]))
else:
new_lines.append(l)
self.lines = new_lines
### Class functions ###
@staticmethod
def from_filepath(filepath, strict = True):
'''A function to replace the old constructor call where a filename was passed in.'''
return PDB(read_file(filepath), strict = strict)
@staticmethod
def from_lines(pdb_file_lines, strict = True):
'''A function to replace the old constructor call where a list of the lines of a PDB file was passed in.'''
return PDB(pdb_file_lines, strict = strict)
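    # Hedged usage sketch (not part of the original module; assumes a local file
    # '1abc.pdb' exists):
    #
    #   pdb = PDB.from_filepath('1abc.pdb', strict=False)
    #   print pdb.pdb_id
    #   for chain_id, seq in pdb.seqres_sequences.items():
    #       print chain_id, seq
    #   print pdb.atom_chain_order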
# ichnaea/scripts/datamap.py
#!/usr/bin/env python3
"""
Generate datamap image tiles and upload them to Amazon S3.
The process is:
1. Export data from datamap tables to CSV.
The data is exported as pairs of latitude and longitude,
converted into 0 to 6 pairs randomly around that point.
2. Convert the data into quadtree structures.
This structure is more efficient for finding the points that
apply to a tile.
3. Merge the per-table quadtrees into a single file
4. Generate tiles for each zoom level.
More tiles, covering a smaller distance, are created at each
higher zoom level.
5. Update the S3 bucket with the new tiles.
The MD5 checksum is used to determine if a tile is unchanged.
New tiles are uploaded, and orphaned tiles are deleted.
The quadtree and tile generators are from:
https://github.com/ericfischer/datamaps
The generated tiles are minimized with pngquant:
https://pngquant.org
"""
import argparse
import glob
import hashlib
import os
import os.path
import shutil
import subprocess
import sys
import uuid
from collections import defaultdict
from json import dumps
from multiprocessing import Pool
from timeit import default_timer
import boto3
import botocore
import structlog
from more_itertools import chunked
from sqlalchemy import text
from geocalc import random_points
from ichnaea import util
from ichnaea.conf import settings
from ichnaea.db import configure_db, db_worker_session
from ichnaea.log import configure_logging, configure_raven
from ichnaea.models.content import DataMap, decode_datamap_grid
LOG = structlog.get_logger("ichnaea.scripts.datamap")
S3_CLIENT = None # Will be re-initialized in each pool thread
class Timer:
"""Context-based timer."""
def __enter__(self):
self.start = default_timer()
return self
def __exit__(self, *args):
self.end = default_timer()
self.duration_s = round(self.end - self.start, 3)
@property
def elapsed(self):
return default_timer() - self.start
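# Minimal usage sketch of Timer (illustrative only; do_work is a placeholder):
#
#   with Timer() as timer:
#       do_work()          # timer.elapsed can be polled while the block is running
#   print(f"took {timer.duration_s} seconds")   # duration_s is set on exit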
def generate(
output_dir,
bucket_name,
raven_client,
create=True,
upload=True,
concurrency=2,
max_zoom=11,
):
"""
Process datamaps tables into tiles and optionally upload them.
:param output_dir: The base directory for working files and tiles
:param bucket_name: The name of the S3 bucket for upload
:param raven_client: A raven client to log exceptions
:param create: True (default) if tiles should be generated from the datamap tables
:param upload: True (default) if tiles should be uploaded to S3
:param concurrency: The number of simultaneous worker processes
:param max_zoom: The maximum zoom level to generate
:return: Details of the process
:rtype: dict
"""
result = {}
# Setup directories
if not os.path.isdir(output_dir):
os.makedirs(output_dir)
csv_dir = os.path.join(output_dir, "csv")
quadtree_dir = os.path.join(output_dir, "quadtrees")
shapes_dir = os.path.join(output_dir, "shapes")
tiles_dir = os.path.abspath(os.path.join(output_dir, "tiles"))
if create:
LOG.debug("Generating tiles from datamap tables...")
# Export datamap table to CSV files
if not os.path.isdir(csv_dir):
os.mkdir(csv_dir)
row_count = None
with Pool(processes=concurrency) as pool, Timer() as export_timer:
row_count, csv_count = export_to_csvs(pool, csv_dir)
result["export_duration_s"] = export_timer.duration_s
result["row_count"] = row_count
result["csv_count"] = csv_count
LOG.debug(
f"Exported {row_count:,} row{_s(row_count)}"
f" to {csv_count:,} CSV{_s(csv_count)}"
f" in {export_timer.duration_s:0.1f} seconds"
)
if result["row_count"] == 0:
LOG.debug("No rows to export, so no tiles to generate.")
return result
# Convert CSV files to per-table quadtrees
if os.path.isdir(quadtree_dir):
shutil.rmtree(quadtree_dir)
os.mkdir(quadtree_dir)
with Pool(processes=concurrency) as pool, Timer() as quadtree_timer:
quad_result = csv_to_quadtrees(pool, csv_dir, quadtree_dir)
csv_converted, intermediate, final = quad_result
result["quadtree_duration_s"] = quadtree_timer.duration_s
result["csv_converted_count"] = csv_converted
result["intermediate_quadtree_count"] = intermediate
result["quadtree_count"] = final
LOG.debug(
f"Processed {csv_converted:,} CSV{_s(csv_converted)}"
f" into {intermediate:,} intermediate quadtree{_s(intermediate)}"
f" and {final:,} region quadtree{_s(final)}"
f" in {quadtree_timer.duration_s:0.1f} seconds"
)
# Merge quadtrees and make points unique.
if os.path.isdir(shapes_dir):
shutil.rmtree(shapes_dir)
with Timer() as merge_timer:
merge_quadtrees(quadtree_dir, shapes_dir)
result["merge_duration_s"] = merge_timer.duration_s
LOG.debug(f"Merged quadtrees in {merge_timer.duration_s:0.1f} seconds")
# Render tiles
with Pool(processes=concurrency) as pool, Timer() as render_timer:
tile_count = render_tiles(pool, shapes_dir, tiles_dir, max_zoom)
result["tile_count"] = tile_count
result["render_duration_s"] = render_timer.duration_s
LOG.debug(
f"Rendered {tile_count:,} tile{_s(tile_count)}"
f" in {render_timer.duration_s:0.1f} seconds"
)
if upload:
LOG.debug(f"Syncing tiles to S3 bucket {bucket_name}...")
# Determine the sync plan by comparing S3 to the local tiles
# This function times itself
plan, unchanged_count = get_sync_plan(bucket_name, tiles_dir)
# Sync local tiles with S3 bucket
# Double concurrency since I/O rather than CPU bound
# Max tasks to free accumulated memory from the S3 clients
with Pool(
processes=concurrency * 2, maxtasksperchild=1000
) as pool, Timer() as sync_timer:
sync_counts = sync_tiles(
pool, plan, bucket_name, tiles_dir, max_zoom, raven_client
)
result["sync_duration_s"] = sync_timer.duration_s
result["tiles_unchanged"] = unchanged_count
result.update(sync_counts)
LOG.debug(
f"Synced tiles to S3 in {sync_timer.duration_s:0.1f} seconds: "
f"{sync_counts['tile_new']:,} new, "
f"{sync_counts['tile_changed']:,} changed, "
f"{sync_counts['tile_deleted']:,} deleted, "
f"{sync_counts['tile_failed']:,} failed, "
f"{unchanged_count:,} unchanged"
)
upload_status_file(bucket_name, result)
return result
def _s(count):
"""Add an s, like rows, if the count is not 1."""
if count == 1:
return ""
else:
return "s"
def export_to_csvs(pool, csv_dir):
"""
Export from database tables to CSV.
For small database tables, there will be one CSV created, such as
"map_ne.csv" for the datamap_ne (northeast) table.
For large database tables, there will be multiple CSVs created,
such as "submap_ne_0001.csv".
:param pool: A multiprocessing pool
:csv_dir: The directory to write CSV output files
:return: A tuple of counts (rows, CSVs)
"""
jobs = []
result_rows = 0
result_csvs = 0
for shard_id, shard in sorted(DataMap.shards().items()):
# sorting the shards prefers the north which contains more
# data points than the south
filename = f"map_{shard_id}.csv"
jobs.append(
pool.apply_async(export_to_csv, (filename, csv_dir, shard.__tablename__))
)
# Run export jobs to completion
def on_success(result):
nonlocal result_rows, result_csvs
rows, csvs = result
result_rows += rows
result_csvs += csvs
def on_progress(tables_complete, table_percent):
nonlocal result_rows
LOG.debug(
f" Exported {result_rows:,} row{_s(result_rows)}"
f" from {tables_complete:,} table{_s(tables_complete)}"
f" to {result_csvs:,} CSV file{_s(result_csvs)}"
f" ({table_percent:0.1%})"
)
watch_jobs(jobs, on_success=on_success, on_progress=on_progress)
return result_rows, result_csvs
def watch_jobs(
jobs,
on_success=None,
on_error=None,
on_progress=None,
raven_client=None,
progress_seconds=5.0,
):
"""Watch async jobs as they complete, periodically reporting progress.
:param on_success: A function to call with the job output, skip if None
:param on_error: A function to call with the exception, re-raises if None
:param on_progress: A function to call to report progress, passed jobs complete and percent of total
:param raven_client: The raven client to capture exceptions (optional)
:param progress_seconds: How often to call on_progress
"""
with Timer() as timer:
last_elapsed = 0.0
total_jobs = len(jobs)
jobs_complete = 0
for job in jobs:
if timer.elapsed > (last_elapsed + progress_seconds):
job_percent = jobs_complete / total_jobs
on_progress(jobs_complete, job_percent)
last_elapsed = timer.elapsed
try:
job_resp = job.get()
if on_success:
on_success(job_resp)
except KeyboardInterrupt:
# Skip Raven for Ctrl-C, reraise to halt execution
raise
except Exception as e:
if raven_client:
raven_client.captureException()
if on_error:
on_error(e)
else:
raise
jobs_complete += 1
def csv_to_quadtrees(pool, csvdir, quadtree_dir):
"""
Convert CSV to quadtrees.
:param pool: A multiprocessing pool
:param csvdir: The directory with the input CSV files
:param quadtree_dir: The directory with the output quadtree files
:return: A tuple of counts (CSVs processed, intermediate quadtrees, final quads)
If multiple CSVs were generated for a datamap table, then per-CSV intermediate
quadtrees will be created in a subfolder, and then merged (allowing duplicates)
to a standard quadtree.
"""
jobs = []
intermediate_count = 0
intermediates = defaultdict(list)
final_count = 0
for name in os.listdir(csvdir):
if name.startswith("map_") and name.endswith(".csv"):
final_count += 1
jobs.append(pool.apply_async(csv_to_quadtree, (name, csvdir, quadtree_dir)))
if name.startswith("submap_") and name.endswith(".csv"):
intermediate_count += 1
prefix, shard, suffix = name.split("_")
basename, suffix = name.split(".")
intermediates[shard].append(basename)
submap_dir = os.path.join(quadtree_dir, f"submap_{shard}")
if not os.path.isdir(submap_dir):
os.mkdir(submap_dir)
jobs.append(pool.apply_async(csv_to_quadtree, (name, csvdir, submap_dir)))
# Run conversion jobs to completion
def on_progress(converted, percent):
if converted == 1:
LOG.debug(f" Converted 1 CSV to a quadtree ({percent:0.1%})")
else:
LOG.debug(f" Converted {converted:,} CSVs to quadtrees ({percent:0.1%})")
watch_jobs(jobs, on_progress=on_progress)
csv_count = len(jobs)
# Queue jobs to merge intermediates
merge_jobs = []
for shard, basenames in intermediates.items():
submap_dir = os.path.join(quadtree_dir, f"submap_{shard}")
map_dir = os.path.join(quadtree_dir, f"map_{shard}")
merge_jobs.append(
pool.apply_async(
merge_quadtrees,
(submap_dir, map_dir),
{"remove_duplicates": False, "pattern": "submap*"},
)
)
final_count += 1
def on_merge_progress(merged, percent):
LOG.debug(
f" Merged intermediate quadtrees to {merged:,}"
f" quadtree{_s(merged)} ({percent:0.1%})"
)
watch_jobs(merge_jobs, on_progress=on_merge_progress)
return (csv_count, intermediate_count, final_count)
def merge_quadtrees(quadtree_dir, shapes_dir, remove_duplicates=True, pattern="map*"):
"""Merge multiple quadtree files into one, removing duplicates."""
quadtree_files = glob.glob(os.path.join(quadtree_dir, pattern))
assert quadtree_files
cmd = ["merge"]
if remove_duplicates:
cmd.append("-u")
cmd += ["-o", shapes_dir] # Output to shapes directory
cmd += quadtree_files # input files
subprocess.run(cmd, check=True, capture_output=True)
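# Illustrative usage sketch (not called anywhere): merging the per-CSV
# intermediate quadtrees of one shard into its final quadtree, mirroring the
# merge jobs queued in csv_to_quadtrees above. The directory names are made up.
def _example_merge_quadtrees():
    submap_dir = "/tmp/quadtrees/submap_0a"
    map_dir = "/tmp/quadtrees/map_0a"
    # Duplicates are kept because the intermediate quadtrees already partition the rows.
    merge_quadtrees(submap_dir, map_dir, remove_duplicates=False, pattern="submap*")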
def render_tiles(pool, shapes_dir, tiles_dir, max_zoom):
"""Render the tiles at all zoom levels, and the front-page 2x tile."""
# Render tiles at all zoom levels
tile_count = render_tiles_for_zoom_levels(pool, shapes_dir, tiles_dir, max_zoom)
# Render front-page tile
tile_count += render_tiles_for_zoom_levels(
pool,
shapes_dir,
tiles_dir,
max_zoom=0,
tile_type="high-resolution tile",
extra_args=("-T", "512"), # Tile size 512 instead of default of 256
suffix="@2x", # Suffix for high-res variant images
)
return tile_count
def get_sync_plan(bucket_name, tiles_dir, bucket_prefix="tiles/"):
"""Compare S3 bucket and tiles directory to determine the sync plan."""
# Get objects currently in the S3 bucket
# JSONWireProtocol/skeletonAppiumRealGirishRout.py
####################################################################
# Skeleton for Appium tests on Sauce Labs RDC
####################################################################
###################################################################
# Imports that are good to use
###################################################################
from appium import webdriver
from time import sleep
import os
import sys
from appium.webdriver.common.touch_action import TouchAction
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.common.by import By
import multiprocessing
# from reusableFxns import *
import requests
import random
import json
from termcolor import (colored)
import datetime
from selenium.webdriver.common.action_chains import ActionChains
androidTest = False
iosTest = False
US_Datacenter=False
EU_Datacenter=False
US_Datacenter_TO=False
EU_Datacenter_TO=False
##################################################################
# Selenium with Python does not verify HTTPS certificates here and
# displays an "Unverified HTTPS request" warning.
# The following disables that warning to clear the clutter,
# but doing properly verified requests would be the better fix.
###################################################################
import urllib3
urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)
###################################################################
# Choose whether you want Android or iOS capabilities
# Uncomment one of those lines
###################################################################
# androidTest = True
iosTest = True
###################################################################
# Choose The Platform and Data Center you want to test on
# Uncomment one of those lines
###################################################################
US_Datacenter=True
# EU_Datacenter=True
# US_Datacenter_TO=True
# EU_Datacenter_TO=True
RandoNumber = random.randint(0,100000)
# print(RandoNumber)
###################################################################
# This makes the functions below execute 'run' amount of times
###################################################################
run = 1
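###################################################################
# A minimal sketch (assumption only, the real invocation is not shown here)
# of how 'run' could drive repeated executions of run_sauce_test(), which is
# defined further below, using the multiprocessing module imported above:
#
# if __name__ == '__main__':
#     processes = [multiprocessing.Process(target=run_sauce_test) for _ in range(run)]
#     for p in processes:
#         p.start()
#     for p in processes:
#         p.join()
###################################################################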
###################################################################
# Choose whether you want Android or iOS capabilities
# Uncomment one of those lines
###################################################################
# androidTest = True
# iosTest = True
###################################################################
# Declare as a function in order to do multiple runs
###################################################################
def run_sauce_test():
###################################################################
# Common parameters (desired capabilities)
# For Test Object tests
###################################################################
projectParameters = {
'name': 'Run: ' + str(datetime.datetime.now()),
'commandTimeout':600,
# "recordDeviceVitals": 'true',
# 'tunnelIdentifier': 'tj1',
# 'locale' : "fr_CA",
# On CSTEAM: # The API generated for the Test Object project
# 'sauce:options': {
# 'appiumVersion': '1.20.1',
# 'name': 'Run: ' + str(datetime.datetime.now()),
#
# },
# "public": "public",
# "appiumVersion":"1.17.1",
# 'tunnelIdentifier':'try1tj'
# 'sauceLabsImageInjectionEnabled': 'true',
'wdaEventloopIdleDelay': '5',
"waitForQuiescence": False,
# "cacheId": "1234",
# "noReset": "true",
# 'appiumVersion': '1.16.0',
# 'testobject_test_live_view_url': True
# "testobject_session_creation_timeout": "180000",
# 'name': 'Run: ' + getNumber(),
# "relaxedSecurityEnabled": "true",
# "autoAcceptAlerts": True,
# "cacheId": "test1"
}
testObjectAppStorage = {
# 'testobject_api_key' : 'B2A207D1BF6945108D0FF5EC4EB952BB', # test on voi-stage
# 'testobject_api_key' : 'D98E6C58A01C4B3B954087B35E605C63', # test on Google website
# 'testobject_api_key': '<KEY>', #prijil app
# 'testobject_api_key' : '47061F3DBA644FB5A2DE1C950D94EA92', #bersa-uat
# 'testobject_api_key' : '<KEY>', # test on 1Cashify app
# 'testobject_api_key' : '37696AA9E1274A94B65339806E21A5C4', # test on Varo SIT Debug app
# 'testobject_api_key' : '7F9F9BD657414E73A556F1AD9941C951', # test on FlashFlood iOS app
# 'maxInstances': 5,
}
unifiedPlatformAppStorage = {
# 'app': 'storage:filename=BankOfTheWest.ipa',
# 'app': 'storage:264d3821-e02c-4aa6-a678-e9df4f164d9e', #bersa-uat
}
androidParameters = { # Define Android parameters here
'platformVersion' : '10',
# 'automationName': 'uiautomator2',
# 'deviceName' : 'Samsung.*',
# 'deviceName' : 'Samsung Galaxy S[1-2]0.*',
# 'deviceName' : 'Samsung_Galaxy_S[1-2]0_real',
# 'deviceName' : 'Google_Pixel_4_XL_real_us',
'browserName' : 'chrome',
'deviceOrientation' : 'portrait',
'platformName' : 'Android',
# 'platformVersion' : '9',
# "recordDeviceVitals": 'true',
# 'app': 'storage:7b50a469-89f2-4d1b-ae8e-8d1997de2f2a'
# 'testobject_app_id': '9',
# 'appID': '10',
# "newCommandTimeout": "7000",
# "testobject_cache_device" :'true',
# "reportDirectory": "reports",
# "reportFormat": "xml",
# "autoGrantPermissions": "true",
# "testobject_session_creation_timeout": "300000",
# "commandTimeouts": "180000",
# "maxDuration": "300",
# "idleTimeout": "300",
# "deviceType": "phone",
# "phoneOnly": 'true',
# "resetKeyboard": 'true',
# "unicodeKeyboard": 'true',
# "ignoreUnimportantViews": 'true',
# "disableAndroidWatchers": 'true',
# "automationName": "uiautomator2",
# 'maxSessions': 5,
}
iosParameters = { # Define iOS Parameters here
'phoneOnly': 'true',
# 'deviceName' : 'iPhone_11_14_real_us',
# 'deviceName' : 'iPhone X Simulator',
# 'deviceName' : 'iPhone_11_13_real_us',
'deviceOrientation' : 'portrait',
# 'browserName' : 'Chrome',
# 'browserName' : 'safari',
# 'platformVersion' : '13',
# 'platformVersion' : '14',
'appium:platformVersion': '14',
'platformName' : 'iOS',
# "recordDeviceVitals": 'true',
# "bundleId" : "com.apple.Preferences",
# 'name' : "Sauce Labs Test",
# 'testobject_suite_name': 'BOTW_Mobile_App_Automation_iOS',
# 'testobject_test_name': "iPhone App: #{scenario.name}",
# 'testobject_app_id': ['1'],
'autoAcceptAlerts': 'true',
# 'nativeWebTap': True, # iOS only capability.
}
###################################################################
# Merge parameters into a single capability dictionary
###################################################################
sauceParameters = {}
sauceParameters.update(projectParameters)
if androidTest != True and iosTest != True:
print('You need to specify a platform to test on!')
sys.exit()
elif androidTest == True and iosTest == True:
print('Don\'t be greedy! Only choose one platform!')
sys.exit()
elif androidTest:
sauceParameters.update(androidParameters)
elif iosTest:
sauceParameters.update(iosParameters)
###################################################################
# Connect to Test Object (RDC Cloud)
###################################################################
if US_Datacenter==True:
sauceParameters.update(unifiedPlatformAppStorage)
print (colored('You are testing on the Sauce Labs US Datacenter', 'green', attrs=['blink', 'underline']))
driver = webdriver.Remote(
command_executor='https://'+os.environ['SAUCE_USERNAME']+':'+os.environ['SAUCE_ACCESS_KEY']+'@ondemand.us-west-1.saucelabs.com:443/wd/hub',
# command_executor='https://tj.invitationtest1:24168dc8-0900-4994-9ef9-f3442fb9683a@ondemand.us-west-1.saucelabs.com:443/wd/hub',
# command_executor='https://'+os.environ['SAUCE_USERNAME']+':'+os.environ['SAUCE_ACCESS_KEY']+'@ondemand.saucelabs.com:443/wd/hub',
# command_executor='https://<user>.<access_key>.us-west-1.saucelabs.com:443/wd/hub',
desired_capabilities=sauceParameters)
elif EU_Datacenter==True:
sauceParameters.update(unifiedPlatformAppStorage)
print (colored('You are testing on the Sauce Labs EU Datacenter', 'green', attrs=['blink', 'underline']))
driver = webdriver.Remote(
command_executor='https://'+os.environ['SAUCE_USERNAME']+':'+os.environ['SAUCE_ACCESS_KEY']+'@ondemand.eu-central-1.saucelabs.com:443/wd/hub',
desired_capabilities=sauceParameters)
if US_Datacenter_TO==True:
sauceParameters.update(testObjectAppStorage)
print (colored('You are testing on the Test Object/Legacy RDC US Datacenter', 'green', attrs=['blink', 'underline']))
driver = webdriver.Remote(
command_executor='https://us1.appium.testobject.com/wd/hub',
desired_capabilities=sauceParameters)
elif EU_Datacenter_TO==True:
sauceParameters.update(testObjectAppStorage)
print (colored('You are testing on the Test Object/Legacy RDC EU Datacenter', 'green', attrs=['blink', 'underline']))
driver = webdriver.Remote(
command_executor='https://eu1.appium.testobject.com/wd/hub',
desired_capabilities=sauceParameters)
elif US_Datacenter==True and EU_Datacenter==True:
print (colored('Please select either ', 'red', attrs=[ 'underline']))
print (colored('US', 'red', attrs=['blink', 'underline', 'bold']))
print (colored(' or ', 'red', attrs=['underline']))
print (colored('EU', 'red', attrs=['blink', 'underline', 'bold']))
print (colored(', not both', 'red', attrs=['underline']))
print (driver.capabilities)
print ('Test Name == ', colored(driver.capabilities.get('testobject_test_name', 'n/a'), 'green', attrs=['reverse', 'blink']))
print ('Device Name == ', colored(driver.capabilities.get('testobject_device_name', 'n/a'), 'green', attrs=['reverse', 'blink']))
print ('Device descriptor == ', colored(driver.capabilities.get('testobject_device', 'n/a'), 'green', attrs=['reverse', 'blink']))
print ('Platform Version == ', colored(driver.capabilities['platformVersion'], 'green', attrs=['reverse', 'blink']))
# ###################################################################
# # Test logic goes here
# ###################################################################
# # Navigating to a website
# #__________________________________________________________________
# # driver.get_capability("testobject_test_report_url");
# # driver.get_capabilities().get_capability("testobject_test_live_view_url");
# # driver.desired_capabilities['testobject_test_report_url']
# # print driver.capabilities['testobject_test_report_url']
#
# # console.log(driver.capabilities['testobject_test_report_url'])
# # print(driver.capabilities['testobject_test_live_view_url'])
# sleep(10)
# source = driver.page_source
# print(colored(source.split(' '), 'red'))
# driver.execute_script('sauce:context=Open google.com')
# driver.get("https://se2sbl-qa-uatx.unqork.io/?style=se2sblterm#/display/6008fa52e78d1d025ae63209?firstName=John&lastName=Smith&email=<EMAIL>")
# driver.execute_script('sauce:context=names ' + str(driver.capabilities['platformVersion']) + ' yup')
driver.get("https://app.qa.everlylife.io/app/termlifewf#/")
# try:
# print (colored("looking for btnGetStarted", 'green'))
# WebDriverWait(driver, 45).until(EC.presence_of_element_located((By.XPATH, "//*[@id=\"btnGetStarted\"]/span")))
# # WebDriverWait(driver, 30).until(EC.presence_of_element_located((By.XPATH, "//*[@name='quickBalance_header']")))
# # interact = driver.find_element_by_xpath("//*XCUIElementTypeButton[@name='noThanks_btn']")
# interact = driver.find_element_by_xpath("//*[@id=\"btnGetStarted\"]/span")
# interact.click()
# print (colored("found btnGetStarted!!!", 'green'))
#
# except:
# print (colored("Can not find btnGetStarted", 'red'))
try:
print (colored("looking for username and password", 'green'))
WebDriverWait(driver, 20).until(EC.presence_of_element_located((By.ID, 'username')))
show = EC.presence_of_element_located((By.ID, 'username'))
print (show)
interact = driver.find_element_by_id("username")
interact.click()
print (colored("found username!!!", 'green'))
interact.send_keys("<EMAIL>")
print (colored("looking for password", 'green'))
WebDriverWait(driver, 20).until(EC.presence_of_element_located((By.ID, 'password')))
interact = driver.find_element_by_id("password")
interact.click()
print (colored("found password!!!", 'green'))
interact.send_keys("<PASSWORD>123")
interact.submit()
except:
print (colored("Can not find username and password", 'red'))
# try:
# print (colored("looking for password", 'green'))
# WebDriverWait(driver, 20).until(EC.presence_of_element_located((By.ID, 'password')))
# interact = driver.find_element_by_id("password")
# interact.click()
# print (colored("found password!!!", 'green'))
# interact.send_keys("<PASSWORD>123")
# interact.submit()
# except:
# print (colored("Can not find password", 'red'))
try:
print (colored("looking for remove", 'green'))
WebDriverWait(driver, 20).until(EC.presence_of_element_located((By.XPATH, "//button[text()=' Remove ']")))
print (colored("found remove!!!", 'green'))
interact = driver.find_element_by_xpath("//button[text()=' Remove ']")
interact.click()
print (colored("clicked remove!!!", 'green'))
except:
print (colored("Can not find remove", 'red'))
try:
print (colored("looking for insuredFirstName", 'green'))
WebDriverWait(driver, 20).until(EC.presence_of_element_located((By.XPATH, "//*[@id=\"insuredFirstName\"]")))
# WebDriverWait(driver, 30).until(EC.presence_of_element_located((By.XPATH, "//*[@name='quickBalance_header']")))
# interact = driver.find_element_by_xpath("//*XCUIElementTypeButton[@name='noThanks_btn']")
if driver.find_elements(By.XPATH, "//button[text()=' Remove ']"):
interact = driver.find_element_by_xpath("//button[text()=' Remove ']")
interact.click()
print ("remove removed")
else:
print ("remove not here")
interact = driver.find_element_by_xpath("//*[@id=\"insuredFirstName\"]")
interact.click()
interact.clear()
print (colored("found insuredFirstName!!!", 'green'))
interact.send_keys("<NAME>")
except:
print (colored("Can not find insuredFirstName", 'red'))
try:
print (colored("looking for insuredLastName", 'green'))
WebDriverWait(driver, 20).until(EC.presence_of_element_located((By.XPATH, "//*[@id=\"insuredLastName\"]")))
# WebDriverWait(driver, 30).until(EC.presence_of_element_located((By.XPATH, "//*[@name='quickBalance_header']")))
# interact = driver.find_element_by_xpath("//*XCUIElementTypeButton[@name='noThanks_btn']")
interact = driver.find_element_by_xpath("//*[@id=\"insuredLastName\"]")
interact.click()
interact.clear()
print (colored("found insuredLastName!!!", 'green'))
interact.send_keys("Moore")
except:
print (colored("Can not find insuredLastName", 'red'))
try:
print (colored("looking for insuredDOB", 'green'))
WebDriverWait(driver, 20).until(EC.presence_of_element_located((By.XPATH, "//*[@id=\"insuredDOB\"]")))
# WebDriverWait(driver, 30).until(EC.presence_of_element_located((By.XPATH, "//*[@name='quickBalance_header']")))
# interact = driver.find_element_by_xpath("//*XCUIElementTypeButton[@name='noThanks_btn']")
interact = driver.find_element_by_xpath("//*[@id=\"insuredDOB\"]")
interact.click()
interact.clear()
print (colored("found insuredDOB!!!", 'green'))
interact.send_keys("11162001")
except:
print (colored("Can not find insuredDOB", 'red'))
try:
print (colored("looking for Male", 'green'))
WebDriverWait(driver, 20).until(EC.presence_of_element_located((By.XPATH, "//*[contains(text(),\"Male\")]")))
# WebDriverWait(driver, 30).until(EC.presence_of_element_located((By.XPATH, "//*[@name='quickBalance_header']")))
# interact = driver.find_element_by_xpath("//*XCUIElementTypeButton[@name='noThanks_btn']")
interact = driver.find_element_by_xpath("//*[contains(text(),\"Male\")]")
interact.click()
# interact.clear()
print (colored("found Male!!!", 'green'))
# interact.send_keys("11162001")
except:
print (colored("Can not find Male", 'red'))
# try:
# print (colored("looking for Search...", 'green'))
# WebDriverWait(driver, 20).until(EC.presence_of_element_located((By.XPATH, "//div[text()='Search...']")))
# # WebDriverWait(driver, 30).until(EC.presence_of_element_located((By.XPATH, "//*[@name='quickBalance_header']")))
# # interact = driver.find_element_by_xpath("//*XCUIElementTypeButton[@name='noThanks_btn']")
# interact = driver.find_element_by_xpath("//div[text()='Search...']")
# interact.click()
# # interact.clear()
# print (colored("found Search...!!!", 'green'))
# # interact.send_keys("11162001")
#
#
# except:
# print (colored("Can not find Search...", 'red'))
# try:
# print (colored("looking for react-select-2-input", 'green'))
# WebDriverWait(driver, 20).until(EC.presence_of_element_located((By.XPATH, "//*[@id=\"react-select-2-input\"]")))
# # WebDriverWait(driver, 30).until(EC.presence_of_element_located((By.XPATH, "//*[@name='quickBalance_header']")))
# # interact = driver.find_element_by_xpath("//*XCUIElementTypeButton[@name='noThanks_btn']")
# interact = driver.find_element_by_xpath("//*[@id=\"react-select-2-input\"]")
# interact.click()
# interact.clear()
# print (colored("found react-select-2-input!!!", 'green'))
# interact.send_keys("2201 University Blvd, Tuscaloosa, AL 35401")
#
#
# except:
# print (colored("Can not find react-select-2-input", 'red'))
try:
print (colored("looking for hiddenAddressLine1", 'green'))
# print (colored("looking for react-select-2-input", 'green'))
add on the object
:type collaborator: get_user_model()
:param role: the role of the collaborator on the object
:type role: CollaboratorRole
:return: the newly created collaborator instance
:rtype: ObjectCollaboratorMixin
"""
user_is_collaborator = collaborator in self.collaborators.all()
user_is_author = collaborator == self.author
if user_is_author:
raise learning.exc.UserIsAlreadyAuthor(
_("The user “%(user)s” is already the author of this %(object)s. "
"The user %(user)s cannot be added as a collaborator.")
% {"object": _(self.__class__.__name__.lower()), "user": collaborator}
)
if user_is_collaborator:
raise learning.exc.UserIsAlreadyCollaborator(
_("The user “%(user)s” already collaborates on this %(object)s. "
"Maybe you just want to change its role?")
% {"object": _(self.__class__.__name__.lower()), "user": collaborator}
)
return self.object_collaborators.create(collaborator=collaborator, role=role.name)
def remove_collaborator(self, collaborator: get_user_model()) -> None:
"""
Remove a collaborator from the object
:raises UserNotCollaboratorError: when the user is not a collaborator on the object
:param collaborator: the collaborator to remove from the object
:type collaborator: get_user_model()
"""
user_is_collaborator = collaborator in self.collaborators.all()
if not user_is_collaborator:
raise learning.exc.UserNotCollaboratorError(
_("The user “%(user)s” is not already a collaborator on this %(object)s.")
% {"object": _(self.__class__.__name__.lower()), "user": collaborator}
)
self.object_collaborators.filter(collaborator=collaborator).delete()
def change_collaborator_role(self, collaborator: get_user_model(), role: CollaboratorRole) -> None:
"""
Change the role of a collaborator on the object
:raises UserNotCollaboratorError: when the user is not a collaborator on the object
:param collaborator: the collaborator for which to change his role
:type collaborator: get_user_model()
:param role: the new role for the collaborator
:type role: CollaboratorRole
"""
user_is_collaborator = collaborator in self.collaborators.all()
if not user_is_collaborator:
raise learning.exc.UserNotCollaboratorError(
_("The user “%(user)s” does not collaborates on this %(object)s. "
"Maybe you just want to add it as a collaborator?")
% {"object": _(self.__class__.__name__.lower()), "user": collaborator}
)
self.object_collaborators.filter(collaborator=collaborator).update(role=role.name)
def _get_user_perms(self, user: get_user_model()) -> Set[str]:
permissions = set()
if user == self.author:
permissions.update([
# Basic CRUD actions
"view", "delete", "add", "change",
# Extra access for objects
"view_similar",
# Collaborators permissions
"add_collaborator", "delete_collaborator", "change_collaborator", "view_collaborators",
# Objective permissions
"add_objective", "view_objective", "delete_objective", "change_objective"
])
return permissions
def clean(self) -> None:
"""
Check whether the model is clean.
:raises ValidationError: if no language has been selected
"""
if not self.language:
raise ValidationError(_("No language selected."))
def add_objective(self, objective: Objective, taxonomy_level: TaxonomyLevel, objective_reusable: bool) -> None:
"""
Add a learning course_objective on the object.
FIXME: why taxonomy level is not an attribute of Objective?
:param objective: the course_objective to add on this object
:type objective: Objective
:param taxonomy_level: the taxonomy level the object is attached with
:type taxonomy_level: TaxonomyLevel
:param objective_reusable: whether the course_objective is reusable
:type objective_reusable: bool
"""
objective_already_in_model = objective in self.objectives.all()
if objective_already_in_model:
raise learning.exc.ObjectiveAlreadyInModel(
_("The %(ability)s is already linked with this %(model_name)s.") %
{"model_name": _(type(self).__name__.lower()), "ability": objective.ability})
created_objective = self.object_objectives.create(
objective=objective, taxonomy_level=taxonomy_level, objective_reusable=objective_reusable
)
if objective_reusable:
for validator in objective.validators.all():
created_objective.add_validator(validator)
def remove_objective(self, objective: Objective) -> None:
"""
Remove a course_objective from this entity.
:param objective: the course_objective to remove
:type objective: Objective
"""
objective_not_in_model = objective not in self.objectives.all()
if objective_not_in_model:
raise learning.exc.ObjectiveNotInModel(
_("The %(ability)s is not linked with this entity %(model_name)s yet.")
% {"model_name": _(type(self).__name__.lower()), "ability": objective.ability}
)
self.object_objectives.filter(objective=objective).delete()
class Meta:
abstract = True
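# Illustrative sketch (not executed): the collaborator workflow exposed by the
# mixin above, for a hypothetical concrete model instance `course`, an existing
# user `alice`, and made-up CollaboratorRole member names.
#
# course.add_collaborator(alice, CollaboratorRole.TEACHER)
# course.change_collaborator_role(alice, CollaboratorRole.OWNER)
# course.remove_collaborator(alice)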
def extract_all_included_objects(base_object: BasicModelMixin) -> Generator[BasicModelMixin, None, None]:
"""
This generator return iteratively every included objects, whatever the depth is.
:param base_object: the object that has included dependencies
:type base_object: BasicModelMixin
:return: a generator of included objects
:rtype: Generator[BasicModelMixin, None, None]
"""
for an_object in base_object.linked_objects:
for linked_object in extract_all_included_objects(an_object):
yield linked_object
yield an_object
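# Illustrative sketch (not executed): depth-first iteration over everything a
# hypothetical `course` object (any BasicModelMixin implementation) includes,
# using the generator above.
#
# for included in extract_all_included_objects(course):
#     print(included)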
class ActivityAccess(OrderedEnum):
"""
Defines the different access rights for activities.
"""
PUBLIC = (_("Public"), 0)
EXISTING_COURSES = (_("Only in courses"), 1)
COLLABORATORS_ONLY = (_("Collaborators only"), 2)
PRIVATE = (_("Private"), 3)
class ActivityReuse(OrderedEnum):
"""
Defines the different reuse rights for resources.
"""
NO_RESTRICTION = (_("Reusable"), 0)
ONLY_AUTHOR = (_("Author only"), 1)
NON_REUSABLE = (_("Non reusable"), 2)
class CourseState(Enum):
"""
State of a course
"""
DRAFT = _("Draft")
PUBLISHED = _("Published")
ARCHIVED = _("Archived")
class CourseAccess(OrderedEnum):
"""
Access permissions on a course
"""
PUBLIC = (_("Public"), 0)
STUDENTS_ONLY = (_("Students only"), 1)
COLLABORATORS_ONLY = (_("Collaborators only"), 2)
PRIVATE = (_("Private"), 3)
class ResourceManager(BasicModelManager):
"""
The resource specific Model Manager
"""
# noinspection PyMissingOrEmptyDocstring
def public(self, **kwargs) -> QuerySet:
return self._filter_with_query(
super().get_queryset().filter(access=ResourceAccess.PUBLIC.name), kwargs.get("query", "")
)
def recommendations_for(self, user: get_user_model(), **kwargs) -> QuerySet:
"""
Get all resources recommended for a user.
.. note:: A recommendation concerns resources for which the user is neither the author \
nor a collaborator, and which are public and freely reusable.
:param user: the user for which to query recommendations
:type user: get_user_model()
:param kwargs: kwargs that can contain a key “query” to filter name and description
:type kwargs: dict
:return: Resources recommended for the user
:rtype: QuerySet
"""
qs = super().get_queryset() \
.exclude(Q(author=user) | Q(collaborators=user)) \
.filter(Q(reuse=ResourceReuse.NO_RESTRICTION.name) & Q(access=ResourceAccess.PUBLIC.name))
return self._filter_with_query(qs, kwargs.get("query", ""))
def reusable(self, activity: "Activity", user: get_user_model(), **kwargs) -> QuerySet:
"""
Get all the reusable resources for a specific activity.
:param activity: The Activity for which to search reusable resources.
:type activity: Activity
:param user: the user for which resources should be reusable
:type user: get_user_model()
:return: a queryset of reusable resources
:rtype: QuerySet
"""
qs = super().get_queryset().exclude(
activities=activity
).exclude(
reuse=ResourceReuse.NON_REUSABLE.name
).exclude(
Q(reuse=ResourceReuse.ONLY_AUTHOR.name) & ~Q(author=user)
# Resources that can be reused by their author
)
return self._filter_with_query(qs, kwargs.get("query", ""))
def resource_attachment_upload_to_callback(resource: 'Resource', filename: str):
"""
Build the upload path for a resource attachment.
:param resource: the resource the file is attached to
:type resource: Resource
:param filename: the original name of the uploaded file
:type filename: str
:return: the storage path, of the form "resources/<id>/<filename>"
:rtype: str
"""
return "resources/{resource_id}/{filename}".format(resource_id=resource.id, filename=filename)
class Resource(BasicModelMixin):
"""
The resource object: this object may contained an attached resource which include educative material.
"""
type = models.CharField(
max_length=10,
choices=[(rtype.name, rtype.value) for rtype in ResourceType],
verbose_name=_("Type"),
help_text=_("Whether this resource is a common file, a video file or an audio")
)
author = models.ForeignKey(
get_user_model(),
on_delete=models.CASCADE,
related_name="resources",
verbose_name=_("Author"),
help_text=_("The user that created this resource, or that is considered as the current owner")
)
collaborators = models.ManyToManyField(
get_user_model(),
through="ResourceCollaborator",
related_name="collaborates_on_resource",
verbose_name=_("Collaborators"),
help_text=_("The users that collaborate on this resource alongside the author")
)
duration = models.CharField(
max_length=30,
choices=[(duration.name, duration.value) for duration in Duration],
default=Duration.NOT_SPECIFIED.name,
verbose_name=_("Duration"),
help_text=_("The estimated, required duration to consult and understand the resource")
)
licence = models.CharField(
max_length=20,
choices=[(licence.name, licence.value) for licence in Licences],
default=Licences.CC_BY.name,
verbose_name=_("Licence"),
help_text=_(
"The licence under which the content is provided. If you want to share your work with the community"
"a Creative Commons Licence is a bit more adapted. Anyway you can choose to keep your rights on "
"your resource")
)
access = models.CharField(
max_length=20,
choices=[(access.name, access.value) for access in ResourceAccess],
default=ResourceAccess.PUBLIC.name,
verbose_name=_("Access"),
help_text=_("Whether the resource should remain private (for you only), visible only in activities that use it"
", restricted to your collaborators or public")
)
reuse = models.CharField(
max_length=20,
choices=[(reuse.name, reuse.value) for reuse in ResourceReuse],
default=ResourceReuse.ONLY_AUTHOR.name,
verbose_name=_("Reuse"),
help_text=_("Whether you want the resource to be reusable in an activity created by other users."
" Resources can be fully reusable, only by you or not reusable")
)
attachment = models.FileField(
blank=True, null=True,
verbose_name=_("File"),
upload_to=resource_attachment_upload_to_callback
)
objectives = models.ManyToManyField(
Objective,
through="ResourceObjective",
related_name="objectives_on_resource",
verbose_name=_("Objectives"),
help_text=_("The objectives that are in the resource")
)
# noinspection PyMissingOrEmptyDocstring
class PermissionMessage(Enum):
VIEW = _("Can view the resource")
CHANGE = _("Can change the resource")
DELETE = _("Can delete the resource")
objects = ResourceManager()
@property
def object_collaborators(self) -> QuerySet:
"""
Get the resource collaborators
:return: the resource collaborators
:rtype: QuerySet
"""
return self.resource_collaborators
@property
def object_objectives(self):
return self.resource_objectives
@property
def linked_objects(self):
"""
A resource does not include any other object, hence this is an empty generator.
:return: an empty generator
"""
yield from ()
def is_reusable(self, for_activity=None) -> bool:
"""
Check if it is possible to use the resource in an activity. Resource linking depends on a few conditions, based on
access and reuse Resource attributes.
:raises ResourceNotReusableError: when reuse condition do not allow the Resource to be reused by any Activity
:raises ResourceNotReusableOnlyAuthorError: when reuse condition is set to “Only author” and the Resource \
author and the author of the activity given in parameter do not match.
:raises RuntimeError: when reuse condition is set to “Only author” but no activity is given in parameter
:param for_activity: in case the reuse attribute is set to “Only author”, this argument must be provided. \
It indicates | |
# vmray/vmray-misp-feed
import base64
import json
import re
from abc import ABC, abstractmethod
from dataclasses import dataclass, field
from pathlib import PureWindowsPath
from typing import Any, Dict, Iterator, List, Optional, Tuple, Union
from pymisp import MISPAttribute, MISPEvent, MISPObject
from pymisp.mispevent import MISPOrganisation
from vmray.rest_api import VMRayRESTAPIError
from lib.config import Config
from .api_wrapper import VMRay
USER_RE = re.compile(r".:.Users\\(.*?)\\", re.IGNORECASE)
DOC_RE = re.compile(r".:.DOCUME~1.\\(.*?)\\", re.IGNORECASE)
DOC_AND_SETTINGS_RE = re.compile(r".:.Documents and Settings\\(.*?)\\", re.IGNORECASE)
USERPROFILES = [USER_RE, DOC_RE, DOC_AND_SETTINGS_RE]
def classifications_to_str(classifications: List[str]) -> Optional[str]:
if classifications:
return "Classifications: " + ", ".join(classifications)
return None
def merge_lists(target: List[Any], source: List[Any]):
return list({*target, *source})
@dataclass
class Artifact:
is_ioc: bool
verdict: Optional[str]
@abstractmethod
def to_misp_object(self, tag: bool) -> MISPObject:
raise NotImplementedError()
@abstractmethod
def merge(self, other: "Artifact") -> None:
raise NotImplementedError()
@abstractmethod
def __eq__(self, other: "Artifact") -> bool:
raise NotImplementedError()
def tag_artifact_attribute(self, attribute: MISPAttribute) -> None:
if self.is_ioc:
attribute.add_tag('vmray:artifact="IOC"')
if self.verdict:
attribute.add_tag(f'vmray:verdict="{self.verdict}"')
@dataclass
class DomainArtifact(Artifact):
domain: str
sources: List[str]
ips: List[str] = field(default_factory=list)
classifications: List[str] = field(default_factory=list)
def to_misp_object(self, tag: bool) -> MISPObject:
obj = MISPObject(name="domain-ip")
classifications = classifications_to_str(self.classifications)
attr = obj.add_attribute(
"domain", value=self.domain, to_ids=self.is_ioc, comment=classifications
)
if tag:
self.tag_artifact_attribute(attr)
for ip in self.ips:
obj.add_attribute("ip", value=ip, to_ids=self.is_ioc)
return obj
def merge(self, other: Artifact) -> None:
if not isinstance(other, DomainArtifact):
return
self.ips = merge_lists(self.ips, other.ips)
self.classifications = merge_lists(self.classifications, other.classifications)
def __eq__(self, other: Artifact) -> bool:
if not isinstance(other, DomainArtifact):
return NotImplemented
return self.domain == other.domain
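# Illustrative sketch (not called anywhere): building a DomainArtifact by hand
# and converting it into a MISP "domain-ip" object, as the report parsers below
# do. The domain, source and IP values are made up for the example.
def _example_domain_artifact() -> MISPObject:
    artifact = DomainArtifact(
        is_ioc=True,
        verdict="malicious",
        domain="evil.example.com",
        sources=["dns_requests"],
        ips=["198.51.100.7"],
        classifications=["Downloader"],
    )
    return artifact.to_misp_object(tag=True)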
@dataclass
class EmailArtifact(Artifact):
sender: Optional[str]
subject: Optional[str]
recipients: List[str] = field(default_factory=list)
classifications: List[str] = field(default_factory=list)
def to_misp_object(self, tag: bool) -> MISPObject:
obj = MISPObject(name="email")
if self.sender:
classifications = classifications_to_str(self.classifications)
attr = obj.add_attribute(
"from", value=self.sender, to_ids=self.is_ioc, comment=classifications
)
if tag:
self.tag_artifact_attribute(attr)
if self.subject:
obj.add_attribute("subject", value=self.subject, to_ids=False)
for recipient in self.recipients:
obj.add_attribute("to", value=recipient, to_ids=False)
return obj
def merge(self, other: Artifact) -> None:
if not isinstance(other, EmailArtifact):
return
self.recipients = merge_lists(self.recipients, other.recipients)
self.classifications = merge_lists(self.classifications, other.classifications)
def __eq__(self, other: Artifact) -> bool:
if not isinstance(other, EmailArtifact):
return NotImplemented
return self.sender == other.sender and self.subject == other.subject
@dataclass
class FileArtifact(Artifact):
filenames: List[str]
operations: List[str]
md5: str
sha1: str
sha256: str
ssdeep: Optional[str]
imphash: Optional[str]
classifications: List[str]
size: Optional[int]
mimetype: Optional[str] = None
def to_misp_object(self, tag: bool) -> MISPObject:
obj = MISPObject(name="file")
if self.size:
obj.add_attribute("size-in-bytes", value=self.size)
classifications = classifications_to_str(self.classifications)
hashes = [
("md5", self.md5),
("sha1", self.sha1),
("sha256", self.sha256),
("ssdeep", self.ssdeep),
]
for (key, value) in hashes:
if not value:
continue
attr = obj.add_attribute(
key, value=value, to_ids=self.is_ioc, comment=classifications
)
if tag:
self.tag_artifact_attribute(attr)
if self.mimetype:
obj.add_attribute("mimetype", value=self.mimetype, to_ids=False)
operations = None
if self.operations:
operations = "Operations: " + ", ".join(self.operations)
for filename in self.filenames:
filename = PureWindowsPath(filename)
obj.add_attribute("filename", value=filename.name, comment=operations)
fullpath = str(filename)
for regex in USERPROFILES:
fullpath = regex.sub(r"%USERPROFILE%\\", fullpath)
obj.add_attribute("fullpath", fullpath)
return obj
def merge(self, other: Artifact) -> None:
if not isinstance(other, FileArtifact):
return
self.filenames = merge_lists(self.filenames, other.filenames)
self.operations = merge_lists(self.operations, other.operations)
self.classifications = merge_lists(self.classifications, other.classifications)
def __eq__(self, other: Artifact) -> bool:
if not isinstance(other, FileArtifact):
return NotImplemented
return self.sha256 == other.sha256
@dataclass
class IpArtifact(Artifact):
ip: str
sources: List[str]
classifications: List[str] = field(default_factory=list)
def to_misp_object(self, tag: bool) -> MISPObject:
obj = MISPObject(name="ip-port")
classifications = classifications_to_str(self.classifications)
attr = obj.add_attribute(
"ip", value=self.ip, comment=classifications, to_ids=self.is_ioc
)
if tag:
self.tag_artifact_attribute(attr)
return obj
def merge(self, other: Artifact) -> None:
if not isinstance(other, IpArtifact):
return
self.sources = merge_lists(self.sources, other.sources)
self.classifications = merge_lists(self.classifications, other.classifications)
def __eq__(self, other: Artifact) -> bool:
if not isinstance(other, IpArtifact):
return NotImplemented
return self.ip == other.ip
@dataclass
class MutexArtifact(Artifact):
name: str
operations: List[str]
classifications: List[str] = field(default_factory=list)
def to_misp_object(self, tag: bool) -> MISPObject:
obj = MISPObject(name="mutex")
classifications = classifications_to_str(self.classifications)
attr = obj.add_attribute(
"name",
value=self.name,
category="External analysis",
to_ids=False,
comment=classifications,
)
if tag:
self.tag_artifact_attribute(attr)
operations = None
if self.operations:
operations = "Operations: " + ", ".join(self.operations)
obj.add_attribute("description", value=operations, to_ids=False)
return obj
def merge(self, other: Artifact) -> None:
if not isinstance(other, MutexArtifact):
return
self.operations = merge_lists(self.operations, other.operations)
self.classifications = merge_lists(self.classifications, other.classifications)
def __eq__(self, other: Artifact) -> bool:
if not isinstance(other, MutexArtifact):
return NotImplemented
return self.name == other.name
@dataclass
class ProcessArtifact(Artifact):
filename: str
pid: Optional[int] = None
parent_pid: Optional[int] = None
cmd_line: Optional[str] = None
operations: List[str] = field(default_factory=list)
classifications: List[str] = field(default_factory=list)
def to_misp_object(self, tag: bool) -> MISPObject:
obj = MISPObject(name="process")
if self.pid:
obj.add_attribute("pid", value=self.pid, category="External analysis")
if self.parent_pid:
obj.add_attribute(
"parent-pid", value=self.parent_pid, category="External analysis"
)
classifications = classifications_to_str(self.classifications)
name_attr = obj.add_attribute(
"name", self.filename, category="External analysis", comment=classifications
)
cmd_attr = obj.add_attribute("command-line", value=self.cmd_line)
if tag:
self.tag_artifact_attribute(name_attr)
self.tag_artifact_attribute(cmd_attr)
return obj
def merge(self, other: Artifact) -> None:
if not isinstance(other, ProcessArtifact):
return
self.operations = merge_lists(self.operations, other.operations)
self.classifications = merge_lists(self.classifications, other.classifications)
def __eq__(self, other: Artifact) -> bool:
if not isinstance(other, ProcessArtifact):
return NotImplemented
return self.filename == other.filename and self.cmd_line == other.cmd_line
@dataclass
class RegistryArtifact(Artifact):
key: str
operations: List[str]
def to_misp_object(self, tag: bool) -> MISPObject:
obj = MISPObject(name="registry-key")
operations = None
if self.operations:
operations = "Operations: " + ", ".join(self.operations)
attr = obj.add_attribute(
"key", value=self.key, to_ids=self.is_ioc, comment=operations
)
if tag:
self.tag_artifact_attribute(attr)
return obj
def merge(self, other: Artifact) -> None:
if not isinstance(other, RegistryArtifact):
return
self.operations = merge_lists(self.operations, other.operations)
def __eq__(self, other: Artifact) -> bool:
if not isinstance(other, RegistryArtifact):
return NotImplemented
return self.key == other.key
@dataclass
class UrlArtifact(Artifact):
url: str
operations: List[str]
domain: Optional[str] = None
ips: List[str] = field(default_factory=list)
def to_misp_object(self, tag: bool) -> MISPObject:
obj = MISPObject(name="url")
operations = None
if self.operations:
operations = "Operations: " + ", ".join(self.operations)
attr = obj.add_attribute(
"url",
value=self.url,
comment=operations,
category="External analysis",
to_ids=False,
)
if tag:
self.tag_artifact_attribute(attr)
if self.domain:
obj.add_attribute(
"domain", self.domain, category="External analysis", to_ids=False
)
for ip in self.ips:
obj.add_attribute("ip", ip, category="External analysis", to_ids=False)
return obj
def merge(self, other: Artifact) -> None:
if not isinstance(other, UrlArtifact):
return
self.ips = merge_lists(self.ips, other.ips)
self.operations = merge_lists(self.operations, other.operations)
def __eq__(self, other: Artifact) -> bool:
if not isinstance(other, UrlArtifact):
return NotImplemented
return self.url == other.url and self.domain == other.domain
@dataclass
class MitreAttack:
description: str
id: str
def to_misp_galaxy(self) -> str:
return f'misp-galaxy:mitre-attack-pattern="{self.description} - {self.id}"'
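# Illustrative sketch (not executed): the galaxy tag produced by to_misp_galaxy()
# for a made-up MITRE ATT&CK technique.
#
# MitreAttack(description="Process Injection", id="T1055").to_misp_galaxy()
# # -> 'misp-galaxy:mitre-attack-pattern="Process Injection - T1055"'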
@dataclass
class VTI:
category: str
operation: str
technique: str
score: int
class VMRayParseError(Exception):
pass
class ReportParser(ABC):
@abstractmethod
def is_static_report(self) -> bool:
raise NotImplementedError()
@abstractmethod
def artifacts(self) -> Iterator[Artifact]:
raise NotImplementedError()
@abstractmethod
def classifications(self) -> Optional[str]:
raise NotImplementedError()
@abstractmethod
def details(self) -> Iterator[str]:
raise NotImplementedError()
@abstractmethod
def mitre_attacks(self) -> Iterator[MitreAttack]:
raise NotImplementedError()
@abstractmethod
def sandbox_type(self) -> str:
raise NotImplementedError()
@abstractmethod
def score(self) -> str:
raise NotImplementedError()
@abstractmethod
def vtis(self) -> Iterator[VTI]:
raise NotImplementedError()
class Summary(ReportParser):
def __init__(self, analysis_id: int, api: VMRay):
self.analysis_id = analysis_id
data = api.get_summary(analysis_id)
self.report = json.load(data)
@staticmethod
def to_verdict(score: Union[int, str]) -> Optional[str]:
if isinstance(score, int):
if 0 <= score <= 24:
return "clean"
if 25 <= score <= 74:
return "suspicious"
if 75 <= score <= 100:
return "malicious"
return "n/a"
if isinstance(score, str):
score = score.lower()
if score in ("not_suspicious", "whitelisted"):
return "clean"
if score == "blacklisted":
return "malicious"
if score in ("not_available", "unknown"):
return "n/a"
return score
return None
def is_static_report(self) -> bool:
return self.report["vti"]["vti_rule_type"] == "Static"
def artifacts(self) -> Iterator[Artifact]:
artifacts = self.report["artifacts"]
domains = artifacts.get("domains", [])
for domain in domains:
classifications = domain.get("classifications", [])
is_ioc = domain.get("ioc", False)
verdict = self.to_verdict(domain.get("severity"))
ips = domain.get("ip_addresses", [])
artifact = DomainArtifact(
domain=domain["domain"],
sources=domain["sources"],
ips=ips,
classifications=classifications,
is_ioc=is_ioc,
verdict=verdict,
)
yield artifact
emails = artifacts.get("emails", [])
for email in emails:
sender = email.get("sender")
subject = email.get("subject")
verdict = self.to_verdict(email.get("severity"))
recipients = email.get("recipients", [])
classifications = email.get("classifications", [])
is_ioc = email.get("ioc", False)
artifact = EmailArtifact(
sender=sender,
subject=subject,
verdict=verdict,
recipients=recipients,
classifications=classifications,
is_ioc=is_ioc,
)
yield artifact
files = artifacts.get("files", [])
for file_ in files:
if file_["filename"] is None:
continue
filenames = [file_["filename"]]
if "filenames" in file_:
filenames += file_["filenames"]
hashes = file_["hashes"]
classifications = file_.get("classifications", [])
operations = file_.get("operations", [])
is_ioc = file_.get("ioc", False)
mimetype = file_.get("mime_type")
verdict = self.to_verdict(file_.get("severity"))
for hash_dict in hashes:
imp = hash_dict.get("imp_hash")
artifact = FileArtifact(
filenames=filenames,
imphash=imp,
md5=hash_dict["md5_hash"],
ssdeep=hash_dict.get("ssdeep_hash"),
sha256=hash_dict["sha256_hash"],
sha1=hash_dict["sha1_hash"],
operations=operations,
classifications=classifications,
size=file_.get("file_size"),
is_ioc=is_ioc,
mimetype=mimetype,
verdict=verdict,
)
yield artifact
ips = artifacts.get("ips", [])
for ip in ips:
is_ioc = ip.get("ioc", False)
verdict = self.to_verdict(ip.get("severity"))
classifications = ip.get("classifications", [])
artifact = IpArtifact(
ip=ip["ip_address"],
sources=ip["sources"],
classifications=classifications,
verdict=verdict,
is_ioc=is_ioc,
)
yield artifact
mutexes = artifacts.get("mutexes", [])
for mutex in mutexes:
verdict = self.to_verdict(mutex.get("severity"))
is_ioc = mutex.get("ioc", False)
artifact = MutexArtifact(
name=mutex["mutex_name"],
operations=mutex["operations"],
classifications=[],
verdict=verdict,
is_ioc=is_ioc,
)
yield artifact
processes = artifacts.get("processes", [])
for process in processes:
classifications = process.get("classifications", [])
cmd_line = process.get("cmd_line")
name = process["image_name"]
verdict | |
import matplotlib
matplotlib.use("Agg")
from itertools import combinations
from collections import defaultdict
from enum import Enum
import numpy as np
import matplotlib.pyplot as plt
from scipy.spatial import cKDTree as KDTree
from scipy.optimize import shgo
from sklearn.covariance import MinCovDet
import gtsam
from .sonar import OculusProperty
from .utils.conversions import *
from .utils.visualization import *
from .utils.io import *
from . import pcl
class STATUS(Enum):
NOT_ENOUGH_POINTS = "Not enough points"
LARGE_TRANSFORMATION = "Large transformation"
NOT_ENOUGH_OVERLAP = "Not enough overlap"
NOT_CONVERGED = "Not converged"
INITIALIZATION_FAILURE = "Initialization failure"
SUCCESS = "Success"
def __init__(self, *args, **kwargs):
Enum.__init__(*args, **kwargs)
self.description = None
def __bool__(self):
return self == STATUS.SUCCESS
def __nonzero__(self):
return self == STATUS.SUCCESS
def __str__(self):
if self.description:
return self.value + ": " + self.description
else:
return self.value
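# Illustrative sketch (not executed): only STATUS.SUCCESS is truthy, and the
# optional description is appended when a status is printed. Values are made up.
#
# status = STATUS.NOT_ENOUGH_OVERLAP
# status.description = "overlap below threshold"
# if not status:
#     print(status)  # -> "Not enough overlap: overlap below threshold"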
class Keyframe(object):
def __init__(
self, status, time, dr_pose3, points=np.zeros((0, 2), np.float32), cov=None
):
self.status = status # used to mark keyframe
self.time = time # time
self.dr_pose3 = dr_pose3 # dead reckoning 3d pose
self.dr_pose = pose322(dr_pose3) # dead reckoning 2d pose
self.pose3 = dr_pose3 # estimated 3d pose (will be updated later)
self.pose = pose322(dr_pose3) # estimated 2d pose
self.cov = cov # cov in local frame (always 2d)
self.transf_cov = None # cov in global frame
self.points = points.astype(np.float32) # points in local frame (always 2d)
self.transf_points = None # transformed points in global frame based on pose
self.constraints = [] # Non-sequential constraints (key, odom)
self.twist = None # twist message for publishing odom
def update(self, new_pose, new_cov=None):
self.pose = new_pose
self.pose3 = n2g(
(
new_pose.x(),
new_pose.y(),
self.dr_pose3.translation().z(),
self.dr_pose3.rotation().roll(),
self.dr_pose3.rotation().pitch(),
new_pose.theta(),
),
"Pose3",
)
self.transf_points = Keyframe.transform_points(self.points, self.pose)
if new_cov is not None:
self.cov = new_cov
if self.cov is not None:
c, s = np.cos(self.pose.theta()), np.sin(self.pose.theta())
R = np.array([[c, -s], [s, c]])
self.transf_cov = np.array(self.cov)
self.transf_cov[:2, :2] = R.dot(self.transf_cov[:2, :2]).dot(R.T)
self.transf_cov[:2, 2] = R.dot(self.transf_cov[:2, 2])
self.transf_cov[2, :2] = self.transf_cov[2, :2].dot(R.T)
@staticmethod
def transform_points(points, pose):
if len(points) == 0:
return np.empty_like(points, np.float32)
T = pose.matrix().astype(np.float32)
return points.dot(T[:2, :2].T) + T[:2, 2]
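# Illustrative sketch (not called anywhere): transforming local 2D sonar points
# into the global frame with a gtsam pose, as Keyframe.update() does above.
# The pose and points are made up for the example.
def _example_transform_points():
    pose = gtsam.Pose2(1.0, 2.0, np.pi / 2.0)
    points = np.array([[1.0, 0.0], [0.0, 1.0]], np.float32)
    return Keyframe.transform_points(points, pose)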
class InitializationResult(object):
def __init__(self):
# all points are in local frame
self.source_points = np.zeros((0, 2))
self.target_points = np.zeros((0, 2))
self.source_key = None
self.target_key = None
self.source_pose = None
self.target_pose = None
# Cov for sampling
self.cov = None
self.occ = None
self.status = None
self.estimated_source_pose = None
self.source_pose_samples = None
def plot(self, title):
# fmt: off
plt.figure()
# Plot in global frame
points = Keyframe.transform_points(self.target_points, self.target_pose)
plt.plot(points[:, 0], points[:, 1], "k.", ms=1, label="target points")
plt.plot(self.source_pose.x(), self.source_pose.y(), "r+", ms=10)
points = Keyframe.transform_points(self.source_points, self.source_pose)
plt.plot(points[:, 0], points[:, 1], "r.", ms=1, label="source points (guess)")
if self.cov is not None:
c, s = np.cos(self.source_pose.theta()), np.sin(self.source_pose.theta())
R = np.array([[c, -s], [s, c]])
cov = R.dot(self.cov[:2, :2]).dot(R.T)
plot_cov_ellipse((self.source_pose.x(), self.source_pose.y()), cov, nstd=3, fill=False, color="r")
if self.estimated_source_pose is not None:
plt.plot(self.estimated_source_pose.x(), self.estimated_source_pose.y(), "g+", ms=10)
points = Keyframe.transform_points(self.source_points, self.estimated_source_pose)
plt.plot(points[:, 0], points[:, 1], "g.", ms=1, label="source points (initialized)")
if self.source_pose_samples is not None:
poses = np.array(self.source_pose_samples)
plt.scatter(poses[:, 0], poses[:, 1], c=poses[:, 3], s=1, label="pose samples")
plt.colorbar()
if self.occ:
x0, y0, resolution, occ_arr = self.occ
x1 = x0 + (occ_arr.shape[1] - 0.5) * resolution
y1 = y0 + (occ_arr.shape[0] - 0.5) * resolution
plt.imshow(occ_arr, origin='upper', extent=(x0, x1, y1, y0), cmap='Greys', vmin=0, vmax=1, alpha=0.5)
plt.colorbar()
plt.legend()
plt.gca().invert_yaxis()
plt.axis("equal")
plt.title(str(self.status))
plt.savefig(title, dpi=100)
plt.close("all")
# fmt: on
def save(self, filename):
np.savez(
filename,
source_points=self.source_points,
target_points=self.target_points,
source_pose=g2n(self.source_pose),
target_pose=g2n(self.target_pose),
estimated_source_pose=g2n(self.estimated_source_pose),
)
class ICPResult(object):
def __init__(self, init_ret, use_samples=False, sample_eps=0.01):
# all points are in local frame
self.source_points = init_ret.source_points
self.target_points = init_ret.target_points
self.source_key = init_ret.source_key
self.target_key = init_ret.target_key
self.source_pose = init_ret.source_pose
self.target_pose = init_ret.target_pose
self.status = init_ret.status
if init_ret.estimated_source_pose is not None:
self.initial_transform = self.target_pose.between(
init_ret.estimated_source_pose
)
else:
self.initial_transform = self.target_pose.between(self.source_pose)
self.estimated_transform = None
# Cov derived from ICP
self.cov = None
self.initial_transforms = None
if use_samples and init_ret.source_pose_samples is not None:
idx = np.argsort(init_ret.source_pose_samples[:, -1])
transforms = [
self.target_pose.between(n2g(g, "Pose2"))
for g in init_ret.source_pose_samples[idx, :3]
]
filtered = [transforms[0]]
for b in transforms[1:]:
d = np.linalg.norm(g2n(filtered[-1].between(b)))
if d < sample_eps:
continue
else:
filtered.append(b)
self.initial_transforms = filtered
self.sample_transforms = None
# Whether the result is inserted to factor graph
self.inserted = False
def plot(self, title):
# fmt: off
plt.figure()
# Plot in target frame
plt.plot(self.target_points[:, 0], self.target_points[:, 1], "k.", ms=1, label="target points")
plt.plot(self.initial_transform.x(), self.initial_transform.y(), "r+", ms=10)
points = Keyframe.transform_points(self.source_points, self.initial_transform)
plt.plot(points[:, 0], points[:, 1], "r.", ms=1, label="source points (guess)")
if self.estimated_transform is not None:
plt.plot(self.estimated_transform.x(), self.estimated_transform.y(), "g+", ms=10)
points = Keyframe.transform_points(self.source_points, self.estimated_transform)
plt.plot(points[:, 0], points[:, 1], "g.", ms=1, label="source points (estimated)")
if self.cov is not None:
cov = self.cov[:2, :2]
c, s = np.cos(self.estimated_transform.theta()), np.sin(self.estimated_transform.theta())
R = np.array([[c, -s], [s, c]])
cov = R.dot(cov).dot(R.T)
plot_cov_ellipse((self.estimated_transform.x(), self.estimated_transform.y()), cov, nstd=3, color="g", fill=False)
if self.sample_transforms is not None:
plt.scatter(self.sample_transforms[:, 0], self.sample_transforms[:, 1], color='c', s=1, label="sample estimate")
plt.legend()
plt.axis("equal")
plt.gca().invert_yaxis()
plt.title(str(self.status))
plt.savefig(title, dpi=100)
plt.close("all")
# fmt: on
def save(self, filename):
np.savez(
filename,
source_points=self.source_points,
target_points=self.target_points,
source_pose=g2n(self.source_pose),
target_pose=g2n(self.target_pose),
initial_transform=g2n(self.initial_transform),
estimated_transform=g2n(self.estimated_transform),
cov=self.cov,
)
class SMParams(object):
def __init__(self):
# Use occupancy probability map matching to initialize ICP
self.initialization = None
# Global search params
self.initialization_params = None
# Minimum number of points
self.min_points = None
# Max deviation from initial guess
self.max_translation = None
self.max_rotation = None
# Min separation between source key and the last target frame
self.min_st_sep = None
# Number of source frames to build source points
# Not used in SSM
self.source_frames = None
# Number of target frames to build target points
# Not used in NSSM
self.target_frames = None
# Number of ICP instances to run to calculate cov
self.cov_samples = None
class SLAM(object):
def __init__(self):
self.oculus = OculusProperty()
# Create a new factor when
# - |ti - tj| > min_duration and
# - |xi - xj| > max_translation or
# - |ri - rj| > max_rotation
self.keyframe_duration = None
self.keyframe_translation = None
self.keyframe_rotation = None
# List of keyframes
self.keyframes = []
# Current (non-key)frame with real-time pose update
# FIXME propagate cov from previous keyframe
self.current_frame = None
self.isam_params = gtsam.ISAM2Params()
self.graph = gtsam.NonlinearFactorGraph()
self.values = gtsam.Values()
# [x, y, theta]
self.prior_sigmas = None
# Noise model without ICP
# [x, y, theta]
self.odom_sigmas = None
# Downsample point cloud for ICP and publishing
self.point_resolution = 0.5
# Noise radius in overlap
self.point_noise = 0.5
self.ssm_params = SMParams()
self.ssm_params.initialization = True
self.ssm_params.initialization_params = 50, 1, 0.01
self.ssm_params.min_st_sep = 1
self.ssm_params.min_points = 50
self.ssm_params.max_translation = 2.0
self.ssm_params.max_rotation = np.pi / 6
self.ssm_params.target_frames = 3
# Don't use ICP covariance
self.ssm_params.cov_samples = 0
self.nssm_params = SMParams()
self.nssm_params.initialization = True
self.nssm_params.initialization_params = 100, 5, 0.01
self.nssm_params.min_st_sep = 10
self.nssm_params.min_points = 100
self.nssm_params.max_translation = 6.0
self.nssm_params.max_rotation = np.pi / 2
self.nssm_params.source_frames = 5
self.nssm_params.cov_samples = 30
self.icp = pcl.ICP()
# Pairwise consistent measurement
self.nssm_queue = []
self.pcm_queue_size = 5
self.min_pcm = 3
# Use fixed noise model in two cases
# - Sequential scan matching
# - ICP cov is too small in non-sequential scan matching
# [x, y, theta]
self.icp_odom_sigmas = None
# FIXME Can't save fig in online mode
self.save_fig = False
self.save_data = False
@property
def current_keyframe(self):
return self.keyframes[-1]
@property
def current_key(self):
return len(self.keyframes)
def configure(self):
assert (
self.nssm_params.cov_samples == 0
or self.nssm_params.cov_samples
< self.nssm_params.initialization_params[0]
* self.nssm_params.initialization_params[1]
)
assert (
self.ssm_params.cov_samples == 0
or self.ssm_params.cov_samples
< self.ssm_params.initialization_params[0]
* self.ssm_params.initialization_params[1]
)
assert self.nssm_params.source_frames < self.nssm_params.min_st_sep
self.prior_model = self.create_noise_model(self.prior_sigmas)
self.odom_model = self.create_noise_model(self.odom_sigmas)
self.icp_odom_model = self.create_noise_model(self.icp_odom_sigmas)
self.isam = gtsam.ISAM2(self.isam_params)
def get_states(self):
"""
Retrieve all states as array which are represented as
[time, pose2, dr_pose3, cov]
- pose2: [x, y, yaw]
- dr_pose3: [x, y, z, roll, pitch, yaw]
- cov: 3 x 3
"""
states = np.zeros(
self.current_key,
dtype=[
("time", np.float64),
("pose", np.float32, 3),
("dr_pose3", np.float32, 6),
("cov", np.float32, 9),
],
)
# Update all
values = self.isam.calculateEstimate()
for key in range(self.current_key):
pose = values.atPose2(X(key))
cov = self.isam.marginalCovariance(X(key))
self.keyframes[key].update(pose, cov)
t0 = self.keyframes[0].time
for key in range(self.current_key):
keyframe = self.keyframes[key]
states[key]["time"] = (keyframe.time - t0).to_sec()
states[key]["pose"] = g2n(keyframe.pose)
states[key]["dr_pose3"] = g2n(keyframe.dr_pose3)
states[key]["cov"] = keyframe.transf_cov.ravel()
return states
@staticmethod
def sample_pose(pose, covariance):
delta = np.random.multivariate_normal(np.zeros(3), covariance)
return pose.compose(n2g(delta, "Pose2"))
def sample_current_pose(self):
return self.sample_pose(self.current_keyframe.pose, self.current_keyframe.cov)
def get_points(self, frames=None, ref_frame=None, return_keys=False):
| |
#!/usr/bin/python3
#-----------------------------------------------------------------------------
# IoT Communication Monitoring Tools ( IoTCMT ) Create 2021.06
# for Raspberry Pi
# このツールは、Raspberry Pi でWi-Fi接続を行う場合にネットワークの通信状態を
# 監視し、状態変化があった場合に通知と修復処理を行うプログラムです。
# ツールの動作設定は、IoTCNTconfig.json(json形式)にて定義できます。
#
# Author : <EMAIL>ROKU<EMAIL>a
# License : See the license file for the license.
#-----------------------------------------------------------------------------
import os
from posixpath import dirname
import sys
import time
import platform
import subprocess
import struct
import json
import base64
from argparse import ArgumentParser
from datetime import datetime
import logging
from logging import FileHandler, Formatter
from logging import INFO, DEBUG, NOTSET
from typing import List
from Crypto.Cipher import AES
from Crypto.Hash import SHA256
from Crypto import Random
# ----------------------------------------------------------
# Constant definitions
# ----------------------------------------------------------
MSG_GET_OPTIONS_HELP = "Specify the installation target definition file (json format). "
MSG_TOOL_E = "The network monitoring tool is running."
MSG_TOOL_D = "Exit the network monitoring tool."
MSG_ANOMALY_DETECTION_PING_F = "There is an error in communication to the default route. Retry and check."
MSG_ANOMALY_DETECTION_PING_R = "As a result of retrying communication confirmation, it was determined that communication was interrupted."
MSG_ANOMALY_DETECTION_TXT = "[ NETWORK DOWN NOW ] PLEASE CONFIRM IT. (E001-01)"
# Messages related to automatic network recovery
MSG_AUTO_RECOVERY_E = "The automatic recovery function is [ enabled ]."
MSG_AUTO_RECOVERY_D = "The automatic recovery function is [ disabled ]."
MSG_NW_DEVICE_CHECK_RF_E = "Wi-Fi function is enabled."
MSG_NW_DEVICE_CHECK_RF_D = "Wi-Fi function is disabled. (E001-02)"
MSG_NW_DEVICE_CHECK_SV_E = "Network service (dhcpcd) is enabled."
MSG_NW_DEVICE_CHECK_SV_D = "There is a problem with the network service (dhcpcd). (E001-03)"
MSG_AUTO_RECOVERY_RF_E = "The Wi-Fi function has been restored."
MSG_AUTO_RECOVERY_RF_D = "An error occurred while recovering the Wi-Fi function (E001-05)"
MSG_AUTO_RECOVERY_SV_E = "The network service has been restarted."
MSG_AUTO_RECOVERY_SV_D = "An error occurred while restarting the network service. (E001-06)"
MSG_AUTO_RECOVERY_CMD_E = "Since the network cannot be automatically restored, an alternative process will be performed. (E001-07)"
PASS_PHRASE = "<PASSWORD>"
DHCPCD_CMDLIST = ["sudo", "systemctl","restart", "dhcpcd"]
#RFKILL unblock
_PY3_ = sys.version_info.major == 3
DPATH = "/dev/rfkill"
RFKILL_IDX = 0
RFKILL_HARD = 0
RFKILL_SOFT = 0
RFKILL_TYPE_WLAN = 1
RFKILL_OP_CHANGE = 2
RFKILL_EVENT ='IBBBB'
RFKILL_EVENTLEN = struct.calcsize(RFKILL_EVENT)
# ----------------------------------------------------------
# Variable definitions
# ----------------------------------------------------------
# Monitoring interval and anomaly detection conditions
# Ping the default route at the monitoring interval; when the failure threshold is exceeded, transition to the anomaly-detected state
d_ping_retry_save_cnt = 8 # ping failure threshold to the default route (failure count: default 8 failures out of 10 triggers detection)
d_ping_retry_max_cnt = 10 # ping failure threshold to the default route (number of checks: default 10)
# Behaviour after anomaly detection
# --[Recovery] Conditions for automatic recovery
# ---Service recovery: after detection, retry (interval x count); if the anomaly persists, run the automatic recovery processing (when auto recovery is enabled)
d_auto_recovery = False # flag controlling whether recovery actions are taken when an anomaly is detected
d_comm_retry_intarval = 5 # retry interval for state checks (seconds)
d_comm_retry_cnt = 2 # number of retries for state checks
d_comm_recovery_err_cmd = "" # command executed when automatic recovery does not restore the network
# ----------------------------------------------------------
# Function definitions
# ----------------------------------------------------------
# Command line option parsing
def get_option():
argparser = ArgumentParser()
argparser.add_argument('file', help=MSG_GET_OPTIONS_HELP)
return argparser.parse_args()
# Get system information
# Used to obtain the execution environment when running on Linux other than Raspberry Pi or Jetson
def get_system_data(p_passphrase):
lshw_cmd = ['sudo', 'lshw', '-json']
proc = subprocess.Popen(lshw_cmd,
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
# Feed the sudo passphrase on stdin and return the command output
return proc.communicate((p_passphrase + '\n').encode())[0]
# Used to read the execution environment when running on Linux other than Raspberry Pi or Jetson
def read_data(proc_output, class_='system'):
proc_result = []
proc_json = json.loads(proc_output)
for entry in proc_json:
proc_result.append(entry.get('product', ''))
return proc_result
# Function for executing external commands.
def call_subprocess_run(cmd):
try:
res = subprocess.run(cmd,
shell=True,
check=False,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
universal_newlines=True
)
for line in res.stdout.splitlines():
yield line
except subprocess.CalledProcessError:
logger.error('Failed to execute the external command.[' + cmd + ']')
sys.exit(1)
# Function for executing external commands with a sudo passphrase.
def call_subprocess_run_sudo(cmd, p_passphrase):
try:
res = subprocess.run(cmd,
shell=True,
check=True,
input=p_passphrase + '\n',
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
universal_newlines=True
)
for line in res.stdout.splitlines():
yield line
except subprocess.CalledProcessError:
logger.error('Failed to execute the external command.[' + cmd + ']')
sys.exit(1)
# Function for executing external commands given as a list, with a sudo passphrase.
def call_subprocess_run_sudo_list(p_cmdlist, p_passphrase):
print('start')
try:
res = subprocess.run(p_cmdlist,
shell=False,
check=True,
input=p_passphrase + '\n',
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
universal_newlines=True
)
for line in res.stdout.splitlines():
yield line
except subprocess.CalledProcessError:
logger.error('Failed to execute the external command.[' + str(p_cmdlist) + ']')
sys.exit(1)
#---------------------------------------------
# json function
#---------------------------------------------
# json read to dict
def read_json_entry(p_input_file_name):
# Open the json file
json_file_path = os.path.join(dir_path, p_input_file_name)
json_open = open(json_file_path, 'r', encoding="utf-8")
p_json_data_dict = json.load(json_open)
return p_json_data_dict
# Read dict(from json)
def read_json_dict_entry(p_json_data_dict:dict, p_dict_entry_name:str):
p_entry_data = p_json_data_dict.get(p_dict_entry_name, "")
return p_entry_data
def read_parameters(p_input_file_name):
# Open the json file
json_data_dict = read_json_entry(p_input_file_name)
r_defaunlt_route_addr = read_json_dict_entry(json_data_dict,'default_route_addr')
r_ping_retry_save_cnt = read_json_dict_entry(json_data_dict,'ping_retry_save_cnt')
r_ping_retry_max_cnt = read_json_dict_entry(json_data_dict,'ping_retry_max_cnt')
r_auto_recovery = read_json_dict_entry(json_data_dict,'auto_recovery')
r_comm_retry_intarval = read_json_dict_entry(json_data_dict,'comm_retry_intarval')
r_comm_retry_cnt = read_json_dict_entry(json_data_dict,'comm_retry_cnt')
r_comm_recovery_err_cmd = read_json_dict_entry(json_data_dict,'comm_recovery_err_cmd')
r_log_save_count = read_json_dict_entry(json_data_dict,'log_save_count')
r_passphrase = read_json_dict_entry(json_data_dict,'user_passphrase_enc')
return r_defaunlt_route_addr,r_ping_retry_save_cnt,r_ping_retry_max_cnt,r_auto_recovery, r_comm_retry_intarval, r_comm_retry_cnt,r_comm_recovery_err_cmd, r_log_save_count, r_passphrase
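# A minimal sketch of the IoTCNTconfig.json layout, inferred from the keys read above.
# The values shown are illustrative assumptions, not shipped defaults:
#
# {
#     "default_route_addr": "192.168.1.1",
#     "ping_retry_save_cnt": 8,
#     "ping_retry_max_cnt": 10,
#     "auto_recovery": true,
#     "comm_retry_intarval": 5,
#     "comm_retry_cnt": 2,
#     "comm_recovery_err_cmd": "sudo reboot",
#     "log_save_count": 10,
#     "user_passphrase_enc": "<base64-encoded AES-CFB ciphertext, see decrypt() below>"
# }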
#def read_dhcpcd_entry():
#
# # Get the IP address configured for Wi-Fi
# i_ip_addr = call_subprocess_run('ip -f inet -o addr show wlan0 | awk \'{print$4}\' | cut -d \'/\' -f 1')
# # Get the default route configured for Wi-Fi
# i_defroute = call_subprocess_run('ip route | grep default | awk \'{print$3}\'')
#
# # Get the subnet mask configured for Wi-Fi
# i_mask = call_subprocess_run('ifconfig wlan0 | grep inet | awk NR==1\'{print$4}\'')
#
# return next(i_ip_addr), next(i_defroute), next(i_mask)
def create_aes(password, iv):
sha = SHA256.new()
sha.update(password.encode())
key = sha.digest()
return AES.new(key, AES.MODE_CFB, iv)
def decrypt(encrypted_data, password):
iv, cipher = encrypted_data[:AES.block_size], encrypted_data[AES.block_size:]
return create_aes(password, iv).decrypt(cipher)
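# Hedged sketch of the encryption counterpart to decrypt() above. It is not part of the
# original tool, but shows how user_passphrase_enc is presumably produced: a random IV is
# prepended to the AES-CFB ciphertext and the whole blob is base64-encoded before being
# stored in the json file (the function name encrypt_example is hypothetical).
def encrypt_example(plain_text, password):
    iv = Random.new().read(AES.block_size)            # random 16-byte IV
    cipher = create_aes(password, iv).encrypt(plain_text.encode())
    return base64.b64encode(iv + cipher).decode('utf-8')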
# rfkill unblock function
def rfkill_unblock(rfkill_idx = RFKILL_IDX):
rfke = struct.pack(RFKILL_EVENT,RFKILL_IDX,RFKILL_TYPE_WLAN,RFKILL_OP_CHANGE,RFKILL_HARD,RFKILL_SOFT)
if _PY3_: rfke = rfke.decode('ascii')
file_out= open(DPATH, 'w')
file_out.write(rfke)
file_out.close()
# ----------------------------------------------------------
# Main processing
# ----------------------------------------------------------
if __name__=="__main__":
# ----------------------------------------------------------
# Get Current path process
# ----------------------------------------------------------
# Get the execution environment
# Get the current working path
# The way the current path is obtained differs depending on how the tool is run (frozen executable or .py file), so handle both cases
if getattr(sys, 'frozen', False):
os_current_path = os.path.dirname(os.path.abspath(sys.executable))
else:
os_current_path = os.path.dirname(os.path.abspath(__file__))
dir_path = os_current_path
# ----------------------------------------------------------
# Set Logger process
# ----------------------------------------------------------
# Logging configuration (output to a log file)
# --Check for and create the log output directory
log_path = os.path.join(dir_path, "Log")
if not os.path.isdir(log_path):
os.makedirs(log_path, exist_ok = True)
# --File output handler
file_handler = FileHandler(
f"{log_path}/log{datetime.now():%Y%m%d%H%M%S}.log"
)
file_handler.setLevel(DEBUG)
file_handler.setFormatter(
Formatter("%(asctime)s@ %(name)s [%(levelname)s] %(funcName)s: %(message)s")
)
# --Root logger configuration
logging.basicConfig(level=NOTSET, handlers=[file_handler])
logger = logging.getLogger(__name__)
# ---------------------------------------------------------------
# Read json file
# ---------------------------------------------------------------
# Check the installation definition file name specified on the command line
args = get_option()
input_file_name = args.file
p_filename, p_ext = os.path.splitext(input_file_name)
if p_ext == '.json':
logger.info('Input file is [' + input_file_name + '] I checked the configuration file. The process will start.')
else:
logger.error('Input file is [' + input_file_name + '] The extension of the specified file is different. Please specify a .json format file.')
sys.exit()
# Read the settings from the json file
p_defroute,d_ping_retry_save_cnt,d_ping_retry_max_cnt,d_auto_recovery,d_comm_retry_intarval,d_comm_retry_cnt,d_comm_recovery_err_cmd, d_log_save_count, d_passphrase_enc = read_parameters(input_file_name)
# Older Log File delete
files = os.listdir(log_path) # get the list of files in the log directory
if len(files) >= int(d_log_save_count) + 1:
del_files = len(files)-int(d_log_save_count)
files.sort() # sort the file list in ascending order
for i in range(del_files):
del_file_name = os.path.join(log_path, files[i])
logger.info("delete log file : " + del_file_name)
os.remove(del_file_name) # delete starting from the oldest file
# Decode passphrase
d_passphrase_dec64 = base64.b64decode(d_passphrase_enc.encode('utf-8'))
d_passphrase_decrypt = decrypt(d_passphrase_dec64, PASS_PHRASE)
d_passphrase = d_passphrase_decrypt.decode('utf-8')
# ---------------------------------------------------------------
# Check system environment
# ---------------------------------------------------------------
# Determine the system environment.
system_label = ''
os_name = platform.system()
logger.info('The operating system is [' + os_name + ']')
if os_name == 'Linux':
if os.path.exists('/proc/device-tree/model'):
res = call_subprocess_run('cat /proc/device-tree/model')
os_info = res.__next__()
if 'Raspberry Pi' in os_info:
system_label = 'raspi'
logger.info('The model name is [' + os_info + ']')
elif 'NVIDIA Jetson' in os_info:
system_label = 'jetson'
logger.info('The model name is [' + os_info + '] This environment is not supported. Exit the tool.')
else:
system_label = 'other'
logger.error('The model name is [' + os_info + '] This environment is not supported. Exit the tool.')
sys.exit()
else:
for product in read_data(get_system_data(d_passphrase)):
os_info = product
logger.error('The model name is [' + os_info + '] This environment is not supported. Exit the tool.')
sys.exit()
# ---------------------------------------------------------------
# Check Parameters
# ---------------------------------------------------------------
# Determine the system environment.
# Log the enabled state of each tool feature
# Output the enabled state of the automatic recovery function
if bool(d_auto_recovery) == False:
logger.info(MSG_AUTO_RECOVERY_D)
else:
logger.info(MSG_AUTO_RECOVERY_E)
# ---------------------------------------------------------------
# Start Main Process
# ---------------------------------------------------------------
logger.info(MSG_TOOL_E)
# Initialize the network state (normal: 0)
p_status = 0
# Read the network information at run time
#p_ipadr, p_defroute, p_mask = read_dhcpcd_entry()
# If the first ping results in an error, send up to 9 more pings; if 8 of them fail, run the countermeasure processing
p_unreachable = call_subprocess_run("ping -4 -c 1 " + p_defroute + " | awk NR==2\'{print$4}\' | grep -c \'" + p_defroute + "\'")
p_unreachable_int = int(next(p_unreachable))
if p_unreachable_int == 1:
logger.info('Default route ping result [ ' + p_defroute + ': OK ]')
else:
p_unreachable_cnt = 0
logger.error('Default route ping result [ ' + p_defroute + ': NG ]')
logger.error(MSG_ANOMALY_DETECTION_PING_F)
for i in range(int(d_ping_retry_max_cnt)-1):
p_unreachable_int = int(next(call_subprocess_run("ping -4 -c 1 " + p_defroute + " | awk NR==2\'{print$4}\' | grep -c \'" + p_defroute + "\'")))
if p_unreachable_int == 0:
p_unreachable_cnt += 1
if p_unreachable_cnt == int(d_ping_retry_save_cnt):
logger.error('Default route ping result Count ' + str(d_ping_retry_save_cnt) + ' [ ' + p_defroute + ': NG ]')
logger.error(MSG_ANOMALY_DETECTION_PING_R)
logger.error(MSG_ANOMALY_DETECTION_TXT)
p_status = 1
# Processing when a network failure has been recognized
if p_status == 1:
# Check the network service and the Wi-Fi function
p_rf_status = int(next(call_subprocess_run("rfkill list 0 | grep -c 'Soft blocked: yes'")))
if p_rf_status == 0:
logger.info(MSG_NW_DEVICE_CHECK_RF_E)
else:
# If the Wi-Fi function is blocked
logger.error(MSG_NW_DEVICE_CHECK_RF_D)
# If the automatic recovery function is enabled
if bool(d_auto_recovery) == True:
for i in range(int(d_comm_retry_cnt)):
rfkill_unblock()
p_rf_status = int(next(call_subprocess_run("rfkill list 0 | grep -c 'Soft blocked: yes'")))
if p_rf_status == 0:
logger.info(MSG_AUTO_RECOVERY_RF_E)
time.sleep(int(d_comm_retry_intarval))
break
else:
logger.error(MSG_AUTO_RECOVERY_RF_D)
time.sleep(int(d_comm_retry_intarval))
if p_rf_status == 1:
logger.error(MSG_AUTO_RECOVERY_CMD_E)
list(call_subprocess_run(d_comm_recovery_err_cmd)) # consume the generator so the fallback command actually runs
logger.error(MSG_TOOL_D)
sys.exit()
p_dhcpcd_status = str(next(call_subprocess_run("systemctl status dhcpcd | grep 'Active' | awk '{print$2$3}'")))
if p_dhcpcd_status == "active(running)":
logger.info(MSG_NW_DEVICE_CHECK_SV_E)
else:
# If the network service is not active
logger.error(MSG_NW_DEVICE_CHECK_SV_D)
# If the automatic recovery function is enabled
if bool(d_auto_recovery) == True:
for i in range(int(d_comm_retry_cnt)):
list(call_subprocess_run_sudo_list(DHCPCD_CMDLIST, d_passphrase)) # consume the generator so the restart command actually runs
time.sleep(int(d_comm_retry_intarval))
p_dhcpcd_status = str(next(call_subprocess_run("systemctl status dhcpcd | grep 'Active' | awk '{print$2$3}'")))
if p_dhcpcd_status == "active(running)":
logger.info(MSG_AUTO_RECOVERY_SV_E)
break
import os
import posixpath
from enum import Enum
from pystac.errors import RequiredPropertyMissing
from typing import Any, Callable, Dict, List, Optional, TypeVar, Union, cast
from urllib.parse import urljoin, urlparse, urlunparse, ParseResult as URLParseResult
from datetime import datetime, timezone
import dateutil.parser
# Allow for modifying the path library for testability
# (i.e. testing Windows path manipulation on non-Windows systems)
_pathlib = os.path
def safe_urlparse(href: str) -> URLParseResult:
"""Wrapper around :func:`urllib.parse.urlparse` that returns consistent results for
both Windows and UNIX file paths.
For Windows paths, this function will include the drive prefix (e.g. ``"D:\\"``) as
part of the ``path`` of the :class:`urllib.parse.ParseResult` rather than as the
``scheme`` for consistency with handling of UNIX/LINUX file paths.
Args:
href (str) : The HREF to parse. May be a local file path or URL.
Returns:
urllib.parse.ParseResult : The named tuple representing the parsed HREF.
"""
parsed = urlparse(href)
if parsed.scheme != "" and href.lower().startswith("{}:\\".format(parsed.scheme)):
return URLParseResult(
scheme="",
netloc="",
path="{}:{}".format(
# We use this more complicated formulation because parsed.scheme
# converts to lower-case
href[: len(parsed.scheme)],
parsed.path,
),
params=parsed.params,
query=parsed.query,
fragment=parsed.fragment,
)
else:
return parsed
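# Illustrative behaviour of safe_urlparse() (a hedged sketch with assumed example paths):
#   safe_urlparse("D:\\data\\item.json").path            -> "D:\\data\\item.json" (scheme stays "")
#   safe_urlparse("https://example.com/a/b.json").scheme -> "https"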
class StringEnum(str, Enum):
"""Base :class:`enum.Enum` class for string enums that will serialize as the string
value."""
def __str__(self) -> str:
return cast(str, self.value)
class JoinType(StringEnum):
"""Allowed join types for :func:`~pystac.utils.join_path_or_url`."""
@staticmethod
def from_parsed_uri(parsed_uri: URLParseResult) -> "JoinType":
"""Determines the appropriate join type based on the scheme of the parsed
result.
Args:
parsed_uri (urllib.parse.ParseResult) : A named tuple representing the
parsed URI.
Returns:
JoinType : The join type for the URI.
"""
if parsed_uri.scheme == "":
return JoinType.PATH
else:
return JoinType.URL
PATH = "path"
URL = "url"
def join_path_or_url(join_type: JoinType, *args: str) -> str:
"""Functions similarly to :func:`os.path.join`, but can be used to join either a
local file path or a URL.
Args:
join_type (JoinType) : One of ``JoinType.PATH`` or ``JoinType.URL``. If
``JoinType.PATH``, then :func:`os.path.join` is used for the join.
If ``JoinType.URL``, then :func:`posixpath.join` is used.
*args (str): Additional positional string arguments to be joined.
Returns:
str : The joined path
"""
if join_type == JoinType.PATH:
return _pathlib.join(*args)
else:
return posixpath.join(*args)
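# Illustrative usage of join_path_or_url() (assumed example values):
#   join_path_or_url(JoinType.URL, "https://example.com/stac", "catalog.json")
#       -> "https://example.com/stac/catalog.json"
#   join_path_or_url(JoinType.PATH, "data", "catalog.json")
#       -> os.path.join("data", "catalog.json"), i.e. "data/catalog.json" on POSIX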
def _make_relative_href_url(
parsed_source: URLParseResult,
parsed_start: URLParseResult,
start_is_dir: bool = False,
) -> str:
# If the start path is not a directory, get the parent directory
start_dir = (
parsed_start.path if start_is_dir else _pathlib.dirname(parsed_start.path)
)
# Strip the leading slashes from both paths
start_dir = start_dir.lstrip("/")
source_path = parsed_source.path.lstrip("/")
# Get the relative path
rel_url = posixpath.relpath(source_path, start_dir)
# Ensure we retain a trailing slash from the original source path
if parsed_source.path.endswith("/"):
rel_url += "/"
# Prepend the "./", if necessary
if rel_url != "./" and not rel_url.startswith("../"):
rel_url = "./" + rel_url
return rel_url
def _make_relative_href_path(
parsed_source: URLParseResult,
parsed_start: URLParseResult,
start_is_dir: bool = False,
) -> str:
# If the start path is not a directory, get the parent directory
start_dir = (
parsed_start.path if start_is_dir else _pathlib.dirname(parsed_start.path)
)
# Strip the leading slashes from both paths
start_dir = start_dir.lstrip("/")
source_path = parsed_source.path.lstrip("/")
relpath = _pathlib.relpath(source_path, start_dir)
# Ensure we retain a trailing slash from the original source path
if parsed_source.path.endswith("/"):
relpath += "/"
if relpath != "./" and not relpath.startswith(".." + _pathlib.sep):
relpath = _pathlib.join(".", relpath)
return relpath
def make_relative_href(
source_href: str, start_href: str, start_is_dir: bool = False
) -> str:
"""Returns a new string that represents the ``source_href`` as a path relative to
``start_href``. If ``source_href`` and ``start_href`` do not share a common parent,
then ``source_href`` is returned unchanged.
May be used on either local file paths or URLs.
Args:
source_href : The HREF to make relative.
start_href : The HREF that the resulting HREF will be relative to.
start_is_dir : If ``True``, ``start_href`` is treated as a directory.
Otherwise, ``start_href`` is considered to be a path to a file. Defaults to
``False``.
Returns:
str: The relative HREF.
"""
parsed_source = safe_urlparse(source_href)
parsed_start = safe_urlparse(start_href)
if not (
parsed_source.scheme == parsed_start.scheme
and parsed_source.netloc == parsed_start.netloc
):
return source_href
if JoinType.from_parsed_uri(parsed_start) == JoinType.PATH:
return _make_relative_href_path(parsed_source, parsed_start, start_is_dir)
else:
return _make_relative_href_url(parsed_source, parsed_start, start_is_dir)
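# Illustrative behaviour of make_relative_href() (a hedged sketch, POSIX paths assumed):
#   make_relative_href("/a/b/item.json", "/a/b/catalog.json")    -> "./item.json"
#   make_relative_href("/a/c/item.json", "/a/b/catalog.json")    -> "../c/item.json"
#   make_relative_href("https://host/x.json", "/a/catalog.json") -> "https://host/x.json"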
def _make_absolute_href_url(
parsed_source: URLParseResult,
parsed_start: URLParseResult,
start_is_dir: bool = False,
) -> str:
# If the source is already absolute, just return it
if parsed_source.scheme != "":
return urlunparse(parsed_source)
# If the start path is not a directory, get the parent directory
if start_is_dir:
start_dir = parsed_start.path
else:
# Ensure the directory has a trailing slash so urljoin works properly
start_dir = parsed_start.path.rsplit("/", 1)[0] + "/"
# Join the start directory to the relative path and find the absolute path
abs_path = urljoin(start_dir, parsed_source.path)
abs_path = abs_path.replace("\\", "/")
return urlunparse(
(
parsed_start.scheme,
parsed_start.netloc,
abs_path,
parsed_source.params,
parsed_source.query,
parsed_source.fragment,
)
)
def _make_absolute_href_path(
parsed_source: URLParseResult,
parsed_start: URLParseResult,
start_is_dir: bool = False,
) -> str:
# If the source is already absolute, just return it
if _pathlib.isabs(parsed_source.path):
return urlunparse(parsed_source)
# If the start path is not a directory, get the parent directory
start_dir = (
parsed_start.path if start_is_dir else _pathlib.dirname(parsed_start.path)
)
# Join the start directory to the relative path and find the absolute path
abs_path = _pathlib.abspath(_pathlib.join(start_dir, parsed_source.path))
# Account for the normalization of abspath for
# things like /vsitar// prefixes by replacing the
# original start_dir text when abspath modifies the start_dir.
if not start_dir == _pathlib.abspath(start_dir):
abs_path = abs_path.replace(_pathlib.abspath(start_dir), start_dir)
return abs_path
def make_absolute_href(
source_href: str, start_href: Optional[str] = None, start_is_dir: bool = False
) -> str:
"""Returns a new string that represents ``source_href`` as an absolute path. If
``source_href`` is already absolute it is returned unchanged. If ``source_href``
is relative, the absolute HREF is constructed by joining ``source_href`` to
``start_href``.
May be used on either local file paths or URLs.
Args:
source_href : The HREF to make absolute.
start_href : The HREF that will be used as the basis for resolving relative
paths, if ``source_href`` is a relative path. Defaults to the current
working directory.
start_is_dir : If ``True``, ``start_href`` is treated as a directory.
Otherwise, ``start_href`` is considered to be a path to a file. Defaults to
``False``.
Returns:
str: The absolute HREF.
"""
if start_href is None:
start_href = os.getcwd()
start_is_dir = True
parsed_start = safe_urlparse(start_href)
parsed_source = safe_urlparse(source_href)
if (
JoinType.from_parsed_uri(parsed_source) == JoinType.URL
or JoinType.from_parsed_uri(parsed_start) == JoinType.URL
):
return _make_absolute_href_url(parsed_source, parsed_start, start_is_dir)
else:
return _make_absolute_href_path(parsed_source, parsed_start, start_is_dir)
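# Illustrative behaviour of make_absolute_href() (a hedged sketch, POSIX paths assumed):
#   make_absolute_href("./item.json", "/a/b/catalog.json")       -> "/a/b/item.json"
#   make_absolute_href("item.json", "https://host/stac/catalog.json")
#       -> "https://host/stac/item.json"
#   make_absolute_href("/already/abs.json", "/a/b/catalog.json") -> "/already/abs.json"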
def is_absolute_href(href: str) -> bool:
"""Determines if an HREF is absolute or not.
May be used on either local file paths or URLs.
Args:
href : The HREF to consider.
Returns:
bool: ``True`` if the given HREF is absolute, ``False`` if it is relative.
"""
parsed = safe_urlparse(href)
return parsed.scheme != "" or _pathlib.isabs(parsed.path)
def datetime_to_str(dt: datetime) -> str:
"""Converts a :class:`datetime.datetime` instance to an ISO8601 string in the
`RFC 3339, section 5.6
<https://datatracker.ietf.org/doc/html/rfc3339#section-5.6>`__ format required by
the :stac-spec:`STAC Spec <master/item-spec/common-metadata.md#date-and-time>`.
Args:
dt : The datetime to convert.
Returns:
str: The ISO8601 (RFC 3339) formatted string representing the datetime.
"""
if dt.tzinfo is None:
dt = dt.replace(tzinfo=timezone.utc)
timestamp = dt.isoformat()
zulu = "+00:00"
if timestamp.endswith(zulu):
timestamp = "{}Z".format(timestamp[: -len(zulu)])
return timestamp
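# Illustrative round trip between datetime_to_str() and str_to_datetime() below
# (assumed example timestamp):
#   datetime_to_str(datetime(2021, 1, 1, tzinfo=timezone.utc)) -> "2021-01-01T00:00:00Z"
#   str_to_datetime("2021-01-01T00:00:00Z")                    -> 2021-01-01 00:00:00+00:00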
def str_to_datetime(s: str) -> datetime:
"""Converts a string timestamp to a :class:`datetime.datetime` instance using
:meth:`dateutil.parser.parse` under the hood. The input string may be in any
format :std:doc:`supported by the parser <parser>`.
Args:
s (str) : The string to convert to :class:`datetime.datetime`.
"""
return dateutil.parser.parse(s)
def geometry_to_bbox(geometry: Dict[str, Any]) -> List[float]:
"""Extract the bounding box from a geojson geometry
Args:
geometry : GeoJSON geometry dict
Returns:
list: Bounding box of geojson geometry, formatted according to:
https://tools.ietf.org/html/rfc7946#section-5
"""
coords = geometry["coordinates"]
lats: List[float] = []
lons: List[float] = []
def extract_coords(coords: List[Union[List[float], List[List[Any]]]]) -> None:
for x in coords:
# This handles points
if isinstance(x, float):
assert isinstance(
coords[0], float
), f"Type mismatch: {coords[0]} is not a float"
assert isinstance(
coords[1], float
), f"Type mismatch: {coords[1]} is not a float"
lats.append(coords[0])
lons.append(coords[1])
return
if isinstance(x[0], list):
extract_coords(x) # type:ignore
else:
lat, lon = x
lats.append(lat) # type:ignore
lons.append(lon) # type:ignore
extract_coords(coords)
lons.sort()
lats.sort()
bbox = [lats[0], lons[0], lats[-1], lons[-1]]
return bbox
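# Illustrative result for a simple GeoJSON Polygon (assumed coordinates). Despite the
# internal lats/lons naming following raw coordinate order, the returned bbox is
# [min x, min y, max x, max y] as required by RFC 7946:
#   geometry_to_bbox({"type": "Polygon",
#                     "coordinates": [[[0.0, 0.0], [2.0, 0.0], [2.0, 1.0], [0.0, 0.0]]]})
#       -> [0.0, 0.0, 2.0, 1.0]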
T = TypeVar("T")
U = TypeVar("U")
def map_opt(fn: Callable[[T], U], v: Optional[T]) -> Optional[U]:
"""Maps the value of an optional type to | |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Author: <NAME>
# Date: 2021/1/31 9:47 PM
"""
Lightweight class to record training and dataset file info for later retrieval.
Experiment ID is generated from the exp_ID_config.csv file; it defaults to 1 if the file does not exist.
Also includes a FalsePositiveCategorizer class for categorizing false negatives and false positives
"""
import os
from datetime import date
import pickle
from typing import Union, Dict, Tuple, List
import pandas as pd
from tensorflow.keras import Sequential
import numpy as np
from processing.marsdataloader import MARSDataLoader, generate_all_feat_df
from processing.extract_features import extract_destabilize
import consts as C
class Recorder():
def __init__(self,
loader: MARSDataLoader,
train_args: dict,
seq_y: bool,
verbose=True):
self.loader = loader
self.verbose = verbose
self.train_args = train_args
self.configID = self.train_args["configID"]
self.exp_date = date.today().strftime("%B %d, %Y")
self.using_seq_label = seq_y
# get unique experiment ID for current project folder
self.exp_ID = int(_find_next_exp_ID())
# unique experiment folder path
# i.e. fill in exp{}_{}win_{}ahead_conf{}_{}
self.exp_dir = C.EXP_FORMAT.format(self.exp_ID,
self.train_args["window"],
self.train_args["ahead"],
self.train_args["configID"],
self.train_args["model"])
# get prediction path
self.pred_path = C.PRED_PATH.format(self.exp_ID,
self.train_args["window"],
self.train_args["ahead"],
self.train_args["configID"],
self.train_args["model"])
self.model_path = os.path.join(self.exp_dir, C.MODEL_PATH) # path to model
self.recorder_path = os.path.join(self.exp_dir, C.REC_BASENAME)
self.norm_stats_path = os.path.join(self.exp_dir, C.NORM_STATS_PATH)
# to be recorded on record_experiment
self.history: dict = {} # history dict from keras history object, if any passed
self.time_taken: str = "" # string of time taken in this experiment
self.average_epochs: float = 0
self.std_epochs: float = 0
self.best_split: int = -1 # index of the best performing split, 0-based
if self.verbose:
print("Now recording experiment #{}".format(self.exp_ID))
def record_experiment(self,
test_results: dict,
time_taken: str,
epoch_list: list,
best_split: int,
model: Sequential = None,
norm_stats: dict = None,
train_history: list = None,
save_model: bool = False):
"""record experiment configuration and statistics"""
# link references
if train_history:
self.history = train_history
self.average_epochs = float(np.mean(epoch_list))
self.std_epochs = float(np.std(epoch_list))
self.best_split = best_split
self.time_taken = time_taken
# create new path in results and experiment folders
if not os.path.exists(self.exp_dir):
os.mkdir(self.exp_dir)
if model is not None and save_model:
self.__save_model(model)
if norm_stats is not None:
self.__save_norm_stats(norm_stats)
# append test set metrics to results/exp_results_all.csv
self.__save_results(test_results)
# once all of the above done, append experiment info to results/exp_ID_config.csv
self.__save_exp_config()
# pickle this recorder to its path
pickle.dump(self, open(self.recorder_path, "wb"))
if self.verbose:
print("Experiment {} recorded successfully!".format(self.exp_ID))
def save_predictions(self,
test_inds: Union[list, np.ndarray],
y_pred: Union[list, np.ndarray],
true_preds_path: str="",
false_preds_path: str="",
custom_ahead: float=None,
save_lookahead_windows=False) -> None:
"""save prediction for specified rows; separate files will be generated if no sequence label used and true and
false pred paths are given."""
# generate test DataFrame
test_df = generate_all_feat_df(self.loader, self.configID, inds=test_inds)
# append predictions
if y_pred.ndim <=2:
test_df[C.PRED_COL] = y_pred
else:
# squeeze sequence labels to (num_samples, sampling_rate)
y_pred = y_pred.squeeze(-1)
# convert to list of arrays for DataFrame to correctly append new column
test_df[C.PRED_COL] = [y_pred[i, :] for i in range(y_pred.shape[0])]
# reorder so that false predictions come up first and label true and false predictions
if self.using_seq_label:
# compare seq predictions by row
test_df["pred_seq_is_correct"] = test_df.apply(lambda row: np.array_equal(row.seq_label, row[C.PRED_COL]), axis=1)
test_df.sort_values("pred_seq_is_correct", inplace=True)
else:
# show false negatives first
test_df.sort_values(["label", C.PRED_COL], ascending=[False, True], inplace=True)
# pop seq_label column since not needed
test_df.drop(["seq_label"], axis=1, inplace=True)
# save correct and incorrect predictions separately if both paths are given; otherwise, save in one file
if true_preds_path and false_preds_path and not self.using_seq_label:
pred_label_is_correct = test_df.apply(lambda row: np.array_equal(row.label, row[C.PRED_COL]), axis=1)
# categorize false negatives for non-sequential labels
if not self.using_seq_label:
print("now processing destab joystick in lookahead windows...")
test_df = append_lookahead_stats(test_df, self, custom_ahead, save_lookahead_windows=save_lookahead_windows)
grouped = test_df.groupby(pred_label_is_correct)
# find respective rows and save separately
true_df = grouped.get_group(True)
false_df = grouped.get_group(False)
true_df.to_csv(true_preds_path, index=False)
print(f"saved {len(true_df)} true/correct predictions to {true_preds_path}")
false_df.to_csv(false_preds_path, index=False)
print(f"saved {len(false_df)} true/correct predictions to {false_preds_path}")
print(f"accuracy (for debugging): {len(true_df)/(len(true_df) + len(false_df))}")
else:
test_df.to_csv(self.pred_path, index=False)
if self.verbose:
print("Model test set input and prediction saved successfully!")
def list_training_columns(self) -> list:
return C.CONFIG_SPECS[self.configID][C.COLS_USED]
def __save_model(self, model) -> None:
"""helper to save models"""
assert type(model) == Sequential, "Only Keras Sequential models are supported! " \
"Consider adding new code and updating model saving methods."
# append number to avoid collision, if needed
collision_n = 0
if os.path.exists(self.model_path):
while os.path.exists(self.model_path + "_" + str(collision_n)):
collision_n += 1
self.model_path = self.model_path + "_" + str(collision_n)
if collision_n:
print("Model path has been revised to {} to avoid collision. \n"
"In principal, this shouldn't happen since model path has unique experiment ID.".format(
self.model_path))
model.save(self.model_path)
def __save_norm_stats(self, norm_stats: dict):
"""helper to save normalization stats"""
pickle.dump(norm_stats, open(self.norm_stats_path, "wb"))
def __save_results(self, cv_results: Dict[str, list]) -> None:
"""calculate and append CV test results to results/exp_results_all.csv"""
# compute mean and std of CV results
calculated_results = {}
for metric_name in cv_results:
calculated_results[metric_name + C.MEAN_SUFFIX] = np.nanmean(cv_results[metric_name])
calculated_results[metric_name + C.STD_SUFFIX] = np.nanstd(cv_results[metric_name])
# add ID to current results
calculated_results[C.EXP_COL_CONV[C.EXP_ID_COL]] = self.exp_ID
# retrieve previous results
try:
results_df = pd.read_csv(C.ALL_RES_CSV_PATH)
except IOError:
results_df = pd.read_csv(C.TEMPLATE_ALL_RES)
# save current results
results_df = results_df.append(calculated_results, ignore_index=True)
results_df.to_csv(C.ALL_RES_CSV_PATH, index=False)
def __save_exp_config(self) -> None:
"""save current configuration to exp_ID_config.csv for easy retrieval"""
# load configuration file
if os.path.exists(C.EXP_ID_LOG):
config_df = pd.read_csv(C.EXP_ID_LOG, dtype={C.EXP_ID_COL: int})
else:
config_df = pd.read_csv(C.TEMPLATE_ID_LOG, dtype={C.EXP_ID_COL: int})
config_df = config_df.append(self.__compile_exp_dict(), ignore_index=True)
config_df.to_csv(C.EXP_ID_LOG, index=False)
def __compile_exp_dict(self) -> dict:
"""compile experiment configuration dictionary"""
# put together attributes for extraction
all_atts = {**vars(self), **vars(self.loader), **self.train_args}
# keep only savable atts--filter out lists, dicts, etc.
savable_atts = _filter_values(all_atts)
# convert the convertable columns, if possible, for output
output = {}
for (column, value) in savable_atts.items():
if column in C.EXP_COL_CONV:
output[C.EXP_COL_CONV[column]] = value
else:
output[column] = value
# Lastly, add info not included in class fields.
# text description of dataset configuration (e.g. basic triple)
output[C.CONFIG_DESC_COL_NAME] = C.CONFIG_SPECS[self.configID][C.CONFIG_OVERVIEW]
return output
def _find_next_exp_ID() -> int:
"""helper to find the next unique exp ID in given exp dir, fast operation to avoid collision"""
# find ID based on ID record file
try:
with open(C.EXP_ID_RECORD, "r") as id_file:
next_id = int(id_file.read())
except IOError:
next_id = 1
# save ID to record
with open(C.EXP_ID_RECORD, 'w') as count_file:
count_file.write(str(next_id + 1))
return next_id
def _filter_values(vars_dict: dict)->dict:
"""helper function to filter out dictionary entries whose values are not str, num or bool; called before converting args to column names"""
output = {key: value for key, value in vars_dict.items() if type(value) in C.ACCEPTABLE_TYPES}
# ad-hoc popping duplicate keys
output.pop("seq_label") # same as using_seq_label in Recorder
# ad-hoc for adding layer sizes
if vars_dict["model"] in {C.CNN, C.MLP}:
output["layer_sizes"] = vars_dict["layer_sizes"]
else:
output["layer_sizes"] = "NA"
# ad-hoc change filter_number to NA for non-CNN models
if vars_dict["model"] != C.CNN:
output["filter_number"] = "NA"
return output
class TestSetProcessor:
def __init__(self,
recorder: Recorder,
current_ahead: float
):
if not os.path.exists(C.RAW_DATA_PATH):
raise FileNotFoundError("Raw data file cannot be found at {}".format(C.RAW_DATA_PATH))
# extract all needed columns
self.raw_data = pd.read_csv(C.RAW_DATA_PATH, usecols=C.ESSENTIAL_RAW_COLS)
# filter out non-human controls in data for faster processing
self.raw_data = self.raw_data[self.raw_data.trialPhase != 1]
# group by trials for easy locating
self.grouped = self.raw_data.groupby('peopleTrialKey')
# get data from recorder
self.window_size = recorder.loader.window
self.lookahead = current_ahead
self.velocity_col = "calculated_vel" if "velocity_cal" in recorder.list_training_columns() else "currentVelRoll"
def generate_categories(self, data_df: pd.DataFrame) -> Tuple[List[float], List[float], List[int], List[int], List[float], List[float]]:
"""append a new column containing entry stats"""
# apply categorization function to each data point to assign error type.
# ico = including carryover destabilizing joystick from input window (ie seen by machine); eco = exclude such
lookahead_avg_destab_mag_ico, lookahead_avg_destab_mag_eco = [], []
lookahead_total_destab_steps_ico, lookahead_total_destab_steps_eco = [], []
lookahead_destab_sustained_ico, lookahead_destab_sustained_eco = [], []
for _, row in data_df.iterrows():
avg_destab_mag_ico, avg_destab_mag_eco, \
total_destab_steps_ico, total_destab_steps_eco, \
destab_sustained_ico, destab_sustained_eco = self._extract_lookahead_stats(float(row.end_seconds),
self.grouped.get_group(row.trial_key))
lookahead_avg_destab_mag_ico.append(avg_destab_mag_ico)
lookahead_avg_destab_mag_eco.append(avg_destab_mag_eco)
lookahead_total_destab_steps_ico.append(total_destab_steps_ico)
lookahead_total_destab_steps_eco.append(total_destab_steps_eco)
lookahead_destab_sustained_ico.append(destab_sustained_ico)
lookahead_destab_sustained_eco.append(destab_sustained_eco)
return lookahead_avg_destab_mag_ico, lookahead_avg_destab_mag_eco, \
lookahead_total_destab_steps_ico, lookahead_total_destab_steps_eco, \
lookahead_destab_sustained_ico, lookahead_destab_sustained_eco
def save_lookahead_windows(self, data_df: pd.DataFrame) -> pd.DataFrame:
"""save lookahead windows of each entry in given DataFrame"""
# TODO where to put this? additional arg in predict.py?
# output: [trial key, window_end], vel, pos, joystick,
# locate lookahead sequences, note that first entry is last time step in input window
lookahead_df_dict = {key:[] for key in [# "trial_key", "window_end",
"lookahead_vel", "lookahead_pos", "lookahead_joy", "lookahead_times"]}
for _, row in data_df.iterrows():
end_sec = float(row.end_seconds)
trial_entries = self.grouped.get_group(row.trial_key)
lookahead_readings = trial_entries[
trial_entries.seconds.between(end_sec, end_sec + self.lookahead, inclusive="neither")]
# record data into df
# lookahead_df_dict["trial_key"].append(row.trial_key)
# lookahead_df_dict["window_end"].append(end_sec)
lookahead_df_dict["lookahead_vel"].append(lookahead_readings[self.velocity_col].to_numpy())
lookahead_df_dict["lookahead_pos"].append(lookahead_readings['currentPosRoll'].to_numpy())
lookahead_df_dict["lookahead_joy"].append(lookahead_readings['joystickX'].to_numpy())
lookahead_df_dict["lookahead_times"].append(lookahead_readings.seconds.to_numpy())
return pd.DataFrame(lookahead_df_dict)
SequenceSet: ' + str(e))
header_id = input_many_sequenceSet['sequences'][0]['sequence_id']
target_fasta_file_path = os.path.join(self.scratch, header_id+'.fasta')
target_fasta_file_handle = open(target_fasta_file_path, 'w')
self.log(console, 'writing reads file: '+str(target_fasta_file_path))
for seq_obj in input_many_sequenceSet['sequences']:
header_id = seq_obj['sequence_id']
sequence_str = seq_obj['sequence']
if not self.validateSeq (seq_type, sequence_str, header_id):
raise ValueError ("BAD record for sequence_id: "+header_id+"\n"+sequence_str+"\n")
else:
appropriate_sequence_found_in_many_input = True
target_fasta_file_handle.write('>'+header_id+"\n")
target_fasta_file_handle.write(sequence_str+"\n")
target_fasta_file_handle.close();
self.log(console, 'done')
# SingleEndLibrary
#
elif target_type_name == 'SingleEndLibrary':
# DEBUG
#for k in data:
# self.log(console,"SingleEndLibrary ["+k+"]: "+str(data[k]))
try:
if 'lib' in input_many_data:
target_fasta = input_many_data['lib']['file']
elif 'handle' in input_many_data:
target_fasta = input_many_data['handle']
else:
self.log(console,"bad structure for 'target_fasta'")
raise ValueError("bad structure for 'target_fasta'")
#if 'lib2' in data:
# reverse_reads = data['lib2']['file']
#elif 'handle_2' in data:
# reverse_reads = data['handle_2']
#else:
# reverse_reads={}
### NOTE: this section is what could be replaced by the transform services
target_fasta_file_path = os.path.join(self.scratch,target_fasta['file_name'])
target_fasta_file_handle = open(target_fasta_file_path, 'w')
self.log(console, 'downloading reads file: '+str(target_fasta_file_path))
headers = {'Authorization': 'OAuth '+self.ctx['token']}
r = requests.get(target_fasta['url']+'/node/'+target_fasta['id']+'?download', stream=True, headers=headers)
for chunk in r.iter_content(1024):
appropriate_sequence_found_in_many_input = True
target_fasta_file_handle.write(chunk)
target_fasta_file_handle.close();
self.log(console, 'done')
### END NOTE
# remove carriage returns
new_file_path = target_fasta_file_path+"-CRfree"
new_file_handle = open(new_file_path, 'w')
target_fasta_file_handle = open(target_fasta_file_path, 'r')
for line in target_fasta_file_handle:
line = re.sub("\r","",line)
new_file_handle.write(line)
target_fasta_file_handle.close();
new_file_handle.close()
target_fasta_file_path = new_file_path
# convert FASTQ to FASTA (if necessary)
new_file_path = target_fasta_file_path+".fna"
new_file_handle = open(new_file_path, 'w')
if target_fasta_file_compression == 'gz':
target_fasta_file_handle = gzip.open(target_fasta_file_path, 'r')
else:
target_fasta_file_handle = open(target_fasta_file_path, 'r')
header = None
last_header = None
last_seq_buf = None
last_line_was_header = False
was_fastq = False
for line in target_fasta_file_handle:
if line.startswith('>'):
break
elif line.startswith('@'):
was_fastq = True
header = line[1:]
if last_header != None:
new_file_handle.write('>'+last_header)
new_file_handle.write(last_seq_buf)
last_seq_buf = None
last_header = header
last_line_was_header = True
elif last_line_was_header:
last_seq_buf = line
last_line_was_header = False
else:
continue
if last_header != None:
new_file_handle.write('>'+last_header)
new_file_handle.write(last_seq_buf)
new_file_handle.close()
target_fasta_file_handle.close()
if was_fastq:
target_fasta_file_path = new_file_path
except Exception as e:
print((traceback.format_exc()))
raise ValueError('Unable to download single-end read library files: ' + str(e))
"""
# FeatureSet
#
#elif target_type_name == 'FeatureSet':
if target_type_name == 'FeatureSet':
# retrieve sequences for features
input_many_featureSet = input_many_data
target_fasta_file_dir = self.scratch
target_fasta_file = input_many_name+".fasta"
# DEBUG
#beg_time = (datetime.utcnow() - datetime.utcfromtimestamp(0)).total_seconds()
FeatureSetToFASTA_params = {
'featureSet_ref': input_many_ref,
'file': target_fasta_file,
'dir': target_fasta_file_dir,
'console': console,
'invalid_msgs': invalid_msgs,
'residue_type': seq_type,
'feature_type': 'ALL',
'record_id_pattern': '%%genome_ref%%'+self.genome_id_feature_id_delim+'%%feature_id%%',
'record_desc_pattern': '[%%genome_ref%%]',
'case': 'upper',
'linewrap': 50,
'merge_fasta_files': 'TRUE'
}
#self.log(console,"callbackURL='"+self.callbackURL+"'") # DEBUG
#SERVICE_VER = 'release'
SERVICE_VER = 'beta' # DEBUG
DOTFU = KBaseDataObjectToFileUtils (url=self.callbackURL, token=self.ctx['token'], service_ver=SERVICE_VER)
FeatureSetToFASTA_retVal = DOTFU.FeatureSetToFASTA (FeatureSetToFASTA_params)
target_fasta_file_path = FeatureSetToFASTA_retVal['fasta_file_path']
target_feature_info['feature_ids_by_genome_ref'] = FeatureSetToFASTA_retVal['feature_ids_by_genome_ref']
if len(list(target_feature_info['feature_ids_by_genome_ref'].keys())) > 0:
appropriate_sequence_found_in_many_input = True
target_feature_info['feature_id_to_function'] = FeatureSetToFASTA_retVal['feature_id_to_function']
target_feature_info['genome_ref_to_sci_name'] = FeatureSetToFASTA_retVal['genome_ref_to_sci_name']
target_feature_info['genome_ref_to_obj_name'] = FeatureSetToFASTA_retVal['genome_ref_to_obj_name']
# DEBUG
#end_time = (datetime.utcnow() - datetime.utcfromtimestamp(0)).total_seconds()
#self.log(console, "FeatureSetToFasta() took "+str(end_time-beg_time)+" secs")
# Genome
#
elif target_type_name == 'Genome':
target_fasta_file_dir = self.scratch
target_fasta_file = input_many_name+".fasta"
# DEBUG
#beg_time = (datetime.utcnow() - datetime.utcfromtimestamp(0)).total_seconds()
GenomeToFASTA_params = {
'genome_ref': input_many_ref,
'file': target_fasta_file,
'dir': target_fasta_file_dir,
'console': console,
'invalid_msgs': invalid_msgs,
'residue_type': seq_type,
'feature_type': 'ALL',
'record_id_pattern': '%%feature_id%%',
'record_desc_pattern': '[%%genome_id%%]',
'case': 'upper',
'linewrap': 50
}
#self.log(console,"callbackURL='"+self.callbackURL+"'") # DEBUG
#SERVICE_VER = 'release'
SERVICE_VER = 'beta' # DEBUG
DOTFU = KBaseDataObjectToFileUtils (url=self.callbackURL, token=self.ctx['token'], service_ver=SERVICE_VER)
GenomeToFASTA_retVal = DOTFU.GenomeToFASTA (GenomeToFASTA_params)
target_fasta_file_path = GenomeToFASTA_retVal['fasta_file_path']
target_feature_info['feature_ids'] = GenomeToFASTA_retVal['feature_ids']
if len(target_feature_info['feature_ids']) > 0:
appropriate_sequence_found_in_many_input = True
target_feature_info['feature_id_to_function'] = GenomeToFASTA_retVal['feature_id_to_function']
target_feature_info['genome_ref_to_sci_name'] = GenomeToFASTA_retVal['genome_ref_to_sci_name']
target_feature_info['genome_ref_to_obj_name'] = GenomeToFASTA_retVal['genome_ref_to_obj_name']
# DEBUG
#end_time = (datetime.utcnow() - datetime.utcfromtimestamp(0)).total_seconds()
#self.log(console, "Genome2Fasta() took "+str(end_time-beg_time)+" secs")
# GenomeSet
#
elif target_type_name == 'GenomeSet':
input_many_genomeSet = input_many_data
target_fasta_file_dir = self.scratch
target_fasta_file = input_many_name+".fasta"
# DEBUG
#beg_time = (datetime.utcnow() - datetime.utcfromtimestamp(0)).total_seconds()
GenomeSetToFASTA_params = {
'genomeSet_ref': input_many_ref,
'file': target_fasta_file,
'dir': target_fasta_file_dir,
'console': console,
'invalid_msgs': invalid_msgs,
'residue_type': seq_type,
'feature_type': 'ALL',
'record_id_pattern': '%%genome_ref%%'+self.genome_id_feature_id_delim+'%%feature_id%%',
'record_desc_pattern': '[%%genome_ref%%]',
'case': 'upper',
'linewrap': 50,
'merge_fasta_files': 'TRUE'
}
#self.log(console,"callbackURL='"+self.callbackURL+"'") # DEBUG
#SERVICE_VER = 'release'
SERVICE_VER = 'beta' # DEBUG
DOTFU = KBaseDataObjectToFileUtils (url=self.callbackURL, token=self.ctx['token'], service_ver=SERVICE_VER)
GenomeSetToFASTA_retVal = DOTFU.GenomeSetToFASTA (GenomeSetToFASTA_params)
target_fasta_file_path = GenomeSetToFASTA_retVal['fasta_file_path_list'][0]
target_feature_info['feature_ids_by_genome_id'] = GenomeSetToFASTA_retVal['feature_ids_by_genome_id']
if len(list(target_feature_info['feature_ids_by_genome_id'].keys())) > 0:
appropriate_sequence_found_in_many_input = True
target_feature_info['feature_id_to_function'] = GenomeSetToFASTA_retVal['feature_id_to_function']
target_feature_info['genome_ref_to_sci_name'] = GenomeSetToFASTA_retVal['genome_ref_to_sci_name']
target_feature_info['genome_ref_to_obj_name'] = GenomeSetToFASTA_retVal['genome_ref_to_obj_name']
target_feature_info['genome_id_to_genome_ref'] = dict()
for genome_id in input_many_genomeSet['elements'].keys():
genome_ref = input_many_genomeSet['elements'][genome_id]['ref']
target_feature_info['genome_id_to_genome_ref'][genome_id] = genome_ref
# DEBUG
#end_time = (datetime.utcnow() - datetime.utcfromtimestamp(0)).total_seconds()
#self.log(console, "GenomeSetToFasta() took "+str(end_time-beg_time)+" secs")
# AnnotatedMetagenomeAssembly
#
elif target_type_name == 'AnnotatedMetagenomeAssembly':
target_fasta_file_dir = self.scratch
target_fasta_file = input_many_name+".fasta"
# DEBUG
#beg_time = (datetime.utcnow() - datetime.utcfromtimestamp(0)).total_seconds()
AnnotatedMetagenomeAssemblyToFASTA_params = {
'ama_ref': input_many_ref,
'file': target_fasta_file,
'dir': target_fasta_file_dir,
'console': console,
'invalid_msgs': invalid_msgs,
'residue_type': seq_type,
'feature_type': 'ALL',
'record_id_pattern': '%%feature_id%%',
'record_desc_pattern': '[%%genome_id%%]',
'case': 'upper',
'linewrap': 50
}
#self.log(console,"callbackURL='"+self.callbackURL+"'") # DEBUG
#SERVICE_VER = 'release'
SERVICE_VER = 'beta' # DEBUG
DOTFU = KBaseDataObjectToFileUtils (url=self.callbackURL, token=self.ctx['token'], service_ver=SERVICE_VER)
AnnotatedMetagenomeAssemblyToFASTA_retVal = DOTFU.AnnotatedMetagenomeAssemblyToFASTA (AnnotatedMetagenomeAssemblyToFASTA_params)
target_fasta_file_path = AnnotatedMetagenomeAssemblyToFASTA_retVal['fasta_file_path']
target_feature_info['feature_ids'] = AnnotatedMetagenomeAssemblyToFASTA_retVal['feature_ids']
if len(target_feature_info['feature_ids']) > 0:
appropriate_sequence_found_in_many_input = True
target_feature_info['feature_id_to_function'] = AnnotatedMetagenomeAssemblyToFASTA_retVal['feature_id_to_function']
target_feature_info['ama_ref_to_obj_name'] = AnnotatedMetagenomeAssemblyToFASTA_retVal['ama_ref_to_obj_name']
# DEBUG
#with open (target_fasta_file_path, 'r') as fasta_handle:
# for fasta_line in fasta_handle.readlines():
# print ("FASTA_LINE: '"+fasta_line)
# DEBUG
#end_time = (datetime.utcnow() - datetime.utcfromtimestamp(0)).total_seconds()
#self.log(console, "Genome2Fasta() took "+str(end_time-beg_time)+" secs")
# Missing proper input_target_type
#
else:
raise ValueError('Cannot yet handle input_many type of: '+target_type_name)
return ({ 'target_name': input_many_name,
'target_type_name': target_type_name,
'target_fasta_file_path': target_fasta_file_path,
'appropriate_sequence_found_in_many_input': appropriate_sequence_found_in_many_input,
'invalid_msgs': invalid_msgs,
'target_feature_info': target_feature_info
})
# input data failed validation. Need to return
#
def save_error_report_with_invalid_msgs (self, invalid_msgs, input_one_ref, input_many_refs, method_name):
console = []
# build output report object
#
self.log(console,"BUILDING REPORT") # DEBUG
report += "FAILURE:\n\n"+"\n".join(invalid_msgs)+"\n"
reportObj = {
'objects_created':[],
'text_message':report
}
reportName = 'blast_report_'+str(uuid.uuid4())
report_obj_info = self.wsClient.save_objects({
#'id':info[6],
'workspace':params['workspace_name'],
'objects':[
{
'type':'KBaseReport.Report',
'data':reportObj,
'name':reportName,
'meta':{},
'hidden':1,
'provenance': self._instantiate_provenance(method_name = method_name,
input_obj_refs = [input_one_ref]+input_many_refs)
}
]
})[0]
error_report_info = { 'name': reportName,
'ref': str(report_obj_info[6]) + '/' + str(report_obj_info[0]) + '/' + str(report_obj_info[4])
}
return error_report_info
#### format_BLAST_db()
##
def format_BLAST_db (self, search_tool_name, target_fasta_file_path):
console = []
BLAST_DB_FORMAT_successful = True
# set seq type
if search_tool_name == 'BLASTn' or search_tool_name == 'tBLASTn':
seq_type = 'nucl'
else:
seq_type = 'prot'
# FORMAT DB
#
# OLD SYNTAX: formatdb -i $database -o T -p F -> $database.nsq or $database.00.nsq
# NEW SYNTAX: makeblastdb -in $database -parse_seqids -dbtype prot/nucl -out <basename>
makeblastdb_cmd = [self.Make_BLAST_DB]
# check for necessary files
if not os.path.isfile(self.Make_BLAST_DB):
self.log(console,"no such file '"+self.Make_BLAST_DB+"'")
BLAST_DB_FORMAT_successful = False
if not os.path.isfile(target_fasta_file_path):
self.log(console,"no such file '"+target_fasta_file_path+"'")
BLAST_DB_FORMAT_successful = False
elif not os.path.getsize(target_fasta_file_path) > 0:
self.log(console,"empty file '"+target_fasta_file_path+"'")
BLAST_DB_FORMAT_successful = False
makeblastdb_cmd.append('-in')
makeblastdb_cmd.append(target_fasta_file_path)
makeblastdb_cmd.append('-parse_seqids')
makeblastdb_cmd.append('-dbtype')
makeblastdb_cmd.append(seq_type)
makeblastdb_cmd.append('-out')
makeblastdb_cmd.append(target_fasta_file_path)
# Run Make_BLAST_DB, capture output as it happens
#
self.log(console, 'RUNNING Make_BLAST_DB:')
self.log(console, ' '+' '.join(makeblastdb_cmd))
# report += "\n"+'running Make_BLAST_DB:'+"\n"
# report += ' '+' '.join(makeblastdb_cmd)+"\n"
p = subprocess.Popen(makeblastdb_cmd, \
cwd = self.scratch, \
stdout = subprocess.PIPE, \
stderr = subprocess.STDOUT, \
shell = False)
while True:
line = p.stdout.readline().decode()
if not line: break
self.log(console, line.replace('\n', ''))
p.stdout.close()
p.wait()
self.log(console, 'return code: ' + str(p.returncode))
if p.returncode != 0:
self.log(console,'Error running makeblastdb, return code: '+str(p.returncode) + '\n\n')
#'\n\n'+ '\n'.join(console))
BLAST_DB_FORMAT_successful = False
# Check for db output
if seq_type.lower().startswith('n'):
db_ext = 'nsq'
else:
db_ext = 'psq'
if not os.path.isfile(target_fasta_file_path+"."+db_ext) and not os.path.isfile(target_fasta_file_path+".00."+db_ext):
self.log(console,"makeblastdb failed to create DB file '"+target_fasta_file_path+"."+db_ext+"'")
BLAST_DB_FORMAT_successful = False
elif not os.path.getsize(target_fasta_file_path+"."+db_ext) > 0 and not os.path.getsize(target_fasta_file_path+".00."+db_ext) > 0:
self.log(console,"makeblastdb created empty DB file '"+target_fasta_file_path+"."+db_ext+"'")
BLAST_DB_FORMAT_successful = False
return BLAST_DB_FORMAT_successful
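# For reference, the makeblastdb invocation assembled above is equivalent to the
# following shell command (the FASTA path is an illustrative assumption):
#   makeblastdb -in /scratch/targets.fasta -parse_seqids -dbtype nucl -out /scratch/targets.fasta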
# _check_BLAST_input_ready()
#
def _check_BLAST_input_ready (self, blast_bin, query_fasta_file_path, target_fasta_file_path):
console = []
BLAST_ready = True
# check for necessary files
if not os.path.isfile(blast_bin):
self.log(console, "no such file '"+blast_bin+"'")
BLAST_ready = False
if not os.path.isfile(query_fasta_file_path):
self.log(console, "no such file '"+query_fasta_file_path+"'")
BLAST_ready = False
elif not os.path.getsize(query_fasta_file_path) > 0:
self.log(console, "empty file '"+query_fasta_file_path+"'")
BLAST_ready = False
if not os.path.isfile(target_fasta_file_path):
self.log(console, "no such file '"+target_fasta_file_path+"'")
BLAST_ready = False
elif not os.path.getsize(target_fasta_file_path):
self.log(console, "empty file '"+target_fasta_file_path+"'")
BLAST_ready = False
return BLAST_ready
# _set_BLAST_bin()
#
def _set_BLAST_bin (self, search_tool_name):
blast_bin = { 'BLASTn': self.BLASTn,
'BLASTp': self.BLASTp,
'BLASTx': self.BLASTx,
'tBLASTn': self.tBLASTn,
'tBLASTx': self.tBLASTx,
'psiBLAST': self.psiBLAST
}
return blast_bin[search_tool_name]
# _set_BLAST_seq_types()
#
def _set_BLAST_seq_types (self, search_tool_name):
query_seq_type = { 'BLASTn': 'NUC',
'BLASTp': 'PRO',
'BLASTx': 'NUC',
'tBLASTn': 'PRO',
'tBLASTx': 'NUC',
'psiBLAST': 'PRO'
}
target_seq_type = { 'BLASTn': 'NUC',
'BLASTp': 'PRO',
'BLASTx': 'PRO',
'tBLASTn': 'NUC',
'tBLASTx': 'NUC',
'psiBLAST': 'PRO'
}
return (query_seq_type[search_tool_name], target_seq_type[search_tool_name])
# _set_BLAST_output_path()
#
def _set_BLAST_output_path (self, BLAST_output_format_str):
timestamp = int((datetime.utcnow() - datetime.utcfromtimestamp(0)).total_seconds()*1000)
output_dir = os.path.join(self.scratch,'output.'+str(timestamp))
if not os.path.exists(output_dir):
os.makedirs(output_dir)
return os.path.join(output_dir, 'alnout_m='+BLAST_output_format_str+'.txt');
#output_filtered_fasta_file_path = os.path.join(output_dir, 'output_filtered.fna'); # only for SingleEndLibrary
"""
Original Author: SilentSpike (https://github.com/SilentSpike)
Modified: Dorbedo for TFAR
The functions searches all functions headers and creates an documentation for public functions
Supported header sections:
- Name (the function name)
- Author(s) (with description below)
- Arguments
- Return Value
- Example(s)
- Public (by default function will only be documented if set to "Yes")
EXAMPLES
doc_functions core --output tfar
Crawl only functions in addons/core and only reports debug messages.
"""
#!/usr/bin/python3
# -*- coding: utf-8 -*-
# pylint: disable=W0702
# pylint: disable=C0301
import os
import re
import argparse
import logging
def main():
"""Main"""
parser = argparse.ArgumentParser(prog='Document SQF functions')
parser.add_argument('directory', nargs="?", type=str, help='only crawl specified component addon folder')
parser.add_argument('--output', default='tfar', choices=['tfar', 'ace'], help='The style of the output')
parser.add_argument('--loglevel', default=30, type=int, help='The Loglevel (default: 30)')
parser.add_argument('--logfile', type=str, help='Write log to file')
parser.add_argument('--version', action='version', version='%(prog)s 1.1')
args = parser.parse_args()
logging.basicConfig(format='%(levelname)s:%(message)s', level=args.loglevel, filename=args.logfile)
logging.info('Creating documentation')
logging.debug(args)
addonsdir = os.path.abspath(os.path.normpath(__file__ + '/../../addons'))
docfolder = os.path.abspath(os.path.normpath(__file__ + '/../../docs/functions'))
logging.debug("AddOn path: %s", addonsdir)
logging.debug("Document path: %s", docfolder)
all_components = {}
if args.directory:
logging.info('Documenting only component: %s', args.directory)
cur_component = Component(os.path.join(addonsdir, args.directory))
if cur_component.functions:
all_components[cur_component.name] = cur_component
cur_component.style = args.output
else:
logging.info('Documenting all components')
for folder in os.listdir(addonsdir):
if os.path.isdir(os.path.join(addonsdir, folder)):
cur_component = Component(os.path.join(addonsdir, folder))
if cur_component.functions:
all_components[cur_component.name] = cur_component
cur_component.style = args.output
if all_components:
logging.debug(all_components)
create_documentations(all_components, docfolder)
else:
logging.debug('No components found')
def create_documentations(all_components, docfolder):
"""Document all components"""
if not os.path.exists(docfolder):
logging.debug("Creating folder: %s", docfolder)
os.makedirs(docfolder)
for item in sorted(all_components.keys()):
filepath = os.path.join(docfolder, '{}.md'.format(all_components[item].name))
if os.path.exists(filepath):
logging.debug("Removing old file: %s", filepath)
os.remove(filepath)
logging.debug("Writing Component: %s", all_components[item].name)
docfile = open(filepath, 'w+')
docfile.write(document_component(all_components[item]))
for function in all_components[item].functions:
logging.debug("Writing function: %s", function.name)
if all_components[item].style == 'ace':
docfile.write(document_function_ace(function))
else:
docfile.write(document_function_tfar(function))
docfile.close()
class FunctionFile:
"""Function"""
def __init__(self, filepath):
logging.debug("Processing: %s", filepath)
self.path = filepath
self.header = ""
self.import_header()
self.component = ""
self.name = ""
self.public = False
self.authors = []
self.description = []
self.arguments = []
self.return_value = []
self.example = ""
def import_header(self):
"""imports the header"""
logging.debug(" Importing Header: %s", self.path)
file = open(self.path)
code = file.read()
file.close()
header_match = re.match(r"(#include\s\"script_component.hpp\"\n\n\s*)?(/\*.+?\*/)", code, re.S)
if header_match:
logging.debug(" Header is matching")
self.header = header_match.group(2)
else:
logging.debug(" Header is not matching")
def has_header(self):
"""Function has a header"""
return bool(self.header)
def process_header(self):
"""Analyze the header"""
logging.debug(" Processing header")
# Detailed debugging occurs here so value is set
# Preemptively cut away the comment characters (and leading/trailing whitespace)
header_text = self.header.strip()
if header_text.startswith('/*'):
header_text = header_text[2:]
if header_text.endswith('*/'):
            header_text = header_text[:-2]
result = []
for line in header_text.splitlines():
line = line.strip()
if line.startswith('*'):
result.append(line[1:])
else:
result.append(line)
header_text = '\n'.join(result)
# Split the header into expected sections
self.sections = re.split(r"^(Name|Author|Argument|Return Value|Example|Public)s?:\s?", header_text, 0, re.M)
logging.debug(" Header Sections: %s", self.sections)
# If public section is missing we can't continue
public_raw = self.get_section("Public")
if not public_raw:
logging.warning('Public value undefined')
logging.warning(' in file: %s', self.path)
return
# Determine whether the header is public
self.public = self.process_public(public_raw)
# Don't bother to process the rest if private
# Unless in debug mode
if not self.public:
logging.debug("Function is not public: %s", self.path)
return
# Retrieve the raw sections text for processing
author_raw = self.get_section("Author")
arguments_raw = self.get_section("Argument")
return_value_raw = self.get_section("Return Value")
example_raw = self.get_section("Example")
name_raw = self.get_section("Name")
# Author and description are stored in first section
if author_raw:
self.authors = self.process_author(author_raw)
self.description = self.process_description(author_raw)
logging.debug(" Description: %s", self.description)
if name_raw:
self.name = self.process_name(name_raw)
if arguments_raw:
self.arguments = self.process_arguments(arguments_raw)
# Process return
if return_value_raw:
self.return_value = self.process_arguments(return_value_raw)
# Process example
if example_raw:
self.example = example_raw.strip()
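    # Illustrative sketch, not taken from the TFAR sources: a header of the rough
    # shape this parser expects. Section keywords must start a line (after the
    # optional leading '*' is stripped by process_header) to match the re.split()
    # pattern above; the concrete field values below are assumptions.
    #
    #   /*
    #       Name: TFAR_fnc_exampleFunction
    #       Author: Dorbedo
    #           Short description of what the function does.
    #
    #       Arguments:
    #           0: The affected unit <OBJECT> (default: objNull)
    #           1: Enable the feature <BOOL>
    #
    #       Return Value:
    #           Success <BOOL>
    #
    #       Example:
    #           [player, true] call TFAR_fnc_exampleFunction
    #
    #       Public: Yes
    #   */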
def get_section(self, section_name):
"""returns a header section"""
try:
section_text = self.sections[self.sections.index(section_name) + 1]
return section_text
except ValueError:
logging.debug(' Missing "%s" header section in %s', section_name, self.path)
return ""
def process_public(self, raw):
"""Raw just includes an EOL character"""
public_text = raw[:-1]
if not re.match(r"(Yes|No)", public_text, re.I):
logging.warning('Invalid public value "%s"', public_text)
logging.warning(' in file: %s', self.path)
return public_text.capitalize() == "Yes"
def is_public(self):
"""function is public"""
return self.public
def process_author(self, raw):
"""process the author"""
# Authors are listed on the first line
authors_text = raw.splitlines()[0]
        # Separate authors are divided by commas
return authors_text.split(", ")
def process_name(self, raw):
"""process the functionname"""
return raw.splitlines()[0]
def process_description(self, raw):
"""process the description"""
# Just use all the lines after the authors line
return [line.rstrip() for line in raw.splitlines(1)[1:]]
def process_arguments(self, raw):
"""process the arguments"""
lines = raw.splitlines()
if lines[0] == "None":
return []
if lines.count("") == len(lines):
logging.warning("No arguments provided (use \"None\" where appropriate)")
logging.warning(' in file: %s', self.path)
return []
if lines[-1] == "":
lines.pop()
else:
logging.warning("No blank line after arguments list")
logging.warning(' in file: %s', self.path)
arguments = []
for argument in lines:
valid = re.match(r"^((\d+):\s)?(.+?)\<([\s\w\|]+?)\>(\s\(default: (.+?)\))?(.+?)?$", argument)
if valid:
arg_index = valid.group(2)
arg_name = valid.group(3)
arg_types = valid.group(4)
arg_default = valid.group(6)
arg_notes = valid.group(7)
if arg_default is None:
arg_default = ""
if arg_notes is None:
arg_notes = ""
arguments.append([arg_index, arg_name, arg_types, arg_default, arg_notes])
            else:
                # Notes about the above argument won't start with an index
                # Only applies if there exists an above argument
                if not arguments or re.match(r"^(\d+):", argument):
                    logging.warning('Malformed argument "%s"', argument)
                    logging.warning(' in file: %s', self.path)
                    arguments.append(["?", "Malformed", "?", "?", "?"])
                else:
                    # Continuation line: append it to the notes of the previous argument
                    arguments[-1][-1] += " " + argument
return arguments
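    # Worked example for the regex in process_arguments() above (the argument line
    # itself is hypothetical, not taken from any TFAR header):
    #
    #   0: Radio classname <STRING> (default: "tf_anprc152") - additional notes
    #
    #   group(2) -> '0'                    (argument index)
    #   group(3) -> 'Radio classname '     (name, trailing space included)
    #   group(4) -> 'STRING'               (datatype)
    #   group(6) -> '"tf_anprc152"'        (default value)
    #   group(7) -> ' - additional notes'  (trailing notes)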
def process_return_value(self, raw):
return_value = raw.strip()
if return_value == "None":
return []
valid = re.match(r"^(.+?)\<([\s\w]+?)\>", return_value)
if valid:
return_name = valid.group(1)
return_types = valid.group(2)
else:
logging.warning('Malformed return value "%s"', return_value)
logging.warning(' in file: %s', self.path)
return ["Malformed", ""]
return [return_name, return_types]
class Component:
"""defines a component to be defined"""
def __init__(self, path_to_component):
self.path = path_to_component
self.name = self.get_componentname()
self.functions = []
self.style = ''
self.get_functions()
self.functions.sort(key=lambda func: func.name.lower(), reverse=False)
logging.debug("Component %s functions: %s", self.name, self.functions)
if not self.functions:
del self
def get_functions(self):
"""gets all functions from inside a component"""
for root, dirs, files in os.walk(self.path):
for file in files:
if file.endswith(".sqf"):
file_path = os.path.join(root, file)
function = FunctionFile(file_path)
function.component = self.name
function.import_header()
if function.has_header():
logging.debug("Function %s has header", function.path)
function.process_header()
if function.is_public():
logging.debug("Function %s is public", function.name)
self.functions.append(function)
else:
logging.debug("Function %s is not public", function.name)
del function
else:
logging.debug("Function %s has NO header", file_path)
del function
if 'function' in locals():
logging.info("Documenting file: %s", file_path)
else:
logging.info("Skipping file: %s", file_path)
def get_componentname(self):
"""returns the name of the component"""
#component_file = open(os.path.join(self.path, "script_component.hpp"), 'r')
name = os.path.basename(self.path)
return name
def document_function_ace(function):
"""returns the function documentation in the style of ace"""
str_list = []
# Title
str_list.append("\n## ace_{}_fnc_{}\n".format(function.component, os.path.basename(function.path)[4:-4]))
# Description
str_list.append("__Description__\n\n" + '\n'.join(function.description) + '\n')
# Arguments
if function.arguments:
if function.arguments[0][0]:
str_list.append("__Parameters__\n\nIndex | Description | Datatype(s) | Default Value\n--- | --- | --- | ---\n")
for argument in function.arguments:
str_list.append("{} | {} | {} | {}\n".format(*argument))
str_list.append("\n")
else:
str_list.append("__Parameters__\n\nDescription | Datatype(s) | Default value\n--- | --- | ---\n{} | {} | {} \n\n".format(\
function.arguments[0][1], function.arguments[0][2], function.arguments[0][3]))
else:
str_list.append("__Parameters__\n\nNone\n\n")
# Return Value
if function.return_value:
if function.return_value[0][0]:
str_list.append("__Return Value__\n\nIndex | Description | Datatype(s) | Default Value\n--- | --- | --- | ---\n")
for argument in function.return_value:
str_list.append("{} | {} | {} | {}\n".format(*argument))
str_list.append("\n")
else:
str_list.append("__Return Value__\n\nDescription | Datatype(s)\n--- | ---\n{} | {} \n\n".format(\
function.return_value[0][1], function.return_value[0][2]))
else:
str_list.append("__Return Value__\n\nNone\n\n")
# Example
str_list.append("__Example__\n\n```sqf\n{}\n```\n\n".format(function.example))
# Authors
str_list.append("\n__Authors__\n\n")
for author in function.authors:
str_list.append("- {}\n".format(author))
# Horizontal rule
str_list.append("\n---\n")
return ''.join(str_list)
def document_component(component):
"""Document the component header"""
str_list = []
if component.style == 'tfar':
# TFAR header
str_list.append('<h2>Index of API functions</h2>\n')
str_list.append('<table border="1">\n <tbody>\n <tr>\n <td>\n <ul style="list-style-type:square">\n')
for function in component.functions:
str_list.append(' <li><code><a href="{0}">{0}</a></code></li>\n'.format(function.name))
str_list.append(' </ul>\n </td>\n </tr>\n </tbody>\n</table>\n<br><hr>\n')
elif component.style == 'ace':
# ACE header
str_list.append('')
logging.debug("Contents: %s", str_list)
return ''.join(str_list)
def document_function_tfar(function):
"""Document the function"""
str_list = []
# Title
str_list.append('<table border="1">\n')
str_list.append(' <thead>\n')
str_list.append(' <tr>\n')
str_list.append(' <th scope="colgroup" colspan="2" width="640px">\n')
str_list.append(' <a name="{0}">{0}</a>\n'.format(function.name))
str_list.append(' </th>\n')
str_list.append(' </tr>\n')
str_list.append(' </thead>\n')
str_list.append(' <tbody>\n')
str_list.append(' <tr>\n')
str_list.append(' <td colspan="2" align="left">\n')
str_list.append(' <p>{}</p>\n'.format('<br>'.join(function.description)))
str_list.append(' </td>\n')
str_list.append(' </tr>\n')
str_list.append(' <tr>\n')
str_list.append(' <td valign="top" width="50%">\n')
str_list.append(' <strong><sub>Parameters</sub></strong>\n')
str_list.append(' <ol start=\"0\">\n')
if function.arguments:
for argument in function.arguments:
if argument[0]:
str_list.append(' <li><kbd>{2}</kbd> - {1}</li>\n'.format(*argument))
else:
str_list.append(' <kbd>{2}</kbd> - {1}\n'.format(*argument))
else:
str_list.append(' None\n')
str_list.append(' </ol>\n')
str_list.append(' </td>\n')
str_list.append(' <td valign="top" width="50%">\n')
str_list.append(' <strong><sub>Returns</sub></strong>\n')
str_list.append(' <ol start="0">\n')
if function.return_value:
for argument in function.return_value:
if argument[0]:
str_list.append(' <li><kbd>{2}</kbd> | |
case for kill-to-end-of-line after last visible char"""
before_b = """\
line 1
# The next line contains two trailing blanks.
line 3
line 4
"""
after_b = """\
line 1
# The next line contains two trailing blanks.
line 3line 4
"""
self.run_test(
before_b=before_b,
after_b=after_b,
before_sel=("3.6", "3.6"),
after_sel=("3.6", "3.6"),
command_name="kill-to-end-of-line",
)
#@+node:ekr.20201130090918.80: *3* kill-to-end-of-line end-body-text
def test_kill_to_end_of_line_end_body_text(self):
"""Test case for kill-to-end-of-line end-body-text"""
before_b = """\
line 1
line 2
line 3
"""
after_b = """\
line 1
line 2
line 3"""
self.run_test(
before_b=before_b,
after_b=after_b,
before_sel=("4.1", "4.1"),
after_sel=("3.6", "3.6"),
command_name="kill-to-end-of-line",
)
#@+node:ekr.20201130090918.81: *3* kill-to-end-of-line end-line
def test_kill_to_end_of_line_end_line(self):
"""Test case for kill-to-end-of-line end-line"""
before_b = """\
line 1
line 2
line 3
"""
after_b = """\
line 1
line 2line 3
"""
self.run_test(
before_b=before_b,
after_b=after_b,
before_sel=("2.6", "2.6"),
after_sel=("2.6", "2.6"),
command_name="kill-to-end-of-line",
)
#@+node:ekr.20201130090918.85: *3* kill-to-end-of-line middle-line
def test_kill_to_end_of_line_middle_line(self):
"""Test case for kill-to-end-of-line middle-line"""
before_b = """\
line 1
line 2
line 3
"""
after_b = """\
line 1
li
line 3
"""
self.run_test(
before_b=before_b,
after_b=after_b,
before_sel=("2.2", "2.2"),
after_sel=("2.2", "2.2"),
command_name="kill-to-end-of-line",
)
#@+node:ekr.20201130090918.84: *3* kill-to-end-of-line start-blank-line
def test_kill_to_end_of_line_start_blank_line(self):
"""Test case for kill-to-end-of-line start-blank-line"""
before_b = """\
line 1
line 2
line 4
"""
after_b = """\
line 1
line 2
line 4
"""
self.run_test(
before_b=before_b,
after_b=after_b,
before_sel=("3.0", "3.0"),
after_sel=("3.0", "3.0"),
command_name="kill-to-end-of-line",
)
#@+node:ekr.20201130090918.83: *3* kill-to-end-of-line start-line
def test_kill_to_end_of_line_start_line(self):
"""Test case for kill-to-end-of-line start-line"""
before_b = """\
line 1
line 2
line 3
line 4
"""
after_b = """\
line 1
line 2
line 4
"""
self.run_test(
before_b=before_b,
after_b=after_b,
before_sel=("3.0", "3.0"),
after_sel=("3.0", "3.0"),
command_name="kill-to-end-of-line",
)
#@+node:ekr.20201130090918.75: *3* kill-word
def test_kill_word(self):
"""Test case for kill-word"""
before_b = """\
This is the first sentence. This
is the second sentence. And
this is the last sentence.
"""
after_b = """\
This is the first sentence. This
is the sentence. And
this is the last sentence.
"""
self.run_test(
before_b=before_b,
after_b=after_b,
before_sel=("2.6", "2.6"),
after_sel=("2.7", "2.7"),
command_name="kill-word",
)
#@+node:ekr.20201130090918.86: *3* move-lines-down
def test_move_lines_down(self):
"""Test case for move-lines-down"""
before_b = """\
first line
line 1
line a
line b
line c
last line
"""
after_b = """\
first line
line 1
line c
line a
line b
last line
"""
self.run_test(
before_b=before_b,
after_b=after_b,
before_sel=("3.3", "4.3"),
after_sel=("4.3", "5.3"),
command_name="move-lines-down",
)
#@+node:ekr.20201130090918.87: *3* move-lines-up
def test_move_lines_up(self):
"""Test case for move-lines-up"""
before_b = """\
first line
line 1
line a
line b
line c
last line
"""
after_b = """\
line 1
first line
line a
line b
line c
last line
"""
self.run_test(
before_b=before_b,
after_b=after_b,
before_sel=("2.2", "2.2"),
after_sel=("1.2", "1.2"),
command_name="move-lines-up",
)
#@+node:ekr.20201130090918.88: *3* move-lines-up (into docstring)
def test_move_lines_up_into_docstring(self):
"""Test case for move-lines-up (into docstring)"""
before_b = '''\
#@@language python
def test():
""" a
b
c
"""
print 1
print 2
'''
after_b = '''\
#@@language python
def test():
""" a
b
c
print 1
"""
print 2
'''
self.run_test(
before_b=before_b,
after_b=after_b,
before_sel=("7.1", "7.1"),
after_sel=("6.1", "6.1"),
command_name="move-lines-up",
)
#@+node:ekr.20201130090918.89: *3* move-past-close
def test_move_past_close(self):
"""Test case for move-past-close"""
before_b = """\
first (line)
line 1
line a
line b
line c
last line
"""
after_b = """\
first (line)
line 1
line a
line b
line c
last line
"""
self.run_test(
before_b=before_b,
after_b=after_b,
before_sel=("1.10", "1.10"),
after_sel=("1.12", "1.12"),
command_name="move-past-close",
)
#@+node:ekr.20201130090918.90: *3* move-past-close-extend-selection
def test_move_past_close_extend_selection(self):
"""Test case for move-past-close-extend-selection"""
before_b = """\
first line
line 1
(line )a
line b
line c
last line
"""
after_b = """\
first line
line 1
(line )a
line b
line c
last line
"""
self.run_test(
before_b=before_b,
after_b=after_b,
before_sel=("3.7", "3.7"),
after_sel=("3.7", "3.11"),
command_name="move-past-close-extend-selection",
)
#@+node:ekr.20201130090918.91: *3* newline-and-indent
def test_newline_and_indent(self):
"""Test case for newline-and-indent"""
before_b = """\
first line
line 1
line a
line b
line c
last line
"""
after_b = """\
first line
line 1
line a
line b
line c
last line
"""
self.run_test(
before_b=before_b,
after_b=after_b,
before_sel=("2.6", "2.6"),
after_sel=("3.4", "3.4"),
command_name="newline-and-indent",
)
#@+node:ekr.20201130090918.92: *3* next-line
def test_next_line(self):
"""Test case for next-line"""
before_b = """\
a
b
"""
after_b = """\
a
b
"""
self.run_test(
before_b=before_b,
after_b=after_b,
before_sel=("1.1", "1.1"),
after_sel=("2.0", "2.0"),
command_name="next-line",
)
#@+node:ekr.20201130090918.93: *3* previous-line
def test_previous_line(self):
"""Test case for previous-line"""
before_b = """\
a
b
"""
after_b = """\
a
b
"""
self.run_test(
before_b=before_b,
after_b=after_b,
before_sel=("3.0", "3.0"),
after_sel=("2.0", "2.0"),
command_name="previous-line",
)
#@+node:ekr.20201130090918.94: *3* rectangle-clear
def test_rectangle_clear(self):
"""Test case for rectangle-clear"""
before_b = """\
before
aaaxxxbbb
aaaxxxbbb
aaaxxxbbb
aaaxxxbbb
after
"""
after_b = """\
before
aaa bbb
aaa bbb
aaa bbb
aaa bbb
after
"""
self.run_test(
before_b=before_b,
after_b=after_b,
before_sel=("2.3", "5.6"),
after_sel=("2.3", "5.6"),
command_name="rectangle-clear",
)
#@+node:ekr.20201130090918.95: *3* rectangle-close
def test_rectangle_close(self):
"""Test case for rectangle-close"""
before_b = """\
before
aaa bbb
aaa bbb
aaa bbb
aaa bbb
after
"""
after_b = """\
before
aaabbb
aaabbb
aaabbb
aaabbb
after
"""
self.run_test(
before_b=before_b,
after_b=after_b,
before_sel=("2.3", "5.6"),
after_sel=("2.3", "5.3"),
command_name="rectangle-close",
)
#@+node:ekr.20201130090918.96: *3* rectangle-delete
def test_rectangle_delete(self):
"""Test case for rectangle-delete"""
before_b = """\
before
aaaxxxbbb
aaaxxxbbb
aaaxxxbbb
aaaxxxbbb
after
"""
after_b = """\
before
aaabbb
aaabbb
aaabbb
aaabbb
after
"""
self.run_test(
before_b=before_b,
after_b=after_b,
before_sel=("2.3", "5.6"),
after_sel=("2.3", "5.3"),
command_name="rectangle-delete",
)
#@+node:ekr.20201130090918.97: *3* rectangle-kill
def test_rectangle_kill(self):
"""Test case for rectangle-kill"""
before_b = """\
before
aaaxxxbbb
aaaxxxbbb
aaaxxxbbb
aaaxxxbbb
after
"""
after_b = """\
before
aaabbb
aaabbb
aaabbb
aaabbb
after
"""
self.run_test(
before_b=before_b,
after_b=after_b,
before_sel=("2.3", "5.6"),
after_sel=("5.3", "5.3"),
command_name="rectangle-kill",
)
#@+node:ekr.20201130090918.98: *3* rectangle-open
def test_rectangle_open(self):
"""Test case for rectangle-open"""
before_b = """\
before
aaaxxxbbb
aaaxxxbbb
aaaxxxbbb
aaaxxxbbb
after
"""
after_b = """\
before
aaa xxxbbb
aaa xxxbbb
aaa xxxbbb
aaa xxxbbb
after
"""
self.run_test(
before_b=before_b,
after_b=after_b,
before_sel=("2.3", "5.6"),
after_sel=("2.3", "5.6"),
command_name="rectangle-open",
)
#@+node:ekr.20201130090918.99: *3* rectangle-string
def test_rectangle_string(self):
"""Test case for rectangle-string"""
before_b = """\
before
aaaxxxbbb
aaaxxxbbb
aaaxxxbbb
aaaxxxbbb
after
"""
after_b = """\
before
aaas...sbbb
aaas...sbbb
aaas...sbbb
aaas...sbbb
after
"""
# A hack. The command tests for g.app.unitTesting.
g.app.unitTesting = True
try:
self.run_test(
before_b=before_b,
after_b=after_b,
before_sel=("2.3", "5.6"),
after_sel=("2.3", "5.8"),
command_name="rectangle-string",
)
finally:
g.app.unitTesting = False
#@+node:ekr.20201130090918.100: *3* rectangle-yank
def test_rectangle_yank(self):
"""Test case for rectangle-yank"""
before_b = """\
before
aaaxxxbbb
aaaxxxbbb
aaaxxxbbb
aaaxxxbbb
after
"""
after_b = """\
before
aaaY1Ybbb
aaaY2Ybbb
aaaY3Ybbb
aaaY4Ybbb
after
"""
# A hack. The command tests for g.app.unitTesting.
g.app.unitTesting = True
try:
self.run_test(
before_b=before_b,
after_b=after_b,
before_sel=("2.3", "5.6"),
after_sel=("2.3", "5.6"),
command_name="rectangle-yank",
)
finally:
g.app.unitTesting = False
#@+node:ekr.20201201201052.1: *3* reformat-paragraph tests
#@+node:ekr.20201130090918.122: *4* reformat-paragraph list 1 of 5
def test_reformat_paragraph_list_1_of_5(self):
"""Test case for reformat-paragraph list 1 of 5"""
before_b = """\
This paragraph leads of this test. It is the "lead"
paragraph.
1. This is item
number 1. It is the first item in the list.
2. This is item
number 2. It is the second item in the list.
3. This is item
number 3. It is the third item in the list.
This paragraph ends the test. It is the "final"
paragraph.
"""
after_b = """\
This paragraph leads of this test. It is
the "lead" paragraph.
1. This is item
number 1. It is the first item in the list.
2. This is item
number 2. It is the second item in the list.
3. This is item
number 3. It is the third item in the list.
This paragraph ends the test. It is the "final"
paragraph.
"""
self.run_test(
before_b=before_b,
after_b=after_b,
before_sel=("1.0", "1.0"),
after_sel=("4.0", "4.0"),
command_name="reformat-paragraph",
directives="@language plain\n@pagewidth 40\n@tabwidth 8",
)
#@+node:ekr.20201130090918.123: *4* reformat-paragraph list 2 of 5
def test_reformat_paragraph_list_2_of_5(self):
"""Test case for reformat-paragraph list 2 of 5"""
before_b = """\
This paragraph leads of this test. It is
the "lead" paragraph.
1. This is item number 1. It is the
first item in the list.
2. This is item
number 2. It is the second item in the list.
3. This is item
number 3. It is the third item in the list.
This paragraph ends the test. It is the "final"
paragraph.
"""
after_b = """\
This paragraph leads of this test. It is
the "lead" paragraph.
1. This is item number 1. It is the
first item in the list.
2. This is item
| |
- sublist[j + 1])
if difference_of_ele > 1:
num_of_splits = num_of_splits + 1
pos_to_split = []
# Find the position where the split should take place
for k, n in enumerate(ele_diff):
if n > 1:
pos_to_split.append(k)
        # Split the sublist into further sublists based on the identified split positions
for pos_split in pos_to_split:
size = len(sublist)
            # Find the index matching the position to split
idx_list = [idx + 1 for idx, val in enumerate(sublist) if idx == pos_split]
# Split and merge the values present in the position split
split_based_on_pos = [sublist[i: j] for i, j in zip([0] + idx_list, idx_list
+ ([size] if idx_list[-1] != size else []))]
split_ele_lst.append(split_based_on_pos)
        # If there are no elements in the sublist to split, then append the sublist
if not split_ele_lst:
boundary_splits.append(sublist)
else:
# Append the "split and merged list" to the sublist
for i in range(len(split_ele_lst)):
for j in range(len(split_ele_lst) + 1):
sub_split_lst = split_ele_lst[i][j]
boundary_splits.append(sub_split_lst)
return boundary_splits
# Identify the link between the elements of the list
def link_between_the_elements(final_list):
"""
The task of this function is to identify the relationship between a current sublist and its succeeding sublist.
Then we store how many elements are matching between the lists.
Arguments: final_list- manipulated input grid
    Return: ele_link - list holding, for each sublist, the number of elements it shares with the following sublist.
"""
ele_link = []
# Iterate over each row of the boundary list
for index in range(len(final_list) - 1):
        # Elements matching in the current sublist and the next sublist
elements_matching = len([x for x in final_list[index] if x in final_list[index + 1]])
ele_link.append(elements_matching)
return ele_link
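# Worked example for link_between_the_elements() (hypothetical input, not taken
# from the task data):
#   final_list = [[1, 2, 3], [3, 4], [7, 8]]
#   row 0 and row 1 share one element (3)  -> 1
#   row 1 and row 2 share no elements      -> 0
#   link_between_the_elements(final_list) == [1, 0]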
# Check if the list created after splitting is in the correct order
def ordered_lst_ele(ident_boud_lst):
"""
    The task of this function is to check whether the boundary list is properly ordered, i.e. whether connected sublists
    sit next to each other in the list. If the current sublist shares elements with the sublist two positions ahead
    rather than the next one, the two following sublists are swapped.
Arguments: ident_boud_lst- Identified boundary list
Return: ident_boud_lst - correctly ordered boundary list.
"""
# Iterate over the created list
for index, val in enumerate(ident_boud_lst):
current_sublist = ident_boud_lst[index]
index_1 = index + 1
if index_1 < (len(ident_boud_lst) - 1):
next_sublist = ident_boud_lst[index + 1]
            # Check if any elements match between the current sublist and the next sublist
if len(set(current_sublist) & set(next_sublist)) == 0:
index_2 = index + 2
if index_2 < (len(ident_boud_lst) - 1):
                    # Check if any elements match the sublist after the next one
nxt_to_nxt_sublist = ident_boud_lst[index_2]
if len(set(current_sublist) & set(nxt_to_nxt_sublist)) != 0:
# If there is an element matching the element in our current list then change the
# position of the sublists
ident_boud_lst[index_2], ident_boud_lst[index_1] = ident_boud_lst[index_1], ident_boud_lst[
index_2]
return ident_boud_lst
# Identify different shapes based on the link between the elements
def identify_diff_shapes(store_link):
size = len(store_link)
    # Sublists that share no elements are represented by a 0 in store_link
    # Find the occurrences of the value 0 in the list mapping the boundary connections
boundary_idx_list = [idx + 1 for idx, val in enumerate(store_link) if val == 0]
# Create sublists representing different shapes present in boundary list
shapes_present_in_grid = [store_link[i: j] for i, j in
zip([0] + boundary_idx_list, boundary_idx_list +
([size] if boundary_idx_list[-1] != size else []))]
return shapes_present_in_grid
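# Worked example for identify_diff_shapes() (hypothetical input): with
# store_link = [2, 3, 0, 1] the single 0 at index 2 gives boundary_idx_list = [3],
# so the call returns [[2, 3, 0], [1]] - one sublist per connected shape.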
# Creating a diagonal matrix whose diagonal elements represent the different shapes present in the input grid
def creating_diagnol_matrix(empty_op_grid):
len_of_seq = len(empty_op_grid)
    # assign i the length of the matrix (number of rows)
i = len_of_seq
pos_counter = [0]
pos_counter_len = len(pos_counter)
puzzle_ele = []
    # Colour code the diagonal elements blue
target = [8]
    # Iterate over the rows from the bottom row up
while (i >= 1):
i = i - 1
# Elements in the row
curr_lst_ele = empty_op_grid[i]
        # Assigning the colour value to the diagonal index of the elements
for x, y in zip(pos_counter, target):
if x < len_of_seq:
curr_lst_ele[x] = y
# Storing the assigned values to the list
puzzle_ele.append(curr_lst_ele)
        # Increase the counter to get the diagonal position for that colour in each row
pos_counter = [x + 1 for x in pos_counter]
manipulated_puzzle_op = [arr.tolist() for arr in puzzle_ele]
return manipulated_puzzle_op
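# Worked example for creating_diagnol_matrix() (hypothetical 3x3 all-zero grid of
# numpy rows): rows are visited bottom-up and columns 0, 1, 2 are set to 8 in turn,
# so the returned, row-reversed list reads [[8, 0, 0], [0, 8, 0], [0, 0, 8]], i.e.
# the 8s end up on the main diagonal of the output.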
# ---------------------------------------------------solve_d0f5fe59 end------------------------------------------------
# ---------------------------------------------------solve_ae3edfdc start-------------------------------------
def solve_ae3edfdc(x):
"""
Difficulty: Medium
    The problem description: Gravity - well, not the regular kind. There are two centres of gravity, blue and
    red, which have the ability to pull orange and green squares in their path towards themselves, so that each
    attracted square ends up in the closest position next to its centre. The one condition: the attracted squares must be
    perpendicular to the centre of gravity to get attracted to it.
    Assumptions: There are no other colours in the space. The non-centres are always perpendicular to the centres.
    The approach: Locate all the colourful squares in the 'space'. Then locate the centres of gravity.
    Pair them up as blue to orange and red to green. Check along the perpendicular path.
    If there are any squares in its path, move each one to the closest position in the same line.
    Testing: All test cases passed
Argument: x, the n-d array representing the space
return: x, after the above transformation is done.
"""
# find all the squares where colour not equal to black
row, column = np.where(x > 0)
colour_dict = {}
# put them all into one dictionary
for r, c in zip(row, column):
if x[r][c] in colour_dict:
colour_dict[x[r][c]].append((r, c))
else:
colour_dict[x[r][c]] = [(r, c)]
    # -------------------Hardcoding the colours for the centres and their pairs
center1 = 2
center2 = 1
pair1 = 3
pair2 = 7
# -----------------
# Creating two dictionaries based on the centre-pair value
keyPair1 = [center1, pair1]
keyPair2 = [center2, pair2]
d1 = {x: colour_dict[x] for x in colour_dict if x in keyPair1}
d2 = {x: colour_dict[x] for x in colour_dict if x in keyPair2}
# moving the position of the first centre-pair pair
half_done = match_pattern(d1, x, keyPair1)
# sending the half transformed to transform the rest
final = match_pattern(d2, half_done, keyPair2)
x = final
return x
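# Tiny illustration (hypothetical 5x5 grid, not taken from the task data): with a
# red centre (2) at row 2, column 2 and a green square (3) at row 2, column 0, the
# two lie in the same row, so match_pattern() moves the green square from (2, 0) to
# the closest free cell next to the centre, (2, 1); a blue (1) / orange (7) pair in
# the same grid is handled identically by the second match_pattern() call.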
def match_pattern(dict_fig, x, keyPair):
# get the row and column of the centre
r = dict_fig[keyPair[0]][0][0]
c = dict_fig[keyPair[0]][0][1]
# for every square belonging to this key-pair
for v in dict_fig[keyPair[1]]:
# if in the same row as the centre of gravity but before it
if v[0] == r and v[1] < c:
# closest point to centre on the same side
x[r][c - 1] = keyPair[1]
# set the old position to 0
x[v[0]][v[1]] = 0
# if in the same row as the centre of gravity but after it
elif v[0] == r and v[1] > c:
# closest point to centre on the same side
x[r][c + 1] = keyPair[1]
x[v[0]][v[1]] = 0
# if in the same column as the centre of gravity but above it
        elif v[1] == c and v[0] < r:
x[r - 1][c] = keyPair[1]
x[v[0]][v[1]] = 0
# if in the same column as the centre of gravity but below it
        elif v[1] == c and v[0] > r:
x[r + 1][c] = keyPair[1]
x[v[0]][v[1]] = 0
else:
# not per assumption
raise Exception("Pattern not handled")
return x
# ---------------------------------------------------solve_ae3edfdc end------------------------------------------------
# ---------------------------------------------------solve_feca6190 start----------------------------------------------
def solve_feca6190(x):
"""
Difficulty: Medium
The Problem: The input grid is a rectangular (list of lists) matrix with variable shape, with numbers ranging
from 0 to 9. (inclusive). Different colors of the color spectrum are represented by the integers.
The task is to determine the color schemes in the input grid, generate a matrix whose shape is given by
    multiplying the size of the input matrix by the number of colors present inside the grid. The next step is to
fill the formed matrix | |
# -*- encoding: utf-8 -*-
__version__ = "0.9"
__author__ = "fcFn"
from tkinter.constants import N,E,W,S
from tkinter import messagebox
from tkinter import ttk
import tkinter
class App:
def __init__(self, name):
self.MESSAGES = ['Duplicate item!', 'Enter item name', 'No name provided!']
self.projectname = ''
self.containers = []
self.name = name
self.createall()
self.gridall()
self.model = []
self.addnewcontainer(name = 'A container')
self.allselecteditems= []
self.style = ttk.Style()
self.style.layout("SelectedItem", [
('Checkbutton.padding', {'children':
[('Checkbutton.focus', {'children':
[('Checkbutton.indicator', {'side': 'left', 'sticky': ''}),
('Checkbutton.label', {'sticky': 'nswe'})], 'side': 'left', 'sticky': 'w'})], 'sticky': 'nswe'})])
self.style.configure("SelectedItem", font = 'helvetica 10 bold', foreground='blue')
self.root.mainloop()
def createmainframe(self):
self.root = tkinter.Tk()
self.root.protocol("WM_DELETE_WINDOW", self.quit)
self.root.title(self.name)
self.mainframe = ttk.Frame(self.root)
self.contframe = ttk.Frame(self.mainframe)
def gridframes(self):
self.mainframe.grid(row=0, column=0, sticky=(N,E,W,S))
self.contframe.grid(row=1, column=0, sticky=(N,E,W,S))
def createmenus(self):
self.topbar = ttk.Frame(self.mainframe)
self.addcontainer = ttk.Button(self.topbar, text = 'Add new container', underline = 0, command = self.addnewcontainer)
self.topbar.columnconfigure(0, weight=1)
self.botbar = ttk.Frame(self.mainframe)
self.botbar.columnconfigure(0,weight=1)
self.botbar.columnconfigure(1,weight=1)
self.botbar.columnconfigure(2,weight=1)
self.savebutton = ttk.Button(self.botbar, text='Save', command = self.save)
self.loadbutton = ttk.Button(self.botbar, text='Load', command = self.load)
self.exportbutton = ttk.Button(self.botbar, text = 'Export', command = self.export)
self.helpbutton = ttk.Button(self.botbar, text='Help', command = self.showhelp)
self.bindkeys()
def export(self):
self.updatemodel()
exportstring = ''
for i in self.model:
exportstring += str(i[0]+1) + '. ' + i[1] + '\n\n'
if len(i) == 2:
exportstring += '\tEMPTY\n'
exportstring += '-' * 40 + '\n'
continue
for w in range(2, len(i)):
exportstring += '\t' + str(i[w][0]+1) + '.' + i[w][1]
if i[w][2]:
exportstring += ' --- CHECK\n'
else:
exportstring += '\n'
exportstring += '-' * 40 + '\n'
self.root.clipboard_clear()
self.root.clipboard_append(exportstring)
messagebox.showinfo('Oi!', 'Current list copied to clipboard!')
def bindkeys(self):
self.root.bind('<Escape>', lambda e: self.quit())
self.root.bind('<Control-space>', self.addnewcontainer)
#this was used to bind to russian equiv of Control-A (hotkey was changed to Ctrl-Space
#which is international
#self.root.bind('<Control-ocircumflex>', self.addnewcontainer)
def quit(self):
sure = messagebox.askyesno('Oi!', "Are you sure you want to quit?\n\nAll unsaved changes will be lost!")
if sure: self.root.destroy()
return
def showhelp(self):
helptext = """
Lugman v0.9
This app manages lists of items.
To add a new container, press the "Add new container" button at the top of the main window. If you only have a default empty container, the newly created container will replace it.
To add a new item to a container, enter the item name into the input box under the container and press the plus button or hit the Enter key.
Click an item to mark it.
The counter beside the name of a container displays the amount of marked items against the total items in the container.
You can also hit Control-Space anywhere to add a new container.
To delete a container or change the container name, right-click the container.
To delete all containers, right-click any container and select "Delete all containers". You will be prompted to save your list beforehand.
To delete an item from a container or edit the item name, right-click the item.
You can move individual items between containers by dragging and dropping.
To move multiple items, use the middle mouse button to mark them and drag and drop them into the desired container (duplicated items will not be moved).
Right-click an item and choose "Drop selection" to drop multiple item selection without moving the items.
Press the "Save" button to save the current list.
Press the "Load" button to select and load a save file.
Press the "Export" button to export the list into the clipboard in human-readable format. Paste it to share it via IM, email, etc.
Press the "Help" button to read this message.
"""
messagebox.showinfo('Help', helptext)
def gridmenus(self):
self.topbar.grid(row=0, column=0, padx = 10, sticky=(N))
self.addcontainer.grid(row=0, column=0, sticky=(N,E,W))
self.botbar.grid(row=2, column=0, sticky=(N,S,E,W))
self.savebutton.grid(row=0, column=0, sticky=(S,W))
self.loadbutton.grid(row=0, column=1, sticky=(S,W))
self.exportbutton.grid(row=0, column=2, sticky=(S,W))
self.helpbutton.grid(row=0, column=3, sticky=(S,E))
def createall(self):
self.createmainframe()
self.createmenus()
def gridall(self):
self.gridframes()
self.gridmenus()
def addnewcontainer(self, container = None, recreate = False, name = None):
if not recreate:
if not name:
query = Query('Enter container name', None, self)
self.root.wait_window(query.question)
try:
result = query.getresult().get()
except:
return
#check if there is just one empty default container
if self.containers[0].name == 'A container' and (len(self.containers) == 1 and self.containers[0].isempty()):
#if so - replace the default container with newly created one
self.containers[0].container.destroy()
self.containers.clear()
self.containers.append(Container(result, self))
self.redraw()
else:
#if not, just create new container besides the last one
self.containers.append(Container(result, self))
if name:
self.containers.append(Container(name, self))
if recreate:
entries = container[2:]
newcont = Container(container[1], self)
self.containers.append(newcont)
#recreate entries
if entries:
newcont.additem(True, None, 0, entries)
else:
newcont.emptylabel.grid()
def getmainframe(self):
return self.contframe
def updatemodel(self):
self.model.clear()
conts = list(enumerate([i.name for i in self.containers]))
states = [i.itemdata for i in self.containers]
for i in enumerate(conts):
self.model.append(i[1] + tuple(states[i[0]]))
return self.model
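    # Shape of self.model after updatemodel(), shown for illustration and assuming
    # each container's itemdata holds (index, name, checked) triples as consumed by
    # export() above (the container and item names are made up):
    #   [(0, 'Groceries', (0, 'milk', 0), (1, 'bread', 1)),
    #    (1, 'Empty container')]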
def save(self):
model = self.updatemodel()
if not model:
messagebox.showinfo('Oi!', 'Nothing to save yet! Create some lists!')
return
from tkinter import filedialog
        file = filedialog.asksaveasfile(defaultextension = ".txt", filetypes=(("Lug JSON files", "*.txt"),("All Files", "*.*")))
if not file:
return False
import json
json.dump(model, file)
messagebox.showinfo('Save complete!', 'Successfully saved to {0}'.format(file.name))
self.projectname = str(file.name).split(sep='/')[-1]
self.root.title(self.name + ' - ' + self.projectname)
file.close()
return True
def load(self):
from tkinter import filedialog
        file = filedialog.askopenfile(defaultextension = ".txt", filetypes=(("Lug JSON files", "*.txt"),("All Files", "*.*")))
if not file:
return
import json
self.model = json.load(file)
for i in self.containers:
i.container.destroy()
Container.relcol = 0
Container.relrow = 0
self.projectname = str(file.name).split(sep='/')[-1]
self.root.title(self.name + ' - ' + self.projectname)
print(self.projectname)
self.recreate()
self.updatemodel()
file.close()
def recreate(self):
self.containers.clear()
for container in self.model:
self.addnewcontainer(container, True)
def deleteallcontainers(self):
def deleteall():
for i in self.containers:
i.container.destroy()
self.containers.clear()
self.addnewcontainer(name = 'A container')
self.redraw()
if not (self.containers[0].name == 'A container' and len(self.containers) == 1 and self.containers[0].isempty()):
maybesave = messagebox.askyesnocancel('Oi!', 'Do you want to save your list before deleting ALL containers?')
if maybesave:
if self.save():
deleteall()
else:
return
elif maybesave == False:
deleteall()
else:
return
def redraw(self):
Container.relcol = 0
Container.relrow = 0
for i in self.contframe.winfo_children():
i.grid_forget()
for i in self.contframe.winfo_children():
i.grid(row = Container.relrow, column = Container.relcol, sticky=(N,S))
Container.relcol += 1
if Container.relcol == 4:
Container.relcol = 0
Container.relrow += 1
class Container:
#Container positions (used for redrawing all containers after deleting one)
relcol = 0
relrow = 0
def __init__(self, name, app):
try:
app.getmainframe()
except:
print("No application found!")
raise
self.app = app
self.items = []
self.itemdata = []
self.itemorder = []
self.itemcount = 0
#copy name of the entries labelframe
self.name = name
#total items contained in container entries frame and amount of selected items
self.totalitems = 0
self.selecteditems = 0
#name of the item to add
self.item = tkinter.StringVar(value = 'Enter item name')
#initialize widgets
self.container = ttk.Frame(app.getmainframe())
self.container.me = self
self.entries = ttk.Labelframe(self.container, text = name + ' (empty)')
self.emptylabel = ttk.Label(self.entries, text="Empty container") #default label used to pad entries box
self.add = ttk.Button(self.container, text='+', command = self.additem)
self.addentry = ttk.Entry(self.container, textvariable=self.item, width = 30)
self.options = tkinter.Menu(self.container)
self.options.add_command(label = 'Sort items by selection', command = self.sortitemsbyselection)
self.options.add_command(label = 'Change container name', command = self.changecontainername)
self.options.add_command(label = 'Delete container', command = self.deletecontainer)
self.options.add_command(label = 'Delete ALL containers', command = self.deleteallcontainers)
self.pack()
def isempty(self):
if len(self.itemdata) == 0:
return True
else:
return False
def additem(self, recreate = False, name = None, state = 0, *args):
def dupe():
nonlocal self
self.item.set('Duplicate item!')
self.addentry.focus()
self.addentry.select_range(0, len(self.item.get()))
if state:
state = state
else:
state = 0
if name:
value = name
else:
value = self.item.get()
if not recreate:
if value in self.app.MESSAGES:
self.addentry.focus()
self.addentry.select_range(0, len(value))
return
elif not value:
self.item.set('No name provided!')
self.addentry.focus()
self.addentry.select_range(0, len(value))
return
self.emptylabel.grid_remove()
#check if trying to add duplicate item
itemnames = [w for i in self.items for w in i]
if value in itemnames:
self.addentry.focus()
self.addentry.select_range(0, len(self.item.get()))
if name and self.app.allselecteditems:
self.item.set('Some duplicate items not moved')
raise DuplicateItem
if name:
dupe()
raise DuplicateItem
else:
dupe()
return
newitem=Item(self, value, state = state)
self.addentry.focus()
self.addentry.select_range(0, len(value))
self.items.append({newitem.name:newitem})
self.itemorder.append((self.itemcount, newitem))
self.itemcount += 1
self.updatecounters()
self.addentry.focus()
else:
self.emptylabel.grid_remove()
items = args
for i in items[0]:
newitem = Item(self, i[1], state = i[2])
self.items.append({newitem.name:newitem})
self.itemorder.append((self.itemcount, newitem))
self.itemcount += 1
self.updatecounters()
def | |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Define basic states
This includes notably the fixed, functional, and counter states.
"""
import numpy as np
from pyrobolearn.states.state import State
from pyrobolearn.actions import Action
__author__ = "<NAME>"
__copyright__ = "Copyright 2018, PyRoboLearn"
__credits__ = ["<NAME>"]
__license__ = "GNU GPLv3"
__version__ = "1.0.0"
__maintainer__ = "<NAME>"
__email__ = "<EMAIL>"
__status__ = "Development"
class FixedState(State):
r"""Fixed State.
This is a dummy fixed state which always returns the value it was initialized with.
"""
def __init__(self, value, window_size=1, axis=None, ticks=1):
"""
Initialize the dummy fixed state.
Args:
value (int, float, object): always return this value.
window_size (int): window size of the state. This is the total number of states we should remember. That
is, if the user wants to remember the current state :math:`s_t` and the previous state :math:`s_{t-1}`,
the window size is 2. By default, the :attr:`window_size` is one which means we only remember the
current state. The window size has to be bigger than 1. If it is below, it will be set automatically
to 1. The :attr:`window_size` attribute is only valid when the state is not a combination of states,
but is given some :attr:`data`.
axis (int, None): axis to concatenate or stack the states in the current window. If you have a state with
shape (n,), then if the axis is None (by default), it will just concatenate it such that resulting
state has a shape (n*w,) where w is the window size. If the axis is an integer, then it will just stack
the states in the specified axis. With the example, for axis=0, the resulting state has a shape of
                (w,n), and for axis=-1 or 1, it will have a shape of (n,w). The :attr:`axis` attribute is only valid when the
state is not a combination of states, but is given some :attr:`data`.
ticks (int): number of ticks to sleep before getting the next state data.
"""
super(FixedState, self).__init__(data=value, window_size=window_size, axis=axis, ticks=ticks)
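# Illustrative sketch, not part of the original module: how the window_size/axis
# arguments documented above shape a FixedState holding a (3,)-vector. Only the
# constructor defined above is used, and nothing here runs at import time.
def _fixed_state_window_sketch():
    """Hypothetical helper showing the window/axis combinations."""
    value = np.array([1., 2., 3.])
    concatenated = FixedState(value, window_size=2, axis=None)  # window flattened to shape (6,)
    stacked_rows = FixedState(value, window_size=2, axis=0)     # window stacked to shape (2, 3)
    stacked_cols = FixedState(value, window_size=2, axis=-1)    # window stacked to shape (3, 2)
    return concatenated, stacked_rows, stacked_cols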
class FunctionalState(State):
r"""Functional State.
This is a state which accepts a function which has to output the data.
"""
def __init__(self, function, window_size=1, axis=None, ticks=1, *args, **kwargs):
"""
Initialize the functional state.
Args:
function (callable): callable function or class that has to output the next state data.
window_size (int): window size of the state. This is the total number of states we should remember. That
is, if the user wants to remember the current state :math:`s_t` and the previous state :math:`s_{t-1}`,
the window size is 2. By default, the :attr:`window_size` is one which means we only remember the
current state. The window size has to be bigger than 1. If it is below, it will be set automatically
to 1. The :attr:`window_size` attribute is only valid when the state is not a combination of states,
but is given some :attr:`data`.
axis (int, None): axis to concatenate or stack the states in the current window. If you have a state with
shape (n,), then if the axis is None (by default), it will just concatenate it such that resulting
state has a shape (n*w,) where w is the window size. If the axis is an integer, then it will just stack
the states in the specified axis. With the example, for axis=0, the resulting state has a shape of
                (w,n), and for axis=-1 or 1, it will have a shape of (n,w). The :attr:`axis` attribute is only valid when the
state is not a combination of states, but is given some :attr:`data`.
ticks (int): number of ticks to sleep before getting the next state data.
*args: list of arguments given to the function.
**kwargs: dictionary of arguments given to the function.
"""
self.function = function
self.args, self.kwargs = args, kwargs
data = function(*args, **kwargs) # call one time to get data
super(FunctionalState, self).__init__(data=data, window_size=window_size, axis=axis, ticks=ticks)
def _reset(self):
"""Reset the functional state."""
self.data = self.function(*self.args, **self.kwargs)
def _read(self):
"""Read the next functional state data."""
self.data = self.function(*self.args, **self.kwargs)
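# Illustrative sketch, not part of the original module: a FunctionalState whose data
# comes from a callable; the extra keyword arguments are forwarded to that callable,
# as described in the constructor docstring above. Nothing here runs at import time.
def _functional_state_sketch():
    """Hypothetical helper wrapping numpy's uniform sampler as a state."""
    return FunctionalState(np.random.uniform, window_size=1, axis=None,
                           low=-1., high=1., size=(3,))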
class CounterState(State):
r"""Counter State.
Counts the number of time this step has been called.
"""
def __init__(self, cnt=0, window_size=1, axis=None, ticks=1):
"""
Initialize the counter state.
Args:
cnt (int): initial value for the counter.
window_size (int): window size of the state. This is the total number of states we should remember. That
is, if the user wants to remember the current state :math:`s_t` and the previous state :math:`s_{t-1}`,
the window size is 2. By default, the :attr:`window_size` is one which means we only remember the
current state. The window size has to be bigger than 1. If it is below, it will be set automatically
to 1. The :attr:`window_size` attribute is only valid when the state is not a combination of states,
but is given some :attr:`data`.
axis (int, None): axis to concatenate or stack the states in the current window. If you have a state with
shape (n,), then if the axis is None (by default), it will just concatenate it such that resulting
state has a shape (n*w,) where w is the window size. If the axis is an integer, then it will just stack
the states in the specified axis. With the example, for axis=0, the resulting state has a shape of
                (w,n), and for axis=-1 or 1, it will have a shape of (n,w). The :attr:`axis` attribute is only valid when the
state is not a combination of states, but is given some :attr:`data`.
ticks (int): number of ticks to sleep before getting the next state data.
"""
self.count = cnt
if isinstance(cnt, int):
cnt = np.array([cnt])
if not (isinstance(cnt, np.ndarray) and cnt.size == 1 and len(cnt.shape) == 1
and cnt.dtype.kind in np.typecodes['AllInteger']):
raise TypeError("Expecting an int, or a numpy array (integer) with size 1")
super(CounterState, self).__init__(data=cnt, window_size=window_size, axis=axis, ticks=ticks)
def _reset(self):
"""Reset the counter state."""
self.data = self.count
def _read(self):
"""Read the next counter state."""
self.data = self.last_data + 1
class PreviousActionState(State):
r"""Previous Action State
This state copies the previous action data.
"""
def __init__(self, action, window_size=1, axis=None, ticks=1):
"""
Initialize the previous action state.
Args:
action (Action): action to copy the data from.
window_size (int): window size of the state. This is the total number of states we should remember. That
is, if the user wants to remember the current state :math:`s_t` and the previous state :math:`s_{t-1}`,
the window size is 2. By default, the :attr:`window_size` is one which means we only remember the
current state. The window size has to be bigger than 1. If it is below, it will be set automatically
to 1. The :attr:`window_size` attribute is only valid when the state is not a combination of states,
but is given some :attr:`data`.
axis (int, None): axis to concatenate or stack the states in the current window. If you have a state with
shape (n,), then if the axis is None (by default), it will just concatenate it such that resulting
state has a shape (n*w,) where w is the window size. If the axis is an integer, then it will just stack
the states in the specified axis. With the example, for axis=0, the resulting state has a shape of
                (w,n), and for axis=-1 or 1, it will have a shape of (n,w). The :attr:`axis` attribute is only valid when the
state is not a combination of states, but is given some :attr:`data`.
ticks (int): number of ticks to sleep before getting the next state data.
"""
if not isinstance(action, Action):
raise TypeError("Expecting the action to be an instance of Action, instead got {}".format(action))
self.action = action
super(PreviousActionState, self).__init__(window_size=window_size, axis=axis, ticks=ticks)
def _reset(self):
"""Reset the action state."""
self.data = self.action.data
def _read(self):
"""Read the next action state."""
self.data = self.action.data
class GoalState(State):
r"""Goal state
This is a wrapper around any states to specify it is a goal state. This is notably useful for Hindsight Experience
Replay (HER).
.. seealso:: `pyrobolearn/storages/her.py`
References:
- [1] "Hindsight Experience Replay", Andrychowicz et al., 2017
"""
def __init__(self, state, | |
business_workspace = workspace_api.get_one(1)
tool_folder = content_api.get_one(1, content_type=content_type_list.Any_SLUG)
test_file = content_api.create(
content_type_slug=content_type_list.File.slug,
workspace=business_workspace,
parent=tool_folder,
label="Test file",
do_save=False,
do_notify=False,
)
with new_revision(session=dbsession, tm=transaction.manager, content=test_file):
content_api.update_file_data(
test_file, "Test_file.txt", new_mimetype="plain/text", new_content=b"Test file"
)
with new_revision(session=dbsession, tm=transaction.manager, content=test_file):
content_api.update_content(test_file, "Test_file", "<p>description</p>")
dbsession.flush()
transaction.commit()
self.testapp.authorization = ("Basic", ("<EMAIL>", "<EMAIL>"))
res = self.testapp.get(
"/api/v2/workspaces/1/files/{}/revisions".format(test_file.content_id), status=200
)
revisions = res.json_body
assert len(revisions) == 1
revision = revisions[0]
assert revision["content_type"] == "file"
assert revision["content_id"] == test_file.content_id
assert revision["is_archived"] is False
assert revision["is_deleted"] is False
assert revision["is_editable"] is True
assert revision["label"] == "Test_file"
assert revision["parent_id"] == 1
assert revision["show_in_ui"] is True
assert revision["slug"] == "test-file"
assert revision["status"] == "open"
assert revision["workspace_id"] == 1
assert revision["revision_id"]
assert revision["sub_content_types"]
# TODO - G.M - 2018-06-173 - Test with real comments
assert revision["comment_ids"] == []
# TODO - G.M - 2018-06-173 - check date format
assert revision["created"]
assert revision["author"]
assert revision["author"]["user_id"] == 1
assert revision["author"]["avatar_url"] is None
assert revision["author"]["public_name"] == "Global manager"
assert revision["mimetype"] == "plain/text"
assert revision["size"] == len(b"Test file")
assert revision["page_nb"] == 1
assert revision["has_pdf_preview"] is True
def test_api__set_file_status__ok_200__nominal_case(self) -> None:
"""
set file status
"""
dbsession = get_tm_session(self.session_factory, transaction.manager)
admin = dbsession.query(User).filter(User.email == "<EMAIL>").one()
workspace_api = WorkspaceApi(current_user=admin, session=dbsession, config=self.app_config)
content_api = ContentApi(current_user=admin, session=dbsession, config=self.app_config)
business_workspace = workspace_api.get_one(1)
tool_folder = content_api.get_one(1, content_type=content_type_list.Any_SLUG)
test_file = content_api.create(
content_type_slug=content_type_list.File.slug,
workspace=business_workspace,
parent=tool_folder,
label="Test file",
do_save=False,
do_notify=False,
)
test_file.file_extension = ".txt"
test_file.depot_file = FileIntent(b"Test file", "Test_file.txt", "text/plain")
with new_revision(session=dbsession, tm=transaction.manager, content=test_file):
content_api.update_content(test_file, "Test_file", "<p>description</p>")
dbsession.flush()
transaction.commit()
self.testapp.authorization = ("Basic", ("<EMAIL>", "<EMAIL>"))
params = {"status": "closed-deprecated"}
# before
res = self.testapp.get(
"/api/v2/workspaces/1/files/{}".format(test_file.content_id), status=200
)
content = res.json_body
assert content["content_type"] == "file"
assert content["content_id"] == test_file.content_id
assert content["status"] == "open"
# set status
self.testapp.put_json(
"/api/v2/workspaces/1/files/{}/status".format(test_file.content_id),
params=params,
status=204,
)
# after
res = self.testapp.get(
"/api/v2/workspaces/1/files/{}".format(test_file.content_id), status=200
)
content = res.json_body
assert content["content_type"] == "file"
assert content["content_id"] == test_file.content_id
assert content["status"] == "closed-deprecated"
def test_api__set_file_status__err_400__wrong_status(self) -> None:
"""
set file status
"""
dbsession = get_tm_session(self.session_factory, transaction.manager)
admin = dbsession.query(User).filter(User.email == "<EMAIL>").one()
workspace_api = WorkspaceApi(current_user=admin, session=dbsession, config=self.app_config)
content_api = ContentApi(current_user=admin, session=dbsession, config=self.app_config)
business_workspace = workspace_api.get_one(1)
tool_folder = content_api.get_one(1, content_type=content_type_list.Any_SLUG)
test_file = content_api.create(
content_type_slug=content_type_list.File.slug,
workspace=business_workspace,
parent=tool_folder,
label="Test file",
do_save=False,
do_notify=False,
)
test_file.file_extension = ".txt"
test_file.depot_file = FileIntent(b"Test file", "Test_file.txt", "text/plain")
with new_revision(session=dbsession, tm=transaction.manager, content=test_file):
content_api.update_content(test_file, "Test_file", "<p>description</p>")
dbsession.flush()
transaction.commit()
self.testapp.authorization = ("Basic", ("<EMAIL>", "<EMAIL>"))
params = {"status": "unexistant-status"}
# before
res = self.testapp.get(
"/api/v2/workspaces/1/files/{}".format(test_file.content_id), status=200
)
content = res.json_body
assert content["content_type"] == "file"
assert content["content_id"] == test_file.content_id
assert content["status"] == "open"
# set status
res = self.testapp.put_json(
"/api/v2/workspaces/1/files/{}/status".format(test_file.content_id),
params=params,
status=400,
)
assert isinstance(res.json, dict)
assert "code" in res.json.keys()
assert res.json_body["code"] == ErrorCode.GENERIC_SCHEMA_VALIDATION_ERROR
def test_api__get_file_raw__ok_200__nominal_case(self) -> None:
"""
Get one file of a content
"""
dbsession = get_tm_session(self.session_factory, transaction.manager)
admin = dbsession.query(User).filter(User.email == "<EMAIL>").one()
workspace_api = WorkspaceApi(current_user=admin, session=dbsession, config=self.app_config)
content_api = ContentApi(current_user=admin, session=dbsession, config=self.app_config)
business_workspace = workspace_api.get_one(1)
tool_folder = content_api.get_one(1, content_type=content_type_list.Any_SLUG)
test_file = content_api.create(
content_type_slug=content_type_list.File.slug,
workspace=business_workspace,
parent=tool_folder,
label="Test file",
do_save=False,
do_notify=False,
)
test_file.file_extension = ".txt"
test_file.depot_file = FileIntent(b"Test file", "Test_file.txt", "text/plain")
with new_revision(session=dbsession, tm=transaction.manager, content=test_file):
content_api.update_content(test_file, "Test_file", "<p>description</p>")
dbsession.flush()
transaction.commit()
content_id = int(test_file.content_id)
self.testapp.authorization = ("Basic", ("<EMAIL>", "<EMAIL>"))
filename = "Test_file.txt"
res = self.testapp.get(
"/api/v2/workspaces/1/files/{}/raw/{}".format(content_id, filename), status=200
)
assert res.body == b"Test file"
assert res.content_type == "text/plain"
assert res.content_length == len(b"Test file")
assert int(res.headers["Content-Length"]) == res.content_length
assert res.last_modified.second == test_file.updated.second
assert res.last_modified.minute == test_file.updated.minute
assert res.last_modified.day == test_file.updated.day
assert res.last_modified.month == test_file.updated.month
assert res.last_modified.year == test_file.updated.year
def test_api__get_file_raw__ok_200__force_download_case(self) -> None:
"""
Get one file of a content
"""
dbsession = get_tm_session(self.session_factory, transaction.manager)
admin = dbsession.query(User).filter(User.email == "<EMAIL>").one()
workspace_api = WorkspaceApi(current_user=admin, session=dbsession, config=self.app_config)
content_api = ContentApi(current_user=admin, session=dbsession, config=self.app_config)
business_workspace = workspace_api.get_one(1)
tool_folder = content_api.get_one(1, content_type=content_type_list.Any_SLUG)
test_file = content_api.create(
content_type_slug=content_type_list.File.slug,
workspace=business_workspace,
parent=tool_folder,
label="Test file",
do_save=False,
do_notify=False,
)
with new_revision(session=dbsession, tm=transaction.manager, content=test_file):
content_api.update_file_data(
test_file,
new_content=b"Test file",
new_filename="Test_file.txt",
new_mimetype="text/plain",
)
content_api.update_content(test_file, "Test_file", "<p>description</p>")
dbsession.flush()
transaction.commit()
content_id = int(test_file.content_id)
self.testapp.authorization = ("Basic", ("<EMAIL>", "<EMAIL>"))
params = {"force_download": 1}
filename = "Test_file.txt"
res = self.testapp.get(
"/api/v2/workspaces/1/files/{}/raw/{}".format(content_id, filename),
status=200,
params=params,
)
assert res.headers[
"Content-Disposition"
] == "attachment; filename=\"{}\"; filename*=UTF-8''{};".format(filename, filename)
assert res.body == b"Test file"
assert res.content_type == "text/plain"
assert res.content_length == len(b"Test file")
assert int(res.headers["Content-Length"]) == res.content_length
assert res.last_modified.second == test_file.updated.second
assert res.last_modified.minute == test_file.updated.minute
assert res.last_modified.day == test_file.updated.day
assert res.last_modified.month == test_file.updated.month
assert res.last_modified.year == test_file.updated.year
def test_api__create_file__ok__200__nominal_case(self) -> None:
"""
create one file of a content at workspace root
"""
dbsession = get_tm_session(self.session_factory, transaction.manager)
admin = dbsession.query(User).filter(User.email == "<EMAIL>").one()
workspace_api = WorkspaceApi(current_user=admin, session=dbsession, config=self.app_config)
business_workspace = workspace_api.get_one(1)
self.testapp.authorization = ("Basic", ("<EMAIL>", "<EMAIL>"))
image = create_1000px_png_test_image()
res = self.testapp.post(
"/api/v2/workspaces/{}/files".format(business_workspace.workspace_id),
upload_files=[("files", image.name, image.getvalue())],
status=200,
)
res = res.json_body
assert res["parent_id"] is None
assert res["content_type"] == "file"
assert res["is_archived"] is False
assert res["is_deleted"] is False
assert res["is_editable"] is True
assert res["workspace_id"] == business_workspace.workspace_id
assert isinstance(res["content_id"], int)
content_id = res["content_id"]
assert res["status"] == "open"
assert res["label"] == "test_image"
assert res["slug"] == "test-image"
res = self.testapp.get(
"/api/v2/workspaces/{workspace_id}/files/{content_id}".format(
workspace_id=business_workspace.workspace_id, content_id=content_id
),
status=200,
)
res = res.json_body
assert res["parent_id"] is None
assert res["content_type"] == "file"
assert res["is_archived"] is False
assert res["is_deleted"] is False
assert res["is_editable"] is True
assert res["workspace_id"] == business_workspace.workspace_id
assert isinstance(res["content_id"], int)
assert res["status"] == "open"
assert res["label"] == "test_image"
assert res["slug"] == "test-image"
assert res["author"]["user_id"] == admin.user_id
assert res["page_nb"] == 1
assert res["mimetype"] == "image/png"
def test_api__create_file__err_400__filename_already_used(self) -> None:
"""
create one file of a content but filename is already used here
"""
dbsession = get_tm_session(self.session_factory, transaction.manager)
admin = dbsession.query(User).filter(User.email == "<EMAIL>").one()
workspace_api = WorkspaceApi(current_user=admin, session=dbsession, config=self.app_config)
business_workspace = workspace_api.get_one(1)
self.testapp.authorization = ("Basic", ("<EMAIL>", "<EMAIL>"))
image = create_1000px_png_test_image()
res = self.testapp.post(
"/api/v2/workspaces/{}/files".format(business_workspace.workspace_id),
upload_files=[("files", image.name, image.getvalue())],
status=200,
)
res = res.json_body
assert res["parent_id"] is None
assert res["content_type"] == "file"
assert res["is_archived"] is False
assert res["is_deleted"] is False
assert res["is_editable"] is True
assert res["workspace_id"] == business_workspace.workspace_id
assert isinstance(res["content_id"], int)
assert res["status"] == "open"
assert res["label"] == "test_image"
assert res["slug"] == "test-image"
res = self.testapp.post(
"/api/v2/workspaces/{}/files".format(business_workspace.workspace_id),
upload_files=[("files", image.name, image.getvalue())],
status=400,
)
assert isinstance(res.json, dict)
assert "code" in res.json.keys()
assert res.json_body["code"] == ErrorCode.CONTENT_FILENAME_ALREADY_USED_IN_FOLDER
def test_api__create_file__ok__200__in_folder(self) -> None:
"""
create one file of a content in a folder
"""
dbsession = get_tm_session(self.session_factory, transaction.manager)
admin = dbsession.query(User).filter(User.email == "<EMAIL>").one()
workspace_api = WorkspaceApi(current_user=admin, session=dbsession, config=self.app_config)
content_api = ContentApi(current_user=admin, session=dbsession, config=self.app_config)
business_workspace = workspace_api.get_one(1)
folder = content_api.create(
label="test-folder",
content_type_slug=content_type_list.Folder.slug,
workspace=business_workspace,
do_save=True,
do_notify=False,
)
transaction.commit()
self.testapp.authorization = ("Basic", ("<EMAIL>", "<EMAIL>"))
params = {"parent_id": folder.content_id}
image = create_1000px_png_test_image()
res = self.testapp.post(
"/api/v2/workspaces/{}/files".format(business_workspace.workspace_id),
upload_files=[("files", image.name, image.getvalue())],
params=params,
status=200,
)
res = res.json_body
assert res["parent_id"] == folder.content_id
assert res["content_type"] == "file"
assert res["is_archived"] is False
assert res["is_deleted"] is False
assert res["is_editable"] is True
assert res["workspace_id"] == business_workspace.workspace_id
assert isinstance(res["content_id"], int)
content_id = res["content_id"]
assert res["status"] == "open"
assert res["label"] == "test_image"
assert res["slug"] == "test-image"
res = self.testapp.get(
"/api/v2/workspaces/{workspace_id}/files/{content_id}".format(
workspace_id=business_workspace.workspace_id, content_id=content_id
),
status=200,
)
res = res.json_body
assert res["parent_id"] == folder.content_id
assert res["content_type"] == "file"
assert res["is_archived"] is False
assert res["is_deleted"] is False
assert res["is_editable"] is True
assert res["workspace_id"] == business_workspace.workspace_id
assert isinstance(res["content_id"], int)
assert res["status"] == "open"
assert res["label"] == "test_image"
assert res["slug"] == "test-image"
assert res["author"]["user_id"] == admin.user_id
assert res["page_nb"] == 1
assert res["mimetype"] == "image/png"
def test_api__create_file__err__400__unallow_subcontent(self) -> None:
"""
create one file of a content but subcontent of type file unallowed here
"""
dbsession = get_tm_session(self.session_factory, transaction.manager)
admin = dbsession.query(User).filter(User.email == "<EMAIL>").one()
workspace_api = WorkspaceApi(current_user=admin, session=dbsession, config=self.app_config)
content_api = ContentApi(current_user=admin, session=dbsession, config=self.app_config)
business_workspace = workspace_api.get_one(1)
folder = content_api.create(
label="test-folder",
content_type_slug=content_type_list.Folder.slug,
workspace=business_workspace,
do_save=True,
do_notify=False,
)
with new_revision(session=dbsession, tm=transaction.manager, content=folder):
content_api.set_allowed_content(folder, [])
content_api.save(folder)
transaction.commit()
self.testapp.authorization = ("Basic", ("<EMAIL>", "<EMAIL>"))
params = {"parent_id": folder.content_id}
image = create_1000px_png_test_image()
res = self.testapp.post(
"/api/v2/workspaces/{}/files".format(business_workspace.workspace_id),
upload_files=[("files", image.name, image.getvalue())],
params=params,
status=400,
)
assert isinstance(res.json, dict)
assert "code" in res.json.keys()
assert res.json_body["code"] == ErrorCode.UNALLOWED_SUBCONTENT
def test_api__create_file__err__400__parent_not_found(self) -> None:
"""
create one file of a content but parent_id is not valid
"""
dbsession = get_tm_session(self.session_factory, transaction.manager)
admin = dbsession.query(User).filter(User.email == "<EMAIL>").one()
workspace_api = WorkspaceApi(current_user=admin, session=dbsession, config=self.app_config)
business_workspace = workspace_api.get_one(1)
self.testapp.authorization = ("Basic", ("<EMAIL>", "<EMAIL>"))
params = {"parent_id": 3000}
image = create_1000px_png_test_image()
res = self.testapp.post(
"/api/v2/workspaces/{}/files".format(business_workspace.workspace_id),
upload_files=[("files", image.name, image.getvalue())],
params=params,
status=400,
)
assert isinstance(res.json, dict)
assert "code" in res.json.keys()
assert res.json_body["code"] == ErrorCode.PARENT_NOT_FOUND
def test_api__set_file_raw__ok_200__nominal_case(self) -> None:
"""
Set | |
##
# @file NonLinearPlace.py
# @author <NAME>
# @date Jul 2018
# @brief Nonlinear placement engine to be called with parameters and placement database
#
import os
import sys
import time
import pickle
import numpy as np
import logging
logger = logging.getLogger(__name__)
import torch
import gzip
import copy
import matplotlib.pyplot as plt
if sys.version_info[0] < 3:
import cPickle as pickle
else:
import _pickle as pickle
import dreamplace.BasicPlace as BasicPlace
import dreamplace.PlaceObj as PlaceObj
import dreamplace.NesterovAcceleratedGradientOptimizer as NesterovAcceleratedGradientOptimizer
import dreamplace.EvalMetrics as EvalMetrics
import pdb
class NonLinearPlace(BasicPlace.BasicPlace):
"""
@brief Nonlinear placement engine.
It takes parameters and placement database and runs placement flow.
"""
def __init__(self, params, placedb):
"""
@brief initialization.
@param params parameters
@param placedb placement database
"""
super(NonLinearPlace, self).__init__(params, placedb)
def __call__(self, params, placedb):
"""
@brief Top API to solve placement.
@param params parameters
@param placedb placement database
"""
iteration = 0
all_metrics = []
# global placement
if params.global_place_flag:
assert len(placedb.regions) == 0, "FENCE REGIONS are not supported in global placement yet"
# global placement may run in multiple stages according to user specification
for global_place_params in params.global_place_stages:
# we formulate each stage as a 3-nested optimization problem
# f_gamma(g_density(h(x) ; density weight) ; gamma)
# Lgamma Llambda Lsub
# When optimizing an inner problem, the outer parameters are fixed.
# This is a generalization to the eplace/RePlAce approach
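                # A schematic sketch of how the three nested loops below realize this
                # (names refer to the model attributes actually used further down):
                #   for Lgamma_step in range(model.Lgamma_iteration):                                        # outer: gamma annealing
                #       for Llambda_density_weight_step in range(model.Llambda_density_weight_iteration):   # middle: density weight updates
                #           for Lsub_step in range(model.Lsub_iteration):                                    # inner: plain descent steps
                #               one_descent_step(...)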
# As global placement may easily diverge, we record the position of best overflow
best_metric = [None]
best_pos = [None]
if params.gpu:
torch.cuda.synchronize()
tt = time.time()
# construct model and optimizer
density_weight = 0.0
# construct placement model
model = PlaceObj.PlaceObj(density_weight, params, placedb, self.data_collections, self.op_collections, global_place_params).to(self.data_collections.pos[0].device)
optimizer_name = global_place_params["optimizer"]
# determine optimizer
if optimizer_name.lower() == "adam":
optimizer = torch.optim.Adam(self.parameters(), lr=0)
elif optimizer_name.lower() == "sgd":
optimizer = torch.optim.SGD(self.parameters(), lr=0)
elif optimizer_name.lower() == "sgd_momentum":
optimizer = torch.optim.SGD(self.parameters(), lr=0, momentum=0.9, nesterov=False)
elif optimizer_name.lower() == "sgd_nesterov":
optimizer = torch.optim.SGD(self.parameters(), lr=0, momentum=0.9, nesterov=True)
elif optimizer_name.lower() == "nesterov":
optimizer = NesterovAcceleratedGradientOptimizer.NesterovAcceleratedGradientOptimizer(
self.parameters(),
lr=0,
obj_and_grad_fn=model.obj_and_grad_fn,
constraint_fn=self.op_collections.move_boundary_op,
)
else:
assert 0, "unknown optimizer %s" % (optimizer_name)
logger.info("use %s optimizer" % (optimizer_name))
model.train()
# defining evaluation ops
eval_ops = {
#"wirelength" : self.op_collections.wirelength_op,
#"density" : self.op_collections.density_op,
#"objective" : model.obj_fn,
"hpwl": self.op_collections.hpwl_op,
"overflow": self.op_collections.density_overflow_op
}
if params.routability_opt_flag:
eval_ops.update({
'route_utilization': self.op_collections.route_utilization_map_op,
'pin_utilization': self.op_collections.pin_utilization_map_op
})
# a function to initialize learning rate
def initialize_learning_rate(pos):
learning_rate = model.estimate_initial_learning_rate(pos, global_place_params["learning_rate"])
# update learning rate
for param_group in optimizer.param_groups:
param_group['lr'] = learning_rate.data
if iteration == 0:
if params.gp_noise_ratio > 0.0:
logger.info("add %g%% noise" % (params.gp_noise_ratio * 100))
model.op_collections.noise_op(model.data_collections.pos[0], params.gp_noise_ratio)
initialize_learning_rate(model.data_collections.pos[0])
# the state must be saved after setting learning rate
initial_state = copy.deepcopy(optimizer.state_dict())
if params.gpu:
torch.cuda.synchronize()
logger.info("%s initialization takes %g seconds" % (optimizer_name, (time.time() - tt)))
# as nesterov requires line search, we cannot follow the convention of other solvers
if optimizer_name.lower() in {"sgd", "adam", "sgd_momentum", "sgd_nesterov"}:
model.obj_and_grad_fn(model.data_collections.pos[0])
elif optimizer_name.lower() != "nesterov":
assert 0, "unsupported optimizer %s" % (optimizer_name)
# stopping criteria
def Lgamma_stop_criterion(Lgamma_step, metrics):
with torch.no_grad():
if len(metrics) > 1:
cur_metric = metrics[-1][-1][-1]
prev_metric = metrics[-2][-1][-1]
if Lgamma_step > 100 and ((cur_metric.overflow < params.stop_overflow and cur_metric.hpwl > prev_metric.hpwl) or cur_metric.max_density < params.target_density):
logger.debug(
"Lgamma stopping criteria: %d > 100 and (( %g < 0.1 and %g > %g ) or %g < 1.0)"
% (Lgamma_step, cur_metric.overflow,
cur_metric.hpwl, prev_metric.hpwl,
cur_metric.max_density))
return True
# a heuristic to detect divergence and stop early
if len(metrics) > 50:
cur_metric = metrics[-1][-1][-1]
prev_metric = metrics[-50][-1][-1]
# record HPWL and overflow increase, and check divergence
if cur_metric.overflow > prev_metric.overflow and cur_metric.hpwl > best_metric[0].hpwl * 2:
return True
return False
def Llambda_stop_criterion(Lgamma_step, Llambda_density_weight_step, metrics):
with torch.no_grad():
if len(metrics) > 1:
cur_metric = metrics[-1][-1]
prev_metric = metrics[-2][-1]
if (cur_metric.overflow < params.stop_overflow and cur_metric.hpwl > prev_metric.hpwl) or cur_metric.max_density < 1.0:
logger.debug(
"Llambda stopping criteria: %d and (( %g < 0.1 and %g > %g ) or %g < 1.0)"
% (Llambda_density_weight_step,
cur_metric.overflow, cur_metric.hpwl,
prev_metric.hpwl, cur_metric.max_density))
return True
return False
# use a moving average window for stopping criteria, for an example window of 3
# 0, 1, 2, 3, 4, 5, 6
# window2
# window1
moving_avg_window = max(min(model.Lsub_iteration // 2, 3), 1)
def Lsub_stop_criterion(Lgamma_step, Llambda_density_weight_step, Lsub_step, metrics):
with torch.no_grad():
if len(metrics) >= moving_avg_window * 2:
cur_avg_obj = 0
prev_avg_obj = 0
for i in range(moving_avg_window):
cur_avg_obj += metrics[-1 - i].objective
prev_avg_obj += metrics[-1 - moving_avg_window - i].objective
cur_avg_obj /= moving_avg_window
prev_avg_obj /= moving_avg_window
threshold = 0.999
if cur_avg_obj >= prev_avg_obj * threshold:
logger.debug(
"Lsub stopping criteria: %d and %g > %g * %g"
% (Lsub_step, cur_avg_obj, prev_avg_obj,
threshold))
return True
return False
def one_descent_step(Lgamma_step, Llambda_density_weight_step, Lsub_step, iteration, metrics):
t0 = time.time()
# metric for this iteration
cur_metric = EvalMetrics.EvalMetrics(iteration, (Lgamma_step, Llambda_density_weight_step, Lsub_step))
cur_metric.gamma = model.gamma.data
cur_metric.density_weight = model.density_weight.data
metrics.append(cur_metric)
pos = model.data_collections.pos[0]
# move any out-of-bound cell back to placement region
self.op_collections.move_boundary_op(pos)
if torch.eq(model.density_weight, 0.0):
model.initialize_density_weight(params, placedb)
logger.info("density_weight = %.6E" % (model.density_weight.data))
optimizer.zero_grad()
# t1 = time.time()
cur_metric.evaluate(placedb, eval_ops, pos)
model.overflow = cur_metric.overflow.data.clone()
#logger.debug("evaluation %.3f ms" % ((time.time()-t1)*1000))
#t2 = time.time()
# as nesterov requires line search, we cannot follow the convention of other solvers
if optimizer_name.lower() in ["sgd", "adam", "sgd_momentum", "sgd_nesterov"]:
obj, grad = model.obj_and_grad_fn(pos)
cur_metric.objective = obj.data.clone()
elif optimizer_name.lower() != "nesterov":
assert 0, "unsupported optimizer %s" % (optimizer_name)
# plot placement
if params.plot_flag and iteration % 100 == 0:
cur_pos = self.pos[0].data.clone().cpu().numpy()
self.plot(params, placedb, iteration, cur_pos)
t3 = time.time()
optimizer.step()
logger.info("optimizer step %.3f ms" % ((time.time() - t3) * 1000))
# nesterov has already computed the objective of the next step
if optimizer_name.lower() == "nesterov":
cur_metric.objective = optimizer.param_groups[0]['obj_k_1'][0].data.clone()
# actually reports the metric before step
logger.info(cur_metric)
# record the best overflow
if best_metric[0] is None or best_metric[0].overflow > cur_metric.overflow:
best_metric[0] = cur_metric
if best_pos[0] is None:
best_pos[0] = self.pos[0].data.clone()
else:
best_pos[0].data.copy_(self.pos[0].data)
logger.info("full step %.3f ms" % ((time.time() - t0) * 1000))
Lgamma_metrics = all_metrics
if params.routability_opt_flag:
adjust_area_flag = True
adjust_route_area_flag = params.adjust_nctugr_area_flag or params.adjust_rudy_area_flag
adjust_pin_area_flag = params.adjust_pin_area_flag
num_area_adjust = 0
Llambda_flat_iteration = 0
for Lgamma_step in range(model.Lgamma_iteration):
Lgamma_metrics.append([])
Llambda_metrics = Lgamma_metrics[-1]
for Llambda_density_weight_step in range(model.Llambda_density_weight_iteration):
Llambda_metrics.append([])
Lsub_metrics = Llambda_metrics[-1]
for Lsub_step in range(model.Lsub_iteration):
one_descent_step(Lgamma_step, Llambda_density_weight_step, Lsub_step, iteration, Lsub_metrics)
iteration += 1
# stopping criteria
if Lsub_stop_criterion(Lgamma_step, Llambda_density_weight_step, Lsub_step, Lsub_metrics):
break
Llambda_flat_iteration += 1
# update density weight
if Llambda_flat_iteration > 1:
model.op_collections.update_density_weight_op(
Llambda_metrics[-1][-1],
Llambda_metrics[-2][-1] if len(Llambda_metrics) > 1 else Lgamma_metrics[-2][-1][-1],
Llambda_flat_iteration)
#logger.debug("update density weight %.3f ms" % ((time.time()-t2)*1000))
if Llambda_stop_criterion(Lgamma_step, Llambda_density_weight_step, Llambda_metrics):
break
# for routability optimization
if params.routability_opt_flag and num_area_adjust < params.max_num_area_adjust and Llambda_metrics[-1][-1].overflow < params.node_area_adjust_overflow:
content = "routability optimization round %d: adjust area flags = (%d, %d, %d)" % (
num_area_adjust, adjust_area_flag,
adjust_route_area_flag, adjust_pin_area_flag)
pos = model.data_collections.pos[0]
#cur_metric = EvalMetrics.EvalMetrics(iteration)
#cur_metric.evaluate(placedb, {
# "hpwl" : self.op_collections.hpwl_op,
# "overflow" : self.op_collections.density_overflow_op,
# "route_utilization" : self.op_collections.route_utilization_map_op,
# "pin_utilization" : self.op_collections.pin_utilization_map_op,
# },
# pos)
#logger.info(cur_metric)
route_utilization_map = None
pin_utilization_map = None
if adjust_route_area_flag:
if params.adjust_nctugr_area_flag:
route_utilization_map = model.op_collections.nctugr_congestion_map_op(pos)
else:
route_utilization_map = model.op_collections.route_utilization_map_op(pos)
if params.plot_flag:
path = "%s/%s" % (params.result_dir, params.design_name())
figname = "%s/plot/route%d.png" % (path, num_area_adjust)
os.system("mkdir -p %s" % (os.path.dirname(figname)))
plt.imsave(figname, route_utilization_map.data.cpu().numpy().T, origin='lower')
if adjust_pin_area_flag:
pin_utilization_map = model.op_collections.pin_utilization_map_op(pos)
if params.plot_flag:
path = "%s/%s" % (params.result_dir, params.design_name())
figname = "%s/plot/pin%d.png" % (path, num_area_adjust)
os.system("mkdir -p %s" % (os.path.dirname(figname)))
plt.imsave(figname, pin_utilization_map.data.cpu().numpy().T, origin='lower')
adjust_area_flag, adjust_route_area_flag, adjust_pin_area_flag = model.op_collections.adjust_node_area_op(
pos,
route_utilization_map,
pin_utilization_map
)
content += " -> (%d, %d, %d)" % (adjust_area_flag, adjust_route_area_flag, adjust_pin_area_flag)
logger.info(content)
if adjust_area_flag:
num_area_adjust += 1
# restart Llambda
model.op_collections.density_op.reset()
model.op_collections.density_overflow_op.reset()
model.op_collections.pin_utilization_map_op.reset()
model.initialize_density_weight(params, placedb)
model.density_weight.mul_(0.1 / params.density_weight)
logger.info("density_weight = %.6E" % (model.density_weight.data))
# load state to restart the optimizer
optimizer.load_state_dict(initial_state)
# must after loading the state
initialize_learning_rate(pos)
# increase iterations of the sub problem to slow down the search
model.Lsub_iteration = model.routability_Lsub_iteration
# reset best metric
best_metric[0] = None
best_pos[0] = None
#cur_metric = EvalMetrics.EvalMetrics(iteration)
#cur_metric.evaluate(placedb, {
# "hpwl" : self.op_collections.hpwl_op,
# "overflow" : self.op_collections.density_overflow_op,
# "route_utilization" : self.op_collections.route_utilization_map_op,
# "pin_utilization" : self.op_collections.pin_utilization_map_op,
# },
# pos)
#logger.info(cur_metric)
break
# gradually reduce gamma to tradeoff smoothness and accuracy
model.op_collections.update_gamma_op(Lgamma_step, Llambda_metrics[-1][-1].overflow)
model.op_collections.precondition_op.set_overflow(Llambda_metrics[-1][-1].overflow)
if Lgamma_stop_criterion(Lgamma_step, Lgamma_metrics):
break
# update learning rate
if optimizer_name.lower() in ["sgd", "adam", "sgd_momentum", "sgd_nesterov", "cg"]:
| |
<filename>detector.py
"""
detector
Copyright (c) 2020 <NAME>
Licensed under the MIT License (see LICENSE for details)
Written by <NAME>
"""
import os
import sys
import json
import shutil  # used by clean_work_dirs() below
# import datetime # not really useful so remove soon pls
import numpy as np
import skimage.io
import skimage.color
import skimage.draw
from skimage.filters import unsharp_mask
import imgaug  # should augment this import as well haha
import time
# from PIL import Image
# Root directory of project
ROOT_DIR = os.path.abspath("../../")
# Import Mask RCNN
sys.path.append(ROOT_DIR) # To find local version of the library
sys.path.append(os.path.join(os.path.abspath('.'), 'ColabESRGAN/'))
from mrcnn.config import Config
from mrcnn import model as modellib, utils
# sys.path.insert(1, 'samples/hentai/')
# from hentai import HentaiConfig
from cv2 import (imshow, waitKey, multiply, add, erode, VideoCapture, Canny,
                 cvtColor, COLOR_GRAY2RGB, imdecode, CAP_PROP_FRAME_HEIGHT,
                 CAP_PROP_FRAME_WIDTH, CAP_PROP_FPS, VideoWriter,
                 VideoWriter_fourcc, resize, INTER_LANCZOS4, INTER_AREA,
                 GaussianBlur, filter2D, bilateralFilter, blur)
import ColabESRGAN.test
from green_mask_project_mosaic_resolution import get_mosaic_res
DEFAULT_LOGS_DIR = os.path.join(ROOT_DIR, "logs")
# Path to trained weights
WEIGHTS_PATH = os.path.join(ROOT_DIR, "weights.h5")
# taking this from hentai to avoid import
class HentaiConfig(Config):
"""Configuration for training on the toy dataset.
Derives from the base Config class and overrides some values.
"""
# Give the configuration a recognizable name
NAME = "hentai"
# We use a GPU with 12GB memory, which can fit two images.
# Adjust down if you use a smaller GPU.
IMAGES_PER_GPU = 1
# Number of classes (including background)
NUM_CLASSES = 1 + 1 + 1
# Number of training steps per epoch, equal to dataset train size
STEPS_PER_EPOCH = 1490
# Skip detections with < 75% confidence
DETECTION_MIN_CONFIDENCE = 0.75
# Detector class. Handles detection and potentially esr decensoring. For now, will house an ESR instance at startup
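# A hedged usage sketch (the file paths are assumptions, not part of this module):
#   det = Detector(weights_path=WEIGHTS_PATH)
#   det.load_weights()
#   det.resize_GAN(img_path='input/img.png', img_name='img.png')   # downscale + ESRGAN pass
#   det.ESRGAN(img_path='input/img.png', img_name='img.png')       # detection + splice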
class Detector():
# at startup, dont create model yet
def __init__(self, weights_path):
class InferenceConfig(HentaiConfig):
# Set batch size to 1 since we'll be running inference on
# one image at a time. Batch size = GPU_COUNT * IMAGES_PER_GPU
GPU_COUNT = 1
IMAGES_PER_GPU = 1
self.config = InferenceConfig()
self.weights_path = weights_path
# counts how many non-png images, if >1 then warn user
self.dcp_compat = 0
# Create model, but dont load weights yet
self.model = modellib.MaskRCNN(mode="inference", config=self.config,
model_dir=DEFAULT_LOGS_DIR)
try:
self.out_path = os.path.join(os.path.abspath('.'), "ESR_temp/ESR_out/")
self.out_path2 = os.path.join(os.path.abspath('.'), "ESR_temp/ESR_out2/")
self.temp_path = os.path.join(os.path.abspath('.'), "ESR_temp/temp/")
self.temp_path2 = os.path.join(os.path.abspath('.'), "ESR_temp/temp2/")
self.fin_path = os.path.join(os.path.abspath('.'), "ESR_output/")
except:
print("ERROR in Detector init: Cannot find ESR_out or some dir within.")
return
# Create esrgan instance for detector instance
try:
self.esr_model_path = os.path.join(os.path.abspath('.'), "4x_FatalPixels_340000_G.pth")
except:
print("ERROR in Detector init: ESRGAN model not found, make sure you have 4x_FatalPixels_340000_G.pth in this directory")
return
# Scan for cuda compatible GPU for ESRGAN. Mask-RCNN *should* automatically use a GPU if available.
self.hardware = 'cpu'
if self.model.check_cuda_gpu()==True:
print("CUDA-compatible GPU located!")
self.hardware = 'cuda'
# destroy model. Will re init during weight load.
self.model = []
# Clean out temp working images from all directories in ESR_temp. Code from https://stackoverflow.com/questions/185936/how-to-delete-the-contents-of-a-folder
def clean_work_dirs(self):
print("Cleaning work dirs...")
folders = [self.out_path, self.out_path2, self.temp_path, self.temp_path2]
for folder in folders:
for filename in os.listdir(folder):
file_path = os.path.join(folder, filename)
try:
if os.path.isfile(file_path) or os.path.islink(file_path):
os.unlink(file_path)
elif os.path.isdir(file_path):
shutil.rmtree(file_path)
except Exception as e:
print('ERROR in clean_work_dirs: Failed to delete %s. Reason: %s' % (file_path, e))
# Make sure this is called before using model weights
def load_weights(self):
print('Creating model, Loading weights...', end=' ')
self.model = modellib.MaskRCNN(mode="inference", config=self.config,
model_dir=DEFAULT_LOGS_DIR)
try:
self.model.load_weights(self.weights_path, by_name=True)
print("Weights loaded")
except Exception as e:
print("ERROR in load_weights: Model Load. Ensure you have your weights.h5 file!", end=' ')
print(e)
"""Apply cover over image. Based off of Mask-RCNN Balloon color splash function
image: RGB image [height, width, 3]
mask: instance segmentation mask [height, width, instance count]
Returns result covered image.
"""
def apply_cover(self, image, mask, dilation):
# Copy color pixels from the original color image where mask is set
green = np.zeros([image.shape[0], image.shape[1], image.shape[2]], dtype=np.uint8)
green[:,:] = [0, 255, 0]
if mask.shape[-1] > 0:
# We're treating all instances as one, so collapse the mask into one layer
mask = (np.sum(mask, -1, keepdims=True) < 1)
# dilate mask to ensure proper coverage
mimg = mask.astype('uint8')*255
kernel = np.ones((dilation,dilation), np.uint8)
mimg = erode(src=mask.astype('uint8'), kernel=kernel, iterations=1) #
# dilation returns image with channels stripped (?!?). Reconstruct image channels
mask_img = np.zeros([mask.shape[0], mask.shape[1],3]).astype('bool')
mask_img[:,:,0] = mimg.astype('bool')
mask_img[:,:,1] = mimg.astype('bool')
mask_img[:,:,2] = mimg.astype('bool')
cover = np.where(mask_img.astype('bool'), image, green).astype(np.uint8)
else:
# error case, return image
cover = image
return cover, mask
# Similar to above function, except it places the decensored image over the original image.
def splice(self, image, mask, gan_out):
if mask.shape[-1] > 0:
mask = (np.sum(mask, -1, keepdims=True) < 1)
mask = 1 - mask # invert mask for blending
mask = mask.astype('uint8')*255
mask = GaussianBlur(mask, (29,29), 0)
# mask_img = np.zeros([mask.shape[0], mask.shape[1],3]).astype('uint8')
# for i in range(3):
# mask_img[:,:,i] = mask
mask_img = mask.astype(float) / 255
# proper blending courtesy of https://www.learnopencv.com/alpha-blending-using-opencv-cpp-python/
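            # Per-channel alpha blend computed by the loop below:
            #   cover = mask_img * gan_out + (1 - mask_img) * image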
fg_o = gan_out.astype(float)
bg_o = image.astype(float)
fg = np.zeros([mask.shape[0], mask.shape[1],3]).astype(float)
bg = np.zeros([mask.shape[0], mask.shape[1],3]).astype(float) # create foreground and background images with proper rgb channels
cover = image
for i in range(3):
# Multiply the fg with the mask matte
fg[:,:,i] = multiply(mask_img, fg_o[:,:,i])
# Multiply the bg with ( 1 - mask_img )
bg[:,:,i] = multiply(1.0 - mask_img, bg_o[:,:,i])
# Add the masked fg and bg.
cover[:,:,i] = add(fg[:,:,i], bg[:,:,i])
else:
#error case, return image
cover=image
return cover
# Return number of jpgs that were not processed
def get_non_png(self):
return self.dcp_compat
# function to handle all of the esrgan stuff
def resize_GAN(self, img_path, img_name, is_video=False):
# non-video, standard image
if is_video is False:
# Attempt to obtain image
try:
image = skimage.io.imread(img_path) # problems with strange shapes
if image.ndim != 3:
image = skimage.color.gray2rgb(image) # convert to rgb if greyscale
if image.shape[-1] == 4:
image = image[..., :3] # strip alpha channel
except Exception as e:
print("ERROR in resize_GAN: Image read. Skipping. image_path=", img_path)
print(e)
return
# Calculate mosaic granularity.
granularity = get_mosaic_res(np.array(image))
if granularity < 10: #TODO: implement adaptive granularity by weighted changes
print("Granularity of image was less than threshold at ", granularity)
granularity = 10
# Resize image down
try:
mini_img = resize(image, (int(image.shape[1]/granularity), int(image.shape[0]/granularity)), interpolation=INTER_AREA) # TODO: experiment with interpolations
# After resize, run bilateral filter to keep colors coherent
file_name = self.temp_path + img_name[:-4] + '.png'
skimage.io.imsave(file_name, mini_img)
except Exception as e:
print("ERROR in resize_GAN: resize. Skipping. image_path=",img_path, e)
return
# Now run ESRGAN inference
gan_img_path = self.out_path + img_name[:-4] + '.png'
self.esrgan_instance.run_esrgan(test_img_folder=file_name, out_filename=gan_img_path, mosaic_res=granularity)
else:
try:
video_path = img_path
vcapture = VideoCapture(video_path)
width = int(vcapture.get(CAP_PROP_FRAME_WIDTH))
height = int(vcapture.get(CAP_PROP_FRAME_HEIGHT))
fps = vcapture.get(CAP_PROP_FPS)
print("Detected fps:", fps)
except Exception as e:
print("ERROR in resize_GAN: video read and init.", e)
return
count = 0
success = True
print("Video read complete. Starting video phase 1 : resize + GAN")
while success:
print("frame: ", count)
# Read next image
success, image = vcapture.read()
if success:
# OpenCV returns images as BGR, convert to RGB
image = image[..., ::-1]
granularity = get_mosaic_res(np.array(image)) # pass np array of image as ref to gmp function
if granularity < 10: #TODO: implement adaptive granularity by weighted changes
print('Granularity was less than threshold at ',granularity)
granularity = 10
# initial resize frame
mini_img = resize(image, (int(image.shape[1]/granularity), int(image.shape[0]/granularity)), interpolation=INTER_AREA) # downscale to 1/16
# bil2 = bilateralFilter(mini_img, 3, 70, 70)
file_name = self.temp_path + img_name[:-4] + '.png' # need to save a sequence of pngs for TGAN operation
skimage.io.imsave(file_name, mini_img) # save resized images to temp path. Not used in main ESRGAN function below.
# run ESRGAN algorithms
gan_img_path = self.out_path + img_name[:-4] + str(count).zfill(6) + '.png'
self.esrgan_instance.run_esrgan(test_img_folder=file_name, out_filename=gan_img_path, mosaic_res=granularity)
gan_image = skimage.io.imread(gan_img_path)
gan_image = resize(gan_image, (image.shape[1], image.shape[0]))
count += 1
print('Video: Phase 1 complete!')
# Runs hent-AI detection and splice. Mosaic only.
def ESRGAN(self, img_path, img_name, is_video=False):
# Image reads
if is_video == False:
try:
image = skimage.io.imread(img_path) # problems with strange shapes
if image.ndim != 3:
image = skimage.color.gray2rgb(image) # convert to rgb if greyscale
if image.shape[-1] == 4:
image = image[..., :3] # strip alpha channel
except Exception as e:
print("ERROR in detector.ESRGAN: Image read. Skipping. image_path=", img_path)
print(e)
return
# Run | |
<reponame>Unathi-Skosana/ptycho
"""
File: simulator.py
Author: <NAME>
Email: <EMAIL>
Github: https://github.com/<NAME>
Description:
"""
import numpy as np
from skimage.restoration import unwrap_phase
from skimage.exposure import rescale_intensity
from joblib import Parallel, delayed
from scipy.fft import fft2, ifft2, fftshift, ifftshift
from itertools import chain, product
from utils.filters import gau_kern, circ_mask
from utils.patterns import radial_gradient
from skimage.color import rgb2gray
from skimage.util import invert
class PytchoSimulatorBase():
"""
The base class of a ptychography simulator, holding the various
parameters of the simulation.
"""
def __init__(self, alpha, beta, probe,
start, shift, rc, iterations):
""" Initialize class instance """
self._alpha = alpha
self._beta = beta
self._probe = probe
self._start = start
self._shift = shift
self._rc = rc
self._iterations = iterations
self.compute_illu_pos()
@property
def alpha(self):
""" Get parameter alpha """
return self._alpha
@alpha.setter
def alpha(self, _alpha):
""" Set the value of alpha """
self._alpha = _alpha
@property
def beta(self):
""" Get parameter beta """
return self._beta
@beta.setter
def beta(self, _beta):
""" Set the value of beta """
self._beta = _beta
@property
def probe(self):
""" Get probe size """
return self._probe
@probe.setter
def probe(self, _probe):
""" Set value of probe """
self._probe = _probe
@property
def shift(self):
""" Get probe shift """
return self._shift
@shift.setter
def shift(self, _shift):
""" Set value of probe shift """
self._shift = _shift
@property
def start(self):
""" Get start position of probe """
return self._start
@start.setter
def start(self, _start):
""" Set the value of start position """
self._start = _start
@property
def rc(self):
""" Get rows and columns of probe positions """
return self._rc
@rc.setter
def rc(self, _rc):
""" Set the value of rows and columns of probe positions """
self._rc = _rc
@property
def iterations(self):
""" Get number of iterations """
return self._iterations
@iterations.setter
def iterations(self, _iterations):
""" Set the value of iterations """
self._iterations = _iterations
def compute_illu_pos(self):
""" Compute the illumination positions of the probe on the image"""
s_x, s_y = self._start
rows, cols = self._rc
x = np.arange(s_x, s_x + rows * self._shift, self._shift)
y = np.arange(s_y, s_y + cols * self._shift, self._shift)
self._illu_pos = np.array(list(product(x, y)))
class PytchoSimulator(PytchoSimulatorBase):
"""
A class for simulating a ptychographic image reconstructions
"""
def __init__(self, alpha=1., beta=1., probe=50,
start=(2, 2), shift=20, rc=(11, 11),
iterations=200):
""" Initialize class instance """
super().__init__(alpha=alpha, beta=beta,
probe=probe, start=start,
shift=shift, rc=rc,
iterations=iterations)
def diffract(self, obj, illu_func, mode='', **kwargs):
"""
Computes diffraction patterns of the object O ( phase & amplitude )
probed by a probe P (phase & amplitude) across predetermined illuminations
positions on the object.
Args:
obj: Object
illu_func: Illumination function
illu_pos: Illumination positions for the probe across the object O
**kwargs: Arbitrary keyword arguments.
Returns:
List of diffraction patterns
"""
'''
allowedtypes = {
'poisson': 'poisson_values',
'random': 'random_values'
}
kwdefaults = {
'mean': 0.,
'amount': 00
}
allowedkwargs = {
'poisson': ['mean'],
'random': ['amount'],
}
for key in kwargs:
if key not in allowedkwargs[allowedtypes[mode]]:
raise ValueError('%s keyword not in allowed keywords %s' %
(key, allowedkwargs[allowedtypes[mode]]))
'''
# diffraction patterns
diff_patterns = np.zeros((np.prod(self.rc), self._probe, self._probe),
dtype=np.complex64)
        # Routine for parallelization; no need to worry about race conditions
# as the diffraction patterns are computed independently of one another
def diff(k):
dx, dy = 0, 0
if mode == 'position':
positions = kwargs.get('positions', range(5, 100))
if k in positions:
dx = np.random.randint(low=-4, high=4)
dy = np.random.randint(low=-4, high=4)
x_k, y_k = self._illu_pos[k]
i = np.int(np.round((x_k - self._start[0]) / self._shift))
j = np.int(np.round((y_k - self._start[1]) / self._shift))
ext_wave = obj[y_k + dy:y_k + dy + self._probe,
x_k + dx:x_k + dx + self._probe] * illu_func
ext_diff = np.abs(fftshift(fft2(ext_wave)))
if mode == 'poisson':
mu = kwargs.get('mean', 1e5)
ext_inten = ext_diff ** 2
fac = mu / np.sum(ext_inten)
ext_inten_noisy = np.random.poisson(ext_inten * fac) / fac
ext_diff = np.sqrt(ext_inten_noisy)
if mode == 'random':
mu = kwargs.get('mean', 1e6)
ext_inten = ext_diff ** 2
fac = mu / np.sum(ext_inten)
ext_inten_noisy = np.random.poisson(ext_inten * fac) / fac
ext_diff = np.sqrt(ext_inten_noisy)
v = kwargs.get('amount', 0.05)
if not 0 <= v <= 1.0:
raise ValueError('Mean must be between 0 and 1.0 for random \
noise.')
# bottleneck
def f(col):
noisy_col = list(map(lambda I: I +
np.random.uniform(low=-v * I, high=v * I),
col))
return np.array(noisy_col)
noisy_vec = np.fromiter(chain.from_iterable(f(i) for i in ext_diff),
dtype=ext_diff.dtype)
ext_diff = np.reshape(noisy_vec, ext_diff.shape)
diff_patterns[i * self.rc[0] + j] = ext_diff
# Parallel diffraction pattern calculation
Parallel(n_jobs=8, prefer="threads")(
delayed(diff)(k) for k in range(np.prod(self._rc)))
return diff_patterns
def epie(self, obj, diff_patterns, **kwargs):
"""
Args:
obj: Object
diff_patterns: Diffraction pattern
illu_pos: Illumination positions for the probe across the object O
**kwargs: Arbitrary keyword arguments.
Returns:
Estimated object, estimated probe, root mean square error and
sum of least squares error of the estimate
"""
# parameters
hold = kwargs.get('hold', 1)
err_ival = kwargs.get('err_ival', 1)
permute = kwargs.get('permute', False)
# loop temp variables
err_i = 0
err_n = int(np.ceil(self._iterations / err_ival))
half_rc = np.prod(self._rc) // 2
# diffraction
idx = range(np.prod(self._rc))
# object shape
height, width = obj.shape
# object estimation initial guess
obj_est = np.zeros((height, width), dtype=np.complex64)
# illumination function initial guess
illu_func_est = np.ones((self._probe, self._probe))
# initialization for error
R_factor = np.zeros(err_n)
# holder variable for the estimated object after some iterations
obj_est_n = np.zeros((err_n, height, width), dtype=np.complex64)
k = 0
diff_pat_sum = np.sum(np.abs(diff_patterns[half_rc])**2)
MN = np.prod(diff_patterns[0].shape)
while k < self._iterations:
ext_waves = []
ext_wave_diffs = []
if permute:
idx = np.random.permutation(idx)
for i in idx:
x_i, y_i = self._illu_pos[i]
x_loc = np.int(np.round((x_i - self._start[0]) / self._shift))
y_loc = np.int(np.round((y_i - self._start[1]) / self._shift))
# steps 1 - 7 from doi:10.1016/j.ultramic.2004.11.006
obj_g = obj_est[y_i:y_i+self._probe, x_i:x_i+self._probe]
obj_g_cpy = np.copy(obj_g)
ext_wave_g = obj_g * illu_func_est
ext_diff_g = fftshift(fft2(ext_wave_g))
ext_diff_c = diff_patterns[x_loc * self.rc[0] + y_loc] \
* np.exp(1j * np.angle(ext_diff_g))
ext_wave_c = ifft2(ifftshift(ext_diff_c))
if k >= hold:
# probe power correction
illu_func_est = illu_func_est * np.sqrt(diff_pat_sum / (MN * np.sum(np.abs(illu_func_est)**2)))
ext_wave_upd = obj_g + (ext_wave_c - ext_wave_g) \
* self._alpha * illu_func_est.conj() \
/ np.max(np.abs(illu_func_est))**2
obj_est[y_i:y_i+self._probe, x_i:x_i+self._probe] = ext_wave_upd
ext_wave_diffs.append(diff_patterns[x_loc * self.rc[0] + y_loc])
ext_waves.append(ext_diff_g)
if k >= hold:
illu_func_est = illu_func_est + (ext_wave_c - ext_wave_g) \
* self._beta * obj_g_cpy.conj() \
/ np.max(np.abs(obj_g_cpy))**2
if k % err_ival == 0:
ext_waves = np.array(ext_waves)
ext_wave_diffs = np.array(ext_wave_diffs)
numerator = np.abs(ext_wave_diffs - np.abs(ext_waves))
denominator = np.abs(ext_wave_diffs)
R_factor[err_i] = np.sum(np.sum(numerator)) / np.sum(np.sum(denominator))
obj_est_n[err_i] = obj_est
err_i += 1
k += 1
def gamma(obj_est_n):
g_fac = np.sum(obj * obj_est_n.conj()) \
/ np.sum(np.abs(obj_est_n)**2)
return np.sum(np.abs(obj - g_fac * obj_est_n)**2) \
/ np.sum(np.abs(obj)**2)
RMS = np.array(list(map(gamma, obj_est_n)))
return obj_est, illu_func_est, RMS, R_factor
def pie(self, obj, illu_func, diff_patterns, **kwargs):
"""
Args:
obj: Object
illu_func: Illumination function
illu_pos: Illumination positions for the probe across the object O
diff_patterns: Diffraction pattern
**kwargs: Arbitrary keyword arguments.
Returns:
Estimated object, root mean square error and sum of least squares error
of the estimate
"""
# parameters
err_ival = kwargs.get('err_ival', 4)
permute = kwargs.get('permute', False)
# loop temp variables
err_i = 0
err_n = int(np.ceil(self._iterations / err_ival))
half_rc = np.prod(self._rc) // 2
# diffraction
idx = range(np.prod(self.rc))
# object shape
height, width = obj.shape
# object estimation initial guess
obj_est = np.zeros((height, width), dtype=np.complex64)
# initialization R_factor
R_factor = np.zeros(err_n)
# holder variable for the estimated object after some iterations
obj_est_n = np.zeros((err_n, height, width), dtype=np.complex64)
gau = gau_kern(self._probe, self.probe / np.sqrt(8 * np.log(2)),
normalize=False)
k = 0
while k < self._iterations:
ext_waves = []
ext_wave_diffs = []
if permute:
idx = np.random.permutation(idx)
for i in idx:
x_i, y_i = self._illu_pos[i]
x_loc = np.int(np.round((x_i - self._start[0]) / self._shift))
y_loc = np.int(np.round((y_i - self._start[1]) / self._shift))
# steps 1 - 7 from doi:10.1016/j.ultramic.2004.11.006
obj_g = obj_est[y_i:y_i+self._probe, x_i:x_i+self._probe]
ext_wave_g = obj_g * illu_func
ext_diff_g = fftshift(fft2(ext_wave_g))
ext_diff_c = diff_patterns[x_loc * self._rc[0] + y_loc] * \
np.exp(1j * np.angle(ext_diff_g))
ext_wave_c = ifft2(ifftshift(ext_diff_c))
ext_wave_upd = obj_g + (ext_wave_c - ext_wave_g) \
* np.abs(illu_func) * illu_func.conj() \
/ (np.max(np.abs(illu_func)) * \
(np.abs(illu_func)**2 + \
self._alpha * np.max(np.abs(illu_func))**2))
obj_est[y_i:y_i+self._probe, x_i:x_i+self._probe] = ext_wave_upd
ext_wave_diffs.append(diff_patterns[x_loc * self.rc[0] + y_loc])
ext_waves.append(ext_diff_g)
if k % err_ival == 0:
ext_waves = np.array(ext_waves)
ext_wave_diffs = np.array(ext_wave_diffs)
numerator = np.abs(ext_wave_diffs - np.abs(ext_waves))
denominator = np.abs(ext_wave_diffs)
R_factor[err_i] = np.sum(np.sum(numerator)) | |
"target_lane": 1,
"distance_reward": -0.1,
"distance_merged_vehicle_reward": 0,
"distance_reward_type": "min",
"successful_merging_reward": 5,
"continuous_mission_reward": True,
"cooperative_flag": True,
"sympathy_flag": True,
"cooperative_reward": 0.9,
                # continuous_mission_reward: True = keep receiving the reward after merging; False = the reward is received only once
}
}
def default_config_exit(self) -> dict:
"""
:return: a configuration dict
"""
return {
'scenario': {
'scenario_number': 3,
'road_type': "road_exit",
                # 1-highway, 2-road_closed, 3-road_merge, 4-road_exit, 5-test. The road type should match the vehicle_type (1, 2, 3)
# for merging road
'lane_count_interval': [1, 4], # random number of lane range
'random_offset': [-5, 5], # offset values for before, converging, merge -+
'before_merging': 100,
'randomize_before': False, # random before road size
# distance before converging, converging is the start of the lane with slope
'converging_merging': 200,
'randomize_converging': False, # random converging road size
# distance from converging to merge, merge start when the slope lane ends
'during_merging': 110, # distance of the merging lane, paralles to highway
'randomize_merge': False, # random merge road size
'random_lane_count': False, # random number of lane
'after_merging': 1100, # distance of the highway after that
# for exit road
'before_exit': 100,
'converging_exit': 50,
'taking_exit': 40,
'during_exit': 100,
'after_exit': 1100,
'randomize_vehicles': True, # if true vehicles will be randomize based on random_offset_vehicles values
'random_offset_vehicles': [-5, 5],
'random_controlled_vehicle': False,
# will chose controlled_vehicle based on prob_of_controlled_vehicle, override controlled_vehicle
'total_number_of_vehicles': 13,
# will be the total number of vehicles in the scenario, AV or cruising will be chosen based on the prob, overide vehicle_count
'prob_of_controlled_vehicle': 0.5,
'mission_type': 'exit',
# if shuffle_controlled_vehicle , from total_number_of_vehicles with probability prob_of_controlled_vehicle AV willl be chosen
},
# 'cruising_vehicle': {
# 'acc_max': 6, # """Maximum acceleration."""
# 'comfort_acc_max': 4, # """Desired maximum acceleration."""
# 'comfort_acc_min': -12, # """Desired maximum deceleration."""
# 'distance_wanted': 0.51, # """Desired jam distance to the front vehicle."""
# 'time_wanted': 0.5, # """Desired time gap to the front vehicle."""
# 'delta': 4, # """Exponent of the velocity term."""
# 'speed': 25, # Vehicle speed
# 'enable_lane_change': False, # allow lane change
#
# 'vehicles_type': "highway_env.vehicle.behavior.CustomVehicle",
# # chose different vehicle types from :
# # "highway_env.vehicle.behavior.CustomVehicle" ,"highway_env.vehicle.behavior.AggressiveVehicle","highway_env.vehicle.behavior.DefensiveVehicle", "highway_env.vehicle.behavior.LinearVehicle" "highway_env.vehicle.behavior.IDMVehicle"
# # if CustomVehicle is chosen it will load the previous configurations, other vehicles types has their own predefiened configurations.
#
# 'length': 5.0, # Vehicle length [m]
# 'width': 2.0, # Vehicle width [m]
# 'max_speed': 40 # Maximum reachable speed [m/s]
# },
'exit_vehicle': {
'acc_max': 6, # """Maximum acceleration.""" 6
'comfort_acc_max': 3, # """Desired maximum acceleration.""" 3
'comfort_acc_min': -5, # """Desired maximum deceleration.""" -5
'distance_wanted': 0.5, # """Desired jam distance to the front vehicle.""" 5
'time_wanted': 0.5, # """Desired time gap to the front vehicle.""" 1.5
'delta': 4, # """Exponent of the velocity term.""" 4
'speed': 25,
'initial_position': [78, 0],
'enable_lane_change': True,
'controlled_vehicle': False, # chose if merging vehicle is AV or human
'vehicles_type': "highway_env.vehicle.behavior.CustomVehicle",
'set_route': True, # predefine the route
# "highway_env.vehicle.behavior.CustomVehicle" ,"highway_env.vehicle.behavior.AggressiveVehicle","highway_env.vehicle.behavior.DefensiveVehicle", "highway_env.vehicle.behavior.LinearVehicle" "highway_env.vehicle.behavior.IDMVehicle"
# if CustomVehicle is chosen it will load the previous configurations, other vehicles types has their own predefiened configurations.
'randomize': True,
'id': -1, # id for the merging vehicle
'length': 5.0, # Vehicle length [m]
'width': 2.0, # Vehicle width [m]
'max_speed': 40 # Maximum reachable speed [m/s]
},
"reward": {
"coop_reward_type": "multi_agent_tuple",
"reward_type": "exit_reward", # merging_reward
"normalize_reward": True,
"reward_speed_range": [20, 40],
"collision_reward": -2, # -1
"on_desired_lane_reward": 0.3,
"high_speed_reward": 0.6, # 0.4
"lane_change_reward": -0.2,
"target_lane": 1,
"distance_reward": -0.1,
"distance_merged_vehicle_reward": 0,
"distance_reward_type": "min",
"successful_merging_reward": 5,
"continuous_mission_reward": True,
"cooperative_flag": True,
"sympathy_flag": True,
"cooperative_reward": 0.9,
                # continuous_mission_reward: True = keep receiving the reward after the exit is completed; False = the reward is received only once
}
}
def _create_road(self, road_type) -> None:
if road_type == "highway":
self._road_highway()
elif road_type == "road_merge":
self._road_merge()
elif road_type == "road_exit":
self._road_exit()
elif road_type == "intersection":
self._road_intersection()
elif road_type == "roundabout":
self._road_roundabout()
elif road_type == "twoway":
self._road_twoway()
elif road_type == "uturn":
self._road_uturn()
elif road_type == "road_closed":
# TODO , fix arguments
self._road_closed(end=self.before_merging + self.converging_merging, after=self.after_merging)
elif road_type == "test":
self._road_test()
def _create_vehicles(self, road_type):
if road_type == "road_merge":
if self.random_controlled_vehicle:
self._vehicles_merge_to_highway_prob()
else:
self._vehicles_merge_to_highway()
elif road_type == "road_exit":
self._vehicles_exit_highway()
elif road_type == "intersection":
self._vehicles_intersection()
elif road_type == "roundabout":
self._vehicles_roundabout()
elif road_type == "road_closed":
# TODO , fix arguments
self._vehicles_road_closed(controlled_vehicles=self.controlled_vehicles,
cruising_vehicles_count=self.cruising_vehicles_count)
elif road_type == "highway":
self._vehicles_highway()
elif road_type == "twoway":
self._vehicles_twoway()
elif road_type == "uturn":
self._vehicles_uturn()
elif road_type == "test":
self._vehicle_road_test()
def _road_merge(self):
"""Create a road composed of straight adjacent lanes."""
net = RoadNetwork()
# Highway lanes
ends = [self.before_merging, self.converging_merging, self.during_merging,
self.after_merging] # Before, converging, merge, after
c, s, n = LineType.CONTINUOUS_LINE, LineType.STRIPED, LineType.NONE
for lane in range(self.lanes_count):
line_types = [LineType.CONTINUOUS_LINE if lane == 0 else LineType.STRIPED,
LineType.CONTINUOUS_LINE if lane == self.lanes_count - 1 else LineType.NONE]
net.add_lane("a", "b", StraightLane([0, StraightLane.DEFAULT_WIDTH * (lane + 1)],
[sum(ends[:2]), StraightLane.DEFAULT_WIDTH * (lane + 1)],
line_types=line_types))
net.add_lane("b", "c",
StraightLane([sum(ends[:2]), StraightLane.DEFAULT_WIDTH * (lane + 1)],
[sum(ends[:3]), StraightLane.DEFAULT_WIDTH * (lane + 1)], line_types=line_types))
net.add_lane("c", "d", StraightLane([sum(ends[:3]), StraightLane.DEFAULT_WIDTH * (lane + 1)],
[sum(ends), StraightLane.DEFAULT_WIDTH * (lane + 1)],
line_types=line_types))
# Merging lane
amplitude = 3.25
ljk = StraightLane([0, 6.5 + 4 + self.lanes_count * 4], [ends[0], 6.5 + 4 + self.lanes_count * 4],
line_types=[c, c],
forbidden=True)
lkb = SineLane(ljk.position(ends[0], -amplitude), ljk.position(sum(ends[:2]), -amplitude),
amplitude, 2 * np.pi / (2 * ends[1]), np.pi / 2, line_types=[c, c], forbidden=True)
lbc = StraightLane(lkb.position(ends[1], 0), lkb.position(ends[1], 0) + [ends[2], 0],
line_types=[n, c], forbidden=True)
net.add_lane("j", "k", ljk)
net.add_lane("k", "b", lkb)
net.add_lane("b", "c", lbc)
road = Road(network=net, np_random=self.env.np_random, record_history=self.record_history)
road.objects.append(Obstacle(road, lbc.position(ends[2], 0)))
self.road = road
def _road_exit1(self):
"""Create a road composed of straight adjacent lanes."""
net = RoadNetwork()
# Highway lanes
ends = [self.before_exit + self.converging_exit, self.taking_exit,
self.after_exit] # Before, converging, merge, after
c, s, n = LineType.CONTINUOUS_LINE, LineType.STRIPED, LineType.NONE
for lane in range(self.lanes_count):
line_types = [LineType.CONTINUOUS_LINE if lane == 0 else LineType.STRIPED,
LineType.CONTINUOUS_LINE if lane == self.lanes_count - 1 else LineType.NONE]
net.add_lane("a", "b", StraightLane([0, StraightLane.DEFAULT_WIDTH * (lane + 1)],
[sum(ends[:1]), StraightLane.DEFAULT_WIDTH * (lane + 1)],
line_types=line_types))
net.add_lane("b", "c",
StraightLane([sum(ends[:1]), StraightLane.DEFAULT_WIDTH * (lane + 1)],
[sum(ends[:2]), StraightLane.DEFAULT_WIDTH * (lane + 1)], line_types=line_types))
net.add_lane("c", "d", StraightLane([sum(ends[:2]), StraightLane.DEFAULT_WIDTH * (lane + 1)],
[sum(ends), StraightLane.DEFAULT_WIDTH * (lane + 1)],
line_types=line_types))
# Exit lane
amplitude = 3.25 / 4
lbp = StraightLane([self.before_exit + self.converging_exit, 4 + self.lanes_count * 4],
[self.before_exit + self.converging_exit + self.taking_exit, 4 + self.lanes_count * 4],
line_types=[n, c], forbidden=True)
# ljk = StraightLane([0, 6.5 + 4 +self.lanes_count*4], [ends[0], 6.5 + 4 + self.lanes_count*4 ], line_types=[c, c],
# forbidden=True)
lpk = SineLane(lbp.position(self.taking_exit, amplitude),
lbp.position(self.taking_exit + self.during_exit, amplitude),
-amplitude, 2 * np.pi / (2 * ends[1]), np.pi / 2, line_types=[c, c], forbidden=True)
lkj = StraightLane(lpk.position(self.during_exit, 0), lpk.position(self.during_exit + self.after_exit, 0),
line_types=[c, c], forbidden=True)
net.add_lane("b", "p", lbp)
net.add_lane("p", "k", lpk)
net.add_lane("k", "j", lkj)
road = Road(network=net, np_random=self.env.np_random, record_history=self.record_history)
# road.objects.append(Obstacle(road, lbp.position(ends[2], 0)))
self.road = road
def _road_exit(self):
# road_length = 1000, exit_humans = 400, exit_length = 100
exit_position = self.exit_humans + self.exit_controlled
exit_length = self.exit_length
after_exit = self.after_exit
net = RoadNetwork.straight_road_networkv2(self.lanes_count, start=0,
length=exit_position, nodes_str=("0", "1"))
net = RoadNetwork.straight_road_networkv2(self.lanes_count+ 1, start=exit_position,
length=exit_length, nodes_str=("1", "2"), net=net)
net = RoadNetwork.straight_road_networkv2(self.lanes_count, start=exit_position + exit_length,
length=after_exit,
nodes_str=("2", "3"), net=net)
for _from in net.graph:
for _to in net.graph[_from]:
for _id in range(len(net.graph[_from][_to])):
net.get_lane((_from, _to, _id)).speed_limit = 26 - 3.4 * _id
exit_position = np.array([exit_position + exit_length, self.lanes_count * CircularLane.DEFAULT_WIDTH])
radius = 150
exit_center = exit_position + np.array([0, radius])
lane = CircularLane(center=exit_center,
radius=radius,
start_phase=3 * np.pi / 2,
end_phase=2 * np.pi,
forbidden=True)
net.add_lane("2", "exit", lane)
self.road = Road(network=net,
np_random=self.env.np_random)
def _road_closed(self, end=200, after=1000):
"""Create a road composed of straight adjacent lanes."""
net = RoadNetwork()
last_lane = 0
# Highway lanes
c, s, n = LineType.CONTINUOUS_LINE, LineType.STRIPED, LineType.NONE
y = [last_lane + StraightLane.DEFAULT_WIDTH, last_lane + 2 * StraightLane.DEFAULT_WIDTH]
line_type = [[c, s], [n, c]]
line_type_merge = [[c, s], [n, s]]
new_lane = StraightLane([0, last_lane], [end, last_lane], line_types=[c, n], forbidden=True)
net.add_lane("a", "b", new_lane)
        for i in range(self.lanes_count):
net.add_lane("a", "b", StraightLane([0, y[i]], [end, y[i]], line_types=line_type[i]))
net.add_lane("b", "c",
StraightLane([end, y[i]], [after, y[i]], line_types=line_type_merge[i]))
road = Road(network=net, np_random=self.env.np_random, record_history=self.record_history)
pos = new_lane.position(end, 0)
road.objects.append(Obstacle(road, pos))
| |
elif term['QCODE'] == '1': newQuery = (formtype.form_set.all().filter(ref_to_parent_form__record_reference__form_name__icontains=term['TVAL'], ref_to_parent_form__record_reference_type__pk=rtypePK))#ICONTAINS
elif term['QCODE'] == '2': newQuery = (formtype.form_set.all().filter(ref_to_parent_form__record_reference__form_name__exact=term['TVAL'], ref_to_parent_form__record_reference_type__pk=rtypePK))#MATCHES EXACT
elif term['QCODE'] == '3': newQuery = (formtype.form_set.all().exclude(ref_to_parent_form__record_reference__form_name__contains=term['TVAL'], ref_to_parent_form__record_reference_type__pk=rtypePK))#EXCLUDES
elif term['QCODE'] == '4': newQuery = (formtype.form_set.all().filter(ref_to_parent_form__record_reference__isnull=True, ref_to_parent_form__record_reference_type__pk=rtypePK))#IS_NULL
queriedForms = (newQuery | queriedForms)
#==========================================================================================================================================================================================
# IF WE ARE LOOKING UP THE RELATIONS FRAT
#==========================================================================================================================================================================================
elif deepRTYPE == 'FRAT':
print >>sys.stderr, "We should be here"
#grab the formtype in question
deepFormType = FormType.objects.get(pk=FormRecordAttributeType.objects.get(pk=deepPK).form_type.pk)
#Now begin modifying the SQL query which each term of each individual query
#skip the term if the field was left blank
if term['TVAL'] != "" or term['QCODE'] == '4':
newQuery = None
#----------------------------------------------------------
# AND STATEMENT FOR A --TERM--
if term['T-ANDOR'] != 'or':#We can assume it is an AND like addition if it's anything but 'or'
#Now let's figure out the QCODE, e.g. contains, match exact etc.
#First we Get a flattened list of form pk values from the deepFormType
#Then we filter our current formtype queryset's frrt manytomany pks by the pk value list just created
if term['QCODE'] == '0':
flattenedSet = list(deepFormType.form_set.all().filter(formrecordattributevalue__record_value__contains=term['TVAL'], formrecordattributevalue__record_attribute_type__pk=deepPK).values_list('pk', flat=True)) #CONTAINS
newQuery = queriedForms.filter(ref_to_parent_form__record_reference__pk__in=flattenedSet)
elif term['QCODE'] == '1':
flattenedSet = list(deepFormType.form_set.all().filter(formrecordattributevalue__record_value__icontains=term['TVAL'], formrecordattributevalue__record_attribute_type__pk=deepPK).values_list('pk', flat=True)) #ICONTAINS
newQuery = queriedForms.filter(ref_to_parent_form__record_reference__pk__in=flattenedSet)
elif term['QCODE'] == '2':
flattenedSet = list(deepFormType.form_set.all().filter(formrecordattributevalue__record_value__exact=term['TVAL'], formrecordattributevalue__record_attribute_type__pk=deepPK).values_list('pk', flat=True)) #MATCHES EXACT
newQuery = queriedForms.filter(ref_to_parent_form__record_reference__pk__in=flattenedSet)
elif term['QCODE'] == '3':
flattenedSet = list(deepFormType.form_set.all().exclude(formrecordattributevalue__record_value__contains=term['TVAL'], formrecordattributevalue__record_attribute_type__pk=deepPK).values_list('pk', flat=True)) #EXCLUDES
newQuery = queriedForms.filter(ref_to_parent_form__record_reference__pk__in=flattenedSet)
elif term['QCODE'] == '4':
flattenedSet = list(deepFormType.form_set.all().filter(formrecordattributevalue__record_value__isnull=True, formrecordattributevalue__record_attribute_type__pk=deepPK).values_list('pk', flat=True)) #IS NULL
newQuery = queriedForms.filter(ref_to_parent_form__record_reference__pk__in=flattenedSet)
queriedForms = newQuery
#--------------------------------------------------------
# OR STATEMENT FOR a --TERM--
else:
#Now let's figure out the QCODE, e.g. contains, match exact etc.
if term['QCODE'] == '0':
flattenedSet = list(deepFormType.form_set.all().filter(formrecordattributevalue__record_value__contains=term['TVAL'], formrecordattributevalue__record_attribute_type__pk=deepPK).values_list('pk', flat=True)) #CONTAINS
newQuery = formtype.form_set.all().filter(ref_to_parent_form__record_reference__pk__in=flattenedSet)
elif term['QCODE'] == '1':
flattenedSet = list(deepFormType.form_set.all().filter(formrecordattributevalue__record_value__icontains=term['TVAL'], formrecordattributevalue__record_attribute_type__pk=deepPK).values_list('pk', flat=True)) #ICONTAINS
newQuery = formtype.form_set.all().filter(ref_to_parent_form__record_reference__pk__in=flattenedSet)
elif term['QCODE'] == '2':
flattenedSet = list(deepFormType.form_set.all().filter(formrecordattributevalue__record_value__exact=term['TVAL'], formrecordattributevalue__record_attribute_type__pk=deepPK).values_list('pk', flat=True)) #MATCHES EXACT
newQuery = formtype.form_set.all().filter(ref_to_parent_form__record_reference__pk__in=flattenedSet)
elif term['QCODE'] == '3':
flattenedSet = list(deepFormType.form_set.all().exclude(formrecordattributevalue__record_value__contains=term['TVAL'], formrecordattributevalue__record_attribute_type__pk=deepPK).values_list('pk', flat=True)) #EXCLUDES
newQuery = formtype.form_set.all().filter(ref_to_parent_form__record_reference__pk__in=flattenedSet)
elif term['QCODE'] == '4':
flattenedSet = list(deepFormType.form_set.all().filter(formrecordattributevalue__record_value__isnull=True, formrecordattributevalue__record_attribute_type__pk=deepPK).values_list('pk', flat=True)) #IS NULL
newQuery = formtype.form_set.all().filter(ref_to_parent_form__record_reference__pk__in=flattenedSet)
queriedForms = (newQuery | queriedForms)
#==========================================================================================================================================================================================
# IF WE ARE LOOKING UP THE RELATION'S FRRT(Only form ID allowed)
#==========================================================================================================================================================================================
elif deepRTYPE == 'FRRT':
print >>sys.stderr, "We should be here 3"
#grab the formtype in question
deepFormType = FormType.objects.get(pk=FormRecordReferenceType.objects.get(pk=deepPK).form_type_parent.pk)
#Now begin modifying the SQL query which each term of each individual query
#skip the term if the field was left blank
if term['TVAL'] != "" or term['QCODE'] == '4':
newQuery = None
#----------------------------------------------------------
# AND STATEMENT FOR A --TERM--
if term['T-ANDOR'] != 'or':#We can assume it is an AND like addition if it's anything but 'or'
#Now let's figure out the QCODE, e.g. contains, match exact etc.
#First we Get a flattened list of form pk values from the deepFormType
#Then we filter our current formtype queryset's frrt manytomany pks by the pk value list just created
if term['QCODE'] == '0':
flattenedSet = list(deepFormType.form_set.all().filter(ref_to_parent_form__record_reference__form_name__contains=term['TVAL']).values_list('pk', flat=True)) #CONTAINS
newQuery = queriedForms.filter(ref_to_parent_form__record_reference__pk__in=flattenedSet)
elif term['QCODE'] == '1':
flattenedSet = list(deepFormType.form_set.all().filter(ref_to_parent_form__record_reference__form_name__icontains=term['TVAL']).values_list('pk', flat=True)) #ICONTAINS
newQuery = queriedForms.filter(ref_to_parent_form__record_reference__pk__in=flattenedSet)
elif term['QCODE'] == '2':
flattenedSet = list(deepFormType.form_set.all().filter(ref_to_parent_form__record_reference__form_name__exact=term['TVAL']).values_list('pk', flat=True)) #EXACT MATCH
newQuery = queriedForms.filter(ref_to_parent_form__record_reference__pk__in=flattenedSet)
elif term['QCODE'] == '3':
flattenedSet = list(deepFormType.form_set.all().exclude(ref_to_parent_form__record_reference__form_name__contains=term['TVAL']).values_list('pk', flat=True)) #EXCLUDES
newQuery = queriedForms.filter(ref_to_parent_form__record_reference__pk__in=flattenedSet)
elif term['QCODE'] == '4':
flattenedSet = list(deepFormType.form_set.all().filter(ref_to_parent_form__record_reference__form_name__isnull=True).values_list('pk', flat=True)) #IS NULL
newQuery = queriedForms.filter(ref_to_parent_form__record_reference__pk__in=flattenedSet)
queriedForms = newQuery
#--------------------------------------------------------
# OR STATEMENT FOR a --TERM--
else:
#Now let's figure out the QCODE, e.g. contains, match exact etc.
if term['QCODE'] == '0':
flattenedSet = list(deepFormType.form_set.all().filter(ref_to_parent_form__record_reference__form_name__contains=term['TVAL']).values_list('pk', flat=True)) #CONTAINS
newQuery = formtype.form_set.all().filter(ref_to_parent_form__record_reference__pk__in=flattenedSet)
elif term['QCODE'] == '1':
flattenedSet = list(deepFormType.form_set.all().filter(ref_to_parent_form__record_reference__form_name__icontains=term['TVAL']).values_list('pk', flat=True)) #ICONTAINS
newQuery = formtype.form_set.all().filter(ref_to_parent_form__record_reference__pk__in=flattenedSet)
elif term['QCODE'] == '2':
flattenedSet = list(deepFormType.form_set.all().filter(ref_to_parent_form__record_reference__form_name__exact=term['TVAL']).values_list('pk', flat=True)) #EXACT MATCH
newQuery = formtype.form_set.all().filter(ref_to_parent_form__record_reference__pk__in=flattenedSet)
elif term['QCODE'] == '3':
flattenedSet = list(deepFormType.form_set.all().exclude(ref_to_parent_form__record_reference__form_name__contains=term['TVAL']).values_list('pk', flat=True)) #EXCLUDES
newQuery = formtype.form_set.all().filter(ref_to_parent_form__record_reference__pk__in=flattenedSet)
elif term['QCODE'] == '4':
flattenedSet = list(deepFormType.form_set.all().filter(ref_to_parent_form__record_reference__form_name__isnull=True).values_list('pk', flat=True)) #IS NULL
newQuery = formtype.form_set.all().filter(ref_to_parent_form__record_reference__pk__in=flattenedSet)
queriedForms = (newQuery | queriedForms)
#We'll calculate percent by claiming finishing the query is at 50% when complete and at 20% when starting this section.
Qpercent = ((rtypeCounter-2) * (50.0/len(masterQueryJSON['query_list'])))
percentDone = 5 + Qpercent + (tCounter * (Qpercent / len(currentJSONQuery['TERMS'])) )
progressData.jsonString = '{"message":"Performing Query # '+ str(rtypeCounter-1) + ' on term: '+term['TVAL']+'","current_query":"'+ currentJSONQuery['RTYPE'] + '","current_term":"'+term['TVAL']+'","percent_done":"'+ str(percentDone) +'","is_complete":"False"}'
progressData.save()
tCounter += 1
#########################################&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&########################################&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&########################################&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
# (Form ID) Lookups
#########################################&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&########################################&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&########################################&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
elif rtype == "FORMID":
tCounter = 0;
logging.info("TimerD"+ " : " + str(time.clock()))
for term in currentJSONQuery['TERMS']:
#Now begin modifying the SQL query which each term of each individual query
#skip the term if the field was left blank
if term['TVAL'] != "" or term['QCODE'] == '4':
newQuery = None
print >>sys.stderr, str(formtype.form_set.all().filter(form_name__contains=term['TVAL']))
if term['T-ANDOR'] != 'or':#We can assume it is an AND like addition if it's anything but 'or'
print >> sys.stderr, "Is it working?"
#Now let's figure out the QCODE, e.g. contains, match exact etc.
if term['QCODE'] == '0': newQuery = queriedForms.filter(form_name__contains=term['TVAL']) #CONTAINS
elif term['QCODE'] == '1': newQuery = queriedForms.filter(form_name__icontains=term['TVAL']) #ICONTAINS
elif term['QCODE'] == '2': newQuery = queriedForms.filter(form_name__exact=term['TVAL'])#MATCHES EXACT
elif term['QCODE'] == '3': newQuery = queriedForms.exclude(form_name__contains=term['TVAL'])#EXCLUDES
elif term['QCODE'] == '4': newQuery = queriedForms.filter(form_name__isnull=True) #IS_NULL
queriedForms = newQuery
else:#Otherwise it's an OR statement
#Now let's figure out the QCODE, e.g. contains, match exact etc.
if term['QCODE'] == '0': newQuery = (formtype.form_set.all().filter(form_name__contains=term['TVAL']))#CONTAINS
elif term['QCODE'] == '1': newQuery = (formtype.form_set.all().filter(form_name__icontains=term['TVAL']))#ICONTAINS
elif term['QCODE'] == '2': newQuery = (formtype.form_set.all().filter(form_name__exact=term['TVAL']))#MATCHES EXACT
elif term['QCODE'] == '3': newQuery = (formtype.form_set.all().exclude(form_name__contains=term['TVAL']))#EXCLUDES
elif term['QCODE'] == '4': newQuery = (formtype.form_set.all().filter(form_name__isnull=True))#IS_NULL
queriedForms = (newQuery | queriedForms)
#We'll calculate percent by claiming finishing the query is at 50% when complete and at 20% when starting this section.
Qpercent = ((rtypeCounter-2) * (50.0/len(masterQueryJSON['query_list'])))
percentDone = 5 + Qpercent + (tCounter * (Qpercent / len(currentJSONQuery['TERMS'])) )
progressData.jsonString = '{"message":"Performing Query # '+ str(rtypeCounter-1) + ' on term: '+term['TVAL']+'","current_query":"'+ currentJSONQuery['RTYPE'] + '","current_term":"'+term['TVAL']+'","percent_done":"'+ str(percentDone) +'","is_complete":"False"}'
progressData.save()
tCounter += 1
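# Worked example of the progress estimate above (values are illustrative only):
# with 2 queries in masterQueryJSON['query_list'] and 4 terms in this query,
# Qpercent = (rtypeCounter - 2) * (50.0 / 2) = 25.0 for the second query, and after
# the third term percentDone = 5 + 25.0 + (2 * (25.0 / 4)) = 42.5 -- the bar crawls
# from ~5% toward the 50% mark reserved for the query phase.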
#If this is an AND query--attach it to the masterQuery as so.
if currentJSONQuery['Q-ANDOR'] == 'and':
masterQuery = (masterQuery & queriedForms)
#If it's an OR query, attach it to the masterQuery as an OR statement
elif currentJSONQuery['Q-ANDOR'] == 'or':
masterQuery = (masterQuery | queriedForms)
#Otherwise its the first, or a single query and should simply replace the masterQuery
#also set the count to this first query so we have one in case there is only one query
else:
masterQuery = queriedForms;
#Now make sure our final queried list has distinct values--merging querysets has a tendency to create duplicates
masterQuery = masterQuery.distinct()
#***RECYCLING BIN*** Make sure recycled forms are filtered out of the final query (they can potentially be re-added by the query engine above)
masterQuery = masterQuery.filter(flagged_for_deletion=False)
#Send a message to our AJAX request object
progressData.jsonString = '{"message":"Running raw SQL","current_query":"","current_term":"''","percent_done":"50","is_complete":"False"}'
progressData.save()
masterQueryCount = masterQuery.count()
#Send a message to our AJAX request object
progressData.jsonString = '{"message":"Loading Queried Forms & Sending generated stats now...","current_query":"","current_term":"''","percent_done":"60","is_complete":"False","stats":"none"}'
progressData.save()
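# A minimal sketch (not used above) of building the same progress payload with
# json.dumps instead of manual string concatenation, which avoids emitting broken
# JSON when a term contains quotes; field names mirror the hand-built string above:
#
#   import json
#   progressData.jsonString = json.dumps({
#       'message': 'Loading Queried Forms & Sending generated stats now...',
#       'current_query': '', 'current_term': '', 'percent_done': '60',
#       'is_complete': 'False', 'stats': 'none'})
#   progressData.save()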
#We need to check the # of rtypes in our header list now--if it's less than 5, then let's add from the ordered list
#We also need to make sure we aren't adding duplicates of the RTYPES, e.g. if we're looking for a match under "Object Number" and Object Number is already
#--in our sorted order-num list--let's not re-add it.
for attType in form_att_type_list:
print >>sys.stderr, "AttTypeList: " + str(attType)
matchfound = False;
for queryAttType in queryRTYPElist:
if attType[2] == queryAttType[2]:
matchfound = True
if matchfound == False and len(queryRTYPElist) < 5:
#let's arbitrarily add '100' to the order number so that our queries are definitely in front of these
queryRTYPElist.append((attType[0] + 100,attType[1],attType[2],attType[3]))
for q in queryRTYPElist:
print >>sys.stderr, "QTypeList: " + str(q)
#serializeTest = serializers.serialize("json", masterQuery)
queryCounter = 0
logging.info("TEST | |
for game, info in games.iteritems():
if game[0] != 'R':
round = 'play in'
continue
else:
round = game[0:2]
selected_winner = info['selected_0'] if ranking[info['selected_0']] < ranking[info['selected_1']] else info['selected_1']
if info['winner'] == selected_winner:
p = points[round]
else:
p = 0
score += p
possible += points[round]
return score, possible
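# `score`, `possible`, `points` and `ranking` are initialized above this excerpt.
# A plausible shape for `points` (an assumption, not taken from the source) is the
# standard doubling bracket scheme keyed by the round prefix used in the loop above:
#
#   points = {'play in': 0, 'R1': 1, 'R2': 2, 'R3': 4, 'R4': 8, 'R5': 16, 'R6': 32}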
def compute_single_ranker_records(season_range_low, season_range_high, rankers, rankings, mmad_teams, teams, games):
# set brackets
season_range = range(season_range_low, season_range_high + 1)
records = dict()
for season in season_range:
for ranker in rankers[season]:
records.setdefault(ranker, dict())
bracket = get_bracket(ranker, rankings[season], mmad_teams[season], teams)
set_selections(games[season], bracket)
score, possible = score_bracket(games[season], bracket)
records[ranker][season] = score
# print '{}: {} ({})'.format(ranker, score, possible)
delimiter = '|'
header = ['Ranker'] + [str(season) for season in season_range]
print delimiter.join(header)
for ranker, record in records.iteritems():
print_data = [ranker] + [record.get(season, '') for season in season_range]
print delimiter.join(map(str, print_data))
def compute_pair_ranker_records(season_range_low, season_range_high, votes, rankers, rankings, mmad_teams, teams, games):
# set brackets
season_range = range(season_range_low, season_range_high + 1)
records = dict()
for season in season_range:
print season
n_rankers = len(rankers[season])
n_pairs = n_rankers*(n_rankers - 1)/2 - n_rankers
ranker_pairs = set()
pair_n = 0
for ranker_1 in rankers[season]:
for ranker_2 in rankers[season]:
if ranker_1 != ranker_2:
ranker_pair = tuple(sorted([ranker_1, ranker_2]))
if ranker_pair not in ranker_pairs:
ranker_pairs.add(ranker_pair)
pair_n += 1
# print ' {} ({}/{}) {}'.format(ranker_pair, pair_n, n_pairs, n_rankers)
records.setdefault(ranker_pair, dict())
bracket = get_voter_bracket(ranker_pair, votes, rankings[season], mmad_teams[season], teams)
set_selections(games[season], bracket)
score, possible = score_bracket(games[season], bracket)
records[ranker_pair][season] = score
return records
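# A sketch of the same unordered-pair enumeration using itertools.combinations,
# which sidesteps the manual ranker_pairs bookkeeping above (illustrative only;
# the surrounding per-season scoring calls are assumed to stay unchanged):
def iter_ranker_pairs(rankers_for_season):
    """Yield each unordered (ranker_1, ranker_2) pair exactly once."""
    import itertools
    for ranker_pair in itertools.combinations(sorted(rankers_for_season), 2):
        yield ranker_pair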
def compute_random_ranker_score(votes, ranker_set, rankings, mmad_teams, teams, games):
bracket = get_voter_bracket(ranker_set, votes, rankings, mmad_teams, teams)
set_selections(games, bracket)
score, possible = score_bracket(games, bracket)
return score
def randomized_ranker_score(ranker, noise_magnitude, rankings, mmad_teams, teams, games):
bracket = get_randomized_bracket(ranker, noise_magnitude, rankings, mmad_teams, teams)
set_selections(games, bracket)
score, possible = score_bracket(games, bracket)
return score
def main():
data_root = 'NCAA Bracket 2017'
history_dir = os.path.join(data_root, 'march-machine-learning-mania-2017')
rankings = dict()
rankers = dict()
games = dict()
seeds = dict()
results = dict()
mmad_teams = dict()
season_range_low = 2010
season_range_high = 2015
season_range = range(season_range_low, season_range_high + 1)
teams = read_teams(history_dir)
##################
# read data
##################
# for season in season_range:
# rankings[season] = read_rankings(data_root, season)
# rankers[season] = get_rankers(rankings[season])
#
# games[season] = read_games(history_dir, season)
# seeds[season] = read_seeds(history_dir, season)
# results[season] = read_results(history_dir, season)
#
# # initialize
# initialize_bracket(games[season], seeds[season], teams)
#
# # set actual outcomes
# play_round('play in', games[season], results[season], teams)
#
# for r in [1, 2, 3, 4, 5, 6]:
# round = 'R' + str(r)
# update_bracket(round, games[season], teams)
# play_round(round, games[season], results[season], teams)
#
# # get the list of 64 teams in March Madness
# mmad_teams[season] = get_mmad_teams(games[season])
# create and score brackets
# compute_single_ranker_records(season_range_low, season_range_high, rankers, rankings, mmad_teams, teams, games)
# votes = 1
# records = compute_pair_ranker_records(season_range_low, season_range_high, votes, rankers, rankings, mmad_teams, teams, games)
#
# stats = dict()
# for ranker, record in records.iteritems():
# stats.setdefault(ranker, dict())
# data = [record[season] for season in season_range if record.get(season) is not None]
# stats[ranker]['n'] = len(data)
# stats[ranker]['min'] = np.min(data)
# stats[ranker]['max'] = np.max(data)
# stats[ranker]['mean'] = np.mean(data)
#
# # lets only consider rankers that have been around for the entire set of seasons and make them rankable by mean score
# select_rankers = list()
# for ranker, info in stats.iteritems():
# if info['n'] == len(season_range):
# select_rankers.append((info['mean'], ranker))
#
# delimiter = '|'
# header = ['Ranker 1', 'Ranker 2', 'mean score']
# print delimiter.join(header)
#
# for mean_score, ranker_pair in sorted(select_rankers):
# print_data = [ranker_pair[0], ranker_pair[1], mean_score]
# print delimiter.join(map(str, print_data))
####################################
# Run single ranker + gaussian noise
####################################
#
# select_rankers = ['SAG', 'DOK', 'BOB', 'WLK', 'KPK', 'PIG', 'STH', 'POM', 'SE', 'RTH', 'WIL', 'CPA', 'MAS', 'PGH',
# 'DC', 'CNG', 'MOR', 'DCI', 'LMC', 'KRA', 'WOB', 'DOL', 'CPR', 'BIH', 'RT', 'REW', 'WOL', 'NOL',
# 'COL', 'SPW', 'RTR', 'RPI']
#
# select_rankers_2 = ['SAG', 'DOK', 'BOB', 'WLK', 'KPK', 'PIG', 'STH', 'POM', 'SE', 'RTH', 'WIL', 'CPA', 'MAS', 'PGH',
# 'DC', 'CNG', 'MOR', 'DCI', 'LMC', 'KRA', 'WOB', 'DOL', 'CPR', 'BIH', 'RT', 'REW', 'WOL', 'NOL',
# 'COL', 'SPW', 'RTR']
#
# for noise_magnitude in [2, 4, 6, 8, 10, 12, 14]:
# n_trials = 1000
# delimiter = '|'
# # for ranker in select_rankers:
# stats = dict()
#
# for ranker in ['SAG', 'LMC', 'RPI']:
# stats.setdefault(ranker, dict())
# for season in season_range:
# stats[ranker].setdefault(season, dict())
# stats[ranker][season].setdefault('data', list())
# for t in range(n_trials):
# score = randomized_ranker_score(ranker, noise_magnitude, rankings[season], mmad_teams[season], teams, games[season])
# plain_score = compute_random_ranker_score(1, [ranker], rankings[season], mmad_teams[season], teams, games[season])
# stats[ranker][season]['data'].append(score - plain_score)
#
# for ranker, info in sorted(stats.iteritems()):
# p90 = list()
# for season, st in sorted(info.iteritems()):
# p90.append(np.percentile(st['data'], 90))
# # print noise_magnitude, ranker, season, np.median(st['data'])
# print noise_magnitude, ranker, np.mean(p90)
# with open(os.path.join(data_root, 'random_brackets.csv'), 'w') as output_file:
# delimiter = '|'
# header = ['votes', 'season', 'score', 'RPI score', 'lift over RPI']
# print >> output_file, delimiter.join(header)
#
# n_trials = 1000
# n_rankers = 2
# for votes in [n_rankers]:
# print 'votes:', votes
# for t in range(n_trials):
# #n_rankers = 2
# #ranker_set = sorted(np.random.choice(select_rankers, n_rankers, replace=False))
#
# season = season_range[np.random.randint(0, len(season_range))]
# #ranker_set = select_rankers[:n_rankers]
# ranker_set = ['KPK', 'PIG']
# rpi_score = compute_random_ranker_score(1, ['RPI'], rankings[season], mmad_teams[season], teams, games[season])
# sag_score = compute_random_ranker_score(1, ['SAG'], rankings[season], mmad_teams[season], teams, games[season])
# score = compute_random_ranker_score(votes, ranker_set, rankings[season], mmad_teams[season], teams, games[season])
# print_data = [votes, season] + [score, rpi_score, sag_score, (score - rpi_score)/float(rpi_score)]
#
# # season = season_range[np.random.randint(0, len(season_range))]
# # ranker_set = sorted(['DOK', 'RPI'])
# # rpi_score = compute_random_ranker_score(1, ['RPI'], rankings[season], mmad_teams[season], teams, games[season])
# # score = compute_random_ranker_score(votes, ranker_set, rankings[season], mmad_teams[season], teams, games[season])
# # print_data += [votes, season] + list(ranker_set) + [score, rpi_score, (score-rpi_score)/float(rpi_score)]
#
# print >> output_file, delimiter.join(map(str, print_data))
# if t % 500 == 0:
# print t, delimiter.join(map(str, print_data))
####################################
# Generate Random brackets
####################################
noise_magnitude = 4.0
trials = 100
season = 2017
delimiter = '|'
rankings[season] = read_rankings(data_root, season)
rankers[season] = get_rankers(rankings[season])
games[season] = read_games(history_dir, season)
seeds[season] = read_seeds(history_dir, season)
results[season] = read_results(history_dir, season)
# initialize
initialize_bracket(games[season], seeds[season], teams)
reverse_teams = dict([(team, id) for id, team in teams.iteritems()])
# print mmad_teams_2017
#
# print teams[1344], teams[1425], teams[1291], teams[1309], teams[1300], teams[1413], teams[1243], teams[1448]
# set actual outcomes of play-in games
# slots
# 2017, W11, W11a, W11b
# 2017, W16, W16a, W16b
# 2017, Y16, Y16a, Y16b
# 2017, Z11, Z11a, Z11b
# seeds
# 2017, W11a, 1344 - Providence
# 2017, W11b, 1425 - USC
# 2017, W16a, 1291 - Mt St Mary's
# 2017, W16b, 1309 - New Orleans
# 2017, Y16a, 1300 - NC Central
# 2017, Y16b, 1413 - UC Davis
# 2017, Z11a, 1243 - Kansas St
# 2017, Z11b, 1448 - Wake Forest
# results
# 2017, 0-2, 1344, 71, 1425, 75, N, 0 --> W11 winner = 1425
# 2017, 0-2, 1291, 67, 1309, 66, N, 0 --> W16 winner = 1291
# 2017, 0-1, 1300, 63, 1413, 67, N, 0 --> Y16 winner = 1413
# 2017, 0-1, 1243, 95, 1448, 88, N, 0 --> Z11 winner = 1243
# games[game(W11, e.g.)]['winner'] = team_id
# for g in ['W11', 'W16', 'Y16', 'Z11']:
# print g, teams[games[2017][g]['winner']]
# get the list of 64 teams in March Madness
mmad_teams[season] = get_mmad_teams(games[season])
select_rankers = ['SAG', 'DOK', 'WLK', 'KPK', 'PIG', 'STH', 'POM', 'RTH', 'WIL', 'MAS', 'PGH',
'DC', 'CNG', 'MOR', 'DCI', 'LMC', 'KRA', 'WOB', 'DOL', 'BIH', 'RT', 'REW', 'WOL', 'NOL',
'COL', 'SPW', 'RPI']
# select_rankers = ['RPI']
stats = {'final four': dict(),
'final two': dict(),
'champion': list()}
trial = -1
for ranker in select_rankers:
mascot_file = os.path.join(data_root, 'mascot_rank.5_0.0.csv')
# for mascot_file in glob.glob(os.path.join(data_root, 'mascot_rank.5_0.*.csv')):
# trial += 1
# print trial
mmad_teams_2017 = list()
mascot_noise = dict()
with open(mascot_file, 'r') as input_file:
for line in input_file:
team, rank, noise = line.split(delimiter)
noise = float(noise)
if team not in reverse_teams:
print team, team in reverse_teams
raise ValueError('ERROR - could not find team.')
mmad_teams_2017.append(reverse_teams[team])
mascot_noise[reverse_teams[team]] = noise
for | |
#!/usr/bin/env python
# -*- coding: utf-8 -*
# Copyright: [CUP] - See LICENSE for details.
# Authors: <NAME> (@mythmgn),
"""
:description:
Object related storage
"""
import os
import sys
import abc
import shutil
import ftplib
import logging
from cup import log
from cup import err
__all__ = [
'AFSObjectSystem', 'S3ObjectSystem', 'FTPObjectSystem',
'LocalObjectSystem'
]
class ObjectInterface(object):
"""
object interface, abstract class. Should not be used directly
"""
__metaclass__ = abc.ABCMeta
def __init__(self, config):
"""
:param config:
dict-like config which should contain at least:
{
'uri': 'xxxx',
'user': 'xxxx',  # or stands for accesskey
'password': '<PASSWORD>',  # or stands for secretkey
'extra': some_object
}
"""
self._config = config
def _validate_config(self, config, keys):
"""validate config if there's any missing items"""
ret = True
for key in keys:
if not key in config:
ret = False
return ret
@abc.abstractmethod
def put(self, dest, localfile):
"""
:param dest:
system path
:param localfile:
localfile
:return:
{
'returncode': 0 for success, others for failure,
'msg': 'if any'
}
"""
@abc.abstractmethod
def delete(self, path):
"""
delete a file
:param path:
object path
:return:
{
'returncode': 0 for success, others for failure,
'msg': 'if any'
}
"""
@abc.abstractmethod
def get(self, path, localpath):
"""
get the object into localpath
:return:
{
'returncode': 0 for success, others for failure,
'msg': 'if any'
}
"""
@abc.abstractmethod
def head(self, path):
"""
get the object info
:return:
{
'returncode': 0 for success, others for failure,
'msg': 'if any'
'objectinfo': {
'size': 1024, # at least have this one
'atime': 'xxxx.xx.xx', # optional
'mtime': 'xxxx.xx.xx', # optional
'ctime': 'xxxx.xx.xx', # optional
.......
}
}
"""
@abc.abstractmethod
def mkdir(self, path, recursive=True):
"""
mkdir dir of a path
:return:
{
'returncode': 0 for success, others for failure,
'msg': 'if any'
}
"""
@abc.abstractmethod
def rmdir(self, path, recursive=True):
"""rmdir of a path
:return:
{
'returncode': 0 for success, others for failure,
'msg': 'if any'
}
"""
class AFSObjectSystem(ObjectInterface):
"""
afs object
"""
def __init__(self, config):
"""
:param config:
should be compatible with cup.util.conf.Configure2Dict().get_dict().
Should be a dict-like object
"""
ObjectInterface.__init__(self, config)
def put(self, dest, localfile):
"""
:param dest:
system path
:param localfile:
localfile
:return:
{
'returncode': 0 for success, others for failure,
'msg': 'if any'
}
"""
def delete(self, path):
"""
delete a file
:param path:
object path
:return:
{
'returncode': 0 for success, others for failure,
'msg': 'if any'
}
"""
def get(self, path, localpath):
"""
get the object into localpath
:return:
{
'returncode': 0 for success, others for failure,
'msg': 'if any'
}
"""
def head(self, path):
"""
get the object info
:return:
{
'returncode': 0 for success, others for failure,
'msg': 'if any'
'objectinfo': {
size: 1024,
.......
}
}
"""
def mkdir(self, path):
"""
mkdir dir of a path
:return:
{
'returncode': 0 for success, others for failure,
'msg': 'if any'
'objectinfo': {
size: 1024,
.......
}
}
"""
def rmdir(self, path):
"""rmdir of a path"""
# pylint: disable=R0902
# need to have so many
class S3ObjectSystem(ObjectInterface):
"""
s3 object system
"""
def __init__(self, config):
"""
:param config:
should be compatible with cup.util.conf.Configure2Dict().get_dict().
Should be a dict-like object
:raise:
cup.err.ConfigError if there's any config item missing
"""
ObjectInterface.__init__(self, config)
required_keys = ['ak', 'sk', 'endpoint', 'bucket']
if not self._validate_config(self._config, required_keys):
raise err.ConfigError(str(required_keys))
self._ak = self._config['ak']
self._sk = self._config['sk']
self._endpoint = self._config['endpoint']
self._bucket = self._config['bucket']
import boto3
from botocore import exceptions
from botocore import client as coreclient
self._s3_config = coreclient.Config(
signature_version='s3v4',
s3={'addressing_style': 'path'}
)
logging.getLogger('boto3').setLevel(logging.INFO)
logging.getLogger('botocore').setLevel(logging.INFO)
logging.getLogger('s3transfer').setLevel(logging.INFO)
log.info('to connect to boto3')
self.__s3conn = boto3.client(
's3',
aws_access_key_id=self._ak,
aws_secret_access_key=self._sk,
endpoint_url=self._endpoint,
# region_name=conf_dict['region_name'],
config=self._s3_config
)
self._exception = exceptions.ClientError
def put(self, dest, localfile):
"""
:param dest:
system path
:param localfile:
localfile
:return:
{
'returncode': 0 for success, others for failure,
'msg': 'if any'
}
"""
ret = {
'returncode': -1,
'msg': 'failed to put object'
}
with open(localfile, 'rb') as fhandle:
try:
self.__s3conn.put_object(
Key='{0}'.format(dest),
Bucket=self._bucket,
Body=fhandle
)
ret['returncode'] = 0
ret['msg'] = 'success'
except self._exception as error:
ret['returncode'] = -1
ret['msg'] = str(error)
return ret
def delete(self, path):
"""
delete a file
:param path:
object path
:return:
{
'returncode': 0 for success, others for failure,
'msg': 'if any'
}
"""
ret = {
'returncode': 0,
'msg': 'success'
}
try:
self.__s3conn.delete_object(
Key='{0}'.format(path),
Bucket=self._bucket
)
except self._exception as error:
ret['returncode'] = -1
ret['msg'] = str(error)
return ret
def get(self, path, localpath):
"""
get the object into localpath
:return:
{
'returncode': 0 for success, others for failure,
'msg': 'if any'
}
"""
ret = {
'returncode': 0,
'msg': 'success'
}
try:
with open(localpath, 'wb+') as fhandle:
resp = self.__s3conn.get_object(
Key='{0}'.format(path),
Bucket=self._bucket
)
fhandle.write(resp['Body'].read())
except Exception as error:
ret['returncode'] = -1
ret['msg'] = str(error)
return ret
def head(self, path):
"""
get the object info
:return:
{
'returncode': 0 for success, others for failure,
'msg': 'if any'
'objectinfo': {
size: 1024,
.......
}
}
"""
ret = {
'returncode': -1,
'msg': 'failed to get objectinfo'
}
try:
resp = self.__s3conn.head_object(
Key='{0}'.format(path),
Bucket=self._bucket
)
ret['objectinfo'] = resp
ret['returncode'] = 0
ret['msg'] = 'success'
except self._exception as error:
ret['returncode'] = -1
ret['msg'] = str(error)
return ret
def mkdir(self, path):
"""
mkdir dir of a path
:return:
{
'returncode': 0 for success, others for failure,
'msg': 'if any'
'objectinfo': {
size: 1024,
.......
}
}
"""
raise err.NotImplementedYet('mkdir not supported for S3ObjectSystem')
def rmdir(self, path):
"""rmdir of a path"""
raise err.NotImplementedYet('rmdir not supported for S3ObjectSystem')
def create_bucket(self, bucket_name):
"""create bucket"""
ret = {
'returncode': -1,
'msg': 'failed to create bucket'
}
try:
resp = self.__s3conn.create_bucket(
Bucket=bucket_name
)
ret['returncode'] = 0
ret['msg'] = 'success'
except self._exception as error:
ret['returncode'] = -1
ret['msg'] = str(error)
return ret
def head_bucket(self, bucket_name):
"""create bucket"""
ret = {
'returncode': -1,
'msg': 'failed to create bucket',
'bucket_info': None
}
try:
resp = self.__s3conn.head_bucket(
Bucket=bucket_name
)
ret['returncode'] = 0
ret['msg'] = 'success'
ret['bucket_info'] = resp
except self._exception as error:
ret['returncode'] = -1
ret['msg'] = str(error)
return ret
def delete_bucket(self, bucket_name, forcely=False):
"""delete bucket
:param forcely:
if forcely is True, the bucket will be delete no matter it has
objects inside. Otherwise, you have to delete items inside,
then delete the bucket
"""
ret = {
'returncode': -1,
'msg': 'failed to delete bucket'
}
try:
if forcely:
resp = self.head_bucket(bucket_name)
res = self.__s3conn.list_objects(Bucket=bucket_name)
if 'Contents' in res:
for obj in res['Contents']:
try:
self.__s3conn.delete_object(
Bucket=bucket_name,
Key=obj['Key']
)
except Exception as error:
ret['msg'] = 'failed to delete obj in bucket'
return ret
resp = self.__s3conn.delete_bucket(
Bucket=bucket_name
)
ret['returncode'] = 0
ret['msg'] = 'success'
except self._exception as error:
ret['returncode'] = -1
ret['msg'] = str(error)
return ret
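# A usage sketch for S3ObjectSystem (endpoint, bucket and key names below are
# placeholders, not values from this project):
#
#   s3 = S3ObjectSystem({
#       'ak': 'ACCESS_KEY', 'sk': 'SECRET_KEY',
#       'endpoint': 'http://s3.example.com', 'bucket': 'my-bucket',
#   })
#   print(s3.put('backups/etc.tar.gz', '/tmp/etc.tar.gz'))
#   print(s3.head('backups/etc.tar.gz'))
#   print(s3.get('backups/etc.tar.gz', '/tmp/etc.copy.tar.gz'))
#
# Each call returns a dict with 'returncode' (0 on success) and 'msg'. Note that
# list_objects in delete_bucket(forcely=True) returns at most 1000 keys per call,
# so very large buckets may need paginated deletion.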
class FTPObjectSystem(ObjectInterface):
"""
ftp object system
"""
def __init__(self, config):
"""
:param config:
{
"uri":"ftp://host:port",
"user":"username",
"password":"password",
"extra":None //timeout:30s
}
:raise:
cup.err.ConfigError if there's any config item missing
"""
ObjectInterface.__init__(self, config)
required_keys = ['uri', 'user', 'password']
if not self._validate_config(self._config, required_keys):
raise err.ConfigError(str(required_keys))
self._uri = self._config['uri']
self._user = self._config['user']
self._passwd = self._config['password']
self._extra = self._config['extra']
self._default_timeout = 30
if self._extra is not None and isinstance(self._config['extra'], int):
    self._default_timeout = self._extra
log.info('to connect to ftp server')
self._ftp_con = ftplib.FTP()
self._host = self._uri.split(':')[1][2:]
self._port = ftplib.FTP_PORT
if len(self._uri.split(':')) > 2 and len(self._uri.split(':')[2]) > 0:
    self._port = int(self._uri.split(':')[2])
self._ftp_con.connect(self._host, self._port, self._default_timeout)
self._ftp_con.login(self._user, self._passwd)
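# Example of how the uri is split in __init__ (values are illustrative):
#   'ftp://192.0.2.10:2121'.split(':') -> ['ftp', '//192.0.2.10', '2121']
#   so self._host == '192.0.2.10' and self._port == 2121; with no explicit port
#   ('ftp://192.0.2.10') the ftplib.FTP_PORT default of 21 is kept.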
def __del__(self):
"""release connect"""
self._ftp_con.quit()
def put(self, dest, localfile):
"""
:param dest:
ftp path
:param localfile:
localfile
"""
ret = {
'returncode': 0,
'msg': 'success'
}
src_path = self._ftp_con.pwd()
file_name = localfile
if "/" in localfile:
file_name = localfile.split('/')[-1]
with open(localfile, 'rb') as fhandle:
try:
self._ftp_con.cwd(dest)
ftp_cmd = 'STOR ' + file_name
self._ftp_con.storbinary(ftp_cmd, fhandle)
except Exception as error:
ret['returncode'] = -1
ret['msg'] = 'failed to put:{}'.format(error)
self._ftp_con.cwd(src_path)
return ret
def delete(self, path):
"""delete file"""
ret = {
'returncode': 0,
'msg': 'success'
}
try:
self._ftp_con.delete(path)
except Exception as error:
ret['returncode'] = -1
ret['msg'] = str(error)
return ret
def get(self, path, localpath):
"""
get a file into localpath
"""
ret = {
'returncode': 0,
'msg': 'success'
}
if localpath.endswith('/'):
localpath += path.split('/')[-1]
try:
with open(localpath, 'wb+') as fhandle:
ftp_cmd = 'RETR {0}'.format(path)
resp = self._ftp_con.retrbinary(ftp_cmd, fhandle.write)
except Exception as error:
ret['returncode'] = -1
ret['msg'] = str(error)
return ret
def head(self, path):
"""
get the file info
| |
until you delete them. '
'You can delete functions from memory by selecting it in the jumps window and '
'pressing the delete key. '
'Adding a comment to the very top of the function (or the jump to the function) using the comments text box will '
'cause it to become labeled as such within the jumps window. '
'The mapped jumps are only mapped by a basic "distance from UNKNOWN/NOT AN INSTRUCTION" algorithm, so there may be some '
'mis-decoded jumps from sections that aren\'t instructions, and very few unmapped actual jumps.'
])
imm_id = '$'
if disassembler_loaded():
imm_id = disasm.immediate_identifier
message_3 = '\n'.join([
'----Hack Textbox Hotkeys/Info----',
'Ctrl+F: Follow jump/branch at text insert cursor',
'Ctrl+G: Find all jumps to function enveloping text insert cursor',
'Ctrl+R: Restore multi-line selection or line at text insert cursor to original code',
'Ctrl+Shift+R: Restore whole function (only works in disassembly view)',
'Ctrl+B: Replace current line with "BEQ R0, R0, {}"'.format(imm_id),
'Ctrl+O: Optimise the current function. This will remove redundant pointers and branches, freeing up room for '
'your own logic. You may toggle an option to group NOPs at the cursor location when optimising.',
'Ctrl+P: Generate a script for PJ64d to patch all assembly changes made during this session.',
'Shift+Delete or Shift+Backspace: Remove line of text at text insert cursor',
'Return: Move to end of next line',
'Shift+Return: Move to end of previous line',
'Ctrl+Comma: Undo',
'Ctrl+Fullstop: Redo',
'',
'For the sake of a 100% accurate decoding process and performance, no pseudo-instructions '
'were able to be implemented.',
'',
'Keep in mind when optimising a function, any comments you had will be moved with the instruction it was aligned to. '
'But, if you want to Ctrl+Shift+R and restore the entire function, your comments will no longer be aligned to the '
'instruction they were intended for.',
'',
'When making a multi-line selection, you don\'t need to worry about highlighting '
'the entirety of the start and end column, that will be done automatically when you attempt '
'to action your selection.',
'',
'When pasting in multiple lines of code, it will fit it to the correct lines in the text box. '
'If the amount of code you paste extends beyond the bottom of the text box, it will be cut to fit it.',
'',
'Any blank line will be replaced with NOP. Sometimes it may not happen immediately, but this '
'is not a problem.',
'',
'Any NOP instruction will automatically be typed over if you attempt to type on that line.',
'',
'Return, Shift+Return, Undo and Redo can be used on the comments '
'textbox as well as the hack textbox.',
'',
'The hacked rom text box and comments text box have separate undo/redo buffers. '
'Both buffers can hold up to 5,000 frames each.',
])
simpledialog.messagebox._show('Help', message)
simpledialog.messagebox._show('Help (continued)', message_2)
simpledialog.messagebox._show('Help (final)', message_3)
opcodes_win = codes_list_box = None
def opcodes_list():
global opcodes_win, codes_list_box
if opcodes_win:
opcodes_win.deiconify()
opcodes_win.focus_force()
return
def opcodes_win_equals_none():
global opcodes_win
opcodes_win.destroy()
opcodes_win = None
opcodes_win = tk.Tk()
opcodes_win.title('Opcodes list')
opcodes_win.geometry('{0}x800+50+50'.format(650+scrollBarWidth))
scrollbar = tk.Scrollbar(opcodes_win)
scrollbar.pack(side=tk.RIGHT, fill=tk.Y)
codes_list_box = tk.Listbox(opcodes_win, font=('Courier', 10),yscrollcommand=scrollbar.set)
[codes_list_box.insert(tk.END, i + ': ' + DOCUMENTATION[i]) for i in DOCUMENTATION]
codes_list_box.place(x=5, y=5, width=640, height=790)
opcodes_win.bind('<Escape>', lambda _: opcodes_win.destroy())
change_colours()
opcodes_win.protocol('WM_DELETE_WINDOW', opcodes_win_equals_none)
opcodes_win.resizable(False, False)
scrollbar.config(command=codes_list_box.yview)
opcodes_win.mainloop()
def about_box():
def callback(event):
webbrowser.open_new('https://github.com/mikeryan/n64dev/tree/master/docs/n64ops')
about = tk.Tk()
about.title('About')
message = tk.Label(about, text='Made by <NAME>'
'\nDisassembler created using data from:')
link = tk.Label(about, text='https://github.com/mikeryan/n64dev/tree/master/docs/n64ops', fg="blue", cursor="hand2")
message.pack()
link.pack(side=tk.RIGHT)
link.bind("<Button-1>", callback)
about.focus_force()
about.bind('<FocusOut>', lambda e: about.destroy())
about.resizable(False, False)
about.mainloop()
jumps_window = None
function_list_box = None
jump_list_box = None
jumps_label = None
functions_label = None
jumps_comment_checkbox = jumps_hack_checkbox = None
auto_label_button = None
function_select = ''
jumps_displaying = {}
function_curselect = 0
jumps_curselect = 0
def find_jumps(just_window=False):
global function_select, jumps_window, function_list_box, jump_list_box, jumps_label, functions_label, \
jumps_comment_checkbox, jumps_hack_checkbox, function_curselect, jumps_curselect, auto_label_button
if not disassembler_loaded():
return
if just_window:
cursor, line, column = '1.0', 1, 0
else:
cursor, line, column = get_cursor(hack_file_text_box)
navi = (line - 1) + navigation
if not just_window:
jumps, function_start, function_end = disasm.find_jumps(navi)
else:
jumps, function_start, function_end = 0,0,0
if not jumps_window:
jumps_window = tk.Tk()
jumps_window.title('Jumps to Functions')
jumps_window.geometry('{}x{}'.format(jumps_win_w,jumps_win_h))
jumps_window.bind('<F5>', lambda e: toggle_address_mode())
function_list_box = tk.Listbox(jumps_window, font=('Courier', main_font_size))
jump_list_box = tk.Listbox(jumps_window, font=('Courier', main_font_size))
def hack_checkbox_callback():
if app_config['jumps_auto_focus_hack']:
app_config['jumps_auto_focus_hack'] = False
jumps_hack_checkbox.deselect()
else:
app_config['jumps_auto_focus_comments'] = False
app_config['jumps_auto_focus_hack'] = True
jumps_hack_checkbox.select()
jumps_comment_checkbox.deselect()
save_config()
def comments_checkbox_callback():
if app_config['jumps_auto_focus_comments']:
app_config['jumps_auto_focus_comments'] = False
jumps_comment_checkbox.deselect()
else:
app_config['jumps_auto_focus_hack'] = False
app_config['jumps_auto_focus_comments'] = True
jumps_comment_checkbox.select()
jumps_hack_checkbox.deselect()
save_config()
jumps_hack_checkbox = tk.Checkbutton(jumps_window, text='Auto-focus hack textbox',
command=lambda: window.after(1, lambda: hack_checkbox_callback()))
jumps_comment_checkbox = tk.Checkbutton(jumps_window, text='Auto-focus comments textbox',
command=lambda: window.after(1, lambda: comments_checkbox_callback()))
jumps_hack_checkbox.place(x=100, y=6)
jumps_comment_checkbox.place(x=294, y=6)
if app_config['jumps_auto_focus_comments']:
jumps_comment_checkbox.select()
elif app_config['jumps_auto_focus_hack']:
jumps_hack_checkbox.select()
def function_list_callback():
global function_select, function_curselect, jumps_curselect
curselect = function_list_box.curselection()
if not curselect:
return
jumps_curselect = 0
function_curselect = curselect[0]
apply_hack_changes()
apply_comment_changes()
key = function_list_box.get(curselect[0])
function_select = key
increment = 0 if not disasm.game_address_mode else -(disasm.game_offset >> 2)
start_address = deci(key[:8]) >> 2
reset_target()
widget = None
if app_config['jumps_auto_focus_comments']:
widget = comments_text_box
elif app_config['jumps_auto_focus_hack']:
widget = hack_file_text_box
if_disasm = disasm.game_address_mode
navigate_to(start_address + increment, center=True, widget=widget, region_treatment=if_disasm, region_override=if_disasm)
jump_list_box.delete(0, tk.END)
for address in jumps_displaying[key]:
jump_list_box.insert(tk.END, address)
if widget:
widget.focus_force()
def auto_label_callback():
curselect = function_list_box.curselection()
if not curselect:
return
key = function_list_box.get(curselect[0])
new_comment = simpledialog.askstring('', 'Input the new comment for all jumps to {}'.format(key[:8]))
if not new_comment:
return
new_comment = new_comment.replace('\n','')
increment = 0 if not disasm.game_address_mode else -disasm.game_offset
func_addr = disasm.region_align(deci(key[:8]) + increment) >> 2
j_key = str(func_addr)
if j_key in disasm.jumps_to:
for jump_addr in disasm.jumps_to[j_key]:
if str(jump_addr) in disasm.comments:
_new_comment = disasm.comments[str(jump_addr)] + ' | ' + new_comment
else:
_new_comment = new_comment
disasm.comments[str(jump_addr)] = _new_comment[:comments_max_chars]
for i, entry in enumerate(jumps_displaying[key]):
if len(entry) > 8:
_new_comment = entry + ' | ' + new_comment
else:
_new_comment = entry + ' ' + new_comment
jumps_displaying[key][i] = _new_comment[:comments_max_chars + 9]
function_list_callback()
def function_list_key(event):
if event.keysym == 'Delete':
curselect = function_list_box.curselection()
if curselect:
try:
del jumps_displaying[function_list_box.get(curselect[0])]
save_config()
finally:
function_list_box.delete(curselect[0])
jump_list_box.delete(0, tk.END)
def jump_list_callback():
global jumps_curselect
curselect = jump_list_box.curselection()
if not curselect:
return
jumps_curselect = curselect[0]
apply_hack_changes()
apply_comment_changes()
increment = 0 if not disasm.game_address_mode else -(disasm.game_offset >> 2)
address = jump_list_box.get(curselect[0])[:8]
navi = (deci(address) >> 2) + increment
reset_target()
widget = None
if app_config['jumps_auto_focus_comments']:
widget = comments_text_box
elif app_config['jumps_auto_focus_hack']:
widget = hack_file_text_box
if_disasm = disasm.game_address_mode
navigate_to(navi, center=True, widget=widget, region_treatment=if_disasm, region_override=if_disasm)
if widget:
widget.focus_force()
auto_label_button = tk.Button(jumps_window, text='Label all jumps to selected function', command=auto_label_callback)
function_list_box.bind('<<ListboxSelect>>', lambda _: function_list_callback())
jump_list_box.bind('<<ListboxSelect>>', lambda _: jump_list_callback())
function_list_box.bind('<Key>', function_list_key)
for key in jumps_displaying:
function_list_box.insert(tk.END,key)
functions_label = tk.Label(jumps_window, text='Functions')
jumps_label = tk.Label(jumps_window, text='Jumps to Function')
function_list_box.place(x=func_list_x,y=func_list_y,width=func_list_w,height=func_list_h)
jump_list_box.place(x=jumps_list_x,y=jumps_list_y,width=jumps_list_w,height=jumps_list_h)
functions_label.place(x=6,y=7)
jumps_label.place(x=jumps_list_x,y=jumps_label_y+1)
auto_label_button.place(x=150, y=jumps_label_y-1)
# window.after(100, lambda: print(geometry(auto_label_button.winfo_geometry())))
# window.update_idletasks()
# print(geometry(auto_label_button.winfo_geometry()))
def jumps_window_equals_none():
global jumps_window, function_select, jumps_curselect, function_curselect, auto_label_button
jumps_curselect = function_curselect = 0
jumps_window.destroy()
function_select = ''
jumps_window = auto_label_button = None
jumps_window.protocol('WM_DELETE_WINDOW', jumps_window_equals_none)
jumps_window.bind('<Escape>', lambda e: jumps_window_equals_none())
jumps_window.bind('<F1>', lambda e: view_comments())
jumps_window.bind('<F3>', lambda e: toggle_base_file())
jumps_window.bind('<F4>', lambda e: navigation_prompt(root=jumps_window))
jumps_window.bind('<F5>', lambda e: toggle_address_mode())
jumps_window.bind('<Control-s>', lambda e: save_changes_to_file())
jumps_window.bind('<Control-S>', lambda e: save_changes_to_file())
jumps_window.focus_force()
jumps_window.resizable(False, False)
jumps_window.after(2, lambda: change_colours())
jumps_window.after(1, lambda: jumps_window.mainloop())
elif jumps or just_window:
jumps_window.deiconify()
jumps_window.focus_force()
if just_window:
return
key = extend_zeroes(hexi(function_start << 2),8) + ' - ' + extend_zeroes(hexi(function_end << 2),8)
key_not_in_jumps_displaying = True
config = jumps_displaying.copy()
increment = -disasm.game_offset if disasm.game_address_mode else 0
try:
comment_key = str((deci(key[:8]) if not disasm.game_address_mode else (disasm.region_unalign(deci(key[:8])) + increment)) >> 2)
except ValueError as e:
# User attempting to get jumps from the top of the header
return
is_in_comments = comment_key in disasm.comments
was_in = False
for display_key in config:
key_not_in_jumps_displaying = key_not_in_jumps_displaying and display_key[:19] != key
if not key_not_in_jumps_displaying:
if is_in_comments:
new_key = key + ' ' + disasm.comments[comment_key]
else:
new_key = key
if new_key != display_key:
del jumps_displaying[display_key]
key_not_in_jumps_displaying = was_in = True
break
if is_in_comments:
key += ' {}'.format(disasm.comments[comment_key])
if key_not_in_jumps_displaying and jumps:
for i in range(len(jumps)):
comment_key = str((deci(jumps[i]) + increment) >> 2)
if comment_key in disasm.comments:
jumps[i] += ' ' + disasm.comments[comment_key]
jumps_displaying[key] = jumps
save_config()
if jumps_window and not was_in:
function_list_box.insert(tk.END, key)
comments_window = None
comments_list_box = None
filter_text = None
comments_filter_label | |
0.00000000004 * math.cos(4.09092913231 + 103498.66000202828 * self.t)
Y1 += 0.00000000005 * math.cos(0.06895267430 + 52396.4627430707 * self.t)
Y1 += 0.00000000004 * math.cos(2.56354623790 + 104505.63519426509 * self.t)
Y1 += 0.00000000005 * math.cos(5.58769879303 + 23754.46293121869 * self.t)
Y1 += 0.00000000005 * math.cos(5.71030733431 + 123201.08393375449 * self.t)
Y1 += 0.00000000004 * math.cos(6.20993923204 + 54087.2495838491 * self.t)
Y1 += 0.00000000004 * math.cos(3.63799815383 + 637.24008950771 * self.t)
Y1 += 0.00000000004 * math.cos(0.59460014711 + 28791.7631137333 * self.t)
Y1 += 0.00000000004 * math.cos(1.92743223051 + 23384.5308043821 * self.t)
Y1 += 0.00000000005 * math.cos(4.52669072840 + 53029.2464823839 * self.t)
Y1 += 0.00000000005 * math.cos(0.25501672186 + 18848.98373249069 * self.t)
Y1 += 0.00000000004 * math.cos(1.81268637965 + 26824.02347258949 * self.t)
Y1 += 0.00000000004 * math.cos(0.70934599797 + 25352.2704455259 * self.t)
Y1 += 0.00000000004 * math.cos(3.53975344268 + 133882.3344703199 * self.t)
Y1 += 0.00000000004 * math.cos(0.87353062113 + 132658.51662954128 * self.t)
Y1 += 0.00000000004 * math.cos(3.22384434044 + 53772.23654291649 * self.t)
Y1 += 0.00000000004 * math.cos(2.01166034864 + 183571.16555011149 * self.t)
Y1 += 0.00000000004 * math.cos(5.89521734601 + 167850.32676523308 * self.t)
Y1 += 0.00000000005 * math.cos(5.28708364201 + 145204.99856862429 * self.t)
Y1 += 0.00000000004 * math.cos(1.34862695530 + 27171.2271913513 * self.t)
Y1 += 0.00000000004 * math.cos(1.17340542232 + 25005.0667267641 * self.t)
Y1 += 0.00000000004 * math.cos(3.00220929293 + 50483.8844311295 * self.t)
Y1 += 0.00000000004 * math.cos(0.14217969958 + 78786.53066029989 * self.t)
Y1 += 0.00000000004 * math.cos(3.41518083973 + 949.4194264533 * self.t)
Y1 += 0.00000000005 * math.cos(0.77055922503 + 45892.48661567349 * self.t)
Y1 += 0.00000000004 * math.cos(3.51962830585 + 1795.5022612045 * self.t)
Y1 += 0.00000000005 * math.cos(1.84426394169 + 26709.8907598969 * self.t)
Y1 += 0.00000000005 * math.cos(0.67776843593 + 25466.4031582185 * self.t)
Y1 += 0.00000000004 * math.cos(4.27093115110 + 52065.84377941249 * self.t)
Y1 += 0.00000000004 * math.cos(0.10877025827 + 2222.1004520805 * self.t)
Y1 += 0.00000000004 * math.cos(1.11438053331 + 78270.58180110609 * self.t)
Y1 += 0.00000000004 * math.cos(5.94116454026 + 25653.94758621389 * self.t)
Y1 += 0.00000000004 * math.cos(5.36329329492 + 49842.36607279289 * self.t)
Y1 += 0.00000000004 * math.cos(4.90307551419 + 143005.91122533729 * self.t)
Y1 += 0.00000000004 * math.cos(1.70672949321 + 78800.75775430149 * self.t)
Y1 += 0.00000000004 * math.cos(0.14272238686 + 65697.31390725628 * self.t)
Y1 += 0.00000000003 * math.cos(1.76268636575 + 52195.23222656469 * self.t)
Y1 += 0.00000000004 * math.cos(3.45396433025 + 130459.42928625426 * self.t)
Y1 += 0.00000000004 * math.cos(5.54668248593 + 24491.47288180609 * self.t)
Y1 += 0.00000000004 * math.cos(2.57630900199 + 3178.38960805111 * self.t)
Y1 += 0.00000000004 * math.cos(3.69827148826 + 220.16882495529 * self.t)
Y1 += 0.00000000004 * math.cos(3.39420749098 + 52250.8316991992 * self.t)
Y1 += 0.00000000003 * math.cos(2.73132197327 + 78160.86046798748 * self.t)
Y1 += 0.00000000004 * math.cos(2.89190828519 + 87367.86023632369 * self.t)
Y1 += 0.00000000004 * math.cos(0.15780189015 + 130435.87818999648 * self.t)
Y1 += 0.00000000004 * math.cos(0.14307301866 + 53234.94439585409 * self.t)
Y1 += 0.00000000004 * math.cos(0.06711300408 + 26575.7817103119 * self.t)
Y1 += 0.00000000004 * math.cos(2.45491937354 + 25600.5122078035 * self.t)
Y1 += 0.00000000003 * math.cos(5.37884708213 + 1486.2239385487 * self.t)
Y1 += 0.00000000005 * math.cos(3.35368845011 + 86144.04239554508 * self.t)
Y1 += 0.00000000004 * math.cos(1.34102185895 + 32132.3755404331 * self.t)
Y1 += 0.00000000004 * math.cos(1.04693916592 + 66653.40128383189 * self.t)
Y1 += 0.00000000003 * math.cos(0.35934615572 + 52310.1591502169 * self.t)
Y1 += 0.00000000003 * math.cos(2.24307597389 + 45290.90021070109 * self.t)
Y1 += 0.00000000003 * math.cos(5.05641378963 + 70383.86408886709 * self.t)
Y1 += 0.00000000003 * math.cos(2.84685809679 + 52252.31617190749 * self.t)
Y1 += 0.00000000005 * math.cos(0.41148055834 + 52808.83383994509 * self.t)
Y1 += 0.00000000003 * math.cos(0.89098898576 + 1588.82907780029 * self.t)
Y1 += 0.00000000004 * math.cos(4.17927437554 + 58857.2749540315 * self.t)
Y1 += 0.00000000004 * math.cos(3.20092050693 + 51951.70530492999 * self.t)
Y1 += 0.00000000003 * math.cos(2.40144059169 + 50264.8506174147 * self.t)
Y1 += 0.00000000004 * math.cos(1.17285198222 + 20043.9183776823 * self.t)
Y1 += 0.00000000004 * math.cos(3.78297088323 + 128320.99566497609 * self.t)
Y1 += 0.00000000003 * math.cos(3.96547374476 + 25986.18444079209 * self.t)
Y1 += 0.00000000003 * math.cos(4.83974394004 + 26190.1094773233 * self.t)
Y1 += 0.00000000004 * math.cos(1.45951607601 + 136722.83537534589 * self.t)
Y1 += 0.00000000003 * math.cos(2.84255023310 + 65717.47130312308 * self.t)
Y1 += 0.00000000003 * math.cos(2.62501893240 + 181026.49291321907 * self.t)
Y1 += 0.00000000003 * math.cos(4.29835303229 + 51535.66517935089 * self.t)
Y1 += 0.00000000004 * math.cos(0.32726328089 + 129799.86223904048 * self.t)
Y1 += 0.00000000003 * math.cos(4.92235874244 + 26073.91986505609 * self.t)
Y1 += 0.00000000003 * math.cos(3.88285894236 + 26102.3740530593 * self.t)
Y1 += 0.00000000003 * math.cos(4.97897799737 + 52168.44891866409 * self.t)
Y1 += 0.00000000004 * math.cos(2.55090678746 + 52155.8927047651 * self.t)
Y1 += 0.00000000004 * math.cos(1.29677266690 + 37698.6989174319 * self.t)
Y1 += 0.00000000003 * math.cos(0.56868525863 + 51109.06698847489 * self.t)
Y1 += 0.00000000003 * math.cos(1.87341446779 + 26247.4486938499 * self.t)
Y1 += 0.00000000003 * math.cos(0.64861790983 + 25928.8452242655 * self.t)
Y1 += 0.00000000004 * math.cos(0.30979285060 + 468.20880783791 * self.t)
Y1 += 0.00000000004 * math.cos(3.27872430484 + 132028.83242063828 * self.t)
Y1 += 0.00000000004 * math.cos(5.61336389985 + 25661.06113321469 * self.t)
Y1 += 0.00000000003 * math.cos(0.47396259409 + 2703.3723371921 * self.t)
Y1 += 0.00000000004 * math.cos(5.86092912592 + 166740.94821313968 * self.t)
Y1 += 0.00000000004 * math.cos(3.23572058854 + 59414.23805726489 * self.t)
Y1 += 0.00000000003 * math.cos(4.82502734269 + 61560.8911087071 * self.t)
Y1 += 0.00000000003 * math.cos(0.00693087411 + 79316.22162539448 * self.t)
Y1 += 0.00000000003 * math.cos(3.05784222447 + 130866.35771623049 * self.t)
Y1 += 0.00000000003 * math.cos(0.12277419735 + 79219.06534884768 * self.t)
Y1 += 0.00000000003 * math.cos(2.46452817521 + 24506.18761077469 * self.t)
Y1 += 0.00000000003 * math.cos(0.66136657530 + 51868.4924796623 * self.t)
Y1 += 0.00000000003 * math.cos(2.85883702115 + 19202.50943415989 * self.t)
Y1 += 0.00000000003 * math.cos(6.14048429792 + 40852.8983673605 * self.t)
Y1 += 0.00000000003 * math.cos(2.81485517413 + 116783.89903417809 * self.t)
Y1 += 0.00000000003 * math.cos(5.64866640193 + 105460.74730090669 * self.t)
Y1 += 0.00000000003 * math.cos(0.28115071068 + 48835.43767393209 * self.t)
Y1 += 0.00000000003 * math.cos(4.12067030266 + 114.68310616872 * self.t)
Y1 += 0.00000000003 * math.cos(4.90383671624 + 76785.08666814168 * self.t)
Y1 += 0.00000000003 * math.cos(0.85813944126 + 81592.08889848628 * self.t)
Y1 += 0.00000000003 * math.cos(2.81772139589 + 95247.46175469569 * self.t)
Y1 += 0.00000000003 * math.cos(4.69086415103 + 52171.68113030689 * self.t)
Y1 += 0.00000000004 * math.cos(5.11822399207 + 78110.17443172129 * self.t)
Y1 += 0.00000000003 * math.cos(2.45643813863 + 68050.18006102808 * self.t)
Y1 += 0.00000000003 * math.cos(5.59679660207 + 38653.81102407349 * self.t)
Y1 += 0.00000000003 * math.cos(1.32662236669 + 2219.0009216703 * self.t)
Y1 += 0.00000000003 * math.cos(5.92341361550 + 52179.4438010229 * self.t)
Y1 += 0.00000000003 * math.cos(4.15087523006 + 52286.25642185129 * self.t)
Y1 += 0.00000000003 * math.cos(0.17348863040 + 76674.39271195528 * self.t)
Y1 += 0.00000000003 * math.cos(0.93059863207 + 126997.18458038908 * self.t)
Y1 += 0.00000000003 * math.cos(4.49117697679 + 77101.47853779829 * self.t)
Y1 += 0.00000000003 * math.cos(4.40672346738 + 77741.37582411229 * self.t)
Y1 += 0.00000000003 * math.cos(4.09517524067 + 8989.22388794889 * self.t)
Y1 += 0.00000000003 * math.cos(5.27976660688 + 77733.77464214448 * self.t)
Y1 += 0.00000000003 * math.cos(1.52949550682 + 286966.69073983253 * self.t)
Y1 += 0.00000000003 * math.cos(0.97589529959 + 74.53778108379 * self.t)
Y1 += 0.00000000003 * math.cos(4.21847009410 + 632.5399218297 * self.t)
Y1 += 0.00000000004 * math.cos(4.47958960947 + 28287.2343023447 * self.t)
Y1 += 0.00000000004 * math.cos(4.32562807533 + 23889.0596157707 * self.t)
Y1 += 0.00000000003 * math.cos(5.21982122522 + 75930.26921436709 * self.t)
Y1 += 0.00000000002 * math.cos(1.69313010887 + 182188.96761762688 * self.t)
Y1 += 0.00000000003 * math.cos(4.67718054767 + 27250.62166346549 * self.t)
Y1 += 0.00000000003 * math.cos(4.13971825138 + 51315.74017187909 * self.t)
Y1 += 0.00000000003 * math.cos(4.52130139518 + 52182.67601266569 * self.t)
Y1 += 0.00000000002 * math.cos(4.85799167915 + 121335.85253123689 * self.t)
Y1 += 0.00000000002 * math.cos(3.88643180579 + 110012.70079796549 * self.t)
Y1 += 0.00000000002 * math.cos(1.22067192730 + 2008.8013566425 * self.t)
Y1 += 0.00000000002 * math.cos(3.76859743855 + 24925.6722546499 * self.t)
Y1 += 0.00000000003 * math.cos(0.31070573063 + 39629.08052658189 * self.t)
Y1 += 0.00000000003 * math.cos(3.24788020549 + 3340.8562441833 * self.t)
Y1 += 0.00000000002 * math.cos(0.35390973897 + 23439.20449861769 * self.t)
Y1 += 0.00000000002 * math.cos(1.50089471176 + 29415.79498037089 * self.t)
Y1 += 0.00000000003 * math.cos(0.83102506625 + 25131.85780308709 * self.t)
Y1 += 0.00000000003 * math.cos(6.25945305227 + 51123.29408247649 * self.t)
Y1 += 0.00000000003 * math.cos(0.28921662553 + 90695.50825763689 * self.t)
Y1 += 0.00000000002 * math.cos(0.13680232887 + 24505.69997580769 * self.t)
Y1 += 0.00000000003 * math.cos(2.81292041302 + 26011.8808877821 * self.t)
Y1 += 0.00000000002 * math.cos(3.34607115822 + 51329.9672658807 * self.t)
Y1 += 0.00000000003 * math.cos(4.94974770833 + 81604.56566890588 * self.t)
Y1 += 0.00000000003 * math.cos(5.66982181767 + 52813.0463726561 * self.t)
Y1 += 0.00000000003 * math.cos(0.02173421963 + 391318.79094109643 * self.t)
Y1 += 0.00000000002 * math.cos(2.96964107128 + 196137.31725009429 * self.t)
Y1 += 0.00000000003 * math.cos(1.28173018247 + 156507.9929060289 * self.t)
Y1 += 0.00000000003 * math.cos(0.93767773997 + 27727.2164762457 * self.t)
Y1 += 0.00000000003 *
# source repository: cafealternativo/cpython
import winrt, struct, binascii
ELEMENT_TYPE_VOID = 1
ELEMENT_TYPE_BOOLEAN = 2
ELEMENT_TYPE_CHAR = 3
ELEMENT_TYPE_I1 = 4
ELEMENT_TYPE_U1 = 5
ELEMENT_TYPE_I2 = 6
ELEMENT_TYPE_U2 = 7
ELEMENT_TYPE_I4 = 8
ELEMENT_TYPE_U4 = 9
ELEMENT_TYPE_I8 = 0xa
ELEMENT_TYPE_U8 = 0xb
ELEMENT_TYPE_R4 = 0xc
ELEMENT_TYPE_R8 = 0xd
ELEMENT_TYPE_STRING = 0xe
ELEMENT_TYPE_PTR = 0xf
ELEMENT_TYPE_BYREF = 0x10
ELEMENT_TYPE_VALUETYPE = 0x11
ELEMENT_TYPE_CLASS = 0x12
ELEMENT_TYPE_VAR = 0x13
ELEMENT_TYPE_GENERICINST = 0x15
ELEMENT_TYPE_TYPEDBYREF = 0x16
ELEMENT_TYPE_I = 0x18
ELEMENT_TYPE_OBJECT = 0x1c
ELEMENT_TYPE_SZARRAY = 0x1d
ELEMENT_TYPE_CMOD_REQD = 0x1f
ELEMENT_TYPE_CMOD_OPT = 0x20
tnames = {
ELEMENT_TYPE_VOID: 'void',
ELEMENT_TYPE_BOOLEAN: 'boolean',
ELEMENT_TYPE_CHAR: 'wchar_t',
ELEMENT_TYPE_I1: 'char',
ELEMENT_TYPE_U1: 'unsigned char',
ELEMENT_TYPE_I2: 'short',
ELEMENT_TYPE_U2: 'unsigned short',
ELEMENT_TYPE_I4: 'int',
ELEMENT_TYPE_U4: 'unsigned int',
ELEMENT_TYPE_I8: 'INT64',
ELEMENT_TYPE_U8: 'UINT64',
ELEMENT_TYPE_R4: 'float',
ELEMENT_TYPE_R8: 'double',
ELEMENT_TYPE_STRING: 'HSTRING',
ELEMENT_TYPE_I: 'int',
}
mdtModule = 0x00000000
mdtTypeRef = 0x01000000
mdtTypeDef = 0x02000000
mdtFieldDef = 0x04000000
mdtMethodDef = 0x06000000
mdtParamDef = 0x08000000
mdtInterfaceImpl = 0x09000000
mdtMemberRef = 0x0a000000
mdtCustomAttribute = 0x0c000000
mdtPermission = 0x0e000000
mdtSignature = 0x11000000
mdtEvent = 0x14000000
mdtProperty = 0x17000000
mdtModuleRef = 0x1a000000
mdtTypeSpec = 0x1b000000
mdtAssembly = 0x20000000
mdtAssemblyRef = 0x23000000
mdtFile = 0x26000000
mdtExportedType = 0x27000000
mdtManifestResource = 0x28000000
mdtGenericParam = 0x2a000000
mdtMethodSpec = 0x2b000000
mdtGenericParamConstraint = 0x2c000000
mdtString = 0x70000000
mdtName = 0x71000000
mdtBaseType = 0x72000000
def token_type(t):
return t & 0xff000000
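# Added illustration: a metadata token keeps the table id in its top byte and the
# row id in the lower three bytes, so token_type() simply masks off the row. This
# helper is a demonstration only and is never called by the surrounding code.
def _demo_token_layout():
    token = mdtTypeDef | 0x000001  # row 1 of the TypeDef table
    assert token_type(token) == mdtTypeDef
    return token & 0x00ffffff      # -> 1, the row index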
def decompress_integer(s):
b0 = s[0]
if (b0 & 0x80) == 0:
return b0, s[1:]
if (b0 & 0x40) == 0:
return ((b0 & 0x3f) << 8) + s[1], s[2:]
return (((b0 & 0x3f) << 24) + (s[1]<<16) +
(s[2] << 8) + s[3]), s[4:]
def decompress_packed_string(s):
# http://www.codeproject.com/Articles/42655/NET-file-format-Signatures-under-the-hood-Part-2
if s[0] == 0:
return None, s[1:]
if s[0] == 0xff:
return "", s[1:]
len, s = decompress_integer(s)
res = s[:len].decode('utf-8')
s = s[len:]
return res, s
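# Added illustration of the ECMA-335 compressed values handled above: one byte for
# values below 0x80, two bytes flagged with 0x80, four bytes flagged with 0xC0, and
# packed strings reuse the same length prefix in front of UTF-8 bytes. Demonstration
# only, never called by the rest of the module.
def _demo_decompress():
    assert decompress_integer(b'\x03')[0] == 0x03
    assert decompress_integer(b'\x80\x80')[0] == 0x80
    assert decompress_integer(b'\xc0\x00\x10\x00')[0] == 0x1000
    assert decompress_packed_string(b'\x05hello')[0] == 'hello'
    return True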
struct_map = {
ELEMENT_TYPE_U1:'B',
ELEMENT_TYPE_U2:'<H',
ELEMENT_TYPE_U4:'<I',
ELEMENT_TYPE_U8:'<Q',
}
def decode_value(value, typecode):
code = struct_map[typecode]
s = struct.calcsize(code)
v = value[:s]
value = value[s:]
return struct.unpack(code, v)[0], value
def decode_custom_attribute(sig, value):
# Prolog
v, value = decode_value(value, ELEMENT_TYPE_U2)
assert v == 1
# StandAloneMethodSig
s0 = sig[0]
sig = sig[1:]
nparams, sig = decompress_integer(sig)
rtype, sig = decode_type(sig)
assert rtype.is_void()
result = []
for i in range(nparams):
ptype, sig = decode_type(sig)
if ptype.is_basic():
if ptype.kind == ELEMENT_TYPE_STRING:
v, value = decompress_packed_string(value)
else:
v, value = decode_value(value, ptype.kind)
result.append(v)
elif ptype.name == 'System.Type':
tname, value = decompress_packed_string(value)
result.append(tname)
elif ptype.is_enum():
v, value = decode_value(value, ELEMENT_TYPE_U4)
result.append(v)
else:
assert False, (ptype.name, value)
numnamed, value = decode_value(value, ELEMENT_TYPE_U2)
if numnamed:
result.append(("named", numnamed, value))
return result
class RTType:
def is_void(self):
return False
def is_array(self):
return False
def is_basic(self):
return False
class BasicType(RTType):
def __init__(self, kind):
self.kind = kind
def decl_var(self, var):
return tnames[self.kind] + " " + var + ";"
def decl_ptr(self, var):
return tnames[self.kind] + " *" + var + ";"
def is_void(self):
return self.kind == ELEMENT_TYPE_VOID
def is_basic(self):
return True
builtin_types = {
'IUnknown':'IUnknown',
'Windows.Foundation.EventRegistrationToken':'EventRegistrationToken',
'Windows.Foundation.AsyncStatus':'AsyncStatus',
'Windows.Foundation.IAsyncInfo':'IAsyncInfo',
'Windows.Foundation.HResult':'HRESULT',
'Windows.Foundation.Uri':'__x_ABI_CWindows_CFoundation_CIUriRuntimeClass',
'System.Guid':'GUID',
}
unsupported_types = {
# no C type
'Windows.Foundation.WwwFormUrlDecoder',
'Windows.Graphics.Printing.PrintTaskOptions',
'Windows.Media.MediaProperties.MediaPropertySet',
'Windows.Networking.Sockets.IWebSocket',
'Windows.Networking.Sockets.MessageWebSocketInformation',
'Windows.Networking.Sockets.StreamWebSocketInformation',
'Windows.Networking.Connectivity.IPInformation',
'Windows.Security.Authentication.OnlineId.SignOutUserOperation',
'Windows.Security.Cryptography.Core.CryptographicHash',
'Windows.Storage.Streams.DataReaderLoadOperation',
'Windows.Storage.Streams.DataWriterStoreOperation',
'Windows.Storage.AccessCache.AccessListEntryView',
'Windows.Storage.FileProperties.StorageItemThumbnail',
'Windows.UI.ApplicationSettings.SettingsCommand',
'Windows.UI.Xaml.Media.Animation.TimelineCollection',
'Windows.UI.Xaml.Media.Animation.DoubleKeyFrameCollection',
}
class NamedType(RTType):
def __init__(self, name, token, isclass):
if name in unsupported_types:
raise NotImplementedError
self.name = name
self.token = token
self.isclass = isclass
self.ptr = ''
if self.isclass:
self.ptr = '*'
try:
self.tname = builtin_types[name]
except KeyError:
comp = name.split('.')
# XXX tell interfaces apart
if isclass and not (comp[-1][0] == 'I' and comp[-1][1].isupper()):
comp[-1] = 'I'+comp[-1]
self.tname = '::'.join(comp)
def decl_var(self, var):
return "%s %s%s;" % (self.tname, self.ptr, var)
def decl_ptr(self, var):
return "%s %s*%s;" % (self.tname, self.ptr, var)
def is_enum(self):
mdi, td = type_by_name[self.name]
tname, flags, base = mdi.GetTypeDefProps(td)
scope, basename = mdi.GetTypeRefProps(base)
return basename == 'System.Enum'
class SZArray(RTType):
def __init__(self, elemtype):
self.elemtype = elemtype
def decl_var(self, var):
return self.elemtype.decl_ptr(var)
def is_array(self):
return True
def decode_type(s):
b0 = s[0]
s = s[1:]
if b0 in tnames:
return BasicType(b0), s
if b0 in (ELEMENT_TYPE_CLASS, ELEMENT_TYPE_VALUETYPE):
i, s = decompress_integer(s)
table = i & 0x3
value = i >> 2
token = (table<<24) + value
scope, name = mdi.GetTypeRefProps(token)
return NamedType(name, token, b0 == ELEMENT_TYPE_CLASS), s
if b0 == ELEMENT_TYPE_GENERICINST:
raise NotImplementedError
s = decode_type(s)
argc, s = decompress_integer(s)
print('<', end='')
for i in range(argc):
s = decode_type(s)
if i < argc-1:
print(',', end=' ')
print('>', end=' ')
return s
if b0 == ELEMENT_TYPE_OBJECT:
return NamedType("IInspectable", True), s
if b0 == ELEMENT_TYPE_SZARRAY:
b0 = s[0]
if b0 in (ELEMENT_TYPE_CMOD_REQD, ELEMENT_TYPE_CMOD_OPT):
c, s = parse_custom_mod(s)
t, s = decode_type(s)
# XXX consider c
return SZArray(t), s
if b0 == ELEMENT_TYPE_VAR:
raise NotImplementedError
param, s = decompress_integer(s)
print('T%d' % param, end=' ')
return s
raise NotImplementedError(hex(b0))
def parse_param(s):
b0 = s[0]
if b0 in (ELEMENT_TYPE_CMOD_REQD, ELEMENT_TYPE_CMOD_OPT):
raise NotImplementedError
s = parse_custom_mod(s)
return parse_param(s)
if b0 == ELEMENT_TYPE_BYREF:
raise NotImplementedError
print('BYREF', end=' ')
return decode_type(s[1:])
elif b0 == ELEMENT_TYPE_TYPEDBYREF:
raise NotImplementedError
print('TYPEDBYREF', end=' ')
elif b0 == ELEMENT_TYPE_VOID:
raise NotImplementedError
print('void', end=' ')
else:
return decode_type(s)
def parse_sig(this, s, name):
# 22.2.3 StandAloneMethodSig
s0 = s[0]
s = s[1:]
#if s0 & 0x20:
# print("HASTHIS", end='')
#if s0 & 0x40:
# print("EXPLICITTHIS", end=' ')
#callconv = ('DEFAULT', 'C', 'STDCALL', 'THISCALL', 'FASTCALL', 'VARARG')
#print(callconv[s0 & 7], end=' ')
nparams, s = decompress_integer(s)
try:
rtype, s = decode_type(s)
if rtype.is_void():
rtype = None
params = []
for i in range(nparams):
t, s = parse_param(s)
params.append(t)
except NotImplementedError:
raise
params = None
outfile.write('static PyObject*\n%s_%s(PyObject *_self, PyObject *args)\n{\n' %
(cname, name))
if params is None:
outfile.write(' PyErr_SetString(PyExc_NotImplementedError, "signature is unsupported");\n')
outfile.write(' return NULL;\n')
else:
outfile.write(' RTObject* self = (RTObject*)_self;\n')
outfile.write(' %s *_this = (%s*)self->_com;\n' % (this.tname, this.tname))
if rtype:
if rtype.is_array():
outfile.write(' UINT32 result_size;\n')
outfile.write(' %s\n' % rtype.decl_var('result'))
outfile.write(' HRESULT hres;\n')
for i, p in enumerate(params):
if p.is_array():
outfile.write(" UINT32 param%d_size;\n" % i)
outfile.write(" %s\n" % p.decl_var("param%d" % i))
outfile.write(" hres = _this->%s(" % name)
parm_strings = []
for i, p in enumerate(params):
if p.is_array():
parm_strings.append("param%d_size" % i)
parm_strings.append("param%d" % i)
if rtype:
if rtype.is_array():
parm_strings.append('&result_size')
parm_strings.append('&result')
outfile.write(", ".join(parm_strings))
outfile.write(');\n')
outfile.write('}\n\n')
# No header file for these namespaces
noheader = {
"Windows.Data",
"Windows.Devices",
"Windows.Graphics",
"Windows.Management",
"Windows.Security"
}
included = set()
def include(h):
if h in included:
return
outfile.write('#include <%s.h>\n' % h)
included.add(h)
def namespaces(n, files):
if n not in noheader:
include(n)
mdfiles, subns = winrt.RoResolveNamespace(n)
files.extend(mdfiles)
for sub in subns:
namespaces(n+"."+sub, files)
outfile = open('rtwrapper.c', 'w')
outfile.write('#include "rtsupport.c"\n')
files = []
namespaces('Windows', files)
classes = []
dispenser = winrt.newdispenser()
def print_type():
outfile.write("static PyMethodDef %s_methods[] = {\n" % cname)
for m in methods:
outfile.write(' {"%s", %s_%s, METH_VARARGS},\n' % (m, cname, m))
outfile.write(" {NULL}\n};\n\n")
outfile.write("static PyType_Slot %s_slots[] = {\n" % cname)
outfile.write(" {Py_tp_methods, %s_methods},\n" % cname)
outfile.write(" {0, 0}\n};\n\n")
outfile.write("static PyType_Spec %s_spec = {\n" % cname)
outfile.write(' "%s",\n' % cname.split('_')[-1])
outfile.write(' sizeof(RTObject),\n')
outfile.write(' 0,\n')
outfile.write(' Py_TPFLAGS_DEFAULT,\n')
outfile.write(' %s_slots,\n' % cname)
outfile.write('};\n\n')
def gen_rtclass():
outfile.write('/* rtclass %s */\n' % tname)
for intf in implements:
klass, intf = mdi.GetInterfaceImplProps(intf)
assert klass == typedef
if token_type(intf) == mdtTypeRef:
outfile.write('/* implements %r */\n' % (mdi.GetTypeRefProps(intf),))
elif token_type(intf) == mdtTypeSpec:
outfile.write('/* implements %r */\n' % mdi.GetTypeSpecFromToken(intf))
def get_attributes(typedef):
attrs = []
for cv in mdi.EnumCustomAttributes(None, typedef, None, 1000):
tkObj, tkType, pBlob = mdi.GetCustomAttributeProps(cv)
assert tkObj == typedef
atype, aname, asig = mdi.GetMemberRefProps(tkType)
assert aname == '.ctor'
ascope, aname = mdi.GetTypeRefProps(atype)
params = decode_custom_attribute(asig, pBlob)
attrs.append((aname, params))
return attrs
# skip for now
skipped_types = {
'Windows.Devices.Enumeration.DeviceThumbnail',
'Windows.ApplicationModel.Background.IBackgroundTaskBuilder',
'Windows.ApplicationModel.Background.IPushNotificationTriggerFactory',
'Windows.Devices.Sms.ISmsDevice',
'Windows.Management.Deployment.IPackageManager',
'Windows.UI.Xaml.Media.Animation.IStoryboard',
# no this type
'Windows.ApplicationModel.Background.BackgroundExecutionManager',
# 8.1 only
'Windows.ApplicationModel.Background.IAlarmApplicationManagerStatics',
'Windows.ApplicationModel.Background.IBackgroundWorkCostStatics',
}
skipped_namespaces = {
'Windows.Foundation.Metadata', # no header file
'Windows.Graphics.Printing.OptionDetails', # inconsistent type names
'Windows.UI.Xaml.Documents', # missing types
'Windows.UI.Xaml.Controls', # vector types
'Windows.UI.Xaml.Controls.Primitives', # vector types
'Windows.UI.Xaml', # vector types
'Windows.UI.Xaml.Media', # vector types
#'Windows.ApplicationModel.Appointments', # 8.1 only
#'Windows.ApplicationModel.Appointments.AppointmentsProvider', # 8.1 only
#'Windows.ApplicationModel.Search.Core', # 8.1 only
#'Windows.ApplicationModel.Calls', # 8.1 only
}
# First find all types, so that we don't need to follow assembly references
mdi_by_file = {}
type_by_name = {}
for f in files:
mdi = mdi_by_file[f] = dispenser.OpenScope(f)
for typedef in mdi.EnumTypeDefs(None, 1000):
tname, flags, base = mdi.GetTypeDefProps(typedef)
assert tname not in type_by_name
type_by_name[tname] = (mdi, typedef)
for f in files:
outfile.write("/********* %s ************/\n" % f)
mdi = mdi_by_file[f]
for typedef in mdi.EnumTypeDefs(None, 1000):
attrs = [param[0] for name, param in get_attributes(typedef)
if name == 'Windows.Foundation.Metadata.VersionAttribute']
if attrs and attrs[0] > 6020000:
# Skip Windows 8.1 and newer
continue
tname, flags, base = mdi.GetTypeDefProps(typedef)
namespace = tname.rsplit('.', 1)[0]
if namespace in skipped_namespaces:
continue
include(namespace)
outfile.write('using namespace ABI;\n')
for typedef in mdi.EnumTypeDefs(None, 1000):
tname, flags, base = mdi.GetTypeDefProps(typedef)
namespace = tname.rsplit('.', 1)[0]
if
merging
return_expr(ctx)
hdr.valid = z3.If(index == 0, hdr.valid, prev_valid)
return_expr = lval.locals["0"]
for hdr_idx in range(1, max_idx):
cond = index == hdr_idx
hdr = lval.locals[str(hdr_idx)]
# preserve the valid value before the method call
prev_valid = hdr.valid
# get the method
caller = hdr.locals[target_member]
# execute the call
caller(ctx)
hdr.valid = z3.If(cond, hdr.valid, prev_valid)
return_expr = handle_mux(cond, hdr, return_expr)
# return a dummy function...
return return_expr.isValid
return_expr = ctx.gen_instance("undefined", hdr.locals[target_member].sort())
for hdr_idx in range(max_idx):
hdr = lval.locals[str(hdr_idx)]
cur_val = hdr.locals[target_member]
cond = index == hdr_idx
return_expr = handle_mux(cond, cur_val, return_expr)
return return_expr
return_expr = ctx.gen_instance("undefined", lval.locals["0"].sort())
for hdr_idx in range(max_idx):
cond = index == hdr_idx
return_expr = handle_mux(cond, lval.locals[str(hdr_idx)], return_expr)
return return_expr
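# Added illustration (not part of the original tool): the loops above lower a
# runtime header-stack index into a chain of conditional selections. For plain
# expressions handle_mux is assumed to reduce to z3.If, so the chain looks like
# the self-contained helper below, which is never called by the surrounding code.
def _demo_runtime_index_mux():
    import z3
    idx = z3.BitVec("idx", 32)
    slots = [z3.BitVecVal(v, 32) for v in (10, 20, 30)]
    # start from slot 0 and fold the remaining slots into an If-chain
    mux = slots[0]
    for i in range(1, len(slots)):
        mux = z3.If(idx == i, slots[i], mux)
    # substituting a concrete index collapses the chain to a single value (30 here)
    return z3.simplify(z3.substitute(mux, (idx, z3.BitVecVal(2, 32))))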
class P4Index(P4Member):
# FIXME: Still an absolute nightmare of a class.
# How to handle symbolic indices?
def resolve(self, ctx, target_member=None):
index = ctx.resolve_expr(self.member)
lval = ctx.resolve_expr(self.lval)
# resolve the index expression
if isinstance(index, z3.ExprRef):
index = z3.simplify(index)
if isinstance(index, int):
index = str(index)
elif isinstance(index, z3.BitVecNumRef):
index = str(index.as_long())
elif isinstance(index, z3.BitVecRef):
# if the index is a runtime value, we need to apply special care
return resolve_runtime_index(ctx, lval, index, target_member)
else:
raise RuntimeError(f"Unsupported index {type(index)}!")
hdr = lval.resolve_reference(index)
# if a target member is set, we need to resolve that member
if target_member:
return hdr.resolve_reference(target_member)
return hdr
def set_value(self, ctx, rval, target_member=None):
index = ctx.resolve_expr(self.member)
lval = ctx.resolve_expr(self.lval)
# simplify the index first
if isinstance(index, z3.ExprRef):
index = z3.simplify(index)
if isinstance(index, int):
index = str(index)
elif isinstance(index, z3.BitVecNumRef):
index = str(index.as_long())
elif isinstance(index, z3.BitVecRef):
# if the index is a runtime value, we need to set a new value
max_idx = lval.locals["size"]
if target_member:
# there is a member after the index, forward the assignment
# if the index is out of range, nothing happens.
for hdr_idx in range(max_idx):
hdr = lval.locals[str(hdr_idx)]
cur_val = hdr.locals[target_member]
if_expr = handle_mux(index == hdr_idx, rval, cur_val)
hdr.set_or_add_var(target_member, if_expr)
else:
for hdr_idx in range(max_idx):
hdr = lval.locals[str(hdr_idx)]
if_expr = handle_mux(index == hdr_idx, rval, hdr)
lval.set_or_add_var(str(hdr_idx), if_expr)
return
else:
raise RuntimeError(f"Unsupported index {type(index)}!")
# if a target member is present, we need to set that member instead
if target_member:
hdr = lval.locals[str(index)]
hdr.set_or_add_var(target_member, rval)
else:
lval.set_or_add_var(index, rval)
def __repr__(self):
return f"{self.lval}[{self.member}]"
class P4Slice(P4Expression):
def __init__(self, val, slice_l, slice_r):
self.val = val
self.slice_l = slice_l
self.slice_r = slice_r
def eval(self, ctx):
val = ctx.resolve_expr(self.val)
slice_l = ctx.resolve_expr(self.slice_l)
slice_r = ctx.resolve_expr(self.slice_r)
if isinstance(val, int):
val = val.as_bitvec
return z3.Extract(slice_l, slice_r, val)
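# Added illustration: a P4 slice such as val[7:4] maps onto z3.Extract, whose high
# and low bit positions are both inclusive. Demonstration only.
def _demo_slice_extract():
    import z3
    val = z3.BitVecVal(0xABCD, 16)
    nibble = z3.Extract(7, 4, val)  # bits 7..4 of 0xABCD -> 0xC as a 4-bit value
    return z3.simplify(nibble)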
class P4ComplexType():
""" A P4ComplexType is a wrapper for any type that is not a simple Z3 type
such as IntSort, BitVecSort or BoolSort.
A P4ComplexType creates a P4ComplexInstance; all subtypes
become fields of this class and can be accessed in dot-notation
(e.g., headers.eth.srcmac).
If one of the children is a P4ComplexType, a new P4ComplexInstance will be
instantiated and attached as a member.
Every member of this class should either be a P4ComplexType or a z3.SortRef
if it is a basic type. A DataTypeRef should never be a member and always
needs to be converted to a P4ComplexType.
"""
def __init__(self, name):
self.name = name
self.z3_type = None
def instantiate(self, name, member_id=0):
return P4ComplexInstance(name, self, member_id)
def __str__(self):
return self.name
def __repr__(self):
return self.name
def __eq__(self, other):
# lets us compare different z3 types with each other
# needed for type checking
if isinstance(other, P4ComplexType):
return self.z3_type == other.z3_type
elif isinstance(other, z3.AstRef):
return self.z3_type == other
return super(P4ComplexType).__eq__(other)
class P4ComplexInstance():
def __init__(self, name, p4z3_type, member_id):
self.locals = {}
self.name = name
self.z3_type = p4z3_type.z3_type
self.width = p4z3_type.width
self.p4z3_type = p4z3_type
self.member_id = member_id
self.fields = p4z3_type.fields
self.valid = z3.BoolVal(True)
def resolve_reference(self, var):
if isinstance(var, P4Member):
return var.resolve(self)
elif isinstance(var, str):
return self.locals[var]
return var
def set_list(self, rvals):
for idx, (member_name, member_type) in enumerate(self.fields):
val = rvals[idx]
# integers need to be cast to the respective type
if isinstance(val, int):
val = z3_cast(val, member_type)
self.set_or_add_var(member_name, val)
# whenever we set a list, the target instances becomes valid
self.valid = z3.BoolVal(True)
def set_or_add_var(self, lval, rval):
if isinstance(lval, P4Member):
lval.set_value(self, rval)
return
# rvals could be a list, unroll the assignment
if isinstance(rval, list) and lval in self.locals:
lval_val = self.locals[lval]
if isinstance(lval_val, P4ComplexInstance):
lval_val.set_list(rval)
else:
raise TypeError(
f"set_list {type(lval)} not supported!")
return
self.locals[lval] = rval
def sort(self):
return self.p4z3_type
def __copy__(self):
cls = self.__class__
result = cls.__new__(cls)
result.__dict__.update(self.__dict__)
result.locals = copy.copy(self.locals)
for name, val in self.locals.items():
if isinstance(val, P4ComplexInstance):
result.locals[name] = copy.copy(val)
return result
def __repr__(self):
return f"{self.__class__.__name__}_{self.name}"
def __eq__(self, other):
# It can happen that we compare to a list
# comparisons are almost the same just do not use fields
if isinstance(other, P4ComplexInstance):
other_list = []
for other_member_name, _ in other.fields:
other_list.append(other.resolve_reference(other_member_name))
elif isinstance(other, list):
other_list = other
else:
return z3.BoolVal(False)
# there is a mismatch in fields, clearly not equal
if len(self.fields) != len(other_list):
return z3.BoolVal(False)
eq_fields = []
for index, (self_member_name, _) in enumerate(self.fields):
self_member = self.resolve_reference(self_member_name)
other_member = other_list[index]
# we compare the fields of each complex type
z3_eq = self_member == other_member
eq_fields.append(z3_eq)
return z3.And(*eq_fields)
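# Added illustration: comparing an instance against a list reduces to the
# conjunction of per-field equalities that __eq__ assembles above. Never called.
def _demo_fieldwise_equality():
    import z3
    lhs = [z3.BitVec("f0", 8), z3.BitVec("f1", 8)]
    rhs = [z3.BitVecVal(1, 8), z3.BitVecVal(2, 8)]
    return z3.And(*[x == y for x, y in zip(lhs, rhs)])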
class StructType(P4ComplexType):
def __init__(self, name, ctx, fields, type_params):
super(StructType, self).__init__(name)
self.width = 0
self.ctx = ctx
self.fields = fields
self.flat_names = []
self.type_params = type_params
self.initialize(ctx)
def initialize(self, ctx):
flat_names = []
for idx, (member_name, member_type) in enumerate(self.fields):
try:
member_type = ctx.resolve_type(member_type)
except KeyError:
# A generic type, just use the string for now
pass
if isinstance(member_type, P4ComplexType):
# the member is a complex type
# retrieve its flat list of fields
# append it to the member list
for sub_member in member_type.flat_names:
# FIXME: this is not the right way to construct a member...
member = Member(P4Member(member_name, sub_member.name),
sub_member.p4_type,
sub_member.width)
flat_names.append(member)
self.width += member_type.width
else:
if isinstance(member_type, z3.BoolSortRef):
# bools do not have a size attribute unfortunately
member_width = 1
elif isinstance(member_type, z3.BitVecSortRef):
member_width = member_type.size()
else:
# a kind of strange sub-type, unclear what its width is
# example: generics
member_width = 0
self.width += member_width
member = Member(member_name, member_type, member_width)
flat_names.append(member)
self.fields[idx] = (member_name, member_type)
if self.width == 0:
# we are dealing with an empty struct, create a dummy datatype
z3_type = z3.Datatype(self.name)
z3_type.declare(f"mk_{self.name}")
self.z3_type = z3_type.create()
else:
# use the flat bit width of the struct as datatype
self.z3_type = z3.BitVecSort(self.width)
self.flat_names = flat_names
def instantiate(self, name, member_id=0):
return StructInstance(name, self, member_id)
def init_type_params(self, type_ctx, *args, **kwargs):
init_struct = copy.copy(self)
# bind the types and set the type ctx
for idx, t_param in enumerate(init_struct.type_params):
arg = type_ctx.resolve_type(args[idx])
type_ctx.add_type(t_param, arg)
init_struct.initialize(type_ctx)
return init_struct
def __copy__(self):
cls = self.__class__
result = cls.__new__(cls)
result.__dict__.update(self.__dict__)
result.type_params = copy.copy(self.type_params)
result.fields = copy.copy(self.fields)
result.flat_names = []
return result
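# Added sketch (plain z3 only): a struct whose flat members are a 48-bit and a
# 16-bit field is modelled as a single 64-bit constant, just as initialize() sums
# member widths into one BitVecSort; bind() later carves the members out of that
# constant with z3.Extract, starting from the most significant bits. This helper
# is purely illustrative and is never called by the surrounding code.
def _demo_flat_struct_bind():
    import z3
    whole = z3.Const("hdr", z3.BitVecSort(64))
    width = 64
    members = []
    for member_width in (48, 16):
        members.append(z3.Extract(width - 1, width - member_width, whole))
        width -= member_width
    return members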
class StructInstance(P4ComplexInstance):
def __init__(self, name, p4z3_type, member_id):
super(StructInstance, self).__init__(name, p4z3_type, member_id)
self.const = z3.Const(name, self.z3_type)
# we use the overall index of the struct for a uniform naming scheme
flat_idx = self.member_id
for member_name, member_type in self.fields:
if isinstance(member_type, P4ComplexType):
# the z3 variable of the instance is only an id
instance = member_type.instantiate(str(flat_idx), flat_idx)
# but what we add is its fully qualified name, e.g. x.y.z
self.locals[member_name] = instance
flat_idx += len(member_type.flat_names)
else:
# this is just a filler value, it must be overridden by bind()
self.locals[member_name] = None
flat_idx += 1
def bind(self, bind_const):
# the identification of the member starts with the provided member id
# this bit_width must sum up to the bit width of the sub fields
bit_width = self.width
# set the fields of this class
for sub_member in self.p4z3_type.flat_names:
# TODO: Find a better way to handle undefined initialization
# This is very coarse
if str(bind_const) == UNDEF_LABEL:
bind_var = z3.Const(UNDEF_LABEL, sub_member.p4_type)
else:
# we bind by extracting the respective bit range
bind_var = z3.Extract(bit_width - 1,
bit_width - sub_member.width, bind_const)
if isinstance(sub_member.p4_type, z3.BoolSortRef):
# unfortunately bools still exit, we need to cast them
bind_var = z3_cast(bind_var, sub_member.p4_type)
# set the new bind value
self.set_or_add_var(sub_member.name, bind_var)
bit_width -= sub_member.width
def activate(self):
# structs may contain headers
#!/usr/bin/env python3
import time
from tkinter import *
from PIL import ImageTk, Image
import csv
from subprocess import Popen
import RPi.GPIO as GPIO
from subprocess import call
from datetime import datetime
from crontab import CronTab
GPIO.setmode(GPIO.BCM)
GPIO.setwarnings(False)
GPIO.setup(22, GPIO.OUT, initial=1) # Set pin 22 gpio to an output
GPIO.setup(23, GPIO.IN, pull_up_down=GPIO.PUD_DOWN) # Enable the pull-down resistor on input pin 23
GPIO.setup(24, GPIO.OUT, initial=0) # Set GPIO pin 24 to an output so the pump can be switched on when needed
# Main window
root = Tk()
root_status_string = StringVar() # Live timer countdown
timer_input_value = IntVar() # Keypad textbox value 1
daily_timer_input_value = IntVar() # Keypad textbox value 2
timer_set_run_text = StringVar() # Text string showing output of timer.
timer_recurrence_string = 0 # How often the pump will run
timer_time_string = "" # What time the pump will run
timer_status = StringVar() # Timer info on set text
water_level = StringVar() # Current water level string
timer_error_string = StringVar() # Timer set error string
timer_status_1 = StringVar() # Timer data info on set text
# Convert data from input value to mins/seconds
minute, sec = divmod(int(daily_timer_input_value.get()), 60)
hour, minute = divmod(minute, 60)
# Image/CSV data
keyboard_image = "keypad.jpg"
timer_data = 'timer_data.csv'
plot_img = "temp.png"
screen_off = "perl /home/pi/wateringsys/screen-off.pl"
speed_image = "/home/pi/wateringsys/speed.png"
class NumPad:
def __init__(self):
# Setup number pad screen
self.number_pad = Toplevel(root)
self.keypad_entery = Entry(self.number_pad,width=5,font=("Helvetica", 55))
self.keypad_entery.grid(row=0, column=0, columnspan=3, ipady=5)
self.number_pad.attributes('-fullscreen',True)
# Variables of keys to loop though
self.keys = [
['1', '2', '3'],
['4', '5', '6'],
['7', '8', '9'],
['Clear', '0', 'Exit'], ]
# Loop through the keys and create each button with a lambda command
for self.y, self.row in enumerate(self.keys, 1):
for self.x, self.key in enumerate(self.row):
self.b = Button(self.number_pad, text=self.key, command=lambda val=self.key:__numb_enter(val))
self.b.grid(row=self.y, column=self.x, ipadx=108, ipady=30)
self.exit = Button(
self.number_pad,
text="Exit",
command=self.number_pad.destroy).grid(
row=self.y, column=self.x, ipadx=100, ipady=30)
# Set the exit button at the end of the loop
def __numb_enter(arg):
# All globals required for updating the timer daily_timer_input_value
global timer_set_run_text
global timer_recurrence_string
global timer_time_string
global minute
global sec
global timer_error_string
self.pin = ''
# Set the pin var to empty
if arg == 'Clear':
# remove last number from `pin`
self.pin = self.pin[:-1]
self.keypad_entery.delete('0', 'end')
self.keypad_entery.insert('end', self.pin)
elif arg == 'Exit':
self.number_pad.destroy()
# Exit the keypad window
else:
# add number to pin
self.pin += arg
# add number to `entry`
self.keypad_entery.insert('end', arg)
self.pad_val = self.keypad_entery.get()
daily_timer_input_value.set(self.pad_val)
timer_input_value.set(self.pad_val)
# Calculate the minutes and seconds for the label
minute, sec = divmod(int(self.pad_val), 60)
hours, minute = divmod(minute, 60)
# Set the label to update the current seconds/minutes
timer_set_run_text.set(
"The timer will run at {} every {} day(s) for {} Minutes {} Seconds".format(
timer_time_string, timer_recurrence_string, minute, sec))
class Timers(object):
def __init__(self):
global timer_set_run_text
global daily_timer_input_value
global timer_status
global timer_error_string
global keyboard_img
self.timer_set_page = Toplevel(root)
# Setup the window for the timer selections
# Strings for all of the buttons
self.timer_run_text = Label(
self.timer_set_page,
text="Please choose a time of day to run the pump.",
font=('Helvetica', 20)).grid(row= 1,columnspan=8)
self.hours_in_day = [["1AM", "2AM", "3AM", "4AM", "5AM", "6AM", "7AM","8AM"],
["9AM", "10AM", "11AM", "12PM", "1PM", "2PM","3PM","4PM"],
["5PM", "6PM", "7PM", "8PM", "9PM", "10PM","11PM","12AM"]]
self.timer_entery = Entry(
self.timer_set_page,
textvariable=daily_timer_input_value,
width=23).grid(row=9, columnspan=3, column=0)
# Entry box for run time
daily_timer_input_value.set("") # Set the entry to blank
self.keyboard_button = Button(self.timer_set_page,command=NumPad) # Button Image to open number pad
self.keyboard_img = ImageTk.PhotoImage(Image.open(keyboard_image)) #
self.keyboard_button.config(image=self.keyboard_img) #
self.keyboard_button.image = self.keyboard_img # Keep an instance of the image so
self.keyboard_button.grid(row=9, sticky=E, columnspan=2, column=1) # that it doesn't get garbage collected
self.exit = Button(
self.timer_set_page,
text="Exit",
command=self.timer_set_page.destroy).grid(row=9, columnspan=4,column=6, ipadx=50, ipady=15)
# Exit button back to main screen
self.set_timer = Button(
self.timer_set_page,
text="Set Timer",
command=self.__set_timer_cron,
bg="green").grid(row=9, columnspan=4, column=3, ipadx=50, ipady=15)
# Setting the timer writes the data out to CSV
self.timer_run_text = Label(
self.timer_set_page,
textvariable=timer_set_run_text,
font=('Helvetica', 14)).grid(row=10, columnspan=8)
# Set the text variable for timer run label
Timers.timer_run_failed = Label(
self.timer_set_page,
textvariable=timer_status,
font=('Helvetica', 14), foreground='red')
Timers.timer_run_failed.grid(row=11, columnspan=8)
# Set the text variable for a failed CSV
timer_status.set("")
Timers.err_label = Label(
self.timer_set_page,
textvariable=timer_error_string,
font=('Helvetica', 14), foreground='red')
Timers.err_label.grid(row=12, columnspan=8)
# Set the text variable for a failed CSV
timer_error_string.set("")
self.timer_length_text = Label(
self.timer_set_page,
text="Please choose how long to run the timer for in seconds.",
font=('Helvetica', 20)).grid(row=7, columnspan=8)
self.z = 0
# Loop through the hours of the day; z provides the hour that the lambda passes to the __timer_return function,
# which builds the label string and updates the label
for self.y, self.row in enumerate(self.hours_in_day, 1):
for self.x, self.key in enumerate(self.row):
self.z += 1
if self.z == 24:
self.z = 0
self.b = Button(self.timer_set_page, text=self.key, command=lambda val=self.z:self.__timer_return(val))
self.b.grid(row=self.y + 1, column=self.x, ipadx=20, ipady=10)
self.timer_set_page.attributes('-fullscreen', True)
# Strings for all recurrence rate
self.recurrence = ["1 Day", "2 Day", "3 Day", "4 Day", "5 Day", "6 Day","7 Day"]
self.timer_reoc_text = Label(
self.timer_set_page, text="Please choose how often you would like to run the timer.",
font=('Helvetica', 20)).grid(row=5, columnspan=8)
self.r = 0
self.col = 0
# Loop through the recurrence options; r provides the number of days between runs
# and is passed via lambda to the __recurrence_return function,
# which builds the label string and updates the label
for self.d in self.recurrence:
self.r += 1
self.c = Button(self.timer_set_page, text=self.d, command=lambda val=self.r:self.__recurrence_return(val))
self.c.grid(row=6, column=self.col, ipadx=12, ipady=12)
self.col += 1
def __recurrence_return(self, arg):
global timer_set_run_text
global timer_recurrence_string
global timer_time_string
global minute
global sec
# retrieve the recurrence rate, and set the new label string
timer_recurrence_string = str(arg)
timer_set_run_text.set(
"The timer will run at {} every {} day(s) for {} Minutes {} Seconds".format(
timer_time_string, timer_recurrence_string, minute, sec))
def __timer_return(self, arg):
global timer_set_run_text
global timer_recurrence_string
global timer_time_string
global minute
global sec
# retrieve the time of day, and set the new label string
self.pump_run_time = str(arg)
timer_time_string = str(str(arg) + ":00")
if len(timer_time_string) <= 4:
timer_time_string = "0" + timer_time_string
timer_set_run_text.set(
"The timer will run at {} every {} day(s) for {} Minutes {} Seconds".format(
timer_time_string, timer_recurrence_string, minute, sec))
def __set_timer_cron(self):
global timer_status
global timer_status_1
# Remove all existing timer cron jobs.
try:
run_time = self.pump_run_time
repeat = int(timer_recurrence_string)
run_length = int(daily_timer_input_value.get())
cron = CronTab(user=True)
cron.remove_all(comment='water_timer')
cron.write()
# Insert new cron job timer.
cron = CronTab(user=True)
job = cron.new(
command='sudo python3 /home/pi/wateringsys/crontimer.py {}'.format(run_length),
comment='water_timer')
if repeat == 1:
job.hour.on(run_time)
job.minute.on(0)
if repeat >= 2:
job.setall(0, run_time, '*/{}'.format(repeat), None, None)
cron.write()
daily_timer_input_value.set("")
timer_input_value.set("")
Timers.timer_run_failed.config(fg="Green")
timer_status.set("The timer has been set.")
timer_status_1.set(str(timer_time_string))
with open(timer_data, "w") as self.output:
self.output.truncate()
self.writer = csv.writer(self.output, lineterminator='\n')
self.writer.writerow([run_time, repeat, run_length])
self.output.close()
except:
Timers.timer_run_failed.config(fg="Red")
Timers.err_label.config(fg="Red")
timer_status_1.set(str(timer_time_string))
timer_status.set("Please enter a time, recurrence rate and timer length")
def __set_timer_csv(self):
global timer_status
global timer_status_1
try:
run_time = self.pump_run_time
repeat = str(timer_recurrence_string)
run_length = str(daily_timer_input_value.get())
with open(timer_data, "w") as self.output:
self.output.truncate()
self.writer = csv.writer(self.output, lineterminator='\n')
self.writer.writerow([run_time, repeat, run_length])
self.output.close()
# Set both entries back to empty
daily_timer_input_value.set("")
timer_input_value.set("")
call(["sudo", "systemctl", "restart", "pumptimer.service"])
Timers.timer_run_failed.config(fg="Green")
timer_status.set("The timer has been set.")
timer_status_1.set(str(timer_time_string))
except:
Timers.timer_run_failed.config(fg="Red")
Timers.err_label.config(fg="Red")
timer_status_1.set(str(timer_time_string))
timer_status.set("Please enter a time, recurrence rate and timer length")
def timer(): # Simple countdown timer function
try: # If any error occurs (usually because there was no input) just pass
run_time = timer_input_value.get()
root_status_string.set(str("Pump Running"))
timer_input_value.set("")
if GPIO.input(23) == 1:
GPIO.output(24, 1)
for i in range(1, run_time + 1, +1):
m, s = divmod(i, 60)
h, m = divmod(m, 60)
root_status_string.set(str("{} Minutes {} Seconds".format(m, s)))
root.update()
time.sleep(1)
GPIO.output(24, 0)
root_status_string.set(str("The pump run has finished"))
except:
GPIO.output(24, 0) # Turn the pump off.
print("failed")
pass
manual_timer = 0
def man_start(force=True):
global running
global manual_timer
try:
if force:
running = True
if running:
if GPIO.input(23) == 1:
root_status_string.set(str("Pump Running"))
GPIO.output(24, 1)
manual_timer += 1
m, s = divmod(manual_timer, 60)
h, m = divmod(m, 60)
root_status_string.set(str("{} Minutes {} Seconds".format(m, s)))
root.update()
root.after(1000, man_start, False)
if GPIO.input(23) == 0:
root_status_string.set(str("The pump will not run when the water level is low."))
except:
GPIO.output(24, 0) # Stop the pump.
def man_stop():
global running
global manual_timer
GPIO.output(24, 0)
running = False
manual_timer = 0
root_status_string.set(str("The Pump has been manually stopped."))
def img_updater(): # Auto image updater for home screen.
# Open image
try:
global counter
timer_set_time, time_until_run = csv_read()
if GPIO.input(23) == 0:
water_level_label.config(fg="Red")
water_level.set(str("The water level is LOW."))
if GPIO.input(23) == 1:
water_level_label.config(fg="Green")
water_level.set(str("The water level is OK."))
# Every 10 seconds change the timer_status_1 string which is the label on the front page.
counter += 1
if counter >= 1:
timer_status_1.set(str(timer_set_time))
plant_stat_img = ImageTk.PhotoImage(Image.open(plot_img))
plant_stat_panel.config(image=plant_stat_img)
plant_stat_panel.image = plant_stat_img
if counter >= 11:
timer_status_1.set(str(time_until_run))
speed_img = ImageTk.PhotoImage(Image.open(speed_image)) # /home/pi/html/
plant_stat_panel.config(image=speed_img)
plant_stat_panel.image = speed_img
if counter >= 21:
counter = 0
# Reschedule this refresh to run again in one second
root.after(1000, img_updater)
except:
timer_status_1.set(str('Please enter a timer, there is currently no timer set.'))
root.after(1000, img_updater)
pass
def back_light():
# Start the perl script which turns off the screen back light when the screensaver is active.
# The perl script calls back light.py which turns the back light on and off.
proc = Popen(
[screen_off], shell=True,
stdin=None, stdout=None, stderr=None, close_fds=True)
def csv_read():
# Consider changing the times of day to a dict to use AM PM times inline with the loop.
try:
with open(timer_data) as csvfile:
read_csv = csv.reader(csvfile, delimiter=',')
for row in read_csv:
days = int(row[1])
runtime = int(row[2])
time_of_day = int(row[0])
csvfile.close()
# The number pad loop stores plain hour numbers, so convert them into something people can read.
# The following reads the stored timer and builds a label out of it.
if int(int(row[0])) <= 9:
run_hour = "0{}:00".format(str(int(row[0])))
if int(int(row[0])) >= 10:
run_hour = "{}:00".format(str(int(row[0])))
days = int(row[1])
m, s = divmod(int(row[2]), 60)
h, m = divmod(m, 60)
run_time = (str("{} Minutes and {} Seconds".format(m, s)))
current_runtime = "The timer is set to run for {} every {} day(s) at {}".format(run_time, days, run_hour)
# The following reads the stored timer and works out how much time is left until the next run.
now = datetime.now()
seconds_since_last_run = (now - now.replace(hour=time_of_day, minute=0, second=0, microsecond=0)).total_seconds()
if days == 1:
total_seconds = (days - 1) * 86400
countdown = total_seconds - int(round(seconds_since_last_run))
if countdown <= 1:
total_seconds = days * 86400
countdown = total_seconds - int(round(seconds_since_last_run))
if days > 1:
total_seconds = (days - 1) * 86400
countdown = total_seconds - int(round(seconds_since_last_run))
m, s = divmod(countdown, 60)
h, m = divmod(m, 60)
d, h = divmod(h, 24)
times = (
"There is {} day(s) {} hour(s) {} minute(s) and {} seconds remaining on the timer.".format(d, | |
# source repository: QuESt-Calculator/pyscf
#!/usr/bin/env python
# Copyright 2014-2020 The PySCF Developers. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Author: <NAME> <<EMAIL>>
#
'''
Non-relativistic analytical nuclear gradients for restricted Hartree-Fock with k-point sampling
'''
import numpy as np
from pyscf import lib
from pyscf.lib import logger
from pyscf.grad import rhf as molgrad
from pyscf.pbc.gto.pseudo.pp import get_vlocG, get_alphas, get_projG, projG_li, _qli
from pyscf.pbc.dft.numint import eval_ao_kpts
from pyscf.pbc import gto, tools
from pyscf.gto import mole
import scipy
def grad_elec(mf_grad, mo_energy=None, mo_coeff=None, mo_occ=None, atmlst=None):
'''
Electronic part of KRHF/KRKS gradients
Args:
mf_grad : pbc.grad.krhf.Gradients or pbc.grad.krks.Gradients object
'''
mf = mf_grad.base
cell = mf_grad.cell
kpts = mf.kpts
nkpts = len(kpts)
if mo_energy is None: mo_energy = mf.mo_energy
if mo_occ is None: mo_occ = mf.mo_occ
if mo_coeff is None: mo_coeff = mf.mo_coeff
if atmlst is None: atmlst = range(cell.natm)
log = logger.Logger(mf_grad.stdout, mf_grad.verbose)
hcore_deriv = mf_grad.hcore_generator(cell, kpts)
s1 = mf_grad.get_ovlp(cell, kpts)
dm0 = mf.make_rdm1(mo_coeff, mo_occ)
t0 = (logger.process_clock(), logger.perf_counter())
log.debug('Computing Gradients of NR-HF Coulomb repulsion')
vhf = mf_grad.get_veff(dm0, kpts)
log.timer('gradients of 2e part', *t0)
dme0 = mf_grad.make_rdm1e(mo_energy, mo_coeff, mo_occ)
aoslices = cell.aoslice_by_atom()
de = np.zeros([len(atmlst),3])
for x, ia in enumerate(atmlst):
p0, p1 = aoslices[ia,2:]
h1ao = hcore_deriv(ia)
de[x] += np.einsum('xkij,kji->x', h1ao, dm0).real
# nabla was applied on bra in vhf, *2 for the contributions of nabla|ket>
de[x] += np.einsum('xkij,kji->x', vhf[:,:,p0:p1], dm0[:,:,p0:p1]).real * 2
de[x] -= np.einsum('kxij,kji->x', s1[:,:,p0:p1], dme0[:,:,p0:p1]).real * 2
de[x] /= nkpts
de[x] += mf_grad.extra_force(ia, locals())
if log.verbose > logger.DEBUG:
log.debug('gradients of electronic part')
mf_grad._write(log, cell, de, atmlst)
return de
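# Added sketch (not part of PySCF): for a toy problem the per-atom contractions in
# grad_elec reduce to plain einsum calls over a (3, nkpts, nao, nao) derivative
# array and a (nkpts, nao, nao) density matrix. Shapes only, no physics; never called.
def _demo_grad_contraction(nkpts=2, nao=4):
    import numpy as np
    rng = np.random.default_rng(0)
    h1ao = rng.standard_normal((3, nkpts, nao, nao))  # d<i|h|j>/dR block for one atom
    dm0 = rng.standard_normal((nkpts, nao, nao))      # density matrix per k-point
    de_x = np.einsum('xkij,kji->x', h1ao, dm0)        # 3-vector force contribution
    return de_x / nkpts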
def _make_fakemol():
fakemol = mole.Mole()
fakemol._atm = np.zeros((1,mole.ATM_SLOTS), dtype=np.int32)
fakemol._bas = np.zeros((1,mole.BAS_SLOTS), dtype=np.int32)
ptr = mole.PTR_ENV_START
fakemol._env = np.zeros(ptr+10)
fakemol._bas[0,mole.NPRIM_OF ] = 1
fakemol._bas[0,mole.NCTR_OF ] = 1
fakemol._bas[0,mole.PTR_EXP ] = ptr+3
fakemol._bas[0,mole.PTR_COEFF] = ptr+4
return fakemol
def get_hcore(cell, kpts):
'''Part of the nuclear gradients of core Hamiltonian'''
h1 = np.asarray(cell.pbc_intor('int1e_ipkin', kpts=kpts))
dtype = h1.dtype
if cell._pseudo:
SI=cell.get_SI()
Gv = cell.Gv
natom = cell.natm
coords = cell.get_uniform_grids()
ngrids = len(coords)
vlocG = get_vlocG(cell)
vpplocG = -np.einsum('ij,ij->j', SI, vlocG)
vpplocG[0] = np.sum(get_alphas(cell))
vpplocR = tools.ifft(vpplocG, cell.mesh).real
fakemol = _make_fakemol()
ptr = mole.PTR_ENV_START
for kn, kpt in enumerate(kpts):
aos = eval_ao_kpts(cell, coords, kpt, deriv=1)[0]
vloc = np.einsum('agi,g,gj->aij', aos[1:].conj(), vpplocR, aos[0])
expir = np.exp(-1j*np.dot(coords, kpt))
aokG = np.asarray([tools.fftk(np.asarray(ao.T, order='C'),
cell.mesh, expir).T for ao in aos])
Gk = Gv + kpt
G_rad = lib.norm(Gk, axis=1)
vnl = np.zeros(vloc.shape, dtype=np.complex128)
for ia in range(natom):
symb = cell.atom_symbol(ia)
if symb not in cell._pseudo:
continue
pp = cell._pseudo[symb]
for l, proj in enumerate(pp[5:]):
rl, nl, hl = proj
if nl >0:
hl = np.asarray(hl)
fakemol._bas[0,mole.ANG_OF] = l
fakemol._env[ptr+3] = .5*rl**2
fakemol._env[ptr+4] = rl**(l+1.5)*np.pi**1.25
pYlm_part = fakemol.eval_gto('GTOval', Gk)
pYlm = np.empty((nl,l*2+1,ngrids))
for k in range(nl):
qkl = _qli(G_rad*rl, l, k)
pYlm[k] = pYlm_part.T * qkl
SPG_lmi = np.einsum('g,nmg->nmg', SI[ia].conj(), pYlm)
SPG_lm_aoG = np.einsum('nmg,agp->anmp', SPG_lmi, aokG)
tmp = np.einsum('ij,ajmp->aimp', hl, SPG_lm_aoG[1:])
vnl += np.einsum('aimp,imq->apq', tmp.conj(), SPG_lm_aoG[0])
vnl *= (1./ngrids**2)
if dtype == np.float64:
h1[kn,:] += vloc.real + vnl.real
else:
h1[kn,:] += vloc + vnl
else:
raise NotImplementedError
return h1
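# Added illustration (plain numpy, not the PySCF implementation): tools.fftk as used
# above multiplies an AO sampled on the real-space grid by exp(-i k.r) before the
# FFT, so the coefficients come out on the shifted grid G + k. A 1-D check, never called:
def _demo_fftk_shift(n=16, g0=3, k=0.5):
    import numpy as np
    r = np.arange(n) * 2 * np.pi / n       # grid of a cell with lattice constant 2*pi
    ao = np.exp(1j * (g0 + k) * r)         # a single plane wave at G0 + k
    aokG = np.fft.fft(ao * np.exp(-1j * k * r))
    return int(np.argmax(np.abs(aokG)))    # -> g0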
def get_ovlp(cell, kpts):
return -np.asarray(cell.pbc_intor('int1e_ipovlp', kpts=kpts))
def hcore_generator(mf, cell=None, kpts=None):
if cell is None: cell = mf.cell
if kpts is None: kpts = mf.kpts
h1 = get_hcore(cell, kpts)
dtype = h1.dtype
aoslices = cell.aoslice_by_atom()
SI=cell.get_SI() ##[natom ,grid]
mesh = cell.mesh
Gv = cell.Gv ##[grid, 3]
ngrids = len(Gv)
coords = cell.get_uniform_grids()
vlocG = get_vlocG(cell) ###[natom, grid]
ptr = mole.PTR_ENV_START
def hcore_deriv(atm_id):
shl0, shl1, p0, p1 = aoslices[atm_id]
symb = cell.atom_symbol(atm_id)
fakemol = _make_fakemol()
vloc_g = 1j * np.einsum('ga,g->ag', Gv, SI[atm_id]*vlocG[atm_id])
nkpts, nao = h1.shape[0], h1.shape[2]
hcore = np.zeros([3,nkpts,nao,nao], dtype=h1.dtype)
for kn, kpt in enumerate(kpts):
ao = eval_ao_kpts(cell, coords, kpt)[0]
rho = np.einsum('gi,gj->gij',ao.conj(),ao)
for ax in range(3):
vloc_R = tools.ifft(vloc_g[ax], mesh).real
vloc = np.einsum('gij,g->ij', rho, vloc_R)
hcore[ax,kn] += vloc
rho = None
aokG= tools.fftk(np.asarray(ao.T, order='C'),
mesh, np.exp(-1j*np.dot(coords, kpt))).T
ao = None
Gk = Gv + kpt
G_rad = lib.norm(Gk, axis=1)
if symb not in cell._pseudo: continue
pp = cell._pseudo[symb]
for l, proj in enumerate(pp[5:]):
rl, nl, hl = proj
if nl >0:
hl = np.asarray(hl)
fakemol._bas[0,mole.ANG_OF] = l
fakemol._env[ptr+3] = .5*rl**2
fakemol._env[ptr+4] = rl**(l+1.5)*np.pi**1.25
pYlm_part = fakemol.eval_gto('GTOval', Gk)
pYlm = np.empty((nl,l*2+1,ngrids))
for k in range(nl):
qkl = _qli(G_rad*rl, l, k)
pYlm[k] = pYlm_part.T * qkl
SPG_lmi = np.einsum('g,nmg->nmg', SI[atm_id].conj(), pYlm)
SPG_lm_aoG = np.einsum('nmg,gp->nmp', SPG_lmi, aokG)
SPG_lmi_G = 1j * np.einsum('nmg, ga->anmg', SPG_lmi, Gv)
SPG_lm_G_aoG = np.einsum('anmg, gp->anmp', SPG_lmi_G, aokG)
tmp_1 = np.einsum('ij,ajmp->aimp', hl, SPG_lm_G_aoG)
tmp_2 = np.einsum('ij,jmp->imp', hl, SPG_lm_aoG)
vppnl = (np.einsum('imp,aimq->apq', SPG_lm_aoG.conj(), tmp_1) +
np.einsum('aimp,imq->apq', SPG_lm_G_aoG.conj(), tmp_2))
vppnl *=(1./ngrids**2)
if dtype==np.float64:
hcore[:,kn] += vppnl.real
else:
hcore[:,kn] += vppnl
hcore[:,kn,p0:p1] -= h1[kn,:,p0:p1]
hcore[:,kn,:,p0:p1] -= h1[kn,:,p0:p1].transpose(0,2,1).conj()
return hcore
return hcore_deriv
def grad_nuc(cell, atmlst):
'''
Derivatives of nuclear repulsion energy wrt nuclear coordinates
'''
ew_eta = cell.get_ewald_params()[0]
chargs = cell.atom_charges()
coords = cell.atom_coords()
Lall = cell.get_lattice_Ls()
natom = len(chargs)
ewovrl_grad = np.zeros([natom,3])
for i, qi in enumerate(chargs):
ri = coords[i]
for j in range(natom):
if j == i:
continue
qj = chargs[j]
rj = coords[j]
r1 = ri-rj + Lall
r = np.sqrt(np.einsum('ji,ji->j', r1, r1))
r = r.reshape(len(r),1)
ewovrl_grad[i] += np.sum(- (qi * qj / r ** 3 * r1 *
scipy.special.erfc(ew_eta * r).reshape(len(r),1)), axis = 0)
ewovrl_grad[i] += np.sum(- qi * qj / r ** 2 * r1 * 2 * ew_eta / np.sqrt(np.pi) *
np.exp(-ew_eta**2 * r ** 2).reshape(len(r),1), axis = 0)
mesh = gto.cell._cut_mesh_for_ewald(cell, cell.mesh)
Gv, Gvbase, weights = cell.get_Gv_weights(mesh)
absG2 = np.einsum('gi,gi->g', Gv, Gv)
absG2[absG2==0] = 1e200
ewg_grad = np.zeros([natom,3])
SI = cell.get_SI(Gv)
if cell.low_dim_ft_type is None or cell.dimension == 3:
coulG = 4*np.pi / absG2
coulG *= weights
ZSI = np.einsum("i,ij->j", chargs, SI)
ZexpG2 = coulG * np.exp(-absG2/(4*ew_eta**2))
ZexpG2_mod = ZexpG2.reshape(len(ZexpG2),1) * Gv
for i, qi in enumerate(chargs):
Zfac = np.imag(ZSI * SI[i].conj()) * qi
ewg_grad[i] = - np.sum(Zfac.reshape((len(Zfac),1)) * ZexpG2_mod, axis = 0)
ew_grad = ewg_grad + ewovrl_grad
if atmlst is not None:
ew_grad = ew_grad[atmlst]
return ew_grad
def get_jk(mf_grad, dm, kpts):
'''J = ((-nabla i) j| kl) D_lk
K = ((-nabla i) j| kl) D_jk
'''
vj, vk = mf_grad.get_jk(dm, kpts)
return vj, vk
def get_j(mf_grad, dm, kpts):
return mf_grad.get_j(dm, kpts)
def get_k(mf_grad, dm, kpts):
return mf_grad.get_k(dm, kpts)
def get_veff(mf_grad, dm, kpts):
'''NR Hartree-Fock Coulomb repulsion'''
vj, vk = mf_grad.get_jk(dm, kpts)
return vj - vk * .5
def make_rdm1e(mo_energy, mo_coeff, mo_occ):
'''Energy weighted density matrix'''
nkpts = len(mo_occ)
dm1e = [molgrad.make_rdm1e(mo_energy[k], mo_coeff[k], mo_occ[k]) for k in range(nkpts)]
return np.asarray(dm1e)
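# Added sketch: per k-point the energy-weighted density matrix is
# W_k = C_occ diag(e_occ * occ) C_occ^H, which is what molgrad.make_rdm1e evaluates.
# A toy single-k-point version in plain numpy, for illustration only:
def _demo_make_rdm1e(nao=4, nocc=2):
    import numpy as np
    rng = np.random.default_rng(1)
    mo_coeff, _ = np.linalg.qr(rng.standard_normal((nao, nao)))
    mo_energy = np.sort(rng.standard_normal(nao))
    mo_occ = np.zeros(nao)
    mo_occ[:nocc] = 2.0
    occ = mo_occ > 0
    return (mo_coeff[:, occ] * (mo_energy[occ] * mo_occ[occ])) @ mo_coeff[:, occ].conj().T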
class GradientsMixin(molgrad.GradientsMixin):
'''
Basic nuclear gradient functions for non-relativistic methods
'''
def __init__(self, method):
self.cell = method.cell
self.kpts = method.kpts
molgrad.GradientsMixin.__init__(self, method)
def get_hcore(self, cell=None, kpts=None):
if cell is None: cell = self.cell
if kpts is None: kpts = self.kpts
return get_hcore(cell, kpts)
hcore_generator = hcore_generator
def get_ovlp(self, cell=None, kpts=None):
if cell is None: cell = self.cell
if kpts is None: kpts = self.kpts
return get_ovlp(cell, kpts)
def get_jk(self, dm=None, kpts=None):
if kpts is None: kpts = self.kpts
if dm is None: dm = self.base.make_rdm1()
exxdiv = self.base.exxdiv
cpu0 = (logger.process_clock(), logger.perf_counter())
vj, vk = self.base.with_df.get_jk_e1(dm, kpts, exxdiv=exxdiv)
logger.timer(self, 'vj and vk', *cpu0)
return vj, vk
def get_j(self, dm=None, kpts=None):
if kpts is None: kpts = self.kpts
if dm is None: dm = self.base.make_rdm1()
cpu0 = (logger.process_clock(), logger.perf_counter())
vj = self.base.with_df.get_j_e1(dm, kpts)
logger.timer(self, 'vj', *cpu0)
return vj
def get_k(self, dm=None, kpts=None, kpts_band=None):
if kpts is None: kpts = self.kpts
if dm is None: dm = self.base.make_rdm1()
exxdiv = self.base.exxdiv
cpu0 = (logger.process_clock(), logger.perf_counter())
vk = self.base.with_df.get_k_e1(dm, kpts, kpts_band, exxdiv)
logger.timer(self, 'vk', *cpu0)
return vk
def grad_nuc(self, cell=None, atmlst=None):
if cell is None: cell = self.cell
return grad_nuc(cell, atmlst)
def as_scanner(mf_grad):
'''Generating a nuclear gradients scanner/solver (for geometry optimizer).
The returned solver is a function. This function
pft.public = getattr(lang, 'public', None)
pft.meta_title = getattr(lang, 'meta_title', None)
pft.meta_description = getattr(lang, 'meta_description', None)
pft.meta_keywords = getattr(lang, 'meta_keywords', None)
pft.tags = getattr(lang, 'tags', None)
pft.save()
except IntegrityError as e:
raise IntegrityError(e)
@classmethod
def find_product(cls, query, lang, onlypublic=False):
product = cls.query_or(
query,
"pk",
"price_base",
# "product__pk",
"model",
"code",
"url_video",
# "product__brand__{}__name".format(lang),
# "product__brand__{}__slug".format(lang),
# "product__brand__image",
# "product__brand__outstanding",
# "product__family",
# "product__family__code",
"family__pk".format(lang),
"family__{}__slug".format(lang),
# "product__family__image",
"family__{}__name".format(lang),
"family__{}__description".format(lang),
"category__pk",
# "product__category",
# "product__category__code".format(lang),
# "product__category__image".format(lang),
"category__{}__slug".format(lang),
"category__{}__name".format(lang),
"category__{}__description".format(lang),
# "product__subcategory",
# "product__subcategory__code",
# "product__subcategory__image",
"subcategory__pk",
"subcategory__{}__slug".format(lang),
"subcategory__{}__name".format(lang),
"subcategory__{}__description".format(lang),
"family__image",
"family__icon",
"category__image",
"subcategory__image",
"tax__tax",
"{}__meta_title".format(lang),
"{}__meta_description".format(lang),
"{}__meta_keywords".format(lang),
"{}__description_short".format(lang),
"{}__description_long".format(lang),
"{}__slug".format(lang),
"{}__name".format(lang),
"weight",
family_image="family__image",
family_name="family__{}__name".format(lang),
family_slug="family__{}__slug".format(lang),
family_description="family__{}__description".format(lang),
category_image="category__image",
category_slug="category__{}__slug".format(lang),
category_name="category__{}__name".format(lang),
category_description="category__{}__description".format(lang),
subcategory_image="subcategory__image",
subcategory_slug="subcategory__{}__slug".format(lang),
subcategory_name="subcategory__{}__name".format(lang),
subcategory_description="subcategory__{}__description".format(lang),
tax="tax__tax",
meta_title="{}__meta_title".format(lang),
meta_description="{}__meta_description".format(lang),
meta_keywords="{}__meta_keywords".format(lang),
description_short="{}__description_short".format(lang),
description_long="{}__description_long".format(lang),
name="{}__name".format(lang),
slug="{}__slug".format(lang),
pop_annotations=True
)
if onlypublic:
product = product.exclude(public=False)
product = product.first()
# if product:
# product_final = cls.objects.get(pk=product['pk'])
# prices = product_final.calculate_price()
# product['price'] = prices['price_total']
return product
# best-selling related products
class ProductRelationSold(CodenerixModel):
product = models.ForeignKey(Product, on_delete=models.CASCADE, blank=False, null=False, related_name='products_related', verbose_name=_("Product"))
related = models.ForeignKey(Product, on_delete=models.CASCADE, blank=False, null=False, related_name='products_related_sold', verbose_name=_("Products related"))
hits = models.SmallIntegerField(_("Hits"), blank=True, null=True)
class Meta(CodenerixModel.Meta):
unique_together = (('product', 'related'), )
def __unicode__(self):
return u"{} ({})".format(smart_str(self.product), smart_str(self.hits))
def __str__(self):
return self.__unicode__()
def __fields__(self, info):
fields = []
fields.append(('product', _("Product")))
fields.append(('related', _("Products related")))
fields.append(('hits', _("Hits")))
return fields
# product images
class ProductImage(CodenerixModel, GenImageFile):
product = models.ForeignKey(Product, on_delete=models.CASCADE, related_name='products_image', verbose_name=_("Product"))
order = models.SmallIntegerField(_("Order"), blank=True, null=True)
public = models.BooleanField(_("Public"), blank=True, null=False, default=True)
principal = models.BooleanField(_("Principal"), blank=False, null=False, default=False)
flagship_product = models.BooleanField(_("Flagship product"), default=False)
outstanding = models.BooleanField(_("Outstanding"), default=False)
def __unicode__(self):
return u"{} ({})".format(smart_str(self.product), smart_str(self.order))
def __str__(self):
return self.__unicode__()
def __fields__(self, info):
fields = []
fields.append(('product', _("Product")))
fields.append(('order', _("Order")))
fields.append(('public', _("Public")))
fields.append(('principal', _("Principal")))
return fields
# save() needs a flag to indicate whether the outstanding and flagship product checks should be run.
@transaction.atomic
def save(self, *args, **kwards):
if self.principal:
ProductImage.objects.filter(product=self.product).exclude(pk=self.pk).update(principal=False)
elif not ProductImage.objects.filter(product=self.product, principal=True).exclude(pk=self.pk).exists():
self.principal = True
# If no image is flagged as the flagship product, flag this one.
if self.flagship_product:
ProductImage.objects.filter(product=self.product, flagship_product=True).exclude(pk=self.pk).update(flagship_product=False)
elif not ProductImage.objects.filter(product=self.product, flagship_product=True).exclude(pk=self.pk).exists():
self.flagship_product = True
# Outstanding product
if self.outstanding:
ProductImage.objects.filter(product=self.product, outstanding=True).exclude(pk=self.pk).update(outstanding=False)
elif not ProductImage.objects.filter(product=self.product, outstanding=True).exclude(pk=self.pk).exists():
self.outstanding = True
return super(ProductImage, self).save(*args, **kwards)
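# Added sketch (illustrative only, never called): save() above keeps at most one
# image per product flagged as principal/flagship/outstanding. The same
# "demote the others, then promote this one" pattern in isolation:
def _demo_promote_principal(image):
    ProductImage.objects.filter(product=image.product, principal=True).exclude(pk=image.pk).update(principal=False)
    image.principal = True
    image.save()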
# product documents
class ProductDocument(CodenerixModel, GenDocumentFile):
product = models.ForeignKey(Product, on_delete=models.CASCADE, related_name='products_document', verbose_name=_("Product"))
public = models.BooleanField(_("Public"), blank=False, null=False, default=False)
def __unicode__(self):
return u"{}".format(smart_str(self.product))
def __str__(self):
return self.__unicode__()
def __fields__(self, info):
fields = []
fields.append(('product', _("Product")))
fields.append(('public', _("Public")))
return fields
# final product (one product with many attributes, e.g. screen inches, RAM)
class ProductFinal(CustomQueryMixin, CodenerixModel):
"""
stock is related to this class
definition of individual products
"""
product = models.ForeignKey(Product, on_delete=models.CASCADE, blank=False, null=False, related_name='products_final', verbose_name=_('Product'))
# related products
related = models.ManyToManyField("ProductFinal", blank=True, related_name='productsrelated', symmetrical=False)
related_accesory = models.ManyToManyField("ProductFinal", blank=True, related_name='productsrelatedaccesory', symmetrical=False)
offer = models.BooleanField(_("Offer"), blank=True, null=False, default=False)
outstanding = models.BooleanField(_("Outstanding"), blank=True, null=False, default=False)
most_sold = models.BooleanField(_("Most sold"), blank=True, null=False, default=False)
# price without tax
price_base = models.DecimalField(_("Price base"), null=False, blank=False, max_digits=CURRENCY_MAX_DIGITS, decimal_places=CURRENCY_DECIMAL_PLACES, default=0, editable=False)
# price with tax
price = models.DecimalField(_("Price"), null=False, blank=False, max_digits=CURRENCY_MAX_DIGITS, decimal_places=CURRENCY_DECIMAL_PLACES, default=0, editable=False)
ean13 = models.CharField(_("EAN-13"), null=True, blank=True, max_length=13)
reviews_value = models.FloatField(_("Reviews"), null=False, blank=False, default=0, editable=False)
reviews_count = models.IntegerField(_("Reviews count"), null=False, blank=False, default=0, editable=False)
sample = models.BooleanField(_("Sample"), blank=True, null=False, default=False, help_text=_('If this option is checked the product can not be sold'))
code = models.CharField(_("Code"), max_length=250, blank=True, null=True, unique=True, help_text=_('If it is empty, code is equal to code product'))
price_base_local = models.DecimalField(_("Price base"), blank=True, null=True, max_digits=CURRENCY_MAX_DIGITS, decimal_places=CURRENCY_DECIMAL_PLACES, help_text=_('If it is empty, price base is equal to price base of product'))
packing_cost = models.DecimalField(_("Packing cost"), blank=True, null=True, max_digits=CURRENCY_MAX_DIGITS, decimal_places=CURRENCY_DECIMAL_PLACES, help_text=_('If it is empty, packing cost is equal to packing cost of product'))
weight = models.FloatField(_("Weight"), blank=True, null=True, help_text=_('If it is empty, weight is equal to weight of product'))
def __str__(self):
lang = get_language_database()
lang_model = getattr(self, '{}'.format(lang), None)
if lang_model:
name = lang_model.name
else:
name = self.product
if self.ean13:
name = u"{} ({})".format(smart_str(name), self.ean13)
else:
name = u"{}".format(name)
return name
def __unicode__(self):
return self.__str__()
def get_name(self):
lang = get_language_database()
lang_model = getattr(self, '{}'.format(lang), None)
if lang_model:
name = lang_model.name
else:
name = self.product
return name
def __fields__(self, info):
lang = get_language_database()
fields = []
fields.append(('code', _("Code")))
fields.append(('product__code', _("Product Code")))
fields.append(('{}__name'.format(lang), _("Product")))
fields.append(('product__family__{}__name'.format(lang), _("Family")))
fields.append(('product__category__{}__name'.format(lang), _("Category")))
fields.append(('product__subcategory__{}__name'.format(lang), _("Subcategory")))
fields.append(('{}__public'.format(lang), _("Public")))
fields.append(('price', _("Price")))
fields.append(('is_pack', _("Is pack")))
fields.append(('sample', _("Sample")))
return fields
def __searchF__(self, info):
lang = get_language_database()
fields = {}
fields['product__family__{}__name'.format(lang)] = (_('Family'), lambda x, lang=lang: Q(**{'product__family__{}__name__icontains'.format(lang): x}), 'input')
fields['product__category__{}__name'.format(lang)] = (_('Category'), lambda x, lang=lang: Q(**{'product__category__{}__name__icontains'.format(lang): x}), 'input')
fields['product__subcategory__{}__name'.format(lang)] = (_('Subcategory'), lambda x, lang=lang: Q(**{'product__subcategory__{}__name__icontains'.format(lang): x}), 'input')
fields['{}__name'.format(lang)] = (_('Product'), lambda x, lang=lang: Q(**{'{}__name__icontains'.format(lang): x}), 'input')
fields['product__code'] = (_('Product Code'), lambda x, lang=lang: Q(**{'product__code__icontains': x}), 'input')
return fields
def __searchQ__(self, info, text):
lang = get_language_database()
qobject = qobject_builder_string_search(
[
"{}__name".format(lang),
"{}__slug".format(lang),
],
text
)
text_filters = {}
text_filters['product_name'] = qobject
return text_filters
def save(self, *args, **kwards):
self.recalculate(commit=False)
return super(ProductFinal, self).save(*args, **kwards)
def recalculate(self, commit=True):
prices = self.calculate_price()
if self.price != prices['price_total'] or self.price_base != prices['price_base']:
self.price = prices['price_total']
self.price_base = prices['price_base']
if commit:
self.save()
def lock_delete(self):
if self.products_final_attr.exists():
return _("Cannot delete product final model, relationship between product final model and products final attributes")
elif self.productfinals_image.exists():
return _("Cannot delete product final model, relationship between product final model and products final image")
elif self.products_unique.exists():
return _("Cannot delete product final model, relationship between product final model and products unique")
elif self.flagship_products.exists():
return _("Cannot delete product final model, relationship between product final model and products flagship")
else:
return super(ProductFinal, self).lock_delete()
def calculate_price(self):
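# Pricing rules (encoded by the TYPE_PRICE_* constants):
# - TYPE_PRICE_FINAL: the attribute/feature price replaces the computed price and stops further adjustments.
# - TYPE_PRICE_INCREASE: the price is increased by the given absolute amount.
# - TYPE_PRICE_PERCENTAGE: the price is increased by the given percentage of the base price.
# Attributes are applied first, then product features, then the special feature.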
if self.price_base_local is None:
price = self.product.price_base
else:
price = self.price_base_local
tax = self.product.tax.tax
price_base = price
# attributes
update = True
for attr in self.products_final_attr.all().order_by('-updated'):
if update:
if attr.attribute.type_price == TYPE_PRICE_FINAL:
price = Decimal(attr.attribute.price)
update = False
elif attr.attribute.type_price == TYPE_PRICE_INCREASE:
price += Decimal(attr.attribute.price)
elif attr.attribute.type_price == TYPE_PRICE_PERCENTAGE:
price += price_base * Decimal(attr.attribute.price / 100.0)
# features
if update:
for feature in self.product.product_features.all().order_by('-updated'):
if update:
if feature.feature.type_price == TYPE_PRICE_FINAL:
price = Decimal(feature.feature.price)
update = False
elif feature.feature.type_price == TYPE_PRICE_INCREASE:
price += Decimal(feature.feature.price)
elif feature.feature.type_price == TYPE_PRICE_PERCENTAGE:
price += price_base * Decimal(feature.feature.price / 100.0)
# special features
if update and self.product.feature_special:
if self.product.feature_special.type_price == TYPE_PRICE_FINAL:
price = Decimal(self.product.feature_special.price)
elif self.product.feature_special.type_price == TYPE_PRICE_INCREASE:
price += Decimal(self.product.feature_special.price)
elif self.product.feature_special.type_price == TYPE_PRICE_PERCENTAGE:
price += price_base * Decimal(self.product.feature_special.price / 100.0)
result = {}
result['price_base'] = price
result['tax'] = price * Decimal(tax) / 100
result['price_total'] = price + result['tax']
return result
def is_pack(self):
return self.productfinals_option.exists()
@property
def ispack(self):
return self.is_pack()
@classmethod
def get_recommended_products(cls, lang, family=None, category=None, subcategory=None):
products = []
query = Q(most_sold=True) | Q(product__products_image__principal=True)
if family is not None:
query &= Q(product__family=family)
if category is not None:
query &= Q(product__category=category)
if subcategory is not None:
query &= Q(product__subcategory=subcategory)
for product in cls.query_or(
query,
"{}__slug".format(lang),
"offer",
"created",
"offer",
"pk",
"product__{}__name".format(lang),
"product__model",
"product__brand__{}__name".format(lang),
"product__products_image__image",
"{}__meta_title".format(lang),
slug="{}__slug".format(lang),
meta_title="{}__meta_title".format(lang),
image="product__products_image__image",
name="product__{}__name".format(lang),
pop_annotations=True
):
product['new'] = 1 if (timezone.now() - product['created']).days <= settings.CDNX_PRODUCTS_NOVELTY_DAYS else 0
products.append(product)
return products
@classmethod
def get_outstanding_products(cls, lang, family=None, category=None, subcategory=None, limit=16):
products = []
query = Q(outstanding=True) & (Q(product__products_image__principal=True) | Q(productfinals_image__principal=True))
if family is not None:
query &= Q(product__family=family)
if category is not None:
query &= Q(product__category=category)
if subcategory is not None:
query &= Q(product__subcategory=subcategory)
qset = cls.objects.filter(
query
).values(
"{}__slug".format(lang),
"offer",
"created",
"offer",
"pk",
"sample",
"code",
"product__tax__tax",
"product__{}__name".format(lang),
"product__model",
"product__category__{}__name".format(lang),
"product__brand__{}__name".format(lang),
"product__products_image__image",
"{}__meta_title".format(lang)
).annotate(
slug=F("{}__slug".format(lang)),
meta_title=F("{}__meta_title".format(lang)),
image_product=F("product__products_image__image"),
image_productfinal=F("productfinals_image__image"),
name=F("product__{}__name".format(lang)),
category_name=F("product__category__{}__name".format(lang))
)[:limit]
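# For each matched row: compute the current price, flag it as new if it is younger than
# CDNX_PRODUCTS_NOVELTY_DAYS, and prefer the ProductFinal image over the Product image.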
for product in qset:
prices = cls.objects.get(pk=product['pk']).calculate_price()
product['pop_annotations'] = True
product['price'] = prices['price_total']
product['new'] = 1 if (timezone.now() - product['created']).days <= settings.CDNX_PRODUCTS_NOVELTY_DAYS else 0
if product['image_productfinal']:
product['image'] = product['image_productfinal']
else:
product['image'] = product['image_product']
products.append(product)
return products
@classmethod
def get_products(cls, lang, family=None, category=None, subcategory=None, brand=None):
products = []
query = Q(product__products_image__principal=True)
if family is not None:
query &= Q(product__family=family)
if category is not None:
query &= Q(product__category=category)
if subcategory is not None:
query &= Q(product__subcategory=subcategory)
if brand is not None:
query &= Q(product__brand=brand)
for product in cls.query_or(
query,
"{}__slug".format(lang),
"offer",
"created",
"offer",
"pk",
"product__tax__tax",
"product__{}__name".format(lang),
"product__model",
"product__brand__{}__name".format(lang),
"product__products_image__image",
"{}__meta_title".format(lang),
slug="{}__slug".format(lang),
meta_title="{}__meta_title".format(lang),
image="product__products_image__image",
name="product__{}__name".format(lang),
pop_annotations=True
):
prices = cls.objects.get(pk=product['pk']).calculate_price()
product['price'] = prices['price_total']
product['new'] = 1 if (timezone.now() - product['created']).days <= settings.CDNX_PRODUCTS_NOVELTY_DAYS else 0
products.append(product)
return products
def test_put_gpu_resources_search_resources_11(self, args):
if self.get_mode(args) == 'CPU':
pytest.skip('this API do not support CPU version')
base_url = 'http://%s:%s/' % (args['ip'], args['port'])
res = requests.get(base_url + 'config/gpu_resources')
original_req = res.json()
req = copy.deepcopy(original_req)
req['enable'] = False
res = requests.put(base_url + 'config/gpu_resources', data=json.dumps(req))
false_res = requests.get(base_url + 'config/gpu_resources')
assert False == false_res.json()['enable']
req['search_resources'] = [1, 2]
requests.put(base_url + 'config/gpu_resources', data=json.dumps(req))
res = requests.get(base_url + 'config/gpu_resources')
assert false_res.json() == res.json()
def test_put_gpu_resources_search_resources_12(self, args):
if self.get_mode(args) == 'CPU':
pytest.skip('this API do not support CPU version')
base_url = 'http://%s:%s/' % (args['ip'], args['port'])
res = requests.get(base_url + 'config/gpu_resources')
original_req = res.json()
req = copy.deepcopy(original_req)
req['enable'] = False
res = requests.put(base_url + 'config/gpu_resources', data=json.dumps(req))
false_res = requests.get(base_url + 'config/gpu_resources')
assert False == false_res.json()['enable']
req['search_resources'] = {}
requests.put(base_url + 'config/gpu_resources', data=json.dumps(req))
res = requests.get(base_url + 'config/gpu_resources')
assert false_res.json() == res.json()
def test_put_gpu_resources_search_resources_13(self, args):
if self.get_mode(args) == 'CPU':
pytest.skip('this API do not support CPU version')
base_url = 'http://%s:%s/' % (args['ip'], args['port'])
res = requests.get(base_url + 'config/gpu_resources')
original_req = res.json()
req = copy.deepcopy(original_req)
req['enable'] = False
res = requests.put(base_url + 'config/gpu_resources', data=json.dumps(req))
false_res = requests.get(base_url + 'config/gpu_resources')
assert False == false_res.json()['enable']
req['search_resources'] = {'k':1}
requests.put(base_url + 'config/gpu_resources', data=json.dumps(req))
res = requests.get(base_url + 'config/gpu_resources')
assert false_res.json() == res.json()
def test_put_gpu_resources_search_resources_14(self, args):
if self.get_mode(args) == 'CPU':
pytest.skip('this API do not support CPU version')
base_url = 'http://%s:%s/' % (args['ip'], args['port'])
res = requests.get(base_url + 'config/gpu_resources')
original_req = res.json()
req = copy.deepcopy(original_req)
req['enable'] = False
res = requests.put(base_url + 'config/gpu_resources', data=json.dumps(req))
false_res = requests.get(base_url + 'config/gpu_resources')
assert False == false_res.json()['enable']
req['search_resources'] = None
requests.put(base_url + 'config/gpu_resources', data=json.dumps(req))
res = requests.get(base_url + 'config/gpu_resources')
assert false_res.json() == res.json()
def test_put_gpu_resources_search_resources_15(self, args):
if self.get_mode(args) == 'CPU':
pytest.skip('this API do not support CPU version')
base_url = 'http://%s:%s/' % (args['ip'], args['port'])
res = requests.get(base_url + 'config/gpu_resources')
original_req = res.json()
req = copy.deepcopy(original_req)
req['enable'] = False
res = requests.put(base_url + 'config/gpu_resources', data=json.dumps(req))
false_res = requests.get(base_url + 'config/gpu_resources')
assert False == false_res.json()['enable']
req['search_resources'] = -3.222
requests.put(base_url + 'config/gpu_resources', data=json.dumps(req))
res = requests.get(base_url + 'config/gpu_resources')
assert false_res.json() == res.json()
def test_put_gpu_resources_search_resources_16(self, args):
if self.get_mode(args) == 'CPU':
pytest.skip('this API do not support CPU version')
base_url = 'http://%s:%s/' % (args['ip'], args['port'])
res = requests.get(base_url + 'config/gpu_resources')
original_req = res.json()
req = copy.deepcopy(original_req)
req['enable'] = False
res = requests.put(base_url + 'config/gpu_resources', data=json.dumps(req))
false_res = requests.get(base_url + 'config/gpu_resources')
assert False == false_res.json()['enable']
req['search_resources'] = ' '
requests.put(base_url + 'config/gpu_resources', data=json.dumps(req))
res = requests.get(base_url + 'config/gpu_resources')
assert false_res.json() == res.json()
def test_put_gpu_resources_search_resources_17(self, args):
if self.get_mode(args) == 'CPU':
pytest.skip('this API do not support CPU version')
base_url = 'http://%s:%s/' % (args['ip'], args['port'])
res = requests.get(base_url + 'config/gpu_resources')
original_req = res.json()
req = copy.deepcopy(original_req)
req['enable'] = False
res = requests.put(base_url + 'config/gpu_resources', data=json.dumps(req))
false_res = requests.get(base_url + 'config/gpu_resources')
assert False == false_res.json()['enable']
req['search_resources'] = '汉子'
requests.put(base_url + 'config/gpu_resources', data=json.dumps(req))
res = requests.get(base_url + 'config/gpu_resources')
assert false_res.json() == res.json()
def test_put_gpu_resources_search_resources_18(self, args):
if self.get_mode(args) == 'CPU':
pytest.skip('this API do not support CPU version')
base_url = 'http://%s:%s/' % (args['ip'], args['port'])
res = requests.get(base_url + 'config/gpu_resources')
original_req = res.json()
req = copy.deepcopy(original_req)
req['enable'] = False
res = requests.put(base_url + 'config/gpu_resources', data=json.dumps(req))
false_res = requests.get(base_url + 'config/gpu_resources')
assert False == false_res.json()['enable']
req['search_resources'] = '\t'
requests.put(base_url + 'config/gpu_resources', data=json.dumps(req))
res = requests.get(base_url + 'config/gpu_resources')
assert false_res.json() == res.json()
def test_put_gpu_resources_search_resources_19(self, args):
if self.get_mode(args) == 'CPU':
pytest.skip('this API do not support CPU version')
base_url = 'http://%s:%s/' % (args['ip'], args['port'])
res = requests.get(base_url + 'config/gpu_resources#')
original_req = res.json()
req = copy.deepcopy(original_req)
req['enable'] = False
res = requests.put(base_url + 'config/gpu_resources#', data=json.dumps(req))
false_res = requests.get(base_url + 'config/gpu_resources#')
assert False == false_res.json()['enable']
requests.put(base_url + 'config/gpu_resources#', data=json.dumps(req))
res = requests.get(base_url + 'config/gpu_resources#')
assert false_res.json() == res.json()
def test_put_gpu_resources_search_resources_20(self, args):
if self.get_mode(args) == 'CPU':
pytest.skip('this API do not support CPU version')
base_url = 'http://%s:%s/' % (args['ip'], args['port'])
res = requests.get(base_url + 'config/gpu_resources?')
original_req = res.json()
req = copy.deepcopy(original_req)
req['enable'] = False
res = requests.put(base_url + 'config/gpu_resources?', data=json.dumps(req))
false_res = requests.get(base_url + 'config/gpu_resources?')
assert False == false_res.json()['enable']
requests.put(base_url + 'config/gpu_resources?', data=json.dumps(req))
res = requests.get(base_url + 'config/gpu_resources?')
assert false_res.json() == res.json()
def test_put_gpu_resources_search_resources_21(self, args):
if self.get_mode(args) == 'CPU':
pytest.skip('this API do not support CPU version')
base_url = 'http://%s:%s/' % (args['ip'], args['port'])
res = requests.get(base_url + 'config/gpu_resources/')
original_req = res.json()
req = copy.deepcopy(original_req)
req['enable'] = False
res = requests.put(base_url + 'config/gpu_resources/', data=json.dumps(req))
false_res = requests.get(base_url + 'config/gpu_resources/')
assert False == false_res.json()['enable']
requests.put(base_url + 'config/gpu_resources/', data=json.dumps(req))
res = requests.get(base_url + 'config/gpu_resources/')
assert false_res.json() == res.json()
def test_put_gpu_resources_search_resources_22(self, args):
if self.get_mode(args) == 'CPU':
pytest.skip('this API do not support CPU version')
base_url = 'http://%s:%s/' % (args['ip'], args['port'])
res = requests.get(base_url + 'config/gpu_resources')
original_req = res.json()
req = copy.deepcopy(original_req)
req['enable'] = True
res = requests.put(base_url + 'config/gpu_resources', data=json.dumps(req))
true_res = requests.get(base_url + 'config/gpu_resources')
assert True == true_res.json()['enable']
logging.getLogger().info(true_res.json())
req = copy.deepcopy(original_req)
req['search_resources'] = 0
requests.put(base_url + 'config/gpu_resources', data=json.dumps(req))
res = requests.get(base_url + 'config/gpu_resources')
logging.getLogger().info(res.json())
def test_put_gpu_resources_search_resources_23(self, args):
if self.get_mode(args) == 'CPU':
pytest.skip('this API do not support CPU version')
base_url = 'http://%s:%s/' % (args['ip'], args['port'])
res = requests.get(base_url + 'config/gpu_resources')
original_req = res.json()
req = copy.deepcopy(original_req)
req['enable'] = True
res = requests.put(base_url + 'config/gpu_resources', data=json.dumps(req))
true_res = requests.get(base_url + 'config/gpu_resources')
assert True == true_res.json()['enable']
req['search_resources'] = 5
res = requests.put(base_url + 'config/gpu_resources', data=json.dumps(req))
assert 500 == res.status_code
assert 'Internal Server Error' in res.text
res = requests.get(base_url + 'config/gpu_resources')
assert true_res.json() == res.json()
def test_put_gpu_resources_search_resources_24(self, args):
if self.get_mode(args) == 'CPU':
pytest.skip('this API do not support CPU version')
base_url = 'http://%s:%s/' % (args['ip'], args['port'])
res = requests.get(base_url + 'config/gpu_resources')
original_req = res.json()
req = copy.deepcopy(original_req)
req['enable'] = True
res = requests.put(base_url + 'config/gpu_resources', data=json.dumps(req))
true_res = requests.get(base_url + 'config/gpu_resources')
assert True == true_res.json()['enable']
req['search_resources'] = 2147386
res = requests.put(base_url + 'config/gpu_resources', data=json.dumps(req))
assert 500 == res.status_code
assert 'Internal Server Error' in res.text
res = requests.get(base_url + 'config/gpu_resources')
assert true_res.json() == res.json()
def test_put_gpu_resources_search_resources_25(self, args):
if self.get_mode(args) == 'CPU':
pytest.skip('this API do not support CPU version')
base_url = 'http://%s:%s/' % (args['ip'], args['port'])
res = requests.get(base_url + 'config/gpu_resources')
original_req = res.json()
req = copy.deepcopy(original_req)
req['enable'] = True
res = requests.put(base_url + 'config/gpu_resources', data=json.dumps(req))
true_res = requests.get(base_url + 'config/gpu_resources')
assert True == true_res.json()['enable']
req['search_resources'] = -10
res = requests.put(base_url + 'config/gpu_resources', data=json.dumps(req))
assert 500 == res.status_code
assert 'Internal Server Error' in res.text
res = requests.get(base_url + 'config/gpu_resources')
assert true_res.json() == res.json()
def test_put_gpu_resources_search_resources_26(self, args):
if self.get_mode(args) == 'CPU':
pytest.skip('this API do not support CPU version')
base_url = 'http://%s:%s/' % (args['ip'], args['port'])
res = requests.get(base_url + 'config/gpu_resources')
original_req = res.json()
req = copy.deepcopy(original_req)
req['enable'] = True
res = requests.put(base_url + 'config/gpu_resources', data=json.dumps(req))
true_res = requests.get(base_url + 'config/gpu_resources')
assert True == true_res.json()['enable']
req['search_resources'] = 5
res = requests.put(base_url + 'config/gpu_resources', data=json.dumps(req))
assert 500 == res.status_code
assert 'Internal Server Error' in res.text
res = requests.get(base_url + 'config/gpu_resources')
assert true_res.json() == res.json()
# Copyright 2021 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import absolute_import
from argparse import Action, ArgumentTypeError, Namespace, _ActionsContainer
from pex import pex_warnings
from pex.argparse import HandleBoolAction
from pex.network_configuration import NetworkConfiguration
from pex.orderedset import OrderedSet
from pex.resolve.lockfile import json_codec
from pex.resolve.lockfile.model import Lockfile
from pex.resolve.path_mappings import PathMapping, PathMappings
from pex.resolve.resolver_configuration import (
PYPI,
LockRepositoryConfiguration,
PexRepositoryConfiguration,
PipConfiguration,
ReposConfiguration,
ResolverVersion,
)
from pex.result import Error
from pex.tracer import TRACER
from pex.typing import TYPE_CHECKING, cast
if TYPE_CHECKING:
from typing import Optional, Union
class _ManylinuxAction(Action):
def __init__(self, *args, **kwargs):
kwargs["nargs"] = "?"
super(_ManylinuxAction, self).__init__(*args, **kwargs)
def __call__(self, parser, namespace, value, option_str=None):
if option_str.startswith("--no"):
setattr(namespace, self.dest, None)
elif value.startswith("manylinux"):
setattr(namespace, self.dest, value)
else:
raise ArgumentTypeError(
"Please specify a manylinux standard; ie: --manylinux=manylinux1. "
"Given {}".format(value)
)
class _HandleTransitiveAction(Action):
def __init__(self, *args, **kwargs):
kwargs["nargs"] = 0
super(_HandleTransitiveAction, self).__init__(*args, **kwargs)
def __call__(self, parser, namespace, value, option_str=None):
setattr(namespace, self.dest, option_str == "--transitive")
def register(
parser, # type: _ActionsContainer
include_pex_repository=False, # type: bool
include_lock=False, # type: bool
):
# type: (...) -> None
"""Register resolver configuration options with the given parser.
:param parser: The parser to register resolver configuration options with.
:param include_pex_repository: Whether to include the `--pex-repository` option.
:param include_lock: Whether to include the `--lock` option.
"""
default_resolver_configuration = PipConfiguration()
parser.add_argument(
"--resolver-version",
dest="resolver_version",
default=default_resolver_configuration.resolver_version,
choices=ResolverVersion.values(),
type=ResolverVersion.for_value,
help=(
"The dependency resolver version to use. Read more at "
"https://pip.pypa.io/en/stable/user_guide/#resolver-changes-2020"
),
)
register_repos_options(parser)
register_network_options(parser)
parser.add_argument(
"--cache-ttl",
metavar="DEPRECATED",
default=None,
type=int,
help="Deprecated: No longer used.",
)
parser.add_argument(
"-H",
"--header",
dest="headers",
metavar="DEPRECATED",
default=None,
type=str,
action="append",
help="Deprecated: No longer used.",
)
repository_choice = (
parser.add_mutually_exclusive_group() if include_pex_repository and include_lock else parser
)
if include_pex_repository:
repository_choice.add_argument(
"--pex-repository",
dest="pex_repository",
metavar="FILE",
default=None,
type=str,
help=(
"Resolve requirements from the given PEX file instead of from --index servers, "
"--find-links repos or a --lock file."
),
)
if include_lock:
repository_choice.add_argument(
"--lock",
dest="lock",
metavar="FILE",
default=None,
type=str,
help=(
"Resolve requirements from the given lock file created by Pex instead of from "
"--index servers, --find-links repos or a --pex-repository. If no requirements are "
"specified, will install the entire lock."
),
)
register_lock_options(parser)
parser.add_argument(
"--pre",
"--no-pre",
dest="allow_prereleases",
default=default_resolver_configuration.allow_prereleases,
action=HandleBoolAction,
help="Whether to include pre-release and development versions of requirements.",
)
parser.add_argument(
"--wheel",
"--binary",
"--no-wheel",
"--no-use-wheel",
"--no-binary",
"--no-use-binary",
dest="allow_wheels",
default=default_resolver_configuration.allow_wheels,
action=HandleBoolAction,
help="Whether to allow binary distributions.",
)
parser.add_argument(
"--build",
"--no-build",
dest="allow_builds",
default=default_resolver_configuration.allow_builds,
action=HandleBoolAction,
help="Whether to allow building of distributions from source.",
)
parser.add_argument(
"--prefer-wheel",
"--prefer-binary",
"--no-prefer-wheel",
"--no-prefer-binary",
dest="prefer_older_binary",
default=default_resolver_configuration.prefer_older_binary,
action=HandleBoolAction,
help=(
"Whether to prefer older binary distributions to newer source distributions (prefer "
"not building wheels)."
),
)
parser.add_argument(
"--force-pep517",
"--use-pep517",
"--no-use-pep517",
dest="use_pep517",
default=default_resolver_configuration.use_pep517,
action=HandleBoolAction,
help=(
"Whether to force use of PEP 517 for building source distributions into wheels ("
"https://www.python.org/dev/peps/pep-0518) or force direct invocation of"
"`setup.py bdist_wheel` (which requires all source distributions have a `setup.py` "
"based build). Defaults to using PEP-517 only when a `pyproject.toml` file is present "
"with a `build-system` section. If PEP-517 is forced (--use-pep517 is passed) and no "
"`pyproject.toml` file is present or one is but does not have a `build-system` section "
"defined, then the build is executed as if a `pyproject.toml` was present with a "
'`build-system` section comprised of `requires = ["setuptools>=40.8.0", "wheel"]` and '
'`build-backend = "setuptools.build_meta:__legacy__"`.'
),
)
parser.add_argument(
"--build-isolation",
"--no-build-isolation",
dest="build_isolation",
default=default_resolver_configuration.build_isolation,
action=HandleBoolAction,
help=(
"Disable `sys.path` isolation when building a modern source distribution. Build "
"dependencies specified by PEP 518 (https://www.python.org/dev/peps/pep-0518) must "
"already be installed on the `sys.path` if this option is used."
),
)
parser.add_argument(
"--transitive",
"--no-transitive",
"--intransitive",
dest="transitive",
default=default_resolver_configuration.transitive,
action=_HandleTransitiveAction,
help="Whether to transitively resolve requirements.",
)
register_max_jobs_option(parser)
def register_lock_options(parser):
# type: (_ActionsContainer) -> None
"""Register lock options with the given parser.
:param parser: The parser to register lock configuration options with.
"""
parser.add_argument(
"--path-mapping",
dest="path_mappings",
action="append",
default=[],
type=str,
help=(
"A mapping of the form `NAME|PATH|DESCRIPTION` of a logical name to a concrete local "
"absolute path with an optional description. Can be specified multiple times. The "
"mapping must include the pipe (`|`) separated name and absolute path components, but "
"the trailing pipe-separated description is optional. The mapping is used when "
"creating, and later reading, lock files to ensure the lock file created on one "
"machine can be used on another with a potentially different realization of various "
"paths used in the resolve. A typical example is a find-links repo. This might be "
"provided on the file-system via a network mount instead of via an HTTP(S) server and "
"that network mount may be at different absolute paths on different machines. "
"Classically, it may be in a user's home directory; whose path will vary from user to "
"user."
),
)
def register_repos_options(parser):
# type: (_ActionsContainer) -> None
"""Register repos configuration options with the given parser.
:param parser: The parser to register repos configuration options with.
"""
parser.add_argument(
"--pypi",
"--no-pypi",
"--no-index",
dest="pypi",
action=HandleBoolAction,
default=True,
help="Whether to use PyPI to resolve dependencies.",
)
parser.add_argument(
"-f",
"--find-links",
"--repo",
metavar="PATH/URL",
action="append",
dest="find_links",
type=str,
help="Additional repository path (directory or URL) to look for requirements.",
)
parser.add_argument(
"-i",
"--index",
"--index-url",
metavar="URL",
action="append",
dest="indexes",
type=str,
help="Additional cheeseshop indices to use to satisfy requirements.",
)
def register_network_options(parser):
# type: (_ActionsContainer) -> None
"""Register network configuration options with the given parser.
:param parser: The parser to register network configuration options with.
"""
default_resolver_configuration = PipConfiguration()
default_network_configuration = default_resolver_configuration.network_configuration
parser.add_argument(
"--retries",
default=default_network_configuration.retries,
type=int,
help="Maximum number of retries each connection should attempt.",
)
parser.add_argument(
"--timeout",
metavar="SECS",
default=default_network_configuration.timeout,
type=int,
help="Set the socket timeout in seconds.",
)
parser.add_argument(
"--proxy",
type=str,
default=default_network_configuration.proxy,
help="Specify a proxy in the form http(s)://[user:passwd@]proxy.server:port.",
)
parser.add_argument(
"--cert",
metavar="PATH",
type=str,
default=default_network_configuration.cert,
help="Path to alternate CA bundle.",
)
parser.add_argument(
"--client-cert",
metavar="PATH",
type=str,
default=default_network_configuration.client_cert,
help=(
"Path to an SSL client certificate which should be a single file containing the "
"private key and the certificate in PEM format."
),
)
def register_max_jobs_option(parser):
# type: (_ActionsContainer) -> None
"""Register the max jobs configuration option with the given parser.
:param parser: The parser to register the max job option with.
"""
default_resolver_configuration = PipConfiguration()
parser.add_argument(
"-j",
"--jobs",
metavar="JOBS",
dest="max_jobs",
type=int,
default=default_resolver_configuration.max_jobs,
help=(
"The maximum number of parallel jobs to use when resolving, building and "
"installing distributions. You might want to increase the maximum number of "
"parallel jobs to potentially improve the latency of the pex creation process at "
"the expense of other processes on your system."
),
)
class InvalidConfigurationError(Exception):
"""Indicates an invalid resolver configuration."""
if TYPE_CHECKING:
ResolverConfiguration = Union[
LockRepositoryConfiguration, PexRepositoryConfiguration, PipConfiguration
]
def configure(options):
# type: (Namespace) -> ResolverConfiguration
"""Creates a resolver configuration from options registered by `register`.
:param options: The resolver configuration options.
:raise: :class:`InvalidConfigurationError` if the resolver configuration is invalid.
"""
pex_repository = getattr(options, "pex_repository", None)
lock = getattr(options, "lock", None)
if pex_repository and (options.indexes or options.find_links):
raise InvalidConfigurationError(
'The "--pex-repository" option cannot be used together with the "--index" or '
'"--find-links" options.'
)
if pex_repository:
return PexRepositoryConfiguration(
pex_repository=pex_repository,
network_configuration=create_network_configuration(options),
transitive=options.transitive,
)
pip_configuration = create_pip_configuration(options)
if lock:
return LockRepositoryConfiguration(
parse_lock=lambda: parse_lockfile(options),
pip_configuration=pip_configuration,
)
return pip_configuration
def create_pip_configuration(options):
# type: (Namespace) -> PipConfiguration
"""Creates a Pip configuration from options registered by `register`.
:param options: The Pip resolver configuration options.
"""
if options.cache_ttl:
pex_warnings.warn("The --cache-ttl option is deprecated and no longer has any effect.")
if options.headers:
pex_warnings.warn("The --header option is deprecated and no longer has any effect.")
repos_configuration = create_repos_configuration(options)
return PipConfiguration(
resolver_version=options.resolver_version,
repos_configuration=repos_configuration,
network_configuration=create_network_configuration(options),
allow_prereleases=options.allow_prereleases,
allow_wheels=options.allow_wheels,
allow_builds=options.allow_builds,
prefer_older_binary=options.prefer_older_binary,
use_pep517=options.use_pep517,
build_isolation=options.build_isolation,
transitive=options.transitive,
max_jobs=get_max_jobs_value(options),
)
def create_repos_configuration(options):
# type: (Namespace) -> ReposConfiguration
"""Creates a repos configuration from options registered by `register_repos_options`.
:param options: The Pip resolver configuration options.
"""
indexes = OrderedSet(
([PYPI] if options.pypi else []) + (options.indexes or [])
) # type: OrderedSet[str]
find_links = OrderedSet(options.find_links or ()) # type: OrderedSet[str]
return ReposConfiguration.create(indexes=tuple(indexes), find_links=tuple(find_links))
def create_network_configuration(options):
# type: (Namespace) -> NetworkConfiguration
"""Creates a network configuration from options registered by `register_network_options`.
:param options: The Pip resolver configuration options.
"""
return NetworkConfiguration(
retries=options.retries,
timeout=options.timeout,
proxy=options.proxy,
cert=options.cert,
client_cert=options.client_cert,
)
def get_max_jobs_value(options):
# type: (Namespace) -> int
"""Retrieves the max jobs value from the option registered by `register_max_jobs_option`.
:param options: The max jobs configuration options.
"""
return options.max_jobs
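# Hypothetical usage sketch (not part of pex itself): wire the registration helpers above into
# a stand-alone argparse parser and turn the parsed options into a resolver configuration.
# With no command line arguments, `configure` falls back to a plain PipConfiguration.
def _example_build_resolver_configuration():
    from argparse import ArgumentParser

    parser = ArgumentParser(prog="resolver-options-example")
    register(parser, include_pex_repository=True, include_lock=True)
    options = parser.parse_args([])  # all registered defaults
    return configure(options)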
<gh_stars>0
#!/usr/bin/python3
# Copyright 2022. FastyBird s.r.o.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
HomeKit connector registry module models
"""
# Python base dependencies
import json
import uuid
from datetime import datetime
from typing import Dict, List, Optional, Tuple, Union
# Library dependencies
from fastybird_devices_module.repositories.state import (
ChannelPropertiesStatesRepository,
)
from fastybird_devices_module.utils import normalize_value
from fastybird_metadata.types import ButtonPayload, DataType, SwitchPayload
from inflection import camelize, underscore
from kink import inject
from pyhap import CHARACTERISTICS_FILE, SERVICES_FILE
from pyhap.accessory_driver import AccessoryDriver
from pyhap.util import hap_type_to_uuid
from whistle import EventDispatcher
# Library libs
from fastybird_homekit_connector.exceptions import InvalidStateException
from fastybird_homekit_connector.registry.records import (
AccessoryRecord,
CharacteristicRecord,
ServiceRecord,
)
from fastybird_homekit_connector.types import HAPDataType
def read_definition_file(path: bytes) -> Dict[str, Dict[str, Union[str, List[str], Dict[str, Union[str, int, float]]]]]:
"""Read file and return a dict"""
with open(path, "r", encoding="utf8") as file:
definition: Dict[str, Dict[str, Union[str, List[str], Dict[str, Union[str, int, float]]]]] = json.load(file)
return definition
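# Illustrative sketch (not part of the original module): the definition files bundled with
# pyhap can be loaded directly through the helper above.
def _example_load_service_definitions() -> Dict:
    # Returns a dict keyed by service name (e.g. "Lightbulb"); each entry contains at least
    # the service "UUID" consumed by ServicesRegistry.append() below.
    return read_definition_file(SERVICES_FILE)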
class AccessoriesRegistry:
"""
Accessories registry
@package FastyBird:HomeKitConnector!
@module registry/model
@author <NAME> <<EMAIL>>
"""
__items: Dict[str, AccessoryRecord] = {}
__iterator_index = 0
__services_registry: "ServicesRegistry"
# -----------------------------------------------------------------------------
def __init__(
self,
services_registry: "ServicesRegistry",
) -> None:
self.__items = {}
self.__services_registry = services_registry
# -----------------------------------------------------------------------------
def get_by_id(self, accessory_id: uuid.UUID) -> Optional[AccessoryRecord]:
"""Find accessory in registry by given unique identifier"""
items = self.__items.copy()
return next(
iter([record for record in items.values() if accessory_id == record.id]),
None,
)
# -----------------------------------------------------------------------------
def append(
self,
accessory_id: uuid.UUID,
accessory_name: str,
accessory_enabled: bool,
driver: AccessoryDriver,
) -> AccessoryRecord:
"""Append accessory record into registry"""
accessory_record = AccessoryRecord(
accessory_id=accessory_id,
accessory_enabled=accessory_enabled,
accessory_name=accessory_name,
driver=driver,
)
self.__items[str(accessory_record.id)] = accessory_record
return accessory_record
# -----------------------------------------------------------------------------
def remove(self, accessory_id: uuid.UUID) -> None:
"""Remove accessory from registry"""
items = self.__items.copy()
for record in items.values():
if accessory_id == record.id:
try:
del self.__items[str(record.id)]
self.__services_registry.reset(accessory_id=record.id)
except KeyError:
pass
break
# -----------------------------------------------------------------------------
def reset(self) -> None:
"""Reset accessories registry to initial state"""
items = self.__items.copy()
for record in items.values():
self.__services_registry.reset(accessory_id=record.id)
self.__items = {}
# -----------------------------------------------------------------------------
def enable(self, accessory: AccessoryRecord) -> AccessoryRecord:
"""Enable accessory for communication"""
accessory.enabled = True
self.__update(accessory=accessory)
updated_accessory = self.get_by_id(accessory_id=accessory.id)
if updated_accessory is None:
raise InvalidStateException("Accessory record could not be re-fetched from registry after update")
return updated_accessory
# -----------------------------------------------------------------------------
def disable(self, accessory: AccessoryRecord) -> AccessoryRecord:
"""Enable accessory for communication"""
accessory.enabled = False
self.__update(accessory=accessory)
updated_accessory = self.get_by_id(accessory_id=accessory.id)
if updated_accessory is None:
raise InvalidStateException("Accessory record could not be re-fetched from registry after update")
return updated_accessory
# -----------------------------------------------------------------------------
def add_service(self, accessory: AccessoryRecord, service: ServiceRecord) -> AccessoryRecord:
"""Add service to accessory"""
accessory.add_service(service)
self.__update(accessory=accessory)
updated_accessory = self.get_by_id(accessory_id=accessory.id)
if updated_accessory is None:
raise InvalidStateException("Accessory record could not be re-fetched from registry after update")
return updated_accessory
# -----------------------------------------------------------------------------
def __update(self, accessory: AccessoryRecord) -> bool:
items = self.__items.copy()
for record in items.values():
if record.id == accessory.id:
self.__items[str(accessory.id)] = accessory
return True
return False
# -----------------------------------------------------------------------------
def __iter__(self) -> "AccessoriesRegistry":
# Reset index for next iteration
self.__iterator_index = 0
return self
# -----------------------------------------------------------------------------
def __len__(self) -> int:
return len(self.__items.values())
# -----------------------------------------------------------------------------
def __next__(self) -> AccessoryRecord:
if self.__iterator_index < len(self.__items.values()):
items: List[AccessoryRecord] = list(self.__items.values())
result: AccessoryRecord = items[self.__iterator_index]
self.__iterator_index += 1
return result
# Reset index for next iteration
self.__iterator_index = 0
# End of iteration
raise StopIteration
class ServicesRegistry:
"""
Services registry
@package FastyBird:HomeKitConnector!
@module registry/model
@author <NAME> <<EMAIL>>
"""
__items: Dict[str, ServiceRecord] = {}
__characteristics_registry: "CharacteristicsRegistry"
__services_definitions: Dict
# -----------------------------------------------------------------------------
def __init__(
self,
characteristics_registry: "CharacteristicsRegistry",
services_definitions: bytes = SERVICES_FILE,
) -> None:
self.__items = {}
self.__characteristics_registry = characteristics_registry
self.__services_definitions = read_definition_file(services_definitions)
# -----------------------------------------------------------------------------
def get_by_id(self, service_id: uuid.UUID) -> Optional[ServiceRecord]:
"""Find service in registry by given unique identifier"""
items = self.__items.copy()
return next(
iter([record for record in items.values() if service_id == record.id]),
None,
)
# -----------------------------------------------------------------------------
def get_by_identifier(self, accessory_id: uuid.UUID, service_identifier: int) -> Optional[ServiceRecord]:
"""Find service in registry by given unique shelly identifier and accessory unique identifier"""
items = self.__items.copy()
return next(
iter(
[
record
for record in items.values()
if accessory_id == record.accessory_id and record.identifier == service_identifier
]
),
None,
)
# -----------------------------------------------------------------------------
def get_all_by_accessory(self, accessory_id: uuid.UUID) -> List[ServiceRecord]:
"""Find services in registry by accessory unique identifier"""
items = self.__items.copy()
return list(iter([record for record in items.values() if accessory_id == record.accessory_id]))
# -----------------------------------------------------------------------------
def append(
self,
accessory: AccessoryRecord,
service_id: uuid.UUID,
service_identifier: str,
service_name: str,
) -> ServiceRecord:
"""Append service record into registry"""
service_name = camelize(underscore(service_name))
if service_name not in self.__services_definitions:
raise AttributeError(f"Provided invalid service name: {service_name}")
service_config: Dict = self.__services_definitions[service_name].copy()
if "UUID" not in service_config or not isinstance(service_config, dict):
raise KeyError(f"Could not load service: {service_name}")
service_record: ServiceRecord = ServiceRecord(
accessory=accessory,
service_id=service_id,
service_identifier=service_identifier,
service_name=service_name,
service_type_id=hap_type_to_uuid(service_config.pop("UUID")),
)
self.__items[str(service_record.id)] = service_record
return service_record
# -----------------------------------------------------------------------------
def remove(self, service_id: uuid.UUID) -> None:
"""Remove service from registry"""
items = self.__items.copy()
for record in items.values():
if service_id == record.id:
try:
del self.__items[str(record.id)]
self.__characteristics_registry.reset(service_id=record.id)
except KeyError:
pass
break
# -----------------------------------------------------------------------------
def reset(self, accessory_id: Optional[uuid.UUID] = None) -> None:
"""Reset services registry to initial state"""
items = self.__items.copy()
if accessory_id is not None:
for record in items.values():
if accessory_id == record.accessory_id:
self.remove(service_id=record.id)
else:
for record in items.values():
self.__characteristics_registry.reset(service_id=record.id)
self.__items = {}
# -----------------------------------------------------------------------------
def add_characteristic(self, service: ServiceRecord, characteristic: CharacteristicRecord) -> ServiceRecord:
"""Add characteristic to service"""
service.add_characteristic(characteristic)
self.__update(service=service)
updated_service = self.get_by_id(service_id=service.id)
if updated_service is None:
raise InvalidStateException("Service record could not be re-fetched from registry after update")
return updated_service
# -----------------------------------------------------------------------------
def __update(self, service: ServiceRecord) -> bool:
items = self.__items.copy()
for record in items.values():
if record.id == service.id:
self.__items[str(service.id)] = service
return True
return False
@inject
class CharacteristicsRegistry:
"""
Characteristics registry
@package FastyBird:HomeKitConnector!
@module registry/model
@author <NAME> <<EMAIL>>
"""
__items: Dict[str, CharacteristicRecord] = {}
__iterator_index = 0
__event_dispatcher: EventDispatcher
__channel_property_state_repository: ChannelPropertiesStatesRepository
__characteristics_definitions: Dict
# -----------------------------------------------------------------------------
def __init__(
self,
event_dispatcher: EventDispatcher,
channel_property_state_repository: ChannelPropertiesStatesRepository,
characteristics_definitions: bytes = CHARACTERISTICS_FILE,
) -> None:
self.__items = {}
self.__event_dispatcher = event_dispatcher
self.__channel_property_state_repository = channel_property_state_repository
self.__characteristics_definitions = read_definition_file(characteristics_definitions)
# -----------------------------------------------------------------------------
def get_by_id(self, characteristic_id: uuid.UUID) -> Optional[CharacteristicRecord]:
"""Find characteristic in registry by given unique identifier"""
items = self.__items.copy()
return next(
iter([record for record in items.values() if characteristic_id == record.id]),
None,
)
# -----------------------------------------------------------------------------
def get_by_identifier(
self, service_id: uuid.UUID, characteristic_identifier: int
) -> Optional[CharacteristicRecord]:
"""Find characteristic in registry by given unique shelly identifier and service unique identifier"""
items = self.__items.copy()
return next(
iter(
[
record
for record in items.values()
if service_id == record.service_id and record.identifier == characteristic_identifier
]
),
None,
)
# -----------------------------------------------------------------------------
def get_all_for_service(self, service_id: uuid.UUID) -> List[CharacteristicRecord]:
"""Find characteristic in registry by service unique identifier"""
items = self.__items.copy()
return [record for record in items.values() if service_id == record.service_id]
# -----------------------------------------------------------------------------
def append( # pylint: disable=too-many-arguments,too-many-locals
self,
accessory: AccessoryRecord,
service: ServiceRecord,
characteristic_id: uuid.UUID,
characteristic_identifier: str,
characteristic_name: str,
characteristic_data_type: DataType,
characteristic_format: Union[
Tuple[Optional[int], Optional[int]],
Tuple[Optional[float], Optional[float]],
List[Union[str, Tuple[str, Optional[str], Optional[str]]]],
None,
] = None,
characteristic_invalid: Union[int, float, str, None] = None,
characteristic_number_of_decimals: Optional[int] = None,
characteristic_queryable: bool = False,
characteristic_settable: bool = False,
characteristic_value: Union[int, float, str, bool, datetime, ButtonPayload, SwitchPayload, None] = None,
) -> CharacteristicRecord:
"""Append characteristic record into registry"""
characteristic_name = camelize(underscore(characteristic_name))
if characteristic_name not in self.__characteristics_definitions:
raise AttributeError(f"Provided invalid characteristic name: {characteristic_name}")
characteristic_config: Dict = self.__characteristics_definitions[characteristic_name].copy()
if "UUID" not in characteristic_config or not isinstance(characteristic_config, dict):
raise KeyError(f"Could not load characteristic: {characteristic_name}")
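# Extract the optional HAP metadata (format, valid values, max length, min/max value,
# min step, permissions and unit) from the bundled definition before building the record.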
hap_data_type: Optional[HAPDataType] = None
if CharacteristicRecord.PROP_FORMAT in characteristic_config and HAPDataType.has_value(
str(characteristic_config.get(CharacteristicRecord.PROP_FORMAT))
):
hap_data_type = HAPDataType(characteristic_config.get(CharacteristicRecord.PROP_FORMAT))
hap_valid_values: Optional[Dict[str, int]] = None
if CharacteristicRecord.PROP_VALID_VALUES in characteristic_config:
hap_valid_values = characteristic_config.get(CharacteristicRecord.PROP_VALID_VALUES)
hap_max_length: Optional[int] = characteristic_config.get(CharacteristicRecord.PROP_MAX_LEN, None)
hap_min_value: Optional[float] = None
if CharacteristicRecord.PROP_MIN_VALUE in characteristic_config:
hap_min_value = float(str(characteristic_config.get(CharacteristicRecord.PROP_MIN_VALUE)))
hap_max_value: Optional[float] = None
if CharacteristicRecord.PROP_MAX_VALUE in characteristic_config:
hap_max_value = float(str(characteristic_config.get(CharacteristicRecord.PROP_MAX_VALUE)))
hap_min_step: Optional[float] = None
if CharacteristicRecord.PROP_MIN_STEP in characteristic_config:
hap_min_step = float(str(characteristic_config.get(CharacteristicRecord.PROP_MIN_STEP)))
hap_permissions: List[str] = []
if CharacteristicRecord.PROP_PERMISSIONS in characteristic_config:
hap_permissions = (
list(characteristic_config.get(CharacteristicRecord.PROP_PERMISSIONS, []))
if isinstance(characteristic_config.get(CharacteristicRecord.PROP_PERMISSIONS, []), list)
else []
)
hap_unit: Optional[str] = None
if CharacteristicRecord.PROP_UNIT in characteristic_config:
hap_unit = str(characteristic_config.get(CharacteristicRecord.PROP_UNIT))
characteristic_record: CharacteristicRecord = CharacteristicRecord(
event_dispatcher=self.__event_dispatcher,
accessory=accessory,
service=service,
characteristic_id=characteristic_id,
characteristic_identifier=characteristic_identifier,
characteristic_name=characteristic_name,
characteristic_type_id=hap_type_to_uuid(characteristic_config.pop("UUID")),
characteristic_data_type=characteristic_data_type,
characteristic_format=characteristic_format,
characteristic_invalid=characteristic_invalid,
characteristic_number_of_decimals=characteristic_number_of_decimals,
characteristic_queryable=characteristic_queryable,
characteristic_settable=characteristic_settable,
characteristic_value=characteristic_value,
| |
E, stats = local_analyses(E, HMM.Obs(E, t), HMM.Obs.noise.C, yy[ko],
batch, taper, pool.map, self.xN, self.g)
self.stats.write(stats, k, ko, "a")
E = post_process(E, self.infl, self.rot)
self.stats.assess(k, ko, E=E)
def effective_N(YR, dyR, xN, g):
"""Effective ensemble size N.
As measured by the finite-size EnKF-N
"""
N, Ny = YR.shape
N1 = N-1
V, s, UT = svd0(YR)
du = UT @ dyR
eN, cL = hyperprior_coeffs(s, N, xN, g)
def pad_rk(arr): return pad0(arr, min(N, Ny))
def dgn_rk(l1): return pad_rk((l1*s)**2) + N1
# Make dual cost function (in terms of l1)
def J(l1):
val = np.sum(du**2/dgn_rk(l1)) \
+ eN/l1**2 \
+ cL*np.log(l1**2)
return val
# Derivatives (not required with minimize_scalar):
def Jp(l1):
val = -2*l1 * np.sum(pad_rk(s**2) * du**2/dgn_rk(l1)**2) \
+ -2*eN/l1**3 \
+ 2*cL/l1
return val
def Jpp(l1):
val = 8*l1**2 * np.sum(pad_rk(s**4) * du**2/dgn_rk(l1)**3) \
+ 6*eN/l1**4 \
+ -2*cL/l1**2
return val
# Find inflation factor (optimize)
l1 = Newton_m(Jp, Jpp, 1.0)
# l1 = fmin_bfgs(J, x0=[1], gtol=1e-4, disp=0)
# l1 = minimize_scalar(J, bracket=(sqrt(prior_mode), 1e2), tol=1e-4).x
za = N1/l1**2
return za
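# Illustrative sketch (not from the original module): effective_N applied to random,
# pre-whitened data. YR holds the scaled observation anomalies, shaped (N, Ny).
def _example_effective_N():
    rng = np.random.default_rng(42)
    YR = rng.standard_normal((10, 3))
    dyR = rng.standard_normal(3)
    return effective_N(YR, dyR, xN=1, g=0)  # za = (N-1)/inflation**2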
# Notes on optimizers for the 'dual' EnKF-N:
# ----------------------------------------
# Using minimize_scalar:
# - Doesn't take dJdx. Advantage: only need J
# - method='bounded' not necessary and slower than 'brent'.
# - bracket not necessary either...
# Using multivariate minimization: fmin_cg, fmin_bfgs, fmin_ncg
# - these also accept dJdx. But only fmin_bfgs approaches
# the speed of the scalar minimizers.
# Using scalar root-finders:
# - brenth(dJ1, LowB, 1e2, xtol=1e-6) # Same speed as minimization
# - newton(dJ1,1.0, fprime=dJ2, tol=1e-6) # No improvement
# - newton(dJ1,1.0, fprime=dJ2, tol=1e-6, fprime2=dJ3) # No improvement
# - Newton_m(dJ1,dJ2, 1.0) # Significantly faster. Also slightly better CV?
# => Despite inconvienience of defining analytic derivatives,
# Newton_m seems like the best option.
# - In extreme (or just non-linear Obs.mod) cases,
# the EnKF-N cost function may have multiple minima.
# Then: should use more robust optimizer!
#
# For 'primal'
# ----------------------------------------
# Similarly, Newton_m seems like the best option,
# although alternatives are provided (commented out).
#
def Newton_m(fun, deriv, x0, is_inverted=False,
conf=1.0, xtol=1e-4, ytol=1e-7, itermax=10**2):
"""Find root of `fun`.
This is a simple (and pretty fast) implementation of Newton's method.
"""
itr = 0
dx = np.inf
Jx = fun(x0)
def norm(x):
return sqrt(np.sum(x**2))
while ytol < norm(Jx) and xtol < norm(dx) and itr < itermax:
Dx = deriv(x0)
if is_inverted:
dx = Dx @ Jx
elif isinstance(Dx, float):
dx = Jx/Dx
else:
dx = mldiv(Dx, Jx)
dx *= conf
x0 -= dx
Jx = fun(x0)
itr += 1
return x0
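# Illustrative sketch (not from the original module): Newton_m used as a plain scalar root
# finder, here on f(x) = x**2 - 2, whose positive root is sqrt(2) ≈ 1.4142.
def _example_newton_sqrt2():
    fun = lambda x: x**2 - 2.0
    deriv = lambda x: 2.0 * x
    return Newton_m(fun, deriv, x0=1.0)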
def hyperprior_coeffs(s, N, xN=1, g=0):
r"""Set EnKF-N inflation hyperparams.
The EnKF-N prior may be specified by the constants:
- `eN`: Effect of unknown mean
- `cL`: Coeff in front of log term
These are trivial constants in the original EnKF-N,
but are further adjusted (corrected and tuned) for the following reasons.
- Reason 1: mode correction.
These parameters bridge the Jeffreys (`xN=1`) and Dirac (`xN=Inf`) hyperpriors
for the prior covariance, B, as discussed in `bib.bocquet2015expanding`.
Indeed, mode correction becomes necessary when $$ R \rightarrow \infty $$
because then there should be no ensemble update (and also no inflation!).
More specifically, the mode of `l1`'s should be adjusted towards 1
as a function of $$ I - K H $$ ("prior's weight").
PS: why do we leave the prior mode below 1 at all?
Because it sets up "tension" (negative feedback) in the inflation cycle:
the prior pulls downwards, while the likelihood tends to pull upwards.
- Reason 2: Boosting the inflation prior's certainty from N to xN*N.
The aim is to take advantage of the fact that the ensemble may not
have quite as much sampling error as a fully stochastic sample,
as illustrated in section 2.1 of `bib.raanes2019adaptive`.
- Its damping effect is similar to work done by <NAME>.
The tuning is controlled by:
- `xN=1`: is fully agnostic, i.e. assumes the ensemble is generated
from a highly chaotic or stochastic model.
- `xN>1`: increases the certainty of the hyper-prior,
which is appropriate for more linear and deterministic systems.
- `xN<1`: yields a more (than 'fully') agnostic hyper-prior,
as if N were smaller than it truly is.
- `xN<=0` is not meaningful.
"""
N1 = N-1
eN = (N+1)/N
cL = (N+g)/N1
# Mode correction (almost) as in eqn 36 of `bib.bocquet2015expanding`
prior_mode = eN/cL # Mode of l1 (before correction)
diagonal = pad0(s**2, N) + N1 # diag of Y @ R.inv @ Y.T + N1*I
# (Hessian of J)
I_KH = np.mean(diagonal**(-1))*N1 # ≈ 1/(1 + HBH/R)
# I_KH = 1/(1 + (s**2).sum()/N1) # Scalar alternative: use tr(HBH/R).
mc = sqrt(prior_mode**I_KH) # Correction coeff
# Apply correction
eN /= mc
cL *= mc
# Boost by xN
eN *= xN
cL *= xN
return eN, cL
def zeta_a(eN, cL, w):
"""EnKF-N inflation estimation via w.
Returns `zeta_a = (N-1)/pre-inflation^2`.
Using this inside an iterative minimization as in the
`dapper.da_methods.variational.iEnKS` effectively blurs
the distinction between the primal and dual EnKF-N.
"""
N = len(w)
N1 = N-1
za = N1*cL/(eN + w@w)
return za
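# Hedged sketch tying hyperprior_coeffs() and zeta_a() together. The spectrum s
# and weight vector w are synthetic stand-ins (for the singular values of the
# whitened anomalies and for the analysis weights); _example_zeta_a is
# illustrative only.
def _example_zeta_a(N=10):
    rng = np.random.default_rng(3)
    s = np.sort(rng.random(N))[::-1]
    eN, cL = hyperprior_coeffs(s, N, xN=1, g=0)
    w = rng.standard_normal(N) / N
    za = zeta_a(eN, cL, w)
    return np.sqrt((N - 1) / za)      # the implied (pre-)inflation factor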
@ens_method
class EnKF_N:
"""Finite-size EnKF (EnKF-N).
Refs: `bib.bocquet2011ensemble`, `bib.bocquet2015expanding`
This implementation is pedagogical, prioritizing the "dual" form.
In consequence, the efficiency of the "primal" form suffers a bit.
The primal form is included for completeness and to demonstrate equivalence.
In `dapper.da_methods.variational.iEnKS`, however,
the primal form is preferred because it
already does optimization for w (as treatment for nonlinear models).
`infl` should be unnecessary (assuming no model error, or that Q is correct).
`Hess`: use non-approx Hessian for ensemble transform matrix?
`g` is the nullity of A (the matrix of state anomalies), i.e. g=max(1,N-Nx),
compensating for the redundancy in the space of w.
But we have made it an input argument instead, with default 0,
because mode-finding (of p(x) via the dual) completely ignores this redundancy,
and the mode gets (undesirably) modified by g.
`xN` allows tuning the hyper-prior for the inflation.
Usually, I just try setting it to 1 (default), or 2.
Further description in hyperprior_coeffs().
"""
N: int
dual: bool = False
Hess: bool = False
xN: float = 1.0
g: int = 0
def assimilate(self, HMM, xx, yy):
R, N, N1 = HMM.Obs.noise.C, self.N, self.N-1
# Init
E = HMM.X0.sample(N)
self.stats.assess(0, E=E)
# Cycle
for k, ko, t, dt in progbar(HMM.tseq.ticker):
# Forecast
E = HMM.Dyn(E, t-dt, dt)
E = add_noise(E, dt, HMM.Dyn.noise, self.fnoise_treatm)
# Analysis
if ko is not None:
self.stats.assess(k, ko, 'f', E=E)
Eo = HMM.Obs(E, t)
y = yy[ko]
mu = np.mean(E, 0)
A = E - mu
xo = np.mean(Eo, 0)
Y = Eo-xo
dy = y - xo
V, s, UT = svd0(Y @ R.sym_sqrt_inv.T)
du = UT @ (dy @ R.sym_sqrt_inv.T)
def dgn_N(l1): return pad0((l1*s)**2, N) + N1
# Adjust hyper-prior
# xN_ = noise_level(self.xN, self.stats, HMM.tseq, N1, ko, A,
# locals().get('A_old', None))
eN, cL = hyperprior_coeffs(s, N, self.xN, self.g)
if self.dual:
# Make dual cost function (in terms of l1)
def pad_rk(arr): return pad0(arr, min(N, HMM.Obs.M))
def dgn_rk(l1): return pad_rk((l1*s)**2) + N1
def J(l1):
val = np.sum(du**2/dgn_rk(l1)) \
+ eN/l1**2 \
+ cL*np.log(l1**2)
return val
# Derivatives (not required with minimize_scalar):
def Jp(l1):
val = -2*l1 * np.sum(pad_rk(s**2) * du**2/dgn_rk(l1)**2) \
+ -2*eN/l1**3 + 2*cL/l1
return val
def Jpp(l1):
val = 8*l1**2 * np.sum(pad_rk(s**4) * du**2/dgn_rk(l1)**3) \
+ 6*eN/l1**4 + -2*cL/l1**2
return val
# Find inflation factor (optimize)
l1 = Newton_m(Jp, Jpp, 1.0)
# l1 = fmin_bfgs(J, x0=[1], gtol=1e-4, disp=0)
# l1 = minimize_scalar(J, bracket=(sqrt(prior_mode), 1e2),
# tol=1e-4).x
else:
# Primal form, in a fully linearized version.
def za(w): return zeta_a(eN, cL, w)
def J(w): return \
.5*np.sum(((dy-w@Y)@R.sym_sqrt_inv.T)**2) + \
.5*N1*cL*np.log(eN + w@w)
# Derivatives (not required with fmin_bfgs):
def Jp(w): return -Y@R.inv@(dy-w@Y) + w*za(w)
# Jpp = lambda w: Y@R.inv@Y.T + \
# za(w)*(eye(N) - 2*np.outer(w,w)/(eN + w@w))
# Approx: no radial-angular cross-deriv:
# Jpp = lambda w: Y@R.inv@Y.T + za(w)*eye(N)
def nvrs(w):
# inverse of Jpp-approx
return (V * (pad0(s**2, N) + za(w)) ** -1.0) @ V.T
# Find w (optimize)
wa = Newton_m(Jp, nvrs, np.zeros(N), is_inverted=True)
#!/usr/bin/env python
# Copyright (c) 2011-2013 Stanford University
#
# Permission to use, copy, modify, and distribute this software for any
# purpose with or without fee is hereby granted, provided that the above
# copyright notice and this permission notice appear in all copies.
#
# THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR(S) DISCLAIM ALL WARRANTIES
# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL AUTHORS BE LIABLE FOR
# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
# OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
"""
This file contains the definitions for all of the RawMetrics supported by
RAMCloud. When executed, it generates two files, RawMetrics.in.h and
RawMetrics.in.cc, which are included by other files when building RAMCloud.
"""
from __future__ import division, print_function
from glob import glob
from optparse import OptionParser
from pprint import pprint
from functools import partial
import math
import os
import random
import re
import sys
import itertools
from common import *
__all__ = ['average', 'avgAndStdDev', 'parseRecovery']
### Utilities:
class Counter:
"""Used to share an incrementing value.
"""
def __init__(self):
self.current = 0
def next(self):
self.current += 1
def value(self):
return self.current
class Out:
"""Indents text and writes it to a file.
Useful for generated code.
"""
def __init__(self, stream=sys.stdout, indent=0):
self._stream = stream
self._indent = indent
def __call__(self, s):
self._stream.write('%s%s\n' % (' ' * 4 * self._indent, s))
def indent(self):
return Out(self._stream, self._indent + 1)
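# Brief usage sketch of Out (illustrative only): indent() returns a new Out
# that writes with one additional level (4 spaces) of indentation.
def _example_out():
    out = Out(sys.stdout)
    out('struct Example {')
    out.indent()('int field;')        # printed as "    int field;"
    out('};')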
class Metric:
"""A single performance metric.
"""
def __init__(self, name, documentation):
""" name is the variable name to use for this metric """
self.name = name
self.documentation = documentation
def dump_header(self, out):
out('/// %s' % self.documentation)
out('RawMetric %s;' % self.name)
def initializer(self):
return '%s(0)' % (self.name)
def instance_name(self):
""" Compute the name to use for an instance of this metric. """
return self.name
def dump_metric_info_code(self, out, path, counter):
""" Generate a case statement as part of a giant switch statement
that allows for iteration over all metrics.
path is a hierarchical name identifying this element, such
as 'backup.local' (it includes this object's name, if that
is desired).
counter is a Counter used to generate "case" clauses with
incrementing values.
"""
out(' case %s:' % (counter.value()))
out(' return {"%s",' % path)
out(' &%s};' % path)
counter.next()
class Group:
"""A group of related performance metrics and subgroups. Translates
into a nested struct inside the C++ RawMetrics object.
"""
def __init__(self, name, documentation):
""" name is the name of a class to use for this group (i.e.
initial capital letter).
"""
self.name = name
self.documentation = documentation
self.metrics = []
self.groups = []
def metric(self, name, documentation):
self.metrics.append(Metric(name, documentation))
def group(self, group):
self.groups.append(group)
def dump_header(self, out):
indent = ' ' * 4 * (out._indent + 2)
out('/// %s' % self.documentation)
constructorBody = ''
if self.name != 'RawMetrics':
out('struct %s {' % self.name)
else:
constructorBody = 'init();'
children = self.groups + self.metrics
out(' %s()' % self.name)
out(' : %s {%s}' %
(('\n%s, ' % (indent)).join(
[child.initializer() for child in children]),
constructorBody))
for child in children:
child.dump_header(out.indent())
if self.name != 'RawMetrics':
out('} %s;' % self.instance_name())
def initializer(self):
return '%s()' % self.instance_name()
def instance_name(self):
""" Compute the name to use for an instance of this group. """
return self.name[0].lower() + self.name[1:]
def dump_metric_info_code(self, out, path, counter):
""" Generate a case statement as part of a giant switch statement
that allows for iteration over all metrics.
path is a hierarchical name identifying this element, such
as 'backup.local' (it includes this object's name, if that
is desired).
counter is a Counter used to generate "case" clauses with
incrementing values.
"""
prefix = path
if len(path) != 0:
prefix += '.'
for child in self.groups + self.metrics:
child.dump_metric_info_code(out,
prefix + child.instance_name(), counter)
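# Hedged sketch of how Group and Metric cooperate to emit code. 'Demo' and
# 'fooCount' are made-up names for illustration, not real RAMCloud metrics.
def _example_dump_demo_group():
    demo = Group('Demo', 'example metrics group')
    demo.metric('fooCount', 'number of foo events')
    out = Out(sys.stdout)
    demo.dump_header(out)                                # emits "struct Demo { ... } demo;"
    demo.dump_metric_info_code(out, 'demo', Counter())   # emits 'case 0: return {"demo.fooCount", ...'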
### Metrics definitions:
coordinator = Group('Coordinator', 'metrics for coordinator')
coordinator.metric('recoveryCount',
'number of recoveries in which this coordinator participated')
coordinator.metric('recoveryTicks', 'elapsed time during recoveries')
coordinator.metric('recoveryBuildReplicaMapTicks',
'time contacting backups and finding replicas for crashed '
'master')
coordinator.metric('recoveryStartTicks', 'time in Recovery::start')
coordinator.metric('recoveryCompleteTicks',
'time sending recovery complete RPCs to backups')
master = Group('Master', 'metrics for masters')
master.metric('recoveryCount',
'number of recoveries in which this master participated')
master.metric('recoveryTicks', 'the elapsed time during recoveries')
master.metric('replicaManagerTicks', 'time spent in ReplicaManager')
master.metric('segmentAppendTicks', 'time spent in Segment::append')
master.metric('segmentAppendCopyTicks',
'time spent copying in Segment::append')
master.metric('segmentReadCount',
'number of BackupClient::getRecoveryData calls issued')
master.metric('segmentReadTicks',
'elapsed time for getRecoveryData calls to backups')
master.metric('segmentReadStallTicks',
'time stalled waiting for segments from backups')
master.metric('segmentReadByteCount',
'bytes of recovery segments received from backups')
master.metric('verifyChecksumTicks',
'time verifying checksums on objects from backups')
master.metric('recoverSegmentTicks',
'spent in MasterService::recoverSegment')
master.metric('backupInRecoverTicks',
'time spent in ReplicaManager::proceed '
'called from MasterService::recoverSegment')
master.metric('segmentCloseCount',
'number of complete segments written to backups')
master.metric('recoverySegmentEntryCount',
'number of recovery segment entries (e.g. objects, tombstones)')
master.metric('recoverySegmentEntryBytes',
'number of bytes in recovery segment entries (without overhead)')
master.metric('liveObjectCount',
'number of live objects written during recovery')
master.metric('liveObjectBytes',
'number of bytes of live object data written during recovery')
master.metric('objectAppendCount',
'number of objects appended to the log during recovery')
master.metric('objectDiscardCount',
'number of objects not appended to the log during recovery')
master.metric('safeVersionRecoveryCount',
'number of safeVersion updates during recovery')
master.metric('safeVersionNonRecoveryCount',
'number of safeVersion discarded during recovery')
master.metric('tombstoneAppendCount',
'number of tombstones kept during recovery')
master.metric('tombstoneDiscardCount',
'number of tombstones discarded during recovery')
master.metric('logSyncTicks',
'time syncing the log at the end of recovery')
master.metric('logSyncBytes',
'bytes sent during log sync')
master.metric('recoveryWillTicks',
'time rebuilding will at the end of recovery')
master.metric('removeTombstoneTicks',
'time deleting tombstones at the end of recovery')
master.metric('replicationTicks',
'time with outstanding RPCs to backups')
master.metric('replicationBytes',
'bytes sent during recovery from first gRD response '
'through log sync')
master.metric('replicas',
'number of backups on which to replicate each segment')
master.metric('backupCloseTicks',
'time closing segments in ReplicaManager')
master.metric('backupCloseCount',
'number of segments closed in ReplicaManager')
master.metric('logSyncCloseTicks',
'time close segments during log sync')
master.metric('logSyncCloseCount',
'number of segments closed during log sync')
master.metric('replicaRecoveries',
'number of replicas which have started replica recreation')
master.metric('openReplicaRecoveries',
'of replicaRecoveries how many were for replicas which were open')
master.metric('replicationTasks',
'max number of outstanding tasks in ReplicaManager')
master.metric('replicationTransmitCopyTicks',
'time spent copying outgoing rpcs in transport')
master.metric('logSyncTransmitCopyTicks',
'time spent copying outgoing rpcs in transport just during log sync')
master.metric('replayMemoryReadBytes',
'rough estimate of memory read during log replay')
master.metric('replayMemoryWrittenBytes',
'rough estimate of memory written during log replay')
master.metric('replicationTransmitActiveTicks',
'time transport tx was active during replication')
master.metric('logSyncTransmitActiveTicks',
'time transport tx was active during log sync')
master.metric('replicationPostingWriteRpcTicks',
'time spent during recovery starting write rpcs in transport')
master.metric('recoverSegmentPostingWriteRpcTicks',
'time spent during recoverSegment starting write rpcs in transport')
master.metric('logSyncPostingWriteRpcTicks',
'time spent during recovery final log sync starting write rpcs in transport')
backup = Group('Backup', 'metrics for backups')
backup.metric('recoveryCount',
'number of recoveries in which this backup participated')
backup.metric('recoveryTicks', 'elapsed time during recovery')
backup.metric('serviceTicks', 'time spent servicing RPC requests')
backup.metric('readCompletionCount',
'number of getRecoveryData requests successfully completed')
backup.metric('readingDataTicks',
'time from startReadingData to done reading')
backup.metric('storageReadCount', 'number of segment reads from disk')
backup.metric('storageReadBytes', 'amount of bytes read from disk')
backup.metric('storageReadTicks', 'time reading from disk')
backup.metric('writeClearTicks',
'time clearing segment memory during segment open')
backup.metric('writeCopyBytes', 'bytes written to backup segments')
backup.metric('writeCopyTicks', 'time copying data to backup segments')
backup.metric('storageWriteCount', 'number of segment writes to disk')
backup.metric('storageWriteBytes', 'bytes written to disk')
backup.metric('storageWriteTicks', 'time writing to disk')
backup.metric('filterTicks', 'time filtering segments')
backup.metric('primaryLoadCount', 'number of primary segments requested')
backup.metric('secondaryLoadCount', 'number of secondary segments requested')
backup.metric('storageType', '1 = in-memory, 2 = on-disk')
backup.metric('uncommittedFramesFreed', 'number of segment frames freed before being fully flushed to disk')
# This class records basic statistics for RPCs (count & execution time).
# The order here must match WireFormat.h. In the old days we did it manually,
# but that became a laborious time sink. We now autogenerate from the header
# file. Note that Service.cc indexes into the Rpc metrics struct when
# twiddling counters, so it relies not just on order, but on the counters being
# sequential in memory.
rpc = Group('Rpc', 'metrics for remote procedure calls')
# Returns a dictionary where keys are opcode numbers and values are their
# symbolic names. These are extracted directly from WireFormat.h.
def getRpcOpcodes():
wf = open(top_path + "/src/WireFormat.h", "r")
rpcOpcodes = {}
inOpcodes = False
for line in wf:
if line.startswith("enum Opcode {"):
inOpcodes = True
continue
if inOpcodes:
if line.startswith("};"):
break
opName, opNumber = line.strip().split("=")
opName = opName.strip()
opNumber = int(opNumber.split(",")[0].split("/")[0].strip())
rpcOpcodes[opNumber] = opName
return rpcOpcodes
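# Hedged illustration of getRpcOpcodes(): for a (hypothetical) WireFormat.h
# excerpt such as
#     enum Opcode {
#         PING = 7,
#         READ = 12,
#     };
# the parser above would return {7: 'PING', 12: 'READ'}.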
for phase in ("counts", "ticks"):
rpcOpcodes = getRpcOpcodes()
for i in itertools.count():
if len(rpcOpcodes) == 0:
break
if i in rpcOpcodes:
if phase == "counts":
rpc.metric('%sCount' % rpcOpcodes[i].lower(), 'number of invocations of the %s RPC' % rpcOpcodes[i])