# Repository: andrewcooke/simple-date (file: src/simpledate/__init__.py)
from calendar import timegm
import datetime as dt
from itertools import islice
from collections import OrderedDict
from threading import local
from tzlocal import get_localzone
from pytz import timezone, country_timezones, all_timezones, FixedOffset, utc, NonExistentTimeError, common_timezones, UTC
from simpledate.fmt import strptime, reconstruct, strip, invert, auto_invert
from simpledate.utils import DebugLog, MRUSortedIterable, OrderedSet, set_kargs_only, always_tuple
# A wrapper around the datetime, pytz and tzlocal packages.
# (c) 2013 <NAME> (<EMAIL>)
# Released into the public domain for any use, but with absolutely no warranty.
# Build the various formats used by SimpleDateParser.
RFC_2822 = EMAIL = ('(!a!, ?)d! ?b! ?Y! H!:M(!:S)?(! !Z|! ?!z)?',)
ISO_8601 = YMD = (invert('Y(!-?m(!-?d(( |%T)H!:?M(!:?S(.f)?)?)?)?)?(! ?(!Z|!z))?'),)
MDY = ('(m!/d!/)?Y(! H!:M(!:S(.f)?)?)?(! !Z|! ?!z)?',)
DMY = ('(d!/m!/)?Y(! H!:M(!:S(.f)?)?)?(! !Z|! ?!z)?',)
ASN_1 = ('b! d(! !H!:!M(!:!S)?)?! Y(! ?!Z|! ?!z)?', 'Y!m!d!H!M!S(!Z|!z)', '!y!m!d!H!M!S(!Z|!z)')
DEFAULT_FORMAT = '%Y-%m-%d %H:%M:%S.%f %Z'
DEFAULT_FORMATS = ISO_8601 + RFC_2822 + ASN_1
# ASN.1 alias for UTC
class _Z(UTC.__class__):
def tzname(self, dt): return 'Z'
def __str__(self): return 'Z'
def __repr__(self): return '<Z>'
Z = _Z()
# Various utilities to work around oddities (bugs?) in pytz and python versions.
def reapply_tzinfo(datetime, is_dst):
'''
Re-apply the timezone to the datetime. This is what you might think
pytz's normalize does, but it doesn't. So this is like a normalize on
steroids. This fixes an issue where pytz's tzinfo gets stuck at the
wrong date.
:param datetime: The datetime (with tzinfo) that may be broken.
:return: A new datetime, with the same tzinfo.
'''
return tzinfo_localize(datetime.tzinfo, datetime.replace(tzinfo=None), is_dst)
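# Illustrative example (added; not part of the original module): attaching a pytz zone
# with `replace(tzinfo=...)` leaves it pinned to the zone's base (often LMT) offset;
# re-localizing via reapply_tzinfo picks the offset that is correct for that date.
#
#   >>> eastern = timezone('US/Eastern')
#   >>> broken = dt.datetime(2013, 6, 1, 12, 0, tzinfo=eastern)   # stuck at the LMT-like offset
#   >>> fixed = reapply_tzinfo(broken, is_dst=False)              # now EDT, UTC-04:00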
def tzinfo_astimezone(tzinfo, datetime):
'''
Set the timezone after conversion.
:param tzinfo: The timezone we are targeting.
:param datetime: The datetime to adjust and then make naive.
:return: A naive datetime in the given timezone.
'''
if datetime.tzinfo:
datetime = datetime.astimezone(tzinfo)
if datetime.tzinfo is not tzinfo:
datetime = datetime.replace(tzinfo=tzinfo)
return datetime
def tzinfo_tzname(tzinfo, datetime, is_dst):
'''
Get the name for the timezone at this time, avoiding an error when not
naive.
:param tzinfo: The tzinfo whose name we want.
:param datetime: The time at which we want the name.
:param is_dst: To resolve ambiguities.
:return: The name of the tzinfo at the given time.
'''
datetime = tzinfo_astimezone(tzinfo, datetime)
# don't understand why we need this, but without it we get very odd results.
datetime = datetime.replace(tzinfo=None)
# for some reason there are two APIs...
try:
return tzinfo.tzname(datetime, is_dst)
except TypeError:
name = tzinfo.tzname(datetime)
if name is None:
offset = tzinfo_utcoffset(tzinfo, datetime)
# following from datetime %z formatting code
if offset is not None:
sign = '+'
if offset.days < 0:
offset = -offset
sign = '-'
h, m = divmod(offset, dt.timedelta(hours=1))
assert not m % dt.timedelta(minutes=1), "whole minute"
m //= dt.timedelta(minutes=1)
name = '%c%02d%02d' % (sign, h, m)
return name
def tzinfo_utcoffset(tzinfo, datetime):
'''
Get the UTC offset for the timezone at this time, avoiding an error when
not naive.
:param tzinfo: The tzinfo whose offset we want.
:param datetime: The time at which we want the offset.
:return: The UTC offset of the tzinfo at the given time.
'''
datetime = tzinfo_astimezone(tzinfo, datetime)
# don't understand why we need this, but without it we get very odd results.
datetime = datetime.replace(tzinfo=None)
return tzinfo.utcoffset(datetime)
def tzinfo_localize(tzinfo, datetime, is_dst):
'''
If is_dst is unsupported then ignore it.
:param tzinfo: The tzinfo we are setting.
:param datetime: The datetime we are converting.
:param is_dst: Whether the date is in daylight savings time.
:return: The localized datetime.
'''
try:
return tzinfo.localize(datetime, is_dst)
except TypeError:
return tzinfo.localize(datetime)
def datetime_timestamp(datetime):
'''
Equivalent to datetime.timestamp() for Python versions before 3.3.
'''
try:
return datetime.timestamp()
except AttributeError:
utc_datetime = datetime.astimezone(utc)
return timegm(utc_datetime.timetuple()) + utc_datetime.microsecond / 1e6
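# Illustrative example (added; not part of the original module): on Python >= 3.3 this is
# simply datetime.timestamp(); the fallback converts to UTC and counts seconds since the
# epoch, keeping microseconds as a fraction.
#
#   >>> aware = utc.localize(dt.datetime(1970, 1, 1, 0, 0, 1, 500000))
#   >>> datetime_timestamp(aware)
#   1.5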
# Utilities to help with argument handling and the like.
def always_datetime(value):
'''
:param value: The value to convert to datetime (datetime or SimpleDate).
:return: A datetime.
'''
try:
return value.datetime
except AttributeError:
return value
def names(cutoff, test, **kargs):
'''
Given a set of named values, select those for which `test(value)` is True
and then, if `cutoff` or more are found, return their names.
:param cutoff: The number of named values that must match the test.
:param test: The test for values.
:param kargs: The named values.
:return: The names of values that match the test, if `cutoff` or more match,
otherwise `None`.
'''
defined = {name: value for name, value in kargs.items() if test(value)}
if len(defined) >= cutoff:
return list(defined.keys())
else:
return None
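# Illustrative example (added; not part of the original module), using is_not_none defined below:
#
#   >>> names(2, is_not_none, year=2013, month=None, day=1)
#   ['year', 'day']
#   >>> names(3, is_not_none, year=2013, month=None, day=1)   # only 2 values match
#   None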
def test_all(test, *args): return all(map(test, args))
def test_any(test, *args): return any(map(test, args))
def is_none(value): return value is None
def is_not_none(value): return value is not None
def is_int_or_none(value): return value is None or isinstance(value, int)
def take(n, iterable): return islice(iterable, 0, n)
def prefer(*countries, using=set(country_timezones.keys())):
'''
Pull some countries to the front of the list. When used with
`unsafe=True` this can help select the expected timezone.
:param countries: The countries to prefer (in order).
:param using: The full list of countries.
:return: All country codes, with the given ones first.
'''
codes = OrderedSet(countries)
codes.union(using)
return codes
def exclude(*countries, using=set(country_timezones.keys())):
'''
Drop some countries from the list.
:param countries: The countries to exclude.
:param using: The full list of countries.
:return: All country codes except the excluded ones.
'''
return OrderedSet(code for code in using if code not in countries)
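# Illustrative example (added; not part of the original module): bias or prune the set of
# country codes used when resolving ambiguous timezone names (assuming OrderedSet supports
# membership tests).
#
#   >>> codes = prefer('US', 'CA')    # 'US' and 'CA' first, followed by every other code
#   >>> 'GB' in exclude('GB')
#   False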
# Exceptions.
class SimpleDateError(Exception):
def __init__(self, template='', *args, **kargs):
'''
:param template: A message that can contain {0}-style formatting.
:param args: Format arguments.
:param kargs: Named format arguments.
:return: A new instance of the exception.
'''
super().__init__(template.format(*args, **kargs))
class PyTzFactoryError(SimpleDateError):
def __init__(self, message, timezones, datetime, is_dst=False, country=None, unsafe=None):
'''
:param message: A descriptive message.
:param timezones: The timezones passed to the search method.
:param datetime: The datetime passed to the search method.
:param is_dst: The DST flag passed to the search method.
:param country: The country code passed to the search method.
:param unsafe: The unsafe flag passed to the search method.
'''
super().__init__(PyTzFactoryError.format(message, timezones, datetime, is_dst, country, unsafe))
@staticmethod
def format(message, timezones, datetime, is_dst, country, unsafe):
'''
:param message: A descriptive message.
:param timezones: The timezones passed to the search method.
:param datetime: The datetime passed to the search method.
:param is_dst: The DST flag passed to the search method.
:param country: The country code passed to the search method.
:param unsafe: The unsafe flag passed to the search method.
'''
if is_dst is None and country is None and unsafe is None:
return '{0} (timezones={1!r}, datetime={2!r})'.format(message, timezones, datetime)
else:
return '{0} (timezones={1!r}, datetime={2!r}, is_dst={3!r}, country={4!r}, unsafe={5!r})'.format(message, timezones, datetime, is_dst, country, unsafe)
class NoTimezone(PyTzFactoryError):
def __init__(self, timezones, datetime, is_dst, country, unsafe):
'''
:param timezones: The timezones passed to the search method.
:param datetime: The datetime passed to the search method.
:param is_dst: The DST flag passed to the search method.
:param country: The country code passed to the search method.
:param unsafe: The unsafe flag passed to the search method.
'''
# use a list for timezones so it looks different from tuples in docs
super().__init__('No timezone found', list(timezones), datetime, is_dst, country, unsafe)
class AmbiguousTimezone(PyTzFactoryError):
def __init__(self, distinct, timezones, datetime, is_dst, country, unsafe):
'''
:param distinct: The timezones found with distinct offsets.
:param timezones: The timezones passed to the search method.
:param datetime: The datetime passed to the search method.
:param is_dst: The DST flag passed to the search method.
:param country: The country code passed to the search method.
:param unsafe: The unsafe flag passed to the search method.
'''
super().__init__('{0} distinct timezones found: {1}'.format(len(distinct), '; '.join(map(repr, distinct))),
timezones, datetime, is_dst, country, unsafe)
class SingleInstantTzError(SimpleDateError):
'''
An attempt was made to use a timezone defined only for one isolated
instant in time in a more general way. Typically, all you can do with
times associated with such timezones is convert them to UTC.
'''
def __init__(self, tzinfo, datetime, other):
'''
:param tzinfo: The offset and name.
:param datetime: The time at which the timezone is defined.
:param other: The time at which the timezone was used. No longer
used in message, as string conversion can itself cause an error,
giving infinite recursion.
'''
super().__init__('Attempted to use {0}, defined only for {1}', tzinfo, datetime)
# Classes implementing the core functionality.
class SingleInstantTz(dt.tzinfo):
# ---- (truncated; a separate source file follows) ----
import time
import struct
try:
# Try to import the Python 3.x enum module
from enum import IntEnum
except ImportError:
# If we're on Python 2.x we need to define
# a dummy replacement
class IntEnum:
pass
# <pep8 compliant>
LOG_READ_TIME = False
LOG_WRITE_TIME = False
LOG_ANIM_HEADER = False
LOG_ANIM_BONES = False
LOG_ANIM_BONE_MODIFIERS = False
LOG_ANIM_BONES_KEYS = False
LOG_ANIM_NOTES = False
class SEANIM_TYPE(IntEnum):
SEANIM_TYPE_ABSOLUTE = 0
SEANIM_TYPE_ADDITIVE = 1
SEANIM_TYPE_RELATIVE = 2
SEANIM_TYPE_DELTA = 3
class SEANIM_PRESENCE_FLAGS(IntEnum):
# These describe what type of keyframe data is present for the bones
SEANIM_BONE_LOC = 1 << 0
SEANIM_BONE_ROT = 1 << 1
SEANIM_BONE_SCALE = 1 << 2
# If any of the above flags are set, then bone keyframe data is present,
# so ANDing dataPresenceFlags against this mask is non-zero (see the example after this class)
SEANIM_PRESENCE_BONE = 1 << 0 | 1 << 1 | 1 << 2
SEANIM_PRESENCE_NOTE = 1 << 6 # The file contains notetrack data
SEANIM_PRESENCE_CUSTOM = 1 << 7 # The file contains a custom data block
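# Illustrative example (added; not part of the original file), using the Header class defined below:
#
#   header = Header()
#   header.dataPresenceFlags |= SEANIM_PRESENCE_FLAGS.SEANIM_BONE_ROT
#   if header.dataPresenceFlags & SEANIM_PRESENCE_FLAGS.SEANIM_PRESENCE_BONE:
#       print("bone keyframe data is present")   # reached, because BONE_ROT is part of the mask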
class SEANIM_PROPERTY_FLAGS(IntEnum):
SEANIM_PRECISION_HIGH = 1 << 0
class SEANIM_FLAGS(IntEnum):
SEANIM_LOOPED = 1 << 0
class Info(object):
__slots__ = ('version', 'magic')
def __init__(self, file=None):
self.version = 1
self.magic = b'SEAnim'
if file is not None:
self.load(file)
def load(self, file):
bytes = file.read(8)
data = struct.unpack('6ch', bytes)
magic = b''
for i in range(6):
magic += data[i]
version = data[6]
assert magic == self.magic
assert version == self.version
def save(self, file):
bytes = self.magic
bytes += struct.pack('h', self.version)
file.write(bytes)
class Header(object):
__slots__ = (
'animType', 'animFlags',
'dataPresenceFlags', 'dataPropertyFlags',
'framerate', 'frameCount',
'boneCount', 'boneAnimModifierCount',
'noteCount'
)
def __init__(self, file=None):
self.animType = SEANIM_TYPE.SEANIM_TYPE_RELATIVE # Relative is default
self.animFlags = 0x0
self.dataPresenceFlags = 0x0
self.dataPropertyFlags = 0x0
self.framerate = 0
self.frameCount = 0
self.boneCount = 0
self.boneAnimModifierCount = 0
self.noteCount = 0
if file is not None:
self.load(file)
def load(self, file):
bytes = file.read(2)
data = struct.unpack('h', bytes)
headerSize = data[0]
bytes = file.read(headerSize - 2)
# The '=' prefix tells struct to use standard sizes with no alignment padding (ignore C struct packing rules)
data = struct.unpack('=6BfII4BI', bytes)
self.animType = data[0]
self.animFlags = data[1]
self.dataPresenceFlags = data[2]
self.dataPropertyFlags = data[3]
# reserved = data[4]
# reserved = data[5]
self.framerate = data[6]
self.frameCount = data[7]
self.boneCount = data[8]
self.boneAnimModifierCount = data[9]
# reserved = data[10]
# reserved = data[11]
# reserved = data[12]
self.noteCount = data[13]
def save(self, file):
bytes = struct.pack('=6BfII4BI',
self.animType, self.animFlags,
self.dataPresenceFlags, self.dataPropertyFlags,
0, 0,
self.framerate,
self.frameCount, self.boneCount,
self.boneAnimModifierCount, 0, 0, 0,
self.noteCount)
size = struct.pack('h', len(bytes) + 2)
file.write(size)
file.write(bytes)
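# Layout note (added, derived from the struct format above): the header body packed by
# '=6BfII4BI' is 26 bytes, preceded by a 2-byte size field, so the stored headerSize is 28:
#   B animType              B animFlags
#   B dataPresenceFlags     B dataPropertyFlags
#   B reserved              B reserved
#   f framerate
#   I frameCount            I boneCount
#   B boneAnimModifierCount, 3 x B reserved
#   I noteCount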
class Frame_t(object):
"""
The Frame_t class is only ever used to get the size
and format character used by frame indices in a given seanim file
"""
__slots__ = ('size', 'char')
def __init__(self, header):
if header.frameCount <= 0xFF:
self.size = 1
self.char = 'B'
elif header.frameCount <= 0xFFFF:
self.size = 2
self.char = 'H'
else: # if header.frameCount <= 0xFFFFFFFF:
self.size = 4
self.char = 'I'
class Bone_t(object):
"""
The Bone_t class is only ever used to get the size
and format character used by bone indices in a given seanim file
"""
__slots__ = ('size', 'char')
def __init__(self, header):
if header.boneCount <= 0xFF:
self.size = 1
self.char = 'B'
elif header.boneCount <= 0xFFFF:
self.size = 2
self.char = 'H'
else: # if header.boneCount <= 0xFFFFFFFF:
self.size = 4
self.char = 'I'
class Precision_t(object):
"""
The Precision_t class is only ever used to get the size
and format character used by vec3_t, quat_t, etc. in a given seanim file
"""
__slots__ = ('size', 'char')
def __init__(self, header):
if (header.dataPropertyFlags &
SEANIM_PROPERTY_FLAGS.SEANIM_PRECISION_HIGH):
self.size = 8
self.char = 'd'
else:
self.size = 4
self.char = 'f'
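# Illustrative example (added; not part of the original file): the *_t helpers pick the
# narrowest struct codes for a file, and loadData() below combines them into formats
# such as '=%c3%c'.
#
#   header = Header()
#   header.frameCount = 300                       # > 0xFF, so frame indices use 'H' (2 bytes)
#   frame_t = Frame_t(header)
#   precision_t = Precision_t(header)             # no HIGH-precision flag, so 'f' (4 bytes)
#   fmt = '=%c3%c' % (frame_t.char, precision_t.char)   # '=H3f': one position key, 14 bytes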
class KeyFrame(object):
"""
A small class used for holding keyframe data
"""
__slots__ = ('frame', 'data')
def __init__(self, frame, data):
self.frame = frame
self.data = data
class Bone(object):
__slots__ = (
'name', 'flags',
'locKeyCount', 'rotKeyCount', 'scaleKeyCount',
'posKeys', 'rotKeys', 'scaleKeys',
'useModifier', 'modifier'
)
def __init__(self, file=None):
self.name = ""
self.flags = 0x0
self.locKeyCount = 0
self.rotKeyCount = 0
self.scaleKeyCount = 0
self.posKeys = []
self.rotKeys = []
self.scaleKeys = []
self.useModifier = False
self.modifier = 0
if file is not None:
self.load(file)
def load(self, file):
bytes = b''
b = file.read(1)
while not b == b'\x00':
bytes += b
b = file.read(1)
self.name = bytes.decode("utf-8")
def loadData(self, file, frame_t, precision_t,
useLoc=False, useRot=False, useScale=False):
# Read the flags for the bone
bytes = file.read(1)
data = struct.unpack("B", bytes)
self.flags = data[0]
# Load the position keyframes if they are present
if useLoc:
bytes = file.read(frame_t.size)
data = struct.unpack('%c' % frame_t.char, bytes)
self.locKeyCount = data[0]
for _ in range(self.locKeyCount):
bytes = file.read(frame_t.size + 3 * precision_t.size)
data = struct.unpack('=%c3%c' %
(frame_t.char, precision_t.char), bytes)
frame = data[0]
pos = (data[1], data[2], data[3])
self.posKeys.append(KeyFrame(frame, pos))
# Load the rotation keyframes if they are present
if useRot:
bytes = file.read(frame_t.size)
data = struct.unpack('%c' % frame_t.char, bytes)
self.rotKeyCount = data[0]
for _ in range(self.rotKeyCount):
bytes = file.read(frame_t.size + 4 * precision_t.size)
data = struct.unpack('=%c4%c' %
(frame_t.char, precision_t.char), bytes)
frame = data[0]
# Load the quaternion as XYZW
quat = (data[1], data[2], data[3], data[4])
self.rotKeys.append(KeyFrame(frame, quat))
# Load the scale keyframes if they are present
if useScale:
bytes = file.read(frame_t.size)
data = struct.unpack('%c' % frame_t.char, bytes)
self.scaleKeyCount = data[0]
for _ in range(self.scaleKeyCount):
bytes = file.read(frame_t.size + 3 * precision_t.size)
data = struct.unpack('=%c3%c' %
(frame_t.char, precision_t.char), bytes)
frame = data[0]
scale = (data[1], data[2], data[3])
self.scaleKeys.append(KeyFrame(frame, scale))
def save(self, file, frame_t, bone_t, precision_t,
useLoc=False, useRot=False, useScale=False):
bytes = struct.pack("B", self.flags)
file.write(bytes)
if useLoc:
bytes = struct.pack('%c' % frame_t.char, len(self.posKeys))
file.write(bytes)
for key in self.posKeys:
bytes = struct.pack('=%c3%c' %
(frame_t.char, precision_t.char),
key.frame,
key.data[0], key.data[1], key.data[2])
file.write(bytes)
if useRot:
bytes = struct.pack('%c' % frame_t.char, len(self.rotKeys))
file.write(bytes)
for key in self.rotKeys:
bytes = struct.pack('=%c4%c' %
(frame_t.char, precision_t.char),
key.frame,
key.data[0], key.data[1],
key.data[2], key.data[3])
file.write(bytes)
if useScale:
bytes = struct.pack('%c' % frame_t.char, len(self.scaleKeys))
file.write(bytes)
for key in self.scaleKeys:
bytes = struct.pack('=%c3%c' %
(frame_t.char, precision_t.char),
key.frame,
key.data[0], key.data[1], key.data[2])
file.write(bytes)
class Note(object):
__slots__ = ('frame', 'name')
def __init__(self, file=None, frame_t=None):
self.frame = -1
self.name = ""
if file is not None:
self.load(file, frame_t)
def load(self, file, frame_t):
bytes = file.read(frame_t.size)
data = struct.unpack('%c' % frame_t.char, bytes)
self.frame = data[0]
bytes = b''
b = file.read(1)
while not b == b'\x00':
bytes += b
b = file.read(1)
self.name = bytes.decode("utf-8")
def save(self, file, frame_t):
bytes = struct.pack('%c' % frame_t.char, self.frame)
file.write(bytes)
bytes = struct.pack('%ds' % (len(self.name) + 1), self.name.encode())
file.write(bytes)
class Anim(object):
__slots__ = ('__info', 'info', 'header', 'bones',
'boneAnimModifiers', 'notes')
def __init__(self, path=None):
self.__info = Info()
self.header = Header()
self.bones = []
self.boneAnimModifiers = []
self.notes = []
if path is not None:
self.load(path)
# Update the header flags based on the presence of certain keyframe /
# notetrack data
def update_metadata(self, high_precision=False, looping=False):
anim_locKeyCount = 0
anim_rotKeyCount = 0
anim_scaleKeyCount = 0
header = self.header
header.boneCount = len(self.bones)
dataPresenceFlags = header.dataPresenceFlags
dataPropertyFlags = header.dataPropertyFlags
max_frame_index = 0
for bone in self.bones:
bone.locKeyCount = len(bone.posKeys)
bone.rotKeyCount = len(bone.rotKeys)
bone.scaleKeyCount = len(bone.scaleKeys)
anim_locKeyCount += bone.locKeyCount
anim_rotKeyCount += bone.rotKeyCount
anim_scaleKeyCount += bone.scaleKeyCount
for key in bone.posKeys:
max_frame_index = max(max_frame_index, key.frame)
for key in bone.rotKeys:
max_frame_index = max(max_frame_index, key.frame)
for key in bone.scaleKeys:
max_frame_index = max(max_frame_index, key.frame)
if anim_locKeyCount:
dataPresenceFlags |= SEANIM_PRESENCE_FLAGS.SEANIM_BONE_LOC
if anim_rotKeyCount:
dataPresenceFlags |= SEANIM_PRESENCE_FLAGS.SEANIM_BONE_ROT
if anim_scaleKeyCount:
dataPresenceFlags |= SEANIM_PRESENCE_FLAGS.SEANIM_BONE_SCALE
for note in self.notes:
max_frame_index = max(max_frame_index, note.frame)
header.noteCount = len(self.notes)
if header.noteCount:
dataPresenceFlags |= SEANIM_PRESENCE_FLAGS.SEANIM_PRESENCE_NOTE
if high_precision:
dataPropertyFlags |= SEANIM_PROPERTY_FLAGS.SEANIM_PRECISION_HIGH
if looping:
header.animFlags |= SEANIM_FLAGS.SEANIM_LOOPED
header.dataPresenceFlags = dataPresenceFlags
header.dataPropertyFlags = dataPropertyFlags
# FrameCount represents the length of the animation in frames
# and since all animations start at frame 0 - we simply grab
# the max frame number (from keys / notes / etc.) and add 1 to it
header.frameCount = max_frame_index + 1
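# Worked example (added; not part of the original file): with keys on frames 0, 7 and 9
# and a note on frame 12, max_frame_index is 12, so header.frameCount becomes 13.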
def load(self, path):
if LOG_READ_TIME:
time_start = time.time()
print("Loading: '%s'" % path)
try:
file = open(path, "rb")
except IOError:
print("Could not open file for reading:\n %s" % path)
return
self.info = Info(file)
self.header = Header(file)
self.boneAnimModifiers = []
# Init the frame_t, bone_t and precision_t info
frame_t = Frame_t(self.header)
# ---- (truncated; a separate source file follows) ----
# calling the process again will now raise an exception
with pytest.raises(pytest_subprocess.ProcessNotRegisteredError) as exc:
subprocess.check_call("test")
assert str(exc.value) == "The process 'test' was not registered."
# now, register two processes once again, but the last one will be kept forever
fake_process.register_subprocess("test", stdout="first execution")
fake_process.register_subprocess("test", stdout="second execution")
fake_process.keep_last_process(True)
# now the processes can be called forever
assert subprocess.check_output("test") == b"first execution"
assert subprocess.check_output("test") == b"second execution"
assert subprocess.check_output("test") == b"second execution"
assert subprocess.check_output("test") == b"second execution"
def test_different_output_with_context(fake_process):
"""
Leaving one context shall bring back the upper context's processes,
even if they were already consumed. This functionality is important
because it allows broader-level fixtures to register their own processes
and keep them predictable.
"""
fake_process.register_subprocess("test", stdout="top-level")
with fake_process.context() as nested:
nested.register_subprocess("test", stdout="nested")
assert subprocess.check_output("test") == b"nested"
assert subprocess.check_output("test") == b"top-level"
with pytest.raises(pytest_subprocess.ProcessNotRegisteredError) as exc:
subprocess.check_call("test")
assert str(exc.value) == "The process 'test' was not registered."
with fake_process.context() as nested2:
# another nest level, the top level shall reappear
nested2.register_subprocess("test", stdout="nested2")
assert subprocess.check_output("test") == b"nested2"
assert subprocess.check_output("test") == b"top-level"
with pytest.raises(pytest_subprocess.ProcessNotRegisteredError) as exc:
subprocess.check_call("test")
assert str(exc.value) == "The process 'test' was not registered."
assert subprocess.check_output("test") == b"top-level"
with pytest.raises(pytest_subprocess.ProcessNotRegisteredError) as exc:
subprocess.check_call("test")
assert str(exc.value) == "The process 'test' was not registered."
def test_different_output_with_context_multilevel(fake_process):
"""
This is a similar test to the previous one, but here the nesting will be deeper
"""
fake_process.register_subprocess("test", stdout="top-level")
with fake_process.context() as first_level:
first_level.register_subprocess("test", stdout="first-level")
with fake_process.context() as second_level:
second_level.register_subprocess("test", stdout="second-level")
assert subprocess.check_output("test") == b"second-level"
assert subprocess.check_output("test") == b"first-level"
assert subprocess.check_output("test") == b"top-level"
with pytest.raises(pytest_subprocess.ProcessNotRegisteredError) as exc:
subprocess.check_call("test")
assert subprocess.check_output("test") == b"first-level"
assert subprocess.check_output("test") == b"top-level"
with pytest.raises(pytest_subprocess.ProcessNotRegisteredError) as exc:
subprocess.check_call("test")
assert str(exc.value) == "The process 'test' was not registered."
assert subprocess.check_output("test") == b"top-level"
with pytest.raises(pytest_subprocess.ProcessNotRegisteredError) as exc:
subprocess.check_call("test")
def test_multiple_level_early_consuming(fake_process):
"""
The top-level process will be declared with two occurrences, but the first one will
be consumed before entering the context manager.
"""
fake_process.register_subprocess("test", stdout="top-level", occurrences=2)
assert subprocess.check_output("test") == b"top-level"
with fake_process.context():
assert subprocess.check_output("test") == b"top-level"
with pytest.raises(pytest_subprocess.ProcessNotRegisteredError) as exc:
subprocess.check_call("test")
assert str(exc.value) == "The process 'test' was not registered."
assert subprocess.check_output("test") == b"top-level"
with pytest.raises(pytest_subprocess.ProcessNotRegisteredError) as exc:
subprocess.check_call("test")
assert str(exc.value) == "The process 'test' was not registered."
def test_keep_last_process(fake_process):
"""
The ProcessNotRegisteredError will never be raised for the process that
has been registered at least once.
"""
fake_process.keep_last_process(True)
fake_process.register_subprocess("test", stdout="First run")
fake_process.register_subprocess("test", stdout="Second run")
assert subprocess.check_output("test") == b"First run"
assert subprocess.check_output("test") == b"Second run"
assert subprocess.check_output("test") == b"Second run"
assert subprocess.check_output("test") == b"Second run"
def test_git(fake_process):
fake_process.register_subprocess(
["git", "branch"], stdout=["* fake_branch", " master"]
)
process = subprocess.Popen(
["git", "branch"], stdout=subprocess.PIPE, universal_newlines=True
)
out, _ = process.communicate()
assert process.returncode == 0
assert out == "* fake_branch\n master\n"
def test_use_real(fake_process):
fake_process.pass_command(["python", "example_script.py"], occurrences=3)
fake_process.register_subprocess(
["python", "example_script.py"], stdout=["Fake line 1", "Fake line 2"]
)
for _ in range(0, 3):
assert (
subprocess.check_output(
["python", "example_script.py"], universal_newlines=True
)
== "Stdout line 1\nStdout line 2\n"
)
assert (
subprocess.check_output(
["python", "example_script.py"], universal_newlines=True
)
== "Fake line 1\nFake line 2\n"
)
@pytest.mark.skipif(os.name == "nt", reason="Skip on windows")
def test_real_process(fake_process):
with pytest.raises(pytest_subprocess.ProcessNotRegisteredError):
# this will fail, as "ls" command is not registered
subprocess.call("ls")
fake_process.pass_command("ls")
# now it should be fine
assert subprocess.call("ls") == 0
# allow all commands to be called by real subprocess
fake_process.allow_unregistered(True)
assert subprocess.call(["ls", "-l"]) == 0
def test_context_manager(fake_process):
with pytest.raises(pytest_subprocess.ProcessNotRegisteredError):
# command not registered, so will raise an exception
subprocess.check_call("test")
with fake_process.context() as nested_process:
nested_process.register_subprocess("test", occurrences=3)
# now, we can call the command 3 times without error
assert subprocess.check_call("test") == 0
assert subprocess.check_call("test") == 0
# the command was called 2 times, so one occurrence left, but since the
# context manager has been left, it is not registered anymore
with pytest.raises(pytest_subprocess.ProcessNotRegisteredError):
subprocess.check_call("test")
def test_raise_exception(fake_process):
def callback_function(process):
process.returncode = 1
raise PermissionError("exception raised by subprocess")
fake_process.register_subprocess(["test"], callback=callback_function)
with pytest.raises(PermissionError, match="exception raised by subprocess"):
process = subprocess.Popen(["test"])
process.wait()
assert process.returncode == 1
def test_callback_with_arguments(fake_process):
def callback_function(process, return_code):
process.returncode = return_code
return_code = 127
fake_process.register_subprocess(
["test"],
callback=callback_function,
callback_kwargs={"return_code": return_code},
)
process = subprocess.Popen(["test"])
process.wait()
assert process.returncode == return_code
def test_subprocess_pipe_without_stream_definition(fake_process):
"""
From GitHub #17 - the fake_subprocess was crashing if the subprocess was called
with stderr=subprocess.PIPE but the stderr was not defined during the process
registration.
"""
fake_process.register_subprocess(
["test-no-stderr"], stdout="test",
)
fake_process.register_subprocess(
["test-no-stdout"], stderr="test",
)
fake_process.register_subprocess(["test-no-streams"],)
assert (
subprocess.check_output(["test-no-stderr"], stderr=subprocess.STDOUT).decode()
== "test"
)
assert (
subprocess.check_output(["test-no-stdout"], stderr=subprocess.STDOUT).decode()
== "test"
)
assert (
subprocess.check_output(["test-no-streams"], stderr=subprocess.STDOUT).decode()
== ""
)
@pytest.mark.parametrize("command", (("test",), "test"))
def test_different_command_type(fake_process, command):
"""
From GitHub #18 - registering a process as ["command"] or "command" should make no
difference, and neither style of calling the command shall raise an error.
"""
fake_process.keep_last_process(True)
fake_process.register_subprocess(command)
assert subprocess.check_call("test") == 0
assert subprocess.check_call(["test"]) == 0
@pytest.mark.parametrize(
"command", (("test", "with", "arguments"), "test with arguments")
)
def test_different_command_type_complex_command(fake_process, command):
"""
Similar to previous test, but the command is more complex.
"""
fake_process.keep_last_process(True)
fake_process.register_subprocess(command)
assert subprocess.check_call("test with arguments") == 0
assert subprocess.check_call(["test", "with", "arguments"]) == 0
@pytest.mark.flaky(reruns=2, condition=platform.python_implementation() == "PyPy")
def test_raise_exception_check_output(fake_process):
"""
From GitHub#16 - the check_output raises the CalledProcessError exception
when the exit code is not zero. The exception should not shadow the exception
from the callback, if any.
For some reason, this test is flaky on PyPy. Further investigation required.
"""
def callback_function(_):
raise FileNotFoundError("raised in callback")
fake_process.register_subprocess("regular-behavior", returncode=1)
fake_process.register_subprocess(
"custom-exception", returncode=1, callback=callback_function
)
with pytest.raises(subprocess.CalledProcessError):
subprocess.check_output("regular-behavior")
with pytest.raises(FileNotFoundError, match="raised in callback"):
subprocess.check_output("custom-exception")
def test_callback_and_return_code(fake_process):
"""Regression - the returncode was ignored when callback_function was present."""
def dummy_callback(_):
pass
def override_returncode(process):
process.returncode = 5
return_code = 1
fake_process.register_subprocess(
"test-dummy", returncode=return_code, callback=dummy_callback
)
process = subprocess.Popen("test-dummy")
process.wait()
assert process.returncode == return_code
fake_process.register_subprocess(
"test-increment", returncode=return_code, callback=override_returncode
)
process = subprocess.Popen("test-increment")
process.wait()
assert process.returncode == 5
@pytest.mark.skipif(
sys.version_info <= (3, 6), reason="encoding and errors has been introduced in 3.6",
)
@pytest.mark.parametrize("argument", ["encoding", "errors"])
@pytest.mark.parametrize("fake", [False, True])
def test_encoding(fake_process, fake, argument):
"""If encoding or errors is provided, the `text=True` behavior should be enabled."""
username = getpass.getuser()
values = {"encoding": "utf-8", "errors": "strict"}
fake_process.allow_unregistered(not fake)
if fake:
fake_process.register_subprocess(["whoami"], stdout=username)
output = subprocess.check_output(
["whoami"], **{argument: values.get(argument)}
).strip()
assert isinstance(output, str)
assert output.endswith(username)
@pytest.mark.parametrize("command", ["ls -lah", ["ls", "-lah"]])
def test_string_or_tuple(fake_process, command):
"""
It doesn't matter how you register the command, it should work as string or list.
"""
fake_process.register_subprocess(command, occurrences=2)
assert subprocess.check_call("ls -lah") == 0
assert subprocess.check_call(["ls", "-lah"]) == 0
def test_with_wildcards(fake_process):
"""Use Any() with real example"""
fake_process.keep_last_process(True)
fake_process.register_subprocess(("ls", fake_process.any()))
assert subprocess.check_call("ls -lah") == 0
assert subprocess.check_call(["ls", "-lah", "/tmp"]) == 0
assert subprocess.check_call(["ls"]) == 0
fake_process.register_subprocess(["cp", fake_process.any(min=2)])
with pytest.raises(pytest_subprocess.ProcessNotRegisteredError):
subprocess.check_call("cp /source/dir")
assert subprocess.check_call("cp /source/dir /tmp/random-dir") == 0
fake_process.register_subprocess(["cd", fake_process.any(max=1)])
with pytest.raises(pytest_subprocess.ProcessNotRegisteredError):
subprocess.check_call(["cd ~/ /tmp"])
assert subprocess.check_call("cd ~/") == 0
def test_call_count(fake_process):
"""Check if commands are registered and counted properly"""
fake_process.keep_last_process(True)
fake_process.register_subprocess([fake_process.any()])
assert subprocess.check_call("ls -lah") == 0
assert subprocess.check_call(["cp", "/tmp/source", "/source"]) == 0
assert subprocess.check_call(["cp", "/source", "/destination"]) == 0
assert subprocess.check_call(["cp", "/source", "/other/destination"]) == 0
assert "ls -lah" in fake_process.calls
assert ["cp", "/tmp/source", "/source"] in fake_process.calls
assert ["cp", "/source", "/destination"] in fake_process.calls
assert ["cp", "/source", "/other/destination"] in fake_process.calls
assert fake_process.call_count("cp /tmp/source /source") == 1
assert fake_process.call_count(["cp", "/source", fake_process.any()]) == 2
assert fake_process.call_count(["cp", fake_process.any()]) == 3
assert fake_process.call_count(["ls", "-lah"]) == 1
def test_called_process_waits_for_the_callback_to_finish(fake_process, tmp_path):
output_file_path = tmp_path / "output"
def callback(process):
# simulate a long-running process that creates an output file at the very end
time.sleep(1)
output_file_path.touch()
fake_process.register_subprocess([fake_process.any()], callback=callback)
subprocess.run(["ls", "-al"], stdin="abc")
assert output_file_path.exists()
def test_allow_unregistered_cleaning(fake_process):
"""
GitHub: #46.
The `allow_unregistered()` function should affect only the level where it was applied.
The setting shouldn't leak to higher levels or other tests.
"""
fake_process.allow_unregistered(False)
with fake_process.context() as context:
context.allow_unregistered(True)
subprocess.run(["python", "example_script.py"])
subprocess.run(["python", "example_script.py"])
subprocess.run(["python", "example_script.py"])
with fake_process.context():
with pytest.raises(ProcessNotRegisteredError):
subprocess.run(["python", "example_script.py"])
with pytest.raises(ProcessNotRegisteredError):
subprocess.run(["test"])
def test_keep_last_process_cleaning(fake_process):
"""
GitHub: #46.
The `keep_last_process()` function should affect only the level where it was applied.
The setting shouldn't leak to higher levels or other tests.
"""
fake_process.keep_last_process(False)
with fake_process.context() as context:
context.keep_last_process(True)
context.register_subprocess(["test"])
subprocess.run(["test"])
subprocess.run(["test"])
subprocess.run(["test"])
with fake_process.context():
with pytest.raises(ProcessNotRegisteredError):
subprocess.run(["test"])
fake_process.register_subprocess(["test"])
subprocess.run(["test"])
with pytest.raises(ProcessNotRegisteredError):
subprocess.run(["test"])
def test_signals(fake_process):
"""Test signal receiving functionality"""
fake_process.register_subprocess("test")
process = subprocess.Popen("test")
process.kill()
process.terminate()
process.send_signal(signal.SIGSEGV)
if sys.platform == "win32":
expected_signals = (signal.SIGTERM, signal.SIGTERM, signal.SIGSEGV)
else:
expected_signals = (signal.SIGKILL, signal.SIGTERM, signal.SIGSEGV)
assert process.received_signals() == expected_signals
def test_signal_callback(fake_process):
"""Test that signal callbacks work."""
def callback(process, sig):
if sig == signal.SIGTERM:
process.returncode = -1
fake_process.register_subprocess("test", signal_callback=callback, occurrences=3)
# no signal
process = subprocess.Popen("test")
process.wait()
assert process.returncode == 0
# ---- (truncated; a separate source file follows) ----
# coding: utf-8
# Unless explicitly stated otherwise all files in this repository are licensed under the Apache-2.0 License.
# This product includes software developed at Datadog (https://www.datadoghq.com/).
# Copyright 2019-Present Datadog, Inc.
import re # noqa: F401
import sys # noqa: F401
from datadog_api_client.v1.api_client import ApiClient, Endpoint
from datadog_api_client.v1.model_utils import ( # noqa: F401
check_allowed_values,
check_validations,
date,
datetime,
file_type,
none_type,
validate_and_convert_types
)
from datadog_api_client.v1.model.api_error_response import APIErrorResponse
from datadog_api_client.v1.model.usage_analyzed_logs_response import UsageAnalyzedLogsResponse
from datadog_api_client.v1.model.usage_billable_summary_response import UsageBillableSummaryResponse
from datadog_api_client.v1.model.usage_custom_reports_response import UsageCustomReportsResponse
from datadog_api_client.v1.model.usage_fargate_response import UsageFargateResponse
from datadog_api_client.v1.model.usage_hosts_response import UsageHostsResponse
from datadog_api_client.v1.model.usage_lambda_response import UsageLambdaResponse
from datadog_api_client.v1.model.usage_logs_by_index_response import UsageLogsByIndexResponse
from datadog_api_client.v1.model.usage_logs_response import UsageLogsResponse
from datadog_api_client.v1.model.usage_network_flows_response import UsageNetworkFlowsResponse
from datadog_api_client.v1.model.usage_network_hosts_response import UsageNetworkHostsResponse
from datadog_api_client.v1.model.usage_profiling_response import UsageProfilingResponse
from datadog_api_client.v1.model.usage_rum_sessions_response import UsageRumSessionsResponse
from datadog_api_client.v1.model.usage_snmp_response import UsageSNMPResponse
from datadog_api_client.v1.model.usage_sort import UsageSort
from datadog_api_client.v1.model.usage_sort_direction import UsageSortDirection
from datadog_api_client.v1.model.usage_specified_custom_reports_response import UsageSpecifiedCustomReportsResponse
from datadog_api_client.v1.model.usage_summary_response import UsageSummaryResponse
from datadog_api_client.v1.model.usage_synthetics_api_response import UsageSyntheticsAPIResponse
from datadog_api_client.v1.model.usage_synthetics_browser_response import UsageSyntheticsBrowserResponse
from datadog_api_client.v1.model.usage_synthetics_response import UsageSyntheticsResponse
from datadog_api_client.v1.model.usage_timeseries_response import UsageTimeseriesResponse
from datadog_api_client.v1.model.usage_top_avg_metrics_response import UsageTopAvgMetricsResponse
from datadog_api_client.v1.model.usage_trace_response import UsageTraceResponse
from datadog_api_client.v1.model.usage_tracing_without_limits_response import UsageTracingWithoutLimitsResponse
class UsageMeteringApi(object):
"""NOTE: This class is auto generated by OpenAPI Generator
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
def __init__(self, api_client=None):
if api_client is None:
api_client = ApiClient()
self.api_client = api_client
def __get_daily_custom_reports(
self,
**kwargs
):
"""Get the list of available daily custom reports # noqa: E501
Get daily custom reports. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_daily_custom_reports(async_req=True)
>>> result = thread.get()
Keyword Args:
page_size (int): The number of files to return in the response. `[default=60]`.. [optional]
page_number (int): The identifier of the first page to return. This parameter is used for the pagination feature `[default=0]`.. [optional]
sort_dir (UsageSortDirection): The direction to sort by: `[desc, asc]`.. [optional]
sort (UsageSort): The field to sort by: `[computed_on, size, start_date, end_date]`.. [optional]
_return_http_data_only (bool): response data without head status
code and headers. Default is True.
_preload_content (bool): if False, the urllib3.HTTPResponse object
will be returned without reading/decoding response data.
Default is True.
_request_timeout (float/tuple): timeout setting for this request. If one
number provided, it will be total request timeout. It can also
be a pair (tuple) of (connection, read) timeouts.
Default is None.
_check_input_type (bool): specifies if type checking
should be done on the data sent to the server.
Default is True.
_check_return_type (bool): specifies if type checking
should be done on the data received from the server.
Default is True.
_host_index (int/None): specifies the index of the server
that we want to use.
Default is read from the configuration.
async_req (bool): execute request asynchronously
Returns:
UsageCustomReportsResponse
If the method is called asynchronously, returns the request
thread.
"""
kwargs['async_req'] = kwargs.get(
'async_req', False
)
kwargs['_return_http_data_only'] = kwargs.get(
'_return_http_data_only', True
)
kwargs['_preload_content'] = kwargs.get(
'_preload_content', True
)
kwargs['_request_timeout'] = kwargs.get(
'_request_timeout', None
)
kwargs['_check_input_type'] = kwargs.get(
'_check_input_type', True
)
kwargs['_check_return_type'] = kwargs.get(
'_check_return_type', True
)
kwargs['_host_index'] = kwargs.get('_host_index')
return self.call_with_http_info(**kwargs)
self.get_daily_custom_reports = Endpoint(
settings={
'response_type': (UsageCustomReportsResponse,),
'auth': [
'apiKeyAuth',
'appKeyAuth'
],
'endpoint_path': '/api/v1/daily_custom_reports',
'operation_id': 'get_daily_custom_reports',
'http_method': 'GET',
'servers': None,
},
params_map={
'all': [
'page_size',
'page_number',
'sort_dir',
'sort',
],
'required': [],
'nullable': [
],
'enum': [
],
'validation': [
]
},
root_map={
'validations': {
},
'allowed_values': {
},
'openapi_types': {
'page_size':
(int,),
'page_number':
(int,),
'sort_dir':
(UsageSortDirection,),
'sort':
(UsageSort,),
},
'attribute_map': {
'page_size': 'page[size]',
'page_number': 'page[number]',
'sort_dir': 'sort_dir',
'sort': 'sort',
},
'location_map': {
'page_size': 'query',
'page_number': 'query',
'sort_dir': 'query',
'sort': 'query',
},
'collection_format_map': {
}
},
headers_map={
'accept': [
'application/json;datetime-format=rfc3339'
],
'content_type': [],
},
api_client=api_client,
callable=__get_daily_custom_reports
)
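# Illustrative sketch (added; not part of the generated client): a synchronous call using
# only the query parameters declared in params_map above; client configuration and
# API/application key authentication are assumed to be set up elsewhere.
#
#   api = UsageMeteringApi()                  # uses a default ApiClient()
#   reports = api.get_daily_custom_reports(page_size=10, page_number=0)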
def __get_monthly_custom_reports(
self,
**kwargs
):
"""Get the list of available monthly custom reports # noqa: E501
Get monthly custom reports. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_monthly_custom_reports(async_req=True)
>>> result = thread.get()
Keyword Args:
page_size (int): The number of files to return in the response `[default=60].`. [optional]
page_number (int): The identifier of the first page to return. This parameter is used for the pagination feature `[default=0]`.. [optional]
sort_dir (UsageSortDirection): The direction to sort by: `[desc, asc]`.. [optional]
sort (UsageSort): The field to sort by: `[computed_on, size, start_date, end_date]`.. [optional]
_return_http_data_only (bool): response data without head status
code and headers. Default is True.
_preload_content (bool): if False, the urllib3.HTTPResponse object
will be returned without reading/decoding response data.
Default is True.
_request_timeout (float/tuple): timeout setting for this request. If one
number provided, it will be total request timeout. It can also
be a pair (tuple) of (connection, read) timeouts.
Default is None.
_check_input_type (bool): specifies if type checking
should be done on the data sent to the server.
Default is True.
_check_return_type (bool): specifies if type checking
should be done on the data received from the server.
Default is True.
_host_index (int/None): specifies the index of the server
that we want to use.
Default is read from the configuration.
async_req (bool): execute request asynchronously
Returns:
UsageCustomReportsResponse
If the method is called asynchronously, returns the request
thread.
"""
kwargs['async_req'] = kwargs.get(
'async_req', False
)
kwargs['_return_http_data_only'] = kwargs.get(
'_return_http_data_only', True
)
kwargs['_preload_content'] = kwargs.get(
'_preload_content', True
)
kwargs['_request_timeout'] = kwargs.get(
'_request_timeout', None
)
kwargs['_check_input_type'] = kwargs.get(
'_check_input_type', True
)
kwargs['_check_return_type'] = kwargs.get(
'_check_return_type', True
)
kwargs['_host_index'] = kwargs.get('_host_index')
return self.call_with_http_info(**kwargs)
self.get_monthly_custom_reports = Endpoint(
settings={
'response_type': (UsageCustomReportsResponse,),
'auth': [
'apiKeyAuth',
'appKeyAuth'
],
'endpoint_path': '/api/v1/monthly_custom_reports',
'operation_id': 'get_monthly_custom_reports',
'http_method': 'GET',
'servers': None,
},
params_map={
'all': [
'page_size',
'page_number',
'sort_dir',
'sort',
],
'required': [],
'nullable': [
],
'enum': [
],
'validation': [
]
},
root_map={
'validations': {
},
'allowed_values': {
},
'openapi_types': {
'page_size':
(int,),
'page_number':
(int,),
'sort_dir':
(UsageSortDirection,),
'sort':
(UsageSort,),
},
'attribute_map': {
'page_size': 'page[size]',
'page_number': 'page[number]',
'sort_dir': 'sort_dir',
'sort': 'sort',
},
'location_map': {
'page_size': 'query',
'page_number': 'query',
'sort_dir': 'query',
'sort': 'query',
},
'collection_format_map': {
}
},
headers_map={
'accept': [
'application/json;datetime-format=rfc3339'
],
'content_type': [],
},
api_client=api_client,
callable=__get_monthly_custom_reports
)
def __get_specified_daily_custom_reports(
self,
report_id,
**kwargs
):
"""Get specified daily custom reports # noqa: E501
Get specified daily custom reports. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_specified_daily_custom_reports(report_id, async_req=True)
>>> result = thread.get()
Args:
report_id (str): The specified ID to search results for.
Keyword Args:
_return_http_data_only (bool): response data without head status
code and headers. Default is True.
_preload_content (bool): if False, the urllib3.HTTPResponse object
will be returned without reading/decoding response data.
Default is True.
_request_timeout (float/tuple): timeout setting for this request. If one
number provided, it will be total request timeout. It can also
be a pair (tuple) of (connection, read) timeouts.
Default is None.
_check_input_type (bool): specifies if type checking
should be done on the data sent to the server.
Default is True.
_check_return_type (bool): specifies if type checking
should be done on the data received from the server.
Default is True.
_host_index (int/None): specifies the index of the server
that we want to use.
Default is read from the configuration.
async_req (bool): execute request asynchronously
Returns:
UsageSpecifiedCustomReportsResponse
If the method is called asynchronously, returns the request
thread.
"""
kwargs['async_req'] = kwargs.get(
'async_req', False
)
kwargs['_return_http_data_only'] = kwargs.get(
'_return_http_data_only', True
)
kwargs['_preload_content'] = kwargs.get(
'_preload_content', True
)
kwargs['_request_timeout'] = kwargs.get(
'_request_timeout', None
)
kwargs['_check_input_type'] = kwargs.get(
'_check_input_type', True
)
kwargs['_check_return_type'] = kwargs.get(
'_check_return_type', True
)
kwargs['_host_index'] = kwargs.get('_host_index')
kwargs['report_id'] = \
report_id
return self.call_with_http_info(**kwargs)
self.get_specified_daily_custom_reports = Endpoint(
settings={
'response_type': (UsageSpecifiedCustomReportsResponse,),
'auth': [
'apiKeyAuth',
'appKeyAuth'
],
'endpoint_path': '/api/v1/daily_custom_reports/{report_id}',
'operation_id': 'get_specified_daily_custom_reports',
'http_method': 'GET',
'servers': None,
},
params_map={
'all': [
'report_id',
],
'required': [
'report_id',
],
'nullable': [
],
'enum': [
],
'validation': [
]
},
root_map={
'validations': {
},
'allowed_values': {
},
'openapi_types': {
'report_id':
(str,),
},
'attribute_map': {
'report_id': 'report_id',
},
'location_map': {
'report_id': 'path',
},
'collection_format_map': {
}
},
headers_map={
'accept': [
'application/json;datetime-format=rfc3339'
],
'content_type': [],
},
api_client=api_client,
callable=__get_specified_daily_custom_reports
)
def __get_specified_monthly_custom_reports(
self,
report_id,
**kwargs
):
"""Get specified monthly custom reports # noqa: E501
Get specified monthly custom reports. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_specified_monthly_custom_reports(report_id, async_req=True)
# ---- (truncated; a separate source file follows) ----
Texture"].location = (-500, 200)
# bsdf = bpy.data.materials[activeMaterialName].node_tree.nodes[bsdf.name]
# imgTex = bpy.data.materials[activeMaterialName].node_tree.nodes[normalName].outputs[0]
# link_output_to_slot_named(bpy.data.materials[activeMaterialName], imgTex, bsdf, "Normal")
#bpy.data.images[normalName].save_render(filepath=bpy.context.scene.render.filepath)
# print(normalName + " has been saved.")
# else:
print("There is a normal connection to " + bpy.context.active_object.name +".")
print("Baking normal data now.")
# new_image_texture(activeMaterialName, normalName, regNormColor)
new_image_texture(activeMaterialName, normalName, resolution = bpy.data.scenes['Scene'].render.resolution_x)
bpy.ops.object.bake("INVOKE_DEFAULT",type="NORMAL",filepath=working_dir + "textures", save_mode='EXTERNAL')
print("Normal Bake Done.")
#bpy.data.images[normalName].save_render(filepath=bpy.context.scene.render.filepath)
print(normalName + " has been saved.")
return {'FINISHED'}
else:
print("There is more than 1 object selected.")
for allObj in bpy.data.objects:
allObj.hide_render = False
bpy.context.scene.render.bake.use_selected_to_active = True
for eachObject in bpy.data.collections['web'].all_objects:
eachObject.select_set(True)
for eachObject in bpy.data.collections['master'].all_objects:
eachObject.select_set(True)
print("Baking normal data now.")
new_image_texture(activeMaterialName, normalName, regNormColor, resolution = bpy.data.scenes['Scene'].render.resolution_x)
bpy.ops.object.bake("INVOKE_DEFAULT",type="NORMAL",filepath=working_dir + "textures", save_mode='EXTERNAL')
print("Normal Bake Done.")
#bpy.data.images[normalName].save_render(filepath=bpy.context.scene.render.filepath)
print(normalName + " has been saved.")
return {'FINISHED'}
def bake_selected_opacity():
""" Create a new image texture and bake opacity to it """
# Note, copied from bake_selected_diffuse and could use some additional refactoring
xrs.render.set_bake_render(bpy.data.scenes['Scene'].render.resolution_x)
xrs.render.disable_direct_indirect_for_bake()
textureName = bpy.context.active_object.name + "_4k_opacity"
make_material() # ensures that the object has a material name based on the product
activeMaterialName = bpy.context.active_object.active_material.name
node_tree = bpy.data.materials[activeMaterialName].node_tree
nodes = node_tree.nodes
# Swap to an emit output and link in to the material alpha
output_node = xrs.material.get_one_node_of_type(nodes, "OUTPUT_MATERIAL")
bsdf_node = xrs.material.get_one_node_of_type(nodes, "BSDF_PRINCIPLED")
emit_node = nodes.new("ShaderNodeEmission")
node_tree.links.new(emit_node.outputs[0], output_node.inputs[0])
alpha_output = None
# find the input link for the BSDF alpha
for link in node_tree.links:
if link.to_node == bsdf_node and link.to_socket.name == 'Alpha':
alpha_output = link.from_socket
if alpha_output:
node_tree.links.new(alpha_output, emit_node.inputs[0])
working_dir = xrs.filename.get_parent_dir()
bpy.data.scenes['Scene'].render.filepath = working_dir + "textures/" + textureName + ".png"
if len(bpy.context.selected_objects) == 1:
for allObj in bpy.data.objects:
allObj.hide_render = False
bpy.context.scene.render.bake.use_selected_to_active = False
new_image_texture(activeMaterialName, textureName, resolution = bpy.data.scenes['Scene'].render.resolution_x)
bpy.ops.object.bake("INVOKE_DEFAULT",type="EMIT",filepath=working_dir + "textures", save_mode='EXTERNAL')
else:
xrs.log.info("Baking selected to active.")
for allObj in bpy.data.objects:
allObj.hide_render = False
bpy.context.scene.render.bake.use_selected_to_active = True
for eachObject in bpy.data.collections['master'].all_objects:
eachObject.select_set(True)
for eachObject in bpy.data.collections['web'].all_objects:
eachObject.select_set(True)
new_image_texture(activeMaterialName, textureName, resolution = bpy.data.scenes['Scene'].render.resolution_x)
bpy.ops.object.bake("INVOKE_DEFAULT",type="EMIT",filepath=working_dir + "textures", save_mode='EXTERNAL')
# restore the nodes (remove emit, relink nodes)
node_tree.nodes.remove(emit_node)
node_tree.links.new(bsdf_node.outputs[0], output_node.inputs[0])
xrs.log.info(textureName + " has been generated.")
return {'FINISHED'}
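# Note (added): Cycles has no dedicated alpha bake pass, so bake_selected_opacity above
# temporarily routes whatever feeds the Principled BSDF's Alpha socket into an Emission
# shader, bakes with type="EMIT", then removes the emission node and relinks the BSDF to
# the output. A minimal sketch of that relink step, using the names from the function:
#
#   emit = node_tree.nodes.new("ShaderNodeEmission")
#   node_tree.links.new(alpha_output, emit.inputs[0])          # alpha now drives emission
#   node_tree.links.new(emit.outputs[0], output_node.inputs[0])
#   ... bake with type="EMIT" ...
#   node_tree.nodes.remove(emit)
#   node_tree.links.new(bsdf_node.outputs[0], output_node.inputs[0])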
def bake_selected_roughness():
xrs.render.set_bake_render(bpy.data.scenes['Scene'].render.resolution_x)
roughnessName = bpy.context.active_object.name + "_4k_roughness"
xrs.material.make_material()
activeMaterialName = bpy.context.active_object.active_material.name
nodes = bpy.data.materials[activeMaterialName].node_tree.nodes
bsdf = xrs.material.get_one_node_of_type(nodes, "BSDF_PRINCIPLED")
# if get_one_node_of_type(bpy.data.materials[bpy.context.active_object.active_material.name].node_tree.nodes, "BSDF_PRINCIPLED") != None:
# currentColor = bpy.data.materials[activeMaterialName].node_tree.nodes[bsdf.name].inputs[7].default_value
working_dir = xrs.filename.get_parent_dir()
bpy.data.scenes['Scene'].render.filepath = working_dir + "textures/" + roughnessName + ".png"
print("File path set")
if len(bpy.context.selected_objects) == 1:
for allObj in bpy.data.objects:
allObj.hide_render = False
bpy.context.scene.render.bake.use_selected_to_active = False
print("There is 1 object selected.")
xrs.material.new_image_texture_float(activeMaterialName, roughnessName, resolution = bpy.data.scenes['Scene'].render.resolution_x)
#xrs.material.link_output_to_slot_named(bpy.data.materials[activeMaterialName], nodes[roughnessName].outputs[0], bsdf, "Roughness")
#nodes[roughnessName].location = (-330, 80)
# if get_one_node_of_type(bpy.data.materials[bpy.context.active_object.active_material.name].node_tree.nodes, "BSDF_PRINCIPLED") != None:
# if check_node_link(activeMaterialName, bsdf.name, "Roughness") == False:
# new_image_texture_float(activeMaterialName, roughnessName, currentColor)
# bpy.data.materials[activeMaterialName].node_tree.nodes[roughnessName].location = (-500, -300)
# bsdf = bpy.data.materials[activeMaterialName].node_tree.nodes[bsdf.name]
# imgTex = bpy.data.materials[activeMaterialName].node_tree.nodes[roughnessName].outputs[0]
# link_output_to_slot_named(bpy.data.materials[activeMaterialName], imgTex, bsdf, "Roughness")
#bpy.data.images[roughnessName].save_render(filepath=bpy.context.scene.render.filepath)
# print(roughnessName + " has been saved.")
# else:
# print("There is a roughness connection to " + bpy.context.active_object.name +".")
# print("Baking roughness data now.")
# if get_one_node_of_type(bpy.data.materials[bpy.context.active_object.active_material.name].node_tree.nodes, "BSDF_PRINCIPLED") != None:
# new_image_texture_float(activeMaterialName, diffuseName, currentColor)
# else:
# new_image_texture_float(activeMaterialName, diffuseName, 0)
bpy.ops.object.bake("INVOKE_DEFAULT",type="ROUGHNESS",filepath=working_dir + "textures", width=4096, height=4096, save_mode='EXTERNAL')
print("Roughness Bake Done.")
#bpy.data.images[roughnessName].save_render(filepath=bpy.context.scene.render.filepath)
print(roughnessName + " has been saved.")
return {'FINISHED'}
else:
print("There is more than 1 object selected.")
for allObj in bpy.data.objects:
allObj.hide_render = False
bpy.context.scene.render.bake.use_selected_to_active = True
for eachObject in bpy.data.collections['master'].all_objects:
eachObject.select_set(True)
for eachObject in bpy.data.collections['web'].all_objects:
eachObject.select_set(True)
print("Baking roughness data now.")
new_image_texture_float(activeMaterialName, roughnessName, resolution = bpy.data.scenes['Scene'].render.resolution_x)
# if get_one_node_of_type(bpy.data.materials[bpy.context.active_object.active_material.name].node_tree.nodes, "BSDF_PRINCIPLED") != None:
# new_image_texture_float(activeMaterialName, roughnessName, currentColor)
# else:
# new_image_texture_float(activeMaterialName, roughnessName, 0)
bpy.ops.object.bake("INVOKE_DEFAULT",type="ROUGHNESS",filepath=working_dir + "textures", width=4096, height=4096, save_mode='EXTERNAL')
print("Roughness Bake Done.")
#bpy.data.images[roughnessName].save_render(filepath=bpy.context.scene.render.filepath)
print(roughnessName + " has been saved.")
return {'FINISHED'}
def check_if_bsdf():
    """ Checks that every material has a Principled BSDF node """
    for allMat in bpy.data.materials:
        if get_one_node_of_type(allMat.node_tree.nodes, "BSDF_PRINCIPLED") is None:
            return False
    return True
def clean_material_slots(obj):
    """ Remove empty material slots from the given object """
    for slot in obj.material_slots:
        if slot.material is None:
            xrs.object.select(obj)
            bpy.ops.object.material_slot_remove_unused()
def check_node_link(matName, nodeName, inputType):
return(bpy.data.materials[matName].node_tree.nodes[nodeName].inputs[inputType].is_linked)
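# Hedged usage sketch (not part of the original module): the material name "oak_table"
# is hypothetical; this only shows how check_node_link() is meant to be called before
# deciding whether a bake target still needs to be wired into the Principled BSDF.
def _example_roughness_is_linked():
    return check_node_link("oak_table", "Principled BSDF", "Roughness")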
def create_3XR_mat_node(name):
""" Create a 3XR procedural material group node in the given material shader node tree """
# Create material group node in material
bpy.context.scene.use_nodes = True
mat = bpy.data.materials[name]
mat_group = mat.node_tree.nodes.new("ShaderNodeGroup")
mat_group.location = (1300,300)
mat_group.name = bpy.context.scene.xr_studio.product_name
mat_group.label = bpy.context.scene.xr_studio.product_name
# Create general group node with attributes
mat_nodes = bpy.data.node_groups.new(name, 'ShaderNodeTree')
mat_nodes.inputs.new("NodeSocketColor", "Overall Color")
mat_nodes.inputs.new("NodeSocketFloat", "Texture Scale")
mat_nodes.inputs.new("NodeSocketFloat", "Roughness")
mat_nodes.inputs.new("NodeSocketFloat", "Metallic")
mat_nodes.inputs.new("NodeSocketFloat", "Normal")
mat_nodes.outputs.new("NodeSocketColor", "Color")
mat_nodes.outputs.new("NodeSocketFloat", "Roughness")
mat_nodes.outputs.new("NodeSocketFloat", "Metallic")
mat_nodes.outputs.new("NodeSocketVector", "Normal")
# Link the attributes to the designated group node in procedural material
mat_group.node_tree = bpy.data.node_groups[mat_nodes.name]
def draw_normal(matName, imgSize):
""" Set up workspace to begin drawing normals on model """
mat=bpy.data.materials[str(matName)]
bpy.ops.paint.texture_paint_toggle()
bpy.ops.paint.add_texture_paint_slot(
type='BUMP',
name=xrs.filename.get_filename()+"_4k_bump",
width=imgSize,
height=imgSize)
bump = mat.node_tree.nodes['Bump']
mainBSDF = mat.node_tree.nodes['Principled BSDF']
xrs.material.link_output_to_slot_named(mat, bump.outputs[0], mainBSDF, 'Normal')
def denoise_img(imgName):
""" Denoises image based off of image name """
# Make Image Node & Attach Image
bpy.context.scene.use_nodes = True
node_tree = bpy.data.scenes['Scene'].node_tree
node = node_tree.nodes.new("CompositorNodeImage")
node.select = True
node_tree.nodes.active = node
node.image = bpy.data.images[imgName]
node.name = imgName
node.location = (-1000, 350)
# Add Denoise Node
denoise_node = node_tree.nodes.new("CompositorNodeDenoise")
denoise_node.location = (-750, 350)
#Add Viewer Node
viewer_node = node_tree.nodes.new("CompositorNodeViewer")
viewer_node.location = (-500, 350)
#Link All Nodes, Select Viewer Node First to Load Faster
xrs.material.link_output_to_slot_named(bpy.data.scenes['Scene'], node.outputs[0], denoise_node, 'Image')
viewer_node.select = True
xrs.material.link_output_to_slot_named(bpy.data.scenes['Scene'], denoise_node.outputs[0], viewer_node, 'Image')
xrs.log.info("Image in Denoiser. Check your Viewer Node in your Image Editor for Results.")
def end_draw_normal(matName, imgSize):
""" Take what was drawn on the model and bake that to an attached Normal Map """
mat=bpy.data.materials[str(matName)]
activeName=bpy.context.active_object.name
xrs.material.new_image_texture(str(matName), activeName+"_4k_normal", (0, 0, 0, 0), imgSize)
bpy.data.scenes['Scene'].render.bake.use_selected_to_active = False
bpy.ops.object.bake(type="NORMAL",filepath=xrs.filename.get_parent_dir() + "textures")
norm = bpy.data.materials[str(matName)].node_tree.nodes.new(type="ShaderNodeNormalMap")
norm.name = "3XRNormal"
normal = mat.node_tree.nodes['3XRNormal']
mainBSDF = mat.node_tree.nodes['Principled BSDF']
normTex = mat.node_tree.nodes[activeName+"_4k_normal"]
xrs.material.link_output_to_slot_named(mat, normTex.outputs[0], normal, 'Color')
xrs.material.link_output_to_slot_named(mat, normal.outputs[0], mainBSDF, 'Normal')
def get_bsdf_link_count(material, name):
""" Find the first bsdf node and get the number of links for the input with the given name """
bsdf = get_one_node_of_type(material.node_tree.nodes, "BSDF_PRINCIPLED")
if bsdf:
return get_node_link_count_named(bsdf, name)
return 0
def get_bsdf_value(material, name):
""" Find the first bsdf node and get the value of the given name """
bsdf = get_one_node_of_type(material.node_tree.nodes, "BSDF_PRINCIPLED")
if bsdf:
return get_node_default_value_named(bsdf, name)
return 0
def get_node_default_value_named(node, name):
""" Find the matching input based on name and return the value """
for input in node.inputs:
if input.name == name:
return input.default_value
xrs.log.warn(name + " node not found in " + node.name)
return 0
def get_node_link_count_named(node, name):
""" Find the matching input based on name and return the number of links """
for input in node.inputs:
if input.name == name:
return len(input.links)
xrs.log.warn(name + " node not found in " + node.name)
return 0
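# Hedged sketch (not in the original module): shows how the helpers above are typically
# combined to decide whether a BSDF input is driven by a texture or by a constant value.
# The material name "oak_table" is hypothetical.
def _example_roughness_source(mat_name="oak_table"):
    mat = bpy.data.materials[mat_name]
    if get_bsdf_link_count(mat, "Roughness") > 0:
        return "texture"  # the Roughness input has at least one incoming link
    return get_bsdf_value(mat, "Roughness")  # otherwise the constant slider value is used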
def get_first_from_node_link_named(node, name):
""" Get the input node for the given name on the given node """
for input in node.inputs:
if input.name == name:
return input.links[0].from_node
xrs.log.warn(name + " node not found in " + node.name)
return None
def get_one_node_of_type(nodes, type):
""" Search the material node tree for the first node matching the type """
for node in nodes:
if node.type == type:
return node
xrs.log.debug("Can not find " + str(type) + " in " + node.name)
return None
def is_8_bit():
    """ Checks that every image matching the file name is an 8 bit texture """
    for img in bpy.data.images:
        if img.depth > 32:
            split = img.name.split("_4k_")
            if split[0] == xrs.filename.get_filename():
                print(img.name + " needs to be an 8 bit texture.")
                return False
    return True
def is_image():
    """ Checks that every image named after the file comes from an external image file """
    for img in bpy.data.images:
        if str(img.name).startswith(xrs.filename.get_filename()):
            if img.source != 'FILE':
                return False
    return True
def link_output_to_slot_named(mat, output, node, name):
    """ Link the given output socket to the input named `name` on the given node """
    valid = True
    try:
        for input in node.inputs:
            if input.name == name:
                mat.node_tree.links.new(output, input)
    except Exception:
        xrs.log.warn("Link to nodes was unable to be made")
        valid = False
    return valid
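# Hedged usage sketch (not in the original module): wires an existing image texture node
# into the Principled BSDF. The material and node names are hypothetical and both nodes
# are assumed to already exist in the material's node tree.
def _example_link_diffuse(mat_name="oak_table", tex_node_name="oak_table_4k_diffuse"):
    mat = bpy.data.materials[mat_name]
    tex = mat.node_tree.nodes[tex_node_name]
    bsdf = get_one_node_of_type(mat.node_tree.nodes, "BSDF_PRINCIPLED")
    return link_output_to_slot_named(mat, tex.outputs[0], bsdf, "Base Color")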
def make_material_for_mesh(mesh):
""" Add a material to the mesh with the same name as the mesh, if it doesn't exist and assign to first slot """
mesh_has_target_material = False
for mat in mesh.materials:
if mat.name == mesh.name:
mesh_has_target_material = True
if mesh_has_target_material:
xrs.log.warn('The mesh already has a material named ' + mesh.name)
else:
# Mesh does not have a material with its name
if mesh.name not in bpy.data.materials:
# material does not exist, create it
xrs.log.info('Creating a new material named ' + mesh.name)
mat = bpy.data.materials.new(name=mesh.name)
mat.use_nodes=True
if len(mesh.materials) == 0:
mesh.materials.append(bpy.data.materials[mesh.name])
else:
# Assign it to the first slot
mesh.materials[0] = bpy.data.materials[mesh.name]
def make_material():
""" Add a material with the name of the mesh for all meshes """
# TODO: this should be renamed to be clear it impacts all meshes
for mesh in bpy.data.meshes:
make_material_for_mesh(mesh)
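# A typical call order when preparing a file (a sketch, not prescriptive):
# for obj in bpy.data.objects:
#     clean_material_slots(obj)  # drop empty slots first
# make_material()                # then ensure every mesh has a material named after it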
<filename>autotest/ogr/ogr_gpkg.py
#!/usr/bin/env python
# -*- coding: utf-8 -*-
###############################################################################
# $Id$
#
# Project: GDAL/OGR Test Suite
# Purpose: Test GeoPackage driver functionality.
# Author: <NAME> <<EMAIL>>
#
###############################################################################
# Copyright (c) 2004, <NAME> <<EMAIL>>
# Copyright (c) 2014, <NAME> <even dot rouault at mines-paris dot org>
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
###############################################################################
import os
import sys
import string
import shutil
# Make sure we run from the directory of the script
if os.path.basename(sys.argv[0]) == os.path.basename(__file__):
if os.path.dirname(sys.argv[0]) != '':
os.chdir(os.path.dirname(sys.argv[0]))
sys.path.append( '../pymod' )
from osgeo import ogr, osr, gdal
import gdaltest
import ogrtest
###############################################################################
# Create a fresh database.
def ogr_gpkg_1():
gdaltest.gpkg_ds = None
gdaltest.gpkg_dr = None
try:
gdaltest.gpkg_dr = ogr.GetDriverByName( 'GPKG' )
if gdaltest.gpkg_dr is None:
return 'skip'
except:
return 'skip'
try:
os.remove( 'tmp/gpkg_test.gpkg' )
except:
pass
# This is to speed-up the runtime of tests on EXT4 filesystems
# Do not use this for production environment if you care about data safety
# w.r.t system/OS crashes, unless you know what you are doing.
gdal.SetConfigOption('OGR_SQLITE_SYNCHRONOUS', 'OFF')
gdaltest.gpkg_ds = gdaltest.gpkg_dr.CreateDataSource( 'tmp/gpkg_test.gpkg' )
if gdaltest.gpkg_ds is not None:
return 'success'
else:
return 'fail'
gdaltest.gpkg_ds.Destroy()
###############################################################################
# Re-open database to test validity
def ogr_gpkg_2():
if gdaltest.gpkg_dr is None:
return 'skip'
gdaltest.gpkg_ds = gdaltest.gpkg_dr.Open( 'tmp/gpkg_test.gpkg', update = 1 )
if gdaltest.gpkg_ds is not None:
return 'success'
else:
return 'fail'
###############################################################################
# Create a layer
def ogr_gpkg_3():
if gdaltest.gpkg_dr is None or gdaltest.gpkg_ds is None:
return 'skip'
# Test invalid FORMAT
#gdal.PushErrorHandler('CPLQuietErrorHandler')
srs4326 = osr.SpatialReference()
srs4326.ImportFromEPSG( 4326 )
lyr = gdaltest.gpkg_ds.CreateLayer( 'first_layer', geom_type = ogr.wkbPoint, srs = srs4326)
#gdal.PopErrorHandler()
if lyr is None:
return 'fail'
# Test creating a layer with an existing name
gdal.PushErrorHandler('CPLQuietErrorHandler')
lyr = gdaltest.gpkg_ds.CreateLayer( 'a_layer')
lyr = gdaltest.gpkg_ds.CreateLayer( 'a_layer' )
gdal.PopErrorHandler()
if lyr is not None:
gdaltest.post_reason('layer creation should have failed')
return 'fail'
return 'success'
###############################################################################
# Close and re-open to test the layer registration
def ogr_gpkg_4():
if gdaltest.gpkg_dr is None or gdaltest.gpkg_ds is None:
return 'skip'
gdaltest.gpkg_ds.Destroy()
gdal.PushErrorHandler('CPLQuietErrorHandler')
gdaltest.gpkg_ds = gdaltest.gpkg_dr.Open( 'tmp/gpkg_test.gpkg', update = 1 )
gdal.PopErrorHandler()
if gdaltest.gpkg_ds is None:
return 'fail'
if gdaltest.gpkg_ds.GetLayerCount() != 2:
gdaltest.post_reason( 'unexpected number of layers' )
return 'fail'
lyr0 = gdaltest.gpkg_ds.GetLayer(0)
lyr1 = gdaltest.gpkg_ds.GetLayer(1)
if lyr0.GetName() != 'first_layer':
gdaltest.post_reason( 'unexpected layer name for layer 0' )
return 'fail'
if lyr1.GetName() != 'a_layer':
gdaltest.post_reason( 'unexpected layer name for layer 1' )
return 'fail'
return 'success'
###############################################################################
# Delete a layer
def ogr_gpkg_5():
if gdaltest.gpkg_dr is None or gdaltest.gpkg_ds is None:
return 'skip'
if gdaltest.gpkg_ds.GetLayerCount() != 2:
gdaltest.post_reason( 'unexpected number of layers' )
return 'fail'
if gdaltest.gpkg_ds.DeleteLayer(1) != 0:
gdaltest.post_reason( 'got error code from DeleteLayer(1)' )
return 'fail'
if gdaltest.gpkg_ds.DeleteLayer(0) != 0:
gdaltest.post_reason( 'got error code from DeleteLayer(0)' )
return 'fail'
if gdaltest.gpkg_ds.GetLayerCount() != 0:
gdaltest.post_reason( 'unexpected number of layers (not 0)' )
return 'fail'
return 'success'
###############################################################################
# Add fields
def ogr_gpkg_6():
if gdaltest.gpkg_dr is None or gdaltest.gpkg_ds is None:
return 'skip'
srs4326 = osr.SpatialReference()
srs4326.ImportFromEPSG( 4326 )
lyr = gdaltest.gpkg_ds.CreateLayer( 'field_test_layer', geom_type = ogr.wkbPoint, srs = srs4326)
if lyr is None:
return 'fail'
field_defn = ogr.FieldDefn('dummy', ogr.OFTString)
ret = lyr.CreateField(field_defn)
if lyr.GetLayerDefn().GetFieldDefn(0).GetType() != ogr.OFTString:
gdaltest.post_reason( 'wrong field type' )
return 'fail'
gdaltest.gpkg_ds.Destroy()
gdal.PushErrorHandler('CPLQuietErrorHandler')
gdaltest.gpkg_ds = gdaltest.gpkg_dr.Open( 'tmp/gpkg_test.gpkg', update = 1 )
gdal.PopErrorHandler()
if gdaltest.gpkg_ds is None:
return 'fail'
if gdaltest.gpkg_ds.GetLayerCount() != 1:
return 'fail'
lyr = gdaltest.gpkg_ds.GetLayer(0)
if lyr.GetName() != 'field_test_layer':
return 'fail'
field_defn_out = lyr.GetLayerDefn().GetFieldDefn(0)
if field_defn_out.GetType() != ogr.OFTString:
gdaltest.post_reason( 'wrong field type after reopen' )
return 'fail'
if field_defn_out.GetName() != 'dummy':
gdaltest.post_reason( 'wrong field name after reopen' )
return 'fail'
return 'success'
###############################################################################
# Add a feature / read a feature / delete a feature
def ogr_gpkg_7():
if gdaltest.gpkg_dr is None or gdaltest.gpkg_ds is None:
return 'skip'
lyr = gdaltest.gpkg_ds.GetLayerByName('field_test_layer')
geom = ogr.CreateGeometryFromWkt('POINT(10 10)')
feat = ogr.Feature(lyr.GetLayerDefn())
feat.SetGeometry(geom)
feat.SetField('dummy', 'a dummy value')
if lyr.CreateFeature(feat) != 0:
gdaltest.post_reason('cannot create feature')
return 'fail'
# Read back what we just inserted
lyr.ResetReading()
feat_read = lyr.GetNextFeature()
if feat_read.GetField('dummy') != 'a dummy value':
gdaltest.post_reason('output does not match input')
return 'fail'
# Only inserted one thing, so second feature should return NULL
feat_read = lyr.GetNextFeature()
if feat_read is not None:
gdaltest.post_reason('last call should return NULL')
return 'fail'
# Add another feature
geom = ogr.CreateGeometryFromWkt('POINT(100 100)')
feat = ogr.Feature(lyr.GetLayerDefn())
feat.SetGeometry(geom)
feat.SetField('dummy', 'who you calling a dummy?')
if lyr.CreateFeature(feat) != 0:
gdaltest.post_reason('cannot create feature')
return 'fail'
# Random read a feature
feat_read_random = lyr.GetFeature(feat.GetFID())
if feat_read_random.GetField('dummy') != 'who you calling a dummy?':
gdaltest.post_reason('random read output does not match input')
return 'fail'
# Random write a feature
feat.SetField('dummy', 'i am no dummy')
lyr.SetFeature(feat)
feat_read_random = lyr.GetFeature(feat.GetFID())
if feat_read_random.GetField('dummy') != 'i am no dummy':
gdaltest.post_reason('random read output does not match random write input')
return 'fail'
# Delete a feature
lyr.DeleteFeature(feat.GetFID())
if lyr.GetFeatureCount() != 1:
gdaltest.post_reason('delete feature did not delete')
return 'fail'
# Delete the layer
if gdaltest.gpkg_ds.DeleteLayer('field_test_layer') != 0:
        gdaltest.post_reason( 'got error code from DeleteLayer(field_test_layer)' )
        return 'fail'
return 'success'
###############################################################################
# Test a variety of geometry feature types and attribute types
def ogr_gpkg_8():
# try:
# os.remove( 'tmp/gpkg_test.gpkg' )
# except:
# pass
# gdaltest.gpkg_dr = ogr.GetDriverByName( 'GPKG' )
# gdaltest.gpkg_ds = gdaltest.gpkg_dr.CreateDataSource( 'tmp/gpkg_test.gpkg' )
if gdaltest.gpkg_dr is None or gdaltest.gpkg_ds is None:
return 'skip'
srs = osr.SpatialReference()
# Test a non-default SRS
srs.ImportFromEPSG( 32631 )
lyr = gdaltest.gpkg_ds.CreateLayer( 'tbl_linestring', geom_type = ogr.wkbLineString, srs = srs)
if lyr is None:
return 'fail'
lyr.StartTransaction()
ret = lyr.CreateField(ogr.FieldDefn('fld_integer', ogr.OFTInteger))
ret = lyr.CreateField(ogr.FieldDefn('fld_string', ogr.OFTString))
ret = lyr.CreateField(ogr.FieldDefn('fld_real', ogr.OFTReal))
ret = lyr.CreateField(ogr.FieldDefn('fld_date', ogr.OFTDate))
ret = lyr.CreateField(ogr.FieldDefn('fld_datetime', ogr.OFTDateTime))
ret = lyr.CreateField(ogr.FieldDefn('fld_binary', ogr.OFTBinary))
geom = ogr.CreateGeometryFromWkt('LINESTRING(5 5,10 5,10 10,5 10)')
feat = ogr.Feature(lyr.GetLayerDefn())
feat.SetGeometry(geom)
for i in range(10):
feat.SetField('fld_integer', 10 + i)
feat.SetField('fld_real', 3.14159/(i+1) )
feat.SetField('fld_string', 'test string %d test' % i)
feat.SetField('fld_date', '2014/05/17 ' )
feat.SetField('fld_datetime', '2014/05/17 12:34:56' )
feat.SetFieldBinaryFromHexString('fld_binary', 'fffe' )
if lyr.CreateFeature(feat) != 0:
gdaltest.post_reason('cannot create feature %d' % i)
return 'fail'
lyr.CommitTransaction()
feat = ogr.Feature(lyr.GetLayerDefn())
if lyr.CreateFeature(feat) != 0:
gdaltest.post_reason('cannot insert empty')
return 'fail'
feat.SetFID(6)
if lyr.SetFeature(feat) != 0:
gdaltest.post_reason('cannot update with empty')
return 'fail'
gdaltest.gpkg_ds = None
gdaltest.gpkg_ds = gdaltest.gpkg_dr.Open( 'tmp/gpkg_test.gpkg', update = 1 )
lyr = gdaltest.gpkg_ds.GetLayerByName('tbl_linestring')
feat = lyr.GetNextFeature()
if feat.GetField(0) != 10 or feat.GetField(1) != 'test string 0 test' or \
feat.GetField(2) != 3.14159 or feat.GetField(3) != '2014/05/17' or \
feat.GetField(4) != '2014/05/17 12:34:56' or feat.GetField(5) != 'FFFE':
gdaltest.post_reason('fail')
feat.DumpReadable()
return 'fail'
lyr = gdaltest.gpkg_ds.CreateLayer( 'tbl_polygon', geom_type = ogr.wkbPolygon, srs = srs)
if lyr is None:
return 'fail'
lyr.StartTransaction()
ret = lyr.CreateField(ogr.FieldDefn('fld_datetime', ogr.OFTDateTime))
ret = lyr.CreateField(ogr.FieldDefn('fld_string', ogr.OFTString))
geom = ogr.CreateGeometryFromWkt('POLYGON((5 5, 10 5, 10 10, 5 10, 5 5),(6 6, 6 7, 7 7, 7 6, 6 6))')
feat = ogr.Feature(lyr.GetLayerDefn())
feat.SetGeometry(geom)
for i in range(10):
feat.SetField('fld_string', 'my super string %d' % i)
feat.SetField('fld_datetime', '2010-01-01' )
if lyr.CreateFeature(feat) != 0:
gdaltest.post_reason('cannot create polygon feature %d' % i)
return 'fail'
lyr.CommitTransaction()
feat = lyr.GetFeature(3)
geom_read = feat.GetGeometryRef()
if geom.ExportToWkt() != geom_read.ExportToWkt():
gdaltest.post_reason('geom output not equal to geom input')
return 'fail'
# Test out the 3D support...
lyr = gdaltest.gpkg_ds.CreateLayer( 'tbl_polygon25d', geom_type = ogr.wkbPolygon25D, srs = srs)
if lyr is None:
return 'fail'
ret = lyr.CreateField(ogr.FieldDefn('fld_string', ogr.OFTString))
geom = ogr.CreateGeometryFromWkt('POLYGON((5 5 1, 10 5 2, 10 10 3, 5 104 | |
#####################################################
# #
# Source file of the Matrix Elements exports for #
# the PyOut MG5aMC plugin. #
# #
#####################################################
import os
import logging
import fractions
import shutil
import itertools
import copy
from math import fmod
import subprocess
import re
import string
plugin_path = os.path.dirname(os.path.realpath( __file__ ))
from madgraph import MadGraph5Error, InvalidCmd, MG5DIR
import madgraph.iolibs.export_python as export_python
import madgraph.iolibs.helas_call_writers as helas_call_writers
import madgraph.iolibs.files as files
import madgraph.iolibs.export_v4 as export_v4
import madgraph.core.color_algebra as color
import madgraph.various.misc as misc
import aloha
import aloha.create_aloha as create_aloha
import aloha.aloha_writers as aloha_writers
from . import PyOut_create_aloha as pyout_create_aloha
from . import PyOut_helas_call_writer as pyout_helas_call_writer
import models.check_param_card as check_param_card
logger = logging.getLogger('PyOut_plugin.MEExporter')
pjoin = os.path.join
class PyOutExporterError(MadGraph5Error):
""" Error from the Resummation MEs exporter. """
def coeff(ff_number, frac, is_imaginary, Nc_power, Nc_value=3, is_first = False):
"""Returns a nicely formatted string for the coefficients in JAMP lines"""
total_coeff = ff_number * frac * fractions.Fraction(Nc_value) ** Nc_power
if total_coeff == 1:
plus = '+'
if is_first:
plus = ''
if is_imaginary:
return plus + 'complex_tf(0,1)*'
else:
return plus
elif total_coeff == -1:
if is_imaginary:
return '-complex(0,1)*'
else:
return '-'
if is_first:
res_str = '%-i.' % total_coeff.numerator
else:
res_str = '%+i.' % total_coeff.numerator
if total_coeff.denominator != 1:
# Check if total_coeff is an integer
res_str = res_str + '/%i.' % total_coeff.denominator
if is_imaginary:
res_str = res_str + '*complex(0,1)'
return res_str + '*'
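# Worked examples for coeff() (a sketch, not part of the original exporter); the returned
# string is meant to be prefixed to an 'amp%d' term when building a JAMP line:
#   coeff(1, fractions.Fraction(1, 1), False, 0)  -> '+'
#   coeff(1, fractions.Fraction(1, 1), True, 0)   -> '+complex_tf(0,1)*'
#   coeff(2, fractions.Fraction(1, 3), False, 0)  -> '+2./3.*'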
class PyOutExporter(export_python.ProcessExporterPython):
"""this exporter is built upon the Python exporter of MG5_aMC.
If changes are requested wrt the latter, one can just define
here the corresponding function
"""
# check status of the directory. Remove it if already exists
check = True
# Language type: 'v4' for f77 'cpp' for C++ output
exporter = 'v4'
# Output type:
#[Template/dir/None] copy the Template, just create dir or do nothing
output = 'dir'
# Decide which type of merging if used [madevent/madweight]
grouped_mode = False
# if no grouping on can decide to merge uu~ and u~u anyway:
sa_symmetry = False
params_ext = []
params_dep = []
params_indep = []
coups_dep = []
coups_indep = []
me_names = []
proc_names = []
mass_lists = []
refactorized = False
PS_dependent_key = ['aS','MU_R']
def __init__(self, dir_path, *args, **opts):
os.mkdir(dir_path)
self.dir_path = dir_path
def pass_information_from_cmd(self, cmd):
"""Pass information for MA5"""
self.proc_defs = cmd._curr_proc_defs
self.model = cmd._curr_model
def get_python_matrix_methods(self, gauge_check=False):
"""Write the matrix element calculation method for the processes"""
replace_dict = {}
# Extract version number and date from VERSION file
info_lines = self.get_mg5_info_lines()
replace_dict['info_lines'] = info_lines
for ime, matrix_element in enumerate(self.matrix_elements):
self.aloha_names = self.write_alohas(matrix_element)
process_string = matrix_element.get('processes')[0].shell_string()
if process_string in self.matrix_methods:
continue
replace_dict['process_string'] = process_string
# Extract number of external particles
(nexternal, ninitial) = matrix_element.get_nexternal_ninitial()
replace_dict['nexternal'] = nexternal
# Extract ncomb
ncomb = matrix_element.get_helicity_combinations()
replace_dict['ncomb'] = ncomb
# Extract helicity lines
helicity_lines = self.get_helicity_matrix(matrix_element)
replace_dict['helicity_lines'] = helicity_lines
# Extract overall denominator
# Averaging initial state color, spin, and identical FS particles
den_factor_line = self.get_den_factor_line(matrix_element)
replace_dict['den_factor_line'] = den_factor_line
# Extract process info lines for all processes
process_lines = self.get_process_info_lines(matrix_element)
replace_dict['process_lines'] = process_lines
# Extract ngraphs
ngraphs = matrix_element.get_number_of_amplitudes()
replace_dict['ngraphs'] = ngraphs
# Extract ndiags
ndiags = len(matrix_element.get('diagrams'))
replace_dict['ndiags'] = ndiags
# Extract helas calls
helas_calls = self.helas_call_writer.get_matrix_element_calls(\
matrix_element, gauge_check)
replace_dict['helas_calls'] = "\n ".join(helas_calls)
# Extract nwavefuncs
nwavefuncs = matrix_element.get_number_of_wavefunctions()
replace_dict['nwavefuncs'] = nwavefuncs
# Extract ncolor
ncolor = max(1, len(matrix_element.get('color_basis')))
replace_dict['ncolor'] = ncolor
            # information on initial states
initial_states = [p.get_initial_ids() for \
p in matrix_element.get('processes')]
mirror = bool(matrix_element.get_mirror_processes())
replace_dict['initial_states'] = ','.join([str(ini) for ini in initial_states])
replace_dict['mirror'] = mirror
# Extract model parameter lines
parameters, couplings = \
self.get_model_parameters(matrix_element)
model_parameter_lines = ' # External (param_card) parameters\n '
model_parameter_lines += "\n ".join([\
"%(param)s = param_card['%(block)s'].get(%(id)s).value"\
% {"param": param.name, 'block': param.lhablock, 'id': param.lhacode[0]} for param in self.params_ext]) + '\n\n'
model_parameter_lines += self.get_intparam_definition()
# read param card
# write the expression of the PS-dependent couplings
couplings_dep = []
model_parameter_lines_dep = ''
model_parameter_lines_dep += '\n # PS-dependent couplings\n'
for c in self.coups_dep:
if not c.name in couplings: continue
model_parameter_lines_dep += ' %s = lambda G: complex_me(%s)\n' % (c.name, c.expr)
couplings.remove(c.name)
couplings_dep.append(c.name)
# now replace the parameters that depend on G with the call to the corresponding function
for p in self.params_dep:
if p.name == "mdl_sqrt__aS" : continue
model_parameter_lines_dep = \
model_parameter_lines_dep.replace(p.name, '%s(G)' % p.name)
# and of the independent ones
for c in self.coups_indep:
if not c.name in couplings: continue
model_parameter_lines += ' %s = [%s]\n' % (c.name, c.expr)
if aloha.complex_mass:
paramsignature_const = ",\n ".join(['tf.TensorSpec(shape=[], dtype=DTYPECOMPLEX)'] * len(parameters+couplings))
paramtuple_const = ",".join(["complex_me(%s)" % p for p in parameters+couplings])
else:
paramsignature_const = ",\n ".join(['tf.TensorSpec(shape=[], dtype=DTYPE)'] * len(parameters) +
['tf.TensorSpec(shape=[None], dtype=DTYPECOMPLEX)'] * len(couplings))
paramtuple_const = ",".join(["float_me(%s)" % p for p in parameters] + ["complex_me(%s)" % p for p in couplings])
paramtuple_func = ",".join(["%s" % p for p in couplings_dep])
if paramsignature_const:
paramsignature_const += ','
paramsignature_func = ",\n ".join(['tf.TensorSpec(shape=[None], dtype=DTYPECOMPLEX)'] * len(couplings_dep))
params = ",".join([p for p in parameters + couplings + couplings_dep])
paramnames_const = ",".join(["\"%s\"" % p for p in parameters + couplings])
paramnames_func = ",".join(["\"%s\"" % p for p in couplings_dep])
replace_dict['model_parameters'] = model_parameter_lines + model_parameter_lines_dep
            # replace cmath->numpy(np) inside the model parameters
replace_dict['model_parameters'] = replace_dict['model_parameters'].replace('cmath', 'np')
replace_dict['paramsignature_const'] = paramsignature_const
replace_dict['paramsignature_func'] = paramsignature_func
replace_dict['params'] = params
replace_dict['paramnames_const'] = paramnames_const
replace_dict['paramnames_func'] = paramnames_func
replace_dict['paramtuple_const'] = paramtuple_const
replace_dict['paramtuple_func'] = paramtuple_func
# Extract color data lines
color_matrix_lines = self.get_color_matrix_lines(matrix_element)
replace_dict['color_matrix_lines'] = \
"\n ".join(color_matrix_lines)
# Extract JAMP lines
jamp_lines = self.get_jamp_lines(matrix_element)
replace_dict['jamp_lines'] = jamp_lines
# Extract amp2 lines
amp2_lines = self.get_amp2_lines(matrix_element,
self.config_maps.setdefault(ime, []))
replace_dict['amp2_lines'] = '\n #'.join(amp2_lines)
replace_dict['model_path'] = self.model.path
replace_dict['root_path'] = MG5DIR
replace_dict['aloha_imports'] = "\n".join(["from %s import *" % name for name in self.aloha_names])
method_file = open(os.path.join(plugin_path, \
'template_files/matrix_method_python.inc')).read()
method_file = method_file % replace_dict
self.matrix_methods[process_string] = method_file
return self.matrix_methods
def get_helicity_matrix(self, matrix_element):
"""Return the Helicity matrix definition lines for this matrix element"""
helicity_line = "helicities = float_me([ \\\n "
helicity_line_list = []
for helicities in matrix_element.get_helicity_matrix():
helicity_line_list.append("[" + ",".join(['%d'] * len(helicities)) % \
tuple(helicities) + "]")
return helicity_line + ",\n ".join(helicity_line_list) + "])"
def get_color_matrix_lines(self, matrix_element):
"""Return the color matrix definition lines for this matrix element. Split
rows in chunks of size n."""
if not matrix_element.get('color_matrix'):
return ["denom = tf.constant([1.], dtype=DTYPECOMPLEX)", "cf = tf.constant([[1.]], dtype=DTYPECOMPLEX)"]
else:
color_denominators = matrix_element.get('color_matrix').\
get_line_denominators()
denom_string = "denom = tf.constant([%s], dtype=DTYPECOMPLEX)" % \
",".join(["%i" % denom for denom in color_denominators])
matrix_strings = []
my_cs = color.ColorString()
for index, denominator in enumerate(color_denominators):
# Then write the numerators for the matrix elements
num_list = matrix_element.get('color_matrix').\
get_line_numerators(index, denominator)
matrix_strings.append("%s" % \
",".join(["%d" % i for i in num_list]))
matrix_string = "cf = tf.constant([[" + \
"],\n [".join(matrix_strings) + "]], dtype=DTYPECOMPLEX)"
return [denom_string, matrix_string]
def get_den_factor_line(self, matrix_element):
"""Return the denominator factor line for this matrix element"""
return "denominator = float_me(%d)" % \
matrix_element.get_denominator_factor()
def get_jamp_lines(self, matrix_element):
"""Return the jamp = sum(fermionfactor * amp[i]) lines"""
res_list = []
for i, coeff_list in enumerate(matrix_element.get_color_amplitudes()):
is_first = i==0
res = ""
# Optimization: if all contributions to that color basis element have
# the same coefficient (up to a sign), put it in front
list_fracs = [abs(coefficient[0][1]) for coefficient in coeff_list]
common_factor = False
diff_fracs = list(set(list_fracs))
if len(diff_fracs) == 1 and abs(diff_fracs[0]) != 1:
common_factor = True
global_factor = diff_fracs[0]
res = res + '%s(' % coeff(1, global_factor, False, 0, is_first=is_first)
for i2, (coefficient, amp_number) in enumerate(coeff_list):
is_first2 = i2==0
if common_factor:
res = res + "%samp%d" % (coeff(coefficient[0],
coefficient[1] / abs(coefficient[1]),
coefficient[2],
coefficient[3], is_first=is_first2),
amp_number - 1)
else:
res = res + "%samp%d" % (coeff(coefficient[0],
coefficient[1],
coefficient[2],
coefficient[3], is_first=is_first2),
amp_number - 1)
if common_factor:
res = res + ')'
res_list.append(res)
return "jamp = tf.stack([" + ",".join([r for r in res_list]) + "], axis=0)"
def get_model_parameters(self, matrix_element):
"""Return definitions for all model parameters used in this
matrix element"""
# Get all masses and widths used
if aloha.complex_mass:
parameters = [(wf.get('mass') == 'ZERO' or wf.get('width')=='ZERO')
and wf.get('mass') or 'CMASS_%s' % wf.get('mass')
for wf in \
matrix_element.get_all_wavefunctions()]
parameters += [wf.get('mass') for wf in \
matrix_element.get_all_wavefunctions()]
else:
parameters = [wf.get('mass') for wf in \
matrix_element.get_all_wavefunctions()]
parameters += [wf.get('width') for wf | |
# Copyright 2018 The dm_control Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Module defining the abstract entity class."""
import abc
import collections
import os
import weakref
from absl import logging
from dm_control import mjcf
from dm_control.composer import define
from dm_control.mujoco.wrapper import mjbindings
import numpy as np
_OPTION_KEYS = set(['update_interval', 'buffer_size', 'delay', 'aggregator',
'corruptor', 'enabled'])
_NO_ATTACHMENT_FRAME = 'No attachment frame found.'
# The component order differs from that used by the open-source `tf` package.
def _multiply_quaternions(quat1, quat2):
result = np.empty_like(quat1)
mjbindings.mjlib.mju_mulQuat(result, quat1, quat2)
return result
def _rotate_vector(vec, quat):
"""Rotates a vector by the given quaternion."""
result = np.empty_like(vec)
mjbindings.mjlib.mju_rotVecQuat(result, vec, quat)
return result
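# Minimal sketch (not part of dm_control): MuJoCo quaternions are ordered (w, x, y, z),
# so rotating by the identity quaternion below leaves the vector unchanged.
def _example_identity_rotation():
  vec = np.array([1., 0., 0.])
  identity_quat = np.array([1., 0., 0., 0.])
  return _rotate_vector(vec, identity_quat)  # -> array([1., 0., 0.])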
class _ObservableKeys:
"""Helper object that implements the `observables.dict_keys` functionality."""
def __init__(self, entity, observables):
self._entity = entity
self._observables = observables
def __getattr__(self, name):
try:
model_identifier = self._entity.mjcf_model.full_identifier
except AttributeError:
raise ValueError('cannot retrieve the full identifier of mjcf_model')
return os.path.join(model_identifier, name)
def __dir__(self):
out = set(self._observables.keys())
out.update(dir(super()))
return list(out)
class Observables:
"""Base-class for Entity observables.
Subclasses should declare getter methods annotated with @define.observable
decorator and returning an observable object.
"""
def __init__(self, entity):
self._entity = weakref.proxy(entity)
self._observables = collections.OrderedDict()
self._keys_helper = _ObservableKeys(self._entity, self._observables)
# Ensure consistent ordering.
for attr_name in sorted(dir(type(self))):
type_attr = getattr(type(self), attr_name)
if isinstance(type_attr, define.observable):
self._observables[attr_name] = getattr(self, attr_name)
@property
def dict_keys(self):
return self._keys_helper
def as_dict(self, fully_qualified=True):
"""Returns an OrderedDict of observables belonging to this Entity.
The returned observables will include any added using the _add_observable
method, as well as any generated by a method decorated with the
@define.observable annotation.
Args:
fully_qualified: (bool) Whether the dict keys should be prefixed with the
parent entity's full model identifier.
"""
if fully_qualified:
# We need to make sure that this property doesn't raise an AttributeError,
# otherwise __getattr__ is executed and we get a very funky error.
try:
model_identifier = self._entity.mjcf_model.full_identifier
except AttributeError:
raise ValueError('cannot retrieve the full identifier of mjcf_model')
return collections.OrderedDict(
[(os.path.join(model_identifier, name), observable)
for name, observable in self._observables.items()])
else:
# Return a copy to prevent dict being edited.
return self._observables.copy()
def get_observable(self, name, name_fully_qualified=False):
"""Returns the observable with the given name.
Args:
name: (str) The identifier of the observable.
name_fully_qualified: (bool) Whether the provided name is prefixed by the
model's full identifier.
"""
if name_fully_qualified:
try:
model_identifier = self._entity.mjcf_model.full_identifier
except AttributeError:
raise ValueError('cannot retrieve the full identifier of mjcf_model')
return self._observables[name.replace(model_identifier, '')]
else:
return self._observables[name]
def set_options(self, options):
"""Configure Observables with an options dict.
Args:
options: A dict of dicts of configuration options keyed on
observable names, or a dict of configuration options, which will
propagate those options to all observables.
"""
if options is None:
options = {}
elif options.keys() and set(options.keys()).issubset(_OPTION_KEYS):
options = dict([(key, options) for key in self._observables.keys()])
for obs_key, obs_options in options.items():
try:
obs = self._observables[obs_key]
except KeyError:
raise KeyError('No observable with name {!r}'.format(obs_key))
obs.configure(**obs_options)
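  # Two accepted shapes for `options` (illustrative; 'joint_pos' is a hypothetical name):
  #   entity.observables.set_options({'enabled': True})                      # broadcast to all
  #   entity.observables.set_options({'joint_pos': {'update_interval': 5}})  # per observable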
def enable_all(self):
"""Enable all observables of this entity."""
for obs in self._observables.values():
obs.enabled = True
def disable_all(self):
"""Disable all observables of this entity."""
for obs in self._observables.values():
obs.enabled = False
def add_observable(self, name, observable, enabled=True):
self._observables[name] = observable
self._observables[name].enabled = enabled
class FreePropObservableMixin(metaclass=abc.ABCMeta):
"""Enforce observables of a free-moving object."""
@abc.abstractproperty
def position(self):
pass
@abc.abstractproperty
def orientation(self):
pass
@abc.abstractproperty
def linear_velocity(self):
pass
@abc.abstractproperty
def angular_velocity(self):
pass
class Entity(metaclass=abc.ABCMeta):
"""The abstract base class for an entity in a Composer environment."""
def __init__(self, *args, **kwargs):
"""Entity constructor.
Subclasses should not override this method, instead implement a _build
method.
Args:
*args: Arguments passed through to the _build method.
**kwargs: Keyword arguments. Passed through to the _build method, apart
from the following.
`observable_options`: A dictionary of Observable
configuration options.
"""
self._post_init_hooks = []
self._parent = None
self._attached = []
try:
observable_options = kwargs.pop('observable_options')
except KeyError:
observable_options = None
self._build(*args, **kwargs)
self._observables = self._build_observables()
self._observables.set_options(observable_options)
@abc.abstractmethod
def _build(self, *args, **kwargs):
"""Entity initialization method to be overridden by subclasses."""
raise NotImplementedError
def _build_observables(self):
"""Entity observables initialization method.
Returns:
An object subclassing the Observables class.
"""
return Observables(self)
def iter_entities(self, exclude_self=False):
"""An iterator that recursively iterates through all attached entities.
Args:
exclude_self: (optional) Whether to exclude this `Entity` itself from the
iterator.
Yields:
If `exclude_self` is `False`, the first value yielded is this Entity
itself. The following Entities are then yielded recursively in a
depth-first fashion, following the order in which the Entities are
attached.
"""
if not exclude_self:
yield self
for attached_entity in self._attached:
for attached_entity_of_attached_entity in attached_entity.iter_entities():
yield attached_entity_of_attached_entity
@property
def observables(self):
"""The observables defined by this entity."""
return self._observables
def initialize_episode_mjcf(self, random_state):
"""Callback executed when the MJCF model is modified between episodes."""
pass
def after_compile(self, physics, random_state):
"""Callback executed after the Mujoco Physics is recompiled."""
pass
def initialize_episode(self, physics, random_state):
"""Callback executed during episode initialization."""
pass
def before_step(self, physics, random_state):
"""Callback executed before an agent control step."""
pass
def before_substep(self, physics, random_state):
"""Callback executed before a simulation step."""
pass
def after_substep(self, physics, random_state):
"""A callback which is executed after a simulation step."""
pass
def after_step(self, physics, random_state):
"""Callback executed after an agent control step."""
pass
@abc.abstractproperty
def mjcf_model(self):
raise NotImplementedError
def attach(self, entity, attach_site=None):
"""Attaches an `Entity` without any additional degrees of freedom.
Args:
entity: The `Entity` to attach.
attach_site: (optional) The site to which to attach the entity's model. If
not set, defaults to self.attachment_site.
Returns:
The frame of the attached model.
"""
if attach_site is None:
attach_site = self.attachment_site
frame = attach_site.attach(entity.mjcf_model)
self._attached.append(entity)
entity._parent = weakref.ref(self) # pylint: disable=protected-access
return frame
def detach(self):
"""Detaches this entity if it has previously been attached."""
if self._parent is not None:
parent = self._parent() # pylint: disable=not-callable
if parent: # Weakref might dereference to None during garbage collection.
self.mjcf_model.detach()
parent._attached.remove(self) # pylint: disable=protected-access
self._parent = None
else:
raise RuntimeError('Cannot detach an entity that is not attached.')
@property
def parent(self):
"""Returns the `Entity` to which this entity is attached, or `None`."""
return self._parent() if self._parent else None # pylint: disable=not-callable
@property
def attachment_site(self):
return self.mjcf_model
@property
def root_body(self):
if self.parent:
return mjcf.get_attachment_frame(self.mjcf_model)
else:
return self.mjcf_model.worldbody
def global_vector_to_local_frame(self, physics, vec_in_world_frame):
"""Linearly transforms a world-frame vector into entity's local frame.
Note that this function does not perform an affine transformation of the
vector. In other words, the input vector is assumed to be specified with
respect to the same origin as this entity's local frame. This function
can also be applied to matrices whose innermost dimensions are either 2 or
3. In this case, a matrix with the same leading dimensions is returned
where the innermost vectors are replaced by their values computed in the
local frame.
Args:
physics: An `mjcf.Physics` instance.
vec_in_world_frame: A NumPy array with last dimension of shape (2,) or
(3,) that represents a vector quantity in the world frame.
Returns:
The same quantity as `vec_in_world_frame` but reexpressed in this
entity's local frame. The returned np.array has the same shape as
np.asarray(vec_in_world_frame).
Raises:
ValueError: if `vec_in_world_frame` does not have shape ending with (2,)
or (3,).
"""
vec_in_world_frame = np.asarray(vec_in_world_frame)
xmat = np.reshape(physics.bind(self.root_body).xmat, (3, 3))
# The ordering of the np.dot is such that the transformation holds for any
# matrix whose final dimensions are (2,) or (3,).
if vec_in_world_frame.shape[-1] == 2:
return np.dot(vec_in_world_frame, xmat[:2, :2])
elif vec_in_world_frame.shape[-1] == 3:
return np.dot(vec_in_world_frame, xmat)
else:
raise ValueError('`vec_in_world_frame` should have shape with final '
'dimension 2 or 3: got {}'.format(
vec_in_world_frame.shape))
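  # Illustrative call (hypothetical entity/physics objects): a world-frame vector of shape
  # (2,) or (3,) comes back expressed in this entity's local frame, e.g.
  #   local_vec = walker.global_vector_to_local_frame(physics, [0., 1., 0.])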
def global_xmat_to_local_frame(self, physics, xmat):
"""Transforms another entity's `xmat` into this entity's local frame.
This function takes another entity's (E) xmat, which is an SO(3) matrix
from E's frame to the world frame, and turns it to a matrix that transforms
from E's frame into this entity's local frame.
Args:
physics: An `mjcf.Physics` | |
0.5*m.b276*m.b324 + 0.5*m.b276*
m.b342 + 0.5*m.b276*m.b351 + 0.5*m.b276*m.b355 + 0.5*m.b276*m.b361 + 0.5*m.b276*m.b363 + 0.5*
m.b276*m.b372 + 0.5*m.b276*m.b375 + 0.5*m.b276*m.b376 + 0.5*m.b276*m.b377 + 0.5*m.b276*m.b383 +
0.5*m.b276*m.b386 + 0.5*m.b276*m.b390 + 0.5*m.b276*m.b394 + 0.5*m.b276*m.b419 + 0.5*m.b276*m.b423
+ 0.5*m.b276*m.b424 + 0.5*m.b276*m.b428 + 0.5*m.b276*m.b440 + 0.5*m.b276*m.b458 + 0.5*m.b276*
m.b467 + 0.5*m.b276*m.b477 + 0.5*m.b276*m.b482 + 0.5*m.b276*m.b485 + 0.5*m.b276*m.b488 + 0.5*
m.b276*m.b490 + 0.5*m.b276*m.b493 + 0.5*m.b276*m.b497 + 0.5*m.b276*m.b499 + 0.5*m.b276*m.b500 +
0.5*m.b276*m.b505 + 0.5*m.b276*m.b526 + 0.5*m.b276*m.b530 + 0.5*m.b276*m.b531 + 0.5*m.b276*m.b537
+ 0.5*m.b276*m.b552 + 0.5*m.b276*m.b556 + 0.5*m.b276*m.b562 + 0.5*m.b276*m.b566 + 0.5*m.b276*
m.b569 + 0.5*m.b276*m.b570 + 0.5*m.b276*m.b572 + 0.5*m.b276*m.b574 + 0.5*m.b276*m.b578 + 0.5*
m.b276*m.b581 + 0.5*m.b276*m.b587 + 0.5*m.b276*m.b603 + 0.5*m.b276*m.b605 + 0.5*m.b276*m.b608 +
0.5*m.b276*m.b611 + 0.5*m.b276*m.b623 + 0.5*m.b276*m.b627 + 0.5*m.b276*m.b628 + 0.5*m.b276*m.b638
+ 0.5*m.b276*m.b639 + 0.5*m.b276*m.b646 + 0.5*m.b276*m.b648 + 0.5*m.b276*m.b656 + 0.5*m.b276*
m.b664 + 0.5*m.b276*m.b666 + 0.5*m.b276*m.b670 + 0.5*m.b276*m.b673 + 0.5*m.b276*m.b674 + 0.5*
m.b276*m.b676 + 0.5*m.b276*m.b681 + m.b277*m.b293 + m.b277*m.b296 + 0.5*m.b277*m.b318 + 0.5*
m.b277*m.b381 + 0.5*m.b277*m.b397 + 0.5*m.b277*m.b407 + 0.5*m.b277*m.b412 + 0.5*m.b277*m.b413 +
0.5*m.b277*m.b427 + 0.5*m.b277*m.b449 + 0.5*m.b277*m.b471 + 0.5*m.b277*m.b491 + 0.5*m.b277*m.b547
+ 0.5*m.b277*m.b557 + 0.5*m.b277*m.b559 + 0.5*m.b277*m.b591 + 0.5*m.b277*m.b602 + 0.5*m.b277*
m.b616 + 0.5*m.b277*m.b652 + 0.5*m.b277*m.b661 + 0.5*m.b277*m.b671 + 0.5*m.b277*m.b672 + 0.5*
m.b277*m.b678 + 0.5*m.b277*m.b704 + 0.5*m.b277*m.b714 + 0.5*m.b277*m.b749 + 0.5*m.b277*m.b759 +
0.5*m.b277*m.b761 + 0.5*m.b277*m.b765 + 0.5*m.b277*m.b788 + 0.5*m.b277*m.b789 + 0.5*m.b277*m.b790
+ 0.5*m.b277*m.b798 + 0.5*m.b277*m.b804 + 0.5*m.b277*m.b809 + 0.5*m.b277*m.b811 + 0.5*m.b277*
m.b816 + 0.5*m.b277*m.b823 + 0.5*m.b277*m.b826 + m.b278*m.x851 + 0.5*m.b279*m.b281 + m.b279*
m.b291 + 0.5*m.b279*m.b303 + 0.5*m.b279*m.b320 + 0.5*m.b279*m.b324 + 0.5*m.b279*m.b351 + 0.5*
m.b279*m.b355 + 0.5*m.b279*m.b367 + 0.5*m.b279*m.b368 + 0.5*m.b279*m.b372 + 0.5*m.b279*m.b376 +
0.5*m.b279*m.b383 + 0.5*m.b279*m.b388 + 0.5*m.b279*m.b390 + 0.5*m.b279*m.b394 + 0.5*m.b279*m.b396
+ 0.5*m.b279*m.b423 + 0.5*m.b279*m.b424 + 0.5*m.b279*m.b428 + 0.5*m.b279*m.b435 + 0.5*m.b279*
m.b443 + 0.5*m.b279*m.b445 + 0.5*m.b279*m.b446 + 0.5*m.b279*m.b458 + 0.5*m.b279*m.b464 + 0.5*
m.b279*m.b467 + 0.5*m.b279*m.b477 + 0.5*m.b279*m.b479 + 0.5*m.b279*m.b482 + 0.5*m.b279*m.b488 +
0.5*m.b279*m.b489 + 0.5*m.b279*m.b490 + 0.5*m.b279*m.b494 + 0.5*m.b279*m.b497 + 0.5*m.b279*m.b499
+ 0.5*m.b279*m.b500 + 0.5*m.b279*m.b526 + 0.5*m.b279*m.b530 + 0.5*m.b279*m.b531 + 0.5*m.b279*
m.b551 + 0.5*m.b279*m.b562 + 0.5*m.b279*m.b564 + 0.5*m.b279*m.b566 + 0.5*m.b279*m.b568 + 0.5*
m.b279*m.b570 + 0.5*m.b279*m.b572 + 0.5*m.b279*m.b573 + 0.5*m.b279*m.b574 + 0.5*m.b279*m.b587 +
0.5*m.b279*m.b593 + 0.5*m.b279*m.b598 + 0.5*m.b279*m.b601 + 0.5*m.b279*m.b603 + 0.5*m.b279*m.b605
+ 0.5*m.b279*m.b608 + 0.5*m.b279*m.b613 + 0.5*m.b279*m.b620 + 0.5*m.b279*m.b622 + 0.5*m.b279*
m.b623 + 0.5*m.b279*m.b625 + 0.5*m.b279*m.b628 + 0.5*m.b279*m.b643 + 0.5*m.b279*m.b647 + 0.5*
m.b279*m.b664 + 0.5*m.b279*m.b670 + 0.5*m.b279*m.b673 + 0.5*m.b279*m.b674 + 0.5*m.b279*m.b676 +
0.5*m.b279*m.b681 + m.b280*m.b287 + m.b280*m.b301 + m.b280*m.b302 + 0.5*m.b280*m.b369 + m.b280*
m.x850 + 0.5*m.b281*m.b291 + m.b281*m.b303 + 0.5*m.b281*m.b324 + 0.5*m.b281*m.b342 + 0.5*m.b281*
m.b351 + 0.5*m.b281*m.b355 + 0.5*m.b281*m.b361 + 0.5*m.b281*m.b363 + 0.5*m.b281*m.b372 + 0.5*
m.b281*m.b375 + 0.5*m.b281*m.b376 + 0.5*m.b281*m.b377 + 0.5*m.b281*m.b383 + 0.5*m.b281*m.b386 +
0.5*m.b281*m.b390 + 0.5*m.b281*m.b394 + 0.5*m.b281*m.b419 + 0.5*m.b281*m.b423 + 0.5*m.b281*m.b424
+ 0.5*m.b281*m.b428 + 0.5*m.b281*m.b440 + 0.5*m.b281*m.b458 + 0.5*m.b281*m.b467 + 0.5*m.b281*
m.b477 + 0.5*m.b281*m.b482 + 0.5*m.b281*m.b485 + 0.5*m.b281*m.b488 + 0.5*m.b281*m.b490 + 0.5*
m.b281*m.b493 + 0.5*m.b281*m.b497 + 0.5*m.b281*m.b499 + 0.5*m.b281*m.b500 + 0.5*m.b281*m.b505 +
0.5*m.b281*m.b526 + 0.5*m.b281*m.b530 + 0.5*m.b281*m.b531 + 0.5*m.b281*m.b537 + 0.5*m.b281*m.b552
+ 0.5*m.b281*m.b556 + 0.5*m.b281*m.b562 + 0.5*m.b281*m.b566 + 0.5*m.b281*m.b569 + 0.5*m.b281*
m.b570 + 0.5*m.b281*m.b572 + 0.5*m.b281*m.b574 + 0.5*m.b281*m.b578 + 0.5*m.b281*m.b581 + 0.5*
m.b281*m.b587 + 0.5*m.b281*m.b603 + 0.5*m.b281*m.b605 + 0.5*m.b281*m.b608 + 0.5*m.b281*m.b611 +
0.5*m.b281*m.b623 + 0.5*m.b281*m.b627 + 0.5*m.b281*m.b628 + 0.5*m.b281*m.b638 + 0.5*m.b281*m.b639
+ 0.5*m.b281*m.b646 + 0.5*m.b281*m.b648 + 0.5*m.b281*m.b656 + 0.5*m.b281*m.b664 + 0.5*m.b281*
m.b666 + 0.5*m.b281*m.b670 + 0.5*m.b281*m.b673 + 0.5*m.b281*m.b674 + 0.5*m.b281*m.b676 + 0.5*
m.b281*m.b681 + m.b282*m.b290 + m.b282*m.b292 + m.b282*m.b294 + m.b282*m.b305 + m.b285*m.b297 +
m.b286*m.b289 + 0.5*m.b286*m.b321 + 0.5*m.b286*m.b328 + 0.5*m.b286*m.b331 + 0.5*m.b286*m.b334 +
0.5*m.b286*m.b335 + 0.5*m.b286*m.b337 + 0.5*m.b286*m.b347 + 0.5*m.b286*m.b348 + 0.5*m.b286*m.b352
+ 0.5*m.b286*m.b354 + 0.5*m.b286*m.b357 + 0.5*m.b286*m.b359 + 0.5*m.b286*m.b362 + 0.5*m.b286*
m.b365 + 0.5*m.b286*m.b371 + 0.5*m.b286*m.b373 + 0.5*m.b286*m.b374 + 0.5*m.b286*m.b393 + 0.5*
m.b286*m.b408 + 0.5*m.b286*m.b410 + 0.5*m.b286*m.b411 + 0.5*m.b286*m.b430 + 0.5*m.b286*m.b436 +
0.5*m.b286*m.b450 + 0.5*m.b286*m.b451 + 0.5*m.b286*m.b454 + 0.5*m.b286*m.b456 + 0.5*m.b286*m.b461
+ 0.5*m.b286*m.b470 + 0.5*m.b286*m.b472 + 0.5*m.b286*m.b474 + 0.5*m.b286*m.b480 + 0.5*m.b286*
m.b481 + 0.5*m.b286*m.b492 + 0.5*m.b286*m.b510 + 0.5*m.b286*m.b515 + 0.5*m.b286*m.b544 + 0.5*
m.b286*m.b555 + 0.5*m.b286*m.b563 + 0.5*m.b286*m.b567 + 0.5*m.b286*m.b582 + 0.5*m.b286*m.b583 +
0.5*m.b286*m.b585 + 0.5*m.b286*m.b595 + 0.5*m.b286*m.b614 + 0.5*m.b286*m.b619 + 0.5*m.b286*m.b624
+ 0.5*m.b286*m.b632 + 0.5*m.b286*m.b633 + 0.5*m.b286*m.b635 + 0.5*m.b286*m.b636 + 0.5*m.b286*
m.b645 + 0.5*m.b286*m.b657 + 0.5*m.b286*m.b660 + 0.5*m.b286*m.b677 + m.b287*m.b301 + m.b287*
m.b302 + 0.5*m.b287*m.b369 + m.b287*m.x850 + m.b288*m.b295 + m.b288*m.b299 + m.b288*m.b304 + 0.5*
m.b288*m.b323 + 0.5*m.b288*m.b339 + 0.5*m.b288*m.b341 + 0.5*m.b288*m.b349 + 0.5*m.b288*m.b360 +
0.5*m.b288*m.b380 + 0.5*m.b288*m.b387 + 0.5*m.b288*m.b395 + 0.5*m.b288*m.b398 + 0.5*m.b288*m.b400
+ 0.5*m.b288*m.b401 + 0.5*m.b288*m.b403 + 0.5*m.b288*m.b405 + 0.5*m.b288*m.b421 + 0.5*m.b288*
m.b426 + 0.5*m.b288*m.b438 + 0.5*m.b288*m.b441 + 0.5*m.b288*m.b447 + 0.5*m.b288*m.b452 + 0.5*
m.b288*m.b453 + 0.5*m.b288*m.b460 + 0.5*m.b288*m.b501 + 0.5*m.b288*m.b511 + 0.5*m.b288*m.b525 +
0.5*m.b288*m.b528 + 0.5*m.b288*m.b538 + 0.5*m.b288*m.b541 + 0.5*m.b288*m.b550 + 0.5*m.b288*m.b565
+ 0.5*m.b288*m.b575 + 0.5*m.b288*m.b577 + 0.5*m.b288*m.b579 + 0.5*m.b288*m.b580 + 0.5*m.b288*
m.b594 + 0.5*m.b288*m.b599 + 0.5*m.b288*m.b604 + 0.5*m.b288*m.b606 + 0.5*m.b288*m.b607 + 0.5*
m.b288*m.b653 + 0.5*m.b288*m.b667 + 0.5*m.b288*m.b680 + 0.5*m.b289*m.b321 + 0.5*m.b289*m.b328 +
0.5*m.b289*m.b331 + 0.5*m.b289*m.b334 + 0.5*m.b289*m.b335 + 0.5*m.b289*m.b337 + 0.5*m.b289*m.b347
+ 0.5*m.b289*m.b348 + 0.5*m.b289*m.b352 + 0.5*m.b289*m.b354 + 0.5*m.b289*m.b357 + 0.5*m.b289*
m.b359 + 0.5*m.b289*m.b362 + 0.5*m.b289*m.b365 + 0.5*m.b289*m.b371 + 0.5*m.b289*m.b373 + 0.5*
m.b289*m.b374 + 0.5*m.b289*m.b393 + 0.5*m.b289*m.b408 + 0.5*m.b289*m.b410 + 0.5*m.b289*m.b411 +
0.5*m.b289*m.b430 + 0.5*m.b289*m.b436 + 0.5*m.b289*m.b450 + 0.5*m.b289*m.b451 + 0.5*m.b289*m.b454
+ 0.5*m.b289*m.b456 + 0.5*m.b289*m.b461 + 0.5*m.b289*m.b470 + 0.5*m.b289*m.b472 + 0.5*m.b289*
m.b474 + 0.5*m.b289*m.b480 + 0.5*m.b289*m.b481 + 0.5*m.b289*m.b492 + 0.5*m.b289*m.b510 + 0.5*
m.b289*m.b515 + 0.5*m.b289*m.b544 + 0.5*m.b289*m.b555 + 0.5*m.b289*m.b563 + 0.5*m.b289*m.b567 +
0.5*m.b289*m.b582 + 0.5*m.b289*m.b583 + 0.5*m.b289*m.b585 + 0.5*m.b289*m.b595 + 0.5*m.b289*m.b614
+ 0.5*m.b289*m.b619 + 0.5*m.b289*m.b624 + 0.5*m.b289*m.b632 + 0.5*m.b289*m.b633 + 0.5*m.b289*
m.b635 + 0.5*m.b289*m.b636 + 0.5*m.b289*m.b645 + 0.5*m.b289*m.b657 + 0.5*m.b289*m.b660 + 0.5*
m.b289*m.b677 + m.b290*m.b292 + m.b290*m.b294 + m.b290*m.b305 + 0.5*m.b291*m.b303 + 0.5*m.b291*
m.b320 + 0.5*m.b291*m.b324 + 0.5*m.b291*m.b351 + 0.5*m.b291*m.b355 + 0.5*m.b291*m.b367 + 0.5*
m.b291*m.b368 + 0.5*m.b291*m.b372 + 0.5*m.b291*m.b376 + 0.5*m.b291*m.b383 + 0.5*m.b291*m.b388 +
0.5*m.b291*m.b390 + 0.5*m.b291*m.b394 + 0.5*m.b291*m.b396 + 0.5*m.b291*m.b423 + 0.5*m.b291*m.b424
+ 0.5*m.b291*m.b428 + 0.5*m.b291*m.b435 + 0.5*m.b291*m.b443 + 0.5*m.b291*m.b445 + 0.5*m.b291*
m.b446 + 0.5*m.b291*m.b458 + 0.5*m.b291*m.b464 + 0.5*m.b291*m.b467 + 0.5*m.b291*m.b477 + 0.5*
m.b291*m.b479 + 0.5*m.b291*m.b482 + 0.5*m.b291*m.b488 + 0.5*m.b291*m.b489 + 0.5*m.b291*m.b490 +
0.5*m.b291*m.b494 + 0.5*m.b291*m.b497 + 0.5*m.b291*m.b499 + 0.5*m.b291*m.b500 + 0.5*m.b291*m.b526
+ 0.5*m.b291*m.b530 + 0.5*m.b291*m.b531 + 0.5*m.b291*m.b551 + 0.5*m.b291*m.b562 + 0.5*m.b291*
m.b564 + 0.5*m.b291*m.b566 + 0.5*m.b291*m.b568 + 0.5*m.b291*m.b570 + 0.5*m.b291*m.b572 + 0.5*
m.b291*m.b573 + 0.5*m.b291*m.b574 + 0.5*m.b291*m.b587 + 0.5*m.b291*m.b593 + 0.5*m.b291*m.b598 +
0.5*m.b291*m.b601 + 0.5*m.b291*m.b603 + 0.5*m.b291*m.b605 + 0.5*m.b291*m.b608 + 0.5*m.b291*m.b613
+ 0.5*m.b291*m.b620 + 0.5*m.b291*m.b622 + 0.5*m.b291*m.b623 + 0.5*m.b291*m.b625 + 0.5*m.b291*
m.b628 + 0.5*m.b291*m.b643 + 0.5*m.b291*m.b647 + 0.5*m.b291*m.b664 + 0.5*m.b291*m.b670 + 0.5*
m.b291*m.b673 + 0.5*m.b291*m.b674 + 0.5*m.b291*m.b676 + 0.5*m.b291*m.b681 + m.b292*m.b294 +
m.b292*m.b305 + m.b293*m.b296 + 0.5*m.b293*m.b318 + 0.5*m.b293*m.b381 + 0.5*m.b293*m.b397 + 0.5*
m.b293*m.b407 + 0.5*m.b293*m.b412 + 0.5*m.b293*m.b413 + 0.5*m.b293*m.b427 + 0.5*m.b293*m.b449 +
0.5*m.b293*m.b471 + 0.5*m.b293*m.b491 + 0.5*m.b293*m.b547 + 0.5*m.b293*m.b557 + 0.5*m.b293*m.b559
+ 0.5*m.b293*m.b591 + 0.5*m.b293*m.b602 + 0.5*m.b293*m.b616 + 0.5*m.b293*m.b652 + 0.5*m.b293*
m.b661 + 0.5*m.b293*m.b671 + 0.5*m.b293*m.b672 + 0.5*m.b293*m.b678 + 0.5*m.b293*m.b704 + 0.5*
m.b293*m.b714 + 0.5*m.b293*m.b749 + 0.5*m.b293*m.b759 + 0.5*m.b293*m.b761 + 0.5*m.b293*m.b765 +
0.5*m.b293*m.b788 + 0.5*m.b293*m.b789 + 0.5*m.b293*m.b790 + 0.5*m.b293*m.b798 + 0.5*m.b293*m.b804
+ 0.5*m.b293*m.b809 + 0.5*m.b293*m.b811 + 0.5*m.b293*m.b816 + 0.5*m.b293*m.b823 + 0.5*m.b293*
m.b826 + m.b294*m.b305 + m.b295*m.b299 + m.b295*m.b304 + 0.5*m.b295*m.b323 + 0.5*m.b295*m.b339 +
0.5*m.b295*m.b341 + 0.5*m.b295*m.b349 + 0.5*m.b295*m.b360 + 0.5*m.b295*m.b380 + 0.5*m.b295*m.b387
+ 0.5*m.b295*m.b395 + 0.5*m.b295*m.b398 + 0.5*m.b295*m.b400 + 0.5*m.b295*m.b401 + 0.5*m.b295*
m.b403 + 0.5*m.b295*m.b405 + 0.5*m.b295*m.b421 + 0.5*m.b295*m.b426 + 0.5*m.b295*m.b438 + 0.5*
m.b295*m.b441 + 0.5*m.b295*m.b447 + 0.5*m.b295*m.b452 + 0.5*m.b295*m.b453 + 0.5*m.b295*m.b460 +
0.5*m.b295*m.b501 + 0.5*m.b295*m.b511 + 0.5*m.b295*m.b525 + 0.5*m.b295*m.b528 + 0.5*m.b295*m.b538
+ 0.5*m.b295*m.b541 + 0.5*m.b295*m.b550 + 0.5*m.b295*m.b565 + 0.5*m.b295*m.b575 + 0.5*m.b295*
m.b577 + 0.5*m.b295*m.b579 + 0.5*m.b295*m.b580 + 0.5*m.b295*m.b594 + 0.5*m.b295*m.b599 + 0.5*
m.b295*m.b604 + 0.5*m.b295*m.b606 + 0.5*m.b295*m.b607 + 0.5*m.b295*m.b653 + 0.5*m.b295*m.b667 +
0.5*m.b295*m.b680 + 0.5*m.b296*m.b318 + 0.5*m.b296*m.b381 + 0.5*m.b296*m.b397 + 0.5*m.b296*m.b407
+ 0.5*m.b296*m.b412 + 0.5*m.b296*m.b413 + 0.5*m.b296*m.b427 + 0.5*m.b296*m.b449 + 0.5*m.b296*
m.b471 + 0.5*m.b296*m.b491 + 0.5*m.b296*m.b547 + 0.5*m.b296*m.b557 + 0.5*m.b296*m.b559 + 0.5*
m.b296*m.b591 + 0.5*m.b296*m.b602 + 0.5*m.b296*m.b616 + 0.5*m.b296*m.b652 + 0.5*m.b296*m.b661 +
0.5*m.b296*m.b671 + 0.5*m.b296*m.b672 + 0.5*m.b296*m.b678 + 0.5*m.b296*m.b704 + 0.5*m.b296*m.b714
+ 0.5*m.b296*m.b749 + 0.5*m.b296*m.b759 + 0.5*m.b296*m.b761 + 0.5*m.b296*m.b765 + 0.5*m.b296*
m.b788 + | |
        no_splits = False  # USED IN SHGO
try:
for c in self.H[self.gen]:
if self.symmetry:
# self.sub_generate_cell_symmetry(c, self.gen + 1)
self.split_simplex_symmetry(c, self.gen + 1)
else:
self.sub_generate_cell(c, self.gen + 1)
except IndexError:
no_splits = True # USED IN SHGO
self.gen += 1
return no_splits # USED IN SHGO
# @lru_cache(maxsize=None)
def construct_hypercube(self, origin, suprenum, gen, hgr, p_hgr_h,
printout=False):
"""
Build a hypercube with triangulations symmetric to C0.
Parameters
----------
origin : vec
suprenum : vec (tuple)
gen : generation
hgr : parent homology group rank
"""
# Initiate new cell
C_new = Cell(gen, hgr, p_hgr_h, origin, suprenum)
C_new.centroid = tuple(
(numpy.array(origin) + numpy.array(suprenum)) / 2.0)
# C_new.centroid =
# centroid_index = len(self.C0()) - 1
# Build new indexed vertex list
V_new = []
# Cached calculation
for i, v in enumerate(self.C0()[:-1]):
t1 = self.generate_sub_cell_t1(origin, v.x)
t2 = self.generate_sub_cell_t2(suprenum, v.x)
vec = t1 + t2
vec = tuple(vec)
C_new.add_vertex(self.V[vec])
V_new.append(vec)
# Add new centroid
C_new.add_vertex(self.V[C_new.centroid])
V_new.append(C_new.centroid)
# Connect new vertices #TODO: Thread into other loop; no need for V_new
for i, connections in enumerate(self.graph):
# Present vertex V_new[i]; connect to all connections:
for j in connections:
self.V[V_new[i]].connect(self.V[V_new[j]])
if printout:
print("A sub hyper cube with:")
print("origin: {}".format(origin))
print("suprenum: {}".format(suprenum))
for v in C_new():
print("Vertex: {}".format(v.x))
constr = 'Connections: '
for vc in v.nn:
constr += '{} '.format(vc.x)
print(constr)
print('Order = {}'.format(v.order))
# Append the new cell to the to complex
self.H[gen].append(C_new)
return C_new
def split_simplex_symmetry(self, S, gen):
"""
        Split a hypersimplex S into two sub simplices by building a hyperplane
which connects to a new vertex on an edge (the longest edge in
dim = {2, 3}) and every other vertex in the simplex that is not
connected to the edge being split.
This function utilizes the knowledge that the problem is specified
with symmetric constraints
The longest edge is tracked by an ordering of the
vertices in every simplices, the edge between first and second
vertex is the longest edge to be split in the next iteration.
"""
# If not gen append
try:
self.H[gen]
except IndexError:
self.H.append([])
# gen, hgr, p_hgr_h,
# gen, C_i.hg_n, C_i.p_hgr_h
# Find new vertex.
# V_new_x = tuple((numpy.array(C()[0].x) + numpy.array(C()[1].x)) / 2.0)
V_new = self.V[
tuple((numpy.array(S()[0].x) + numpy.array(S()[-1].x)) / 2.0)]
# Disconnect old longest edge
self.V[S()[0].x].disconnect(self.V[S()[-1].x])
# Connect new vertices to all other vertices
for v in S()[:]:
v.connect(self.V[V_new.x])
# New "lower" simplex
S_new_l = Simplex(gen, S.hg_n, S.p_hgr_h, self.generation_cycle,
self.dim)
S_new_l.add_vertex(S()[0])
S_new_l.add_vertex(V_new) # Add new vertex
for v in S()[1:-1]: # Add all other vertices
S_new_l.add_vertex(v)
# New "upper" simplex
S_new_u = Simplex(gen, S.hg_n, S.p_hgr_h, S.generation_cycle, self.dim)
S_new_u.add_vertex(
S()[S_new_u.generation_cycle + 1]) # First vertex on new long edge
for v in S()[1:-1]: # Remaining vertices
S_new_u.add_vertex(v)
for k, v in enumerate(S()[1:-1]): # iterate through inner vertices
# for easier k / gci tracking
k += 1
# if k == 0:
# continue # We do this rather than S[1:-1]
# for easier k / gci tracking
if k == (S.generation_cycle + 1):
S_new_u.add_vertex(V_new)
else:
S_new_u.add_vertex(v)
S_new_u.add_vertex(S()[-1]) # Second vertex on new long edge
# for i, v in enumerate(S_new_u()):
# print(f'S_new_u()[{i}].x = {v.x}')
self.H[gen].append(S_new_l)
        self.H[gen].append(S_new_u)
return
@lru_cache(maxsize=None)
def generate_sub_cell_2(self, origin, suprenum, v_x_t): # No hits
"""
Use the origin and suprenum vectors to find a new cell in that
subspace direction
NOTE: NOT CURRENTLY IN USE!
Parameters
----------
origin : tuple vector (hashable)
suprenum : tuple vector (hashable)
        Returns
        -------
        vec : tuple
            The coordinates of the new vertex in the sub-cell.
        """
t1 = self.generate_sub_cell_t1(origin, v_x_t)
t2 = self.generate_sub_cell_t2(suprenum, v_x_t)
vec = t1 + t2
return tuple(vec)
@lru_cache(maxsize=None)
def generate_sub_cell_t1(self, origin, v_x):
# TODO: Calc these arrays outside
v_o = numpy.array(origin)
return v_o - v_o * numpy.array(v_x)
@lru_cache(maxsize=None)
def generate_sub_cell_t2(self, suprenum, v_x):
v_s = numpy.array(suprenum)
return v_s * numpy.array(v_x)
# Plots
def plot_complex(self):
"""
Here C is the LIST of simplexes S in the
2 or 3 dimensional complex
To plot a single simplex S in a set C, use ex. [C[0]]
"""
from matplotlib import pyplot
if self.dim == 2:
pyplot.figure()
for C in self.H:
for c in C:
for v in c():
if self.bounds is None:
x_a = numpy.array(v.x, dtype=float)
else:
x_a = numpy.array(v.x, dtype=float)
for i in range(len(self.bounds)):
x_a[i] = (x_a[i] * (self.bounds[i][1]
- self.bounds[i][0])
+ self.bounds[i][0])
# logging.info('v.x_a = {}'.format(x_a))
pyplot.plot([x_a[0]], [x_a[1]], 'o')
xlines = []
ylines = []
for vn in v.nn:
if self.bounds is None:
xn_a = numpy.array(vn.x, dtype=float)
else:
xn_a = numpy.array(vn.x, dtype=float)
for i in range(len(self.bounds)):
xn_a[i] = (xn_a[i] * (self.bounds[i][1]
- self.bounds[i][0])
+ self.bounds[i][0])
# logging.info('vn.x = {}'.format(vn.x))
xlines.append(xn_a[0])
ylines.append(xn_a[1])
xlines.append(x_a[0])
ylines.append(x_a[1])
pyplot.plot(xlines, ylines)
if self.bounds is None:
pyplot.ylim([-1e-2, 1 + 1e-2])
pyplot.xlim([-1e-2, 1 + 1e-2])
else:
pyplot.ylim(
[self.bounds[1][0] - 1e-2, self.bounds[1][1] + 1e-2])
pyplot.xlim(
[self.bounds[0][0] - 1e-2, self.bounds[0][1] + 1e-2])
pyplot.show()
elif self.dim == 3:
from mpl_toolkits.mplot3d import Axes3D
fig = pyplot.figure()
ax = fig.add_subplot(111, projection='3d')
for C in self.H:
for c in C:
for v in c():
x = []
y = []
z = []
# logging.info('v.x = {}'.format(v.x))
x.append(v.x[0])
y.append(v.x[1])
z.append(v.x[2])
for vn in v.nn:
x.append(vn.x[0])
y.append(vn.x[1])
z.append(vn.x[2])
x.append(v.x[0])
y.append(v.x[1])
z.append(v.x[2])
# logging.info('vn.x = {}'.format(vn.x))
ax.plot(x, y, z, label='simplex')
pyplot.show()
else:
print("dimension higher than 3 or wrong complex format")
return
class Cell:
"""
Contains a cell that is symmetric to the initial hypercube triangulation
"""
def __init__(self, p_gen, p_hgr, p_hgr_h, origin, suprenum):
self.p_gen = p_gen # parent generation
self.p_hgr = p_hgr # parent homology group rank
self.p_hgr_h = p_hgr_h #
self.hg_n = None
self.hg_d = None
# Maybe add parent homology group rank total history
# This is the sum off all previously split cells
# cumulatively throughout its entire history
self.C = []
self.origin = origin
self.suprenum = suprenum
self.centroid = None # (Not always used)
# TODO: self.bounds
def __call__(self):
return self.C
def add_vertex(self, V):
if V not in self.C:
self.C.append(V)
def homology_group_rank(self):
"""
Returns the homology group order of the current cell
"""
if self.hg_n is not None:
return self.hg_n
else:
hg_n = 0
for v in self.C:
if v.minimiser():
hg_n += 1
self.hg_n = hg_n
return hg_n
    def homology_group_differential(self):
        """
        Returns the difference between the current homology group of the
        cell and its parent group
        """
        if self.hg_d is not None:
            return self.hg_d
        else:
            self.hg_d = self.homology_group_rank() - self.p_hgr
            return self.hg_d
def polytopial_sperner_lemma(self):
"""
Returns the number of stationary points theoretically contained in the
        cell based on information currently known about the cell
"""
pass
def print_out(self):
"""
Print the current cell to console
"""
for v in self():
print("Vertex: {}".format(v.x))
constr = 'Connections: '
for vc in v.nn:
constr += '{} '.format(vc.x)
print(constr)
print('Order = {}'.format(v.order))
class Simplex:
"""
Contains a simplex that is symmetric to the initial symmetry constrained
hypersimplex triangulation
"""
def __init__(self, p_gen, p_hgr, p_hgr_h, generation_cycle, dim):
self.p_gen = p_gen # parent generation
self.p_hgr = p_hgr # parent homology group rank
self.p_hgr_h = p_hgr_h #
self.hg_n = None
self.hg_d = None
        self.generation_cycle = (generation_cycle + 1) % (dim - 1)
# Maybe add parent homology group rank total history
# This is the sum off all previously split cells
# cumulatively throughout its entire history
self.C = []
def __call__(self):
return self.C
def add_vertex(self, V):
if V not in self.C:
self.C.append(V)
def homology_group_rank(self):
"""
Returns the homology group order of the current cell
"""
if self.hg_n is not None:
return self.hg_n
else:
hg_n = 0
for v in self.C:
if v.minimiser():
hg_n += 1
self.hg_n = hg_n
return hg_n
    def homology_group_differential(self):
        """
        Returns the difference between the current homology group of the
        cell and its parent group
        """
        if self.hg_d is not None:
            return self.hg_d
        else:
            self.hg_d = self.homology_group_rank() - self.p_hgr
            return self.hg_d
def polytopial_sperner_lemma(self):
"""
Returns the number of stationary points theoretically contained in the
        cell based on information currently known about the cell
"""
pass
def print_out(self):
"""
Print the current cell to console
"""
for v in self():
print("Vertex: {}".format(v.x))
            constr = 'Connections: '
            for vc in v.nn:
                constr += '{} '.format(vc.x)
            print(constr)
            print('Order = {}'.format(v.order))
"""Define the Dataset and FileDataset classes.
The Dataset class represents the DICOM Dataset while the FileDataset class
adds extra functionality to Dataset when data is read from or written to file.
Overview of DICOM object model
------------------------------
Dataset (dict subclass)
Contains DataElement instances, each of which has a tag, VR, VM and value.
The DataElement value can be:
* A single value, such as a number, string, etc. (i.e. VM = 1)
* A list of numbers, strings, etc. (i.e. VM > 1)
* A Sequence (list subclass), where each item is a Dataset which
contains its own DataElements, and so on in a recursive manner.
"""
#
# Copyright (c) 2008-2013 <NAME>
# This file is part of pydicom, released under a modified MIT license.
# See the file license.txt included with this distribution, also
# available at https://github.com/darcymason/pydicom
#
import inspect # for __dir__
import io
import os.path
import sys
from pydicom import compat
from pydicom.charset import default_encoding, convert_encodings
from pydicom.datadict import dictionaryVR
from pydicom.datadict import tag_for_name, all_names_for_tag
from pydicom.tag import Tag, BaseTag
from pydicom.dataelem import DataElement, DataElement_from_raw, RawDataElement
from pydicom.uid import NotCompressedPixelTransferSyntaxes, UncompressedPixelTransferSyntaxes
from pydicom.tagtools import tag_in_exception
import pydicom # for write_file
import pydicom.charset
from pydicom.config import logger
import pydicom.encaps
sys_is_little_endian = (sys.byteorder == 'little')
have_numpy = True
try:
import numpy
except ImportError:
have_numpy = False
have_gdcm = True
try:
import gdcm
except ImportError:
have_gdcm = False
stat_available = True
try:
from os import stat
except ImportError:
stat_available = False
have_jpeg_ls = True
try:
import jpeg_ls
except ImportError:
have_jpeg_ls = False
have_pillow = True
try:
from PIL import Image as PILImg
except ImportError:
    # If that failed, try the alternate import syntax for PIL.
    try:
        import Image as PILImg
    except ImportError:
        # Neither worked, so it's likely not installed.
        have_pillow = False
class PropertyError(Exception):
"""For AttributeErrors caught in a property, so do not go to __getattr__"""
# http://docs.python.org/release/3.1.3/tutorial/errors.html#tut-userexceptions
pass
class Dataset(dict):
"""A collection (dictionary) of DICOM DataElements.
Examples
--------
Add DataElements to the Dataset (for elements in the DICOM dictionary).
>>> ds = Dataset()
>>> ds.PatientName = "CITIZEN^Joan"
>>> ds.add_new(0x00100020, 'LO', '12345')
>>> ds[0x0010, 0x0030] = DataElement(0x00100030, 'DA', '20010101')
Add Sequence DataElement to the Dataset
>>> ds.BeamSequence = [Dataset(), Dataset(), Dataset()]
>>> ds.BeamSequence[0].Manufacturer = "Linac, co."
>>> ds.BeamSequence[1].Manufacturer = "Linac and Sons, co."
>>> ds.BeamSequence[2].Manufacturer = "Linac and Daughters, co."
Add private DataElements to the Dataset
>>> ds.add(DataElement(0x0043102b, 'SS', [4, 4, 0, 0]))
>>> ds.add_new(0x0043102b, 'SS', [4, 4, 0, 0])
>>> ds[0x0043, 0x102b] = DataElement(0x0043102b, 'SS', [4, 4, 0, 0])
Updating and retrieving DataElement values
>>> ds.PatientName = "CITIZEN^Joan"
>>> ds.PatientName
    'CITIZEN^Joan'
>>> ds.PatientName = "CITIZEN^John"
>>> ds.PatientName
'CITIZEN^John'
Retrieving a DataElement's value from a Sequence
>>> ds.BeamSequence[0].Manufacturer
'Linac, co.'
>>> ds.BeamSequence[1].Manufacturer
'Linac and Sons, co.'
Retrieving DataElements
>>> elem = ds[0x00100010]
>>> elem = ds.data_element('PatientName')
>>> elem
(0010, 0010) Patient's Name PN: 'CITIZEN^Joan'
Deleting a DataElement from the Dataset
>>> del ds.PatientID
>>> del ds.BeamSequence[1].Manufacturer
>>> del ds.BeamSequence[2]
Deleting a private DataElement from the Dataset
>>> del ds[0x0043, 0x102b]
Determining if a DataElement is present in the Dataset
>>> 'PatientName' in ds
True
>>> 'PatientID' in ds
False
>>> 0x00100030 in ds
True
>>> 'Manufacturer' in ds.BeamSequence[0]
True
Iterating through the top level of a Dataset only (excluding Sequences)
>>> for elem in ds:
>>> print(elem)
Iterating through the entire Dataset (including Sequences)
>>> for elem in ds.iterall():
>>> print(elem)
Recursively iterate through a Dataset (including Sequences)
>>> def recurse(ds):
>>> for elem in ds:
>>> if elem.VR == 'SQ':
>>> [recurse(item) for item in elem]
>>> else:
>>> # Do something useful with each DataElement
Attributes
----------
default_element_format : str
The default formatting for string display.
default_sequence_element_format : str
The default formatting for string display of sequences.
indent_chars : str
For string display, the characters used to indent nested Sequences.
Default is " ".
"""
indent_chars = " "
# Python 2: Classes which define __eq__ should flag themselves as unhashable
__hash__ = None
def __init__(self, *args, **kwargs):
"""Create a new Dataset instance."""
self._parent_encoding = kwargs.get('parent_encoding', default_encoding)
dict.__init__(self, *args)
def __enter__(self):
"""Method invoked on entry to a with statement."""
return self
def __exit__(self, exc_type, exc_val, exc_tb):
"""Method invoked on exit from a with statement."""
return False
def add(self, data_element):
"""Add a DataElement to the Dataset.
Equivalent to ds[data_element.tag] = data_element
Parameters
----------
data_element : pydicom.dataelem.DataElement
The DataElement to add to the Dataset.
"""
self[data_element.tag] = data_element
def add_new(self, tag, VR, value):
"""Add a DataElement to the Dataset.
Parameters
----------
tag
The DICOM (group, element) tag in any form accepted by
pydicom.tag.Tag such as [0x0010, 0x0010], (0x10, 0x10), 0x00100010,
etc.
VR : str
The 2 character DICOM value representation (see DICOM standard part
5, Section 6.2).
value
The value of the data element. One of the following:
* a single string or number
* a list or tuple with all strings or all numbers
* a multi-value string with backslash separator
* for a sequence DataElement, an empty list or list of Dataset
"""
data_element = DataElement(tag, VR, value)
# use data_element.tag since DataElement verified it
self[data_element.tag] = data_element
def data_element(self, name):
"""Return the DataElement corresponding to the element keyword `name`.
Parameters
----------
name : str
A DICOM element keyword.
Returns
-------
pydicom.dataelem.DataElement or None
For the given DICOM element `keyword`, return the corresponding
Dataset DataElement if present, None otherwise.
"""
tag = tag_for_name(name)
if tag:
return self[tag]
return None
def __contains__(self, name):
"""Extend dict.__contains__() to handle DICOM keywords.
This is called for code like:
>>> 'SliceLocation' in ds
True
Parameters
----------
name : str or int or 2-tuple
The Element keyword or tag to search for.
Returns
-------
bool
True if the DataElement is in the Dataset, False otherwise.
"""
if isinstance(name, (str, compat.text_type)):
tag = tag_for_name(name)
else:
try:
tag = Tag(name)
            except Exception:
return False
if tag:
return dict.__contains__(self, tag)
else:
return dict.__contains__(self, name) # will no doubt raise an exception
def decode(self):
"""Apply character set decoding to all DataElements in the Dataset.
See DICOM PS3.5-2008 6.1.1.
"""
# Find specific character set. 'ISO_IR 6' is default
# May be multi-valued, but let pydicom.charset handle all logic on that
dicom_character_set = self._character_set
# Shortcut to the decode function in pydicom.charset
decode_data_element = pydicom.charset.decode
# Callback for walk(), to decode the chr strings if necessary
# This simply calls the pydicom.charset.decode function
def decode_callback(ds, data_element):
"""Callback to decode `data_element`."""
if data_element.VR == 'SQ':
for dset in data_element.value:
dset.decode()
else:
decode_data_element(data_element, dicom_character_set)
self.walk(decode_callback, recursive=False)
def __delattr__(self, name):
"""Intercept requests to delete an attribute by `name`.
If `name` is a DICOM keyword:
Delete the corresponding DataElement from the Dataset.
>>> del ds.PatientName
Else:
Delete the class attribute as any other class would do.
>>> del ds._is_some_attribute
Parameters
----------
name : str
The keyword for the DICOM element or the class attribute to delete.
"""
# First check if a valid DICOM keyword and if we have that data element
tag = tag_for_name(name)
if tag is not None and tag in self:
dict.__delitem__(self, tag) # direct to dict as we know we have key
# If not a DICOM name in this dataset, check for regular instance name
# can't do delete directly, that will call __delattr__ again
elif name in self.__dict__:
del self.__dict__[name]
# Not found, raise an error in same style as python does
else:
raise AttributeError(name)
def __delitem__(self, key):
"""Intercept requests to delete an attribute by key.
>>> del ds[0x00100010]
Parameters
----------
key
The key for the attribute to be deleted.
"""
# Assume is a standard tag (for speed in common case)
try:
dict.__delitem__(self, key)
        # If not a standard tag, then convert to Tag and try again
except KeyError:
tag = Tag(key)
dict.__delitem__(self, tag)
def __dir__(self):
"""Give a list of attributes available in the Dataset.
List of attributes is used, for example, in auto-completion in editors
or command-line environments.
"""
# Force zip object into a list in case of python3. Also backwards
# compatible
meths = set(list(zip(
*inspect.getmembers(Dataset, inspect.isroutine)))[0])
props = set(list(zip(
*inspect.getmembers(Dataset, inspect.isdatadescriptor)))[0])
dicom_names = set(self.dir())
alldir = sorted(props | meths | dicom_names)
return alldir
| |
<reponame>domdfcoding/domdf_python_tools<filename>tests/test_stringlist.py
# stdlib
import pickle
from textwrap import dedent
# 3rd party
import pytest
# this package
from domdf_python_tools.stringlist import DelimitedList, Indent, StringList, joinlines, splitlines
class TestStringList:
def test_creation(self):
assert not StringList()
assert not StringList([])
assert not StringList(())
assert StringList([1]) == ['1']
assert StringList(['1']) == ['1']
assert StringList('1') == ['1']
assert StringList("1\n") == ['1', '']
with pytest.raises(TypeError, match="'int' object is not iterable"):
StringList(1) # type: ignore
def test_append(self):
sl = StringList()
sl.append('')
assert sl == ['']
sl.append('')
assert sl == ['', '']
sl.append("hello")
assert sl == ['', '', "hello"]
sl.append("world\n\n\n")
assert sl == ['', '', "hello", "world", '', '', '']
sl.append("1234")
assert sl == ['', '', "hello", "world", '', '', '', "1234"]
def test_insert(self):
sl = StringList(['', '', "hello", "world", '', '', '', "1234"])
sl.insert(0, "foo")
assert sl == ["foo", '', '', "hello", "world", '', '', '', "1234"]
sl.insert(1, "bar")
assert sl == ["foo", "bar", '', '', "hello", "world", '', '', '', "1234"]
sl.insert(0, "1234")
assert sl == ["1234", "foo", "bar", '', '', "hello", "world", '', '', '', "1234"]
sl.insert(11, "baz")
assert sl == ["1234", "foo", "bar", '', '', "hello", "world", '', '', '', "1234", "baz"]
sl.insert(3, "\na line\n")
assert sl == ["1234", "foo", "bar", '', "a line", '', '', '', "hello", "world", '', '', '', "1234", "baz"]
sl.insert(100, "end")
assert sl == [
"1234", "foo", "bar", '', "a line", '', '', '', "hello", "world", '', '', '', "1234", "baz", "end"
]
def test_setitem(self):
sl = StringList(['', '', "hello", "world", '', '', '', "1234"])
sl[0] = "foo"
assert sl == ["foo", '', "hello", "world", '', '', '', "1234"]
sl[1] = "bar"
assert sl == ["foo", "bar", "hello", "world", '', '', '', "1234"]
sl[2] = "\nhello\nworld\n"
assert sl == ["foo", "bar", '', "hello", "world", '', "world", '', '', '', "1234"]
sl[3:4] = "\nfoo\nbar\n", "baz"
assert sl == ["foo", "bar", '', '', "foo", "bar", '', "baz", '', "world", '', '', '', "1234"]
def test_blankline(self):
sl = StringList(['', '', "hello", "world", '', '', '', "1234"])
sl.blankline()
assert sl == ['', '', "hello", "world", '', '', '', "1234", '']
sl.blankline()
assert sl == ['', '', "hello", "world", '', '', '', "1234", '', '']
sl.blankline(ensure_single=True)
assert sl == ['', '', "hello", "world", '', '', '', "1234", '']
sl.blankline(ensure_single=True)
assert sl == ['', '', "hello", "world", '', '', '', "1234", '']
sl.append('\t')
sl.blankline(ensure_single=True)
assert sl == ['', '', "hello", "world", '', '', '', "1234", '']
sl.append(" ")
sl.blankline(ensure_single=True)
assert sl == ['', '', "hello", "world", '', '', '', "1234", '']
sl.append(" ")
sl.blankline(ensure_single=True)
sl.blankline()
assert sl == ['', '', "hello", "world", '', '', '', "1234", '', '']
def test_slicing(self):
sl = StringList(['', '', "hello", "world", '', '', '', "1234"])
assert sl[:-3] == ['', '', "hello", "world", '']
assert sl[-3:] == ['', '', "1234"]
def test_start_of_line_indents(self):
assert StringList("Hello\n World") == ["Hello", " World"]
assert StringList("Hello\n World", convert_indents=True) == ["Hello", "\tWorld"]
def test_negative_getitem(self):
sl = StringList(['', '', "hello", "world", '', '', "abc", "1234"])
assert sl[-1] == "1234"
sl[-1] += "5678"
assert sl == ['', '', "hello", "world", '', '', "abc", "12345678"]
assert sl[-2] == "abc"
sl[-2] += "def"
assert sl == ['', '', "hello", "world", '', '', "abcdef", "12345678"]
def test_indent_size(self):
sl = StringList(['', '', "hello", "world", '', '', '', "1234"])
assert sl.indent_size == 0
sl.indent_size = 7
assert sl.indent_size == 7
sl.set_indent_size()
assert sl.indent_size == 0
sl.set_indent_size(2)
assert sl.indent_size == 2
sl.indent_size += 1
assert sl.indent_size == 3
sl.indent_size -= 2
assert sl.indent_size == 1
def test_indent_type(self):
sl = StringList(['', '', "hello", "world", '', '', '', "1234"])
assert sl.indent_type == '\t'
with pytest.raises(ValueError, match="'type' cannot an empty string."):
sl.indent_type = ''
assert sl.indent_type == '\t'
sl.indent_type = ' '
assert sl.indent_type == ' '
sl.set_indent_type('\t')
assert sl.indent_type == '\t'
sl.set_indent_type(' ')
assert sl.indent_type == ' '
with pytest.raises(ValueError, match="'type' cannot an empty string."):
sl.set_indent_type('')
assert sl.indent_type == ' '
sl.set_indent_type()
assert sl.indent_type == '\t'
def test_indent(self):
sl = StringList()
sl.set_indent_size(1)
sl.append("Indented")
assert sl == ["\tIndented"]
sl.set_indent_type(" ")
sl.append("Indented")
assert sl == ["\tIndented", " Indented"]
expected_list = [
"class Foo:",
'',
"\tdef bar(self, listicle: List[Item]):",
"\t\t...",
'',
"\tdef __repr__(self) -> str:",
'\t\treturn "Foo()"',
'',
]
expected_string = dedent(
"""\
class Foo:
def bar(self, listicle: List[Item]):
...
def __repr__(self) -> str:
return "Foo()"
"""
)
sl = StringList()
sl.append("class Foo:")
sl.blankline(True)
sl.set_indent_size(1)
sl.append("def bar(self, listicle: List[Item]):")
sl.indent_size += 1
sl.append("...")
sl.indent_size -= 1
sl.blankline(True)
sl.append("def __repr__(self) -> str:")
sl.indent_size += 1
sl.append('return "Foo()"')
sl.indent_size -= 1
sl.blankline(True)
sl.set_indent_size(0)
assert sl == expected_list
assert str(sl) == expected_string
assert sl == expected_string
sl = StringList()
sl.append("class Foo:")
sl.blankline(True)
with sl.with_indent('\t', 1):
sl.append("def bar(self, listicle: List[Item]):")
with sl.with_indent('\t', 2):
sl.append("...")
sl.blankline(True)
sl.append("def __repr__(self) -> str:")
with sl.with_indent('\t', 2):
sl.append('return "Foo()"')
sl.blankline(True)
assert sl.indent_size == 0
assert sl == expected_list
assert str(sl) == expected_string
assert sl == expected_string
sl = StringList()
sl.append("class Foo:")
sl.blankline(True)
with sl.with_indent_size(1):
sl.append("def bar(self, listicle: List[Item]):")
with sl.with_indent_size(2):
sl.append("...")
sl.blankline(True)
sl.append("def __repr__(self) -> str:")
with sl.with_indent_size(2):
sl.append('return "Foo()"')
sl.blankline(True)
assert sl.indent_size == 0
assert sl == expected_list
assert str(sl) == expected_string
assert sl == expected_string
sl = StringList()
sl.append("class Foo:")
sl.set_indent(Indent(0, " "))
sl.blankline(True)
with sl.with_indent_size(1):
sl.append("def bar(self, listicle: List[Item]):")
with sl.with_indent_size(2):
sl.append("...")
sl.blankline(True)
sl.append("def __repr__(self) -> str:")
with sl.with_indent_size(2):
sl.append('return "Foo()"')
sl.blankline(True)
assert sl.indent_size == 0
assert sl == [x.expandtabs(4) for x in expected_list]
assert str(sl) == expected_string.expandtabs(4)
assert sl == expected_string.expandtabs(4)
sl = StringList()
sl.append("class Foo:")
sl.set_indent(" ", 0)
sl.blankline(True)
with sl.with_indent_size(1):
sl.append("def bar(self, listicle: List[Item]):")
with sl.with_indent_size(2):
sl.append("...")
sl.blankline(True)
sl.append("def __repr__(self) -> str:")
with sl.with_indent_size(2):
sl.append('return "Foo()"')
sl.blankline(True)
assert sl.indent_size == 0
assert sl == [x.expandtabs(4) for x in expected_list]
assert str(sl) == expected_string.expandtabs(4)
assert sl == expected_string.expandtabs(4)
sl = StringList()
sl.append("class Foo:")
sl.blankline(True)
with sl.with_indent_size(1):
sl.append("def bar(self, listicle: List[Item]):")
with sl.with_indent_size(2):
sl.append("...")
sl.blankline(True)
sl.append("def __repr__(self) -> str:")
with sl.with_indent_size(2):
with sl.with_indent_type(" "):
sl.append('return "Foo()"')
sl.blankline(True)
assert sl.indent_size == 0
expected_list[-2] = ' return "Foo()"'
assert sl == expected_list
assert str(sl) == expected_string.replace('\t\treturn "Foo()"', ' return "Foo()"')
assert sl == expected_string.replace('\t\treturn "Foo()"', ' return "Foo()"')
def test_convert_indents(self):
sl = StringList(convert_indents=True)
sl.append(" Indented")
assert sl == ["\tIndented"]
def test_set_indent_error(self):
sl = StringList()
with pytest.raises(TypeError, match="'size' argument cannot be used when providing an 'Indent' object."):
sl.set_indent(Indent(0, " "), 5)
def test_extend(self):
sl = StringList(['', '', "hello", "world", '', '', '', "1234"])
sl.extend(["\nfoo\nbar\n baz"])
assert sl == ['', '', "hello", "world", '', '', '', "1234", '', "foo", "bar", " baz"]
def test_clear(self):
sl = StringList(['', '', "hello", "world", '', '', '', "1234"])
sl.clear()
assert sl == []
def test_copy(self):
sl = StringList(['', '', "hello", "world", '', '', '', "1234"])
sl2 = sl.copy()
assert sl == sl2
assert sl2 == ['', '', "hello", "world", '', '', '', "1234"]
assert isinstance(sl2, StringList)
def test_count(self):
sl = StringList(['', '', "hello", "world", '', '', '', "1234"])
assert sl.count("hello") == 1
def test_count_blanklines(self):
sl = StringList(['', '', "hello", "world", '', '', '', "1234"])
assert sl.count_blanklines() == 5
def test_index(self):
sl = StringList(['', '', "hello", "world", '', '', '', "1234"])
assert sl.index("hello") == 2
def test_pop(self):
sl = StringList(['', '', "hello", "world", '', '', '', "1234"])
assert sl.pop(2) == "hello"
assert sl == ['', '', "world", '', '', '', "1234"]
assert isinstance(sl, StringList)
def test_remove(self):
sl = StringList(['', '', "hello", "world", '', '', '', "1234"])
sl.remove("hello")
assert sl == ['', '', "world", '', '', '', "1234"]
assert isinstance(sl, StringList)
def test_reverse(self):
sl = StringList(['', '', "hello", "world", '', '', '', "1234"])
sl.reverse()
assert sl == ["1234", '', '', '', "world", "hello", '', '']
assert isinstance(sl, StringList)
def test_sort(self):
sl = StringList(['', '', "hello", "world", '', '', '', "1234"])
sl.sort()
assert sl == ['', '', '', '', '', "1234", "hello", "world"]
assert isinstance(sl, StringList)
sl = StringList(['', '', "hello", "world", '', '', '', "1234"])
sl.sort(reverse=True)
assert sl == ["world", "hello", "1234", '', '', '', '', '']
assert isinstance(sl, StringList)
def test_str(self):
sl = StringList(['', '', "hello", "world", '', '', '', "1234"])
assert str(sl) == "\n\nhello\nworld\n\n\n\n1234"
sl = StringList(['', '', "hello", "world", '', '', '', "1234", ''])
assert str(sl) == "\n\nhello\nworld\n\n\n\n1234\n"
def test_bytes(self):
sl = StringList(['', '', "hello", "world", '', '', '', "1234"])
assert bytes(sl) == b"\n\nhello\nworld\n\n\n\n1234"
sl = StringList(['', '', "hello", "world", '', '', '', "1234", ''])
assert bytes(sl) == b"\n\nhello\nworld\n\n\n\n1234\n"
@pytest.mark.xfail()
def test_pickle(self):
sl = StringList(['', '', "hello", "world", '', '', '', "1234"])
loaded = pickle.loads(pickle.dumps(sl)) # nosec: B301
assert sl == loaded
assert sl.indent == loaded.indent
assert isinstance(loaded, StringList)
class TestIndent:
def test_creation(self):
indent = Indent()
assert indent.size == 0
assert indent.type == '\t'
indent = Indent(3, " ")
assert indent.size == 3
assert indent.type == " "
def test_iter(self):
indent = Indent(3, " ")
assert tuple(indent) == (3, " ")
assert list(iter(indent)) == [3, " "]
def test_size(self):
indent = Indent()
indent.size = 1
assert indent.size == 1
indent.size = '2' # type: ignore
assert indent.size == 2
indent.size = 3.0 # type: ignore
assert indent.size == 3
def test_type(self):
indent = Indent()
indent.type = " "
assert indent.type == " "
indent.type = ' '
assert indent.type == ' '
indent.type = 1 # type: ignore
assert indent.type == '1'
indent.type = ">>> "
assert indent.type == ">>> "
with pytest.raises(ValueError, match="'type' cannot an empty string."):
indent.type = ''
def test_str(self):
assert str(Indent()) == ''
assert str(Indent(1)) == '\t'
assert str(Indent(5)) == "\t\t\t\t\t"
assert str(Indent(type=" ")) == ''
assert str(Indent(1, type=" ")) == " "
assert str(Indent(5, type=" ")) == " " * 5
assert str(Indent(type=">>> ")) == ''
assert str(Indent(1, type=">>> ")) == ">>> "
def test_repr(self):
assert repr(Indent()) == "Indent(size=0, type='\\t')"
assert repr(Indent(1)) == "Indent(size=1, type='\\t')"
assert repr(Indent(5)) == "Indent(size=5, type='\\t')"
assert repr(Indent(type=" ")) == "Indent(size=0, type=' ')"
assert repr(Indent(1, type=" ")) == "Indent(size=1, type=' ')"
assert repr(Indent(5, type=" ")) == "Indent(size=5, type=' ')"
assert repr(Indent(type=">>> ")) == "Indent(size=0, type='>>> ')"
assert repr(Indent(1, type=">>> ")) == "Indent(size=1, type='>>> ')"
def test_eq(self):
assert Indent() == Indent()
assert Indent() == (0, '\t')
assert Indent() == ''
assert Indent(1, " ") == Indent(1, " ")
assert Indent(1, " ") == (1, " ")
assert Indent(1, " ") == " "
assert Indent(2, '\t') == Indent(2, '\t')
assert Indent(2, '\t') == (2, '\t')
assert Indent(2, '\t') == "\t\t"
assert Indent() != 1
def test_pickle(self):
indent = Indent(2, " ")
assert indent == pickle.loads(pickle.dumps(indent)) # nosec: B301
def test_delimitedlist():
	data = DelimitedList(['a', 'b',
<gh_stars>0
import dropbox
import hashlib
import math
import os
import pdbox
import shutil
from pdbox.utils import DropboxError, dbx_uri, execute, normpath
def get_remote(path, meta=None):
"""
Get a RemoteFile or RemoteFolder from path.
Raises:
- ValueError
"""
if meta: # Don't look up the path, just use what's provided.
if isinstance(meta, dropbox.files.FileMetadata):
return RemoteFile(None, meta=meta)
if isinstance(meta, dropbox.files.FolderMetadata):
return RemoteFolder(None, meta=meta)
path = normpath(path)
if path == "/": # get_metadata on the root is not supported.
return RemoteFolder(path)
try:
meta = execute(pdbox.dbx.files_get_metadata, path)
except DropboxError:
raise ValueError("%s could not be found" % dbx_uri(path))
if isinstance(meta, dropbox.files.DeletedMetadata):
pdbox.debug("%s was recently deleted" % dbx_uri(path))
raise ValueError("%s could not be found" % dbx_uri(path))
if isinstance(meta, dropbox.files.FolderMetadata):
return RemoteFolder(None, meta=meta)
else:
# This doesn't account for types other than FileMetadata but I don't
# think that they can be returned here.
return RemoteFile(None, meta=meta)
def get_local(path):
"""
Get a LocalFile or LocalFolder from path.
Raises: ValueError
"""
path = os.path.abspath(path)
if os.path.isfile(path):
return LocalFile(path)
if os.path.isdir(path):
return LocalFolder(path)
raise ValueError("%s does not exist" % path)
def remote_assert_empty(path):
"""
Assert that nothing exists at path in Dropbox.
Raises: ValueError
"""
path = normpath(path)
try:
remote = get_remote(path)
except ValueError: # Nothing exists at path, nothing to worry about.
return
raise ValueError("Something exists at %s" % remote.uri)
def local_assert_empty(path):
"""
Assert that nothing exists at path locally.
Raises: ValueError
"""
try:
local = get_local(path)
except ValueError:
return
raise ValueError("Something exists at %s" % local.path)
class RemoteObject(object):
"""A file or folder inside Dropbox."""
def delete(self):
"""
Delete a file or folder inside Dropbox.
Raises: DropboxError
"""
if not pdbox._args.get("dryrun"):
result = execute(pdbox.dbx.files_delete_v2, self.path)
pdbox.debug("Metadata response: %s" % result.metadata)
pdbox.info("Deleted %s" % self.uri)
def copy(self, dest, overwrite=False):
"""
Copy a file or folder to dest inside Dropbox.
Raises:
- ValueError
- DropboxError
"""
dest = normpath(dest)
try:
remote = get_remote(dest)
except ValueError: # Nothing exists at dest, nothing to worry about.
remote = None
else: # Something exists here.
if not overwrite:
raise ValueError("Something exists at %s" % remote.uri)
try:
if self.hash == remote.hash: # Nothing to update.
pdbox.info(
"%s and %s are identical" % (self.uri, remote.uri),
)
return
except AttributeError: # RemoteFolder doesn't have a hash.
pass
if not pdbox._args.get("dryrun"):
if overwrite and remote:
# There's no way to copy and overwrite at the same time,
# so delete the existing file first.
remote.delete()
result = execute(pdbox.dbx.files_copy_v2, self.path, dest)
pdbox.debug("Metadata respones: %s" % result.metadata)
pdbox.info("Copied %s to %s" % (self.uri, dbx_uri(dest)))
if not pdbox._args.get("dryrun"): # Return the newly created object.
return get_remote(None, meta=result.metadata)
def move(self, dest, overwrite=False):
"""
Move a file or folder to dest inside Dropbox.
Note that this is essentially "rename", and will not move the source
into a folder. Instead, it will delete that folder if overwrite is set.
Raises:
- ValueError
- DropboxError
"""
dest = normpath(dest)
try:
remote = get_remote(dest)
except ValueError: # Nothing exists at dest, nothing to worry about.
pass
else: # Something exists here.
if not overwrite:
raise ValueError("Something exists at %s" % remote.uri)
# There's no way to copy and overwrite at the same time,
# so delete the existing file first.
# Note that this can delete folders too.
remote.delete()
if not pdbox._args.get("dryrun"):
result = execute(pdbox.dbx.files_move_v2, self.path, dest)
pdbox.debug("Metadata response: %s" % result.metadata)
pdbox.info("Moved %s to %s" % (self.path, dbx_uri(dest)))
if not pdbox._args.get("dryrun"): # Return the newly created object.
return get_remote(None, meta=result.metadata)
class RemoteFile(RemoteObject):
"""A file in Dropbox."""
def __init__(self, path, meta=None):
"""Raises: ValueError"""
if not meta: # Look for a file at path.
path = normpath(path)
if path == "/": # get_metadata on the root is not supported.
raise ValueError("The root folder is not a file")
try:
meta = execute(pdbox.dbx.files_get_metadata, path)
except DropboxError:
raise ValueError("%s could not be found" % dbx_uri(path))
if isinstance(meta, dropbox.files.FolderMetadata):
raise ValueError("%s is a folder" % dbx_uri(meta.path_display))
if isinstance(meta, dropbox.files.DeletedMetadata):
pdbox.debug("%s was recently deleted" % dbx_uri(path))
raise ValueError("%s could not be found" % dbx_uri(path))
self.id = meta.id # File ID, not sure how this can be used.
self.size = meta.size # Size in bytes.
self.path = meta.path_display # Path, including the name.
self.parent = "/".join(self.path.split("/")[:-1]) # Parent folder.
self.name = meta.name # File name with extension.
self.modified = meta.server_modified # Last modified time.
self.rev = meta.rev # Revision, not sure how this can be used.
self.hash = meta.content_hash # Hash for comparing the contents.
self.uri = dbx_uri(self.path) # Convenience field for display.
def download(self, dest, overwrite=False):
"""
Download this file to dest locally.
Raises:
- ValueError
- DropboxError
- Exception
"""
dest = os.path.abspath(dest)
try:
local = get_local(dest)
except ValueError: # Nothing exists at dest, nothing to worry about.
local = None
else: # Something exists here.
if local.hash() == self.hash: # Nothing to update.
pdbox.info("%s and %s are identical" % (self.uri, local.path))
return
if not overwrite:
raise ValueError("%s already exists" % local.path)
# To avoid any weird overwriting behaviour in the case of errors, we'll
# download to a different location first, then move to dest afterwards.
tmp_dest = os.path.join(
pdbox.TMP_DOWNLOAD_DIR,
os.path.basename(dest),
)
while os.path.exists(tmp_dest): # Make sure the temp name is unique.
tmp_dest += "_"
if pdbox._args.get("dryrun"):
pdbox.info("Downloaded %s to %s" % (self.uri, dest))
return None
# TODO: Progress bars.
meta = execute(pdbox.dbx.files_download_to_file, tmp_dest, self.path)
pdbox.debug("Metadata response: %s" % meta)
if not os.path.isdir(os.path.dirname(dest)):
# Create the parent directories of dest.
os.makedirs(os.path.dirname(dest))
if not pdbox._args.get("dryrun"):
# os.rename overwrites files just fine, but not directories.
if local and isinstance(local, LocalFolder):
shutil.rmtree(local.path)
# Move the file from the temp location to dest.
os.rename(tmp_dest, dest)
pdbox.info("Downloaded %s to %s" % (self.uri, dest))
return LocalFile(dest) # Return the newly created file.
class RemoteFolder(RemoteObject):
"""A folder in Dropbox."""
def __init__(self, path, meta=None):
"""Raises: ValueError"""
if not meta: # Look for a folder at path.
path = normpath(path)
if path == "/":
# get_metadata on the root folder is not supported.
self.id = -1
self.path = "/"
self.parent = "/"
self.name = "/"
self.uri = "dbx://"
return
try:
meta = execute(pdbox.dbx.files_get_metadata, path)
except DropboxError:
raise ValueError("%s could not be found" % dbx_uri(path))
if isinstance(meta, dropbox.files.FileMetadata):
raise ValueError("%s is a file" % dbx_uri(meta.path_display))
if isinstance(meta, dropbox.files.DeletedMetadata):
pdbox.debug("%s was recently deleted" % dbx_uri(path))
raise ValueError("%s does not exist" % dbx_uri(path))
self.id = meta.id # Folder ID, not sure how this can be used.
self.path = meta.path_display # Path to the folder, including name.
self.parent = "/".join(self.path.split("/")[:-1]) # Parent folder.
self.name = meta.name # Base name of the folder.
self.uri = dbx_uri(self.path) # Convenience field for display.
@staticmethod
def create(path, overwrite=False):
"""
Create a new folder in Dropbox.
Raises:
- ValueError
- DropboxError
"""
path = normpath(path)
try:
remote = get_remote(path)
except ValueError: # Nothing exists at path, nothing to worry about.
pass
else:
if isinstance(remote, RemoteFolder):
pdbox.info("%s already exists" % remote.uri)
return remote
elif not overwrite:
raise ValueError("%s already exists" % remote.uri)
if not pdbox._args.get("dryrun"):
result = execute(pdbox.dbx.files_create_folder_v2, path)
pdbox.debug("Metadata response: %s" % result.metadata)
pdbox.info("Created new folder %s" % dbx_uri(path))
if not pdbox._args.get("dryrun"): # Return the newly created folder.
return RemoteFolder(None, meta=result.metadata)
def contents(self):
"""Get this folder's contents in Dropbox."""
# list_folder on "/" isn't supported for some reason.
path = "" if self.path == "/" else self.path
result = execute(pdbox.dbx.files_list_folder, path)
entries = [get_remote(None, meta=e) for e in result.entries]
# TODO: Verify that this works.
while result.has_more:
# As long as there are more pages to look through,
# add their contents to the list of entries.
more = execute(pdbox.dbx.files_list_folder_continue, result.cursor)
entries.extend(get_remote(None, meta=e) for e in more)
return entries
def download(self, dest, overwrite=False):
"""
Download this folder to dest locally.
Raises:
- ValueError
- DropboxError
"""
dest = os.path.abspath(dest)
try:
local = get_local(dest)
except ValueError: # Nothing exists at dest, nothing to worry about.
local = None
else:
if not overwrite:
raise ValueError("%s already exists" % local.path)
# To avoid any weird overwriting behaviour in the case of errors, we'll
# download to a different location first, then move to dest afterwards.
        tmp_dest = os.path.join(
            pdbox.TMP_DOWNLOAD_DIR,
            os.path.basename(dest),
        )
<gh_stars>10-100
# -*- coding: utf-8 -*-
#
# Author: <NAME>, Finland 2015-2019
#
# This file is part of Kunquat.
#
# CC0 1.0 Universal, http://creativecommons.org/publicdomain/zero/1.0/
#
# To the extent possible under law, Kunquat Affirmers have waived all
# copyright and related or neighboring rights to Kunquat.
#
import math
import time
from kunquat.tracker.ui.qt import *
import kunquat.tracker.ui.model.tstamp as tstamp
from kunquat.tracker.ui.model.gridpattern import STYLE_COUNT
from kunquat.tracker.ui.views.headerline import HeaderLine
from kunquat.tracker.ui.views.kqtcombobox import KqtComboBox
from kunquat.tracker.ui.views.numberslider import NumberSlider
from kunquat.tracker.ui.views.updater import Updater
from kunquat.tracker.ui.views.varprecspinbox import VarPrecSpinBox
from .config import *
from .ruler import Ruler
from . import utils
class GridEditor(QWidget, Updater):
def __init__(self):
super().__init__()
self._grid_list = GridList()
self._general_editor = GeneralEditor()
self._grid_area = GridArea()
self._subdiv_editor = SubdivEditor()
self._line_editor = LineEditor()
self.add_to_updaters(
self._grid_list,
self._general_editor,
self._grid_area,
self._subdiv_editor,
self._line_editor)
self._line_layout = QVBoxLayout()
self._line_layout.setContentsMargins(0, 0, 0, 0)
self._line_layout.setSpacing(8)
self._line_layout.addWidget(self._subdiv_editor)
self._line_layout.addWidget(self._line_editor)
self._details_layout = QHBoxLayout()
self._details_layout.setContentsMargins(0, 0, 0, 0)
self._details_layout.setSpacing(4)
self._details_layout.addWidget(self._grid_area)
self._details_layout.addLayout(self._line_layout)
self._header = HeaderLine('Grid editor')
v = QVBoxLayout()
v.setContentsMargins(0, 0, 0, 0)
v.setSpacing(2)
v.addWidget(self._grid_list, 1)
v.addSpacing(2)
v.addWidget(self._header)
v.addWidget(self._general_editor)
v.addLayout(self._details_layout, 4)
self.setLayout(v)
def _on_setup(self):
self.register_action('signal_style_changed', self._update_style)
def _update_style(self):
style_mgr = self._ui_model.get_style_manager()
self._header.update_style(style_mgr)
self._line_layout.setSpacing(style_mgr.get_scaled_size_param('large_padding'))
self._details_layout.setSpacing(
style_mgr.get_scaled_size_param('medium_padding'))
layout = self.layout()
spacing = style_mgr.get_scaled_size_param('small_padding')
layout.setSpacing(spacing)
for i in range(layout.count()):
spacer = layout.itemAt(i).spacerItem()
if spacer:
spacer.changeSize(2, spacing)
class GridListModel(QAbstractListModel, Updater):
def __init__(self):
super().__init__()
self._items = []
def _on_setup(self):
self._make_items()
def get_item(self, index):
row = index.row()
if 0 <= row < len(self._items):
item = self._items[row]
return item
return None
def _make_items(self):
grid_mgr = self._ui_model.get_grid_manager()
for gp_id in grid_mgr.get_editable_grid_pattern_ids():
gp = grid_mgr.get_grid_pattern(gp_id)
gp_name = gp.get_name()
self._items.append((gp_id, gp_name))
self._items = sorted(self._items, key=lambda x: x[1])
# Qt interface
def rowCount(self, parent):
return len(self._items)
def data(self, index, role):
if role == Qt.DisplayRole:
row = index.row()
if 0 <= row < len(self._items):
item = self._items[row]
_, gp_name = item
return gp_name
return None
def headerData(self, section, orientation, role):
return None
class GridListView(QListView, Updater):
def __init__(self):
super().__init__()
self.setSelectionMode(QAbstractItemView.SingleSelection)
def _on_setup(self):
self.clicked.connect(self._select_grid_pattern)
self.activated.connect(self._select_grid_pattern)
def _select_grid_pattern(self, index):
item = self.model().get_item(index)
if item:
gp_id, _ = item
grid_mgr = self._ui_model.get_grid_manager()
grid_mgr.select_grid_pattern(gp_id)
self._updater.signal_update('signal_grid_pattern_selection')
class GridList(QWidget, Updater):
def __init__(self):
super().__init__()
self._toolbar = GridListToolBar()
self._grid_list_model = None
self._grid_list_view = GridListView()
v = QVBoxLayout()
v.setContentsMargins(0, 0, 0, 0)
v.setSpacing(0)
v.addWidget(self._toolbar)
v.addWidget(self._grid_list_view)
self.setLayout(v)
def _on_setup(self):
self.add_to_updaters(self._toolbar, self._grid_list_view)
self.register_action('signal_grid_pattern_list', self._update_model)
self._update_model()
def _update_model(self):
if self._grid_list_model:
self.remove_from_updaters(self._grid_list_model)
self._grid_list_model = GridListModel()
self.add_to_updaters(self._grid_list_model)
self._grid_list_view.setModel(self._grid_list_model)
class GridListToolBar(QToolBar, Updater):
def __init__(self):
super().__init__()
self._new_button = QToolButton()
self._new_button.setText('New grid')
self._new_button.setEnabled(True)
self._remove_button = QToolButton()
self._remove_button.setText('Remove grid')
self._remove_button.setEnabled(False)
self.addWidget(self._new_button)
self.addWidget(self._remove_button)
def _on_setup(self):
self.register_action('signal_grid_pattern_list', self._update_all)
self.register_action('signal_grid_pattern_selection', self._update_all)
self._new_button.clicked.connect(self._add_grid_pattern)
self._remove_button.clicked.connect(self._remove_grid_pattern)
self._update_all()
def _update_all(self):
grid_mgr = self._ui_model.get_grid_manager()
gp_count = len(grid_mgr.get_editable_grid_pattern_ids())
selected_gp_id = grid_mgr.get_selected_grid_pattern_id()
self._remove_button.setEnabled((gp_count > 0) and (selected_gp_id != None))
def _add_grid_pattern(self):
grid_mgr = self._ui_model.get_grid_manager()
grid_mgr.add_grid_pattern()
self._updater.signal_update('signal_grid_pattern_list')
def _remove_grid_pattern(self):
grid_mgr = self._ui_model.get_grid_manager()
gp_id = grid_mgr.get_selected_grid_pattern_id()
if gp_id == None:
return
grid_mgr.remove_grid_pattern(gp_id)
grid_mgr.select_grid_pattern(None)
self._updater.signal_update(
'signal_grid_pattern_list',
'signal_grid_pattern_modified',
'signal_grid_pattern_selection')
class Corner(QWidget):
def __init__(self):
super().__init__()
self._bg_colour = QColor(0, 0, 0)
self.setAutoFillBackground(False)
self.setAttribute(Qt.WA_OpaquePaintEvent)
self.setAttribute(Qt.WA_NoSystemBackground)
def set_config(self, config):
self._bg_colour = config['bg_colour']
def paintEvent(self, event):
painter = QPainter(self)
painter.setBackground(self._bg_colour)
painter.eraseRect(event.rect())
class GridArea(QAbstractScrollArea, Updater):
def __init__(self):
super().__init__()
self.setFocusPolicy(Qt.NoFocus)
self._config = None
# Widgets
self.setViewport(GridView())
self._corner = Corner()
self._ruler = Ruler(is_grid_ruler=True)
self._header = GridHeader()
# Layout
g = QGridLayout()
g.setSpacing(0)
g.setContentsMargins(0, 0, 0, 0)
g.addWidget(self._corner, 0, 0)
g.addWidget(self._ruler, 1, 0)
g.addWidget(self._header, 0, 1)
self.setLayout(g)
self.viewport().setFocusProxy(None)
self.setHorizontalScrollBarPolicy(Qt.ScrollBarAlwaysOff)
self.setVerticalScrollBarPolicy(Qt.ScrollBarAlwaysOn)
self.setSizePolicy(QSizePolicy.Fixed, QSizePolicy.MinimumExpanding)
def _on_setup(self):
self.register_action(
'signal_grid_pattern_selection', self._update_selected_grid_pattern)
self.register_action(
'signal_grid_pattern_modified', self._update_selected_grid_pattern)
self.register_action('signal_grid_zoom', self._update_zoom)
self.register_action('signal_style_changed', self._update_style)
self._update_config()
grid_mgr = self._ui_model.get_grid_manager()
# Default zoom level
px_per_beat = self._config['trs_per_beat'] * self._config['tr_height']
self._zoom_levels = utils.get_zoom_levels(
16, px_per_beat, 512, self._config['zoom_factor'])
self._default_zoom_index = self._zoom_levels.index(px_per_beat)
grid_mgr.set_zoom_range(
-self._default_zoom_index,
len(self._zoom_levels) - self._default_zoom_index - 1)
self._set_px_per_beat(self._zoom_levels[self._default_zoom_index])
self.add_to_updaters(self._ruler, self.viewport())
self.viewport().followCursor.connect(self._follow_cursor)
self._update_selected_grid_pattern()
def _update_style(self):
self._update_config()
self.update()
def _update_config(self):
style_mgr = self._ui_model.get_style_manager()
self._config = DEFAULT_CONFIG.copy()
config = get_config_with_custom_style(style_mgr)
self._config.update(config)
for subcfg in ('ruler', 'header', 'edit_cursor', 'grid'):
self._config[subcfg] = DEFAULT_CONFIG[subcfg].copy()
if subcfg in config:
self._config[subcfg].update(config[subcfg])
self._corner.set_config(self._config)
fm = QFontMetrics(self._config['font'], self)
self._config['font_metrics'] = fm
self._config['tr_height'] = (fm.tightBoundingRect('A').height() +
self._config['trigger']['padding_y'] * 2)
self._header.set_config(self._config)
self._ruler.set_config(self._config['ruler'])
header_height = self._header.minimumSizeHint().height()
ruler_width = self._ruler.sizeHint().width()
self.setViewportMargins(ruler_width, header_height, 0, 0)
self._corner.setFixedSize(ruler_width, header_height)
self._header.setFixedHeight(header_height)
self._ruler.setFixedWidth(ruler_width)
self.viewport().set_config(self._config)
def _set_px_per_beat(self, px_per_beat):
self._ruler.set_px_per_beat(px_per_beat)
self.viewport().set_px_per_beat(px_per_beat)
def _update_zoom(self):
grid_mgr = self._ui_model.get_grid_manager()
zoom_level = grid_mgr.get_zoom()
cur_zoom_index = zoom_level + self._default_zoom_index
self._set_px_per_beat(self._zoom_levels[cur_zoom_index])
def _update_selected_grid_pattern(self):
self._ruler.update_grid_pattern()
def _update_scrollbars(self):
grid_mgr = self._ui_model.get_grid_manager()
gp_id = grid_mgr.get_selected_grid_pattern_id()
if gp_id == None:
self.verticalScrollBar().setRange(0, 0)
return
total_height_px = self.viewport().get_total_height()
vp_height = self.viewport().height()
vscrollbar = self.verticalScrollBar()
vscrollbar.setPageStep(vp_height)
vscrollbar.setRange(0, total_height_px - vp_height)
def _follow_cursor(self, new_y_offset_str):
new_y_offset = int(new_y_offset_str)
vscrollbar = self.verticalScrollBar()
old_y_offset = vscrollbar.value()
self._update_scrollbars()
vscrollbar.setValue(new_y_offset)
self.viewport().update()
def scrollContentsBy(self, dx, dy):
px_offset = self.verticalScrollBar().value()
self._ruler.set_px_offset(px_offset)
self.viewport().set_px_offset(px_offset)
def sizeHint(self):
width = (self._ruler.width() +
self.viewport().width() +
self.verticalScrollBar().width())
return QSize(width, 200)
def paintEvent(self, event):
self.viewport().paintEvent(event)
def resizeEvent(self, event):
self.viewport().resizeEvent(event)
def mousePressEvent(self, event):
self.viewport().mousePressEvent(event)
class GridHeader(QWidget):
def __init__(self):
super().__init__()
self._width = DEFAULT_CONFIG['col_width']
def set_config(self, config):
self._config = config
self._width = config['col_width']
self.update()
def resizeEvent(self, event):
self.update()
def minimumSizeHint(self):
fm = QFontMetrics(self._config['header']['font'], self)
height = fm.tightBoundingRect('Ag').height()
return QSize(self._width, height)
def sizeHint(self):
return self.minimumSizeHint()
def paintEvent(self, event):
painter = QPainter(self)
bg_colour = utils.scale_colour(
self._config['header']['bg_colour'], self._config['inactive_dim'])
painter.setBackground(bg_colour)
painter.eraseRect(0, 0, self.width(), self.height())
class GridView(QWidget, Updater):
followCursor = Signal(str, name='followCursor')
def __init__(self):
super().__init__()
self._config = None
self._width = DEFAULT_CONFIG['col_width']
self._px_offset = 0
self._px_per_beat = None
self.setAutoFillBackground(False)
self.setAttribute(Qt.WA_OpaquePaintEvent)
self.setAttribute(Qt.WA_NoSystemBackground)
self.setFocusPolicy(Qt.StrongFocus)
def _on_setup(self):
self.register_action('signal_grid_pattern_selection', self._follow_edit_cursor)
self.register_action(
'signal_grid_pattern_line_selection', self._follow_edit_cursor)
self.register_action('signal_grid_pattern_modified', self.update)
self.register_action('signal_grid_zoom', self.update)
def set_config(self, config):
self._config = config
fm = self._config['font_metrics']
em_px = int(math.ceil(fm.tightBoundingRect('m').width()))
self._width = self._config['col_width'] * em_px
self.setFixedWidth(self._width)
def set_px_offset(self, new_offset):
if self._px_offset != new_offset:
self._px_offset = new_offset
self.update()
def set_px_per_beat(self, px_per_beat):
if self._px_per_beat != px_per_beat:
orig_px_per_beat = self._px_per_beat
orig_px_offset = self._px_offset
self._px_per_beat = px_per_beat
if not self._ui_model:
return
# Get old edit cursor offset
grid_mgr = self._ui_model.get_grid_manager()
selected_line_ts = tstamp.Tstamp(0)
gp_id = grid_mgr.get_selected_grid_pattern_id()
if gp_id != None:
gp = grid_mgr.get_grid_pattern(gp_id)
selected_line_ts = gp.get_selected_line() or tstamp.Tstamp(0)
orig_relative_offset = utils.get_px_from_tstamp(
selected_line_ts, orig_px_per_beat) - orig_px_offset
# Adjust vertical position so that edit cursor maintains its height
new_cursor_offset = utils.get_px_from_tstamp(selected_line_ts, px_per_beat)
new_px_offset = new_cursor_offset - orig_relative_offset
self.followCursor.emit(str(new_px_offset))
def get_total_height(self):
grid_mgr = self._ui_model.get_grid_manager()
gp_id = grid_mgr.get_selected_grid_pattern_id()
if gp_id == None:
return 0
gp = grid_mgr.get_grid_pattern(gp_id)
gp_length = gp.get_length()
return (utils.get_px_from_tstamp(gp_length, self._px_per_beat) +
self._config['tr_height'])
def _follow_edit_cursor(self):
grid_mgr = self._ui_model.get_grid_manager()
gp_id = grid_mgr.get_selected_grid_pattern_id()
if gp_id == None:
return
gp = grid_mgr.get_grid_pattern(gp_id)
selected_line_ts = gp.get_selected_line() or tstamp.Tstamp(0)
cursor_abs_y = utils.get_px_from_tstamp(selected_line_ts, self._px_per_beat)
cursor_rel_y = cursor_abs_y - self._px_offset
is_scrolling_required = False
min_snap_dist = self._config['edit_cursor']['min_snap_dist']
min_centre_dist = min(min_snap_dist, self.height() // 2)
min_y_offset = min_centre_dist
max_y_offset = self.height() - min_centre_dist
if cursor_rel_y < min_centre_dist:
is_scrolling_required = True
new_px_offset = self._px_offset - (min_y_offset - cursor_rel_y)
elif cursor_rel_y >= max_y_offset:
is_scrolling_required = True
new_px_offset = self._px_offset + (cursor_rel_y - max_y_offset)
if is_scrolling_required:
self.followCursor.emit(str(new_px_offset))
self.update()
def paintEvent(self, event):
start = time.time()
painter = QPainter(self)
# Background
painter.setBackground(self._config['canvas_bg_colour'])
painter.eraseRect(QRect(0, 0, self._width, self.height()))
# Get grid pattern info
grid_mgr = self._ui_model.get_grid_manager()
gp_id = grid_mgr.get_selected_grid_pattern_id()
if gp_id == None:
return
gp = grid_mgr.get_grid_pattern(gp_id)
gp_length = gp.get_length()
gp_lines = gp.get_lines()
selected_line_ts = gp.get_selected_line()
# Column background
painter.setBackground(self._config['bg_colour'])
length_rems = gp_length.beats * tstamp.BEAT + gp_length.rem
height_px = length_rems * self._px_per_beat // tstamp.BEAT
bg_extent = height_px - self._px_offset
painter.eraseRect(QRect(0, 0, self._width, bg_extent))
# Grid lines
selected_line_found = False
for line in gp_lines:
line_ts_raw, line_style = line
line_ts = tstamp.Tstamp(line_ts_raw)
if line_ts >= gp_length:
continue
abs_y = utils.get_px_from_tstamp(line_ts, self._px_per_beat)
y_offset = abs_y - self._px_offset
if not 0 <= y_offset < self.height():
continue
pen = QPen(self._config['grid']['styles'][line_style])
painter.setPen(pen)
painter.drawLine(QPoint(0, y_offset), QPoint(self._width - 1, y_offset))
if line_ts == selected_line_ts:
selected_line_found = True
if selected_line_found:
cursor_config = self._config['grid']['edit_cursor']
cursor_max_y = (cursor_config['height'] - 1) // 2
abs_y = utils.get_px_from_tstamp(selected_line_ts, self._px_per_beat)
y_offset = abs_y - self._px_offset
painter.setRenderHint(QPainter.Antialiasing)
painter.translate(QPointF(0.5, 0.5 + y_offset))
painter.setPen(cursor_config['colour'])
painter.setBrush(cursor_config['colour'])
painter.drawPolygon(QPolygon([
QPoint(0, cursor_max_y),
QPoint(cursor_config['width'], 0),
QPoint(0, -cursor_max_y)]))
end = time.time()
elapsed = end - start
#print('Grid pattern view updated in {:.2f} ms'.format(elapsed * 1000))
def mousePressEvent(self, event):
if not event.buttons() == Qt.LeftButton:
return
# Get grid pattern info
grid_mgr = self._ui_model.get_grid_manager()
gp_id = grid_mgr.get_selected_grid_pattern_id()
if gp_id == None:
return
gp = grid_mgr.get_grid_pattern(gp_id)
gp_length = gp.get_length()
gp_lines = gp.get_lines()
# Get timestamp at clicked position
rel_y_offset = event.y()
y_offset = rel_y_offset + self._px_offset
click_ts = utils.get_tstamp_from_px(y_offset, self._px_per_beat)
# Find the nearest grid line
nearest_ts = None
nearest_dist = gp_length * 2
for line in gp_lines:
line_ts, _ = line
dist = abs(click_ts - line_ts)
if dist < nearest_dist:
nearest_ts = line_ts
nearest_dist = dist
assert nearest_ts != None
gp.select_line(nearest_ts)
self._updater.signal_update('signal_grid_pattern_line_selection')
def keyPressEvent(self, event):
# Get grid pattern info
grid_mgr = self._ui_model.get_grid_manager()
gp_id = grid_mgr.get_selected_grid_pattern_id()
if gp_id == None:
| |
in seen:
seen.add(lowest + can_replicate)
queue.append(lowest + can_replicate)
if lowest + can_replicate - 1 not in seen:
seen.add(lowest + can_replicate - 1)
queue.append(lowest + can_replicate - 1)
for exclude in xrange(0, min(remaining_budget, partial) + 1):
cand = get_expected(placed, lowest, exclude
) - exclude - needed_budget
ret = max(ret, cand)
print 'Case #%d: %.10lf' % (cc + 1, ret)
return exclude
def func_7d0669129fb84f4ea8d73c092b70937a(infile):
cases = int(infile.readline())
for cc in xrange(cases):
budget, bets = map(int, infile.readline().split())
placed = sorted(map(int, infile.readline().split()))
ret = 0.0
queue = [1] + placed + [(p - 1) for p in placed] + [(p + 1) for p in
placed]
queue = sorted(set(queue))
seen = set(queue)
while queue:
lowest = queue.pop()
if lowest == 0:
continue
needed_budget = (37 - len(placed)) * lowest
for p in placed:
needed_budget += max(0, lowest - p)
if budget < needed_budget:
continue
remaining_budget = budget - needed_budget
partial = len([p for p in placed if p <= lowest])
lowest_cnt = 37 - len(placed) + partial
if lowest_cnt == 0:
continue
larger = [p for p in placed if p > lowest]
if larger:
next_larger = min(larger)
can_replicate = min(next_larger - lowest - 1,
remaining_budget / lowest_cnt)
else:
can_replicate = remaining_budget / lowest_cnt
if can_replicate > 0:
if lowest + can_replicate not in seen:
seen.add(lowest + can_replicate)
queue.append(lowest + can_replicate)
if lowest + can_replicate - 1 not in seen:
seen.add(lowest + can_replicate - 1)
queue.append(lowest + can_replicate - 1)
for exclude in xrange(0, min(remaining_budget, partial) + 1):
cand = get_expected(placed, lowest, exclude
) - exclude - needed_budget
ret = max(ret, cand)
print 'Case #%d: %.10lf' % (cc + 1, ret)
return budget
def func_ee78176ad994479d8f81063d7bc240fa(infile):
cases = int(infile.readline())
for cc in xrange(cases):
budget, bets = map(int, infile.readline().split())
placed = sorted(map(int, infile.readline().split()))
ret = 0.0
queue = [1] + placed + [(p - 1) for p in placed] + [(p + 1) for p in
placed]
queue = sorted(set(queue))
seen = set(queue)
while queue:
lowest = queue.pop()
if lowest == 0:
continue
needed_budget = (37 - len(placed)) * lowest
for p in placed:
needed_budget += max(0, lowest - p)
if budget < needed_budget:
continue
remaining_budget = budget - needed_budget
partial = len([p for p in placed if p <= lowest])
lowest_cnt = 37 - len(placed) + partial
if lowest_cnt == 0:
continue
larger = [p for p in placed if p > lowest]
if larger:
next_larger = min(larger)
can_replicate = min(next_larger - lowest - 1,
remaining_budget / lowest_cnt)
else:
can_replicate = remaining_budget / lowest_cnt
if can_replicate > 0:
if lowest + can_replicate not in seen:
seen.add(lowest + can_replicate)
queue.append(lowest + can_replicate)
if lowest + can_replicate - 1 not in seen:
seen.add(lowest + can_replicate - 1)
queue.append(lowest + can_replicate - 1)
for exclude in xrange(0, min(remaining_budget, partial) + 1):
cand = get_expected(placed, lowest, exclude
) - exclude - needed_budget
ret = max(ret, cand)
print 'Case #%d: %.10lf' % (cc + 1, ret)
return cand
def func_e55093cbd2724cdeb209473119f15cc6(infile):
cases = int(infile.readline())
for cc in xrange(cases):
budget, bets = map(int, infile.readline().split())
placed = sorted(map(int, infile.readline().split()))
ret = 0.0
queue = [1] + placed + [(p - 1) for p in placed] + [(p + 1) for p in
placed]
queue = sorted(set(queue))
seen = set(queue)
while queue:
lowest = queue.pop()
if lowest == 0:
continue
needed_budget = (37 - len(placed)) * lowest
for p in placed:
needed_budget += max(0, lowest - p)
if budget < needed_budget:
continue
remaining_budget = budget - needed_budget
partial = len([p for p in placed if p <= lowest])
lowest_cnt = 37 - len(placed) + partial
if lowest_cnt == 0:
continue
larger = [p for p in placed if p > lowest]
if larger:
next_larger = min(larger)
can_replicate = min(next_larger - lowest - 1,
remaining_budget / lowest_cnt)
else:
can_replicate = remaining_budget / lowest_cnt
if can_replicate > 0:
if lowest + can_replicate not in seen:
seen.add(lowest + can_replicate)
queue.append(lowest + can_replicate)
if lowest + can_replicate - 1 not in seen:
seen.add(lowest + can_replicate - 1)
queue.append(lowest + can_replicate - 1)
for exclude in xrange(0, min(remaining_budget, partial) + 1):
cand = get_expected(placed, lowest, exclude
) - exclude - needed_budget
ret = max(ret, cand)
print 'Case #%d: %.10lf' % (cc + 1, ret)
return p
def func_ec821cd60abb4227989f79f08aa7a81a(infile):
cases = int(infile.readline())
for cc in xrange(cases):
budget, bets = map(int, infile.readline().split())
placed = sorted(map(int, infile.readline().split()))
ret = 0.0
queue = [1] + placed + [(p - 1) for p in placed] + [(p + 1) for p in
placed]
queue = sorted(set(queue))
seen = set(queue)
while queue:
lowest = queue.pop()
if lowest == 0:
continue
needed_budget = (37 - len(placed)) * lowest
for p in placed:
needed_budget += max(0, lowest - p)
if budget < needed_budget:
continue
remaining_budget = budget - needed_budget
partial = len([p for p in placed if p <= lowest])
lowest_cnt = 37 - len(placed) + partial
if lowest_cnt == 0:
continue
larger = [p for p in placed if p > lowest]
if larger:
next_larger = min(larger)
can_replicate = min(next_larger - lowest - 1,
remaining_budget / lowest_cnt)
else:
can_replicate = remaining_budget / lowest_cnt
if can_replicate > 0:
if lowest + can_replicate not in seen:
seen.add(lowest + can_replicate)
queue.append(lowest + can_replicate)
if lowest + can_replicate - 1 not in seen:
seen.add(lowest + can_replicate - 1)
queue.append(lowest + can_replicate - 1)
for exclude in xrange(0, min(remaining_budget, partial) + 1):
cand = get_expected(placed, lowest, exclude
) - exclude - needed_budget
ret = max(ret, cand)
print 'Case #%d: %.10lf' % (cc + 1, ret)
return cases
def func_29b7b6e45d8048db8d9374e3d188b405(infile):
cases = int(infile.readline())
for cc in xrange(cases):
budget, bets = map(int, infile.readline().split())
placed = sorted(map(int, infile.readline().split()))
ret = 0.0
queue = [1] + placed + [(p - 1) for p in placed] + [(p + 1) for p in
placed]
queue = sorted(set(queue))
seen = set(queue)
while queue:
lowest = queue.pop()
if lowest == 0:
continue
needed_budget = (37 - len(placed)) * lowest
for p in placed:
needed_budget += max(0, lowest - p)
if budget < needed_budget:
continue
remaining_budget = budget - needed_budget
partial = len([p for p in placed if p <= lowest])
lowest_cnt = 37 - len(placed) + partial
if lowest_cnt == 0:
continue
larger = [p for p in placed if p > lowest]
if larger:
next_larger = min(larger)
can_replicate = min(next_larger - lowest - 1,
remaining_budget / lowest_cnt)
else:
can_replicate = remaining_budget / lowest_cnt
if can_replicate > 0:
if lowest + can_replicate not in seen:
seen.add(lowest + can_replicate)
queue.append(lowest + can_replicate)
if lowest + can_replicate - 1 not in seen:
seen.add(lowest + can_replicate - 1)
queue.append(lowest + can_replicate - 1)
for exclude in xrange(0, min(remaining_budget, partial) + 1):
cand = get_expected(placed, lowest, exclude
) - exclude - needed_budget
ret = max(ret, cand)
print 'Case #%d: %.10lf' % (cc + 1, ret)
return remaining_budget
def func_df5e424df57148b68dc3f8fbb20a362d(infile):
cases = int(infile.readline())
for cc in xrange(cases):
budget, bets = map(int, infile.readline().split())
placed = sorted(map(int, infile.readline().split()))
ret = 0.0
queue = [1] + placed + [(p - 1) for p in placed] + [(p + 1) for p in
placed]
queue = sorted(set(queue))
seen = set(queue)
while queue:
lowest = queue.pop()
if lowest == 0:
continue
needed_budget = (37 - len(placed)) * lowest
for p in placed:
needed_budget += max(0, lowest - p)
if budget < needed_budget:
continue
remaining_budget = budget - needed_budget
partial = len([p for p in placed if p <= lowest])
lowest_cnt = 37 - len(placed) + partial
if lowest_cnt == 0:
continue
larger = [p for p in placed if p > lowest]
if larger:
next_larger = min(larger)
can_replicate = min(next_larger - lowest - 1,
remaining_budget / lowest_cnt)
else:
can_replicate = remaining_budget / lowest_cnt
if can_replicate > 0:
if lowest + can_replicate not in seen:
seen.add(lowest + can_replicate)
queue.append(lowest + can_replicate)
if lowest + can_replicate - 1 not in seen:
seen.add(lowest + can_replicate - 1)
queue.append(lowest + can_replicate - 1)
= (Building_Info[i,3], Building_Info[i,4]) # in m
Site_Dimensions[j] = (Building_Info[i,5], Building_Info[i,6]) # in m
Type[j] = Building_Info[i,7]
Solar_Roof_Area[j] = Building_Info[i,8]
Height[j] = Building_Info[i,9] ### in ft!!
# Create the Low and High Sequence values that control mutation in optimization
Low_Seq = []
High_Seq = []
for i in range(Num_Buildings): ## TO DO: CAN INCLUDE THE MAX & MIN BLDG TYPES IN THE LOW AND HIGH SEQ
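# High_Seq caps the count of each building type at the smallest of: the per-type
# maximum, the number that fits within the total floor-area budget (Max_GFA), the
# number that fits on the available site area (Max_Site_GFA), and the number allowed
# by the floor-area ratio (Max_FAR * Max_Site_GFA); Low_Seq keeps the minimum at zero.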
Low_Seq += [0]
High_Seq += [min(Max_Buildings_per_Type, np.floor(Max_GFA/GFA[i+1]), np.floor(Max_Site_GFA/Site_GFA[i+1]), np.floor(Max_FAR*Max_Site_GFA/GFA[i+1]))]
if High_Seq[i] == 0.0:
print ("Warning: Building "+str(i)+" has an invalid site or floor area constraint given the problem space. Check inputs and try again.")
sys.exit()
Low_Seq += [Supply_Min]
Low_Seq += [Supply_Min]
High_Seq += [Num_Engines]
High_Seq += [Num_Chillers]
#Low_Seq += [Min_Solar]
#High_Seq += [Max_Solar]
if CWWTP_Mode == 0:
Low_Seq += [Min_WWT]
High_Seq += [Num_WWT]
def SupplyandDemandOptimization(Building_Var_Inputs):
Internal_Start = timeit.default_timer()
'''-----------------------------------------------------------------------------------------------'''
### Use the input variables to create an overall demand file and then call the required functions. #
'''-----------------------------------------------------------------------------------------------'''
# First create a dictionary of building input variables
Building_Vars = {}
for i in range(Num_Buildings): ## MODIFIED it was 21 and it wasn't a loop
Building_Vars[i+1] = Building_Var_Inputs[i] ###### Building_Vars = number of each building type
Engine_Var = Building_Var_Inputs[Num_Buildings]
Chiller_Var = Building_Var_Inputs[Num_Buildings+1]
Comm_Solar_Var = 0 #Building_Var_Inputs[Num_Buildings+2]
if CWWTP_Mode == 0:
WWT_Var = Building_Var_Inputs[Num_Buildings+2]
else:
WWT_Var = 3
# Comm_Solar_Type_Var = 1 ## ?? WHAT IS IT? Only the first solar type is used! NOT OPTIMIZING FOR SOLAR PANEL TYPE
'''-----------------------------------------------------------------------------------------------'''
## Trivial Case Avoidance
'''-----------------------------------------------------------------------------------------------'''
if np.sum(Building_Var_Inputs[:Num_Buildings]) == 0: ## TRIVIAL CASE AVOIDANCE
Run_Result = np.zeros((1,Vars_Plus_Output))
Run_Result[0][Num_Buildings] = Engine_Var # i.e. element 21
Run_Result[0][Num_Buildings+1] = Chiller_Var # i.e. element 22
Run_Result[0][Num_Buildings+2] = Comm_Solar_Var # i.e. element 23
Run_Result[0][Num_Buildings+3] = WWT_Var # i.e. element 24
return ((0, 0,),
((Max_Site_GFA-0)/Max_Site_GFA,
(0-Min_GFA)/Min_GFA,
(Max_GFA-0)/Max_GFA, ), Run_Result) # Update based on whatever needs to be optimized
# Use the Building_Vars dictionary and the dictionary of demands to create an aggregate function of demand
# Note that the Diversifier Peak is an assumption
Diversifier_Peak = 0.8 ## ??
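# The diversifier acts as a diversity (coincidence) factor: individual building
# peaks do not all fall in the same hour, so the summed per-building demand is
# scaled by the assumed factor of 0.8 before it drives the supply sizing downstream.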
Aggregate_Demand = 0
for i in range(Num_Buildings):
j = i+1
Aggregate_Demand += Diversifier_Peak*(Building_Vars[j]*Demand_Types[j][:,0:4]) ## MODIFIED for water demand+syntax shortened (columnstack replaced)
'''-----------------------------------------------------------------------------------------------'''
### Adding the municipal demands to the created aggregate demands #
'''-----------------------------------------------------------------------------------------------'''
# Calculate total length and width of building sites
Total_Site_Length = 0
Total_Site_Width = 0
for i in range(Num_Buildings):
j = i+1
Total_Site_Length += Building_Vars[j]*Site_Dimensions[j][0]
Total_Site_Width += Building_Vars[j]*Site_Dimensions[j][1]
# Add in municipal loads # MODIFIED--WAS ERRONEOUS BEFORE
Curfew_Modifier = 0.50
Light_Spacing = 48.8 # m
Lights_Per_Side = 2
Light_Power = .190 # kW
Width_to_Length_Ratio = 1.0/8
hours = np.array(range(8760))
hours %= 24
hours_lights_on = np.logical_or(((hours >= 19) * (hours <= 23)), ((hours >= 0) * (hours <= 6)))
hours_lights_half_power = ((hours >= 2) * (hours <= 6))*(1-Curfew_Modifier)
## hours_lights_on-hours_lights_half_power results in 1 for hours with lights on, and curfew_modifier for half-powered hours
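# In other words: the number of street-light intervals is the total site length plus
# one eighth of the total site width, divided by Light_Spacing and rounded up, with
# Lights_Per_Side fixtures per interval at Light_Power kW each; the load applies in
# full during the lights-on hours (19:00-23:00 and 00:00-06:00) and is halved by the
# curfew modifier between 02:00 and 06:00.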
Aggregate_Demand[:,0] += (hours_lights_on-hours_lights_half_power)*(np.ceil((Total_Site_Length+Width_to_Length_Ratio*Total_Site_Width)/Light_Spacing)*Lights_Per_Side*Light_Power)
# Save the loads at this point for use later
Final_Demand = copy.deepcopy(Aggregate_Demand)
'''-----------------------------------------------------------------------------------------------'''
### Initiate TES based on the max raw hourly thermal demand #
'''-----------------------------------------------------------------------------------------------'''
TES_Max = np.max(Aggregate_Demand[:,1]) * TES_Max_Hours ## Storage capacity = TES_Max_Hours hours x peak annual hour heat load
TES_Capex = 95*TES_Max/1000 * USD_2008_to_2019 # In 2019 USD # Averaged based on Table 8 from Cost for Sensible and other heat storage... @ D:\PhD\+Main Research Directory\W&WW+low-heat applications\+++ TES
'''-----------------------------------------------------------------------------------------------'''
### Adding the losses to the demands #
'''-----------------------------------------------------------------------------------------------'''
Heat_Loss = 0.003 # kW/m
Cooling_Loss = 0.017 # kW/m
Electrical_Loss = 0.8568*0.06 # Decimal
''' See ISSST paper for losses on thermal side. For electrical, data is a combination of 6% loss on average
in the U.S. and calculations by Mungkung, et al. on the percentage makeup of those losses at the low
voltage level. References are:
Mungkung, et al.: http://www.wseas.us/e-library/conferences/2009/istanbul/TELE-INFO/TELE-INFO-02.pdf
EIA: http://www.eia.gov/tools/faqs/faq.cfm?id=105&t=3
'''
## MODIFIED: For loop -> in-place conversion
Aggregate_Demand[:,0] += Aggregate_Demand[:,0]*Electrical_Loss
Aggregate_Demand[:,1] += (Total_Site_Length+Total_Site_Width)*2*Heat_Loss*np.ones(len(Aggregate_Demand[:,0]))
Aggregate_Demand[:,2] += (Total_Site_Length+Total_Site_Width)*2*Cooling_Loss*np.ones(len(Aggregate_Demand[:,0]))
'''-----------------------------------------------------------------------------------------------'''
### Adding the chiller electrical/thermal demand to the aggregate electrical and thermal demands #
'''-----------------------------------------------------------------------------------------------'''
# Chiller_Hourly_Cooling_Results = np.zeros((8760)) ## MODIFIED for performance
Chiller_COP_Results = np.zeros((8760)) ## MODIFIED for performance
# UNUSED: Electrical_Demand = np.zeros((8760)) ## MODIFIED for performance
Chiller_Costs = np.zeros((8760)) ## MODIFIED for performance
Chilled_Water_Supply_Temperature = 44.0 # in deg F ## WHERE DID THIS COME FROM?
Number_Iterations = 1 ## why??
Heat_Source_Temperature = 100 ## In deg F (not deg C)
Engine_Demand = np.zeros(shape=(8760,2))
for i in range(len(Aggregate_Demand[:,0])):
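# Hourly_Chiller_Result indices used below: [1] adds electrical demand and [2] adds
# heat demand imposed by the chiller, [4] is the chiller COP and [5] its hourly
# cost; index [3] (hourly cooling delivered) is computed but not used further here.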
Hourly_Chiller_Result = Chiller_Types[Chiller_Var](Chilled_Water_Supply_Temperature, Hourly_Wet_Bulb[i]*9/5+32, Hourly_Temperature[i]*9/5+32, Aggregate_Demand[i,2], Number_Iterations, Heat_Source_Temperature)[0:6]
# Chiller_Hourly_Cooling_Results[i] = Hourly_Chiller_Result[3] ## UNUSED
Chiller_COP_Results[i] = Hourly_Chiller_Result[4] # MODIFIED
Chiller_Costs[i] = Hourly_Chiller_Result[5] # MODIFIED
Engine_Demand[i,0] = Aggregate_Demand[i,0]+Hourly_Chiller_Result[1]
Engine_Demand[i,1] = Aggregate_Demand[i,1]+Hourly_Chiller_Result[2]
## Creating the total energy and wastewater demand for the neighborhood (used for comparing neighborhoods)
Total_Energy_Demand = np.sum(Engine_Demand[:,0]) + np.sum(Engine_Demand[:,1])
Total_WWater_Demand = np.sum(Aggregate_Demand[:,3])
# additional vars: Hourly_WWT_Results (use later), WWT_Var (add to optimization vars)
# additional functions: WWT_Types
'''-----------------------------------------------------------------------------------------------'''
### Adding the GW treatment electrical/thermal demand to the aggregate electrical and thermal demands #
'''-----------------------------------------------------------------------------------------------'''
if CWWTP_Mode == 0:
Hourly_WWT_Results = WWT_Types[WWT_Var](Aggregate_Demand[:,3], Hourly_Temperature)
else:
Hourly_WWT_Results = WWT_Types[WWT_Var](Aggregate_Demand[:,3], Hourly_Temperature, Grid_Emissions)
Engine_Demand[:,0] += Hourly_WWT_Results[0]
Engine_Demand[:,1] += Hourly_WWT_Results[1]
WWT_Opex_Total = Hourly_WWT_Results[2] ## Annual value
WWT_Capex_Total = Hourly_WWT_Results[3] ## Annual value
if CWWTP_Mode == 0:
WWT_GHG = 0
else:
WWT_GHG = Hourly_WWT_Results[4]
'''-----------------------------------------------------------------------------------------------'''
### Solar Production #
'''-----------------------------------------------------------------------------------------------'''
Excess_Electricity = np.zeros((8760)) ## Originally: grid_sales
Capital_Solar_Cost = 0
# Calculate loads and subtract from total electrical demand; calculate costs and total solar capacity installed
[Hourly_Solar_Generation, Capital_Solar_Cost] = [0,0]#Commercial_Solar_Types[Comm_Solar_Type_Var](np.array(range(8760)), UTC, Comm_Solar_Area, Tilt, Azimuth, Latitude, Longitude, Hourly_DNI, Hourly_DHI, Hourly_GHI, Hourly_Albedo, Hourly_Temperature, Hourly_Wind_Speed, Site_Altitude)[3:5]
Engine_Demand[:,0] -= Hourly_Solar_Generation
Excess_Electricity = np.abs((Engine_Demand[:,0] < 0) * Engine_Demand[:,0]) # Excess electricity no. 1
Engine_Demand[:,0] += Excess_Electricity ## Hours with excess electricity are zeroed to avoid erroneous calculation in the CHPEngines.py with a negative Engine_Demand[i,0]
# Save the loads with a different name at this point for use later
Post_Solar_Demand = copy.deepcopy(Engine_Demand)
'''-----------------------------------------------------------------------------------------------'''
### Run the CHP engine with the demands + use the excess heat for ww treatment #
'''-----------------------------------------------------------------------------------------------'''
# Now run a control scheme that simply produces to the greatest demand and counts excess as waste
Power_to_Heat_Ratio = Power_to_Heat[Engine_Var]
Gas_Line_Pressure = 55.0
Fuel_Input_Results = np.zeros((8760))
CCHP_Capex = 0 # in $
CCHP_Opex = 0
Carbon_Emissions = np.zeros(8760) ## CHANGED TO ARRAY FOLLOWING IILP_TOY_OPT
Last_Part_Load = 0
Last_Num_Engines = 0
Excess_Heat = np.zeros((8760)) ## CHANGED TO ARRAY FOLLOWING IILP_TOY_OPT
TES = np.zeros((8760)) ## Thermal Energy Storage
## For the previous version of the code in which only the excess heat was used in the WWT, refer to Ch3_SF_CaseStudy_w_Storage_PreE_Consumption_for_WWT
for i in range(len(Engine_Demand[:,0])): ## MODIFIED: repetitive code excluded from the first if else
TES[i] = Hourly_TES_Coeff * TES[i-1] ## Depreciating the previous time-step's stored energy; each timestep is defined as 300s ## NOTE: CAPITAL AND O&M for TES is not included yet!
if Engine_Demand[i,1] < TES[i]: # More Stored heat than needed
TES[i] -= Engine_Demand[i,1]
Engine_Demand[i,1] = 0
else: # All the stored heat should be used and we'll need extra heat from the CCHP
Engine_Demand[i,1] -= TES[i]
TES[i] = 0
Test_Electricity = Engine_Demand[i,1]*Power_to_Heat_Ratio ## Electrical equivalent of the heat demand
if Engine_Demand[i,0] > Test_Electricity: ## heat is not the controlling load; produce electricity to supply the engine-demand --> We'll have excess heat
Hourly_Supply_Result = Supply_Types[Engine_Var](Site_Altitude, Hourly_Temperature[i], Gas_Line_Pressure, Engine_Demand[i,0], Last_Num_Engines, Last_Part_Load)
Last_Num_Engines = Hourly_Supply_Result[7]
Last_Part_Load = Hourly_Supply_Result[8]
if Hourly_Supply_Result[2] < Engine_Demand[i,1]: ## Checking the produced heat with the required heat ## HOW IS IT POSSIBLE?
Hourly_Supply_Result = Supply_Types[Engine_Var](Site_Altitude, Hourly_Temperature[i], Gas_Line_Pressure, Test_Electricity, Last_Num_Engines, Last_Part_Load)
Last_Num_Engines = Hourly_Supply_Result[7]
Last_Part_Load = Hourly_Supply_Result[8]
else: ## Heat is the controlling load, produce to satisfy the heat, we'll have excess electricity
Hourly_Supply_Result = Supply_Types[Engine_Var](Site_Altitude, Hourly_Temperature[i], Gas_Line_Pressure, Test_Electricity, Last_Num_Engines, Last_Part_Load)
Last_Num_Engines = Hourly_Supply_Result[7]
Last_Part_Load = Hourly_Supply_Result[8]
if Hourly_Supply_Result[3] < Engine_Demand[i,0]: ## Checking electricity with the existing demand ## HOW IS IT POSSIBLE? ## We'll have excess heat
Hourly_Supply_Result = Supply_Types[Engine_Var](Site_Altitude, Hourly_Temperature[i], Gas_Line_Pressure, Engine_Demand[i,0], Last_Num_Engines, Last_Part_Load)
Last_Num_Engines = Hourly_Supply_Result[7]
Last_Part_Load = Hourly_Supply_Result[8]
spr_buy_item_triggers("itm_steppe_horse", resources=["itm_saddle", "itm_wheat_sheaf"], herding=1)),
("pw_buy_arabian_horse_a",spr_buy_item_flags(7),"arabian_horse_a","bo_pw_horse", spr_buy_item_triggers("itm_arabian_horse_a", resources=["itm_saddle", "itm_wheat_sheaf"], herding=2)),
("pw_buy_arabian_horse_b",spr_buy_item_flags(8),"arabian_horse_b","bo_pw_horse", spr_buy_item_triggers("itm_arabian_horse_b", resources=["itm_saddle", "itm_wheat_sheaf"], herding=2)),
("pw_buy_courser",spr_buy_item_flags(10),"courser","bo_pw_horse", spr_buy_item_triggers("itm_courser", resources=["itm_saddle", "itm_wheat_sheaf"], herding=3)),
("pw_buy_hunter",spr_buy_item_flags(12),"hunting_horse","bo_pw_horse", spr_buy_item_triggers("itm_hunter", resources=["itm_saddle", "itm_wheat_sheaf"], herding=2)),
("pw_buy_warhorse",spr_buy_item_flags(15),"warhorse_chain","bo_pw_horse", spr_buy_item_triggers("itm_warhorse", resources=["itm_saddle", "itm_horse_armor", "itm_wheat_sheaf"], herding=3)),
("pw_buy_warhorse_steppe",spr_buy_item_flags(15),"warhorse_steppe","bo_pw_horse", spr_buy_item_triggers("itm_warhorse_steppe", resources=["itm_saddle", "itm_horse_armor", "itm_wheat_sheaf"], herding=3)),
("pw_buy_warhorse_sarranid",spr_buy_item_flags(15),"warhorse_sarranid","bo_pw_horse", spr_buy_item_triggers("itm_warhorse_sarranid", resources=["itm_saddle", "itm_horse_armor", ("itm_wheat_sheaf", 2)], herding=3)),
("pw_buy_charger",spr_buy_item_flags(17),"charger_new","bo_pw_horse", spr_buy_item_triggers("itm_charger", resources=["itm_saddle", "itm_horse_armor", ("itm_wheat_sheaf", 2)], herding=3)),
("pw_buy_plated_charger",spr_buy_item_flags(20),"plated_charger1","bo_pw_horse", spr_buy_item_triggers("itm_plated_charger", resources=["itm_saddle", "itm_horse_armor", ("itm_wheat_sheaf", 2)], herding=3)),
("pw_buy_saddle",spr_buy_item_flags(4),"pw_saddle","bo_pw_saddle", spr_buy_item_triggers("itm_saddle", pos_offset=(0,0,20), resources=["itm_leather_roll", "itm_board"], engineer=2)),
("pw_buy_horse_armor",spr_buy_item_flags(16),"pw_horse_armor","bo_pw_horse_armor", spr_buy_item_triggers("itm_horse_armor", pos_offset=(0,80,0), resources=[("itm_iron_bar", 4)], engineer=6)),
("pw_buy_woodcutter_axe",spr_buy_item_flags(2),"pw_wood_axe","bo_pw_weapon", spr_buy_item_triggers("itm_woodcutter_axe", resources=["itm_iron_bar_short", "itm_wood_pole_short"], engineer=2)),
("pw_buy_small_mining_pick",spr_buy_item_flags(2),"pw_small_mining_pick","bo_pw_weapon", spr_buy_item_triggers("itm_small_mining_pick", resources=["itm_iron_bar_short", "itm_wood_pole_short"], engineer=2)),
("pw_buy_mining_pick",spr_buy_item_flags(5),"pw_mining_pick","bo_pw_weapon", spr_buy_item_triggers("itm_mining_pick", resources=["itm_iron_bar", "itm_wood_pole_short"], engineer=3)),
("pw_buy_repair_hammer",spr_buy_item_flags(3),"pw_repair_hammer","bo_pw_weapon_small", spr_buy_item_triggers("itm_repair_hammer", resources=["itm_iron_piece", "itm_stick"], engineer=2)),
("pw_buy_lock_pick",spr_buy_item_flags(8),"pw_lock_pick","bo_pw_weapon_small", spr_buy_item_triggers("itm_lock_pick", resources=["itm_iron_piece"], engineer=3)),
("pw_buy_bucket",spr_buy_item_flags(1),"pw_bucket_ground","bo_pw_bucket", spr_buy_item_triggers("itm_bucket", pos_offset=(0,0,20), resources=[("itm_board", 2), "itm_iron_piece"], engineer=2)),
("pw_buy_fishing_spear",spr_buy_item_flags(2),"pw_fishing_spear","bo_pw_weapon_big", spr_buy_item_triggers("itm_fishing_spear", resources=["itm_wood_pole", "itm_iron_bar_short"], engineer=2)),
("pw_buy_fishing_net",spr_buy_item_flags(4),"pw_fishing_net_b","bo_pw_fishing_net_b", spr_buy_item_triggers("itm_fishing_net", pos_offset=(150,-100,0), rotate=(0,-90,0), resources=[("itm_wood_pole_short", 2), ("itm_linen_thread", 2)], engineer=2)),
("pw_buy_sickle",spr_buy_item_flags(1),"pw_sickle","bo_pw_weapon_small", spr_buy_item_triggers("itm_sickle", resources=["itm_iron_bar_short", "itm_stick"], engineer=2)),
("pw_buy_scythe",spr_buy_item_flags(3),"pw_scythe","bo_pw_weapon_big", spr_buy_item_triggers("itm_scythe", resources=["itm_iron_bar", "itm_wood_pole"], engineer=2)),
("pw_buy_wheat_sack",spr_buy_item_flags(1),"pw_wheat_sack","bo_pw_weapon_small", spr_buy_item_triggers("itm_wheat_sack", pos_offset=(0,0,-20), resources=["itm_wheat_sheaf"])),
("pw_buy_kitchen_knife",spr_buy_item_flags(1),"pw_kitchen_knife","bo_pw_weapon_small", spr_buy_item_triggers("itm_kitchen_knife", resources=["itm_iron_piece"], engineer=2)),
("pw_buy_cleaver",spr_buy_item_flags(2),"cleaver_new","bo_pw_weapon_small", spr_buy_item_triggers("itm_cleaver", resources=["itm_iron_bar_short"], engineer=2)),
("pw_buy_knife",spr_buy_item_flags(1),"peasant_knife_new","bo_pw_weapon_small", spr_buy_item_triggers("itm_knife", resources=["itm_iron_bar_short"], engineer=2)),
("pw_buy_butchering_knife",spr_buy_item_flags(2),"khyber_knife_new","bo_pw_weapon_small", spr_buy_item_triggers("itm_butchering_knife", resources=["itm_iron_bar_short"], engineer=3)),
("pw_buy_broom",spr_buy_item_flags(1),"pw_broom","bo_pw_weapon", spr_buy_item_triggers("itm_broom", resources=["itm_wood_pole_short", "itm_flax_bundle"], engineer=0)),
("pw_buy_herding_crook",spr_buy_item_flags(2),"pw_herding_crook","bo_pw_weapon_big", spr_buy_item_triggers("itm_herding_crook", resources=["itm_wood_pole", "itm_iron_piece"], engineer=2)),
("pw_buy_surgeon_scalpel",spr_buy_item_flags(8),"dagger_b_scabbard","bo_pw_weapon_small", spr_buy_item_triggers("itm_surgeon_scalpel", resources=["itm_iron_piece"], engineer=4)),
("pw_buy_dagger",spr_buy_item_flags(3),"scab_dagger","bo_pw_weapon_small", spr_buy_item_triggers("itm_dagger", resources=["itm_iron_bar_short"], engineer=3)),
("pw_buy_poisoned_dagger",spr_buy_item_flags(20),"scab_dagger","bo_pw_weapon_small", spr_buy_item_triggers("itm_poisoned_dagger", resources=["itm_dagger", "itm_poison_herb"], engineer=4)),
("pw_buy_thin_lance",spr_buy_item_flags(3),"spear_d_2-8m","bo_pw_weapon_big", spr_buy_item_triggers("itm_thin_lance", resources=["itm_wood_pole", "itm_iron_piece"], engineer=2)),
("pw_buy_torch",spr_use_time(1),"pw_torch","bo_pw_weapon_small", spr_buy_item_triggers("itm_torch", resources=["itm_stick"], engineer=1)),
("pw_buy_banner",spr_use_time(1),"pw_banner_pole_only","bo_pw_banner_pole", spr_buy_banner_triggers("itm_pw_banner_pole_a01")),
("pw_buy_banner_mercenary",spr_use_time(2),"pw_banner_pole_only","bo_pw_banner_pole", spr_buy_banner_triggers("itm_pw_banner_pole_a01", mercenary=True)),
("pw_buy_book_a",spr_buy_item_flags(20),"pw_book_a","bo_pw_weapon_small", spr_buy_item_triggers("itm_book_a", resources=["itm_linen_cloth_small","itm_leather_piece"], engineer=2)),
("pw_buy_book_b",spr_buy_item_flags(25),"pw_book_b","bo_pw_weapon_small", spr_buy_item_triggers("itm_book_b", resources=["itm_linen_cloth_small","itm_leather_piece"], engineer=2)),
("pw_buy_book_c",spr_buy_item_flags(27),"pw_book_c","bo_pw_weapon_small", spr_buy_item_triggers("itm_book_c", resources=["itm_linen_cloth_small","itm_leather_piece"], engineer=2)),
("pw_buy_book_d",spr_buy_item_flags(10),"pw_book_d","bo_pw_weapon_small", spr_buy_item_triggers("itm_book_d", resources=["itm_linen_cloth_small","itm_leather_piece"], engineer=2)),
("pw_buy_book_e",spr_buy_item_flags(5),"pw_book_e","bo_pw_weapon_small", spr_buy_item_triggers("itm_book_e", resources=["itm_linen_cloth_small","itm_leather_piece"], engineer=2)),
("pw_buy_book_f",spr_buy_item_flags(60),"pw_book_f","bo_pw_weapon_small", spr_buy_item_triggers("itm_book_f", resources=["itm_linen_cloth_small","itm_leather_piece"], engineer=2)),
("pw_buy_lyre",spr_buy_item_flags(7),"pw_lyre_carry","bo_pw_weapon_small", spr_buy_item_triggers("itm_lyre", pos_offset=(0,0,7), resources=["itm_board"], engineer=2)),
("pw_buy_lute",spr_buy_item_flags(8),"pw_lute_carry","bo_pw_weapon_small", spr_buy_item_triggers("itm_lute", pos_offset=(0,0,15), resources=[("itm_board", 2), "itm_stick"], engineer=3)),
("pw_buy_warhorn",spr_buy_item_flags(8),"warhorn","bo_pw_weapon_small", spr_buy_item_triggers("itm_warhorn", pos_offset=(0,0,15), resources=["itm_iron_piece", "itm_leather_piece"], engineer=3)),
("pw_buy_dart",spr_buy_item_flags(1),"pw_dart","bo_pw_weapon_small", spr_buy_item_triggers("itm_dart")),
("pw_buy_die",spr_buy_item_flags(1),"pw_die","bo_pw_weapon_small", spr_buy_item_triggers("itm_die", pos_offset=(7,7,0), resources=["itm_stick"], engineer=2)),
("pw_buy_club",spr_buy_item_flags(3),"club","bo_pw_weapon", spr_buy_item_triggers("itm_club", resources=["itm_wood_pole_short"], engineer=2)),
("pw_buy_spiked_club",spr_buy_item_flags(5),"spiked_club","bo_pw_weapon", spr_buy_item_triggers("itm_spiked_club", resources=["itm_wood_pole_short"], engineer=2)),
("pw_buy_old_knife",spr_buy_item_flags(3),"peasant_knife","bo_pw_weapon_small", spr_buy_item_triggers("itm_old_knife", resources=["itm_iron_piece"], engineer=2)),
("pw_buy_crude_spear",spr_buy_item_flags(5),"spear_g_1-9m","bo_pw_weapon_big", spr_buy_item_triggers("itm_crude_spear", resources=["itm_wood_pole_short"], engineer=2)),
("pw_buy_blunt_falchion",spr_buy_item_flags(3),"falchion","bo_pw_weapon_small", spr_buy_item_triggers("itm_blunt_falchion", resources=["itm_iron_bar_short"], engineer=2)),
("pw_buy_chipped_falchion",spr_buy_item_flags(5),"falchion","bo_pw_weapon_small", spr_buy_item_triggers("itm_chipped_falchion", resources=["itm_iron_bar_short"], engineer=2)),
("pw_buy_rusty_sword",spr_buy_item_flags(3),"sword_rusty_a","bo_pw_weapon", spr_buy_item_triggers("itm_rusty_sword", resources=["itm_iron_bar_short"], engineer=2)),
("pw_buy_worn_sword",spr_buy_item_flags(5),"sword_norman_rusty","bo_pw_weapon", spr_buy_item_triggers("itm_worn_sword", resources=["itm_iron_bar_short"], engineer=2)),
("pw_buy_bent_lance",spr_buy_item_flags(5),"spear","bo_pw_weapon_big", spr_buy_item_triggers("itm_bent_lance", resources=["itm_wood_pole_short"], engineer=2)),
# Persistent Kingdoms Clothes & Armour
("cm_buy_blue_shirt",spr_buy_item_flags(2),"shirt_c","bo_pw_armor_body", spr_buy_item_triggers("itm_shirt_c", resources=["itm_linen_cloth"], tailoring=1)),
("cm_buy_green_shirt",spr_buy_item_flags(2),"shirt_b","bo_pw_armor_body", spr_buy_item_triggers("itm_shirt_b", resources=["itm_linen_cloth"], tailoring=1)),
("cm_buy_decorated_khergit_vest",spr_buy_item_flags(3),"khergit_vest_d","bo_pw_armor_body", spr_buy_item_triggers("itm_khergit_vest_d", resources=["itm_linen_cloth"], tailoring=2)),
("cm_buy_white_khergit_vest",spr_buy_item_flags(3),"khergit_vest_a","bo_pw_armor_body", spr_buy_item_triggers("itm_khergit_vest_a", resources=["itm_linen_cloth"], tailoring=2)),
("cm_buy_blue_kaftan",spr_buy_item_flags(3),"blue_kaftan","bo_pw_armor_body", spr_buy_item_triggers("itm_blue_kaftan", resources=["itm_linen_cloth"], tailoring=2)),
("cm_buy_red_kaftan",spr_buy_item_flags(3),"drz_kaftan","bo_pw_armor_body", spr_buy_item_triggers("itm_drz_kaftan", resources=["itm_linen_cloth"], tailoring=2)),
("cm_buy_gold_tourney_armor",spr_buy_item_flags(5),"padded_armor","bo_pw_armor_body", spr_buy_item_triggers("itm_padded_armor", resources=["itm_linen_cloth","itm_leather_piece"], tailoring=1)),
("cm_buy_ragged_armor",spr_buy_item_flags(11),"ragged_armour_a","bo_pw_armor_body", spr_buy_item_triggers("itm_ragged_armour_a", resources=["itm_iron_bar_short","itm_linen_cloth"], engineer=3)),
("cm_buy_byrnie_with_green_tunic",spr_buy_item_flags(11),"byrnie_e","bo_pw_armor_body", spr_buy_item_triggers("itm_byrnie_e", resources=["itm_iron_bar_short","itm_linen_cloth"], engineer=3)),
("cm_buy_byrnie_with_blue_tunic",spr_buy_item_flags(11),"byrnie_f","bo_pw_armor_body", spr_buy_item_triggers("itm_byrnie_f", resources=["itm_iron_bar_short","itm_linen_cloth"], engineer=3)),
("cm_buy_studded_coat_with_mail",spr_buy_item_flags(11),"studded_coat_mail_a","bo_pw_armor_body", spr_buy_item_triggers("itm_studded_coat_mail_a", resources=["itm_iron_bar_short","itm_leather_roll"], engineer=3)),
("cm_buy_red_gambeson",spr_buy_item_flags(10),"gambeson_red","bo_pw_armor_body", spr_buy_item_triggers("itm_gambeson_red", resources=["itm_linen_cloth","itm_leather_piece"], tailoring=3)),
("cm_buy_brown_gambeson",spr_buy_item_flags(10),"gambeson_brown","bo_pw_armor_body", spr_buy_item_triggers("itm_gambeson_brown", resources=["itm_leather_roll","itm_linen_cloth_small"], tailoring=3)),
("cm_buy_gambeson",spr_buy_item_flags(10),"gambeson","bo_pw_armor_body", spr_buy_item_triggers("itm_gambeson", resources=["itm_linen_cloth","itm_leather_piece"], tailoring=3)),
("cm_buy_padded_leather",spr_buy_item_flags(8),"fred_padded_leather","bo_pw_armor_body", spr_buy_item_triggers("itm_fred_padded_leather", resources=["itm_leather_roll","itm_linen_cloth_small"], tailoring=3, engineer=2)),
("cm_buy_padded_leather_with_cape",spr_buy_item_flags(10),"fred_padded_leather_cowl","bo_pw_armor_body", spr_buy_item_triggers("itm_fred_padded_leather_cowl", resources=["itm_leather_roll",("itm_linen_cloth_small",2)], tailoring=3, engineer=2)),
("cm_buy_brown_assassin_armor",spr_buy_item_flags(11),"brown_assassin_armor","bo_pw_armor_body", spr_buy_item_triggers("itm_brown_assassin_armor", resources=[("itm_linen_cloth",2)], tailoring=2, engineer=2)),
("cm_buy_white_assassin_armor",spr_buy_item_flags(11),"white_assassin_armor","bo_pw_armor_body", spr_buy_item_triggers("itm_white_assassin_armor", resources=[("itm_linen_cloth",2)], tailoring=2, engineer=2)),
("cm_buy_green_assassin_armor",spr_buy_item_flags(11),"green_assassin_armor","bo_pw_armor_body", spr_buy_item_triggers("itm_green_assassin_armor", resources=[("itm_linen_cloth",2)], tailoring=2, engineer=2)),
("cm_buy_aketon",spr_buy_item_flags(11),"aketon","bo_pw_armor_body", spr_buy_item_triggers("itm_aketon", resources=[("itm_linen_cloth",2)], tailoring=2, engineer=2)),
("cm_buy_light_mercenary_armor",spr_buy_item_flags(11),"light_mercenary_armor","bo_pw_armor_body", spr_buy_item_triggers("itm_light_mercenary_armor", resources=["itm_linen_cloth","itm_iron_bar_short"], engineer=3)),
("cm_buy_aketon_with_kneecops",spr_buy_item_flags(11),"aketon_kneecops","bo_pw_armor_body", spr_buy_item_triggers("itm_aketon_kneecops", resources=["itm_linen_cloth","itm_iron_bar_short"], engineer=3)),
("cm_buy_peasant_mail_with_cape",spr_buy_item_flags(11),"fred_peasant_mail","bo_pw_armor_body", spr_buy_item_triggers("itm_fred_peasant_mail", resources=["itm_iron_bar_short","itm_leather_piece"], engineer=3)),
("cm_buy_brown_gambeson_with_cape",spr_buy_item_flags(11),"cwe_archer_armor_2","bo_pw_armor_body", spr_buy_item_triggers("itm_cwe_archer_armor_2", resources=["itm_leather_roll","itm_leather_piece"], tailoring=2, engineer=2)),
("cm_buy_black_gambeson_with_cape",spr_buy_item_flags(11),"cwe_archer_armor_1","bo_pw_armor_body", spr_buy_item_triggers("itm_cwe_archer_armor_1", resources=["itm_leather_roll","itm_leather_piece"], tailoring=2, engineer=2)),
("cm_buy_padded_mail_with_white_shirt",spr_buy_item_flags(11),"padded_mail_a","bo_pw_armor_body", spr_buy_item_triggers("itm_padded_mail_a", resources=[("itm_iron_bar_short",2),"itm_linen_cloth_small"], engineer=3)),
("cm_buy_padded_mail_with_green_shirt",spr_buy_item_flags(11),"padded_mail_b","bo_pw_armor_body", spr_buy_item_triggers("itm_padded_mail_b", resources=[("itm_iron_bar_short",2),"itm_linen_cloth_small"], engineer=3)),
("cm_buy_light_hauberk",spr_buy_item_flags(11),"hauberk","bo_pw_armor_body", spr_buy_item_triggers("itm_hauberk", resources=["itm_iron_bar_short","itm_linen_cloth"], engineer=4)),
("cm_buy_green_light_brigandine",spr_buy_item_flags(11),"brigandine_green","bo_pw_armor_body", spr_buy_item_triggers("itm_brigandine_green", resources=["itm_iron_bar_short","itm_linen_cloth"], engineer=4)),
("cm_buy_blue_lamellar_vest",spr_buy_item_flags(11),"sar_lamellar_vest_blue","bo_pw_armor_body", spr_buy_item_triggers("itm_sar_lamellar_vest_blue", resources=["itm_iron_bar_short","itm_linen_cloth"], engineer=4)),
("cm_buy_red_lamellar_vest_with_mail",spr_buy_item_flags(11),"sar_lamellar_vest_mail_red","bo_pw_armor_body", spr_buy_item_triggers("itm_sar_lamellar_vest_mail_red", resources=[("itm_iron_bar_short",2),"itm_linen_cloth"], engineer=4)),
("cm_buy_black_gambeson_with_mail",spr_buy_item_flags(11),"cwe_sergeant_armor_2","bo_pw_armor_body", spr_buy_item_triggers("itm_cwe_sergeant_armor_2", resources=[("itm_iron_bar_short",2),"itm_linen_cloth"], engineer=4)),
("cm_buy_red_gambeson_with_mail",spr_buy_item_flags(11),"cwe_sergeant_armor_3","bo_pw_armor_body", spr_buy_item_triggers("itm_cwe_sergeant_armor_3", resources=[("itm_iron_bar_short",2),"itm_linen_cloth"], engineer=4)),
("cm_buy_brown_light_brigandine_with_mail",spr_buy_item_flags(11),"brigandine_brown_mail","bo_pw_armor_body", spr_buy_item_triggers("itm_brigandine_brown_mail", resources=[("itm_iron_bar_short",2),"itm_leather_roll"], engineer=4)),
("cm_buy_light_saracen_armor",spr_buy_item_flags(11),"turk_bandit_b","bo_pw_armor_body", spr_buy_item_triggers("itm_turk_bandit_b", resources=[("itm_iron_bar_short",2),"itm_linen_cloth_small"], engineer=4)),
("cm_buy_light_saracen_infantry_armor",spr_buy_item_flags(11),"turk_bandit_c","bo_pw_armor_body", spr_buy_item_triggers("itm_turk_bandit_c", resources=[("itm_iron_bar_short",2),"itm_linen_cloth"], engineer=4)),
("cm_buy_medium_mercenary_armor",spr_buy_item_flags(11),"medium_mercenary_armor","bo_pw_armor_body", spr_buy_item_triggers("itm_medium_mercenary_armor", resources=[("itm_iron_bar_short",2),"itm_linen_cloth"], engineer=4)),
("cm_buy_rus_lamellar_armor",spr_buy_item_flags(11),"rus_lamellar_a","bo_pw_armor_body", spr_buy_item_triggers("itm_rus_lamellar_a", resources=[("itm_iron_bar_short",2),"itm_linen_cloth"], engineer=4)),
("cm_buy_light_kuyak",spr_buy_item_flags(11),"kuyak_a","bo_pw_armor_body", spr_buy_item_triggers("itm_kuyak_a", resources=[("itm_iron_bar_short",2),"itm_leather_piece"], engineer=4)),
("cm_buy_saracen_infantry_armor",spr_buy_item_flags(11),"sar_infantry_armor","bo_pw_armor_body", spr_buy_item_triggers("itm_sar_infantry_armor", resources=[("itm_iron_bar_short",2),"itm_linen_cloth_small"], engineer=4)),
("cm_buy_medium_saracen_armor",spr_buy_item_flags(11),"cwe_armor_medium_tyrk_d","bo_pw_armor_body", spr_buy_item_triggers("itm_cwe_armor_medium_tyrk_d", resources=[("itm_iron_bar_short",2),"itm_linen_cloth_small"], engineer=4)),
("cm_buy_heavy_kuyak",spr_buy_item_flags(12),"kuyak_b","bo_pw_armor_body", spr_buy_item_triggers("itm_kuyak_b", resources=[("itm_iron_bar_short",2),"itm_leather_roll"], engineer=5)),
("cm_buy_saracen_lamellar_armor",spr_buy_item_flags(12),"sar_lamellar_armor","bo_pw_armor_body", spr_buy_item_triggers("itm_sar_lamellar_armor", resources=[("itm_iron_bar_short",2),"itm_linen_cloth"], engineer=5)),
("cm_buy_lamellar_armor",spr_buy_item_flags(15),"drz_lamellar_armor","bo_pw_armor_body", spr_buy_item_triggers("itm_drz_lamellar_armor", resources=[("itm_iron_bar_short",2),"itm_linen_cloth"], engineer=6)),
("cm_buy_heavy_saracen_armor",spr_buy_item_flags(15),"armor_medium_tyrk_f","bo_pw_armor_body", spr_buy_item_triggers("itm_armor_medium_tyrk_f", resources=[("itm_iron_bar_short",2),"itm_linen_cloth"], engineer=6)),
("cm_buy_heavy_hauberk",spr_buy_item_flags(15),"hauberk3","bo_pw_armor_body", spr_buy_item_triggers("itm_hauberk3", resources=[("itm_iron_bar_short",2),"itm_linen_cloth_small"], engineer=6)),
("cm_buy_rus_scale_armor",spr_buy_item_flags(15),"rus_scale","bo_pw_armor_body", spr_buy_item_triggers("itm_rus_scale", resources=[("itm_iron_bar_short",2),"itm_linen_cloth"], engineer=6)),
("cm_buy_heavy_hauberk_with_plate",spr_buy_item_flags(16),"hauberk2","bo_pw_armor_body", spr_buy_item_triggers("itm_hauberk2", resources=[("itm_iron_bar_short",2),"itm_linen_cloth"], engineer=6)),
("cm_buy_corrazina_green",spr_buy_item_flags(16),"corrazina_green","bo_pw_armor_body", spr_buy_item_triggers("itm_corrazina_green", resources=[("itm_iron_bar_short",2),"itm_linen_cloth"], engineer=6)),
("cm_buy_corrazina_red",spr_buy_item_flags(16),"corrazina_red","bo_pw_armor_body", spr_buy_item_triggers("itm_corrazina_red", resources=[("itm_iron_bar_short",2),"itm_linen_cloth"], engineer=6)),
("cm_buy_sarranid_guard_armor",spr_buy_item_flags(16),"sarranid_guard_armor","bo_pw_armor_body", spr_buy_item_triggers("itm_sarranid_guard_armor", resources=[("itm_iron_bar_short",2),"itm_linen_cloth"], engineer=6)),
("cm_buy_blue_brigandine_with_plate",spr_buy_item_flags(17),"brigandine_blue","bo_pw_armor_body", spr_buy_item_triggers("itm_brigandine_blue", resources=[("itm_iron_bar",2),"itm_linen_cloth"], engineer=6)),
("cm_buy_red_brigandine_with_plate",spr_buy_item_flags(17),"brigandine_red_a","bo_pw_armor_body", spr_buy_item_triggers("itm_brigandine_red_a", resources=[("itm_iron_bar",2),"itm_linen_cloth"], engineer=6)),
("cm_buy_elite_lamellar_armor",spr_buy_item_flags(17),"drz_elite_lamellar_armor","bo_pw_armor_body", spr_buy_item_triggers("itm_drz_elite_lamellar_armor", resources=[("itm_iron_bar",2),"itm_leather_piece","itm_linen_cloth"], engineer=6)),
("cm_buy_ghulam_lamellar_armor",spr_buy_item_flags(17),"ghulam_heavy_cavalryman_3","bo_pw_armor_body", spr_buy_item_triggers("itm_ghulam_heavy_cavalryman_3", resources=[("itm_iron_bar",2),"itm_leather_piece","itm_linen_cloth"], engineer=6)),
("cm_buy_heavy_mercenary_armor",spr_buy_item_flags(17),"heavy_mercenary_armor","bo_pw_armor_body", spr_buy_item_triggers("itm_heavy_mercenary_armor", resources=[("itm_iron_bar",2),"itm_leather_piece","itm_linen_cloth"], engineer=6)),
("cm_buy_red_brigandine_with_plate_and_mail",spr_buy_item_flags(17),"brigandine_red_mail","bo_pw_armor_body", spr_buy_item_triggers("itm_brigandine_red_mail", resources=[("itm_iron_bar",2),"itm_leather_piece","itm_linen_cloth"], engineer=6)),
("cm_buy_black_brigandine_with_plate_and_mail",spr_buy_item_flags(17),"brigandine_black_mail","bo_pw_armor_body", spr_buy_item_triggers("itm_brigandine_black_mail", resources=[("itm_iron_bar",2),"itm_leather_piece","itm_linen_cloth"], engineer=6)),
("cm_buy_sultan_armor",spr_buy_item_flags(20),"armor_sultan_saracens","bo_pw_armor_body", spr_buy_item_triggers("itm_armor_sultan_saracens", resources=[("itm_iron_bar",2),"itm_linen_cloth"], engineer=7)),
("cm_buy_blue_churburg",spr_buy_item_flags(20),"churburg_13","bo_pw_armor_body", spr_buy_item_triggers("itm_churburg_13", resources=[("itm_iron_bar_short",2),"itm_linen_cloth"], engineer=7)),
("cm_buy_decorated_red_churburg",spr_buy_item_flags(22),"churburg_13_brass","bo_pw_armor_body", spr_buy_item_triggers("itm_churburg_13_brass", resources=[("itm_iron_bar_long",2),"itm_leather_piece","itm_linen_cloth"], engineer=7)),
("cm_buy_decorated_grey_churburg",spr_buy_item_flags(22),"churburg","bo_pw_armor_body", spr_buy_item_triggers("itm_churburg", resources=[("itm_iron_bar_long",2),"itm_leather_piece","itm_linen_cloth"], engineer=7)),
("cm_buy_milanese_plate",spr_buy_item_flags(23),"armour_new_b","bo_pw_armor_body", spr_buy_item_triggers("itm_armour_new_b", resources=[("itm_iron_bar_long",2),"itm_leather_piece","itm_linen_cloth"], engineer=7)),
("cm_buy_gothic_plate",spr_buy_item_flags(23),"gothic_armour_plain","bo_pw_armor_body", spr_buy_item_triggers("itm_gothic_armour_plain", resources=[("itm_iron_bar_long",2),"itm_leather_piece","itm_linen_cloth"], engineer=7)),
("cm_buy_royal_plate",spr_buy_item_flags(23),"royal_plate","bo_pw_armor_body", spr_buy_item_triggers("itm_royal_plate", resources=[("itm_iron_bar_long",2),"itm_leather_piece","itm_linen_cloth"], engineer=7)),
("cm_buy_leather_gauntlets",spr_buy_item_flags(2),"leather_gauntlet_L","bo_pw_armor_hand", spr_buy_item_triggers("itm_leather_gauntlet", resources=["itm_leather_piece"], tailoring=2, engineer=4)),
("cm_buy_mail_gauntlets",spr_buy_item_flags(4),"mail_gauntlets_L","bo_pw_armor_hand", spr_buy_item_triggers("itm_mail_gauntlets", resources=["itm_iron_piece"], engineer=3)),
("cm_buy_demi_gauntlets",spr_buy_item_flags(5),"demi_gauntlets_L","bo_pw_armor_hand", spr_buy_item_triggers("itm_demi_gauntlets", resources=["itm_leather_piece","itm_iron_piece"], engineer=4)),
("cm_buy_saracen_gauntlets",spr_buy_item_flags(5),"gauntlets_arabs_a_L","bo_pw_armor_hand", spr_buy_item_triggers("itm_gauntlets_arabs_a", resources=["itm_leather_piece","itm_iron_piece"], engineer=5)),
("cm_buy_finger_gauntlets",spr_buy_item_flags(5),"finger_gauntlets_L","bo_pw_armor_hand", spr_buy_item_triggers("itm_finger_gauntlets", resources=["itm_leather_piece","itm_iron_piece"], engineer=5)),
("cm_buy_decorated_saracen_gauntlets",spr_buy_item_flags(5),"gauntlets_arabs_b_L","bo_pw_armor_hand", spr_buy_item_triggers("itm_gauntlets_arabs_b", resources=["itm_leather_piece",("itm_iron_piece",2)], engineer=5)),
("cm_buy_hourglass_gauntlets",spr_buy_item_flags(6),"hourglass_gauntlets_L","bo_pw_armor_hand", spr_buy_item_triggers("itm_hourglass_gauntlets", resources=["itm_leather_piece",("itm_iron_bar_short",2)], engineer=6)),
("cm_buy_decorated_hourglass_gauntlets",spr_buy_item_flags(6),"hourglass_gauntlets_ornate_L","bo_pw_armor_hand", spr_buy_item_triggers("itm_hourglass_gauntlets_ornate", resources=["itm_leather_piece",("itm_iron_bar_short",2)], engineer=6)),
("cm_buy_black_rich_boots",spr_buy_item_flags(4),"civil_rich_Boots_b","bo_pw_armor_foot", spr_buy_item_triggers("itm_civil_rich_boots_b", resources=[("itm_leather_piece",2)], tailoring=2, engineer=3)),
("cm_buy_red_rich_boots",spr_buy_item_flags(4),"civil_rich_Boots_a","bo_pw_armor_foot", spr_buy_item_triggers("itm_civil_rich_boots_a", resources=[("itm_leather_piece",2)], tailoring=2, engineer=3)),
("cm_buy_rus_cavalry_boots",spr_buy_item_flags(4),"rus_cav_boots","bo_pw_armor_foot", spr_buy_item_triggers("itm_rus_cav_boots", resources=[("itm_leather_piece",2)], tailoring=2, engineer=3)),
("cm_buy_saracen_leather_boots",spr_buy_item_flags(4),"sarranid_camel_boots1","bo_pw_armor_foot", spr_buy_item_triggers("itm_sarranid_camel_boots1", resources=[("itm_leather_piece",2)], tailoring=2, engineer=3)),
("cm_buy_noble_leather_boots",spr_buy_item_flags(4),"leather_boots","bo_pw_armor_foot", spr_buy_item_triggers("itm_leather_boots_noble", resources=[("itm_leather_piece",2)], tailoring=2, engineer=3)),
("cm_buy_red_saracen_leather_boots",spr_buy_item_flags(4),"sarranid_camel_boots2","bo_pw_armor_foot", spr_buy_item_triggers("itm_sarranid_camel_boots2", resources=[("itm_leather_piece",2)], tailoring=2, engineer=3)),
("cm_buy_saracen_elite_boots",spr_buy_item_flags(4),"sarranid_elite_boots","bo_pw_armor_foot", spr_buy_item_triggers("itm_sarranid_elite_boots", resources=[("itm_leather_piece",2)], tailoring=2, engineer=3)),
("cm_buy_saracen_royal_boots",spr_buy_item_flags(4),"sarranid_royal_boots","bo_pw_armor_foot", spr_buy_item_triggers("itm_sarranid_royal_boots", resources=[("itm_leather_piece",2)], tailoring=2, engineer=3)),
("cm_buy_rus_splinted_greaves",spr_buy_item_flags(5),"rus_splint_greaves","bo_pw_armor_foot", spr_buy_item_triggers("itm_rus_splint_greaves", resources=[("itm_leather_piece",2),"itm_iron_piece"], tailoring=3, engineer=3)),
("cm_buy_splinted_greaves",spr_buy_item_flags(5),"splinted_greaves_nospurs","bo_pw_armor_foot", spr_buy_item_triggers("itm_splinted_greaves_nospurs", resources=[("itm_leather_piece",2),"itm_iron_piece"], tailoring=3, engineer=3)),
("cm_buy_steel_greaves",spr_buy_item_flags(7),"narf_greaves2","bo_pw_armor_foot", spr_buy_item_triggers("itm_narf_greaves2", resources=["itm_leather_piece",("itm_iron_bar_short",2)], engineer=5)),
("cm_buy_shynbaulds",spr_buy_item_flags(8),"shynbaulds","bo_pw_armor_foot", spr_buy_item_triggers("itm_shynbaulds", resources=["itm_leather_piece",("itm_iron_bar_short",3)], engineer=6)),
("cm_buy_polished_steel_boots",spr_buy_item_flags(10),"narf_greaves","bo_pw_armor_foot", spr_buy_item_triggers("itm_narf_greaves", resources=[("itm_leather_piece",2),("itm_iron_bar",2)], engineer=6)),
("cm_buy_steel_boots",spr_buy_item_flags(10),"steel_greaves","bo_pw_armor_foot", spr_buy_item_triggers("itm_steel_greaves", resources=[("itm_leather_piece",2),("itm_iron_bar",2)], engineer=6)),
("cm_buy_crown",spr_buy_item_flags(12),"crown","bo_crown", spr_buy_item_triggers("itm_crown", resources=["itm_gold_nugget"], engineer=6)),
("cm_buy_bejeweled_crown",spr_buy_item_flags(12),"bejeweled_crown","bo_bejeweled_crown", spr_buy_item_triggers("itm_bejeweled_crown", resources=["itm_gold_nugget"], engineer=6)),
("cm_buy_brown_assassin_cape",spr_buy_item_flags(1),"brown_assassin_cape","bo_pw_armor_head", spr_buy_item_triggers("itm_brown_assassin_cape", resources=[("itm_linen_cloth_small",2)], tailoring=1)),
("cm_buy_green_assassin_cape",spr_buy_item_flags(1),"green_assassin_cape","bo_pw_armor_head", spr_buy_item_triggers("itm_green_assassin_cape", resources=[("itm_linen_cloth_small",2)], tailoring=1)),
("cm_buy_white_assassin_cape",spr_buy_item_flags(1),"white_assassin_cape","bo_pw_armor_head", spr_buy_item_triggers("itm_white_assassin_cape", resources=[("itm_linen_cloth_small",2)], tailoring=1)),
("cm_buy_tourney_helm_yellow",spr_buy_item_flags(5),"tourney_helmY","bo_pw_armor_head", spr_buy_item_triggers("itm_tourney_helm_yellow", resources=["itm_leather_piece","itm_iron_piece"], engineer=2)),
("cm_buy_saracen_helmet_with_leather",spr_buy_item_flags(5),"helm_saracin_c","bo_pw_armor_head", spr_buy_item_triggers("itm_helm_saracin_c", resources=["itm_leather_piece","itm_iron_piece"], engineer=2)),
("cm_buy_saracen_helmet_with_mail",spr_buy_item_flags(6),"sar_infantry_helmet1","bo_pw_armor_head", spr_buy_item_triggers("itm_sar_infantry_helmet1", resources=["itm_iron_bar_short","itm_leather_piece"], engineer=2)),
("cm_buy_chapel_de_fer_cloth",spr_buy_item_flags(7),"chapel_de_fer_cloth2","bo_pw_armor_head_armature", spr_buy_item_triggers("itm_chapel_de_fer_cloth2", resources=["itm_iron_bar_short","itm_linen_cloth_small"], engineer=2)),
("cm_buy_gnezdovo_helmet",spr_buy_item_flags(7),"gnezdovo_helm_a","bo_pw_armor_head_armature", spr_buy_item_triggers("itm_gnezdovo_helm_a", resources=["itm_iron_bar_short","itm_leather_piece"], engineer=2)),
("cm_buy_bascinet",spr_buy_item_flags(8),"north_helmet","bo_pw_armor_head", spr_buy_item_triggers("itm_north_helmet", resources=["itm_iron_bar_short","itm_iron_piece"], engineer=4)),
("cm_buy_sallet",spr_buy_item_flags(8),"north_noseguard","bo_pw_armor_head", spr_buy_item_triggers("itm_north_noseguard", resources=["itm_iron_bar_short","itm_iron_piece"], engineer=4)),
("cm_buy_barbuta",spr_buy_item_flags(9),"barbuta1","bo_pw_armor_head", spr_buy_item_triggers("itm_barbuta1", resources=["itm_iron_bar_short","itm_iron_piece"], engineer=4)),
("cm_buy_rus_helmet",spr_buy_item_flags(10),"rus_helm","bo_pw_armor_head", spr_buy_item_triggers("itm_rus_helm", resources=["itm_iron_bar_short","itm_iron_piece"], engineer=4)),
("cm_buy_decorated_saracen_helmet_with_mail",spr_buy_item_flags(10),"helm_saracin_j","bo_pw_armor_head_armature", spr_buy_item_triggers("itm_helm_saracin_j", resources=["itm_iron_bar_short","itm_linen_cloth_small"], engineer=4)),
("cm_buy_reinforced_kattlehat",spr_buy_item_flags(10),"dejawolf_kettlehat_1","bo_pw_armor_head", spr_buy_item_triggers("itm_dejawolf_kettlehat_1", resources=["itm_iron_bar_short","itm_iron_piece"], engineer=4)),
("cm_buy_chapel_de_fer_with_mail_1",spr_buy_item_flags(11),"chapel_de_fer_mail2","bo_pw_armor_head_armature", spr_buy_item_triggers("itm_chapel_de_fer_mail2", resources=["itm_iron_bar_short","itm_iron_piece"], engineer=4)),
("cm_buy_chapel_de_fer_with_mail_2",spr_buy_item_flags(11),"chapel_de_fer_mail3","bo_pw_armor_head_armature", spr_buy_item_triggers("itm_chapel_de_fer_mail3", resources=["itm_iron_bar_short","itm_iron_piece"], engineer=4)),
("cm_buy_open_sallet_with_coif",spr_buy_item_flags(11),"open_salet_coif","bo_pw_armor_head_armature", spr_buy_item_triggers("itm_open_salet_coif", resources=["itm_iron_bar_short","itm_iron_piece"], engineer=4)),
("cm_buy_zitta_bascinet_without_visor",spr_buy_item_flags(11),"zitta_bascinet_novisor","bo_pw_armor_head_armature", spr_buy_item_triggers("itm_zitta_bascinet_novisor", resources=["itm_iron_bar_short","itm_iron_piece"], engineer=4)),
("cm_buy_prato_chapel_de_fer",spr_buy_item_flags(11),"prato_chapel_de_fer","bo_pw_armor_head", spr_buy_item_triggers("itm_prato_chapel_de_fer", resources=["itm_iron_bar_short","itm_iron_piece","itm_leather_piece"], engineer=4)),
("cm_buy_gulam_helmet_with_blue_cloth",spr_buy_item_flags(12),"gulam_helm_c_market","bo_pw_armor_head_armature", spr_buy_item_triggers("itm_gulam_helm_c_market", resources=["itm_iron_bar_short","itm_linen_cloth_small"], engineer=4)),
("cm_buy_gulam_helmet_with_red_cloth",spr_buy_item_flags(12),"gulam_helm_b_market","bo_pw_armor_head", spr_buy_item_triggers("itm_gulam_helm_b_market", resources=["itm_iron_bar_short","itm_linen_cloth_small"], engineer=4)),
("cm_buy_gulam_helmet",spr_buy_item_flags(13),"gulam_helm_a","bo_pw_armor_head", spr_buy_item_triggers("itm_gulam_helm_a", resources=["itm_iron_bar_short","itm_iron_piece"], engineer=4)),
("cm_buy_open_tagancha",spr_buy_item_flags(13),"tagancha_helm_a","bo_pw_armor_head", spr_buy_item_triggers("itm_tagancha_helm_a", resources=[("itm_iron_bar_short",2)], engineer=5)),
("cm_buy_closed_tagancha",spr_buy_item_flags(13),"tagancha_helm_b","bo_pw_armor_head", spr_buy_item_triggers("itm_tagancha_helm_b", resources=[("itm_iron_bar_short",2)], engineer=5)),
("cm_buy_nikolskoe_helmet",spr_buy_item_flags(13),"nikolskoe_helm","bo_pw_armor_head", spr_buy_item_triggers("itm_nikolskoe_helm", resources=[("itm_iron_bar_short",2)], engineer=5)),
("cm_buy_novogrod_helmet",spr_buy_item_flags(14),"novogrod_helm","bo_pw_armor_head", spr_buy_item_triggers("itm_novogrod_helm", resources=[("itm_iron_bar_short",2)], engineer=5)),
("cm_buy_visored_sallet",spr_buy_item_flags(14),"visored_salet","bo_pw_armor_head", spr_buy_item_triggers("itm_visored_salet", resources=["itm_iron_bar_short","itm_iron_piece"], engineer=5)),
("cm_buy_visored_sallet_with_coif",spr_buy_item_flags(14),"visored_salet_coif","bo_pw_armor_head", spr_buy_item_triggers("itm_visored_salet_coif", resources=["itm_iron_bar_short","itm_iron_piece"], engineer=5)),
("cm_buy_open_sallet",spr_buy_item_flags(14),"north_aventail","bo_pw_armor_head", spr_buy_item_triggers("itm_north_aventail", resources=["itm_iron_bar_short","itm_iron_piece"], engineer=5)),
("cm_buy_closed_sallet",spr_buy_item_flags(14),"north_bascinet","bo_pw_armor_head", spr_buy_item_triggers("itm_north_bascinet", resources=["itm_iron_bar_short","itm_iron_piece"], engineer=5)),
("cm_buy_open_sallet_with_neckguard",spr_buy_item_flags(15),"north_sallet","bo_pw_armor_head_armature", spr_buy_item_triggers("itm_north_sallet", resources=[("itm_iron_bar_short",2)], engineer=5)),
("cm_buy_closed_sallet_with_neckguard",spr_buy_item_flags(15),"north_neckguard","bo_pw_armor_head_armature", spr_buy_item_triggers("itm_north_neckguard", resources=[("itm_iron_bar_short",2)], engineer=5)),
("cm_buy_litchina_helmet",spr_buy_item_flags(15),"litchina_helm","bo_pw_armor_head_armature", spr_buy_item_triggers("itm_litchina_helm", resources=[("itm_iron_bar_short",2)], engineer=5)),
("cm_buy_decorated_gulam_helm",spr_buy_item_flags(16),"gulam_helm_f_market","bo_pw_armor_head", spr_buy_item_triggers("itm_gulam_helm_f_market", resources=[("itm_iron_bar_short",2)], engineer=5)),
("cm_buy_hounskull_helmet",spr_buy_item_flags(17),"hounskull","bo_pw_armor_head", spr_buy_item_triggers("itm_hounskull", resources=[("itm_iron_bar_short",2),"itm_leather_piece"], engineer=6)),
("cm_buy_open_zitta_bascinet",spr_buy_item_flags(17),"zitta_bascinet_open","bo_pw_armor_head_armature", spr_buy_item_triggers("itm_zitta_bascinet_open", resources=[("itm_iron_bar_short",2),"itm_leather_piece"], engineer=6)),
("cm_buy_zitta_bascinet",spr_buy_item_flags(17),"zitta_bascinet","bo_pw_armor_head_armature", spr_buy_item_triggers("itm_zitta_bascinet", resources=[("itm_iron_bar_short",2),"itm_leather_piece"], engineer=6)),
("cm_buy_open_pigface_helmet",spr_buy_item_flags(17),"pigface_klappvisor_open","bo_pw_armor_head_armature", spr_buy_item_triggers("itm_pigface_klappvisor_open", resources=[("itm_iron_bar_short",2),"itm_iron_piece"], engineer=6)),
("cm_buy_pigface_helmet",spr_buy_item_flags(17),"pigface_klappvisor","bo_pw_armor_head_armature", spr_buy_item_triggers("itm_pigface_klappvisor", resources=[("itm_iron_bar_short",2),"itm_iron_piece"], engineer=6)),
("cm_buy_open_klappvisier",spr_buy_item_flags(17),"klappvisier_open","bo_pw_armor_head_armature", spr_buy_item_triggers("itm_klappvisier_open", resources=[("itm_iron_bar_short",2),"itm_iron_piece"], engineer=6)),
("cm_buy_klappvisier",spr_buy_item_flags(17),"klappvisier","bo_pw_armor_head_armature", spr_buy_item_triggers("itm_klappvisier", resources=[("itm_iron_bar_short",2),"itm_iron_piece"], engineer=6)),
("cm_buy_milanese_helmet",spr_buy_item_flags(18),"dejawolf_sallet","bo_pw_armor_head", spr_buy_item_triggers("itm_dejawolf_sallet", resources=[("itm_iron_bar_short",2),"itm_iron_piece"], engineer=6)),
("cm_buy_sugarloaf",spr_buy_item_flags(20),"sugarloaf","bo_pw_armor_head", spr_buy_item_triggers("itm_sugarloaf", resources=["itm_iron_bar","itm_linen_cloth_small"], engineer=6)),
("cm_buy_sultan_helmet",spr_buy_item_flags(20),"helm_sultan_saracens_market","bo_pw_armor_head_armature", spr_buy_item_triggers("itm_helm_sultan_saracens_market", resources=["itm_iron_bar","itm_linen_cloth_small"], engineer=6)),
("cm_buy_flemish_armet",spr_buy_item_flags(21),"flemish_armet","bo_pw_armor_head", spr_buy_item_triggers("itm_flemish_armet", resources=["itm_iron_bar","itm_iron_bar_short","itm_linen_cloth_small"], engineer=6)),
("cm_buy_great_bascinet",spr_buy_item_flags(21),"greatbascinet1","bo_pw_armor_head_armature", spr_buy_item_triggers("itm_greatbascinet1", resources=["itm_iron_bar","itm_iron_bar_short","itm_linen_cloth_small"], engineer=6)),
("cm_buy_greathelm_with_bascinet",spr_buy_item_flags(21),"greathelm1","bo_pw_armor_head", spr_buy_item_triggers("itm_greathelm1", resources=["itm_iron_bar","itm_iron_bar_short","itm_linen_cloth_small"], engineer=6)),
("cm_buy_weimar_helmet",spr_buy_item_flags(21),"weimarhelm","bo_pw_armor_head_armature", spr_buy_item_triggers("itm_weimarhelm", resources=["itm_iron_bar","itm_iron_bar_short","itm_linen_cloth_small"], engineer=6)),
# Heraldic PK armour
("cm_buy_early_transitional_armor",spr_buy_item_flags(16),"early_transitional_heraldic","bo_pw_armor_body", spr_buy_item_triggers("itm_early_transitional_heraldic", resources=[("itm_iron_bar_short",2),"itm_linen_cloth"], engineer=6, tableau="tableau_heraldic_early_transitional_white")),
# PK Weapons
("cm_buy_practice_dagger",spr_buy_item_flags(1),"practice_dagger","bo_pw_weapon_small", spr_buy_item_triggers("itm_practice_dagger", resources=["itm_wood_pole_short"], engineer=2)),
("cm_buy_iberian_mace",spr_buy_item_flags(4),"faradon_iberianmace","bo_pw_weapon_small", spr_buy_item_triggers("itm_faradon_iberianmace", resources=[("itm_iron_bar_short",2)], engineer=2)),
("cm_buy_warhammer",spr_buy_item_flags(4),"faradon_warhammer","bo_pw_weapon_small", spr_buy_item_triggers("itm_faradon_warhammer", resources=["itm_iron_bar_short", "itm_stick"], engineer=3)),
("cm_buy_grosse_messer",spr_buy_item_flags(4),"grosse_messer","bo_pw_weapon_small", spr_buy_item_triggers("itm_grosse_messer", resources=["itm_iron_bar"], engineer=2)),
("cm_buy_milanese_sword",spr_buy_item_flags(8),"milanese_sword","bo_pw_weapon", spr_buy_item_triggers("itm_milanese_sword", resources=["itm_iron_bar"], engineer=4)),
("cm_buy_scottish_sword",spr_buy_item_flags(9),"scottish_sword","bo_pw_weapon", spr_buy_item_triggers("itm_scottish_sword", resources=["itm_iron_bar"], engineer=4)),
("cm_buy_italian_sword",spr_buy_item_flags(10),"italian_sword","bo_pw_weapon", spr_buy_item_triggers("itm_italian_sword", resources=["itm_iron_bar"], engineer=4)),
("cm_buy_crusader_sword",spr_buy_item_flags(10),"crusader_sword","bo_pw_weapon", spr_buy_item_triggers("itm_crusader_sword", resources=["itm_iron_bar"], engineer=4)),
("cm_buy_langes_messer",spr_buy_item_flags(11),"grosse_messer_b","bo_pw_weapon", spr_buy_item_triggers("itm_grosse_messer_b", resources=["itm_iron_bar_long"], engineer=5)),
("cm_buy_longsword",spr_buy_item_flags(12),"longsword_b","bo_pw_weapon", spr_buy_item_triggers("itm_longsword_b", resources=["itm_iron_bar_long"], engineer=5)),
("cm_buy_german_longsword",spr_buy_item_flags(12),"german_bastard_sword","bo_pw_weapon", spr_buy_item_triggers("itm_german_bastard_sword", resources=["itm_iron_bar_long"], engineer=5)),
("cm_buy_english_longsword",spr_buy_item_flags(12),"english_longsword","bo_pw_weapon", spr_buy_item_triggers("itm_english_longsword", resources=["itm_iron_bar_long"], engineer=5)),
("cm_buy_german_greatsword",spr_buy_item_flags(15),"faradon_twohanded1","bo_pw_weapon", spr_buy_item_triggers("itm_faradon_twohanded1", resources=["itm_iron_bar_long"], engineer=6)),
("cm_buy_danish_greatsword",spr_buy_item_flags(15),"danish_greatsword","bo_pw_weapon", spr_buy_item_triggers("itm_danish_greatsword", resources=["itm_iron_bar_long"], engineer=7)),
("cm_buy_claymore",spr_buy_item_flags(15),"claymore","bo_pw_weapon", spr_buy_item_triggers("itm_claymore", resources=["itm_iron_bar_long"], engineer=7)),
("cm_buy_battle_fork",spr_buy_item_flags(2),"battle_fork","bo_pw_weapon_small", spr_buy_item_triggers("itm_battle_fork", resources=["itm_wood_pole", "itm_iron_piece"], engineer=1)),
("cm_buy_short_poleaxe",spr_buy_item_flags(4),"poleaxe_no3","bo_pw_weapon_big", spr_buy_item_triggers("itm_poleaxe_no3", resources=["itm_wood_pole", "itm_iron_bar_short"], engineer=2)),
("cm_buy_german_poleaxe",spr_buy_item_flags(5),"german_poleaxe","bo_pw_weapon_big", spr_buy_item_triggers("itm_german_poleaxe", resources=["itm_wood_pole", "itm_iron_bar_short"], engineer=2)),
("cm_buy_poleaxe",spr_buy_item_flags(5),"poleaxe_a","bo_pw_weapon_big", spr_buy_item_triggers("itm_poleaxe_a", resources=["itm_wood_pole", "itm_iron_piece"], engineer=3)),
("cm_buy_elegant_poleaxe",spr_buy_item_flags(5),"elegant_poleaxe","bo_pw_weapon_big", spr_buy_item_triggers("itm_elegant_poleaxe", resources=["itm_wood_pole", "itm_iron_piece"], engineer=3)),
("cm_buy_english_bill",spr_buy_item_flags(5),"english_bill","bo_pw_weapon_big", spr_buy_item_triggers("itm_english_bill", resources=["itm_wood_pole", "itm_iron_piece"], engineer=3)),
("cm_buy_swiss_halberd",spr_buy_item_flags(5),"swiss_halberd","bo_pw_weapon_big", spr_buy_item_triggers("itm_swiss_halberd", resources=["itm_wood_pole", "itm_iron_piece"], engineer=3)),
("cm_buy_decorated_longbow",spr_buy_item_flags(9),"bow_f_decorated_bow","bo_pw_weapon", spr_buy_item_triggers("itm_bow_f_decorated_bow", resources=["itm_wood_pole"], engineer=5)),
("cm_buy_barded_horse",spr_buy_item_flags(13),"cwe_knight_horse_2","bo_pw_horse", spr_buy_item_triggers("itm_cwe_knight_horse_2", resources=["itm_saddle", "itm_horse_armor", "itm_wheat_sheaf"], herding=3)),
("cm_buy_barded_horse_with_mask",spr_buy_item_flags(17),"cwe_knight_horse_1_mask","bo_pw_horse", spr_buy_item_triggers("itm_cwe_knight_horse_1_mask", resources=["itm_saddle", "itm_horse_armor", ("itm_wheat_sheaf", 2)], herding=3)),
("cm_buy_saracen_sultan_horse",spr_buy_item_flags(17),"saracen_horse_sultan","bo_pw_horse", spr_buy_item_triggers("itm_saracen_horse_sultan", resources=["itm_saddle", "itm_horse_armor", ("itm_wheat_sheaf", 2)], herding=3)),
("cm_buy_saracen_war_horse",spr_buy_item_flags(17),"saracin_hard_horses_d","bo_pw_horse", spr_buy_item_triggers("itm_saracin_hard_horses_d", resources=["itm_saddle", "itm_horse_armor", ("itm_wheat_sheaf", 2)], herding=3)),
("cm_buy_dragons_tongue",spr_buy_item_flags(4),"dragons_tongue","bo_pw_weapon_small", spr_buy_item_triggers("itm_dragons_tongue", resources=["itm_iron_bar_short", "itm_stick"], engineer=3)),
("cm_buy_royal_dagger",spr_buy_item_flags(3),"royal_dagger_scabbard","bo_pw_weapon_small", spr_buy_item_triggers("itm_royal_dagger", resources=["itm_iron_bar_short"], engineer=3)),
("cm_buy_noyan_dagger",spr_buy_item_flags(3),"noyan_dagger","bo_pw_weapon_small", spr_buy_item_triggers("itm_noyan_dagger", resources=["itm_iron_bar_short"], engineer=3)),
("cm_buy_royal_sword",spr_buy_item_flags(10),"royal_sword","bo_pw_weapon", spr_buy_item_triggers("itm_royal_sword", resources=["itm_iron_bar"], engineer=4)),
("cm_buy_battle_sceptre",spr_buy_item_flags(7),"hakim_battle_sceptre","bo_pw_weapon", spr_buy_item_triggers("itm_battle_sceptre", resources=["itm_wood_pole_short", "itm_iron_bar"], engineer=3)),
("cm_buy_royal_bastard_sword",spr_buy_item_flags(12),"royal_bastard_sword","bo_pw_weapon", spr_buy_item_triggers("itm_royal_bastard_sword", resources=["itm_iron_bar_long"], engineer=5)),
("cm_buy_royal_great_sword",spr_buy_item_flags(15),"royal_great_sword","bo_pw_weapon", spr_buy_item_triggers("itm_royal_great_sword", resources=["itm_iron_bar_long"], engineer=7)),
("cm_buy_katana",spr_buy_item_flags(15),"katana","bo_pw_weapon", spr_buy_item_triggers("itm_katana", resources=["itm_iron_bar_long"], engineer=7)),
("cm_buy_wakizashi",spr_buy_item_flags(8),"wakizashi","bo_pw_weapon", spr_buy_item_triggers("itm_wakizashi", resources=["itm_iron_bar"], engineer=4)),
# PK Instruments
("cm_buy_flute",spr_buy_item_flags(7),"flute","bo_pw_weapon_small", spr_buy_item_triggers("itm_flute", pos_offset=(0,0,7), resources=["itm_board"], engineer=2)),
("cm_buy_vielle",spr_buy_item_flags(7),"vielle","bo_pw_weapon_small", spr_buy_item_triggers("itm_vielle", pos_offset=(0,0,7), resources=["itm_board"], engineer=2)),
("cm_buy_vielle_bow",spr_buy_item_flags(7),"vielle_bow_R","bo_vielle_bow_R", spr_buy_item_triggers("itm_vielle_bow", pos_offset=(0,0,7), resources=["itm_stick"], engineer=2)),
# PK Scene Props
("cm_wooden_hut",0,"cm_wooden_hut","bo_cm_wooden_hut", []),
("cm_palisade_wall_a",0,"cm_palisade_wall_a","bo_cm_palisade_wall_a", []),
("cm_palisade_wall_b",0,"cm_palisade_wall_b","bo_cm_palisade_wall_b", []),
("cm_palisade_wall_c",0,"cm_palisade_wall_c","bo_cm_palisade_wall_c", []),
("cm_palisade_gate",0,"cm_palisade_gate","bo_cm_palisade_gate", []),
("cm_skeleton_a",0,"cm_skeleton_a","0", []),
("cm_skeleton_b",0,"cm_skeleton_b","0", []),
("cm_skeleton_c",0,"cm_skeleton_c","0", []),
("cm_smithy_grindstone_wheel",0,"cm_smithy_grindstone_wheel","bo_cm_smithy_grindstone_wheel", []),
("cm_smithy_forge_bellows",0,"cm_smithy_forge_bellows","bo_cm_smithy_forge_bellows", []),
("cm_smithy_anvil",0,"cm_smithy_anvil","bo_cm_smithy_anvil", []),
("cm_process_iron_divide_only",spr_use_time(5),"cm_smithy_anvil","bo_cm_smithy_anvil", spr_process_resource_triggers("script_cf_process_iron_divide_only", use_string="str_process_hammer_metal")),
("cm_vein_iron",spr_resource_flags(),"cm_vein_iron","bo_cm_vein_gold", spr_hit_mine_triggers("itm_iron_ore", resource_hp=60, hardness=4)),
("cm_vein_silver",spr_resource_flags(),"cm_vein_silver","bo_cm_vein_gold", spr_hit_mine_triggers("itm_silver_nugget", resource_hp=150, random_hp=100, hardness=5)),
("cm_vein_gold",spr_resource_flags(),"cm_vein_gold","bo_cm_vein_gold", spr_hit_mine_triggers("itm_gold_nugget", resource_hp=200, random_hp=200, hardness=6)),
("cm_mine_entranche",0,"cm_mine_entranche","bo_cm_mine_entranche", []),
("cm_village_blacksmith_e",0,"cm_village_blacksmith_e","bo_cm_village_blacksmith_e", []),
("cm_french_gatehouse_b",0,"cm_french_gatehouse_b","bo_cm_french_gatehouse_b", []),
("cm_french_gatehouse_a",0,"cm_french_gatehouse_a","bo_cm_french_gatehouse_a", []),
("cm_french_curtain_wall_a",0,"cm_french_curtain_wall_a","bo_cm_french_curtain_wall_a", []),
("cm_french_curtain_wall_a_destroyed",0,"cm_french_curtain_wall_a_destroyed","bo_cm_french_curtain_wall_a_destroyed", []),
("cm_french_curtain_wall_b",0,"cm_french_curtain_wall_b","bo_cm_french_curtain_wall_b", []),
("cm_french_curtain_wall_c",0,"cm_french_curtain_wall_c","bo_cm_french_curtain_wall_c", []),
("cm_french_curtain_wall_rea",0,"cm_french_curtain_wall_rea","bo_cm_french_curtain_wall_rea", []),
("cm_french_curtain_wall_rea_destroyed",0,"cm_french_curtain_wall_rea_destroyed","bo_cm_french_curtain_wall_rea_destroyed", []),
("cm_french_tower_b",0,"cm_french_tower_b","bo_cm_french_tower_b", []),
("cm_french_tower_c",0,"cm_french_tower_c","bo_cm_french_tower_c", []),
("cm_french_tower_d",0,"cm_french_tower_d","bo_cm_french_tower_d", []),
("cm_french_tower_e",0,"cm_french_tower_e","bo_cm_french_tower_e", []),
("cm_french_tower_f",0,"cm_french_tower_f","bo_cm_french_tower_f", []),
("cm_french_tower_g",0,"cm_french_tower_g","bo_cm_french_tower_g", []),
("cm_french_tower_h",0,"cm_french_tower_h","bo_cm_french_tower_h", []),
("cm_french_tower_i",0,"cm_french_tower_i","bo_cm_french_tower_i", []),
("cm_french_tower_j",0,"cm_french_tower_j","bo_cm_french_tower_j", []),
("cm_french_tower_k",0,"cm_french_tower_k","bo_cm_french_tower_k", []),
("cm_french_tower_l",0,"cm_french_tower_l","bo_cm_french_tower_l", []),
("cm_square_tower_akr_a",0,"cm_square_tower_Akr_a","bo_cm_square_tower_Akr_a", []),
("cm_square_tower_akr_b",0,"cm_square_tower_Akr_b","bo_cm_square_tower_Akr_b", []),
("cm_sea_gate",0,"cm_sea_gate","bo_cm_sea_gate", []),
("cm_akra_round_tower_a",0,"cm_akra_round_tower_a","bo_cm_akra_round_tower_a", []),
("cm_akra_round_tower_b",0,"cm_akra_round_tower_b","bo_cm_akra_round_tower_b", []),
("cm_akra_round_tower_c",0,"cm_akra_round_tower_c","bo_cm_akra_round_tower_c", []),
("cm_akra_round_tower_d",0,"cm_akra_round_tower_d","bo_cm_akra_round_tower_d", []),
("cm_wall_sea_akr_a",0,"cm_wall_sea_Akr_a","bo_cm_wall_sea_Akr_a", []),
("cm_wall_sea_akr_b",0,"cm_wall_sea_Akr_b","bo_cm_wall_sea_Akr_b", []),
("cm_fort_akr_a",0,"cm_fort_Akr_a","bo_cm_fort_Akr_a", []),
("cm_square_tower_akr_c",0,"cm_square_tower_Akr_c","bo_cm_square_tower_Akr_c", []),
("cm_square_tower_akr_d",0,"cm_square_tower_Akr_d","bo_cm_square_tower_Akr_d", []),
("cm_square_tower_akr_e",0,"cm_square_tower_Akr_e","bo_cm_square_tower_Akr_e", []),
("cm_square_tower_akr_f",0,"cm_square_tower_Akr_f","bo_cm_square_tower_Akr_f", []),
("cm_beacon_acr",0,"cm_beacon_Acr","bo_cm_beacon_Acr", []),
("cm_main_gate_acre",0,"cm_main_gate_Acre","bo_cm_main_gate_Acre", []),
("cm_tapisserie_1",0,"tapisserie_1","0",[]),
("cm_tapisserie_2",0,"tapisserie_2","0",[]),
("cm_tapisserie_3",0,"tapisserie_3","0",[]),
("cm_tapisserie_4",0,"tapisserie_4","0",[]),
("cm_tapisserie_5",0,"tapisserie_5","0",[]),
("cm_tapisserie_6",0,"tapisserie_6","0",[]),
("cm_tapisserie_7",0,"tapisserie_7","0",[]),
("cm_tapisserie_8",0,"tapisserie_8","0",[]),
("cm_tapisserie_9",0,"tapisserie_9","0",[]),
("cm_tapisserie_10",0,"tapisserie_10","0",[]),
("cm_tapisserie_11",0,"tapisserie_11","0",[]),
("cm_tapisserie_12",0,"tapisserie_12","0",[]),
("cm_tapisserie_13",0,"tapisserie_13","0",[]),
("cm_tapisserie_14",0,"tapisserie_14","0",[]),
("cm_tapisserie_15",0,"tapisserie_15","0",[]),
("cm_tapisserie_16",0,"tapisserie_16","0",[]),
("cm_tapisserie_17",0,"tapisserie_17","0",[]),
("cm_tapisserie_18",0,"tapisserie_18","0",[]),
("cm_tapisserie_19",0,"tapisserie_19","0",[]),
("cm_tapisserie_20",0,"tapisserie_20","0",[]),
("cm_tapisserie_21",0,"tapisserie_21","0",[]),
("cm_tapisserie_22",0,"tapisserie_22","0",[]),
("cm_tapisserie_23",0,"tapisserie_23","0",[]),
("cm_tapisserie_24",0,"tapisserie_24","0",[]),
("cm_tapisserie_25",0,"tapisserie_25","0",[]),
("cm_tapisserie_26",0,"tapisserie_26","0",[]),
("cm_tapisserie_27",0,"tapisserie_27","0",[]),
("cm_tapisserie_28",0,"tapisserie_28","0",[]),
("cm_tapisserie_29",0,"tapisserie_29","0",[]),
("cm_tapisserie_30",0,"tapisserie_30","0",[]),
("cm_tapisserie_31",0,"tapisserie_31","0",[]),
("cm_plan_tente",0,"plan_tente","0",[]),
("cm_plan_tente_2",0,"plan_tente_2","0",[]),
("cm_rich_keep",0,"ab_rich_keep","bo_ab_rich_keep", []),
("cm_earth_house_c",0,"ab_earth_house_c","bo_ab_earth_house_c", []),
("cm_bartizan",0,"ab_bartizan","bo_ab_bartizan", []),
("cm_castle_f",0,"ab_castle_f","bo_ab_castle_f", []),
("cm_wooden_div_a",0,"ab_wooden_div_a","bo_ab_div", []),
("cm_wooden_div_b",0,"ab_wooden_div_b","bo_ab_div", []),
("cm_wooden_door_a",0,"ab_wooden_door_a","bo_ab_door", []),
("cm_wooden_door_b",0,"ab_wooden_door_b","bo_ab_door", []),
("cm_wooden_div_c",0,"ab_wooden_div_c","bo_ab_div", []),
("cm_wooden_door_c",0,"ab_wooden_door_c","bo_ab_door", []),
("cm_house_c",0,"ab_house_c","bo_ab_house_c", []),
("cm_village_house_a",0,"ab_village_house_a","bo_ab_village_house_a", []),
("cm_town_house_e",0,"ab_town_house_e","bo_ab_town_house_e", []),
("cm_tapestry_10",0,"tapestry_10","0",[]),
("cm_tapestry_2",0,"tapestry_2","0",[]),
("cm_tapestry_art_two",0,"art_two","0",[]),
("cm_tapestry_art_three",0,"art_three","0",[]),
("cm_tapestry_6",0,"tapestry_6","0",[]),
("cm_tapestry_7",0,"tapestry_7","0",[]),
("cm_tapestry_9",0,"tapestry_9","0",[]),
("cm_gallows",0,"echafau","bo_echafau",[]),
("cm_hangman",0,"pendu","bo_pendu",[]),
("cm_sausage",0,"sausage","0",[]),
("cm_ab_tent_comander",0,"1429_tente_comander","bo_1429_tente_comander",[]),
("cm_ab_tent_comander_blue",0,"1429_tente_comander_bleu","bo_1429_tente_comander",[]),
("cm_ab_tent_merch",0,"tent_merch","bo_tent_merch",[]),
("cm_ab_tent_merch2",0,"tent_merch2","bo_tent_merch",[]),
("cm_noticeboard",0,"cm_noticeboard","bo_cm_noticeboard",[]),
("cm_noticeboardwall",0,"cm_noticeboardwall","bo_cm_noticeboardwall",[]),
("cm_town_house_i",0,"srp_town_house_i","srp_town_house_i_bo",[]),
("cm_town_house_m",0,"srp_town_house_m","srp_town_house_m_bo",[]),
("cm_town_house_q",0,"srp_town_house_q","srp_town_house_q_bo",[]),
("cm_town_house_r",0,"srp_town_house_r","srp_town_house_r_bo",[]),
("cm_town_house_w",0,"srp_town_house_w","srp_town_house_w_bo",[]),
("cm_town_house_s",0,"srp_town_house_s","srp_town_house_s_bo",[]),
# PK Jewellery
("cm_buy_silver_ring_sapphire",spr_buy_item_flags(10),"silver_ring_sapphire_L","bo_pw_weapon_small", spr_buy_item_triggers("itm_silver_ring_sapphire", resources=["itm_silver_nugget"], engineer=6)),
("cm_buy_silver_ring_ruby",spr_buy_item_flags(12),"silver_ring_ruby_R","bo_pw_weapon_small", spr_buy_item_triggers("itm_silver_ring_ruby", resources=["itm_silver_nugget"], engineer=6)),
("cm_buy_gold_ring_sapphire",spr_buy_item_flags(15),"gold_ring_sapphire_L","bo_pw_weapon_small", spr_buy_item_triggers("itm_gold_ring_sapphire", resources=["itm_gold_nugget"], engineer=6)),
("cm_buy_gold_ring_ruby",spr_buy_item_flags(18),"gold_ring_ruby_R","bo_pw_weapon_small", spr_buy_item_triggers("itm_gold_ring_ruby", resources=["itm_gold_nugget"], engineer=6)),
("cm_buy_gold_earrings",spr_buy_item_flags(10),"gold_earrings","bo_pw_weapon_small", spr_buy_item_triggers("itm_gold_earrings", resources=["itm_gold_nugget"], engineer=6)),
("cm_buy_opal_earrings",spr_buy_item_flags(11),"opal_earrings","bo_pw_weapon_small", spr_buy_item_triggers("itm_opal_earrings", resources=["itm_gold_nugget"], engineer=6)),
("cm_buy_agate_earrings",spr_buy_item_flags(12),"agate_earrings","bo_pw_weapon_small", spr_buy_item_triggers("itm_agate_earrings", resources=["itm_gold_nugget"], engineer=6)),
("cm_buy_yellow_jade_earrings",spr_buy_item_flags(13),"yellow_jade_earrings","bo_pw_weapon_small", spr_buy_item_triggers("itm_yellow_jade_earrings", resources=["itm_gold_nugget"], engineer=6)),
("cm_buy_amethyst_earrings",spr_buy_item_flags(14),"amethyst_earrings","bo_pw_weapon_small", spr_buy_item_triggers("itm_amethyst_earrings", resources=["itm_gold_nugget"], engineer=6)),
("cm_buy_gold_earrings_b",spr_buy_item_flags(15),"gold_earrings_b","bo_pw_weapon_small", spr_buy_item_triggers("itm_gold_earrings_b", resources=["itm_gold_nugget"], engineer=6)),
("cm_buy_sapphire_earrings",spr_buy_item_flags(16),"sapphire_earrings","bo_pw_weapon_small", spr_buy_item_triggers("itm_sapphire_earrings", resources=["itm_gold_nugget"], | |
{type(data)}')
def _numpy_to_dataset(self, data):
rai = imglyb.to_imglib(data)
return self._java_to_dataset(rai)
def _ends_with_channel_axis(self, xarr):
ends_with_axis = xarr.dims[len(xarr.dims)-1].lower() in ['c', 'channel']
return ends_with_axis
def _xarray_to_dataset(self, xarr):
"""
Converts an xarray DataArray to an ImageJ Dataset, inverting C-style (slow axis first) to F-style (slow axis last)
:param xarr: The xarray DataArray to convert into a Dataset.
:return: The dataset
"""
if self._ends_with_channel_axis(xarr):
vals = numpy.moveaxis(xarr.values, -1, 0)
dataset = self._numpy_to_dataset(vals)
else:
dataset = self._numpy_to_dataset(xarr.values)
axes = self._assign_axes(xarr)
dataset.setAxes(axes)
self._assign_dataset_metadata(dataset, xarr.attrs)
return dataset
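# Illustrative sketch of the channel handling above (assumed shapes, not from the
# original source): for an xarray.DataArray with dims ('y', 'x', 'c') and shape
# (480, 640, 3) the channel axis is detected as last, so the values are moved to
# channel-first order before being wrapped as an ImgLib2 image:
#   vals = numpy.moveaxis(xarr.values, -1, 0)   # shape becomes (3, 480, 640)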
def _assign_axes(self, xarr):
"""
Obtain the xarray axis names, origins, and scales and convert them into ImageJ Axis objects; currently supports EnumeratedAxis
:param xarr: xarray that holds the units
:return: A list of ImageJ Axis with the specified origin and scale
"""
axes = ['']*len(xarr.dims)
for axis in xarr.dims:
axis_str = self._pydim_to_ijdim(axis)
ax_type = Axes.get(axis_str)
ax_num = self._get_axis_num(xarr, axis)
scale = self._get_scale(xarr.coords[axis])
if scale is None:
logging.warning(f"The {ax_type.label} axis is non-numeric and is translated to a linear index.")
doub_coords = [Double(numpy.double(x)) for x in numpy.arange(len(xarr.coords[axis]))]
else:
doub_coords = [Double(numpy.double(x)) for x in xarr.coords[axis]]
# EnumeratedAxis is a new axis type added for xarray support, so it is only present in ImageJ versions
# released after March 2020. On earlier versions this returns a LinearAxis instead.
java_axis = EnumeratedAxis(ax_type, ij.py.to_java(doub_coords))
axes[ax_num] = java_axis
return axes
def _pydim_to_ijdim(self, axis):
"""Convert between the lowercase Python convention (x, y, z, c, t) to IJ (X, Y, Z, C, T)"""
if str(axis) in ['x', 'y', 'z', 'c', 't']:
return str(axis).upper()
return str(axis)
def _ijdim_to_pydim(self, axis):
"""Convert the IJ uppercase dimension convention (X, Y, Z C, T) to lowercase python (x, y, z, c, t) """
if str(axis) in ['X', 'Y', 'Z', 'C', 'T']:
return str(axis).lower()
return str(axis)
def _get_axis_num(self, xarr, axis):
"""
Map an xarray axis to its Java (ImageJ) axis index, accounting for the inverted axis order of C-style (default) numpy arrays
:param xarr: Xarray to convert
:param axis: Axis (dimension name) to convert
:return: The corresponding axis index on the Java side
"""
py_axnum = xarr.get_axis_num(axis)
if numpy.isfortran(xarr.values):
return py_axnum
if self._ends_with_channel_axis(xarr):
if axis == len(xarr.dims) - 1:
return axis
else:
return len(xarr.dims) - py_axnum - 2
else:
return len(xarr.dims) - py_axnum - 1
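# Worked example for the mapping above (hypothetical dims, for illustration): for a
# C-ordered array with dims ('t', 'y', 'x', 'c') the non-channel axes are reversed
# while the channel axis stays last, so _assign_axes() builds the Java axes in
# (X, Y, T, C) order.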
def _assign_dataset_metadata(self, dataset, attrs):
"""
:param dataset: ImageJ Java dataset
:param attrs: Dictionary containing metadata
"""
dataset.getProperties().putAll(self.to_java(attrs))
def _get_origin(self, axis):
"""
Get the coordinate origin of an axis, assuming it is the first entry.
:param axis: A 1D list like entry accessible with indexing, which contains the axis coordinates
:return: The origin for this axis.
"""
return axis.values[0]
def _get_scale(self, axis):
"""
Get the scale of an axis, assuming it is linear, so the scale is simply the second coordinate minus the first.
:param axis: A 1D list like entry accessible with indexing, which contains the axis coordinates
:return: The scale for this axis or None if it is a non-numeric scale.
"""
try:
return axis.values[1] - axis.values[0]
except TypeError:
return None
def _java_to_dataset(self, data):
"""
Converts the data into an ImageJ Dataset
"""
# This try/except is necessary because the set of ImageJ converters is not complete. E.g., there is no way
# to go directly from Img to Dataset; instead, you need to chain the Img->ImgPlus->Dataset converters.
try:
if self._ij.convert().supports(data, Dataset):
return self._ij.convert().convert(data, Dataset)
if self._ij.convert().supports(data, ImgPlus):
imgPlus = self._ij.convert().convert(data, ImgPlus)
return self._ij.dataset().create(imgPlus)
if self._ij.convert().supports(data, Img):
img = self._ij.convert().convert(data, Img)
return self._ij.dataset().create(ImgPlus(img))
if self._ij.convert().supports(data, RandomAccessibleInterval):
rai = self._ij.convert().convert(data, RandomAccessibleInterval)
return self._ij.dataset().create(rai)
except Exception as exc:
_dump_exception(exc)
raise exc
raise TypeError('Cannot convert to dataset: ' + str(type(data)))
def from_java(self, data):
"""
Converts the data into a python equivalent
"""
# todo: convert a dataset to xarray
if not isjava(data): return data
try:
if self._ij.convert().supports(data, Dataset):
# HACK: Converter exists for ImagePlus -> Dataset, but not ImagePlus -> RAI.
data = self._ij.convert().convert(data, Dataset)
return self._dataset_to_xarray(data)
if self._ij.convert().supports(data, RandomAccessibleInterval):
rai = self._ij.convert().convert(data, RandomAccessibleInterval)
return self.rai_to_numpy(rai)
except Exception as exc:
_dump_exception(exc)
raise exc
return to_python(data)
def _dataset_to_xarray(self, dataset):
"""
Converts an ImageJ dataset into an xarray, inverting F-style (slow idx last) to C-style (slow idx first)
:param dataset: ImageJ dataset
:return: xarray with reversed (C-style) dims and coords as labeled by the dataset
"""
attrs = self._ij.py.from_java(dataset.getProperties())
axes = [(cast('net.imagej.axis.CalibratedAxis', dataset.axis(idx)))
for idx in range(dataset.numDimensions())]
dims = [self._ijdim_to_pydim(axes[idx].type().getLabel()) for idx in range(len(axes))]
values = self.rai_to_numpy(dataset)
coords = self._get_axes_coords(axes, dims, numpy.shape(numpy.transpose(values)))
if dims[len(dims)-1].lower() in ['c', 'channel']:
xarr_dims = self._invert_except_last_element(dims)
values = numpy.moveaxis(values, 0, -1)
else:
xarr_dims = list(reversed(dims))
xarr = xr.DataArray(values, dims=xarr_dims, coords=coords, attrs=attrs)
return xarr
def _invert_except_last_element(self, lst):
"""
Invert a list except for the last element.
:param lst: The list to invert.
:return: The reversed list, with the last element kept in place.
"""
cut_list = lst[0:-1]
reverse_cut = list(reversed(cut_list))
reverse_cut.append(lst[-1])
return reverse_cut
def _get_axes_coords(self, axes, dims, shape):
"""
Get an xarray-style coordinate dictionary from the dataset axes
:param axes: List of ImageJ axes
:param dims: List of axes labels for each dataset axis
:param shape: F-style, or reversed C-style, shape of axes numpy array.
:return: Dictionary of coordinates for each axis.
"""
coords = {dims[idx]: [axes[idx].calibratedValue(position) for position in range(shape[idx])]
for idx in range(len(dims))}
return coords
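# Illustrative result (assumed axes, not from the original source): for dims
# ['x', 'y'], linear axes with origin 0 and scale 0.5, and shape (4, 3), this
# returns {'x': [0.0, 0.5, 1.0, 1.5], 'y': [0.0, 0.5, 1.0]}, which is accepted
# directly as the coords argument of xarray.DataArray().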
def show(self, image, cmap=None):
"""
Display a java or python 2D image.
:param image: A java or python image that can be converted to a numpy array
:param cmap: The colormap of the image, if it is not RGB
:return:
"""
if image is None:
raise TypeError('Image must not be None')
# NB: Import this only here on demand, rather than above.
# Otherwise, some headless systems may experience errors
# like "ImportError: Failed to import any qt binding".
from matplotlib import pyplot
pyplot.imshow(self.from_java(image), interpolation='nearest', cmap=cmap)
pyplot.show()
def _is_arraylike(self, arr):
return hasattr(arr, 'shape') and \
hasattr(arr, 'dtype') and \
hasattr(arr, '__array__') and \
hasattr(arr, 'ndim')
def _is_memoryarraylike(self, arr):
return self._is_arraylike(arr) and \
hasattr(arr, 'data') and \
type(arr.data).__name__ == 'memoryview'
def _is_xarraylike(self, xarr):
return hasattr(xarr, 'values') and \
hasattr(xarr, 'dims') and \
hasattr(xarr, 'coords') and \
self._is_arraylike(xarr.values)
def _assemble_plugin_macro(self, plugin: str, args=None, ij1_style=True):
"""
Assemble an ImageJ macro string given a plugin to run and optional arguments in a dict
:param plugin: The string call for the function to run
:param args: A dict of macro arguments in key/value pairs
:param ij1_style: Whether to use implicit booleans in IJ1 style or explicit booleans in IJ2 style
:return: A string version of the macro run
"""
if args is None:
macro = "run(\"{}\");".format(plugin)
return macro
macro = """run("{0}", \"""".format(plugin)
for key, value in args.items():
argument = self._format_argument(key, value, ij1_style)
if argument is not None:
macro = macro + ' {}'.format(argument)
macro = macro + """\");"""
return macro
def _format_argument(self, key, value, ij1_style):
if value is True:
argument = '{}'.format(key)
if not ij1_style:
argument = argument + '=true'
elif value is False:
argument = None
if not ij1_style:
argument = '{0}=false'.format(key)
elif value is None:
raise NotImplementedError('Conversion for None is not yet implemented')
else:
val_str = self._format_value(value)
argument = '{0}={1}'.format(key, val_str)
return argument
def _format_value(self, value):
temp_value = str(value).replace('\\', '/')
if temp_value.startswith('[') and temp_value.endswith(']'):
return temp_value
final_value = '[' + temp_value + ']'
return final_value
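# Example of the assembled macro (hypothetical plugin name and arguments):
#   self._assemble_plugin_macro("Gaussian Blur...", {"sigma": 3, "scaled": True})
# with ij1_style=True yields
#   run("Gaussian Blur...", " sigma=[3] scaled");
# In IJ1 style a True value becomes a bare flag and False is dropped; IJ2 style
# uses explicit key=true / key=false instead.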
def window_manager(self):
"""
Get the ImageJ1 window manager if legacy mode is enabled. It may not work properly if in headless mode.
:return: WindowManager
"""
if not ij.legacy_enabled:
raise ImportError("Your ImageJ installation does not support IJ1. This function does not work.")
elif ij.ui().isHeadless():
logging.warning("Operating in headless mode - The WindowManager will not be fully funtional.")
else:
return WindowManager
def active_xarray(self, sync=True):
"""
Convert the active image to an xarray.DataArray, synchronizing from IJ1 -> IJ2
:param sync: Manually synchronize the current IJ1 slice if True
:return: xarray.DataArray containing the image data
"""
# todo: make the behavior use pure IJ2 if legacy is not active
if ij.legacy().isActive():
imp = self.active_image_plus(sync=sync)
return self._ij.py.from_java(imp)
else:
dataset = self.active_dataset()
return self._ij.py.from_java(dataset)
def active_dataset(self):
"""Get the currently active Dataset from the Dataset service"""
return self._ij.imageDisplay().getActiveDataset()
def active_image_plus(self, sync=True):
"""
Get the currently active IJ1 image, optionally synchronizing from IJ1 -> IJ2
| |
from __future__ import print_function, division, absolute_import
import functools
import sys
import warnings
# unittest.TestCase.subTest() was only added in Python 3.4
if sys.version_info[0] < 3 or sys.version_info[1] < 4:
import unittest2 as unittest
else:
import unittest
# unittest.mock is not available in 2.7 (though unittest2 might contain it?)
try:
import unittest.mock as mock
except ImportError:
import mock
import matplotlib
matplotlib.use('Agg') # fix execution of tests involving matplotlib on travis
import numpy as np
import six.moves as sm
import imgaug as ia
from imgaug import augmenters as iaa
from imgaug import parameters as iap
from imgaug import dtypes as iadt
from imgaug.testutils import (array_equal_lists, keypoints_equal, reseed,
runtest_pickleable_uint8_img)
import imgaug.augmenters.arithmetic as arithmetic_lib
import imgaug.augmenters.contrast as contrast_lib
class TestAdd(unittest.TestCase):
def setUp(self):
reseed()
def test___init___bad_datatypes(self):
# test exceptions for wrong parameter types
got_exception = False
try:
_ = iaa.Add(value="test")
except Exception:
got_exception = True
assert got_exception
got_exception = False
try:
_ = iaa.Add(value=1, per_channel="test")
except Exception:
got_exception = True
assert got_exception
def test_add_zero(self):
# no add, shouldn't change anything
base_img = np.ones((3, 3, 1), dtype=np.uint8) * 100
images = np.array([base_img])
images_list = [base_img]
aug = iaa.Add(value=0)
aug_det = aug.to_deterministic()
observed = aug.augment_images(images)
expected = images
assert np.array_equal(observed, expected)
assert observed.shape == (1, 3, 3, 1)
observed = aug.augment_images(images_list)
expected = images_list
assert array_equal_lists(observed, expected)
observed = aug_det.augment_images(images)
expected = images
assert np.array_equal(observed, expected)
observed = aug_det.augment_images(images_list)
expected = images_list
assert array_equal_lists(observed, expected)
def test_add_one(self):
# add > 0
base_img = np.ones((3, 3, 1), dtype=np.uint8) * 100
images = np.array([base_img])
images_list = [base_img]
aug = iaa.Add(value=1)
aug_det = aug.to_deterministic()
observed = aug.augment_images(images)
expected = images + 1
assert np.array_equal(observed, expected)
assert observed.shape == (1, 3, 3, 1)
observed = aug.augment_images(images_list)
expected = [images_list[0] + 1]
assert array_equal_lists(observed, expected)
observed = aug_det.augment_images(images)
expected = images + 1
assert np.array_equal(observed, expected)
observed = aug_det.augment_images(images_list)
expected = [images_list[0] + 1]
assert array_equal_lists(observed, expected)
def test_minus_one(self):
# add < 0
base_img = np.ones((3, 3, 1), dtype=np.uint8) * 100
images = np.array([base_img])
images_list = [base_img]
aug = iaa.Add(value=-1)
aug_det = aug.to_deterministic()
observed = aug.augment_images(images)
expected = images - 1
assert np.array_equal(observed, expected)
observed = aug.augment_images(images_list)
expected = [images_list[0] - 1]
assert array_equal_lists(observed, expected)
observed = aug_det.augment_images(images)
expected = images - 1
assert np.array_equal(observed, expected)
observed = aug_det.augment_images(images_list)
expected = [images_list[0] - 1]
assert array_equal_lists(observed, expected)
def test_uint8_every_possible_value(self):
# uint8, every possible addition for base value 127
for value_type in [float, int]:
for per_channel in [False, True]:
for value in np.arange(-255, 255+1):
aug = iaa.Add(value=value_type(value), per_channel=per_channel)
expected = np.clip(127 + value_type(value), 0, 255)
img = np.full((1, 1), 127, dtype=np.uint8)
img_aug = aug.augment_image(img)
assert img_aug.item(0) == expected
img = np.full((1, 1, 3), 127, dtype=np.uint8)
img_aug = aug.augment_image(img)
assert np.all(img_aug == expected)
def test_add_floats(self):
# specific tests with floats
aug = iaa.Add(value=0.75)
img = np.full((1, 1), 1, dtype=np.uint8)
img_aug = aug.augment_image(img)
assert img_aug.item(0) == 2
img = np.full((1, 1), 1, dtype=np.uint16)
img_aug = aug.augment_image(img)
assert img_aug.item(0) == 2
aug = iaa.Add(value=0.45)
img = np.full((1, 1), 1, dtype=np.uint8)
img_aug = aug.augment_image(img)
assert img_aug.item(0) == 1
img = np.full((1, 1), 1, dtype=np.uint16)
img_aug = aug.augment_image(img)
assert img_aug.item(0) == 1
def test_stochastic_parameters_as_value(self):
# test other parameters
base_img = np.ones((3, 3, 1), dtype=np.uint8) * 100
images = np.array([base_img])
aug = iaa.Add(value=iap.DiscreteUniform(1, 10))
observed = aug.augment_images(images)
assert 100 + 1 <= np.average(observed) <= 100 + 10
aug = iaa.Add(value=iap.Uniform(1, 10))
observed = aug.augment_images(images)
assert 100 + 1 <= np.average(observed) <= 100 + 10
aug = iaa.Add(value=iap.Clip(iap.Normal(1, 1), -3, 3))
observed = aug.augment_images(images)
assert 100 - 3 <= np.average(observed) <= 100 + 3
aug = iaa.Add(value=iap.Discretize(iap.Clip(iap.Normal(1, 1), -3, 3)))
observed = aug.augment_images(images)
assert 100 - 3 <= np.average(observed) <= 100 + 3
def test_keypoints_dont_change(self):
# keypoints shouldn't be changed
base_img = np.ones((3, 3, 1), dtype=np.uint8) * 100
keypoints = [ia.KeypointsOnImage([ia.Keypoint(x=0, y=0), ia.Keypoint(x=1, y=1),
ia.Keypoint(x=2, y=2)], shape=base_img.shape)]
aug = iaa.Add(value=1)
aug_det = iaa.Add(value=1).to_deterministic()
observed = aug.augment_keypoints(keypoints)
expected = keypoints
assert keypoints_equal(observed, expected)
observed = aug_det.augment_keypoints(keypoints)
expected = keypoints
assert keypoints_equal(observed, expected)
def test_tuple_as_value(self):
# varying values
base_img = np.ones((3, 3, 1), dtype=np.uint8) * 100
images = np.array([base_img])
aug = iaa.Add(value=(0, 10))
aug_det = aug.to_deterministic()
last_aug = None
last_aug_det = None
nb_changed_aug = 0
nb_changed_aug_det = 0
nb_iterations = 1000
for i in sm.xrange(nb_iterations):
observed_aug = aug.augment_images(images)
observed_aug_det = aug_det.augment_images(images)
if i == 0:
last_aug = observed_aug
last_aug_det = observed_aug_det
else:
if not np.array_equal(observed_aug, last_aug):
nb_changed_aug += 1
if not np.array_equal(observed_aug_det, last_aug_det):
nb_changed_aug_det += 1
last_aug = observed_aug
last_aug_det = observed_aug_det
assert nb_changed_aug >= int(nb_iterations * 0.7)
assert nb_changed_aug_det == 0
def test_per_channel(self):
# test channelwise
aug = iaa.Add(value=iap.Choice([0, 1]), per_channel=True)
observed = aug.augment_image(np.zeros((1, 1, 100), dtype=np.uint8))
uq = np.unique(observed)
assert observed.shape == (1, 1, 100)
assert 0 in uq
assert 1 in uq
assert len(uq) == 2
def test_per_channel_with_probability(self):
# test channelwise with probability
aug = iaa.Add(value=iap.Choice([0, 1]), per_channel=0.5)
seen = [0, 0]
for _ in sm.xrange(400):
observed = aug.augment_image(np.zeros((1, 1, 20), dtype=np.uint8))
assert observed.shape == (1, 1, 20)
uq = np.unique(observed)
per_channel = (len(uq) == 2)
if per_channel:
seen[0] += 1
else:
seen[1] += 1
assert 150 < seen[0] < 250
assert 150 < seen[1] < 250
def test_zero_sized_axes(self):
shapes = [
(0, 0),
(0, 1),
(1, 0),
(0, 1, 0),
(1, 0, 0),
(0, 1, 1),
(1, 0, 1)
]
for shape in shapes:
with self.subTest(shape=shape):
image = np.zeros(shape, dtype=np.uint8)
aug = iaa.Add(1)
image_aug = aug(image=image)
assert np.all(image_aug == 1)
assert image_aug.dtype.name == "uint8"
assert image_aug.shape == image.shape
def test_unusual_channel_numbers(self):
shapes = [
(1, 1, 4),
(1, 1, 5),
(1, 1, 512),
(1, 1, 513)
]
for shape in shapes:
with self.subTest(shape=shape):
image = np.zeros(shape, dtype=np.uint8)
aug = iaa.Add(1)
image_aug = aug(image=image)
assert np.all(image_aug == 1)
assert image_aug.dtype.name == "uint8"
assert image_aug.shape == image.shape
def test_get_parameters(self):
# test get_parameters()
aug = iaa.Add(value=1, per_channel=False)
params = aug.get_parameters()
assert isinstance(params[0], iap.Deterministic)
assert isinstance(params[1], iap.Deterministic)
assert params[0].value == 1
assert params[1].value == 0
def test_heatmaps(self):
# test heatmaps (not affected by augmenter)
base_img = np.ones((3, 3, 1), dtype=np.uint8) * 100
aug = iaa.Add(value=10)
hm = ia.quokka_heatmap()
hm_aug = aug.augment_heatmaps([hm])[0]
assert np.allclose(hm.arr_0to1, hm_aug.arr_0to1)
def test_other_dtypes_bool(self):
image = np.zeros((3, 3), dtype=bool)
aug = iaa.Add(value=1)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == np.bool_
assert np.all(image_aug == 1)
image = np.full((3, 3), True, dtype=bool)
aug = iaa.Add(value=1)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == np.bool_
assert np.all(image_aug == 1)
image = np.full((3, 3), True, dtype=bool)
aug = iaa.Add(value=-1)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == np.bool_
assert np.all(image_aug == 0)
image = np.full((3, 3), True, dtype=bool)
aug = iaa.Add(value=-2)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == np.bool_
assert np.all(image_aug == 0)
def test_other_dtypes_uint_int(self):
for dtype in [np.uint8, np.uint16, np.int8, np.int16]:
min_value, center_value, max_value = iadt.get_value_range_of_dtype(dtype)
image = np.full((3, 3), min_value, dtype=dtype)
aug = iaa.Add(1)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(image_aug == min_value + 1)
image = np.full((3, 3), min_value + 10, dtype=dtype)
aug = iaa.Add(11)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(image_aug == min_value + 21)
image = np.full((3, 3), max_value - 2, dtype=dtype)
aug = iaa.Add(1)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(image_aug == max_value - 1)
image = np.full((3, 3), max_value - 1, dtype=dtype)
aug = iaa.Add(1)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(image_aug == max_value)
image = np.full((3, 3), max_value - 1, dtype=dtype)
aug = iaa.Add(2)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(image_aug == max_value)
image = np.full((3, 3), min_value + 10, dtype=dtype)
aug = iaa.Add(-9)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(image_aug == min_value + 1)
image = np.full((3, 3), min_value + 10, dtype=dtype)
aug = iaa.Add(-10)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(image_aug == min_value)
image = np.full((3, 3), min_value + 10, dtype=dtype)
aug = iaa.Add(-11)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(image_aug == min_value)
for _ in sm.xrange(10):
image = np.full((1, 1, 3), 20, dtype=dtype)
aug = iaa.Add(iap.Uniform(-10, 10))
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(np.logical_and(10 <= image_aug, image_aug <= 30))
assert len(np.unique(image_aug)) == 1
image = np.full((1, 1, 100), 20, dtype=dtype)
aug = iaa.Add(iap.Uniform(-10, 10), per_channel=True)
| |
# Copyright (c) 2018, NVIDIA CORPORATION.
import warnings
import numpy as np
import pandas as pd
from toolz import partition_all
import dask
import dask.dataframe as dd
from dask import compute
from dask.base import normalize_token, tokenize
from dask.compatibility import apply
from dask.context import _globals
from dask.core import flatten
from dask.dataframe import from_delayed
from dask.dataframe.core import Scalar, handle_out, map_partitions
from dask.dataframe.utils import raise_on_meta_error
from dask.delayed import delayed
from dask.highlevelgraph import HighLevelGraph
from dask.optimization import cull, fuse
from dask.utils import M, OperatorMethodMixin, derived_from, funcname
import cudf
import cudf._lib as libcudf
from dask_cudf import batcher_sortnet
from dask_cudf.accessor import (
CachedAccessor,
CategoricalAccessor,
DatetimeAccessor,
)
def optimize(dsk, keys, **kwargs):
flatkeys = list(flatten(keys)) if isinstance(keys, list) else [keys]
dsk, dependencies = cull(dsk, flatkeys)
dsk, dependencies = fuse(
dsk,
keys,
dependencies=dependencies,
ave_width=_globals.get("fuse_ave_width", 1),
)
dsk, _ = cull(dsk, keys)
return dsk
def finalize(results):
return cudf.concat(results)
class _Frame(dd.core._Frame, OperatorMethodMixin):
""" Superclass for DataFrame and Series
Parameters
----------
dsk : dict
The dask graph to compute this DataFrame
name : str
The key prefix that specifies which keys in the dask comprise this
particular DataFrame / Series
meta : cudf.DataFrame, cudf.Series, or cudf.Index
An empty cudf object with names, dtypes, and indices matching the
expected output.
divisions : tuple of index values
Values along which we partition our blocks on the index
"""
__dask_scheduler__ = staticmethod(dask.get)
__dask_optimize__ = staticmethod(optimize)
def __dask_postcompute__(self):
return finalize, ()
def __dask_postpersist__(self):
return type(self), (self._name, self._meta, self.divisions)
def __init__(self, dsk, name, meta, divisions):
if not isinstance(dsk, HighLevelGraph):
dsk = HighLevelGraph.from_collections(name, dsk, dependencies=[])
self.dask = dsk
self._name = name
meta = dd.core.make_meta(meta)
if not isinstance(meta, self._partition_type):
raise TypeError(
"Expected meta to specify type {0}, got type "
"{1}".format(
self._partition_type.__name__, type(meta).__name__
)
)
self._meta = dd.core.make_meta(meta)
self.divisions = tuple(divisions)
def __getstate__(self):
return (self.dask, self._name, self._meta, self.divisions)
def __setstate__(self, state):
self.dask, self._name, self._meta, self.divisions = state
def __repr__(self):
s = "<dask_cudf.%s | %d tasks | %d npartitions>"
return s % (type(self).__name__, len(self.dask), self.npartitions)
def to_dask_dataframe(self):
"""Create a dask.dataframe object from a dask_cudf object"""
return self.map_partitions(M.to_pandas)
concat = dd.concat
normalize_token.register(_Frame, lambda a: a._name)
class DataFrame(_Frame, dd.core.DataFrame):
_partition_type = cudf.DataFrame
def _assign_column(self, k, v):
def assigner(df, k, v):
out = df.copy()
out[k] = v
return out
meta = assigner(self._meta, k, dd.core.make_meta(v))
return self.map_partitions(assigner, k, v, meta=meta)
def apply_rows(self, func, incols, outcols, kwargs={}, cache_key=None):
import uuid
if cache_key is None:
cache_key = uuid.uuid4()
def do_apply_rows(df, func, incols, outcols, kwargs):
return df.apply_rows(
func, incols, outcols, kwargs, cache_key=cache_key
)
meta = do_apply_rows(self._meta, func, incols, outcols, kwargs)
return self.map_partitions(
do_apply_rows, func, incols, outcols, kwargs, meta=meta
)
def merge(self, other, **kwargs):
if kwargs.pop("shuffle", "tasks") != "tasks":
raise ValueError(
"Dask-cudf only supports task based shuffling, got %s"
% kwargs["shuffle"]
)
on = kwargs.pop("on", None)
if isinstance(on, tuple):
on = list(on)
return super().merge(other, on=on, shuffle="tasks", **kwargs)
def join(self, other, **kwargs):
if kwargs.pop("shuffle", "tasks") != "tasks":
raise ValueError(
"Dask-cudf only supports task based shuffling, got %s"
% kwargs["shuffle"]
)
# CuDF doesn't support "right" join yet
how = kwargs.pop("how", "left")
if how == "right":
return other.join(other=self, how="left", **kwargs)
on = kwargs.pop("on", None)
if isinstance(on, tuple):
on = list(on)
return super().join(other, how=how, on=on, shuffle="tasks", **kwargs)
def set_index(self, other, **kwargs):
if kwargs.pop("shuffle", "tasks") != "tasks":
raise ValueError(
"Dask-cudf only supports task based shuffling, got %s"
% kwargs["shuffle"]
)
return super().set_index(other, shuffle="tasks", **kwargs)
def reset_index(self, force=False, drop=False):
"""Reset index to range based
"""
if force:
dfs = self.to_delayed()
sizes = np.asarray(compute(*map(delayed(len), dfs)))
prefixes = np.zeros_like(sizes)
prefixes[1:] = np.cumsum(sizes[:-1])
@delayed
def fix_index(df, startpos):
stoppos = startpos + len(df)
return df.set_index(
cudf.core.index.RangeIndex(start=startpos, stop=stoppos)
)
outdfs = [
fix_index(df, startpos) for df, startpos in zip(dfs, prefixes)
]
return from_delayed(outdfs, meta=self._meta.reset_index(drop=True))
else:
return self.map_partitions(M.reset_index, drop=drop)
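# Sketch of the force=True path above (hypothetical partition sizes): for two
# partitions of lengths 3 and 4, sizes = [3, 4] and prefixes = [0, 3], so the
# first partition is re-indexed with RangeIndex(0, 3) and the second with
# RangeIndex(3, 7), giving one contiguous 0..6 index across the frame.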
def sort_values(self, by, ignore_index=False):
"""Sort by the given column
Parameters
----------
by : str
"""
parts = self.to_delayed()
sorted_parts = batcher_sortnet.sort_delayed_frame(parts, by)
return from_delayed(sorted_parts, meta=self._meta).reset_index(
force=not ignore_index
)
def sort_values_binned(self, by):
"""Sorty by the given column and ensure that the same key
doesn't spread across multiple partitions.
"""
# Get sorted partitions
parts = self.sort_values(by=by).to_delayed()
# Get unique keys in each partition
@delayed
def get_unique(p):
return set(p[by].unique())
uniques = list(compute(*map(get_unique, parts)))
joiner = {}
for i in range(len(uniques)):
joiner[i] = to_join = {}
for j in range(i + 1, len(uniques)):
intersect = uniques[i] & uniques[j]
# If the keys intersect
if intersect:
# Remove keys
uniques[j] -= intersect
to_join[j] = frozenset(intersect)
else:
break
@delayed
def join(df, other, keys):
others = [
other.query("{by}==@k".format(by=by)) for k in sorted(keys)
]
return cudf.concat([df] + others)
@delayed
def drop(df, keep_keys):
locvars = locals()
for i, k in enumerate(keep_keys):
locvars["k{}".format(i)] = k
conds = [
"{by}==@k{i}".format(by=by, i=i) for i in range(len(keep_keys))
]
expr = " or ".join(conds)
return df.query(expr)
for i in range(len(parts)):
if uniques[i]:
parts[i] = drop(parts[i], uniques[i])
for joinee, intersect in joiner[i].items():
parts[i] = join(parts[i], parts[joinee], intersect)
results = [p for i, p in enumerate(parts) if uniques[i]]
return from_delayed(results, meta=self._meta).reset_index()
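# Worked example (hypothetical keys): after sorting, suppose the per-partition
# unique keys are [{1, 2}, {2, 3}, {4}]. Key 2 spans partitions 0 and 1, so
# partition 0 joins in the key-2 rows from partition 1 while partition 1 keeps
# only key 3; the resulting partitions are keyed {1, 2}, {3} and {4}, so no key
# is split across partitions.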
def to_parquet(self, path, *args, **kwargs):
""" Calls dask.dataframe.io.to_parquet with CudfEngine backend """
from dask_cudf.io import to_parquet
return to_parquet(self, path, *args, **kwargs)
def to_orc(self, path, **kwargs):
""" Calls dask_cudf.io.to_orc """
from dask_cudf.io import to_orc
return to_orc(self, path, **kwargs)
@derived_from(pd.DataFrame)
def var(
self,
axis=None,
skipna=True,
ddof=1,
split_every=False,
dtype=None,
out=None,
):
axis = self._validate_axis(axis)
meta = self._meta_nonempty.var(axis=axis, skipna=skipna)
if axis == 1:
result = map_partitions(
M.var,
self,
meta=meta,
token=self._token_prefix + "var",
axis=axis,
skipna=skipna,
ddof=ddof,
)
return handle_out(out, result)
else:
num = self._get_numeric_data()
x = 1.0 * num.sum(skipna=skipna, split_every=split_every)
x2 = 1.0 * (num ** 2).sum(skipna=skipna, split_every=split_every)
n = num.count(split_every=split_every)
name = self._token_prefix + "var"
result = map_partitions(
var_aggregate, x2, x, n, token=name, meta=meta, ddof=ddof
)
if isinstance(self, DataFrame):
result.divisions = (min(self.columns), max(self.columns))
return handle_out(out, result)
def sum_of_squares(x):
x = x.astype("f8")._column
outcol = libcudf.reduce.reduce("sum_of_squares", x)
return cudf.Series(outcol)
def var_aggregate(x2, x, n, ddof):
try:
with warnings.catch_warnings(record=True):
warnings.simplefilter("always")
result = (x2 / n) - (x / n) ** 2
if ddof != 0:
result = result * n / (n - ddof)
return result
except ZeroDivisionError:
return np.float64(np.nan)
def nlargest_agg(x, **kwargs):
return cudf.concat(x).nlargest(**kwargs)
def nsmallest_agg(x, **kwargs):
return cudf.concat(x).nsmallest(**kwargs)
def unique_k_agg(x, **kwargs):
return cudf.concat(x).unique_k(**kwargs)
class Series(_Frame, dd.core.Series):
_partition_type = cudf.Series
def count(self, split_every=False):
return reduction(
self,
chunk=M.count,
aggregate=np.sum,
split_every=split_every,
meta="i8",
)
def mean(self, split_every=False):
sum = self.sum(split_every=split_every)
n = self.count(split_every=split_every)
return sum / n
def unique_k(self, k, split_every=None):
return reduction(
self,
chunk=M.unique_k,
aggregate=unique_k_agg,
meta=self._meta,
token="unique-k",
split_every=split_every,
k=k,
)
@derived_from(pd.DataFrame)
def var(
self,
axis=None,
skipna=True,
ddof=1,
split_every=False,
dtype=None,
out=None,
):
axis = self._validate_axis(axis)
meta = self._meta_nonempty.var(axis=axis, skipna=skipna)
if axis == 1:
result = map_partitions(
M.var,
self,
meta=meta,
token=self._token_prefix + "var",
axis=axis,
skipna=skipna,
ddof=ddof,
)
return handle_out(out, result)
else:
num = self._get_numeric_data()
x = 1.0 * num.sum(skipna=skipna, split_every=split_every)
x2 = 1.0 * (num ** 2).sum(skipna=skipna, split_every=split_every)
n = num.count(split_every=split_every)
name = self._token_prefix + "var"
result = map_partitions(
var_aggregate, x2, x, n, token=name, meta=meta, ddof=ddof
)
if isinstance(self, DataFrame):
result.divisions = (min(self.columns), max(self.columns))
return handle_out(out, result)
# ----------------------------------------------------------------------
# Accessor Methods
# ----------------------------------------------------------------------
dt = CachedAccessor("dt", DatetimeAccessor)
cat = CachedAccessor("cat", CategoricalAccessor)
class Index(Series, dd.core.Index):
_partition_type = cudf.Index
def splits_divisions_sorted_cudf(df, chunksize):
segments = list(df.index.find_segments().to_array())
segments.append(len(df) - 1)
splits = [0]
last = current_size = 0
for s in segments:
size = s - last
last = s
current_size += size
if current_size >= chunksize:
splits.append(s)
current_size = 0
# Ensure end is included
if splits[-1] != segments[-1]:
splits.append(segments[-1])
divisions = tuple(df.index.take(np.array(splits)).values)
splits[-1] += 1 # Offset to extract to end
return splits, divisions
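# Summary of the splitting logic above (based on the code, not on cudf docs):
# boundaries of equal-index segments are accumulated until at least `chunksize`
# rows have been seen, at which point a split is recorded, so each resulting
# partition holds whole index segments of roughly `chunksize` rows; `divisions`
# are the index values found at those split points.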
def _extract_meta(x):
"""
Extract internal cache data (``_meta``) from dask_cudf objects
"""
if isinstance(x, (Scalar, _Frame)):
return x._meta
elif isinstance(x, list):
return [_extract_meta(_x) for _x in x]
elif isinstance(x, tuple):
return tuple([_extract_meta(_x) for _x in x])
elif isinstance(x, dict):
return {k: _extract_meta(v) for k, v in x.items()}
return x
def _emulate(func, *args, **kwargs):
"""
Apply a function using args / kwargs. If the arguments contain dd.DataFrame /
dd.Series objects, their internal cache (``_meta``) is used for the calculation
"""
with raise_on_meta_error(funcname(func)):
return func(*_extract_meta(args), **_extract_meta(kwargs))
def align_partitions(args):
"""Align partitions between dask_cudf objects.
Note that if all divisions are unknown, but have equal npartitions, then
they will be passed through unchanged."""
dfs = [df for df in args if isinstance(df, _Frame)]
if not dfs:
return args
divisions = dfs[0].divisions
if not all(df.divisions == divisions for df in dfs):
raise NotImplementedError("Aligning | |
"""
AWS API-Gateway Authorizer
==========================
This authorizer is designed to be attached to an AWS API-Gateway, as a
Lambda authorizer. It assumes that AWS Cognito is used to authenticate
a client (UI) and then API requests will pass a JSON Web Token to be
validated for authorization of API method calls. The initial designs
for authorization are very limited in scope.
This auth module is using a recent release of jwcrypto for several reasons:
- jwcrypto supports all JOSE features (see jwt.io libs for python)
- jwcrypto has well designed and documented APIs (python-jose does not)
- it can generate keys as well as other functions for JOSE
.. seealso::
- https://jwcrypto.readthedocs.io/en/latest/index.html
- https://auth0.com/docs/tokens/concepts/jwts
- https://jwt.io/
- https://docs.aws.amazon.com/apigateway/latest/developerguide/http-api-jwt-authorizer.html
- https://docs.aws.amazon.com/cognito/latest/developerguide/amazon-cognito-user-pools-using-tokens-verifying-a-jwt.html
- https://docs.aws.amazon.com/cognito/latest/developerguide/amazon-cognito-user-pools-using-tokens-with-identity-providers.html
License
*******
This auth module is a derivative of various sources of JWT documentation and
source code samples that are covered by the Apache License, Version 2.0.
Copyright 2015-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License"). You may not use
this file except in compliance with the License. A copy of the License is
located at
http://aws.amazon.com/apache2.0/
or in the "license" file accompanying this file. This file is distributed on an
"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied. See the License for the specific language governing permissions and
limitations under the License.
"""
# WARNING: moto provides python-jose as a dev-dep, which is not part of
# the app-deps and should not be used in this auth module, that is,
# do not use imports like these:
# from jose import jwt
# from jose import jwk
import json
import os
import re
from typing import Dict
import jwcrypto
import jwcrypto.jwk
import jwcrypto.jwt
import requests
from dataclasses import dataclass
from example_app.logger import get_logger
LOGGER = get_logger(__name__)
API_ADMIN_EMAILS = [
email.strip() for email in os.getenv("API_ADMIN_EMAILS", "").split(",")
]
COGNITO_REGION = os.getenv("API_COGNITO_REGION", "us-west-2")
COGNITO_CLIENT_ID = os.getenv("API_COGNITO_CLIENT_ID")
COGNITO_POOL_ID = os.getenv("API_COGNITO_POOL_ID")
@dataclass
class AuthError(Exception):
error: str
status_code: int
@dataclass
class CognitoPool:
id: str
client_id: str
region: str
_jwks: Dict = None
@property
def jwks_uri(self) -> str:
return "https://cognito-idp.{}.amazonaws.com/{}/.well-known/jwks.json".format(
self.region, self.id
)
@property
def jwks(self) -> Dict:
if self._jwks is None:
LOGGER.debug(self.jwks_uri)
response = requests.get(self.jwks_uri)
LOGGER.debug(response)
response.raise_for_status()
# use jwcrypto to parse the JWKS (it takes a json string)
jwks = jwcrypto.jwk.JWKSet.from_json(response.text)
self._jwks = json.loads(jwks.export())
LOGGER.debug(self._jwks)
return self._jwks
@staticmethod
def jwt_decode(jwt_token: str):
try:
jwt_headers, jwt_payload, jwt_signature = jwt_token.split(".")
if not isinstance(jwt_headers, str):
raise AuthError("Unauthorized - JWT is malformed", 401)
if not isinstance(jwt_payload, str):
raise AuthError("Unauthorized - JWT is malformed", 401)
if not isinstance(jwt_signature, str):
raise AuthError("Unauthorized - JWT is malformed", 401)
unverified_token = jwcrypto.jwt.JWT(jwt=jwt_token)
jwt_headers = unverified_token.token.jose_header
if not isinstance(jwt_headers, dict):
raise AuthError("Unauthorized - JWT has malformed headers", 401)
if not jwt_headers.get("alg"):
raise AuthError("Unauthorized - JWT-alg is not in headers", 401)
if not jwt_headers.get("kid"):
raise AuthError("Unauthorized - JWT-kid is not in headers", 401)
jwt_payload = unverified_token.token.objects["payload"].decode("utf-8")
jwt_payload = json.loads(jwt_payload)
if not isinstance(jwt_payload, dict):
raise AuthError("Unauthorized - JWT has malformed payload", 401)
if not jwt_payload.get("token_use") in ["id", "access"]:
raise AuthError("Unauthorized - JWT has malformed payload", 401)
return jwt_headers, jwt_payload, jwt_signature
except Exception as err:
LOGGER.error(err)
raise AuthError("Unauthorized - JWT is malformed", 401)
def jwt_public_key(self, jwt_token: str):
unverified_token = jwcrypto.jwt.JWT(jwt=jwt_token)
jwt_headers = unverified_token.token.jose_header
kid = jwt_headers.get("kid")
if kid is None:
raise AuthError("Unauthorized - JWT-kid is missing", 401)
LOGGER.debug(kid)
for pub_key in self.jwks.get("keys"):
if kid == pub_key.get("kid"):
LOGGER.info("JWT-kid has matching public-kid")
return pub_key
raise AuthError("Unauthorized - JWT-kid has no matching public-kid", 401)
def jwt_claims(self, jwt_token: str):
try:
public_key = self.jwt_public_key(jwt_token)
public_jwk = jwcrypto.jwk.JWK(**public_key)
verified_token = jwcrypto.jwt.JWT(
key=public_jwk, jwt=jwt_token, algs=[public_key["alg"]]
)
return json.loads(verified_token.claims)
except Exception as err:
LOGGER.error(err)
raise AuthError("Unauthorized - token failed to verify", 401)
COGNITO_POOL = CognitoPool(
region=COGNITO_REGION, client_id=COGNITO_CLIENT_ID, id=COGNITO_POOL_ID
)
if os.getenv("AWS_EXECUTION_ENV"):
# instead of re-downloading the public keys every time, memoize them only on cold start
# https://aws.amazon.com/blogs/compute/container-reuse-in-lambda/
# https://docs.aws.amazon.com/lambda/latest/dg/runtimes-context.html
assert COGNITO_POOL.jwks
@dataclass
class APIGateway:
aws_region: str
aws_account_id: str
api_gateway_arn: str
rest_api_id: str
rest_api_stage: str
@staticmethod
def from_method_arn(method_arn):
tmp = method_arn.split(":")
api_gateway_arn = tmp[5].split("/")
return APIGateway(
aws_region=tmp[3],
aws_account_id=tmp[4],
api_gateway_arn=tmp[5],
rest_api_id=api_gateway_arn[0],
rest_api_stage=api_gateway_arn[1],
)
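    # For illustration (made-up identifiers), a methodArn such as
    #   arn:aws:execute-api:us-west-2:123456789012:a1b2c3d4e5/dev/GET/api/healthz
    # parses to aws_region="us-west-2", aws_account_id="123456789012",
    # rest_api_id="a1b2c3d4e5" and rest_api_stage="dev".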
def get_auth_policy(self, principal_id: str):
policy = AuthPolicy(principal_id, self.aws_account_id)
policy.restApiId = self.rest_api_id
policy.stage = self.rest_api_stage
policy.region = self.aws_region
return policy
def aws_auth_handler(event, context):
"""AWS Authorizer for JWT tokens provided by AWS Cognito
event should have this form:
{
"type": "TOKEN",
"authorizationToken": "{caller-supplied-token}",
"methodArn": "arn:aws:execute-api:{regionId}:{accountId}:{apiId}/{stage}/{httpVerb}/[{resource}/[{child-resources}]]"
}
.. seealso::
- https://docs.aws.amazon.com/apigateway/latest/developerguide/apigateway-use-lambda-authorizer.html
"""
LOGGER.debug("event: %s", event)
LOGGER.debug("context: %s", context)
try:
# validate the incoming token
# and produce the principal user identifier associated with the token
# this could be accomplished in a number of ways:
# 1. Call out to OAuth provider
# 2. Decode a JWT token inline
# 3. Lookup in a self-managed DB
# TODO: try 2. Decode a JWT token inline
# https://docs.authlib.org/en/stable/jose/index.html
# https://aws.amazon.com/premiumsupport/knowledge-center/decode-verify-cognito-json-token/
# https://github.com/awslabs/aws-support-tools/tree/master/Cognito/decode-verify-jwt
# there are flask plugins for this, but the API-Gateway solution is different
# https://flask-jwt-extended.readthedocs.io/en/stable/basic_usage/
# https://auth0.com/docs/quickstart/backend/python
token = event.get("authorizationToken")
if token is None:
raise AuthError("Unauthorized - authorizationToken is missing", 401)
if token.startswith("Bearer"):
token = token.strip("Bearer").strip()
# TODO: handle a SigV4 token?
# 'authorizationToken': 'AWS<PASSWORD>56
# Credential=<secret_id>/20200529/us-west-2/execute-api/aws4_request,
# Signature=xyz'
claims = COGNITO_POOL.jwt_claims(token) # also validates JWT
issuer = claims.get("iss")
if not (COGNITO_POOL.region in issuer and COGNITO_POOL.id in issuer):
raise AuthError("Unauthorized - invalid issuer in JWT claims", 403)
if claims["token_use"] == "id":
audience = claims.get("aud")
if audience != COGNITO_POOL.client_id:
raise AuthError("Unauthorized - invalid client-id in JWT claims", 403)
elif claims["token_use"] == "access":
client_id = claims.get("client_id")
if client_id != COGNITO_POOL.client_id:
raise AuthError("Unauthorized - invalid client-id in JWT claims", 403)
else:
# token validation should check this already, so should not get here
raise AuthError("Unauthorized - invalid client-id in JWT claims", 403)
if claims["token_use"] == "id":
principle_id = claims.get("email")
if not principle_id:
raise AuthError(
"Unauthorized - invalid principle-id in JWT claims", 403
)
if not claims.get("email_verified"):
raise AuthError(
"Unauthorized - email is not verified in JWT claims", 403
)
elif claims["token_use"] == "access":
principle_id = claims.get("username")
if not principle_id:
raise AuthError(
"Unauthorized - invalid principle-id in JWT claims", 403
)
else:
# token validation should check this already, so should not get here
raise AuthError("Unauthorized - invalid principle-id in JWT claims", 403)
# if the token is valid, a policy must be generated which will allow or deny
# access to the client
# if access is denied, the client will receive a 403 Access Denied response
# if access is allowed, API Gateway will proceed with the backend
# integration configured on the method that was called
# this function must generate a policy that is associated with the
# recognized principal user identifier. depending on your use case, you
# might store policies in a DB, or generate them on the fly
# keep in mind, the policy is cached for 5 minutes by default (TTL is
# configurable in the authorizer) and will apply to subsequent calls to any
# method/resource in the RestApi made with the same token
# the example policy below denies access to all resources in the RestApi
LOGGER.info("Method ARN: %s", event["methodArn"])
api_gateway = APIGateway.from_method_arn(event.get("methodArn"))
policy = api_gateway.get_auth_policy(principle_id)
policy.allowAllMethods() # a valid signed JWT is sufficient
#
# TODO: use cognito-groups with an JWT-access token?
#
if principle_id not in API_ADMIN_EMAILS:
policy.denyMethod(HttpVerb.GET, "/api/healthz")
# TODO: restrict the policy by additional options:
# #: The API Gateway API id. By default this is set to '*'
# restApiId = "*"
# #: The region where the API is deployed. By default this is set to '*'
# region = "*"
# #: The name of the stage used in the policy. By default this is set to '*'
# stage = "*"
# Finally, build the policy
auth_response = policy.build()
# # Add additional key-value pairs associated with the authenticated principal
# # these are made available by API-GW like so: $context.authorizer.<key>
# # additional context is cached
# context = {"key": "value", "number": 1, "bool": True} # $context.authorizer.key -> value
# # context['arr'] = ['foo'] <- this is invalid, API-GW will not accept it
# # context['obj'] = {'foo':'bar'} <- also invalid
# auth_response["context"] = context
# TODO: use "usageIdentifierKey": "{api-key}" for API-key use plans, if any.
return auth_response
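        # For reference, the returned object is an IAM-style policy document as
        # expected by API Gateway; a minimal allow response built by
        # AuthPolicy.build() looks roughly like this (shape shown for
        # illustration only):
        #   {"principalId": "user@example.com",
        #    "policyDocument": {"Version": "2012-10-17",
        #                       "Statement": [{"Action": "execute-api:Invoke",
        #                                      "Effect": "Allow",
        #                                      "Resource": ["arn:aws:execute-api:..."]}]}}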
    # Set the ncs_object in all map_managers if an ncs_object is present
if self.ncs_object():
for m in [map_manager, map_manager_1, map_manager_2]+\
extra_map_manager_list:
if m:
m.set_ncs_object(self.ncs_object())
# Make sure all really match:
for m in [map_manager, map_manager_1, map_manager_2]+\
extra_map_manager_list:
if m and not any_map_manager.is_similar(m):
raise AssertionError(any_map_manager.warning_message())
# Set up maps, model, as dictionaries (same as used in map_model_manager)
self.set_up_map_dict(
map_manager = map_manager,
map_manager_1 = map_manager_1,
map_manager_2 = map_manager_2,
extra_map_manager_list = extra_map_manager_list,
extra_map_manager_id_list = extra_map_manager_id_list)
self.set_up_model_dict(
model = model,
extra_model_list = extra_model_list,
extra_model_id_list = extra_model_id_list)
def _make_cell_slightly_different_in_abc(self,map_manager):
'''
Adjust cell parameters just slightly so that gridding is not exactly the
same in all directions. This will make binner give uniform results
'''
cs=map_manager.unit_cell_crystal_symmetry()
uc=cs.unit_cell()
from cctbx import uctbx
p=list(uc.parameters())
if p[0] == p[1]:
p[1] += 1.e-2
if p[0] == p[2]:
p[2] -= 1.e-2
uc=uctbx.unit_cell(tuple(p))
cs=cs.customized_copy(unit_cell=uc)
map_manager.set_unit_cell_crystal_symmetry(cs)
def set_up_map_dict(self,
map_manager = None,
map_manager_1 = None,
map_manager_2 = None,
extra_map_manager_list = None,
extra_map_manager_id_list = None):
'''
map_dict has four special ids with interpretations:
map_manager: full map
map_manager_1, map_manager_2: half-maps 1 and 2
map_manager_mask: a mask in a map_manager
All other ids are any strings and are assumed to correspond to other maps
map_manager must be present
'''
assert (map_manager is not None) or (
(map_manager_1 is not None) and (map_manager_2 is not None))
self._map_dict={}
self._map_dict['map_manager']=map_manager
if map_manager_1 and map_manager_2:
self._map_dict['map_manager_1']=map_manager_1
self._map_dict['map_manager_2']=map_manager_2
if extra_map_manager_id_list:
for id, m in zip(extra_map_manager_id_list,extra_map_manager_list):
if (id is not None) and (m is not None):
self._map_dict[id]=m
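  # For example (hypothetical map_manager objects), a populated map_dict could
  # look like:
  #   {'map_manager': mm_full, 'map_manager_1': mm_half_1,
  #    'map_manager_2': mm_half_2, 'map_manager_mask': mm_mask}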
def set_up_model_dict(self,
model = None,
extra_model_list = None,
extra_model_id_list = None):
'''
map_dict has one special id with interpretation:
model: standard model
All other ids are any strings and are assumed to correspond to other
models.
'''
self._model_dict={}
self._model_dict['model']=model
if extra_model_id_list:
for id, m in zip(extra_model_id_list,extra_model_list):
if id is not None and m is not None:
self._model_dict[id]=m
# prevent pickling error in Python 3 with self.log = sys.stdout
# unpickling is limited to restoring sys.stdout
def __getstate__(self):
import io
pickle_dict = self.__dict__.copy()
if isinstance(self.log, io.TextIOWrapper):
pickle_dict['log'] = None
return pickle_dict
def __setstate__(self, pickle_dict):
self.__dict__ = pickle_dict
if self.log is None:
self.log = sys.stdout
def __repr__(self):
text = "\nMap_model_manager '%s': \n" %(self.name)
if self.model():
text += "\n%s\n" %(str(self.model()))
map_info = self._get_map_info()
model_info = self._get_model_info()
if self.map_manager():
text += "\nmap_manager: %s\n" %(str(self.map_manager()))
for id in map_info.other_map_id_list:
text += "\n%s: %s\n" %(id,str(self.get_map_manager_by_id(id)))
for id in model_info.other_model_id_list:
text += "\n%s: %s\n" %(id,str(self.get_model_by_id(id)))
return text
def set_name(self, name = None):
'''
Set name
'''
self.name = name
def set_verbose(self, verbose = None):
'''
Set verbose
'''
self.verbose = verbose
# Methods to get and set info object (any information about this object)
def set_info(self, info):
self._info = info
def info(self):
return self.get_info()
def get_info(self, item_name = None):
if not item_name:
return self._info
else:
return self._info.get(item_name)
def add_to_info(self, item_name = None, item = None):
setattr(self._info,item_name, item)
# Methods for printing
def set_log(self, log = sys.stdout):
'''
Set output log file
'''
if log is None:
self.log = null_out()
else:
self.log = log
def _print(self, m):
'''
Print to log if it is present
'''
if (self.log is not None) and hasattr(self.log, 'closed') and (
not self.log.closed):
print(m, file = self.log)
# Methods for obtaining models, map_managers, symmetry, ncs_objects
def crystal_symmetry(self):
''' Get the working crystal_symmetry'''
return self.map_manager().crystal_symmetry()
def unit_cell_crystal_symmetry(self):
''' Get the unit_cell_crystal_symmetry (full or original symmetry)'''
return self.map_manager().unit_cell_crystal_symmetry()
def shift_cart(self):
''' get the shift_cart (shift since original location)'''
return self.map_manager().shift_cart()
def map_dict(self):
''' Get the dictionary of all maps and masks as map_manager objects'''
return self._map_dict
def model_dict(self):
''' Get the dictionary of all models '''
return self._model_dict
def models(self):
''' Get all the models as a list'''
model_list = []
for id in self.model_id_list():
m = self.get_model_by_id(id)
if m is not None:
model_list.append(m)
return model_list
def model(self):
''' Get the model '''
return self._model_dict.get('model')
def model_id_list(self):
''' Get all the names (ids) for all models'''
mil = []
for id in self.model_dict().keys():
if self.get_model_by_id(id) is not None:
mil.append(id)
return mil
def get_model_by_id(self, model_id):
''' Get a model with the name model_id'''
return self.model_dict().get(model_id)
def remove_model_by_id(self, model_id = 'extra'):
'''
Remove this model
'''
del self._model_dict[model_id]
def map_managers(self):
''' Get all the map_managers as a list'''
map_manager_list = []
for id in self.map_id_list():
mm = self.get_map_manager_by_id(id)
if mm:
map_manager_list.append(mm)
return map_manager_list
def map_manager(self):
'''
Get the map_manager
If not present, calculate it from map_manager_1 and map_manager_2
and set it.
'''
map_manager = self._map_dict.get('map_manager')
if (not map_manager):
# If map_manager_1 and map_manager_2 are supplied but no map_manager,
# create map_manager as average of map_manager_1 and map_manager_2
map_manager_1 = self._map_dict.get('map_manager_1')
map_manager_2 = self._map_dict.get('map_manager_2')
if map_manager_1 and map_manager_2:
map_manager = map_manager_1.customized_copy(map_data =
0.5 * (map_manager_1.map_data() + map_manager_2.map_data()))
# Try to make a file name
file_name = None
if map_manager_1.file_name and map_manager_2.file_name:
try:
file_name = "_and_".join([
os.path.splitext(os.path.split(
map_manager_1.file_name)[-1])[0],
os.path.splitext(os.path.split(
map_manager_2.file_name)[-1])[0]]) + \
os.path.splitext(map_manager_1.file_name)[1]
except Exception as e:
file_name = None
map_manager.file_name = file_name
self._map_dict['map_manager'] = map_manager
if self.model() and not map_manager: # make one based on model
crystal_symmetry = self.model().unit_cell_crystal_symmetry()
if not crystal_symmetry:
crystal_symmetry = self.model().crystal_symmetry()
if crystal_symmetry:
from iotbx.map_manager import dummy_map_manager
map_manager = dummy_map_manager(crystal_symmetry)
self._map_dict['map_manager'] = map_manager
self.info().dummy_map_manager = True # mark it
return map_manager
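  # Behaviour sketch (hypothetical half-map managers mm1 and mm2): when only
  # half-maps are supplied, the first call to map_manager() caches a full map
  # whose data is 0.5 * (mm1.map_data() + mm2.map_data()); a dummy map_manager
  # is created only when no maps are available but a model is.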
def map_manager_1(self):
''' Get half_map 1 as a map_manager object '''
return self._map_dict.get('map_manager_1')
def map_manager_2(self):
''' Get half_map 2 as a map_manager object '''
return self._map_dict.get('map_manager_2')
def map_manager_mask(self):
''' Get the mask as a map_manager object '''
return self._map_dict.get('map_manager_mask')
def map_id_list(self):
''' Get all the names (ids) for all map_managers that are present'''
mil = []
for id in self.map_dict().keys():
if self.get_map_manager_by_id(id) is not None:
mil.append(id)
return mil
def get_ncs_from_model(self):
'''
Return model NCS as ncs_spec object if available
Does not set anything. If you want to save it use:
self.set_ncs_object(self.get_ncs_from_model())
This will set the ncs object in the map_manager (if present)
'''
if not self.model():
return None
if not self.model().get_ncs_obj():
self.model().search_for_ncs()
if self.model().get_ncs_obj():
return self.model().get_ncs_obj().get_ncs_info_as_spec()
else:
return None
def get_ncs_from_map(self, use_existing = True,
include_helical_symmetry = False,
symmetry_center = None,
min_ncs_cc = None,
symmetry = None,
ncs_object = None):
'''
Use existing ncs object in map if present or find ncs from map
Sets ncs_object in self.map_manager()
Sets self._ncs_cc which can be retrieved with self.ncs_cc()
'''
if (not ncs_object) and use_existing:
ncs_object = self.ncs_object()
ncs=self.map_manager().find_map_symmetry(
include_helical_symmetry = include_helical_symmetry,
symmetry_center = symmetry_center,
min_ncs_cc = min_ncs_cc,
symmetry = symmetry,
ncs_object = ncs_object)
self._ncs_cc = self.map_manager().ncs_cc()
return self.ncs_object()
def ncs_cc(self):
if hasattr(self,'_ncs_cc'):
return self._ncs_cc
def set_ncs_object(self, ncs_object):
'''
Set the ncs object of map_manager
'''
if not self.map_manager():
return
else:
self.map_manager().set_ncs_object(ncs_object)
def ncs_object(self):
if self.map_manager():
return self.map_manager().ncs_object()
else:
return None
def experiment_type(self):
if self.map_manager():
return self.map_manager().experiment_type()
else:
return None
def scattering_table(self):
if self._scattering_table:
return self._scattering_table
elif self.map_manager():
return self.map_manager().scattering_table()
else:
return None
def minimum_resolution(self):
'''
Return d_min, normally minimum available but if set, return
value of d_min
'''
if not self._minimum_resolution:
# get it and set it and return it
self._minimum_resolution = self.map_manager().minimum_resolution()
return self._minimum_resolution
def nproc(self):
return self._nproc
def resolution(self,
use_fsc_if_no_resolution_available_and_maps_available = True,
map_id_1 = 'map_manager_1',
map_id_2 = 'map_manager_2',
fsc_cutoff = 0.143,
):
if self._resolution: # have it already
return self._resolution
else: # figure out resolution
resolution = None
if use_fsc_if_no_resolution_available_and_maps_available and \
self.get_map_manager_by_id(map_id_1) and \
self.get_map_manager_by_id(map_id_2):
fsc_info = self.map_map_fsc( # get resolution from FSC
map_id_1 = map_id_1,
map_id_2 = map_id_2)
resolution = fsc_info.d_min
if resolution is not None:
print("\nResolution estimated from FSC of '%s' and '%s: %.3f A " %(
map_id_1, map_id_2, resolution), file = self.log)
elif fsc_info.fsc.fsc.min_max_mean().min > fsc_cutoff:
print("\nResolution estimated from minimum_resolution ",
"\nbecause FSC of '%s' and '%s is undefined" %(
map_id_1, map_id_2), file = self.log)
resolution = self.minimum_resolution()
else:
print("\nCould not obtain resolution from FSC", file = self.log)
if (not resolution) and self.map_manager():
# get resolution from map_manager
resolution = self.map_manager().resolution()
print("\nResolution obtained from map_manager: %.3f A " %(
resolution), file = self.log)
if resolution:
self.set_resolution(resolution)
return resolution
else:
return None
def set_multiprocessing(self,
nproc = None,
multiprocessing = None,
queue_run_command = None):
''' Set multiprocessing parameters'''
if nproc:
self._nproc = nproc
      if nproc > 1 and
from .memoize import Memoize
from . import stdops
#Needed for travis
import random
def collect(exp, fn):
rv = set()
def _collect(exp):
if fn(exp):
rv.add(exp)
return exp
exp.walk(_collect)
return rv
def _replace_one(expr, match, repl):
vals = WildResults()
if expr.match(match, vals):
expr = repl.substitute({wilds(w): vals[w] for w in vals})
if len(expr) > 1:
return expr[0](*[_replace_one(x, match, repl) for x in expr.args])
else:
return expr
def replace(expr, d, repeat=True):
while True:
old_expr = expr
for k in d:
expr = _replace_one(expr, k, d[k])
if old_expr == expr or not repeat:
return expr
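# Usage sketch for replace() (illustrative; assumes wilds() returns a single
# Wild when given one name, as used in _replace_one above):
#   a = wilds('a')
#   x = Symbol('x')
#   replace(x + 4, {a + 4: a * 2})  # the wild 'a' binds to x, giving x * 2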
class _Symbolic(object):
def match(self, other, valuestore=None):
'''
matches against a pattern, use wilds() to generate wilds
Example:
a,b = wilds('a b')
    val = WildResults()
if exp.match(a(b + 4), val):
print val.a
print val.b
'''
from . import match
return match.match(self, other, valuestore)
def __new__(typ):
return object.__new__(typ)
def __hash__(self):
return hash(self.name)
def simplify(self):
from . import simplify
return simplify.simplify(self)
def walk(self, *fns):
if len(fns) > 1:
def _(exp):
for f in fns:
exp = f(exp)
return exp
return self.walk(_)
exp = self
fn = fns[0]
if len(exp) == 1:
oldexp = exp
exp = fn(exp)
while exp != oldexp:
oldexp = exp
exp = fn(exp)
else:
args = list([x.walk(fn) for x in exp.args])
oldexp = self
exp = fn(fn(exp[0])(*args))
return exp
def _dump(self):
return {
'name': self.name,
'id': id(self)
}
def __contains__(self, exp):
rv = {}
rv['val'] = False
def _(_exp):
if _exp.match(exp):
rv['val'] = True
return _exp
self.walk(_)
return rv['val']
def substitute(self, subs):
'''
takes a dictionary of substitutions
returns itself with substitutions made
'''
if self in subs:
self = subs[self]
return self
def compile(self, *arguments):
'''compiles a symbolic expression with arguments to a python function'''
def _compiled_func(*args):
assert len(args) == len(arguments)
argdic = {}
for i in range(len(args)):
argdic[arguments[i]] = args[i]
rv = self.substitute(argdic).simplify()
return desymbolic(rv)
return _compiled_func
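  # Sketch of compile() in use (illustrative; assumes simplify() reduces the
  # fully substituted expression to a plain numeric value):
  #   x, y = Symbol('x'), Symbol('y')
  #   f = (x * y + 2).compile(x, y)
  #   f(3, 4)  # -> 14.0 after substitution and simplification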
def __eq__(self, other):
# return type(self) == type(other) and self.name == other.name
return id(self) == id(other)
def __ne__(self, other):
return not self.__eq__(other)
def __getitem__(self, num):
if num == 0:
return self
raise BaseException("Invalid index")
def __len__(self):
return 1
# comparison operations notice we don't override __eq__
def __gt__(self, obj):
return Fn.GreaterThan(self, obj)
def __ge__(self, obj):
return Fn.GreaterThanEq(self, obj)
def __lt__(self, obj):
return Fn.LessThan(self, obj)
def __le__(self, obj):
return Fn.LessThanEq(self, obj)
# arithmetic overrides
def __mul__(self, other):
return Fn.Mul(self, other)
def __pow__(self, other):
return Fn.Pow(self, other)
def __rpow__(self, other):
return Fn.Pow(other, self)
def __truediv__(self, other):
return Fn.Div(self, other)
def __add__(self, other):
return Fn.Add(self, other)
def __sub__(self, other):
return Fn.Sub(self, other)
def __or__(self, other):
return Fn.BitOr(self, other)
def __and__(self, other):
return Fn.BitAnd(self, other)
def __xor__(self, other):
return Fn.BitXor(self, other)
def __rmul__(self, other):
return Fn.Mul(other, self)
def __rtruediv__(self, other):
return Fn.Div(other, self)
def __radd__(self, other):
return Fn.Add(other, self)
def __rsub__(self, other):
return Fn.Sub(other, self)
def __ror__(self, other):
return Fn.BitOr(other, self)
def __rand__(self, other):
return Fn.BitAnd(other, self)
def __rxor__(self, other):
return Fn.BitXor(other, self)
def __rshift__(self, other):
return Fn.RShift(self, other)
def __lshift__(self, other):
return Fn.LShift(self, other)
def __rrshift__(self, other):
return Fn.RShift(other, self)
def __rlshift__(self, other):
return Fn.LShift(other, self)
def __neg__(self):
return self * -1
class _KnownValue(_Symbolic):
def value(self):
raise BaseException('not implemented')
class Boolean(_KnownValue):
@Memoize
def __new__(typ, b):
self = _KnownValue.__new__(typ)
self.name = str(b)
self.boolean = b
return self
def value(self):
return bool(self.boolean)
def __str__(self):
return str(self.boolean)
def __repr__(self):
return str(self)
def __eq__(self, other):
if isinstance(other, Boolean):
return bool(self.boolean) == bool(other.boolean)
elif isinstance(other, _Symbolic):
return other.__eq__(self)
else:
return bool(self.boolean) == other
class Number(_KnownValue):
FFORMAT = str
@Memoize
def __new__(typ, n):
n = float(n)
self = _KnownValue.__new__(typ)
self.name = str(n)
self.n = n
return self
def __hash__(self):
return hash(self.name)
@property
def is_integer(self):
return self.n.is_integer()
def value(self):
return self.n
def __eq__(self, other):
if isinstance(other, Number):
return self.n == other.n
elif isinstance(other, _Symbolic):
return other.__eq__(self)
else:
return self.n == other
def __ne__(self, other):
if isinstance(other, _Symbolic):
return super(Number, self).__ne__(other)
else:
return self.n != other
def __str__(self):
return Number.FFORMAT(self.n)
def __repr__(self):
return str(self)
class WildResults(object):
def __init__(self):
self._hash = {}
def clear(self):
self._hash.clear()
def __setitem__(self, idx, val):
self._hash.__setitem__(idx, val)
def __contains__(self, idx):
return idx in self._hash
def __getitem__(self, idx):
return self._hash[idx]
def __getattr__(self, idx):
return self[idx]
def __iter__(self):
return self._hash.__iter__()
def __str__(self):
return str(self._hash)
def __repr__(self):
return str(self)
def __len__(self):
return len(self._hash)
class Wild(_Symbolic):
'''
wilds will be equal to anything, and are used for pattern matching
'''
@Memoize
def __new__(typ, name, **kargs):
self = _Symbolic.__new__(typ)
self.name = name
self.kargs = kargs
return self
def __str__(self):
return self.name
def __repr__(self):
return str(self)
def __call__(self, *args):
return Fn(self, *args)
def _dump(self):
return {
'type': type(self),
'name': self.name,
'kargs': self.kargs,
'id': id(self)
}
class Symbol(_Symbolic):
'''
symbols with the same name and kargs will be equal
(and in fact are guaranteed to be the same instance)
'''
@Memoize
def __new__(typ, name, **kargs):
self = Wild.__new__(typ, name)
self.name = name
self.kargs = kargs
self.is_integer = False # set to true to force domain to integers
self.is_bitvector = 0 # set to the size of the bitvector if it is a bitvector
self.is_bool = False # set to true if the symbol represents a boolean value
return self
def __str__(self):
return self.name
def __repr__(self):
return str(self)
def __call__(self, *args):
return Fn(self, *args)
def _dump(self):
return {
'type': type(self),
'name': self.name,
'kargs': self.kargs,
'id': id(self)
}
def cmp_to_key(mycmp):
'Convert a cmp= function into a key= function'
class K(object):
def __init__(self, obj, *args):
self.obj = obj
def __lt__(self, other):
return mycmp(self.obj, other.obj) < 0
def __gt__(self, other):
return mycmp(self.obj, other.obj) > 0
def __eq__(self, other):
return mycmp(self.obj, other.obj) == 0
def __le__(self, other):
return mycmp(self.obj, other.obj) <= 0
def __ge__(self, other):
return mycmp(self.obj, other.obj) >= 0
def __ne__(self, other):
return mycmp(self.obj, other.obj) != 0
return K
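# cmp_to_key mirrors functools.cmp_to_key: it wraps a three-way comparison
# function so it can be used as a sort key, e.g. (illustrative):
#   args.sort(key=cmp_to_key(lambda a, b: (str(a) > str(b)) - (str(a) < str(b))))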
class Fn(_Symbolic):
@Memoize
def __new__(typ, fn, *args):
'''
arguments: Function, *arguments, **kargs
valid keyword args:
commutative (default False) - order of operands is unimportant
'''
if None in args:
raise BaseException('NONE IN ARGS %s %s' % (fn, args))
if not isinstance(fn, _Symbolic):
fn = symbolic(fn)
return Fn.__new__(typ, fn, *args)
for i in args:
if not isinstance(i, _Symbolic):
args = list(map(symbolic, args))
return Fn.__new__(typ, fn, *args)
self = _Symbolic.__new__(typ)
kargs = fn.kargs
self.kargs = fn.kargs
self.name = fn.name
self.fn = fn
self.args = args
# import simplify
# rv = simplify.simplify(self)
return self
def _dump(self):
return {
'id': id(self),
'name': self.name,
'fn': self.fn._dump(),
'kargs': self.kargs,
      'args': list([x._dump() for x in self.args])
}
def __call__(self, *args):
return Fn(self, *args)
def has(self, x):
return self.__contains__(x)
def is_Symbols(self):
    return isinstance(self, _Symbolic)
def substitute(self, subs):
args = list([x.substitute(subs) for x in self.args])
newfn = self.fn.substitute(subs)
self = Fn(newfn, *args)
if self in subs:
self = subs[self]
return self
def recursive_substitute(self, subs):
y = self
while True:
x = y.substitute(subs)
if x == y:
return x
y = x
def __getitem__(self, n):
if n == 0:
return self.fn
return self.args[n - 1]
def __len__(self):
return len(self.args) + 1
def _get_assoc_arguments(self):
from . import simplify
rv = []
args = list(self.args)
def _(a, b):
if (isinstance(a, Fn) and a.fn == self.fn) and not (isinstance(b, Fn) and b.fn == self.fn):
return -1
if (isinstance(b, Fn) and b.fn == self.fn) and not (isinstance(a, Fn) and a.fn == self.fn):
return 1
return simplify._order(a, b)
args.sort(key=cmp_to_key(_))
for i in args:
if isinstance(i, Fn) and i.fn == self.fn:
for j in i._get_assoc_arguments():
rv.append(j)
else:
rv.append(i)
return rv
@staticmethod
def LessThan(lhs, rhs):
return Fn(stdops.LessThan, lhs, rhs)
@staticmethod
def GreaterThan(lhs, rhs):
return Fn(stdops.GreaterThan, lhs, rhs)
@staticmethod
def LessThanEq(lhs, rhs):
return Fn(stdops.LessThanEq, lhs, rhs)
@staticmethod
def GreaterThanEq(lhs, rhs):
return Fn(stdops.GreaterThanEq, lhs, rhs)
@staticmethod
def Add(lhs, rhs):
return Fn(stdops.Add, lhs, rhs)
@staticmethod
def Sub(lhs, rhs):
return Fn(stdops.Sub, lhs, rhs)
@staticmethod
def Div(lhs, rhs):
return Fn(stdops.Div, lhs, rhs)
@staticmethod
def Mul(lhs, rhs):
return Fn(stdops.Mul, lhs, rhs)
@staticmethod
def Pow(lhs, rhs):
return Fn(stdops.Pow, lhs, rhs)
@staticmethod
def RShift(lhs, rhs):
return Fn(stdops.RShift, lhs, rhs)
@staticmethod
def LShift(lhs, rhs):
return Fn(stdops.LShift, lhs, rhs)
@staticmethod
def BitAnd(lhs, rhs):
return Fn(stdops.BitAnd, | |
import warnings
warnings.filterwarnings(
"default", r"plotly\.graph_objs\.\w+ is deprecated", DeprecationWarning
)
class Data(list):
"""
plotly_study.graph_objs.Data is deprecated.
Please replace it with a list or tuple of instances of the following types
- plotly_study.graph_objs.Scatter
- plotly_study.graph_objs.Bar
- plotly_study.graph_objs.Area
- plotly_study.graph_objs.Histogram
- etc.
"""
def __init__(self, *args, **kwargs):
"""
plotly_study.graph_objs.Data is deprecated.
Please replace it with a list or tuple of instances of the following types
- plotly_study.graph_objs.Scatter
- plotly_study.graph_objs.Bar
- plotly_study.graph_objs.Area
- plotly_study.graph_objs.Histogram
- etc.
"""
warnings.warn(
"""plotly_study.graph_objs.Data is deprecated.
Please replace it with a list or tuple of instances of the following types
- plotly_study.graph_objs.Scatter
- plotly_study.graph_objs.Bar
- plotly_study.graph_objs.Area
- plotly_study.graph_objs.Histogram
- etc.
""",
DeprecationWarning,
)
super(Data, self).__init__(*args, **kwargs)
class Annotations(list):
"""
plotly_study.graph_objs.Annotations is deprecated.
Please replace it with a list or tuple of instances of the following types
- plotly_study.graph_objs.layout.Annotation
- plotly_study.graph_objs.layout.scene.Annotation
"""
def __init__(self, *args, **kwargs):
"""
plotly_study.graph_objs.Annotations is deprecated.
Please replace it with a list or tuple of instances of the following types
- plotly_study.graph_objs.layout.Annotation
- plotly_study.graph_objs.layout.scene.Annotation
"""
warnings.warn(
"""plotly_study.graph_objs.Annotations is deprecated.
Please replace it with a list or tuple of instances of the following types
- plotly_study.graph_objs.layout.Annotation
- plotly_study.graph_objs.layout.scene.Annotation
""",
DeprecationWarning,
)
super(Annotations, self).__init__(*args, **kwargs)
class Frames(list):
"""
plotly_study.graph_objs.Frames is deprecated.
Please replace it with a list or tuple of instances of the following types
- plotly_study.graph_objs.Frame
"""
def __init__(self, *args, **kwargs):
"""
plotly_study.graph_objs.Frames is deprecated.
Please replace it with a list or tuple of instances of the following types
- plotly_study.graph_objs.Frame
"""
warnings.warn(
"""plotly_study.graph_objs.Frames is deprecated.
Please replace it with a list or tuple of instances of the following types
- plotly_study.graph_objs.Frame
""",
DeprecationWarning,
)
super(Frames, self).__init__(*args, **kwargs)
class AngularAxis(dict):
"""
plotly_study.graph_objs.AngularAxis is deprecated.
Please replace it with one of the following more specific types
- plotly_study.graph_objs.layout.AngularAxis
- plotly_study.graph_objs.layout.polar.AngularAxis
"""
def __init__(self, *args, **kwargs):
"""
plotly_study.graph_objs.AngularAxis is deprecated.
Please replace it with one of the following more specific types
- plotly_study.graph_objs.layout.AngularAxis
- plotly_study.graph_objs.layout.polar.AngularAxis
"""
warnings.warn(
"""plotly_study.graph_objs.AngularAxis is deprecated.
Please replace it with one of the following more specific types
- plotly_study.graph_objs.layout.AngularAxis
- plotly_study.graph_objs.layout.polar.AngularAxis
""",
DeprecationWarning,
)
super(AngularAxis, self).__init__(*args, **kwargs)
class Annotation(dict):
"""
plotly_study.graph_objs.Annotation is deprecated.
Please replace it with one of the following more specific types
- plotly_study.graph_objs.layout.Annotation
- plotly_study.graph_objs.layout.scene.Annotation
"""
def __init__(self, *args, **kwargs):
"""
plotly_study.graph_objs.Annotation is deprecated.
Please replace it with one of the following more specific types
- plotly_study.graph_objs.layout.Annotation
- plotly_study.graph_objs.layout.scene.Annotation
"""
warnings.warn(
"""plotly_study.graph_objs.Annotation is deprecated.
Please replace it with one of the following more specific types
- plotly_study.graph_objs.layout.Annotation
- plotly_study.graph_objs.layout.scene.Annotation
""",
DeprecationWarning,
)
super(Annotation, self).__init__(*args, **kwargs)
class ColorBar(dict):
"""
plotly_study.graph_objs.ColorBar is deprecated.
Please replace it with one of the following more specific types
- plotly_study.graph_objs.scatter.marker.ColorBar
- plotly_study.graph_objs.surface.ColorBar
- etc.
"""
def __init__(self, *args, **kwargs):
"""
plotly_study.graph_objs.ColorBar is deprecated.
Please replace it with one of the following more specific types
- plotly_study.graph_objs.scatter.marker.ColorBar
- plotly_study.graph_objs.surface.ColorBar
- etc.
"""
warnings.warn(
"""plotly_study.graph_objs.ColorBar is deprecated.
Please replace it with one of the following more specific types
- plotly_study.graph_objs.scatter.marker.ColorBar
- plotly_study.graph_objs.surface.ColorBar
- etc.
""",
DeprecationWarning,
)
super(ColorBar, self).__init__(*args, **kwargs)
class Contours(dict):
"""
plotly_study.graph_objs.Contours is deprecated.
Please replace it with one of the following more specific types
- plotly_study.graph_objs.contour.Contours
- plotly_study.graph_objs.surface.Contours
- etc.
"""
def __init__(self, *args, **kwargs):
"""
plotly_study.graph_objs.Contours is deprecated.
Please replace it with one of the following more specific types
- plotly_study.graph_objs.contour.Contours
- plotly_study.graph_objs.surface.Contours
- etc.
"""
warnings.warn(
"""plotly_study.graph_objs.Contours is deprecated.
Please replace it with one of the following more specific types
- plotly_study.graph_objs.contour.Contours
- plotly_study.graph_objs.surface.Contours
- etc.
""",
DeprecationWarning,
)
super(Contours, self).__init__(*args, **kwargs)
class ErrorX(dict):
"""
plotly_study.graph_objs.ErrorX is deprecated.
Please replace it with one of the following more specific types
- plotly_study.graph_objs.scatter.ErrorX
- plotly_study.graph_objs.histogram.ErrorX
- etc.
"""
def __init__(self, *args, **kwargs):
"""
plotly_study.graph_objs.ErrorX is deprecated.
Please replace it with one of the following more specific types
- plotly_study.graph_objs.scatter.ErrorX
- plotly_study.graph_objs.histogram.ErrorX
- etc.
"""
warnings.warn(
"""plotly_study.graph_objs.ErrorX is deprecated.
Please replace it with one of the following more specific types
- plotly_study.graph_objs.scatter.ErrorX
- plotly_study.graph_objs.histogram.ErrorX
- etc.
""",
DeprecationWarning,
)
super(ErrorX, self).__init__(*args, **kwargs)
class ErrorY(dict):
"""
plotly_study.graph_objs.ErrorY is deprecated.
Please replace it with one of the following more specific types
- plotly_study.graph_objs.scatter.ErrorY
- plotly_study.graph_objs.histogram.ErrorY
- etc.
"""
def __init__(self, *args, **kwargs):
"""
plotly_study.graph_objs.ErrorY is deprecated.
Please replace it with one of the following more specific types
- plotly_study.graph_objs.scatter.ErrorY
- plotly_study.graph_objs.histogram.ErrorY
- etc.
"""
warnings.warn(
"""plotly_study.graph_objs.ErrorY is deprecated.
Please replace it with one of the following more specific types
- plotly_study.graph_objs.scatter.ErrorY
- plotly_study.graph_objs.histogram.ErrorY
- etc.
""",
DeprecationWarning,
)
super(ErrorY, self).__init__(*args, **kwargs)
class ErrorZ(dict):
"""
plotly_study.graph_objs.ErrorZ is deprecated.
Please replace it with one of the following more specific types
- plotly_study.graph_objs.scatter3d.ErrorZ
"""
def __init__(self, *args, **kwargs):
"""
plotly_study.graph_objs.ErrorZ is deprecated.
Please replace it with one of the following more specific types
- plotly_study.graph_objs.scatter3d.ErrorZ
"""
warnings.warn(
"""plotly_study.graph_objs.ErrorZ is deprecated.
Please replace it with one of the following more specific types
- plotly_study.graph_objs.scatter3d.ErrorZ
""",
DeprecationWarning,
)
super(ErrorZ, self).__init__(*args, **kwargs)
class Font(dict):
"""
plotly_study.graph_objs.Font is deprecated.
Please replace it with one of the following more specific types
- plotly_study.graph_objs.layout.Font
- plotly_study.graph_objs.layout.hoverlabel.Font
- etc.
"""
def __init__(self, *args, **kwargs):
"""
plotly_study.graph_objs.Font is deprecated.
Please replace it with one of the following more specific types
- plotly_study.graph_objs.layout.Font
- plotly_study.graph_objs.layout.hoverlabel.Font
- etc.
"""
warnings.warn(
"""plotly_study.graph_objs.Font is deprecated.
Please replace it with one of the following more specific types
- plotly_study.graph_objs.layout.Font
- plotly_study.graph_objs.layout.hoverlabel.Font
- etc.
""",
DeprecationWarning,
)
super(Font, self).__init__(*args, **kwargs)
class Legend(dict):
"""
plotly_study.graph_objs.Legend is deprecated.
Please replace it with one of the following more specific types
- plotly_study.graph_objs.layout.Legend
"""
def __init__(self, *args, **kwargs):
"""
plotly_study.graph_objs.Legend is deprecated.
Please replace it with one of the following more specific types
- plotly_study.graph_objs.layout.Legend
"""
warnings.warn(
"""plotly_study.graph_objs.Legend is deprecated.
Please replace it with one of the following more specific types
- plotly_study.graph_objs.layout.Legend
""",
DeprecationWarning,
)
super(Legend, self).__init__(*args, **kwargs)
class Line(dict):
"""
plotly_study.graph_objs.Line is deprecated.
Please replace it with one of the following more specific types
- plotly_study.graph_objs.scatter.Line
- plotly_study.graph_objs.layout.shape.Line
- etc.
"""
def __init__(self, *args, **kwargs):
"""
plotly_study.graph_objs.Line is deprecated.
Please replace it with one of the following more specific types
- plotly_study.graph_objs.scatter.Line
- plotly_study.graph_objs.layout.shape.Line
- etc.
"""
warnings.warn(
"""plotly_study.graph_objs.Line is deprecated.
Please replace it with one of the following more specific types
- plotly_study.graph_objs.scatter.Line
- plotly_study.graph_objs.layout.shape.Line
- etc.
""",
DeprecationWarning,
)
super(Line, self).__init__(*args, **kwargs)
class Margin(dict):
"""
plotly_study.graph_objs.Margin is deprecated.
Please replace it with one of the following more specific types
- plotly_study.graph_objs.layout.Margin
"""
def __init__(self, *args, **kwargs):
"""
plotly_study.graph_objs.Margin is deprecated.
Please replace it with one of the following more specific types
- plotly_study.graph_objs.layout.Margin
"""
warnings.warn(
"""plotly_study.graph_objs.Margin is deprecated.
Please replace it with one of the following more specific types
- plotly_study.graph_objs.layout.Margin
""",
DeprecationWarning,
)
super(Margin, self).__init__(*args, **kwargs)
class Marker(dict):
"""
plotly_study.graph_objs.Marker is deprecated.
Please replace it with one of the following more specific types
- plotly_study.graph_objs.scatter.Marker
- plotly_study.graph_objs.histogram.selected.Marker
- etc.
"""
def __init__(self, *args, **kwargs):
"""
plotly_study.graph_objs.Marker is deprecated.
Please replace it with one of the following more specific types
- plotly_study.graph_objs.scatter.Marker
- plotly_study.graph_objs.histogram.selected.Marker
- etc.
"""
warnings.warn(
"""plotly_study.graph_objs.Marker is deprecated.
Please replace it with one of the following more specific types
- plotly_study.graph_objs.scatter.Marker
- plotly_study.graph_objs.histogram.selected.Marker
- etc.
""",
DeprecationWarning,
)
super(Marker, self).__init__(*args, **kwargs)
class RadialAxis(dict):
"""
plotly_study.graph_objs.RadialAxis is deprecated.
Please replace it with one of the following more specific types
- plotly_study.graph_objs.layout.RadialAxis
- plotly_study.graph_objs.layout.polar.RadialAxis
"""
def __init__(self, *args, **kwargs):
"""
plotly_study.graph_objs.RadialAxis is deprecated.
Please replace it with one of the following more specific types
- plotly_study.graph_objs.layout.RadialAxis
- plotly_study.graph_objs.layout.polar.RadialAxis
"""
warnings.warn(
"""plotly_study.graph_objs.RadialAxis is deprecated.
Please replace it with one of the following more specific types
- plotly_study.graph_objs.layout.RadialAxis
- plotly_study.graph_objs.layout.polar.RadialAxis
""",
DeprecationWarning,
)
super(RadialAxis, self).__init__(*args, **kwargs)
class Scene(dict):
"""
plotly_study.graph_objs.Scene is deprecated.
Please replace it with one of the following more specific types
- plotly_study.graph_objs.layout.Scene
"""
def __init__(self, *args, **kwargs):
"""
plotly_study.graph_objs.Scene is deprecated.
Please replace it with one of the following more specific types
- plotly_study.graph_objs.layout.Scene
"""
warnings.warn(
"""plotly_study.graph_objs.Scene is deprecated.
Please replace it with one of the following more specific types
- plotly_study.graph_objs.layout.Scene
""",
DeprecationWarning,
)
super(Scene, self).__init__(*args, **kwargs)
class Stream(dict):
"""
plotly_study.graph_objs.Stream is deprecated.
Please replace it with one of the following more specific types
- plotly_study.graph_objs.scatter.Stream
- plotly_study.graph_objs.area.Stream
"""
def __init__(self, *args, **kwargs):
"""
plotly_study.graph_objs.Stream is deprecated.
Please replace it with one of the following more specific types
- plotly_study.graph_objs.scatter.Stream
- plotly_study.graph_objs.area.Stream
"""
warnings.warn(
"""plotly_study.graph_objs.Stream is deprecated.
Please replace it with one of the following more specific types
- plotly_study.graph_objs.scatter.Stream
- plotly_study.graph_objs.area.Stream
""",
DeprecationWarning,
)
super(Stream, self).__init__(*args, **kwargs)
class XAxis(dict):
"""
plotly_study.graph_objs.XAxis is deprecated.
Please replace it with one of the following more specific types
- plotly_study.graph_objs.layout.XAxis
- plotly_study.graph_objs.layout.scene.XAxis
"""
def __init__(self, *args, **kwargs):
"""
plotly_study.graph_objs.XAxis is deprecated.
        Please replace it with one of the following more specific types
<reponame>soonho-tri/dreal4
# -*- coding: utf-8 -*-
from __future__ import absolute_import, division, print_function
import math
import unittest
from dreal import (And, Expression, Formula, Iff, Implies, Not, Or, Variable,
Variables, acos, asin, atan, atan2, cos, cosh, exp, forall,
if_then_else, intersect, log, logical_imply, Max, Min, sin,
sinh, sqrt, tan, tanh)
x = Variable("x")
y = Variable("y")
z = Variable("z")
w = Variable("w")
a = Variable("a")
b = Variable("b")
c = Variable("c")
b1 = Variable("b1", Variable.Bool)
b2 = Variable("b2", Variable.Bool)
e_x = Expression(x)
e_y = Expression(y)
class SymbolicVariableTest(unittest.TestCase):
def test_type(self):
real_var = Variable("x", Variable.Real)
self.assertEqual(real_var.get_type(), Variable.Real)
int_var = Variable("x", Variable.Int)
self.assertEqual(int_var.get_type(), Variable.Int)
bool_var = Variable("x", Variable.Bool)
self.assertEqual(bool_var.get_type(), Variable.Bool)
binary_var = Variable("x", Variable.Binary)
self.assertEqual(binary_var.get_type(), Variable.Binary)
def test_addition(self):
self.assertEqual(str(x + y), "(x + y)")
self.assertEqual(str(x + 1), "(1 + x)")
self.assertEqual(str(1 + x), "(1 + x)")
def test_subtraction(self):
self.assertEqual(str(x - y), "(x - y)")
self.assertEqual(str(x - 1), "(-1 + x)")
self.assertEqual(str(1 - x), "(1 - x)")
def test_multiplication(self):
self.assertEqual(str(x * y), "(x * y)")
self.assertEqual(str(x * 1), "x")
self.assertEqual(str(1 * x), "x")
def test_division(self):
self.assertEqual(str(x / y), "(x / y)")
self.assertEqual(str(x / 1), "x")
self.assertEqual(str(1 / x), "(1 / x)")
def test_unary_operators(self):
self.assertEqual(str(+x), "x")
self.assertEqual(str(-x), "(-1 * x)")
def test_relational_operators(self):
# Variable rop float
self.assertEqual(str(x >= 1), "(x >= 1)")
self.assertEqual(str(x > 1), "(x > 1)")
self.assertEqual(str(x <= 1), "(x <= 1)")
self.assertEqual(str(x < 1), "(x < 1)")
self.assertEqual(str(x == 1), "(x == 1)")
self.assertEqual(str(x != 1), "(x != 1)")
# float rop Variable
self.assertEqual(str(1 < y), "(y > 1)")
self.assertEqual(str(1 <= y), "(y >= 1)")
self.assertEqual(str(1 > y), "(y < 1)")
self.assertEqual(str(1 >= y), "(y <= 1)")
self.assertEqual(str(1 == y), "(y == 1)")
self.assertEqual(str(1 != y), "(y != 1)")
# Variable rop Variable
self.assertEqual(str(x < y), "(x < y)")
self.assertEqual(str(x <= y), "(x <= y)")
self.assertEqual(str(x > y), "(x > y)")
self.assertEqual(str(x >= y), "(x >= y)")
self.assertEqual(str(x == y), "(x == y)")
self.assertEqual(str(x != y), "(x != y)")
def test_repr(self):
self.assertEqual(repr(x), "Variable('x')")
def test_simplify(self):
self.assertEqual(str(0 * (x + y)), "0")
self.assertEqual(str(x + y - x - y), "0")
self.assertEqual(str(x / x - 1), "0")
self.assertEqual(str(x / x), "1")
def test_expand(self):
ex = 2 * (x + y)
self.assertEqual(str(ex), "(2 * (x + y))")
self.assertEqual(str(ex.Expand()), "(2 * x + 2 * y)")
def test_pow(self):
self.assertEqual(str(x**2), "pow(x, 2)")
self.assertEqual(str(x**y), "pow(x, y)")
self.assertEqual(str((x + 1)**(y - 1)), "pow((1 + x), (-1 + y))")
def test_neg(self):
self.assertEqual(str(-(x + 1)), "(-1 - x)")
def test_logical(self):
f1 = (x == 0)
f2 = (y == 0)
self.assertEqual(str(Not(f1)), "!((x == 0))")
self.assertEqual(str(Implies(f1, f2)), str(Or(Not(f1), f2)))
self.assertEqual(str(Iff(f1, f2)),
str(And(Implies(f1, f2), Implies(f2, f1))))
# Test single-operand logical statements
self.assertEqual(str(And(x >= 1)), "(x >= 1)")
self.assertEqual(str(Or(x >= 1)), "(x >= 1)")
# Test binary operand logical statements
self.assertEqual(str(And(x >= 1, x <= 2)), "((x >= 1) and (x <= 2))")
self.assertEqual(str(Or(x <= 1, x >= 2)), "((x >= 2) or (x <= 1))")
# Test multiple operand logical statements
self.assertEqual(str(And(x >= 1, x <= 2, y == 2)),
"((y == 2) and (x >= 1) and (x <= 2))")
self.assertEqual(str(Or(x >= 1, x <= 2, y == 2)),
"((y == 2) or (x >= 1) or (x <= 2))")
def test_logical2(self):
self.assertEqual(str(And(b1, b2, Implies(b1, b2))),
"(b1 and b2 and (b2 or !(b1)))")
self.assertEqual(str(Or(b1, b2, Implies(b1, b2))),
"(b1 or b2 or !(b1))")
self.assertEqual(str(Not(b1)), "!(b1)")
def test_functions_with_variable(self):
self.assertEqual(str(log(x)), "log(x)")
self.assertEqual(str(abs(x)), "abs(x)")
self.assertEqual(str(exp(x)), "exp(x)")
self.assertEqual(str(sqrt(x)), "sqrt(x)")
self.assertEqual(str(pow(x, y)), "pow(x, y)")
self.assertEqual(str(sin(x)), "sin(x)")
self.assertEqual(str(cos(x)), "cos(x)")
self.assertEqual(str(tan(x)), "tan(x)")
self.assertEqual(str(asin(x)), "asin(x)")
self.assertEqual(str(acos(x)), "acos(x)")
self.assertEqual(str(atan(x)), "atan(x)")
self.assertEqual(str(atan2(x, y)), "atan2(x, y)")
self.assertEqual(str(sinh(x)), "sinh(x)")
self.assertEqual(str(cosh(x)), "cosh(x)")
self.assertEqual(str(tanh(x)), "tanh(x)")
self.assertEqual(str(Min(x, y)), "min(x, y)")
self.assertEqual(str(Max(x, y)), "max(x, y)")
self.assertEqual(str(if_then_else(x > y, x, y)),
"(if (x > y) then x else y)")
class TestSymbolicVariables(unittest.TestCase):
def test_default_constructor(self):
variables = Variables()
self.assertEqual(variables.size(), 0)
self.assertTrue(variables.empty())
def test_constructor_list(self):
variables = Variables([x, y, z])
self.assertEqual(variables.size(), 3)
self.assertEqual(len(variables), 3)
def test_to_string(self):
variables = Variables([x, y, z])
self.assertEqual(variables.to_string(), "{x, y, z}")
self.assertEqual("{}".format(variables), "{x, y, z}")
def test_repr(self):
variables = Variables([x, y, z])
self.assertEqual(repr(variables), '<Variables "{x, y, z}">')
def test_insert1(self):
variables = Variables()
variables.insert(x)
self.assertEqual(variables.size(), 1)
def test_insert2(self):
variables = Variables([x])
variables.insert(Variables([y, z]))
self.assertEqual(variables.size(), 3)
def test_erase1(self):
variables = Variables([x, y, z])
count = variables.erase(x)
self.assertEqual(count, 1)
def test_erase2(self):
variables1 = Variables([x, y, z])
variables2 = Variables([w, z])
count = variables1.erase(variables2)
self.assertEqual(count, 1)
self.assertEqual(variables1.size(), 2)
def test_include(self):
variables = Variables([x, y, z])
self.assertTrue(variables.include(y))
self.assertTrue(y in variables)
self.assertFalse(w in variables)
def test_subset_properties(self):
variables1 = Variables([x, y, z])
variables2 = Variables([x, y])
self.assertFalse(variables1.IsSubsetOf(variables2))
self.assertFalse(variables1.IsStrictSubsetOf(variables2))
self.assertTrue(variables1.IsSupersetOf(variables2))
self.assertTrue(variables1.IsStrictSupersetOf(variables2))
def test_eq(self):
variables1 = Variables([x, y, z])
variables2 = Variables([x, y])
self.assertFalse(variables1 == variables2)
def test_lt(self):
variables1 = Variables([x, y])
variables2 = Variables([x, y, z])
self.assertTrue(variables1 < variables2)
def test_add(self):
variables1 = Variables([x, y])
variables2 = Variables([y, z])
variables3 = variables1 + variables2 # [x, y, z]
self.assertEqual(variables3.size(), 3)
variables4 = variables1 + z # [x, y, z]
self.assertEqual(variables4.size(), 3)
variables5 = x + variables1 # [x, y]
self.assertEqual(variables5.size(), 2)
def test_add_assignment(self):
variables = Variables([x])
variables += y
self.assertEqual(variables.size(), 2)
variables += Variables([x, z])
self.assertEqual(variables.size(), 3)
def test_sub(self):
variables1 = Variables([x, y])
variables2 = Variables([y, z])
variables3 = variables1 - variables2 # [x]
self.assertEqual(variables3, Variables([x]))
variables4 = variables1 - y # [x]
self.assertEqual(variables4, Variables([x]))
def test_sub_assignment(self):
variables = Variables([x, y, z])
variables -= y # = [x, z]
self.assertEqual(variables, Variables([x, z]))
variables -= Variables([x]) # = [z]
self.assertEqual(variables, Variables([z]))
def test_intersect(self):
variables1 = Variables([x, y, z])
variables2 = Variables([y, w])
variables3 = intersect(variables1, variables2) # = [y]
self.assertEqual(variables3, Variables([y]))
def test_iter(self):
variables = Variables([x, y, z])
count = 0
for var in variables:
self.assertTrue(var in variables)
count = count + 1
self.assertEqual(count, len(variables))
class TestSymbolicExpression(unittest.TestCase):
def test_addition(self):
self.assertEqual(str(e_x + e_y), "(x + y)")
self.assertEqual(str(e_x + y), "(x + y)")
self.assertEqual(str(e_x + 1), "(1 + x)")
self.assertEqual(str(x + e_y), "(x + y)")
self.assertEqual(str(1 + e_x), "(1 + x)")
def test_addition_assign(self):
e = x
e += e_y
self.assertEqual(e, x + y)
e += z
self.assertEqual(e, x + y + z)
e += 1
self.assertEqual(e, x + y + z + 1)
def test_subtract(self):
self.assertEqual(str(e_x - e_y), "(x - y)")
self.assertEqual(str(e_x - y), "(x - y)")
self.assertEqual(str(e_x - 1), "(-1 + x)")
self.assertEqual(str(x - e_y), "(x - y)")
self.assertEqual(str(1 - e_x), "(1 - x)")
def test_subtract_assign(self):
e = x
e -= e_y
self.assertEqual(e, x - y)
e -= z
self.assertEqual(e, x - y - z)
e -= 1
self.assertEqual(e, x - y - z - 1)
def test_multiplication(self):
self.assertEqual(str(e_x * e_y), "(x * y)")
self.assertEqual(str(e_x * y), "(x * y)")
self.assertEqual(str(e_x * 1), "x")
self.assertEqual(str(x * e_y), "(x * y)")
self.assertEqual(str(1 * e_x), "x")
def test_multiplication_assign(self):
e = x
e *= e_y
self.assertEqual(e, x * y)
e *= z
self.assertEqual(e, x * y * z)
e *= 1
self.assertEqual(e, x * y * z)
def test_division(self):
self.assertEqual(str(e_x / e_y), "(x / y)")
self.assertEqual(str(e_x / y), "(x / y)")
self.assertEqual(str(e_x / 1), "x")
self.assertEqual(str(x / e_y), "(x / y)")
self.assertEqual(str(1 / e_x), "(1 / x)")
def test_division_assign(self):
e = x
e /= e_y
self.assertEqual(e, x / y)
e /= z
self.assertEqual(e, x / y / z)
e /= 1
self.assertEqual(e, x / y / z)
def test_unary_operators(self):
# self.assertEqual(str(+e_x), "x")
self.assertEqual(str(-e_x), "(-1 * x)")
def test_relational_operators(self):
# Expression rop Expression
self.assertEqual(str(e_x < e_y), "(x < y)")
self.assertEqual(str(e_x <= e_y), "(x <= y)")
self.assertEqual(str(e_x > e_y), "(x > y)")
self.assertEqual(str(e_x >= e_y), "(x >= y)")
self.assertEqual(str(e_x == e_y), "(x == y)")
self.assertEqual(str(e_x != e_y), "(x != y)")
# Expression rop Variable
self.assertEqual(str(e_x < y), "(x < y)")
self.assertEqual(str(e_x <= y), "(x <= y)")
self.assertEqual(str(e_x > y), "(x > y)")
self.assertEqual(str(e_x >= y), "(x >= y)")
self.assertEqual(str(e_x == y), "(x == y)")
self.assertEqual(str(e_x != y), "(x != y)")
# Variable rop Expression
self.assertEqual(str(x < e_y), "(x < y)")
self.assertEqual(str(x <= e_y), "(x <= y)")
self.assertEqual(str(x > e_y), "(x > y)")
self.assertEqual(str(x >= e_y), "(x >= y)")
self.assertEqual(str(x == e_y), "(x == y)")
self.assertEqual(str(x != e_y), "(x != y)")
# Expression rop float
self.assertEqual(str(e_x < 1), "(x < 1)")
self.assertEqual(str(e_x <= 1), "(x <= 1)")
self.assertEqual(str(e_x > 1), "(x > 1)")
if sig['TYPE'] == 'valid' or sig['TYPE'] == 'ready':
continue
if sig['TYPE'] == 'rstruct_enable':
continue
llink_lsb = sig['LLINDEX_MAIN_LSB'] + (rstruct_iteration * llink['WIDTH_MAIN'])
lsb2 = sig['LSB'] + (rstruct_iteration * sig['SIGWID'])
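# Bit-blast the signal: emit one AIB mapping line per signal bit, and whenever the
# print index reaches a beat boundary, skip ahead past the per-beat overhead bits.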
for unused1 in list (range (0, sig['SIGWID'])):
#lsb2=sig['LSB'] + (sig['SIGWID']*iteration)
#llink_lsb=sig['LLINDEX_MAIN_LSB'] + (llink['WIDTH_MAIN']*iteration)
tx_local_index_lsb += 1
tx_print_index_lsb = print_aib_mapping_text(configuration, localdir, sig['NAME'], wid1=1, lsb1=tx_print_index_lsb, lsb2=lsb2, llink_lsb=llink_lsb, llink_name=llink['NAME'])
if (tx_print_index_lsb % config_raw1phy_beat) == 0:
tx_print_index_lsb += config_raw1phy_data - config_raw1phy_beat
llink_lsb += 1
lsb2 += 1
else:
if llink['HASREADY']:
#global_struct.g_dv_vector_print.append ("assign {}_f = {};\n".format(gen_llink_concat_credit (llink['NAME'],localdir), tx_local_index_lsb))
tx_local_index_lsb += 1
tx_print_index_lsb = print_aib_mapping_text(configuration, localdir, gen_llink_concat_credit (llink['NAME'],localdir)+"_r"+str(rstruct_iteration), wid1=1, lsb1=tx_print_index_lsb)
if (tx_print_index_lsb % config_raw1phy_beat) == 0:
tx_print_index_lsb += config_raw1phy_data - config_raw1phy_beat
## This fills in the unused data space
if this_is_tx:
local_index_wid = configuration['TOTAL_TX_ROUNDUP_BIT_RSTRUCT']
tx_local_index_lsb += local_index_wid
configuration['TX_SPARE_WIDTH'] = 0
else:
local_index_wid = configuration['TOTAL_RX_ROUNDUP_BIT_RSTRUCT']
tx_local_index_lsb += local_index_wid
configuration['RX_SPARE_WIDTH'] = 0
for unused1 in list (range (0, local_index_wid)):
tx_local_index_lsb += 1
tx_print_index_lsb= print_aib_mapping_text(configuration, localdir,"1'b0", wid1=1, lsb1=tx_print_index_lsb, lsb2=-1)
if (tx_print_index_lsb % config_raw1phy_beat) == 0:
tx_print_index_lsb += config_raw1phy_data - config_raw1phy_beat
# This is unused for rep struct
# ## This fills in the empty space after the data but before the end of the channel (e.g. DBI)
# local_index_wid = config_raw1phy_beat - tx_local_index_lsb
# tx_local_index_lsb += local_index_wid
#
# for unused1 in list (range (0, local_index_wid)):
# if global_struct.g_SIGNAL_DEBUG:
# print ("Fill in iteration {} for index_lsb {}".format(unused1, tx_local_index_lsb))
# tx_local_index_lsb += 1
# tx_print_index_lsb= print_aib_mapping_text(configuration, localdir,"1'b0", wid1=1, lsb1=tx_print_index_lsb, lsb2=-1)
# if (tx_print_index_lsb % config_raw1phy_beat) == 0:
# tx_print_index_lsb += config_raw1phy_data - config_raw1phy_beat
#
# # local_lsb1 = print_aib_assign_text_check_for_aib_bit (configuration, local_lsb1, use_tx, sysv)
## The print vectors were messed up by bit blasting. We'll correct it here
use_tx = True if localdir == "output" else False
if use_tx:
#global_struct.g_llink_vector_print_tx.clear()
del global_struct.g_llink_vector_print_tx [:]
else:
#global_struct.g_llink_vector_print_rx.clear()
del global_struct.g_llink_vector_print_rx [:]
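# Regenerate the per-link assign statements from scratch, once per replication, so
# that the printed indices line up with the replicated-structure bit positions.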
for rstruct_iteration in list (range (0, configuration['RSTRUCT_MULTIPLY_FACTOR'])):
tx_print_index_lsb = rstruct_iteration * config_raw1phy_beat
for llink in configuration['LL_LIST']:
if llink['DIR'] == localdir:
use_tx = True if localdir == "output" else False
for sig in llink['SIGNALLIST_MAIN']:
if sig['TYPE'] == 'valid' or sig['TYPE'] == 'ready':
continue
if use_tx:
if llink_lsb != -1:
global_struct.g_llink_vector_print_tx.append (" assign {0:20} {1:13} = {2:20} {3:13}\n".format(gen_llink_concat_fifoname (llink['NAME'],"input" ), gen_index_msb (sig['SIGWID'], sig['LLINDEX_MAIN_LSB'] + (rstruct_iteration * llink['WIDTH_MAIN'])), sig['NAME'], gen_index_msb (sig['SIGWID'], sig['LSB'] + (rstruct_iteration * sig['SIGWID']))))
else:
if llink_lsb != -1:
global_struct.g_llink_vector_print_rx.append (" assign {0:20} {1:13} = {2:20} {3:13}\n".format(gen_llink_concat_fifoname (llink['NAME'],"output"), gen_index_msb (sig['SIGWID'], sig['LLINDEX_MAIN_LSB'] + (rstruct_iteration * llink['WIDTH_MAIN'])), sig['NAME'], gen_index_msb (sig['SIGWID'], sig['LSB'] + (rstruct_iteration * sig['SIGWID']))))
return configuration
## calculate_bit_loc_repstruct
##########################################################################################
##########################################################################################
## calculate_bit_loc_fixed_alloc
## Calculate fixed allocation bit locations
def calculate_bit_loc_fixed_alloc(this_is_tx, configuration):
if this_is_tx:
localdir = "output"
otherdir = "input"
else:
localdir = "input"
otherdir = "output"
local_index_wid = 0
tx_print_index_lsb = 0
rx_print_index_lsb = 0
tx_local_index_lsb = 0
rx_local_index_lsb = 0
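# Pack the channel in order: links in the local direction get a push (valid) bit
# followed by their main data payload; links in the opposite direction contribute
# a single credit (ready) bit.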
for llink in configuration['LL_LIST']:
if llink['DIR'] == localdir:
if llink['HASVALID']:
local_index_wid = 1
llink['PUSH_RAW_INDEX_MAIN'] = gen_index_msb(local_index_wid, tx_local_index_lsb)
llink['PUSH_RAW_LSB_MAIN'] = tx_local_index_lsb
tx_local_index_lsb += local_index_wid
tx_print_index_lsb = print_aib_mapping_text(configuration, localdir, gen_llink_concat_pushbit (llink['NAME'],otherdir), wid1=1, lsb1=tx_print_index_lsb)
local_index_wid = llink['WIDTH_MAIN']
llink['DATA_RAW_INDEX_MAIN'] = gen_index_msb(local_index_wid, tx_local_index_lsb)
llink['DATA_RAW_LSB_MAIN'] = tx_local_index_lsb
tx_local_index_lsb += local_index_wid
for sig in llink['SIGNALLIST_MAIN']:
if sig['TYPE'] == 'valid' or sig['TYPE'] == 'ready':
continue
tx_print_index_lsb = print_aib_mapping_text(configuration, localdir, sig['NAME'], wid1=sig['SIGWID'], lsb1=tx_print_index_lsb, lsb2=sig['LSB'], llink_lsb=sig['LLINDEX_MAIN_LSB'], llink_name=llink['NAME'])
else:
if llink['HASREADY']:
local_index_wid = 1
llink['CREDIT_RAW_INDEX_MAIN'] = gen_index_msb(local_index_wid, tx_local_index_lsb)
llink['CREDIT_RAW_LSB_MAIN'] = tx_local_index_lsb
#global_struct.g_dv_vector_print.append ("assign {}_f = {};\n".format(gen_llink_concat_credit (llink['NAME'],localdir), tx_local_index_lsb))
tx_local_index_lsb += local_index_wid
tx_print_index_lsb = print_aib_mapping_text(configuration, localdir, gen_llink_concat_credit (llink['NAME'],localdir), wid1=1, lsb1=tx_print_index_lsb)
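# Any bits left over in the channel are either exposed as a spare_* vector or tied
# off to 1'b0, depending on USE_SPARE_VECTOR.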
if this_is_tx:
local_index_wid = configuration['TOTAL_TX_ROUNDUP_BIT_MAIN']
tx_local_index_lsb += local_index_wid
if configuration['TOTAL_TX_ROUNDUP_BIT_MAIN'] :
if global_struct.USE_SPARE_VECTOR:
tx_print_index_lsb= print_aib_mapping_text(configuration, localdir,"spare_"+localdir, wid1=configuration['TOTAL_TX_ROUNDUP_BIT_MAIN'], lsb1=tx_print_index_lsb, lsb2=0, llink_lsb=0, llink_name="spare")
configuration['TX_SPARE_WIDTH'] = configuration['TOTAL_TX_ROUNDUP_BIT_MAIN']
else:
tx_print_index_lsb= print_aib_mapping_text(configuration, localdir,"1'b0", wid1=configuration['TOTAL_TX_ROUNDUP_BIT_MAIN'], lsb1=tx_print_index_lsb, lsb2=-1)
configuration['TX_SPARE_WIDTH'] = 0
else:
local_index_wid = configuration['TOTAL_RX_ROUNDUP_BIT_MAIN']
tx_local_index_lsb += local_index_wid
if configuration['TOTAL_RX_ROUNDUP_BIT_MAIN'] :
if global_struct.USE_SPARE_VECTOR:
tx_print_index_lsb= print_aib_mapping_text(configuration, localdir,"spare_"+localdir, wid1=configuration['TOTAL_RX_ROUNDUP_BIT_MAIN'], lsb1=tx_print_index_lsb, lsb2=0, llink_lsb=0, llink_name="spare")
configuration['RX_SPARE_WIDTH'] = configuration['TOTAL_RX_ROUNDUP_BIT_MAIN']
else:
tx_print_index_lsb= print_aib_mapping_text(configuration, localdir,"1'b0", wid1=configuration['TOTAL_RX_ROUNDUP_BIT_MAIN'], lsb1=tx_print_index_lsb, lsb2=-1)
configuration['RX_SPARE_WIDTH'] = 0
return configuration
## calculate_bit_loc_fixed_alloc
##########################################################################################
##########################################################################################
## make_name_file
## Generate name files
def make_name_file(configuration):
for direction in ['master', 'slave']:
name_file_name = "{}_{}_name".format(configuration['MODULE'], direction)
file_name = open("{}/{}.sv".format(configuration['OUTPUT_DIR'], name_file_name), "w+")
print_verilog_header(file_name)
file_name.write("module {} (\n".format(name_file_name))
first_line = True
# List User Signals
for llink in configuration['LL_LIST']:
#if (llink['WIDTH_GALT'] != 0) and (llink['WIDTH_MAIN'] != 0):
# file_name.write("\n // {0} channel\n".format(llink['NAME']))
# for sig_gen2 in llink['SIGNALLIST_MAIN']:
# found_gen1_match = 0;
# for sig_gen1 in llink['SIGNALLIST_GALT']:
# if sig_gen2['NAME'] == sig_gen1['NAME']:
# found_gen1_match = 1
# localdir = gen_direction(name_file_name, sig_gen2['DIR'])
# print_verilog_io_line(file_name, localdir, sig_gen2['NAME'], index=gen_index_msb(sig_gen2['SIGWID'] + sig_gen1['SIGWID'],sig_gen1['LSB'], sysv=False))
# if found_gen1_match == 0:
# localdir = gen_direction(name_file_name, sig_gen2['DIR'])
# print_verilog_io_line(file_name, localdir, sig_gen2['NAME'], index=gen_index_msb(sig_gen2['SIGWID'],sig_gen2['LSB'], sysv=False))
#
#else:
file_name.write("\n // {0} channel\n".format(llink['NAME']))
for sig in llink['SIGNALLIST_MAIN']:
if sig['TYPE'] == "rstruct_enable" and direction == 'master' : ## Drop the user_enable if in master (slave only)
continue
localdir = gen_direction(name_file_name, sig['DIR'])
print_verilog_io_line(file_name, localdir, sig['NAME'], index=gen_index_msb(sig['SIGWID'] * configuration['RSTRUCT_MULTIPLY_FACTOR'],sig['LSB'], sysv=False))
# List Logic Link Signals
file_name.write("\n // Logic Link Interfaces\n")
for llink in configuration['LL_LIST']:
if first_line:
first_line = False
else:
file_name.write("\n")
localdir = gen_direction(name_file_name, llink['DIR'], True)
if llink['HASVALID']:
print_verilog_io_line(file_name, gen_direction(name_file_name, llink['DIR'], True), gen_llink_user_valid (llink['NAME'] ))
if localdir == 'output':
print_verilog_io_line(file_name, gen_direction(name_file_name, llink['DIR'], True), gen_llink_user_fifoname (llink['NAME'],localdir), gen_index_msb(llink['WIDTH_MAIN'] * configuration['RSTRUCT_MULTIPLY_FACTOR'], sysv=False))
else:
if configuration['REPLICATED_STRUCT']:
print_verilog_io_line(file_name, gen_direction(name_file_name, llink['DIR'], True), gen_llink_user_fifoname (llink['NAME'],localdir), gen_index_msb(llink['WIDTH_RX_RSTRUCT'] * configuration['RSTRUCT_MULTIPLY_FACTOR'], sysv=False))
else:
print_verilog_io_line(file_name, gen_direction(name_file_name, llink['DIR'], True), gen_llink_user_fifoname (llink['NAME'],localdir), gen_index_msb(llink['WIDTH_MAIN'] * configuration['RSTRUCT_MULTIPLY_FACTOR'], sysv=False))
if llink['HASREADY']:
print_verilog_io_line(file_name, gen_direction(name_file_name, llink['DIR'], False), gen_llink_user_ready (llink['NAME'] ))
file_name.write("\n")
if llink['HASVALID_NOREADY_NOREP']:
print_verilog_io_line(file_name, "input", "rx_online")
print_verilog_io_line(file_name, "input", "m_gen2_mode", comma=False)
file_name.write("\n);\n")
file_name.write("\n // Connect Data\n")
for llink in configuration['LL_LIST']:
file_name.write("\n")
localdir = gen_direction(name_file_name, llink['DIR'], True)
if localdir == 'output':
if llink['HASVALID']:
for sig in llink['SIGNALLIST_MAIN']:
if sig['TYPE'] == 'valid':
print_verilog_assign(file_name, gen_llink_user_valid (llink['NAME']), sig['NAME'])
else:
print_verilog_assign(file_name, gen_llink_user_valid (llink['NAME']), "1'b1", comment=gen_llink_user_valid (llink['NAME']) + " is unused" )
if llink['HASREADY']:
for sig in llink['SIGNALLIST_MAIN']:
if sig['TYPE'] == 'ready':
print_verilog_assign(file_name, sig['NAME'], gen_llink_user_ready (llink['NAME']))
#else:
# file_name.write(" // "+ gen_llink_user_ready (llink['NAME']) +" is unused\n")
for rstruct_iteration in list (range (0, configuration['RSTRUCT_MULTIPLY_FACTOR'])):
for sig in llink['SIGNALLIST_MAIN']:
if sig['TYPE'] == 'signal' or sig['TYPE'] == 'signal_valid' or sig['TYPE'] == 'bus':
print_verilog_assign(file_name, gen_llink_user_fifoname (llink['NAME'], localdir), sig['NAME'], index1=gen_index_msb (sig['SIGWID'], sig['LLINDEX_MAIN_LSB'] + (rstruct_iteration * llink['WIDTH_MAIN'])), index2=gen_index_msb(sig['SIGWID'], sig['LSB'] + (rstruct_iteration * sig['SIGWID'])))
#if sig['TYPE'] == 'rstruct_enable' and localdir == 'input':
# print_verilog_assign(file_name, gen_llink_user_fifoname (llink['NAME'], localdir), sig['NAME'], index1=gen_index_msb (sig['SIGWID'], sig['LLINDEX_MAIN_LSB'] + rstruct_iteration + (configuration['RSTRUCT_MULTIPLY_FACTOR'] * llink['WIDTH_MAIN'])), index2=gen_index_msb(sig['SIGWID'], sig['LSB'] + (rstruct_iteration * sig['SIGWID'])))
#print_verilog_assign(file_name, gen_llink_user_fifoname (llink['NAME'], localdir), "'0", index1=gen_index_msb(llink['WIDTH_MAIN']-llink['WIDTH_GALT'], llink['WIDTH_GALT']))
#file_name.write(" assign "+gen_llink_user_fifoname (llink['NAME'], localdir)+" = m_gen2_mode ? "+gen_llink_user_fifoname (llink['NAME'], localdir)+" : "+gen_llink_user_fifoname (llink['NAME'], localdir)+";\n")
else: # if llink['DIR'] == 'output':
if llink['HASVALID']:
for sig in llink['SIGNALLIST_MAIN']:
if sig['TYPE'] == 'valid':
print_verilog_assign(file_name, sig['NAME'], gen_llink_user_valid (llink['NAME']))
else:
file_name.write(" // "+ gen_llink_user_valid (llink['NAME']) +" is unused\n")
if llink['HASREADY']:
for sig in llink['SIGNALLIST_MAIN']:
if sig['TYPE'] == 'ready':
print_verilog_assign(file_name, gen_llink_user_ready (llink['NAME']), sig['NAME'])
#else:
# print_verilog_assign(file_name, gen_llink_user_ready (llink['NAME']), "1'b1", comment=gen_llink_user_ready (llink['NAME']) + " is unused" )
for rstruct_iteration in list (range (0, configuration['RSTRUCT_MULTIPLY_FACTOR'])):
for sig in llink['SIGNALLIST_MAIN']:
if sig['TYPE'] == 'signal' or sig['TYPE'] == 'bus':
print_verilog_assign(file_name, sig['NAME'], gen_llink_user_fifoname (llink['NAME'], localdir), index1=gen_index_msb(sig['SIGWID'], sig['LSB'] + (rstruct_iteration * sig['SIGWID'])), index2=gen_index_msb (sig['SIGWID'], sig['LLINDEX_MAIN_LSB'] + (rstruct_iteration * llink['WIDTH_MAIN'])))
elif sig['TYPE'] == 'signal_valid':
print_verilog_assign(file_name, sig['NAME'], "rx_online & " + gen_llink_user_fifoname (llink['NAME'], localdir), index1=gen_index_msb(sig['SIGWID'], sig['LSB'] + (rstruct_iteration * sig['SIGWID'])), index2=gen_index_msb (sig['SIGWID'], sig['LLINDEX_MAIN_LSB'] + (rstruct_iteration * llink['WIDTH_MAIN'])))
if sig['TYPE'] == 'rstruct_enable' and localdir == 'input':
print_verilog_assign(file_name, sig['NAME'], gen_llink_user_fifoname (llink['NAME'], localdir), index1=gen_index_msb(sig['SIGWID'], sig['LSB'] + rstruct_iteration) , index2=gen_index_msb (sig['SIGWID'], (sig['LLINDEX_MAIN_LSB'] * configuration['RSTRUCT_MULTIPLY_FACTOR']) + rstruct_iteration))
#### for sig in llink['SIGNALLIST_MAIN']:
#### if sig['TYPE'] == 'signal' or sig['TYPE'] == 'bus':
#### print_verilog_assign(file_name, gen_llink_user_fifoname (llink['NAME'], localdir), sig['NAME'], index1=sig['LLINDEX_MAIN'], index2=gen_index_msb(sig['SIGWID'],sig['LSB']))
####
#### for sig in llink['SIGNALLIST_GALT']:
#### if sig['TYPE'] == 'signal' or sig['TYPE'] == 'bus':
#### print_verilog_assign(file_name, gen_llink_user_fifoname (llink['NAME'], localdir), sig['NAME'], index1=sig['LLINDEX_GALT'], index2=gen_index_msb(sig['SIGWID'],sig['LSB']))
#### print_verilog_assign(file_name, gen_llink_user_fifoname (llink['NAME'], localdir), "'0", index1=gen_index_msb(llink['WIDTH_MAIN']-llink['WIDTH_GALT'], llink['WIDTH_GALT']))
#### file_name.write(" assign "+gen_llink_user_fifoname (llink['NAME'], localdir)+" = m_gen2_mode ? "+gen_llink_user_fifoname (llink['NAME'], localdir)+" : "+gen_llink_user_fifoname (llink['NAME'], localdir)+";\n")
#### else: # if llink['DIR'] == 'output':
####
#### if llink['HASVALID']:
#### for sig in llink['SIGNALLIST_MAIN']:
#### if sig['TYPE'] == 'valid':
#### print_verilog_assign(file_name, sig['NAME'], gen_llink_user_valid (llink['NAME']))
#### else:
#### file_name.write(" // "+ gen_llink_user_valid (llink['NAME']) +" is unused\n")
####
#### if llink['HASREADY']:
#### for sig in llink['SIGNALLIST_MAIN']:
#### if sig['TYPE'] == 'ready':
#### print_verilog_assign(file_name, gen_llink_user_ready (llink['NAME']), sig['NAME'])
#### else:
#### print_verilog_assign(file_name, gen_llink_user_ready (llink['NAME']), "1'b1", comment=gen_llink_user_ready (llink['NAME']) + " is unused" )
####
#### for sig in llink['SIGNALLIST_MAIN']:
#### if sig['TYPE'] == 'signal' or sig['TYPE'] == 'bus':
#### print_verilog_assign(file_name, sig['NAME'], gen_llink_user_fifoname (llink['NAME'], localdir), index1=gen_index_msb(sig['SIGWID'],sig['LSB']), index2=sig['LLINDEX_MAIN'])
####
#### for sig in llink['SIGNALLIST_GALT']:
#### if sig['TYPE'] == 'signal' or sig['TYPE'] == 'bus':
#### print_verilog_assign(file_name, sig['NAME'], gen_llink_user_fifoname (llink['NAME'], localdir), index1=gen_index_msb(sig['SIGWID'],sig['LSB']), index2=sig['LLINDEX_GALT'])
file_name.write("\n")
file_name.write("endmodule\n")
file_name.close()
return
## make_name_file
##########################################################################################
##########################################################################################
## make_concat_file
## Generate concat file
def make_concat_file(configuration):
for direction in ['master', 'slave']:
#coding:utf-8
#
# id: functional.datatypes.decfloat_min_distinguish
# title: List of all values starting from 1.0 divided by 2, until previous and current become equal
# description:
# Checked on 4.0.0.1740 (both Windows and POSIX give the same result).
#
# tracker_id:
# min_versions: ['4.0.0']
# versions: 4.0
# qmid: None
import pytest
from firebird.qa import db_factory, isql_act, Action
# version: 4.0
# resources: None
substitutions_1 = [('[\\s]+', ' ')]
init_script_1 = """"""
db_1 = db_factory(sql_dialect=3, init=init_script_1)
test_script_1 = """
set heading off;
set term ^;
execute block returns(i int, x decfloat, y decfloat) as
begin
i = 0;
x = cast(1.0 as decfloat);
y = cast(0.5 as decfloat);
while ( i < 20000 ) do
begin
suspend;
i = i+1;
x = y/2;
y = y/4;
if ( x<= y ) then
begin
suspend;
leave;
end
end
end
^
set term ;^
"""
act_1 = isql_act('db_1', test_script_1, substitutions=substitutions_1)
expected_stdout_1 = """
0 1.0 0.5
1 0.25 0.125
2 0.0625 0.03125
3 0.015625 0.0078125
4 0.00390625 0.001953125
5 0.0009765625 0.00048828125
6 0.000244140625 0.0001220703125
7 0.00006103515625 0.000030517578125
8 0.0000152587890625 0.00000762939453125
9 0.000003814697265625 0.0000019073486328125
10 9.5367431640625E-7 4.76837158203125E-7
11 2.384185791015625E-7 1.1920928955078125E-7
12 5.9604644775390625E-8 2.98023223876953125E-8
13 1.490116119384765625E-8 7.450580596923828125E-9
14 3.7252902984619140625E-9 1.86264514923095703125E-9
15 9.31322574615478515625E-10 4.656612873077392578125E-10
16 2.3283064365386962890625E-10 1.16415321826934814453125E-10
17 5.82076609134674072265625E-11 2.910383045673370361328125E-11
18 1.4551915228366851806640625E-11 7.2759576141834259033203125E-12
19 3.63797880709171295166015625E-12 1.818989403545856475830078125E-12
20 9.094947017729282379150390625E-13 4.5474735088646411895751953125E-13
21 2.27373675443232059478759765625E-13 1.136868377216160297393798828125E-13
22 5.684341886080801486968994140625E-14 2.8421709430404007434844970703125E-14
23 1.42108547152020037174224853515625E-14 7.10542735760100185871124267578125E-15
24 3.552713678800500929355621337890625E-15 1.776356839400250464677810668945313E-15
25 8.881784197001252323389053344726565E-16 4.440892098500626161694526672363283E-16
26 2.220446049250313080847263336181642E-16 1.110223024625156540423631668090821E-16
27 5.551115123125782702118158340454105E-17 2.775557561562891351059079170227053E-17
28 1.387778780781445675529539585113527E-17 6.938893903907228377647697925567633E-18
29 3.469446951953614188823848962783817E-18 1.734723475976807094411924481391908E-18
30 8.67361737988403547205962240695954E-19 4.33680868994201773602981120347977E-19
31 2.168404344971008868014905601739885E-19 1.084202172485504434007452800869943E-19
32 5.421010862427522170037264004349715E-20 2.710505431213761085018632002174858E-20
33 1.355252715606880542509316001087429E-20 6.776263578034402712546580005437145E-21
34 3.388131789017201356273290002718573E-21 1.694065894508600678136645001359286E-21
35 8.47032947254300339068322500679643E-22 4.235164736271501695341612503398215E-22
36 2.117582368135750847670806251699108E-22 1.058791184067875423835403125849554E-22
37 5.29395592033937711917701562924777E-23 2.646977960169688559588507814623885E-23
38 1.323488980084844279794253907311943E-23 6.617444900424221398971269536559713E-24
39 3.308722450212110699485634768279857E-24 1.654361225106055349742817384139928E-24
40 8.27180612553027674871408692069964E-25 4.13590306276513837435704346034982E-25
41 2.06795153138256918717852173017491E-25 1.033975765691284593589260865087455E-25
42 5.169878828456422967946304325437275E-26 2.584939414228211483973152162718638E-26
43 1.292469707114105741986576081359319E-26 6.462348535570528709932880406796595E-27
44 3.231174267785264354966440203398298E-27 1.615587133892632177483220101699149E-27
45 8.077935669463160887416100508495745E-28 4.038967834731580443708050254247873E-28
46 2.019483917365790221854025127123937E-28 1.009741958682895110927012563561968E-28
47 5.04870979341447555463506281780984E-29 2.52435489670723777731753140890492E-29
48 1.26217744835361888865876570445246E-29 6.3108872417680944432938285222623E-30
49 3.15544362088404722164691426113115E-30 1.577721810442023610823457130565575E-30
50 7.888609052210118054117285652827875E-31 3.944304526105059027058642826413938E-31
51 1.972152263052529513529321413206969E-31 9.860761315262647567646607066034845E-32
52 4.930380657631323783823303533017423E-32 2.465190328815661891911651766508711E-32
53 1.232595164407830945955825883254356E-32 6.162975822039154729779129416271778E-33
54 3.081487911019577364889564708135889E-33 1.540743955509788682444782354067945E-33
55 7.703719777548943412223911770339725E-34 3.851859888774471706111955885169863E-34
56 1.925929944387235853055977942584932E-34 9.629649721936179265279889712924658E-35
57 4.814824860968089632639944856462329E-35 2.407412430484044816319972428231165E-35
58 1.203706215242022408159986214115583E-35 6.018531076210112040799931070577913E-36
59 3.009265538105056020399965535288957E-36 1.504632769052528010199982767644478E-36
60 7.52316384526264005099991383822239E-37 3.761581922631320025499956919111195E-37
61 1.880790961315660012749978459555598E-37 9.403954806578300063749892297777988E-38
62 4.701977403289150031874946148888994E-38 2.350988701644575015937473074444497E-38
63 1.175494350822287507968736537222249E-38 5.877471754111437539843682686111243E-39
64 2.938735877055718769921841343055622E-39 1.469367938527859384960920671527811E-39
65 7.346839692639296924804603357639055E-40 3.673419846319648462402301678819528E-40
66 1.836709923159824231201150839409764E-40 9.18354961579912115600575419704882E-41
67 4.59177480789956057800287709852441E-41 2.295887403949780289001438549262205E-41
68 1.147943701974890144500719274631103E-41 5.739718509874450722503596373155513E-42
69 2.869859254937225361251798186577757E-42 1.434929627468612680625899093288878E-42
70 7.17464813734306340312949546644439E-43 3.587324068671531701564747733222195E-43
71 1.793662034335765850782373866611098E-43 8.968310171678829253911869333055488E-44
72 4.484155085839414626955934666527744E-44 2.242077542919707313477967333263872E-44
73 1.121038771459853656738983666631936E-44 5.60519385729926828369491833315968E-45
74 2.80259692864963414184745916657984E-45 1.40129846432481707092372958328992E-45
75 7.0064923216240853546186479164496E-46 3.5032461608120426773093239582248E-46
76 1.7516230804060213386546619791124E-46 8.758115402030106693273309895562E-47
77 4.379057701015053346636654947781E-47 2.1895288505075266733183274738905E-47
78 1.09476442525376333665916373694525E-47 5.47382212626881668329581868472625E-48
79 2.736911063134408341647909342363125E-48 1.368455531567204170823954671181563E-48
80 6.842277657836020854119773355907815E-49 3.421138828918010427059886677953908E-49
81 1.710569414459005213529943338976954E-49 8.55284707229502606764971669488477E-50
82 4.276423536147513033824858347442385E-50 2.138211768073756516912429173721193E-50
83 1.069105884036878258456214586860597E-50 5.345529420184391292281072934302983E-51
84 2.672764710092195646140536467151492E-51 1.336382355046097823070268233575746E-51
85 6.68191177523048911535134116787873E-52 3.340955887615244557675670583939365E-52
86 1.670477943807622278837835291969683E-52 8.352389719038111394189176459848413E-53
87 4.176194859519055697094588229924207E-53 2.088097429759527848547294114962103E-53
88 1.044048714879763924273647057481052E-53 5.220243574398819621368235287405258E-54
89 2.610121787199409810684117643702629E-54 1.305060893599704905342058821851315E-54
90 6.525304467998524526710294109256575E-55 3.262652233999262263355147054628288E-55
91 1.631326116999631131677573527314144E-55 8.15663058499815565838786763657072E-56
92 4.07831529249907782919393381828536E-56 2.03915764624953891459696690914268E-56
93 1.01957882312476945729848345457134E-56 5.0978941156238472864924172728567E-57
94 2.54894705781192364324620863642835E-57 1.274473528905961821623104318214175E-57
95 6.372367644529809108115521591070875E-58 3.186183822264904554057760795535438E-58
96 1.593091911132452277028880397767719E-58 7.965459555662261385144401988838595E-59
97 3.982729777831130692572200994419298E-59 1.991364888915565346286100497209649E-59
98 9.956824444577826731430502486048245E-60 4.978412222288913365715251243024123E-60
99 2.489206111144456682857625621512062E-60 1.244603055572228341428812810756031E-60
100 6.223015277861141707144064053780155E-61 3.111507638930570853572032026890078E-61
101 1.555753819465285426786016013445039E-61 7.778769097326427133930080067225195E-62
102 3.889384548663213566965040033612598E-62 1.944692274331606783482520016806299E-62
103 9.723461371658033917412600084031495E-63 4.861730685829016958706300042015748E-63
104 2.430865342914508479353150021007874E-63 1.215432671457254239676575010503937E-63
105 6.077163357286271198382875052519685E-64 3.038581678643135599191437526259843E-64
106 1.519290839321567799595718763129922E-64 7.596454196607838997978593815649608E-65
107 3.798227098303919498989296907824804E-65 1.899113549151959749494648453912402E-65
108 9.49556774575979874747324226956201E-66 4.747783872879899373736621134781005E-66
109 2.373891936439949686868310567390503E-66 1.186945968219974843434155283695251E-66
110 5.934729841099874217170776418476255E-67 2.967364920549937108585388209238128E-67
111 1.483682460274968554292694104619064E-67 7.41841230137484277146347052309532E-68
112 3.70920615068742138573173526154766E-68 1.85460307534371069286586763077383E-68
113 9.27301537671855346432933815386915E-69 4.636507688359276732164669076934575E-69
114 2.318253844179638366082334538467288E-69 1.159126922089819183041167269233644E-69
115 5.79563461044909591520583634616822E-70 2.89781730522454795760291817308411E-70
116 1.448908652612273978801459086542055E-70 7.244543263061369894007295432710275E-71
117 3.622271631530684947003647716355138E-71 1.811135815765342473501823858177569E-71
118 9.055679078826712367509119290887845E-72 4.527839539413356183754559645443923E-72
119 2.263919769706678091877279822721962E-72 1.131959884853339045938639911360981E-72
120 5.659799424266695229693199556804905E-73 2.829899712133347614846599778402453E-73
121 1.414949856066673807423299889201227E-73 7.074749280333369037116499446006133E-74
122 3.537374640166684518558249723003067E-74 1.768687320083342259279124861501533E-74
123 8.843436600416711296395624307507665E-75 4.421718300208355648197812153753833E-75
124 2.210859150104177824098906076876917E-75 1.105429575052088912049453038438458E-75
125 5.52714787526044456024726519219229E-76 2.763573937630222280123632596096145E-76
126 1.381786968815111140061816298048073E-76 6.908934844075555700309081490240363E-77
127 3.454467422037777850154540745120182E-77 1.727233711018888925077270372560091E-77
128 8.636168555094444625386351862800455E-78 4.318084277547222312693175931400228E-78
129 2.159042138773611156346587965700114E-78 1.079521069386805578173293982850057E-78
130 5.397605346934027890866469914250285E-79 2.698802673467013945433234957125143E-79
131 1.349401336733506972716617478562572E-79 6.747006683667534863583087392812858E-80
132 3.373503341833767431791543696406429E-80 1.686751670916883715895771848203215E-80
133 8.433758354584418579478859241016075E-81 4.216879177292209289739429620508038E-81
134 2.108439588646104644869714810254019E-81 1.054219794323052322434857405127010E-81
135 5.27109897161526161217428702563505E-82 2.635549485807630806087143512817525E-82
136 1.317774742903815403043571756408763E-82 6.588873714519077015217858782043813E-83
137 3.294436857259538507608929391021907E-83 1.647218428629769253804464695510953E-83
138 8.236092143148846269022323477554765E-84 4.118046071574423134511161738777383E-84
139 2.059023035787211567255580869388692E-84 1.029511517893605783627790434694346E-84
140 5.14755758946802891813895217347173E-85 2.573778794734014459069476086735865E-85
141 1.286889397367007229534738043367933E-85 6.434446986835036147673690216839663E-86
142 3.217223493417518073836845108419832E-86 1.608611746708759036918422554209916E-86
143 8.04305873354379518459211277104958E-87 4.02152936677189759229605638552479E-87
144 2.010764683385948796148028192762395E-87 1.005382341692974398074014096381198E-87
145 5.02691170846487199037007048190599E-88 2.513455854232435995185035240952995E-88
146 1.256727927116217997592517620476498E-88 6.283639635581089987962588102382488E-89
147 3.141819817790544993981294051191244E-89 1.570909908895272496990647025595622E-89
148 7.85454954447636248495323512797811E-90 3.927274772238181242476617563989055E-90
149 1.963637386119090621238308781994528E-90 9.818186930595453106191543909972638E-91
150 4.909093465297726553095771954986319E-91 2.454546732648863276547885977493160E-91
151 1.227273366324431638273942988746580E-91 6.13636683162215819136971494373290E-92
152 3.06818341581107909568485747186645E-92 1.534091707905539547842428735933225E-92
153 7.670458539527697739212143679666125E-93 3.835229269763848869606071839833063E-93
154 1.917614634881924434803035919916532E-93 9.588073174409622174015179599582658E-94
155 4.794036587204811087007589799791329E-94 2.397018293602405543503794899895665E-94
156 1.198509146801202771751897449947833E-94 5.992545734006013858759487249739163E-95
157 2.996272867003006929379743624869582E-95 1.498136433501503464689871812434791E-95
158 7.490682167507517323449359062173955E-96 3.745341083753758661724679531086978E-96
159 1.872670541876879330862339765543489E-96 9.363352709384396654311698827717445E-97
160 4.681676354692198327155849413858723E-97 2.340838177346099163577924706929361E-97
161 1.170419088673049581788962353464681E-97 5.852095443365247908944811767323403E-98
162 2.926047721682623954472405883661702E-98 1.463023860841311977236202941830851E-98
163 7.315119304206559886181014709154255E-99 3.657559652103279943090507354577128E-99
164 1.828779826051639971545253677288564E-99 9.14389913025819985772626838644282E-100
165 4.57194956512909992886313419322141E-100 2.285974782564549964431567096610705E-100
166 1.142987391282274982215783548305353E-100 5.714936956411374911078917741526763E-101
167 2.857468478205687455539458870763382E-101 1.428734239102843727769729435381691E-101
168 7.143671195514218638848647176908455E-102 3.571835597757109319424323588454228E-102
169 1.785917798878554659712161794227114E-102 8.92958899439277329856080897113557E-103
170 4.464794497196386649280404485567785E-103 2.232397248598193324640202242783893E-103
171 1.116198624299096662320101121391947E-103 5.580993121495483311600505606959733E-104
172 2.790496560747741655800252803479867E-104 1.395248280373870827900126401739933E-104
173 6.976241401869354139500632008699665E-105 3.488120700934677069750316004349833E-105
174 1.744060350467338534875158002174917E-105 8.720301752336692674375790010874583E-106
175 4.360150876168346337187895005437292E-106 2.180075438084173168593947502718646E-106
176 1.090037719042086584296973751359323E-106 5.450188595210432921484868756796615E-107
177 2.725094297605216460742434378398308E-107 1.362547148802608230371217189199154E-107
178 6.81273574401304115185608594599577E-108 3.406367872006520575928042972997885E-108
179 1.703183936003260287964021486498943E-108 8.515919680016301439820107432494713E-109
180 4.257959840008150719910053716247357E-109 2.128979920004075359955026858123678E-109
181 1.064489960002037679977513429061839E-109 5.322449800010188399887567145309195E-110
182 2.661224900005094199943783572654598E-110 1.330612450002547099971891786327299E-110
183 6.653062250012735499859458931636495E-111 3.326531125006367749929729465818248E-111
184 1.663265562503183874964864732909124E-111 8.31632781251591937482432366454562E-112
185 4.15816390625795968741216183227281E-112 2.079081953128979843706080916136405E-112
186 1.039540976564489921853040458068203E-112 5.197704882822449609265202290341013E-113
187 2.598852441411224804632601145170507E-113 1.299426220705612402316300572585253E-113
188 6.497131103528062011581502862926265E-114 3.248565551764031005790751431463133E-114
189 1.624282775882015502895375715731567E-114 8.121413879410077514476878578657833E-115
190 4.060706939705038757238439289328917E-115 2.030353469852519378619219644664458E-115
191 1.015176734926259689309609822332229E-115 5.075883674631298446548049111661145E-116
192 2.537941837315649223274024555830573E-116 1.268970918657824611637012277915286E-116
193 6.34485459328912305818506138957643E-117 3.172427296644561529092530694788215E-117
194 1.586213648322280764546265347394108E-117 7.931068241611403822731326736970538E-118
195 3.965534120805701911365663368485269E-118 1.982767060402850955682831684242635E-118
196 9.913835302014254778414158421213175E-119 4.956917651007127389207079210606588E-119
197 2.478458825503563694603539605303294E-119 1.239229412751781847301769802651647E-119
198 6.196147063758909236508849013258235E-120 3.098073531879454618254424506629118E-120
199 1.549036765939727309127212253314559E-120 7.745183829698636545636061266572795E-121
200 3.872591914849318272818030633286398E-121 1.936295957424659136409015316643199E-121
201 9.681479787123295682045076583215995E-122 4.840739893561647841022538291607998E-122
202 2.420369946780823920511269145803999E-122 1.210184973390411960255634572902000E-122
203 6.05092486695205980127817286451000E-123 3.02546243347602990063908643225500E-123
204 1.51273121673801495031954321612750E-123 7.5636560836900747515977160806375E-124
205 3.78182804184503737579885804031875E-124 1.890914020922518687899429020159375E-124
206 9.454570104612593439497145100796875E-125 4.727285052306296719748572550398438E-125
207 2.363642526153148359874286275199219E-125 1.181821263076574179937143137599610E-125
208 5.90910631538287089968571568799805E-126 2.954553157691435449842857843999025E-126
209 1.477276578845717724921428921999513E-126 7.386382894228588624607144609997563E-127
210 3.693191447114294312303572304998782E-127 1.846595723557147156151786152499391E-127
211 9.232978617785735780758930762496955E-128 4.616489308892867890379465381248478E-128
212 2.308244654446433945189732690624239E-128 1.154122327223216972594866345312120E-128
213 5.77061163611608486297433172656060E-129 2.88530581805804243148716586328030E-129
214 1.44265290902902121574358293164015E-129 7.21326454514510607871791465820075E-130
215 3.606632272572553039358957329100375E-130 1.803316136286276519679478664550188E-130
216 9.01658068143138259839739332275094E-131 4.50829034071569129919869666137547E-131
217 2.254145170357845649599348330687735E-131 1.127072585178922824799674165343868E-131
218 5.63536292589461412399837082671934E-132 2.81768146294730706199918541335967E-132
219 1.408840731473653530999592706679835E-132 7.044203657368267654997963533399175E-133
220 3.522101828684133827498981766699588E-133 1.761050914342066913749490883349794E-133
221 8.80525457171033456874745441674897E-134 4.402627285855167284373727208374485E-134
222 2.201313642927583642186863604187243E-134 1.100656821463791821093431802093621E-134
223 5.503284107318959105467159010468105E-135 2.751642053659479552733579505234053E-135
224 1.375821026829739776366789752617027E-135 6.879105134148698881833948763085133E-136
225 3.439552567074349440916974381542567E-136 1.719776283537174720458487190771283E-136
226 8.598881417685873602292435953856415E-137 4.299440708842936801146217976928208E-137
227 2.149720354421468400573108988464104E-137 1.074860177210734200286554494232052E-137
228 5.37430088605367100143277247116026E-138 2.68715044302683550071638623558013E-138
229 1.343575221513417750358193117790065E-138 6.717876107567088751790965588950325E-139
230 3.358938053783544375895482794475163E-139 1.679469026891772187947741397237581E-139
231 8.397345134458860939738706986187905E-140 4.198672567229430469869353493093953E-140
232 2.099336283614715234934676746546977E-140 1.049668141807357617467338373273488E-140
233 5.24834070903678808733669186636744E-141 2.62417035451839404366834593318372E-141
234 1.31208517725919702183417296659186E-141 6.5604258862959851091708648329593E-142
235 3.28021294314799255458543241647965E-142 1.640106471573996277292716208239825E-142
236 8.200532357869981386463581041199125E-143 4.100266178934990693231790520599563E-143
237 2.050133089467495346615895260299782E-143 1.025066544733747673307947630149891E-143
238 5.125332723668738366539738150749455E-144 2.562666361834369183269869075374728E-144
239 1.281333180917184591634934537687364E-144 6.40666590458592295817467268843682E-145
240 3.20333295229296147908733634421841E-145 1.601666476146480739543668172109205E-145
241 8.008332380732403697718340860546025E-146 4.004166190366201848859170430273013E-146
242 2.002083095183100924429585215136507E-146 1.001041547591550462214792607568253E-146
243 5.005207737957752311073963037841265E-147 2.502603868978876155536981518920633E-147
244 1.251301934489438077768490759460317E-147 6.256509672447190388842453797301583E-148
245 3.128254836223595194421226898650792E-148 1.564127418111797597210613449325396E-148
246 7.82063709055898798605306724662698E-149 3.91031854527949399302653362331349E-149
247 1.955159272639746996513266811656745E-149 9.775796363198734982566334058283725E-150
248 4.887898181599367491283167029141863E-150 2.443949090799683745641583514570931E-150
249 1.221974545399841872820791757285466E-150 6.109872726999209364103958786427328E-151
250 3.054936363499604682051979393213664E-151 1.527468181749802341025989696606832E-151
251 7.63734090874901170512994848303416E-152 3.81867045437450585256497424151708E-152
252 1.90933522718725292628248712075854E-152 9.5466761359362646314124356037927E-153
253 4.77333806796813231570621780189635E-153 2.386669033984066157853108900948175E-153
254 1.193334516992033078926554450474088E-153 5.966672584960165394632772252370438E-154
255 2.983336292480082697316386126185219E-154 1.491668146240041348658193063092610E-154
256 7.45834073120020674329096531546305E-155 3.729170365600103371645482657731525E-155
257 1.864585182800051685822741328865763E-155 9.322925914000258429113706644328813E-156
258 4.661462957000129214556853322164407E-156 2.330731478500064607278426661082203E-156
259 1.165365739250032303639213330541102E-156 5.826828696250161518196066652705508E-157
260 2.913414348125080759098033326352754E-157 1.456707174062540379549016663176377E-157
261 7.283535870312701897745083315881885E-158 3.641767935156350948872541657940943E-158
262 1.820883967578175474436270828970472E-158 9.104419837890877372181354144852358E-159
263 4.552209918945438686090677072426179E-159 2.276104959472719343045338536213090E-159
264 1.138052479736359671522669268106545E-159 5.690262398681798357613346340532725E-160
265 2.845131199340899178806673170266363E-160 1.422565599670449589403336585133181E-160
266 7.112827998352247947016682925665905E-161 3.556413999176123973508341462832953E-161
267 1.778206999588061986754170731416477E-161 8.891034997940309933770853657082383E-162
268 4.445517498970154966885426828541192E-162 2.222758749485077483442713414270596E-162
269 1.111379374742538741721356707135298E-162 5.55689687371269370860678353567649E-163
270 2.778448436856346854303391767838245E-163 1.389224218428173427151695883919123E-163
271 6.946121092140867135758479419595615E-164 3.473060546070433567879239709797808E-164
272 1.736530273035216783939619854898904E-164 8.68265136517608391969809927449452E-165
273 4.34132568258804195984904963724726E-165 2.17066284129402097992452481862363E-165
274 1.085331420647010489962262409311815E-165 5.426657103235052449811312046559075E-166
275 2.713328551617526224905656023279538E-166 1.356664275808763112452828011639769E-166
276 6.783321379043815562264140058198845E-167 3.391660689521907781132070029099423E-167
277 1.695830344760953890566035014549712E-167 8.479151723804769452830175072748558E-168
278 4.239575861902384726415087536374279E-168 2.119787930951192363207543768187140E-168
279 1.059893965475596181603771884093570E-168 5.29946982737798090801885942046785E-169
280 2.649734913688990454009429710233925E-169 1.324867456844495227004714855116963E-169
281 6.624337284222476135023574275584815E-170 3.312168642111238067511787137792408E-170
282 1.656084321055619033755893568896204E-170 8.28042160527809516877946784448102E-171
283 4.14021080263904758438973392224051E-171 2.070105401319523792194866961120255E-171
284 1.035052700659761896097433480560128E-171 5.175263503298809480487167402800638E-172
285 2.587631751649404740243583701400319E-172 1.293815875824702370121791850700160E-172
286 6.46907937912351185060895925350080E-173 3.23453968956175592530447962675040E-173
| |
# -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from collections import OrderedDict
from typing import Dict, Sequence, Tuple, Type, Union
import pkg_resources
import google.api_core.client_options as ClientOptions # type: ignore
from google.api_core import exceptions # type: ignore
from google.api_core import gapic_v1 # type: ignore
from google.api_core import retry as retries # type: ignore
from google.auth import credentials # type: ignore
from google.oauth2 import service_account # type: ignore
from google.api_core import operation
from google.cloud.gaming_v1.services.game_server_deployments_service import pagers
from google.cloud.gaming_v1.types import common
from google.cloud.gaming_v1.types import game_server_deployments
from google.protobuf import field_mask_pb2 as field_mask # type: ignore
from google.protobuf import timestamp_pb2 as timestamp # type: ignore
from .transports.base import GameServerDeploymentsServiceTransport
from .transports.grpc import GameServerDeploymentsServiceGrpcTransport
class GameServerDeploymentsServiceClientMeta(type):
"""Metaclass for the GameServerDeploymentsService client.
This provides class-level methods for building and retrieving
support objects (e.g. transport) without polluting the client instance
objects.
"""
_transport_registry = (
OrderedDict()
) # type: Dict[str, Type[GameServerDeploymentsServiceTransport]]
_transport_registry["grpc"] = GameServerDeploymentsServiceGrpcTransport
def get_transport_class(
cls, label: str = None
) -> Type[GameServerDeploymentsServiceTransport]:
"""Return an appropriate transport class.
Args:
label: The name of the desired transport. If none is
provided, then the first transport in the registry is used.
Returns:
The transport class to use.
"""
# If a specific transport is requested, return that one.
if label:
return cls._transport_registry[label]
# No transport is requested; return the default (that is, the first one
# in the dictionary).
return next(iter(cls._transport_registry.values()))
class GameServerDeploymentsServiceClient(
metaclass=GameServerDeploymentsServiceClientMeta
):
"""The Game Server Deployment is used to control the deployment
of Agones fleets.
"""
DEFAULT_OPTIONS = ClientOptions.ClientOptions(
api_endpoint="gameservices.googleapis.com"
)
@classmethod
def from_service_account_file(cls, filename: str, *args, **kwargs):
"""Creates an instance of this client using the provided credentials
file.
Args:
filename (str): The path to the service account private key json
file.
args: Additional arguments to pass to the constructor.
kwargs: Additional arguments to pass to the constructor.
Returns:
GameServerDeploymentsServiceClient: The constructed client.
"""
credentials = service_account.Credentials.from_service_account_file(filename)
kwargs["credentials"] = credentials
return cls(*args, **kwargs)
from_service_account_json = from_service_account_file
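# Hypothetical usage sketch (the key-file path and the project/location values are
# placeholders, not part of this module): build a client from a service-account key
# and page through the deployments in one location.
#
#   client = GameServerDeploymentsServiceClient.from_service_account_file(
#       "service-account.json")
#   pager = client.list_game_server_deployments(
#       parent="projects/my-project/locations/global")
#   for deployment in pager:
#       print(deployment.name)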
@staticmethod
def game_server_deployment_path(
project: str, location: str, deployment: str
) -> str:
"""Return a fully-qualified game_server_deployment string."""
return "projects/{project}/locations/{location}/gameServerDeployments/{deployment}".format(
project=project, location=location, deployment=deployment
)
@staticmethod
def game_server_deployment_rollout_path(
project: str, location: str, deployment: str
) -> str:
"""Return a fully-qualified game_server_deployment_rollout string."""
return "projects/{project}/locations/{location}/gameServerDeployments/{deployment}/rollout".format(
project=project, location=location, deployment=deployment
)
def __init__(
self,
*,
credentials: credentials.Credentials = None,
transport: Union[str, GameServerDeploymentsServiceTransport] = None,
client_options: ClientOptions = DEFAULT_OPTIONS,
) -> None:
"""Instantiate the game server deployments service client.
Args:
credentials (Optional[google.auth.credentials.Credentials]): The
authorization credentials to attach to requests. These
credentials identify the application to the service; if none
are specified, the client will attempt to ascertain the
credentials from the environment.
transport (Union[str, ~.GameServerDeploymentsServiceTransport]): The
transport to use. If set to None, a transport is chosen
automatically.
client_options (ClientOptions): Custom options for the client.
"""
if isinstance(client_options, dict):
client_options = ClientOptions.from_dict(client_options)
# Save or instantiate the transport.
# Ordinarily, we provide the transport, but allowing a custom transport
# instance provides an extensibility point for unusual situations.
if isinstance(transport, GameServerDeploymentsServiceTransport):
if credentials:
raise ValueError(
"When providing a transport instance, "
"provide its credentials directly."
)
self._transport = transport
else:
Transport = type(self).get_transport_class(transport)
self._transport = Transport(
credentials=credentials,
host=client_options.api_endpoint or "gameservices.googleapis.com",
)
def list_game_server_deployments(
self,
request: game_server_deployments.ListGameServerDeploymentsRequest = None,
*,
parent: str = None,
retry: retries.Retry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> pagers.ListGameServerDeploymentsPager:
r"""Lists Game Server Deployments in a given project and
Location.
Args:
request (:class:`~.game_server_deployments.ListGameServerDeploymentsRequest`):
The request object. Request message for
GameServerDeploymentsService.ListGameServerDeployments.
parent (:class:`str`):
Required. The parent resource name. Uses the form:
``projects/{project}/locations/{location}``.
This corresponds to the ``parent`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
~.pagers.ListGameServerDeploymentsPager:
Response message for
GameServerDeploymentsService.ListGameServerDeployments.
Iterating over this object will yield
results and resolve additional pages
automatically.
"""
# Create or coerce a protobuf request object.
# Sanity check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
if request is not None and any([parent]):
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
request = game_server_deployments.ListGameServerDeploymentsRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if parent is not None:
request.parent = parent
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = gapic_v1.method.wrap_method(
self._transport.list_game_server_deployments,
default_timeout=None,
client_info=_client_info,
)
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)),
)
# Send the request.
response = rpc(request, retry=retry, timeout=timeout, metadata=metadata)
# This method is paged; wrap the response in a pager, which provides
# an `__iter__` convenience method.
response = pagers.ListGameServerDeploymentsPager(
method=rpc, request=request, response=response
)
# Done; return the response.
return response
def get_game_server_deployment(
self,
request: game_server_deployments.GetGameServerDeploymentRequest = None,
*,
name: str = None,
retry: retries.Retry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> game_server_deployments.GameServerDeployment:
r"""Gets details of a single Game Server Deployment.
Args:
request (:class:`~.game_server_deployments.GetGameServerDeploymentRequest`):
The request object. Request message for
GameServerDeploymentsService.GetGameServerDeployment.
name (:class:`str`):
Required. The name of the Game Server Deployment to
retrieve. Uses the form:
``projects/{project}/locations/{location}/gameServerDeployments/{deployment}``.
This corresponds to the ``name`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
~.game_server_deployments.GameServerDeployment:
A Game Server Deployment resource.
"""
# Create or coerce a protobuf request object.
# Sanity check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
if request is not None and any([name]):
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
request = game_server_deployments.GetGameServerDeploymentRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if name is not None:
request.name = name
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = gapic_v1.method.wrap_method(
self._transport.get_game_server_deployment,
default_timeout=None,
client_info=_client_info,
)
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)),
)
# Send the request.
response = rpc(request, retry=retry, timeout=timeout, metadata=metadata)
# Done; return the response.
return response
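    # Illustrative usage sketch (hypothetical resource name, following the
    # format documented above):
    #
    #   name = ("projects/my-project/locations/global/"
    #           "gameServerDeployments/my-deployment")
    #   deployment = client.get_game_server_deployment(name=name)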
def create_game_server_deployment(
self,
request: game_server_deployments.CreateGameServerDeploymentRequest = None,
*,
parent: str = None,
game_server_deployment: game_server_deployments.GameServerDeployment = None,
retry: retries.Retry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> operation.Operation:
r"""Creates a new Game Server Deployment in a given
project and Location.
Args:
request (:class:`~.game_server_deployments.CreateGameServerDeploymentRequest`):
The request object. Request message for
GameServerDeploymentsService.CreateGameServerDeployment.
parent (:class:`str`):
Required. The parent resource name. Uses the form:
``projects/{project}/locations/{location}``.
This corresponds to the ``parent`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
game_server_deployment (:class:`~.game_server_deployments.GameServerDeployment`):
Required. The Game Server Deployment
resource to be created.
This corresponds to the ``game_server_deployment`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
~.operation.Operation:
                An object representing a long-running
                operation.
#!/usr/bin/env python2
"""
Utilities for performing stochastic matrix and vector compressions.
"""
import numpy
import misc_c_utils
import near_uniform
def fri_subd(vec, num_div, sub_weights, n_samp):
""" Perform FRI compression on a vector whose first elements,
vec[i] are each subdivided into equal segments, and
whose last elements are each divided into unequal segments.
Parameters
----------
vec : (numpy.ndarray, float)
vector on which to perform compression. Elements can
be negative, and it need not be normalized. vec.shape[0]
must equal num_div.shape[0] + sub_weights.shape[0]
num_div : (numpy.ndarray, unsigned int)
the first num_div.shape[0] elements of vec are subdivided
into equal segments, the number of which for each element
is specified in this array
sub_weights : (numpy.ndarray, float)
the weights of the unequal subdivisions of the last
sub_weights.shape[0] elements of vec. Must be row-
normalized.
Returns
-------
(numpy.ndarray, uint32)
2-D array of indices of nonzero elements in the vector. The
0th column specifies the index in the vec array, while
the 1st specifies the index within the subdivision. Not
necessarily sorted, although indices in the uniform part
of the array are grouped first, followed by indices in the
nonuniform part.
(numpy.ndarray, float64)
values of nonzero elements in the compressed vector
"""
new_idx = numpy.zeros([n_samp, 2], dtype=numpy.uint32)
new_vals = numpy.zeros(n_samp)
weights = numpy.abs(vec)
sub_cp = numpy.copy(sub_weights)
preserve_uni, preserve_nonuni = _keep_idx(weights, num_div, sub_cp, n_samp)
preserve_counts = numpy.zeros_like(num_div, dtype=numpy.uint32)
preserve_counts[preserve_uni] = num_div[preserve_uni]
uni_rpt = misc_c_utils.ind_from_count(preserve_counts)
n_exact_uni = uni_rpt.shape[0]
new_idx[:n_exact_uni, 0] = uni_rpt
uni_seq = misc_c_utils.seq_from_count(preserve_counts)
new_idx[:n_exact_uni, 1] = uni_seq
new_vals[:n_exact_uni] = vec[uni_rpt] / num_div[uni_rpt]
nonuni_exact_idx = numpy.nonzero(preserve_nonuni)
n_exact_nonuni = nonuni_exact_idx[0].shape[0]
n_samp -= (n_exact_uni + n_exact_nonuni)
num_uni_wt = num_div.shape[0]
sub_renorm = numpy.sum(sub_cp, axis=1)
weights[num_uni_wt:] *= sub_renorm
sub_renorm.shape = (-1, 1)
sub_renorm = 1. / sub_renorm
sub_cp *= sub_renorm
one_norm = weights.sum()
if abs(one_norm) > 1e-10:
sampl_idx = sys_subd(weights, num_div, sub_cp, n_samp)
end_idx = n_exact_uni + n_samp
new_idx[n_exact_uni:end_idx] = sampl_idx
new_vals[n_exact_uni:end_idx] = numpy.sign(vec[sampl_idx[:, 0]]) * one_norm / n_samp
else:
end_idx = n_exact_uni
end_idx2 = end_idx + n_exact_nonuni
new_idx[end_idx:end_idx2, 0] = nonuni_exact_idx[0] + num_uni_wt
new_idx[end_idx:end_idx2, 1] = nonuni_exact_idx[1]
new_vals[end_idx:end_idx2] = vec[num_uni_wt + nonuni_exact_idx[0]] * sub_weights[nonuni_exact_idx]
return new_idx[:end_idx2], new_vals[:end_idx2]
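# Illustrative call sketch for fri_subd (assumes the compiled helpers in
# misc_c_utils are importable; the numbers are made up for the example):
# the first two entries of vec are split into 3 equal pieces each, and the
# last entry carries explicit, row-normalized sub-weights.
#
#   vec = numpy.array([0.4, 0.3, 0.3])
#   num_div = numpy.array([3, 3], dtype=numpy.uint32)
#   sub_weights = numpy.array([[0.5, 0.25, 0.25]])
#   idx, vals = fri_subd(vec, num_div, sub_weights, n_samp=4)
#   # idx[:, 0] indexes into vec, idx[:, 1] indexes the chosen subdivision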
def deterministic(vec, n_nonz):
"""Calculate the indices of the n_nonz largest-magnitude elementss in vec
Parameters
----------
vec : (numpy.ndarray)
vector to compress
n_nonz : (unsigned int)
desired number of nonzero entries
Returns
-------
(numpy.ndarray, unsigned int)
indices of elements to preserve in compression
"""
weights = numpy.abs(vec)
srt_idx = weights.argsort()[::-1]
cmp_idx = srt_idx[:n_nonz]
return cmp_idx
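# The following helper is an illustrative sketch added for documentation; it
# is not part of the original module API.
def _demo_deterministic():
    """Keep the 2 largest-magnitude entries of a small vector."""
    vec = numpy.array([0.1, -3.0, 0.5, 2.0])
    idx = deterministic(vec, 2)
    # idx is array([1, 3]): the positions of -3.0 and 2.0
    return idx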
def fri_1D(vec, n_samp):
"""Compress a vector in full (non-sparse format) using the
FRI scheme, potentially preserving some elements exactly.
Parameters
----------
vec : (numpy.ndarray)
vector to compress
n_samp : (unsigned int)
desired number of nonzero entries in the output vector
Returns
-------
(numpy.ndarray, unsigned int)
indices of nonzero elements in compressed vector, in order
(numpy.ndarray, float)
values of nonzero elements in compressed vector
"""
weights = numpy.abs(vec)
new_vec = numpy.zeros(weights.shape[0])
counts = numpy.ones_like(vec, dtype=numpy.uint32)
sub_wts = numpy.empty((0, 2))
preserve_idx, empty_ret = _keep_idx(weights, counts, sub_wts, n_samp)
preserve_vals = vec[preserve_idx]
new_vec[preserve_idx] = preserve_vals
n_samp -= preserve_vals.shape[0]
one_norm = weights.sum()
if abs(one_norm) > 1e-10:
sampl_idx = sys_resample(weights, n_samp, ret_idx=True)
new_vec[sampl_idx] = one_norm / n_samp * numpy.sign(vec[sampl_idx])
nonz_idx = numpy.nonzero(new_vec)[0]
return nonz_idx, new_vec[nonz_idx]
def _keep_idx(weights, num_div, sub_weights, n_samp):
# Calculate indices of elements in weights that are preserved exactly
# Elements in weights are subdivided according to num_div and sub_weights
num_uni = num_div.shape[0]
uni_keep = numpy.zeros(num_uni, dtype=numpy.bool_)
nonuni_keep = numpy.zeros_like(sub_weights, dtype=numpy.bool_)
one_norm = weights.sum()
any_kept = True
uni_weights = weights[:num_uni] / num_div
nonuni_weights = weights[num_uni:]
nonuni_weights.shape = (-1, 1)
nonuni_weights = nonuni_weights * sub_weights
while any_kept and one_norm > 1e-9:
add_uni = one_norm / n_samp <= uni_weights
uni_weights[add_uni] = 0
uni_keep[add_uni] = True
num_add_uni = num_div[add_uni].sum()
n_samp -= num_add_uni
one_norm -= weights[:num_uni][add_uni].sum()
if one_norm > 0:
add_nonuni = one_norm / n_samp <= nonuni_weights
chosen_weights = nonuni_weights[add_nonuni]
nonuni_weights[add_nonuni] = 0
nonuni_keep[add_nonuni] = True
num_add_nonuni = chosen_weights.shape[0]
n_samp -= num_add_nonuni
one_norm -= chosen_weights.sum()
else:
num_add_nonuni = 0
any_kept = num_add_uni > 0 or num_add_nonuni > 0
sub_weights[nonuni_keep] = 0
weights[:num_uni][uni_keep] = 0
return uni_keep, nonuni_keep
def sys_resample(vec, nsample, ret_idx=False, ret_counts=False):
"""Choose nsample elements of vector vec according to systematic resampling
algorithm (eq. 44-46 in SIAM Rev. 59 (2017), 547-587)
Parameters
----------
vec : (numpy.ndarray, float)
the weights for each index
nsample : (unsigned int)
the number of samples to draw
ret_idx : (bool, optional)
If True, return a vector containing the indices (possibly repeated) of
chosen indices
ret_counts : (bool, optional)
If True, return a 1-D vector of the same shape as vec with the number of
chosen samples at each position
Returns
-------
(tuple)
Contains 0, 1, or 2 numpy vectors depending on the values of input parameters
ret_idx and ret_counts
"""
if nsample == 0:
return numpy.array([], dtype=int)
rand_points = (numpy.linspace(0, 1, num=nsample, endpoint=False) +
numpy.random.uniform(high=1. / nsample))
intervals = numpy.cumsum(vec)
# normalize if necessary
if intervals[-1] != 1.:
intervals /= intervals[-1]
ret_tuple = ()
if ret_idx:
ret_tuple = ret_tuple + (misc_c_utils.linsearch_1D(intervals, rand_points),)
if ret_counts:
ret_tuple = ret_tuple + (misc_c_utils.linsearch_1D_cts(intervals, rand_points),)
return ret_tuple
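# Illustrative sketch (not part of the original API; assumes the compiled
# misc_c_utils helpers used by sys_resample are importable).
def _demo_sys_resample():
    weights = numpy.array([0.7, 0.2, 0.1])
    # Draw 5 samples; ask for the (possibly repeated) chosen indices.
    (idx,) = sys_resample(weights, 5, ret_idx=True)
    # Roughly 70% of the draws land on index 0, since systematic resampling
    # allocates samples in proportion to the normalized weights.
    return idx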
def sys_subd(weights, counts, sub_weights, nsample):
"""Performs systematic resampling on a vector of weights subdivided
according to counts and sub_weights
Parameters
----------
weights : (numpy.ndarray, float)
vector of weights to be subdivided. weights.shape[0] must equal
counts.shape[0] + sub_weights.shape[0]
counts : (numpy.ndarray, unsigned int)
the first counts.shape[0] elements of weights are subdivided into
equal subintervals
sub_weights : (numpy.ndarray, float)
sub_weights[i] corresponds to the subdivisions of weights[i].
Must be row-normalized
    nsample : (unsigned int)
number of samples to draw
Returns
-------
(numpy.ndarray, unsigned int)
2-D array of chosen indices. The 0th column is the index in
the weights vector, and the 1st is the index with the
subdivision.
"""
if nsample == 0:
return numpy.empty((0, 2), dtype=numpy.uint32)
rand_points = (numpy.arange(0, 1, 1. / nsample) +
numpy.random.uniform(high=1. / nsample))
rand_points = rand_points[:nsample]
big_intervals = numpy.cumsum(weights)
one_norm = big_intervals[-1]
# normalize if necessary
if abs(one_norm - 1.) > 1e-10:
big_intervals /= one_norm
ret_idx = numpy.zeros([nsample, 2], dtype=numpy.uint32)
weight_idx = misc_c_utils.linsearch_1D(big_intervals, rand_points)
ret_idx[:, 0] = weight_idx
rand_points[weight_idx > 0] -= big_intervals[weight_idx[weight_idx > 0] - 1]
rand_points *= one_norm / weights[weight_idx]
rand_points[rand_points >= 1.] = 0.999999
n_uni_wts = counts.shape[0]
num_uni = numpy.searchsorted(weight_idx, n_uni_wts)
ret_idx[:num_uni, 1] = rand_points[:num_uni] * counts[weight_idx[:num_uni]]
subweight_idx = misc_c_utils.linsearch_2D(sub_weights, weight_idx[num_uni:] - n_uni_wts,
rand_points[num_uni:])
ret_idx[num_uni:, 1] = subweight_idx
return ret_idx
def round_binomially(vec, num_round):
"""Round non-integer entries in vec to integer entries in b such that
    b[i] ~ binomial(num_round[i], vec[i] - floor(vec[i]))
           + floor(vec[i]) * num_round[i]
Parameters
----------
vec : (numpy.ndarray, float)
non-integer numbers to be rounded
num_round : (numpy.ndarray, unsigned int)
parameter of the binomial distribution for each entry in vec, must
have same shape as vec
Returns
-------
(numpy.ndarray, int)
integer array of results
"""
flr_vec = numpy.floor(vec)
flr_vec = flr_vec.astype(numpy.int32)
n = num_round.astype(numpy.uint32)
b = flr_vec * num_round + numpy.random.binomial(n, vec - flr_vec).astype(numpy.int32)
return b
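# Illustrative sketch (not part of the original API): binomial rounding of a
# small vector, with one trial per entry.
def _demo_round_binomially():
    vec = numpy.array([1.3, 0.7, 2.0])
    num_round = numpy.array([1, 1, 1], dtype=numpy.uint32)
    b = round_binomially(vec, num_round)
    # b[0] is 1 or 2, b[1] is 0 or 1, b[2] is exactly 2; on average b == vec.
    return b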
def round_bernoulli(vec, mt_ptrs):
"""Round non-integer entries in vec to integer entries in b such that
b[i] ~ binomial(1, vec[i] - floor(vec[i])) + floor(vec[i])
Parameters
----------
vec : (numpy.ndarray, float)
non-integer numbers to be rounded
mt_ptrs : (numpy.ndarray, uint64)
List of addresses to MT state objects to use for RN generation
Returns
-------
(numpy.ndarray, int)
integer array of results
"""
flr_vec = numpy.floor(vec)
flr_vec = flr_vec.astype(numpy.int32)
b = flr_vec + near_uniform.par_bernoulli(vec - flr_vec, mt_ptrs)
return b
def sample_alias(alias, Q, row_idx, mt_ptrs):
"""Perform multinomial sampling using the alias method for an array of
probability distributions.
Parameters
----------
alias : (numpy.ndarray, unsigned int)
alias indices as calculated in cyth_helpers2.setup_alias
Q : (numpy.ndarray, float)
alias probabilities as calculated in cyth_helpers2.setup_alias
row_idx : (numpy.ndarray, unsigned int)
Row index in alias/Q of each value to sample. Can be obtained from
desired numbers of samples using cyth_helpers2.ind_from_count()
mt_ptrs : (numpy.ndarray, uint64)
List of addresses to MT state objects to use for RN generation
Returns
-------
(numpy.ndarray, unsigned char)
1-D array of chosen column indices of each sample
"""
n_states = alias.shape[1]
tot_samp = row_idx.shape[0]
r_ints = numpy.random.randint(n_states, size=tot_samp)
orig_success = near_uniform.par_bernoulli(Q[row_idx, r_ints], mt_ptrs)
orig_idx = orig_success == 1
alias_idx = numpy.logical_not(orig_idx)
| |
# jorgemorgado/sqlalchemy: test/orm/test_cache_key.py
import random
import sqlalchemy as sa
from sqlalchemy import Column
from sqlalchemy import func
from sqlalchemy import inspect
from sqlalchemy import Integer
from sqlalchemy import null
from sqlalchemy import select
from sqlalchemy import Table
from sqlalchemy import testing
from sqlalchemy import text
from sqlalchemy import true
from sqlalchemy import update
from sqlalchemy.orm import aliased
from sqlalchemy.orm import Bundle
from sqlalchemy.orm import defaultload
from sqlalchemy.orm import defer
from sqlalchemy.orm import join as orm_join
from sqlalchemy.orm import joinedload
from sqlalchemy.orm import lazyload
from sqlalchemy.orm import Load
from sqlalchemy.orm import load_only
from sqlalchemy.orm import Query
from sqlalchemy.orm import relationship
from sqlalchemy.orm import selectinload
from sqlalchemy.orm import Session
from sqlalchemy.orm import subqueryload
from sqlalchemy.orm import synonym
from sqlalchemy.orm import with_expression
from sqlalchemy.orm import with_loader_criteria
from sqlalchemy.orm import with_polymorphic
from sqlalchemy.sql.base import CacheableOptions
from sqlalchemy.sql.expression import case
from sqlalchemy.sql.visitors import InternalTraversal
from sqlalchemy.testing import AssertsCompiledSQL
from sqlalchemy.testing import eq_
from sqlalchemy.testing import fixtures
from sqlalchemy.testing import ne_
from sqlalchemy.testing.fixtures import fixture_session
from test.orm import _fixtures
from .inheritance import _poly_fixtures
from .test_query import QueryTest
from ..sql.test_compare import CacheKeyFixture
def stmt_20(*elements):
return tuple(
elem._statement_20() if isinstance(elem, Query) else elem
for elem in elements
)
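# Illustrative sketch (not part of the test suite): two structurally identical
# core selects share a cache key, while their literal values are carried in
# the key's bindparams rather than in the key itself.
def _cache_key_sketch():
    from sqlalchemy import column, table

    t = table("t", column("id"))
    ck1 = select(t).where(t.c.id == 5)._generate_cache_key()
    ck2 = select(t).where(t.c.id == 10)._generate_cache_key()
    assert ck1.key == ck2.key
    assert ck1.bindparams[0].value == 5
    assert ck2.bindparams[0].value == 10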
class CacheKeyTest(CacheKeyFixture, _fixtures.FixtureTest):
run_setup_mappers = "once"
run_inserts = None
run_deletes = None
@classmethod
def setup_mappers(cls):
cls._setup_stock_mapping()
def test_mapper_and_aliased(self):
User, Address, Keyword = self.classes("User", "Address", "Keyword")
self._run_cache_key_fixture(
lambda: (inspect(User), inspect(Address), inspect(aliased(User))),
compare_values=True,
)
def test_attributes(self):
User, Address, Keyword = self.classes("User", "Address", "Keyword")
self._run_cache_key_fixture(
lambda: (
User.id,
Address.id,
aliased(User).id,
aliased(User, name="foo").id,
aliased(User, name="bar").id,
User.name,
User.addresses,
Address.email_address,
aliased(User).addresses,
),
compare_values=True,
)
def test_bundles_in_annotations(self):
User = self.classes.User
self._run_cache_key_fixture(
lambda: (
Bundle("mybundle", User.id).__clause_element__(),
Bundle("myotherbundle", User.id).__clause_element__(),
Bundle("mybundle", User.name).__clause_element__(),
Bundle("mybundle", User.id, User.name).__clause_element__(),
),
compare_values=True,
)
def test_bundles_directly(self):
User = self.classes.User
self._run_cache_key_fixture(
lambda: (
Bundle("mybundle", User.id),
Bundle("mybundle", User.id).__clause_element__(),
Bundle("myotherbundle", User.id),
Bundle("mybundle", User.name),
Bundle("mybundle", User.id, User.name),
),
compare_values=True,
)
def test_query_expr(self):
(User,) = self.classes("User")
self._run_cache_key_fixture(
lambda: (
with_expression(User.name, true()),
with_expression(User.name, null()),
with_expression(User.name, func.foobar()),
with_expression(User.name, User.name == "test"),
Load(User).with_expression(User.name, true()),
Load(User).with_expression(User.name, null()),
Load(User).with_expression(User.name, func.foobar()),
Load(User).with_expression(User.name, User.name == "test"),
),
compare_values=True,
)
def test_loader_criteria(self):
User, Address = self.classes("User", "Address")
from sqlalchemy import Column, Integer, String
class Foo(object):
id = Column(Integer)
name = Column(String)
self._run_cache_key_fixture(
lambda: (
with_loader_criteria(User, User.name != "somename"),
with_loader_criteria(User, User.id != 5),
with_loader_criteria(User, lambda cls: cls.id == 10),
with_loader_criteria(Address, Address.id != 5),
with_loader_criteria(Foo, lambda cls: cls.id == 10),
),
compare_values=True,
)
def test_loader_criteria_bound_param_thing(self):
from sqlalchemy import Column, Integer
class Foo(object):
id = Column(Integer)
def go(param):
return with_loader_criteria(Foo, lambda cls: cls.id == param)
g1 = go(10)
g2 = go(20)
ck1 = g1._generate_cache_key()
ck2 = g2._generate_cache_key()
eq_(ck1.key, ck2.key)
eq_(ck1.bindparams[0].key, ck2.bindparams[0].key)
eq_(ck1.bindparams[0].value, 10)
eq_(ck2.bindparams[0].value, 20)
def test_instrumented_attributes(self):
User, Address, Keyword, Order, Item = self.classes(
"User", "Address", "Keyword", "Order", "Item"
)
self._run_cache_key_fixture(
lambda: (
User.addresses,
User.addresses.of_type(aliased(Address)),
User.orders,
User.orders.and_(Order.id != 5),
User.orders.and_(Order.description != "somename"),
),
compare_values=True,
)
def test_unbound_options(self):
User, Address, Keyword, Order, Item = self.classes(
"User", "Address", "Keyword", "Order", "Item"
)
self._run_cache_key_fixture(
lambda: (
joinedload(User.addresses),
joinedload(User.addresses.of_type(aliased(Address))),
joinedload("addresses"),
joinedload(User.orders),
joinedload(User.orders.and_(Order.id != 5)),
joinedload(User.orders.and_(Order.id == 5)),
joinedload(User.orders.and_(Order.description != "somename")),
joinedload(User.orders).selectinload("items"),
joinedload(User.orders).selectinload(Order.items),
defer(User.id),
defer("id"),
defer("*"),
defer(Address.id),
subqueryload(User.orders),
selectinload(User.orders),
joinedload(User.addresses).defer(Address.id),
joinedload(aliased(User).addresses).defer(Address.id),
joinedload(User.addresses).defer("id"),
joinedload(User.orders).joinedload(Order.items),
joinedload(User.orders).subqueryload(Order.items),
subqueryload(User.orders).subqueryload(Order.items),
subqueryload(User.orders)
.subqueryload(Order.items)
.defer(Item.description),
defaultload(User.orders).defaultload(Order.items),
defaultload(User.orders),
),
compare_values=True,
)
def test_unbound_sub_options(self):
"""test #6869"""
User, Address, Keyword, Order, Item = self.classes(
"User", "Address", "Keyword", "Order", "Item"
)
self._run_cache_key_fixture(
lambda: (
joinedload(User.addresses).options(
joinedload(Address.dingaling)
),
joinedload(User.addresses).options(
joinedload(Address.dingaling).options(load_only("name"))
),
joinedload(User.orders).options(
joinedload(Order.items).options(joinedload(Item.keywords))
),
),
compare_values=True,
)
def test_bound_options(self):
User, Address, Keyword, Order, Item = self.classes(
"User", "Address", "Keyword", "Order", "Item"
)
a1 = aliased(Address)
self._run_cache_key_fixture(
lambda: (
Load(User).joinedload(User.addresses),
Load(User).joinedload(
User.addresses.of_type(aliased(Address))
),
Load(User).joinedload(User.orders),
Load(User).joinedload(User.orders.and_(Order.id != 5)),
Load(User).joinedload(
User.orders.and_(Order.description != "somename")
),
Load(User).defer(User.id),
Load(User).subqueryload(User.addresses),
Load(Address).defer(Address.id),
Load(Address).defer("*"),
Load(a1).defer(a1.id),
Load(User).joinedload(User.addresses).defer(Address.id),
Load(User).joinedload(User.orders).joinedload(Order.items),
Load(User).joinedload(User.orders).subqueryload(Order.items),
Load(User).subqueryload(User.orders).subqueryload(Order.items),
Load(User)
.subqueryload(User.orders)
.subqueryload(Order.items)
.defer(Item.description),
Load(User).defaultload(User.orders).defaultload(Order.items),
Load(User).defaultload(User.orders),
Load(Address).raiseload("*"),
Load(Address).raiseload(Address.user),
),
compare_values=True,
)
def test_selects_w_orm_joins(self):
User, Address, Keyword, Order, Item = self.classes(
"User", "Address", "Keyword", "Order", "Item"
)
a1 = aliased(Address)
self._run_cache_key_fixture(
lambda: (
select(User).join(User.addresses),
select(User).join(User.orders),
select(User).join(User.addresses).join(User.orders),
select(User).join(Address, User.addresses),
select(User).join(a1, User.addresses),
select(User).join(User.addresses.of_type(a1)),
select(User).join(
User.addresses.and_(Address.email_address == "foo")
),
select(User)
.join(Address, User.addresses)
.join_from(User, Order),
select(User)
.join(Address, User.addresses)
.join_from(User, User.orders),
select(User.id, Order.id).select_from(
orm_join(User, Order, User.orders)
),
),
compare_values=True,
)
def test_orm_query_w_orm_joins(self):
User, Address, Keyword, Order, Item = self.classes(
"User", "Address", "Keyword", "Order", "Item"
)
a1 = aliased(Address)
self._run_cache_key_fixture(
lambda: stmt_20(
fixture_session().query(User).join(User.addresses),
fixture_session().query(User).join(User.orders),
fixture_session()
.query(User)
.join(User.addresses)
.join(User.orders),
fixture_session()
.query(User)
.join(User.addresses)
.join(Address.dingaling),
fixture_session().query(User).join(Address, User.addresses),
fixture_session().query(User).join(a1, User.addresses),
fixture_session().query(User).join(User.addresses.of_type(a1)),
),
compare_values=True,
)
def test_orm_query_using_with_entities(self):
"""test issue #6503"""
User, Address, Keyword, Order, Item = self.classes(
"User", "Address", "Keyword", "Order", "Item"
)
self._run_cache_key_fixture(
lambda: stmt_20(
fixture_session()
.query(User)
.join(User.addresses)
.with_entities(Address.id),
#
fixture_session().query(Address.id).join(User.addresses),
#
fixture_session()
.query(User)
.options(selectinload(User.addresses))
.with_entities(User.id),
#
fixture_session()
.query(User)
.options(selectinload(User.addresses)),
#
fixture_session().query(User).with_entities(User.id),
#
# here, propagate_attr->orm is Address, entity is Address.id,
# but the join() + with_entities() will log a
# _MemoizedSelectEntities to differentiate
fixture_session()
.query(Address, Order)
.join(Address.dingaling)
.with_entities(Address.id),
#
# same, propagate_attr->orm is Address, entity is Address.id,
# but the join() + with_entities() will log a
# _MemoizedSelectEntities to differentiate
fixture_session()
.query(Address, User)
.join(Address.dingaling)
.with_entities(Address.id),
),
compare_values=True,
)
def test_synonyms(self, registry):
"""test for issue discovered in #7394"""
@registry.mapped
class User2(object):
__table__ = self.tables.users
name_syn = synonym("name")
@registry.mapped
class Address2(object):
__table__ = self.tables.addresses
name_syn = synonym("email_address")
self._run_cache_key_fixture(
lambda: (
User2.id,
User2.name,
User2.name_syn,
Address2.name_syn,
Address2.email_address,
aliased(User2).name_syn,
aliased(User2, name="foo").name_syn,
aliased(User2, name="bar").name_syn,
),
compare_values=True,
)
def test_more_with_entities_sanity_checks(self):
"""test issue #6503"""
User, Address, Keyword, Order, Item = self.classes(
"User", "Address", "Keyword", "Order", "Item"
)
sess = fixture_session()
q1 = (
sess.query(Address, Order)
.with_entities(Address.id)
._statement_20()
)
q2 = (
sess.query(Address, User).with_entities(Address.id)._statement_20()
)
assert not q1._memoized_select_entities
assert not q2._memoized_select_entities
# no joins or options, so q1 and q2 have the same cache key as Order/
# User are discarded. Note Address is first so propagate_attrs->orm is
# Address.
eq_(q1._generate_cache_key(), q2._generate_cache_key())
q3 = sess.query(Order).with_entities(Address.id)._statement_20()
q4 = sess.query(User).with_entities(Address.id)._statement_20()
# with Order/User as lead entity, this affects propagate_attrs->orm
# so keys are different
ne_(q3._generate_cache_key(), q4._generate_cache_key())
# confirm by deleting propagate attrs and memoized key and
# running again
q3._propagate_attrs = None
q4._propagate_attrs = None
del q3.__dict__["_generate_cache_key"]
del q4.__dict__["_generate_cache_key"]
eq_(q3._generate_cache_key(), q4._generate_cache_key())
# once there's a join() or options() prior to with_entities, now they
# are not discarded from the key; Order and User are in the
# _MemoizedSelectEntities
q5 = (
sess.query(Address, Order)
.join(Address.dingaling)
.with_entities(Address.id)
._statement_20()
)
q6 = (
sess.query(Address, User)
.join(Address.dingaling)
.with_entities(Address.id)
._statement_20()
)
assert q5._memoized_select_entities
assert q6._memoized_select_entities
ne_(q5._generate_cache_key(), q6._generate_cache_key())
def test_orm_query_from_statement(self):
User, Address, Keyword, Order, Item = self.classes(
"User", "Address", "Keyword", "Order", "Item"
)
self._run_cache_key_fixture(
lambda: stmt_20(
fixture_session()
.query(User)
.from_statement(text("select * from user")),
select(User).from_statement(text("select * from user")),
fixture_session()
.query(User)
.options(selectinload(User.addresses))
.from_statement(text("select * from user")),
fixture_session()
.query(User)
.options(subqueryload(User.addresses))
.from_statement(text("select * from user")),
fixture_session()
.query(User)
.from_statement(text("select * from user order by id")),
fixture_session()
.query(User.id)
.from_statement(text("select * from user")),
),
compare_values=True,
)
def test_orm_query_basic(self):
User, Address, Keyword, Order, Item = self.classes(
"User", "Address", "Keyword", "Order", "Item"
)
a1 = aliased(Address)
self._run_cache_key_fixture(
lambda: stmt_20(
fixture_session().query(User),
fixture_session().query(User).prefix_with("foo"),
fixture_session().query(User).filter_by(name="ed"),
fixture_session()
.query(User)
.filter_by(name="ed")
.order_by(User.id),
fixture_session()
.query(User)
.filter_by(name="ed")
.order_by(User.name),
fixture_session()
.query(User)
.filter_by(name="ed")
.group_by(User.id),
fixture_session()
.query(User)
.join(User.addresses)
.filter(User.name == "ed"),
fixture_session().query(User).join(User.orders),
fixture_session()
.query(User)
.join(User.orders)
.filter(Order.description == "adsf"),
fixture_session()
.query(User)
.join(User.addresses)
.join(User.orders),
fixture_session().query(User).join(Address, User.addresses),
fixture_session().query(User).join(a1, User.addresses),
fixture_session().query(User).join(User.addresses.of_type(a1)),
fixture_session().query(Address).join(Address.user),
fixture_session().query(User, Address).filter_by(name="ed"),
fixture_session().query(User, a1).filter_by(name="ed"),
),
compare_values=True,
)
def test_options(self):
class MyOpt(CacheableOptions):
_cache_key_traversal = [
("x", InternalTraversal.dp_plain_obj),
("y", InternalTraversal.dp_plain_obj),
]
x = 5
y = ()
self._run_cache_key_fixture(
lambda: (
MyOpt,
MyOpt + {"x": 10},
MyOpt + {"x": 15, "y": ("foo",)},
MyOpt + {"x": 15, "y": ("foo",)} + {"y": ("foo", "bar")},
),
compare_values=True,
)
class PolyCacheKeyTest(CacheKeyFixture, _poly_fixtures._Polymorphic):
run_setup_mappers = "once"
run_inserts = None
run_deletes = None
def test_wp_objects(self):
Person, Manager, Engineer, Boss = self.classes(
"Person", "Manager", "Engineer", "Boss"
)
self._run_cache_key_fixture(
lambda: (
inspect(with_polymorphic(Person, [Manager, Engineer])),
inspect(with_polymorphic(Person, [Manager])),
inspect(with_polymorphic(Person, [Manager, Engineer, Boss])),
inspect(
with_polymorphic(Person, [Manager, Engineer], flat=True)
),
inspect(
with_polymorphic(
Person,
[Manager, Engineer],
select(Person)
.outerjoin(Manager)
.outerjoin(Engineer)
.subquery(),
)
),
),
compare_values=True,
)
def test_wp_queries(self):
Person, Manager, Engineer, Boss = self.classes(
"Person", "Manager", "Engineer", "Boss"
)
def two():
wp = with_polymorphic(Person, [Manager, Engineer])
return fixture_session().query(wp)
def three():
wp = with_polymorphic(Person, [Manager, Engineer])
return fixture_session().query(wp).filter(wp.name == "asdfo")
def three_a():
wp = with_polymorphic(Person, [Manager, Engineer], flat=True)
return fixture_session().query(wp).filter(wp.name == "asdfo")
def five():
subq = (
select(Person)
.outerjoin(Manager)
.outerjoin(Engineer)
.subquery()
)
wp = with_polymorphic(Person, [Manager, Engineer], subq)
return fixture_session().query(wp).filter(wp.name == "asdfo")
self._run_cache_key_fixture(
lambda: stmt_20(two(), three(), three_a(), five()),
compare_values=True,
)
def test_wp_joins(self):
Company, Person, Manager, Engineer, Boss = self.classes(
"Company", "Person", "Manager", "Engineer", "Boss"
)
def one():
return (
fixture_session()
.query(Company)
.join(Company.employees)
.filter(Person.name == "asdf")
)
def two():
wp = with_polymorphic(Person, [Manager, Engineer])
return (
fixture_session()
.query(Company)
.join(Company.employees.of_type(wp))
.filter(wp.name == "asdf")
)
def three():
wp | |
'''
Some glue code to do workspace related things based on visgraph
'''
import sys
import time
import envi
import vivisect
import threading
import collections
from operator import itemgetter
import visgraph.pathcore as vg_pathcore
import visgraph.graphcore as vg_graphcore
xrskip = envi.BR_PROC | envi.BR_DEREF
def getNodeWeightHisto(g):
'''
Takes a graph and returns the following tuple:
(weights_to_node, nodes_to_weight, leaves)
where:
        weights_to_node - dict keyed by weight, listing the nodes at that weight
        nodes_to_weight - dict keyed by node, giving that node's weight
        leaves - dict keyed by weight, listing leaf nodes (nodes with no
                 outgoing refs)
'''
nodeweights = g.getHierNodeWeights()
leaves = collections.defaultdict(list)
weights_to_cb = collections.defaultdict(list)
# create default dict
for cb, weight in sorted(nodeweights.items(), lambda x,y: cmp(y[1], x[1]) ):
if not len(g.getRefsFromByNid(cb)):
# leaves is a tuple of (cb, current path, visited nodes)
# these are our leaf nodes
leaves[weight].append( (cb, list(), set()) )
# create histogram
weights_to_cb[weight].append( (cb, list(), set()) )
return weights_to_cb, nodeweights, leaves
def getLongPath(g, maxpath=1000):
'''
    Generator yielding the longest paths through the graph, each a list of (node id, edge id) tuples
'''
weights_to_cb, cb_to_weights, todo = getNodeWeightHisto(g)
# unique root node code blocks
rootnodes = set([cb for cb,nprops in g.getHierRootNodes()])
leafmax = 0
if len(todo):
leafmax = max( todo.keys() )
invalidret = False
# if the weight of the longest path to a leaf node
# is not the highest weight then we need to fix our
# path choices by taking the longer path
weightmax = max( weights_to_cb.keys() )
if leafmax != weightmax:
todo = weights_to_cb
leafmax = weightmax
invalidret = True
pcnt = 0
rpaths = []
fva = g.getMeta('fva')
# this is our loop that we want to yield out of..
# start at the bottom of the graph and work our way back up
for weight in xrange(leafmax, -1, -1):
        # the todo is a list of codeblocks at a specific level
codeblocks = todo.get(weight)
if not codeblocks:
continue
for cbva, paths, visited in codeblocks:
tleafs = collections.defaultdict(list)
if not paths:
paths = [(cbva, None)]
# work is a tuple of (cbva, weight, current path, visited)
work = [(cbva, weight, paths, visited) ]
while work:
cbva, weight, cpath, visited = work.pop()
for eid, fromid, toid, einfo in g.getRefsToByNid(cbva):
#print '0x%08x in [%s]' % (fromid, ' '.join(['0x%08x' % va for va in visited]))
if fromid in visited:
continue
nweight = cb_to_weights.get(fromid)
                    #print 'cbva: 0x%08x nweight: %d weight: %d fromid: 0x%08x' % (cbva, nweight, weight, fromid)
if nweight == weight-1:
# we've moved back one level
newcpath = list(cpath)
newcpath[-1] = (cbva, eid)
newcpath.append( (fromid, None) )
newvisited = set(visited)
newvisited.add(fromid)
work.append( (fromid, weight-1, newcpath, newvisited) )
else:
newcpath = list(cpath)
newcpath[-1] = (cbva, eid)
newcpath.append( (fromid, None) )
newvisited = set(visited)
newvisited.add(fromid)
t = (fromid, newcpath, newvisited)
if t not in tleafs[nweight]:
tleafs[ nweight ].append( t )
if cbva in rootnodes:
l = list(cpath)
l.reverse()
yield l
# update our todo with our new paths to resume from
for nw, l in tleafs.items():
todo[nw].extend( l )
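# Illustrative helper (not part of the original module): collect up to `count`
# of the longest paths yielded by getLongPath() without exhausting the
# generator.
def _demo_first_long_paths(fgraph, count=10):
    import itertools
    return list(itertools.islice(getLongPath(fgraph), count))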
def _nodeedge(tnode):
nid = vg_pathcore.getNodeProp(tnode, 'nid')
eid = vg_pathcore.getNodeProp(tnode, 'eid')
return nid,eid
def _nodeedgeloop(tnode):
nid = vg_pathcore.getNodeProp(tnode, 'nid')
eid = vg_pathcore.getNodeProp(tnode, 'eid')
loop = vg_pathcore.getNodeProp(tnode, 'loops')
return nid,eid,loop
def getCoveragePaths(fgraph, maxpath=None):
'''
Get a set of paths which will cover every block, but will
*end* on branches which re-merge with previously traversed
paths. This allows a full coverage of the graph with as
little work as possible, but *will* omit possible states.
Returns: yield based path generator ( where path is list if (nid,edge) tuples )
'''
pathcnt = 0
nodedone = {}
for root in fgraph.getHierRootNodes():
proot = vg_pathcore.newPathNode(nid=root[0], eid=None)
todo = [(root,proot), ]
while todo:
node,cpath = todo.pop()
refsfrom = fgraph.getRefsFrom(node)
# Record that we have visited this node...
nodedone[node[0]] = True
# This is a leaf node!
if not refsfrom:
path = vg_pathcore.getPathToNode(cpath)
yield [ _nodeedge(n) for n in path ]
pathcnt += 1
if maxpath != None and pathcnt >= maxpath:
return
for eid, fromid, toid, einfo in refsfrom:
# If we're branching to a visited node, return the path as is
if nodedone.get(toid):
path = vg_pathcore.getPathToNode(cpath)
yield [ _nodeedge(n) for n in path ]
# Check if that was the last path we should yield
pathcnt += 1
if maxpath != None and pathcnt >= maxpath:
return
# If we're at a completed node, take no further branches
continue
npath = vg_pathcore.newPathNode(parent=cpath, nid=toid, eid=eid)
tonode = fgraph.getNode(toid)
todo.append((tonode,npath))
def getCodePathsThru(fgraph, tgtcbva, loopcnt=0, maxpath=None):
'''
Yields all the paths through the hierarchical graph which pass through
    the target codeblock "tgtcbva". Each "root" node is traced to the target,
and all paths are traversed from there to the end. Specify a loopcnt
to allow loop paths to be generated with the given "loop iteration count"
Example:
for path in getCodePathsThru(fgraph, tgtcb):
for node,edge in path:
...etc...
'''
cnt = 0
for pathto in getCodePathsTo(fgraph, tgtcbva, loopcnt=loopcnt, maxpath=maxpath):
for pathfrom in getCodePathsFrom(fgraph, tgtcbva, loopcnt=loopcnt, maxpath=maxpath):
yield pathto + pathfrom[1:]
cnt += 1
if maxpath != None and cnt >= maxpath:
return
def getCodePathsTo(fgraph, tocbva, loopcnt=0, maxpath=None):
'''
Yields all the paths through the hierarchical graph starting at the
"root nodes" and ending at tocbva. Specify a loopcnt to allow loop
paths to be generated with the given "loop iteration count"
Example:
for path in getCodePathsTo(fgraph, tocbva):
for node,edge in path:
...etc...
'''
pathcnt = 0
looptrack = []
pnode = vg_pathcore.newPathNode(nid=tocbva, eid=None)
node = fgraph.getNode(tocbva)
todo = [(node,pnode), ]
while todo:
node,cpath = todo.pop()
refsto = fgraph.getRefsTo(node)
# Is this is the root node?
if node[1].get('rootnode'):
path = vg_pathcore.getPathToNode(cpath)
path.reverse()
yield [ _nodeedge(n) for n in path ]
vg_pathcore.trimPath(cpath)
pathcnt += 1
if maxpath and pathcnt >= maxpath:
return
for eid, n1, n2, einfo in refsto:
# Skip loops if they are "deeper" than we are allowed
loops = vg_pathcore.getPathLoopCount(cpath, 'nid', n1)
if loops > loopcnt:
continue
vg_pathcore.setNodeProp(cpath, 'eid', eid)
npath = vg_pathcore.newPathNode(parent=cpath, nid=n1, eid=None)
nid1,node1 = fgraph.getNode(n1)
todo.append(((nid1,node1),npath))
def getCodePathsFrom(fgraph, fromcbva, loopcnt=0, maxpath=None):
'''
Yields all the paths through the hierarchical graph beginning with
"fromcbva", which is traced to all terminating points. Specify a loopcnt
to allow loop paths to be generated with the given "loop iteration count"
Example:
for path in getCodePathsFrom(fgraph, fromcbva):
for node,edge in path:
...etc...
'''
pathcnt = 0
proot = vg_pathcore.newPathNode(nid=fromcbva, eid=None)
cbnid,cbnode = fgraph.getNode(fromcbva)
todo = [(cbnid,proot), ]
while todo:
nid,cpath = todo.pop()
refsfrom = fgraph.getRefsFromByNid(nid)
# This is a leaf node!
if not refsfrom:
path = vg_pathcore.getPathToNode(cpath)
yield [ _nodeedge(n) for n in path ]
vg_pathcore.trimPath(cpath)
pathcnt += 1
if maxpath and pathcnt >= maxpath:
return
for eid, fromid, n2, einfo in refsfrom:
# Skip loops if they are "deeper" than we are allowed
loops = vg_pathcore.getPathLoopCount(cpath, 'nid', n2)
if loops > loopcnt:
continue
npath = vg_pathcore.newPathNode(parent=cpath, nid=n2, eid=eid)
todo.append((n2,npath))
def getCodePaths(fgraph, loopcnt=0, maxpath=None):
'''
Yields all the paths through the hierarchical graph. Each
"root" node is traced to all terminating points. Specify a loopcnt
to allow loop paths to be generated with the given "loop iteration count"
Example:
for path in getCodePaths(fgraph):
for node,edge in path:
...etc...
'''
pathcnt = 0
for root in fgraph.getHierRootNodes():
proot = vg_pathcore.newPathNode(nid=root[0], eid=None)
todo = [(root,proot), ]
while todo:
node,cpath = todo.pop()
refsfrom = fgraph.getRefsFrom(node)
# This is a leaf node!
if not refsfrom:
path = vg_pathcore.getPathToNode(cpath)
yield [ _nodeedge(n) for n in path ]
vg_pathcore.trimPath(cpath)
pathcnt += 1
if maxpath and pathcnt >= maxpath:
return
for eid, fromid, toid, einfo in refsfrom:
# Skip loops if they are "deeper" than we are allowed
if vg_pathcore.getPathLoopCount(cpath, 'nid', toid) > loopcnt:
continue
npath = vg_pathcore.newPathNode(parent=cpath, nid=toid, eid=eid)
tonode = fgraph.getNode(toid)
todo.append((tonode,npath))
def walkCodePaths(fgraph, callback, loopcnt=0, maxpath=None):
'''
walkCodePaths is a path generator which uses a callback function to determine the
viability of each particular path. This approach allows the calling function
(eg. walkSymbolikPaths) to do in-generator checks/processing and trim paths which
are simply not possible/desireable.
Callbacks will receive | |
# Copyright 2021 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
import pytest
import mindspore.dataset as ds
from mindspore import log as logger
from util import config_get_set_num_parallel_workers, config_get_set_seed
FILE_DIR = '../data/dataset/testPennTreebank'
def test_penn_treebank_dataset_one_file():
"""
Feature: Test PennTreebank Dataset.
Description: read data from a single file.
Expectation: the data is processed successfully.
"""
data = ds.PennTreebankDataset(FILE_DIR, usage='test')
count = 0
for i in data.create_dict_iterator(num_epochs=1, output_numpy=True):
logger.info("{}".format(i["text"]))
count += 1
assert count == 3
def test_penn_treebank_dataset_train():
"""
Feature: Test PennTreebank Dataset.
Description: read data from a single file.
Expectation: the data is processed successfully.
"""
data = ds.PennTreebankDataset(FILE_DIR, usage='train')
count = 0
for i in data.create_dict_iterator(num_epochs=1, output_numpy=True):
logger.info("{}".format(i["text"]))
count += 1
assert count == 3
def test_penn_treebank_dataset_valid():
"""
Feature: Test PennTreebank Dataset.
Description: read data from a single file.
Expectation: the data is processed successfully.
"""
data = ds.PennTreebankDataset(FILE_DIR, usage='valid')
count = 0
for i in data.create_dict_iterator(num_epochs=1, output_numpy=True):
logger.info("{}".format(i["text"]))
count += 1
assert count == 3
def test_penn_treebank_dataset_all_file():
"""
Feature: Test PennTreebank Dataset.
    Description: read data from all of the files.
Expectation: the data is processed successfully.
"""
data = ds.PennTreebankDataset(FILE_DIR, usage='all')
count = 0
for i in data.create_dict_iterator(num_epochs=1, output_numpy=True):
logger.info("{}".format(i["text"]))
count += 1
assert count == 9
def test_penn_treebank_dataset_num_samples_none():
"""
Feature: Test PennTreebank Dataset.
Description: read data with no num_samples input.
Expectation: the data is processed successfully.
"""
# Do not provide a num_samples argument, so it would be None by default
data = ds.PennTreebankDataset(FILE_DIR, usage='all')
count = 0
for i in data.create_dict_iterator(num_epochs=1, output_numpy=True):
logger.info("{}".format(i["text"]))
count += 1
assert count == 9
def test_penn_treebank_dataset_shuffle_false4():
"""
Feature: Test PennTreebank Dataset.
    Description: read data from all files with shuffle=False.
Expectation: the data is processed successfully.
"""
original_num_parallel_workers = config_get_set_num_parallel_workers(4)
original_seed = config_get_set_seed(987)
data = ds.PennTreebankDataset(FILE_DIR, usage='all', shuffle=False)
count = 0
line = [" no it was black friday ",
" does the bank charge a fee for setting up the account ",
" just ahead of them there was a huge fissure ",
" clash twits poetry formulate flip loyalty splash ",
" <unk> the wardrobe was very small in our room ",
" <unk> <unk> the proportion of female workers in this company <unk> <unk> ",
" you pay less for the supermaket's own brands ",
" black white grapes ",
" everyone in our football team is fuming "]
for i in data.create_dict_iterator(num_epochs=1, output_numpy=True):
strs = i["text"].item().decode("utf8")
assert strs == line[count]
count += 1
assert count == 9
# Restore configuration
ds.config.set_num_parallel_workers(original_num_parallel_workers)
ds.config.set_seed(original_seed)
def test_penn_treebank_dataset_shuffle_false1():
"""
Feature: Test PennTreebank Dataset.
    Description: read data from all files with shuffle=False.
Expectation: the data is processed successfully.
"""
original_num_parallel_workers = config_get_set_num_parallel_workers(1)
original_seed = config_get_set_seed(987)
data = ds.PennTreebankDataset(FILE_DIR, usage='all', shuffle=False)
count = 0
line = [" no it was black friday ",
" clash twits poetry formulate flip loyalty splash ",
" you pay less for the supermaket's own brands ",
" does the bank charge a fee for setting up the account ",
" <unk> the wardrobe was very small in our room ",
" black white grapes ",
" just ahead of them there was a huge fissure ",
" <unk> <unk> the proportion of female workers in this company <unk> <unk> ",
" everyone in our football team is fuming "]
for i in data.create_dict_iterator(num_epochs=1, output_numpy=True):
strs = i["text"].item().decode("utf8")
assert strs == line[count]
count += 1
assert count == 9
# Restore configuration
ds.config.set_num_parallel_workers(original_num_parallel_workers)
ds.config.set_seed(original_seed)
def test_penn_treebank_dataset_shuffle_files4():
"""
Feature: Test PennTreebank Dataset.
    Description: read data from all files with shuffle=Shuffle.FILES.
Expectation: the data is processed successfully.
"""
original_num_parallel_workers = config_get_set_num_parallel_workers(4)
original_seed = config_get_set_seed(135)
data = ds.PennTreebankDataset(FILE_DIR, usage='all', shuffle=ds.Shuffle.FILES)
count = 0
line = [" just ahead of them there was a huge fissure ",
" does the bank charge a fee for setting up the account ",
" no it was black friday ",
" <unk> <unk> the proportion of female workers in this company <unk> <unk> ",
" <unk> the wardrobe was very small in our room ",
" clash twits poetry formulate flip loyalty splash ",
" everyone in our football team is fuming ",
" black white grapes ",
" you pay less for the supermaket's own brands "]
for i in data.create_dict_iterator(num_epochs=1, output_numpy=True):
strs = i["text"].item().decode("utf8")
assert strs == line[count]
count += 1
assert count == 9
# Restore configuration
ds.config.set_num_parallel_workers(original_num_parallel_workers)
ds.config.set_seed(original_seed)
def test_penn_treebank_dataset_shuffle_files1():
"""
Feature: Test PennTreebank Dataset.
    Description: read data from all files with shuffle=Shuffle.FILES.
Expectation: the data is processed successfully.
"""
original_num_parallel_workers = config_get_set_num_parallel_workers(1)
original_seed = config_get_set_seed(135)
data = ds.PennTreebankDataset(FILE_DIR, usage='all', shuffle=ds.Shuffle.FILES)
count = 0
line = [" just ahead of them there was a huge fissure ",
" <unk> <unk> the proportion of female workers in this company <unk> <unk> ",
" everyone in our football team is fuming ",
" does the bank charge a fee for setting up the account ",
" <unk> the wardrobe was very small in our room ",
" black white grapes ",
" no it was black friday ",
" clash twits poetry formulate flip loyalty splash ",
" you pay less for the supermaket's own brands "]
for i in data.create_dict_iterator(num_epochs=1, output_numpy=True):
strs = i["text"].item().decode("utf8")
assert strs == line[count]
count += 1
assert count == 9
# Restore configuration
ds.config.set_num_parallel_workers(original_num_parallel_workers)
ds.config.set_seed(original_seed)
def test_penn_treebank_dataset_shuffle_global4():
"""
Feature: Test PennTreebank Dataset.
    Description: read data from all files with shuffle=Shuffle.GLOBAL.
Expectation: the data is processed successfully.
"""
original_num_parallel_workers = config_get_set_num_parallel_workers(4)
original_seed = config_get_set_seed(246)
data = ds.PennTreebankDataset(FILE_DIR, usage='all', shuffle=ds.Shuffle.GLOBAL)
count = 0
line = [" everyone in our football team is fuming ",
" does the bank charge a fee for setting up the account ",
" clash twits poetry formulate flip loyalty splash ",
" no it was black friday ",
" just ahead of them there was a huge fissure ",
" <unk> <unk> the proportion of female workers in this company <unk> <unk> ",
" you pay less for the supermaket's own brands ",
" <unk> the wardrobe was very small in our room ",
" black white grapes "]
for i in data.create_dict_iterator(num_epochs=1, output_numpy=True):
strs = i["text"].item().decode("utf8")
assert strs == line[count]
count += 1
assert count == 9
# Restore configuration
ds.config.set_num_parallel_workers(original_num_parallel_workers)
ds.config.set_seed(original_seed)
def test_penn_treebank_dataset_shuffle_global1():
"""
Feature: Test PennTreebank Dataset.
    Description: read data from all files with shuffle=Shuffle.GLOBAL.
Expectation: the data is processed successfully.
"""
original_num_parallel_workers = config_get_set_num_parallel_workers(1)
original_seed = config_get_set_seed(246)
data = ds.PennTreebankDataset(FILE_DIR, usage='all', shuffle=ds.Shuffle.GLOBAL)
count = 0
line = [" everyone in our football team is fuming ",
" does the bank charge a fee for setting up the account ",
" clash twits poetry formulate flip loyalty splash ",
" <unk> the wardrobe was very small in our room ",
" black white grapes ",
" you pay less for the supermaket's own brands ",
" <unk> <unk> the proportion of female workers in this company <unk> <unk> ",
" no it was black friday ",
" just ahead of them there was a huge fissure "]
for i in data.create_dict_iterator(num_epochs=1, output_numpy=True):
strs = i["text"].item().decode("utf8")
assert strs == line[count]
count += 1
assert count == 9
# Restore configuration
ds.config.set_num_parallel_workers(original_num_parallel_workers)
ds.config.set_seed(original_seed)
def test_penn_treebank_dataset_num_samples():
"""
Feature: Test PennTreebank Dataset.
Description: Test num_samples.
Expectation: the data is processed successfully.
"""
data = ds.PennTreebankDataset(FILE_DIR, usage='all', num_samples=2)
count = 0
for _ in data.create_dict_iterator(num_epochs=1, output_numpy=True):
count += 1
assert count == 2
def test_penn_treebank_dataset_distribution():
"""
Feature: Test PennTreebank Dataset.
    Description: read data in a distributed (sharded) setting.
#
# Autogenerated by Thrift Compiler (0.13.0)
#
# DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
#
# options string: py
#
from thrift.Thrift import TType, TMessageType, TFrozenDict, TException, TApplicationException
from thrift.protocol.TProtocol import TProtocolException
from thrift.TRecursive import fix_spec
import sys
from thrift.transport import TTransport
all_structs = []
class TDeleteType(object):
"""
Specify type of delete:
- DELETE_COLUMN means exactly one version will be removed,
- DELETE_COLUMNS means previous versions will also be removed.
"""
DELETE_COLUMN = 0
DELETE_COLUMNS = 1
DELETE_FAMILY = 2
DELETE_FAMILY_VERSION = 3
_VALUES_TO_NAMES = {
0: "DELETE_COLUMN",
1: "DELETE_COLUMNS",
2: "DELETE_FAMILY",
3: "DELETE_FAMILY_VERSION",
}
_NAMES_TO_VALUES = {
"DELETE_COLUMN": 0,
"DELETE_COLUMNS": 1,
"DELETE_FAMILY": 2,
"DELETE_FAMILY_VERSION": 3,
}
class TDurability(object):
"""
Specify Durability:
- SKIP_WAL means do not write the Mutation to the WAL.
- ASYNC_WAL means write the Mutation to the WAL asynchronously,
- SYNC_WAL means write the Mutation to the WAL synchronously,
- FSYNC_WAL means Write the Mutation to the WAL synchronously and force the entries to disk.
"""
USE_DEFAULT = 0
SKIP_WAL = 1
ASYNC_WAL = 2
SYNC_WAL = 3
FSYNC_WAL = 4
_VALUES_TO_NAMES = {
0: "USE_DEFAULT",
1: "SKIP_WAL",
2: "ASYNC_WAL",
3: "SYNC_WAL",
4: "FSYNC_WAL",
}
_NAMES_TO_VALUES = {
"USE_DEFAULT": 0,
"SKIP_WAL": 1,
"ASYNC_WAL": 2,
"SYNC_WAL": 3,
"FSYNC_WAL": 4,
}
class TConsistency(object):
"""
Specify Consistency:
- STRONG means reads only from primary region
- TIMELINE means reads might return values from secondary region replicas
"""
STRONG = 1
TIMELINE = 2
_VALUES_TO_NAMES = {
1: "STRONG",
2: "TIMELINE",
}
_NAMES_TO_VALUES = {
"STRONG": 1,
"TIMELINE": 2,
}
class TReadType(object):
DEFAULT = 1
STREAM = 2
PREAD = 3
_VALUES_TO_NAMES = {
1: "DEFAULT",
2: "STREAM",
3: "PREAD",
}
_NAMES_TO_VALUES = {
"DEFAULT": 1,
"STREAM": 2,
"PREAD": 3,
}
class TCompareOp(object):
"""
Thrift wrapper around
org.apache.hadoop.hbase.filter.CompareFilter$CompareOp.
"""
LESS = 0
LESS_OR_EQUAL = 1
EQUAL = 2
NOT_EQUAL = 3
GREATER_OR_EQUAL = 4
GREATER = 5
NO_OP = 6
_VALUES_TO_NAMES = {
0: "LESS",
1: "LESS_OR_EQUAL",
2: "EQUAL",
3: "NOT_EQUAL",
4: "GREATER_OR_EQUAL",
5: "GREATER",
6: "NO_OP",
}
_NAMES_TO_VALUES = {
"LESS": 0,
"LESS_OR_EQUAL": 1,
"EQUAL": 2,
"NOT_EQUAL": 3,
"GREATER_OR_EQUAL": 4,
"GREATER": 5,
"NO_OP": 6,
}
class TBloomFilterType(object):
"""
Thrift wrapper around
org.apache.hadoop.hbase.regionserver.BloomType
"""
NONE = 0
ROW = 1
ROWCOL = 2
ROWPREFIX_FIXED_LENGTH = 3
_VALUES_TO_NAMES = {
0: "NONE",
1: "ROW",
2: "ROWCOL",
3: "ROWPREFIX_FIXED_LENGTH",
}
_NAMES_TO_VALUES = {
"NONE": 0,
"ROW": 1,
"ROWCOL": 2,
"ROWPREFIX_FIXED_LENGTH": 3,
}
class TCompressionAlgorithm(object):
"""
Thrift wrapper around
org.apache.hadoop.hbase.io.compress.Algorithm
"""
LZO = 0
GZ = 1
NONE = 2
SNAPPY = 3
LZ4 = 4
BZIP2 = 5
ZSTD = 6
_VALUES_TO_NAMES = {
0: "LZO",
1: "GZ",
2: "NONE",
3: "SNAPPY",
4: "LZ4",
5: "BZIP2",
6: "ZSTD",
}
_NAMES_TO_VALUES = {
"LZO": 0,
"GZ": 1,
"NONE": 2,
"SNAPPY": 3,
"LZ4": 4,
"BZIP2": 5,
"ZSTD": 6,
}
class TDataBlockEncoding(object):
"""
Thrift wrapper around
org.apache.hadoop.hbase.io.encoding.DataBlockEncoding
"""
NONE = 0
PREFIX = 2
DIFF = 3
FAST_DIFF = 4
ROW_INDEX_V1 = 7
_VALUES_TO_NAMES = {
0: "NONE",
2: "PREFIX",
3: "DIFF",
4: "FAST_DIFF",
7: "ROW_INDEX_V1",
}
_NAMES_TO_VALUES = {
"NONE": 0,
"PREFIX": 2,
"DIFF": 3,
"FAST_DIFF": 4,
"ROW_INDEX_V1": 7,
}
class TKeepDeletedCells(object):
"""
Thrift wrapper around
org.apache.hadoop.hbase.KeepDeletedCells
"""
FALSE = 0
TRUE = 1
TTL = 2
_VALUES_TO_NAMES = {
0: "FALSE",
1: "TRUE",
2: "TTL",
}
_NAMES_TO_VALUES = {
"FALSE": 0,
"TRUE": 1,
"TTL": 2,
}
class TPermissionScope(object):
TABLE = 0
NAMESPACE = 1
_VALUES_TO_NAMES = {
0: "TABLE",
1: "NAMESPACE",
}
_NAMES_TO_VALUES = {
"TABLE": 0,
"NAMESPACE": 1,
}
class TLogType(object):
SLOW_LOG = 1
LARGE_LOG = 2
_VALUES_TO_NAMES = {
1: "SLOW_LOG",
2: "LARGE_LOG",
}
_NAMES_TO_VALUES = {
"SLOW_LOG": 1,
"LARGE_LOG": 2,
}
class TFilterByOperator(object):
AND = 0
OR = 1
_VALUES_TO_NAMES = {
0: "AND",
1: "OR",
}
_NAMES_TO_VALUES = {
"AND": 0,
"OR": 1,
}
class TThriftServerType(object):
"""
Specify type of thrift server: thrift and thrift2
"""
ONE = 1
TWO = 2
_VALUES_TO_NAMES = {
1: "ONE",
2: "TWO",
}
_NAMES_TO_VALUES = {
"ONE": 1,
"TWO": 2,
}
class TTimeRange(object):
"""
Attributes:
- minStamp
- maxStamp
"""
def __init__(self, minStamp=None, maxStamp=None,):
self.minStamp = minStamp
self.maxStamp = maxStamp
def read(self, iprot):
if iprot._fast_decode is not None and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None:
iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec])
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.I64:
self.minStamp = iprot.readI64()
else:
iprot.skip(ftype)
elif fid == 2:
if ftype == TType.I64:
self.maxStamp = iprot.readI64()
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot._fast_encode is not None and self.thrift_spec is not None:
oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec]))
return
oprot.writeStructBegin('TTimeRange')
if self.minStamp is not None:
oprot.writeFieldBegin('minStamp', TType.I64, 1)
oprot.writeI64(self.minStamp)
oprot.writeFieldEnd()
if self.maxStamp is not None:
oprot.writeFieldBegin('maxStamp', TType.I64, 2)
oprot.writeI64(self.maxStamp)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
if self.minStamp is None:
raise TProtocolException(message='Required field minStamp is unset!')
if self.maxStamp is None:
raise TProtocolException(message='Required field maxStamp is unset!')
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.items()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
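# Illustrative example (not part of the generated code; the timestamps are
# made-up epoch milliseconds): constructing and validating a time range.
#
#   tr = TTimeRange(minStamp=1609459200000, maxStamp=1609462800000)
#   tr.validate()  # raises TProtocolException if either bound is unset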
class TColumn(object):
"""
Addresses a single cell or multiple cells
in a HBase table by column family and optionally
a column qualifier and timestamp
Attributes:
- family
- qualifier
- timestamp
"""
def __init__(self, family=None, qualifier=None, timestamp=None,):
self.family = family
self.qualifier = qualifier
self.timestamp = timestamp
def read(self, iprot):
if iprot._fast_decode is not None and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None:
iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec])
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.STRING:
self.family = iprot.readBinary()
else:
iprot.skip(ftype)
elif fid == 2:
if ftype == TType.STRING:
self.qualifier = iprot.readBinary()
else:
iprot.skip(ftype)
elif fid == 3:
if ftype == TType.I64:
self.timestamp = iprot.readI64()
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot._fast_encode is not None and self.thrift_spec is not None:
oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec]))
return
oprot.writeStructBegin('TColumn')
if self.family is not None:
oprot.writeFieldBegin('family', TType.STRING, 1)
oprot.writeBinary(self.family)
oprot.writeFieldEnd()
if self.qualifier is not None:
oprot.writeFieldBegin('qualifier', TType.STRING, 2)
oprot.writeBinary(self.qualifier)
oprot.writeFieldEnd()
if self.timestamp is not None:
oprot.writeFieldBegin('timestamp', TType.I64, 3)
oprot.writeI64(self.timestamp)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
if self.family is None:
raise TProtocolException(message='Required field family is unset!')
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.items()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class TColumnValue(object):
"""
Represents a single cell and its value.
Attributes:
- family
- qualifier
- value
- timestamp
- tags
- type
"""
def __init__(self, family=None, qualifier=None, value=None, timestamp=None, tags=None, type=None,):
self.family = family
self.qualifier = qualifier
self.value = value
self.timestamp = timestamp
self.tags = tags
self.type = type
def read(self, iprot):
if iprot._fast_decode is not None and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None:
iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec])
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.STRING:
self.family = iprot.readBinary()
else:
iprot.skip(ftype)
elif fid == 2:
if ftype == TType.STRING:
self.qualifier = iprot.readBinary()
else:
iprot.skip(ftype)
elif fid == 3:
if ftype == TType.STRING:
self.value = iprot.readBinary()
else:
iprot.skip(ftype)
elif fid == 4:
if ftype == TType.I64:
self.timestamp = iprot.readI64()
else:
iprot.skip(ftype)
elif fid == 5:
if ftype == TType.STRING:
self.tags = iprot.readBinary()
else:
iprot.skip(ftype)
elif fid == 6:
if ftype == TType.BYTE:
self.type = iprot.readByte()
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot._fast_encode is not None and self.thrift_spec is not None:
oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec]))
return
oprot.writeStructBegin('TColumnValue')
if self.family is not None:
oprot.writeFieldBegin('family', TType.STRING, 1)
oprot.writeBinary(self.family)
oprot.writeFieldEnd()
if self.qualifier is not None:
oprot.writeFieldBegin('qualifier', TType.STRING, 2)
oprot.writeBinary(self.qualifier)
oprot.writeFieldEnd()
if self.value is not None:
oprot.writeFieldBegin('value', TType.STRING, 3)
oprot.writeBinary(self.value)
oprot.writeFieldEnd()
if self.timestamp is not None:
oprot.writeFieldBegin('timestamp', TType.I64, 4)
oprot.writeI64(self.timestamp)
oprot.writeFieldEnd()
if self.tags is not None:
oprot.writeFieldBegin('tags', TType.STRING, 5)
oprot.writeBinary(self.tags)
oprot.writeFieldEnd()
if self.type is not None:
oprot.writeFieldBegin('type', TType.BYTE, 6)
oprot.writeByte(self.type)
oprot.writeFieldEnd()
#!/usr/bin/env python3
import argparse
import os, atexit
import textwrap
import time
import tempfile
import threading, subprocess
import barrier, finishedSignal
import sys
import signal
import random
import time
from enum import Enum
from collections import defaultdict, OrderedDict
BARRIER_IP = 'localhost'
BARRIER_PORT = 10000
SIGNAL_IP = 'localhost'
SIGNAL_PORT = 11000
PROCESSES_BASE_IP = 11000
# Do not run multiple validations concurrently!
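# TC shapes traffic on the given interface with tc/netem. The `losses` argument is
# expected to be a dict of 2-tuples keyed by 'delay', 'loss' and 'reordering', e.g.
# (illustrative values only):
#   {'delay': ('200ms', '50ms'), 'loss': ('10%', '25%'), 'reordering': ('25%', '50%')}
# Each pair is splatted into the `tc qdisc change ... netem` command built in __init__.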
class TC:
def __init__(self, losses, interface="lo", needSudo=True, sudoPassword="<PASSWORD>"):
self.losses = losses
self.interface = interface
self.needSudo = needSudo
self.sudoPassword = sudoPassword
cmd1 = 'tc qdisc add dev {} root netem 2>/dev/null'.format(self.interface)
cmd2 = 'tc qdisc change dev {} root netem delay {} {} distribution normal loss {} {} reorder {} {}'.format(self.interface, *self.losses['delay'], *self.losses['loss'], *self.losses['reordering'])
if self.needSudo:
os.system("echo {} | sudo -S {}".format(self.sudoPassword, cmd1))
os.system("echo {} | sudo -S {}".format(self.sudoPassword, cmd2))
else:
os.system(cmd1)
os.system(cmd2)
atexit.register(self.cleanup)
def __str__(self):
ret = """\
Interface: {}
Distribution: Normal
Delay: {} {}
Loss: {} {}
Reordering: {} {}""".format(
self.interface,
*self.losses['delay'],
*self.losses['loss'],
*self.losses['reordering'])
return textwrap.dedent(ret)
def cleanup(self):
cmd = 'tc qdisc del dev {} root 2>/dev/null'.format(self.interface)
if self.needSudo:
os.system("echo '{}' | sudo -S {}".format(self.sudoPassword, cmd))
else:
os.system(cmd)
class ProcessState(Enum):
RUNNING = 1
STOPPED = 2
TERMINATED = 3
class ProcessInfo:
def __init__(self, handle):
self.lock = threading.Lock()
self.handle = handle
self.state = ProcessState.RUNNING
@staticmethod
def stateToSignal(state):
if state == ProcessState.RUNNING:
return signal.SIGCONT
if state == ProcessState.STOPPED:
return signal.SIGSTOP
if state == ProcessState.TERMINATED:
return signal.SIGTERM
@staticmethod
def stateToSignalStr(state):
if state == ProcessState.RUNNING:
return "SIGCONT"
if state == ProcessState.STOPPED:
return "SIGSTOP"
if state == ProcessState.TERMINATED:
return "SIGTERM"
@staticmethod
def validStateTransition(current, desired):
if current == ProcessState.TERMINATED:
return False
if current == ProcessState.RUNNING:
return desired == ProcessState.STOPPED or desired == ProcessState.TERMINATED
if current == ProcessState.STOPPED:
return desired == ProcessState.RUNNING
return False
class AtomicSaturatedCounter:
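# Thread-safe counter with an upper bound: reserve() atomically claims one slot and
# returns True, or returns False once the saturation limit has been reached.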
def __init__(self, saturation, initial=0):
self._saturation = saturation
self._value = initial
self._lock = threading.Lock()
def reserve(self):
with self._lock:
if self._value < self._saturation:
self._value += 1
return True
else:
return False
class Validation:
def __init__(self, processes, messages, outputDir):
self.processes = processes
self.messages = messages
self.outputDirPath = os.path.abspath(outputDir)
if not os.path.isdir(self.outputDirPath):
raise Exception("`{}` is not a directory".format(self.outputDirPath))
def generateConfig(self):
# Implement on the derived classes
pass
def checkProcess(self, pid):
# Implement on the derived classes
pass
def checkAll(self, unterminated, continueOnError=True):
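# Uniform agreement check: every message delivered by any process (even a terminated
# one) must also have been delivered by every unterminated, i.e. correct, process.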
res = True
delivered = {}
for pid in range(1, self.processes+1):
delivered[pid] = self.checkProcess(pid)
correct = unterminated
for pid in range(1, self.processes+1):
for m in delivered[pid]:
for corr in correct:
if m not in delivered[corr]:
res = False
print(m, pid, "(" + ("correct" if pid in correct else "NOT correct") + ")", corr)
return res
class URBBroadcastValidation(Validation):
def generateConfig(self):
hosts = tempfile.NamedTemporaryFile(mode='w')
config = tempfile.NamedTemporaryFile(mode='w')
for i in range(1, self.processes + 1):
hosts.write("{} localhost {}\n".format(i, PROCESSES_BASE_IP+i))
hosts.flush()
config.write("{}\n".format(self.messages))
config.flush()
return (hosts, config)
def checkProcess(self, pid):
filePath = os.path.join(self.outputDirPath, 'proc{:02d}.output'.format(pid))
delivered = set()
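# Each line of a process output file is expected to be either "b <seq>" (broadcast)
# or "d <sender> <seq>" (delivery); only deliveries are collected here.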
with open(filePath) as f:
for lineNumber, line in enumerate(f):
tokens = line.split()
# Check delivery
if tokens[0] == 'd':
sender = int(tokens[1])
msg = int(tokens[2])
delivered.add((sender, msg))
return delivered
class FifoBroadcastValidation(Validation):
def generateConfig(self):
hosts = tempfile.NamedTemporaryFile(mode='w')
config = tempfile.NamedTemporaryFile(mode='w')
for i in range(1, self.processes + 1):
hosts.write("{} localhost {}\n".format(i, PROCESSES_BASE_IP+i))
hosts.flush()
config.write("{}\n".format(self.messages))
config.flush()
return (hosts, config)
def checkProcess(self, pid):
filePath = os.path.join(self.outputDirPath, 'proc{:02d}.output'.format(pid))
i = 1
nextMessage = defaultdict(lambda : 1)
filename = os.path.basename(filePath)
with open(filePath) as f:
for lineNumber, line in enumerate(f):
tokens = line.split()
# Check broadcast
if tokens[0] == 'b':
msg = int(tokens[1])
if msg != i:
print("File {}, Line {}: Messages broadcast out of order. Expected message {} but broadcast message {}".format(filename, lineNumber, i, msg))
return False
i += 1
# Check delivery
if tokens[0] == 'd':
sender = int(tokens[1])
msg = int(tokens[2])
if msg != nextMessage[sender]:
print("File {}, Line {}: Message delivered out of order. Expected message {}, but delivered message {}".format(filename, lineNumber, nextMessage[sender], msg))
return False
else:
nextMessage[sender] = msg + 1
return True
class LCausalBroadcastValidation(Validation):
def __init__(self, processes, messages, outputDir, causalRelationships):
super().__init__(processes, messages, outputDir)
def generateConfig(self):
raise NotImplementedError()
def checkProcess(self, pid):
raise NotImplementedError()
class StressTest:
def __init__(self, procs, concurrency, attempts, attemptsRatio):
self.processes = len(procs)
self.processesInfo = dict()
for (logicalPID, handle) in procs:
self.processesInfo[logicalPID] = ProcessInfo(handle)
self.concurrency = concurrency
self.attempts = attempts
self.attemptsRatio = attemptsRatio
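# Allow only a strict minority of processes to be terminated: the broadcast
# guarantees being validated assume a majority of processes stays correct.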
maxTerminatedProcesses = self.processes // 2 if self.processes % 2 == 1 else (self.processes - 1) // 2
self.terminatedProcs = AtomicSaturatedCounter(maxTerminatedProcesses)
def stress(self):
selectProc = list(range(1, self.processes+1))
random.shuffle(selectProc)
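# Build a weighted pool of operations: roughly 1000 entries split according to
# attemptsRatio, e.g. {'STOP': 0.48, 'CONT': 0.48, 'TERM': 0.04} (illustrative),
# so that random.choice below approximates the requested distribution.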
selectOp = [ProcessState.STOPPED] * int(1000 * self.attemptsRatio['STOP']) + \
[ProcessState.RUNNING] * int(1000 * self.attemptsRatio['CONT']) + \
[ProcessState.TERMINATED] * int(1000 * self.attemptsRatio['TERM'])
random.shuffle(selectOp)
successfulAttempts = 0
while successfulAttempts < self.attempts:
proc = random.choice(selectProc)
op = random.choice(selectOp)
info = self.processesInfo[proc]
with info.lock:
if ProcessInfo.validStateTransition(info.state, op):
if op == ProcessState.TERMINATED:
reserved = self.terminatedProcs.reserve()
if reserved:
selectProc.remove(proc)
else:
continue
time.sleep(float(random.randint(50, 500)) / 1000.0)
info.handle.send_signal(ProcessInfo.stateToSignal(op))
info.state = op
successfulAttempts += 1
print("Sending {} to process {}".format(ProcessInfo.stateToSignalStr(op), proc))
"""
if op == ProcessState.TERMINATED and proc not in terminatedProcs:
if len(terminatedProcs) < maxTerminatedProcesses:
terminatedProcs.add(proc)
if len(terminatedProcs) == maxTerminatedProcesses:
break
"""
def remainingUnterminatedProcesses(self):
remaining = []
for pid, info in self.processesInfo.items():
with info.lock:
if info.state != ProcessState.TERMINATED:
remaining.append(pid)
return None if len(remaining) == 0 else remaining
def terminateAllProcesses(self):
for _, info in self.processesInfo.items():
with info.lock:
if info.state != ProcessState.TERMINATED:
if info.state == ProcessState.STOPPED:
info.handle.send_signal(ProcessInfo.stateToSignal(ProcessState.RUNNING))
info.handle.send_signal(ProcessInfo.stateToSignal(ProcessState.TERMINATED))
return False
def continueStoppedProcesses(self):
for _, info in self.processesInfo.items():
with info.lock:
if info.state != ProcessState.TERMINATED:
if info.state == ProcessState.STOPPED:
info.handle.send_signal(ProcessInfo.stateToSignal(ProcessState.RUNNING))
def run(self):
if self.concurrency > 1:
threads = [threading.Thread(target=self.stress) for _ in range(self.concurrency)]
[p.start() for p in threads]
[p.join() for p in threads]
else:
self.stress()
def startProcesses(processes, runscript, hostsFilePath, configFilePath, outputDir):
runscriptPath = os.path.abspath(runscript)
if not os.path.isfile(runscriptPath):
raise Exception("`{}` is not a file".format(runscriptPath))
if os.path.basename(runscriptPath) != 'run.sh':
raise Exception("`{}` is not a runscript".format(runscriptPath))
outputDirPath = os.path.abspath(outputDir)
if not os.path.isdir(outputDirPath):
raise Exception("`{}` is not a directory".format(outputDirPath))
baseDir, _ = os.path.split(runscriptPath)
bin_cpp = os.path.join(baseDir, "bin", "da_proc")
bin_java = os.path.join(baseDir, "bin", "da_proc.jar")
if os.path.exists(bin_cpp):
cmd = [bin_cpp]
elif os.path.exists(bin_java):
cmd = ['java', '-jar', bin_java]
else:
raise Exception("`{}` could not find a binary to execute. Make sure you build before validating".format(runscriptPath))
procs = []
for pid in range(1, processes+1):
cmd_ext = ['--id', str(pid),
'--hosts', hostsFilePath,
'--barrier', '{}:{}'.format(BARRIER_IP, BARRIER_PORT),
'--signal', '{}:{}'.format(SIGNAL_IP, SIGNAL_PORT),
'--output', os.path.join(outputDirPath, 'proc{:02d}.output'.format(pid)),
configFilePath]
stdoutFd = open(os.path.join(outputDirPath, 'proc{:02d}.stdout'.format(pid)), "w")
stderrFd = open(os.path.join(outputDirPath, 'proc{:02d}.stderr'.format(pid)), "w")
procs.append((pid, subprocess.Popen(cmd + cmd_ext, stdout=stdoutFd, stderr=stderrFd)))
#procs.append((pid, subprocess.Popen(cmd + cmd_ext, stdout=sys.stdout, stderr=sys.stderr)))
return procs
def main(processes, messages, runscript, broadcastType, logsDir, testConfig):
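# Overall flow: shape loopback traffic with tc/netem, start a barrier so all processes
# begin broadcasting together, run the stress test (random STOP/CONT/TERM signals),
# wait for every running process to signal completion, then validate the output files.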
# Set tc for loopback
tc = TC(testConfig['TC'])
print(tc)
# Start the barrier
initBarrier = barrier.Barrier(BARRIER_IP, BARRIER_PORT, processes)
initBarrier.listen()
startTimesFuture = initBarrier.startTimesFuture()
initBarrierThread = threading.Thread(target=initBarrier.wait)
initBarrierThread.start()
# Start the finish signal
finishSignal = finishedSignal.FinishedSignal(SIGNAL_IP, SIGNAL_PORT, processes)
finishSignal.listen()
finishSignalThread = threading.Thread(target=finishSignal.wait)
finishSignalThread.start()
if broadcastType == "urb":
validation = URBBroadcastValidation(processes, messages, logsDir)
elif broadcastType == "fifo":
validation = FifoBroadcastValidation(processes, messages, logsDir)
else:
validation = LCausalBroadcastValidation(processes, messages, logsDir, None)
hostsFile, configFile = validation.generateConfig()
procs = None
try:
# Start the processes and get their PIDs
procs = startProcesses(processes, runscript, hostsFile.name, configFile.name, logsDir)
# Create the stress test
st = StressTest(procs,
testConfig['ST']['concurrency'],
testConfig['ST']['attempts'],
testConfig['ST']['attemptsDistribution'])
for (logicalPID, procHandle) in procs:
print("Process with logicalPID {} has PID {}".format(logicalPID, procHandle.pid))
initBarrierThread.join()
print("All processes have been initialized.")
st.run()
print("StressTest is complete.")
print("Resuming stopped processes.")
st.continueStoppedProcesses()
print("Waiting until all running processes have finished broadcasting.")
finishSignalThread.join()
for pid, startTs in OrderedDict(sorted(startTimesFuture.items())).items():
print("Process {} finished broadcasting {} messages in {} ms".format(pid, messages, finishSignal.endTimestamps()[pid] - startTs))
unterminated = st.remainingUnterminatedProcesses()
if unterminated is not None:
input('Hit `Enter` to terminate the remaining processes with logicalPIDs {}.'.format(unterminated))
st.terminateAllProcesses()
mutex = threading.Lock()
def waitForProcess(logicalPID, procHandle, mutex):
procHandle.wait()
with mutex:
print("Process {} exited with {}".format(logicalPID, procHandle.returncode))
# Monitor which processes have exited
monitors = [threading.Thread(target=waitForProcess, args=(logicalPID, procHandle, mutex)) for (logicalPID, procHandle) in procs]
[p.start() for p in monitors]
[p.join() for p in monitors]
input('Hit `Enter` to validate the output')
print("Result of validation: {}".format(validation.checkAll(unterminated)))
finally:
if procs is not None:
for _, p in procs:
p.kill()
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument(
"-r",
"--runscript",
required=True,
dest="runscript",
help="Path to run.sh",
)
parser.add_argument(
"-b",
"--broadcast",
choices=["urb", "fifo", "lcausal"],
required=True,
dest="broadcastType",
help="Which broadcast implementation to test",
)
parser.add_argument(
"-l",
"--logs",
required=True,
dest="logsDir",
help="Directory to store stdout, stderr and outputs generated by the processes",
)
parser.add_argument(
"-p",
"--processes",
required=True,
type=int,
dest="processes",
help="Number of processes that broadcast",
)
parser.add_argument(
"-m",
"--messages",
required=True,
profile)
client._sdkVersion += ("_CLI_" + __version__)
models = MODELS_MAP[g_param[OptionsDefine.Version]]
model = models.ListAuthorizedApplicationsToUserGroupRequest()
model.from_json_string(json.dumps(args))
start_time = time.time()
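# Waiter loop: without --waiter a single request is made; otherwise the request is
# re-issued until search(expr, json_obj) equals the configured target value, sleeping
# 'interval' seconds between polls and raising ClientError after 'timeout' seconds.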
while True:
rsp = client.ListAuthorizedApplicationsToUserGroup(model)
result = rsp.to_json_string()
try:
json_obj = json.loads(result)
except TypeError as e:
json_obj = json.loads(result.decode('utf-8')) # python3.3
if not g_param[OptionsDefine.Waiter] or search(g_param['OptionsDefine.WaiterInfo']['expr'], json_obj) == g_param['OptionsDefine.WaiterInfo']['to']:
break
cur_time = time.time()
if cur_time - start_time >= g_param['OptionsDefine.WaiterInfo']['timeout']:
raise ClientError('Request timeout, wait `%s` to `%s` timeout, last request is %s' %
(g_param['OptionsDefine.WaiterInfo']['expr'], g_param['OptionsDefine.WaiterInfo']['to'],
search(g_param['OptionsDefine.WaiterInfo']['expr'], json_obj)))
else:
print('Inquiry result is %s.' % search(g_param['OptionsDefine.WaiterInfo']['expr'], json_obj))
time.sleep(g_param['OptionsDefine.WaiterInfo']['interval'])
FormatOutput.output("action", json_obj, g_param[OptionsDefine.Output], g_param[OptionsDefine.Filter])
def doModifyAppAccount(args, parsed_globals):
g_param = parse_global_arg(parsed_globals)
if g_param[OptionsDefine.UseCVMRole.replace('-', '_')]:
cred = credential.CVMRoleCredential()
elif g_param[OptionsDefine.RoleArn.replace('-', '_')] and g_param[OptionsDefine.RoleSessionName.replace('-', '_')]:
cred = credential.STSAssumeRoleCredential(
g_param[OptionsDefine.SecretId], g_param[OptionsDefine.SecretKey], g_param[OptionsDefine.RoleArn.replace('-', '_')],
g_param[OptionsDefine.RoleSessionName.replace('-', '_')]
)
else:
cred = credential.Credential(
g_param[OptionsDefine.SecretId], g_param[OptionsDefine.SecretKey], g_param[OptionsDefine.Token]
)
http_profile = HttpProfile(
reqTimeout=60 if g_param[OptionsDefine.Timeout] is None else int(g_param[OptionsDefine.Timeout]),
reqMethod="POST",
endpoint=g_param[OptionsDefine.Endpoint],
proxy=g_param[OptionsDefine.HttpsProxy.replace('-', '_')]
)
profile = ClientProfile(httpProfile=http_profile, signMethod="HmacSHA256")
mod = CLIENT_MAP[g_param[OptionsDefine.Version]]
client = mod.EiamClient(cred, g_param[OptionsDefine.Region], profile)
client._sdkVersion += ("_CLI_" + __version__)
models = MODELS_MAP[g_param[OptionsDefine.Version]]
model = models.ModifyAppAccountRequest()
model.from_json_string(json.dumps(args))
start_time = time.time()
while True:
rsp = client.ModifyAppAccount(model)
result = rsp.to_json_string()
try:
json_obj = json.loads(result)
except TypeError as e:
json_obj = json.loads(result.decode('utf-8')) # python3.3
if not g_param[OptionsDefine.Waiter] or search(g_param['OptionsDefine.WaiterInfo']['expr'], json_obj) == g_param['OptionsDefine.WaiterInfo']['to']:
break
cur_time = time.time()
if cur_time - start_time >= g_param['OptionsDefine.WaiterInfo']['timeout']:
raise ClientError('Request timeout, wait `%s` to `%s` timeout, last request is %s' %
(g_param['OptionsDefine.WaiterInfo']['expr'], g_param['OptionsDefine.WaiterInfo']['to'],
search(g_param['OptionsDefine.WaiterInfo']['expr'], json_obj)))
else:
print('Inquiry result is %s.' % search(g_param['OptionsDefine.WaiterInfo']['expr'], json_obj))
time.sleep(g_param['OptionsDefine.WaiterInfo']['interval'])
FormatOutput.output("action", json_obj, g_param[OptionsDefine.Output], g_param[OptionsDefine.Filter])
def doModifyAccountGroup(args, parsed_globals):
g_param = parse_global_arg(parsed_globals)
if g_param[OptionsDefine.UseCVMRole.replace('-', '_')]:
cred = credential.CVMRoleCredential()
elif g_param[OptionsDefine.RoleArn.replace('-', '_')] and g_param[OptionsDefine.RoleSessionName.replace('-', '_')]:
cred = credential.STSAssumeRoleCredential(
g_param[OptionsDefine.SecretId], g_param[OptionsDefine.SecretKey], g_param[OptionsDefine.RoleArn.replace('-', '_')],
g_param[OptionsDefine.RoleSessionName.replace('-', '_')]
)
else:
cred = credential.Credential(
g_param[OptionsDefine.SecretId], g_param[OptionsDefine.SecretKey], g_param[OptionsDefine.Token]
)
http_profile = HttpProfile(
reqTimeout=60 if g_param[OptionsDefine.Timeout] is None else int(g_param[OptionsDefine.Timeout]),
reqMethod="POST",
endpoint=g_param[OptionsDefine.Endpoint],
proxy=g_param[OptionsDefine.HttpsProxy.replace('-', '_')]
)
profile = ClientProfile(httpProfile=http_profile, signMethod="HmacSHA256")
mod = CLIENT_MAP[g_param[OptionsDefine.Version]]
client = mod.EiamClient(cred, g_param[OptionsDefine.Region], profile)
client._sdkVersion += ("_CLI_" + __version__)
models = MODELS_MAP[g_param[OptionsDefine.Version]]
model = models.ModifyAccountGroupRequest()
model.from_json_string(json.dumps(args))
start_time = time.time()
while True:
rsp = client.ModifyAccountGroup(model)
result = rsp.to_json_string()
try:
json_obj = json.loads(result)
except TypeError as e:
json_obj = json.loads(result.decode('utf-8')) # python3.3
if not g_param[OptionsDefine.Waiter] or search(g_param['OptionsDefine.WaiterInfo']['expr'], json_obj) == g_param['OptionsDefine.WaiterInfo']['to']:
break
cur_time = time.time()
if cur_time - start_time >= g_param['OptionsDefine.WaiterInfo']['timeout']:
raise ClientError('Request timeout, wait `%s` to `%s` timeout, last request is %s' %
(g_param['OptionsDefine.WaiterInfo']['expr'], g_param['OptionsDefine.WaiterInfo']['to'],
search(g_param['OptionsDefine.WaiterInfo']['expr'], json_obj)))
else:
print('Inquiry result is %s.' % search(g_param['OptionsDefine.WaiterInfo']['expr'], json_obj))
time.sleep(g_param['OptionsDefine.WaiterInfo']['interval'])
FormatOutput.output("action", json_obj, g_param[OptionsDefine.Output], g_param[OptionsDefine.Filter])
def doDescribeUserResourcesAuthorization(args, parsed_globals):
g_param = parse_global_arg(parsed_globals)
if g_param[OptionsDefine.UseCVMRole.replace('-', '_')]:
cred = credential.CVMRoleCredential()
elif g_param[OptionsDefine.RoleArn.replace('-', '_')] and g_param[OptionsDefine.RoleSessionName.replace('-', '_')]:
cred = credential.STSAssumeRoleCredential(
g_param[OptionsDefine.SecretId], g_param[OptionsDefine.SecretKey], g_param[OptionsDefine.RoleArn.replace('-', '_')],
g_param[OptionsDefine.RoleSessionName.replace('-', '_')]
)
else:
cred = credential.Credential(
g_param[OptionsDefine.SecretId], g_param[OptionsDefine.SecretKey], g_param[OptionsDefine.Token]
)
http_profile = HttpProfile(
reqTimeout=60 if g_param[OptionsDefine.Timeout] is None else int(g_param[OptionsDefine.Timeout]),
reqMethod="POST",
endpoint=g_param[OptionsDefine.Endpoint],
proxy=g_param[OptionsDefine.HttpsProxy.replace('-', '_')]
)
profile = ClientProfile(httpProfile=http_profile, signMethod="HmacSHA256")
mod = CLIENT_MAP[g_param[OptionsDefine.Version]]
client = mod.EiamClient(cred, g_param[OptionsDefine.Region], profile)
client._sdkVersion += ("_CLI_" + __version__)
models = MODELS_MAP[g_param[OptionsDefine.Version]]
model = models.DescribeUserResourcesAuthorizationRequest()
model.from_json_string(json.dumps(args))
start_time = time.time()
while True:
rsp = client.DescribeUserResourcesAuthorization(model)
result = rsp.to_json_string()
try:
json_obj = json.loads(result)
except TypeError as e:
json_obj = json.loads(result.decode('utf-8')) # python3.3
if not g_param[OptionsDefine.Waiter] or search(g_param['OptionsDefine.WaiterInfo']['expr'], json_obj) == g_param['OptionsDefine.WaiterInfo']['to']:
break
cur_time = time.time()
if cur_time - start_time >= g_param['OptionsDefine.WaiterInfo']['timeout']:
raise ClientError('Request timeout, wait `%s` to `%s` timeout, last request is %s' %
(g_param['OptionsDefine.WaiterInfo']['expr'], g_param['OptionsDefine.WaiterInfo']['to'],
search(g_param['OptionsDefine.WaiterInfo']['expr'], json_obj)))
else:
print('Inquiry result is %s.' % search(g_param['OptionsDefine.WaiterInfo']['expr'], json_obj))
time.sleep(g_param['OptionsDefine.WaiterInfo']['interval'])
FormatOutput.output("action", json_obj, g_param[OptionsDefine.Output], g_param[OptionsDefine.Filter])
def doListApplicationAuthorizations(args, parsed_globals):
g_param = parse_global_arg(parsed_globals)
if g_param[OptionsDefine.UseCVMRole.replace('-', '_')]:
cred = credential.CVMRoleCredential()
elif g_param[OptionsDefine.RoleArn.replace('-', '_')] and g_param[OptionsDefine.RoleSessionName.replace('-', '_')]:
cred = credential.STSAssumeRoleCredential(
g_param[OptionsDefine.SecretId], g_param[OptionsDefine.SecretKey], g_param[OptionsDefine.RoleArn.replace('-', '_')],
g_param[OptionsDefine.RoleSessionName.replace('-', '_')]
)
else:
cred = credential.Credential(
g_param[OptionsDefine.SecretId], g_param[OptionsDefine.SecretKey], g_param[OptionsDefine.Token]
)
http_profile = HttpProfile(
reqTimeout=60 if g_param[OptionsDefine.Timeout] is None else int(g_param[OptionsDefine.Timeout]),
reqMethod="POST",
endpoint=g_param[OptionsDefine.Endpoint],
proxy=g_param[OptionsDefine.HttpsProxy.replace('-', '_')]
)
profile = ClientProfile(httpProfile=http_profile, signMethod="HmacSHA256")
mod = CLIENT_MAP[g_param[OptionsDefine.Version]]
client = mod.EiamClient(cred, g_param[OptionsDefine.Region], profile)
client._sdkVersion += ("_CLI_" + __version__)
models = MODELS_MAP[g_param[OptionsDefine.Version]]
model = models.ListApplicationAuthorizationsRequest()
model.from_json_string(json.dumps(args))
start_time = time.time()
while True:
rsp = client.ListApplicationAuthorizations(model)
result = rsp.to_json_string()
try:
json_obj = json.loads(result)
except TypeError as e:
json_obj = json.loads(result.decode('utf-8')) # python3.3
if not g_param[OptionsDefine.Waiter] or search(g_param['OptionsDefine.WaiterInfo']['expr'], json_obj) == g_param['OptionsDefine.WaiterInfo']['to']:
break
cur_time = time.time()
if cur_time - start_time >= g_param['OptionsDefine.WaiterInfo']['timeout']:
raise ClientError('Request timeout, wait `%s` to `%s` timeout, last request is %s' %
(g_param['OptionsDefine.WaiterInfo']['expr'], g_param['OptionsDefine.WaiterInfo']['to'],
search(g_param['OptionsDefine.WaiterInfo']['expr'], json_obj)))
else:
print('Inquiry result is %s.' % search(g_param['OptionsDefine.WaiterInfo']['expr'], json_obj))
time.sleep(g_param['OptionsDefine.WaiterInfo']['interval'])
FormatOutput.output("action", json_obj, g_param[OptionsDefine.Output], g_param[OptionsDefine.Filter])
def doDescribeUserThirdPartyAccountInfo(args, parsed_globals):
g_param = parse_global_arg(parsed_globals)
if g_param[OptionsDefine.UseCVMRole.replace('-', '_')]:
cred = credential.CVMRoleCredential()
elif g_param[OptionsDefine.RoleArn.replace('-', '_')] and g_param[OptionsDefine.RoleSessionName.replace('-', '_')]:
cred = credential.STSAssumeRoleCredential(
g_param[OptionsDefine.SecretId], g_param[OptionsDefine.SecretKey], g_param[OptionsDefine.RoleArn.replace('-', '_')],
g_param[OptionsDefine.RoleSessionName.replace('-', '_')]
)
else:
cred = credential.Credential(
g_param[OptionsDefine.SecretId], g_param[OptionsDefine.SecretKey], g_param[OptionsDefine.Token]
)
http_profile = HttpProfile(
reqTimeout=60 if g_param[OptionsDefine.Timeout] is None else int(g_param[OptionsDefine.Timeout]),
reqMethod="POST",
endpoint=g_param[OptionsDefine.Endpoint],
proxy=g_param[OptionsDefine.HttpsProxy.replace('-', '_')]
)
profile = ClientProfile(httpProfile=http_profile, signMethod="HmacSHA256")
mod = CLIENT_MAP[g_param[OptionsDefine.Version]]
client = mod.EiamClient(cred, g_param[OptionsDefine.Region], profile)
client._sdkVersion += ("_CLI_" + __version__)
models = MODELS_MAP[g_param[OptionsDefine.Version]]
model = models.DescribeUserThirdPartyAccountInfoRequest()
model.from_json_string(json.dumps(args))
start_time = time.time()
while True:
rsp = client.DescribeUserThirdPartyAccountInfo(model)
result = rsp.to_json_string()
try:
json_obj = json.loads(result)
except TypeError as e:
json_obj = json.loads(result.decode('utf-8')) # python3.3
if not g_param[OptionsDefine.Waiter] or search(g_param['OptionsDefine.WaiterInfo']['expr'], json_obj) == g_param['OptionsDefine.WaiterInfo']['to']:
break
cur_time = time.time()
if cur_time - start_time >= g_param['OptionsDefine.WaiterInfo']['timeout']:
raise ClientError('Request timeout, wait `%s` to `%s` timeout, last request is %s' %
(g_param['OptionsDefine.WaiterInfo']['expr'], g_param['OptionsDefine.WaiterInfo']['to'],
search(g_param['OptionsDefine.WaiterInfo']['expr'], json_obj)))
else:
print('Inquiry result is %s.' % search(g_param['OptionsDefine.WaiterInfo']['expr'], json_obj))
time.sleep(g_param['OptionsDefine.WaiterInfo']['interval'])
FormatOutput.output("action", json_obj, g_param[OptionsDefine.Output], g_param[OptionsDefine.Filter])
def doDeleteUserGroup(args, parsed_globals):
g_param = parse_global_arg(parsed_globals)
if g_param[OptionsDefine.UseCVMRole.replace('-', '_')]:
cred = credential.CVMRoleCredential()
elif g_param[OptionsDefine.RoleArn.replace('-', '_')] and g_param[OptionsDefine.RoleSessionName.replace('-', '_')]:
cred = credential.STSAssumeRoleCredential(
g_param[OptionsDefine.SecretId], g_param[OptionsDefine.SecretKey], g_param[OptionsDefine.RoleArn.replace('-', '_')],
g_param[OptionsDefine.RoleSessionName.replace('-', '_')]
)
else:
cred = credential.Credential(
g_param[OptionsDefine.SecretId], g_param[OptionsDefine.SecretKey], g_param[OptionsDefine.Token]
)
http_profile = HttpProfile(
reqTimeout=60 if g_param[OptionsDefine.Timeout] is None else int(g_param[OptionsDefine.Timeout]),
reqMethod="POST",
endpoint=g_param[OptionsDefine.Endpoint],
proxy=g_param[OptionsDefine.HttpsProxy.replace('-', '_')]
)
profile = ClientProfile(httpProfile=http_profile, signMethod="HmacSHA256")
mod = CLIENT_MAP[g_param[OptionsDefine.Version]]
client = mod.EiamClient(cred, g_param[OptionsDefine.Region], profile)
client._sdkVersion += ("_CLI_" + __version__)
models = MODELS_MAP[g_param[OptionsDefine.Version]]
model = models.DeleteUserGroupRequest()
model.from_json_string(json.dumps(args))
start_time = time.time()
while True:
rsp = client.DeleteUserGroup(model)
result = rsp.to_json_string()
try:
json_obj = json.loads(result)
except TypeError as e:
json_obj = json.loads(result.decode('utf-8')) # python3.3
if not g_param[OptionsDefine.Waiter] or search(g_param['OptionsDefine.WaiterInfo']['expr'], json_obj) == g_param['OptionsDefine.WaiterInfo']['to']:
break
cur_time = time.time()
if cur_time - start_time >= g_param['OptionsDefine.WaiterInfo']['timeout']:
raise ClientError('Request timeout, wait `%s` to `%s` timeout, last request is %s' %
(g_param['OptionsDefine.WaiterInfo']['expr'], g_param['OptionsDefine.WaiterInfo']['to'],
search(g_param['OptionsDefine.WaiterInfo']['expr'], json_obj)))
else:
print('Inquiry result is %s.' % search(g_param['OptionsDefine.WaiterInfo']['expr'], json_obj))
time.sleep(g_param['OptionsDefine.WaiterInfo']['interval'])
FormatOutput.output("action", json_obj, g_param[OptionsDefine.Output], g_param[OptionsDefine.Filter])
def doDeleteAccountGroup(args, parsed_globals):
g_param = parse_global_arg(parsed_globals)
if g_param[OptionsDefine.UseCVMRole.replace('-', '_')]:
cred = credential.CVMRoleCredential()
elif g_param[OptionsDefine.RoleArn.replace('-', '_')] and g_param[OptionsDefine.RoleSessionName.replace('-', '_')]:
cred = credential.STSAssumeRoleCredential(
g_param[OptionsDefine.SecretId], g_param[OptionsDefine.SecretKey], g_param[OptionsDefine.RoleArn.replace('-', '_')],
g_param[OptionsDefine.RoleSessionName.replace('-', '_')]
)
else:
cred = credential.Credential(
g_param[OptionsDefine.SecretId], g_param[OptionsDefine.SecretKey], g_param[OptionsDefine.Token]
)
http_profile = HttpProfile(
reqTimeout=60 if g_param[OptionsDefine.Timeout] is None else int(g_param[OptionsDefine.Timeout]),
reqMethod="POST",
endpoint=g_param[OptionsDefine.Endpoint],
proxy=g_param[OptionsDefine.HttpsProxy.replace('-', '_')]
)
profile = ClientProfile(httpProfile=http_profile, signMethod="HmacSHA256")
mod = CLIENT_MAP[g_param[OptionsDefine.Version]]
client = mod.EiamClient(cred, g_param[OptionsDefine.Region], profile)
client._sdkVersion += ("_CLI_" + __version__)
models = MODELS_MAP[g_param[OptionsDefine.Version]]
model = models.DeleteAccountGroupRequest()
model.from_json_string(json.dumps(args))
start_time = time.time()
while True:
rsp = client.DeleteAccountGroup(model)
result = rsp.to_json_string()
try:
json_obj = json.loads(result)
except TypeError as e:
json_obj = json.loads(result.decode('utf-8')) # python3.3
if not g_param[OptionsDefine.Waiter] or search(g_param['OptionsDefine.WaiterInfo']['expr'], json_obj) == g_param['OptionsDefine.WaiterInfo']['to']:
break
cur_time = time.time()
if cur_time - start_time >= g_param['OptionsDefine.WaiterInfo']['timeout']:
raise ClientError('Request timeout, wait `%s` to `%s` timeout, last request is %s' %
(g_param['OptionsDefine.WaiterInfo']['expr'], g_param['OptionsDefine.WaiterInfo']['to'],
search(g_param['OptionsDefine.WaiterInfo']['expr'], json_obj)))
else:
print('Inquiry result is %s.' % search(g_param['OptionsDefine.WaiterInfo']['expr'], json_obj))
time.sleep(g_param['OptionsDefine.WaiterInfo']['interval'])
FormatOutput.output("action", json_obj, g_param[OptionsDefine.Output], g_param[OptionsDefine.Filter])
def doModifyUserInfo(args, parsed_globals):
g_param = parse_global_arg(parsed_globals)
if g_param[OptionsDefine.UseCVMRole.replace('-', '_')]:
cred = credential.CVMRoleCredential()
elif g_param[OptionsDefine.RoleArn.replace('-', '_')] and g_param[OptionsDefine.RoleSessionName.replace('-', '_')]:
cred = credential.STSAssumeRoleCredential(
g_param[OptionsDefine.SecretId], g_param[OptionsDefine.SecretKey], g_param[OptionsDefine.RoleArn.replace('-', '_')],
g_param[OptionsDefine.RoleSessionName.replace('-', '_')]
)
else:
cred = credential.Credential(
g_param[OptionsDefine.SecretId], g_param[OptionsDefine.SecretKey], g_param[OptionsDefine.Token]
)
http_profile = HttpProfile(
reqTimeout=60 if g_param[OptionsDefine.Timeout] is None else int(g_param[OptionsDefine.Timeout]),
reqMethod="POST",
endpoint=g_param[OptionsDefine.Endpoint],
proxy=g_param[OptionsDefine.HttpsProxy.replace('-', '_')]
)
profile = ClientProfile(httpProfile=http_profile, signMethod="HmacSHA256")
mod = CLIENT_MAP[g_param[OptionsDefine.Version]]
client = mod.EiamClient(cred, g_param[OptionsDefine.Region], profile)
client._sdkVersion += ("_CLI_" + __version__)
models = MODELS_MAP[g_param[OptionsDefine.Version]]
model = models.ModifyUserInfoRequest()
model.from_json_string(json.dumps(args))
start_time = time.time()
while True:
rsp = client.ModifyUserInfo(model)
result = rsp.to_json_string()
try:
json_obj = json.loads(result)
except TypeError as e:
json_obj = json.loads(result.decode('utf-8')) # python3.3
if not g_param[OptionsDefine.Waiter] or search(g_param['OptionsDefine.WaiterInfo']['expr'], json_obj) == g_param['OptionsDefine.WaiterInfo']['to']:
break
cur_time = time.time()
if cur_time - start_time >= g_param['OptionsDefine.WaiterInfo']['timeout']:
raise ClientError('Request timeout, wait `%s` to `%s` timeout, last request is %s' %
(g_param['OptionsDefine.WaiterInfo']['expr'], g_param['OptionsDefine.WaiterInfo']['to'],
search(g_param['OptionsDefine.WaiterInfo']['expr'], json_obj)))
else:
print('Inquiry result is %s.' % search(g_param['OptionsDefine.WaiterInfo']['expr'], json_obj))
time.sleep(g_param['OptionsDefine.WaiterInfo']['interval'])
FormatOutput.output("action", json_obj, g_param[OptionsDefine.Output], g_param[OptionsDefine.Filter])
def doListUserGroups(args, parsed_globals):
g_param = parse_global_arg(parsed_globals)
if g_param[OptionsDefine.UseCVMRole.replace('-', '_')]:
cred = credential.CVMRoleCredential()
elif g_param[OptionsDefine.RoleArn.replace('-', '_')] and g_param[OptionsDefine.RoleSessionName.replace('-', '_')]:
cred = credential.STSAssumeRoleCredential(
g_param[OptionsDefine.SecretId], g_param[OptionsDefine.SecretKey], g_param[OptionsDefine.RoleArn.replace('-', '_')],
g_param[OptionsDefine.RoleSessionName.replace('-', '_')]
)
<filename>magni/cs/reconstruction/gamp/_algorithm.py
"""
..
Copyright (c) 2015-2017, Magni developers.
All rights reserved.
See LICENSE.rst for further information.
Module providing the core Generalised Approximate Message Passing (GAMP)
algorithm.
Routine listings
----------------
run(y, A, A_asq=None)
Run the GAMP reconstruction algorithm.
See Also
--------
magni.cs.reconstruction.gamp._config : Configuration options.
magni.cs.reconstruction.gamp.input_channel : Available input channels.
magni.cs.reconstruction.gamp.output_channel : Available output channels.
magni.cs.reconstruction.gamp.stop_criterion : Available stop criteria.
Notes
-----
The default configuration of the GAMP algorithm provides the s-GB AMP algorithm
from [1]_ using an MSE convergence based stop criterion. The input
channel, the output channel, and the stop criterion may all be changed.
This implementation allows for the use of sum approximations of the squared
system matrix as detailed in [1]_ and [2]_. Furthermore, a simple damping
option is available based on the description in [3]_ (see also [4]_ for more
details on damping in GAMP).
References
----------
.. [1] <NAME>, <NAME>, <NAME>, <NAME>, and <NAME>,
"Probabilistic reconstruction in compressed sensing: algorithms, phase
diagrams, and threshold achieving matrices", *Journal of Statistical
Mechanics: Theory and Experiment*, vol. P08009, pp. 1-57, Aug. 2012.
.. [2] <NAME>, "Generalized Approximate Message Passing for Estimation
with Random Linear Mixing", arXiv:1010.5141v2, pp. 1-22, Aug. 2012.
.. [3] <NAME>, <NAME>, and <NAME>. "On the Convergence of
Approximate Message Passing with Arbitrary Matrices", *in IEEE International
Symposium on Information Theory (ISIT)*, pp. 236-240, Honolulu, Hawaii, USA,
Jun. 29 - Jul. 4, 2014.
.. [4] <NAME>, <NAME>, <NAME>, <NAME>, <NAME>, "Adaptive
Damping and Mean Removal for the Generalized Approximate Message Passing
Algorithm", *in IEEE International Conference on Acoustics, Speech, and
Signal Processing (ICASSP)*, South Brisbane, Queensland, Australia, Apr.
19-24, 2015, pp. 2021-2025.
"""
from __future__ import division
import copy
import numpy as np
from magni.cs.reconstruction.gamp import config as _conf
from magni.utils.config import Configger as _Configger
from magni.utils.validation import decorate_validation as _decorate_validation
from magni.utils.validation import validate_generic as _generic
from magni.utils.validation import validate_numeric as _numeric
from magni.utils.validation.types import MatrixBase as _MatrixBase
from magni.utils.matrices import norm as _norm
from magni.utils.matrices import (SumApproximationMatrix as
_SumApproximationMatrix)
def run(y, A, A_asq=None):
"""
Run the GAMP reconstruction algorithm.
Parameters
----------
y : ndarray
The m x 1 measurement vector.
A : ndarray or magni.utils.matrices.{Matrix, MatrixCollection}
The m x n matrix which is the product of the measurement matrix and the
dictionary matrix.
A_asq : ndarray or magni.utils.matrices.{Matrix, MatrixCollection} or None
The m x n matrix which is the entrywise absolute value squared product
of the measurement matrix and the dictionary matrix (the default is
None, which implies that a sum approximation is used).
Returns
-------
alpha : ndarray
The n x 1 reconstructed coefficient vector.
history : dict, optional
The dictionary of various measures tracked in the GAMP iterations.
See Also
--------
magni.cs.reconstruction.gamp._config : Configuration options.
magni.cs.reconstruction.gamp.input_channel : Input channels.
magni.cs.reconstruction.gamp.output_channel : Output channels.
magni.cs.reconstruction.gamp.stop_criterion : Stop criteria.
Notes
-----
Optionally, the algorithm may be configured to save and return the
iteration history along with the reconstruction result. The returned
history contains the following:
* alpha_bar : Mean coefficient estimates (the reconstruction coefficients).
* alpha_tilde : Variance coefficient estimates.
* MSE : Solution mean squared error (if the true solution is known).
* input_channel_parameters : The state of the input channel.
* output_channel_parameters : The state of the output channel.
* stop_criterion : The currently used stop criterion.
* stop_criterion_value : The value of the stop criterion.
* stop_iteration : The iteration at which the algorithm stopped.
* stop_reason : The reason for termination of the algorithm.
Examples
--------
For example, recovering a vector from AWGN noisy measurements using GAMP
>>> import numpy as np
>>> from magni.cs.reconstruction.gamp import run, config
>>> np.random.seed(seed=6028)
>>> k, m, n = 10, 200, 400
>>> tau = float(k) / n
>>> A = 1 / np.sqrt(m) * np.random.randn(m, n)
>>> A_asq = np.abs(A)**2
>>> alpha = np.zeros((n, 1))
>>> alpha[:k] = np.random.normal(scale=2, size=(k, 1))
>>> np_printoptions = np.get_printoptions()
>>> np.set_printoptions(suppress=True, threshold=k+2)
>>> alpha[:k + 2]
array([[ 1.92709461],
[ 0.74378508],
[-3.2418159 ],
[-1.32277347],
[ 0.90118 ],
[-0.19157262],
[ 0.82855712],
[ 0.24817994],
[-1.43034777],
[-0.21232344],
[ 0. ],
[ 0. ]])
>>> sigma = 0.15
>>> y = A.dot(alpha) + np.random.normal(scale=sigma, size=(A.shape[0], 1))
>>> input_channel_params = {'tau': tau, 'theta_bar': 0, 'theta_tilde': 4,
... 'use_em': False}
>>> config['input_channel_parameters'] = input_channel_params
>>> output_channel_params = {'sigma_sq': sigma**2,
... 'noise_level_estimation': 'fixed'}
>>> config['output_channel_parameters'] = output_channel_params
>>> alpha_hat = run(y, A, A_asq)
>>> alpha_hat[:k + 2]
array([[ 1.93810961],
[ 0.6955502 ],
[-3.39759349],
[-1.35533562],
[ 1.10524227],
[-0.00594848],
[ 0.79274671],
[ 0.04895264],
[-1.08726071],
[-0.00142911],
[ 0.00022861],
[-0.00004272]])
>>> np.sum(np.abs(alpha - alpha_hat) > sigma * 3)
0
or recover the same vector returning a history comparing the pr. iteration
solution to the true vector and printing the A_asq details
>>> config['report_A_asq_setup'] = True
>>> config['report_history'] = True
>>> config['true_solution'] = alpha
>>> alpha_hat, history = run(y, A, A_asq) # doctest: +NORMALIZE_WHITESPACE
GAMP is using the A_asq: [[ 0.024 ..., 0.002]
...,
[ 0. ..., 0.014]]
The sum approximation method is: None
>>> alpha_hat[:k + 2]
array([[ 1.93810961],
[ 0.6955502 ],
[-3.39759349],
[-1.35533562],
[ 1.10524227],
[-0.00594848],
[ 0.79274671],
[ 0.04895264],
[-1.08726071],
[-0.00142911],
[ 0.00022861],
[-0.00004272]])
>>> np.array(history['MSE']).reshape(-1, 1)[1:11]
array([[ 0.04562729],
[ 0.01328304],
[ 0.00112098],
[ 0.00074968],
[ 0.00080175],
[ 0.00076615],
[ 0.00077043]])
or recover the same vector using sample variance AWGN noise level
estimation
>>> config['report_A_asq_setup'] = False
>>> config['report_history'] = False
>>> output_channel_params['noise_level_estimation'] = 'sample_variance'
>>> config['output_channel_parameters'] = output_channel_params
>>> alpha_hat = run(y, A, A_asq)
>>> alpha_hat[:k + 2]
array([[ 1.94820622],
[ 0.72162206],
[-3.39978431],
[-1.35357001],
[ 1.10701779],
[-0.00834467],
[ 0.79790879],
[ 0.08441384],
[-1.08946306],
[-0.0015894 ],
[ 0.00020561],
[-0.00003623]])
>>> np.sum(np.abs(alpha - alpha_hat) > sigma * 3)
0
or recover the same vector using median AWGN noise level estimation
>>> output_channel_params['noise_level_estimation'] = 'median'
>>> config['output_channel_parameters'] = output_channel_params
>>> alpha_hat = run(y, A, A_asq)
>>> alpha_hat[:k + 2]
array([[ 1.93356483],
[ 0.65232347],
[-3.39440429],
[-1.35437724],
[ 1.10312573],
[-0.0050555 ],
[ 0.78743162],
[ 0.03616397],
[-1.08589927],
[-0.00136802],
[ 0.00024121],
[-0.00004498]])
>>> np.sum(np.abs(alpha - alpha_hat) > sigma * 3)
0
or recover the same vector learning the AWGN noise level using expectation
maximization (EM)
>>> output_channel_params['noise_level_estimation'] = 'em'
>>> config['output_channel_parameters'] = output_channel_params
>>> alpha_hat = run(y, A, A_asq)
>>> alpha_hat[:k + 2]
array([[ 1.94118089],
[ 0.71553983],
[-3.40076165],
[-1.35662005],
[ 1.1099417 ],
[-0.00688125],
[ 0.79442879],
[ 0.06258856],
[-1.08792606],
[-0.00148811],
[ 0.00022266],
[-0.00003785]])
>>> np.sum(np.abs(alpha - alpha_hat) > sigma * 3)
0
>>> np.set_printoptions(**np_printoptions)
"""
@_decorate_validation
def validate_input():
_numeric('y', ('integer', 'floating', 'complex'), shape=(-1, 1))
_numeric('A', ('integer', 'floating', 'complex'), shape=(
y.shape[0],
_conf['true_solution'].shape[0]
if _conf['true_solution'] is not None else -1))
if isinstance(A_asq, _MatrixBase):
# It is not possible to validate the range of an implicit matrix
# Thus allow all possible values
range_ = '[-inf;inf]'
else:
range_ = '[0;inf)'
_numeric('A_asq', ('integer', 'floating', 'complex'), range_=range_,
shape=A.shape, ignore_none=True)
@_decorate_validation
def validate_output():
# complex128 is two float64 (real and imaginary part) each taking 8*8
# bits. Thus, in total 2*8*8=128 bits. However, we only consider it to
# be "64 bit precision" since that is what each part is.
bits_pr_nbytes = 4 if np.iscomplexobj(convert(0)) else 8
_numeric('alpha', ('integer', 'floating', 'complex'),
shape=(A.shape[1], 1),
precision=convert(0).nbytes * bits_pr_nbytes)
_generic('history', 'mapping',
keys_in=('alpha_bar', 'alpha_tilde', 'MSE',
'input_channel_parameters',
'output_channel_parameters', 'stop_criterion',
'stop_criterion_value', 'stop_iteration',
'stop_reason'))
validate_input()
# Initialisation
init = _get_gamp_initialisation(y, A, A_asq)
AH = init['AH']
A_asq = init['A_asq']
AT_asq = init['AT_asq']
o = init['o']
q = init['q']
s = init['s']
r = init['r']
m = init['m']
n = init['n']
alpha_bar = init['alpha_bar']
alpha_tilde = init['alpha_tilde']
alpha_breve = init['alpha_breve']
A_dot_alpha_bar = init['A_dot_alpha_bar']
z_bar = init['z_bar']
damping = init['damping']
sum_approximation_method = init['sum_approximation_method']
A_frob_sq = init['A_frob_sq']
output_channel = init['output_channel']
output_channel_parameters = init['output_channel_parameters']
input_channel = init['input_channel']
input_channel_parameters = init['input_channel_parameters']
stop_criterion = init['stop_criterion']
stop_criterion_name = init['stop_criterion_name']
iterations = init['iterations']
tolerance = init['tolerance']
convert = init['convert']
report_history = init['report_history']
history = init['history']
true_solution = init['true_solution']
# GAMP iterations
for it in range(iterations):
# Save previous state
alpha_bar_prev = alpha_bar # Used in stop criterion
# GAMP state updates
if sum_approximation_method == 'rangan':
# Rangan's scalar variance sum approximation.
# Factor side updates
v = 1.0 / m * A_frob_sq * alpha_breve
o = A_dot_alpha_bar - v * q
z_bar, z_tilde = output_channel.compute(locals())
q = (z_bar - o) / v
u = 1.0 / m * | |
(
rule.l2.config.ethertype, table_name, rule_idx))
return rule_props
def convert_ip(self, table_name, rule_idx, rule):
rule_props = {}
# FIXME: 0 is a valid protocol number, but openconfig seems to use it as a default value,
# so there isn't currently a good way to check if the user defined proto=0 or not.
if rule.ip.config.protocol:
if rule.ip.config.protocol in self.ip_protocol_map:
rule_props["IP_PROTOCOL"] = self.ip_protocol_map[rule.ip.config.protocol]
else:
try:
int(rule.ip.config.protocol)
except:
raise AclLoaderException("Unknown rule protocol %s in table %s, rule %d!" % (
rule.ip.config.protocol, table_name, rule_idx))
rule_props["IP_PROTOCOL"] = rule.ip.config.protocol
if rule.ip.config.source_ip_address:
source_ip_address = rule.ip.config.source_ip_address
if ipaddress.ip_network(source_ip_address).version == 4:
rule_props["SRC_IP"] = source_ip_address
else:
rule_props["SRC_IPV6"] = source_ip_address
if rule.ip.config.destination_ip_address:
destination_ip_address = rule.ip.config.destination_ip_address
if ipaddress.ip_network(destination_ip_address).version == 4:
rule_props["DST_IP"] = destination_ip_address
else:
rule_props["DST_IPV6"] = destination_ip_address
# NOTE: DSCP is available only for MIRROR table
if self.is_table_mirror(table_name):
if rule.ip.config.dscp:
rule_props["DSCP"] = rule.ip.config.dscp
return rule_props
def convert_port(self, port):
"""
Convert port field format from openconfig ACL to Config DB schema
:param port: String, ACL port number or range in openconfig format
:return: Tuple, first value is converted port string,
second value is boolean, True if value is a port range, False
if it is a single port value
"""
# OpenConfig port range is of the format "####..####", whereas
# Config DB format is "####-####"
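# e.g. "1024..2048" -> ("1024-2048", True), while a single port "80" -> ("80", False)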
if ".." in port:
return port.replace("..", "-"), True
else:
return port, False
def convert_transport(self, table_name, rule_idx, rule):
rule_props = {}
if rule.transport.config.source_port:
port, is_range = self.convert_port(str(rule.transport.config.source_port))
rule_props["L4_SRC_PORT_RANGE" if is_range else "L4_SRC_PORT"] = port
if rule.transport.config.destination_port:
port, is_range = self.convert_port(str(rule.transport.config.destination_port))
rule_props["L4_DST_PORT_RANGE" if is_range else "L4_DST_PORT"] = port
tcp_flags = 0x00
for flag in rule.transport.config.tcp_flags:
if flag == "TCP_FIN":
tcp_flags |= 0x01
if flag == "TCP_SYN":
tcp_flags |= 0x02
if flag == "TCP_RST":
tcp_flags |= 0x04
if flag == "TCP_PSH":
tcp_flags |= 0x08
if flag == "TCP_ACK":
tcp_flags |= 0x10
if flag == "TCP_URG":
tcp_flags |= 0x20
if flag == "TCP_ECE":
tcp_flags |= 0x40
if flag == "TCP_CWR":
tcp_flags |= 0x80
if tcp_flags:
rule_props["TCP_FLAGS"] = '0x{:02x}/0x{:02x}'.format(tcp_flags, tcp_flags)
return rule_props
def convert_input_interface(self, table_name, rule_idx, rule):
rule_props = {}
if rule.input_interface.interface_ref.config.interface:
rule_props["IN_PORTS"] = rule.input_interface.interface_ref.config.interface
return rule_props
def convert_rule_to_db_schema(self, table_name, rule):
"""
Convert rules format from openconfig ACL to Config DB schema
:param table_name: ACL table name to which rule belong
:param rule: ACL rule in openconfig format
:return: dict with Config DB schema
"""
rule_idx = int(rule.config.sequence_id)
rule_props = {}
rule_data = {(table_name, "RULE_" + str(rule_idx)): rule_props}
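# Lower OpenConfig sequence IDs map to higher Config DB priorities
# (PRIORITY = max_priority - sequence_id), preserving rule ordering.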
rule_props["PRIORITY"] = str(self.max_priority - rule_idx)
deep_update(rule_props, self.convert_action(table_name, rule_idx, rule))
deep_update(rule_props, self.convert_l2(table_name, rule_idx, rule))
deep_update(rule_props, self.convert_ip(table_name, rule_idx, rule))
deep_update(rule_props, self.convert_transport(table_name, rule_idx, rule))
deep_update(rule_props, self.convert_input_interface(table_name, rule_idx, rule))
return rule_data
def deny_rule(self, table_name):
"""
Create default deny rule in Config DB format
:param table_name: ACL table name to which rule belong
:return: dict with Config DB schema
"""
rule_props = {}
rule_data = {(table_name, "DEFAULT_RULE"): rule_props}
rule_props["PRIORITY"] = str(self.min_priority)
rule_props["PACKET_ACTION"] = "DROP"
if 'v6' in table_name.lower():
rule_props["IP_TYPE"] = "IPV6ANY" # ETHERTYPE is not supported for DATAACLV6
else:
rule_props["ETHER_TYPE"] = str(self.ethertype_map["ETHERTYPE_IPV4"])
return rule_data
def convert_rules(self):
"""
Convert rules in openconfig ACL format to Config DB schema
:return:
"""
for acl_set_name in self.yang_acl.acl.acl_sets.acl_set:
table_name = acl_set_name.replace(" ", "_").replace("-", "_").upper()
acl_set = self.yang_acl.acl.acl_sets.acl_set[acl_set_name]
if not self.is_table_valid(table_name):
warning("%s table does not exist" % (table_name))
continue
if self.current_table is not None and self.current_table != table_name:
continue
for acl_entry_name in acl_set.acl_entries.acl_entry:
acl_entry = acl_set.acl_entries.acl_entry[acl_entry_name]
try:
rule = self.convert_rule_to_db_schema(table_name, acl_entry)
deep_update(self.rules_info, rule)
except AclLoaderException as ex:
error("Error processing rule %s: %s. Skipped." % (acl_entry_name, ex))
if not self.is_table_mirror(table_name):
deep_update(self.rules_info, self.deny_rule(table_name))
def full_update(self):
"""
Perform full update of ACL rules configuration. All existing rules
will be removed. New rules loaded from file will be installed. If
the current_table is not empty, only rules within that table will
be removed and new rules in that table will be installed.
:return:
"""
for key in self.rules_db_info:
if self.current_table is None or self.current_table == key[0]:
self.configdb.mod_entry(self.ACL_RULE, key, None)
# Program for per front asic namespace also if present
for namespace_configdb in self.per_npu_configdb.values():
namespace_configdb.mod_entry(self.ACL_RULE, key, None)
self.configdb.mod_config({self.ACL_RULE: self.rules_info})
# Program for per front asic namespace also if present
for namespace_configdb in self.per_npu_configdb.values():
namespace_configdb.mod_config({self.ACL_RULE: self.rules_info})
def incremental_update(self):
"""
Perform incremental ACL rules configuration update. Get existing rules from
Config DB. Compare with rules specified in file and perform corresponding
modifications.
:return:
"""
# TODO: Until we test ASIC behavior, we cannot assume that we can insert
# dataplane ACLs and shift existing ACLs. Therefore, we perform a full
# update on dataplane ACLs, and only perform an incremental update on
# control plane ACLs.
new_rules = set(self.rules_info.keys())
new_dataplane_rules = set()
new_controlplane_rules = set()
current_rules = set(self.rules_db_info.keys())
current_dataplane_rules = set()
current_controlplane_rules = set()
for key in new_rules:
table_name = key[0]
if self.tables_db_info[table_name]['type'].upper() == self.ACL_TABLE_TYPE_CTRLPLANE:
new_controlplane_rules.add(key)
else:
new_dataplane_rules.add(key)
for key in current_rules:
table_name = key[0]
if self.tables_db_info[table_name]['type'].upper() == self.ACL_TABLE_TYPE_CTRLPLANE:
current_controlplane_rules.add(key)
else:
current_dataplane_rules.add(key)
# Remove all existing dataplane rules
for key in current_dataplane_rules:
self.configdb.mod_entry(self.ACL_RULE, key, None)
# Program for per-asic namespace also if present
for namespace_configdb in self.per_npu_configdb.values():
namespace_configdb.mod_entry(self.ACL_RULE, key, None)
# Add all new dataplane rules
for key in new_dataplane_rules:
self.configdb.mod_entry(self.ACL_RULE, key, self.rules_info[key])
# Program for per-asic namespace corresponding to front asic also if present.
for namespace_configdb in self.per_npu_configdb.values():
namespace_configdb.mod_entry(self.ACL_RULE, key, self.rules_info[key])
added_controlplane_rules = new_controlplane_rules.difference(current_controlplane_rules)
removed_controlplane_rules = current_controlplane_rules.difference(new_controlplane_rules)
existing_controlplane_rules = new_rules.intersection(current_controlplane_rules)
for key in added_controlplane_rules:
self.configdb.mod_entry(self.ACL_RULE, key, self.rules_info[key])
# Program for per-asic namespace corresponding to front asic also if present.
# For control plane ACL it's not needed but to keep all db in sync program everywhere
for namespace_configdb in self.per_npu_configdb.values():
namespace_configdb.mod_entry(self.ACL_RULE, key, self.rules_info[key])
for key in removed_controlplane_rules:
self.configdb.mod_entry(self.ACL_RULE, key, None)
# Program for per-asic namespace corresponding to front asic also if present.
# For control plane ACL it's not needed but to keep all db in sync program everywhere
for namespace_configdb in self.per_npu_configdb.values():
namespace_configdb.mod_entry(self.ACL_RULE, key, None)
for key in existing_controlplane_rules:
if self.rules_info[key] != self.rules_db_info[key]:
self.configdb.set_entry(self.ACL_RULE, key, self.rules_info[key])
# Program for per-asic namespace corresponding to front asic also if present.
# For control plane ACL it's not needed but to keep all db in sync program everywhere
for namespace_configdb in self.per_npu_configdb.values():
namespace_configdb.set_entry(self.ACL_RULE, key, self.rules_info[key])
def delete(self, table=None, rule=None):
"""
:param table:
:param rule:
:return:
"""
for key in self.rules_db_info:
if not table or table == key[0]:
if not rule or rule == key[1]:
self.configdb.set_entry(self.ACL_RULE, key, None)
# Program for per-asic namespace corresponding to front asic also if present.
for namespace_configdb in self.per_npu_configdb.values():
namespace_configdb.set_entry(self.ACL_RULE, key, None)
def show_table(self, table_name):
"""
Show ACL table configuration.
:param table_name: Optional. ACL table name. Filter tables by specified name.
:return:
"""
header = ("Name", "Type", "Binding", "Description", "Stage")
data = []
for key, val in self.get_tables_db_info().items():
if table_name and key != table_name:
continue
stage = val.get("stage", Stage.INGRESS).lower()
if val["type"] == AclLoader.ACL_TABLE_TYPE_CTRLPLANE:
services = natsorted(val["services"])
data.append([key, val["type"], services[0], val["policy_desc"], stage])
if len(services) > 1:
for service in services[1:]:
data.append(["", "", service, "", ""])
else:
if not val["ports"]:
data.append([key, val["type"], "", val["policy_desc"], stage])
else:
ports = natsorted(val["ports"])
data.append([key, val["type"], ports[0], val["policy_desc"], stage])
if len(ports) > 1:
for port in ports[1:]:
data.append(["", "", port, "", ""])
print(tabulate.tabulate(data, headers=header, tablefmt="simple", missingval=""))
def show_session(self, session_name):
"""
Show mirror session configuration.
:param session_name: Optional. Mirror session name. Filter sessions by specified name.
:return:
"""
erspan_header = ("Name", "Status", "SRC IP", "DST IP", "GRE", "DSCP", "TTL", "Queue",
"Policer", "Monitor Port", "SRC Port", "Direction")
span_header = ("Name", "Status", "DST Port", "SRC Port", "Direction", "Queue", "Policer")
erspan_data = []
span_data = []
for key, val in self.get_sessions_db_info().items():
if session_name and key != session_name:
continue
if val.get("type") == "SPAN":
span_data.append([key, val.get("status", ""), val.get("dst_port", ""),
val.get("src_port", ""), val.get("direction", "").lower(),
val.get("queue", ""), val.get("policer", "")])
else:
erspan_data.append([key, val.get("status", ""), val.get("src_ip", ""),
val.get("dst_ip", ""), val.get("gre_type", ""), val.get("dscp", ""),
val.get("ttl", ""), val.get("queue", ""), val.get("policer", ""),
val.get("monitor_port", ""), val.get("src_port", ""), val.get("direction", "").lower()])
print("ERSPAN Sessions")
print(tabulate.tabulate(erspan_data, headers=erspan_header, tablefmt="simple", missingval=""))
print("\nSPAN Sessions")
print(tabulate.tabulate(span_data, headers=span_header, tablefmt="simple", missingval=""))
def show_policer(self, policer_name):
"""
Show policer configuration.
:param policer_name: Optional. Policer name. Filter policers by specified name.
:return:
"""
header = ("Name", "Type", "Mode", "CIR", | |
(all phases)
name (str): phase series name
y0_dict (dict or None): dictionary of initial values or None
- key (str): variable name
- value (float): initial value
Returns:
pandas.DataFrame:
                Index
                    reset index
Columns
- Date (pandas.TimeStamp)
- variables (int)
- Population (int)
- Rt (float)
- parameter values (float)
- day parameter values (float)
"""
sim_df = self.simulate(phases=phases, name=name, y0_dict=y0_dict, show_figure=False)
param_df = self._track_param(name=name)
return pd.merge(
sim_df, param_df, how="inner", left_on=self.DATE, right_index=True, sort=True)
def track(self, phases=None, with_actual=True, y0_dict=None):
"""
Show values of parameters and variables in one dataframe.
Args:
            phases (list[str] or None): phases to show or None (all phases)
            with_actual (bool): if True, the actual number of cases will be included as the "Actual" scenario
y0_dict (dict or None): dictionary of initial values or None
- key (str): variable name
- value (float): initial value
Returns:
pandas.DataFrame: tracking records
Index
reset index
Columns
- Scenario (str)
- Date (pandas.TimeStamp)
- variables (int)
- Population (int)
- Rt (float)
- parameter values (float)
- day parameter values (float)
"""
dataframes = []
append = dataframes.append
for name in self._tracker_dict.keys():
df = self._track(phases=phases, name=name, y0_dict=y0_dict)
df.insert(0, self.SERIES, name)
append(df)
if with_actual:
df = self._data.records(extras=False)
df.insert(0, self.SERIES, self.ACTUAL)
append(df)
return pd.concat(dataframes, axis=0, sort=False)
def _history(self, target, phases=None, with_actual=True, y0_dict=None):
"""
Show the history of variables and parameter values to compare scenarios.
Args:
target (str): parameter or variable name to show (Rt, Infected etc.)
            phases (list[str] or None): phases to show or None (all phases)
with_actual (bool): if True and @target is a variable name, show actual number of cases
y0_dict (dict or None): dictionary of initial values or None
- key (str): variable name
- value (float): initial value
Returns:
pandas.DataFrame
"""
# Include actual data or not
with_actual = with_actual and target in self.VALUE_COLUMNS
# Get tracking data
df = self.track(phases=phases, with_actual=with_actual, y0_dict=y0_dict)
if target not in df.columns:
col_str = ", ".join(list(df.columns))
raise KeyError(f"@target must be selected from {col_str}, but {target} was applied.")
# Select the records of target variable
return df.pivot_table(
values=target, index=self.DATE, columns=self.SERIES, aggfunc="last")
def history(self, target, phases=None, with_actual=True, y0_dict=None, **kwargs):
"""
Show the history of variables and parameter values to compare scenarios.
Args:
target (str): parameter or variable name to show (Rt, Infected etc.)
            phases (list[str] or None): phases to show or None (all phases)
with_actual (bool): if True and @target is a variable name, show actual number of cases
y0_dict (dict or None): dictionary of initial values or None
- key (str): variable name
- value (float): initial value
kwargs: the other keyword arguments of Scenario.line_plot()
Returns:
pandas.DataFrame
"""
df = self._history(target=target, phases=phases, with_actual=with_actual, y0_dict=y0_dict)
df.dropna(subset=[col for col in df.columns if col != self.ACTUAL], inplace=True)
if target == self.RT:
ylabel = self.RT_FULL
elif target in self.VALUE_COLUMNS:
ylabel = f"The number of {target.lower()} cases"
else:
ylabel = target
title = f"{self.area}: {ylabel} over time"
tracker = self._tracker(self.MAIN)
self.line_plot(
df=df, title=title, ylabel=ylabel, v=tracker.change_dates(), math_scale=False,
h=1.0 if target == self.RT else None, **kwargs)
return df
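    # Usage sketch (hedged; "snl" is an illustrative Scenario instance, not defined here):
    #   >>> snl.history(target="Rt")        # compare reproduction numbers across scenarios
    #   >>> snl.history(target="Infected")  # compare simulated and actual case counts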
def history_rate(self, params=None, name="Main", **kwargs):
"""
Show change rates of parameter values in one figure.
We can find the parameters which increased/decreased significantly.
Args:
params (list[str] or None): parameters to show
name (str): phase series name
kwargs: the other keyword arguments of Scenario.line_plot()
Returns:
pandas.DataFrame
"""
df = self._track_param(name=name)
model = self._tracker(name).last_model
cols = list(set(df.columns) & set(model.PARAMETERS))
if params is not None:
if not isinstance(params, (list, set)):
raise TypeError(f"@params must be a list of parameters, but {params} were applied.")
cols = list(set(cols) & set(params)) or cols
df = df.loc[:, cols] / df.loc[df.index[0], cols]
# Show figure
f_date = df.index[0].strftime(self.DATE_FORMAT)
title = f"{self.area}: {model.NAME} parameter change rates over time (1.0 on {f_date})"
ylabel = f"Value per that on {f_date}"
title = f"{self.area}: {ylabel} over time"
tracker = self._tracker(self.MAIN)
self.line_plot(
df=df, title=title, ylabel=ylabel, v=tracker.change_dates(), math_scale=False, **kwargs)
return df
def retrospective(self, beginning_date, model, control="Main", target="Target", **kwargs):
"""
Perform retrospective analysis.
Compare the actual series of phases (control) and
series of phases with specified parameters (target).
Args:
beginning_date (str): when the parameter values start to be changed from actual values
model (covsirphy.ModelBase): ODE model
control (str): scenario name of control
target (str): scenario name of target
kwargs: keyword argument of parameter values and Estimator.run()
Note:
When parameter values are not specified,
actual values of the last date before the beginning date will be used.
"""
param_dict = {k: v for (k, v) in kwargs.items() if k in model.PARAMETERS}
est_kwargs = dict(kwargs.items() - param_dict.items())
# Control
self.clear(name=control, include_past=True)
self.trend(name=control, show_figure=False)
try:
self.separate(date=beginning_date, name=control)
except ValueError:
pass
self.estimate(model, name=control, **est_kwargs)
# Target
self.clear(name=target, include_past=False, template=control)
phases_changed = [
self.num2str(i) for (i, ph) in enumerate(self._tracker(target).series)
if ph >= beginning_date]
self.delete(phases=phases_changed, name=target)
self.add(name=target, **param_dict)
self.estimate(model, name=target, **est_kwargs)
def score(self, variables=None, phases=None, past_days=None, name="Main", y0_dict=None, **kwargs):
"""
        Evaluate the accuracy of phase setting and parameter estimation, using all enabled phases or only some past days.
Args:
variables (list[str] or None): variables to use in calculation
phases (list[str] or None): phases to use in calculation
past_days (int or None): how many past days to use in calculation, natural integer
name(str): phase series name. If 'Main', main PhaseSeries will be used
y0_dict(dict[str, float] or None): dictionary of initial values of variables
kwargs: keyword arguments of covsirphy.Evaluator.score()
Returns:
float: score with the specified metrics (covsirphy.Evaluator.score())
Note:
If @variables is None, ["Infected", "Fatal", "Recovered"] will be used.
"Confirmed", "Infected", "Fatal" and "Recovered" can be used in @variables.
If @phases is None, all phases will be used.
@phases and @past_days can not be specified at the same time.
Note:
Please refer to covsirphy.Evaluator.score() for metrics.
"""
tracker = self._tracker(name)
if past_days is not None:
if phases is not None:
raise ValueError("@phases and @past_days cannot be specified at the same time.")
past_days = self._ensure_natural_int(past_days, name="past_days")
# Separate a phase, if possible
beginning_date = self.date_change(self._data.last_date, days=0 - past_days)
try:
tracker.separate(date=beginning_date)
except ValueError:
pass
            # Get the list of target phases
phases = [
self.num2str(num) for (num, unit)
in enumerate(tracker.series)
if unit >= beginning_date
]
return tracker.score(variables=variables, phases=phases, y0_dict=y0_dict, **kwargs)
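    # Usage sketch (hedged; "snl" is an illustrative Scenario instance):
    #   >>> snl.score(past_days=14)            # evaluate only the last 14 days
    #   >>> snl.score(variables=["Infected"])  # evaluate a single variable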
def estimate_delay(self, oxcgrt_data=None, indicator="Stringency_index",
target="Confirmed", percentile=25, limits=(7, 30), **kwargs):
"""
        Estimate the delay period [days], assuming the indicator impacts the target value with a delay.
        The average of the representative value (percentile) and the minimum limit will be returned.
Args:
oxcgrt_data (covsirphy.OxCGRTData): OxCGRT dataset
indicator (str): indicator name, a column of any registered datasets
target (str): target name, a column of any registered datasets
percentile (int): percentile to calculate the representative value, in (0, 100)
limits (tuple(int, int)): minimum/maximum size of the delay period [days]
kwargs: keyword arguments of DataHandler.estimate_delay()
Raises:
NotRegisteredMainError: either JHUData or PopulationData was not registered
SubsetNotFoundError: failed in subsetting because of lack of data
UserWarning: failed in calculating and returned the default value (recovery period)
Returns:
tuple(int, pandas.DataFrame):
                - int: the estimated number of days of delay [day] (average of the representative value and the minimum limit)
- pandas.DataFrame:
Index
reset index
Columns
- (int or float): column defined by @indicator
- (int or float): column defined by @target
- (int): column defined by @delay_name [days]
Note:
- Average recovered period of JHU dataset will be used as returned value when the estimated value was not in value_range.
- @oxcgrt_data argument was deprecated. Please use Scenario.register(extras=[oxcgrt_data]).
"""
min_size, max_days = limits
# Register OxCGRT data
if oxcgrt_data is not None:
warnings.warn(
"Please use Scenario.register(extras=[oxcgrt_data]) rather than Scenario.fit(oxcgrt_data).",
DeprecationWarning, stacklevel=1)
self.register(extras=[oxcgrt_data])
# Un-used arguments
if "value_range" in kwargs:
warnings.warn("@value_range argument was deprecated.", DeprecationWarning, stacklevel=1)
# Calculate delay values
df = self._data.estimate_delay(
indicator=indicator, target=target, min_size=min_size, delay_name="Period Length",
**find_args(DataHandler.estimate_delay, **kwargs))
# Remove NAs and sort
df.dropna(subset=["Period Length"], inplace=True)
df.sort_values("Period Length", inplace=True)
df.reset_index(inplace=True, drop=True)
# Apply upper limit for delay period if max_days is set
if max_days is not None:
df = df[df["Period Length"] <= max_days]
# Calculate representative value
if df.empty:
return (self._data.recovery_period(), df)
# Calculate percentile
Q1 = np.percentile(df["Period Length"], percentile, interpolation="midpoint")
low_lim = min_size
delay_period = int((low_lim + Q1) / 2)
return (int(delay_period), df)
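    # Worked example (illustrative numbers): with limits=(7, 30) and a 25th-percentile
    # period length Q1 of 21 days, delay_period = int((7 + 21) / 2) = 14 days.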
def | |
iris = h2o.import_file("http://h2o-public-test-data.s3.amazonaws.com/smalldata/iris/iris.csv")
>>> dl1 = H2ODeepLearningEstimator(hidden=[10,10],
... export_weights_and_biases=True)
>>> dl1.train(x=list(range(4)), y=4, training_frame=iris)
>>> p1 = dl1.model_performance(iris).logloss()
>>> ll1 = dl1.predict(iris)
>>> print(p1)
>>> w1 = dl1.weights(0)
>>> w2 = dl1.weights(1)
>>> w3 = dl1.weights(2)
>>> b1 = dl1.biases(0)
>>> b2 = dl1.biases(1)
>>> b3 = dl1.biases(2)
>>> dl2 = H2ODeepLearningEstimator(hidden=[10,10],
... initial_weights=[w1, w2, w3],
... initial_biases=[b1, b2, b3],
... epochs=0)
>>> dl2.train(x=list(range(4)), y=4, training_frame=iris)
>>> dl2.initial_biases
""",
initial_weights="""
>>> iris = h2o.import_file("http://h2o-public-test-data.s3.amazonaws.com/smalldata/iris/iris.csv")
>>> dl1 = H2ODeepLearningEstimator(hidden=[10,10],
... export_weights_and_biases=True)
>>> dl1.train(x=list(range(4)), y=4, training_frame=iris)
>>> p1 = dl1.model_performance(iris).logloss()
>>> ll1 = dl1.predict(iris)
>>> print(p1)
>>> w1 = dl1.weights(0)
>>> w2 = dl1.weights(1)
>>> w3 = dl1.weights(2)
>>> b1 = dl1.biases(0)
>>> b2 = dl1.biases(1)
>>> b3 = dl1.biases(2)
>>> dl2 = H2ODeepLearningEstimator(hidden=[10,10],
... initial_weights=[w1, w2, w3],
... initial_biases=[b1, b2, b3],
... epochs=0)
>>> dl2.train(x=list(range(4)), y=4, training_frame=iris)
>>> dl2.initial_weights
""",
initial_weight_distribution="""
>>> cars = h2o.import_file("https://s3.amazonaws.com/h2o-public-test-data/smalldata/junit/cars_20mpg.csv")
>>> cars["economy_20mpg"] = cars["economy_20mpg"].asfactor()
>>> predictors = ["displacement","power","weight","acceleration","year"]
>>> response = "economy_20mpg"
>>> train, valid = cars.split_frame(ratios = [.8], seed = 1234)
>>> cars_dl = H2ODeepLearningEstimator(initial_weight_distribution = "Uniform",
... seed = 1234)
>>> cars_dl.train(x = predictors,
... y = response,
... training_frame = train,
... validation_frame = valid)
>>> cars_dl.auc()
""",
initial_weight_scale="""
>>> cars = h2o.import_file("https://s3.amazonaws.com/h2o-public-test-data/smalldata/junit/cars_20mpg.csv")
>>> cars["economy_20mpg"] = cars["economy_20mpg"].asfactor()
>>> predictors = ["displacement","power","weight","acceleration","year"]
>>> response = "economy_20mpg"
>>> train, valid = cars.split_frame(ratios = [.8], seed = 1234)
>>> cars_dl = H2ODeepLearningEstimator(initial_weight_scale = 1.5,
... seed = 1234)
>>> cars_dl.train(x = predictors,
... y = response,
... training_frame = train,
... validation_frame = valid)
>>> cars_dl.auc()
""",
input_dropout_ratio="""
>>> cars = h2o.import_file("https://s3.amazonaws.com/h2o-public-test-data/smalldata/junit/cars_20mpg.csv")
>>> cars["economy_20mpg"] = cars["economy_20mpg"].asfactor()
>>> predictors = ["displacement","power","weight","acceleration","year"]
>>> response = "economy_20mpg"
>>> train, valid = cars.split_frame(ratios = [.8], seed = 1234)
>>> cars_dl = H2ODeepLearningEstimator(input_dropout_ratio = 0.2,
... seed = 1234)
>>> cars_dl.train(x = predictors,
... y = response,
... training_frame = train,
... validation_frame = valid)
>>> cars_dl.auc()
""",
keep_cross_validation_fold_assignment="""
>>> cars = h2o.import_file("https://s3.amazonaws.com/h2o-public-test-data/smalldata/junit/cars_20mpg.csv")
>>> cars["economy_20mpg"] = cars["economy_20mpg"].asfactor()
>>> predictors = ["displacement","power","weight","acceleration","year"]
>>> response = "economy_20mpg"
>>> train, valid = cars.split_frame(ratios = [.8], seed = 1234)
>>> cars_dl = H2ODeepLearningEstimator(keep_cross_validation_fold_assignment = True,
... seed = 1234)
>>> cars_dl.train(x = predictors,
... y = response,
... training_frame = train,
... validation_frame = valid)
>>> print(cars_dl.cross_validation_fold_assignment())
""",
keep_cross_validation_models="""
>>> cars = h2o.import_file("https://s3.amazonaws.com/h2o-public-test-data/smalldata/junit/cars_20mpg.csv")
>>> cars["economy_20mpg"] = cars["economy_20mpg"].asfactor()
>>> predictors = ["displacement","power","weight","acceleration","year"]
>>> response = "economy_20mpg"
>>> train, valid = cars.split_frame(ratios = [.8], seed = 1234)
>>> cars_dl = H2ODeepLearningEstimator(keep_cross_validation_models = True,
... seed = 1234)
>>> cars_dl.train(x = predictors,
... y = response,
... training_frame = train,
... validation_frame = valid)
>>> print(cars_dl.cross_validation_models())
""",
keep_cross_validation_predictions="""
>>> cars = h2o.import_file("https://s3.amazonaws.com/h2o-public-test-data/smalldata/junit/cars_20mpg.csv")
>>> cars["economy_20mpg"] = cars["economy_20mpg"].asfactor()
>>> predictors = ["displacement","power","weight","acceleration","year"]
>>> response = "economy_20mpg"
>>> train, valid = cars.split_frame(ratios = [.8], seed = 1234)
>>> cars_dl = H2ODeepLearningEstimator(keep_cross_validation_predictions = True,
... seed = 1234)
>>> cars_dl.train(x = predictors,
... y = response,
... training_frame = train)
>>> print(cars_dl.cross_validation_predictions())
""",
l1="""
>>> covtype = h2o.import_file("https://s3.amazonaws.com/h2o-public-test-data/smalldata/covtype/covtype.20k.data")
>>> covtype[54] = covtype[54].asfactor()
>>> hh_imbalanced = H2ODeepLearningEstimator(l1=1e-5,
... activation="Rectifier",
... loss="CrossEntropy",
... hidden=[200,200],
... epochs=1,
... balance_classes=False,
... reproducible=True,
... seed=1234)
>>> hh_imbalanced.train(x=list(range(54)),y=54, training_frame=covtype)
>>> hh_imbalanced.mse()
""",
l2="""
>>> covtype = h2o.import_file("https://s3.amazonaws.com/h2o-public-test-data/smalldata/covtype/covtype.20k.data")
>>> covtype[54] = covtype[54].asfactor()
>>> hh_imbalanced = H2ODeepLearningEstimator(l2=1e-5,
... activation="Rectifier",
... loss="CrossEntropy",
... hidden=[200,200],
... epochs=1,
... balance_classes=False,
... reproducible=True,
... seed=1234)
>>> hh_imbalanced.train(x=list(range(54)),y=54, training_frame=covtype)
>>> hh_imbalanced.mse()
""",
loss="""
>>> covtype = h2o.import_file("https://s3.amazonaws.com/h2o-public-test-data/smalldata/covtype/covtype.20k.data")
>>> covtype[54] = covtype[54].asfactor()
>>> hh_imbalanced = H2ODeepLearningEstimator(l1=1e-5,
... activation="Rectifier",
... loss="CrossEntropy",
... hidden=[200,200],
... epochs=1,
... balance_classes=False,
... reproducible=True,
... seed=1234)
>>> hh_imbalanced.train(x=list(range(54)),y=54, training_frame=covtype)
>>> hh_imbalanced.mse()
""",
max_after_balance_size="""
>>> covtype = h2o.import_file("https://s3.amazonaws.com/h2o-public-test-data/smalldata/covtype/covtype.20k.data")
>>> covtype[54] = covtype[54].asfactor()
>>> predictors = covtype.columns[0:54]
>>> response = 'C55'
>>> train, valid = covtype.split_frame(ratios = [.8], seed = 1234)
>>> max = .85
>>> cov_dl = H2ODeepLearningEstimator(balance_classes = True,
... max_after_balance_size = max,
... seed = 1234)
>>> cov_dl.train(x = predictors,
... y = response,
... training_frame = train,
... validation_frame = valid)
>>> cov_dl.logloss()
""",
max_categorical_features="""
>>> covtype = h2o.import_file("https://s3.amazonaws.com/h2o-public-test-data/smalldata/covtype/covtype.20k.data")
>>> covtype[54] = covtype[54].asfactor()
>>> predictors = covtype.columns[0:54]
>>> response = 'C55'
>>> train, valid = covtype.split_frame(ratios = [.8], seed = 1234)
>>> cov_dl = H2ODeepLearningEstimator(balance_classes = True,
... max_categorical_features = 2147483647,
... seed = 1234)
>>> cov_dl.train(x = predictors,
... y = response,
... training_frame = train,
... validation_frame = valid)
>>> cov_dl.logloss()
""",
max_hit_ratio_k="""
>>> covtype = h2o.import_file("https://s3.amazonaws.com/h2o-public-test-data/smalldata/covtype/covtype.20k.data")
>>> covtype[54] = covtype[54].asfactor()
>>> predictors = covtype.columns[0:54]
>>> response = 'C55'
>>> train, valid = covtype.split_frame(ratios = [.8], seed = 1234)
>>> cov_dl = H2ODeepLearningEstimator(max_hit_ratio_k = 3,
... seed = 1234)
>>> cov_dl.train(x = predictors,
... y = response,
... training_frame = train,
... validation_frame = valid)
>>> cov_dl.show()
""",
max_runtime_secs="""
>>> cars = h2o.import_file("https://s3.amazonaws.com/h2o-public-test-data/smalldata/junit/cars_20mpg.csv")
>>> cars["economy_20mpg"] = cars["economy_20mpg"].asfactor()
>>> predictors = ["displacement","power","weight","acceleration","year"]
>>> response = "economy_20mpg"
>>> train, valid = cars.split_frame(ratios = [.8], seed = 1234)
>>> cars_dl = H2ODeepLearningEstimator(max_runtime_secs = 10,
... seed = 1234)
>>> cars_dl.train(x = predictors,
... y = response,
... training_frame = train,
... validation_frame = valid)
>>> cars_dl.auc()
""",
max_w2="""
>>> covtype = h2o.import_file("https://s3.amazonaws.com/h2o-public-test-data/smalldata/covtype/covtype.20k.data")
>>> covtype[54] = covtype[54].asfactor()
>>> predictors = covtype.columns[0:54]
>>> response = 'C55'
>>> train, valid = covtype.split_frame(ratios = [.8], seed = 1234)
>>> cov_dl = H2ODeepLearningEstimator(activation = "RectifierWithDropout",
... hidden = [10,10],
... epochs = 10,
... input_dropout_ratio = 0.2,
... l1 = 1e-5,
... max_w2 = 10.5,
... stopping_rounds = 0)
>>> cov_dl.train(x = predictors,
... y = response,
... training_frame = train,
... validation_frame = valid)
>>> cov_dl.mse()
""",
mini_batch_size="""
>>> covtype = h2o.import_file("https://s3.amazonaws.com/h2o-public-test-data/smalldata/covtype/covtype.20k.data")
>>> covtype[54] = covtype[54].asfactor()
>>> predictors = covtype.columns[0:54]
>>> response = 'C55'
>>> train, valid = covtype.split_frame(ratios = [.8], seed = 1234)
>>> cov_dl = H2ODeepLearningEstimator(activation = "RectifierWithDropout",
... hidden = [10,10],
... epochs = 10,
... input_dropout_ratio = 0.2,
... l1 = 1e-5,
... max_w2 = 10.5,
    ...                                    stopping_rounds = 0,
    ...                                    mini_batch_size = 35)
>>> cov_dl.train(x = predictors,
... y = response,
... training_frame = train,
... validation_frame = valid)
>>> cov_dl.mse()
""",
missing_values_handling="""
>>> boston = h2o.import_file("https://s3.amazonaws.com/h2o-public-test-data/smalldata/gbm_test/BostonHousing.csv")
>>> predictors = boston.columns[:-1]
>>> response = "medv"
>>> boston['chas'] = boston['chas'].asfactor()
>>> boston.insert_missing_values()
>>> train, valid = boston.split_frame(ratios = [.8])
>>> boston_dl = H2ODeepLearningEstimator(missing_values_handling = "skip")
>>> boston_dl.train(x = predictors,
... y = response,
... training_frame = train,
... validation_frame = valid)
>>> boston_dl.mse()
""",
momentum_ramp="""
>>> airlines= h2o.import_file("https://s3.amazonaws.com/h2o-public-test-data/smalldata/airlines/allyears2k_headers.zip")
>>> predictors = ["Year","Month","DayofMonth","DayOfWeek","CRSDepTime",
... "CRSArrTime","UniqueCarrier","FlightNum"]
>>> response_col = "IsDepDelayed"
>>> airlines_dl = H2ODeepLearningEstimator(hidden=[200,200],
... activation="Rectifier",
... input_dropout_ratio=0.0,
... momentum_start=0.9,
... momentum_stable=0.99,
... momentum_ramp=1e7,
... epochs=100,
... stopping_rounds=4,
... train_samples_per_iteration=30000,
... mini_batch_size=32,
... score_duty_cycle=0.25,
... score_interval=1)
>>> airlines_dl.train(x = predictors,
... y = response_col,
... training_frame = airlines)
>>> airlines_dl.mse()
""",
momentum_stable="""
>>> airlines= h2o.import_file("https://s3.amazonaws.com/h2o-public-test-data/smalldata/airlines/allyears2k_headers.zip")
>>> predictors = ["Year","Month","DayofMonth","DayOfWeek","CRSDepTime",
... "CRSArrTime","UniqueCarrier","FlightNum"]
>>> response_col = "IsDepDelayed"
>>> airlines_dl = H2ODeepLearningEstimator(hidden=[200,200],
... activation="Rectifier",
... input_dropout_ratio=0.0,
... momentum_start=0.9,
... momentum_stable=0.99,
... momentum_ramp=1e7,
... epochs=100,
... stopping_rounds=4,
... train_samples_per_iteration=30000,
... mini_batch_size=32,
... score_duty_cycle=0.25,
... score_interval=1)
>>> airlines_dl.train(x = predictors,
... y = response_col,
... training_frame = airlines)
>>> airlines_dl.mse()
""",
momentum_start="""
>>> airlines= h2o.import_file("https://s3.amazonaws.com/h2o-public-test-data/smalldata/airlines/allyears2k_headers.zip")
>>> predictors = ["Year","Month","DayofMonth","DayOfWeek","CRSDepTime",
... "CRSArrTime","UniqueCarrier","FlightNum"]
>>> response_col = "IsDepDelayed"
>>> airlines_dl = H2ODeepLearningEstimator(hidden=[200,200],
... activation="Rectifier",
... input_dropout_ratio=0.0,
... momentum_start=0.9,
... momentum_stable=0.99,
... momentum_ramp=1e7,
... epochs=100,
... stopping_rounds=4,
... train_samples_per_iteration=30000,
... mini_batch_size=32,
... score_duty_cycle=0.25,
... score_interval=1)
>>> airlines_dl.train(x = predictors,
... y = response_col,
... training_frame = airlines)
>>> airlines_dl.mse()
""",
nesterov_accelerated_gradient="""
>>> train = h2o.import_file("https://s3.amazonaws.com/h2o-public-test-data/bigdata/laptop/mnist/train.csv.gz")
>>> test = h2o.import_file("https://s3.amazonaws.com/h2o-public-test-data/bigdata/laptop/mnist/test.csv.gz")
>>> predictors = list(range(0,784))
>>> resp = 784
>>> train[resp] = train[resp].asfactor()
>>> test[resp] = test[resp].asfactor()
>>> nclasses = train[resp].nlevels()[0]
>>> model = H2ODeepLearningEstimator(activation = "RectifierWithDropout",
... adaptive_rate = False,
... rate = 0.01,
... rate_decay = 0.9,
... rate_annealing = 1e-6,
... momentum_start = 0.95,
... momentum_ramp = 1e5,
... momentum_stable = 0.99,
... nesterov_accelerated_gradient = False,
... input_dropout_ratio = 0.2,
... train_samples_per_iteration = 20000,
... classification_stop = -1,
... l1 = 1e-5)
>>> model.train (x = predictors,
... y = resp,
... training_frame = train,
... validation_frame = test)
>>> model.model_performance()
""",
nfolds="""
>>> cars = h2o.import_file("https://s3.amazonaws.com/h2o-public-test-data/smalldata/junit/cars_20mpg.csv")
>>> cars["economy_20mpg"] = cars["economy_20mpg"].asfactor()
>>> predictors = ["displacement","power","weight","acceleration","year"]
>>> response = "economy_20mpg"
>>> cars_dl = H2ODeepLearningEstimator(nfolds = 5, seed = 1234)
>>> cars_dl.train(x = predictors,
... y = response,
... training_frame = cars)
>>> cars_dl.auc()
""",
offset_column="""
>>> boston = h2o.import_file("https://s3.amazonaws.com/h2o-public-test-data/smalldata/gbm_test/BostonHousing.csv")
>>> predictors = boston.columns[:-1]
>>> response = "medv"
>>> boston['chas'] = boston['chas'].asfactor()
>>> boston["offset"] = boston["medv"].log()
>>> train, valid = boston.split_frame(ratios = [.8], seed = 1234)
>>> boston_dl = H2ODeepLearningEstimator(offset_column = "offset",
... seed = 1234)
>>> boston_dl.train(x = predictors,
... y = response,
... training_frame = train,
... validation_frame = valid)
>>> boston_dl.mse()
""",
overwrite_with_best_model="""
>>> boston = h2o.import_file("https://s3.amazonaws.com/h2o-public-test-data/smalldata/gbm_test/BostonHousing.csv")
>>> predictors = boston.columns[:-1]
>>> response = "medv"
>>> boston['chas'] = boston['chas'].asfactor()
>>> boston["offset"] = boston["medv"].log()
>>> train, valid = boston.split_frame(ratios = [.8], seed = 1234)
>>> boston_dl = H2ODeepLearningEstimator(overwrite_with_best_model = True,
... seed = 1234)
>>> boston_dl.train(x = predictors,
... y = response,
... training_frame = train,
... validation_frame = valid)
>>> boston_dl.mse()
""",
pretrained_autoencoder="""
>>> from h2o.estimators.deeplearning import H2OAutoEncoderEstimator
>>> resp = 784
>>> nfeatures = 20
>>> train = h2o.import_file("https://s3.amazonaws.com/h2o-public-test-data/bigdata/laptop/mnist/train.csv.gz")
>>> test = h2o.import_file("https://s3.amazonaws.com/h2o-public-test-data/bigdata/laptop/mnist/test.csv.gz")
>>> train[resp] = train[resp].asfactor()
>>> test[resp] = test[resp].asfactor()
>>> sid = train[0].runif(0)
>>> train_unsupervised = train[sid >=0.5]
>>> train_unsupervised.pop(resp)
>>> train_supervised = train[sid < 0.5]
>>> ae_model = H2OAutoEncoderEstimator(activation = "Tanh",
... hidden = [nfeatures],
... model_id = "ae_model",
... epochs = 1,
... ignore_const_cols = False,
... reproducible = True,
... seed = 1234)
>>> ae_model.train(list(range(resp)), training_frame = train_unsupervised)
>>> ae_model.mse()
>>> pretrained_model = H2ODeepLearningEstimator(activation="Tanh",
... hidden=[nfeatures],
... epochs=1,
... reproducible = True,
... seed=1234,
... ignore_const_cols=False,
... pretrained_autoencoder="ae_model")
>>> pretrained_model.train(list(range(resp)), resp,
... training_frame=train_supervised,
... validation_frame=test)
>>> pretrained_model.mse()
""",
quantile_alpha="""
>>> boston = h2o.import_file("https://s3.amazonaws.com/h2o-public-test-data/smalldata/gbm_test/BostonHousing.csv")
>>> predictors = boston.columns[:-1]
>>> response = "medv"
>>> boston['chas'] = boston['chas'].asfactor()
>>> train, valid = boston.split_frame(ratios = [.8], seed = 1234)
>>> boston_dl = H2ODeepLearningEstimator(distribution = "quantile",
... quantile_alpha = .8,
... seed = 1234)
>>> boston_dl.train(x = predictors,
... y = response,
... training_frame = train,
... validation_frame = valid)
>>> boston_dl.mse()
""",
quiet_mode="""
>>> titanic = h2o.import_file("https://s3.amazonaws.com/h2o-public-test-data/smalldata/gbm_test/titanic.csv")
>>> titanic['survived'] = titanic['survived'].asfactor()
>>> predictors = titanic.columns
>>> del predictors[1:3]
>>> response = 'survived'
>>> train, valid = titanic.split_frame(ratios = [.8], seed = 1234)
>>> titanic_dl = H2ODeepLearningEstimator(quiet_mode = True,
... seed = 1234)
>>> titanic_dl.train(x = predictors,
... y = response,
... training_frame = train,
... validation_frame = valid)
>>> titanic_dl.mse()
""",
rate="""
>>> train = h2o.import_file("https://s3.amazonaws.com/h2o-public-test-data/bigdata/laptop/mnist/train.csv.gz")
>>> test = h2o.import_file("https://s3.amazonaws.com/h2o-public-test-data/bigdata/laptop/mnist/test.csv.gz")
>>> predictors = list(range(0,784))
>>> resp = 784
>>> train[resp] = train[resp].asfactor()
>>> test[resp] = test[resp].asfactor()
>>> nclasses = train[resp].nlevels()[0]
>>> model = H2ODeepLearningEstimator(activation="RectifierWithDropout",
... adaptive_rate=False,
... rate=0.01,
... rate_decay=0.9,
... rate_annealing=1e-6,
... momentum_start=0.95,
... momentum_ramp=1e5,
... momentum_stable=0.99,
... nesterov_accelerated_gradient=False,
... input_dropout_ratio=0.2,
... train_samples_per_iteration=20000,
... classification_stop=-1,
... l1=1e-5)
>>> model.train (x=predictors,y=resp, training_frame=train, validation_frame=test)
>>> model.model_performance(valid=True)
""",
rate_annealing="""
>>> train = h2o.import_file("https://s3.amazonaws.com/h2o-public-test-data/bigdata/laptop/mnist/train.csv.gz")
>>> test = h2o.import_file("https://s3.amazonaws.com/h2o-public-test-data/bigdata/laptop/mnist/test.csv.gz")
>>> predictors | |
import math
import random
import cv2
import glob
import librosa
import os
from pathlib import Path
from audio_extractor import extract_audio_snippets
character_map = {0: 'kermit_the_frog',
1: 'waldorf_and_statler',
2: 'pig',
3: 'swedish_chef',
4: 'none'}
file_map = {'Muppets-02-01-01.txt': 1,
'Muppets-02-04-04.txt': 2,
'Muppets-03-04-03.txt': 3}
video_base_path = '../../videos/'
ground_truth_files_base_path = '../../ground_truth/'
audio_snippet_path = '../../audio/'
mfcc_feature_file = '../../ground_truth/audio/mfcc.txt'
ground_truth_txt_files = ['../../ground_truth/Muppets-02-01-01/Muppets-02-01-01.txt',
'../../ground_truth/Muppets-02-04-04/Muppets-02-04-04.txt',
'../../ground_truth/Muppets-03-04-03/Muppets-03-04-03.txt']
def print_ground_truth_statistics(data_locations_dict):
"""
The aim of this method is to print statistics of the ground truth.
:param data_locations_dict: dict holding the ground truth location data
"""
character_location_map = {}
total_samples = 0
print('Number of samples per character in ground truth:')
for i in range(0, len(character_map)):
no_of_samples = 0
for key, data_locations in data_locations_dict.items():
character_location_map[key] = data_locations[i]
no_of_samples += len(data_locations[i])
total_samples += no_of_samples
print('%s: %d' % (character_map[i], no_of_samples))
print('total_samples: %d' % total_samples)
def extract_ground_truth(character_location_map, rest_location_map, character_id, output_path):
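    """
    The aim of this method is to write the selected frames of the target character and the
    randomly sampled rest frames to disk as jpg files, recording their labels in labels.txt.
    :param character_location_map: dict mapping ground truth txt file names to frame ids of the target character
    :param rest_location_map: dict mapping the other class ids to their randomly sampled frame ids per txt file
    :param character_id: the id of the target character
    :param output_path: directory the images and labels.txt are written to
    """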
Path(output_path).mkdir(parents=True, exist_ok=True)
labels_file = open(output_path + 'labels.txt', 'w')
labels_file.write('txt_file, frame_id, label\n')
# write images of actual target character
print('[INFO] Start extracting images for target class: %d' % character_id)
for key, values in character_location_map.items():
video_path = video_base_path + key.split('.')[0] + '.avi'
cap = cv2.VideoCapture(video_path)
for value in values:
cap.set(cv2.CAP_PROP_POS_FRAMES, value)
ret, frame = cap.read()
if not ret:
print('Failed to read frame %d of video %r.' % (value, video_path))
labels_file.close()
exit(1)
filename = '%s/%d_%d_%d.jpg' % (output_path, file_map[key], value, character_id)
labels_file.write('%d, %d, %d\n' % (file_map[key], value, character_id))
cv2.imwrite(filename, frame)
print('[INFO] Start extracting randomly sampled images')
for key, values in rest_location_map.items():
for k, vals in values.items():
video_path = video_base_path + k.split('.')[0] + '.avi'
cap = cv2.VideoCapture(video_path)
for val in vals:
cap.set(cv2.CAP_PROP_POS_FRAMES, val)
ret, frame = cap.read()
if not ret:
print('Failed to read frame %d of video %r.' % (val, video_path))
labels_file.close()
exit(1)
filename = '%s/%d_%d_%d.jpg' % (output_path, file_map[k], val, key)
labels_file.write('%d, %d, %d\n' % (file_map[k], val, key))
cv2.imwrite(filename, frame)
labels_file.close()
def create_image_dataset_for_character(character_id, data_locations_dict, sub_path):
"""
The aim of this method is to generate a dataset for the specified character that consists of
50% images labeled with the specified character and 50% randomly sampled of all others
:param character_id: the id of the character
:param data_locations_dict: dict holding the ground truth location data
:return:
"""
character_location_map = {}
half_length = 0
for key, data_locations in data_locations_dict.items():
character_location_map[key] = data_locations[character_id]
half_length += len(data_locations[character_id])
# calculate data distribution over ground truth and per video
data_distribution_map = {}
total_samples = 0
for i in range(0, len(character_map)):
if i != character_id:
temp = {}
for key, data_locations in data_locations_dict.items():
total_samples += len(data_locations[i])
temp[key] = len(data_locations[i])
data_distribution_map[i] = temp
# calculate absolute rest distribution map
rest_data_distribution_map = {}
for key, values in data_distribution_map.items():
temp = {}
for k, v in values.items():
temp[k] = math.ceil((v / total_samples) * half_length)
rest_data_distribution_map[key] = temp
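    # Worked example of the proportional sampling above (illustrative numbers): if one non-target
    # class contributes 200 frames in a video out of 1000 non-target frames overall and the target
    # class has 500 frames, ceil((200 / 1000) * 500) = 100 frames are drawn from that video.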
# actually do the random sampling
rest_frameid_map = {}
random.seed(333)
for key, values in rest_data_distribution_map.items():
temp = {}
for k, v in values.items():
temp[k] = random.sample(data_locations_dict[k][key], v)
# check if sample is not a positive sample, if so replace it
for idx, value in enumerate(temp[k]):
if value in data_locations_dict[k][character_id]:
tmp_fnr = random.sample(data_locations_dict[k][key], 1)[0]
while tmp_fnr in data_locations_dict[k][character_id] or tmp_fnr in temp[k]:
tmp_fnr = random.sample(data_locations_dict[k][key], 1)[0]
temp[k][idx] = tmp_fnr
rest_frameid_map[key] = temp
extract_ground_truth(character_location_map, rest_frameid_map, character_id,
ground_truth_files_base_path + sub_path)
def parse_ground_truth_txt_files(ground_truth_files):
"""
The aim of this method is to parse the ground truth from corresponding text files.
:param ground_truth_files: a list of ground truth text file paths
:return: a dictionary representing the ground truth locations
"""
parsed_ground_truth = {}
for filename in ground_truth_files:
gt = {}
with open(filename, 'r') as f:
for i, line in enumerate(f):
str_parts = line.strip().split(', ')
parts = [int(p) for p in str_parts]
for part in parts[1:]:
try:
gt[part].append(parts[0])
except KeyError:
gt[part] = [parts[0]]
parsed_ground_truth[filename.split('/')[-1]] = gt
return parsed_ground_truth
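# Example of the expected ground truth format (hedged sketch): a line such as "1024, 0, 2" is read
# as frame 1024 containing characters 0 (kermit_the_frog) and 2 (pig), so 1024 is appended to the
# lists stored under keys 0 and 2 of that txt file's dictionary.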
def create_mfcc_audio_dataset(audio_path, frame_length_ms, n_mfcc, output_file):
# extract counts for snippets with and without given character
total_no_audios = len(glob.glob(audio_path + '*.wav'))
print('Total number of audio snippets: %d' % total_no_audios)
print('Window size: %d ms' % frame_length_ms)
print('Number of MFCC features: %d' % n_mfcc)
print('Extracting MFCC features for audio data...')
# define fft window and sliding window factors based on given frame length
mfcc_n_fft_factor = frame_length_ms / 1000 # window factor
mfcc_hop_length_factor = mfcc_n_fft_factor * 0.5 # sliding window factor, note that this must be an int
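    # Worked numbers (assuming librosa's default 22050 Hz sample rate): frame_length_ms=500 gives
    # n_fft = int(0.5 * 22050) = 11025 samples and hop_length = int(0.25 * 22050) = 5512 samples.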
# extract MFCC features for all audio files
mfcc_audio_data = {}
for audio_file in glob.glob(audio_path + '*.wav'):
# extract file id and character id
filename = audio_file.split('/')[-1]
file_char_id = filename.split('_')[0][-1] + '_' + filename.split('_')[1]
raw_data, sample_rate = librosa.load(audio_file)
mfccs = librosa.feature.mfcc(y=raw_data, sr=sample_rate, n_mfcc=n_mfcc,
hop_length=int(mfcc_hop_length_factor * sample_rate),
n_fft=int(mfcc_n_fft_factor * sample_rate)).T
try:
mfcc_audio_data[file_char_id].append(mfccs)
except KeyError:
mfcc_audio_data[file_char_id] = [mfccs]
# write calculated MFCCs to file
print('Write extracted MFCCs to file: %s' % output_file)
with open(output_file, 'w') as f:
for key, values in mfcc_audio_data.items():
file_id = key.split('_')[0]
char_id = key.split('_')[1]
for mfcc_array in values:
for mfcc_values in mfcc_array:
list_as_string = ','.join([str(mfcc_values[i]) for i in range(0, mfcc_array.shape[1])])
f.write('%s, %s, %s\n' % (file_id, char_id, list_as_string))
def random_sample_mfcc(target_character_id, mfcc_file):
# read the mfcc features from file
print('Read MFCC features for random sampling...')
mfcc_data_all = {0: {}, 1: {}, 2: {}, 3: {}, 4: {}}
total_number_of_samples = 0
no_positive_samples = 0
with open(mfcc_file, 'r') as f:
for i, line in enumerate(f):
parts = line.split(',')
file_id = int(parts[0].strip())
char_id = int(parts[1].strip())
mfcc_coeffs = [float(parts[i].strip()) for i in range(2, len(parts))]
if char_id == target_character_id:
no_positive_samples += 1
try:
mfcc_data_all[char_id][file_id].append(mfcc_coeffs)
except KeyError:
mfcc_data_all[char_id][file_id] = [mfcc_coeffs]
total_number_of_samples += 1
    # extract the number of samples present for the target character
print('Number of samples for target class %d: %d' % (target_character_id, no_positive_samples))
# calculate data distribution
print('Create data distribution map...')
data_distribution_map = {0: {}, 1: {}, 2: {}, 3: {}, 4: {}}
no_rest_samples = total_number_of_samples - no_positive_samples
for char_id, value in mfcc_data_all.items():
if char_id != target_character_id:
for file_id, mfccs in value.items():
data_distribution_map[char_id][file_id] = math.ceil(
(len(mfccs) / no_rest_samples) * no_positive_samples)
# add positive samples to resulting dataset
dataset = []
for char_id, value in mfcc_data_all.items():
if char_id == target_character_id:
for file_id, mfccs in value.items():
dataset += [(1, file_id, mfcc) for mfcc in mfccs]
# randomly sample the negative samples according to data distribution
random.seed(333)
for char_id, value in data_distribution_map.items():
for file_id, k in value.items():
dataset += [(0, file_id, mfcc) for mfcc in random.sample(mfcc_data_all[char_id][file_id], k)]
print('Successfully extracted MFCC feature dataset for character: %d' % target_character_id)
return dataset
def chunks(l, n):
for i in range(0, len(l), n):
yield l[i:i + n]
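# Quick illustration: list(chunks([1, 2, 3, 4, 5], 2)) -> [[1, 2], [3, 4], [5]]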
def random_sample_multi_mfcc(target_character_id, mfcc_file, audio_path, frame_length_ms, n_mfcc, mfcc_sequence_len):
# extract counts for snippets with and without given character
total_no_audios = len(glob.glob(audio_path + '*.wav'))
print('Total number of audio snippets: %d' % total_no_audios)
print('Window size: %d ms' % frame_length_ms)
print('Number of MFCC features: %d' % n_mfcc)
print('Extracting MFCC features for audio data...')
# define fft window and sliding window factors based on given frame length
mfcc_n_fft_factor = frame_length_ms / 1000 # window factor
mfcc_hop_length_factor = mfcc_n_fft_factor * 0.5 # sliding window factor, note that this must be an int
total_number_of_samples = 0
# extract MFCC features for all audio files
mfcc_audio_data = {}
for audio_file in glob.glob(audio_path + '*.wav'):
# extract file id and character id
filename = audio_file.split('/')[-1]
#file_char_id = filename.split('_')[0][-1] + '_' + filename.split('_')[1]
character_id = int(filename.split('_')[1])
raw_data, sample_rate = librosa.load(audio_file)
mfccs = librosa.feature.mfcc(y=raw_data, sr=sample_rate, n_mfcc=n_mfcc,
hop_length=int(mfcc_hop_length_factor * sample_rate),
n_fft=int(mfcc_n_fft_factor * sample_rate)).T
for mfcc_sequence in chunks(mfccs, mfcc_sequence_len):
total_number_of_samples += 1
try:
mfcc_audio_data[character_id].append(mfcc_sequence)
except KeyError:
mfcc_audio_data[character_id] = [mfcc_sequence]
no_positive_samples = len(mfcc_audio_data[target_character_id])
    # extract the number of samples present for the target character
print('Number of samples for target class %d: %d' % (target_character_id, no_positive_samples))
# calculate data distribution
print('Create data distribution map...')
data_distribution_map = {}
no_rest_samples = total_number_of_samples - no_positive_samples
for char_id, mfccs in mfcc_audio_data.items():
if char_id != target_character_id:
data_distribution_map[char_id] = math.ceil(
(len(mfccs) / no_rest_samples) * no_positive_samples)
# add positive samples to resulting dataset
dataset = []
for char_id, value in mfcc_audio_data.items():
if char_id == target_character_id:
for mfccs in value:
dataset.append([1, mfccs])
print(data_distribution_map)
# randomly sample the negative samples according to data distribution
random.seed(333)
for char_id, k in data_distribution_map.items():
dataset.extend([[0, mfcc] for mfcc in random.sample(mfcc_audio_data[char_id], k)])
print('Successfully extracted MFCC feature dataset for character: %d' % target_character_id)
return dataset
def get_waldorf_statler_mfcc_features(frame_length_ms, n_mfcc):
Path('../../ground_truth/audio/').mkdir(parents=True, exist_ok=True)
# | |
#!/usr/bin/env python
# coding=UTF-8
# Copyright (c) 2011, <NAME>, Inc.
# All rights reserved.
#
# Developer : <NAME> , <EMAIL> , on 2020-02-04
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the Willow Garage, Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
## import library
import numpy as np
import time , struct ,binascii ,math
from serial import Serial , SerialException
class smart_robotV12():
def __init__(self,port,baud):
## setup connected parameter
self.param = {}
self.param["device_port"] = port
self.param["baudrate"] = baud
        self.imu_decode = {"accel":[0,0,0] , "gyro":[0,0,0]}
self.odom_decode = [0,0,0]
self.odom_seq = 0
self.cmd_decode = [0 ,0 ,0]
self.cmd_seq = 0
self.connected = False
self.start = False
self.clamp = lambda n, minn, maxn: max(min(maxn, n), minn)
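        # e.g. self.clamp(70000, 0, 65536) -> 65536 and self.clamp(-5, 0, 65536) -> 0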
def connect(self):
print("Try to connect the Smart Robot")
try:
self.device = Serial(self.param["device_port"] , self.param["baudrate"] )
self.connected = True
print("Connect done!!")
except:
print("Error! Please check smart robot.")
## close port
def disconnect(self):
if self.connected == True:
self.device.close()
self.connected = False
    ## Start smartbot and choose the vehicle type: 0: omnibot, 1: normal motor without encoder, 2: normal motor with encoder, 3: Mecanum
def set_mode(self, vehicle):
start_cmd = bytearray(b'\xFF\xFE')
start_cmd.append(0x80)
start_cmd.append(0x80)
start_cmd.append(0x09)
start_cmd += struct.pack('>h',0 ) # 2-bytes , reserved bit
# 2-bytes , first is mode set ; second is vehicle of robot , 0 for omnibot , 1 for Mecanum
start_cmd += struct.pack('>h',vehicle)
start_cmd.append(0x00) # 1-bytes , reserved bit
#debug
print("You set : {} ".format(binascii.hexlify(start_cmd)))
#print("Please Wait for 5 seconds...")
#time.sleep(5)
if self.connected == True:
self.device.write(start_cmd)
time.sleep(0.5)
# send vel_cmd[Vx,Vy,Vz]
def vel(self, veh_cmd):
speed = bytearray(b'\xFF\xFE')
speed.append(0x01)
speed += struct.pack('>h',self.clamp( abs(veh_cmd[1]), 0, 65536 ) ) # 2-bytes , velocity for x axis
speed += struct.pack('>h',self.clamp( abs(veh_cmd[0]), 0, 65536 )) # 2-bytes , velocity for y axis
speed += struct.pack('>h',self.clamp( abs(veh_cmd[2]), 0, 65536 )) # 2-bytes , velocity for z axis
# set direction
direction_x = 0
direction_y = 0
direction_z = 0
if veh_cmd[0] >= 0 :
direction_y = 0
else :
direction_y = math.pow(2,1)
if veh_cmd[2] >= 0 :
direction_z = math.pow(2,0)
else :
direction_z = 0
direction = direction_x + direction_y + direction_z
# 1-bytes , direction for x(bit2) ,y(bit1) ,z(bit0) ,and 0 : normal , 1 : reverse
speed += struct.pack('>b',direction)
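        # Illustration of the direction byte: veh_cmd = [-20, 10, 30] gives direction_y = 2
        # (veh_cmd[0] < 0) and direction_z = 1 (veh_cmd[2] >= 0), so direction = 0 + 2 + 1 = 3.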
# debug
print(binascii.hexlify(speed))
if self.connected == True:
self.device.write(speed)
print("Direction: {}".format(direction))
# send TT motor vel_cmd
def TT_motor(self, veh_cmd):
speed = bytearray(b'\xFF\xFE')
speed.append(0x01)
speed += struct.pack('>h',0) # 2-bytes , reserved bit
speed += struct.pack('>h',self.clamp( abs(veh_cmd[1]), 0, 65536 )) # 2-bytes , velocity for y axis
speed += struct.pack('>h',self.clamp( abs(veh_cmd[2]), 0, 65536 )) # 2-bytes , velocity for z axis
if veh_cmd[2] >= 0:
direction_z = 0
else:
direction_z = 1
if veh_cmd[1] >= 0 :
direction_y = 0
else:
direction_y = 2
direction = direction_y + direction_z
print("Direction : {}".format(direction))
# 1-bytes , direction for x(bit2) ,y(bit1) ,z(bit0) ,and 0 : normal , 1 : reverse
speed += struct.pack('>b',direction)
# debug
print(binascii.hexlify(speed))
if self.connected == True:
self.device.write(speed)
print("Direction: {}".format(direction))
def TT_motor_function(self, veh_cmd):
self.param["motor_axis_width"] = 0.13 #(m)
self.param["motor_axis_length"] = 0.14 #(m)
self.param["turn_radius"] = 0.2 # (m)
# under parameter use turning right
self.param["V4_turn_radius"] = self.param["turn_radius"] - ( self.param["motor_axis_width"] / 2 )
self.param["V3_turn_radius"] = self.param["turn_radius"] + ( self.param["motor_axis_width"] / 2 )
self.param["V2_turn_radius"] = math.pow( math.pow(self.param["V4_turn_radius"],2) + math.pow(self.param["motor_axis_length"],2) ,0.5)
self.param["V1_turn_radius"] = math.pow( math.pow(self.param["V3_turn_radius"],2) + math.pow(self.param["motor_axis_length"],2) ,0.5)
self.param["velocity_function_Denominator"] = self.param["V1_turn_radius"] + self.param["V2_turn_radius"] + 2 * self.param["turn_radius"]
self.param["turn_angle"] = math.atan(self.param["motor_axis_length"] / self.param["turn_radius"])
max_speed = 10000
min_speed = 0
speed = bytearray(b'\xFF\xFE')
speed.append(0x02)
# V1 = left_front , V2 = right_front , V3 = left_back , V4 = right_back
if veh_cmd[1] >= 0 :
reverse = math.pow(2,3) + math.pow(2,1)
if veh_cmd[2] == 0: # go forward and do not turn
speed += struct.pack('>h',self.clamp( abs(veh_cmd[1]), min_speed, max_speed )) # 2-bytes , velocity for V1
speed += struct.pack('>h',self.clamp( abs(veh_cmd[1]), min_speed, max_speed )) # 2-bytes , velocity for V2
speed += struct.pack('>h',self.clamp( abs(veh_cmd[1]), min_speed, max_speed )) # 2-bytes , velocity for V3
speed += struct.pack('>h',self.clamp( abs(veh_cmd[1]), min_speed, max_speed )) # 2-bytes , velocity for V4
else:
if veh_cmd[2] < 0: # go forward and turn right
V4 = self.clamp(int(abs( ( self.param["V4_turn_radius"] / self.param["velocity_function_Denominator"] ) * veh_cmd[2] * math.cos(self.param["turn_angle"]) )) + abs(veh_cmd[1]), min_speed , 4500 )
V3 = self.clamp(int(abs( ( self.param["V3_turn_radius"] / self.param["velocity_function_Denominator"] ) * veh_cmd[2] * math.cos(self.param["turn_angle"]) )) + abs(veh_cmd[1]), min_speed , 8900 )
V2 = self.clamp(int(abs( ( self.param["V2_turn_radius"] / self.param["velocity_function_Denominator"] ) * veh_cmd[2] * math.cos(self.param["turn_angle"]) )) + abs(veh_cmd[1]), min_speed , 6500 )
V1 = self.clamp(int(abs( ( self.param["V1_turn_radius"] / self.param["velocity_function_Denominator"] ) * veh_cmd[2] * math.cos(self.param["turn_angle"]) )) + abs(veh_cmd[1]), min_speed , max_speed )
speed += struct.pack('>h',V1) # 2-bytes , velocity for V1
speed += struct.pack('>h',V2) # 2-bytes , velocity for V2
speed += struct.pack('>h',V3) # 2-bytes , velocity for V3
speed += struct.pack('>h',V4) # 2-bytes , velocity for V4
                else: # go forward and turn left
V4 = self.clamp(int(abs( ( self.param["V3_turn_radius"] / self.param["velocity_function_Denominator"] ) * veh_cmd[2] * math.cos(self.param["turn_angle"]) )) + abs(veh_cmd[1]), min_speed , 4500 )
V3 = self.clamp(int(abs( ( self.param["V4_turn_radius"] / self.param["velocity_function_Denominator"] ) * veh_cmd[2] * math.cos(self.param["turn_angle"]) )) + abs(veh_cmd[1]), min_speed , 8900 )
V2 = self.clamp(int(abs( ( self.param["V1_turn_radius"] / self.param["velocity_function_Denominator"] ) * veh_cmd[2] * math.cos(self.param["turn_angle"]) )) + abs(veh_cmd[1]), min_speed , 6500 )
V1 = self.clamp(int(abs( ( self.param["V2_turn_radius"] / self.param["velocity_function_Denominator"] ) * veh_cmd[2] * math.cos(self.param["turn_angle"]) )) + abs(veh_cmd[1]), min_speed , max_speed )
speed += struct.pack('>h',self.clamp( V1, min_speed, max_speed )) # 2-bytes , velocity for V1
speed += struct.pack('>h',self.clamp( V2, min_speed, max_speed )) # 2-bytes , velocity for V2
speed += struct.pack('>h',self.clamp( V3, min_speed, max_speed )) # 2-bytes , velocity for V3
speed += struct.pack('>h',self.clamp( V4, min_speed, max_speed )) # 2-bytes , velocity for V4
print(" left_front: {} , right_front: {} , left_back: {} , right_back: {} ".format(V1,V2,V3,V4) )
else:
reverse = math.pow(2,2) + math.pow(2,0)
if veh_cmd[2] == 0: # go back and do not turn
speed += struct.pack('>h',self.clamp( abs(veh_cmd[1]), min_speed, max_speed )) # 2-bytes , velocity for V1
speed += struct.pack('>h',self.clamp( abs(veh_cmd[1]), min_speed, max_speed )) # 2-bytes , velocity for V2
speed += struct.pack('>h',self.clamp( abs(veh_cmd[1]), min_speed, max_speed )) # 2-bytes , velocity for V3
speed += struct.pack('>h',self.clamp( abs(veh_cmd[1]), min_speed, max_speed )) # 2-bytes , velocity for V4
else:
if veh_cmd[2] < 0: # go back and turn right
V4 = self.clamp(int(abs( ( self.param["V4_turn_radius"] / self.param["velocity_function_Denominator"] ) * veh_cmd[2] * math.cos(self.param["turn_angle"]) )) + abs(veh_cmd[1]), min_speed , 4500 )
V3 = self.clamp(int(abs( ( self.param["V3_turn_radius"] / self.param["velocity_function_Denominator"] ) * veh_cmd[2] * math.cos(self.param["turn_angle"]) )) + abs(veh_cmd[1]), min_speed , 8900 )
V2 = self.clamp(int(abs( ( self.param["V2_turn_radius"] / self.param["velocity_function_Denominator"] ) * veh_cmd[2] * math.cos(self.param["turn_angle"]) )) + abs(veh_cmd[1]), min_speed , 6500 )
V1 = self.clamp(int(abs( ( self.param["V1_turn_radius"] / self.param["velocity_function_Denominator"] | |
import numpy as np
from collections import defaultdict
from my_cocoeval import mask as maskUtils
from terminaltables import AsciiTable
import matplotlib.pyplot as plt
import os
import pdb
NAMES = ('person', 'bicycle', 'car', 'motorcycle', 'airplane', 'bus', 'train', 'truck',
'boat', 'traffic light', 'fire hydrant', 'stop sign', 'parking meter', 'bench', 'bird', 'cat',
'dog', 'horse', 'sheep', 'cow', 'elephant', 'bear', 'zebra', 'giraffe',
'backpack', 'umbrella', 'handbag', 'tie', 'suitcase', 'frisbee', 'skis', 'snowboard',
'sports ball', 'kite', 'baseball bat', 'baseball glove', 'skateboard', 'surfboard', 'tennis racket', 'bottle',
'wine glass', 'cup', 'fork', 'knife', 'spoon', 'bowl', 'banana', 'apple',
'sandwich', 'orange', 'broccoli', 'carrot', 'hot dog', 'pizza', 'donut', 'cake',
'chair', 'couch', 'potted plant', 'bed', 'dining table', 'toilet', 'tv', 'laptop',
'mouse', 'remote', 'keyboard', 'cell phone', 'microwave', 'oven', 'toaster', 'sink',
'refrigerator', 'book', 'clock', 'vase', 'scissors', 'teddy bear', 'hair drier', 'toothbrush')
class SelfEval:
def __init__(self, cocoGt, cocoDt, all_points=False):
self.gt = defaultdict(list)
self.dt = defaultdict(list)
self.all_points = all_points
# np.arange and np.linspace can not get the accurate number, e.g. 0.8500000000000003 and 0.8999999999
self.iou_thre = [0.5, 0.55, 0.6, 0.65, 0.7, 0.75, 0.8, 0.85, 0.9, 0.95]
self.recall_points = np.linspace(.0, 1.00, int(np.round((1.00 - .0) / .01)) + 1, endpoint=True)
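        # 101 evenly spaced recall points (0.00, 0.01, ..., 1.00), matching COCO's interpolated AP.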
self.max_det = 100
self.area = [[0 ** 2, 1e5 ** 2], [0 ** 2, 32 ** 2], [32 ** 2, 96 ** 2], [96 ** 2, 1e5 ** 2]]
self.area_name = ['all', 'small', 'medium', 'large']
self.imgIds = list(np.unique(cocoGt.getImgIds()))
self.catIds = list(np.unique(cocoGt.getCatIds()))
gts = cocoGt.loadAnns(cocoGt.getAnnIds(imgIds=self.imgIds, catIds=self.catIds))
dts = cocoDt.loadAnns(cocoDt.getAnnIds(imgIds=self.imgIds, catIds=self.catIds))
self.C, self.A, self.T, self.N = len(self.catIds), len(self.area), len(self.iou_thre), len(self.imgIds)
# key is a tuple (gt['image_id'], gt['category_id']), value is a list.
for gt in gts:
# if gt['iscrowd'] == 0: # TODO: why this makes the result different
self.gt[gt['image_id'], gt['category_id']].append(gt)
for dt in dts:
self.dt[dt['image_id'], dt['category_id']].append(dt)
def evaluate(self):
self.match_record = [[['no_gt_no_dt' for _ in range(self.N)] for _ in range(self.A)] for _ in range(self.C)]
for c, cat_id in enumerate(self.catIds):
for a, area in enumerate(self.area):
for n, img_id in enumerate(self.imgIds):
print(f'\rMatching ground-truths and detections: C: {c}, A: {a}, N: {n}', end='')
gt_list, dt_list = self.gt[img_id, cat_id], self.dt[img_id, cat_id]
if len(gt_list) == 0 and len(dt_list) == 0:
continue
elif len(gt_list) != 0 and len(dt_list) == 0:
for one_gt in gt_list:
if one_gt['iscrowd'] or one_gt['area'] < area[0] or one_gt['area'] > area[1]:
one_gt['_ignore'] = 1
else:
one_gt['_ignore'] = 0
# sort ignored gt to last
index = np.argsort([aa['_ignore'] for aa in gt_list], kind='mergesort')
gt_list = [gt_list[i] for i in index]
gt_ignore = np.array([aa['_ignore'] for aa in gt_list])
num_gt = np.count_nonzero(gt_ignore == 0)
self.match_record[c][a][n] = {'has_gt_no_dt': 'pass', 'num_gt': num_gt}
else:
# different sorting method generates slightly different results.
# 'mergesort' is used to be consistent as the COCO Matlab implementation.
index = np.argsort([-aa['score'] for aa in dt_list], kind='mergesort')
dt_list = [dt_list[i] for i in index]
dt_list = dt_list[0: self.max_det] # if len(one_dt) < self.max_det, no influence
if len(gt_list) == 0 and len(dt_list) != 0:
                            dt_matched = np.zeros((self.T, len(dt_list))) # all dt should be fp, so set as 0
# set unmatched detections which are outside of area range to ignore
dt_out_range = [aa['area'] < area[0] or aa['area'] > area[1] for aa in dt_list]
dt_ignore = np.repeat(np.array(dt_out_range)[None, :], repeats=self.T, axis=0)
num_gt = 0
else:
for one_gt in gt_list:
if one_gt['iscrowd'] or one_gt['area'] < area[0] or one_gt['area'] > area[1]:
one_gt['_ignore'] = 1
else:
one_gt['_ignore'] = 0
# sort ignored gt to last
index = np.argsort([aa['_ignore'] for aa in gt_list], kind='mergesort')
gt_list = [gt_list[i] for i in index]
gt_matched = np.zeros((self.T, len(gt_list)))
gt_ignore = np.array([aa['_ignore'] for aa in gt_list])
dt_matched = np.zeros((self.T, len(dt_list)))
dt_ignore = np.zeros((self.T, len(dt_list)))
box_gt = [aa['bbox'] for aa in gt_list]
box_dt = [aa['bbox'] for aa in dt_list]
iscrowd = [int(aa['iscrowd']) for aa in gt_list]
IoUs = maskUtils.iou(box_dt, box_gt, iscrowd) # shape: (num_dt, num_gt)
                            assert len(IoUs) != 0, 'Bug, IoUs should not be empty when gt and dt are both non-empty.'
for t, one_thre in enumerate(self.iou_thre):
for d, one_dt in enumerate(dt_list):
iou = one_thre
g_temp = -1
for g in range(len(gt_list)):
# if this gt already matched, and not a crowd, continue
if gt_matched[t, g] > 0 and not iscrowd[g]:
continue
                                        # if dt already matched a non-ignored gt and we reach ignored gts, stop: all the ignored gts are sorted to the end
if g_temp > -1 and gt_ignore[g_temp] == 0 and gt_ignore[g] == 1:
break
# continue to next gt unless better match made
if IoUs[d, g] < iou:
continue
# if match successful and best so far, store appropriately
iou = IoUs[d, g]
g_temp = g
# if match made store id of match for both dt and gt
if g_temp == -1:
continue
dt_ignore[t, d] = gt_ignore[g_temp]
dt_matched[t, d] = gt_list[g_temp]['id']
gt_matched[t, g_temp] = one_dt['id']
dt_out_range = [aa['area'] < area[0] or aa['area'] > area[1] for aa in dt_list]
dt_out_range = np.repeat(np.array(dt_out_range)[None, :], repeats=self.T, axis=0)
dt_out_range = np.logical_and(dt_matched == 0, dt_out_range)
dt_ignore = np.logical_or(dt_ignore, dt_out_range)
num_gt = np.count_nonzero(gt_ignore == 0)
self.match_record[c][a][n] = {'dt_match': dt_matched,
'dt_score': [aa['score'] for aa in dt_list],
'dt_ignore': dt_ignore,
'num_gt': num_gt}
    def accumulate(self):  # self.match_record is all this function needs
print('\nComputing recalls and precisions...')
R = len(self.recall_points)
self.p_record = [[[None for _ in range(self.T)] for _ in range(self.A)] for _ in range(self.C)]
self.r_record = [[[None for _ in range(self.T)] for _ in range(self.A)] for _ in range(self.C)]
self.s_record = [[[None for _ in range(self.T)] for _ in range(self.A)] for _ in range(self.C)]
        # TODO: check if the logic is right, especially when there are absent categories when evaluating only a subset of the images
for c in range(self.C):
for a in range(self.A):
temp_dets = self.match_record[c][a]
                temp_dets = [aa for aa in temp_dets if aa != 'no_gt_no_dt']  # avoid identity comparison with a str literal
num_gt = sum([aa['num_gt'] for aa in temp_dets])
assert num_gt != 0, f'Error, category {NAMES[c]} does not exist in validation images.'
# exclude images which have no dt
temp_dets = [aa for aa in temp_dets if 'has_gt_no_dt' not in aa]
if len(temp_dets) == 0: # if no detection found for all validation images
                    # If we continued directly, the related record would be 'None',
# which is excluded when computing mAP in summarize().
for t in range(self.T):
self.p_record[c][a][t] = np.array([0.])
self.r_record[c][a][t] = np.array([0.])
self.s_record[c][a][t] = np.array([0.])
continue
scores = np.concatenate([aa['dt_score'] for aa in temp_dets])
index = np.argsort(-scores, kind='mergesort')
score_sorted = scores[index]
dt_matched = np.concatenate([aa['dt_match'] for aa in temp_dets], axis=1)[:, index]
dt_ignore = np.concatenate([aa['dt_ignore'] for aa in temp_dets], axis=1)[:, index]
tps = np.logical_and(dt_matched, np.logical_not(dt_ignore)) # shape: (thre_num, dt_num)
fps = np.logical_and(np.logical_not(dt_matched), np.logical_not(dt_ignore))
                tp_sum = np.cumsum(tps, axis=1).astype(dtype=np.float64)
                fp_sum = np.cumsum(fps, axis=1).astype(dtype=np.float64)
for t, (tp, fp) in enumerate(zip(tp_sum, fp_sum)):
tp = np.array(tp)
fp = np.array(fp)
recall = (tp / num_gt).tolist()
precision = (tp / (fp + tp + np.spacing(1))).tolist()
                    # numpy element access is slow without cython optimization;
                    # using python lists here gives a significant speed improvement
p_smooth = precision.copy()
for i in range(len(tp) - 1, 0, -1):
if p_smooth[i] > p_smooth[i - 1]:
p_smooth[i - 1] = p_smooth[i]
if self.all_points:
p_reduced, s_reduced = [], []
r_reduced = list(set(recall))
r_reduced.sort()
for one_r in r_reduced:
index = recall.index(one_r) # the first precision w.r.t the recall is always the highest
p_reduced.append(p_smooth[index])
s_reduced.append(score_sorted[index])
stair_h, stair_w, stair_s = [], [], []
for i in range(len(p_reduced)): # get the falling edge of the stairs
if (i != len(p_reduced) - 1) and (p_reduced[i] > p_reduced[i + 1]):
stair_h.append(p_reduced[i])
stair_w.append(r_reduced[i])
stair_s.append(s_reduced[i])
stair_h.append(p_reduced[-1]) # add the final point which is out of range in the above loop
stair_w.append(r_reduced[-1])
stair_s.append(s_reduced[-1])
stair_w.insert(0, 0.) # insert 0. at index 0 to do np.diff()
stair_w = np.diff(stair_w)
self.p_record[c][a][t] = np.array(stair_h)
self.r_record[c][a][t] = np.array(stair_w)
self.s_record[c][a][t] = np.array(stair_s)
else:
index = np.searchsorted(recall, self.recall_points, side='left')
score_101, precision_101 = np.zeros((R,)), np.zeros((R,))
                        # if max recall is < 1.0, some of the 101 recall points fall beyond the
                        # achieved recall range; the IndexError raised for those points is caught
                        # and the corresponding entries simply stay 0.
                        try:
                            for ri, pi in enumerate(index):
                                precision_101[ri] = p_smooth[pi]
                                score_101[ri] = score_sorted[pi]
                        except IndexError:
                            pass
self.p_record[c][a][t] = precision_101
num_points = len(precision_101)
                        # COCO's AP = the mean of the 101 interpolated precision points; this way is kept
                        # for code compatibility, so the width of the stair is
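                        # Illustrative sketch (added aside, not part of the original class): the
                        # 101-point AP described above is the mean of precision values interpolated
                        # at the 101 recall points. The helper name and usage below are hypothetical.
                        # def ap_101(recall, p_smooth, recall_points):
                        #     idx = np.searchsorted(recall, recall_points, side='left')
                        #     prec = np.zeros(len(recall_points))
                        #     valid = idx < len(p_smooth)
                        #     prec[valid] = np.asarray(p_smooth)[idx[valid]]
                        #     return prec.mean()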
channel_name:
results.append(gspaths.Build(gspaths.Build(version=nmo_version,
board=self._build.board,
channel=self._build.channel,
bucket=self._build.bucket)))
return results
def _DiscoverRequiredFullPayloads(self, images):
"""Find the Payload objects for the images from the current build.
In practice, this creates a full payload definition for every image passed
in.
Args:
images: The images for the current build.
Returns:
A list of gspaths.Payload objects for full payloads for every image.
"""
return [gspaths.Payload(tgt_image=i) for i in images]
def _DiscoverRequiredLoopbackDelta(self, images):
"""Find the delta from an image to itself.
    To test our ability to update away from a given image, we generate a delta
    from the image to itself and ensure we can apply it successfully.
Args:
images: The key-filtered images for the current build.
Returns:
A list of gspaths.Payload objects for the deltas needed from the previous
builds, which may be empty.
"""
# If we have no images to delta to, no results.
if not images:
return []
# After filtering for MP/PREMP, there can be only one!
assert len(images) == 1, 'Unexpected images found %s.' % images
image = images[0]
return [gspaths.Payload(tgt_image=image, src_image=image)]
def _DiscoverRequiredFromPreviousDeltas(self, images, previous_images):
"""Find the deltas from previous builds.
    All arguments should already be filtered to be all MP or all PREMP.
Args:
images: The key-filtered images for the current build.
previous_images: The key-filtered images from previous builds from
which delta payloads should be generated.
Returns:
A list of gspaths.Payload objects for the deltas needed from the previous
builds, which may be empty.
"""
# If we have no images to delta to, no results.
if not images:
return []
# After filtering for MP/PREMP, there can be only one!
assert len(images) == 1, 'Unexpected images found %s.' % images
image = images[0]
# Filter artifacts that have the same |image_type| as that of |image|.
previous_images_by_type = _FilterForImageType(previous_images,
image.image_type)
results = []
# We should never generate downgrades, they are unsafe. Deltas to the
# same images are useless. Neither case normally happens unless
# we are re-generating payloads for old builds.
for prev in previous_images_by_type:
if gspaths.VersionGreater(image.version, prev.version):
# A delta from each previous image to current image.
results.append(gspaths.Payload(tgt_image=image, src_image=prev))
else:
        logging.info('Skipping %s: not older than the target', prev)
return results
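  # Illustrative note (version strings are hypothetical; assumes VersionGreater does a
  # dotted-version comparison): with image.version == '4100.68.0', a previous image at
  # '4100.38.5' yields a delta payload, while one at '4100.68.0' or later is skipped above.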
def _DiscoverRequiredPayloads(self):
"""Find the payload definitions for the current build.
This method finds the images for the current build, and for all builds we
need deltas from, and decides what payloads are needed.
IMPORTANT: The order in which payloads are listed is significant as it
reflects on the payload generation order. The current way is to list test
payloads last, as they are of lesser importance from the release process
standpoint, and may incur failures that do not affect the signed payloads
and may be otherwise detrimental to the release schedule.
Returns:
A PayloadManager instance.
Raises:
      BuildNotReady: If the current build doesn't seem to have all of its
images available yet. This commonly happens because the signer hasn't
finished signing the current build.
BuildCorrupt: If current or previous builds have unexpected images.
ImageMissing: Raised if expected images are missing for previous builds.
"""
images = []
previous_images = []
fsi_images = []
payload_manager = PayloadManager()
try:
# When discovering the images for our current build, they might not be
# discoverable right away (GS eventual consistency). So, we retry.
images = retry_util.RetryException(ImageMissing, 3,
self._DiscoverImages, self._build,
sleep=self.BUILD_DISCOVER_RETRY_SLEEP)
images += self._DiscoverTestImageArchives(self._build)
except ImageMissing as e:
# If the main build doesn't have the final build images, then it's
# not ready.
logging.info(e)
raise BuildNotReady()
_LogList('Images found', images)
# Discover and filter active FSI builds.
fsi_builds = self._DiscoverFsiBuildsForDeltas()
if fsi_builds:
_LogList('Active FSI builds considered', fsi_builds)
else:
logging.info('No active FSI builds found')
for fsi in fsi_builds:
fsi_images += self._DiscoverImages(fsi)
fsi_images += self._DiscoverTestImageArchives(fsi)
fsi_images = _FilterForBasic(fsi_images) + _FilterForTest(fsi_images)
# Discover previous, non-FSI, builds that we also must generate deltas for.
previous_builds = [b for b in self._DiscoverNmoBuild()
if b not in fsi_builds]
if previous_builds:
_LogList('Previous, non-FSI, builds considered', previous_builds)
else:
logging.info('No other previous builds found')
# Discover and filter previous images.
for p in previous_builds:
try:
previous_images += self._DiscoverImages(p)
except ImageMissing as e:
# Temporarily allow generation of delta payloads to fail because of
# a missing previous build until crbug.com/243916 is addressed.
# TODO(mtennant): Remove this when bug is fixed properly.
logging.warning('Previous build image is missing, skipping: %s', e)
# In this case, we should also skip test image discovery; since no
# signed deltas will be generated from this build, we don't need to
# generate test deltas from it.
continue
previous_images += self._DiscoverTestImageArchives(p)
previous_images = (
_FilterForBasic(previous_images) + _FilterForTest(previous_images))
# Full payloads for the current build.
payload_manager.Add(
['full'],
self._DiscoverRequiredFullPayloads(_FilterForImages(images)))
# Full payloads for previous builds.
payload_manager.Add(
['full', 'previous'],
self._DiscoverRequiredFullPayloads(_FilterForImages(previous_images)))
# Discover delta payloads.
skip_deltas = self._skip_delta_payloads
# Deltas for previous -> current (pre-MP and MP).
delta_previous_labels = ['delta', 'previous']
payload_manager.Add(
delta_previous_labels,
self._DiscoverRequiredFromPreviousDeltas(
_FilterForPremp(_FilterForBasic(images)),
_FilterForPremp(previous_images)),
skip=skip_deltas)
payload_manager.Add(
delta_previous_labels,
self._DiscoverRequiredFromPreviousDeltas(
_FilterForMp(_FilterForBasic(images)),
_FilterForMp(previous_images)),
skip=skip_deltas)
# Deltas for fsi -> current (pre-MP and MP).
delta_fsi_labels = ['delta', 'fsi']
payload_manager.Add(
delta_fsi_labels,
self._DiscoverRequiredFromPreviousDeltas(
_FilterForPremp(_FilterForBasic(images)),
_FilterForPremp(fsi_images)),
skip=skip_deltas)
payload_manager.Add(
delta_fsi_labels,
self._DiscoverRequiredFromPreviousDeltas(
_FilterForMp(_FilterForBasic(images)),
_FilterForMp(fsi_images)),
skip=skip_deltas)
# Discover test payloads if Autotest is not disabled.
if self._control_dir:
skip_test_deltas = self._skip_delta_payloads
# Full test payloads.
payload_manager.Add(
['test', 'full'],
self._DiscoverRequiredFullPayloads(_FilterForTest(images)))
# Full previous payloads.
payload_manager.Add(
['test', 'full', 'previous'],
self._DiscoverRequiredFullPayloads(_FilterForTest(previous_images)))
# Deltas for current -> current (for testing update away).
payload_manager.Add(
['test', 'delta', 'n2n'],
self._DiscoverRequiredLoopbackDelta(_FilterForTest(images)),
skip=skip_test_deltas)
# Deltas for previous -> current (test payloads).
payload_manager.Add(
['test', 'delta', 'previous'],
self._DiscoverRequiredFromPreviousDeltas(
_FilterForTest(images), _FilterForTest(previous_images)),
skip=skip_test_deltas)
# Deltas for fsi -> current (test payloads).
payload_manager.Add(
['test', 'delta', 'fsi'],
self._DiscoverRequiredFromPreviousDeltas(
_FilterForTest(images), _FilterForTest(fsi_images)),
skip=skip_test_deltas)
# Set the payload URIs.
for p in payload_manager.Get([]):
paygen_payload_lib.FillInPayloadUri(p)
return payload_manager
def _GeneratePayloads(self, payloads, lock=None):
"""Generate the payloads called for by a list of payload definitions.
It will keep going, even if there is a failure.
Args:
payloads: gspath.Payload objects defining all of the payloads to generate.
lock: gslock protecting this paygen_build run.
Raises:
Any arbitrary exception raised by CreateAndUploadPayload.
"""
payloads_args = [(payload,
self._work_dir,
isinstance(payload.tgt_image, gspaths.Image),
bool(self._drm))
for payload in payloads]
if self._run_parallel:
parallel.RunTasksInProcessPool(_GenerateSinglePayload, payloads_args)
else:
for args in payloads_args:
_GenerateSinglePayload(*args)
# This can raise LockNotAcquired, if the lock timed out during a
# single payload generation.
if lock:
lock.Renew()
def _FindFullTestPayloads(self, channel, version):
"""Returns a list of full test payloads for a given version.
Uses the current build's board and bucket values. This method caches the
full test payloads previously discovered as we may be using them for
multiple tests in a single run.
Args:
channel: Channel to look in for payload.
version: A build version whose payloads to look for.
Returns:
A (possibly empty) list of payload URIs.
"""
assert channel
assert version
if (channel, version) in self._version_to_full_test_payloads:
# Serve from cache, if possible.
return self._version_to_full_test_payloads[(channel, version)]
payload_search_uri = gspaths.ChromeosReleases.PayloadUri(
channel, self._build.board, version, '*',
bucket=self._build.bucket)
payload_candidate = urilib.ListFiles(payload_search_uri)
# We create related files for each payload that have the payload name
# plus these extensions. Skip these files.
NOT_PAYLOAD = ('.json', '.log')
full_test_payloads = [u for u in payload_candidate
if not any([u.endswith(n) for n in NOT_PAYLOAD])]
# Store in cache.
self._version_to_full_test_payloads[(channel, version)] = full_test_payloads
return full_test_payloads
def _EmitControlFile(self, payload_test, suite_name, control_dump_dir):
"""Emit an Autotest control file for a given payload test."""
# Figure out the source version for the test.
payload = payload_test.payload
src_version = payload_test.src_version
src_channel = payload_test.src_channel
# Discover the full test payload that corresponds to the source version.
src_payload_uri_list = self._FindFullTestPayloads(src_channel, src_version)
if not src_payload_uri_list:
logging.error('Cannot find full test payload for source version (%s), '
'control file not generated', src_version)
raise PayloadTestError('cannot find source payload for testing %s' %
payload)
if len(src_payload_uri_list) != 1:
logging.error('Found multiple (%d) full test payloads for source version '
'(%s), control file not generated:\n%s',
len(src_payload_uri_list), src_version,
'\n'.join(src_payload_uri_list))
raise PayloadTestError('multiple source payloads found for testing %s' %
payload)
src_payload_uri = src_payload_uri_list[0]
logging.info('Source full test payload found at %s', src_payload_uri)
release_archive_uri = gspaths.ChromeosReleases.BuildUri(
src_channel, self._build.board, src_version)
# TODO(dgarrett): Remove if block after finishing crbug.com/523122
    stateful_uri = os.path.join(release_archive_uri,
in log file: {}.\n If needed, please reach out to tool support at: <EMAIL>, with your report and log file. \033[0;0m'.format(log_file_path))
log_file_logger.exception('{}\n'.format(e))
#26:Check:Cluster:Cluster health
print(' #26:Checking:Cluster:Cluster health')
log_file_logger.info('#26:Check:Cluster:Cluster health')
writeFile(report_file, '#26:Check:Cluster:Cluster health\n\n')
try:
cluster_health_data = json.loads(getRequestpy3(version_tuple,vmanage_lo_ip, jsessionid, 'clusterManagement/list', args.vmanage_port, tokenid))
services_down, check_result, check_analysis, check_action = criticalCheckthirteen(cluster_health_data)
if check_result == 'Failed':
cluster_checks['#26:Check:Cluster:Cluster health'] = [ check_analysis, check_action]
log_file_logger.error('#26: Check result: {}'.format(check_result))
log_file_logger.error('#26: Check Analysis: {}'.format(check_analysis))
log_file_logger.error('#26: Relevant cluster services that are down: {}\n'.format(services_down))
writeFile(report_file, 'Result: ERROR - {}\n'.format(check_analysis))
writeFile(report_file, 'Action: {}\n\n'.format(check_action))
else:
log_file_logger.info('#26: Check result: {}'.format(check_result))
log_file_logger.info('#26: Check Analysis: {}\n'.format(check_analysis))
writeFile(report_file, 'Result: INFO - {}\n\n'.format(check_analysis))
except Exception as e:
print('\033[1;31m ERROR: Error performing #26:Check:Cluster:Cluster health. \n Please check error details in log file: {}.\n If needed, please reach out to tool support at: <EMAIL>, with your report and log file. \033[0;0m'.format(log_file_path))
log_file_logger.exception('{}\n'.format(e))
#27:Check:Cluster:Cluster ConfigDB topology
print(' #27:Checking:Cluster:Cluster ConfigDB topology')
log_file_logger.info('#27:Check:Cluster:Cluster ConfigDB topology')
writeFile(report_file, '#27:Check:Cluster:Cluster ConfigDB topology\n\n')
try:
cluster_health_data = json.loads(getRequestpy3(version_tuple,vmanage_lo_ip, jsessionid, 'clusterManagement/list', args.vmanage_port, tokenid))
configDB_count, check_result, check_analysis, check_action = criticalCheckfourteen(cluster_health_data)
if check_result == 'Failed':
cluster_checks['#27:Check:Cluster:Cluster ConfigDB topology'] = [ check_analysis, check_action]
log_file_logger.error('#27: Check result: {}'.format(check_result))
log_file_logger.error('#27: Check Analysis: {}'.format(check_analysis))
log_file_logger.error('#27: No. of configDB servers in the cluster: {}\n'.format(configDB_count))
writeFile(report_file, 'Result: ERROR - {}\n'.format(check_analysis))
writeFile(report_file, 'Action: {}\n\n'.format(check_action))
else:
log_file_logger.info('#27: Check result: {}'.format(check_result))
log_file_logger.info('#27: Check Analysis: {}'.format(check_analysis))
log_file_logger.info('#27: No. of configDB servers in the cluster: {}\n'.format(configDB_count))
writeFile(report_file, 'Result: INFO - {}\n\n'.format(check_analysis))
except Exception as e:
print('\033[1;31m ERROR: Error performing #27:Check:Cluster:Cluster ConfigDB topology. \n Please check error details in log file: {}.\n If needed, please reach out to tool support at: <EMAIL>, with your report and log file. \033[0;0m'.format(log_file_path))
log_file_logger.exception('{}\n'.format(e))
#28:Check:Cluster:Messaging server
print(' #28:Checking:Cluster:Messaging server')
log_file_logger.info('#28:Check:Cluster:Messaging server')
writeFile(report_file, '#28:Check:Cluster:Messaging server\n\n')
try:
cluster_health_data = json.loads(getRequestpy3(version_tuple,vmanage_lo_ip, jsessionid, 'clusterManagement/list', args.vmanage_port, tokenid))
cluster_msdown,check_result,check_analysis, check_action = criticalCheckfifteen(cluster_health_data)
if check_result == 'Failed':
cluster_checks['#28:Check:Cluster:Messaging server'] = [ check_analysis, check_action]
log_file_logger.error('#28: Check result: {}'.format(check_result))
log_file_logger.error('#28: Check Analysis: {}'.format(check_analysis))
                log_file_logger.error('#28: Relevant cluster services that are down: {}\n'.format(cluster_msdown))
writeFile(report_file, 'Result: ERROR - {}\n'.format(check_analysis))
writeFile(report_file, 'Action: {}\n\n'.format(check_action))
else:
log_file_logger.info('#28: Check result: {}'.format(check_result))
log_file_logger.info('#28: Check Analysis: {}\n'.format(check_analysis))
writeFile(report_file, 'Result: INFO - {}\n\n'.format(check_analysis))
except Exception as e:
print('\033[1;31m ERROR: Error performing #28:Check:Cluster:Messaging server. \n Please check error details in log file: {}.\n If needed, please reach out to tool support at: <EMAIL>, with your report and log file. \033[0;0m'.format(log_file_path))
log_file_logger.exception('{}\n'.format(e))
#29:Check:Cluster:DR replication status
print(' #29:Checking:Cluster:DR replication status')
log_file_logger.info('#29:Check:Cluster:DR replication status')
writeFile(report_file, '#29:Check:Cluster:DR replication status\n\n')
try:
dr_data = json.loads(getRequestpy3(version_tuple,vmanage_lo_ip, jsessionid, 'disasterrecovery/details', args.vmanage_port, tokenid))
dr_status, check_action, check_analysis, check_result = criticalChecksixteen(dr_data)
if check_result == 'Failed':
cluster_checks['#29:Check:Cluster:DR replication status'] = [ check_analysis, check_action]
log_file_logger.error('#29: Check result: {}'.format(check_result))
log_file_logger.error('#29: Check Analysis: {}'.format(check_analysis))
log_file_logger.error('#29: DR Replication status: {}\n'.format(dr_status))
writeFile(report_file, 'Result: ERROR - {}\n'.format(check_analysis))
writeFile(report_file, 'Action: {}\n\n'.format(check_action))
else:
log_file_logger.info('#29: Check result: {}'.format(check_result))
log_file_logger.info('#29: Check Analysis: {}\n'.format(check_analysis))
writeFile(report_file, 'Result: INFO - {}\n\n'.format(check_analysis))
except Exception as e:
print('\033[1;31m ERROR: Error performing #29:Check:Cluster:DR replication status. \n Please check error details in log file: {}.\n If needed, please reach out to tool support at: <EMAIL>, with your report and log file. \033[0;0m'.format(log_file_path))
log_file_logger.exception('{}\n'.format(e))
#30:Check:Cluster:Intercluster communication
print(' #30:Checking:Cluster:Intercluster communication')
log_file_logger.info('#30:Check:Cluster:Intercluster communication')
writeFile(report_file, '#30:Check:Cluster:Intercluster communication\n\n')
try:
            if criticalCheckseventeenpy3.is_alive():
criticalCheckseventeenpy3.join(10)
if not criticalCheckseventeenpy3.result_queue.empty():
ping_output, ping_output_failed, ping_check_result, ping_check_analysis, ping_check_action = criticalCheckseventeenpy3.result_queue.get()
if ping_check_result == 'Failed':
                    cluster_checks['#30:Check:Cluster:Intercluster communication'] = [ ping_check_analysis, ping_check_action]
                    log_file_logger.error('#30: Check result: {}'.format(ping_check_result))
                    log_file_logger.error('#30: Check Analysis: {}'.format(ping_check_analysis))
log_file_logger.error('#30: Cluster nodes with ping failure: {}\n'.format(ping_output_failed))
writeFile(report_file, 'Result: ERROR - {}\n'.format(ping_check_analysis))
writeFile(report_file, 'Action: {}\n\n'.format(ping_check_action))
else:
                    log_file_logger.info('#30: Check result: {}'.format(ping_check_result))
                    log_file_logger.info('#30: Check Analysis: {}'.format(ping_check_analysis))
log_file_logger.info('#30: Cluster nodes details: {}\n'.format(ping_output))
writeFile(report_file, 'Result: INFO - {}\n\n'.format(ping_check_analysis))
except Exception as e:
print('\033[1;31m ERROR: Error performing #30:Check:Cluster:Intercluster communication. \n Please check error details in log file: {}.\n If needed, please reach out to tool support at: <EMAIL>, with your report and log file. \033[0;0m'.format(log_file_path))
log_file_logger.exception('{}\n'.format(e))
#Logging out of the Session using jsessionid
log_file_logger.info('Logging out of the Session')
sessionLogoutpy3(vmanage_lo_ip,jsessionid,args.vmanage_port)
log_file_logger.info('Successfully closed the connection')
#Debug Execution
elif args.debug == True:
log_file_logger.info('Executing the script in Debug execution mode')
#version below 19.2
if version_tuple[0:2] < ('19','2'):
#Creating a session
try:
log_file_logger.info('Generating a JSessionID')
jsessionid = generateSessionID(vmanage_lo_ip, args.username, password, args.vmanage_port)
except Exception as e:
log_file_logger.exception('{}\n'.format(e))
raise SystemExit('\033[1;31m ERROR: Error generating JSessionID, make sure that the username and password entered is correct. \n Please check error details in log file: {}.\n If needed, please reach out to tool support at: <EMAIL>, with your report and log file. \033[0;0m \n\n'.format(log_file_path))
#Preliminary Data
log_file_logger.info('****Collecting Preliminary Data\n')
print ('****Collecting Preliminary Data\n')
try:
controllers = json.loads(getRequest(version_tuple, vmanage_lo_ip, jsessionid, 'system/device/controllers', args.vmanage_port))
controllers_info = controllersInfo(controllers)
log_file_logger.info('Collected controllers information: {}'.format(controllers_info))
system_ip_data = json.loads(getRequest(version_tuple, vmanage_lo_ip, jsessionid, 'device/vmanage', args.vmanage_port))
system_ip = system_ip_data['data']['ipAddress']
#system_ip = controllers_info[hostname][1]
log_file_logger.info('Collected vManage System IP address: {}'.format(system_ip))
cpu_speed = cpuSpeed()
log_file_logger.info('Collected vManage CPU Speed GHz: {}'.format(cpu_speed))
cpu_count = cpuCount()
log_file_logger.info('Collected vManage CPU Count: {}'.format(cpu_count))
vedges = json.loads(getRequest(version_tuple, vmanage_lo_ip,jsessionid, 'system/device/vedges', args.vmanage_port))
vedge_count,vedge_count_active, vedge_info = vedgeCount(vedges)
log_file_logger.info('Collected xEdge Count: {}'.format(vedge_count))
cluster_size, server_mode, vmanage_info = serverMode(controllers_info)
log_file_logger.info('Collected vManage Cluster Size: {}'.format(cluster_size))
log_file_logger.info('Collected vManage Server Mode: {}'.format(server_mode))
disk_controller = diskController()
log_file_logger.info('Collected vManage Disk Controller Type: {}'.format(disk_controller))
dpi_stats = json.loads(getRequest(version_tuple, vmanage_lo_ip, jsessionid, 'statistics/settings/status', args.vmanage_port))
dpi_status = dpiStatus(dpi_stats)
log_file_logger.info('Collected DPI status: {}'.format(dpi_status))
server_type = serverType()
log_file_logger.info('Collected Server Type: {}'.format(server_type))
vbond_info, vsmart_info = vbondvmartInfo(controllers_info)
vbond_count = len(vbond_info)
vsmart_count = len(vsmart_info)
                log_file_logger.info('vSmart info: {}'.format(vsmart_info))
                log_file_logger.info('vBond info: {}'.format(vbond_info))
total_devices = len(controllers_info) + vedge_count
log_file_logger.info('Total devices: {}'.format(total_devices))
except Exception as e:
log_file_logger.exception('{}\n'.format(e))
raise SystemExit('\033[1;31m ERROR: Error Collecting Preliminary Data. \n Please check error details in log file: {}.\n If needed, please reach out to tool support at: <EMAIL>, with your report and log file. \033[0;0m'.format(log_file_path))
print('*Starting Checks, this may take several minutes\n\n')
#Critical Checks
critical_checks = {}
log_file_logger.info('*** Performing Critical Checks\n')
            #Beginning #30:Check:Cluster:Intercluster communication in the background
if cluster_size>1:
                log_file_logger.info('Beginning #30:Check:Cluster:Intercluster communication in the background')
try:
cluster_health_data = json.loads(getRequest(version_tuple,vmanage_lo_ip, jsessionid, 'clusterManagement/list', args.vmanage_port))
criticalCheckseventeen = criticalCheckseventeen(cluster_health_data, system_ip, log_file_logger)
except Exception as e:
log_file_logger.exception('{}\n'.format(e))
#01:Check:vManage:Validate current version
print(' #01:Checking:vManage:Validate current version')
log_file_logger.info('#01:Check:vManage:Validate current version')
writeFile(report_file, '#01:Check:vManage:Validate current version\n\n')
try:
boot_partition_size, check_result, check_analysis, check_action = criticalCheckone(version)
if check_result == 'Failed':
critical_checks['#01:Check:vManage:Validate current version'] = [ check_analysis, check_action]
log_file_logger.error('#01: Check result: {}'.format(check_result))
log_file_logger.error('#01: Check Analysis: {}'.format(check_analysis))
log_file_logger.error('#01: version: {}'.format(version))
log_file_logger.error('#01: Boot Partition Size: {}\n'.format(boot_partition_size))
writeFile(report_file, 'Result: ERROR - {}\n'.format(check_analysis))
writeFile(report_file, 'Action: {}\n'.format(check_action))
print('\033[1;31m ERROR: {} \033[0;0m \n\n'.format(check_analysis))
else:
log_file_logger.info('#01: Check result: {}'.format(check_result))
log_file_logger.info('#01: Check Analysis: {}'.format(check_analysis))
log_file_logger.info('#01: version: {}'.format(version))
log_file_logger.info('#01: Boot Partition Size: {}\n'.format(boot_partition_size))
writeFile(report_file, 'Result: INFO - {}\n\n'.format(check_analysis) )
print(' INFO: {}\n\n'.format(check_analysis))
except Exception as e:
print('\033[1;31m ERROR: Error performing #01:Checking:vManage:Validate current version. \n Please check error details in log file: {}.\n If needed, please reach out to tool support at: <EMAIL>, with your report and log file. \033[0;0m \n\n'.format(log_file_path))
log_file_logger.exception('{}\n'.format(e))
#02:Check:vManage:At minimum 20% server disk space should be available
print(' #02:Checking:vManage:vManage sever disk space')
log_file_logger.info('#02:Check:vManage:At minimum 20% server disk space should be available')
writeFile(report_file, '#02:Check:vManage:At minimum 20% server disk space should be available\n\n')
try:
optdata_partition_size, rootfs_partition_size, check_result, check_analysis, check_action = criticalCheckTwo()
if check_result == 'Failed':
critical_checks['#02:Check:vManage:At minimum 20% server disk space should be available'] = [check_analysis, check_action]
log_file_logger.error('#02: Check result: {}'.format(check_result))
log_file_logger.error('#02: Check Analysis: {}'.format(check_analysis))
log_file_logger.error('#02: /opt/data Used: {}'.format(optdata_partition_size))
log_file_logger.error('#02: /rootfs.rw Used: {}\n'.format(rootfs_partition_size))
writeFile(report_file, 'Result: ERROR - {}\n'.format(check_analysis) )
writeFile(report_file, 'Action: {}\n\n'.format(check_action))
print('\033[1;31m ERROR: {} \033[0;0m \n\n'.format(check_analysis))
else:
log_file_logger.info('#02: Check result: {}'.format(check_result))
log_file_logger.info('#02: Check Analysis: {}'.format(check_analysis))
log_file_logger.info('#02: /opt/data Used: {}'.format(optdata_partition_size))
log_file_logger.info('#02: /rootfs.rw Used: {}\n'.format(rootfs_partition_size))
writeFile(report_file, 'Result: INFO - {}\n\n'.format(check_analysis))
print(' INFO:{}\n\n'.format(check_analysis))
except Exception as e:
print('\033[1;31m ERROR: Error performing #02:Check:vManage:At minimum 20% server disk space should be available. \n Please check error details in log file: {}.\n If needed, please reach out to tool support at: <EMAIL>, with your report and log file. \033[0;0m \n\n'.format(log_file_path))
log_file_logger.exception('{}\n'.format(e))
#03:Check:vManage:Memory size
print(' #03:Checking:vManage:Memory size')
log_file_logger.info('#03:Check:vManage:Memory size')
writeFile(report_file, '#03:Check:vManage:Memory size\n')
writeFile(report_file, 'Link to the official documentation: \n https://www.cisco.com/c/en/us/td/docs/routers/sdwan/release/notes/compatibility-and-server-recommendations/ch-server-recs-20-3.html\n\n')
try:
memory_size, memory_size_str, dpi_status, server_type, check_result, check_analysis, check_action = criticalCheckthree(vedge_count, dpi_status, server_type, cluster_size, version_tuple)
if check_result == 'Failed':
critical_checks['#03:Check:vManage:Memory size'] = [ check_analysis, check_action]
log_file_logger.error('#03: Check result: {}'.format(check_result))
log_file_logger.error('#03: Check Analysis: {}'.format(check_analysis))
log_file_logger.error('#03: Memory Size GB: {}'.format(memory_size_str))
log_file_logger.error('#03: /rootfs.rw Used: {}'.format(rootfs_partition_size))
log_file_logger.error('#03: Server Type: {}'.format(server_type))
log_file_logger.error('#03: vEdge Count: {}\n'.format(vedge_count))
writeFile(report_file, 'Result: ERROR - {}\n '.format(check_analysis) )
writeFile(report_file, 'Action: {}\n\n'.format(check_action))
print('\033[1;31m ERROR: {} \033[0;0m \n\n'.format(check_analysis))
else:
log_file_logger.info('#03: Check result: {}'.format(check_result))
log_file_logger.info('#03: Check Analysis: {}\n'.format(check_analysis))
writeFile(report_file, 'Result: INFO - {}\n\n'.format(check_analysis) )
print(' INFO:{}\n\n'.format(check_analysis))
except Exception as e:
print('\033[1;31m ERROR: Error performing #03:Check:vManage:Memory size. \n Please check error details in log file: {}.\n If needed, please reach out to tool support at: <EMAIL>, with your report and log file. \033[0;0m \n\n'.format(log_file_path))
log_file_logger.exception('{}\n'.format(e))
#04:Check:vManage:CPU Count
print(' #04:Checking:vManage:CPU Count')
log_file_logger.info('#04:Check:vManage:CPU Count')
writeFile(report_file, '#04:Check:vManage:CPU Count\n\n')
try:
check_result, check_analysis, check_action = criticalCheckfour(cpu_count, vedge_count, dpi_status, server_type)
if check_result == 'Failed':
critical_checks['#04:Check:vManage:CPU Count'] = [ check_analysis, check_action]
log_file_logger.error('#04: Check result: {}'.format(check_result))
log_file_logger.error('#04: Check Analysis: {}'.format(check_analysis))
log_file_logger.error('#04: CPU Count: {}\n'.format(cpu_count))
writeFile(report_file, 'Result: ERROR - {}\n'.format(check_analysis) )
writeFile(report_file, 'Action: {}\n\n'.format(check_action))
print('\033[1;31m ERROR: {} \033[0;0m \n\n'.format(check_analysis))
else:
log_file_logger.info('#04: Check result: {}'.format(check_result))
log_file_logger.info('#04: Check Analysis: {}\n'.format(check_analysis))
writeFile(report_file, 'Result: INFO - {}\n\n'.format(check_analysis))
print(' INFO:{}'.format(check_analysis))
except Exception as e:
print('\033[1;31m ERROR: Error performing #04:Check:vManage:CPU Count. \n Please check error details in log file: {}.\n If needed, please reach out to tool support at: <EMAIL>, with your report and log file. \033[0;0m \n\n'.format(log_file_path))
log_file_logger.exception('{}\n'.format(e))
#05:Check:vManage:ElasticSearch Indices status
print(' #05:Checking:vManage:ElasticSearch Indices status')
log_file_logger.info('#05:Check:vManage:ElasticSearch Indices status')
writeFile(report_file, '#05:Check:vManage:ElasticSearch Indices status\n\n')
try:
es_indexes_one = json.loads(getRequest(version_tuple,vmanage_lo_ip, jsessionid, 'management/elasticsearch/index/info', args.vmanage_port))
es_index_red_one, check_result_one, check_analysis_one, check_action_one = criticalCheckfive(es_indexes_one)
time.sleep(5)
es_indexes_two = json.loads(getRequest(version_tuple,vmanage_lo_ip, jsessionid, 'management/elasticsearch/index/info', args.vmanage_port))
es_index_red_two, check_result_two, check_analysis_two, check_action_two = criticalCheckfive(es_indexes_two)
if check_result_one == 'Failed' and check_result_two == 'Failed':
critical_checks['#05:Check:vManage:ElasticSearch Indices status'] = [ check_analysis_two, check_action_two]
log_file_logger.error('#05: Check result: {}'.format(check_result_two))
log_file_logger.error('#05: Check Analysis: {}\n'.format(check_analysis_two))
writeFile(report_file, 'Result: ERROR - {}\n'.format(check_analysis_two))
writeFile(report_file, 'Action: {}\n\n'.format(check_action_two))
print('\033[1;31m ERROR: {} \033[0;0m \n\n'.format(check_analysis_two))
elif check_result_one == 'SUCCESSFUL':
log_file_logger.info('#05: Check result: {}'.format(check_result_one))
log_file_logger.info('#05: Check Analysis: {}\n'.format(check_analysis_one))
writeFile(report_file, 'Result: INFO - {}\n\n'.format(check_analysis_one))
print(' INFO:{}\n\n'.format(check_analysis_one))
elif check_result_two == 'SUCCESSFUL':
log_file_logger.info('#05: Check result: {}'.format(check_result_two))
log_file_logger.info('#05: Check Analysis: {}\n'.format(check_analysis_two))
writeFile(report_file, 'Result: INFO - {}\n\n'.format(check_analysis_two))
print(' INFO:{}\n\n'.format(check_analysis_two))
except Exception as e:
            print('\033[1;31m ERROR: Error performing #05:Check:vManage:ElasticSearch Indices status. \n Please check error
<filename>scifin/marketdata/simuldata.py
# Created on 2020/7/22
# This module is for simulating market data.
# Standard library imports
from datetime import datetime
from datetime import timedelta
from typing import Union
# Third party imports
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import pytz
from typeguard import typechecked
# Local application imports
from scifin.marketdata import marketdata
from .. import timeseries as ts
# Dictionary of Pandas' Offset Aliases
# and their numbers of appearance in a year.
DPOA = {'D': 365, 'B': 252, 'W': 52,
'SM': 24, 'SMS': 24,
'BM': 12, 'BMS': 12, 'M': 12, 'MS': 12,
'BQ': 4, 'BQS': 4, 'Q': 4, 'QS': 4,
'Y': 1, 'A':1}
# Datetimes format
fmt = "%Y-%m-%d %H:%M:%S"
fmtz = "%Y-%m-%d %H:%M:%S %Z%z"
#---------#---------#---------#---------#---------#---------#---------#---------#---------#
# CLASS FOR MARKET
@typechecked
class Market:
"""
Creates a market.
Attributes
----------
data : DataFrame
Contains a time-like index and columns of values for each market component.
start_utc : Pandas.Timestamp
Starting date.
end_utc : Pandas.Timestamp
Ending date.
dims : 2-tuple (int,int)
Dimensions of the market data.
freq : str or None
Frequency inferred from index.
name : str
Name or nickname of the market.
tz : str
Timezone name.
timezone : pytz timezone
Timezone associated with dates.
units : List of str
Unit of the market data columns.
"""
def __init__(self,
df: pd.DataFrame=None,
tz: str=None,
units: Union[str, list]=None,
name: str=""
) -> None:
"""
Initializes the Market.
"""
# Deal with DataFrame
if (df is None) or (df.empty is True):
self.data = pd.DataFrame(index=None, data=None)
self.start_utc = None
self.end_utc = None
self.dims = (0,0)
self.freq = None
self.name = 'Empty Market'
else:
# Extract values
            if isinstance(df.index[0], str):
new_index = pd.to_datetime(df.index, format=fmt)
self.data = pd.DataFrame(index=new_index, data=df.values)
self.start_utc = datetime.strptime(str(new_index[0]), fmt)
self.end_utc = datetime.strptime(str(new_index[-1]), fmt)
self.dims = df.shape
try:
self.freq = pd.infer_freq(new_index)
except:
self.freq = 'Unknown'
self.name = name
else:
self.data = df
self.start_utc = df.index[0]
self.end_utc = df.index[-1]
self.dims = df.shape
try:
self.freq = pd.infer_freq(df.index)
except:
self.freq = 'Unknown'
self.name = name
# Deal with unit
if units is None:
self.units = None
else:
assert(len(units) == len(self.data.columns))
self.units = units
# Deal with timezone
if tz is None:
self.tz = 'UTC'
self.timezone = pytz.utc
else:
self.tz = tz
self.timezone = pytz.timezone(tz)
def is_index_valid(self) -> bool:
"""
Checks if the market has a correct index, meaning no date value is repeated.
Parameters
----------
self : DataFrame
The market to be used.
Returns
-------
bool
Returns True if the index is valid, False otherwise.
"""
index = self.data.index.tolist()
market_set = set(index)
for s in market_set:
if index.count(s) > 1:
return False
return True
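    # Note (equivalent formulation, added as an aside): the duplicate check above could also be
    # written as `return len(set(index)) == len(index)`, which avoids the repeated count() calls;
    # the explicit loop is kept to preserve the original behaviour.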
def reset_index(self, new_index: list) -> None:
"""
Resets the index with a new one given in argument.
"""
# Checks
        try:
            assert(len(new_index) == self.data.shape[0])
        except AssertionError:
            raise AssertionError("New index should have same dimension as current index.")
# Replacing index
self.data.index = new_index
return None
# TO DO: Repair this broken function.
#@Typechecking
def to_list(self,
start_date=None,
end_date=None
):
"""
Converts the Market data frame into a list of TimeSeries.
Parameters
----------
self : Market
Market to convert.
start_date : str or datetime
Starting date we want for the time series.
end_date : str or datetime
Ending date we want for the time series.
Returns
-------
List of TimeSeries
The list of times series extracted from the data frame.
"""
# Initialization
list_ts = []
if (start_date is None) and (end_date is None):
new_index = pd.to_datetime(self.data.index)
elif (start_date is None):
end_date = self.data.index[list(self.data.index).index(end_date)]
new_index = pd.to_datetime(self.data.index[:end_date])
elif (end_date is None):
start_date = self.data.index[list(self.data.index).index(start_date)]
new_index = pd.to_datetime(self.data.index[start_date:])
else:
start_date = self.data.index[list(self.data.index).index(start_date)]
end_date = self.data.index[list(self.data.index).index(end_date)]
new_index = pd.to_datetime(self.data.index[start_date:end_date])
# Forming a list of time series
i = 0
for c in self.data.columns:
tmp_series = pd.Series(index=new_index, data=self.data.loc[start_date:end_date, c].values)
if self.units is None:
tmp_unit = None
else:
tmp_unit = self.units[i]
tmp_ts = ts.TimeSeries(data=tmp_series, tz=self.tz, unit=tmp_unit, name=c)
list_ts.append(tmp_ts)
i += 1
return list_ts
# GENERAL FUNCTIONS RELATED TO MARKET
@typechecked
def set_market_names(data: pd.DataFrame,
date: str,
date_type: str="end",
interval_type: str='D'
) -> None:
"""
Sets the column and row names of the market dataframe.
Parameters
----------
data : DataFrame
Dataframe on which we want to apply the function.
date : str
A specific date.
date_type : str
Value "end" for 'date' specifying the data end date, "start" for the start date.
interval_type : str or DateOffset
Specifies nature of the jump between two dates ('D' for days, 'M' for months, 'Y' for years).
Returns
-------
None
None
Raises
------
ValueError
If the choice for 'date_type' is neither "start" or "end".
Notes
-----
The two ways ("end" and "start") of specifying the dates are approximative.
Uncertainty on the dates are of the order of the interval type.
For offset aliases available see:
https://pandas.pydata.org/pandas-docs/stable/user_guide/timeseries.html#timeseries-offset-aliases.
Examples
--------
None
"""
# Initializations
n_ticks = data.shape[0]
n_assets = data.shape[1]
# Setting the column names
data.columns = map(lambda x: "Asset " + str(x), range(n_assets))
# Setting the row names
# Quick check the current date has the right format:
try:
date = datetime.strptime(date, "%Y-%m-%d")
except:
ValueError("Current date format does not seem right.")
# Generate the dates
# either from end date
if date_type == "start":
if interval_type == 'D':
date_series = date + pd.to_timedelta(np.arange(n_ticks), unit='D')
elif interval_type == 'M':
            date_series = date + pd.to_timedelta(np.arange(n_ticks) * int(365./12.), unit='D')
elif interval_type == 'Y':
date_series = date + pd.to_timedelta(np.arange(n_ticks) * 365, unit='D')
# or from the start date
elif date_type == "end":
if interval_type == 'D':
date_series = date - timedelta(days=n_ticks) \
+ pd.to_timedelta(np.arange(n_ticks), unit='D')
elif interval_type == 'M':
date_series = date - timedelta(days=int(n_ticks * (365./12.))) \
+ pd.to_timedelta(np.arange(n_ticks) * int(365./12.), unit='D')
elif interval_type == 'Y':
date_series = date - timedelta(days=int(n_ticks * 365)) \
+ pd.to_timedelta(np.arange(n_ticks) * 365, unit='D')
else:
ValueError("date_type choice is not recognized.")
# Affecting the value to the rows names
data.index = date_series.to_period(interval_type)
return None
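# A minimal usage sketch for set_market_names (illustrative values; kept as a comment so the
# module stays side-effect free on import):
# df = pd.DataFrame(np.random.randn(100, 3))
# set_market_names(df, date="2020-01-01", date_type="start", interval_type='D')
# -> columns become 'Asset 0'..'Asset 2' and the index becomes a daily PeriodIndex
#    starting 2020-01-01.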
@typechecked
def create_market_returns(r_ini: float,
drift: float,
sigma: float,
n_years: int,
steps_per_year: int,
n_components: int,
date: str,
date_type: str,
interval_type: str='D',
tz: str=None,
units: list=None,
name: str=""
) -> Market:
"""
Creates a market from a Geometric Brownian process for each stock.
The model for each stock is of the form:
r_t = drift * dt + sigma * \sqrt(dt) * \eps_t
    where r_t is the return series, drift is the annualized drift,
    and sigma is the annualized volatility.
Parameters
----------
r_ini : float
Initial value of the stock.
drift : float
Value of the drift.
sigma : float
Volatility of the process.
n_years : int
Number of years to generate.
steps_per_year : int
Number of steps per year.
n_components : int
Number of components of the market.
date : str
A specific date.
date_type : str
Value "end" for 'date' specifying the data end date, "start" for the start date.
interval_type : str or DateOffset
Specifies nature of the jump between two dates ('D' for days, 'M' for months, 'Y' for years).
tz : str
Timezone name.
units : List of str
Unit of the market data columns.
Notes
-----
All stocks are assumed to be in the same time zone.
The two ways ("end" and "start") of specifying the dates are approximative.
Uncertainty on the dates are of the order of the interval type.
For offset aliases available see:
https://pandas.pydata.org/pandas-docs/stable/user_guide/timeseries.html#timeseries-offset-aliases.
Returns
-------
Market
Market of returns for the market.
"""
    # Checks
    if units is not None:
        for unit in units:
            if not isinstance(unit, str):
                raise TypeError("Argument units must be a list of 'str'.")
# Initialization
dt = 1/steps_per_year
n_steps = int(n_years * steps_per_year) + 1
# Compute r_t + 1
rets_plus_1 = np.random.normal(loc=(1+drift)**dt,
scale=(sigma*np.sqrt(dt)),
size=(n_steps, n_components))
rets_plus_1[0] = 1
df_returns = r_ini * pd.DataFrame(rets_plus_1).cumprod()
# Set market index and column names
set_market_names(df_returns, date=date, date_type=date_type, interval_type=interval_type)
# Make a market
market_returns = Market(df=df_returns, tz=tz, units=units, name=name)
return market_returns
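# A minimal usage sketch for create_market_returns (illustrative parameter values, kept as a
# comment so importing the module has no side effects):
# market = create_market_returns(r_ini=100., drift=0.05, sigma=0.2,
#                                n_years=2, steps_per_year=252, n_components=5,
#                                date="2020-01-01", date_type="start", interval_type='D',
#                                tz="UTC", units=['$'] * 5, name="Toy market")
# print(market.dims, market.freq)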
@typechecked
def create_market_shares(market: Market,
mean: float = 100000,
stdv: float = 10000
) -> pd.Series:
"""
| |
<filename>ROLL_ALONG_V2_1.py
#!/usr/bin/env python
# coding: utf-8
# In[1]:
#Author: <NAME>
# Setup Python ----------------------------------------------- #
import random, os, glob, time, sys
import pygame
pygame.init()
pygame.mixer.init()
# Setup Pygame/Window & Variables ---------------------------- #
HEIGHT = 650
WIDTH = 1200
radius = 10
running = False
paused = False
x = 10
y = 10
maze = []
streamers_x = []
streamers_y = []
skill = 8
runs = 0
current = 0
volume = 0.15
cursor = 0
blank = False
silent = False
schoice = 0
cx1 = 35
cx2 = 20
cy1 = 110
cy2 = 100
FONT1 = pygame.font.SysFont("comicsansms", 35)
FONT2 = pygame.font.SysFont("arial", 25)
FONT3 = pygame.font.SysFont("Aharoni", 30)
try:
folder = os.path.abspath(os.path.join(__file__, "../"))
except:
folder = os.path.join(os.path.dirname(sys.argv[0]), "")
gsfold = os.path.join(folder, 'Game_Stats/')
win = pygame.display.set_mode((WIDTH,HEIGHT))
pygame.display.set_caption("Roll Along!")
# Test To Determine Correct Rendering Speed ------------------ #
def speed_test():
if not os.path.isdir(gsfold):
os.makedirs(gsfold)
if os.path.isfile(gsfold + 'Speed.txt'):
speed = open(gsfold + "Speed.txt","r")
else:
speed = open(gsfold + "Speed.txt","w+")
tic = time.time()
test1 = []
for i in range(5000000):
test1.append('x')
toc = time.time()
guage = toc-tic
speed.write(str(guage))
latency = speed.read()
try:
wait = int(-10*float(latency)+35)
except:
latency = guage
wait = int(-10*float(latency)+35)
return wait
wait = speed_test()
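# Worked example of the calibration above (illustrative timing): if the 5,000,000 appends take
# about 1.5 s, the stored latency is 1.5 and wait = int(-10*1.5 + 35) = 20; a slower machine
# (larger latency) therefore ends up with a smaller wait value.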
#Background Image -------------------------------------------- #
def background():
global bground
bground = []
bfold = os.path.join(folder, 'Background/')
if not os.path.isdir(bfold):
os.makedirs(bfold)
extensions = [ "jpg", "jpeg", "png", "bmp", "gif" ]
try:
for extension in extensions:
for backg in glob.glob(bfold + "*." + extension):
bground.append(backg)
back = random.randint(0,len(bground)-1)
bg = pygame.image.load(str(bground[back]))
bg = pygame.transform.scale(bg, (WIDTH, HEIGHT))
return bg
except:
pass
try:
bg = background()
if bg == None:
wait = int(wait*1.4 + 4.8)
except:
wait = int(wait*1.4 + 4.8)
# Setup Audio ------------------------------------------------ #
def music(schoice):
global songs
if (schoice == 0) or (schoice==len(songs)):
songs = []
mfold = os.path.join(folder, 'Music/')
if not os.path.isdir(mfold):
os.makedirs(mfold)
extensions = [ "mp3", "wav", "ogg"]
for extension in extensions:
for tune in glob.glob(mfold + "*." + extension):
songs.append(tune)
songs = random.sample(songs,len(songs))
pygame.mixer.music.load(str(songs[schoice]))
pygame.mixer.music.play()
pygame.mixer.music.set_volume(volume)
try:
music(schoice)
except:
pass
# Setup High Score Logfile ------------------------------------ #
def HScore():
if skill==6:
file = 'HScore_E.txt'
elif skill==8:
file = 'HScore_M.txt'
else:
file = 'HScore_D.txt'
if os.path.isfile(gsfold + file):
Hi_score = open(gsfold + file,"r")
current = Hi_score.read()
Hi_score.close()
else:
Hi_score = open(gsfold + file,"w+")
Hi_score.write(str(runs))
current = str(runs)
Hi_score.close()
return current, file
current, file = HScore()
# Setup High Score Updater ------------------------------------- #
def HFile(current, skill):
if skill==6:
file = 'HScore_E.txt'
elif skill==8:
file = 'HScore_M.txt'
else:
file = 'HScore_D.txt'
if runs>=int(current):
Hi_score = open(gsfold + file,"w+")
Hi_score.write(str(runs))
current = str(runs)
Hi_score.close()
return current
# Create Randomized Maze --------------------------------------- #
def Start_Maze():
for rows in range(random.randint(20,30)):
t = (random.randint(20,WIDTH-30)) #position of columns
n = (random.randint(10,HEIGHT-10)) #center of column postions
v = random.randint(20,150) #size of columns
for stacks in range(25):
maze.append(t)
maze.append(random.randint(n-v,n+v))
# Generate Maze ------------------------------------------------ #
def Draw_Maze():
for i in range(len(maze)-1):
if (i % 2) == 0:
pygame.draw.rect(win, (80,30,30), (maze[i], maze[i+1], radius, radius))
# Create Player Icon ------------------------------------------- #
def Draw_circle(x,y):
pygame.draw.circle(win, (255,0,0), (int(x), int(y)), radius)
# Streamer Functions ------------------------------------------- #
def move(items):
for item in items:
item[0] += item[2]
item[1] += item[3]
def removeUseless_x(items):
for item in items:
if item[1] > HEIGHT:
items.remove(item)
def removeUseless_y(items):
for item in items:
if item[0] < 25:
items.remove(item)
# Create Moving Objects To Avoid aka Streamers ----------------- #
def Draw_streamers():
num_s = 1
xvals = set()
yvals = set()
ticker = random.randint(4,skill)
attack = random.randint(0,3)
if (ticker>=(random.randint(5,10))) & (attack>0):
while len(xvals) < num_s:
pos = random.randint(40, WIDTH-15)
xvals.add(pos)
DY = random.randint(6,11)
for val in xvals:
streamers_x.append([val,0,0,DY])
for item in streamers_x:
pygame.draw.circle(win, (50, 30, 150),(item[0], item[1]), 4)
if (ticker>=(random.randint(5,10))) & (attack==0):
while len(yvals) < num_s:
pos = random.randint(10, HEIGHT)
yvals.add(pos)
DX = random.randint(6,11)
for val in yvals:
streamers_y.append([WIDTH,val,-DX,0])
for item in streamers_y:
pygame.draw.circle(win, (50, 30, 150),(item[0], item[1]), 4)
move(streamers_x)
move(streamers_y)
removeUseless_x(streamers_x)
removeUseless_y(streamers_y)
# Define Losing Parameters: Streamer Encounter ------------------ #
def Lose():
for itemx in streamers_x:
s = abs(x-itemx[0])
t = abs(y-itemx[1])
if (s<=13) & (t<=13):
running = False
return running
for itemy in streamers_y:
s = abs(x-itemy[0])
t = abs(y-itemy[1])
if (s<=13) & (t<=13):
running = False
return running
else:
running = True
return running
# Display Successive Runs Completed ----------------------------- #
def winning():
pygame.draw.rect(win, (0, 128, 0), (WIDTH-40 , 12, 3.5*radius, 2.5*radius),1)
nr_wins = FONT2.render(str(runs), True, (0, 128, 0))
if runs<10:
win.blit(nr_wins, (WIDTH-22 , 10))
elif runs<100:
win.blit(nr_wins, (WIDTH-30 , 10))
else:
win.blit(nr_wins, (WIDTH-40 , 10))
def redrawGameWindow():
try:
if not blank:
win.blit(bg, [0, 0])
else:
win.fill((0,0,0))
except:
win.fill((0,0,0))
Draw_circle(x,y)
Draw_Maze()
Draw_streamers()
winning()
pygame.display.update()
run = True
while run:
# Start Game Run ------------------------------------------- #
if running:
for event in pygame.event.get():
pass
if event.type == pygame.QUIT:
pygame.quit()
sys.exit()
#Pause Function ---------------------------------------- #
if event.type == pygame.KEYDOWN:
if (event.key == pygame.K_SPACE):
paused = not paused
while paused:
try:
pygame.mixer.music.pause()
except:
pass
pygame.time.delay(300)
for event in pygame.event.get():
if event.type == pygame.QUIT:
pygame.quit()
sys.exit()
if event.type == pygame.KEYDOWN:
if (event.key == pygame.K_SPACE):
try:
pygame.mixer.music.unpause()
except:
pass
paused = not paused
# Update Player Movement & Maze Encounters ------------- #
                if ((event.key == pygame.K_w) or (event.key == pygame.K_UP)) and (y >= 15):
yes = True
for i in range(len(maze)-1):
if (i % 2) == 0:
s = abs(x-maze[i])
t = abs((y-14)-maze[i+1])
if (s<=10) & (t<=10):
yes = False
break
if yes:
y = y - 14
                if ((event.key == pygame.K_s) or (event.key == pygame.K_DOWN)) and (y <= HEIGHT-15):
yes = True
for i in range(len(maze)-1):
if (i % 2) == 0:
s = abs(x-maze[i])
t = abs((y+14)-maze[i+1])
if (s<=10) & (t<=10):
yes = False
break
if yes:
y = y + 14
                if ((event.key == pygame.K_a) or (event.key == pygame.K_LEFT)) and (x >= 15):
yes = True
for i in range(len(maze)-1):
if (i % 2) == 0:
s = abs((x-14)-maze[i])
t = abs(y-maze[i+1])
if (s<=10) & (t<=10):
yes = False
break
if yes:
x = x - 14
if (event.key == pygame.K_d) or (event.key == pygame.K_RIGHT):
# Setup Next Run/Restart Screen ---------------- #
if x>=(WIDTH-7):
x = 10
y = 10
maze = []
streamers_x = []
streamers_y = []
runs += 1
pygame.time.delay(200)
Start_Maze()
current = HFile(current, skill)
yes = True
for i in range(len(maze)-1):
if (i % 2) == 0:
s = abs((x+14)-maze[i])
t = abs(y-maze[i+1])
if (s<=10) & (t<=10):
yes = False
break
if yes:
x = x + 14
# Test Lose Paramaters -------------------------------- #
running = Lose()
if (pygame.mixer.music.get_busy() == False) & (not silent):
try:
schoice +=1
if schoice==(len(songs)):
schoice = 0
music(schoice)
except:
pass
redrawGameWindow()
else:
try:
if not blank:
win.blit(bg, (0, 0))
else:
win.fill((0,0,0))
except:
win.fill((0,0,0))
# Startup Screen -------------------------------------- #
start = FONT1.render("Play", True, (0, 128, 0))
settings = FONT1.render("Settings", True, (0, 128, 0))
leave = FONT1.render("Exit", True, (0, 128, 0))
high_score = FONT2.render("Best Run: " + current, True, (0, 128, 0))
cursor_txt1 = FONT3.render(">", True, (50, 30, 150))
cursor_txt2 = FONT3.render("_", True, (50, 30, 150))
win.blit(start, (55, 100))
win.blit(settings, (55, 170))
win.blit(leave, (55, 240))
win.blit(high_score, (WIDTH-150, 20))
win.blit(cursor_txt1, (cx1, cy1))
win.blit(cursor_txt2, (cx2, cy2))
if skill==6:
easy = FONT2.render("Skill: Easy", True, (0, 128, 0))
win.blit(easy, (WIDTH-150, 50))
if skill==8:
moderate = FONT2.render("Skill: Moderate", True, (0, 128, 0))
win.blit(moderate, (WIDTH-150, 50))
if skill==10:
hard = FONT2.render("Skill: Hard", True, (0, 128, 0))
win.blit(hard, (WIDTH-150, 50))
pygame.display.flip()
if (pygame.mixer.music.get_busy() == False) & (not silent):
try:
schoice +=1
if schoice==(len(songs)):
schoice = 0
music(schoice)
except:
pass
# Reset Starting Conditions/Start Game ---------------- #
for event in pygame.event.get():
if event.type == pygame.QUIT:
pygame.quit()
sys.exit()
#Inside Game Options ------------------------------ #
if event.type == pygame.KEYDOWN:
if (event.key == pygame.K_RETURN) & (cursor==0):
running = True
x = 10
y = 10
maze = []
streamers_x = []
streamers_y = []
runs = 0
Start_Maze()
if (event.key == pygame.K_RETURN) & (cursor==2):
pygame.quit()
sys.exit()
if (event.key == pygame.K_RETURN) & (cursor==1):
settings = True
cursor = 0
cy1 = 110
cy2 = 100
            # Change Game
<filename>module_constants.py
##############################################################
# These constants are used in various files.
# If you need to define a value that will be used in those files,
# just define it here rather than copying it across each file, so
# that it will be easy to change it if you need to.
##############################################################
########################################################
## PLAYER SLOTS #############################
########################################################
slot_player_faction_id = 0
slot_player_spawn_state = 1 # listed below, starting with player_spawn_state_
slot_player_spawn_invulnerable_time = 2 # mission time when the player spawned with temporary invulnerability
slot_player_spawn_health_percent = 3 # saved health percentage to be applied when next spawning
slot_player_spawn_entry_point = 4 # entry point used at last spawn
player_spawn_state_dead = 0
player_spawn_state_invulnerable = 1 # while invulnerable soon after spawning
player_spawn_state_at_marker = 2 # set before spawning to indicate that the agent should be shifted to the player's marker scene prop
player_spawn_state_alive = 3
slot_player_inactive_index = 5 # index in the inactive players array, if stored
slot_player_next_chat_event_type = 6 # next chat event number that the server expects this player's client to use
slot_player_list_button_id = 7 # overlay id in the player list presentation
slot_player_outlaw_rating = 8
slot_player_is_lord = 9
slot_player_non_lord_troop_id = 10 # the last troop used before changing to a lord only troop, to revert after respawning if someone else is voted lord
slot_player_poll_faction_id = 11 # marks whether the player can vote in the current poll
slot_player_requested_spawn_point = 12 # the spawn point requested by the player after dying, if any; -1 to indicate a newly connected player that hasn't yet requested to spawn
slot_player_has_faction_door_key = 13
slot_player_has_faction_money_key = 14
slot_player_has_faction_item_key = 15
slot_player_teleport_to_ship_no = 16 # instance no of the last ship teleported to with the admin tool
slot_player_last_faction_kicked_from = 17 # stores when kicked from a faction, so subsequent kicks can be free of cost
slot_player_accessing_instance_id = 18 # stores the instance id of the inventory currently being accessed by the player, for updates if anyone else changes it
slot_player_last_action_time = 19 # mission time of the last action that should be prevented from quick repetition
slot_player_equip_item_0 = 20 # module equipment slots corresponding to the hard coded ones in header_items starting with ek_
slot_player_equip_item_1 = 21
slot_player_equip_item_2 = 22
slot_player_equip_item_3 = 23
slot_player_equip_head = 24
slot_player_equip_body = 25
slot_player_equip_foot = 26
slot_player_equip_gloves = 27
slot_player_equip_horse = 28
slot_player_equip_end = 29
slot_player_equip_item_0_ammo = 30
slot_player_equip_item_1_ammo = 31
slot_player_equip_item_2_ammo = 32
slot_player_equip_item_3_ammo = 33
slot_player_spawn_food_amount = 34 # saved food for next spawn
slot_player_faction_chat_muted = 35
slot_player_kick_at_time = 36 # time to kick a player after the name server has rejected them, to allow time to receive the message
slot_player_can_faction_announce = 37
slot_player_next_spawn_health_percent = 38 # spawn health percentage for the troop applied after death, if that server option is enabled
slot_player_accessing_unique_id = 39 # a unique number identifying an inventory scene prop being accessed that could despawn and the instance id be reused, like corpses
slot_player_admin_no_panel = 40 # admin permission slots: the default value 0 is permissive so everything works when a name server is not connected
slot_player_admin_no_gold = 41
slot_player_admin_no_kick = 42
slot_player_admin_no_temporary_ban = 43
slot_player_admin_no_permanent_ban = 44
slot_player_admin_no_kill_fade = 45
slot_player_admin_no_freeze = 46
slot_player_admin_no_teleport_self = 47
slot_player_admin_no_admin_items = 48
slot_player_admin_no_heal_self = 49
slot_player_admin_no_godlike_troop = 50
slot_player_admin_no_ships = 51
slot_player_admin_no_announce = 52
slot_player_admin_no_override_poll = 53
slot_player_admin_no_all_items = 54
slot_player_admin_no_mute = 55
slot_player_admin_no_animals = 56
slot_player_admin_no_factions = 57
slot_player_admin_end = 58
# ***********************************************************
# PN OTHERS CONSTANTS STUFF *********************************
# ***********************************************************
pn_trade_route_ivory_income = 6700
pn_trade_route_spices_income = 4480
pn_trade_route_sugar_income = 3360
pn_trade_route_cotton_income = 3360
pn_trade_route_tea_income = 3080
pn_trade_route_tobacco_income = 3080
pn_trade_route_fur_income = 2520
pn_trade_route_coffee_income = 2240
pn_trade_route_0 = 0
pn_trade_route_1 = 1
pn_trade_route_2 = 2
pn_trade_route_3 = 3
pn_trade_route_4 = 4
pn_trade_route_5 = 5
pn_trade_route_6 = 6
pn_trade_route_7 = 7
pn_trade_route_8 = 8
pn_trade_route_9 = 9
pn_trade_routes_props_begin = "spr_pn_trade_route_coffee_capture_point"
pn_trade_routes_props_end = "spr_mm_hugo1"
pn_chairs_begin = "spr_chair_castle_a"
pn_chairs_end = "spr_tavern_table_a"
pn_ship_climber_down_rel_z = 100
pn_ship_climber_down_rel_x = -110
pn_ship_climber_down_rel_y = 0
pn_ship_climber_up_rel_z = 1983
pn_ship_climber_up_rel_x = 130
pn_ship_climber_up_rel_y = 0
pn_art_horse_only_begin = "itm_arty_horse_french"
pn_art_horse_only_end = "itm_hussar_horse_french"
pn_art_with_horse_begin = "itm_arty_horse_cannon_french"
pn_art_with_horse_end = "itm_admin_musket"
pn_drummers_uniforms_begin = "itm_french_45e_body_drummer"
pn_drummers_uniforms_end = "itm_british_infantry_ranker"
pn_hittable_props_begin = "spr_table_tavern"
pn_hittable_props_end = "spr_custom_button_instant"
pn_usable_not_dam_doors_small_begin = "spr_pw_door_teleport_small_arch_a"
pn_usable_not_dam_doors_small_end = "spr_pw_door_teleport_arch_a"
pn_usable_dam_doors_small_begin = "spr_pw_door_rotate_a"
pn_usable_dam_doors_small_end = "spr_pw_door_rotate_e_left"
pn_usable_dam_doors_medium_begin = "spr_pw_door_rotate_e_left"
pn_usable_dam_doors_medium_end = "spr_pw_door_rotate_earth_left"
pn_usable_dam_doors_huge_begin = "spr_pw_door_rotate_earth_left"
pn_usable_dam_doors_huge_end = "spr_pw_wooden_bridge_a"
pn_usable_dam_doors_begin = pn_usable_dam_doors_small_begin
pn_usable_dam_doors_end = pn_usable_dam_doors_huge_end
pn_trees_begin = "spr_pw_tree_a1"
pn_trees_end = "spr_pw_stick_bush_2a"
pn_small_houses_one_floor_begin = "spr_mm_house_basic13"
pn_small_houses_one_floor_end = "spr_mm_house_basic14"
pn_small_houses_two_floor_begin = "spr_mm_house_basic14"
pn_small_houses_two_floor_end = "spr_mm_house_basic12"
pn_medium_houses_one_floor_begin = "spr_mm_house_basic12"
pn_medium_houses_one_floor_end = "spr_mm_house_basic1"
pn_medium_houses_two_floor_begin = "spr_mm_house_basic1"
pn_medium_houses_two_floor_end = "spr_mm_house_basic4"
pn_house_stairs_begin = "spr_mm_house_stair1"
pn_house_stairs_end = "spr_mm_house_basic13"
pn_effect_type_wood = 0
pn_effect_type_bricks = 1
pn_effect_type_earth = 2
pn_sapper_build_price_stakes1 = 100
pn_sapper_build_price_stakes2 = 80
pn_sapper_build_price_sandbags = 100
pn_sapper_build_price_chevaux_de_frise = 40
pn_sapper_build_price_gabion = 60
pn_sapper_build_price_fence_1d = 120
pn_sapper_build_price_earthwork = 80
pn_sapper_buildings_on_scene_limit = 99
player_character_language_french = 1
player_character_language_english = 2
player_character_language_austrian = 3
player_character_language_prussian = 4
player_character_language_russian = 5
player_character_language_pirate = 6
player_character_language_begin = player_character_language_french
player_character_language_end = player_character_language_pirate + 1
voice_type_cry = 1
voice_type_surrender = 2
voice_type_comm_ready = 3
voice_type_comm_present = 4
voice_type_comm_fire = 5
voice_type_comm_charge = 6
voice_type_comm_advance = 7
voice_type_comm_hold = 8
voice_type_comm_fire_at_will = 9
voice_type_comm_on_me = 10
voice_type_comm_fall_back = 11
voice_types_begin = voice_type_cry
voice_types_end = 12
music_type_start = 1
music_type_stop = 2
music_type_toggle_together = 3
music_types_begin = music_type_start
music_types_end = 4
spyglass_type_start = 1
spyglass_type_stop = 2
drinking_type_start = 1
drinking_type_stop = 2
server_action_force_music_selection = 1
server_actions_begin = server_action_force_music_selection
server_actions_end = 2
player_action_change_lang = 87
player_action_voice = 88
player_action_music = 89
player_action_spyglass = 90
player_action_place_rocket = 91
player_action_toggle_walk = 92
player_action_has_cheat = 93
player_action_surrender = 94
player_action_misc_item_drinking = 95
player_action_custom_order_menu_interact = 96
player_action_misc_item_drinking = 97
player_action_custom_order_menu_interact = 98
player_actions_begin = player_action_voice
player_actions_end = 99
# If adding more than 20 tracks in any list you need to increase this number
instrument_max_tracks = 20
drum_sounds_britain_begin = "snd_drum_britain_1"
drum_sounds_britain_end = "snd_drum_france_1"
drum_sounds_france_begin = drum_sounds_britain_end
drum_sounds_france_end = "snd_drum_prussia_1"
drum_sounds_prussia_begin = drum_sounds_france_end
drum_sounds_prussia_end = "snd_drum_russia_1"
drum_sounds_russia_begin = drum_sounds_prussia_end
drum_sounds_russia_end = "snd_drum_austria_1"
drum_sounds_austria_begin = drum_sounds_russia_end
drum_sounds_austria_end = "snd_drum_highland_1"
drum_sounds_highland_begin = drum_sounds_austria_end
drum_sounds_highland_end = "snd_drum_signal_1"
drum_sounds_calls_begin = drum_sounds_highland_end
drum_sounds_calls_end = "snd_fife_britain_1"
fife_sounds_britain_begin = drum_sounds_calls_end
fife_sounds_britain_end = "snd_fife_france_1"
fife_sounds_france_begin = fife_sounds_britain_end
fife_sounds_france_end = "snd_fife_prussia_1"
fife_sounds_prussia_begin = fife_sounds_france_end
fife_sounds_prussia_end = "snd_fife_russia_1"
fife_sounds_russia_begin = fife_sounds_prussia_end
fife_sounds_russia_end = "snd_fife_austria_1"
fife_sounds_austria_begin = fife_sounds_russia_end
fife_sounds_austria_end = "snd_bugle_britain_1"
bugle_sounds_britain_begin = fife_sounds_austria_end
bugle_sounds_britain_end = "snd_bugle_france_1"
bugle_sounds_france_begin = bugle_sounds_britain_end
bugle_sounds_france_end = "snd_bugle_prussia_1"
bugle_sounds_prussia_begin = bugle_sounds_france_end
bugle_sounds_prussia_end = "snd_bugle_russia_1"
bugle_sounds_russia_begin = bugle_sounds_prussia_end
bugle_sounds_russia_end = "snd_bugle_austria_1"
bugle_sounds_austria_begin = bugle_sounds_russia_end
bugle_sounds_austria_end = "snd_bugle_signal_1"
bugle_sounds_calls_begin = bugle_sounds_austria_end
bugle_sounds_calls_end = "snd_bagpipes_britain_1"
bagpipes_sounds_britain_begin = bugle_sounds_calls_end
bagpipes_sounds_britain_end = "snd_bagpipes_extra_1"
bagpipes_sounds_extra_begin = bagpipes_sounds_britain_end
bagpipes_sounds_extra_end = "snd_piano_loop_1"
drum_strings_britain_begin = "str_drum_britain_1"
drum_strings_france_begin = "str_drum_france_1"
drum_strings_prussia_begin = "str_drum_prussia_1"
drum_strings_russia_begin = "str_drum_russia_1"
drum_strings_austria_begin = "str_drum_austria_1"
drum_strings_highland_begin = "str_drum_highland_1"
drum_strings_calls_begin = "str_drum_signal_1"
fife_strings_britain_begin = "str_fife_britain_1"
fife_strings_france_begin = "str_fife_france_1"
fife_strings_prussia_begin = "str_fife_prussia_1"
fife_strings_russia_begin = "str_fife_russia_1"
fife_strings_austria_begin = "str_fife_austria_1"
bugle_strings_britain_begin = "str_bugle_britain_1"
bugle_strings_france_begin = "str_bugle_france_1"
bugle_strings_prussia_begin = "str_bugle_prussia_1"
bugle_strings_russia_begin = "str_bugle_russia_1"
bugle_strings_austria_begin = "str_bugle_austria_1"
bugle_strings_calls_begin = "str_bugle_signal_1"
bagpipes_strings_britain_begin = "str_bagpipes_britain_1"
bagpipes_strings_extra_begin = "str_bagpipes_extra_1"
piano_sounds_begin = "snd_piano_loop_1"
piano_sounds_end = "snd_organ_loop_1"
organ_sounds_begin = piano_sounds_end
organ_sounds_end = "snd_instruments_end"
piano_strings_begin = "str_piano_tune_1"
organ_strings_begin = "str_organ_tune_1"
instrument_sounds_begin = drum_sounds_britain_begin
instruments_sounds_end = organ_sounds_end
mm_cannon_types_begin = "spr_mm_cannon_12pdr"
mm_cannon_types_end = "spr_mm_cannonball_6pd"
mm_cannon_wood_types_begin = "spr_mm_cannon_12pdr_wood"
mm_cannon_wood_types_end = "spr_mm_cannon_12pdr_wheels"
mm_cannon_wheel_types_begin = "spr_mm_cannon_12pdr_wheels"
mm_cannon_wheel_types_end = "spr_mm_cannon_12pdr_barrel"
mm_cannon_barrel_types_begin = "spr_mm_cannon_12pdr_barrel"
mm_cannon_barrel_types_end = "spr_mm_cannon_12pdr_limber_wheels"
mm_unlimber_button_types_begin = "spr_mm_cannon_12pdr_limber"
mm_unlimber_button_types_end = "spr_mm_limber_button"
mm_cannon_button_types_begin = "spr_mm_limber_button"
mm_cannon_button_types_end = "spr_mm_round_button"
mm_button_types_begin = "spr_mm_cannon_12pdr_limber"
mm_button_types_end = "spr_mm_tunnel_wall"
cannon_ammo_type_round = 1
cannon_ammo_type_shell = 2
cannon_ammo_type_canister = 3
cannon_ammo_type_bomb = 4
cannon_ammo_type_rocket = 5
cannon_ammo_types_begin = cannon_ammo_type_round
cannon_ammo_types_end = 6
cannon_hit_effect_event_type_explosion = 1
cannon_hit_effect_event_type_ground = 2
cannon_hit_effect_event_type_water_ball = 3
cannon_hit_effect_event_type_wall = 4
cannon_hit_effect_event_types_begin = cannon_hit_effect_event_type_explosion
cannon_hit_effect_event_types_end = 5
cannon_command_up = 1
cannon_command_down = 2
cannon_command_right = 3
cannon_command_left = 4
cannon_command_fire = 5
cannon_command_stop_aim = 6
cannon_commands_begin = cannon_command_up
cannon_commands_end = 7
command_type_cannon = 1
command_type_ship = 2
command_types_begin = command_type_cannon
command_types_end = 3
prop_effect_type_sound = 1
prop_effect_type_particle = 2
prop_effect_type_stop_all = 3
prop_effect_types_begin = prop_effect_type_sound
prop_effect_types_end = 4
prop_effect_handle_stop = 0
prop_effect_handle_start = 1
prop_effect_handles_begin = prop_effect_handle_stop
prop_effect_handles_end = 2
mm_destructible_props_begin = "spr_mm_house_wall_1"
mm_destructible_props_end = "spr_mm_house_wall_2dd"
mm_destroyed_props_begin = "spr_mm_house_wall_2dd"
mm_destroyed_props_end = "spr_mm_wallgate"
construct_costs_offset = 50
construct_button_offset = construct_costs_offset + 20
construct_display_offset = construct_button_offset + 20
construct_offset_end = construct_display_offset + 20
mm_construct_props_begin = "spr_mm_stakes_construct"
mm_construct_props_end = "spr_mm_crator_small"
mm_construct_props_strings = "str_mm_stakes_construct"
mm_construct_props_meshes = "mesh_construct_mesh_stakes"
# PN END ***********************************
########################################################
## AGENT SLOTS #############################
########################################################
slot_agent_horse_last_rider = 0 # if a horse, the agent id of the last (or current) rider, or if stray, negative numbers counting down to when the horse will be removed
slot_agent_drowning_count = 1 # counts upwards each time an agent is found to be drowning underwater
slot_agent_poison_amount = 2 # increases each time the agent is attacked with poison, reduced when healed
slot_agent_poisoner_agent_id = 3 # agent id that last poisoned the agent
slot_agent_poisoner_player_uid = 4 # player unique id of the poisoner when applicable, to give correct death messages
slot_agent_freeze_instance_id = 5 # instance id of the invisible scene prop being used to freeze
slot_agent_is_targeted = 6 # mark that the stored target agent id is correct
slot_agent_food_amount = 7
slot_agent_fishing_last_school = 8 # last school fished from, to speed up repetitive check
slot_agent_last_horse_ridden = 9
slot_agent_money_bag_1_value = 10 # the values of the money bags picked up, in order
slot_agent_money_bag_2_value = 11
slot_agent_money_bag_3_value = 12
slot_agent_money_bag_4_value = 13
slot_agent_hunting_last_carcass = 14 # last animal carcass processed, to speed up repetitive checks
slot_agent_died_normally = 15
slot_agent_animation_end_time_ms = 16 # mission time in milliseconds
slot_agent_last_animation_string_id = 17
slot_agent_recent_animations_delay_ms = 18 # interval in milliseconds
slot_agent_storage_corpse_instance_id = 19 # saved when discarding armor
slot_agent_animal_herd_manager = 20 # instance id of the herd manager item attached to
slot_agent_animal_birth_time = 21 # mission time when the animal was spawned as a child, or extrapolated if spawned as an adult
slot_agent_animal_grow_time = 22 # mission time after which the animal will grow to an adult or birth a child
slot_agent_animal_move_time = 23 # mission time after which to move
slot_agent_animal_last_damage_time = 24
slot_agent_animal_food = 25
slot_agent_animal_carcass_instance_id = 26
slot_agent_animal_times_stuck = 27
slot_agent_animal_end = 28
slot_agent_last_voice_at = 29
slot_agent_last_sound_at = 30
slot_agent_used_prop_instance = 31
slot_agent_music_play_together = 32
slot_agent_base_speed_mod = 33
slot_agent_started_playing_music_at = 34
slot_agent_head_damage_factor = 40 # agent modifier factors for
x:
return np.nan
return x
def get_other_loc(x):
if type(x) != str or x == 'none':
return np.nan
if ',' in x:
split_str = x.split(',')
return split_str[0].lower()
else:
return x.lower()
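# Illustrative examples (editor's sketch, assumed input strings, not from the original data):
#   get_other_loc('Houston, TX')  -> 'houston'
#   get_other_loc('none')         -> nan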
def get_state_var(x):
if x == 'none':
return np.nan
if type(x) == str:
if ',' in x:
new_list = x.split(',')
state_str = new_list[1]
if isBlank(state_str):
return np.nan
else:
return state_str.strip()
else:
return x[-2:]
else:
return np.nan
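# Illustrative example (editor's sketch; assumes isBlank returns True only for empty/whitespace strings):
#   get_state_var('Austin, TX')  -> 'TX'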
def extract_deprecated_val(x):
if 'k' in x:
x = x[0:x.find('k')]
if '.' in x:
trail = '00'
x = x.replace('.', '')
else:
trail = '000'
elif 'm' in x:
x = x[0:x.find('m')]
if '.' in x:
trail = '00000'
x = x.replace('.', '')
else:
trail = '000000'
elif 'b' in x:
x = x[0:x.find('b')]
if '.' in x:
trail = '00000000'
x = x.replace('.', '')
else:
trail = '000000000'
goal = x+trail
goal = int(only_numerics(goal))
return goal
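# Illustrative examples (editor's sketch, not from the original data; assumes
# only_numerics strips every non-digit character):
#   extract_deprecated_val('1.2k')  -> 1200
#   extract_deprecated_val('3m')    -> 3000000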
def get_money_raised(x):
if type(x) != str or 'none' in x:
return np.nan
elif '%' in x:
return np.nan
elif '$' not in x:
return 'NOT USD'
else:
x = x.lower()
if 'of' in x:
new_info = x.split('of')
try:
if 'k' in new_info[0] or 'm' in new_info[0]:
money_raised = extract_deprecated_val(new_info[0])
else:
money_raised = int(only_numerics(new_info[0]))
except:
print('failed to get money raised: ', x)
money_raised = np.nan
elif 'raised' in x:
if 'goal' in x:
new = x.split('\n')
this_str = new[0]
this_str = this_str[this_str.find('$'):]
if '.' in this_str:
new = this_str[0:this_str.find('.')]
if 'k' in new or 'm' in new:
money_raised = extract_deprecated_val(new)
else:
money_raised = int(only_numerics(new))
else:
try:
if 'k' in x or 'm' in x:
money_raised = extract_deprecated_val(x)
else:
money_raised = int(only_numerics(x))
except:
print('failed to get money raised: ', x)
money_raised = np.nan
else:
return np.nan
return money_raised
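# Illustrative example (editor's sketch, assumed input string):
#   get_money_raised('$1.2k raised of $5k goal')  -> 1200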
def get_goal(x):
if type(x) != str:
return np.nan
if '%' in x:
return np.nan
if '$' not in x:
return 'NOT USD'
x = x.lower()
if 'raised' in x and 'of' not in x:
if 'goal' in x:
new = x.split('\n')
new = new[1]
new = new[new.find('$'):]
if 'k' in new or 'm' in new or 'b' in new:
goal = extract_deprecated_val(new)
else:
if '.' in x:
new = new[0:new.find('.')]
goal = int(only_numerics(new))
return goal
else:
return np.nan
else:
if 'of' in x:
new_info = x.split('of')
new = new_info[1]
if 'k' in new or 'm' in new or 'b' in new:
goal = extract_deprecated_val(new)
else:
if '.' in new:
new = new[0:new.find('.')]
try:
goal = int(only_numerics(new))
except:
goal = 'failed'
return goal
elif 'goal' in x:
return int(only_numerics(x))
else:
print('failed to parse goal: ', x)
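# Illustrative example (editor's sketch, assumed input string):
#   get_goal('$1.2k raised of $5k goal')  -> 5000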
def get_num_contributors(x):
if type(x) == str and x != 'none':
x = x.lower()
if 'raised' in x and '$' not in x:
new = x.split('in')
            if 'k' in new[0]:
                new = extract_deprecated_val(new[0])
else:
new = int(only_numerics(new[0]))
return new
elif 'donor' in x and 'day' not in x and 'month' not in x:
if 'k' in x:
new = extract_deprecated_val(x)
else:
new = int(only_numerics(x))
return new
elif 'people' in x or 'person' in x:
if 'by' in x:
str_split1 = x.split('by')
if 'in' in x:
str_split2 = str_split1[1].split('in')
new = str_split2[0]
if 'k' in new:
new = extract_deprecated_val(new)
else:
new = int(only_numerics(new))
return new
else:
new = str_split1[1]
if 'k' in new:
new = extract_deprecated_val(new)
else:
new = int(only_numerics(new))
return new
else:
print(x)
return x
else:
return np.nan
else:
return np.nan
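# Illustrative example (editor's sketch, assumed input string):
#   get_num_contributors('1.2k donors')  -> 1200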
def remove_non_loc_info(x):
if type(x) == str:
temp = x.lower()
if 'donations' in temp and ' 1 donation' not in temp:
#print('donations in x')
loc = temp.find('donations')
delete = loc+len('donations')
temp = temp[delete:]
#return new
if '1 donation' in temp:
#print('donation in x')
loc = temp.find('donation')
delete = loc+len('donation')
temp = temp[delete:]
#return new
if 'organizer' in temp:
#print('organizer in x')
loc = temp.find('organizer')
delete = loc+len('organizer')
temp = temp[delete:]
return temp
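# Illustrative example (editor's sketch): strips the leading donation/organizer
# text so only the location part remains.
#   remove_non_loc_info('142 donations Houston, TX')  -> ' houston, tx'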
# Goal and donors
#regex cleaning functions
def contruct_goal_pattern():
    rtypes = [] # (type of values returned (raised, goal, both), notation that money is recorded in (US vs foreign))
rpatterns = []
rpatterns.append(r'(.*)raised of(.*)goal')
rpatterns.append(r'(.*)of(.*)goal')
rpatterns.append(r'(.*)of(.*)')
rpatterns.append(r'(.*)raised of(.*)target')
rpatterns.append(r'Raised:(.*)Goal:(.*)')
rtypes+=[['both','US']] * 5
rpatterns.append(r'(.*)des Ziels von(.*)') # german
rpatterns.append(
r'(.*)sur un objectif de(.*)') # french
rpatterns.append(r'(.*)del objetivo de(.*)') # spanish
    rpatterns.append(r'(.*)da meta de(.*)') # portuguese
rpatterns.append(r'(.*)su(.*)raccolti') # italian
rpatterns.append(r'(.*)van het doel(.*)') # dutch
rtypes+=[['both','foreign']] * 6
rpatterns.append(r'(.*)raised')
rtypes+=[['raised','US']]
rpatterns.append(r'(.*)réunis') # french
rpatterns.append(r'(.*)gesammelt') # german
rpatterns.append(r'(.*)recaudados') # spanish
    rpatterns.append(r'(.*)arrecadados') # portuguese
rpatterns.append(r'(.*)raccolti') # italian
rtypes+=[['raised','foreign']]*5
rpatterns.append(r'(.*)goal')
rpatterns.append(r'(.*)target')
rtypes+=[['goal','US']]*2
rpatterns.append(r'Objectif\s*:(.*)') # french
rpatterns.append(r'Objetivo\s*:(.*)') #spanish
rpatterns.append(r'(.*)Ziel') # german
    rpatterns.append(r'Meta de(.*)') # portuguese
rpatterns.append(r'(.*)obiettivo') # italian
rtypes+=[['goal','foreign']]*5
patterns_collection = pd.Series(rtypes, index=rpatterns, name='rtype')
return patterns_collection
GOAL_PATTERNS = contruct_goal_pattern()
_clean_whitespace = lambda x: re.sub(r'\s+', ' ', x).strip()
THOUNDSAND_PATTERN = re.compile(r'\d+[,.]*\d*.*[k]')
MILLION_PATTERN = re.compile(r'\d+[,.]*\d*.*[m]')
BILLION_PATTERN = re.compile(r'\d+[,.]*\d*.*[b]')
MONEY_PATTERN = re.compile(r"""( #start of group0, this is the desired output
    \d+ #start digit of money amount, must be followed by abbr, number or marker, nonwords or end of string
((?<=\d)[,.]\d+)* #(group1) this is an optional group that only appears if markers are present
((?<=\d)[kmbKMB](?=\W|$)){0,1} #(group2)match thousand,mill,bill abbreviation if present but only if theres one of them
)#close group0
""",re.VERBOSE)
_remove_whitespace_inside_money = lambda x: re.sub(r'(?<=\d|[,.])\s(?=\d|[,.]|[kmbKMB](?=\W|$))','',x)
_extract_money_amount = lambda x: MONEY_PATTERN.findall(_remove_whitespace_inside_money(x))
def _switch_markers_to_us_notation(amnt):
chars = []
for c in amnt:
if c == ',':
chars.append('.')
elif c == '.':
chars.append(',')
else:
chars.append(c)
return ''.join(chars)
def parse_money_into_floats(x,us_notation=True,switch_retry=True):
out = {'amount':np.nan,'currency':np.nan}
if pd.isnull(x): return out
old_x = x
x = x.strip().lower()
if len(x) == 0: return out
try:
amnt = _extract_money_amount(x)[0][0]
curr = x.replace(amnt,'').strip()
if not us_notation:
# money amount written in foreign notation
# need to swap , and .
amnt = _switch_markers_to_us_notation(amnt)
        numeric_amnt = ''.join(re.findall(r'[\d,.]+', amnt))
numeric_amnt = float(numeric_amnt.replace(',', ''))
trail = 1
if THOUNDSAND_PATTERN.search(amnt):
trail = 1000
elif MILLION_PATTERN.search(amnt):
trail = 1000000
elif BILLION_PATTERN.search(amnt):
trail = 1000000000
out['amount']=numeric_amnt * trail
out['currency'] = curr
return out
except:
if switch_retry:
print(f'[WARNING] failed to parse {old_x} but will retry by swapping , and .')
# ~ doesnt work, have to be not
out = parse_money_into_floats(x,us_notation=not us_notation,switch_retry=False)
if not pd.isna([*out.values()]).all():
print('[WARNING] parsed results might be inaccurate, check below')
print(f"[RETRY OUTPUT] original:{x}|parsed_amnt:{out['amount']}|parsed_currency:{out['currency']}")
else:
print(f'failed to parse original x:{old_x}|stripped:{x}')
return out
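# Illustrative examples (editor's sketch, assumed inputs):
#   parse_money_into_floats('$1.2k')   -> {'amount': 1200.0, 'currency': '$'}
#   parse_money_into_floats('$2,000')  -> {'amount': 2000.0, 'currency': '$'}
# Passing us_notation=False swaps ',' and '.' before parsing, for amounts
# written in European-style notation.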
def get_raised_and_goal_amount(x, USD_only=True):
import re
out = {"raised": np.nan, "goal": np.nan,"raised_amnt":np.nan,
"raised_curr":np.nan,"goal_amnt":np.nan,"goal_curr":np.nan}
if x == 'none': return out
if USD_only:
if '$' not in x: return out
x = _clean_whitespace(x)
    for rpattern, rtype in GOAL_PATTERNS.items():
results = re.findall(rpattern, x)
if len(results) > 0:
results = results[0] # pop out results
rtype_value,rtype_notation = rtype[0],rtype[1]
if rtype_value == 'both':
out["raised"], out["goal"] = results[0], results[1]
for k in ["raised","goal"]:
results = parse_money_into_floats(out[k],us_notation=rtype_notation=='US')
out[k+"_amnt"],out[k+"_curr"] = results["amount"],results["currency"]
elif rtype_value == "raised":
out["raised"] = results
results = parse_money_into_floats(out["raised"],us_notation=rtype_notation=='US')
out["raised_amnt"],out["raised_curr"] = results["amount"],results["currency"]
elif rtype_value == "goal":
out["goal"] = results
results = parse_money_into_floats(out["goal"],us_notation=rtype_notation=='US')
out["goal_amnt"],out["goal_curr"] = results["amount"],results["currency"]
break
if pd.isna([*out.values()]).all(): print(f'failed to parse {x}')
return out
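# Illustrative example (editor's sketch, assumed input):
#   get_raised_and_goal_amount('$500 raised of $2,000 goal')
#     -> raised_amnt 500.0, goal_amnt 2000.0, both currencies '$'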
def standardize_MBk_in_number_str(x):
if pd.isnull(x): return x
old_x = x
x = x.strip().lower()
if len(x) == 0: return np.nan
try:
        x_i = re.findall(r'\d+[,.]*\d*', x)[0]
x_i = float(x_i.replace(',', ''))
trail = 1
if THOUNDSAND_PATTERN.search(x):
trail = 1000
elif MILLION_PATTERN.search(x):
trail = 1000000
elif BILLION_PATTERN.search(x):
trail = 1000000000
return x_i * trail
except:
print(f'original x:{old_x}|stripped:{x}')
return np.nan
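# Illustrative examples (editor's sketch):
#   standardize_MBk_in_number_str('1.5k')  -> 1500.0
#   standardize_MBk_in_number_str('2m')    -> 2000000.0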
def construct_status_pattern():
rpatterns = []
rtypes = []
rpatterns.append(r'^(\S+) donor$') # ^ and $ help make match the whole string
rtypes.append(['ndonor'])
rpatterns.append(r'raised by (\S+) donor in \S+? duration')
rtypes.append(['ndonor'])
rpatterns.append(r'\S+? raised by (\S+) donor in \S+? duration')
rtypes.append(['ndonor'])
rpatterns.append(r'campaign created .*?duration ago')
rtypes.append([])
rpatterns.append(r'^recent donor [(](\S+)[)]$')
rtypes.append(['ndonor'])
rpatterns.append(r'goal reached!')
rtypes.append([])
rpatterns.append(r'campaign ended')
rtypes.append([])
rpatterns.append(r'only \S+? duration left to reach goal!')
rtypes.append([])
rpatterns.append(r'be the first to like this donor \S+? duration ago')
rtypes.append([])
rpatterns.append(r'\S+? donor likes this donor \S+? duration ago')
rtypes.append([])
rpatterns.append(r'gesammelt von (\S+) donore{0,1}n{0,1} in \S+? tage{0,1}n{0,1}')
rtypes.append(['ndonor'])
rpatterns.append(r'gesammelt von (\S+) donore{0,1}n{0,1} in \S+? monate{0,1}')
rtypes.append(['ndonor'])
rpatterns.append(r'gesammelt von (\S+) donore{0,1}n{0,1} in \S+? stunde{0,1}n{0,1}')
rtypes.append(['ndonor'])
rpatterns.append(r'(\S+) donornes ont fait un don en \S+? mois{0,1}')
rtypes.append(['ndonor'])
rpatterns.append(r'(\S+) donorne a fait un don en \S+? mois{0,1}')
rtypes.append(['ndonor'])
rpatterns.append(r'(\S+) donornes ont fait un don en \S+? jours{0,1}')
rtypes.append(['ndonor'])
rpatterns.append(r'(\S+) donorne a fait un don en \S+? jours{0,1}')
rtypes.append(['ndonor'])
rpatterns.append(r'recaudados de (\S+) donoras en \S+? mese{0,1}s{0,1}')
rtypes.append(['ndonor'])
rpatterns.append(r'recaudados de (\S+) donoras en \S+? días{0,1}')
rtypes.append(['ndonor'])
rpatterns.append(r'recolectados de (\S+) donoras{0,1} en \S+? días{0,1}')
rtypes.append(['ndonor'])
OOOOOO
0x00, 0x42, 0x18, # O O OO
0x00, 0x4C, 0x18, # O OO OO
0x00, 0xFE, 0x08, # OOOOOOO O
0x83, 0xFE, 0x08, # O OOOOOOOOO O
0x83, 0x22, 0x08, # O OO O O O
0x80, 0x12, 0x08, # O O O O
0x40, 0x12, 0x18, # O O O OO
0x40, 0x1A, 0x30, # O OO O OO
0x60, 0x1B, 0xF0, # OO OO OOOOOO
0x30, 0x1B, 0xE0, # OO OO OOOOO
0x1C, 0x39, 0xC0, # OOO OOO OOO
0x1F, 0xF8, 0x00, # OOOOOOOOOO
0x0F, 0xF0, 0x00, # OOOOOOOO
0x03, 0xE0, 0x00, # OOOOO
0x00, 0x00, 0x00, #
0x00, 0x00, 0x00, #
0x00, 0x00, 0x00, #
0x00, 0x00, 0x00, #
0x00, 0x00, 0x00, #
# @6080 ']' (23 pixels wide)
0x00, 0x00, 0x00, #
0x00, 0x00, 0x00, #
0x00, 0x00, 0x00, #
0x00, 0x00, 0x00, #
0x00, 0x00, 0x00, #
0x00, 0x38, 0x00, # OOO
0x00, 0x38, 0x00, # OOO
0x00, 0x7C, 0x00, # OOOOO
0x1C, 0xFE, 0x70, # OOO OOOOOOO OOO
0x1F, 0xFF, 0xF0, # OOOOOOOOOOOOOOOOO
0x1F, 0xBB, 0xF0, # OOOOOO OOO OOOOOO
0x0F, 0x39, 0xE0, # OOOO OOO OOOO
0x1F, 0x39, 0xF0, # OOOOO OOO OOOOO
0x38, 0xFE, 0x38, # OOO OOOOOOO OOO
0xFF, 0xC7, 0xFE, # OOOOOOOOOO OOOOOOOOOO
0xFF, 0x83, 0xFE, # OOOOOOOOO OOOOOOOOO
0xFF, 0xC7, 0xFE, # OOOOOOOOOO OOOOOOOOOO
0x38, 0xFE, 0x38, # OOO OOOOOOO OOO
0x1F, 0x39, 0xF0, # OOOOO OOO OOOOO
0x0F, 0x39, 0xE0, # OOOO OOO OOOO
0x1F, 0xBB, 0xF0, # OOOOOO OOO OOOOOO
0x1F, 0xFF, 0xF0, # OOOOOOOOOOOOOOOOO
0x1C, 0xFE, 0x70, # OOO OOOOOOO OOO
0x00, 0x7C, 0x00, # OOOOO
0x00, 0x38, 0x00, # OOO
0x00, 0x38, 0x00, # OOO
0x00, 0x00, 0x00, #
0x00, 0x00, 0x00, #
0x00, 0x00, 0x00, #
0x00, 0x00, 0x00, #
0x00, 0x00, 0x00, #
0x00, 0x00, 0x00, #
# @6176 '^' (29 pixels wide)
0x00, 0x00, 0x00, 0x00, #
0x00, 0x00, 0x00, 0x00, #
0x00, 0x00, 0x00, 0x00, #
0x00, 0x00, 0x00, 0x00, #
0x00, 0x00, 0x00, 0x00, #
0x1E, 0x00, 0x03, 0xC0, # OOOO OOOO
0x3F, 0x80, 0x0F, 0xE0, # OOOOOOO OOOOOOO
0x7F, 0xC0, 0x1C, 0x70, # OOOOOOOOO OOO OOO
0xF1, 0xE0, 0x38, 0x78, # OOOO OOOO OOO OOOO
0xE0, 0xE0, 0x30, 0x38, # OOO OOO OO OOO
0xE0, 0x70, 0x70, 0x38, # OOO OOO OOO OOO
0xE0, 0x70, 0x60, 0x38, # OOO OOO OO OOO
0xE0, 0x30, 0x60, 0x38, # OOO OO OO OOO
0x60, 0x38, 0xE0, 0x30, # OO OOO OOO OO
0x70, 0x18, 0xC0, 0x70, # OOO OO OO OOO
0x30, 0x18, 0xC0, 0x60, # OO OO OO OO
0x00, 0x1D, 0xC0, 0x00, # OOO OOO
0x00, 0x1D, 0x80, 0x00, # OOO OO
0x00, 0x0D, 0x80, 0x00, # OO OO
0x00, 0x0D, 0x80, 0x00, # OO OO
0x00, 0x0D, 0x80, 0x00, # OO OO
0x00, 0x0D, 0x80, 0x00, # OO OO
0x00, 0x0F, 0x80, 0x00, # OOOOO
0x00, 0x0F, 0x00, 0x00, # OOOO
0x00, 0x07, 0x00, 0x00, # OOO
0x00, 0x07, 0x00, 0x00, # OOO
0x00, 0x07, 0x00, 0x00, # OOO
0x00, 0x07, 0x00, 0x00, # OOO
0x00, 0x07, 0x00, 0x00, # OOO
0x00, 0x07, 0x00, 0x00, # OOO
0x00, 0x07, 0x00, 0x00, # OOO
0x00, 0x07, 0x00, 0x00, # OOO
# @6304 '_' (26 pixels wide)
0x00, 0x00, 0x00, 0x00, #
0x00, 0x00, 0x00, 0x00, #
0x00, 0x00, 0x00, 0x00, #
0x00, 0x00, 0x00, 0x00, #
0x00, 0x00, 0x00, 0x00, #
0xE0, 0x00, 0x01, 0xC0, # OOO OOO
0x30, 0x00, 0x03, 0x00, # OO OO
0x18, 0x00, 0x06, 0x00, # OO OO
0x0C, 0x00, 0x0C, 0x00, # OO OO
0x0E, 0x00, 0x1C, 0x00, # OOO OOO
0x06, 0x00, 0x18, 0x00, # OO OO
0x07, 0x00, 0x38, 0x00, # OOO OOO
0x03, 0x80, 0x70, 0x00, # OOO OOO
0x01, 0xC0, 0xE0, 0x00, # OOO OOO
0x00, 0x7F, 0x80, 0x00, # OOOOOOOO
0x00, 0xFF, 0xC0, 0x00, # OOOOOOOOOO
0x01, 0xC0, 0xE0, 0x00, # OOO OOO
0x03, 0x80, 0x70, 0x00, # OOO OOO
0x07, 0x00, 0x38, 0x00, # OOO OOO
0x06, 0x00, 0x18, 0x00, # OO OO
0x0C, 0x00, 0x0C, 0x00, # OO OO
0x0C, 0x00, 0x0C, 0x00, # OO OO
0x0C, 0x00, 0x0C, 0x00, # OO OO
0x0C, 0x00, 0x0C, 0x00, # OO OO
0x0C, 0x00, 0x0C, 0x00, # OO OO
0x0C, 0x00, 0x0C, 0x00, # OO OO
0x06, 0x00, 0x18, 0x00, # OO OO
0x07, 0x00, 0x38, 0x00, # OOO OOO
0x03, 0x80, 0x70, 0x00, # OOO OOO
0x01, 0xC0, 0xE0, 0x00, # OOO OOO
0x00, 0xFF, 0xC0, 0x00, # OOOOOOOOOO
0x00, 0x3F, 0x00, 0x00, # OOOOOO
# @6432 '`' (23 pixels wide)
0x00, 0x00, 0x00, #
0x00, 0x00, 0x00, #
0x00, 0x00, 0x00, #
0x00, 0x00, 0x00, #
0xE0, 0x00, 0x0E, # OOO OOO
0xFF, 0xFF, 0xFE, # OOOOOOOOOOOOOOOOOOOOOOO
0x1F, 0xFF, 0xF0, # OOOOOOOOOOOOOOOOO
0x07, 0x03, 0x80, # OOO OOO
0x07, 0x03, 0x80, # OOO OOO
0x07, 0x03, 0x80, # OOO OOO
0x07, 0x03, 0x80, # OOO OOO
0x07, 0x03, 0x80, # OOO OOO
0x07, 0x03, 0x80, # OOO OOO
0x07, 0x03, 0x80, # OOO OOO
0x07, 0x03, 0x80, # OOO OOO
0x07, 0x03, 0x80, # OOO OOO
0x07, 0x03, 0x80, # OOO OOO
0x07, 0x03, 0x80, # OOO OOO
0x07, 0x03, 0x80, # OOO OOO
0x07, 0x03, 0x80, # OOO OOO
0x07, 0x03, 0x80, # OOO OOO
0x07, 0x03, 0x80, # OOO OOO
0x07, 0x03, 0x80, # OOO OOO
0x07, 0x03, 0x80, # OOO OOO
0x1F, 0xFF, 0xE0, # OOOOOOOOOOOOOOOO
0xFF, 0xFF, 0xFE, # OOOOOOOOOOOOOOOOOOOOOOO
0xE0, 0x00, 0x0E, # OOO OOO
0x00, 0x00, 0x00, #
0x00, 0x00, 0x00, #
0x00, 0x00, 0x00, #
0x00, 0x00, 0x00, #
0x00, 0x00, 0x00, #
# @6528 'a' (27 pixels wide)
0x00, 0x00, 0x00, 0x00, #
0x00, 0x00, 0x00, 0x00, #
0x00, 0x00, 0x00, 0x00, #
0x00, 0x00, 0x00, 0x00, #
0x00, 0x00, 0x00, 0x00, #
0x00, 0x7F, 0xE0, 0x00, # OOOOOOOOOO
0x03, 0xFF, 0xFE, 0x00, # OOOOOOOOOOOOOOOOO
0x0F, 0xFF, 0xFF, 0x80, # OOOOOOOOOOOOOOOOOOOOO
0x3F, 0x80, 0x1F, 0xE0, # OOOOOOO OOOOOOOO
0x7F, 0x00, 0x01, 0xE0, # OOOOOOO OOOO
0x61, 0x80, 0x00, 0x20, # OO OO O
0xC0, 0xC0, 0x00, 0x00, # OO OO
0x80, 0x40, 0x00, 0x00, # O O
0x80, 0x40, 0x0F, 0x00, # O O OOOO
0x80, 0x40, 0x30, 0xC0, # O O OO OO
0x40, 0x80, 0x20, 0x40, # O O O O
0x61, 0x80, 0x40, 0x20, # OO OO O O
0x1E, 0x00, 0x40, 0x20, # OOOO O O
0x00, 0x00, 0x40, 0x20, # O O
0x00, 0x00, 0x60, 0x60, # OO OO
0x80, 0x00, 0x30, 0xC0, # O OO OO
0xF0, 0x00, 0x1F, 0xC0, # OOOO OOOOOOO
0xFF, 0x00, 0x3F, 0x80, # OOOOOOOO OOOOOOO
0x3F, 0xFF, 0xFE, 0x00, # OOOOOOOOOOOOOOOOOOOOO
0x0F, 0xFF, 0xF8, 0x00, # OOOOOOOOOOOOOOOOO
0x00, 0xFF, 0xC0, 0x00, # OOOOOOOOOO
0x00, 0x00, 0x00, 0x00, #
0x00, 0x00, 0x00, 0x00, #
0x00, 0x00, 0x00, 0x00, #
0x00, 0x00, 0x00, 0x00, #
0x00, 0x00, 0x00, 0x00, #
0x00, 0x00, 0x00, 0x00, #
#
-> Any:
ImageResponse.__key_warning(key)
return super().get(key, default)
def __init__(__self__, *,
odata_type: str,
key_frame_interval: Optional[str] = None,
label: Optional[str] = None,
range: Optional[str] = None,
start: Optional[str] = None,
step: Optional[str] = None,
stretch_mode: Optional[str] = None):
"""
Describes the basic properties for generating thumbnails from the input video
:param str odata_type: The discriminator for derived types.
Expected value is '#Microsoft.Media.Image'.
:param str key_frame_interval: The distance between two key frames, thereby defining a group of pictures (GOP). The value should be a non-zero integer in the range [1, 30] seconds, specified in ISO 8601 format. The default is 2 seconds (PT2S).
:param str label: An optional label for the codec. The label can be used to control muxing behavior.
:param str range: The position in the input video at which to stop generating thumbnails. The value can be in absolute timestamp (ISO 8601, e.g: PT5M30S to stop at 5 minutes and 30 seconds), or a frame count (For example, 300 to stop at the 300th frame), or a relative value (For example, 100%).
:param str start: The position in the input video from where to start generating thumbnails. The value can be in absolute timestamp (ISO 8601, e.g: PT05S), or a frame count (For example, 10 for the 10th frame), or a relative value (For example, 1%). Also supports a macro {Best}, which tells the encoder to select the best thumbnail from the first few seconds of the video.
:param str step: The intervals at which thumbnails are generated. The value can be in absolute timestamp (ISO 8601, e.g: PT05S for one image every 5 seconds), or a frame count (For example, 30 for every 30 frames), or a relative value (For example, 1%).
:param str stretch_mode: The resizing mode - how the input video will be resized to fit the desired output resolution(s). Default is AutoSize
"""
pulumi.set(__self__, "odata_type", '#Microsoft.Media.Image')
if key_frame_interval is not None:
pulumi.set(__self__, "key_frame_interval", key_frame_interval)
if label is not None:
pulumi.set(__self__, "label", label)
if range is not None:
pulumi.set(__self__, "range", range)
if start is not None:
pulumi.set(__self__, "start", start)
if step is not None:
pulumi.set(__self__, "step", step)
if stretch_mode is not None:
pulumi.set(__self__, "stretch_mode", stretch_mode)
@property
@pulumi.getter(name="odataType")
def odata_type(self) -> str:
"""
The discriminator for derived types.
Expected value is '#Microsoft.Media.Image'.
"""
return pulumi.get(self, "odata_type")
@property
@pulumi.getter(name="keyFrameInterval")
def key_frame_interval(self) -> Optional[str]:
"""
The distance between two key frames, thereby defining a group of pictures (GOP). The value should be a non-zero integer in the range [1, 30] seconds, specified in ISO 8601 format. The default is 2 seconds (PT2S).
"""
return pulumi.get(self, "key_frame_interval")
@property
@pulumi.getter
def label(self) -> Optional[str]:
"""
An optional label for the codec. The label can be used to control muxing behavior.
"""
return pulumi.get(self, "label")
@property
@pulumi.getter
def range(self) -> Optional[str]:
"""
The position in the input video at which to stop generating thumbnails. The value can be in absolute timestamp (ISO 8601, e.g: PT5M30S to stop at 5 minutes and 30 seconds), or a frame count (For example, 300 to stop at the 300th frame), or a relative value (For example, 100%).
"""
return pulumi.get(self, "range")
@property
@pulumi.getter
def start(self) -> Optional[str]:
"""
The position in the input video from where to start generating thumbnails. The value can be in absolute timestamp (ISO 8601, e.g: PT05S), or a frame count (For example, 10 for the 10th frame), or a relative value (For example, 1%). Also supports a macro {Best}, which tells the encoder to select the best thumbnail from the first few seconds of the video.
"""
return pulumi.get(self, "start")
@property
@pulumi.getter
def step(self) -> Optional[str]:
"""
The intervals at which thumbnails are generated. The value can be in absolute timestamp (ISO 8601, e.g: PT05S for one image every 5 seconds), or a frame count (For example, 30 for every 30 frames), or a relative value (For example, 1%).
"""
return pulumi.get(self, "step")
@property
@pulumi.getter(name="stretchMode")
def stretch_mode(self) -> Optional[str]:
"""
The resizing mode - how the input video will be resized to fit the desired output resolution(s). Default is AutoSize
"""
return pulumi.get(self, "stretch_mode")
@pulumi.output_type
class JobErrorDetailResponse(dict):
"""
Details of JobOutput errors.
"""
def __init__(__self__, *,
code: str,
message: str):
"""
Details of JobOutput errors.
:param str code: Code describing the error detail.
:param str message: A human-readable representation of the error.
"""
pulumi.set(__self__, "code", code)
pulumi.set(__self__, "message", message)
@property
@pulumi.getter
def code(self) -> str:
"""
Code describing the error detail.
"""
return pulumi.get(self, "code")
@property
@pulumi.getter
def message(self) -> str:
"""
A human-readable representation of the error.
"""
return pulumi.get(self, "message")
@pulumi.output_type
class JobErrorResponse(dict):
"""
Details of JobOutput errors.
"""
def __init__(__self__, *,
category: str,
code: str,
details: Sequence['outputs.JobErrorDetailResponse'],
message: str,
retry: str):
"""
Details of JobOutput errors.
:param str category: Helps with categorization of errors.
:param str code: Error code describing the error.
:param Sequence['JobErrorDetailResponse'] details: An array of details about specific errors that led to this reported error.
:param str message: A human-readable language-dependent representation of the error.
:param str retry: Indicates that it may be possible to retry the Job. If retry is unsuccessful, please contact Azure support via Azure Portal.
"""
pulumi.set(__self__, "category", category)
pulumi.set(__self__, "code", code)
pulumi.set(__self__, "details", details)
pulumi.set(__self__, "message", message)
pulumi.set(__self__, "retry", retry)
@property
@pulumi.getter
def category(self) -> str:
"""
Helps with categorization of errors.
"""
return pulumi.get(self, "category")
@property
@pulumi.getter
def code(self) -> str:
"""
Error code describing the error.
"""
return pulumi.get(self, "code")
@property
@pulumi.getter
def details(self) -> Sequence['outputs.JobErrorDetailResponse']:
"""
An array of details about specific errors that led to this reported error.
"""
return pulumi.get(self, "details")
@property
@pulumi.getter
def message(self) -> str:
"""
A human-readable language-dependent representation of the error.
"""
return pulumi.get(self, "message")
@property
@pulumi.getter
def retry(self) -> str:
"""
Indicates that it may be possible to retry the Job. If retry is unsuccessful, please contact Azure support via Azure Portal.
"""
return pulumi.get(self, "retry")
@pulumi.output_type
class JobInputAssetResponse(dict):
"""
Represents an Asset for input into a Job.
"""
@staticmethod
def __key_warning(key: str):
suggest = None
if key == "assetName":
suggest = "asset_name"
elif key == "odataType":
suggest = "odata_type"
if suggest:
pulumi.log.warn(f"Key '{key}' not found in JobInputAssetResponse. Access the value via the '{suggest}' property getter instead.")
def __getitem__(self, key: str) -> Any:
JobInputAssetResponse.__key_warning(key)
return super().__getitem__(key)
def get(self, key: str, default = None) -> Any:
JobInputAssetResponse.__key_warning(key)
return super().get(key, default)
def __init__(__self__, *,
asset_name: str,
odata_type: str,
files: Optional[Sequence[str]] = None,
label: Optional[str] = None):
"""
Represents an Asset for input into a Job.
:param str asset_name: The name of the input Asset.
:param str odata_type: The discriminator for derived types.
Expected value is '#Microsoft.Media.JobInputAsset'.
:param Sequence[str] files: List of files. Required for JobInputHttp.
:param str label: A label that is assigned to a JobInput, that is used to satisfy a reference used in the Transform. For example, a Transform can be authored so as to take an image file with the label 'xyz' and apply it as an overlay onto the input video before it is encoded. When submitting a Job, exactly one of the JobInputs should be the image file, and it should have the label 'xyz'.
"""
pulumi.set(__self__, "asset_name", asset_name)
pulumi.set(__self__, "odata_type", '#Microsoft.Media.JobInputAsset')
if files is not None:
pulumi.set(__self__, "files", files)
if label is not None:
pulumi.set(__self__, "label", label)
@property
@pulumi.getter(name="assetName")
def asset_name(self) -> str:
"""
The name of the input Asset.
"""
return pulumi.get(self, "asset_name")
@property
@pulumi.getter(name="odataType")
def odata_type(self) -> str:
"""
The discriminator for derived types.
Expected value is '#Microsoft.Media.JobInputAsset'.
"""
return pulumi.get(self, "odata_type")
@property
@pulumi.getter
def files(self) -> Optional[Sequence[str]]:
"""
List of files. Required for JobInputHttp.
"""
return pulumi.get(self, "files")
@property
@pulumi.getter
def label(self) -> Optional[str]:
"""
        :param str label: A label that is assigned to a JobInput, that is used to satisfy a reference used in the Transform. For example, a Transform can be authored so as to take an image file with the label
call a lambda function that returns a constant value.
May throw; this code is very hacky.
"""
info = InvokeDynamicInfo(ins, cf)
# We only want to deal with lambdas in the same class
assert info.ref_kind == REF_invokeStatic
assert info.method_class == cf.this.name
lambda_method = cf.methods.find_one(
name=info.method_name, args=info.method_desc.args_descriptor,
returns=info.method_desc.returns_descriptor)
    assert lambda_method is not None
class Callback(WalkerCallback):
def on_new(self, ins, const):
raise Exception("Illegal new")
def on_invoke(self, ins, const, obj, args):
raise Exception("Illegal invoke")
def on_get_field(self, ins, const, obj):
raise Exception("Illegal getfield")
def on_put_field(self, ins, const, obj, value):
raise Exception("Illegal putfield")
# Set verbose to false because we don't want lots of output if this errors
# (since it is expected to for more complex methods)
return walk_method(cf, lambda_method, Callback(), False, args)
class WalkerCallback(ABC):
"""
Interface for use with walk_method.
Any of the methods may raise StopIteration to signal the end of checking
instructions.
"""
@abstractmethod
def on_new(self, ins, const):
"""
Called for a `new` instruction.
ins: The instruction
const: The constant, a ConstantClass
return value: what to put on the stack
"""
pass
@abstractmethod
def on_invoke(self, ins, const, obj, args):
"""
Called when a method is invoked.
ins: The instruction
const: The constant, either a MethodReference or InterfaceMethodRef
obj: The object being invoked on (or null for a static method)
args: The arguments to the method, popped from the stack
return value: what to put on the stack (for a non-void method)
"""
pass
@abstractmethod
def on_get_field(self, ins, const, obj):
"""
Called for a getfield or getstatic instruction.
ins: The instruction
const: The constant, a FieldReference
obj: The object to get from, or None for a static field
return value: what to put on the stack
"""
pass
@abstractmethod
def on_put_field(self, ins, const, obj, value):
"""
Called for a putfield or putstatic instruction.
ins: The instruction
const: The constant, a FieldReference
obj: The object to store into, or None for a static field
value: The value to assign
"""
pass
def on_invokedynamic(self, ins, const, args):
"""
Called for an invokedynamic instruction.
ins: The instruction
const: The constant, a InvokeDynamic
args: Arguments closed by the created object
return value: what to put on the stack
"""
raise Exception("Unexpected invokedynamic: %s" % str(ins))
def walk_method(cf, method, callback, verbose, input_args=None):
"""
Walks through a method, evaluating instructions and using the callback
for side-effects.
The method is assumed to not have any conditionals, and to only return
at the very end.
"""
assert isinstance(callback, WalkerCallback)
stack = []
locals = {}
cur_index = 0
if not method.access_flags.acc_static:
# TODO: allow specifying this
locals[cur_index] = object()
cur_index += 1
    if input_args is not None:
assert len(input_args) == len(method.args)
for arg in input_args:
locals[cur_index] = arg
cur_index += 1
else:
for arg in method.args:
locals[cur_index] = object()
cur_index += 1
ins_list = list(method.code.disassemble())
for ins in ins_list[:-1]:
if ins in ("bipush", "sipush"):
stack.append(ins.operands[0].value)
elif ins.mnemonic.startswith("fconst") or ins.mnemonic.startswith(
"dconst"):
stack.append(float(ins.mnemonic[-1]))
elif ins == "aconst_null":
stack.append(None)
elif ins in ("ldc", "ldc_w", "ldc2_w"):
const = ins.operands[0]
if isinstance(const, ConstantClass):
stack.append("%s.class" % const.name.value)
elif isinstance(const, String):
stack.append(const.string.value)
else:
stack.append(const.value)
elif ins == "new":
const = ins.operands[0]
try:
stack.append(callback.on_new(ins, const))
except StopIteration:
break
elif ins in ("getfield", "getstatic"):
const = ins.operands[0]
if ins.mnemonic != "getstatic":
obj = stack.pop()
else:
obj = None
try:
stack.append(callback.on_get_field(ins, const, obj))
except StopIteration:
break
elif ins in ("putfield", "putstatic"):
const = ins.operands[0]
value = stack.pop()
if ins.mnemonic != "putstatic":
obj = stack.pop()
else:
obj = None
try:
callback.on_put_field(ins, const, obj, value)
except StopIteration:
break
elif ins in ("invokevirtual", "invokespecial", "invokeinterface",
"invokestatic"):
const = ins.operands[0]
method_desc = const.name_and_type.descriptor.value
desc = method_descriptor(method_desc)
num_args = len(desc.args)
args = []
for i in six.moves.range(num_args):
args.insert(0, stack.pop())
if ins.mnemonic != "invokestatic":
obj = stack.pop()
else:
obj = None
try:
ret = callback.on_invoke(ins, const, obj, args)
except StopIteration:
break
if desc.returns.name != "void":
stack.append(ret)
elif ins in ("astore", "istore", "lstore", "fstore", "dstore"):
locals[ins.operands[0].value] = stack.pop()
elif ins in ("aload", "iload", "lload", "fload", "dload"):
stack.append(locals[ins.operands[0].value])
elif ins == "dup":
stack.append(stack[-1])
elif ins == "pop":
stack.pop()
elif ins == "anewarray":
stack.append([None] * stack.pop())
elif ins == "newarray":
stack.append([0] * stack.pop())
elif ins in ("aastore", "bastore", "castore", "sastore", "iastore",
"lastore", "fastore", "dastore"):
value = stack.pop()
index = stack.pop()
array = stack.pop()
if isinstance(array, list) and isinstance(index, int):
array[index] = value
elif verbose:
print("Failed to execute %s: array %s index %s value %s" %
(ins, array, index, value))
elif ins in ("aaload", "baload", "caload", "saload", "iaload", "laload",
"faload", "daload"):
index = stack.pop()
array = stack.pop()
if isinstance(array, list) and isinstance(index, int):
                stack.append(array[index])
elif verbose:
print("Failed to execute %s: array %s index %s" % (ins, array, index))
elif ins == "invokedynamic":
const = ins.operands[0]
method_desc = const.name_and_type.descriptor.value
desc = method_descriptor(method_desc)
num_args = len(desc.args)
args = []
for i in six.moves.range(num_args):
args.insert(0, stack.pop())
stack.append(callback.on_invokedynamic(ins, ins.operands[0], args))
elif ins == "checkcast":
pass
elif verbose:
print("Unknown instruction %s: stack is %s" % (ins, stack))
last_ins = ins_list[-1]
if last_ins.mnemonic in ("ireturn", "lreturn", "freturn", "dreturn",
"areturn"):
# Non-void method returning
return stack.pop()
elif last_ins.mnemonic == "return":
# Void method returning
pass
elif verbose:
print("Unexpected final instruction %s: stack is %s" % (ins, stack))
def get_enum_constants(cf, verbose):
# Gets enum constants declared in the given class.
# Consider the following code:
"""
public enum TestEnum {
FOO(900),
BAR(42) {
@Override
public String toString() {
return "bar";
}
},
BAZ(Integer.getInteger("SomeSystemProperty"));
public static final TestEnum RECOMMENDED_VALUE = BAR;
private TestEnum(int i) {}
}
"""
# which compiles to:
"""
public final class TestEnum extends java.lang.Enum<TestEnum>
minor version: 0
major version: 52
flags: ACC_PUBLIC, ACC_FINAL, ACC_SUPER, ACC_ENUM
{
public static final TestEnum FOO;
descriptor: LTestEnum;
flags: ACC_PUBLIC, ACC_STATIC, ACC_FINAL, ACC_ENUM
public static final TestEnum BAR;
descriptor: LTestEnum;
flags: ACC_PUBLIC, ACC_STATIC, ACC_FINAL, ACC_ENUM
public static final TestEnum BAZ;
descriptor: LTestEnum;
flags: ACC_PUBLIC, ACC_STATIC, ACC_FINAL, ACC_ENUM
public static final TestEnum RECOMMENDED_VALUE;
descriptor: LTestEnum;
flags: ACC_PUBLIC, ACC_STATIC, ACC_FINAL
private static final TestEnum[] $VALUES;
descriptor: [LTestEnum;
flags: ACC_PRIVATE, ACC_STATIC, ACC_FINAL, ACC_SYNTHETIC
public static TestEnum[] values();
// ...
public static TestEnum valueOf(java.lang.String);
// ...
private TestEnum(int);
// ...
static {};
descriptor: ()V
flags: ACC_STATIC
Code:
stack=5, locals=0, args_size=0
// Initializing enum constants:
0: new #5 // class TestEnum
3: dup
4: ldc #8 // String FOO
6: iconst_0
7: sipush 900
10: invokespecial #1 // Method "<init>":(Ljava/lang/String;II)V
13: putstatic #9 // Field FOO:LTestEnum;
16: new #10 // class TestEnum$1
19: dup
20: ldc #11 // String BAR
22: iconst_1
23: bipush 42
25: invokespecial #12 // Method TestEnum$1."<init>":(Ljava/lang/String;II)V
28: putstatic #13 // Field BAR:LTestEnum;
31: new #5 // class TestEnum
34: dup
35: ldc #14 // String BAZ
37: iconst_2
38: ldc #15 // String SomeSystemProperty
40: invokestatic #16 // Method java/lang/Integer.getInteger:(Ljava/lang/String;)Ljava/lang/Integer;
43: invokevirtual #17 // Method java/lang/Integer.intValue:()I
46: invokespecial #1 // Method "<init>":(Ljava/lang/String;II)V
49: putstatic #18 // Field BAZ:LTestEnum;
// Setting up $VALUES
52: iconst_3
53: anewarray #5 // class TestEnum
56: dup
57: iconst_0
58: getstatic #9 // Field FOO:LTestEnum;
61: aastore
62: dup
63: iconst_1
64: getstatic #13 // Field BAR:LTestEnum;
67: aastore
68: dup
69: iconst_2
70: getstatic #18 // Field BAZ:LTestEnum;
73: aastore
74: putstatic #2 // Field $VALUES:[LTestEnum;
// Other user-specified stuff
77: getstatic #13 // Field BAR:LTestEnum;
80: putstatic #19 // Field RECOMMENDED_VALUE:LTestEnum;
83: return
}
"""
# We only care about the enum constants, not other random user stuff
# (such as RECOMMENDED_VALUE) or the $VALUES thing. Fortunately,
# ACC_ENUM helps us with this. It's worth noting that although MC's
    # obfuscator gets rid of the field names, it does not get rid of the
# string constant for enum names (which is used by valueOf()), nor
# does it touch ACC_ENUM.
# For this method, we don't care about parameters other than the name.
if not cf.access_flags.acc_enum:
raise Exception(cf.this.name.value + " is not an enum!")
enum_fields = list(
cf.fields.find(f=lambda field: field.access_flags.acc_enum))
enum_class = None
enum_name = None
result = {}
for ins in cf.methods.find_one(name="<clinit>").code.disassemble():
if ins == "new" and | |
"""
Combat Manager. This is where the magic happens. And by magic,
we mean characters dying, most likely due to vile sorcery.
The Combat Manager is invoked by a character starting combat
with the +fight command. Anyone set up as a defender of either
of those two characters is pulled into combat automatically.
Otherwise, players can enter into combat that is in progress
with the appropriate defend command, or by a +fight command
to attack one of the belligerent parties.
Turn based combat has the obvious drawback that someone who
is AFK or deliberately not taking their turn completely halts
the action. There isn't an easy solution to this. GMs will
have tools to skip someone's turn or remove them from combat,
and a majority vote by all parties can cause a turn to proceed
even when someone has not taken their turn.
Phase 1 is the setup phase. This phase is designed to have a
pause before deciding actions so that other people can join
combat. Characters who join in later phases will not receive
a combat turn, and will be added to the fight in the following
turn. Phase 1 is also when players can vote to end the combat.
Every player MUST enter a command to continue for combat to
proceed. There will never be a case where a character can be
AFK and in combat. It is possible to vote a character out of
combat due to AFK in order for things to proceed. Immediately
after every current combatant selects to continue, the participants
are locked in and we go to phase 2.
Phase 2 is the action phase. Initiative is rolled, and then
each player must take an action when it is their turn. 'pass'
is a valid action. Each combat action is resolved during the
character's turn. Characters who are incapacitated lose their
action. Characters who join combat during Phase 2 must wait
for the following turn to be allowed a legal action.
"""
import time
from operator import attrgetter
from evennia.utils.utils import fill, dedent
from server.utils.prettytable import PrettyTable
from server.utils.arx_utils import list_to_string
from typeclasses.scripts.combat import combat_settings
from typeclasses.scripts.combat.state_handler import CombatantStateHandler
from typeclasses.scripts.scripts import Script as BaseScript
COMBAT_INTRO = combat_settings.COMBAT_INTRO
PHASE1_INTRO = combat_settings.PHASE1_INTRO
PHASE2_INTRO = combat_settings.PHASE2_INTRO
MAX_AFK = combat_settings.MAX_AFK
ROUND_DELAY = combat_settings.ROUND_DELAY
class CombatManager(BaseScript):
"""
Players are added via add_combatant or add_observer. These are invoked
by commands in normal commandsets. Characters added receive the combat
commandset, which give commands that invoke the other methods.
Turns proceed based on every combatant submitting an action, which is a
dictionary of combatant IDs to their actions. Dead characters are moved
to observer status, incapacitated characters are moved to a special
list to denote that they're still in combat but can take no action.
Attribute references to the combat manager script are stored in the room
location under room.ndb.combat_manager, and inside each character in the
combat under character.ndb.combat_manager.
Note that all the data for the combat manager is stored inside non-database
attributes, since it is designed to be non-persistent. If there's a server
reset, combat will end.
Non-database attributes:
self.ndb.combatants - list of everyone active in the fight. If it's empty, combat ends
self.ndb.observers - People passively watching the fight
self.ndb.incapacitated - People who are too injured to act, but still can be attacked
self.ndb.fighter_data - CharacterCombatData for each combatant. dict with character.id as keys
self.ndb.combat_location - room where script happens
self.ndb.initiative_list - CharacterCombatData for each fighter. incapacitated chars aren't in it
self.ndb.active_character - Current turn of player in phase 2. Not used in phase 1
self.ndb.phase - Phase 1 or 2. 1 is setup, 2 is resolution
self.ndb.afk_check - anyone we're checking to see if they're afk
self.ndb.votes_to_end - anyone voting to end combat
self.ndb.flee_success - Those who can run this turn
self.ndb.fleeing - Those intending to try to run
Admin Methods:
self.msg() - Message to all combatants/observers.
self.end_combat() - shut down the fight
self.next_character_turn() - move to next character in initiative list in phase 2
self.add_observer(character)
self.add_combatant(character)
self.remove_combatant(character)
self.move_to_observer(character)
"""
# noinspection PyAttributeOutsideInit
def at_script_creation(self):
"""
Setup the script
"""
self.key = "CombatManager"
self.desc = "Manages the combat state for a group of combatants"
# Not persistent because if someone goes LD, we don't want them reconnecting
# in combat a week later with no way to leave it. Intentionally quitting out
# to avoid combat will just need to be corrected with rules enforcement.
self.persistent = False
self.interval = ROUND_DELAY
self.start_delay = True
self.ndb.combatants = [] # those actively involved in fight
self.ndb.observers = [] # sent combat data, but cannot act
self.ndb.combat_location = self.obj # room of the fight
self.ndb.initiative_list = [] # CharacterCombatData of characters in order of initiative
self.ndb.active_character = None # who is currently acting during phase 2
self.ndb.phase = 1
self.ndb.afk_check = [] # characters who are flagged afk until they take an action
self.ndb.votes_to_end = [] # if all characters vote yes, combat ends
self.ndb.flee_success = [] # if we're here, the character is allowed to flee on their turn
self.ndb.fleeing = [] # if we're here, they're attempting to flee but haven't rolled yet
self.ndb.ready = [] # those ready for phase 2
self.ndb.not_ready = [] # not ready for phase 2
self.ndb.surrender_list = [] # people trying to surrender
self.ndb.affect_real_dmg = not self.obj.tags.get("nonlethal_combat")
self.ndb.random_deaths = not self.obj.tags.get("no_random_deaths")
self.ndb.max_rounds = 250
self.ndb.rounds = 0
# to ensure proper shutdown, prevent some timing errors
self.ndb.shutting_down = False
self.ndb.status_table = None
self.ndb.initializing = True
if self.obj.event:
self.ndb.risk = self.obj.event.risk
else:
self.ndb.risk = 4
self.ndb.special_actions = []
self.ndb.gm_afk_counter = 0
@property
def status_table(self):
"""text table of the combat"""
if not self.ndb.status_table:
self.build_status_table()
return self.ndb.status_table
def at_repeat(self):
"""Called at the script timer interval"""
if self.check_if_combat_should_end():
return
# reset the script timers
if self.ndb.shutting_down:
return
# proceed to combat
if self.ndb.phase == 1:
self.ready_check()
self.msg("Use {w+cs{n to see the current combat status.")
self.remove_surrendering_characters()
def is_valid(self):
"""
Check if still has combatants. Incapacitated characters are still
combatants, just with very limited options - they can either pass
their turn or vote to end the fight. The fight ends when all combatants
either pass their turn or choose to end. Players can be forced out
of active combat if they are AFK, being moved to observer status.
"""
if self.ndb.shutting_down:
return False
if self.ndb.combatants:
return True
if self.ndb.initializing:
return True
return False
# ----Methods for passing messages to characters-------------
@staticmethod
def send_intro_message(character, combatant=True):
"""
Displays intro message of combat to character
"""
if not combatant:
msg = fill("{mYou are now in observer mode for a fight. {n" +
"Most combat commands will not function. To " +
"join the fight, use the {w+fight{n command.")
else:
msg = "{rEntering combat mode.{n\n"
msg += "\n\n" + fill(COMBAT_INTRO)
character.msg(msg)
return
def display_phase_status(self, character, disp_intro=True):
"""
Gives a message based on the current combat phase to the character.
In phase 1, just list combatants and observers, anyone marked AFK,
dead, whatever, and any votes to end.
In phase 2, list initiative order and who has the current action.
"""
if self.ndb.shutting_down:
return
msg = ""
if self.ndb.phase == 1:
if disp_intro:
msg += PHASE1_INTRO + "\n"
msg += str(self.status_table) + "\n"
vote_str = self.vote_string
if vote_str:
msg += vote_str + "\n"
elif self.ndb.phase == 2:
if disp_intro:
msg += PHASE2_INTRO + "\n"
msg += str(self.status_table) + "\n"
msg += self.get_initiative_list() + "\n"
msg += "{wCurrent Round:{n %d" % self.ndb.rounds
character.msg(msg)
def build_status_table(self):
"""Builds a table of the status of combatants"""
combatants = sorted(self.ndb.combatants)
table = PrettyTable(["{wCombatant{n", "{wDamage{n", "{wFatigue{n", "{wAction{n", "{wReady?{n"])
for state in combatants:
name = state.combat_handler.name
dmg = state.character.get_wound_descriptor(state.character.dmg)
fatigue = str(state.fatigue_penalty)
action = "None" if not state.queued_action else state.queued_action.table_str
rdy = "yes" if state.ready else "{rno{n"
table.add_row([name, dmg, fatigue, action, rdy])
self.ndb.status_table = table
def display_phase_status_to_all(self, intro=False):
"""Sends status to all characters in or watching the fight"""
msglist = set([ob.character for ob in self.ndb.combatants] + self.ndb.observers)
self.build_status_table()
self.ready_check()
for ob in msglist:
self.display_phase_status(ob, disp_intro=intro)
def msg(self, message, exclude=None, options=None):
"""
Sends a message to all objects in combat/observers except for
individuals in the exclude list.
"""
#
<reponame>DallasTrinkle/Onsager
"""
Unit tests for supercell class
"""
__author__ = '<NAME>'
import unittest
import itertools, copy
import numpy as np
import onsager.crystal as crystal
import onsager.supercell as supercell
class FCCSuperTests(unittest.TestCase):
"""Tests to make sure we can make a supercell object."""
longMessage = False
def setUp(self):
self.crys = crystal.Crystal.FCC(1., 'Al')
self.one = np.eye(3, dtype=int)
self.groupsupers = (self.one, 2 * self.one, np.array([[-1, 1, 1], [1, -1, 1], [1, 1, -1]]))
def assertOrderingSuperEqual(self, s0, s1, msg=""):
if s0 != s1:
failmsg = msg + '\n'
for line0, line1 in itertools.zip_longest(s0.__str__().splitlines(),
s1.__str__().splitlines(),
fillvalue=' - '):
failmsg += line0 + '\t' + line1 + '\n'
self.fail(msg=failmsg)
def testSuper(self):
"""Can we make a supercell object?"""
sup = supercell.Supercell(self.crys, self.one)
self.assertNotEqual(sup, None)
self.assertEqual(sup.Nchem, self.crys.Nchem)
sup = supercell.Supercell(self.crys, self.one, interstitial=(1,))
self.assertNotEqual(sup, None)
sup = supercell.Supercell(self.crys, self.one, Nsolute=5)
self.assertNotEqual(sup, None)
self.assertEqual(sup.Nchem, self.crys.Nchem + 5)
with self.assertRaises(ZeroDivisionError):
supercell.Supercell(self.crys, np.zeros((3, 3), dtype=int))
def testEqualityCopy(self):
"""Can we copy a supercell, and is it equal to itself?"""
super0 = supercell.Supercell(self.crys, self.one)
super2 = super0.copy()
self.assertOrderingSuperEqual(super0, super2, msg="copy not equal")
def testTrans(self):
"""Can we correctly generates the translations?"""
size, invsup, tlist, tdict = supercell.Supercell.maketrans(self.one)
self.assertEqual(size, 1)
self.assertTrue(np.all(tlist[0] == 0))
size, invsup, tlist, tdict = supercell.Supercell.maketrans(2 * self.one)
self.assertEqual(size, 8)
for tv in tlist:
self.assertTrue(all(tvi == 0 or tvi == 4 for tvi in tv))
sup = np.array([[0, 1, 1], [1, 0, 1], [1, 1, 0]])
size, invsup, tlist, tdict = supercell.Supercell.maketrans(sup)
self.assertEqual(size, 2)
for tv in tlist:
self.assertTrue(np.all(tv == 0) or np.all(tv == 1))
# Try making a whole series of supercells; if they fail, will raise an Arithmetic exception:
for n in range(100):
randsuper = np.random.randint(-5, 6, size=(3, 3))
if np.allclose(np.linalg.det(randsuper), 0):
with self.assertRaises(ZeroDivisionError):
supercell.Supercell.maketrans(randsuper)
continue
size, invsup, tlist, tdict = supercell.Supercell.maketrans(randsuper)
self.assertTrue(len(tlist) == size)
def testSites(self):
"""Do we have the correct sites in our supercell?"""
for n in range(100):
randsuper = np.random.randint(-5, 6, size=(3, 3))
if np.allclose(np.linalg.det(randsuper), 0): continue
# for efficiency we don't bother generating group ops,
# and also to avoid warnings about broken symmetry
sup = supercell.Supercell(self.crys, randsuper, NOSYM=True)
Rdictset = {ci: set() for ci in self.crys.atomindices}
for u in sup.pos:
x = np.dot(self.crys.lattice, np.dot(randsuper, u))
R, ci = self.crys.cart2pos(x)
self.assertNotEqual(ci, None)
Rtup = tuple(R)
self.assertNotIn(Rtup, Rdictset[ci])
Rdictset[ci].add(Rtup)
for v in Rdictset.values():
self.assertEqual(len(v), sup.size)
def testGroupOps(self):
"""Do we correctly generate group operations inside the supercell?"""
for nmat in self.groupsupers:
sup = supercell.Supercell(self.crys, nmat)
# print(super)
# for g in super.G: if np.all(g.rot==self.one): print(g)
self.assertEqual(len(sup.G), len(self.crys.G) * sup.size)
invlatt = np.linalg.inv(sup.lattice)
superposcart = [np.dot(sup.lattice, u) for u in sup.pos]
for g in sup.G:
for i, x, u in zip(itertools.count(), superposcart, sup.pos):
gx = np.dot(g.cartrot, x) + np.dot(sup.lattice, g.trans)
gu = crystal.incell(np.dot(invlatt, gx))
gu0 = crystal.incell(np.dot(g.rot, u) + g.trans)
gi = g.indexmap[0][i]
if not np.allclose(gu, gu0):
self.assertTrue(np.allclose(gu, gu0),
msg="{}\nProblem with GroupOp:\n{}\n{} != {}".format(sup, g, gu, gu0))
if not np.allclose(gu, sup.pos[gi]):
self.assertTrue(np.allclose(gu, sup.pos[gi]),
msg="{}\nProblem with GroupOp:\n{}\nIndexing: {} != {}".format(sup, g, gu,
sup.pos[gi]))
# do we successfully raise a Warning about broken symmetry?
with self.assertWarns(RuntimeWarning):
brokensymmsuper = np.array([[3, -5, 2], [-1, 2, 3], [4, -2, 1]])
supercell.Supercell(self.crys, brokensymmsuper)
def testSanity(self):
"""Does __sane__ operate as it should?"""
# we use NOSYM for speed only
sup = supercell.Supercell(self.crys, 3 * self.one, Nsolute=1, NOSYM=True)
self.assertTrue(sup.__sane__(), msg='Empty supercell not sane?')
# do a bunch of random operations, make sure we remain sane:
Ntests = 100
for c, ind in zip(np.random.randint(-1, sup.Nchem, size=Ntests),
np.random.randint(sup.size * sup.N, size=Ntests)):
sup.setocc(ind, c)
if not sup.__sane__():
self.assertTrue(False, msg='Supercell:\n{}\nnot sane?'.format(sup))
# Now! Break sanity (and then repair it)
for c, ind in zip(np.random.randint(-1, sup.Nchem, size=Ntests),
np.random.randint(sup.size * sup.N, size=Ntests)):
c0 = sup.occ[ind]
if c == c0: continue
sup.occ[ind] = c
self.assertFalse(sup.__sane__())
sup.occ[ind] = c0
if not sup.__sane__():
self.assertTrue(False, msg='Supercell:\n{}\nnot sane?'.format(sup))
def testIndex(self):
"""Test that we can use index into our supercell appropriately"""
for n in range(10):
randsuper = np.random.randint(-5, 6, size=(3, 3))
if np.allclose(np.linalg.det(randsuper), 0): continue
# for efficiency we don't bother generating group ops,
# and also to avoid warnings about broken symmetry
sup = supercell.Supercell(self.crys, randsuper, NOSYM=True)
for ind, u in enumerate(sup.pos):
self.assertEqual(ind, sup.index(u))
delta = np.random.uniform(-0.01, 0.01, size=3)
self.assertEqual(ind, sup.index(crystal.incell(u + delta)))
# test out setting by making a copy "by hand"
randcopy = sup.copy() # starts out empty, too.
Ntests = 30
for c, ind in zip(np.random.randint(-1, sup.Nchem, size=Ntests),
np.random.randint(sup.size * sup.N, size=Ntests)):
sup.setocc(ind, c)
for c, poslist in enumerate(sup.occposlist()):
for pos in poslist:
randcopy[pos] = c
self.assertOrderingSuperEqual(sup, randcopy, msg='Indexing fail?')
def testMultiply(self):
"""Can we multiply a supercell by our group operations successfully?"""
sup = supercell.Supercell(self.crys, 3 * self.one, Nsolute=1)
# set up some random occupancy
Ntests = 100
for c, ind in zip(np.random.randint(-1, sup.Nchem, size=Ntests),
np.random.randint(sup.size * sup.N, size=Ntests)):
sup.setocc(ind, c)
g_occ = sup.occ.copy()
for g in sup.G:
gsuper = g * sup
if not gsuper.__sane__():
self.assertTrue(False, msg='GroupOp:\n{}\nbreaks sanity?'.format(g))
# because it's sane, we *only* need to test that occupation is correct
# indexmap[0]: each entry is the index where it "lands"
for n in range(sup.size * sup.N):
g_occ[g.indexmap[0][n]] = sup.occ[n]
self.assertTrue(np.all(g_occ == gsuper.occ))
# rotate a few sites, see if they match up:
for ind in np.random.randint(sup.size * sup.N, size=Ntests // 10):
gu = crystal.incell(np.dot(g.rot, sup.pos[ind]) + g.trans)
self.assertIsInstance(gu, np.ndarray)
self.assertOrderingSuperEqual(gsuper[gu], sup[ind], msg='Group operation fail?')
# quick test of multiplying the other direction, and in-place (which should all call the same code)
self.assertOrderingSuperEqual(gsuper, sup * g, msg='Other rotation fail?')
sup *= g
self.assertOrderingSuperEqual(gsuper, sup, msg='In place rotation fail?')
def testReorder(self):
"""Can we reorder a supercell?"""
sup = supercell.Supercell(self.crys, 3 * self.one, Nsolute=1)
sup.definesolute(sup.Nchem - 1, 's')
# set up some random occupancy
Ntests = 100
for c, ind in zip(np.random.randint(-1, sup.Nchem, size=Ntests),
np.random.randint(sup.size * sup.N, size=Ntests)):
sup.setocc(ind, c)
# Try some simple reorderings: 1. unity permutation; 2. pop+push; 3. reversal
supercopy = sup.copy()
unitymap = [[i for i in range(len(clist))] for clist in sup.chemorder]
supercopy.reorder(unitymap)
self.assertOrderingSuperEqual(sup, supercopy, msg='Reordering fail with unity?')
popmap = []
for c, clist in enumerate(sup.chemorder):
n = len(clist)
popmap.append([(i + 1) % n for i in range(n)])
indpoppush = clist[0]
sup.setocc(indpoppush, -1)
sup.setocc(indpoppush, c) # *now* should be at the *end* of the chemorder list
supercopy.reorder(popmap)
self.assertOrderingSuperEqual(sup, supercopy, msg='Reordering fail with "pop/push"?')
revmap = []
for c, clist in enumerate(sup.chemorder):
n = len(clist)
revmap.append([(n - 1 - i) for i in range(n)])
cl = clist.copy() # need to be careful, since clist gets modified by our popping...
for indpoppush in cl:
sup.setocc(indpoppush, -1)
cl.reverse()
for indpoppush in cl:
sup.setocc(indpoppush, c)
supercopy.reorder(revmap)
self.assertOrderingSuperEqual(sup, supercopy, msg='Reordering fail with reverse?')
# test out a bad mapping:
badmap = [[i % 2 for i in range(len(clist))] for clist in sup.chemorder]
with self.assertRaises(ValueError):
supercopy.reorder(badmap)
self.assertOrderingSuperEqual(sup, supercopy, msg='Reordering is not safe after fail?')
def testEquivalenceMap(self):
"""Can we construct an equivalence map between two supercells?"""
sup = supercell.Supercell(self.crys, 3 * self.one, Nsolute=1)
sup.definesolute(sup.Nchem - 1, 's')
# set up some random occupancy
Ntests = 100
for c, ind in zip(np.random.randint(-1, sup.Nchem, size=Ntests),
np.random.randint(sup.size * sup.N, size=Ntests)):
sup.setocc(ind, c)
supercopy = sup.copy()
# first equivalence test: introduce some random permutations of ordering of supercell
for ind in np.random.randint(sup.size * sup.N, size=Ntests):
c, sup[ind] = sup[ind], -1
sup[ind] = c
g, mapping = supercopy.equivalencemap(sup)
self.assertNotEqual(g, None, msg='Cannot map between permutation?')
supercopy.reorder(mapping)
self.assertOrderingSuperEqual(sup, supercopy, msg='Improper map from random permutation')
# apply all of the group operations, and see how they perform:
for g in sup.G:
gsuper = g * sup
for ind in np.random.randint(sup.size * sup.N, size=Ntests):
c, gsuper[ind] = gsuper[ind], -1
gsuper[ind] = c
g0, mapping = supercopy.equivalencemap(gsuper)
if g != g0:
msg = 'Group operations not equal?\n'
for line0, line1 in itertools.zip_longest(g.__str__().splitlines(),
g0.__str__().splitlines(),
fillvalue=' - '):
msg += line0 + '\t' + line1 + '\n'
self.fail(msg=msg)
self.assertOrderingSuperEqual((g0 * supercopy).reorder(mapping), gsuper,
msg='Group operation + mapping failure?')
# do the testing with occposlist, since that's what this is really for...
rotoccposlist = [[crystal.incell(np.dot(g0.rot, pos) + g0.trans) for pos in poslist]
for poslist in supercopy.occposlist()]
# now, reorder:
reorderoccposlist = copy.deepcopy(rotoccposlist)
for reposlist, poslist, remap in zip(reorderoccposlist, rotoccposlist, mapping):
for i, m in enumerate(remap):
| |
# -*- coding: utf-8 -*-
if __name__ == "__main__":
raise Exception("Test files can't be run directly. Use `python -m pytest greenery`")
from greenery.lego import conc, mult, charclass, one, emptystring, star, plus, nothing, pattern, qm, d, multiplier, bound, w, s, W, D, S, dot, nomatch, inf, zero, parse, from_fsm
from greenery import fsm
# In general the idea for unit tests is that every unit test relies only on
# functionality which has already been unit-tested. If this isn't possible, then
# additional tests are required!
################################################################################
# Equality tests. No point in comparing different lego pieces in tests unless
# this part works
def test_charclass_equality():
assert charclass("a") == charclass("a")
assert ~charclass("a") == ~charclass("a")
assert ~charclass("a") != charclass("a")
assert charclass("ab") == charclass("ba")
def test_mult_equality():
assert mult(charclass("a"), one) == mult(charclass("a"), one)
assert mult(charclass("a"), one) != mult(charclass("b"), one)
assert mult(charclass("a"), one) != mult(charclass("a"), qm)
assert mult(charclass("a"), one) != mult(charclass("a"), multiplier(bound(1), bound(2)))
assert mult(charclass("a"), one) != charclass("a")
def test_conc_equality():
assert conc(mult(charclass("a"), one)) == conc(mult(charclass("a"), one))
assert conc(mult(charclass("a"), one)) != conc(mult(charclass("b"), one))
assert conc(mult(charclass("a"), one)) != conc(mult(charclass("a"), qm))
assert conc(mult(charclass("a"), one)) != conc(mult(charclass("a"), multiplier(bound(1), bound(2))))
assert conc(mult(charclass("a"), one)) != emptystring
def test_pattern_equality():
assert pattern(
conc(mult(charclass("a"), one)),
conc(mult(charclass("b"), one)),
) == pattern(
conc(mult(charclass("b"), one)),
conc(mult(charclass("a"), one)),
)
assert pattern(
conc(mult(charclass("a"), one)),
conc(mult(charclass("a"), one)),
) == pattern(
conc(mult(charclass("a"), one)),
)
################################################################################
# Parsing tests. Absolutely no cleverness is applied at parsing time, we just
# return the exact object which was just parsed. Call reduce() if you wish...
def test_charclass_parsing():
assert charclass.match("a", 0) == (charclass("a"), 1)
assert charclass.parse("a") == charclass("a")
assert charclass.match("aa", 1) == (charclass("a"), 2)
assert charclass.match("a$", 1) == (charclass("$"), 2)
assert charclass.match(".", 0) == (dot, 1)
try:
charclass.match("[", 0)
assert False
except IndexError:
pass
try:
charclass.match("a", 1)
assert False
except nomatch:
pass
def test_charclass_ranges():
# Should accept arbitrary ranges of characters in charclasses. No longer
# limited to alphanumerics. (User beware...)
assert parse("[z{|}~]") == parse("[z-~]")
assert parse("[\w:;<=>?@\\[\\\\\]\\^`]") == parse("[0-z]")
def test_hex_escapes():
# Should be able to parse e.g. "\\x40"
assert parse("\\x00") == parse("\x00")
assert parse("\\x40") == parse("@")
assert parse("[\\x40]") == parse("[@]")
assert parse("[\\x41-\\x5a]") == parse("[A-Z]")
def test_w_d_s():
# Allow "\w", "\d" and "\s" in charclasses
assert charclass.parse("\w") == charclass.parse("[0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ_abcdefghijklmnopqrstuvwxyz]")
assert charclass.parse("[\w~]") == charclass.parse("[0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ_abcdefghijklmnopqrstuvwxyz~]")
assert charclass.parse("[\da]") == charclass.parse("[0123456789a]")
assert charclass.parse("[\s]") == charclass.parse("[\t\n\r\f\v ]")
def test_mult_parsing():
assert mult.parse("[a-g]+") == mult(charclass("abcdefg"), plus)
assert mult.parse("[a-g0-8$%]+") == mult(charclass("abcdefg012345678$%"), plus)
assert mult.parse("[a-g0-8$%\\^]+") == mult(charclass("abcdefg012345678$%^"), plus)
assert mult.match("abcde[^fg]*", 5) == (
mult(~charclass("fg"), star),
11
)
assert mult.match("abcde[^fg]*h{5}[a-z]+", 11) == (
mult(charclass("h"), multiplier(bound(5), bound(5))),
15
)
assert mult.match("abcde[^fg]*h{5}[a-z]+T{1,}", 15) == (
mult(charclass("abcdefghijklmnopqrstuvwxyz"), plus),
21
)
assert mult.match("abcde[^fg]*h{5}[a-z]+T{2,}", 21) == (
mult(charclass("T"), multiplier(bound(2), inf)),
26
)
def test_conc_parsing():
assert conc.parse("abcde[^fg]*h{5}[a-z]+") == conc(
mult(charclass("a"), one),
mult(charclass("b"), one),
mult(charclass("c"), one),
mult(charclass("d"), one),
mult(charclass("e"), one),
mult(~charclass("fg"), star),
mult(charclass("h"), multiplier(bound(5), bound(5))),
mult(charclass("abcdefghijklmnopqrstuvwxyz"), plus),
)
assert conc.parse("[bc]*[ab]*") == conc(
mult(charclass("bc"), star),
mult(charclass("ab"), star),
)
assert conc.parse("abc...") == conc(
mult(charclass("a"), one),
mult(charclass("b"), one),
mult(charclass("c"), one),
mult(dot, one),
mult(dot, one),
mult(dot, one),
)
assert conc.parse("\\d{4}-\\d{2}-\\d{2}") == conc(
mult(charclass("0123456789"), multiplier(bound(4), bound(4))),
mult(charclass("-"), one),
mult(charclass("0123456789"), multiplier(bound(2), bound(2))),
mult(charclass("-"), one),
mult(charclass("0123456789"), multiplier(bound(2), bound(2))),
)
def test_pattern_parsing():
assert pattern.parse("abc|def(ghi|jkl)") == pattern(
conc(
mult(charclass("a"), one),
mult(charclass("b"), one),
mult(charclass("c"), one),
),
conc(
mult(charclass("d"), one),
mult(charclass("e"), one),
mult(charclass("f"), one),
mult(
pattern(
conc(
mult(charclass("g"), one),
mult(charclass("h"), one),
mult(charclass("i"), one),
),
conc(
mult(charclass("j"), one),
mult(charclass("k"), one),
mult(charclass("l"), one),
),
), one
),
)
)
# Accept the "non-capturing group" syntax, "(?: ... )" but give it no
# special significance
assert parse("(?:)") == parse("()")
assert parse("(?:abc|def)") == parse("(abc|def)")
parse("(:abc)") # should give no problems
# Named groups
assert pattern.parse("(?P<ng1>abc)") == parse("(abc)")
################################################################################
# repr() tests
def test_repr():
assert repr(~charclass("a")) == "~charclass('a')"
################################################################################
# Stringification tests
def test_charclass_str():
assert str(w) == "\\w"
assert str(d) == "\\d"
assert str(s) == "\\s"
assert str(charclass("a")) == "a"
assert str(charclass("{")) == "\\{"
assert str(charclass("\t")) == "\\t"
assert str(charclass("ab")) == "[ab]"
assert str(charclass("a{")) == "[a{]"
assert str(charclass("a\t")) == "[\\ta]"
assert str(charclass("a-")) == "[\\-a]"
assert str(charclass("a[")) == "[\\[a]"
assert str(charclass("a]")) == "[\\]a]"
assert str(charclass("ab")) == "[ab]"
assert str(charclass("abc")) == "[abc]"
assert str(charclass("abcd")) == "[a-d]"
assert str(charclass("abcdfghi")) == "[a-df-i]"
assert str(charclass("^")) == "^"
assert str(charclass("\\")) == "\\\\"
assert str(charclass("a^")) == "[\\^a]"
assert str(charclass("0123456789a")) == "[0-9a]"
assert str(charclass("\t\v\r A")) == "[\\t\\v\\r A]"
assert str(charclass("\n\f A")) == "[\\n\\f A]"
assert str(charclass("\t\n\v\f\r A")) == "[\\t-\\r A]"
assert str(charclass("0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ_abcdefghijklmnopqrstuvwxyz|")) == "[0-9A-Z_a-z|]"
assert str(W) == "\\W"
assert str(D) == "\\D"
assert str(S) == "\\S"
assert str(dot) == "."
assert str(~charclass("")) == "."
assert str(~charclass("a")) == "[^a]"
assert str(~charclass("{")) == "[^{]"
assert str(~charclass("\t")) == "[^\\t]"
assert str(~charclass("^")) == "[^\\^]"
# Arbitrary ranges
assert str(parse("[\w:;<=>?@\\[\\\\\]\\^`]")) == "[0-z]"
# TODO: what if \d is a proper subset of `chars`?
# escape sequences are not preserved
assert str(parse("\\x09")) == "\\t"
# Printing ASCII control characters? You should get hex escapes
assert str(parse("\\x00")) == "\\x00"
def test_mult_str():
assert str(bound(2)) == "2"
assert str(inf) == ""
assert str(multiplier(bound(2), inf)) == "{2,}"
a = charclass("a")
assert str(mult(a, one)) == "a"
assert str(mult(a, multiplier(bound(2), bound(2)))) == "a{2}"
assert str(mult(a, multiplier(bound(3), bound(3)))) == "a{3}"
assert str(mult(a, multiplier(bound(4), bound(4)))) == "a{4}"
assert str(mult(a, multiplier(bound(5), bound(5)))) == "a{5}"
assert str(mult(a, qm)) == "a?"
assert str(mult(a, star)) == "a*"
assert str(mult(a, plus)) == "a+"
assert str(mult(a, multiplier(bound(2), bound(5)))) == "a{2,5}"
assert str(mult(a, multiplier(bound(2), inf))) == "a{2,}"
assert str(mult(d, one)) == "\\d"
assert str(mult(d, multiplier(bound(2), bound(2)))) == "\\d{2}"
assert str(mult(d, multiplier(bound(3), bound(3)))) == "\\d{3}"
def test_conc_str():
assert str(conc(
mult(charclass("a"), one),
mult(charclass("b"), one),
mult(charclass("c"), one),
mult(charclass("d"), one),
mult(charclass("e"), one),
mult(~charclass("fg"), star),
mult(charclass("h"), multiplier(bound(5), bound(5))),
mult(charclass("abcdefghijklmnopqrstuvwxyz"), plus),
)) == "abcde[^fg]*h{5}[a-z]+"
def test_pattern_str():
assert str(pattern(
conc(mult(charclass("a"), one)),
conc(mult(charclass("b"), one)),
)) == "a|b"
assert str(pattern(
conc(mult(charclass("a"), one)),
conc(mult(charclass("a"), one)),
)) == "a"
assert str(pattern(
conc(
mult(charclass("a"), one),
mult(charclass("b"), one),
mult(charclass("c"), one),
),
conc(
mult(charclass("d"), one),
mult(charclass("e"), one),
mult(charclass("f"), one),
mult(
pattern(
conc(
mult(charclass("g"), one),
mult(charclass("h"), one),
mult(charclass("i"), one),
),
conc(
mult(charclass("j"), one),
mult(charclass("k"), one),
mult(charclass("l"), one),
),
), one
),
),
)) == "abc|def(ghi|jkl)"
def test_parse_str_round_trip():
assert str(parse("a.b")) == "a.b" # not "a[ab]b"
assert str(parse("\\d{4}")) == "\\d{4}"
assert str(parse("a.b()()")) == "a.b()()"
################################################################################
# Test to_fsm() and alphabet-related functionality
def test_alphabet():
# lego.alphabet() should include `fsm.anything_else`
assert parse("").alphabet() == {fsm.anything_else}
def test_charclass_fsm():
# "[^a]"
nota = (~charclass("a")).to_fsm()
assert nota.alphabet == {"a", fsm.anything_else}
assert nota.accepts("b")
assert nota.accepts(["b"])
assert nota.accepts([fsm.anything_else])
def test_pattern_fsm():
# "a[^a]"
anota = pattern.parse("a[^a]").to_fsm()
assert len(anota.states) == 3
assert not anota.accepts("a")
assert not anota.accepts(["a"])
assert not anota.accepts("b")
assert not anota.accepts(["b"])
assert not anota.accepts("aa")
assert not anota.accepts(["a", "a"])
assert anota.accepts("ab")
assert anota.accepts(["a", "b"])
assert anota.accepts(["a", fsm.anything_else])
assert not anota.accepts("ba")
assert not anota.accepts("bb")
# "0\\d"
zeroD = pattern.parse("0\\d").to_fsm(d.chars)
assert zeroD.accepts("01")
assert not zeroD.accepts("10")
# "\\d{2}"
d2 = pattern.parse("\\d{2}").to_fsm(d.chars)
assert not d2.accepts("")
assert not d2.accepts("1")
assert d2.accepts("11")
assert not d2.accepts("111")
# abc|def(ghi|jkl)
conventional = pattern.parse("abc|def(ghi|jkl)").to_fsm(w.chars)
assert not conventional.accepts("a")
assert not conventional.accepts("ab")
assert conventional.accepts("abc")
assert not conventional.accepts("abcj")
assert conventional.accepts("defghi")
assert conventional.accepts("defjkl")
def test_fsm():
# You should be able to to_fsm() a single lego piece without supplying a specific
# alphabet. That should be determinable from context.
assert parse("a.b").to_fsm().accepts("acb")
bad = parse("0{2}|1{2}").to_fsm({"0", "1", fsm.anything_else})
assert bad.accepts("00")
assert bad.accepts("11")
assert not bad.accepts("01")
bad = parse("0{2}|1{2}").to_fsm()
assert bad.accepts("00")
assert bad.accepts("11")
assert not bad.accepts("01")
def test_odd_bug():
# Odd bug with ([bc]*c)?[ab]*
int5A = mult(charclass("bc"), star).to_fsm({"a", "b", "c", fsm.anything_else})
assert int5A.accepts([])
assert int5A.accepts("")
int5B = mult(charclass("c"), one).to_fsm({"a", "b", "c", fsm.anything_else})
assert int5B.accepts("c")
assert int5B.accepts(["c"])
int5C = int5A + int5B
assert int5C.accepts("c")
assert int5C.accepts(["c"])
def test_bug_28():
# Starification is broken in FSMs
assert not parse("(ab*)").to_fsm().star().accepts("bb")
assert not parse("(ab*)*").to_fsm().accepts("bb")
################################################################################
# Test matches(). Quite sparse at the moment
def test_wildcards_in_charclasses():
# Allow "\w", "\d" and "\s" in charclasses
assert parse("[\w~]*").matches("a0~")
assert parse("[\da]*").matches("0129a")
assert parse("[\s]+").matches(" \t \t ")
def test_block_comment_regex():
# I went through several incorrect regexes for C block comments. Here we show
# why the first few attempts were incorrect
a = parse("/\\*(([^*]|\\*+[^*/])*)\\*/")
assert a.matches("/**/")
assert not a.matches("/***/")
assert not a.matches("/****/")
b = parse("/\\*(([^*]|\\*[^/])*)\\*/")
assert b.matches("/**/")
assert not b.matches("/***/")
assert b.matches("/****/")
c = parse("/\\*(([^*]|\\*+[^*/])*)\\*+/")
assert c.matches("/**/")
assert c.matches("/***/")
assert c.matches("/****/")
def test_named_groups():
a = parse("(?P<ng1>abc)")
assert a.matches("abc")
def test_in():
assert "a" in parse("a")
assert "abcdsasda" in parse("\\w{4,10}")
assert "abc" in parse("abc|def(ghi|jkl)")
################################################################################
# Test string generators
def test_charclass_gen():
gen = charclass("xyz").strings()
assert next(gen) == "x"
assert next(gen) == "y"
assert next(gen) == "z"
try:
next(gen)
assert False
except StopIteration:
assert True
def test_mult_gen():
# One term
gen = mult.parse("[ab]").strings()
assert next(gen) == "a"
assert next(gen) == "b"
try:
next(gen)
assert False
except StopIteration:
assert True
# No terms
gen = mult.parse("[ab]{0}").strings()
assert next(gen) == ""
try:
next(gen)
assert False
except StopIteration:
assert True
# Many terms
gen = mult.parse("[ab]*").strings()
assert next(gen) == ""
assert next(gen) == "a"
assert next(gen) == "b"
assert next(gen) == "aa"
assert next(gen) == "ab"
assert next(gen) == "ba"
assert next(gen) == "bb"
assert next(gen) == "aaa"
def test_conc_generator():
gen = conc.parse("[ab][cd]").strings()
assert next(gen) == "ac"
assert next(gen) == "ad"
assert next(gen) == "bc"
assert next(gen) == "bd"
try:
next(gen)
assert False
except StopIteration:
assert True
def test_pattern_generator():
gen = pattern.parse("[ab]|[cde]").strings()
assert next(gen) == "a"
assert next(gen) == "b"
assert next(gen) == "c"
assert next(gen) == "d"
assert next(gen) == "e"
try:
next(gen)
assert False
except StopIteration:
assert True
# more complex
gen = pattern.parse("abc|def(ghi|jkl)").strings()
assert next(gen) == "abc"
assert next(gen) == "defghi"
assert next(gen) == "defjkl"
gen = mult.parse("[0-9a-fA-F]{3,10}").strings()
assert next(gen) == "000"
assert next(gen) == "001"
assert next(gen) == "002"
def test_infinite_generation():
# Infinite generator, flummoxes both depth-first and breadth-first searches
gen = parse("a*b*").strings()
assert next(gen) == ""
assert next(gen) == "a"
assert next(gen) == "b"
assert next(gen) == "aa"
assert next(gen) == "ab"
assert next(gen) == "bb"
assert next(gen) == "aaa"
assert next(gen) == "aab"
assert next(gen) == "abb"
assert next(gen) == "bbb"
assert next(gen) == "aaaa"
def test_wildcard_generator():
# Generator needs to handle wildcards as well. Wildcards come last.
gen = parse("a.b").strings(otherchar="*")
assert next(gen) == "aab"
assert next(gen) == "abb"
assert next(gen) == "a*b"
try:
next(gen)
assert False
except StopIteration:
assert True
def test_forin():
assert [s for s in parse("abc|def(ghi|jkl)")] == ["abc", "defghi", "defjkl"]
################################################################################
# Test cardinality() and len()
def test_cardinality():
assert charclass.parse("[]").cardinality() == 0
assert mult.parse("[]?").cardinality() == 1
assert mult.parse("[]{0,6}").cardinality() == 1
assert mult.parse("[ab]{3}").cardinality() == 8
assert mult.parse("[ab]{2,3}").cardinality() == 12
assert len(pattern.parse("abc|def(ghi|jkl)")) == 3
try:
len(pattern.parse(".*"))
assert False
except OverflowError:
assert True
################################################################################
def test_copy():
x = pattern.parse("abc|def(ghi|jkl)")
assert x.copy() == x
################################################################################
# Test from_fsm()
def test_dot():
assert str(from_fsm(parse("a.b").to_fsm())) == "a.b" # not "a[ab]b"
def test_abstar():
# Buggggs.
abstar = fsm.FSM(
alphabet = {'a', fsm.anything_else, 'b'},
states = {0, 1},
initial = 0,
finals = {0},
map = {
0: {'a': 0, fsm.anything_else: 1, 'b': 0},
1: {'a': 1, fsm.anything_else: 1, 'b': 1}
}
)
assert str(from_fsm(abstar)) == "[ab]*"
def test_adotb():
adotb = fsm.FSM(
alphabet = {'a', fsm.anything_else, 'b'},
states = {0, 1, 2, 3, 4},
initial = 0,
finals = {4},
map = {
0: {'a': 2, fsm.anything_else: 1, 'b': 1},
1: {'a': 1, fsm.anything_else: 1, 'b': 1},
2: {'a': 3, fsm.anything_else: 3, 'b': 3},
3: {'a': 1, fsm.anything_else: 1, 'b': 4},
4: {'a': 1, fsm.anything_else: 1, 'b': 1}
}
)
assert str(from_fsm(adotb)) == "a.b"
def test_lego_recursion_error():
# Catch a recursion error
assert str(from_fsm(fsm.FSM(
alphabet = {"0", "1"},
states = {0, 1, 2, 3},
initial = 3,
finals = {1},
map = {
0: {"0": 1, "1": 1},
1: {"0": 2, "1": 2},
2: {"0": | |
fnames in walk.walk(srcdir, maxdepth=search_depth):
for srcfname in fnames:
if endswith_one_of_coll(srcfname, src_suffixes):
match = re.match(checkfile_root_regex, srcfname)
if match is None:
eprint("No regex match for filename matching suffix criteria in source directory: {}".format(srcfname))
else:
cf_root_name = match.group(1)
cf_root_full = os.path.join(root, cf_root_name)
if cf_root_full not in checkffileroot_srcfnamechecklist_dict:
checkffileroot_srcfnamechecklist_dict[cf_root_full] = []
checkffileroot_srcfnamechecklist_dict[cf_root_full].append(srcfname)
elif args.get(ARGSTR_CHECKFILE_ROOT) is not None:
checkffileroot_srcfnamechecklist_dict = dict()
cf_root_full = os.path.join(srcdir, args.get(ARGSTR_CHECKFILE_ROOT))
checkffileroot_srcfnamechecklist_dict[cf_root_full] = [
os.path.basename(f) for f in glob.glob(cf_root_full+'*') if endswith_one_of_coll(f, src_suffixes)]
else: # if argument --checkfile was provided or if each source raster is allotted a checkfile
srcffile_checklist = []
for root, dnames, fnames in walk.walk(srcdir, maxdepth=search_depth):
for srcfname in fnames:
if endswith_one_of_coll(srcfname, src_suffixes):
srcffile_checklist.append(os.path.join(root, srcfname))
missing_suffixes = [s for s in src_suffixes if not ends_one_of_coll(s, srcffile_checklist)]
if missing_suffixes:
warnings.warn("Source file suffixes were not found")
if warn_missing_suffix:
eprint("Source directory is missing the following file suffixes: {}".format(missing_suffixes))
missing_suffix_flag[0] = True
elif os.path.isfile(src):
if src.endswith('.txt') and not src.endswith((ARGCHOSET_CHECK_SPECIAL_DEM_SUFFIX_META,
ARGCHOSET_CHECK_SPECIAL_DEM_SUFFIX_INFO50CM)):
bundle_file = src
task_list = script_utils.read_task_bundle(bundle_file)
if args.get(ARGSTR_CHECK_SPECIAL) == ARGCHO_CHECK_SPECIAL_ALL_SEPARATE:
srcffile_checklist = task_list
if args.get(ARGSTR_CHECKFILE_ROOT) is not None:
srcffile_checklist = [srcffile for srcffile in srcffile_checklist if
os.path.basename(srcffile.startswith(ARGSTR_CHECKFILE_ROOT))]
elif args.get(ARGSTR_CHECKFILE_ROOT_REGEX) is not None:
srcffile_checklist = [srcffile for srcffile in srcffile_checklist if
re.match(checkfile_root_regex, os.path.basename(srcffile)) is not None]
else:
argstr_incompat_srcfile_cfroots = [ARGSTR_CHECKFILE, ARGSTR_CHECKFILE_ROOT]
if args.get(argstr_incompat_srcfile_cfroots).count(None) < len(argstr_incompat_srcfile_cfroots):
arg_parser.error("argument {} text file containing checkfile roots is "
"incompatible with the following arguments: {}".format(
ARGSTR_SRC, argstr_incompat_srcfile_cfroots
))
checkffileroot_list = task_list
if args.get(ARGSTR_VERIFY_BY_PAIRNAME_DIR):
checkffileroot_srcfnamechecklist_dict = dict()
pairname_dir_list = []
if verify_by_pairname_dir_depth == 0:
for cff_root in checkffileroot_list:
if not os.path.isdir(cff_root):
warnings.warn("Path in source text file is not an existing directory ({})".format(ARGSTR_VERIFY_BY_PAIRNAME_DIR))
eprint("Path in source text file is not an existing directory: {}".format(cff_root))
elif not re.match(ARGCHOSET_CHECK_SPECIAL_DEM_REGEX_STRIPLEVEL, os.path.basename(cff_root)) is not None:
warnings.warn("Directory name in source text file does not match pairname regex ({})".format(ARGSTR_VERIFY_BY_PAIRNAME_DIR))
eprint("Directory name in source text file does not match pairname regex: {}".format(cff_root))
else:
pairname_dir_list.append(cff_root)
else:
for cff_root in checkffileroot_list:
for root, dnames, fnames in walk.walk(cff_root, maxdepth=verify_by_pairname_dir_depth):
for dn in dnames:
if re.match(ARGCHOSET_CHECK_SPECIAL_DEM_REGEX_STRIPLEVEL, dn) is not None:
pairname_dir = os.path.join(root, dn)
pairname_dir_list.append(pairname_dir)
if len(pairname_dir_list) == 0:
eprint("No pairname directories were found with {} and {}={}".format(
ARGSTR_VERIFY_BY_PAIRNAME_DIR, ARGSTR_VERIFY_BY_PAIRNAME_DIR_DEPTH, verify_by_pairname_dir_depth
))
for pairname_dir in pairname_dir_list:
srcfname_list = []
for _, _, srcfname_list in walk.walk(pairname_dir, maxdepth=1):
break
if len(srcfname_list) == 1 and verifying_strips:
single_strip_file = srcfname_list[0]
if single_strip_file.endswith('.fin'):
strip_finfile = single_strip_file
with open(pairname_dir+'.check', 'w') as check_strips_fin_fp:
check_strips_fin_fp.write(strip_finfile)
continue
for srcfname in srcfname_list:
if endswith_one_of_coll(srcfname, src_suffixes):
match = re.match(checkfile_root_regex, srcfname)
if match is None:
eprint("No regex match for filename matching suffix criteria in source directory: {}".format(srcfname))
else:
if pairname_dir not in checkffileroot_srcfnamechecklist_dict:
checkffileroot_srcfnamechecklist_dict[pairname_dir] = []
checkffileroot_srcfnamechecklist_dict[pairname_dir].append(srcfname)
else:
srcffiles = []
for cff_root in checkffileroot_list:
srcffiles.extend(glob.glob(cff_root+'*'))
if args.get(ARGSTR_CHECKFILE) is not None:
srcffile_checklist = srcffiles
elif args.get(ARGSTR_CHECKFILE_ROOT_REGEX) is not None:
checkffileroot_srcfnamechecklist_dict = dict()
for srcffile in srcffiles:
if endswith_one_of_coll(srcffile, src_suffixes):
srcfdir, srcfname = os.path.split(srcffile)
match = re.match(checkfile_root_regex, srcfname)
if match is None:
eprint("No regex match for file matching suffix criteria pulled from "
"source text file containing checkfile roots: {}".format(srcffile))
else:
cf_root_name = match.group(1)
cf_root_full = os.path.join(srcfdir, cf_root_name)
if cf_root_full not in checkffileroot_srcfnamechecklist_dict:
checkffileroot_srcfnamechecklist_dict[cf_root_full] = []
checkffileroot_srcfnamechecklist_dict[cf_root_full].append(srcfname)
else:
checkffileroot_srcfnamechecklist_dict = {cf_root_full: None for cf_root_full in checkffileroot_list}
# num_srcfiles = None
else:
argstr_incompat_srcfile = [ARGSTR_CHECKFILE_ROOT, ARGSTR_CHECKFILE_ROOT_REGEX, ARGSTR_CHECK_SPECIAL]
if args.get(argstr_incompat_srcfile).count(None) < len(argstr_incompat_srcfile):
arg_parser.error("argument {} source file is incompatible with the following arguments: {}".format(
ARGSTR_SRC, argstr_incompat_srcfile
))
srcffile_checklist = [src]
warn_missing_checked = False
warn_missing_suffix = False
else:
args.set(ARGSTR_CHECKFILE_ROOT, src)
srcdir = os.path.dirname(src)
print("via non-(directory/file) argument {}, argument {} set automatically to: '{}'".format(
ARGSTR_SRC, ARGSTR_CHECKFILE_ROOT, args.get(ARGSTR_CHECKFILE_ROOT)))
checkffileroot_srcfnamechecklist_dict = dict()
cf_root_full = args.get(ARGSTR_CHECKFILE_ROOT)
checkffileroot_srcfnamechecklist_dict[cf_root_full] = [
os.path.basename(f) for f in glob.glob(cf_root_full+'*') if endswith_one_of_coll(f, src_suffixes)]
num_srcfiles_to_check = None
num_checkgroups_to_check = None
num_srcfiles_to_run = None
num_checkgroups_to_run = None
num_srcfiles_err_exist = 0
num_srcfiles_err_skip = 0
num_checkgroups_err_exist = 0
num_checkgroups_err_skip = 0
num_srcfiles_suf_skip = 0
num_checkgroups_suf_miss = 0
num_checkgroups_suf_skip = 0
num_srcfiles_removed = 0
num_checkgroups_removed = 0
num_checkfiles_removed = 0
check_items = None
if checkffileroot_srcfnamechecklist_dict is not None:
num_checkgroups = len(checkffileroot_srcfnamechecklist_dict.keys())
return_incomplete_src_rasters = (args.get(ARGSTR_SCHEDULER) is None)
if return_incomplete_src_rasters:
num_srcfiles_to_check = 0
num_srcfiles_to_run = 0
num_checkgroups_to_check = 0
num_checkgroups_to_run = 0
for cff_root in checkffileroot_srcfnamechecklist_dict:
cff_root_src_rasters = checkffileroot_srcfnamechecklist_dict[cff_root]
checkgroup_errfile = cff_root+errfile_ext
srcfile_count[0] = None
errfile_count[0] = None
missing_suffix_flag[0] = False
checkfile_removed_flag[0] = False
checkffileroot_srcfnamechecklist_dict[cff_root] = checkfile_incomplete(args,
cff_root, checkfile_ext, errfile_ext, src_suffixes,
checkffileroot_srcfnamechecklist_dict[cff_root], return_incomplete_src_rasters,
srcfile_count, errfile_count,
missing_suffix_flag, checkfile_removed_flag,
warn_missing_suffix, warn_errfile_exists,
warn_missing_checked, warn_new_source
)
if checkfile_removed_flag[0]:
num_checkfiles_removed += 1
cff_root_src_rasters_to_check = checkffileroot_srcfnamechecklist_dict[cff_root]
if type(cff_root_src_rasters_to_check) is int and cff_root_src_rasters_to_check == -1:
checkffileroot_srcfnamechecklist_dict[cff_root] = None
num_checkgroups -= 1
num_checkgroups_removed += 1
num_srcfiles_removed += srcfile_count[0]
continue
elif srcfile_count[0] is not None:
num_srcfiles += srcfile_count[0]
if ( cff_root_src_rasters is not None
and ( errfile_count[0] is None
or (not retry_errors and args.get(ARGSTR_CHECKFILE_OFF) and type(cff_root_src_rasters_to_check) is list))):
cff_dir = os.path.join(os.path.dirname(cff_root))
if os.path.isfile(checkgroup_errfile):
srcfname_errlist = cff_root_src_rasters
else:
srcfname_errlist = [fn for fn in cff_root_src_rasters if os.path.isfile(os.path.join(cff_dir, fn+errfile_ext))]
errfile_count[0] = len(srcfname_errlist)
if errfile_count[0] is not None:
num_srcfiles_err_exist += errfile_count[0]
if cff_root_src_rasters_to_check:
num_checkgroups_to_check += 1
if type(cff_root_src_rasters_to_check) is list:
num_srcfiles_to_check_this_group = len(cff_root_src_rasters_to_check)
num_srcfiles_to_check += num_srcfiles_to_check_this_group
else:
num_srcfiles_to_check_this_group = None
if ( (not allow_missing_suffix and missing_suffix_flag[0])
or (not retry_errors and errfile_count[0])):
cff_root_src_rasters_to_check_backup = cff_root_src_rasters_to_check
if not retry_errors and errfile_count[0]:
if args.get(ARGSTR_CHECKFILE_OFF):
if type(cff_root_src_rasters_to_check) is list:
cff_root_src_rasters_to_check = list(set(cff_root_src_rasters_to_check).difference(set(srcfname_errlist)))
num_srcfiles_err_skip += (num_srcfiles_to_check_this_group - len(cff_root_src_rasters_to_check))
if len(cff_root_src_rasters_to_check) == 0:
if num_srcfiles_to_check_this_group > 0:
num_checkgroups_err_skip += 1
else:
if type(cff_root_src_rasters_to_check) is list:
cff_root_src_rasters_to_check = []
num_srcfiles_err_skip += num_srcfiles_to_check_this_group
num_checkgroups_err_exist += 1
if num_srcfiles_to_check_this_group > 0:
num_checkgroups_err_skip += 1
else:
num_checkgroups_err_exist += 1
if cff_root_src_rasters_to_check:
cff_root_src_rasters_to_check = False
num_checkgroups_err_skip += 1
checkffileroot_srcfnamechecklist_dict[cff_root] = cff_root_src_rasters_to_check
if not allow_missing_suffix and missing_suffix_flag[0]:
if type(cff_root_src_rasters_to_check_backup) is list:
cff_root_src_rasters_to_check = []
num_srcfiles_suf_skip += num_srcfiles_to_check_this_group
num_checkgroups_suf_miss += 1
if num_srcfiles_to_check_this_group > 0:
num_checkgroups_suf_skip += 1
else:
num_checkgroups_suf_miss += 1
if cff_root_src_rasters_to_check_backup:
cff_root_src_rasters_to_check = False
num_checkgroups_suf_skip += 1
checkffileroot_srcfnamechecklist_dict[cff_root] = cff_root_src_rasters_to_check
checkffileroot_srcfnamechecklist_dict = {
cff_root: f_list for cff_root, f_list in checkffileroot_srcfnamechecklist_dict.items() if f_list}
check_items = checkffileroot_srcfnamechecklist_dict
num_checkgroups_to_run = len(checkffileroot_srcfnamechecklist_dict.keys())
if num_checkgroups_to_run == 0:
num_srcfiles_to_run = 0
elif type(next(iter(checkffileroot_srcfnamechecklist_dict))) is list:
num_srcfiles_to_run = sum([len(file_list) for file_list in checkffileroot_srcfnamechecklist_dict.values()])
elif srcffile_checklist is not None:
num_srcfiles = len(srcffile_checklist)
srcffile_errlist = [f for f in srcffile_checklist if os.path.isfile(f+errfile_ext)]
num_srcfiles_err_exist = len(srcffile_errlist)
if args.get(ARGSTR_CHECKFILE_OFF):
num_srcfiles_to_check = len(srcffile_checklist)
else:
if args.get(ARGSTR_CHECKFILE):
num_checkgroups = 1
srcffile_checklist = checkfile_incomplete(args,
args.get(ARGSTR_CHECKFILE), None, errfile_ext, src_suffixes,
srcffile_checklist, True,
srcfile_count, errfile_count,
missing_suffix_flag, checkfile_removed_flag,
warn_missing_suffix, warn_errfile_exists,
warn_missing_checked, warn_new_source
)
else:
num_checkgroups = num_srcfiles
srcffile_checklist = [f for f in srcffile_checklist if not os.path.isfile(f+checkfile_ext)]
num_srcfiles_to_check = len(srcffile_checklist)
num_checkgroups_to_check = 1 if (args.get(ARGSTR_CHECKFILE) and num_srcfiles_to_check > 0) else num_srcfiles_to_check
if num_srcfiles_err_exist > 0 and errfile_count[0] is None:
warnings.warn("Error files were found among source files")
if warn_errfile_exists:
eprint("{} error files were found among source selection:".format(num_srcfiles_err_exist))
for fn in sorted(list(srcffile_errlist)):
eprint(fn+errfile_ext)
if not retry_errors and num_srcfiles_err_exist > 0:
if args.get(ARGSTR_CHECKFILE):
srcffile_checklist = []
num_srcfiles_err_skip = num_srcfiles_to_check
num_checkgroups_err_skip = num_checkgroups_to_check
else:
srcffile_checklist = list(set(srcffile_checklist).difference(set(srcffile_errlist)))
num_srcfiles_err_skip = num_srcfiles_to_check - len(srcffile_checklist)
num_checkgroups_err_skip = num_srcfiles_err_skip
if not allow_missing_suffix and missing_suffix_flag[0]:
srcffile_checklist = []
num_srcfiles_suf_skip = num_srcfiles_to_check
num_checkgroups_suf_skip = num_checkgroups_to_check
check_items = srcffile_checklist
num_srcfiles_to_run = len(check_items)
num_checkgroups_to_run = 1 if (args.get(ARGSTR_CHECKFILE) and num_srcfiles_to_run > 0) else num_srcfiles_to_run
else:
raise DeveloperError("Neither `checkffileroot_srcfnamechecklist_dict` "
"nor `srcffile_checklist` have been initialized")
num_errfiles_walk = 0
print("-----")
if not args.get(ARGSTR_CHECKFILE_OFF):
print("Checkfile extension: {}".format(checkfile_ext))
print("Error file extension: {}".format(errfile_ext))
print("Accepted source file suffixes: {}".format(src_suffixes))
if try_removal:
print("-----")
print("{} :: {}{}".format(
ARGSTR_REMOVE_TYPE, args.get(ARGSTR_REMOVE_TYPE),
" ({} and {})".format(ARGCHO_REMOVE_TYPE_CHECKFILES, ARGCHO_REMOVE_TYPE_SOURCEFILES)*(
args.get(ARGSTR_REMOVE_TYPE) == ARGCHO_REMOVE_TYPE_BOTH)))
if allow_remove_checkfiles:
print("Number of checkfiles removed: {}".format(num_checkfiles_removed))
if allow_remove_sourcefiles:
print("Number of check groups removed: {}".format(num_checkgroups_removed))
print("Total number of source files removed: {}".format(num_srcfiles_removed))
if delete_dryrun:
print("(dryrun; must turn on {} and turn off {} to do delete)".format(ARGSTR_DO_DELETE, ARGSTR_DRYRUN))
if args.get(ARGSTR_REMOVE_ONLY):
sys.exit(0)
print("-----")
if os.path.isdir(src):
for root, dnames, fnames in walk.walk(src, maxdepth=search_depth):
for srcfname in fnames:
if srcfname.endswith(errfile_ext):
num_errfiles_walk += 1
print("{} existing error files found within source directory".format(num_errfiles_walk))
print("{} existing error files found among source selection".format(num_srcfiles_err_exist))
if num_srcfiles is not None or num_srcfiles_to_check is not None:
print("Number of source files: {}{}{}{}{}".format(
num_srcfiles if num_srcfiles is not None else '',
', ' if (num_srcfiles is not None and num_srcfiles_to_check is not None) else '',
'{} to check'.format(num_srcfiles_to_check) if num_srcfiles_to_check is not
index=multi_index, columns=columns)
return out_df
@register_class('AlignedDynamicTable', namespace)
class AlignedDynamicTable(DynamicTable):
"""
DynamicTable container that supports storing a collection of subtables. Each sub-table is a
DynamicTable itself that is aligned with the main table by row index. I.e., all
DynamicTables stored in this group MUST have the same number of rows. This type effectively
defines a 2-level table in which the main data is stored in the main table implemented by this type
and additional columns of the table are grouped into categories, with each category being
represented by a separate DynamicTable stored within the group.
"""
__fields__ = ({'name': 'category_tables', 'child': True}, )
@docval(*get_docval(DynamicTable.__init__),
{'name': 'category_tables', 'type': list,
'doc': 'List of DynamicTables to be added to the container', 'default': None},
{'name': 'categories', 'type': 'array_data',
'doc': 'List of names with the ordering of category tables', 'default': None})
def __init__(self, **kwargs):
in_category_tables = popargs('category_tables', kwargs)
in_categories = popargs('categories', kwargs)
if in_categories is None and in_category_tables is not None:
in_categories = [tab.name for tab in in_category_tables]
if in_categories is not None and in_category_tables is None:
raise ValueError("Categories provided but no category_tables given")
# at this point both in_categories and in_category_tables should either both be None or both be a list
if in_categories is not None:
if len(in_categories) != len(in_category_tables):
raise ValueError("%s category_tables given but %s categories specified" %
(len(in_category_tables), len(in_categories)))
# Initialize the main dynamic table
call_docval_func(super().__init__, kwargs)
# Create and set all sub-categories
dts = OrderedDict()
# Add the custom categories given as inputs
if in_category_tables is not None:
# We may need to resize our main table when adding categories as the user may not have set ids
if len(in_category_tables) > 0:
# We have categories to process
if len(self.id) == 0:
# The user did not initialize our main table id's nor set columns for our main table
for i in range(len(in_category_tables[0])):
self.id.append(i)
# Add the user-provided categories in the correct order as described by the categories
# This is necessary because we do not store the categories explicitly but maintain them
# as the order of our self.category_tables. This makes sure look-ups are consistent.
lookup_index = OrderedDict([(k, -1) for k in in_categories])
for i, v in enumerate(in_category_tables):
# Error check that the name of the table is in our categories list
if v.name not in lookup_index:
raise ValueError("DynamicTable %s does not appear in categories %s" % (v.name, str(in_categories)))
# Error check to make sure no two tables with the same name are given
if lookup_index[v.name] >= 0:
raise ValueError("Duplicate table name %s found in input dynamic_tables" % v.name)
lookup_index[v.name] = i
for table_name, tabel_index in lookup_index.items():
# This error case should not be able to occur since the length of the in_categories and
# in_category_tables must match and we made sure that each DynamicTable we added had its
# name in the in_categories list. We, therefore, exclude this check from coverage testing
# but we leave it in just as a backup trigger in case something unexpected happens
if tabel_index < 0: # pragma: no cover
raise ValueError("DynamicTable %s listed in categories but does not appear in category_tables" %
table_name) # pragma: no cover
# Test that all category tables have the correct number of rows
category = in_category_tables[tabel_index]
if len(category) != len(self):
raise ValueError('Category DynamicTable %s does not align, it has %i rows expected %i' %
(category.name, len(category), len(self)))
# Add the category table to our category_tables.
dts[category.name] = category
# Set the self.category_tables attribute, which will set the parent/child relationships for the category_tables
self.category_tables = dts
def __contains__(self, val):
"""
Check if the given value (i.e., column) exists in this table
:param val: If val is a string then check if the given category exists. If val is a tuple
of two strings (category, colname) then check for the given category if the given colname exists.
"""
if isinstance(val, str):
return val in self.category_tables or val in self.colnames
elif isinstance(val, tuple):
if len(val) != 2:
raise ValueError("Expected tuple of strings of length 2 got tuple of length %i" % len(val))
return val[1] in self.get_category(val[0])
else:
return False
@property
def categories(self):
"""
Get the list of names of the categories
Short-hand for list(self.category_tables.keys())
:raises: KeyError if the given name is not in self.category_tables
"""
return list(self.category_tables.keys())
@docval({'name': 'category', 'type': DynamicTable, 'doc': 'Add a new DynamicTable category'},)
def add_category(self, **kwargs):
"""
Add a new DynamicTable to the AlignedDynamicTable to create a new category in the table.
NOTE: The table must align with (i.e., have the same number of rows as) the main data table (and
other category tables). I.e., if the AlignedDynamicTable is already populated with data
then we have to populate the new category with the corresponding data before adding it.
:raises: ValueError is raised if the input table does not have the same number of rows as the main table
"""
category = getargs('category', kwargs)
if len(category) != len(self):
raise ValueError('New category DynamicTable does not align, it has %i rows expected %i' %
(len(category), len(self)))
if category.name in self.category_tables:
raise ValueError("Category %s already in the table" % category.name)
self.category_tables[category.name] = category
category.parent = self
@docval({'name': 'name', 'type': str, 'doc': 'Name of the category we want to retrieve', 'default': None})
def get_category(self, **kwargs):
name = popargs('name', kwargs)
if name is None or (name not in self.category_tables and name == self.name):
return self
else:
return self.category_tables[name]
@docval(*get_docval(DynamicTable.add_column),
{'name': 'category', 'type': str, 'doc': 'The category the column should be added to',
'default': None})
def add_column(self, **kwargs):
"""
Add a column to the table
:raises: KeyError if the category does not exist
"""
category_name = popargs('category', kwargs)
if category_name is None:
# Add the column to our main table
call_docval_func(super().add_column, kwargs)
else:
# Add the column to a sub-category table
try:
category = self.get_category(category_name)
except KeyError:
raise KeyError("Category %s not in table" % category_name)
category.add_column(**kwargs)
@docval({'name': 'data', 'type': dict, 'doc': 'the data to put in this row', 'default': None},
{'name': 'id', 'type': int, 'doc': 'the ID for the row', 'default': None},
{'name': 'enforce_unique_id', 'type': bool, 'doc': 'enforce that the id in the table must be unique',
'default': False},
allow_extra=True)
def add_row(self, **kwargs):
"""
We can either provide the row data as a single dict or by specifying a dict for each category
"""
data, row_id, enforce_unique_id = popargs('data', 'id', 'enforce_unique_id', kwargs)
data = data if data is not None else kwargs
# extract the category data
category_data = {k: data.pop(k) for k in self.categories if k in data}
        # Check that we have the appropriate categories provided
missing_categories = set(self.categories) - set(list(category_data.keys()))
if missing_categories:
raise KeyError(
'\n'.join([
'row data keys don\'t match available categories',
'missing {} category keys: {}'.format(len(missing_categories), missing_categories)
])
)
# Add the data to our main dynamic table
data['id'] = row_id
data['enforce_unique_id'] = enforce_unique_id
call_docval_func(super().add_row, data)
        # Add the data to all our dynamic table categories
for category, values in category_data.items():
self.category_tables[category].add_row(**values)
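    # Illustrative call sketch (column/category names are hypothetical and assume
    # 'behavior' is the table's only category):
    #
    #   table.add_row(data={'speed': 1.5, 'behavior': {'state': 'running'}})
    #   table.add_row(speed=1.5, behavior={'state': 'running'})  # equivalent kwargs form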
@docval({'name': 'ignore_category_ids', 'type': bool,
'doc': "Ignore id columns of sub-category tables", 'default': False})
def to_dataframe(self, **kwargs):
"""Convert the collection of tables to a single pandas DataFrame"""
dfs = [super().to_dataframe().reset_index(), ]
if getargs('ignore_category_ids', kwargs):
dfs += [category.to_dataframe() for category in self.category_tables.values()]
else:
dfs += [category.to_dataframe().reset_index() for category in self.category_tables.values()]
names = [self.name, ] + list(self.category_tables.keys())
res = pd.concat(dfs, axis=1, keys=names)
res.set_index((self.name, 'id'), drop=True, inplace=True)
return res
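    # Note: the returned DataFrame uses a two-level column MultiIndex of
    # (table-or-category name, column name) and is indexed by the main table's 'id'.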
def __getitem__(self, item):
"""
If item is:
* int : Return a single row of the table
* string : Return a single category of the table
* tuple: Get a column, row, or cell from a particular category. The tuple is expected to consist
of (category, selection) where category may be a string with the name of the sub-category
or None (or the name of this AlignedDynamicTable) if we want to slice into the main table.
:returns: DataFrame when retrieving a row or category. Returns scalar when selecting a cell.
Returns a VectorData/VectorIndex | |
<filename>src/main.py
# import pyro
from collections import defaultdict
from torch.distributions import normal, poisson, binomial, log_normal, categorical, uniform
import random
from mpl_toolkits import mplot3d
import numpy as np
import matplotlib.pyplot as plt
import torch as th
from torch import nn
# physical constants
alpha_m = 0.9
alpha_m_star = 2.53
A_max = 1.63
D_x = 0.0
E_star = 0.032
gamma_p = 2.04
S_imm = 0.14
S_infty = 0.049
sigma_i = 10.2**0.5
sigma_0 = 0.66**0.5
X_nu_star = 4.8
X_p_star = 1514.4
X_h_star = 97.3
X_y_star = 3.5
Y_h_star = 9999999 #float("inf")
# simulation constants
delta = 5
EIR_scenario = "Namawala"
is_Garki = True # not sure!
nu = 4.8 if is_Garki else 0.18
parasite_detection_limit = 2 if is_Garki else 40
# Download EIRs - daily EIRs
import re
import requests
#EIR_urls = {"Namawala":
# "https://raw.githubusercontent.com/SwissTPH/openmalaria/6e5207cce791737c53d97d51be584f76d0059447/test/scenario5.xml"}
#EIR_dict = {}
#for name, url in EIR_urls.items():
# text = requests.get(url)
# res = re.findall(r"<EIRDaily.*>(.*)<.*", text.content.decode("utf-8"))
# EIR_dict[name] = [float(x) for x in res]
# from https://www.worldometers.info/demographics/nigeria-demographics/#age-structure
age_distributions = {"Nigeria": th.FloatTensor([[0, 4, 0.1646],
[5, 9, 0.1451],
[10, 14, 0.1251],
[15, 19, 0.1063],
[20, 24, 0.0877],
[25, 29, 0.00733],
[30, 34, 0.00633],
[35, 39, 0.00549],
[40, 44, 0.00459],
[45, 49, 0.00368],
[50, 54, 0.00288],
[55, 59, 0.00231],
[60, 64, 0.00177],
[65, 69, 0.00126],
[70, 74, 0.00083],
[75, 79, 0.00044],
[80, 84, 0.00016],
[85, 89, 0.00004]]),
"DUMMY25": th.FloatTensor([[24, 25, 1.0]]),
}
# Select and adapt EIR frequency
def change_EIR_frequency(EIRdaily, delta):
EIR = []
EIR.append(sum(EIRdaily[-delta:]))
for t in range(0, len(EIRdaily)-1, delta):
EIR.append(sum(EIRdaily[t:t+delta]))
return EIR
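# Illustrative sketch of the re-binning (values are made up): with delta = 5,
# change_EIR_frequency([0.1] * 10, 5) returns [0.5, 0.5, 0.5] -- a leading entry
# built from the final `delta` days followed by the consecutive 5-day totals.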
#EIR = change_EIR_frequency(EIR_dict[EIR_scenario], delta)
# EIR = [0.0206484514, 0.058492964, 0.20566511399999998, 0.30665830299999997, 0.5819757700000001,
# 0.9119642099999999, 0.9812269200000001, 1.08515392, 1.5639562, 1.91511741, 2.26906343,
# 1.4642980899999998, 1.44800599, 0.4953665689999999, 0.188470482, 0.12150698499999998, 0.18865241400000002,
# 0.185076822, 0.139661401, 0.175435914, 0.208139087, 0.234284155, 0.30769259, 0.298616083, 0.351398984,
# 0.262847315, 0.23533705099999996, 0.143308623, 0.329922543, 0.30471578899999996, 0.334684268, 0.267825345,
# 0.10680065749999999, 0.11492165600000001, 0.129193927, 0.10540250799999999, 0.11679969899999999, 0.097755679,
# 0.10671757, 0.0618278874, 0.0647416485, 0.038036469499999996, 0.0377843451, 0.0364936385, 0.0380708517,
# 0.047425256, 0.0326014605, 0.0489408695, 0.0666497766, 0.0296905653, 0.06196254500000001, 0.0623980334,
# 0.047184591000000005, 0.036532315999999995, 0.052737068, 0.0421134431, 0.0394260218, 0.0141218558,
# 0.014938396999999999, 0.00644923895, 0.0095492285, 0.0249317087, 0.0320313153, 0.0132738001,
# 0.022837353400000003, 0.09629449899999999, 0.106729029, 0.23377416750000002, 0.34699540500000003,
# 0.15817682300000002, 0.179243571, 0.131176548, 0.042414276, 0.0206484514]
########################################################################################################################
########################################################################################################################
########################################################################################################################
class PopulationWarmup(nn.Module):
    def __init__(self, n, sim_params, age_distribution, EIR, use_cache=True, device=None):
        super().__init__()  # initialise the nn.Module machinery
        self.simulation_params = sim_params
        self.use_cache = use_cache
        self.device = device
        self.n = n
        self.age_dist = age_distribution.to(self.device)
        self.EIR = th.from_numpy(EIR).to(self.device)
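    # Illustrative construction sketch (values are hypothetical; sim_params must
    # supply the keys read in forward()/_update_immunity, e.g. "delta", "alpha_m",
    # "alpha_m_star", "sigma_0", "X_y_star", "X_h_star", "X_nu_star"; EIR must be a
    # numpy array, e.g. the 5-day series produced by change_EIR_frequency):
    #
    #   warmup = PopulationWarmup(n=1000, sim_params=sim_params,
    #                             age_distribution=age_distributions["Nigeria"],
    #                             EIR=np.array(EIR), device="cpu")
    #   Y, X_y, X_h = warmup.forward()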
def forward(self, record=False):
self.record = record
if record:
self.init_recording()
# set up simulation parameters
delta = self.simulation_params["delta"]
# set up physical parameters
alpha_m = self.simulation_params["alpha_m"]
        alpha_m_star = self.simulation_params["alpha_m_star"]
        n = self.n  # population size; used as a local name throughout forward()
        # draw initial population max ages (and sort them)
        max_age_bands = categorical.Categorical(self.age_dist[:, 2]).sample((n,))
        max_ages = (uniform.Uniform(self.age_dist[:, 0][max_age_bands],
                                    self.age_dist[:, 1][max_age_bands]).sample() * 365).int().sort(descending=True)[0]
# schedule = self._bin_pack(max_ages)
# draw host variations
log_d = th.log(log_normal.LogNormal(th.tensor(0.0).to(self.device),
th.tensor(sigma_i).to(self.device)).sample((n,)))
max_age = max_ages[0].item()
# create tensor storage for intermediate values
X_h = th.zeros((n,), device=self.device).float()
X_p = th.zeros((n,), device=self.device).float()
X_y_infidx = None
Y = th.zeros((n,), device=self.device).float()
# create list storage for infection events
tau_infidx = None # later: Tensor, 0: tau_infidx_0, 1: tau_infidx_max, 2: pop id
# create offset index tensor
offset_idxs = max_age - max_ages
# pre-cache effective EIR
t_coords = th.arange(0, max_age, delta, device=self.device).long()
        eff_EIR = self.EIR[(t_coords % 365) // delta] * self._body_surface(t_coords.float()) / A_max
        # pre-cache D_m
        D_m = 1.0 - alpha_m * th.exp(-((t_coords.float() / 365.0) / alpha_m_star) * th.log(th.tensor(2.0)))
bidx = 0 # index indicating whether a member of the population has already been born
if self.record:
items = {"max_ages": max_ages.clone(),
"log_d": log_d.clone(),
}
self.recs[0].update(items)
# we should employ a bin-packing scheduler here! https://en.wikipedia.org/wiki/Bin_packing_problem
# this would ensure we optimally exploit parallelism at all times!
for t in range(0, max_age, delta):
# update idx determining whether people have already been born at time t
while bidx < n and max_age - max_ages[bidx] <= t:
bidx += 1
# relative time idxs
rel_idxs = (t - offset_idxs[:bidx]).long()
E = eff_EIR.repeat(len(rel_idxs))[rel_idxs // delta]
# calculate force of infection
h_star = self._force_of_infection(t, E, X_p[:bidx], Y[:bidx])
# generate infections
tau_infidx_new = self._new_infections(t, h_star)
if tau_infidx_new is not None:
# update infections
if tau_infidx is None:
tau_infidx = tau_infidx_new
X_y_infidx = th.zeros((len(tau_infidx),), device=self.device)
else:
tau_infidx = th.cat([tau_infidx, tau_infidx_new])
X_y_infidx = th.cat([X_y_infidx,
th.zeros((len(tau_infidx_new),), device=self.device)])
## discard infections that are already over, and attach new ones
# update immunity
ret = self._update_immunity(log_d[:bidx],
t,
rel_idxs,
h_star,
tau_infidx,
X_y_infidx,
X_h[:bidx],
D_m[rel_idxs//delta])
Y[:bidx], X_y_infidx, X_h[:bidx] = ret
# Discard expired infections
if tau_infidx is not None:
mask = tau_infidx[:, 1]>t
tau_infidx = tau_infidx[mask].clone()
X_y_infidx = X_y_infidx[mask].clone()
X_p[:bidx] += E
if self.record:
items = {"X_p": X_p.clone(),
"Y": Y.clone(),
"X_y": X_y_infidx if X_y_infidx is None else X_y_infidx.clone() ,
"X_h": X_h.clone(),
"tau": tau_infidx if tau_infidx is None else tau_infidx.clone(),
"h_star": h_star if h_star is None else h_star.clone(),
"E": E
}
self.recs[t].update(items)
return Y, X_y_infidx, X_h
def _bin_pack(self, lens):
from sortedcontainers import SortedList
slens = sorted([l.item() for l in lens], reverse=True)
bins = SortedList(key=lambda x: slens[0] - sum(x))
for l in slens:
if not bins:
bins.add([l])
continue
idx = bins.bisect_right([slens[0] - l])
if idx >= len(bins):
bins.add([l])
else:
n = bins[idx]
bins.discard(n)
bins.add(n + [l])
return bins
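    # Note: this is a greedy best-fit-decreasing style packing whose bin capacity is
    # the longest lifetime; the schedule is computed but not yet used by forward()
    # (see the bin-packing comment there).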
def init_recording(self):
self.recs = defaultdict(lambda: {})
def backward(self):
pass
def _update_immunity(self, log_d, t, t_idxs, h_star, tau_infidx, X_y_infidx, X_h, D_m):
sigma_0 = self.simulation_params["sigma_0"]
X_y_star = self.simulation_params["X_y_star"]
X_h_star = self.simulation_params["X_h_star"]
X_nu_star = self.simulation_params["X_nu_star"]
if tau_infidx is None or not len(tau_infidx): # no current infections anywhere!
Y = th.zeros(log_d.shape, device=self.device)
return Y, X_y_infidx, X_h
# Update
        sigma_y = sigma_0 * ((1 + X_h / X_nu_star) ** (-0.5))
        D_h = (1 + X_h / X_h_star) ** (-1)
        D_y_infidx = (1 + X_y_infidx / X_y_star) ** (-1)
##################################################################################
# Now we have to convert all relevant quantities from agent idxs to infection idxs
in_idxs = tau_infidx[:, 2].long()
log_d_infidx = log_d[in_idxs]
D_h_infidx = D_h[in_idxs]
t_infidx = t_idxs[in_idxs]
D_m_infidx = D_m[in_idxs]
sigma_y_infidx = sigma_y[in_idxs]
# Calculate concurrent infections
if S_infty < float("inf"):
M, M_infidx = self._groupby_aggregate_sum(in_idxs,
th.ones(in_idxs.shape, device=self.device),
dim_size=len(log_d))
else:
M, M_infidx = None, None
# Update parasite densities per infections
y_infidx = th.exp(self._get_ln_parasite_density(log_d_infidx,
#t_infidx,
t,
tau_infidx,
D_y_infidx,
D_h_infidx,
D_m_infidx,
M_infidx,
sigma_y_infidx))
# Update total parasite densities (see https://discuss.pytorch.org/t/groupby-aggregate-mean-in-pytorch/45335)
Y, Y_infidx = self._groupby_aggregate_sum(in_idxs,
y_infidx,
dim_size=len(log_d))
        if th.any(Y != Y):  # debug hook: catches NaNs in the total parasite densities
            a = 5
            pass
# Update
X_h = X_h + h_star
if self.record:
items = {"sigma_y":sigma_y,
"D_h":D_h,
"M": M,
"D_m": D_m
}
self.recs[t].update(items)
# Update immunity due to pre-erythrocytic exposure # TODO: UNSURE ABOUT THIS HERE!
X_y_infidx = X_y_infidx + Y_infidx - y_infidx
        if not th.all(th.isfinite(X_y_infidx)) or not th.all(th.isfinite(y_infidx)):  # debug hook: non-finite densities
            a = 5
            pass
return Y, X_y_infidx, X_h
def _force_of_infection(self, t, E, X_p, Y):
"""
Batch-ready
"""
        S_1 = S_infty + (1 - S_infty) / (1 + E / E_star)
        S_2 = S_imm + (1 - S_imm) / (1 + (X_p / X_p_star) ** gamma_p)
        S_p = S_1 * S_2
        _lambda = S_p * E
# S_h = ( 1 + Y /Y_h_star)**(-1)
h = poisson.Poisson(_lambda).sample()
h_star = h.clone().long()
h_mask = (h!=0.0)
# if th.any(S_h < 1.0) and th.sum(h_mask) > 0:
# try:
# h_star[h_mask] = binomial.Binomial(h[h_mask].float(), S_h[h_mask]).sample().long()
# except Exception as e:
# a = 5
# pass
if self.record:
items = {"S_1":S_1,
"S_2":S_2}
self.recs[t].update(items)
return h_star
def _new_infections(self, t, h):
"""
Batch-ready
"""
tot_h = h.sum().int().item()
if tot_h == 0:
return None
tau_infidx = th.zeros((tot_h, 3), device=self.device)
        tau_infidx[:, 0] = t
        tau_infidx[:, 1] = t + th.exp(normal.Normal(5.13, 0.8).sample((tot_h,)))
        tau_infidx[:, 2] = th.repeat_interleave(th.arange(len(h), device=self.device), h)
return tau_infidx
def _ln_y_G(self, t_infidx, tau_infidx):
"""
Batch-ready
"""
delta = self.simulation_params["delta"]
tau_infidx_0 = tau_infidx[:, 0]
tau_infidx_max = tau_infidx[:, 1]
        a = 0.018 * (tau_infidx_max - tau_infidx_0)
        a[a > 4.4] = 4.4
        c = a / (1 + (tau_infidx_max - tau_infidx_0) / 35.0)
b = th.log(a / c) / (tau_infidx_max - tau_infidx_0)
ln_y = a * th.exp(-b * (t_infidx - tau_infidx_0)) - nu
ln_y[ln_y<=0.0] = 10**(-10)
# replace exponential with some tricks, i.e. log(n+m) = log(m) + log(1+m/n)
#temp = (-b * (t_infidx - tau_infidx_0))
#term = b.clone().zero_()
#term[temp > np.log(nu)] = th.log(1-nu/th.exp(temp[temp > np.log(nu)]))
#ln_y = temp.clone().zero_() - 10**10
#ln_y[temp > np.log(nu)] = th.log(a[temp > np.log(nu)]) + temp[temp > np.log(nu)] + term[temp > np.log(nu)]
#y[y<=0] = 10E-8 # entirely undetectable
#y[t_infidx < delta] = 1.0
#if th.any(ln_y>20):
# a = 5
# pass
#return ln_y #th.log(y)
return ln_y
def _get_ln_parasite_density(self,
log_d_infidx,
t_infidx,
tau_infidx,
D_y_infidx,
D_h_infidx,
D_m_infidx,
M_infidx,
sigma_y_infidx):
"""
Batch-ready
"""
E_ln_y_0 = log_d_infidx + self._ln_y_G(t_infidx, tau_infidx)
E_ln_y = D_y_infidx * D_h_infidx * D_m_infidx * E_ln_y_0 + th.log(D_x / M_infidx + 1 - D_x)
ln_y_tau_infidx = normal.Normal(E_ln_y, sigma_y_infidx).sample()
        if not th.all(th.isfinite(ln_y_tau_infidx)):  # debug hook: non-finite sampled log-densities
            a = 5
            pass
return ln_y_tau_infidx
def _groupby_aggregate_sum(self, | |
of the lithologic material identified at time of'
' drilling. E.g. Black, dark, tan, rust-coloured'))
lithology_hardness = models.ForeignKey(
LithologyHardnessCode, db_column='lithology_hardness_code',
on_delete=models.PROTECT, blank=True, null=True,
verbose_name='Hardness',
db_comment=('The hardness of the material that a well is drilled into (the lithology), e.g. Very'
' hard, Medium, Very Soft.'))
lithology_material = models.ForeignKey(
LithologyMaterialCode, db_column='lithology_material_code',
on_delete=models.PROTECT, blank=True, null=True,
verbose_name="Material",
db_comment=('Description of the lithologic material using standardized terms, '
'e.g. Rock, Clay, Sand, Unspecified.'))
water_bearing_estimated_flow = models.DecimalField(
max_digits=10, decimal_places=4, blank=True, null=True, verbose_name='Water Bearing Estimated Flow')
water_bearing_estimated_flow_units = models.ForeignKey(
WellYieldUnitCode, db_column='well_yield_unit_code', on_delete=models.PROTECT, blank=True, null=True,
verbose_name='Units')
lithology_observation = models.CharField(
max_length=250, blank=True, null=True, verbose_name='Observations',
db_comment=('Free form text used by the driller to describe observations made of the well '
'lithology including, but not limited to, the lithologic material.'))
bedrock_material = models.ForeignKey(
BedrockMaterialCode, db_column='bedrock_material_code',
on_delete=models.PROTECT, blank=True, null=True,
verbose_name='Bedrock Material',
db_comment=('Code for the bedrock material encountered during drilling and reported in'
' lithologic description.'))
bedrock_material_descriptor = models.ForeignKey(
BedrockMaterialDescriptorCode, db_column='bedrock_material_descriptor_code', on_delete=models.PROTECT,
blank=True, null=True, verbose_name='Descriptor',
db_comment=('Code for adjective that describes the characteristics of the bedrock material in'
' more detail.'))
lithology_structure = models.ForeignKey(LithologyStructureCode, db_column='lithology_structure_code',
on_delete=models.PROTECT, blank=True, null=True,
verbose_name='Bedding')
lithology_moisture = models.ForeignKey(LithologyMoistureCode, db_column='lithology_moisture_code',
on_delete=models.PROTECT, blank=True, null=True,
verbose_name='Moisture')
surficial_material = models.ForeignKey(SurficialMaterialCode, db_column='surficial_material_code',
related_name='surficial_material_set', on_delete=models.PROTECT,
blank=True, null=True, verbose_name='Surficial Material')
secondary_surficial_material = models.ForeignKey(SurficialMaterialCode,
db_column='secondary_surficial_material_code',
related_name='secondary_surficial_material_set',
on_delete=models.PROTECT, blank=True, null=True,
verbose_name='Secondary Surficial Material')
lithology_sequence_number = models.BigIntegerField(blank=True, null=True)
class Meta:
db_table = 'lithology_description'
ordering = ["start", "end"]
db_table_comment = ('Describes the different lithologic qualities, characteristics, and materials found '
'at different depths while drilling.')
db_column_supplemental_comments = {
"bedrock_material_code":"Code for the bedrock material encountered during drilling and reported in lithologic description. ",
"lithology_moisture_code":"Code that describes the level of water within the lithologic layer. i.e. Dry, Damp, Moist, Wet",
"lithology_sequence_number":"Check with developers to see if this is being used, or if it can be deleted.",
"water_bearing_estimated_flow":"Estimated flow of water within the lithologic layer, either recorded in US Gallons Per Minute or as per the well_yield_unit_code column.",
"well_tag_number":"System generated sequential number assigned to each well. It is widely used by groundwater staff as it is the only consistent unique identifier for each well. It is different from a well ID plate number.",
}
def __str__(self):
if self.activity_submission:
return 'activity_submission {} {} {}'.format(self.activity_submission, self.start,
self.end)
else:
return 'well {} {} {}'.format(self.well, self.start, self.end)
class PerforationBase(AuditModel):
"""
Perforation in a well liner
"""
liner_perforation_guid = models.UUIDField(primary_key=True, default=uuid.uuid4,
editable=False)
start = models.DecimalField(db_column='liner_perforation_from', max_digits=7, decimal_places=2,
verbose_name='Perforated From', blank=False,
validators=[MinValueValidator(Decimal('0.00'))])
end = models.DecimalField(db_column='liner_perforation_to', max_digits=7, decimal_places=2,
verbose_name='Perforated To', blank=False,
validators=[MinValueValidator(Decimal('0.01'))])
class Meta:
abstract = True
class LinerPerforation(PerforationBase):
"""
Perforation in a well liner
"""
well = models.ForeignKey(
Well, db_column='well_tag_number', on_delete=models.PROTECT, blank=True,
null=True, related_name='linerperforation_set',
        db_comment=('The file number assigned to a particular well in the province\'s Groundwater '
'Wells and Aquifers application.'))
class Meta:
ordering = ["start", "end"]
db_table = 'liner_perforation'
db_table_comment = ('Describes the depths at which the liner is perforated in a well to help improve '
'water flow at the bottom of the well. Some wells are perforated instead of having '
'a screen installed.')
db_column_supplemental_comments = {
"liner_perforation_from":"The depth at the top of the liner perforation, measured in feet below ground level.",
"liner_perforation_to":"The depth at the bottom of the liner perforation, measured in feet below ground level.",
"well_tag_number":"System generated sequential number assigned to each well. It is widely used by groundwater staff as it is the only consistent unique identifier for each well. It is different from a well ID plate number.",
}
def __str__(self):
return 'well {} {} {}'.format(self.well, self.start, self.end)
class ActivitySubmissionLinerPerforation(PerforationBase):
"""
Perforation in a well liner
"""
activity_submission = models.ForeignKey(ActivitySubmission, db_column='filing_number',
on_delete=models.PROTECT, blank=True, null=True,
related_name='linerperforation_set')
class Meta:
ordering = ["start", "end"]
db_table_comment = ('Describes the depths at which the liner is perforated in a well to help improve '
'water flow at the bottom of the well. Some wells are perforated instead of having '
'a screen installed.')
def __str__(self):
return 'activity_submission {} {} {}'.format(self.activity_submission,
self.start,
self.end)
class Casing(AuditModel):
"""
Casing information
A casing may be associated to a particular submission, or to a well.
"""
casing_guid = models.UUIDField(
primary_key=True, default=uuid.uuid4, editable=False)
activity_submission = models.ForeignKey(ActivitySubmission, db_column='filing_number',
on_delete=models.PROTECT, blank=True, null=True,
related_name='casing_set')
well = models.ForeignKey(
Well, db_column='well_tag_number', on_delete=models.PROTECT,
blank=True, null=True,
related_name='casing_set',
        db_comment=('The file number assigned to a particular well in the province\'s Groundwater '
'Wells and Aquifers application.'))
# 2018/Sep/26 - According to PO (Lindsay), diameter, start and end are required fields.
# There is however a lot of legacy data that does not have this field.
start = models.DecimalField(db_column='casing_from', max_digits=7, decimal_places=2, verbose_name='From',
null=True, blank=True, validators=[MinValueValidator(Decimal('0.00'))])
end = models.DecimalField(db_column='casing_to', max_digits=7, decimal_places=2, verbose_name='To',
null=True, blank=True, validators=[MinValueValidator(Decimal('0.01'))])
# NOTE: Diameter should be pulling from screen.diameter
diameter = models.DecimalField(
max_digits=8, decimal_places=3, verbose_name='Diameter', null=True,
blank=True, validators=[MinValueValidator(Decimal('0.5'))],
        db_comment=('The diameter as measured in inches of the casing of the well. There can be multiple '
'casings in a well, e.g. surface casing, and production casing. Diameter of casing made '
'available to the public is generally the production casing.'))
casing_code = models.ForeignKey(CasingCode, db_column='casing_code', on_delete=models.PROTECT,
verbose_name='Casing Type Code', null=True)
casing_material = models.ForeignKey(CasingMaterialCode, db_column='casing_material_code',
on_delete=models.PROTECT, blank=True, null=True,
verbose_name='Casing Material Code')
wall_thickness = models.DecimalField(max_digits=6, decimal_places=3, verbose_name='Wall Thickness',
blank=True, null=True,
validators=[MinValueValidator(Decimal('0.01'))])
drive_shoe_status = models.ForeignKey(DriveShoeCode, db_column='drive_shoe_code',
on_delete=models.PROTECT, blank=True, null=True,
verbose_name='Drive Shoe Code')
class Meta:
ordering = ["start", "end"]
db_table = 'casing'
db_table_comment = ('Piping or tubing installed in a well to support the sides of the well. The casing '
'is comprised of a production (inner tube) and surface (outer tube) and can be made '
'of a variety of materials.')
db_column_supplemental_comments = {
"casing_code":"Describes the casing component (piping or tubing installed in a well) as either production casing, surface casing (outer casing), or open hole.",
"casing_from":"The depth below ground level at which the casing begins. Measured in feet below ground level.",
"casing_to":"The depth below ground level at which the casing ends. Measured in feet below ground level.",
"diameter":"The diameter of the casing measured in inches. There can be multiple casings in a well, e.g. surface casing, and production casing. Diameter of casing made available to the public is generally the production casing.",
"drive_shoe_code":"Indicates Y or N if a drive shoe was used in the installation of the casing. A drive shoe is attached to the end of a casing and it helps protect it during installation.",
"wall_thickness":"The thickness of the casing wall, measured in inches.",
"well_tag_number":"System generated sequential number assigned to each well. It is widely used by groundwater staff as it is the only consistent unique identifier for each well. It is different from a well ID plate number.",
}
def __str__(self):
if self.activity_submission:
return 'activity_submission {} {} {}'.format(self.activity_submission, self.start, self.end)
else:
return 'well {} {} {}'.format(self.well, self.start, self.end)
def as_dict(self):
return {
"start": self.start,
"end": self.end,
"casing_guid": self.casing_guid,
"well_tag_number": self.well_tag_number,
"diameter": self.diameter,
"wall_thickness": self.wall_thickness,
"casing_material": self.casing_material,
"drive_shoe_status": self.drive_shoe_status
}
class Screen(AuditModel):
"""
Screen in a well
"""
screen_guid = models.UUIDField(
primary_key=True, default=uuid.uuid4, editable=False)
activity_submission = models.ForeignKey(ActivitySubmission, db_column='filing_number',
on_delete=models.PROTECT, blank=True, null=True,
related_name='screen_set')
well = models.ForeignKey(
Well, db_column='well_tag_number', on_delete=models.PROTECT, blank=True,
null=True, related_name='screen_set',
db_comment=('System generated sequential number assigned to each well. It is widely used by groundwater staff as it is the only consistent unique identifier for each well. It is different from a well ID plate number.'))
start = models.DecimalField(db_column='screen_from', max_digits=7, decimal_places=2, verbose_name='From',
blank=True, null=True, validators=[MinValueValidator(Decimal('0.00'))])
end = models.DecimalField(db_column='screen_to', max_digits=7, decimal_places=2, verbose_name='To',
blank=False, null=True, validators=[MinValueValidator(Decimal('0.01'))])
diameter = models.DecimalField(db_column='screen_diameter', max_digits=7, decimal_places=2, verbose_name='Diameter',
blank=True, null=True,
validators=[MinValueValidator(Decimal('0.0'))])
assembly_type = models.ForeignKey(
ScreenAssemblyTypeCode, db_column='screen_assembly_type_code', on_delete=models.PROTECT, blank=True,
null=True)
slot_size = models.DecimalField(max_digits=7, decimal_places=2, verbose_name='Slot Size',
blank=True, null=True, validators=[MinValueValidator(Decimal('0.00'))])
class Meta:
db_table = 'screen'
ordering = ['start', 'end']
db_table_comment = ('Describes the screen type, diameter of screen, and the depth at which the screen is'
' installed in a well.')
def __str__(self):
if self.activity_submission:
return 'activity_submission {} {} {}'.format(self.activity_submission, self.start,
self.end)
else:
return 'well {} {} {}'.format(self.well, self.start, self.end)
class WaterQualityColour(CodeTableModel):
"""
Colour choices for describing water quality
"""
code = models.CharField(primary_key=True, max_length=32,
db_column='water_quality_colour_code')
description = models.CharField(max_length=100)
class Meta:
db_table = 'water_quality_colour_code'
db_table_comment = ('Valid values of the colour of the water as recorded at time | |
can be called directly, which is the
same as calling `validate`. For examples, see `NameValidator`.
"""
# Initial checks ..............
if (names is None):
if (nbfields is None):
return None
names = []
if isinstance(names, basestring):
names = [names, ]
if nbfields is not None:
nbnames = len(names)
if (nbnames < nbfields):
names = list(names) + [''] * (nbfields - nbnames)
elif (nbnames > nbfields):
names = names[:nbfields]
# Set some shortcuts ...........
deletechars = self.deletechars
excludelist = self.excludelist
case_converter = self.case_converter
replace_space = self.replace_space
# Initializes some variables ...
validatednames = []
seen = dict()
nbempty = 0
#
for item in names:
item = case_converter(item).strip()
if replace_space:
item = item.replace(' ', replace_space)
item = ''.join([c for c in item if c not in deletechars])
if item == '':
item = defaultfmt % nbempty
while item in names:
nbempty += 1
item = defaultfmt % nbempty
nbempty += 1
elif item in excludelist:
item += '_'
cnt = seen.get(item, 0)
if cnt > 0:
validatednames.append(item + '_%d' % cnt)
else:
validatednames.append(item)
seen[item] = cnt + 1
return tuple(validatednames)
#
def __call__(self, names, defaultfmt="f%i", nbfields=None):
return self.validate(names, defaultfmt=defaultfmt, nbfields=nbfields)
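# Illustrative sketch (assuming the default NameValidator configuration, which
# upper-cases names): duplicates get a numeric suffix and empty entries fall back
# to `defaultfmt`, e.g.
#
#   NameValidator().validate(['a', 'a', ''])  # -> ('A', 'A_1', 'f0')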
def str2bool(value):
"""
Tries to transform a string supposed to represent a boolean to a boolean.
Parameters
----------
value : str
The string that is transformed to a boolean.
Returns
-------
boolval : bool
The boolean representation of `value`.
Raises
------
ValueError
If the string is not 'True' or 'False' (case independent)
Examples
--------
>>> np.lib._iotools.str2bool('TRUE')
True
>>> np.lib._iotools.str2bool('false')
False
"""
value = value.upper()
if value == 'TRUE':
return True
elif value == 'FALSE':
return False
else:
raise ValueError("Invalid boolean")
class ConverterError(Exception):
"""
Exception raised when an error occurs in a converter for string values.
"""
pass
class ConverterLockError(ConverterError):
"""
Exception raised when an attempt is made to upgrade a locked converter.
"""
pass
class ConversionWarning(UserWarning):
"""
Warning issued when a string converter has a problem.
Notes
-----
In `genfromtxt` a `ConversionWarning` is issued if raising exceptions
is explicitly suppressed with the "invalid_raise" keyword.
"""
pass
class StringConverter(object):
"""
Factory class for function transforming a string into another object
(int, float).
After initialization, an instance can be called to transform a string
into another object. If the string is recognized as representing a
missing value, a default value is returned.
Attributes
----------
func : function
Function used for the conversion.
default : any
Default value to return when the input corresponds to a missing
value.
type : type
Type of the output.
_status : int
Integer representing the order of the conversion.
_mapper : sequence of tuples
Sequence of tuples (dtype, function, default value) to evaluate in
order.
_locked : bool
Holds `locked` parameter.
Parameters
----------
dtype_or_func : {None, dtype, function}, optional
If a `dtype`, specifies the input data type, used to define a basic
function and a default value for missing data. For example, when
`dtype` is float, the `func` attribute is set to `float` and the
default value to `np.nan`. If a function, this function is used to
convert a string to another object. In this case, it is recommended
to give an associated default value as input.
default : any, optional
Value to return by default, that is, when the string to be
converted is flagged as missing. If not given, `StringConverter`
tries to supply a reasonable default value.
missing_values : {None, sequence of str}, optional
``None`` or sequence of strings indicating a missing value. If ``None``
then missing values are indicated by empty entries. The default is
``None``.
locked : bool, optional
Whether the StringConverter should be locked to prevent automatic
upgrade or not. Default is False.
"""
#
_mapper = [(nx.bool_, str2bool, False),
(nx.integer, int, -1)]
# On 32-bit systems, we need to make sure that we explicitly include
    # nx.int64 since nx.integer is nx.int32.
if nx.dtype(nx.integer).itemsize < nx.dtype(nx.int64).itemsize:
_mapper.append((nx.int64, int, -1))
_mapper.extend([(nx.floating, float, nx.nan),
(nx.complexfloating, complex, nx.nan + 0j),
(nx.longdouble, nx.longdouble, nx.nan),
(nx.unicode_, asunicode, '???'),
(nx.string_, asbytes, '???')])
(_defaulttype, _defaultfunc, _defaultfill) = zip(*_mapper)
@classmethod
def _getdtype(cls, val):
"""Returns the dtype of the input variable."""
return np.array(val).dtype
#
@classmethod
def _getsubdtype(cls, val):
"""Returns the type of the dtype of the input variable."""
return np.array(val).dtype.type
#
# This is a bit annoying. We want to return the "general" type in most
# cases (ie. "string" rather than "S10"), but we want to return the
# specific type for datetime64 (ie. "datetime64[us]" rather than
# "datetime64").
@classmethod
def _dtypeortype(cls, dtype):
"""Returns dtype for datetime64 and type of dtype otherwise."""
if dtype.type == np.datetime64:
return dtype
return dtype.type
#
@classmethod
def upgrade_mapper(cls, func, default=None):
"""
Upgrade the mapper of a StringConverter by adding a new function and
its corresponding default.
The input function (or sequence of functions) and its associated
default value (if any) is inserted in penultimate position of the
mapper. The corresponding type is estimated from the dtype of the
default value.
Parameters
----------
func : var
Function, or sequence of functions
Examples
--------
>>> import dateutil.parser
>>> import datetime
        >>> dateparser = dateutil.parser.parse
>>> defaultdate = datetime.date(2000, 1, 1)
>>> StringConverter.upgrade_mapper(dateparser, default=defaultdate)
"""
        # Func is a single function
if hasattr(func, '__call__'):
cls._mapper.insert(-1, (cls._getsubdtype(default), func, default))
return
elif hasattr(func, '__iter__'):
if isinstance(func[0], (tuple, list)):
for _ in func:
cls._mapper.insert(-1, _)
return
if default is None:
default = [None] * len(func)
else:
default = list(default)
default.append([None] * (len(func) - len(default)))
for (fct, dft) in zip(func, default):
cls._mapper.insert(-1, (cls._getsubdtype(dft), fct, dft))
#
def __init__(self, dtype_or_func=None, default=None, missing_values=None,
locked=False):
# Defines a lock for upgrade
self._locked = bool(locked)
# No input dtype: minimal initialization
if dtype_or_func is None:
self.func = str2bool
self._status = 0
self.default = default or False
dtype = np.dtype('bool')
else:
# Is the input a np.dtype ?
try:
self.func = None
dtype = np.dtype(dtype_or_func)
except TypeError:
# dtype_or_func must be a function, then
if not hasattr(dtype_or_func, '__call__'):
errmsg = ("The input argument `dtype` is neither a"
" function nor a dtype (got '%s' instead)")
raise TypeError(errmsg % type(dtype_or_func))
# Set the function
self.func = dtype_or_func
# If we don't have a default, try to guess it or set it to
# None
if default is None:
try:
default = self.func('0')
except ValueError:
default = None
dtype = self._getdtype(default)
# Set the status according to the dtype
_status = -1
for (i, (deftype, func, default_def)) in enumerate(self._mapper):
if np.issubdtype(dtype.type, deftype):
_status = i
if default is None:
self.default = default_def
else:
self.default = default
break
# if a converter for the specific dtype is available use that
last_func = func
for (i, (deftype, func, default_def)) in enumerate(self._mapper):
if dtype.type == deftype:
_status = i
last_func = func
if default is None:
self.default = default_def
else:
self.default = default
break
func = last_func
if _status == -1:
# We never found a match in the _mapper...
_status = 0
self.default = default
self._status = _status
# If the input was a dtype, set the function to the last we saw
if self.func is None:
self.func = func
# If the status is 1 (int), change the function to
# something more robust.
if self.func == self._mapper[1][1]:
if issubclass(dtype.type, np.uint64):
self.func = np.uint64
elif issubclass(dtype.type, np.int64):
self.func = np.int64
else:
self.func = lambda x: int(float(x))
# Store the list of strings corresponding to missing values.
if missing_values is None:
self.missing_values = {''}
else:
if isinstance(missing_values, basestring):
missing_values = missing_values.split(",")
self.missing_values = set(list(missing_values) + [''])
#
self._callingfunction = self._strict_call
self.type = self._dtypeortype(dtype)
self._checked = False
self._initial_default = default
#
def _loose_call(self, value):
try:
return self.func(value)
except ValueError:
return self.default
#
def _strict_call(self, value):
try:
# We check if we can convert the value using the current function
new_value = self.func(value)
# In addition to having to check whether func can convert the
# value, we also have to make sure that we don't get overflow
| |
print('7_', 8)
7_ 8
""").strip()
assert expected == result
def test_heuristic_eval_1():
result = py("1+2")
expected = dedent("""
[PYFLYBY] 1+2
3
""").strip()
assert expected == result
def test_heuristic_eval_concat_1():
result = py("5 + 7")
expected = dedent("""
[PYFLYBY] 5 + 7
12
""").strip()
assert expected == result
def test_heuristic_eval_complex_1():
result = py("5 + 7j")
expected = dedent("""
[PYFLYBY] 5 + 7j
(5+7j)
""").strip()
assert expected == result
def test_heuristic_eval_complex_2():
result = py("(5+7j) ** 12")
expected = dedent("""
[PYFLYBY] (5+7j) ** 12
(65602966976-150532462080j)
""").strip()
assert expected == result
def test_heuristic_eval_exponentiation_1():
result = py("123**4")
expected = dedent("""
[PYFLYBY] 123**4
228886641
""").strip()
assert expected == result
def test_heuristic_eval_with_argv_1():
result = py('for x in sys.argv[1:]: print(x.capitalize())',
'canal', 'grand')
expected = dedent("""
[PYFLYBY] import sys
[PYFLYBY] for x in sys.argv[1:]: print(x.capitalize())
Canal
Grand
""").strip()
assert expected == result
def test_heuristic_exec_statement_1():
result = py('''if 1: print("Mulberry")''')
expected = dedent("""
[PYFLYBY] if 1: print("Mulberry")
Mulberry
""").strip()
assert expected == result
def test_heuristic_exec_multiline_statement_1():
result = py('''if 1:\n print("Mott")''')
expected = dedent("""
[PYFLYBY] if 1:
[PYFLYBY] print("Mott")
Mott
""").strip()
assert expected == result
def test_heuristic_apply_1():
result = py("str.upper", "'Ditmars'")
expected = dedent("""
[PYFLYBY] str.upper('Ditmars')
'DITMARS'
""").strip()
assert expected == result
def test_heuristic_apply_stdin_1():
result = py("str.upper", "-", stdin=b"Nassau")
expected = dedent("""
[PYFLYBY] str.upper('Nassau')
'NASSAU'
""").strip()
assert expected == result
def test_heuristic_apply_stdin_2():
result = py("--output=silent", "sys.stdout.write", "-", stdin=b"Downing")
expected = dedent("""
[PYFLYBY] import sys
[PYFLYBY] sys.stdout.write('Downing')
Downing
""").strip()
assert expected == result
def test_heuristic_apply_stdin_no_eval_1():
result = py("--output=silent", "sys.stdout.write", "-", stdin=b"3+4")
expected = dedent("""
[PYFLYBY] import sys
[PYFLYBY] sys.stdout.write('3+4')
3+4
""").strip()
assert expected == result
def test_heuristic_apply_stdin_quiet_1():
result = py("--output=silent", "-q", "sys.stdout.write", "-", stdin=b"Houston")
expected = "Houston"
assert expected == result
def test_heuristic_apply_lambda_1():
result = py("lambda a,b:a*b", "6", "7")
expected = dedent("""
[PYFLYBY] lambda a,b:a*b
[PYFLYBY] (lambda a,b:a*b)(6, 7)
42
""").strip()
assert expected == result
def test_heuristic_apply_lambda_nested_1():
result = py("(lambda a,b: lambda c,d: a*b*c*d)(2,3)", "5", "7")
expected = dedent("""
[PYFLYBY] (lambda a,b: lambda c,d: a*b*c*d)(2,3)
[PYFLYBY] (lambda a,b: lambda c,d: a*b*c*d)(2,3)(5, 7)
210
""").strip()
assert expected == result
def test_heuristic_apply_builtin_args_1():
result = py("round", "2.984375", "3")
expected = dedent("""
[PYFLYBY] round(2.984375, 3)
2.984
""").strip()
assert expected == result
def test_heuristic_apply_builtin_args_2():
result = py("round", "2.984375")
if PY2:
expected = dedent("""
[PYFLYBY] round(2.984375)
3.0
""").strip()
else:
expected = dedent("""
[PYFLYBY] round(2.984375)
3
""").strip()
assert expected == result
def test_heuristic_apply_builtin_kwargs_1():
result = py("round", "2.984375", "--ndigits=3")
expected = dedent("""
[PYFLYBY] round(2.984375, ndigits=3)
2.984
""").strip()
assert expected == result
def test_heuristic_apply_builtin_kwargs_separate_arg_1():
result = py("round", "2.984375", "--ndigits", "3")
expected = dedent("""
[PYFLYBY] round(2.984375, ndigits=3)
2.984
""").strip()
assert expected == result
def test_heuristic_print_1():
result = py("print", "4", "5")
expected = dedent("""
[PYFLYBY] print(4, 5)
4 5
""").strip()
assert expected == result
def test_heuristic_apply_expression_1():
result = py("3.0.is_integer")
expected = dedent("""
[PYFLYBY] 3.0.is_integer()
True
""").strip()
assert expected == result
def test_heuristic_apply_expression_2():
result = py("sys.stdout.flush")
expected = dedent("""
[PYFLYBY] import sys
[PYFLYBY] sys.stdout.flush()
""").strip()
assert expected == result
def test_heuristic_eval_expression_1():
result = py("os.path.sep")
expected = dedent("""
[PYFLYBY] import os.path
[PYFLYBY] os.path.sep
'/'
""").strip()
assert expected == result
def test_heuristic_eval_expression_nonmodule_1():
result = py("os.getcwd.__name__")
expected = dedent("""
[PYFLYBY] import os
[PYFLYBY] os.getcwd.__name__
'getcwd'
""").strip()
assert expected == result
@pytest.mark.skipif(
sys.version_info[0] == 3, reason="xml.dom.minidom also need import on py3"
)
def test_heuristic_eval_symbol_submodule_1():
# Verify that heuristic eval of an expression in a module in a package
# works, and also verify that we log the submodule import.
result = py("xml.dom.minidom.XMLNS_NAMESPACE")
expected = dedent("""
[PYFLYBY] import xml.dom
[PYFLYBY] xml.dom.minidom.XMLNS_NAMESPACE
'http://www.w3.org/2000/xmlns/'
""").strip()
assert expected == result
def test_heuristic_apply_method_arg_1():
result = py("float.is_integer", "3.0")
expected = dedent("""
[PYFLYBY] float.is_integer(3.0)
True
""").strip()
assert expected == result
result = py("float.is_integer", "3.5")
expected = dedent("""
[PYFLYBY] float.is_integer(3.5)
False
""").strip()
assert expected == result
def test_apply_builtin_too_few_args_1():
result, retcode = py("round")
assert retcode == 1
if PY2:
assert "TypeError: Required argument 'number' (pos 1) not found" in result
else:
assert "TypeError: round() missing required argument 'number' (pos 1)" in result
def test_apply_builtin_too_many_args_1():
result, retcode = py("round", "6", "7", "8")
assert retcode == 1
assert "TypeError: round() takes at most 2 arguments (3 given)" in result
def test_apply_builtin_bad_kwarg_1():
result, retcode = py("round", "2.7182", "--foo=5")
assert retcode == 1
assert "TypeError: 'foo' is an invalid keyword argument" in result
def test_apply_pyfunc_posargs_1():
result = py("calendar.weekday 2014 7 18".split())
expected = dedent("""
[PYFLYBY] import calendar
[PYFLYBY] calendar.weekday(2014, 7, 18)
4
""").strip()
assert expected == result
def test_apply_pyfunc_kwarg_1():
result = py("calendar.weekday --year=2014 --month=7 --day=17".split())
expected = dedent("""
[PYFLYBY] import calendar
[PYFLYBY] calendar.weekday(2014, 7, 17)
3
""").strip()
assert expected == result
def test_apply_pyfunc_kwarg_disorder_1():
result = py("calendar.weekday --day=16 --month=7 --year=2014".split())
expected = dedent("""
[PYFLYBY] import calendar
[PYFLYBY] calendar.weekday(2014, 7, 16)
2
""").strip()
assert expected == result
def test_apply_pyfunc_kwarg_short_1():
result = py("calendar.weekday -m 7 -d 15 -y 2014".split())
expected = dedent("""
[PYFLYBY] import calendar
[PYFLYBY] calendar.weekday(2014, 7, 15)
1
""").strip()
assert expected == result
def test_apply_pyfunc_hybrid_args_disorder_1():
result = py("calendar.weekday 2014 -day 15 -month 7".split())
expected = dedent("""
[PYFLYBY] import calendar
[PYFLYBY] calendar.weekday(2014, 7, 15)
1
""").strip()
assert expected == result
def test_apply_argspec_too_few_args_1():
result, retcode = py("base64.b64decode")
assert retcode == 1
assert "[PYFLYBY] missing required argument s" in result
if PY2:
assert "$ py base64.b64decode s [altchars]" in result, result
else:
assert "$ py base64.b64decode s [altchars [validate]]" in result
def test_apply_argspec_too_few_args_2():
result, retcode = py("calendar.weekday")
assert retcode == 1
assert "[PYFLYBY] missing required argument year" in result
assert "$ py calendar.weekday year month day" in result
def test_apply_argspec_too_many_args_1():
result, retcode = py("base64.b64decode", "a", "b", "c", "d")
assert retcode == 1
if PY2:
assert ("[PYFLYBY] Too many positional arguments. "
"Expected 1-2 positional argument(s): s, altchars. "
"Got 4 args: a b c d") in result, result
assert "$ py base64.b64decode s [altchars]" in result
else:
assert ("[PYFLYBY] Too many positional arguments. "
"Expected 1-3 positional argument(s): s, altchars, validate. "
"Got 4 args: a b c d") in result, result
assert "$ py base64.b64decode s [altchars [validate]]" in result
def test_apply_argspec_too_many_args_2():
result, retcode = py("calendar.weekday", "a", "b", "c", "d")
assert retcode == 1
assert ("[PYFLYBY] Too many positional arguments. "
"Expected 3 positional argument(s): year, month, day. "
"Got 4 args: a b c d") in result
assert "$ py calendar.weekday year month day" in result
def test_apply_argspec_bad_kwarg_1():
result, retcode = py("base64.b64decode", "x", "--christopher=sheridan")
assert retcode == 1
assert "[PYFLYBY] Unknown option name christopher" in result
if PY2:
assert "$ py base64.b64decode s [altchars]" in result
else:
assert "$ py base64.b64decode s [altchars [validate]]" in result
def test_apply_dashdash_1():
result = py('--apply', 'print', '4.000', '--', '--help', '5.000')
expected = dedent("""
[PYFLYBY] print(4.0, '--help', '5.000')
4.0 --help 5.000
""").strip()
assert expected == result
def test_apply_namedtuple_1():
result = py('namedtuple("ab", "aa bb")', "3", "4")
expected = dedent("""
[PYFLYBY] from collections import namedtuple
[PYFLYBY] namedtuple("ab", "aa bb")
[PYFLYBY] namedtuple("ab", "aa bb")(3, 4)
ab(aa=3, bb=4)
""").strip()
assert expected == result
def test_repr_str_1():
result = py("'Astor'")
expected = dedent("""
[PYFLYBY] 'Astor'
'Astor'
""").strip()
assert expected == result
@pytest.mark.skipif(
PY3,
reason="Long integers are not valid syntax in Python 3.")
def test_repr_long_1():
result = py("5L")
expected = dedent("""
[PYFLYBY] 5L
5L
""").strip()
assert expected == result
def test_future_division_1():
result = py("1/2")
expected = dedent("""
[PYFLYBY] 1/2
0.5
""").strip()
assert expected == result
def test_integer_division_1():
result = py("7//3")
expected = dedent("""
[PYFLYBY] 7//3
2
""").strip()
assert expected == result
def test_print_statement_1():
result = py("print(42)")
expected = dedent("""
[PYFLYBY] print(42)
42
""").strip()
assert expected == result
def test_print_statement_sep_1():
result = py("print", "43")
if PY2:
expected = dedent("""
[PYFLYBY] print 43
43
""").strip()
else:
expected = dedent("""
[PYFLYBY] print(43)
43
""").strip()
assert expected == result
def test_print_function_1():
result = py("print(44, file=sys.stdout)")
expected = dedent("""
[PYFLYBY] import sys
[PYFLYBY] print(44, file=sys.stdout)
44
""").strip()
assert expected == result
def test_print_function_tuple_1():
result = py("print(5,6)")
expected = dedent("""
[PYFLYBY] print(5,6)
5 6
""").strip()
assert expected == result
def test_write_1():
with NamedTemporaryFile(mode='w+') as f:
output = py("--output=silent", "-q", "open(%r,'w').write"%f.name, "-", stdin=b"Greenwich")
assert output == ""
result = f.read()
expected = "Greenwich"
assert expected == result
def test_print_args_1():
with NamedTemporaryFile(mode='w+') as f:
output = py("-q", "print", "-", "--file=open(%r,'w')"%f.name,
stdin=b"Spring")
assert output == ""
result = f.read()
expected = "Spring\n"
assert expected == result
def test_program_help_1():
output = py("--help")
assert "--version" in output
def test_program_help_full_1():
for arg in ["--help", "-help", "help", "--h", "-h", "--?", "-?", "?"]:
output = py(arg)
assert "--version" in output
def test_function_help_1():
output = py("base64.b64encode", "--help")
assert "s | |
# coding=utf-8
from OTLMOW.OEFModel.EMObject import EMObject
from OTLMOW.OEFModel.EMAttribuut import EMAttribuut
from OTLMOW.OTLModel.Datatypes.BooleanField import BooleanField
from OTLMOW.OTLModel.Datatypes.DateTimeField import DateTimeField
from OTLMOW.OTLModel.Datatypes.StringField import StringField
# Generated with OEFClassCreator. To modify: extend, do not edit
class Kast(EMObject):
"""Installatiekast of Voetpadkast - fysieke behuizing"""
typeURI = 'https://lgc.data.wegenenverkeer.be/ns/installatie#Kast'
label = 'Kast'
def __init__(self):
super().__init__()
self._aantalOvbsEnVoetVervangen = EMAttribuut(field=StringField,
naam='Aantal OVBS en voet vervangen',
label='Aantal OVBS en voet vervangen',
objectUri='https://ond.data.wegenenverkeer.be/ns/attribuut#EMObject.aantalOvbsEnVoetVervangen',
definitie='Definitie nog toe te voegen voor eigenschap Aantal OVBS en voet vervangen',
owner=self)
self._aantalOvbsVervangen = EMAttribuut(field=StringField,
naam='Aantal OVBS vervangen',
label='Aantal OVBS vervangen',
objectUri='https://ond.data.wegenenverkeer.be/ns/attribuut#EMObject.aantalOvbsVervangen',
definitie='Definitie nog toe te voegen voor eigenschap Aantal OVBS vervangen',
owner=self)
self._aantalBuizenAfgedicht = EMAttribuut(field=StringField,
naam='Aantal buizen afgedicht',
label='Aantal buizen afgedicht',
objectUri='https://ond.data.wegenenverkeer.be/ns/attribuut#Kast.aantalBuizenAfgedicht',
definitie='Definitie nog toe te voegen voor eigenschap Aantal buizen afgedicht',
owner=self)
self._aantalBuizenTePlannen = EMAttribuut(field=StringField,
naam='Aantal buizen te plannen',
label='Aantal buizen te plannen',
objectUri='https://ond.data.wegenenverkeer.be/ns/attribuut#Kast.aantalBuizenTePlannen',
definitie='Definitie nog toe te voegen voor eigenschap Aantal buizen te plannen',
owner=self)
self._aantalNogInTePlannen = EMAttribuut(field=StringField,
naam='Aantal nog in te plannen',
label='Aantal nog in te plannen',
objectUri='https://ond.data.wegenenverkeer.be/ns/attribuut#EMObject.aantalNogInTePlannen',
definitie='Definitie nog toe te voegen voor eigenschap Aantal nog in te plannen',
owner=self)
self._algemeneOpmerkingen = EMAttribuut(field=StringField,
naam='Algemene opmerkingen',
label='Algemene opmerkingen',
objectUri='https://ond.data.wegenenverkeer.be/ns/attribuut#Kast.algemeneOpmerkingen',
definitie='Definitie nog toe te voegen voor eigenschap Algemene opmerkingen',
owner=self)
self._bekabelingDraadkanalenOrdelijkKast = EMAttribuut(field=StringField,
naam='Bekabeling/draadkanalen ordelijk (KAST)',
label='Bekabeling/draadkanalen ordelijk (KAST)',
objectUri='https://ond.data.wegenenverkeer.be/ns/attribuut#Kast.bekabelingDraadkanalenOrdelijkKast',
definitie='Definitie nog toe te voegen voor eigenschap Bekabeling/draadkanalen ordelijk (KAST)',
owner=self)
self._bereikbaarheidCorrect = EMAttribuut(field=BooleanField,
naam='Bereikbaarheid correct',
label='Bereikbaarheid correct',
objectUri='https://ond.data.wegenenverkeer.be/ns/attribuut#Kast.bereikbaarheidCorrect',
definitie='Definitie nog toe te voegen voor eigenschap Bereikbaarheid correct',
owner=self)
self._betonsokkelGereinigd = EMAttribuut(field=BooleanField,
naam='Betonsokkel gereinigd',
label='Betonsokkel gereinigd',
objectUri='https://ond.data.wegenenverkeer.be/ns/attribuut#Kast.betonsokkelGereinigd',
definitie='Definitie nog toe te voegen voor eigenschap Betonsokkel gereinigd',
owner=self)
self._bezoekficheAanvangtijdstipIngevuld = EMAttribuut(field=BooleanField,
naam='Bezoekfiche aanvangtijdstip ingevuld',
label='Bezoekfiche aanvangtijdstip ingevuld',
objectUri='https://ond.data.wegenenverkeer.be/ns/attribuut#Kast.bezoekficheAanvangtijdstipIngevuld',
definitie='Definitie nog toe te voegen voor eigenschap Bezoekfiche aanvangtijdstip ingevuld',
owner=self)
self._bezoekficheEindtijdstipIngevuld = EMAttribuut(field=BooleanField,
naam='Bezoekfiche eindtijdstip ingevuld',
label='Bezoekfiche eindtijdstip ingevuld',
objectUri='https://ond.data.wegenenverkeer.be/ns/attribuut#Kast.bezoekficheEindtijdstipIngevuld',
definitie='Definitie nog toe te voegen voor eigenschap Bezoekfiche eindtijdstip ingevuld',
owner=self)
self._binnenkantSokkelGestofzuigd = EMAttribuut(field=BooleanField,
naam='Binnenkant sokkel gestofzuigd',
label='Binnenkant sokkel gestofzuigd',
objectUri='https://ond.data.wegenenverkeer.be/ns/attribuut#Kast.binnenkantSokkelGestofzuigd',
definitie='Definitie nog toe te voegen voor eigenschap Binnenkant sokkel gestofzuigd',
owner=self)
self._binnensokkelIsGoedAansluitend = EMAttribuut(field=BooleanField,
                                                          naam='Binnensokkel is goed aansluitend',
label='Binnensokkel is goed aansluitend',
objectUri='https://ond.data.wegenenverkeer.be/ns/attribuut#Kast.binnensokkelIsGoedAansluitend',
definitie='Definitie nog toe te voegen voor eigenschap Binnensokkel is goed aansluitend',
owner=self)
self._binnenzijdeGereinigd = EMAttribuut(field=BooleanField,
naam='Binnenzijde gereinigd',
label='Binnenzijde gereinigd',
objectUri='https://ond.data.wegenenverkeer.be/ns/attribuut#Kast.binnenzijdeGereinigd',
definitie='Definitie nog toe te voegen voor eigenschap Binnenzijde gereinigd',
owner=self)
self._buizenZijnGoedAfgedichtKast = EMAttribuut(field=StringField,
naam='Buizen zijn goed afgedicht (KAST)',
label='Buizen zijn goed afgedicht (KAST)',
objectUri='https://ond.data.wegenenverkeer.be/ns/attribuut#Kast.buizenZijnGoedAfgedichtKast',
definitie='Definitie nog toe te voegen voor eigenschap Buizen zijn goed afgedicht (KAST)',
owner=self)
self._deurSluitGoedAf = EMAttribuut(field=BooleanField,
naam='Deur sluit goed af',
label='Deur sluit goed af',
objectUri='https://ond.data.wegenenverkeer.be/ns/attribuut#Kast.deurSluitGoedAf',
definitie='Definitie nog toe te voegen voor eigenschap Deur sluit goed af',
owner=self)
self._deurcontactAanwezig = EMAttribuut(field=BooleanField,
                                                naam='Deurcontact aanwezig',
label='Deurcontact aanwezig',
objectUri='https://ond.data.wegenenverkeer.be/ns/attribuut#Kast.deurcontactAanwezig',
definitie='Definitie nog toe te voegen voor eigenschap Deurcontact aanwezig',
owner=self)
self._eindeKast = EMAttribuut(field=DateTimeField,
naam='Einde (KAST)',
label='Einde (KAST)',
objectUri='https://ond.data.wegenenverkeer.be/ns/attribuut#Kast.eindeKast',
definitie='Definitie nog toe te voegen voor eigenschap Einde (KAST)',
owner=self)
self._filterSVervangen = EMAttribuut(field=BooleanField,
naam='Filter(s) vervangen',
label='Filter(s) vervangen',
objectUri='https://ond.data.wegenenverkeer.be/ns/attribuut#Kast.filterSVervangen',
definitie='Definitie nog toe te voegen voor eigenschap Filter(s) vervangen',
owner=self)
self._fotoVanInhoudVanKastGenomen = EMAttribuut(field=BooleanField,
naam='Foto van inhoud van kast genomen',
label='Foto van inhoud van kast genomen',
objectUri='https://ond.data.wegenenverkeer.be/ns/attribuut#Kast.fotoVanInhoudVanKastGenomen',
definitie='Definitie nog toe te voegen voor eigenschap Foto van inhoud van kast genomen',
owner=self)
self._geldigeKeuringAanwezig = EMAttribuut(field=BooleanField,
naam='Geldige keuring aanwezig',
label='Geldige keuring aanwezig',
objectUri='https://ond.data.wegenenverkeer.be/ns/attribuut#Kast.geldigeKeuringAanwezig',
definitie='Definitie nog toe te voegen voor eigenschap Geldige keuring aanwezig',
owner=self)
self._genaakbareDelenAanwezig = EMAttribuut(field=StringField,
naam='Genaakbare delen aanwezig',
label='Genaakbare delen aanwezig',
objectUri='https://ond.data.wegenenverkeer.be/ns/attribuut#Kast.genaakbareDelenAanwezig',
definitie='Definitie nog toe te voegen voor eigenschap Genaakbare delen aanwezig',
owner=self)
self._graffitiFotoS = EMAttribuut(field=BooleanField,
naam="Graffiti (foto's)",
label="Graffiti (foto's)",
objectUri='https://ond.data.wegenenverkeer.be/ns/attribuut#Kast.graffitiFotoS',
definitie="Definitie nog toe te voegen voor eigenschap Graffiti (foto\'s)",
owner=self)
self._hebJeGesnoeid = EMAttribuut(field=StringField,
naam='Heb je gesnoeid?',
label='Heb je gesnoeid?',
objectUri='https://ond.data.wegenenverkeer.be/ns/attribuut#Kast.hebJeGesnoeid',
definitie='Definitie nog toe te voegen voor eigenschap Heb je gesnoeid?',
owner=self)
self._installatienummerAanwezigOpKast = EMAttribuut(field=BooleanField,
naam='Installatienummer aanwezig op kast',
label='Installatienummer aanwezig op kast',
objectUri='https://ond.data.wegenenverkeer.be/ns/attribuut#Kast.installatienummerAanwezigOpKast',
definitie='Definitie nog toe te voegen voor eigenschap Installatienummer aanwezig op kast',
owner=self)
self._isDiffOk = EMAttribuut(field=StringField,
naam='Is diff ok?',
label='Is diff ok?',
objectUri='https://ond.data.wegenenverkeer.be/ns/attribuut#Kast.isDiffOk',
definitie='Definitie nog toe te voegen voor eigenschap Is diff ok?',
owner=self)
self._k04VervolgActie = EMAttribuut(field=StringField,
naam='K04. Vervolg actie',
label='K04. Vervolg actie',
objectUri='https://ond.data.wegenenverkeer.be/ns/attribuut#Kast.k04VervolgActie',
definitie='Definitie nog toe te voegen voor eigenschap Vervolg actie',
owner=self)
self._k20VervolgActie = EMAttribuut(field=StringField,
naam='K20. Vervolg actie',
label='K20. Vervolg actie',
objectUri='https://ond.data.wegenenverkeer.be/ns/attribuut#Kast.k20VervolgActie',
definitie='Definitie nog toe te voegen voor eigenschap Vervolg actie',
owner=self)
self._k24VervolgActie = EMAttribuut(field=StringField,
naam='K24. Vervolg actie',
label='K24. Vervolg actie',
objectUri='https://ond.data.wegenenverkeer.be/ns/attribuut#Kast.k24VervolgActie',
definitie='Definitie nog toe te voegen voor eigenschap Vervolg actie',
owner=self)
self._k28VervolgActie = EMAttribuut(field=StringField,
naam='K28. Vervolg actie',
label='K28. Vervolg actie',
objectUri='https://ond.data.wegenenverkeer.be/ns/attribuut#Kast.k28VervolgActie',
definitie='Definitie nog toe te voegen voor eigenschap Vervolg actie',
owner=self)
self._kastVlotToegankelijkBeplanting = EMAttribuut(field=StringField,
naam='Kast vlot toegankelijk (beplanting)',
label='Kast vlot toegankelijk (beplanting)',
objectUri='https://ond.data.wegenenverkeer.be/ns/attribuut#Kast.kastVlotToegankelijkBeplanting',
definitie='Definitie nog toe te voegen voor eigenschap Kast vlot toegankelijk (beplanting)',
owner=self)
self._knaagdierenbestrijding = EMAttribuut(field=BooleanField,
naam='Knaagdierenbestrijding',
label='Knaagdierenbestrijding',
objectUri='https://ond.data.wegenenverkeer.be/ns/attribuut#Kast.knaagdierenbestrijding',
definitie='Definitie nog toe te voegen voor eigenschap Knaagdierenbestrijding',
owner=self)
self._labelingElektrischeOnderdelenOkKast = EMAttribuut(field=StringField,
naam='Labeling elektrische onderdelen OK (KAST)',
label='Labeling elektrische onderdelen OK (KAST)',
objectUri='https://ond.data.wegenenverkeer.be/ns/attribuut#Kast.labelingElektrischeOnderdelenOkKast',
definitie='Definitie nog toe te voegen voor eigenschap Labeling elektrische onderdelen OK (KAST)',
owner=self)
self._omschrijvingAanpassing = EMAttribuut(field=StringField,
naam='Omschrijving aanpassing',
label='Omschrijving aanpassing',
objectUri='https://ond.data.wegenenverkeer.be/ns/attribuut#Kast.omschrijvingAanpassing',
definitie='Definitie nog toe te voegen voor eigenschap Omschrijving aanpassing',
owner=self)
self._omschrijvingBeschadiging = EMAttribuut(field=StringField,
naam='Omschrijving beschadiging',
label='Omschrijving beschadiging',
objectUri='https://ond.data.wegenenverkeer.be/ns/attribuut#Kast.omschrijvingBeschadiging',
definitie='Definitie nog toe te voegen voor eigenschap Omschrijving beschadiging',
owner=self)
self._omschrijvingPlanning = EMAttribuut(field=StringField,
naam='Omschrijving planning',
label='Omschrijving planning',
objectUri='https://ond.data.wegenenverkeer.be/ns/attribuut#Kast.omschrijvingPlanning',
definitie='Definitie nog toe te voegen voor eigenschap Omschrijving planning',
owner=self)
self._omschrijvingWaarKmp = EMAttribuut(field=StringField,
naam='Omschrijving waar? KMP?',
label='Omschrijving waar? KMP?',
objectUri='https://ond.data.wegenenverkeer.be/ns/attribuut#Kast.omschrijvingWaarKmp',
definitie='Definitie nog toe te voegen voor eigenschap Omschrijving waar? KMP?',
owner=self)
self._overspanningsBeveiligingenOk = EMAttribuut(field=BooleanField,
naam='Overspannings beveiligingen OK',
label='Overspannings beveiligingen OK',
objectUri='https://ond.data.wegenenverkeer.be/ns/attribuut#Kast.overspanningsBeveiligingenOk',
definitie='Definitie nog toe te voegen voor eigenschap Overspannings beveiligingen OK',
owner=self)
self._plantenEnOfOngedierteVerwijderd = EMAttribuut(field=BooleanField,
naam='Planten en/of ongedierte verwijderd',
label='Planten en/of ongedierte verwijderd',
objectUri='https://ond.data.wegenenverkeer.be/ns/attribuut#Kast.plantenEnOfOngedierteVerwijderd',
definitie='Definitie nog toe te voegen voor eigenschap Planten en/of ongedierte verwijderd',
owner=self)
self._reinigenBuitenzijdeKast = EMAttribuut(field=BooleanField,
naam='Reinigen buitenzijde kast',
label='Reinigen buitenzijde kast',
objectUri='https://ond.data.wegenenverkeer.be/ns/attribuut#Kast.reinigenBuitenzijdeKast',
definitie='Definitie nog toe te voegen voor eigenschap Reinigen buitenzijde kast',
owner=self)
self._schroevenContactenOkSteekproef = EMAttribuut(field=BooleanField,
naam='Schroeven contacten ok (steekproef)',
label='Schroeven contacten ok (steekproef)',
objectUri='https://ond.data.wegenenverkeer.be/ns/attribuut#Kast.schroevenContactenOkSteekproef',
definitie='Definitie nog toe te voegen voor eigenschap Schroeven contacten ok (steekproef)',
owner=self)
self._slotGesmeerdEnGereinigd = EMAttribuut(field=BooleanField,
naam='Slot gesmeerd en gereinigd',
label='Slot gesmeerd en gereinigd',
objectUri='https://ond.data.wegenenverkeer.be/ns/attribuut#Kast.slotGesmeerdEnGereinigd',
definitie='Definitie nog toe te voegen voor eigenschap Slot gesmeerd en gereinigd',
owner=self)
self._startBezoek = EMAttribuut(field=DateTimeField,
naam='Start bezoek',
label='Start bezoek',
objectUri='https://ond.data.wegenenverkeer.be/ns/attribuut#Kast.startBezoek',
definitie='Definitie nog toe te voegen voor eigenschap Start bezoek',
owner=self)
self._stekkersGecontroleerd = EMAttribuut(field=BooleanField,
naam='Stekkers gecontroleerd',
label='Stekkers gecontroleerd',
objectUri='https://ond.data.wegenenverkeer.be/ns/attribuut#Kast.stekkersGecontroleerd',
definitie='Definitie nog toe te voegen voor eigenschap Stekkers gecontroleerd',
owner=self)
self._vtcSticker = EMAttribuut(field=BooleanField,
naam='VTC-sticker',
label='VTC-sticker',
objectUri='https://ond.data.wegenenverkeer.be/ns/attribuut#Kast.vtcSticker',
definitie='Definitie nog toe te voegen voor eigenschap VTC-sticker',
owner=self)
self._ventilatieOpeningenAppGestofzuigd = EMAttribuut(field=BooleanField,
naam='Ventilatie-openingen app gestofzuigd',
label='Ventilatie-openingen app gestofzuigd',
objectUri='https://ond.data.wegenenverkeer.be/ns/attribuut#Kast.ventilatieOpeningenAppGestofzuigd',
definitie='Definitie nog toe te voegen voor eigenschap Ventilatie-openingen app gestofzuigd',
owner=self)
self._werkingDiffGetestViaTestknop = EMAttribuut(field=BooleanField,
naam='Werking diff getest (via testknop)',
label='Werking diff getest (via testknop)',
objectUri='https://ond.data.wegenenverkeer.be/ns/attribuut#EMObject.werkingDiffGetestViaTestknop',
definitie='Definitie nog toe te voegen voor eigenschap Werking diff getest (via testknop)',
owner=self)
self._werkingKastventilatieOkKast = EMAttribuut(field=StringField,
naam='Werking kastventilatie OK (KAST)',
label='Werking kastventilatie OK (KAST)',
objectUri='https://ond.data.wegenenverkeer.be/ns/attribuut#Kast.werkingKastventilatieOkKast',
definitie='Definitie nog toe te voegen voor eigenschap Werking kastventilatie OK (KAST)',
owner=self)
self._werkingKastverlichtingOkKast = EMAttribuut(field=StringField,
naam='Werking kastverlichting OK (KAST)',
label='Werking kastverlichting OK (KAST)',
objectUri='https://ond.data.wegenenverkeer.be/ns/attribuut#Kast.werkingKastverlichtingOkKast',
definitie='Definitie nog toe te voegen voor eigenschap Werking kastverlichting OK (KAST)',
owner=self)
self._werkingKastverwarmingOkKast = EMAttribuut(field=StringField,
naam='Werking kastverwarming OK (KAST)',
label='Werking kastverwarming OK (KAST)',
objectUri='https://ond.data.wegenenverkeer.be/ns/attribuut#Kast.werkingKastverwarmingOkKast',
definitie='Definitie nog toe te voegen voor eigenschap Werking kastverwarming OK (KAST)',
owner=self)
self._notitieinspectie = EMAttribuut(field=StringField,
naam='notitieInspectie',
label='notitieInspectie',
objectUri='https://ins.data.wegenenverkeer.be/ns/attribuut#EMObject.notitieinspectie',
definitie='Definitie nog toe te voegen voor eigenschap notitie',
owner=self)
@property
def aantalOvbsEnVoetVervangen(self):
"""Definitie nog toe te voegen voor eigenschap Aantal OVBS en voet vervangen"""
return self._aantalOvbsEnVoetVervangen.waarde
@aantalOvbsEnVoetVervangen.setter
def aantalOvbsEnVoetVervangen(self, value):
self._aantalOvbsEnVoetVervangen.set_waarde(value, owner=self)
@property
def aantalOvbsVervangen(self):
"""Definitie nog toe te voegen voor eigenschap Aantal OVBS vervangen"""
return self._aantalOvbsVervangen.waarde
@aantalOvbsVervangen.setter
def aantalOvbsVervangen(self, value):
self._aantalOvbsVervangen.set_waarde(value, owner=self)
@property
def aantalBuizenAfgedicht(self):
"""Definitie nog toe te voegen voor eigenschap Aantal buizen afgedicht"""
return self._aantalBuizenAfgedicht.waarde
@aantalBuizenAfgedicht.setter
def aantalBuizenAfgedicht(self, value):
self._aantalBuizenAfgedicht.set_waarde(value, owner=self)
@property
def aantalBuizenTePlannen(self):
"""Definitie nog toe te voegen voor eigenschap Aantal buizen te plannen"""
return self._aantalBuizenTePlannen.waarde
@aantalBuizenTePlannen.setter
def aantalBuizenTePlannen(self, value):
self._aantalBuizenTePlannen.set_waarde(value, owner=self)
@property
def aantalNogInTePlannen(self):
"""Definitie nog toe te voegen voor eigenschap Aantal nog in te plannen"""
return self._aantalNogInTePlannen.waarde
@aantalNogInTePlannen.setter
def aantalNogInTePlannen(self, value):
self._aantalNogInTePlannen.set_waarde(value, owner=self)
@property
def algemeneOpmerkingen(self):
"""Definitie nog toe te voegen voor eigenschap Algemene opmerkingen"""
return self._algemeneOpmerkingen.waarde
@algemeneOpmerkingen.setter
def algemeneOpmerkingen(self, value):
self._algemeneOpmerkingen.set_waarde(value, owner=self)
@property
def bekabelingDraadkanalenOrdelijkKast(self):
"""Definitie nog toe te voegen voor eigenschap Bekabeling/draadkanalen ordelijk (KAST)"""
return self._bekabelingDraadkanalenOrdelijkKast.waarde
@bekabelingDraadkanalenOrdelijkKast.setter
def bekabelingDraadkanalenOrdelijkKast(self, value):
self._bekabelingDraadkanalenOrdelijkKast.set_waarde(value, owner=self)
@property
def bereikbaarheidCorrect(self):
"""Definitie nog toe te voegen voor | |
"masterSystemDiskCategory",
"master_system_disk_size": "masterSystemDiskSize",
"node_port_range": "nodePortRange",
"num_of_nodes": "numOfNodes",
"pod_vswitch_ids": "podVswitchIds",
"proxy_mode": "proxyMode",
"runtime": "runtime",
"security_group_id": "securityGroupId",
"service_cidr": "serviceCidr",
"snat_entry": "snatEntry",
"ssh_flags": "sshFlags",
"tags": "tags",
"taint": "taint",
"timeout_mins": "timeoutMins",
"worker_auto_renew": "workerAutoRenew",
"worker_auto_renew_period": "workerAutoRenewPeriod",
"worker_data_disk": "workerDataDisk",
"worker_data_disks": "workerDataDisks",
"worker_instance_charge_type": "workerInstanceChargeType",
"worker_period": "workerPeriod",
"worker_period_unit": "workerPeriodUnit",
"worker_system_disk_category": "workerSystemDiskCategory",
"worker_system_disk_size": "workerSystemDiskSize",
},
)
class KubernetesClusterProps:
def __init__(
self,
*,
master_instance_types: typing.Union[typing.Sequence[typing.Any], ros_cdk_core.IResolvable],
master_v_switch_ids: typing.Union[typing.Sequence[typing.Any], ros_cdk_core.IResolvable],
name: typing.Union[builtins.str, ros_cdk_core.IResolvable],
vpc_id: typing.Union[builtins.str, ros_cdk_core.IResolvable],
worker_instance_types: typing.Union[ros_cdk_core.IResolvable, typing.Sequence[typing.Union[builtins.str, ros_cdk_core.IResolvable]]],
worker_v_switch_ids: typing.Union[typing.Sequence[typing.Any], ros_cdk_core.IResolvable],
addons: typing.Optional[typing.Union[ros_cdk_core.IResolvable, typing.Sequence[typing.Union[ros_cdk_core.IResolvable, "RosKubernetesCluster.AddonsProperty"]]]] = None,
cloud_monitor_flags: typing.Optional[typing.Union[builtins.bool, ros_cdk_core.IResolvable]] = None,
container_cidr: typing.Optional[typing.Union[builtins.str, ros_cdk_core.IResolvable]] = None,
cpu_policy: typing.Optional[typing.Union[builtins.str, ros_cdk_core.IResolvable]] = None,
disable_rollback: typing.Optional[typing.Union[builtins.bool, ros_cdk_core.IResolvable]] = None,
endpoint_public_access: typing.Optional[typing.Union[builtins.bool, ros_cdk_core.IResolvable]] = None,
key_pair: typing.Optional[typing.Union[builtins.str, ros_cdk_core.IResolvable]] = None,
kubernetes_version: typing.Optional[typing.Union[builtins.str, ros_cdk_core.IResolvable]] = None,
login_password: typing.Optional[typing.Union[builtins.str, ros_cdk_core.IResolvable]] = None,
master_auto_renew: typing.Optional[typing.Union[builtins.bool, ros_cdk_core.IResolvable]] = None,
master_auto_renew_period: typing.Optional[typing.Union[jsii.Number, ros_cdk_core.IResolvable]] = None,
master_count: typing.Optional[typing.Union[jsii.Number, ros_cdk_core.IResolvable]] = None,
master_data_disk: typing.Optional[typing.Union[builtins.bool, ros_cdk_core.IResolvable]] = None,
master_data_disks: typing.Optional[typing.Union[ros_cdk_core.IResolvable, typing.Sequence[typing.Union[ros_cdk_core.IResolvable, "RosKubernetesCluster.MasterDataDisksProperty"]]]] = None,
master_instance_charge_type: typing.Optional[typing.Union[builtins.str, ros_cdk_core.IResolvable]] = None,
master_period: typing.Optional[typing.Union[jsii.Number, ros_cdk_core.IResolvable]] = None,
master_period_unit: typing.Optional[typing.Union[builtins.str, ros_cdk_core.IResolvable]] = None,
master_system_disk_category: typing.Optional[typing.Union[builtins.str, ros_cdk_core.IResolvable]] = None,
master_system_disk_size: typing.Optional[typing.Union[jsii.Number, ros_cdk_core.IResolvable]] = None,
node_port_range: typing.Optional[typing.Union[builtins.str, ros_cdk_core.IResolvable]] = None,
num_of_nodes: typing.Optional[typing.Union[jsii.Number, ros_cdk_core.IResolvable]] = None,
pod_vswitch_ids: typing.Optional[typing.Union[ros_cdk_core.IResolvable, typing.Sequence[typing.Union[builtins.str, ros_cdk_core.IResolvable]]]] = None,
proxy_mode: typing.Optional[typing.Union[builtins.str, ros_cdk_core.IResolvable]] = None,
runtime: typing.Optional[typing.Union[ros_cdk_core.IResolvable, "RosKubernetesCluster.RuntimeProperty"]] = None,
security_group_id: typing.Optional[typing.Union[builtins.str, ros_cdk_core.IResolvable]] = None,
service_cidr: typing.Optional[typing.Union[builtins.str, ros_cdk_core.IResolvable]] = None,
snat_entry: typing.Optional[typing.Union[builtins.bool, ros_cdk_core.IResolvable]] = None,
ssh_flags: typing.Optional[typing.Union[builtins.bool, ros_cdk_core.IResolvable]] = None,
tags: typing.Optional[typing.Sequence["RosKubernetesCluster.TagsProperty"]] = None,
taint: typing.Optional[typing.Union[ros_cdk_core.IResolvable, typing.Sequence[typing.Mapping[builtins.str, typing.Any]]]] = None,
timeout_mins: typing.Optional[typing.Union[jsii.Number, ros_cdk_core.IResolvable]] = None,
worker_auto_renew: typing.Optional[typing.Union[builtins.bool, ros_cdk_core.IResolvable]] = None,
worker_auto_renew_period: typing.Optional[typing.Union[jsii.Number, ros_cdk_core.IResolvable]] = None,
worker_data_disk: typing.Optional[typing.Union[builtins.bool, ros_cdk_core.IResolvable]] = None,
worker_data_disks: typing.Optional[typing.Union[ros_cdk_core.IResolvable, typing.Sequence[typing.Union[ros_cdk_core.IResolvable, "RosKubernetesCluster.WorkerDataDisksProperty"]]]] = None,
worker_instance_charge_type: typing.Optional[typing.Union[builtins.str, ros_cdk_core.IResolvable]] = None,
worker_period: typing.Optional[typing.Union[jsii.Number, ros_cdk_core.IResolvable]] = None,
worker_period_unit: typing.Optional[typing.Union[builtins.str, ros_cdk_core.IResolvable]] = None,
worker_system_disk_category: typing.Optional[typing.Union[builtins.str, ros_cdk_core.IResolvable]] = None,
worker_system_disk_size: typing.Optional[typing.Union[jsii.Number, ros_cdk_core.IResolvable]] = None,
) -> None:
'''Properties for defining a ``ALIYUN::CS::KubernetesCluster``.
:param master_instance_types: Property masterInstanceTypes: Master node ECS specification type code. For more details, see Instance Type Family. Each item correspond to MasterVSwitchIds. List size must be 3, Instance Type can be repeated.
:param master_v_switch_ids: Property masterVSwitchIds: Master node switch ID. To ensure high availability of the cluster, it is recommended that you select 3 switches and distribute them in different Availability Zones.
:param name: Property name: The name of the cluster. The cluster name can use uppercase and lowercase letters, Chinese characters, numbers, and dashes.
:param vpc_id: Property vpcId: VPC ID.
:param worker_instance_types: Property workerInstanceTypes: Worker node ECS specification type code. For more details, see Instance Specification Family.
:param worker_v_switch_ids: Property workerVSwitchIds: The virtual switch ID of the worker node.
:param addons: Property addons: A combination of addon plugins for Kubernetes clusters. Network plug-in: including Flannel and Terway network plug-ins Log service: Optional. If the log service is not enabled, the cluster audit function cannot be used. Ingress: The installation of the Ingress component is enabled by default.
:param cloud_monitor_flags: Property cloudMonitorFlags: Whether to install the cloud monitoring plugin: true: indicates installation false: Do not install Default to false.
:param container_cidr: Property containerCidr: The container network segment cannot conflict with the VPC network segment. When the system is set to automatically create a VPC, the network segment 172.16.0.0/16 is used by default.
:param cpu_policy: Property cpuPolicy: CPU policy. The cluster version is 1.12.6 and above supports both static and none strategies.
:param disable_rollback: Property disableRollback: Whether to disable rollback when creation fails. true: do not roll back on failure. false: roll back on failure, releasing the resources produced during the creation process. Default to true. False is not recommended.
:param endpoint_public_access: Property endpointPublicAccess: Whether to enable the public network API Server: true: which means that the public network API Server is open. false: If set to false, the API server on the public network will not be created, only the API server on the private network will be created.Default to false.
:param key_pair: Property keyPair: Key pair name. Specify one of KeyPair or LoginPassword.
:param kubernetes_version: Property kubernetesVersion: The version of the Kubernetes cluster.
:param login_password: Property loginPassword: SSH login password. Password rules are 8-30 characters and contain three items (upper and lower case letters, numbers, and special symbols). Specify one of KeyPair or LoginPassword.
:param master_auto_renew: Property masterAutoRenew: Whether the master node automatically renews. It takes effect when the value of MasterInstanceChargeType is PrePaid. The optional values are: true: automatic renewal false: do not renew automatically Default to true.
:param master_auto_renew_period: Property masterAutoRenewPeriod: Automatic renewal cycle, which takes effect when prepaid and automatic renewal are selected, and is required: When PeriodUnit = Week, the values are: {"1", "2", "3"} When PeriodUnit = Month, the value is {"1", "2", "3", "6", "12"} Default to 1.
:param master_count: Property masterCount: Number of master instances. The value can be 3 or 5. The default value is 3.
:param master_data_disk: Property masterDataDisk: Whether the master node mounts data disks can be selected as: true: mount the data disk false: no data disk is mounted, default is false.
:param master_data_disks: Property masterDataDisks: Master data disk type, size and other configuration combinations. This parameter is valid only when the master node data disk is mounted.
:param master_instance_charge_type: Property masterInstanceChargeType: Master node payment type. The optional values are: PrePaid: prepaid PostPaid: Pay as you go Default to PostPaid.
:param master_period: Property masterPeriod: The duration of the annual subscription and monthly subscription. It takes effect when the master_instance_charge_type value is PrePaid and is a required value. The value range is: When PeriodUnit = Week, Period values are: {"1", "2", "3", "4"} When PeriodUnit = Month, Period values are: {"1", "2", "3", "4", "5", "6", "7", "8", "9", "12", "24", "36", "48", "60"} Default to 1.
:param master_period_unit: Property masterPeriodUnit: When you specify PrePaid, you need to specify the period. The options are: Week: Time is measured in weeks Month: time in months Default to Month
:param master_system_disk_category: Property masterSystemDiskCategory: Master disk system disk type. The value includes: cloud_efficiency: efficient cloud disk cloud_ssd: SSD cloud disk cloud_essd: ESSD cloud diskDefault to cloud_ssd.
:param master_system_disk_size: Property masterSystemDiskSize: Master disk system disk size in GiB. Default to 120.
:param node_port_range: Property nodePortRange: Node service port. The value range is [30000, 65535]. Default to 30000-65535.
:param num_of_nodes: Property numOfNodes: Number of worker nodes. The range is [0,300]. Default to 3.
:param pod_vswitch_ids: Property podVswitchIds: The list of pod vSwitches. For each vSwitch that is allocated to nodes, you must specify at least one pod vSwitch in the same zone. The pod vSwitches cannot be the same as the node vSwitches. We recommend that you set the mask length of the CIDR block to a value no greater than 19 for the pod vSwitches. The pod_vswitch_ids parameter is required when the Terway network plug-in is selected for the cluster.
:param proxy_mode: Property proxyMode: kube-proxy proxy mode, supports both iptables and ipvs modes. The default is iptables.
:param runtime: Property runtime: The container runtime of the cluster. The default runtime is Docker.
:param security_group_id: Property securityGroupId: Specifies the ID of the security group to which the cluster ECS instance belongs.
:param service_cidr: Property serviceCidr: The service network segment cannot conflict with the VPC network segment and the container network segment. When the system is selected to automatically create a VPC, the network segment 172.19.0.0/20 is used by default.
:param snat_entry: Property snatEntry: Whether to configure SNAT for the network. When a VPC can access the public network environment, set it to false. When an existing VPC cannot access the public network environment: When set to True, SNAT is configured and the public network environment can be accessed at this time. If set to false, it means that SNAT is not configured and the public network environment cannot be accessed at this time. Default to true.
:param ssh_flags: Property sshFlags: Whether to enable public network SSH login: true: open false: not open.
:param tags: Property tags: Tag the cluster.
import numpy as np
from utils.string_helper import _make_n_gram
from collections import Counter
import torch
from utils.pretrained_discriminator import SeqClassifyDiscriminator
#from nltk.corpus import stopwords
import textstat
from utils.io import LEN_BINS_RANGE, LEN_BINS, n_gram_novelty_to_bin, ext_frag_density_to_bin, fusion_ratio_to_bin
import pickle as pkl
from nltk.corpus import stopwords
from dataset_extractive_fragment_stat import compute_extractive_fragment, compute_extractive_fragment_density
import ssi_functions
import multiprocessing as mp
import spacy
import neuralcoref
from utils.cloze_mc_model import ClozeMCModel
from utils.cloze_model import ClozeModel
import os
import json
#num_cpus = mp.cpu_count()
from utils.time_log import time_since
import time
import re
NUMERICAL_ENTITIES_TYPES = ["PERCENT", "MONEY", "QUANTITY", "ORDINAL", "CARDINAL"]
def count_named_entity_appear_frequency(doc_word_list, entity_words):
# check if it appears in document
match = False
appear_frequency = 0
for doc_start_idx in range(len(doc_word_list) - len(entity_words) + 1):
match = True
for entity_word_idx, entity_word in enumerate(entity_words):
doc_word = doc_word_list[doc_start_idx + entity_word_idx]
if doc_word.lower() != entity_word.lower():
match = False
break
if match:
appear_frequency += 1
return appear_frequency
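# A minimal sketch of the sliding-window matching above, with purely illustrative tokens:
#   doc = ['He', 'moved', 'to', 'New', 'York', 'from', 'new', 'york']
#   count_named_entity_appear_frequency(doc, ['New', 'York'])  # -> 2 (matching is case-insensitive)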
def compute_n_gram_novelty(pred_word_list, src_word_list, n):
pred_n_gram_counter = Counter()
pred_n_gram_counter.update(_make_n_gram(pred_word_list, n))
src_n_gram_counter = Counter()
src_n_gram_counter.update(_make_n_gram(src_word_list, n))
num_pred_n_grams = sum(pred_n_gram_counter.values())
num_novel_pred_n_grams = 0
for n_gram, cnt in pred_n_gram_counter.items():
if n_gram not in src_n_gram_counter:
num_novel_pred_n_grams += cnt
novel_n_gram_fraction = num_novel_pred_n_grams / num_pred_n_grams
return novel_n_gram_fraction
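# Worked example (hedged: assumes _make_n_gram yields the usual token n-grams): with
# pred = ['the', 'cat', 'sat'], src = ['the', 'dog', 'sat'] and n = 1, the only predicted
# unigram absent from the source is 'cat', so the returned novelty is 1 / 3 ~= 0.33.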
class StyleDiscriminatorCost:
def __init__(self, device):
self.device = device
model_dir = "saved_model/bert_classifier_xsum_fox_weighted_sampler.bert_classifier.20191021-211528"
ckpt_dir = model_dir + "/ckpt/epoch-3-total_batch-20000-valid_f1-0.9635"
self.discriminator_model = SeqClassifyDiscriminator(model_dir, ckpt_dir, device=device)
def score(self, pred_word_2d_list, pred_word_2d_list_sent_tokenized, trg_word_2d_list, trg_word_2d_list_sent_tokenized, batch_size, control_variables):
pred_str_list = [' '.join(pred_word_list) for pred_word_list in pred_word_2d_list]
class_prob = self.discriminator_model.score(pred_str_list) # [batch, 2]
return class_prob[:, 1] # [batch]
class HighReadabilityCosts:
def __init__(self, device):
self.device = device
def score(self, pred_word_2d_list, pred_word_2d_list_sent_tokenized, trg_word_2d_list, trg_word_2d_list_sent_tokenized, batch_size, control_variables):
batch_cost = []
for pred_word_list in pred_word_2d_list:
pred_str = ' '.join(pred_word_list)
flesch_reading_ease_score = textstat.flesch_reading_ease(pred_str)
if flesch_reading_ease_score >= 45:
cost = 1.0
else:
cost = 0.0
batch_cost.append(cost)
return torch.FloatTensor(batch_cost).to(self.device) # tensor: [batch_size]
class LowReadabilityCosts:
def __init__(self, device):
self.device = device
def score(self, pred_word_2d_list, pred_word_2d_list_sent_tokenized, trg_word_2d_list, trg_word_2d_list_sent_tokenized, batch_size, control_variables):
batch_cost = []
for pred_word_list in pred_word_2d_list:
pred_str = ' '.join(pred_word_list)
flesch_reading_ease_score = textstat.flesch_reading_ease(pred_str)
if flesch_reading_ease_score < 55:
cost = 1.0
else:
cost = 0.0
batch_cost.append(cost)
return torch.FloatTensor(batch_cost).to(self.device) # tensor: [batch_size]
class LengthBinConsistent:
def __init__(self, device):
self.device = device
def score(self, pred_word_2d_list, pred_word_2d_list_sent_tokenized, trg_word_2d_list, trg_word_2d_list_sent_tokenized, batch_size, control_variables):
len_bin_list = control_variables['len_bins']
batch_cost = []
for pred_word_list, len_bin in zip(pred_word_2d_list, len_bin_list):
pred_len = len(pred_word_list)
lower_len, upper_len = LEN_BINS_RANGE[len_bin]
if lower_len < pred_len <= upper_len:
cost = 0.0
else:
cost = 1.0
batch_cost.append(cost)
return torch.FloatTensor(batch_cost).to(self.device) # tensor: [batch_size]
class LengthBinDistance:
def __init__(self, device, total_len_bins=10):
self.device = device
self.total_len_bins = total_len_bins  # honor the constructor argument instead of hard-coding 10
def score(self, pred_word_2d_list, pred_word_2d_list_sent_tokenized, trg_word_2d_list, trg_word_2d_list_sent_tokenized, batch_size, control_variables):
target_len_bin_list = control_variables['len_bins']
batch_cost = []
for pred_word_list, target_len_bin in zip(pred_word_2d_list, target_len_bin_list):
pred_len = len(pred_word_list)
pred_len_bin = LEN_BINS[pred_len]
len_bin_distance = abs(target_len_bin - pred_len_bin) / self.total_len_bins
batch_cost.append(len_bin_distance)
return torch.FloatTensor(batch_cost).to(self.device) # tensor: [batch_size]
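# Worked example of the normalized bin distance above: if the requested length bin is 7 and
# the predicted length falls into bin 4 (per the LEN_BINS mapping imported from utils.io,
# whose concrete boundaries are not assumed here), the cost is abs(7 - 4) / 10 = 0.3.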
class LengthBinDistanceUnnormalized:
def __init__(self, device):
self.device = device
def score(self, pred_word_2d_list, pred_word_2d_list_sent_tokenized, trg_word_2d_list, trg_word_2d_list_sent_tokenized, batch_size, control_variables):
target_len_bin_list = control_variables['len_bins']
batch_cost = []
for pred_word_list, target_len_bin in zip(pred_word_2d_list, target_len_bin_list):
pred_len = len(pred_word_list)
pred_len_bin = LEN_BINS[pred_len]
len_bin_distance = abs(target_len_bin - pred_len_bin)
batch_cost.append(len_bin_distance)
return torch.FloatTensor(batch_cost).to(self.device) # tensor: [batch_size]
class ExactLengthCost:
def __init__(self, device):
self.device = device
def score(self, pred_word_2d_list, pred_word_2d_list_sent_tokenized, trg_word_2d_list, trg_word_2d_list_sent_tokenized, batch_size, control_variables):
target_len_list = control_variables['exact_lens']
batch_cost = []
for pred_word_list, target_len in zip(pred_word_2d_list, target_len_list):
pred_len = len(pred_word_list)
"""
if pred_len == target_len:
cost = 0.0
else:
cost = 1.0
"""
cost = abs(pred_len - target_len)
batch_cost.append(cost)
return torch.FloatTensor(batch_cost).to(self.device) # tensor: [batch_size]
class ExactLengthCostDistance:
def __init__(self, device):
self.device = device
def score(self, pred_word_2d_list, pred_word_2d_list_sent_tokenized, trg_word_2d_list, trg_word_2d_list_sent_tokenized, batch_size, control_variables):
target_len_list = control_variables['exact_lens']
batch_cost = []
for pred_word_list, target_len in zip(pred_word_2d_list, target_len_list):
pred_len = len(pred_word_list)
cost = abs(pred_len - target_len) / target_len
batch_cost.append(cost)
return torch.FloatTensor(batch_cost).to(self.device) # tensor: [batch_size]
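# Worked example of the relative length error above: with a target length of 30 tokens and
# a 24-token prediction, the cost is abs(24 - 30) / 30 = 0.2.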
class ExactLengthCostDistanceUnnormalized:
def __init__(self, device):
self.device = device
def score(self, pred_word_2d_list, pred_word_2d_list_sent_tokenized, trg_word_2d_list, trg_word_2d_list_sent_tokenized, batch_size, control_variables):
target_len_list = control_variables['exact_lens']
batch_cost = []
for pred_word_list, target_len in zip(pred_word_2d_list, target_len_list):
pred_len = len(pred_word_list)
cost = abs(pred_len - target_len)
batch_cost.append(cost)
return torch.FloatTensor(batch_cost).to(self.device) # tensor: [batch_size]
class NegativeNamedEntityF1:
def __init__(self, device):
self.device = device
self.nlp = spacy.load("en_core_web_sm")
neuralcoref.add_to_pipe(self.nlp)
self.beta = 2.0
def score(self, pred_word_2d_list, pred_word_2d_list_sent_tokenized, trg_word_2d_list, trg_word_2d_list_sent_tokenized, batch_size, control_variables):
# reference entities_list and output entities_list should be lower-cased
reference_entities_list = control_variables['reference_entities_list']
batch_cost = []
for pred_word_list, reference_entities in zip(pred_word_2d_list, reference_entities_list):
num_matched_entities = 0
num_reference_entities = len(reference_entities)
unique_output_entities = []
if num_reference_entities > 0:
num_unique_output_entities = 0
pred_str = ' '.join(pred_word_list)
pred_str_spacy = self.nlp(pred_str)
for ent in pred_str_spacy.ents:
if ent.label_ in NUMERICAL_ENTITIES_TYPES or ent.text in unique_output_entities:
continue
unique_output_entities.append(ent.text)
num_unique_output_entities += 1
if ent.text.lower() in reference_entities:
num_matched_entities += 1
if num_unique_output_entities > 0:
precision = num_matched_entities / num_unique_output_entities
recall = num_matched_entities / num_reference_entities
if precision == 0 or recall == 0:
f_beta = 0.0
else:
f_beta = (1+self.beta**2) * (precision * recall) / (self.beta**2 * precision + recall)
#f_beta = 2 * (precision * recall) / (precision + recall)
else:
f_beta = 0.0
else:
f_beta = 1.0
batch_cost.append(-f_beta)
return torch.FloatTensor(batch_cost).to(self.device) # tensor: [batch_size]
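# Worked example of the recall-weighted F-beta above (beta = 2): with precision 0.5 and
# recall 1.0, f_beta = (1 + 4) * 0.5 * 1.0 / (4 * 0.5 + 1.0) = 2.5 / 3 ~= 0.83, and the
# method returns its negative, so higher entity recall lowers the cost.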
class NegativeNamedEntityClozeConfidence:
def __init__(self, device, threshold):
self.device = device
self.threshold = threshold
self.cloze_model = ClozeMCModel(self.device)
def score(self, pred_word_2d_list, pred_word_2d_list_sent_tokenized, trg_word_2d_list,
trg_word_2d_list_sent_tokenized, batch_size, control_variables):
masked_questions_ids_2dlist = control_variables['masked_questions_ids_2dlist']
answer_id_2dlist = control_variables['answer_id_2dlist']
multiple_choices_ids_2dlist = control_variables['multiple_choices_ids_2dlist']
#print(masked_questions_ids_2dlist)
#print(answer_id_2dlist)
#print(multiple_choices_ids_2dlist)
#print()
summary_str_list = [' '.join(pred_word_list) for pred_word_list in pred_word_2d_list]
# feed a batch to the model, record the position of each sample
num_questions_per_sample = [len(questions) for questions in masked_questions_ids_2dlist]
#print(num_questions_per_sample)
flattened_masked_questions_ids_list = []
flattened_answer_id_list = []
flattened_multiple_choices_ids_list = []
flattened_context_str_list = []
for masked_question_ids_list, answer_id_list, multiple_choices_ids_list, summary_str in zip(
masked_questions_ids_2dlist, answer_id_2dlist, multiple_choices_ids_2dlist, summary_str_list):
flattened_masked_questions_ids_list += masked_question_ids_list
flattened_answer_id_list += answer_id_list
flattened_multiple_choices_ids_list += multiple_choices_ids_list
flattened_context_str_list += [summary_str] * len(masked_question_ids_list)
#print(summary_str)
#print(flattened_context_str_list)
#print(len(flattened_context_str_list))
#print(len(flattened_masked_questions_ids_list))
#print(len(flattened_answer_id_list))
#print(len(flattened_multiple_choices_ids_list))
#print()
confidence_score = self.cloze_model.compute_confidence_score(flattened_masked_questions_ids_list,
flattened_multiple_choices_ids_list,
flattened_answer_id_list,
flattened_context_str_list)
# confidence_score: [len(flattened_masked_questions_ids_list)]
# compute average confidence for each sample
num_processed_samples = 0
score_for_each_batch = []
#print(flattened_context_str_list)
#print(confidence_score)
for i in range(len(num_questions_per_sample)):
# average for each batch
if summary_str_list[i].strip() == "":
score_for_each_batch.append(torch.FloatTensor([0.0]).to(self.device))
elif num_questions_per_sample[i] > 0:
avg_score = confidence_score[
num_processed_samples:num_processed_samples + num_questions_per_sample[i]].mean(dim=0)
score_for_each_batch.append(avg_score)
else:
score_for_each_batch.append(torch.FloatTensor([self.threshold]).to(self.device))
#print(num_processed_samples)
#print(num_processed_samples + num_questions_per_sample[i])
num_processed_samples += num_questions_per_sample[i]
score_for_each_batch = torch.stack(score_for_each_batch, dim=0)
# [batch_size]
#print(-score_for_each_batch)
#print(self.threshold)
#print()
#exit()
return -score_for_each_batch
"""
class NegativeNamedEntityQAF1LengthNormalized:
def __init__(self, device, threshold):
self.device = device
self.threshold = threshold
self.cloze_model = ClozeModel(self.device)
def score(self, pred_word_2d_list, pred_word_2d_list_sent_tokenized, trg_word_2d_list,
trg_word_2d_list_sent_tokenized, batch_size, control_variables):
masked_questions_ids_2dlist = control_variables['masked_questions_ids_2dlist']
answer_str_2dlist = control_variables['answer_2dlist']
#print(masked_questions_ids_2dlist)
#print(answer_str_2dlist)
#print()
summary_str_list = [' '.join(pred_word_list) for pred_word_list in pred_word_2d_list]
trg_summary_lens = [len(trg_word_list) for trg_word_list in trg_word_2d_list]
# feed a batch to the model, record the position of each sample
num_questions_per_sample = [len(questions) for questions in masked_questions_ids_2dlist]
#print(num_questions_per_sample)
flattened_masked_questions_ids_list = []
flattened_answer_str_list = []
flattened_context_str_list = []
for masked_question_ids_list, answer_str_list, summary_str in zip(
masked_questions_ids_2dlist, answer_str_2dlist, summary_str_list):
flattened_masked_questions_ids_list += masked_question_ids_list
flattened_answer_str_list += answer_str_list
flattened_context_str_list += [summary_str] * len(masked_question_ids_list)
#print(summary_str)
#print(flattened_context_str_list)
#print(len(flattened_context_str_list))
#print(len(flattened_masked_questions_ids_list))
#print(len(flattened_answer_str_list))
#print()
f1_score = self.cloze_model.compute_f1_score(flattened_masked_questions_ids_list,
flattened_answer_str_list,
flattened_context_str_list)
# f1_score: [len(flattened_masked_questions_ids_list)]
# compute average confidence for each sample
num_processed_samples = 0
score_for_each_batch = []
#print(flattened_context_str_list)
#print(confidence_score)
for i in range(len(num_questions_per_sample)):
# average for each batch
if summary_str_list[i].strip() == "":
score_for_each_batch.append(torch.tensor(0.0).to(self.device))
elif num_questions_per_sample[i] > 0:
avg_score = f1_score[
num_processed_samples:num_processed_samples + num_questions_per_sample[i]].mean(dim=0)
score_for_each_batch.append(avg_score)
else:
score_for_each_batch.append(torch.tensor(-self.threshold).to(self.device))
#print(num_processed_samples)
#print(num_processed_samples + num_questions_per_sample[i])
num_processed_samples += num_questions_per_sample[i]
#print(score_for_each_batch)
#print(score_for_each_batch[0].size())
score_for_each_batch = torch.stack(score_for_each_batch, dim=0)
# [batch_size]
#print(score_for_each_batch.size())
#print(score_for_each_batch)
#print(self.threshold)
#print()
#exit()
return -score_for_each_batch
"""
class NegativeNamedEntityQAF1:
def __init__(self, device, threshold):
self.device = device
self.threshold = threshold
self.cloze_model = ClozeModel(self.device)
def score(self, pred_word_2d_list, pred_word_2d_list_sent_tokenized, trg_word_2d_list,
trg_word_2d_list_sent_tokenized, batch_size, control_variables):
masked_questions_ids_2dlist = control_variables['masked_questions_ids_2dlist']
answer_str_2dlist = control_variables['answer_2dlist']
#print(masked_questions_ids_2dlist)
#print(answer_str_2dlist)
#print()
summary_str_list = [' '.join(pred_word_list) for pred_word_list in pred_word_2d_list]
# feed a batch to the model, record the position of each sample
num_questions_per_sample = [len(questions) for questions in masked_questions_ids_2dlist]
#print(num_questions_per_sample)
flattened_masked_questions_ids_list = []
flattened_answer_str_list = []
flattened_context_str_list = []
for masked_question_ids_list, answer_str_list, summary_str in zip(
masked_questions_ids_2dlist, answer_str_2dlist, summary_str_list):
flattened_masked_questions_ids_list += masked_question_ids_list
flattened_answer_str_list += answer_str_list
flattened_context_str_list += [summary_str] * len(masked_question_ids_list)
#print(summary_str)
#print(flattened_context_str_list)
#print(len(flattened_context_str_list))
#print(len(flattened_masked_questions_ids_list))
#print(len(flattened_answer_str_list))
#print()
f1_score = self.cloze_model.compute_f1_score(flattened_masked_questions_ids_list,
flattened_answer_str_list,
flattened_context_str_list)
# f1_score: [len(flattened_masked_questions_ids_list)]
# compute average confidence for each sample
num_processed_samples = 0
score_for_each_batch = []
#print(flattened_context_str_list)
#print(confidence_score)
for i in range(len(num_questions_per_sample)):
# average for each batch
if summary_str_list[i].strip() == "":
score_for_each_batch.append(torch.tensor(0.0).to(self.device))
elif num_questions_per_sample[i] > 0:
avg_score = f1_score[
num_processed_samples:num_processed_samples + num_questions_per_sample[i]].mean(dim=0)
score_for_each_batch.append(avg_score)
else:
score_for_each_batch.append(torch.tensor(-self.threshold).to(self.device))
#print(num_processed_samples)
#print(num_processed_samples + num_questions_per_sample[i])
num_processed_samples += num_questions_per_sample[i]
#print(score_for_each_batch)
#print(score_for_each_batch[0].size())
score_for_each_batch = torch.stack(score_for_each_batch, dim=0)
# [batch_size]
#print(score_for_each_batch.size())
#print(score_for_each_batch)
#print(self.threshold)
#print()
#exit()
return -score_for_each_batch
class EntityRepeatCost:
def __init__(self, device):
self.device = device
def score(self, pred_word_2d_list, pred_word_2d_list_sent_tokenized, trg_word_2d_list, trg_word_2d_list_sent_tokenized, batch_size, control_variables):
batch_cost = []
reference_entities_list = control_variables['reference_entities_list']
for pred_word_list_sent_tokenized, reference_entities in zip(pred_word_2d_list_sent_tokenized, reference_entities_list):
num_pred_sents = len(pred_word_list_sent_tokenized)
num_sents_contain_repeat
adding empty entry to database")
else:
# log provided attributes
for key, value in attributes.items():
logger.debug("attributes[" + str(key) + "]: " + str(value))
# Create object
try:
db_entry_object = Contacts(
account_id=account_id,
address1=str(attributes.get("address1", "")),
address2=str(attributes.get("address2", "")),
postal_code=str(attributes.get("postalCode", "")),
city=str(attributes.get("city", "")),
state=str(attributes.get("state", "")),
country=str(attributes.get("country", "")),
type=str(attributes.get("type", "")),
prime=str(attributes.get("primary", ""))
)
except Exception as exp:
error_title = "Failed to create contacts object"
logger.error(error_title + ": " + repr(exp))
raise
else:
logger.debug("contacts object created: " + db_entry_object.log_entry)
# Store updates
try:
cursor = db_entry_object.to_db(cursor=cursor)
###
# Commit
db.connection.commit()
except Exception as exp:
error_title = "Failed to add contacts to DB"
logger.error(error_title + ": " + repr(exp))
logger.debug('commit failed: ' + repr(exp))
logger.debug('--> rollback')
db.connection.rollback()
raise
else:
logger.debug("Committed")
logger.info("contacts added")
logger.info(db_entry_object.log_entry)
return db_entry_object.to_api_dict
def update_contact(account_id=None, id=None, attributes=None, cursor=None):
"""
Update one contacts entry at database identified by Account ID and ID
:param account_id:
:param id:
:param attributes:
:return: dict
"""
if account_id is None:
raise AttributeError("Provide account_id as parameter")
if id is None:
raise AttributeError("Provide id as parameter")
if attributes is None:
raise AttributeError("Provide attributes as parameter")
if not isinstance(attributes, dict):
raise AttributeError("attributes must be a dict")
if cursor is None:
# Get DB cursor
try:
cursor = get_db_cursor()
except Exception as exp:
logger.error('Could not get database cursor: ' + repr(exp))
raise
try:
db_entry_object = Contacts(account_id=account_id, id=id)
except Exception as exp:
error_title = "Failed to create contacts object"
logger.error(error_title + ": " + repr(exp))
raise
else:
logger.debug("contacts object created: " + db_entry_object.log_entry)
# Get contacts from DB
try:
cursor = db_entry_object.from_db(cursor=cursor)
except Exception as exp:
error_title = "Failed to fetch contacts from DB"
logger.error(error_title + ": " + repr(exp))
raise
else:
logger.info("contacts fetched")
logger.info("contacts fetched from db: " + db_entry_object.log_entry)
# Update contacts object
if len(attributes) == 0:
logger.info("Empty attributes dict provided. Nothing to update.")
return db_entry_object.to_api_dict
else:
logger.info("contacts object to update: " + db_entry_object.log_entry)
# log provided attributes
for key, value in attributes.items():
logger.debug("attributes[" + str(key) + "]: " + str(value))
# Update object attributes
if "address1" in attributes:
logger.info("Updating address1")
old_value = str(db_entry_object.address1)
new_value = str(attributes.get("address1", "None"))
logger.debug("Updating: " + old_value + " --> " + new_value)
db_entry_object.address1 = new_value
logger.info(db_entry_object.log_entry)
if "address2" in attributes:
logger.info("Updating address2")
old_value = str(db_entry_object.address2)
new_value = str(attributes.get("address2", "None"))
logger.debug("Updating: " + old_value + " --> " + new_value)
db_entry_object.address2 = new_value
logger.info(db_entry_object.log_entry)
if "postalCode" in attributes:
logger.info("Updating postalCode")
old_value = str(db_entry_object.postal_code)
new_value = str(attributes.get("postalCode", "None"))
logger.debug("Updating: " + old_value + " --> " + new_value)
db_entry_object.postal_code = new_value
logger.info(db_entry_object.log_entry)
if "city" in attributes:
logger.info("Updating city")
old_value = str(db_entry_object.city)
new_value = str(attributes.get("city", "None"))
logger.debug("Updating: " + old_value + " --> " + new_value)
db_entry_object.city = new_value
logger.info(db_entry_object.log_entry)
if "state" in attributes:
logger.info("Updating state")
old_value = str(db_entry_object.state)
new_value = str(attributes.get("state", "None"))
logger.debug("Updating: " + old_value + " --> " + new_value)
db_entry_object.state = new_value
logger.info(db_entry_object.log_entry)
if "country" in attributes:
logger.info("Updating country")
old_value = str(db_entry_object.country)
new_value = str(attributes.get("country", "None"))
logger.debug("Updating: " + old_value + " --> " + new_value)
db_entry_object.country = new_value
logger.info(db_entry_object.log_entry)
if "type" in attributes:
logger.info("Updating type")
old_value = str(db_entry_object.type)
new_value = str(attributes.get("type", "None"))
logger.debug("Updating: " + old_value + " --> " + new_value)
db_entry_object.type = new_value
logger.info(db_entry_object.log_entry)
if "primary" in attributes:
logger.info("Updating primary")
old_value = str(db_entry_object.prime)
new_value = str(attributes.get("primary", "None"))
logger.debug("Updating: " + old_value + " --> " + new_value)
db_entry_object.prime = new_value
logger.info(db_entry_object.log_entry)
# Store updates
try:
cursor = db_entry_object.update_db(cursor=cursor)
###
# Commit
db.connection.commit()
except Exception as exp:
error_title = "Failed to update contacts to DB"
logger.error(error_title + ": " + repr(exp))
logger.debug('commit failed: ' + repr(exp))
logger.debug('--> rollback')
db.connection.rollback()
raise
else:
logger.debug("Committed")
logger.info("contacts updated")
logger.info(db_entry_object.log_entry)
return db_entry_object.to_api_dict
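# Hedged usage sketch (account_id, id and the attribute value are illustrative only):
#   update_contact(account_id=42, id=3, attributes={"city": "Helsinki"})
# Only the keys present in `attributes` are overwritten; all other columns of the Contacts
# row keep their stored values, and the updated row is returned as a dict.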
##################################
###################################
# Emails
##################################
##################################
def get_email(account_id=None, id=None, cursor=None):
"""
Get one email entry from database by Account ID and email ID
:param account_id:
:param id:
:return: dict
"""
if account_id is None:
raise AttributeError("Provide account_id as parameter")
if id is None:
raise AttributeError("Provide id as parameter")
if cursor is None:
# Get DB cursor
try:
cursor = get_db_cursor()
except Exception as exp:
logger.error('Could not get database cursor: ' + repr(exp))
raise
try:
db_entry_object = Email(account_id=account_id, id=id)
except Exception as exp:
error_title = "Failed to create email object"
logger.error(error_title + ": " + repr(exp))
raise
else:
logger.debug("email object created: " + db_entry_object.log_entry)
# Get email from DB
try:
cursor = db_entry_object.from_db(cursor=cursor)
except Exception as exp:
error_title = "Failed to fetch email from DB"
logger.error(error_title + ": " + repr(exp))
raise
else:
logger.info("email fetched")
logger.info("email fetched from db: " + db_entry_object.log_entry)
return db_entry_object.to_api_dict
def get_emails(account_id=None):
"""
Get all email entries related to account
:param account_id:
:return: List of dicts
"""
if account_id is None:
raise AttributeError("Provide account_id as parameter")
# Get table name
logger.info("Create email")
db_entry_object = Email()
logger.info(db_entry_object.log_entry)
logger.info("Get table name")
table_name = db_entry_object.table_name
logger.info("Got table name: " + str(table_name))
# Get DB cursor
try:
cursor = get_db_cursor()
except Exception as exp:
logger.error('Could not get database cursor: ' + repr(exp))
raise
# Get primary keys for emails
try:
cursor, id_list = get_primary_keys_by_account_id(cursor=cursor, account_id=account_id, table_name=table_name)
except Exception as exp:
logger.error('Could not get primary key list: ' + repr(exp))
raise
# Get emails from database
logger.info("Get emails from database")
db_entry_list = []
for id in id_list:
# TODO: try-except needed?
logger.info("Getting emails with emails_id: " + str(id))
db_entry_dict = get_email(account_id=account_id, id=id)
db_entry_list.append(db_entry_dict)
logger.info("email object added to list: " + json.dumps(db_entry_dict))
return db_entry_list
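# Hedged usage sketch (assumes a configured database connection and an existing account):
#   emails = get_emails(account_id=42)      # list of dicts, one per Email row
#   email = get_email(account_id=42, id=7)  # single dict for the Email with primary key 7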
def add_email(account_id=None, attributes=None, cursor=None):
"""
Add one email entry to database identified by Account ID
:param account_id:
:param attributes:
:return: dict
"""
if account_id is None:
raise AttributeError("Provide account_id as parameter")
if attributes is None:
raise AttributeError("Provide attributes as parameter")
if not isinstance(attributes, dict):
raise AttributeError("attributes must be a dict")
if cursor is None:
# Get DB cursor
try:
cursor = get_db_cursor()
except Exception as exp:
logger.error('Could not get database cursor: ' + repr(exp))
raise
# Update emails object
if len(attributes) == 0:
logger.info("Empty attributes dict provided. Nothing to add.")
raise StandardError("Not adding empty entry to database")
else:
# log provided attributes
for key, value in attributes.items():
logger.debug("attributes[" + str(key) + "]: " + str(value))
# Create object
try:
db_entry_object = Email(
account_id=account_id,
email=str(attributes.get("email", "")),
type=str(attributes.get("type", "")),
prime=str(attributes.get("primary", ""))
)
except Exception as exp:
error_title = "Failed to create emails object"
logger.error(error_title + ": " + repr(exp))
raise
else:
logger.debug("emails object created: " + db_entry_object.log_entry)
# Store updates
try:
cursor = db_entry_object.to_db(cursor=cursor)
###
# Commit
db.connection.commit()
except Exception as exp:
error_title = "Failed to add emails to DB"
logger.error(error_title + ": " + repr(exp))
logger.debug('commit failed: ' + repr(exp))
logger.debug('--> rollback')
db.connection.rollback()
raise
else:
logger.debug("Committed")
logger.info("emails added")
logger.info(db_entry_object.log_entry)
return db_entry_object.to_api_dict
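# Hedged usage sketch (the attribute values are illustrative; only "email", "type" and
# "primary" are read from the attributes dict):
#   add_email(account_id=42,
#             attributes={"email": "user@example.com", "type": "Personal", "primary": "True"})
# The new row is committed to the database and returned as a dict.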
def update_email(account_id=None, id=None, attributes=None, cursor=None):
"""
Update one email entry at database identified by Account ID and ID
:param account_id:
:param id:
:param attributes:
:return: dict
"""
if account_id is None:
raise AttributeError("Provide account_id as parameter")
if id is None:
raise AttributeError("Provide id as parameter")
if attributes is None:
raise AttributeError("Provide attributes as parameter")
if not isinstance(attributes, dict):
raise AttributeError("attributes must be a dict")
if cursor is None:
# Get DB cursor
try:
cursor = get_db_cursor()
except Exception as exp:
logger.error('Could not get database cursor: ' + repr(exp))
raise
try:
db_entry_object = Email(account_id=account_id, id=id)
except Exception as exp:
error_title = "Failed to create email object"
logger.error(error_title + ": " + repr(exp))
raise
else:
logger.debug("email object created: " + db_entry_object.log_entry)
# Get email from DB
try:
cursor = db_entry_object.from_db(cursor=cursor)
except Exception as exp:
error_title = "Failed to fetch email from DB"
logger.error(error_title + ": " + repr(exp))
raise
else:
logger.info("email fetched")
logger.info("email fetched from db: " + db_entry_object.log_entry)
# Update email object
if len(attributes) == 0:
logger.info("Empty attributes dict provided. Nothing to update.")
return db_entry_object.to_api_dict
else:
logger.info("email object to update: " + db_entry_object.log_entry)
# log provided attributes
for key, value in attributes.items():
logger.debug("attributes[" + str(key) + "]: " + str(value))
# Update object attributes
if "email" in attributes:
logger.info("Updating email")
old_value = str(db_entry_object.email)
new_value = str(attributes.get("email", "None"))
logger.debug("Updating: " + old_value + " --> " + new_value)
db_entry_object.email = | |
iterate to get both major and minor axes
while axes_mass_dif > 0.005:
# get 1-D radius along minor axis
masks = (major_distances < galaxy_radius_major)
galaxy_radius_minor, galaxy_mass_minor, indices = get_radius_mass_indices(
masses[masks], minor_distances[masks], distance_scaling, distance_limits,
distance_bin_width, 1, edge_kind, edge_value)
# get 2-D radius along major axes
masks = (minor_distances < galaxy_radius_minor)
galaxy_radius_major, galaxy_mass_major, indices = get_radius_mass_indices(
masses[masks], major_distances[masks], distance_scaling, distance_limits,
distance_bin_width, 2, edge_kind, edge_value)
axes_mass_dif = (abs(galaxy_mass_major - galaxy_mass_minor) /
(0.5 * (galaxy_mass_major + galaxy_mass_minor)))
indices = (major_distances < galaxy_radius_major) * (minor_distances < galaxy_radius_minor)
gal_prop['radius.major'] = galaxy_radius_major
gal_prop['radius.minor'] = galaxy_radius_minor
gal_prop['mass'] = galaxy_mass_major
gal_prop['log mass'] = np.log10(galaxy_mass_major)
gal_prop['rotation.tensor'] = rotation_tensor
gal_prop['indices'] = part_indices[indices]
if print_results:
Say.say('R_{:.0f} along major, minor axes = {:.2f}, {:.2f} kpc physical'.format(
edge_value, galaxy_radius_major, galaxy_radius_minor))
else:
galaxy_radius, galaxy_mass, indices = get_radius_mass_indices(
masses, distances, distance_scaling, distance_limits, distance_bin_width,
dimension_number, edge_kind, edge_value)
gal_prop['radius'] = galaxy_radius
gal_prop['mass'] = galaxy_mass
gal_prop['log mass'] = np.log10(galaxy_mass)
gal_prop['indices'] = part_indices[indices]
if print_results:
Say.say('R_{:.0f} = {:.2f} kpc physical'.format(edge_value, galaxy_radius))
if print_results:
Say.say('M_star = {:.2e} M_sun, log = {:.2f}'.format(
gal_prop['mass'], gal_prop['log mass']))
return gal_prop
#===================================================================================================
# profiles of properties
#===================================================================================================
class SpeciesProfileClass(ut.binning.DistanceBinClass):
'''
Get profiles of either histogram/sum or stastitics (such as average, median) of given
property for given particle species.
__init__ is defined via ut.binning.DistanceBinClass
'''
def get_profiles(
self, part, species=['all'],
property_name='', property_statistic='sum', weight_by_mass=False,
center_position=None, center_velocity=None, rotation=None,
other_axis_distance_limits=None, property_select={}, part_indicess=None):
'''
Parse inputs into either get_sum_profiles() or get_statistics_profiles().
If you know what you want, you can skip this and jump to those functions.
Parameters
----------
part : dict : catalog of particles
species : str or list : name[s] of particle species to compute mass from
property_name : str : name of property to get statistics of
property_statistic : str : statistic to get profile of:
'sum', 'sum.cum', 'density', 'density.cum', 'vel.circ'
weight_by_mass : bool : whether to weight property by species mass
center_position : array : position of center
center_velocity : array : velocity of center
rotation : bool or array : whether to rotate particles - two options:
(a) if input array of eigen-vectors, will define rotation axes
(b) if True, will rotate to align with principal axes stored in species dictionary
other_axis_distance_limits : float :
min and max distances along other axis[s] to keep particles [kpc physical]
property_select : dict : (other) properties to select on: names as keys and limits as values
part_indicess : array (species number x particle number) :
indices of particles from which to select
Returns
-------
pros : dict : dictionary of profiles for each particle species
'''
if ('sum' in property_statistic or 'vel.circ' in property_statistic or
'density' in property_statistic):
pros = self.get_sum_profiles(
part, species, property_name, center_position, rotation, other_axis_distance_limits,
property_select, part_indicess)
else:
pros = self.get_statistics_profiles(
part, species, property_name, weight_by_mass, center_position, center_velocity,
rotation, other_axis_distance_limits, property_select, part_indicess)
for k in pros:
if '.cum' in property_statistic or 'vel.circ' in property_statistic:
pros[k]['distance'] = pros[k]['distance.cum']
pros[k]['log distance'] = pros[k]['log distance.cum']
else:
pros[k]['distance'] = pros[k]['distance.mid']
pros[k]['log distance'] = pros[k]['log distance.mid']
return pros
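# Hedged usage sketch: assuming `SpeciesProfile` is an instance of this class whose distance
# bins were set up via ut.binning.DistanceBinClass.__init__ and `part` is a particle catalog
# read elsewhere, a cumulative stellar mass profile could be requested as:
#   pros = SpeciesProfile.get_profiles(part, species=['star'], property_name='mass',
#                                      property_statistic='sum.cum')
# The result is a dictionary keyed by particle species, as described in the docstring above.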
def get_sum_profiles(
self, part, species=['all'], property_name='mass', center_position=None,
rotation=None, other_axis_distance_limits=None, property_select={}, part_indicess=None):
'''
Get profiles of summed quantity (such as mass or density) for given property for each
particle species.
Parameters
----------
part : dict : catalog of particles
species : str or list : name[s] of particle species to compute mass from
property_name : str : property to get sum of
center_position : list : center position
rotation : bool or array : whether to rotate particles - two options:
(a) if input array of eigen-vectors, will define rotation axes
(b) if True, will rotate to align with principal axes stored in species dictionary
other_axis_distance_limits : float :
min and max distances along other axis[s] to keep particles [kpc physical]
property_select : dict : (other) properties to select on: names as keys and limits as values
part_indicess : array (species number x particle number) :
indices of particles from which to select
Returns
-------
pros : dict : dictionary of profiles for each particle species
'''
if 'gas' in species and 'consume.time' in property_name:
pros_mass = self.get_sum_profiles(
part, species, 'mass', center_position, rotation, other_axis_distance_limits,
property_select, part_indicess)
pros_sfr = self.get_sum_profiles(
part, species, 'sfr', center_position, rotation, other_axis_distance_limits,
property_select, part_indicess)
pros = pros_sfr
for k in pros_sfr['gas']:
if 'distance' not in k:
pros['gas'][k] = pros_mass['gas'][k] / pros_sfr['gas'][k] / 1e9
return pros
pros = {}
Fraction = ut.math.FractionClass()
if np.isscalar(species):
species = [species]
if species == ['baryon']:
# treat this case specially for baryon fraction
species = ['gas', 'star', 'dark', 'dark2']
species = parse_species(part, species)
center_position = parse_property(part, 'center_position', center_position)
part_indicess = parse_property(species, 'indices', part_indicess)
assert 0 < self.dimension_number <= 3
for spec_i, spec in enumerate(species):
part_indices = part_indicess[spec_i]
if part_indices is None or not len(part_indices):
part_indices = ut.array.get_arange(part[spec].prop(property_name))
if property_select:
part_indices = catalog.get_indices_catalog(
part[spec], property_select, part_indices)
prop_values = part[spec].prop(property_name, part_indices)
if self.dimension_number == 3:
# simple case: profile using scalar distance
distances = ut.coordinate.get_distances(
part[spec]['position'][part_indices], center_position, part.info['box.length'],
part.snapshot['scalefactor'], total_distance=True) # [kpc physical]
elif self.dimension_number in [1, 2]:
# other cases: profile along R (2 major axes) or Z (minor axis)
if rotation is not None and not isinstance(rotation, bool) and len(rotation):
rotation_tensor = rotation
elif (len(part[spec].host_rotation_tensors) and
len(part[spec].host_rotation_tensors[0])):
rotation_tensor = part[spec].host_rotation_tensors[0]
else:
raise ValueError('want 2-D or 1-D profile but no means to define rotation')
distancess = get_distances_wrt_center(
part, spec, part_indices, center_position, rotation_tensor,
coordinate_system='cylindrical')
                # ensure all distances are non-negative
distancess = np.abs(distancess)
if self.dimension_number == 1:
# compute profile along minor axis (Z)
distances = distancess[:, 1]
other_distances = distancess[:, 0]
elif self.dimension_number == 2:
# compute profile along major axes (R)
distances = distancess[:, 0]
other_distances = distancess[:, 1]
if (other_axis_distance_limits is not None and
(min(other_axis_distance_limits) > 0 or
max(other_axis_distance_limits) < Inf)):
masks = ((other_distances >= min(other_axis_distance_limits)) *
(other_distances < max(other_axis_distance_limits)))
distances = distances[masks]
prop_values = prop_values[masks]
pros[spec] = self.get_sum_profile(distances, prop_values) # defined in DistanceBinClass
props = [pro_prop for pro_prop in pros[species[0]] if 'distance' not in pro_prop]
props_dist = [pro_prop for pro_prop in pros[species[0]] if 'distance' in pro_prop]
if property_name == 'mass':
# create dictionary for baryonic mass
if 'star' in species or 'gas' in species:
spec_new = 'baryon'
pros[spec_new] = {}
for spec in np.intersect1d(species, ['star', 'gas']):
for pro_prop in props:
if pro_prop not in pros[spec_new]:
pros[spec_new][pro_prop] = np.array(pros[spec][pro_prop])
elif 'log' in pro_prop:
pros[spec_new][pro_prop] = ut.math.get_log(
10 ** pros[spec_new][pro_prop] +
10 ** pros[spec][pro_prop])
else:
pros[spec_new][pro_prop] += pros[spec][pro_prop]
for pro_prop in props_dist:
pros[spec_new][pro_prop] = pros[species[0]][pro_prop]
species.append(spec_new)
if len(species) > 1:
# create dictionary for total mass
spec_new = 'total'
pros[spec_new] = {}
for spec in np.setdiff1d(species, ['baryon', 'total']):
for pro_prop in props:
if pro_prop not in pros[spec_new]:
pros[spec_new][pro_prop] = np.array(pros[spec][pro_prop])
elif 'log' in pro_prop:
pros[spec_new][pro_prop] = ut.math.get_log(
10 ** pros[spec_new][pro_prop] +
10 ** pros[spec][pro_prop])
else:
pros[spec_new][pro_prop] += pros[spec][pro_prop]
for pro_prop in props_dist:
pros[spec_new][pro_prop] = pros[species[0]][pro_prop]
species.append(spec_new)
# create mass fraction wrt total mass
for spec in np.setdiff1d(species, ['total']):
for pro_prop in ['sum', 'sum.cum']:
pros[spec][pro_prop + '.fraction'] = Fraction.get_fraction(
pros[spec][pro_prop], pros['total'][pro_prop])
if spec == 'baryon':
# units of cosmic baryon fraction
pros[spec][pro_prop + '.fraction'] /= (
part.Cosmology['omega_baryon'] / part.Cosmology['omega_matter'])
# create circular velocity = sqrt (G m(< r) / r)
for spec in species:
pros[spec]['vel.circ'] = halo_property.get_circular_velocity(
pros[spec]['sum.cum'], pros[spec]['distance.cum'])
return pros
def get_statistics_profiles(
self, part, species=['all'], property_name='', weight_by_mass=True,
center_position=None, center_velocity=None, rotation=None, other_axis_distance_limits=None,
property_select={}, part_indicess=None):
'''
Get profiles of statistics (such as median, average) for given property for each
particle species.
Parameters
----------
part : dict : catalog of particles
species : str or list : name[s] of particle species to compute mass from
property_name : str : name of property to get statistics of
weight_by_mass : bool : whether to weight property by species mass
center_position : array : position of center
center_velocity : array : velocity of center
rotation : bool or array : whether to rotate particles - two options:
(a) if input array of eigen-vectors, will define rotation axes
(b) if True, will rotate to align with principal axes stored in species dictionary
other_axis_distance_limits : float :
min and max distances | |
""" Provide the nodes for the template engine. """
__author__ = "<NAME>"
__copyright__ = "Copyright 2016"
__license__ = "Apache License 2.0"
__all__ = [
"Node", "NodeList", "TextNode", "IfNode", "ForNode", "SwitchNode",
"EmitNode", "IncludeNode", "ReturnNode", "AssignNode", "SectionNode",
"UseSectionNode", "ScopeNode", "VarNode", "ErrorNode","ImportNode",
"DoNode", "UnsetNode", "CodeNode", "ExpandNode"
]
from .errors import *
from .renderers import StringRenderer
from .scope import *
class Node(object):
""" A node is a part of the expression that is rendered. """
def __init__(self, template, line):
""" Initialize the node. """
self._template = template
self._line = line
self._env = template._env
def render(self, renderer):
""" Render the node to a renderer. """
raise NotImplementedError
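# Illustrative only (hypothetical subclass, not part of the original module): a custom
# node just needs to accept (template, line) in __init__ and implement render(renderer):
#
#     class NoOpNode(Node):
#         """ A node that renders nothing. """
#         def render(self, renderer):
#             pass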
class NodeList(object):
""" A list of nodes. """
def __init__(self):
"""Initialize. """
self._nodes = []
def append(self, node):
""" Append a node to the list. """
self._nodes.append(node)
def extend(self, nodelist):
""" Extend one node list with another. """
self._nodes.extend(nodelist._nodes)
def render(self, renderer):
""" Render all nodes. """
for node in self._nodes:
node.render(renderer)
def __getitem__(self, n):
return self._nodes[n]
class TextNode(Node):
""" A node that represents a raw block of text. """
def __init__(self, template, line, text):
""" Initialize a text node. """
Node.__init__(self, template, line)
self._text = text
def render(self, renderer):
""" Render content from a text node. """
renderer.render(self._text)
class IfNode(Node):
""" A node that manages if/elif/else. """
def __init__(self, template, line, expr):
""" Initialize the if node. """
Node.__init__(self, template, line)
self._ifs = [(expr, NodeList())]
self._else = None
self._nodes = self._ifs[0][1]
def add_elif(self, expr):
""" Add an if section. """
# TODO: error if self._else exists
self._ifs.append((expr, NodeList()))
self._nodes = self._ifs[-1][1]
def add_else(self):
""" Add an else. """
self._else = NodeList()
self._nodes = self._else
def render(self, renderer):
""" Render the if node. """
for (expr, nodes) in self._ifs:
result = expr.eval()
if result:
nodes.render(renderer)
return
if self._else:
self._else.render(renderer)
class ForNode(Node):
""" A node for handling for loops. """
def __init__(self, template, line, var, cvar, expr):
""" Initialize the for node. """
Node.__init__(self, template, line)
self._var = var
self._cvar = cvar
self._expr = expr
self._for = NodeList()
self._else = None
self._nodes = self._for
def add_else(self):
""" Add an else section. """
self._else = NodeList()
self._nodes = self._else
def render(self, renderer):
""" Render the for node. """
env = self._env
# Iterate over each value
values = self._expr.eval()
do_else = True
if values:
index = 0
for var in values:
do_else = False
if self._cvar:
env.set(self._cvar, index)
env.set(self._var, var)
index += 1
# Execute each sub-node
self._for.render(renderer)
if do_else and self._else:
self._else.render(renderer)
class SwitchNode(Node):
""" A node for basic if/elif/elif/else nesting. """
types = ["lt", "le", "gt", "ge", "ne", "eq", "bt"]
argc = [1, 1, 1, 1, 1, 1, 2]
cbs = [
lambda *args: args[0] < args[1],
lambda *args: args[0] <= args[1],
lambda *args: args[0] > args[1],
lambda *args: args[0] >= args[1],
lambda *args: args[0] != args[1],
lambda *args: args[0] == args[1],
lambda *args: args[0] >= args[1] and args[0] <= args[2]
]
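    # The three lists above line up by index: for example "bt" (between) expects two
    # arguments and its callback checks args[1] <= args[0] <= args[2], while every
    # other comparison takes a single argument.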
def __init__(self, template, line, expr):
""" Initialize the switch node. """
Node.__init__(self, template, line)
self._expr = expr
self._default = NodeList()
self._cases = []
self._nodes = self._default
def add_case(self, cb, exprs):
""" Add a case node. """
self._cases.append((cb, NodeList(), exprs))
self._nodes = self._cases[-1][1]
def render(self, renderer):
""" Render the node. """
value = self._expr.eval()
for cb, nodes, exprs in self._cases:
params = [expr.eval() for expr in exprs]
if cb(value, *params):
nodes.render(renderer)
return
self._default.render(renderer)
class EmitNode(Node):
""" A node to output some value. """
def __init__(self, template, line, expr):
""" Initialize the node. """
Node.__init__(self, template, line)
self._expr = expr
def render(self, renderer):
""" Render the output. """
renderer.render(str(self._expr.eval()))
class IncludeNode(Node):
""" A node to include another template. """
def __init__(self, template, line, expr, assigns, retvar):
""" Initialize the include node. """
Node.__init__(self, template, line)
self._expr = expr
self._assigns = assigns
self._retvar = retvar
def render(self, renderer):
""" Actually do the work of including the template. """
try:
template = self._env.load_file(
str(self._expr.eval()),
self._template
)
except (IOError, OSError, RestrictedError) as e:
raise TemplateError(
str(e),
self._template._filename,
self._line
)
context = {}
for (var, expr) in self._assigns:
context[var] = expr.eval()
template.render(renderer, context, self._retvar)
class ReturnNode(Node):
""" A node to set a return variable. """
def __init__(self, template, line, assigns):
""" Initialize. """
Node.__init__(self, template, line)
self._assigns = assigns
def render(self, renderer):
""" Set the return nodes. """
result = {}
for (var, expr) in self._assigns:
result[var] = expr.eval()
self._env.set(":return:", result, Scope.SCOPE_TEMPLATE)
class ExpandNode(Node):
""" A node to expand variables into the current scope. """
def __init__(self, template, line, expr):
""" Initialize """
Node.__init__(self, template, line)
self._expr = expr
def render(self, renderer):
""" Expand the variables. """
result = self._expr.eval()
try:
self._env.update(result)
except (KeyError, TypeError, ValueError) as e:
raise TemplateError(
str(e),
self._template._filename,
self._line
)
class AssignNode(Node):
""" Set a variable to a subvariable. """
def __init__(self, template, line, assigns, where):
""" Initialize. """
Node.__init__(self, template, line)
self._assigns = assigns
self._where = where
def render(self, renderer):
""" Set the value. """
env = self._env
for (var, expr) in self._assigns:
env.set(var, expr.eval(), self._where)
class SectionNode(Node):
""" A node to redirect template output to a section. """
def __init__(self, template, line, expr):
""" Initialize. """
Node.__init__(self, template, line)
self._expr = expr
self._nodes = NodeList()
def render(self, renderer):
""" Redirect output to a section. """
section = str(self._expr.eval())
renderer.push_section(section)
self._nodes.render(renderer)
renderer.pop_section()
class UseSectionNode(Node):
""" A node to use a section in the output. """
def __init__(self, template, line, expr):
""" Initialize. """
Node.__init__(self, template, line)
self._expr = expr
def render(self, renderer):
""" Render the section to the output. """
section = str(self._expr.eval())
renderer.render(renderer.get_section(section))
class ScopeNode(Node):
""" Create and remove scopes. """
def __init__(self, template, line, assigns):
""" Initialize. """
Node.__init__(self, template, line)
self._assigns = assigns
self._nodes = NodeList()
def render(self, renderer):
""" Render the scope. """
env = self._env
env._push_scope()
try:
for (var, expr) in self._assigns:
env.set(var, expr.eval())
self._nodes.render(renderer)
finally:
env._pop_scope()
class CodeNode(Node):
""" A node to execute python code. """
def __init__(self, template, line, assigns, retvar):
""" Initialize the include node. """
Node.__init__(self, template, line)
self._assigns = assigns
self._retvar = retvar
self._nodes = NodeList()
self._code = None
def render(self, renderer):
""" Actually do the work of including the template. """
# Check if allowed
if not self._env._code_enabled:
raise TemplateError(
"Use of direct python code not allowed",
self._template._filename,
self._line
)
# Compile the code only once
if not self._code:
# Get the code
new_renderer = StringRenderer()
self._nodes.render(new_renderer)
code = new_renderer.get()
# Compile it
try:
self._code = compile(code, "<string>", "exec")
except Exception as e:
raise TemplateError(
str(e),
self._template._filename,
self._line
)
# Execute the code
locals = {}
for (var, expr) in self._assigns:
locals[var] = expr.eval()
try:
exec(self._code, locals, locals)
except Exception as e:
raise TemplateError(
str(e),
self._template._filename,
self._line
)
# Handle return values
if self._retvar:
self._env.set(self._retvar, locals)
class VarNode(Node):
""" Capture output into a variable. """
def __init__(self, template, line, var):
""" Initialize. """
Node.__init__(self, template, line)
self._var = var
self._nodes = NodeList()
def render(self, renderer):
""" Render the results and capture into a variable. """
new_renderer = StringRenderer()
self._nodes.render(new_renderer)
self._env.set(self._var, new_renderer.get())
class ErrorNode(Node):
""" Raise an error from the template. """
def __init__(self, template, line, expr):
""" Initialize. """
Node.__init__(self, template, line)
self._expr = expr
def render(self, renderer):
""" Raise the error. """
raise RaisedError(
str(self._expr.eval()),
self._template._filename,
self._line
)
class ImportNode(Node):
""" Import a library to a variable in the current scope. """
def __init__(self, template, line, assigns):
Node.__init__(self, template, line)
self._assigns = assigns
def render(self, renderer):
""" Do the import. """
env = self._env
for (var, expr) in self._assigns:
name = expr.eval()
try:
imp = env.load_import(name)
env.set(var, imp)
except KeyError:
raise UnknownImportError(
"No such import: {0}".format(name),
self._template._filename,
self._line
)
class DoNode(Node):
""" Evaluate expressions and discard the results. """
def __init__(self, template, line, nodes):
""" Initialize. """
Node.__init__(self, template, line)
self._nodes = nodes
def render(self, renderer):
""" Set the value. """
for node in self._nodes:
node.eval()
class UnsetNode(Node):
""" Unset variable at the current scope rsults. """
def __init__(self, template, line, varlist):
""" Initialize. """
Node.__init__(self, template, line)
self._varlist = varlist
def render(self, renderer):
""" Set the value. """
env = self._env
| |
= load_cache(oci_utils.__chap_password_file)
if chap_passwords is None:
return None, None
for iqn, unpw in chap_passwords.items():
if ocid == unpw[0]:
return iqn, unpw[1]
return None, None
_this_instance_ocid = None
def main():
"""
Main.
Returns
-------
int
Return value of the operation, if any.
0 otherwise.
"""
global _this_instance_ocid
parser = get_args_parser()
args = parser.parse_args()
_logger.debug('Command line: %s', args)
if args.command is None:
# default to 'sync' command
args.command = "sync"
if args.command == 'usage':
parser.print_help()
sys.exit(0)
oci_sess = get_oci_api_session()
# we need this at many places, grab it once
if bool(oci_sess):
if bool(oci_sess.this_instance()):
_this_instance_ocid = oci_sess.this_instance().get_ocid()
else:
_this_instance_ocid = get_instance_ocid()
if 'compat' in args and args.compat is True:
# Display information as version 0.11 for compatibility reasons for few settings.
args.output_mode = 'compat'
args.details = True
compat_mode = 'compat'
else:
compat_mode = 'gen'
_logger.debug('Compatibility mode: %s', compat_mode)
system_disks = lsblk.list()
iscsiadm_session = iscsiadm.session()
# starting from here, nothing works if we are not root
_user_euid = os.geteuid()
if _user_euid != 0:
_logger.error("This program needs to be run with root privileges.")
return 1
if args.command == 'show':
display_attached_volumes(oci_sess, iscsiadm_session, system_disks,
args.output_mode, args.details, not args.no_truncate)
if len(args.compartments) > 0 or args.all:
api_display_available_block_volumes(oci_sess, args.compartments, args.all,
args.output_mode, args.details, not args.no_truncate)
return 0
max_volumes = OCIUtilsConfiguration.getint('iscsi', 'max_volumes')
if max_volumes > oci_utils._MAX_VOLUMES_LIMIT:
_logger.error("Your configured max_volumes(%s) is over the limit(%s)",
max_volumes, oci_utils._MAX_VOLUMES_LIMIT)
max_volumes = oci_utils._MAX_VOLUMES_LIMIT
#
# load iscsiadm-cache
ocid_cache = load_cache(iscsiadm.ISCSIADM_CACHE, max_age=timedelta(minutes=2))[1]
if ocid_cache is None:
_logger.debug('Updating the cache')
# run ocid once, to update the cache
ocid_refresh(wait=True)
# now try to load again
ocid_cache = load_cache(iscsiadm.ISCSIADM_CACHE, max_age=timedelta(minutes=2))[1]
if ocid_cache is None:
targets, attach_failed = None, None
else:
targets, attach_failed = ocid_cache
#
# load detached volumes cache
_logger.debug('iSCSI targets: %s', targets)
detached_volume_iqns = load_cache(__ignore_file)[1]
if detached_volume_iqns is None:
detached_volume_iqns = []
if args.command == 'sync' and not detached_volume_iqns and not attach_failed:
# nothing to do, stop here
print("All known devices are attached.")
if args.command == 'sync':
#
        # we still have volumes not attached, process them.
        # this one is as good as obsolete, ocid takes care of executing iscsiadm attach commands.
        # and detached_volume_iqns contains volumes which are detached from the oci instance
retval = 0
_did_something = False
# if detached_volume_iqns:
# print()
# print("Detached devices:")
# for iqn in detached_volume_iqns:
# # display_detached_iscsi_device(iqn, targets)
# display_iscsi_device(iqn, oci_sess)
# if args.apply or args.interactive:
# if args.yes:
# ans = True
# else:
# ans = _read_yn('Would you like to attach this device?',
# yn=True,
# waitenter=True,
# suppose_yes=False,
# default_yn=False)
# # ans = ask_yes_no("Would you like to attach this device?")
# if ans:
# try:
# _do_iscsiadm_attach(iqn, targets)
# _did_something = True
# except Exception as e:
# _logger.error('[%s] attachment failed: %s', iqn, str(e))
# retval = 1
if attach_failed:
_logger.info("Devices that could not be attached automatically:")
for iqn in list(attach_failed.keys()):
# display_detached_iscsi_device(iqn, targets, attach_failed)
display_iscsi_device(iqn, oci_sess)
_attach_user_name = None
_attach_user_passwd = None
_give_it_a_try = False
if args.apply or args.interactive:
if attach_failed[iqn] != 24:
# not authentication error
# if args.yes or ask_yes_no("Would you like to retry attaching this device?"):
if _read_yn('Would you like to retry attaching this device?',
yn=True,
waitenter=True,
suppose_yes=False,
default_yn=False):
_give_it_a_try = True
else:
# authentication error
# if args.yes or ask_yes_no("Would you like to configure this device?"):
if args.yes or _read_yn('Would you like to configure this device?',
yn=True,
waitenter=True,
suppose_yes=False,
default_yn=False):
_give_it_a_try = True
if oci_sess is not None:
oci_vols = oci_sess.find_volumes(iqn=iqn)
if len(oci_vols) != 1:
_logger.error('volume [%s] not found', iqn)
_give_it_a_try = False
_attach_user_name = oci_vols[0].get_user()
_attach_user_passwd = oci_vols[0].get_password()
else:
(_attach_user_name, _attach_user_passwd) = get_chap_secret(iqn)
if _attach_user_name is None:
                                _logger.error('Cannot retrieve chap credentials')
_give_it_a_try = False
if _give_it_a_try:
try:
_do_iscsiadm_attach(iqn, targets, _attach_user_name, _attach_user_passwd)
_did_something = True
except Exception as e:
_logger.error("Failed to configure device automatically: %s", str(e))
retval = 1
if _did_something:
ocid_refresh()
return retval
if args.command == 'create':
if len(system_disks) > max_volumes:
_logger.error("This instance reached the max_volumes(%s)", max_volumes)
return 1
try:
if bool(oci_sess):
do_create_volume(oci_sess,
size=args.size,
display_name=args.volume_name,
attach_it=args.attach_volume,
chap_credentials=args.chap,
mode=compat_mode)
else:
_logger.info('Unable to create volume, failed to create a session.')
return 1
except Exception as e:
_logger.debug('Volume creation has failed: %s', str(e), stack_info=True, exc_info=True)
_logger.error('Volume creation has failed: %s', str(e))
return 1
ocid_refresh()
return 0
if args.command == 'destroy':
# destroy command used to be for only one volume
        # changed the behavior to be more aligned with attach/detach commands
        # i.e.: taking more than one ocid and doing best effort
retval = 0
if not args.yes:
for ocid in args.ocids:
_logger.info("Volume : [%s]", ocid)
# if not ask_yes_no("WARNING: the volume(s) will be destroyed. This is irreversible. Continue?"):
if not _read_yn('WARNING: the volume(s) will be destroyed. This is irreversible. Continue?',
yn=True,
waitenter=True,
suppose_yes=False,
default_yn=False):
return 0
for ocid in args.ocids:
try:
if bool(oci_sess):
_logger.debug('Destroying [%s]', ocid)
#
# try to get the iqn from a detached volume
_iqn = _get_iqn_from_ocid(oci_sess, ocid)
do_destroy_volume(oci_sess, ocid)
_ = remove_chap_secret(ocid)
_logger.info("Volume [%s] is destroyed", ocid)
#
# remove iqn from ignore list.
if bool(_iqn):
if _iqn in detached_volume_iqns:
detached_volume_iqns.remove(_iqn)
write_cache(cache_content=detached_volume_iqns, cache_fname=__ignore_file)
_logger.debug('%s removed from cache.', _iqn)
else:
_logger.info('Unable to destroy volume, failed to create a session.')
retval = 1
except Exception as e:
_logger.debug('Volume [%s] deletion has failed: %s', ocid, str(e), stack_info=True, exc_info=True)
_logger.error('Volume [%s] deletion has failed: %s', ocid, str(e))
retval = 1
return retval
if args.command == 'detach':
retval = 0
for iqn in args.iqns:
if iqn in detached_volume_iqns:
_logger.error("Target [%s] is already detached", iqn)
retval = 1
continue
if iqn not in iscsiadm_session or 'device' not in iscsiadm_session[iqn]:
_logger.error("Target [%s] not found", iqn)
retval = 1
continue
_logger.debug('Unmounting the block volume')
if not unmount_device(iscsiadm_session, iqn, system_disks):
_logger.debug('Unmounting has failed')
if not args.force:
# if not ask_yes_no("Failed to unmount volume, Continue detaching anyway?"):
if not _read_yn('Failed to unmount volume, Continue detaching anyway?',
yn=True,
waitenter=True,
suppose_yes=False,
default_yn=False):
continue
else:
                    _logger.info('Unmount failed, force option selected, continue anyway.')
try:
if bool(oci_sess):
_logger.debug('Detaching [%s]', iqn)
do_detach_volume(oci_sess, iscsiadm_session, iqn, mode=compat_mode)
compat_info_message(gen_msg="Volume [%s] is detached." % iqn, mode=compat_mode)
detached_volume_iqns.append(iqn)
else:
_logger.info('Unable to detach volume, failed to create a session.')
retval = 1
except Exception as e:
_logger.debug('Volume [%s] detach has failed: %s', iqn, str(e), stack_info=True, exc_info=True)
_logger.error('Volume [%s] detach has failed: %s', iqn, str(e))
retval = 1
if retval == 0:
# compat_info_message(gen_msg="Updating detached volume cache file: remove %s" % iqn, mode=compat_mode)
# compat_info_message(gen_msg="Volume [%s] successfully detached." % iqn, mode=compat_mode)
write_cache(cache_content=detached_volume_iqns, cache_fname=__ignore_file)
_logger.debug('Trigger ocid refresh')
ocid_refresh()
return retval
if args.command == 'attach':
if len(system_disks) > max_volumes:
_logger.error("This instance reached the maximum number of volumes attached (%s)", max_volumes)
return 1
if bool(args.ocids):
iqnocid = args.ocids
elif bool(args.iqns):
iqnocid = args.iqns
else:
# should be trapped by argparse, one of those is required.
_logger.error('Missing iqns or ocids')
sys.exit(1)
retval = 0
for iqn in iqnocid:
_iqn_to_use = iqn
_save_chap_cred = False
if iqn in iscsiadm_session:
_logger.info("Target [%s] is already attached.", iqn)
continue
if _iqn_to_use.startswith(oci_volume_tag):
#
# ocid
_logger.debug('Given IQN [%s] is probably an ocid, attaching it', _iqn_to_use)
bs_volume = None
try:
if bool(oci_sess):
compat_info_message(compat_msg="Attaching iSCSI device.", mode=compat_mode)
#
# verify if volume is in the chap secrets cache
this_iqn, this_pw = get_iqn_from_chap_secrets_cache(_iqn_to_use)
_logger.debug('The cache: iqn %s pw %s', this_iqn, this_pw)
if this_iqn is not None or args.chap:
_logger.debug('Using chap secret')
bs_volume = _do_attach_oci_block_volume(oci_sess, _iqn_to_use, chap=True)
else:
_logger.debug('Not using chap secret.')
bs_volume = _do_attach_oci_block_volume(oci_sess, _iqn_to_use, chap=False)
compat_info_message(gen_msg='Volume [%s] is attached' % _iqn_to_use,
compat_msg='Result: command executed successfully',
mode=compat_mode)
# user/pass coming from volume itself
_attachment_username = bs_volume.get_user()
_attachment_password = bs_volume.get_password()
_iscsi_portal_ip = bs_volume.get_portal_ip()
_iqn_to_use = bs_volume.get_iqn()
if args.chap:
_save_chap_cred = True
else:
_logger.info('Unable to attach volume, failed to create a session.')
retval = 1
except Exception as e:
_logger.debug('Failed to attach volume [%s]: %s', _iqn_to_use, str(e),
stack_info=True,
exc_info=True)
_logger.error('Failed to attach volume [%s]: %s', _iqn_to_use, str(e))
retval = 1
continue
elif _iqn_to_use.startswith(iqn_tag):
#
# iqn
_logger.debug('Given IQN [%s] is probably an iqn, attaching it', _iqn_to_use)
#
# iqn is not in iscsiadm session ... might also not be in this_instance volume list..
this_ocid = | |
"""Unit tests for classes implemented in cygraph/graph.pyx
"""
import os
import pickle
import numpy as np
import pytest
import cygraph as cg
def test_constructor():
"""Tests initialization of a StaticGraph and DynamicGraph objects.
"""
graphs = []
# Smoke tests.
cg.graph()
vertex_lists = [['s', 'e'], [0, 1]]
for lst in vertex_lists:
for directed in [True, False]:
for static in [True, False]:
g = cg.graph(static=static, directed=directed, vertices=lst)
graphs.append(g)
# Exception-raising tests.
for directed in [True, False]:
for static in [True, False]:
with pytest.raises(TypeError):
# Non-hashable type vertices.
g = cg.graph(static=static, directed=directed,
vertices=[['s'], ['e']])
graphs.append(g)
# Generating from graph data structures.
adjacency_matrix = [[0, 1, 0, 0, 0, 0, 0],
[1, 0, 1, 0, 0, 0, 0],
[0, 1, 0, 1, 1, 1, 0],
[0, 0, 1, 0, 0, 0, 0],
[0, 0, 1, 0, 0, 1, 0],
[0, 0, 1, 0, 1, 0, 1],
[0, 0, 0, 0, 0, 1, 0]]
adjacency_list = [
[1],
[0, 2],
[1, 3, 4, 5],
[2],
[2, 5],
[2, 4, 6],
[5]
]
edges = [(0, 1), (1, 2), (2, 3), (2, 4), (2, 5), (4, 5), (5, 6)]
for static in [True, False]:
adj_list_graph = cg.graph(static=static, vertices=list(range(7)),
adjacency_list=adjacency_list)
if static:
np_adjacency_matrix = [[val if val else np.nan for val in row]
for row in adjacency_matrix]
np_adjacency_matrix = np.array(np_adjacency_matrix)
adj_mat_graph = cg.graph(static=static, vertices=list(range(7)),
adjacency_matrix=np_adjacency_matrix)
else:
adj_mat_graph = cg.graph(static=static, vertices=list(range(7)),
adjacency_matrix=[[val if val else None for val in row]
for row in adjacency_matrix])
for edge in edges:
assert adj_list_graph.has_edge(*edge)
assert adj_mat_graph.has_edge(*edge)
for edge in adj_mat_graph.edges:
assert edge[:-1] in edges
for edge in adj_list_graph.edges:
assert edge[:-1] in edges
graphs.append(adj_list_graph)
graphs.append(adj_mat_graph)
# Copying another graph.
for graph in graphs:
g = cg.graph(graph_=graph)
assert g.vertices == graph.vertices
assert g.edges == graph.edges
def test_edges():
"""Tests various edge-related methods.
Tests:
- add_edge
- get_children
- get_parents
- edges
- has_edge
- remove_edge
- adjacency_matrix
- adjacency_list
- add_edges
for StaticGraph and DynamicGraph classes.
"""
for static in [True, False]:
# Directed graph.
g = cg.graph(static=static, directed=True, vertices=['s', 'a', 'b', 'e'])
# add_edge method
g.add_edge('s', 'a', weight=0.0) # Make sure weight zero edges are tested.
g.add_edge('a', 's')
g.add_edge('a', 'b')
g.add_edge('b', 'e', weight=0.5)
with pytest.raises(ValueError):
g.add_edge('b', 'e', weight=1.0)
with pytest.raises(ValueError):
g.add_edge('d', 'f')
# set_edge_weight
g.set_edge_weight('b', 'e', weight=1.0)
with pytest.raises(ValueError):
g.set_edge_weight('e', 'b', weight=1.0)
# edges property
g_edges = g.edges
assert g_edges == {('s', 'a', 0.0), ('a', 'b', 1.0), ('b', 'e', 1.0), ('a', 's', 1.0)}
# has_edge
for edge in g_edges:
assert g.has_edge(edge[0], edge[1])
assert not g.has_edge('e', 'b')
assert not g.has_edge(1, 2)
# remove_edge
g.remove_edge('s', 'a')
with pytest.raises(ValueError):
g.remove_edge('sdaf', 'dsafsd')
with pytest.warns(Warning):
g.remove_edge('s', 'e')
assert not g.has_edge('s', 'a')
assert g.has_edge('a', 's')
# get_edge_weight
assert g.get_edge_weight('a', 'b') == 1.0
with pytest.raises(ValueError):
g.get_edge_weight('b', 'a')
# get_children
assert g.get_children('a') == {'s', 'b'}
with pytest.raises(ValueError):
g.get_children('d')
# get_parents
assert g.get_parents('e') == {'b'}
with pytest.raises(ValueError):
g.get_parents('d')
# add_edges
g.add_edges({('b', 'a', 2.0), ('e', 'b')})
assert g.get_edge_weight('b', 'a') == 2.0
assert g.get_edge_weight('e', 'b') == 1.0
with pytest.raises(ValueError):
g.add_edges({('s', 'a'), ('sdaf', 'dsafsd')})
assert not g.has_edge('s', 'a')
# Undirected graph.
g2 = cg.graph(static=static, directed=False, vertices=['s', 'a', 'b', 'e'])
# add_edge method
g2.add_edge('s', 'a', weight=0.0) # Make sure weight zero edges are tested.
g2.add_edge('a', 'b')
g2.add_edge('b', 'e', weight=0.5)
with pytest.raises(ValueError):
g2.add_edge('b', 'e', weight=1.0)
with pytest.raises(ValueError):
g2.add_edge('d', 'f')
# set_edge_weight
g2.set_edge_weight('e', 'b', weight=1.0)
with pytest.raises(ValueError):
g2.set_edge_weight('a', 'e', weight=1.0)
# edges property
g2_edges = g2.edges
assert len(g2_edges) == 3
for edge in g2_edges:
assert set(edge) in [{'s', 'a', 0.0}, {'a', 'b', 1.0}, {'b', 'e', 1.0}]
# has_edge
for edge in g2_edges:
assert g2.has_edge(edge[0], edge[1])
assert g2.has_edge('e', 'b')
assert not g2.has_edge(1, 2)
# remove_edge
g2.remove_edge('s', 'a')
with pytest.raises(ValueError):
g2.remove_edge('sdaf', 'dsafsd')
with pytest.warns(Warning):
g2.remove_edge('s', 'e')
assert not g2.has_edge('s', 'a')
assert not g2.has_edge('a', 's')
# get_edge_weight
assert g2.get_edge_weight('b', 'a') == 1.0
with pytest.raises(ValueError):
g2.get_edge_weight('d', 'e')
# get_children
assert g2.get_children('a') == {'b'}
with pytest.raises(ValueError):
g2.get_children('d')
# get_parents
assert g2.get_parents('a') == {'b'}
with pytest.raises(ValueError):
g2.get_parents('d')
# add_edges
g.remove_edge('e', 'b')
g.add_edges({('s', 'e', 2.0), ('e', 'b')})
assert g.get_edge_weight('s', 'e') == 2.0
assert g.get_edge_weight('e', 'b') == 1.0
with pytest.raises(ValueError):
g.add_edges({('s', 'a'), ('sdaf', 'dsafsd')})
assert not g.has_edge('s', 'a')
# adjacency_matrix and adjacency_list
g = cg.graph(static=static, directed=False, vertices=list(range(3)))
g.add_edge(0, 1)
g.add_edge(1, 2)
undirected_adj = [[0, 1, 0],
[1, 0, 1],
[0, 1, 0]]
if static:
assert (np.nan_to_num(g.adjacency_matrix) == undirected_adj).all()
else:
assert g.adjacency_matrix == \
[[None if not x else x for x in lst] for lst in undirected_adj]
assert g.adjacency_list == [[1], [0, 2], [1]]
g = cg.graph(static=static, directed=True, vertices=list(range(3)))
g.add_edge(0, 1)
g.add_edge(1, 2)
directed_adj = [[0, 1, 0],
[0, 0, 1],
[0, 0, 0]]
if static:
assert (np.nan_to_num(g.adjacency_matrix) == directed_adj).all()
else:
assert g.adjacency_matrix == \
[[None if not x else x for x in lst] for lst in directed_adj]
assert g.adjacency_list == [[1], [2], []]
def test_vertices():
"""Tests various vertex-related methods.
Tests methods:
- add_vertex
- has_vertex
- vertices
- remove_vertex
- add_vertices
for StaticGraph and DynamicGraph classes.
"""
for static in [True, False]:
g = cg.graph(static=static, directed=True)
# Adding to graph with no vertices.
g.add_vertex('s')
# Adding to graph with vertices.
g.add_vertex(1)
with pytest.raises(TypeError):
g.add_vertex([])
with pytest.raises(ValueError):
g.add_vertex('s')
# vertices attribute
assert g.vertices == ['s', 1]
# has_vertex
assert g.has_vertex('s')
assert g.has_vertex(1)
assert not g.has_vertex(2)
# remove_vertex
g.remove_vertex('s')
with pytest.raises(ValueError):
g.remove_vertex(2)
assert not g.has_vertex('s')
# add_vertices
g.add_vertices({'a', 'b', 's'})
assert g.has_vertex('a')
assert g.has_vertex('b')
assert g.has_vertex('s')
with pytest.raises(ValueError):
g.add_vertices({'c', 's'})
assert not g.has_vertex('c')
assert g.has_vertex('s')
def test_attributes():
"""Tests various edge and vertex attribute-related methods.
Tests:
- set_vertex_attribute
- get_vertex_attribute
- set_edge_attribute
- get_edge_attribute
for StaticGraph and DynamicGraph classes
"""
# Edge attributes.
for static in [True, False]:
# Directed graph.
g = cg.graph(static=static, directed=True, vertices=['a', 'b', 'c'])
g.add_edge('a', 'b')
# Setting attributes.
g.set_edge_attribute(('a', 'b'), key='key', val='val')
with pytest.raises(TypeError):
g.set_edge_attribute(('a', 'b'), key=[], val='val')
with pytest.raises(ValueError):
g.set_edge_attribute(('b', 'a'), key='key 2', val='val')
g.set_edge_attribute(('a', 'b'), key='key', val='new val')
# set_edge_attributes
g.set_edge_attributes(('a', 'b'), {'key1': 'val1', 'key2': 'val2'})
with pytest.raises(ValueError):
g.set_edge_attributes(('b', 'a'), {'key1': 'val1', 'key2': 'val2'})
# Getting attributes.
assert g.get_edge_attribute(('a', 'b'), key='key') == 'new val'
assert g.get_edge_attribute(('a', 'b'), key='key1') == 'val1'
assert g.get_edge_attribute(('a', 'b'), key='key2') == 'val2'
with pytest.raises(TypeError):
g.get_edge_attribute(('a', 'b'), key=[])
with pytest.raises(ValueError):
g.get_edge_attribute(('b', 'a'), key='key')
with pytest.raises(KeyError):
g.get_edge_attribute(('a', 'b'), key="this is not a key")
# Removing attributes.
with pytest.raises(ValueError):
g.remove_edge_attribute(('a', 'c'), key='key')
with pytest.raises(ValueError):
g.remove_edge_attribute(('b', 'a'), key='key')
with pytest.raises(KeyError):
g.remove_edge_attribute(('a', 'b'), key="this is not a key")
g.remove_edge_attribute(('a', 'b'), key='key')
with pytest.raises(KeyError):
g.get_edge_attribute(('a', 'b'), key='key')
# Undirected graph.
g2 = cg.graph(static=static, directed=False, vertices=['a', 'b', 'c'])
g2.add_edge('a', 'b')
# Setting attributes.
g2.set_edge_attribute(('a', 'b'), key='key', val='val')
with pytest.raises(TypeError):
g2.set_edge_attribute(('a', 'b'), key=[], val='val')
with pytest.raises(ValueError):
g2.set_edge_attribute(('a', 'c'), key='key 2', val='val')
g2.set_edge_attribute(('a', 'b'), key='key', val='new val')
# set_edge_attributes
g2.set_edge_attributes(('a', 'b'), {'key1': 'val1', 'key2': 'val2'})
# Getting attributes.
assert g2.get_edge_attribute(('a', 'b'), key='key') == 'new val'
assert g2.get_edge_attribute(('b', 'a'), key='key') == 'new val'
assert g2.get_edge_attribute(('a', 'b'), key='key1') == 'val1'
assert g2.get_edge_attribute(('a', 'b'), key='key2') == 'val2'
with pytest.raises(TypeError):
g2.get_edge_attribute(('a', 'b'), key=[])
with pytest.raises(ValueError):
g2.get_edge_attribute(('a', 'c'), key='key')
with pytest.raises(KeyError):
g2.get_edge_attribute(('a', 'b'), key="this is not a key")
# Removing attributes.
with pytest.raises(ValueError):
g2.remove_edge_attribute(('a', 'c'), key='key')
with pytest.raises(KeyError):
g2.remove_edge_attribute(('a', 'b'), key="this is not a key")
g2.remove_edge_attribute(('a', 'b'), key='key')
with pytest.raises(KeyError):
g2.get_edge_attribute(('a', 'b'), key='key')
with pytest.raises(KeyError):
g2.get_edge_attribute(('b', 'a'), key='key')
g2.set_edge_attribute(('a', 'b'), key='key', val='val')
g2.remove_edge_attribute(('b', 'a'), key='key')
with pytest.raises(KeyError):
g2.get_edge_attribute(('a', 'b'), key='key')
with pytest.raises(KeyError):
g2.get_edge_attribute(('b', 'a'), key='key')
# Vertex attributes.
for static in [True, False]:
for directed in [True, False]:
g = cg.graph(static=static,
directed=directed, vertices=['a', 'b', 'c'])
# Setting attributes
g.set_vertex_attribute('a', key='key', val='val')
with pytest.raises(TypeError):
g.set_vertex_attribute('a', key=[], val='val')
with pytest.raises(ValueError):
g.set_vertex_attribute('d', key='key', val='val')
# Getting attributes
assert g.get_vertex_attribute('a', key='key') == 'val'
with pytest.raises(TypeError):
g.get_vertex_attribute('a', key=[])
with pytest.raises(ValueError):
g.get_vertex_attribute('d', key='key')
with pytest.raises(KeyError):
g.get_vertex_attribute('a', key='this is not a key')
# Removing attributes
with pytest.raises(TypeError):
g.remove_vertex_attribute('a', key=[])
with pytest.raises(ValueError):
g.remove_vertex_attribute('d', key='key')
with pytest.raises(KeyError):
g.remove_vertex_attribute('a', key="this is not a key")
g.remove_vertex_attribute('a', 'key')
with pytest.raises(KeyError):
g.get_vertex_attribute('a', key='key')
def test_comparisons():
"""Tests comparison operations between graphs.
"""
# __eq__ and equals
for directed in [True, False]:
for static1 in [True, False]:
for static2 in [True, False]:
g1 = cg.graph(static=static1, directed=directed,
vertices=list(range(3)))
g2 = cg.graph(static=static2, directed=directed,
vertices=list(range(3)))
                with pytest.raises(ValueError):
                    g1 == g2
                with pytest.raises(ValueError):
                    g2 == g1
assert g1.equals(g2)
assert g2.equals(g1)
g1.add_edge(0, 1)
assert | |
<reponame>NREL/streamm-tools<filename>scripts/rdfg1.py
#! /usr/bin/env python
"""
Radial distribution code
length - Angstroms
mass - AMU
volume - Angstroms^3
# g_ij(r) = n_j(r_ij)/ (rho_j 4 pi r^2 dr )
g_ij(r) = n_j(r_ij)/ (rho_j 4/3 pi( r_out^3 - r_in^3)
rho_j = N_j / V_ave
g(r) = n(r) /( rho dV )
n(r) = 1/Ni sum_i^{N_i} sum_j^{N_j} \gamma( r - r_{ij})
g(r) = 1/( dV Ni ) sum_i^{N_i} [ sum_j^{N_j} \gamma( r - r_{ij}) / rho_j(i) ]
Regular density
g(r) = 1/( dV Ni ) sum_i^{N_i} [ sum_j^{N_j} \gamma( r - r_{ij}) / ( sum_j^{N_j} \gamma( allowed pair ij )/<V> ) ]
g(r) = <V>/( dV Ni ) sum_i^{N_i} [ sum_j^{N_j} \gamma( r - r_{ij}) / sum_j^{N_j} \gamma( pair ij ) ]
True density
g(r) = 1/( dV Ni ) sum_i^{N_i} [ sum_j^{N_j} \gamma( r - r_{ij}) / ( sum_j^{N_j} \gamma( allowed pair ij )/<V> ) ]
g(r) = <V>/( dV Ni ) sum_i^{N_i} [ sum_j^{N_j} \gamma( r - r_{ij}) / sum_j^{N_j} \gamma( allowed pair ij ) ]
Nj_i = sum_j^{N_j} \gamma( pair ij )
rdf_cnt_p = sum_f^{N_frames} sum_j^{N_j} \gamma( r - r_{ij})
sum_j^{N_j} \gamma( r - r_{ij}) = rdf_cnt_p/N_frames
"""
__author__ = "<NAME>"
__version__ = "0.3"
__email__ = "<EMAIL>"
__status__ = "Beta"
import os, os.path, sys , copy ,shutil, logging, math, json, csv
import numpy as np
from datetime import datetime
from optparse import OptionParser
from streamm import *
from MDAnalysis import *
from MDAnalysis.core.distances import * ##distance_array
def reduce_bins(bin_r,bin_r_nn):
#
# Reduce bins
#
for bin_index in range( len(bin_r)):
# Sum rdf_cnt of each bin on each processor
cnt = p.allReduceSum(bin_r[bin_index])
bin_r[bin_index] = cnt
cnt_nn = p.allReduceSum(bin_r_nn[bin_index])
bin_r_nn[bin_index] = cnt_nn
p.barrier() # Barrier for MPI_COMM_WORLD
return bin_r,bin_r_nn
def calc_rdf(N_i,N_j,bin_r,bin_r_nn,volumes,bin_size,rdf_tag,options,p):
'''
Calculate rdf results
n_ij(r_ij) = bined counts in (r_ij)
n_j (r_ij) = n_ij(r_ij)/N_i # Number of nieghbor atoms j
rho_j = N_j / V_ave # 1/A^3
g_ij(r) = n_j(r_ij)/ [rho_j 4/3 pi( r_out^3 - r_in^3)]
g(r) = <V>/( dV Ni ) sum_i^{N_i} [ sum_j^{N_j} \gamma( r - r_{ij}) / sum_j^{N_j} \gamma( pair ij ) ]
n_ij = \gamma( r - r_{ij})
dV = 4/3 pi( r_out^3 - r_in^3)
'''
rank = p.getRank()
size = p.getCommSize()
n_frames = len(volumes)
n_bins = len(bin_r)
total_cnts = np.sum( bin_r )
total_nn_cnts = np.sum( bin_r_nn )
box_vol_ave = np.mean(volumes)
cnt_sum_j = 0.0
nn_cnt_sum_j = 0.0
rdf = []
time_i = datetime.now()
dat_lines = "# Date: %s \n"%(time_i)
dat_lines +="# Frames: \n"
dat_lines +="# Initial %d \n" % (options.frame_o)
dat_lines +="# Step %d \n" % (options.frame_step)
dat_lines +="# Final %d \n" % (options.frame_step)
dat_lines +="# Nproc %d \n" % (size)
dat_lines +="# Tag %s \n"%(rdf_tag)
dat_lines +="# N_i %d \n"%(N_i)
dat_lines +="# N_j %d \n"%(N_j)
dat_lines +="# Frames %d \n"%(n_frames)
dat_lines +="# n_bins %d \n"%(n_bins)
dat_lines +="# total_cnts %f \n"%(total_cnts)
dat_lines +="# total_nn_cnts %f \n"%(total_nn_cnts)
dat_lines +="# box_vol_ave %s \n"%(str(box_vol_ave))
dat_lines +="# r - center position of bin [0] \n"
dat_lines +="# g_r_box - g(r) using average box volume [1] \n"
dat_lines +="# g_r_nn_box - first nearest neighbor g(r) using average box volume [2] \n"
dat_lines +="# nb_r - neighbor count of i at r [3]\n"
dat_lines +="# nb_sum - sum neighbor count of i < r [4]\n"
dat_lines +="# nn_nb_r - first nearest neighbor count of i at r [5]\n"
dat_lines +="# nn_nb_sum - sum first nearest neighbor count of i < r [6]\n"
dat_lines +="# r , g_r_box, g_r_nn_box, nb_r ,nb_sum ,nn_nb_r ,nn_nb_sum \n"
for bin_index in range(n_bins):
r_val = options.bin_size*float(bin_index)
dr_sq = r_val*r_val
r_in = r_val - options.bin_size*0.5
r_out = r_val + options.bin_size*0.5
dr_vol = 4.0*math.pi/3.0*( r_out**3 - r_in**3 )
cnt_r_frame = float( bin_r[bin_index] ) /float(n_frames)
nn_cnt_r_frame = float( bin_r_nn[bin_index] ) /float(n_frames)
# n(r) = 1/N_i sum_j^{N_j} \gamma( r - r_{ij})
nb_cnt = cnt_r_frame/float( N_i )
cnt_sum_j += nb_cnt
nn_nb_cnt = nn_cnt_r_frame/float( N_i )
nn_cnt_sum_j += nn_nb_cnt
# g(r) = <V> * n(r) / dV
g_r_box = box_vol_ave*nb_cnt/dr_vol/float( N_j )
g_r_nn_box = box_vol_ave*nn_nb_cnt/dr_vol/float( N_j )
dat_lines += " %12.4f %12.4f %12.4f %12.4f %12.4f %12.4f %12.4f \n"%(r_val,g_r_box, g_r_nn_box,nb_cnt,cnt_sum_j,nn_nb_cnt,nn_cnt_sum_j)
rdf.append([r_val,g_r_box, g_r_nn_box,nb_cnt,cnt_sum_j,nn_nb_cnt,nn_cnt_sum_j])
# Write data file
dat_file = rdf_tag + ".dat"
logger.info("file: output %s %s "%('rdf_dat',dat_file))
dat_out = open(dat_file,"w")
dat_out.write(dat_lines)
dat_out.close()
return rdf
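# Illustrative sketch (not part of the original script): the per-bin normalization used
# in calc_rdf() above, i.e. g(r) = <V> * n(r) / (N_j * dV) with shell volume
# dV = 4/3 pi (r_out^3 - r_in^3). The function and argument names here are hypothetical.
def _example_g_r_single_bin(cnt_r_frame, N_i, N_j, box_vol_ave, r_val, bin_size):
    ''' Return g(r) for a single bin given the per-frame pair count in that bin. '''
    r_in = r_val - bin_size * 0.5
    r_out = r_val + bin_size * 0.5
    dr_vol = 4.0 * math.pi / 3.0 * (r_out ** 3 - r_in ** 3)
    nb_cnt = cnt_r_frame / float(N_i)  # n(r): average number of neighbors of i at r
    return box_vol_ave * nb_cnt / dr_vol / float(N_j)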
def pdf_rdf(rdf_tag,rdf_i):
'''
Write pdf of the rdf
'''
import matplotlib
#matplotlib.use('pdf')
import matplotlib.pyplot as plt
from matplotlib import gridspec
n_pannels = 1
fig, ax = plt.subplots(n_pannels)
ax.set_ylabel(r'$g(r)$ ',fontsize=fontsz)
ax.plot(rdf_i[:,0],rdf_i[:,1])
#fig.subplots_adjust(hspace=0)
#fig.set_size_inches(8.0, 12.0)
fig.savefig("rdf_%s.pdf"%(rdf_tag),format='pdf')
plt.close(fig)
def singleframe_gpairs(struc_o,group_id,glist_i,glist_j,pairvalue_ij,bin_size,r_cut,rank):
'''
    Bin distances between particle pairs.
    Adds bin_size/2.0 to the cutoff and assumes the cutoff is evenly divisible by
    bin_size, so the binned range runs from -bin_size/2.0 to r_cut + bin_size/2.0.
'''
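    # For example, with bin_size = 0.1 a pair distance of 0.26 falls in
    # bin_index = int(round(0.26 / 0.1)) = 3, i.e. the bin centered at r = 0.3.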
r_cut += bin_size/2.0
N_i = len(glist_i)
N_j = len(glist_j)
#
groupset_i = struc_o.groupsets[group_id]
#
probabilityperpair = 1
#
# Calculate rdf relate values
n_bins = int(r_cut/bin_size) + 1
bin_r = np.zeros(n_bins)
bin_r_nn = np.zeros(n_bins) # Nearest neighbor count
bin_r_pp = np.zeros(n_bins)
volumes = []
rdf_frames = 0
npos_i = groupset_i.properties['cent_mass']
npos_j = groupset_i.properties['cent_mass']
npos_ij,nd_ij = struc_o.lat.delta_npos(npos_i,npos_j)
#
for ref_i in range(N_i):
a_i_hasnieghbor = False
r_ij_nn = r_cut # Nearest Neighbor distance
g_i = glist_i[ref_i]
for ref_j in range(N_j):
            if( pairvalue_ij[ref_i][ref_j] > 0.0 ):
dr_ij = nd_ij[ref_i,ref_j]
if( dr_ij <= r_cut ):
# bin distance =
bin_index = int( round( dr_ij / bin_size) )
#
# print " dist / bin / bin_sit", dist[ref_i,ref_j],bin_index,bin_size*float(bin_index)
#
bin_r[bin_index] += probabilityperpair
# Find nearest neighbor distance
a_i_hasnieghbor = True
if( dr_ij < r_ij_nn ):
r_ij_nn = dr_ij
                        p_ij_nn = pairvalue_ij[ref_i][ref_j]
#
if( close_contacts ):
g_j = glist_i[ref_j]
dr_pi_pj = groupset_i.dr_particles(g_i,g_j,r_cut)
bin_pp_index = int( round( dr_pi_pj / bin_size) )
bin_r_pp[bin_pp_index] += probabilityperpair
# Record nearest neighbor distance
if( a_i_hasnieghbor ):
bin_nn_index = int( round( r_ij_nn /bin_size) )
bin_r_nn[bin_nn_index] += p_ij_nn
return bin_r,bin_r_nn,bin_r_pp,volumes
def distbin_gpairs(struc_o,group_id,glist_i,glist_j,pairvalue_ij,gro_file,dcd_file,f_o,f_step,f_f,readall_f,bin_size,r_cut,rank):
'''
    Bin distances between particle pairs.
    Adds bin_size/2.0 to the cutoff and assumes the cutoff is evenly divisible by
    bin_size, so the binned range runs from -bin_size/2.0 to r_cut + bin_size/2.0.
'''
r_cut += bin_size/2.0
N_i = len(glist_i)
N_j = len(glist_j)
groupset_i = struc_o.groupsets[group_id]
probabilityperpair = 1
#
# Read in trajectory
#
universe = Universe(gro_file,dcd_file)
if( readall_f ):
f_f = len(universe.trajectory)
# Allocate distance matrix
dist_pp = np.zeros((N_i,N_j), dtype=np.float64)
# Calculate rdf relate values
n_bins = int(r_cut/bin_size) + 1
bin_r = np.zeros(n_bins)
bin_r_nn = np.zeros(n_bins) # Nearest neighbor count
volumes = []
rdf_frames = 0
for ts in universe.trajectory:
if( f_o <= ts.frame ):
if( ts.frame <= f_f ):
if( ts.frame%f_step == 0 ):
rdf_frames += 1
logger.info("Calculation %d frame %d/%d on proc %d" % (rdf_frames,ts.frame, f_f,rank))
volumes.append(ts.volume) # correct unitcell volume
box = ts.dimensions
coor_i = uni_i_p.coordinates()
npos_i = groupset_i.properties['cent_mass']
npos_j = groupset_i.properties['cent_mass']
npos_ij,nd_ij = struc_o.lat.delta_npos(npos_i,npos_j)
# distance_array(coor_i,coor_j, box, result=dist)
for ref_i in range(N_i):
a_i_hasnieghbor = False
r_ij_nn = r_cut # Nearest Neighbor distance
for ref_j in range(N_j):
# logger.debug(" Checking pair %d - %d "%(ref_i,ref_j))
if( pairvalue_ij[ref_i][ref_j] > 0.0 ):
if( dist[ref_i,ref_j] <= r_cut ):
# bin distance =
bin_index = int( round( dist[ref_i,ref_j] / bin_size) )
# print " dist / bin / bin_sit", dist[ref_i,ref_j],bin_index,bin_size*float(bin_index)
bin_r[bin_index] += probabilityperpair
# Find nearest neighbor distance
a_i_hasnieghbor = True
if( dist[ref_i,ref_j] < r_ij_nn ):
r_ij_nn = dist[ref_i,ref_j]
p_ij_nn = pairvalue_ij[ref_i][ref_j]
# Record nearest neighbor distance
if( a_i_hasnieghbor ):
bin_nn_index = int( round( r_ij_nn /bin_size) )
bin_r_nn[bin_nn_index] += p_ij_nn
# Free memory
del universe
del dist
return bin_r,bin_r_nn,volumes
def grdfs(tag,options,p):
#
# MPI setup
#
rank = p.getRank()
size = p.getCommSize()
#
if( rank == 0 ):
logging.info('Running on %d procs %s '%(size,datetime.now()))
logging.info('Reading structure files ')
#
logging.debug(" proc %d of %d "%(rank,size))
if( rank == 0 ):
logger.info("Reading in structure for %s from %s "%(tag,options.cply))
#
# Read cply file
#
strucC = buildingblock.Container()
strucC.read_cply(options.cply)
#
# | |
<gh_stars>10-100
'''
python main_cast.py -a resnet50 --cos --lr 0.5 --batch-size 256 --dist-url 'tcp://localhost:10005' <ImageFolder> --mask-dir <MaskFolder> --crit-gcam cosine --alpha-masked 3 --second-constraint "ref" --output-mask-region "ref" --num-gpus-per-machine 8 --print-freq 10 --workers 8'''
import argparse
import math
import os
import os.path as osp
import random
import shutil
import time
import warnings
import matplotlib.cm as cm
import copy
import sys
import subprocess
import psutil
import albumentations as alb
import scipy
import cv2
from loguru import logger
import numpy as np
from PIL import Image, ImageDraw
import torch
import torchvision
import torch.nn as nn
import torch.backends.cudnn as cudnn
import torch.multiprocessing as mp
from torch.utils.data import Dataset, DataLoader
import torchvision.transforms as transforms
import torchvision.models as models
from torch.utils.tensorboard import SummaryWriter
from torch.cuda.amp import GradScaler#, autocast
from apex.parallel import DistributedDataParallel as ApexDDP
from grad_cam import GradCAM
import moco.builder
from albumentations.pytorch.transforms import ToTensorV2
from moco.datasets import SaliencyConstrainedRandomCropping
from moco.utils.checkpointing import CheckpointManager
import moco.utils.distributed as dist
import pdb
model_names = sorted(
name
for name in models.__dict__
if name.islower() and not name.startswith("__") and callable(models.__dict__[name])
)
# fmt: off
parser = argparse.ArgumentParser(description="PyTorch ImageNet Training")
parser.add_argument("data", metavar="DIR",
help="path to serialized LMDB file")
parser.add_argument('--mask-dir', default='', type=str, metavar='PATH',
help='path where masks are available')
parser.add_argument("-a", "--arch", metavar="ARCH", default="resnet50",
choices=model_names,
help="model architecture: " +
" | ".join(model_names) +
" (default: resnet50)")
parser.add_argument("-j", "--workers", default=8, type=int, metavar="N",
help="number of data loading workers per GPU (default: 4)")
parser.add_argument("--epochs", default=200, type=int, metavar="N",
help="number of total epochs to run")
parser.add_argument("--start-epoch", default=0, type=int, metavar="N",
help="manual epoch number (useful on restarts)")
parser.add_argument("-b", "--batch-size", default=256, type=int,
metavar="N",
help="mini-batch size (default: 256), this is the total "
"batch size of all GPUs on the current node when "
"using Data Parallel or Distributed Data Parallel")
parser.add_argument("--lr", "--learning-rate", default=0.03, type=float,
metavar="LR", help="initial learning rate", dest="lr")
parser.add_argument("--schedule", default=[120, 160], nargs="*", type=int,
help="learning rate schedule (when to drop lr by 10x)")
parser.add_argument("--lr-cont-schedule-start", default=120, type=int,
help="continual learning rate decay schedule (when to start dropping lr by 0.94267)")
parser.add_argument("--lr-cont-decay", action="store_true",
help="True if you want to continuously decay learning rate")
parser.add_argument("--momentum", default=0.9, type=float, metavar="M",
help="momentum of SGD solver")
parser.add_argument("--wd", "--weight-decay", default=1e-4, type=float,
metavar="W", help="weight decay (default: 1e-4)",
dest="weight_decay")
parser.add_argument("-p", "--print-freq", default=10, type=int,
metavar="N", help="print frequency (default: 10)")
parser.add_argument("--resume", default="", type=str, metavar="PATH",
help="path to latest checkpoint (default: none)")
parser.add_argument("--min-areacover", default=0.2, type=float, help="min area cover")
parser.add_argument("--second-constraint", default="ref", type=str,
help="Second constraint possible values ['all', 'ref']")
parser.add_argument("--output-mask-region", default="ref", type=str,
help="output mask region possible values ['all', 'ref']")
parser.add_argument("--layer_name", default="layer4", type=str,
help="Which layer to compute gradcam")
parser.add_argument("--output-mask-size", default=7, type=int,
help="size of output_mask")
parser.add_argument("-e", "--same-encoder", dest="same_encoder", action="store_true",
help="compute gradcam on train set")
parser.add_argument("--alpha-masked", default=1, type=float,
help="gcam loss multiplier",
dest="alpha_masked")
parser.add_argument("--clip", default=2, type=float,
help="clip grad norm",
dest="clip")
parser.add_argument("--beta", default=1, type=float,
help="ssl loss multiplier",
dest="beta")
parser.add_argument("--crit-gcam", default="cosine", type=str,
help="criterion for gcam supervision [cosine]")
# Distributed training arguments.
parser.add_argument(
"--num-machines", type=int, default=1,
help="Number of machines used in distributed training."
)
parser.add_argument(
"--num-gpus-per-machine", type=int, default=8,
help="""Number of GPUs per machine with IDs as (0, 1, 2 ...). Set as
zero for single-process CPU training.""",
)
parser.add_argument(
"--machine-rank", type=int, default=0,
help="""Rank of the machine, integer in [0, num_machines). Default 0
for training with a single machine.""",
)
parser.add_argument("--dist-url", default="tcp://localhost:10001", type=str,
help="url used to set up distributed training")
parser.add_argument("--dist-backend", default="nccl", type=str,
help="distributed backend")
parser.add_argument("--seed", default=None, type=int,
help="seed for initializing training. ")
# moco specific configs:
parser.add_argument("--moco-dim", default=128, type=int,
help="feature dimension (default: 128)")
parser.add_argument("--moco-k", default=65536, type=int,
help="queue size; number of negative keys (default: 65536)")
parser.add_argument("--moco-m", default=0.999, type=float,
help="moco momentum of updating key encoder (default: 0.999)")
parser.add_argument("--moco-t", default=0.07, type=float,
help="softmax temperature (default: 0.07)")
# options for moco v2
parser.add_argument("--mlp", action="store_true",
help="use mlp head")
parser.add_argument("--cos", action="store_true",
help="use cosine lr schedule")
parser.add_argument(
"--serialization-dir", default="save/test_exp",
help="Path to a directory to serialize checkpoints and save job logs."
)
# fmt: on
def main(args: argparse.Namespace):
# This method will only work for GPU training (single or multi).
# Get the current device as set for current distributed process.
# Check `launch` function in `moco.utils.distributed` module.
device = torch.cuda.current_device()
# pdb.set_trace()
if args.seed is not None:
random.seed(args.seed)
np.random.seed(args.seed)
torch.manual_seed(args.seed)
cudnn.deterministic = True
warnings.warn(
"You have chosen to seed training. "
"This will turn on the CUDNN deterministic setting, "
"which can slow down your training considerably! "
"You may see unexpected behavior when restarting "
"from checkpoints."
)
# Remove default logger, create a logger for each process which writes to a
# separate log-file. This makes changes in global scope.
logger.remove(0)
if dist.get_world_size() > 1:
logger.add(
os.path.join(args.serialization_dir, f"log-rank{dist.get_rank()}.txt"),
format="{time} {level} {message}",
)
# Add a logger for stdout only for the master process.
if dist.is_master_process():
logger.add(
sys.stdout, format="<g>{time}</g>: <lvl>{message}</lvl>", colorize=True
)
logger.info(
f"Current process: Rank {dist.get_rank()}, World size {dist.get_world_size()}"
)
# create model
logger.info(f"=> creating model {args.arch}")
logger.info(f"args.mlp:{args.mlp}")
model = moco.builder.MoCo(
models.__dict__[args.arch],
args.moco_dim,
args.moco_k,
args.moco_m,
args.moco_t,
args.mlp,
).to(device)
args.batch_size = int(args.batch_size / dist.get_world_size())
# define loss function (criterion)
criterion = nn.CrossEntropyLoss().to(device)
# define loss function for the loss on gradcam
if args.crit_gcam == "cosine":
criterion_gcam = nn.CosineSimilarity(dim=1).to(device)
else:
raise NotImplementedError("Only cosine loss implemented.")
# criterion_gcam = nn.BCEWithLogitsLoss(reduction="mean").to(device)
# define optimizer
optimizer = torch.optim.SGD(
model.parameters(),
args.lr,
momentum=args.momentum,
weight_decay=args.weight_decay,
)
# Create checkpoint manager and tensorboard writer.
checkpoint_manager = CheckpointManager(
serialization_dir=args.serialization_dir,
filename_prefix=f"checkpoint_{args.arch}",
state_dict=model,
optimizer=optimizer,
)
tensorboard_writer = SummaryWriter(log_dir=args.serialization_dir)
if dist.is_master_process():
tensorboard_writer.add_text("args", f"```\n{vars(args)}\n```")
# optionally resume from a checkpoint
if args.resume:
args.start_epoch = CheckpointManager(state_dict=model).load(args.resume)
if args.same_encoder:
# if you want to use the same weights for query encoder and key encoder
model.encoder_k.load_state_dict(model.encoder_q.state_dict())
cudnn.benchmark = True
# Wrap model in ApexDDP if using more than one processes.
if dist.get_world_size() > 1:
dist.synchronize()
model = ApexDDP(model, delay_allreduce=True)
DatasetClass = SaliencyConstrainedRandomCropping
train_dataset = DatasetClass(
    args.data, args.mask_dir, 'train2017', args.second_constraint,
    args.output_mask_region, args.output_mask_size, args.min_areacover,
)
val_dataset = DatasetClass(
    args.data, args.mask_dir, 'val2017', args.second_constraint,
    args.output_mask_region, args.output_mask_size, args.min_areacover,
)
if dist.get_world_size() > 1:
train_sampler = torch.utils.data.distributed.DistributedSampler(train_dataset)
val_sampler = torch.utils.data.distributed.DistributedSampler(val_dataset)
else:
train_sampler = torch.utils.data.sampler.SequentialSampler(train_dataset)
val_sampler = torch.utils.data.sampler.SequentialSampler(val_dataset)
# create train and val dataloaders
train_loader = torch.utils.data.DataLoader(
train_dataset,
batch_size=args.batch_size,
shuffle=(train_sampler is None),
num_workers=args.workers,
pin_memory=True,
sampler=train_sampler,
drop_last=True,
)
val_loader = torch.utils.data.DataLoader(
val_dataset,
batch_size=args.batch_size,
shuffle=(val_sampler is None),
num_workers=args.workers,
pin_memory=True,
sampler=val_sampler,
drop_last=True,
)
# fmt: off
if dist.get_world_size() > 1:
candidate_layers_q = ["module.encoder_q." + args.layer_name]
else:
candidate_layers_q = ["encoder_q." + args.layer_name]
model.train()
# define instance of gradcam applied on query
gcam_q = GradCAM(model=model, candidate_layers=candidate_layers_q)
# define instance of CheckpointManager to save checkpoints after every epoch
checkpoint_manager = CheckpointManager(
serialization_dir=args.serialization_dir,
filename_prefix=f"checkpoint_{args.arch}",
state_dict=gcam_q.model,
optimizer=optimizer,
)
# start training
for epoch in range(args.start_epoch, args.epochs):
if dist.get_world_size() > 1:
train_sampler.set_epoch(epoch)
# at the start of every epoch, adjust the learning rate
lr = adjust_learning_rate(optimizer, epoch, args)
logger.info("Current learning rate is {}".format(lr))
# train for one epoch
CAST(
train_loader, gcam_q, model, criterion, criterion_gcam,
optimizer, tensorboard_writer, epoch, device, args,
)
if dist.is_master_process():
checkpoint_manager.step(epoch=epoch + 1)
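# The adjust_learning_rate call above is defined elsewhere in this codebase; the sketch
# below is only a hedged illustration of what such a schedule commonly looks like for
# MoCo-style pretraining (cosine annealing when args.cos is set, otherwise step decay at
# milestone epochs). The name, the args.schedule attribute, and the assumption that
# args.lr / args.epochs exist among the earlier argparse options are all illustrative,
# not the project's actual implementation.
def _adjust_learning_rate_sketch(optimizer, epoch, args):
    """Illustrative only: decay the learning rate in-place and return the value used."""
    import math  # local import keeps this sketch self-contained
    lr = args.lr
    if args.cos:
        # Cosine annealing from args.lr down to 0 over args.epochs.
        lr *= 0.5 * (1.0 + math.cos(math.pi * epoch / args.epochs))
    else:
        # Step decay: multiply by 0.1 at each milestone epoch (assumed attribute).
        for milestone in getattr(args, "schedule", []):
            lr *= 0.1 if epoch >= milestone else 1.0
    for param_group in optimizer.param_groups:
        param_group["lr"] = lr
    return lr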
# fmt: off
def train(
train_loader, model, criterion, optimizer,
tensorboard_writer, epoch, device, args
):
# fmt: on
batch_time_meter = AverageMeter("Time", ":6.3f")
data_time_meter = AverageMeter("Data", ":6.3f")
losses = AverageMeter("Loss", ":.4e")
top1 = AverageMeter("Acc@1", ":6.2f")
top5 = AverageMeter("Acc@5", ":6.2f")
progress = ProgressMeter(
len(train_loader),
[batch_time_meter, data_time_meter, losses, top1, top5],
prefix=f"Epoch: [{epoch}]",
)
# switch to train mode
model.train()
start_time = time.perf_counter()
for i, (images, _) in enumerate(train_loader):
data_time = time.perf_counter() - start_time
images[0] = images[0].to(device, non_blocking=True)
images[1] = images[1].to(device, non_blocking=True)
# compute output
output, target = model(im_q=images[0], im_k=images[1])
loss = criterion(output, target)
# acc1/acc5 are (K+1)-way contrast classifier accuracy
acc1, acc5 = accuracy(output, target, topk=(1, 5))
# compute gradient and do SGD step
optimizer.zero_grad()
# Perform dynamic scaling of loss to adjust for mixed precision.
if args.amp:
with amp.scale_loss(loss, optimizer) as scaled_loss:
scaled_loss.backward()
else:
loss.backward()
optimizer.step()
# measure elapsed time
batch_time = time.perf_counter() - start_time
# update all progress meters
data_time_meter.update(data_time)
batch_time_meter.update(batch_time)
losses.update(loss.item(), images[0].size(0))
top1.update(acc1[0], images[0].size(0))
top5.update(acc5[0], images[0].size(0))
if dist.is_master_process():
tensorboard_writer.add_scalars(
"pretrain",
{
"data_time": data_time,
"batch_time": batch_time,
"loss": loss,
"acc1": acc1,
"acc5": acc5,
},
epoch * len(train_loader) + i,
)
if i % args.print_freq == 0:
progress.display(i)
start_time = time.perf_counter()
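# The accuracy() helper used in train() and CAST() above is defined elsewhere in this
# repo. The version below is a hedged re-sketch of the usual top-k computation from the
# MoCo/ImageNet reference scripts, named differently to avoid shadowing the real one; the
# actual implementation here may differ in detail.
def _accuracy_sketch(output, target, topk=(1,)):
    """Compute top-k accuracy (in percent) for the given logits and integer targets."""
    with torch.no_grad():
        maxk = max(topk)
        batch_size = target.size(0)
        # Indices of the maxk largest logits per sample: (batch, maxk) -> (maxk, batch).
        _, pred = output.topk(maxk, dim=1, largest=True, sorted=True)
        pred = pred.t()
        correct = pred.eq(target.view(1, -1).expand_as(pred))
        res = []
        for k in topk:
            correct_k = correct[:k].reshape(-1).float().sum(0, keepdim=True)
            res.append(correct_k.mul_(100.0 / batch_size))
        return res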
# fmt: off
def CAST(
train_loader, gcam_q, model, criterion, criterion_gcam, optimizer,
tensorboard_writer, epoch, device, args
):
# fmt: on
# define progress meters for measuring time, losses and accuracies
batch_time_meter = AverageMeter("Time", ":6.3f")
data_time_meter = AverageMeter("Data", ":6.3f")
losses_total = AverageMeter("Loss_total", ":.4e")
losses_ssl = AverageMeter("Loss_ssl", ":.4e")
losses_gcam_masked = AverageMeter("Loss_gcam_masked", ":.4e")
top1 = AverageMeter("Acc@1", ":6.2f")
top5 = AverageMeter("Acc@5", ":6.2f")
progress = ProgressMeter(
len(train_loader),
[batch_time_meter, data_time_meter, losses_total, losses_ssl, losses_gcam_masked, top1, top5],
prefix=f"Epoch: [{epoch}]",
)
if dist.get_world_size() > 1:
target_layer_q = "module.encoder_q." + args.layer_name
else:
# single gpu
target_layer_q = "encoder_q." + args.layer_name
start_time = time.perf_counter()
for i, (images, paths, query_masks, masked_keys) in enumerate(train_loader):
data_time = time.perf_counter() - start_time
images[0] = images[0].to(device, non_blocking=True)
images[1] = images[1].to(device, non_blocking=True)
key_masked = masked_keys.to(device, non_blocking=True)
query_masks | |
Query Result
@annotate
def sqlite3_column_decltype(pStmt: sqlite3_stmt_p, n: c_int) -> c_char_p:
pass
# Evaluate An SQL Statement
@annotate
def sqlite3_step(pStmt: sqlite3_stmt_p) -> c_int:
pass
# Number of columns in a result set
@annotate
def sqlite3_data_count(pStmt: sqlite3_stmt_p) -> c_int:
pass
# Fundamental Datatypes
SQLITE_INTEGER = 1
SQLITE_FLOAT = 2
SQLITE_TEXT = 3
SQLITE_BLOB = 4
SQLITE_NULL = 5
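# Every binding in this module is declared through the `annotate` decorator defined
# earlier in the file (not shown in this excerpt). The sketch below is only an assumption
# about how such a decorator could wire the Python annotations onto the underlying ctypes
# function (argtypes from the parameters, restype from the return annotation, None for
# void); the library path and the fallback CDLL load are placeholders, and the real
# implementation may work differently.
def _annotate_sketch(func, _lib=None):
    """Illustrative only: bind func.__name__ from a loaded libsqlite3 handle."""
    import inspect
    from ctypes import CDLL
    lib = _lib or CDLL("libsqlite3.so")  # placeholder; the real module keeps its own handle
    cfunc = getattr(lib, func.__name__)
    hints = dict(func.__annotations__)
    restype = hints.pop("return", None)  # no annotation -> C void
    params = inspect.signature(func).parameters
    cfunc.argtypes = [hints[name] for name in params if name in hints]
    cfunc.restype = restype
    return cfunc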
# Result Values From A Query
@annotate
def sqlite3_column_blob(pStmt: sqlite3_stmt_p, iCol: c_int) -> c_void_p:
pass
@annotate
def sqlite3_column_double(pStmt: sqlite3_stmt_p, iCol: c_int) -> c_double:
pass
# @annotate
# def sqlite3_column_int(pStmt: sqlite3_stmt_p, iCol: c_int) -> c_int:
# pass
@annotate
def sqlite3_column_int64(pStmt: sqlite3_stmt_p, iCol: c_int) -> sqlite3_int64:
pass
@annotate
def sqlite3_column_text(pStmt: sqlite3_stmt_p, iCol: c_int) -> c_void_p:
pass
@annotate
def sqlite3_column_value(pStmt: sqlite3_stmt_p, iCol: c_int) -> sqlite3_value_p:
pass
@annotate
def sqlite3_column_bytes(pStmt: sqlite3_stmt_p, iCol: c_int) -> c_int:
pass
@annotate
def sqlite3_column_type(pStmt: sqlite3_stmt_p, iCol: c_int) -> c_int:
pass
# Destroy A Prepared Statement Object
@annotate
def sqlite3_finalize(pStmt: sqlite3_stmt_p) -> c_int:
pass
# Reset A Prepared Statement Object
@annotate
def sqlite3_reset(pStmt: sqlite3_stmt_p) -> c_int:
pass
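# A hedged usage sketch of the row-reading bindings above. It assumes a statement has
# already been prepared elsewhere (sqlite3_prepare_v2 is not shown in this excerpt) and
# uses the standard SQLite result codes SQLITE_ROW (100) and SQLITE_DONE (101), defined
# locally here in case they live elsewhere in the module. Error codes other than
# SQLITE_ROW simply end the loop in this sketch.
def _fetch_all_rows_sketch(pStmt):
    """Illustrative only: step a prepared statement and collect its rows as Python values."""
    from ctypes import string_at
    SQLITE_ROW, SQLITE_DONE = 100, 101
    rows = []
    while sqlite3_step(pStmt) == SQLITE_ROW:
        row = []
        for col in range(sqlite3_data_count(pStmt)):
            ctype = sqlite3_column_type(pStmt, col)
            if ctype == SQLITE_INTEGER:
                row.append(sqlite3_column_int64(pStmt, col))
            elif ctype == SQLITE_FLOAT:
                row.append(sqlite3_column_double(pStmt, col))
            elif ctype == SQLITE_NULL:
                row.append(None)
            elif ctype == SQLITE_TEXT:
                # Copy the UTF-8 bytes out of SQLite's buffer before they are invalidated.
                row.append(string_at(sqlite3_column_text(pStmt, col), sqlite3_column_bytes(pStmt, col)))
            else:  # SQLITE_BLOB
                row.append(string_at(sqlite3_column_blob(pStmt, col), sqlite3_column_bytes(pStmt, col)))
        rows.append(row)
    sqlite3_reset(pStmt)
    return rows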
# Create Or Redefine SQL Functions
@annotate
def sqlite3_create_function_v2(
db: c_void_p,
zFunctionName: c_char_p,
nArg: c_int,
eTextRep: c_int,
pApp: c_void_p,
xFunc: c_void_p,
xStep: c_void_p,
xFinal: c_void_p,
xDestroy: sqlite3_destructor_type) -> c_int:
pass
# @annotate
# def sqlite3_create_window_function(
# db: c_void_p,
# zFunctionName: c_char_p,
# nArg: c_int,
# eTextRep: c_int,
# pApp: c_void_p,
# xFunc: c_void_p,
# xStep: c_void_p,
# xFinal: c_void_p,
# xInverse: c_void_p,
# xDestroy: sqlite3_destructor_type) -> c_int:
# pass
# Text Encodings
SQLITE_UTF8 = 1 # IMP: R-37514-35566
SQLITE_UTF16LE = 2 # IMP: R-03371-37637
SQLITE_UTF16BE = 3 # IMP: R-51971-34154
SQLITE_UTF16 = 4 # Use native byte order
SQLITE_ANY = 5 # Deprecated
SQLITE_UTF16_ALIGNED = 8 # sqlite3_create_collation only
# Function Flags
SQLITE_DETERMINISTIC = 0x000000800
SQLITE_DIRECTONLY = 0x000080000
SQLITE_SUBTYPE = 0x000100000
# Obtaining SQL Values
@annotate
def sqlite3_value_blob(value: sqlite3_value_p) -> c_void_p:
pass
@annotate
def sqlite3_value_double(value: sqlite3_value_p) -> c_double:
pass
@annotate
def sqlite3_value_int(value: sqlite3_value_p) -> c_int:
pass
@annotate
def sqlite3_value_int64(value: sqlite3_value_p) -> sqlite3_int64:
pass
# @annotate
# def sqlite3_value_pointer(value: sqlite3_value_p, type: c_char_p) -> c_void_p:
# pass
@annotate
def sqlite3_value_text(value: sqlite3_value_p) -> c_void_p:
pass
@annotate
def sqlite3_value_bytes(value: sqlite3_value_p) -> c_int:
pass
@annotate
def sqlite3_value_type(value: sqlite3_value_p) -> c_int:
pass
@annotate
def sqlite3_value_numeric_type(value: sqlite3_value_p) -> c_int:
pass
# @annotate
# def sqlite3_value_nochange(value: sqlite3_value_p) -> c_int:
# pass
# Finding The Subtype Of SQL Values
# @annotate
# def sqlite3_value_subtype(value: sqlite3_value_p) -> c_uint:
# pass
# Copy And Free SQL Values
@annotate
def sqlite3_value_dup(value: sqlite3_value_p) -> sqlite3_value_p:
pass
@annotate
def sqlite3_value_free(value: sqlite3_value_p):
pass
# Obtain Aggregate Function Context
@annotate
def sqlite3_aggregate_context(context: sqlite3_context_p, nBytes: c_int) -> c_void_p:
pass
# User Data For Functions
@annotate
def sqlite3_user_data(context: sqlite3_context_p) -> c_void_p:
pass
# Database Connection For Functions
@annotate
def sqlite3_context_db_handle(context: sqlite3_context_p) -> sqlite3_p:
pass
# Function Auxiliary Data
@annotate
def sqlite3_get_auxdata(context: sqlite3_context_p, N: c_int) -> c_void_p:
pass
@annotate
def sqlite3_set_auxdata(context: sqlite3_context_p, N: c_int, data: c_void_p, destructor: sqlite3_destructor_type):
pass
# Setting The Result Of An SQL Function
@annotate
def sqlite3_result_blob64(context: sqlite3_context_p, value: c_void_p, nBytes: sqlite3_uint64, destructor: sqlite3_destructor_type):
pass
@annotate
def sqlite3_result_double(context: sqlite3_context_p, value: c_double):
pass
@annotate
def sqlite3_result_error(context: sqlite3_context_p, message: c_char_p, nBytes: c_int):
pass
@annotate
def sqlite3_result_error_toobig(context: sqlite3_context_p):
pass
@annotate
def sqlite3_result_error_nomem(context: sqlite3_context_p):
pass
@annotate
def sqlite3_result_error_code(context: sqlite3_context_p, error: c_int):
pass
@annotate
def sqlite3_result_int(context: sqlite3_context_p, value: c_int):
pass
@annotate
def sqlite3_result_int64(context: sqlite3_context_p, value: sqlite3_int64):
pass
@annotate
def sqlite3_result_null(context: sqlite3_context_p):
pass
@annotate
def sqlite3_result_text64(context: sqlite3_context_p, value: c_char_p, nBytes: sqlite3_uint64, destructor: sqlite3_destructor_type, encoding: c_ubyte):
pass
@annotate
def sqlite3_result_value(context: sqlite3_context_p, value: sqlite3_value_p):
pass
# @annotate
# def sqlite3_result_pointer(context: sqlite3_context_p, value: c_void_p, type: c_char_p, destructor: sqlite3_destructor_type):
# pass
@annotate
def sqlite3_result_zeroblob64(context: sqlite3_context_p, n: sqlite3_uint64) -> c_int:
pass
# Setting The Subtype Of An SQL Function
# @annotate
# def sqlite3_result_subtype(context: sqlite3_context_p, subtype: c_uint):
# pass
# Define New Collating Sequences
@annotate
def sqlite3_create_collation_v2(
db: sqlite3_p,
zName: c_char_p,
eTextRep: c_int,
pArg: c_void_p,
xCompare: c_void_p,
xDestroy: sqlite3_destructor_type) -> c_int:
pass
# Collation Needed Callbacks
@annotate
def sqlite3_collation_needed(db: sqlite3_p, pArg: c_void_p, callback: c_void_p) -> c_int:
pass
# Suspend Execution For A Short Time
@annotate
def sqlite3_sleep(ms: c_int) -> c_int:
pass
# Test For Auto-Commit Mode
@annotate
def sqlite3_get_autocommit(db: sqlite3_p) -> c_int:
pass
# Find The Database Handle Of A Prepared Statement
@annotate
def sqlite3_db_handle(pStmt: sqlite3_stmt_p) -> sqlite3_p:
pass
# Return The Filename For A Database Connection
@annotate
def sqlite3_db_filename(db: sqlite3_p, zDbName: c_char_p) -> c_char_p:
pass
# Determine if a database is read-only
@annotate
def sqlite3_db_readonly(db: sqlite3_p, zDbName: c_char_p) -> c_int:
pass
# Find the next prepared statement
@annotate
def sqlite3_next_stmt(db: sqlite3_p, pStmt: sqlite3_stmt_p) -> sqlite3_stmt_p:
pass
# Commit And Rollback Notification Callbacks
@annotate
def sqlite3_commit_hook(db: sqlite3_p, callback: c_void_p, pArg: c_void_p) -> c_void_p:
pass
@annotate
def sqlite3_rollback_hook(db: sqlite3_p, callback: c_void_p) -> c_void_p:
pass
# Data Change Notification Callbacks
@annotate
def sqlite3_update_hook(db: sqlite3_p, callback: c_void_p, pArg: c_void_p) -> c_void_p:
pass
# Enable Or Disable Shared Pager Cache
@annotate
def sqlite3_enable_shared_cache(onoff: c_int):
pass
# Attempt To Free Heap Memory
@annotate
def sqlite3_release_memory(n: c_int) -> c_int:
pass
# Free Memory Used By A Database Connection
@annotate
def sqlite3_db_release_memory(db: sqlite3_p) -> c_int:
pass
# Impose A Limit On Heap Size
@annotate
def sqlite3_soft_heap_limit64(n: sqlite3_int64) -> sqlite3_int64:
pass
# Extract Metadata About A Column Of A Table
@annotate
def sqlite3_table_column_metadata(
db: sqlite3_p, # Connection handle
zDbName: c_char_p, # Database name or NULL
zTableName: c_char_p, # Table name
zColumnName: c_char_p, # Column name
pzDataType: POINTER(c_char_p), # OUTPUT: Declared data type
pzCollSeq: POINTER(c_char_p), # OUTPUT: Collation sequence name
pNotNull: POINTER(c_int), # OUTPUT: True if NOT NULL constraint exists
pPrimaryKey: POINTER(c_int), # OUTPUT: True if column part of PK
pAutoinc: POINTER(c_int) # OUTPUT: True if column is auto-increment
) -> c_int:
pass
# Load An Extension
@annotate
def sqlite3_load_extension(
db: sqlite3_p, # Load the extension into this database connection
zFile: c_char_p, # Name of the shared library containing extension
zProc: c_char_p, # Entry point. Derived from zFile if 0
pzErrMsg: POINTER(c_char_p) # Put error message here if not 0
) -> c_int:
pass
# Enable Or Disable Extension Loading
@annotate
def sqlite3_enable_load_extension(db: sqlite3_p, onoff: c_int) -> c_int:
pass
# Automatically Load Statically Linked Extensions
@annotate
def sqlite3_auto_extension(xEntryPoint: c_void_p) -> c_int:
pass
# Cancel Automatic Extension Loading
@annotate
def sqlite3_cancel_auto_extension(xEntryPoint: c_void_p) -> c_int:
pass
# Reset Automatic Extension Loading
@annotate
def sqlite3_reset_auto_extension():
pass
# Structures used by the virtual table interface
class sqlite3_vtab(Structure):
pass
class sqlite3_index_info(Structure):
pass
class sqlite3_vtab_cursor(Structure):
pass
class sqlite3_module(Structure):
pass
# Virtual Table Object
sqlite3_module._fields_ = [
("iVersion", c_int),
("xCreate", c_void_p),
("xConnect", c_void_p),
("xBestIndex", c_void_p),
("xDisconnect", c_void_p),
("xDestroy", c_void_p),
("xOpen", c_void_p),
("xClose", c_void_p),
("xFliter", c_void_p),
("xNext", c_void_p),
("xEof", c_void_p),
("xColumn", c_void_p),
("xRowid", c_void_p),
("xUpdate", c_void_p),
("xBegin", c_void_p),
("xSync", c_void_p),
("xCommit", c_void_p),
("xRollback", c_void_p),
("xFindFunction", c_void_p),
("xRename", c_void_p),
# The methods above are in version 1 of the sqlite_module object. Those
# below are for version 2 and greater.
("xSavepoint", c_void_p),
("xRelease", c_void_p),
("xRollbackTo", c_void_p),
# The methods above are in versions 1 and 2 of the sqlite_module object.
# Those below are for version 3 and greater.
("xShadowName", c_void_p),
]
# Virtual Table Indexing Information
class sqlite3_index_constraint(Structure):
_fields_ = [
("iColumn", c_int), # Column constrained. -1 for ROWID
("op", c_ubyte), # Constraint operator
("usable", c_ubyte), # True if this constraint is usable
("iTermOffset", c_int), # Used internally - xBestIndex should ignore
]
class sqlite3_index_orderby(Structure):
_fields_ = [
("iColumn", c_int), # Column number
("desc", c_ubyte), # True for DESC. False for ASC
]
class sqlite3_index_constraint_usage(Structure):
_fields_ = [
("argvIndex", c_int), # if >0, constraint is part of argv to xFilter
("omit", c_ubyte), # Do not code a test for this constraint
]
sqlite3_index_info._fields_ = [
# Inputs
("nConstraint", c_int), # Number of entries in aConstraint
("aConstraint", POINTER(sqlite3_index_constraint)), # Table of WHERE clause constraints
("nOrderBy", c_int), # Number of terms in the ORDER BY clause
("aOrderBy", POINTER(sqlite3_index_orderby)), # The ORDER BY clause
# Outputs
("aConstraintUsage", POINTER(sqlite3_index_constraint_usage)),
("idxNum", c_int), # Number used to identify the index
("idxStr", c_char_p), # String, possibly obtained from sqlite3_malloc
("needToFreeIdxStr", c_int), # Free idxStr using sqlite3_free() if true
("orderByConsumed", c_int), # True if output is already ordered
("estimatedCost", c_double), # Estimated cost of using this index
# Fields below are only available in SQLite 3.8.2 and later
("estimatedRows", sqlite3_int64), # Estimated number of rows returned
# Fields below are only available in SQLite 3.9.0 and later
("idxFlags", c_int), # Mask of SQLITE_INDEX_SCAN_* flags
# Fields below are only available in SQLite 3.10.0 and later
("colUsed", sqlite3_uint64), # Input: Mask of columns used by statement
]
# Virtual Table Scan Flags
SQLITE_INDEX_SCAN_UNIQUE = 1 # Scan visits at most 1 row
# Virtual Table Constraint Operator Codes
SQLITE_INDEX_CONSTRAINT_EQ = 2
SQLITE_INDEX_CONSTRAINT_GT = 4
SQLITE_INDEX_CONSTRAINT_LE = 8
SQLITE_INDEX_CONSTRAINT_LT = 16
SQLITE_INDEX_CONSTRAINT_GE = 32
SQLITE_INDEX_CONSTRAINT_MATCH = 64
SQLITE_INDEX_CONSTRAINT_LIKE = 65
SQLITE_INDEX_CONSTRAINT_GLOB = 66
SQLITE_INDEX_CONSTRAINT_REGEXP = 67
SQLITE_INDEX_CONSTRAINT_NE = 68
SQLITE_INDEX_CONSTRAINT_ISNOT = 69
SQLITE_INDEX_CONSTRAINT_ISNOTNULL = 70
SQLITE_INDEX_CONSTRAINT_ISNULL = 71
SQLITE_INDEX_CONSTRAINT_IS = 72
SQLITE_INDEX_CONSTRAINT_FUNCTION = 150
# Register A Virtual Table Implementation
@annotate
def sqlite3_create_module(
db: sqlite3_p, # SQLite connection to register module with
zName: c_char_p, # Name of the module
p: POINTER(sqlite3_module), # Methods for the module
pClientData: c_void_p # Client data for xCreate/xConnect
) -> c_int:
pass
@annotate
def sqlite3_create_module_v2(
db: sqlite3_p, # SQLite connection to register module with
zName: c_char_p, # Name of the module
p: POINTER(sqlite3_module), # Methods for the module
pClientData: c_void_p, # Client data for xCreate/xConnect
xDestroy: sqlite3_destructor_type # Module destructor function
) -> c_int:
pass
# Remove Unnecessary Virtual Table Implementations
def sqlite3_drop_modules(
db: sqlite3_p, # Remove modules from this connection
azKeep: c_char_p # | |
i + 2
break
elif i == len(mms_s_dist) - 1:
k = 4
else:
k = 2
# k Confirmation : Comparing k.cluster_centers_ dist #
while True:
km = KMeans(n_clusters=k)
km = km.fit(inter_data)
if k <= 2:
break
cluster_centroids = km.cluster_centers_[:, [1]]
# print('cluster_centroids :', cluster_centroids)
error_exist = False
for i in range(len(cluster_centroids) - 1):
for j in range(i + 1, len(cluster_centroids)):
if abs(cluster_centroids[i] - cluster_centroids[j]) < 0.05:
error_exist = True
if error_exist:
k -= 1
else:
break
print('k is ', k)
predict_inter = km.predict(inter_data)
print(predict_inter)
keys = list(range(k))
for rm in predict_inter[-2:]:
keys.remove(rm)
print('keys :', keys)
# Within each of these keys, find the centroid of the two closest intersection pairs. #
centroid_inters = list()
closest_inters = list()
temp_black = black_plane2.copy()
for key in keys:
temp_inter_left = list()
temp_inter_right = list()
for pred_key, inter_point in zip(predict_inter[:-2], intersections_[:-2]):
if key == pred_key:
temp_inter_left.append(inter_point[:2])
temp_inter_right.append(inter_point[2:])
# else:
# cv2.circle(black_plane2, (int(inter_point[0]), int(inter_point[1])), 5, (255, 0, 255), -1)
# plt.imshow(temp_black)
# plt.show()
# print('len(temp_inter_left) :', len(temp_inter_left))
# print('len(temp_inter_right) :', len(temp_inter_right))
min_dist = close_thr
closest_p = None
closest_inter = None
for ix, iy in temp_inter_left:
for jx, jy in temp_inter_right:
dist = math.hypot(jx - ix, jy - iy)
# print(dist)
if dist < min_dist:
min_dist = dist
closest_p = ((ix + jx) / 2, (iy + jy) / 2)
closest_inter = [ix, iy, jx, jy]
# closest_inter = (ix, iy, jx, jy)
# print('min_dist :', min_dist)
if closest_p:
centroid_inters.append(closest_p)
closest_inters.append(closest_inter)
# cv2.circle(black_plane, (int(closest_p[0]), int(closest_p[1])), 5, (0, 255, 0), -1)
# The closest_inters appended here must always keep two entries. #
# However, if there is only one intersection, it is sent in a slightly different format, #
# with the border intersection tucked in, to avoid confusion when referring to the middle section later. #
# vlines that have no intersection at all are dropped. #
if len(closest_inters) != 0:
if len(closest_inters) == 1:
# check ceil / floor type inter #
# ceil condition #
if (closest_inters[0][1] + closest_inters[0][3]) / 2 < (1 / 3) * org_np.shape[0]:
opposite_inters = r_lower
else:
opposite_inters = r
closest_inters = [closest_inters[0] + list(opposite_inters)]
# print('closest_inters :', closest_inters)
all_centroid_inters.append(centroid_inters)
all_closest_inters.append(closest_inters)
h_intersections = list()
v_border = line((0, 0), (0, skl_copy.shape[0]))
v_border_lower = line((skl_copy.shape[1], 0), (skl_copy.shape[1], skl_copy.shape[0]))
for h_line in h_lines:
# Find Intersection between v_line, h_line #
vh_intersections_x = list()
vh_intersections_y = list()
hline = line(h_line[:2], h_line[2:])
for reg_x, reg_y in zip(reg_xs, reg_ys):
vline = line((reg_x[0], reg_y[0]), (reg_x[1], reg_y[1]))
# Extract only x - coordination #
vh_intersections_x.append(intersection2(vline, hline)[0])
vh_intersections_y.append(intersection2(vline, hline)[1])
# h_x = np.array([h_line[0], h_line[2]])
# h_y = np.array([h_line[1], h_line[3]])
# ex_h_x, ex_h_y = extended(h_x, h_y, 500)
# ex_h_line = line((int(h_x[0]), int(h_y[0])), (int(h_x[1]), int(h_y[1])))
r = intersection2(v_border, hline)
r_lower = intersection2(v_border_lower, hline)
vh_intersections_x.append(r[0])
vh_intersections_y.append(r[1])
vh_intersections_x.append(r_lower[0])
vh_intersections_y.append(r_lower[1])
sorted_vh_inter_x = sorted(vh_intersections_x)
# print('vh_intersections_x :', vh_intersections_x)
# print('sorted_vh_inter_x :', sorted_vh_inter_x)
center_h_x = (h_line[0] + h_line[2]) / 2
# print('center_h_x :', center_h_x)
# Find the intersections on the hline and connect them within that range. #
for i in range(1, len(sorted_vh_inter_x)):
if sorted_vh_inter_x[i - 1] <= center_h_x <= sorted_vh_inter_x[i]:
lx, ly = sorted_vh_inter_x[i - 1], vh_intersections_y[vh_intersections_x.index(sorted_vh_inter_x[i - 1])]
rx, ry = sorted_vh_inter_x[i], vh_intersections_y[vh_intersections_x.index(sorted_vh_inter_x[i])]
# print('lx, ly, rx, ry :', lx, ly, rx, ry)
# The lx, ly, rx, ry here are close_p, not the lx, ly split into left and right. #
# Only hlines that produce refined intersections should be used. #
for inters in all_closest_inters:
for inter in inters:
if lx in inter or rx in inter:
h_intersections.append((lx, ly, rx, ry))
cv2.line(org_np2, (int(lx), int(ly)), (int(rx), int(ry)), (0, 0, 255), 1, cv2.LINE_AA)
plt.imshow(org_np2)
plt.show()
# Partition based on the vlines -> first find the leftmost / rightmost vlines and their regions, then work from there #
# Split the regions per vline #
h_intersections = list(set(h_intersections))
print('np.array(all_closest_inters).shape :', np.array(all_closest_inters).shape)
all_closest_inters = np.array(all_closest_inters)
print('len(all_closest_inters) :', len(all_closest_inters))
center_xs = list()
for inters in all_closest_inters:
# If all_closest_inters has not been sorted, inters is of type list() #
sum_x = 0
for inter in inters:
sum_x += inter[0] + inter[2]
center_xs.append(sum_x / len(inters) * 2)
sorted_center_xs = sorted(center_xs)
sorted_index = list()
for center_x in sorted_center_xs:
# print('center_x :', center_x)
sorted_index.append(center_xs.index(center_x))
# print('sorted_index :', sorted_index)
# Sort all_closest_inters in the order given by sorted_center_xs. #
# Extract the index list and, in a for loop, pull each inters out of all_closest_inters
# and append it to sorted_all_... #
sorted_all_closest_inters = list()
sorted_all_centroid_inters = list()
for s_index in sorted_index:
sorted_all_closest_inters.append(all_closest_inters[s_index])
sorted_all_centroid_inters.append(all_centroid_inters[s_index])
all_closest_inters = sorted_all_closest_inters
all_centroid_inters = sorted_all_centroid_inters
four_inters_list = list()
for inters_i, inters in enumerate(all_closest_inters):
# print('inters :', inters)
inter_x = np.array(inters)[:, [0, 2]]
inter_y = np.array(inters)[:, [1, 3]]
# Working both sides of each vline removes the need to repeat the len(vline) = 1 case. #
iter = False
while True:
four_inters = list()
find_pair = True
centroid_inters = all_centroid_inters[inters_i]
if not iter:
# session to the right of the vline #
# coordinates on the right side of the vline #
final_xs = inter_x[:, [1]].reshape(-1, )
final_ys = inter_y[:, [1]].reshape(-1, )
# print(final_xs)
# four_inters.append([final_xs[0], final_ys[0]])
four_inters.append(centroid_inters[0])
# print(four_inters)
# If there is one intersection and this is the rightmost vline, add the border intersection. #
# If it is not the rightmost and the current vline's intersection is not at the same position as the next vline's, do a parallel copy; #
# if the current intersection is missing and the next one is missing too, use the border intersection; #
# if both exist, just add them. #
# border inter parallel copy #
if len(inters[0]) == 6:
print('len(inter[0]) == 6')
# if this is the rightmost vline #
if inters_i == len(all_closest_inters) - 1:
print('inters_i == len(all_closest_inters) - 1')
four_inters.append(inters[0][-2:])
four_inters.append([org_np.shape[1], inters[0][-1]])
else:
find_pair = False
next_inters = np.array(all_closest_inters[inters_i + 1])
next_centroid_inters = np.array(all_centroid_inters[inters_i + 1])
print(next_centroid_inters)
four_inters.append(next_centroid_inters[0])
if len(next_inters) == 2:
print('len(next_inters) == 2')
# parallel-copy the missing part #
four_inters.append(next_centroid_inters[1])
# 1. Determine where the missing part is. #
# 2. Determine the index of the part to copy. #
if inters[0][-1] == 0: # -> there is no ceiling intersection
print(type(next_inters))
if np.mean(next_inters[[0], [1, 3]]) < np.mean(next_inters[[1], [1, 3]]):
y_in = np.mean(next_inters[[0], [1, 3]])
else:
y_in = np.mean(next_inters[[1], [1, 3]])
else: # -> there is no floor intersection
if np.mean(next_inters[[0], [1, 3]]) < np.mean(next_inters[[1], [1, 3]]):
y_in = np.mean(next_inters[[1], [1, 3]])
else:
y_in = np.mean(next_inters[[0], [1, 3]])
x = (inters[0][0], inters[0][2])
y = (inters[0][1], inters[0][3])
p = np.polyfit(y, x, deg=1)
x_out = np.poly1d(p)(y_in)
four_inters.append([x_out, y_in])
else:
if inters[0][-1] == next_inters[0][-1]:
print('inters[0][-1] == next_inters[0][-1]')
# use the border intersection for the missing part #
four_inters.append(inters[0][-2:])
four_inters.append(next_inters[0][-2:])
else: # intersections at different positions
# parallel-copy the missing part #
x = (inters[0][0], inters[0][2])
y = (inters[0][1], inters[0][3])
p = np.polyfit(y, x, deg=1)
y_in = np.mean(next_inters[0, [1, 3]])
x_out = np.poly1d(p)(y_in)
four_inters.append([x_out, y_in])
x = (next_inters[0][0], next_inters[0][2])
y = (next_inters[0][1], next_inters[0][3])
p = np.polyfit(y, x, deg=1)
y_in = np.mean(inters[0, [1, 3]])
x_out = np.poly1d(p)(y_in)
four_inters.append([x_out, y_in])
# len vline inters = 2 #
else:
# four_inters.append([final_xs[1], final_ys[1]])
four_inters.append(centroid_inters[1])
# if this is the rightmost vline #
if inters_i == len(all_closest_inters) - 1:
print('inters_i == len(all_closest_inters) - 1')
else:
find_pair = False
inters = np.array(inters)
next_inters = np.array(all_closest_inters[inters_i + 1])
next_centroid_inters = np.array(all_centroid_inters[inters_i + 1])
four_inters.append(next_centroid_inters[0])
if len(next_inters) == 2:
print('len(next_inters) == 2')
four_inters.append(next_centroid_inters[1])
else:
# 1. Determine where the missing part is. #
# 2. Determine the index of the part to copy. #
if next_inters[0][-1] == 0: # -> there is no ceiling intersection
print('next_inters[0] :', next_inters[0])
print('type(next_inters) :', type(next_inters))
inters = np.array(inters)
if np.mean(inters[[0], [1, 3]]) < np.mean(inters[[1], [1, 3]]):
y_in = np.mean(inters[[0], [1, 3]])
else:
y_in = np.mean(inters[[1], [1, 3]])
else: # -> there is no floor intersection
if np.mean(inters[[0], [1, 3]]) < np.mean(inters[[1], [1, 3]]):
y_in = np.mean(inters[[1], [1, 3]])
else:
y_in = np.mean(inters[[0], [1, 3]])
x = (next_inters[0][0], next_inters[0][2])
y = (next_inters[0][1], next_inters[0][3])
p = np.polyfit(y, x, deg=1)
x_out = np.poly1d(p)(y_in)
four_inters.append([x_out, y_in])
# Only for i = 0 do we also refer to the left; all other vlines only to the right. #
else:
# For every intersection distributed along a single vline, a pair must be found. #
# coordinates on the left side of the vline #
final_xs = inter_x[:, [0]].reshape(-1, )
final_ys = inter_y[:, [0]].reshape(-1, )
# print(final_xs)
four_inters.append(centroid_inters[0])
# print(four_inters)
# If there is only one intersection, add the border intersection. #
# border inter parallel copy #
if len(inters[0]) == 6:
print('inters[0][-2:] :', inters[0][-2:])
four_inters.append(inters[0][-2:])
four_inters.append([0, inters[0][-1]])
else:
four_inters.append(centroid_inters[1])
# print(four_inters)
print('four_inters :', four_inters)
print('h_intersections :', h_intersections)
# Find intersection pairs by h_intersections #
if find_pair:
# This only applies to the leftmost / rightmost vline =>
[])
self.assertEqual(ss.logger.exception_calls, [])
self.assertEqual(ss.logger.txn, None)
def test_log_request_additional_info_and_headers(self):
env = self._log_request_build()
env['brim.log_info'] = ['test:', 'one', 'two']
env['HTTP_CONTENT_TYPE'] = 'text/plain'
ss = self._log_request_execute(env, log_headers=True)
self.assertEqual(ss.bucket_stats.get(0, 'request_count'), 1)
self.assertEqual(ss.bucket_stats.get(0, 'status_2xx_count'), 1)
self.assertEqual(ss.bucket_stats.get(0, 'status_200_count'), 1)
self.assertEqual(ss.bucket_stats.get(0, 'status_201_count'), 0)
self.assertEqual(ss.bucket_stats.get(0, 'status_3xx_count'), 0)
self.assertEqual(ss.bucket_stats.get(0, 'status_4xx_count'), 0)
self.assertEqual(ss.bucket_stats.get(0, 'status_5xx_count'), 0)
self.assertEqual(ss.logger.debug_calls, [])
self.assertEqual(ss.logger.info_calls, [])
self.assertEqual(ss.logger.notice_calls, [(
'- - - - 20120223T225619Z GET /path HTTP/1.1 200 10 - - - abcdef '
'2.12000 - - - test: one two headers: Content-Type:text/plain',)])
self.assertEqual(ss.logger.error_calls, [])
self.assertEqual(ss.logger.exception_calls, [])
self.assertEqual(ss.logger.txn, None)
def test_capture_exception(self):
ss = self._class(FakeServer(output=True), 'test')
ss.logger = FakeLogger()
ss.worker_id = 123
ss._capture_exception(*exc_info())
self.assertEqual(ss.logger.debug_calls, [])
self.assertEqual(ss.logger.info_calls, [])
self.assertEqual(ss.logger.notice_calls, [])
self.assertEqual(
ss.logger.error_calls,
[("UNCAUGHT EXCEPTION: wid:123 None ['None']",)])
self.assertEqual(ss.logger.exception_calls, [])
ss.logger = FakeLogger()
try:
raise Exception('test')
except Exception:
ss._capture_exception(*exc_info())
self.assertEqual(ss.logger.debug_calls, [])
self.assertEqual(ss.logger.info_calls, [])
self.assertEqual(ss.logger.notice_calls, [])
self.assertEqual(len(ss.logger.error_calls), 1)
self.assertEqual(len(ss.logger.error_calls[0]), 1)
e = ss.logger.error_calls[0][0]
self.assertTrue(e.startswith(
"UNCAUGHT EXCEPTION: wid:123 Exception: test ['Traceback (most "
"recent call last):', ' File "))
self.assertTrue(e.endswith(
'\', " raise Exception(\'test\')", \'Exception: test\']'))
self.assertEqual(ss.logger.exception_calls, [])
def test_capture_stdout(self):
ss = self._class(FakeServer(output=True), 'test')
ss.logger = FakeLogger()
ss.worker_id = 123
ss._capture_stdout('one\ntwo three\nfour\n')
self.assertEqual(ss.logger.debug_calls, [])
self.assertEqual(ss.logger.info_calls, [
('STDOUT: wid:123 one',), ('STDOUT: wid:123 two three',),
('STDOUT: wid:123 four',)])
self.assertEqual(ss.logger.notice_calls, [])
self.assertEqual(ss.logger.error_calls, [])
self.assertEqual(ss.logger.exception_calls, [])
def test_capture_stderr(self):
ss = self._class(FakeServer(output=True), 'test')
ss.logger = FakeLogger()
ss.worker_id = 123
ss._capture_stderr('one\ntwo three\nfour\n')
self.assertEqual(ss.logger.debug_calls, [])
self.assertEqual(ss.logger.info_calls, [])
self.assertEqual(ss.logger.notice_calls, [])
self.assertEqual(ss.logger.error_calls, [
('STDERR: wid:123 one',), ('STDERR: wid:123 two three',),
('STDERR: wid:123 four',)])
self.assertEqual(ss.logger.exception_calls, [])
def test_clone_env(self):
ss = self._class(FakeServer(output=True), 'test')
newenv = ss.clone_env({
'brim': 1,
'brim.json_dumps': 2,
'brim.json_loads': 3,
'brim.logger': 4,
'brim.stats': 5,
'brim.txn': 6,
'SERVER_NAME': 7,
'SERVER_PORT': 8,
'SERVER_PROTOCOL': 9,
'OTHER': 10,
'REQUEST_PATH': 'request_path'})
self.assertEqual(newenv.get('brim'), 1)
self.assertEqual(newenv.get('brim.json_dumps'), 2)
self.assertEqual(newenv.get('brim.json_loads'), 3)
self.assertEqual(newenv.get('brim.logger'), 4)
self.assertEqual(newenv.get('brim.stats'), 5)
self.assertEqual(newenv.get('brim.txn'), 6)
self.assertEqual(newenv.get('SERVER_NAME'), 7)
self.assertEqual(newenv.get('SERVER_PORT'), 8)
self.assertEqual(newenv.get('SERVER_PROTOCOL'), 9)
self.assertEqual(newenv.get('OTHER'), None)
self.assertEqual(newenv.get('HTTP_REFERER'), 'request_path')
self.assertEqual(newenv.get('HTTP_USER_AGENT'), 'clone_env')
self.assertEqual(len(newenv), 11)
def test_get_response(self):
ss = self._class(FakeServer(output=True), 'test')
ss.logger = FakeLogger()
ss._parse_conf(Conf({}))
ss.worker_id = 0
ss.bucket_stats = server._BucketStats(['test'], {
'request_count': 'sum', 'status_2xx_count': 'sum',
'status_200_count': 'sum', 'status_201_count': 'sum',
'status_3xx_count': 'sum', 'status_4xx_count': 'sum',
'status_5xx_count': 'sum'})
ss.first_app = ss
status_line, headers_iteritems, excinfo, content_iter = \
ss.get_response({
'REQUEST_METHOD': 'GET',
'PATH_INFO': '/test',
'SERVER_PROTOCOL': 'HTTP/1.1',
'wsgi.input': StringIO('test value')})
self.assertEqual(status_line, '404 Not Found')
self.assertEqual(dict(headers_iteritems), {
'Content-Length': '14', 'Content-Type': 'text/plain'})
self.assertEqual(excinfo, None)
self.assertEqual(''.join(content_iter), '404 Not Found\n')
def fake_app(env, start_response):
start_response(
'204 No Content',
[('Content-Length', '0'), ('Content-Type', 'text/plain')])
return []
status_line, headers_iteritems, excinfo, content_iter = \
ss.get_response({
'REQUEST_METHOD': 'GET',
'PATH_INFO': '/test',
'SERVER_PROTOCOL': 'HTTP/1.1',
'wsgi.input': StringIO('test value')}, next_app=fake_app)
self.assertEqual(status_line, '204 No Content')
self.assertEqual(dict(headers_iteritems), {
'Content-Length': '0', 'Content-Type': 'text/plain'})
self.assertEqual(excinfo, None)
self.assertEqual(''.join(content_iter), '')
def fake_app2(env, start_response):
start_response(
'200 OK',
[('Content-Length', '7'), ('Content-Type', 'text/plain')])
return ['200 OK']
status_line, headers_iteritems, excinfo, content_iter = \
ss.get_response({
'REQUEST_METHOD': 'GET',
'PATH_INFO': '/test',
'SERVER_PROTOCOL': 'HTTP/1.1',
'wsgi.input': StringIO('test value')}, next_app=fake_app2)
self.assertEqual(status_line, '200 OK')
self.assertEqual(dict(headers_iteritems), {
'Content-Length': '7', 'Content-Type': 'text/plain'})
self.assertEqual(excinfo, None)
self.assertEqual(''.join(content_iter), '200 OK')
class TCPWithInvalidInit(object):
def __init__(self):
pass
class TCPWithInvalidCall(object):
def __init__(self, name, conf):
pass
def __call__(self):
pass
class TCPWithNoCall(object):
def __init__(self, name, conf):
pass
class TCPWithInvalidParseConf1(object):
def __init__(self, name, conf):
pass
def __call__(self, subserver, stats, sock, ip, port):
pass
@classmethod
def parse_conf(cls):
pass
class TCPWithInvalidParseConf2(object):
parse_conf = 'blah'
def __init__(self, name, conf):
pass
def __call__(self, subserver, stats, sock, ip, port):
pass
class TCPWithNoParseConf(object):
def __init__(self, name, conf):
pass
def __call__(self, subserver, stats, sock, ip, port):
pass
class TCPWithParseConf(object):
def __init__(self, name, conf):
pass
def __call__(self, subserver, stats, sock, ip, port):
pass
@classmethod
def parse_conf(cls, name, conf):
return {'ok': True}
class TCPWithInvalidStatsConf1(object):
def __init__(self, name, conf):
pass
def __call__(self, subserver, stats, sock, ip, port):
pass
@classmethod
def stats_conf(cls):
pass
class TCPWithInvalidStatsConf2(object):
stats_conf = 'blah'
def __init__(self, name, conf):
pass
def __call__(self, subserver, stats, sock, ip, port):
pass
class TCPWithNoStatsConf(object):
def __init__(self, name, conf):
pass
def __call__(self, subserver, stats, sock, ip, port):
pass
class TCPWithStatsConf(object):
def __init__(self, name, conf):
pass
def __call__(self, subserver, stats, sock, ip, port):
pass
@classmethod
def stats_conf(cls, name, conf):
return [('ok', 'sum')]
class TestTCPSubserver(TestIPSubserver):
_class = server.TCPSubserver
def _get_default_confd(self):
return {'test': {'call': 'brim.tcp_echo.TCPEcho'}}
def test_init(self):
ss = TestIPSubserver.test_init(self)
self.assertEqual(ss.stats_conf.get('connection_count'), 'sum')
def test_parse_conf_defaults(self):
ss = TestIPSubserver.test_parse_conf_defaults(self)
self.assertEqual(ss.handler.__name__, 'TCPEcho')
def test_parse_conf_no_call(self):
ss = self._class(FakeServer(), 'test')
conf = Conf({})
exc = None
try:
ss._parse_conf(conf)
except Exception as err:
exc = err
self.assertEqual(
str(exc), "[test] not configured with 'call' option.")
def test_parse_conf_invalid_call(self):
ss = self._class(FakeServer(), 'test')
conf = Conf({'test': {'call': 'invalid'}})
exc = None
try:
ss._parse_conf(conf)
except Exception as err:
exc = err
self.assertEqual(str(exc), "Invalid call value 'invalid' for [test].")
def test_configure_handler(self):
ss = self._class(FakeServer(), 'test')
conf = Conf(self._get_default_confd())
ss._parse_conf(conf)
self.assertEqual(ss.handler.__name__, 'TCPEcho')
self.assertEqual(ss.handler_conf, ss.handler.parse_conf('test', conf))
def test_configure_handler_no_call_option(self):
ss = self._class(FakeServer(), 'test')
confd = self._get_default_confd()
confd['test']['cll'] = confd['test']['call']
del confd['test']['call']
exc = None
try:
ss._parse_conf(Conf(confd))
except Exception as err:
exc = err
self.assertEqual(
str(exc), "[test] not configured with 'call' option.")
def test_configure_handler_invalid_call_option(self):
ss = self._class(FakeServer(), 'test')
confd = self._get_default_confd()
confd['test']['call'] = 'brim_tcp_echo_TCPEcho'
exc = None
try:
ss._parse_conf(Conf(confd))
except Exception as err:
exc = err
self.assertEqual(
str(exc), "Invalid call value 'brim_tcp_echo_TCPEcho' for [test].")
def test_configure_handler_no_load(self):
ss = self._class(FakeServer(), 'test')
confd = self._get_default_confd()
confd['test']['call'] = 'brim.tcp_echo.cp_echo'
exc = None
try:
ss._parse_conf(Conf(confd))
except Exception as err:
exc = err
self.assertEqual(
str(exc),
"Could not load class 'brim.tcp_echo.cp_echo' for [test].")
def test_configure_handler_not_a_class(self):
ss = self._class(FakeServer(), 'test')
confd = self._get_default_confd()
confd['test']['call'] = 'brim.server._send_pid_sig'
exc = None
try:
ss._parse_conf(Conf(confd))
except Exception as err:
exc = err
self.assertEqual(
str(exc),
"Would not be able to instantiate 'brim.server._send_pid_sig' for "
"[test]. Probably not a class.")
def test_configure_handler_invalid_init(self):
ss = self._class(FakeServer(), 'test')
confd = self._get_default_confd()
confd['test']['call'] = 'brim.test.unit.test_server.TCPWithInvalidInit'
exc = None
try:
ss._parse_conf(Conf(confd))
except Exception as err:
exc = err
self.assertEqual(
str(exc),
"Would not be able to instantiate "
"'brim.test.unit.test_server.TCPWithInvalidInit' for [test]. "
"Incorrect number of args, 1, should be 3 (self, name, "
"parsed_conf).")
def test_configure_handler_invalid_call(self):
ss = self._class(FakeServer(), 'test')
confd = self._get_default_confd()
confd['test']['call'] = 'brim.test.unit.test_server.TCPWithInvalidCall'
exc = None
try:
ss._parse_conf(Conf(confd))
except Exception as err:
exc = err
self.assertEqual(
str(exc),
"Would not be able to use "
"'brim.test.unit.test_server.TCPWithInvalidCall' for [test]. "
"Incorrect number of __call__ args, 1, should be 6 (self, "
"subserver, stats, sock, ip, port).")
def test_configure_handler_no_call(self):
ss = self._class(FakeServer(), 'test')
confd = self._get_default_confd()
confd['test']['call'] = 'brim.test.unit.test_server.TCPWithNoCall'
exc = None
try:
ss._parse_conf(Conf(confd))
except Exception as err:
exc = err
self.assertEqual(
str(exc),
"Would not be able to use "
"'brim.test.unit.test_server.TCPWithNoCall' for [test]. Probably "
"no __call__ method.")
def test_configure_handler_invalid_parse_conf1(self):
ss = self._class(FakeServer(), 'test')
confd = self._get_default_confd()
confd['test']['call'] = \
'brim.test.unit.test_server.TCPWithInvalidParseConf1'
exc = None
try:
ss._parse_conf(Conf(confd))
except Exception as err:
exc = err
self.assertEqual(
str(exc),
"Cannot use 'brim.test.unit.test_server.TCPWithInvalidParseConf1' "
"for [test]. Incorrect number of parse_conf args, 1, should be 3 "
"(cls, name, conf).")
def test_configure_handler_invalid_parse_conf2(self):
ss = self._class(FakeServer(), 'test')
confd = self._get_default_confd()
confd['test']['call'] = \
'brim.test.unit.test_server.TCPWithInvalidParseConf2'
exc = None
try:
ss._parse_conf(Conf(confd))
except Exception as err:
exc = err
self.assertEqual(
str(exc),
"Cannot use 'brim.test.unit.test_server.TCPWithInvalidParseConf2' "
"for [test]. parse_conf probably not a method.")
def test_configure_handler_no_parse_conf(self):
ss = self._class(FakeServer(), 'test')
confd = self._get_default_confd()
confd['test']['call'] = 'brim.test.unit.test_server.TCPWithNoParseConf'
conf = Conf(confd)
ss._parse_conf(conf)
self.assertEqual(ss.handler_conf, conf)
def test_configure_handler_with_parse_conf(self):
ss = self._class(FakeServer(), 'test')
confd = self._get_default_confd()
confd['test']['call'] = 'brim.test.unit.test_server.TCPWithParseConf'
ss._parse_conf(Conf(confd))
self.assertEqual(ss.handler_conf, {'ok': True})
def test_configure_handler_invalid_stats_conf1(self):
ss = self._class(FakeServer(), 'test')
confd = self._get_default_confd()
confd['test']['call'] = \
'brim.test.unit.test_server.TCPWithInvalidStatsConf1'
exc = None
try:
ss._parse_conf(Conf(confd))
except Exception as err:
exc = err
self.assertEqual(
str(exc),
"Cannot use 'brim.test.unit.test_server.TCPWithInvalidStatsConf1' "
"for [test]. Incorrect number of stats_conf args, 1, should be 3 "
"(cls, name, conf).")
def test_configure_handler_invalid_stats_conf2(self):
ss = self._class(FakeServer(), 'test')
confd = self._get_default_confd()
confd['test']['call'] = \
'brim.test.unit.test_server.TCPWithInvalidStatsConf2'
exc = None
try:
ss._parse_conf(Conf(confd))
except Exception as err:
exc = err
self.assertEqual(
str(exc),
"Cannot use 'brim.test.unit.test_server.TCPWithInvalidStatsConf2' "
"for [test]. stats_conf probably not a method.")
def test_configure_handler_no_stats_conf(self):
ss = self._class(FakeServer(), 'test')
confd = self._get_default_confd()
confd['test']['call'] = 'brim.test.unit.test_server.TCPWithNoStatsConf'
ss._parse_conf(Conf(confd))
self.assertEqual(ss.stats_conf.get('start_time'), 'worker')
self.assertEqual(ss.stats_conf.get('connection_count'), 'sum')
def test_configure_handler_with_stats_conf(self):
ss = self._class(FakeServer(), 'test')
confd = self._get_default_confd()
confd['test']['call'] = 'brim.test.unit.test_server.TCPWithStatsConf'
ss._parse_conf(Conf(confd))
self.assertEqual(ss.stats_conf.get('start_time'), 'worker')
self.assertEqual(ss.stats_conf.get('connection_count'), 'sum')
self.assertEqual(ss.stats_conf.get('ok'), 'sum')
def test_privileged_start(self):
ss = self._class(FakeServer(), 'test')
ss._parse_conf(Conf(self._get_default_confd()))
exc = None
try:
ss._privileged_start()
except Exception as err:
exc = err
self.assertEqual(
str(exc), 'Could not bind to *:80: [Errno 13] Permission denied')
ss = self._class(FakeServer(), 'test')
confd = self._get_default_confd()
confd.setdefault('brim', {})['port'] = '0'
ss._parse_conf(Conf(confd))
ss._privileged_start()
self.assertTrue(ss.sock is not None)
get_listening_tcp_socket_calls = []
def _get_listening_tcp_socket(*args, **kwargs):
get_listening_tcp_socket_calls.append((args, kwargs))
return 'sock'
ss = self._class(FakeServer(), 'test')
ss._parse_conf(Conf(self._get_default_confd()))
get_listening_tcp_socket_orig = server.get_listening_tcp_socket
try:
server.get_listening_tcp_socket = _get_listening_tcp_socket
| |
:rtype: DetailedResponse
"""
if fromimage is None:
raise ValueError('fromimage must be provided')
if toimage is None:
raise ValueError('toimage must be provided')
headers = {
'Account': self.account
}
sdk_headers = get_sdk_headers(service_name=self.DEFAULT_SERVICE_NAME,
service_version='V1',
operation_id='tag_image')
headers.update(sdk_headers)
params = {
'fromimage': fromimage,
'toimage': toimage
}
if 'headers' in kwargs:
headers.update(kwargs.get('headers'))
url = '/api/v1/images/tags'
request = self.prepare_request(method='POST',
url=url,
headers=headers,
params=params)
response = self.send(request)
return response
def delete_image(self,
image: str,
**kwargs
) -> DetailedResponse:
"""
Delete image.
Delete a container image from the registry.
:param str image: The full IBM Cloud registry path to the image that you
want to delete, including its tag. If you do not provide a specific tag,
the version with the `latest` tag is removed.
:param dict headers: A `dict` containing the request headers
:return: A `DetailedResponse` containing the result, headers and HTTP status code.
:rtype: DetailedResponse with `dict` result representing a `ImageDeleteResult` object
"""
if image is None:
raise ValueError('image must be provided')
headers = {
'Account': self.account
}
sdk_headers = get_sdk_headers(service_name=self.DEFAULT_SERVICE_NAME,
service_version='V1',
operation_id='delete_image')
headers.update(sdk_headers)
if 'headers' in kwargs:
headers.update(kwargs.get('headers'))
headers['Accept'] = 'application/json'
path_param_keys = ['image']
path_param_values = self.encode_path_vars(image)
path_param_dict = dict(zip(path_param_keys, path_param_values))
url = '/api/v1/images/{image}'.format(**path_param_dict)
request = self.prepare_request(method='DELETE',
url=url,
headers=headers)
response = self.send(request)
return response
def inspect_image(self,
image: str,
**kwargs
) -> DetailedResponse:
"""
Inspect an image.
Inspect a container image in the private registry.
:param str image: The full IBM Cloud registry path to the image that you
want to inspect. Run `ibmcloud cr images` or call the `GET /images/json`
endpoint to review images that are in the registry.
:param dict headers: A `dict` containing the request headers
:return: A `DetailedResponse` containing the result, headers and HTTP status code.
:rtype: DetailedResponse with `dict` result representing a `ImageInspection` object
"""
if image is None:
raise ValueError('image must be provided')
headers = {
'Account': self.account
}
sdk_headers = get_sdk_headers(service_name=self.DEFAULT_SERVICE_NAME,
service_version='V1',
operation_id='inspect_image')
headers.update(sdk_headers)
if 'headers' in kwargs:
headers.update(kwargs.get('headers'))
headers['Accept'] = 'application/json'
path_param_keys = ['image']
path_param_values = self.encode_path_vars(image)
path_param_dict = dict(zip(path_param_keys, path_param_values))
url = '/api/v1/images/{image}/json'.format(**path_param_dict)
request = self.prepare_request(method='GET',
url=url,
headers=headers)
response = self.send(request)
return response
def get_image_manifest(self,
image: str,
**kwargs
) -> DetailedResponse:
"""
Get image manifest.
Get the manifest for a container image in the private registry.
:param str image: The full IBM Cloud registry path to the image that you
want to inspect. Run `ibmcloud cr images` or call the `GET /images/json`
endpoint to review images that are in the registry.
:param dict headers: A `dict` containing the request headers
:return: A `DetailedResponse` containing the result, headers and HTTP status code.
:rtype: DetailedResponse with `dict` result
"""
if image is None:
raise ValueError('image must be provided')
headers = {
'Account': self.account
}
sdk_headers = get_sdk_headers(service_name=self.DEFAULT_SERVICE_NAME,
service_version='V1',
operation_id='get_image_manifest')
headers.update(sdk_headers)
if 'headers' in kwargs:
headers.update(kwargs.get('headers'))
headers['Accept'] = 'application/json'
path_param_keys = ['image']
path_param_values = self.encode_path_vars(image)
path_param_dict = dict(zip(path_param_keys, path_param_values))
url = '/api/v1/images/{image}/manifest'.format(**path_param_dict)
request = self.prepare_request(method='GET',
url=url,
headers=headers)
response = self.send(request)
return response
#########################
# Messages
#########################
def get_messages(self,
**kwargs
) -> DetailedResponse:
"""
Get messages.
Return any published system messages.
:param dict headers: A `dict` containing the request headers
:return: A `DetailedResponse` containing the result, headers and HTTP status code.
:rtype: DetailedResponse with `str` result
"""
headers = {}
sdk_headers = get_sdk_headers(service_name=self.DEFAULT_SERVICE_NAME,
service_version='V1',
operation_id='get_messages')
headers.update(sdk_headers)
if 'headers' in kwargs:
headers.update(kwargs.get('headers'))
headers['Accept'] = 'application/json'
url = '/api/v1/messages'
request = self.prepare_request(method='GET',
url=url,
headers=headers)
response = self.send(request)
return response
#########################
# Namespaces
#########################
def list_namespaces(self,
**kwargs
) -> DetailedResponse:
"""
List namespaces.
List authorized namespaces in the targeted IBM Cloud account.
:param dict headers: A `dict` containing the request headers
:return: A `DetailedResponse` containing the result, headers and HTTP status code.
:rtype: DetailedResponse with `List[str]` result
"""
headers = {
'Account': self.account
}
sdk_headers = get_sdk_headers(service_name=self.DEFAULT_SERVICE_NAME,
service_version='V1',
operation_id='list_namespaces')
headers.update(sdk_headers)
if 'headers' in kwargs:
headers.update(kwargs.get('headers'))
headers['Accept'] = 'application/json'
url = '/api/v1/namespaces'
request = self.prepare_request(method='GET',
url=url,
headers=headers)
response = self.send(request)
return response
def list_namespace_details(self,
**kwargs
) -> DetailedResponse:
"""
Detailed namespace list.
Retrieves details, such as resource group, for all your namespaces in the targeted
registry.
:param dict headers: A `dict` containing the request headers
:return: A `DetailedResponse` containing the result, headers and HTTP status code.
:rtype: DetailedResponse with `List[NamespaceDetails]` result
"""
headers = {
'Account': self.account
}
sdk_headers = get_sdk_headers(service_name=self.DEFAULT_SERVICE_NAME,
service_version='V1',
operation_id='list_namespace_details')
headers.update(sdk_headers)
if 'headers' in kwargs:
headers.update(kwargs.get('headers'))
headers['Accept'] = 'application/json'
url = '/api/v1/namespaces/details'
request = self.prepare_request(method='GET',
url=url,
headers=headers)
response = self.send(request)
return response
def create_namespace(self,
name: str,
*,
x_auth_resource_group: str = None,
**kwargs
) -> DetailedResponse:
"""
Create namespace.
Add a namespace to the targeted IBM Cloud account.
:param str name: The name of the namespace.
:param str x_auth_resource_group: (optional) The ID of the resource group
that the namespace will be created within.
:param dict headers: A `dict` containing the request headers
:return: A `DetailedResponse` containing the result, headers and HTTP status code.
:rtype: DetailedResponse with `dict` result representing a `Namespace` object
"""
if name is None:
raise ValueError('name must be provided')
headers = {
'Account': self.account,
'X-Auth-Resource-Group': x_auth_resource_group
}
sdk_headers = get_sdk_headers(service_name=self.DEFAULT_SERVICE_NAME,
service_version='V1',
operation_id='create_namespace')
headers.update(sdk_headers)
if 'headers' in kwargs:
headers.update(kwargs.get('headers'))
headers['Accept'] = 'application/json'
path_param_keys = ['name']
path_param_values = self.encode_path_vars(name)
path_param_dict = dict(zip(path_param_keys, path_param_values))
url = '/api/v1/namespaces/{name}'.format(**path_param_dict)
request = self.prepare_request(method='PUT',
url=url,
headers=headers)
response = self.send(request)
return response
def assign_namespace(self,
x_auth_resource_group: str,
name: str,
**kwargs
) -> DetailedResponse:
"""
Assign namespace.
Assign a namespace to the specified resource group in the targeted IBM Cloud
account.
:param str x_auth_resource_group: The ID of the resource group that the
namespace will be created within.
:param str name: The name of the namespace to be updated.
:param dict headers: A `dict` containing the request headers
:return: A `DetailedResponse` containing the result, headers and HTTP status code.
:rtype: DetailedResponse with `dict` result representing a `Namespace` object
"""
if x_auth_resource_group is None:
raise ValueError('x_auth_resource_group must be provided')
if name is None:
raise ValueError('name must be provided')
headers = {
'Account': self.account,
'X-Auth-Resource-Group': x_auth_resource_group
}
sdk_headers = get_sdk_headers(service_name=self.DEFAULT_SERVICE_NAME,
service_version='V1',
operation_id='assign_namespace')
headers.update(sdk_headers)
if 'headers' in kwargs:
headers.update(kwargs.get('headers'))
headers['Accept'] = 'application/json'
path_param_keys = ['name']
path_param_values = self.encode_path_vars(name)
path_param_dict = dict(zip(path_param_keys, path_param_values))
url = '/api/v1/namespaces/{name}'.format(**path_param_dict)
request = self.prepare_request(method='PATCH',
url=url,
headers=headers)
response = self.send(request)
return response
def delete_namespace(self,
name: str,
**kwargs
) -> DetailedResponse:
"""
Delete namespace.
Delete the IBM Cloud Container Registry namespace from the targeted IBM Cloud
account, and remove all images that were in that namespace.
:param str name: The name of the namespace that you want to delete.
:param dict headers: A `dict` containing the request headers
:return: A `DetailedResponse` containing the result, headers and HTTP status code.
:rtype: DetailedResponse
"""
if name is None:
raise ValueError('name must be provided')
headers = {
'Account': self.account
}
sdk_headers = get_sdk_headers(service_name=self.DEFAULT_SERVICE_NAME,
service_version='V1',
operation_id='delete_namespace')
headers.update(sdk_headers)
if 'headers' in kwargs:
headers.update(kwargs.get('headers'))
path_param_keys = ['name']
path_param_values = self.encode_path_vars(name)
path_param_dict = dict(zip(path_param_keys, path_param_values))
url = '/api/v1/namespaces/{name}'.format(**path_param_dict)
request = self.prepare_request(method='DELETE',
url=url,
headers=headers)
response = self.send(request)
return response
#########################
# Plans
#########################
def get_plans(self,
**kwargs
) -> DetailedResponse:
"""
Get plans.
Get plans for the targeted account.
:param dict headers: A `dict` containing the request headers
:return: A `DetailedResponse` containing the result, headers and HTTP status code.
:rtype: DetailedResponse with `dict` result representing a `Plan` object
"""
headers = {
'Account': self.account
}
sdk_headers = get_sdk_headers(service_name=self.DEFAULT_SERVICE_NAME,
service_version='V1',
operation_id='get_plans')
headers.update(sdk_headers)
if 'headers' in kwargs:
headers.update(kwargs.get('headers'))
headers['Accept'] = 'application/json'
url = '/api/v1/plans'
request = self.prepare_request(method='GET',
url=url,
headers=headers)
response = self.send(request)
return response
def update_plans(self,
*,
plan: str = None,
**kwargs
) -> DetailedResponse:
"""
Update plans.
Update plans for the targeted account.
:param str plan: (optional)
:param dict headers: A `dict` containing the request headers
:return: A `DetailedResponse` containing the result, headers and HTTP status code.
:rtype: DetailedResponse
"""
headers = {
'Account': self.account
}
sdk_headers = get_sdk_headers(service_name=self.DEFAULT_SERVICE_NAME,
service_version='V1',
operation_id='update_plans')
headers.update(sdk_headers)
data = {
'plan': plan
}
data = {k: v for (k, v) in data.items() if v is not None}
data = json.dumps(data)
headers['content-type'] = 'application/json'
if 'headers' in kwargs:
headers.update(kwargs.get('headers'))
url = | |
####### Done by KiroTheBlueFox #########
# #
# This is free to use #
# You can redistribute it with proper #
# credits (keep the credits) #
# Do not claim it as your program ! #
# Credits to me (in your mod) are #
# very welcomed ! #
# You can modify the program but keep #
# these credits ! #
# Please do not remove this comment ! #
# #
########################################
import platform
import subprocess
import json
import os
FOLDER = os.path.dirname(os.path.abspath(__file__))
CONFIG = open(os.path.join(FOLDER, "config.txt"),"r", encoding="utf8")
for line in CONFIG:
if line.lower().startswith("# rotationvisualization = "):
value = line[30:].lower()
if value.startswith("false"):
RotationVisualization = False
else:
RotationVisualization = True
if line.lower().startswith("# mcversion = "):
value = line[18:].lower()
if value.startswith("1.14"):
VERSIONCHECK = False
VERSION = "1.14"
elif value.startswith("1.12"):
VERSIONCHECK = False
VERSION = "1.12"
else:
VERSIONCHECK = True
def clear_screen():
input("\nPress enter to continue\n")
command = "cls" if platform.system().lower()=="windows" else "clear"
return subprocess.call(command,shell=True) == 0
def clear_screen_no_enter():
command = "cls" if platform.system().lower()=="windows" else "clear"
return subprocess.call(command,shell=True) == 0
LIST = []
if VERSIONCHECK == True:
VERSION = ""
VERSION12 = ["1","1.12","1.12.0","1.12.1","1.12.2","1.12.x"]
VERSION14 = ["2","1.14","1.14.0","1.14.1","1.14.2","1.14.3","1.14.4","1.14.x"]
while VERSION not in ["1.12","1.14"]:
clear_screen_no_enter()
VERSION = input("""For which version of the game are you creating collisions ?
1. 1.12.x
2. 1.14.x
""")
if VERSION.lower() in VERSION12:
VERSION = "1.12"
elif VERSION.lower() in VERSION14:
VERSION = "1.14"
else:
print("Error: Invalid version. Please retry.")
clear_screen()
if VERSION == "1.12":
dm = 0.5
elif VERSION == "1.14":
dm = 8
clear_screen_no_enter()
print("This program will allow you to get the collisions of a minecraft custom block model by reading its .json file.")
clear_screen()
GOODFILE = False
while GOODFILE == False:
MODELPATH = input("""Please, write the path to the model you want to get the collisions from
(Rotations are not supported: collision boxes have no rotation, so the program ignores cuboids' rotations):
(You can drag and drop the file here and then press enter ! (except if you use a bad console emulator *Laughing out loud emoji*))
""")
clear_screen_no_enter()
if MODELPATH.startswith('"') and MODELPATH.endswith('"'):
MODELPATH = MODELPATH[1:-1]
try:
with open(os.path.join(MODELPATH), "r") as file:
MODEL = json.load(file)
GOODFILE = True
except:
clear_screen_no_enter()
print("Error: File not found. Please retry")
clear_screen()
MODEL = MODEL["elements"]
for i in range(len(MODEL)):
CUBOID = MODEL[i]
POS = CUBOID["from"]+CUBOID["to"]
if VERSION == "1.12":
for i in range(6):
POS[i] = POS[i] / 16
LIST.append(POS)
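# Worked example (a sketch, not part of the original script): a cuboid with
# "from": [0, 0, 0] and "to": [16, 8, 16] gives POS = [0, 0, 0, 16, 8, 16].
# For 1.12 each coordinate is divided by 16, yielding [0.0, 0.0, 0.0, 1.0, 0.5, 1.0],
# while for 1.14 the raw 0-16 block coordinates are kept as-is.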
print("List of collisions : \n"+str(LIST).replace("],","],\n"))
clear_screen()
action = ""
BLOCKTOPFACING = "Top"
BLOCKROTATION = "North"
REVERSEDX = False
REVERSEDY = False
REVERSEDZ = False
def MirrorX():
global BLOCKTOPFACING
global BLOCKROTATION
global REVERSEDX
if BLOCKTOPFACING in ["Top","Bottom","North","South"]:
if BLOCKROTATION == "West":
BLOCKROTATION = "East"
elif BLOCKROTATION == "East":
BLOCKROTATION = "West"
else:
if REVERSEDX == True:
REVERSEDX = False
else:
REVERSEDX = True
elif BLOCKTOPFACING == "East":
BLOCKTOPFACING = "West"
elif BLOCKTOPFACING == "West":
BLOCKTOPFACING = "East"
def MirrorY():
global BLOCKTOPFACING
global BLOCKROTATION
global REVERSEDY
if BLOCKTOPFACING in ["West","East","North","South"]:
if BLOCKROTATION == "Top":
BLOCKROTATION = "Bottom"
elif BLOCKROTATION == "Bottom":
BLOCKROTATION = "Top"
else:
if REVERSEDY == True:
REVERSEDY = False
else:
REVERSEDY = True
elif BLOCKTOPFACING == "Top":
BLOCKTOPFACING = "Bottom"
elif BLOCKTOPFACING == "Bottom":
BLOCKTOPFACING = "Top"
def MirrorZ():
global BLOCKTOPFACING
global BLOCKROTATION
global REVERSEDZ
if BLOCKTOPFACING in ["West","East","Top","Bottom"]:
if BLOCKROTATION == "North":
BLOCKROTATION = "South"
elif BLOCKROTATION == "South":
BLOCKROTATION = "North"
else:
if REVERSEDZ == True:
REVERSEDZ = False
else:
REVERSEDZ = True
elif BLOCKTOPFACING == "North":
BLOCKTOPFACING = "South"
elif BLOCKTOPFACING == "South":
BLOCKTOPFACING = "North"
def RotateX():
global BLOCKTOPFACING
global BLOCKROTATION
BLOCKFULLINFO = BLOCKTOPFACING+BLOCKROTATION
if BLOCKTOPFACING in ["West","East"]:
if BLOCKROTATION == "Top":
BLOCKROTATION = "South"
elif BLOCKROTATION == "South":
BLOCKROTATION = "Top"
elif BLOCKROTATION == "Bottom":
BLOCKROTATION = "North"
elif BLOCKROTATION == "North":
BLOCKROTATION = "Bottom"
else:
return
if BLOCKROTATION in ["West","East"]:
if BLOCKTOPFACING == "Top":
BLOCKTOPFACING = "South"
elif BLOCKTOPFACING == "South":
BLOCKTOPFACING = "Top"
elif BLOCKTOPFACING == "Bottom":
BLOCKTOPFACING = "North"
elif BLOCKTOPFACING == "North":
BLOCKTOPFACING = "Bottom"
else:
return
elif BLOCKFULLINFO == "SouthTop":
BLOCKTOPFACING = "Top"
BLOCKROTATION = "South"
elif BLOCKFULLINFO == "TopSouth":
BLOCKTOPFACING = "South"
BLOCKROTATION = "Top"
elif BLOCKFULLINFO == "SouthBottom":
BLOCKTOPFACING = "Top"
BLOCKROTATION = "North"
elif BLOCKFULLINFO == "BottomSouth":
BLOCKTOPFACING = "North"
BLOCKROTATION = "Top"
elif BLOCKFULLINFO == "NorthTop":
BLOCKTOPFACING = "Bottom"
BLOCKROTATION = "South"
elif BLOCKFULLINFO == "TopNorth":
BLOCKTOPFACING = "South"
BLOCKROTATION = "Bottom"
elif BLOCKFULLINFO == "NorthBottom":
BLOCKTOPFACING = "Bottom"
BLOCKROTATION = "North"
elif BLOCKFULLINFO == "BottomNorth":
BLOCKTOPFACING = "North"
BLOCKROTATION = "Bottom"
def RotateY():
global BLOCKTOPFACING
global BLOCKROTATION
BLOCKFULLINFO = BLOCKTOPFACING+BLOCKROTATION
if BLOCKTOPFACING in ["Top","Bottom"]:
if BLOCKROTATION == "West":
BLOCKROTATION = "South"
elif BLOCKROTATION == "South":
BLOCKROTATION = "West"
elif BLOCKROTATION == "East":
BLOCKROTATION = "North"
elif BLOCKROTATION == "North":
BLOCKROTATION = "East"
else:
return
if BLOCKROTATION in ["Top","Bottom"]:
if BLOCKTOPFACING == "West":
BLOCKTOPFACING = "South"
elif BLOCKTOPFACING == "South":
BLOCKTOPFACING = "West"
elif BLOCKTOPFACING == "East":
BLOCKTOPFACING = "North"
elif BLOCKTOPFACING == "North":
BLOCKTOPFACING = "East"
else:
return
elif BLOCKFULLINFO == "WestSouth":
BLOCKTOPFACING = "South"
BLOCKROTATION = "West"
elif BLOCKFULLINFO == "SouthWest":
BLOCKTOPFACING = "West"
BLOCKROTATION = "South"
elif BLOCKFULLINFO == "WestNorth":
BLOCKTOPFACING = "South"
BLOCKROTATION = "East"
elif BLOCKFULLINFO == "NorthWest":
BLOCKTOPFACING = "East"
BLOCKROTATION = "South"
elif BLOCKFULLINFO == "EastSouth":
BLOCKTOPFACING = "North"
BLOCKROTATION = "West"
elif BLOCKFULLINFO == "SouthEast":
BLOCKTOPFACING = "West"
BLOCKROTATION = "North"
elif BLOCKFULLINFO == "EastNorth":
BLOCKTOPFACING = "North"
BLOCKROTATION = "East"
elif BLOCKFULLINFO == "NorthEast":
BLOCKTOPFACING = "East"
BLOCKROTATION = "North"
def RotateZ():
global BLOCKTOPFACING
global BLOCKROTATION
BLOCKFULLINFO = BLOCKTOPFACING+BLOCKROTATION
if BLOCKTOPFACING in ["North","South"]:
if BLOCKROTATION == "West":
BLOCKROTATION = "Top"
elif BLOCKROTATION == "Top":
BLOCKROTATION = "West"
elif BLOCKROTATION == "East":
BLOCKROTATION = "Bottom"
elif BLOCKROTATION == "Bottom":
BLOCKROTATION = "East"
else:
return
elif BLOCKROTATION in ["North","South"]:
if BLOCKTOPFACING == "West":
BLOCKTOPFACING = "Top"
elif BLOCKTOPFACING == "Top":
BLOCKTOPFACING = "West"
elif BLOCKTOPFACING == "East":
BLOCKTOPFACING = "Bottom"
elif BLOCKTOPFACING == "Bottom":
BLOCKTOPFACING = "East"
else:
return
elif BLOCKFULLINFO == "WestTop":
BLOCKTOPFACING = "Top"
BLOCKROTATION = "West"
elif BLOCKFULLINFO == "TopWest":
BLOCKTOPFACING = "West"
BLOCKROTATION = "Top"
elif BLOCKFULLINFO == "WestBottom":
BLOCKTOPFACING = "Top"
BLOCKROTATION = "East"
elif BLOCKFULLINFO == "BottomWest":
BLOCKTOPFACING = "East"
BLOCKROTATION = "Top"
elif BLOCKFULLINFO == "EastTop":
BLOCKTOPFACING = "Bottom"
BLOCKROTATION = "West"
elif BLOCKFULLINFO == "TopEast":
BLOCKTOPFACING = "West"
BLOCKROTATION = "Bottom"
elif BLOCKFULLINFO == "EastBottom":
BLOCKTOPFACING = "Bottom"
BLOCKROTATION = "East"
elif BLOCKFULLINFO == "BottomEast":
BLOCKTOPFACING = "East"
BLOCKROTATION = "Bottom"
while action != "exit":
    FILE = open(os.path.join(FOLDER, "Visualizations", BLOCKTOPFACING + BLOCKROTATION + ".txt"), "r", encoding="utf8")
VISUAL = FILE.read()
FILE.close()
if REVERSEDX == True:
VISUAL += "\nWest and East mirrored/exchanged.\n"
if REVERSEDY == True:
VISUAL += "\nTop and Bottom mirrored/exchanged.\n"
if REVERSEDZ == True:
VISUAL += "\nNorth and South mirrored/exchanged.\n"
VISUAL += "\n"
if RotationVisualization == False:
VISUAL = ""
print("""What do you want to do ?
1a. Mirror West-East (Axis X)
1b. Mirror Top-Down (Axis Y)
1c. Mirror North-South (Axis Z)
2a. Exchange East-West and Top-Down axis (Axis X and Y)
2b. Exchange Top-Down and North-South axis (Axis Y and Z)
2c. Exchange North-South and East-West axis (Axis Z and X)
Write "finish" to get the final code.
Write "info" to get information about each command.
Write "exit" to quit the program.
"""+VISUAL)
action = input()
clear_screen_no_enter()
if action == "info":
        print("""Information:\n
The mirror commands will mirror the cube on the specified axis.
Example for Mirror West-East:
██████ ......
...███ ██████ ██████ ......
... ██████ ███ ███ ██████ ...
. ...... ██████ █ ██ █ ██████ ███... .
. ..y... █ █ Becomes ████ █ ██y███ █ .
. ↑ █ █ ██████████████ █ ↑ █ .
. . █ █ ████ █ █ █ .
. . █ █ ██ █ █ █ .
z←. . █ █→x z←█ █ █ .→x
...... . ...███ ██████ █ ███...
...... ██████
BEFORE AFTER
This can be used to change, for example, facing North to facing South.
""")
clear_screen()
        print("""Information:\n
The exchange commands will swap 2 axis of the cube.
Example for Exchange East-West and Top-Down axis (Axis X and Y):
██████ ██████
...███ ██████ ██████ ███...
... ██████ ███ ███ ██████ ...
. ...... ██████ █ ██ █ ██████ ...... .
. ..y... █ █ | |
<reponame>iuliansimion/Chevalley
import sympy
class Curtis:
type = 0
# module for computing zUy and UxU
deodhar = 0
# Bruhat form
bruhat = 0
# the Chevalley group
group = 0
# the Weyl group
weyl = 0
# standard parabolics
para = 0
# distinguished expressions for standard parabolics
dist_expr_p = 0
# Deodhar cells
D = 0
# Deodhar cells DI-form
DI = 0
# Deodhar cells in zUyi form
zUyi = 0
# Deodhar cells in UxU form
UxU = 0
# the toral elements for the basis of the Hecke algebra of a GG-rep
# given explicitly in derived classes
tori = []
# a second list of the same tori with "primed" variables
tori2 = []
# a third list of the same tori with "double primed" variables
tori3 = []
def __init__(self, t):
self.type = t
self.deodhar = self.type.deodhar
self.bruhat = self.type.bruhat
self.group = self.type.group
self.weyl = self.type.weyl
self.para = self.type.parabolics
self.dist_expr_p = self.extract_para_dist_expr()
# needs dist_expr_p:
# self.load_cells()
"""
Selecting those distinguished expressions corresponding to
standard parabolic subgroups
"""
def extract_para_dist_expr(self):
de = self.weyl.dist_expr
w0w = self.para.w0w
result = []
for i in range(len(de)):
e = de[i]
if e[0][0] in w0w and \
e[0][1] in w0w and \
e[0][2] in w0w:
result.append(e + [i])
return result
"""
Select cells corresponding to dist_expr_p
--- needs dist_expr_p
"""
def load_cells(self):
dep = self.dist_expr_p
self.D = []
self.DI = []
self.zUyi = []
self.UxU = []
for e in dep:
pos = e[len(e) - 1]
tmpD = []
tmpDI = []
tmpzUyi = []
tmpUxU = []
for j in range(len(e[1])):
# D and zUyi
uyiu = self.deodhar.cell_UyiU(pos, j)
tmpzUyi.append(uyiu)
# DI and UxU
uxu = self.deodhar.cell_Ux(pos, j)
tmpUxU.append(uxu)
self.D.append(tmpD)
self.DI.append(tmpDI)
self.zUyi.append(tmpzUyi)
self.UxU.append(tmpUxU)
"""
prepare the two forms of the cell
"""
def prepare_zUy_UxU(self, ii, j):
de = self.weyl.dist_expr
x = de[ii][0][0]
y = de[ii][0][1]
z = de[ii][0][2]
nx = self.group.w_to_n(self.weyl.word(x))
ny = self.group.w_to_n(self.weyl.word(y))
nz = self.group.w_to_n(self.weyl.word(z))
ty = self.para.w0w.index(y)
ty = self.tori2[ty]
tyi = self.group.invert(ty)
ytyi = self.group.conjugate_left(ny, tyi)
tz = self.para.w0w.index(z)
tz = self.tori3[tz]
ztz = self.group.conjugate_left(nz, tz)
uyiu = self.deodhar.cell_UyiU(ii, j)
uxu = self.deodhar.cell_Ux(ii, j)
uyiu = self.bruhat.split_strict_Bruhat(uyiu, n_coef=-1)
ytyi0 = ytyi + self.group.invert(uyiu[2])
uxu = self.bruhat.split_strict_Bruhat(uxu)
uxu[0] = self.group.conjugate_left(ztz, uxu[0])
ztzx = self.group.conjugate_right(ztz, nx)
if nx != uxu[1]:
print("curtis.prepare_zUy_UxU: this should not be!")
uxu[3] = uxu[3] + self.group.invert(uyiu[3])
uxu[3] = self.group.conjugate_right(uxu[3], ytyi0)
uxu[2] = uxu[2] + ztzx + ytyi0
uy = uyiu[0] + uyiu[1]
uxu = uxu[0] + uxu[1] + self.group.canonic_th(uxu[2]) + self.group.canonic_u(uxu[3])
for i in range(len(uy)):
uy[i] = [uy[i][0], uy[i][1], sympy.simplify(uy[i][2])]
for i in range(len(uxu)):
uxu[i] = [uxu[i][0], uxu[i][1], sympy.simplify(uxu[i][2])]
return [uy, uxu]
"""
Get condition for toral elements to represent the same cell
--- we need t0 in zUyi*t0
--- we need t00 in Uxt00U
[z*tz][U][(y*ty)^-1]t
= [tz^(z^-1)][z][U][y^-1][(ty^-1)^(y^-1)]
= [tz^(z^-1)][zUyi][t0^-1][(ty^-1)^(y^-1)]
= [tz^(z^-1)][UxU][t0^-1][(ty^-1)^(y^-1)]
= [tz^(z^-1)][U][x][t00][U][t0^-1][(ty^-1)^(y^-1)]
"""
def structure_equation(self, i, j):
x = self.dist_expr_p[i][0][0]
y = self.dist_expr_p[i][0][1]
z = self.dist_expr_p[i][0][2]
        # copy, so that we do not modify the originals
zUyi = [list(e) for e in self.zUyi[i][j]]
UxU = [list(e) for e in self.UxU[i][j]]
xx = self.weyl.word(x)
xx = self.group.w_to_n(xx)
yy = self.weyl.word(y)
yy = self.group.w_to_n(yy)
zz = self.weyl.word(z)
zz = self.group.w_to_n(zz)
#
# toral part for y
#
# the order is important
# this is the correct order to get t0 on the right
t0 = yy + zUyi[1] + zUyi[2]
t0 = self.group.canonic_nt(t0)
if not self.group.all_t(t0):
print("curtis.structure_equation: This should not be! (t0)")
#
# toral part for x
#
xxi = self.group.invert(xx)
# the order is important
# this is the correct order to get t0 on the right
t00 = xxi + UxU[1] + UxU[2]
t00 = self.group.canonic_nt(t00)
if not self.group.all_t(t00):
print("curtis.structure_equation: This should not be! (t00)")
#
# tz and ty
#
tz = self.para.w0w.index(z)
# use the second set of variables for z
tz = self.tori2[tz]
ty = self.para.w0w.index(y)
ty = self.tori[ty]
# bring to other form
# left U
zztz = self.group.conjugate_left(zz, tz)
UxU[0] = self.group.conjugate_left(zztz, UxU[0])
xxizztz = self.group.conjugate_right(zztz, xxi)
# right U
t0i = self.group.invert(t0)
UxU[3] = self.group.conjugate_right(UxU[3], t0i)
tyi = self.group.invert(ty)
yytyi = self.group.conjugate_left(yy, tyi)
UxU[3] = self.group.conjugate_right(UxU[3], yytyi)
tt = xxizztz + t00 + t0i + yytyi
tt = self.group.canonic_t(tt)
return [tt, zUyi, UxU]
"""
Truncate the unipotent part
and bring the two forms of the cells in the right form for
the structure constants of the Hecke algebra of a GG-rep
"""
def Hecke_GG_form(self, i, j):
[tt, zUyi, UxU] = self.structure_equation(i, j)
Uyz = self.group.truncate_u_sr(zUyi[0])
#
# just added !!! non-standard
#
# no Uyz=self.group.invert(Uyz)
# no Uyz=self.group.canonic_u(Uyz)
# no Uyz=self.group.truncate_u_sr(Uyz)
Ux_left = self.group.truncate_u_sr(UxU[0])
Ux_right = self.group.truncate_u_sr(UxU[3])
Ux = Ux_left + Ux_right
Ux = self.group.invert(Ux)
Ux = self.group.canonic_u(Ux)
Ux = self.group.truncate_u_sr(Ux)
U = Ux + Uyz
U = self.group.canonic_u(U)
U = self.group.truncate_u_sr(U)
return [tt, zUyi, UxU, U]
"""
Produce a report for the j-th cell in the i-th case
"""
def report(self, i, j):
[uy, uxu] = self.prepare_zUy_UxU(i, j)
uy = self.bruhat.split_strict_Bruhat(uy, n_coef=-1)
uxu = self.bruhat.split_strict_Bruhat(uxu)
de = self.weyl.dist_expr[i]
word = self.weyl.word
latex = self.group.latex
truncate = self.group.truncate_u_sr
print("############################")
print("CASE: ", i, j)
print("CONFIGURATION: ", de[0])
print("DIST EXPR: ", de[1][j])
print("------------------")
print("Z: ", word(de[0][2]))
print("Y: ", word(de[0][1]))
print("X: ", word(de[0][0]))
print("------------------")
print("U in zUyi:")
print("U1: ", latex(truncate(uy[0])))
print("U in UxU:")
print(uxu)
print("U2: ", latex(truncate(uxu[0])))
print("U3: ", latex(truncate(uxu[3])))
print("------------------")
print("Condition on toral element:")
print("A) ", latex(uxu[2]))
print("------------------")
print("U to evaluate psi on:")
Ux_left = truncate(uxu[0])
Ux_right = truncate(uxu[3])
Ux = Ux_left + Ux_right
Ux = self.group.invert(Ux)
Ux = self.group.canonic_u(Ux)
Ux = truncate(Ux)
U = Ux + uy[0]
U = self.group.canonic_u(U)
U = truncate(U)
U = self.group.simplify_params(U)
print(U)
print(latex(U))
print("############################")
"""
Produce a report for the j-th cell in the i-th case
"""
def report_file(self, i, j):
f_name = "data/" + self.type.label + "/reports/" + str(i) + str(j) + ".rep"
f_name = f_name.lower()
f = open(f_name, "w")
# [tt,zUyi,UxU,U]=self.Hecke_GG_form(i,j)
[uy, uxu] = self.prepare_zUy_UxU(i, j)
uy = self.bruhat.split_strict_Bruhat(uy, n_coef=-1)
uxu = self.bruhat.split_strict_Bruhat(uxu)
de = self.weyl.dist_expr[i]
word = self.weyl.word
latex = self.group.latex
truncate = self.group.truncate_u_sr
f.write("############################\n")
f.write("CASE: " + str(i) + str(j) + "\n")
f.write("CONFIGURATION: " + str(de[0]) + "\n")
f.write("DIST EXPR: " + str(de[1][j]) + "\n")
f.write("------------------")
f.write("Z: " + str(word(de[0][2])) + "\n")
# f.write("Y^-1t0: ",zUyi[1]+zUyi[2])
f.write("Y: " + str(word(de[0][1])) + "\n")
# f.write("Xt00: ",UxU[1]+UxU[2])
f.write("X: " + str(word(de[0][0])) + "\n")
f.write("------------------\n")
f.write("U in zUyi:")
f.write("U1: " + latex(truncate(uy[0])) + "\n")
f.write("U2: " + latex(truncate(uxu[0])) + "\n")
f.write("U in UxU:")
f.write("U3: " + latex(truncate(uxu[3])) + "\n")
f.write("------------------\n")
f.write("Condition on toral element:\n")
f.write("A) " + latex(uxu[2]) + "\n")
f.write("------------------\n")
f.write("U to evaluate psi on:\n")
Ux_left = truncate(uxu[0])
Ux_right = truncate(uxu[3])
Ux = Ux_left + Ux_right
Ux = self.group.invert(Ux)
Ux = self.group.canonic_u(Ux)
Ux = truncate(Ux)
U = Ux + uy[0]
U = self.group.canonic_u(U)
U = truncate(U)
U = self.group.simplify_params(U)
f.write(latex(U) + "\n")
f.write("############################\n")
f.close()
"""
Returns the index in the list dist_expr_p of the case c
"""
def index(self, c):
de = self.dist_expr_p
tmp = [i[0] for i in de]
return tmp.index(c)
def latex_dist_expr(self, i, j):
de = self.weyl.dist_expr[i][1][j]
result = "$" + str([i + 1 for i in de[0]]) + "$"
result += " (of type "
t = ""
vari = ""
for k in range(len(de[0])):
if k in de[1][0]:
t += "A"
                vari += "$x_{" + str(k + 1) + "}\\in k$, "
elif k in de[1][1]:
t += "B"
                vari += "$x_{" + str(k + 1) + "}\\in k^{\\ast}$, "
elif k in de[1][2]:
t += "C"
vari += "$x_{" + str(k + 1) + "}=1$, "
else:
print("curtis.latex_dist_expr: this should not be!")
return
result += t + ") " + vari
return result
"""
Produce a report for the j-th cell in the i-th case
"""
def report_latex(self, i):
ii = self.dist_expr_p[i][2]
w0w = list(self.para.w0w)
#
        # note: swapping the last element with the first one here
#
tmp = w0w[3]
w0w[3] = w0w[2]
w0w[2] = tmp
case = | |
# coding=utf-8
# Copyright 2021 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Basic Task classes."""
import functools
import itertools
from absl import logging
from flax.deprecated import nn
from flax.training import common_utils
import jax
import jax.numpy as jnp
import scipy
from gift.data import all_datasets
from gift.tasks import all_metrics
from gift.tasks import domain_mapping_utils
from gift.tasks import metrics
class Task(object):
"""Base Task class.
Task objects contain all the information about the objective of the
training, evaluation metrics, and the dataset.
"""
def __init__(self, task_params, num_shards, regularisers=None):
"""Init task objects.
Args:
task_params: ConfigDict; hyperparameters of the task.
num_shards: int; Number of shards used for data parallelization (should
normally be set to `jax.device_count()`).
      regularisers: list of functions; List of auxiliary losses that get module
        parameters as input (L2 loss is handled separately).
"""
self.task_params = task_params
self.dataset_name = task_params.get('dataset_name')
self.regularisers = regularisers
self.load_dataset(self.dataset_name, num_shards)
self.task_params.output_dim = self.dataset.meta_data['num_classes']
def load_dataset(self, dataset_name, num_shards):
"""Loads the dataset for the task.
Args:
dataset_name: str; Name of the dataset.
num_shards: int; Number of shards used for data parallelization (should
normally be set to `jax.device_count()`).
"""
self.dataset = all_datasets.get_dataset(dataset_name)(
batch_size=self.task_params.local_batch_size,
eval_batch_size=self.task_params.eval_local_batch_size,
resolution=self.task_params.get('resolution', None),
data_augmentations=self.task_params.get('data_augmentations', None),
teacher_data_augmentations=self.task_params.get(
'teacher_data_augmentations', None),
num_shards=num_shards)
def loss_function(self, logits, batch, model_params=None, step=None):
raise NotImplementedError
def metrics_fn(self, logits, batch):
raise NotImplementedError
def get_l2_rate(self, step):
del step
return self.task_params.get('l2_decay_factor')
class ClassificationTask(Task):
"""Classification Task."""
def __init__(self, task_params, num_shards):
"""Initializing Classification based Tasks.
Args:
task_params: configdict; Hyperparameters of the task.
      num_shards: int; Number of devices that we shard the batch over.
"""
super().__init__(task_params, num_shards)
loss_fn_name = self.task_params.get('main_loss', None)
if loss_fn_name is None:
if self.dataset.meta_data['num_classes'] == 1:
# Use the loss function for binary classification.
loss_fn_name = 'sigmoid_cross_entropy'
else:
loss_fn_name = 'categorical_cross_entropy'
self.main_loss_fn = functools.partial(metrics.weighted_loss,
all_metrics.ALL_LOSSES[loss_fn_name])
_METRICS = all_metrics.CLASSIFICATION_METRICS
def metrics_fn(self, logits, batch):
"""Calculates metrics for the classification task.
Args:
logits: float array; Output of the model->[batch, length, num_classes].
batch: dict; Batch of data that has 'label' and optionally 'weights'.
Returns:
a dict of metrics.
"""
target_is_onehot = logits.shape == batch['label'].shape
if target_is_onehot:
one_hot_targets = batch['label']
else:
one_hot_targets = common_utils.onehot(batch['label'], logits.shape[-1])
if self.dataset.meta_data['num_classes'] == 1:
# If this is a binary classification task, make sure the shape of labels
# is (bs, 1) and is the same as the shape of logits.
one_hot_targets = jnp.reshape(one_hot_targets, logits.shape)
if self.task_params.get('class_indices'):
possible_labels_indices = self.task_params.get('class_indices')
one_hot_targets = one_hot_targets[:, possible_labels_indices]
logits = logits[:, possible_labels_indices]
weights = batch.get('weights') # weights might not be defined
metrics_dic = {}
for key in self._METRICS:
metric_val, metric_normalizer = self._METRICS[key](logits,
one_hot_targets,
weights)
metrics_dic[key] = (jax.lax.psum(metric_val, 'batch'),
jax.lax.psum(metric_normalizer, 'batch'))
# Store dataset related factors.
for key in batch:
if 'factor' in key:
factors = batch[key]
if weights is not None:
val = jnp.sum(metrics.apply_weights(factors, weights))
norm = jnp.sum(weights)
else:
val = jnp.sum(factors)
norm = len(factors)
metrics_dic[key] = (jax.lax.psum(val,
'batch'), jax.lax.psum(norm, 'batch'))
return metrics_dic
def loss_function(self, logits, batch, model_params=None, step=None):
"""Return cross entropy loss with an L2 penalty on the weights."""
weights = batch.get('weights')
if self.dataset.meta_data['num_classes'] == 1:
# If this is a binary classification task, make sure the shape of labels
# is (bs, 1) and is the same as the shape of logits.
targets = jnp.reshape(batch['label'], logits.shape)
elif batch['label'].shape[-1] == self.dataset.meta_data['num_classes']:
# If the labels are already the shape of (bs, num_classes) use them as is.
targets = batch['label']
else:
# Otherwise convert the labels to onehot labels.
targets = common_utils.onehot(batch['label'], logits.shape[-1])
loss_value, loss_normalizer = self.main_loss_fn(
logits,
targets,
weights,
label_smoothing=self.task_params.get('label_smoothing'))
total_loss = loss_value / loss_normalizer
if model_params:
l2_decay_factor = self.get_l2_rate(step)
if l2_decay_factor is not None:
l2_loss = metrics.l2_regularization(
model_params,
include_bias_terms=self.task_params.get('l2_for_bias', False))
total_loss = total_loss + 0.5 * l2_decay_factor * l2_loss
if self.regularisers:
for reg_fn in self.regularisers:
total_loss += reg_fn(model_params)
return total_loss
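  # Illustrative composition of the loss above (a sketch, not original code):
  #   total_loss = loss_value / loss_normalizer                          (main loss)
  #              + 0.5 * l2_decay_factor * l2_regularization(params)     (optional L2)
  #              + sum(reg_fn(params) for reg_fn in self.regularisers)   (optional extras)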
class MultiEnvClassificationTask(ClassificationTask):
"""Multi environment classification Task."""
_METRICS = all_metrics.MULTI_ENV_CLASSIFICATION_METRICS
def load_dataset(self, dataset_name, num_shards):
"""Loads the dataset for the task.
Args:
dataset_name: str; Name of the dataset.
num_shards: int; Number of shards used for data parallelization (should
normally be set to `jax.device_count()`).
"""
self.dataset = all_datasets.get_dataset(dataset_name)(
batch_size=self.task_params.local_batch_size,
eval_batch_size=self.task_params.eval_local_batch_size,
num_shards=num_shards,
resolution=self.task_params.get('resolution', None),
data_augmentations=self.task_params.get('data_augmentations', None),
teacher_data_augmentations=self.task_params.get(
'teacher_data_augmentations', None),
train_environments=self.task_params.train_environments,
eval_environments=self.task_params.eval_environments)
def aggregate_envs_losses(self, env_losses):
"""Aggregate losses of all environments.
Args:
env_losses: list(float); list of losses of the environments.
Returns:
Average of the env losses.
"""
return jnp.mean(jnp.array(env_losses))
def environments_penalties(self, env_logits, env_batches):
"""Computes a penalty term based on inconsistencies between different env.
Args:
env_logits: list(dict); List of logits for examples from different
environment (env_logits[0] is the logits for examples from env 0 which
is a float array of shape `[batch, length, num_classes]`).
env_batches: list(dict); List of batches of examples from different
environment (env_batches[0] is a batch dict for examples from env 0 that
has 'label' and optionally 'weights'.).
Returns:
Environments penalty term for the loss.
"""
del env_logits
del env_batches
return 0
def penalty_weight(self, step):
"""Return the weight of the environments penalty term in the loss.
Args:
step: int; Number of training steps passed so far.
Returns:
float; Weight of the environment penalty term.
"""
del step
return 0
def metrics_fn(self, env_logits, env_batches, env_ids, params):
"""Calculates metrics for the classification task.
Args:
env_logits: list(dict); List of logits for examples from different
environment (env_logits[0] is the logits for examples from env 0 which
is a float array of shape `[batch, length, num_classes]`).
env_batches: list(dict); List of batches of examples from different
environment (env_batches[0] is a batch dict for examples from env 0 that
has 'label' and optionally 'weights'.).
env_ids: list(int); List of environment codes.
params: pytree; parameters of the model.
Returns:
a dict of metrics.
"""
metrics_dic = {}
envs_metrics_dic = {}
# Add all the keys to envs_metrics_dic, each key will point to a list of
    # values from the corresponding metric for each environment.
# Task related metrics
for key in self._METRICS:
envs_metrics_dic[key] = []
# Dataset related metrics (e.g., perturbation factors)
for key in env_batches[0]:
if 'factor' in key:
envs_metrics_dic[key] = []
for i in range(len(env_logits)):
logits = env_logits[i]
batch = env_batches[i]
env_name = self.dataset.get_full_env_name(self.dataset.id2env(env_ids[i]))
env_metric_dic = super().metrics_fn(logits, batch)
for key in env_metric_dic:
metrics_dic[env_name + '/' + key] = env_metric_dic[key]
envs_metrics_dic[key].append(env_metric_dic[key])
# Add overall metric values over all environments,
for key in self._METRICS:
metrics_dic[key] = (jnp.sum(
jnp.array(jnp.array(envs_metrics_dic[key])[:, 0])),
jnp.sum(
jnp.array(jnp.array(envs_metrics_dic[key])[:,
1])))
if params:
metrics_dic['l2'] = metrics.l2_regularization(
params, include_bias_terms=self.task_params.get('l2_for_bias', False))
return metrics_dic
def get_env_losses(self, env_logits, env_batches):
"""Computes and return the loss on each environment.
Args:
env_logits: list(dict); List of logits for examples from different
environment (env_logits[0] is the logits for examples from env 0).
env_batches: list(dict); List of batches of examples from different
environment (env_batches[0] is a batch dict for examples from env 0).
Returns:
List of loss values in all environments.
"""
env_losses = []
for i in range(len(env_logits)):
logits = env_logits[i]
batch = env_batches[i]
ce_loss = super().loss_function(logits, batch)
env_losses.append(ce_loss)
return env_losses
def loss_function(self, env_logits, env_batches, model_params=None, step=0):
"""Returns loss with an L2 penalty on the weights.
Args:
env_logits: list(dict); List of logits for examples from different
environment (env_logits[0] is the logits for examples from env 0).
env_batches: list(dict); List of batches of examples from different
environment (env_batches[0] is a batch dict for examples from env 0).
      model_params: dict; Parameters of the model (used to compute l2).
step: int; Global training step.
Returns:
Total loss.
"""
env_losses = self.get_env_losses(env_logits, env_batches)
total_loss = self.aggregate_envs_losses(env_losses)
p_weight = self.penalty_weight(step)
total_loss += p_weight * self.environments_penalties(
env_logits, env_batches)
if model_params:
l2_decay_rate = self.get_l2_rate(step)
if l2_decay_rate is not None:
l2_loss = metrics.l2_regularization(
model_params,
include_bias_terms=self.task_params.get('l2_for_bias', False))
total_loss = total_loss + 0.5 * l2_decay_rate * l2_loss
if self.regularisers:
for reg_fn in self.regularisers:
reg_value = reg_fn(model_params)
total_loss += reg_value
# If p_weights > 1:
# Rescale the entire loss to keep gradients in a reasonable range.
total_loss /= | |
<reponame>kemerelab/ghostipy
import numpy as np
from abc import ABC, abstractmethod
from numba import njit
from scipy.integrate import quad
from scipy.signal import correlate
__all__ = ['Wavelet',
'MorseWavelet',
'AmorWavelet',
'BumpWavelet']
def reference_coi(psifn, reference_scale, *, threshold=1/(np.e**2)):
"""
Estimates a wavelet's cone of influence.
Parameters
----------
psifn : function
Function to get the wavelet frequency domain representation
reference_scale : float
Scale at which 'psifn' should be evaluated
threshold : float, optional
The value C that determines the wavelet's cone of influence. The
maximum value P of the wavelet's power autocorrelation is taken
in the time domain. Then the cone of influence is given by the
region where the power autocorrelation is above C*P. Default value
for C is e^(-2).
Returns
-------
reference_coi : float
The COI for the passed-in 'reference_scale'
"""
omega = np.fft.fftfreq(2**22) * 2 * np.pi
psif = psifn(omega, reference_scale).squeeze()
psit = np.fft.ifftshift(np.fft.ifft(psif))
psit_power = psit.real**2 + psit.imag**2
power_acs = correlate(psit_power, psit_power)
maxind = np.argmax(power_acs)
inds = np.argwhere(power_acs < threshold * np.max(power_acs)).squeeze()
mask = inds > maxind
inds = inds[mask]
reference_coi = inds[0] - maxind
return reference_coi
def coi(scales, reference_scale, reference_coi, *, fs=1):
"""
Estimates a wavelet's cone of influence (in seconds)
for requested scales, given a reference scale and
reference COI
Parameters
----------
scales : np.ndarray, with shape (n_scales, )
Array of scales for which to estimate the COI
reference_scale : float
The scale used as a reference
reference_coi : float
The COI used as a reference
fs : float, optional
Sampling rate.
Default is 1, where the COI in seconds is identical
to the COI in number of samples
Returns
-------
cois : np.ndarray, with shape (n_scales, )
The COIs for each value of 'scales'
"""
scales = np.atleast_1d(scales)
factors = scales / reference_scale
return factors * reference_coi / fs
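# Worked example (a sketch, not part of the original module): if the reference
# scale has a COI of 200 samples, a scale twice as large has a COI of 400 samples,
# i.e. coi(2 * ref_scale, ref_scale, 200) returns 400 for fs=1, or 400/fs seconds
# for other sampling rates.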
class Wavelet(ABC):
"""
The abstract base class that all wavelets in this package inherit from.
A custom wavelet should use this template if it is intended to be used
for the cwt() and wsst() methods. Note that the built-in wavelets have
been implemented as a bandpass filter bank with peak value 2 in the
frequency domain.
"""
def __init__(self):
pass
@abstractmethod
def freq_domain(self, omega, scales, *, derivative=False):
"""
Get the frequency domain representation of the wavelet
Parameters
----------
omega : np.ndarray, with shape (n_freqs, )
Array of angular frequencies
scales : np.narray, with shape (n_scales, )
Array of scales to use
derivative : boolean, optional
If True, return the derivative of the wavelet
Returns
-------
psif : np.ndarray, with shape (n_scales, n_freqs)
The frequency domain representation given the
passed-in 'scales'
"""
pass
@abstractmethod
def freq_domain_numba(self, omega, scales, out, *, derivative=False):
"""
Get the frequency domain representation of the wavelet using
numba as the backend
Parameters
----------
omega : np.ndarray, with shape (n_freqs, )
Array of angular frequencies
scales : np.narray, with shape (n_scales, )
Array of scales to use
out : np.ndarray, with shape (n_scales, n_freqs)
Output array to store the result
derivative : boolean, optional
If True, return the derivative of the wavelet
Returns
-------
result : boolean
            True if successful, False otherwise
"""
pass
@abstractmethod
def freq_to_scale(self, freqs):
"""
Map center frequency to scale
Parameters
----------
freqs : np.ndarray, with shape (n_freqs, )
Array of frequencies. Units should be radians within
the range [-pi, pi]
Returns
-------
scales : np.ndarray, with shape (n_scales, )
The scales corresponding to each frequency
"""
pass
@abstractmethod
def scale_to_freq(self, scales):
"""
Map scale to center frequency
Parameters
----------
scales : np.ndarray, with shape (n_scales, )
Array of scales
Returns
-------
freqs : np.ndarray, with shape (n_scales, )
The center frequencies corresponding to each
scale. Units are in radians.
"""
pass
@abstractmethod
def reference_coi(self, *, threshold=1/(np.e**2)):
"""
Get the COI for the base scale
Parameters
----------
threshold : float, optional
The value C that determines the wavelet's cone of influence. The
maximum value P of the wavelet's power autocorrelation is taken
in the time domain. Then the cone of influence is given by the
region where the power autocorrelation is above C*P. Default value
for C is e^(-2).
"""
pass
@abstractmethod
def coi(self, scales, reference_scale, reference_coi, *, fs=1):
"""
Estimates a wavelet's cone of influence (in seconds)
for requested scales, given a reference scale and
reference COI
Parameters
----------
scales : np.ndarray, with shape (n_scales, )
Array of scales for which to estimate the COI
reference_scale : float
The scale used as a reference
reference_coi : float
The COI used as a reference
fs : float, optional
Sampling rate.
Default is 1, where the COI in seconds is identical
to the COI in number of samples
Returns
-------
cois : np.ndarray, with shape (n_scales, )
The COIs for each value of 'scales'
"""
pass
@property
@abstractmethod
def admissibility_constant(self):
"""
The admissibility constant (float)
"""
pass
@property
@abstractmethod
def is_analytic(self):
"""
Whether or not a wavelet is analytic (boolean)
"""
pass
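# Minimal sketch (not part of the original package) of what a custom wavelet
# subclass of the template above is expected to provide:
#
#   class MyWavelet(Wavelet):
#       def freq_domain(self, omega, scales, *, derivative=False): ...
#       def freq_domain_numba(self, omega, scales, out, *, derivative=False): ...
#       def freq_to_scale(self, freqs): ...
#       def scale_to_freq(self, scales): ...
#       def reference_coi(self, *, threshold=1/(np.e**2)): ...
#       def coi(self, scales, reference_scale, reference_coi, *, fs=1): ...
#       # plus the admissibility_constant and is_analytic properties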
@njit
def _morse_freq_domain(omega, scales, gamma, beta, out,
*, derivative=False):
# out better be initialized to zeros!
log_a = np.log(2) + (beta/gamma) * (1+np.log(gamma) - np.log(beta))
H = np.zeros_like(omega)
H[omega > 0] = 1
for ii in range(scales.shape[0]):
x = scales[ii] * omega
log_psif = log_a + beta * np.log(np.abs(x)) - np.abs(x)**gamma
out[ii] = np.exp(log_psif) * H
if derivative:
out[ii] *= 1j * omega
return True
class MorseWavelet(Wavelet):
def __init__(self, *, gamma=3, beta=20):
super().__init__()
self.gamma = gamma
self.beta = beta
self.wp = np.exp( (np.log(self.beta) - np.log(self.gamma)) / self.gamma )
def freq_domain(self, omega, scales, *, derivative=False):
gamma = self.gamma
beta = self.beta
scales = np.atleast_1d(scales)
x = scales[:, None] * omega
H = np.zeros_like(omega)
H[omega > 0] = 1
with np.errstate(divide='ignore'):
log_a = np.log(2) + (beta/gamma) * (1+np.log(gamma) - np.log(beta))
psifs = np.exp(log_a + beta * np.log(np.abs(x)) - np.abs(x)**gamma) * H
if derivative:
return 1j * omega * psifs
return psifs
def freq_domain_numba(self, omega, scales, out, *, derivative=False):
return _morse_freq_domain(omega,
scales,
self.gamma,
self.beta,
out,
derivative=derivative)
def freq_to_scale(self, freqs):
# input should be in normalized radian frequencies
return self.wp / np.atleast_1d(freqs)
def scale_to_freq(self, scales):
return self.wp / np.atleast_1d(scales)
def reference_coi(self, *, threshold=1/(np.e**2)):
base_scale = self.wp / 1
base_coi = reference_coi(self.freq_domain, base_scale)
return base_scale, base_coi
def coi(self, scales, reference_scale, reference_coi, *, fs=1):
return coi(scales, reference_scale, reference_coi, fs=fs)
@property
def admissibility_constant(self):
def c_psi(omega):
return self.freq_domain(omega, 1)[0] / omega
return quad(c_psi, 0, np.inf)[0]
@property
def is_analytic(self):
return True
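# Illustrative usage (a sketch, not part of the original module): MorseWavelet maps
# center frequencies to scales through its peak frequency wp = (beta/gamma)**(1/gamma).
#   mw = MorseWavelet(gamma=3, beta=20)
#   scales = mw.freq_to_scale(np.array([np.pi / 4]))        # normalized radian frequencies
#   psif = mw.freq_domain(np.fft.fftfreq(1024) * 2 * np.pi, scales)   # shape (n_scales, n_freqs)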
@njit
def _amor_freq_domain(omega, scales, w0, out, *, derivative=False):
H = np.zeros_like(omega)
H[omega > 0] = 1
for ii in range(scales.shape[0]):
x = scales[ii] * omega
out[ii] = 2 * np.exp(-(x - w0)**2/2) * H
if derivative:
out[ii] *= 1j * omega
return True
class AmorWavelet(Wavelet):
def __init__(self, *, w0=7):
self.w0 = w0
def freq_domain(self, omega, scales, *, derivative=False):
w0 = self.w0
scales = np.atleast_1d(scales)
x = scales[:, None] * omega
H = np.zeros_like(omega)
H[omega > 0] = 1
psifs = 2 * np.exp(-(x - w0)**2 / 2) * H
if derivative:
return 1j * omega * psifs
return psifs
def freq_domain_numba(self, omega, scales, out, *, derivative=False):
return _amor_freq_domain(omega,
scales,
self.w0,
out,
derivative=derivative)
def freq_to_scale(self, freqs):
return self.w0 / freqs
def scale_to_freq(self, scales):
return self.w0 / np.atleast_1d(scales)
def reference_coi(self, *, threshold=1/(np.e**2)):
base_scale = self.w0 / 1
base_coi = reference_coi(self.freq_domain, base_scale)
return base_scale, base_coi
def coi(self, scales, reference_scale, reference_coi, *, fs=1):
return coi(scales, reference_scale, reference_coi, fs=fs)
@property
def admissibility_constant(self):
def c_psi(omega):
return self.freq_domain(omega, 1)[0] / omega
# sometimes have trouble integrating, so we extend bounds
# to include negative frequencies. This adds a negligible
# amount to the final result
return quad(c_psi, -np.inf, np.inf)[0]
@property
def is_analytic(self):
return True
@njit
def _bump_freq_domain(omega, scales, mu, sigma, out, *, derivative=False):
for ii in range(scales.shape[0]):
w = (scales[ii] * omega - mu) / | |
    pgrape_sur_tv = [0.973, 0.553]
tr_sur_tv = [0.915, 0.797010362, 0.705988294, 0.724, 0.964, 0.970, 0.952, 0.955, 0.539]
admm_sur_tv = [0.957, 0.790725327, 0.683639638, 0.734, 0.979, 0.973, 0.955, 0.918, 0.715]
tr_st_tv = [0, 0.830134194, 0.669190782, 0.987, 0.951]
admm_st_tv = [0, 0.622728045, 0.669190782, 0.865, 0.001]
width = 0.15
# plt.bar(np.array([1 - width * 2, 2 - width * 2, 3 - width * 2, 4 - width, 5 - width, 6 - width, 7 - width,
# 8 - width / 2 * 5, 9 - width / 2 * 5]), grape_sur_tv, alpha=0.9, width=width,
# hatch='/', edgecolor='black', label='GRAPE+SUR+ALB')
# plt.bar(np.array([1 - width, 2 - width, 3 - width, 4, 5, 6, 7,
# 8 - width / 2 * 3, 9 - width / 2 * 3]), tr_sur_tv, alpha=0.9, width=width,
# hatch='\\', edgecolor='black', label='TR+SUR+ALB')
# plt.bar(np.array([1, 2, 3, 4 + width, 5 + width, 6 + width, 7 + width,
# 8 - width / 2, 9 - width / 2]), admm_sur_tv, alpha=0.9, width=width,
# hatch='+', edgecolor='black', label='ADMM+SUR+ALB')
# plt.bar(np.array([8 + width / 2, 9 + width / 2]), pgrape_sur_tv, alpha=0.9, width=width,
# hatch='o', edgecolor='black', label='p-GRAPE+SUR+ALB')
# plt.bar(np.array([1 + width, 2 + width, 3 + width, 8 + width / 2 * 3, 9 + width / 2 * 3]), tr_st_tv, alpha=0.9,
# width=width, hatch='.', edgecolor='black', label='TR+ST')
# plt.bar(np.array([1 + width * 2, 2 + width * 2, 3 + width * 2, 8 + width / 2 * 5, 9 + width / 2 * 5]), admm_st_tv,
# alpha=0.9, width=width, hatch='*', edgecolor='black', label='ADMM+ST')
plt.bar(np.array([1 - width, 2 - width, 3 - width, 4 - width, 5 - width, 6 - width, 7 - width,
8 - width / 2 * 3, 9 - width / 2 * 3]), grape_sur_tv, alpha=0.9, width=width,
hatch='/', edgecolor='black', label='GRAPE+SUR+ALB')
plt.bar(np.array([1, 2, 3, 4, 5, 6, 7, 8 - width / 2, 9 - width / 2]), tr_sur_tv, alpha=0.9, width=width,
hatch='\\', edgecolor='black', label='TR+SUR+ALB')
plt.bar(np.array([1 + width, 2 + width, 3 + width, 4 + width, 5 + width, 6 + width, 7 + width,
8 + width / 2, 9 + width / 2]), admm_sur_tv, alpha=0.9, width=width,
hatch='+', edgecolor='black', label='ADMM+SUR+ALB')
plt.bar(np.array([8 + width / 2 * 3, 9 + width / 2 * 3]), pgrape_sur_tv, alpha=0.9, width=width,
hatch='o', edgecolor='black', label='p-GRAPE+SUR+ALB')
x_loc = plt.MultipleLocator(1)
ax = plt.gca()
ax.xaxis.set_major_locator(x_loc)
plt.xticks(instance, instance_name)
plt.legend(loc='center left', bbox_to_anchor=(1, 0.5))
plt.tight_layout()
# plt.show()
plt.savefig("../figure_paper/Rounding_improve_all_instances_new.png")
def draw_mt_improve():
plt.figure(figsize=(15, 6), dpi=300)
instance = np.array([1, 2, 3, 4, 5, 6, 7, 8, 9])
instance_name = ["Energy2", "Energy4", "Energy6", "CNOT5", "CNOT10", "CNOT15", "CNOT20", "CircuitH2", "CircuitLiH"]
grape_mt = [0.997, 0.834465772, 0.554418567, 0.805, 0.996, 0.994, 0.999, 0.992, 0.96]
pgrape_mt = [0.755, 0.998]
tr_mt = [0.997, 0.834465772, 0.554418567, 0.804, 0.991, 0.994, 0.999, 0.993, 0.504]
admm_mt = [0.959, 0.81654493, 0.535016987, 0.805, 0.994, 0.999, 0.999, 0.946, 0.645]
tr_st_mt = [0, 0.837013759, 0.621341738, 0.995, 0.593]
admm_st_mt = [0, 0.635722779, 0.621341738, 0.869, 0.001]
width = 0.15
# plt.bar(np.array([1 - width * 2, 2 - width * 2, 3 - width * 2, 4 - width, 5 - width, 6 - width, 7 - width,
# 8 - width / 2 * 5, 9 - width / 2 * 5]), grape_mt, alpha=0.9, width=width,
# hatch='/', edgecolor='black', label='GRAPE+MT+ALB')
# plt.bar(np.array([1 - width, 2 - width, 3 - width, 4, 5, 6, 7,
# 8 - width / 2 * 3, 9 - width / 2 * 3]), tr_mt, alpha=0.9, width=width,
# hatch='\\', edgecolor='black', label='TR+MT+ALB')
# plt.bar(np.array([1, 2, 3, 4 + width, 5 + width, 6 + width, 7 + width,
# 8 - width / 2, 9 - width / 2]), admm_mt, alpha=0.9, width=width,
# hatch='+', edgecolor='black', label='ADMM+MT+ALB')
# plt.bar(np.array([8 + width / 2, 9 + width / 2]), pgrape_mt, alpha=0.9, width=width,
# hatch='o', edgecolor='black', label='p-GRAPE+MT+ALB')
# plt.bar(np.array([1 + width, 2 + width, 3 + width, 8 + width / 2 * 3, 9 + width / 2 * 3]), tr_st_mt, alpha=0.9,
# width=width, hatch='.', edgecolor='black', label='TR+STMT')
# plt.bar(np.array([1 + width * 2, 2 + width * 2, 3 + width * 2, 8 + width / 2 * 5, 9 + width / 2 * 5]), admm_st_mt,
# alpha=0.9, width=width, hatch='*', edgecolor='black', label='ADMM+STMT')
plt.bar(np.array([1 - width, 2 - width, 3 - width, 4 - width, 5 - width, 6 - width, 7 - width,
8 - width / 2 * 3, 9 - width / 2 * 3]), grape_mt, alpha=0.9, width=width,
hatch='/', edgecolor='black', label='GRAPE+MT+ALB')
plt.bar(np.array([1, 2, 3, 4, 5, 6, 7, 8 - width / 2, 9 - width / 2]), tr_mt, alpha=0.9, width=width,
hatch='\\', edgecolor='black', label='TR+MT+ALB')
plt.bar(np.array([1 + width, 2 + width, 3 + width, 4 + width, 5 + width, 6 + width, 7 + width,
8 + width / 2, 9 + width / 2]), admm_mt, alpha=0.9, width=width,
hatch='+', edgecolor='black', label='ADMM+MT+ALB')
plt.bar(np.array([8 + width / 2 * 3, 9 + width / 2 * 3]), pgrape_mt, alpha=0.9, width=width,
hatch='o', edgecolor='black', label='p-GRAPE+MT+ALB')
x_loc = plt.MultipleLocator(1)
ax = plt.gca()
ax.xaxis.set_major_locator(x_loc)
plt.xticks(instance, instance_name)
plt.legend(loc='center left', bbox_to_anchor=(1, 0.5))
plt.tight_layout()
# plt.show()
plt.savefig("../figure_paper/Minup_time_improve_all_instances_new.png")
def draw_ms_improve():
plt.figure(figsize=(15, 6), dpi=300)
instance = np.array([1, 2, 3, 4, 5, 6, 7, 8, 9])
instance_name = ["Energy2", "Energy4", "Energy6", "CNOT5", "CNOT10", "CNOT15", "CNOT20", "CircuitH2", "CircuitLiH"]
grape_ms = [0.999, 0.857057924, 0.775408293, 0.83, 0.999, 0.999, 0.9990531, 0.997, 0.994]
pgrape_ms = [0.986, 0.835]
tr_ms = [0.998, 0.857482589, 0.773893823, 0.827, 0.999, 0.997, 0.997, 0.997, 0.88]
admm_ms = [0.997, 0.840496008, 0.767835946, 0.828, 0.999, 0.998, 0.9992551, 0.992, 0.979]
width = 0.15
plt.bar(np.array([1 - width, 2 - width, 3 - width, 4 - width, 5 - width, 6 - width, 7 - width,
8 - width / 2 * 3, 9 - width / 2 * 3]), grape_ms, alpha=0.9, width=width,
hatch='/', edgecolor='black', label='GRAPE+MS+ALB')
plt.bar(np.array([1, 2, 3, 4, 5, 6, 7, 8 - width / 2, 9 - width / 2]), tr_ms, alpha=0.9, width=width,
hatch='\\', edgecolor='black', label='TR+MS+ALB')
plt.bar(np.array([1 + width, 2 + width, 3 + width, 4 + width, 5 + width, 6 + width, 7 + width,
8 + width / 2, 9 + width / 2]), admm_ms, alpha=0.9, width=width,
hatch='+', edgecolor='black', label='ADMM+MS+ALB')
plt.bar(np.array([8 + width / 2 * 3, 9 + width / 2 * 3]), pgrape_ms, alpha=0.9, width=width,
hatch='o', edgecolor='black', label='p-GRAPE+MS+ALB')
x_loc = plt.MultipleLocator(1)
ax = plt.gca()
ax.xaxis.set_major_locator(x_loc)
plt.xticks(instance, instance_name)
plt.legend(loc='center left', bbox_to_anchor=(1, 0.5))
plt.tight_layout()
# plt.show()
plt.savefig("../figure_paper/Max_switching_improve_all_instances.png")
def draw_sur():
plt.figure(figsize=(15, 6), dpi=300)
instance = np.array([1, 2, 3, 4, 5, 6, 7, 8, 9])
instance_name = ["Energy2", "Energy4", "Energy6", "CNOT5", "CNOT10", "CNOT15", "CNOT20", "CircuitH2", "CircuitLiH"]
grape_sur_tv = [0.459, 0.745625955, 0.709303753, 0.670, 0.883, 0.972, 0.950, -0.062, -0.171]
pgrape_sur_tv = [0.941, 0.452]
tr_sur_tv = [0.455, 0.722694072, 0.6995211, 0.428, 0.883, 0.970, 0.951, 0.937, 0.438]
admm_sur_tv = [0.519, 0.662051979, 0.676312881, 0.400, 0.913, 0.969, 0.952, 0.918, 0.715]
tr_st_tv = [0, 0.830134194, 0.669190782, 0.987, 0.951]
admm_st_tv = [0, 0.622728045, 0.669190782, 0.865, 0.001]
width = 0.15
# plt.bar(np.array([1 - width, 2 - width, 3 - width, 4 - width, 5 - width, 6 - width, 7 - width,
# 8 - width / 2 * 3, 9 - width / 2 * 3]), grape_sur_tv, alpha=0.9, width=width,
# hatch='/', edgecolor='black', label='GRAPE+SUR')
# plt.bar(np.array([1, 2, 3, 4, 5, 6, 7, 8 - width / 2, 9 - width / 2]), tr_sur_tv, alpha=0.9, width=width,
# hatch='\\', edgecolor='black', label='TR+SUR')
# plt.bar(np.array([1 + width, 2 + width, 3 + width, 4 + width, 5 + width, 6 + width, 7 + width,
# 8 + width / 2, 9 + width / 2]), admm_sur_tv, alpha=0.9, width=width,
# hatch='+', edgecolor='black', label='ADMM+SUR')
# plt.bar(np.array([8 + width / 2 * 3, 9 + width / 2 * 3]), pgrape_sur_tv, alpha=0.9, width=width,
# hatch='o', edgecolor='black', label='p-GRAPE+SUR')
plt.bar(np.array([1 - width * 2, 2 - width * 2, 3 - width * 2, 4 - width, 5 - width, 6 - width, 7 - width,
8 - width / 2 * 5, 9 - width / 2 * 5]), grape_sur_tv, alpha=0.9, width=width,
hatch='/', edgecolor='black', label='GRAPE+SUR')
plt.bar(np.array([1 - width, 2 - width, 3 - | |
need be
if not os.path.exists(cachedir): # make cachdir if not exist
os.mkdir(cachedir)
if not os.path.isdir(cachedir): # is not a directory
logging.error("%s is not a directory, exiting...", cachedir)
sys.exit(1)
#
# Trigger an update based on whether the interval (tstart,tstop)
# is covered in the existing cached file based on time of last update
# and maxIdleSeconds given when the file was fetched. The attribute
# 'activeSinceDate' is then maxIdleSeconds prior to last update time
#
if os.path.exists(channel_file): # trigger update based on mtime
statinfo = os.stat(channel_file)
mode = statinfo.st_mode
if not stat.S_IWUSR & mode: # not writeable
os.chmod(channel_file, mode | stat.S_IWUSR)
delta = int(time.time() - statinfo.st_mtime)
logging.debug("existing cache age: %d (s)", delta)
chfile = open(channel_file, mode="rb")
xmlstring = chfile.read()
chfile.close()
trending_tz = sites[site]["tz"]
root = etree.fromstring(xmlstring)
active_since = root.attrib.get("activeSinceDate")
if active_since: # parse, convert and compare to tstart
xml_start_epoch = parse_datestr(active_since)
logging.debug(
"%s channels active_since: %s",
channel_file,
datetime.fromtimestamp(xml_start_epoch, gettz(trending_tz)).isoformat(
timespec="seconds"
),
)
# If tstart is inside the interval: [xml_start, last_update]
# then can guarantee desired channels were being published
# and hence are already in the cached file.
if tstart and xml_start_epoch < tstart < statinfo.st_mtime + 43200:
if not tstop or tstop < (statinfo.st_mtime + 86400):
update = False
if update:
logging.info("updating cached channel_file...")
if tstart:
xstart = tstart - 86400 # adjust to 24h earlier
maxidle = int(time.time() - xstart)
else:
maxidle = 3600 * 24 * 7 # give it a week
xstart = int(time.time() - maxidle)
xmlstring = get_all_channels(site, maxidle)
root = etree.fromstring(xmlstring)
# attributes in channel_file are from datachannels tag
# <datachannels
# activeSinceDate="2020-08-21T11:22:35.241-07:00"
# host="172.16.31.10"
# port="8080">
#
active_since = root.attrib.get("activeSinceDate")
if not active_since: # needed until service adds this attrib
active_since_str = datetime.fromtimestamp(
xstart, gettz(trending_tz)
).isoformat(timespec="seconds")
root.set("activeSinceDate", active_since_str)
logging.warning(
"setting activeSinceDate= %s (missing in res)", active_since_str
)
else:
logging.debug("activeSinceDate= %s found", active_since)
if xmlstring:
tree = etree.ElementTree(root)
tree.write(
channel_file,
xml_declaration=True,
encoding="UTF-8",
pretty_print=False,
standalone="yes",
)
else:
logging.debug("returning existing channel_file=%s", channel_file)
return channel_file
def convert_to_seconds(duration_str) -> int:
"""
    Convert a duration string with suffix (s)econds, (m)inutes, (h)ours,
    (d)ays, or (w)eeks to a duration in seconds.
"""
seconds = 0
if re.match(r"[0-9]+$", duration_str):
seconds = int(duration_str)
elif re.match(r"[0-9]+s$", duration_str):
seconds = int(duration_str[:-1])
elif re.match(r"[0-9]+m$", duration_str):
seconds = 60 * int(duration_str[:-1])
elif re.match(r"[0-9]+h$", duration_str):
seconds = 3600 * int(duration_str[:-1])
elif re.match(r"[0-9]+d$", duration_str):
seconds = 86400 * int(duration_str[:-1])
elif re.match(r"[0-9]+w$", duration_str):
seconds = 7 * 86400 * int(duration_str[:-1])
return seconds
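# Example conversions (a sketch, not part of the original module):
#   convert_to_seconds("90")  -> 90
#   convert_to_seconds("15m") -> 900
#   convert_to_seconds("2h")  -> 7200
#   convert_to_seconds("1w")  -> 604800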
def long_substr(data: list) -> str:
"""
https://stackoverflow.com/questions/2892931/\
longest-common-substring-from-more-than-two-strings-python#
"""
substr = ""
if len(data) > 1 and len(data[0]) > 0:
for i in range(len(data[0])):
for j in range(len(data[0]) - i + 1):
if j > len(substr) and all(data[0][i : i + j] in x for x in data):
substr = data[0][i : i + j]
return substr
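# Example (a sketch, not part of the original module):
#   long_substr(["focal-plane/R22/Reb0/Temp3", "focal-plane/R22/Reb1/Temp3"])
#   returns "focal-plane/R22/Reb", the longest substring common to every entry.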
def query_rest_server(ts1, ts2, data_url, idstr, nbins):
"""get xml from restful interface for the requested channels
with a single request
inputs:
t1, t2 are start/stop time in ms
data_url is "server-url:port/default-path"
idstr is all channel ids as '&id='.join(id for id in oflds)
nbins: request raw (=None) or binned data from DB
output: raw xml response from the service is returned
"""
s = requests.Session()
if nbins is None: # raw data
options = {"t1": int(ts1), "t2": int(ts2), "flavor": "raw", "n": 1}
else: # CCS stat data
options = {"t1": int(ts1), "t2": int(ts2), "flavor": "stat", "n": int(nbins)}
uri = "{}/data/?id={}".format(data_url, idstr)
t_start = time.time()
try:
resp = s.get(uri, params=options)
    except requests.ConnectionError as e:
        logging.error("ConnectionError: %s", e)
        logging.error("check status of ssh tunnel to trending server")
        return None
if resp.status_code != 200:
logging.error("invalid response %s from Trending Server", resp.status_code)
return None
# logging.debug('URL=%s', resp.url)
logging.debug("channels: %s", re.sub(r"(id=)?([0-9]+)&*", r"\2 ", idstr))
logging.debug("dt=%.3f seconds", (time.time() - t_start))
s.close()
return resp.content
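# Illustrative request (a sketch; host, port and channel ids here are made up):
#   query_rest_server(t1_ms, t2_ms, "http://localhost:8080/rest/data/dataserver",
#                     "15176&id=2372", None)
# issues GET <data_url>/data/?id=15176&id=2372 with params t1, t2, flavor=raw, n=1.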
# def get_unique_time_intervals(optlist):
def get_unique_time_intervals(starts=None, stops=None, intervalarr=None, duration=None):
"""
Input: Command line options defining a set of intervals
using pairs or start/stop with duration,
    empty input will return 1 default interval
The set of intervals as ordered pairs in seconds are processed
to merge overlapping periods yielding distinct intervals.
    Output: An ordered list of non-overlapping periods is returned
as [[t00,t01], [t10,t11], ...,[tn0,tn1]]
"""
intervals = []
if starts:
for start in starts:
(t1, t2) = get_time_interval(start, None, duration)
intervals.append([t1, t2])
elif stops:
for stop in stops:
(t1, t2) = get_time_interval(None, stop, duration)
intervals.append([t1, t2])
elif intervalarr:
for interval in intervalarr:
(t1, t2) = get_time_interval(interval[0], interval[1])
intervals.append([t1, t2])
else:
(t1, t2) = get_time_interval(None, None, duration)
intervals.append([t1, t2])
for interval in intervals:
if interval[0] is None or interval[1] is None:
logging.error("Date assignment failed")
return None
i = 0
for interval in intervals:
logging.debug(
"time interval[%d] (before merge): %d -- %d (%d sec)",
i,
interval[0],
interval[1],
(interval[1] - interval[0]) / 1000,
)
i += 1
# merge overlaps to generate list of distinct intervals
intervals.sort() # sorts so that intervals[i][0] <= intervals[i+1][0]
i = 1
while i < len(intervals): # loop over pairs of intervals
if intervals[i - 1][1] >= intervals[i][0]:
intervals[i][0] = intervals[i - 1][0] # move left edge down
if intervals[i - 1][1] > intervals[i][1]:
intervals[i][1] = intervals[i - 1][1] # move right edge up
del intervals[i - 1] # delete the 1st of the pair
else:
i += 1 # no overlap so move to next pair
i = 0
for interval in intervals:
logging.debug(
"time interval[%d] (after merge): %d -- %d (%d sec)",
i,
interval[0],
interval[1],
(interval[1] - interval[0]) / 1000,
)
i += 1
return intervals
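# Example of the merge (a sketch, not part of the original module): two requested
# intervals [1000, 5000] and [4000, 9000] (ms) overlap, so they are collapsed into
# the single distinct interval [[1000, 9000]] before being returned.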
def get_channel_dict(channel_file):
"""
build a channel dictionary for all channels
    structure of channel_file is:
0: datachannels [-]
1: datachannel [-]
2: path [-]
3: pathelement [-]
2: id [-]
2: metadata [name, value]
"""
logging.debug("building full channel dictionary from %s", channel_file)
cdict = dict()
tree = etree.parse(channel_file)
root = tree.getroot()
for dchan in root.iterfind("datachannel"):
chid = dchan.find("id").text
# build path
parr = [] # list to hold path elements
pp = dchan.find("path")
for pe in pp.iterfind("pathelement"):
if pe.text: # work-around for problem xxx
parr.append(pe.text)
path = "/".join(parr)
if path and chid: # create entry in dict
cdict[chid] = path
logging.debug("channel dict contains %d active channels", len(cdict))
del tree
return cdict
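# Illustrative entry (a sketch; id and path are example values taken from the
# comments further below):
#   cdict["15176"] = "focal-plane/R22/Reb0/Temp3"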
def parse_channel_sources(sources: list, channel_cache: str) -> tuple:
"""
    Convert a list of sources into the channels to process.
inputs:
sources is a list of either filenames or regular expressions
channel_cache is the filename where the channel id map is cached
returns:
fields dict in form {id:path}
regexes list of regexes from sources that had matching channels
"""
if not sources:
return None, None
oflds_f = get_chanids_from_files(sources)
if oflds_f:
logging.debug("found %d valid channels from input files", len(oflds_f))
return oflds_f, None
oflds_r, regexes = get_chanids_from_regexes(sources, channel_cache)
if oflds_r:
logging.debug("found valid channels from channel patterns")
return oflds_r, regexes
return None, None
def get_chanids_from_files(filelist: list) -> dict:
"""
    Convert a list of sources into the channels to process.
inputs:
sources is a list of either filenames or regular expressions
output:
fields dict in form {id:path}
"""
# loop over filelist to define the channels for query/output
# using a file with 4 fields per line (after comment removal) where
# the line format is '\s+id:\s+<chan_id>\s+path:\s+<path>$'
# example: " id: 15176 path: focal-plane/R22/Reb0/Temp3"
#
# channels listed are used to construct a dict() to be returned
# oflds[channel_id_num] = trending_full_path
# eg: oflds[2372] = aliveness-fp/R00/Reb2/RGL
#
# from https://stackoverflow.com/questions/16710076
# regex to split a string preserving quoted fields
#
rpat = re.compile(
r"""
(?:[^\s"']+)| # match non-delimiter
"(?:\\.|[^"]*)"| # match double quoted
'(?:\\.|[^']*)' # match single quoted
""",
re.X,
)
oflds = dict()
for csource in filelist:
logging.debug("channel_source= %s", csource)
# test to determine type of channel_source
#
logging.debug("test for formatted input file...")
try:
cf = open(csource, mode="r")
except OSError as e:
logging.debug("open(%s) failed: %s", csource, e)
return None
else:
# populate oflds[id] with the corresponding channel path
for line in cf:
if re.match(r"^\s*#", line): # skip block comment
continue
if re.match(r"^\s*$", line): # skip white space line
continue
                # strip inline comment
sline = re.sub(r"""(#[^\'^"]*$)""", "", line)
* wt
residual = (mtb - tb) / tb_err
return residual
class RegionSelector:
# def set_errorobj(self, xout, yout, errobj, yerr):
# eospec, dummy, (errbar_eospec,) = errobj
# eospec.set_data(xout, yout)
# if yerr is not None:
# yerr_top = yout + yerr
# yerr_bot = yout - yerr
# new_segments_y = [np.array([[x, yt], [x, yb]]) for x, yt, yb in zip(xout, yerr_top, yerr_bot)]
# errbar_eospec.set_segments(new_segments_y)
# return 1
def subdata(self, xs, ys, rfile):
rmap, rdata, rheader, ndim, npol_fits, stokaxis, rfreqs, rdelts = ndfits.read(rfile)
ny, nx = rmap.data.shape
tr_coord = rmap.top_right_coord
bl_coord = rmap.bottom_left_coord
x0 = bl_coord.Tx.to(u.arcsec).value
y0 = bl_coord.Ty.to(u.arcsec).value
x1 = tr_coord.Tx.to(u.arcsec).value
y1 = tr_coord.Ty.to(u.arcsec).value
dx = rmap.scale.axis1.to(u.arcsec / u.pix).value
dy = rmap.scale.axis2.to(u.arcsec / u.pix).value
mapx, mapy = np.linspace(x0, x1, nx) - dx / 2.0, np.linspace(y0, y1, ny) - dy / 2.0
xsmin = np.nanmin(xs)
xsmax = np.nanmax(xs)
ysmin = np.nanmin(ys)
ysmax = np.nanmax(ys)
if np.abs(xsmax - xsmin) < dx:
xsmax = xsmin + dx
if np.abs(ysmax - ysmin) < dy:
ysmax = ysmin + dy
xmask = np.logical_and(mapx >= xsmin, mapx <= xsmax)
nxnew = np.count_nonzero(xmask)
ymask = np.logical_and(mapy >= ysmin, mapy <= ysmax)
nynew = np.count_nonzero(ymask)
xmask = np.tile(xmask, ny).reshape(ny, nx)
ymask = np.tile(ymask, nx).reshape(nx, ny).transpose()
mask = xmask & ymask
# print(np.count_nonzero(mask))
self.npix = np.count_nonzero(mask)
self.area = self.npix * dx * dy
data = rdata[:, mask]
# print(rdata[:, :, mask])
# print(mask.shape, rdata.shape, data.shape)
data = np.squeeze(data)
# print(data.shape)
return data
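    # Illustrative note (added): subdata() crops the image cube to the clicked box.
    # For example, with dx = dy = 2 arcsec pixels and a selected box spanning
    # 4 x 6 arcsec, roughly 2 x 3 = 6 pixels survive the mask, so self.npix ~ 6
    # and self.area ~ 6 * 2 * 2 = 24 arcsec**2.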
def __init__(self, clkpnts, boxlines, eofiles, errobjs, cfreqs=None, rms=None, eofile_ref=None, errobj_ref=None,
wTmap=None, outspec_ff=None, scatter_gsfit=None,
get_peak=False, get_sum=False):
self.boxline = []
self.clkpnt = []
self.xs = list(clkpnts[0].get_xdata())
self.ys = list(clkpnts[0].get_ydata())
self.npix = None
self.area = None
self.xout = []
self.yout = []
self.xouterr = []
self.youterr = []
for errobj in errobjs:
eospec, dummy, (errbar_eospec,) = errobj
self.xout.append(eospec.get_xdata())
self.yout.append(eospec.get_ydata())
self.errobjs = errobjs
self.errobj_ref = errobj_ref
self.outspec_ff = outspec_ff
self.scatter_gsfit = scatter_gsfit
self.cfreqs = cfreqs
self.rms = rms
self.eofiles = eofiles
self.eofile_ref = eofile_ref
self.wTmap = wTmap
self.wT = None
self.em = None
self.get_peak = get_peak
self.get_sum = get_sum
self.tps = []
self.params = None
for idx, s in enumerate(clkpnts):
self.boxline.append(boxlines[idx])
self.clkpnt.append(s)
self.cid = s.figure.canvas.mpl_connect('button_press_event', self)
def __call__(self, event):
axes = [clkpnt.axes for clkpnt in self.clkpnt]
if self.clkpnt[0].figure.canvas.toolbar.mode == '':
if event.inaxes not in axes:
return
nxs = len(self.xs)
if event.button == 1:
if nxs < 2:
self.xs.append(event.xdata)
self.ys.append(event.ydata)
else:
self.xs = [event.xdata]
self.ys = [event.ydata]
elif event.button == 3:
if len(self.xs) > 0:
self.xs.pop()
self.ys.pop()
self.get_flux()
def get_flux(self):
if len(self.xs) > 0:
xs = np.array(self.xs, dtype=np.float64)
ys = np.array(self.ys, dtype=np.float64)
for clkpnt in self.clkpnt:
clkpnt.set_data(xs, ys)
else:
for clkpnt in self.clkpnt:
clkpnt.set_data([], [])
nxs = len(self.xs)
if nxs <= 1:
for line in self.boxline:
line.set_data([], [])
elif nxs == 2:
datas = []
# eofile = self.eofiles[0]
# rmap, rdata, rheader, ndim, npol_fits, stokaxis, rfreqs, rdelts = ndfits.read(eofile)
# data = self.subdata(xs, ys, eofile)
# datas.append(data)
for tidx, eofile in enumerate(self.eofiles):
data = self.subdata(xs, ys, eofile)
datas.append(data)
if self.eofile_ref is not None:
data_ref = self.subdata(xs, ys, self.eofile_ref)
if self.wTmap is not None:
datawT = self.subdata(xs, ys, self.wTmap)
if self.get_peak:
youts_outspec = []
for data in datas:
if data.ndim > 1:
youts_outspec.append(np.nanmax(data, axis=-1) / 1e6)
else:
youts_outspec.append(data / 1e6)
if self.eofile_ref is not None:
                    youts_outspec_ref = np.nanmax(data_ref, axis=-1) / 1e6 if data_ref.ndim > 1 else data_ref / 1e6
else:
youts_outspec = []
for data in datas:
if data.ndim > 1:
youts_outspec.append(np.nanmean(data, axis=-1) / 1e6)
else:
youts_outspec.append(data / 1e6)
if self.eofile_ref is not None:
                    if data_ref.ndim > 1:
youts_outspec_ref = np.nanmean(data_ref, axis=-1) / 1e6
else:
youts_outspec_ref = data_ref / 1e6
self.tps = []
for data in datas:
if data.ndim > 1:
self.tps.append(np.nansum(data, axis=-1) / 1e6)
else:
self.tps.append(data / 1e6)
xout = self.cfreqs
for tidx, errobj in enumerate(self.errobjs):
set_errorobj(xout, youts_outspec[tidx], errobj, self.rms)
if self.eofile_ref is not None:
set_errorobj(xout, youts_outspec_ref, self.errobj_ref, self.rms)
if self.wTmap is not None:
print(datawT.shape)
wT = np.nanmean(datawT[..., 1]) * 1e6
em = np.nanmean(datawT[..., 0])
arcsec2cm = (self.wTmap[0].rsun_meters / self.wTmap[0].rsun_obs).to(u.cm / u.arcsec).value
# nele = 4.0e10
# depth = em / nele ** 2 / arcsec2cm
# print('Temperature: {:.1f} MK, EM: {:.2e} cm-5, depth: {:.1f} arcsec if nele is {:.2e} cm-3'.format(wT / 1e6, em, depth, nele))
depth = 20. ## arcsec
nele = np.sqrt(em / (depth * arcsec2cm))
print('Temperature: {:.1f} MK, EM: {:.2e} cm-5, nele: {:.2e} cm-3 if depth is {:.1f} arcsec'.format(
wT / 1e6, em, nele, depth))
self.wT = wT
self.em = em
yout_ff = np.array([ff_emission(em, T=wT, Z=1., mu=ll) for ll in xout * 1e9]) / 1.e6
self.outspec_ff.set_data(xout, yout_ff)
self.errobjs[0][0].figure.canvas.draw_idle()
for line in self.boxline:
line.set_data([xs[0], xs[1], xs[1], xs[0], xs[0]], [ys[0], ys[0], ys[1], ys[1], ys[0]])
clkpnt.figure.canvas.draw_idle()
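# Note (added for clarity): the weighted-temperature block above assumes the
# column emission measure relation EM = nele**2 * depth, so for an assumed
# line-of-sight depth the electron density follows as
#   nele = sqrt(EM / (depth_arcsec * arcsec2cm))   [cm**-3]
# where arcsec2cm converts the hard-coded 20-arcsec depth assumption into centimetres.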
class GStool:
# def get_showaia(self):
# return self._showaia
#
# def set_showaia(self, value):
# self._showaia = value
#
# showaia = property(fget=get_showaia, fset=set_showaia, doc="`Boolean`-like: Display AIA image or not")
def __init__(self, eofiles, aiafile=None, xycen=None, fov=None, freqghz_bound=[-1, 100], calpha=0.5,
clevels=np.array([0.3, 1.0]), opencontour=None):
self.aiafile = aiafile
self.eofiles = eofiles
self.xycen = xycen
self.fov = fov
self.calpha = calpha
self.clevels = clevels
self.freqghz_bound = freqghz_bound
self.opencontour = opencontour
self._showaia = False
rmap, rdata, rheader, ndim, npol_fits, stokaxis, rfreqs, rdelts = ndfits.read(eofiles[0])
self.bdinfo = bdinfo = ndfits.get_bdinfo(rfreqs, rdelts)
self.cfreqs = cfreqs = bdinfo['cfreqs']
self.cfreqs_all = cfreqs_all = bdinfo['cfreqs_all']
self.freq_dist = lambda fq: (fq - cfreqs_all[0]) / (cfreqs_all[-1] - cfreqs_all[0])
self.ntim = ntim = len(eofiles)
self.xlim = xlim = xycen[0] + np.array([-1, 1]) * 0.5 * fov[0]
self.ylim = ylim = xycen[1] + np.array([-1, 1]) * 0.5 * fov[1]
nspw = len(rfreqs)
eodate = Time(rmap.date.mjd + rmap.exposure_time.value / 2. / 24 / 3600, format='mjd')
ny, nx = rmap.data.shape
x0, x1 = (np.array([1, rmap.meta['NAXIS1']]) - rmap.meta['CRPIX1']) * rmap.meta['CDELT1'] + \
rmap.meta['CRVAL1']
y0, y1 = (np.array([1, rmap.meta['NAXIS2']]) - rmap.meta['CRPIX2']) * rmap.meta['CDELT2'] + \
rmap.meta['CRVAL2']
dx = rmap.meta['CDELT1']
dy = rmap.meta['CDELT2']
mapx, mapy = np.linspace(x0, x1, nx), np.linspace(y0, y1, ny)
fig = plt.figure(figsize=(15, 6))
self.fig = fig
grids = fig.add_gridspec(ncols=3, nrows=1, width_ratios=[1, 1, 0.6])
self.grids = grids
axs = []
axs.append(fig.add_subplot(grids[0, 0]))
axs.append(fig.add_subplot(grids[0, 1], sharex=axs[-1], sharey=axs[-1]))
axs.append(fig.add_subplot(grids[0, 2]))
if aiafile:
if os.path.exists(aiafile):
try:
aiacmap = plt.get_cmap('gray_r')
aiamap = smap.Map(aiafile)
ax = axs[0]
aiamap.plot(axes=ax, cmap=aiacmap)
ax = axs[1]
aiamap.plot(axes=ax, cmap=aiacmap)
self._showaia = True
                except Exception:
self._showaia = False
if self._showaia:
if self.opencontour is None:
self.opencontour = False
else:
if self.opencontour is None:
self.opencontour = True
## Plot EOVSA images as filled contour on top of the AIA image
icmap = plt.get_cmap('RdYlBu')
cts = []
## color map for spectra from the image series
tcmap = plt.get_cmap('turbo')
for s, sp in enumerate(rfreqs):
data = rdata[s, ...]
clvls = clevels * np.nanmax(data)
rcmap = [icmap(self.freq_dist(self.cfreqs[s]))] * len(clvls)
if self.opencontour:
cts.append(ax.contour(mapx, mapy, data, levels=clvls,
colors=rcmap,
alpha=calpha))
else:
cts.append(ax.contourf(mapx, mapy, data, levels=clvls,
colors=rcmap,
alpha=calpha))
ax.set_xlim(self.xlim)
ax.set_ylim(self.ylim)
for ax in axs[:2]:
ax.set_xlabel('Solar-X [arcsec]')
            ax.set_ylabel('Solar-Y [arcsec]')
ax.set_title('')
ax.text(0.02, 0.01,
' '.join(['AIA {:.0f} Å'.format(aiamap.wavelength.value),
aiamap.date.datetime.strftime('%Y-%m-%dT%H:%M:%S')]),
ha='left',
va='bottom',
color='k', transform=ax.transAxes)
ax.text(0.02, 0.05, ' '.join(['EOVSA ', eodate.datetime.strftime('%Y-%m-%dT%H:%M:%S')]), ha='left',
va='bottom',
color='k', transform=ax.transAxes)
divider = make_axes_locatable(axs[0])
cax = divider.append_axes("right", size="8%", pad=0.08)
cax.set_visible(False)
divider = make_axes_locatable(axs[1])
cax = divider.append_axes("right", size="8%", pad=0.08)
ticks, bounds, vmax, vmin, freqmask = ql.get_colorbar_params(bdinfo)
cb = colorbar.ColorbarBase(cax, norm=colors.Normalize(vmin=vmin, vmax=vmax), cmap=icmap,
orientation='vertical', boundaries=bounds, spacing='proportional',
ticks=ticks, format='%4.1f', alpha=calpha)
for fbd_lo, fbd_hi in freqmask:
if fbd_hi is not None:
cax.axhspan(fbd_lo, fbd_hi, hatch='//', edgecolor='k', facecolor='#BBBBBB')
plt.text(0.5, 1.05, 'MW', ha='center', va='bottom', transform=cax.transAxes, color='k', fontweight='normal')
plt.text(0.5, 1.01, '[GHz]', ha='center', va='bottom', transform=cax.transAxes, color='k',
fontweight='normal')
cax.xaxis.set_visible(False)
cax.tick_params(axis="y", pad=-20., length=0, colors='k', labelsize=7)
cax.axhline(vmin, xmin=1.0, xmax=1.2, color='k', clip_on=False)
cax.axhline(vmax, xmin=1.0, xmax=1.2, color='k', clip_on=False)
cax.text(1.25, 0.0, '{:.1f}'.format(vmin), fontsize=9, transform=cax.transAxes, va='center', ha='left')
cax.text(1.25, 1.0, '{:.1f}'.format(vmax), fontsize=9, transform=cax.transAxes, va='center', ha='left')
boxlines = []
clkpnts = []
for idx, ax in enumerate(axs[:2]):
if idx == 0:
c = 'g'
elif idx == 1:
c = 'b'
else:
c = 'k'
line, = ax.plot([], [], '-', c=c, alpha=1.0) # empty line
boxlines.append(line)
clkpnt, = ax.plot([], [], '+', c='white', alpha=0.7) # empty line
clkpnts.append(clkpnt)
if ntim < 2:
cplts = ['k']
else:
cplts = tcmap(np.linspace(0, 1, ntim))
self.cplts = cplts
self.ax_eospec = axs[-1]
errobjs = initspecplot(self.ax_eospec, cplts)
grids.tight_layout(fig)
self.region = RegionSelector(clkpnts, boxlines, eofiles, errobjs, cfreqs=cfreqs, rms=None, wTmap=None)
# by amounra 0714 : http://www.aumhaa.com
from __future__ import with_statement
import Live
import math
import sys
#from re import *
import re
from itertools import imap, chain, starmap
""" _Framework files """
from _Framework.ButtonElement import ButtonElement
from _Framework.ButtonMatrixElement import ButtonMatrixElement
from _Framework.ChannelStripComponent import ChannelStripComponent
from _Framework.ClipSlotComponent import ClipSlotComponent
from _Framework.CompoundComponent import CompoundComponent
from _Framework.ControlElement import ControlElement, ControlElementClient
from _Framework.ControlSurface import ControlSurface
from _Framework.ControlSurfaceComponent import ControlSurfaceComponent
from _Framework.DisplayDataSource import DisplayDataSource
from _Framework.DeviceComponent import DeviceComponent
from _Framework.EncoderElement import EncoderElement
from _Framework.InputControlElement import *
from _Framework.ModeSelectorComponent import ModeSelectorComponent
from _Framework.NotifyingControlElement import NotifyingControlElement
from _Framework.SceneComponent import SceneComponent
from _Framework.SessionComponent import SessionComponent
from _Framework.SessionRecordingComponent import SessionRecordingComponent as BaseSessionRecordingComponent
from _Framework.SliderElement import SliderElement
from _Framework.TransportComponent import TransportComponent
from _Framework.PhysicalDisplayElement import *
from _Framework.SubjectSlot import subject_slot, subject_slot_group
from _Framework.Layer import Layer
from _Framework.Skin import Skin
from _Framework.M4LInterfaceComponent import M4LInterfaceComponent
from _Framework.ComboElement import ComboElement, DoublePressElement, MultiElement, DoublePressContext
from _Framework.ModesComponent import Mode, DisableMode, EnablingModesComponent, DelayMode, AddLayerMode, LayerMode, MultiEntryMode, ModesComponent, SetAttributeMode, ModeButtonBehaviour, CancellableBehaviour, AlternativeBehaviour, ReenterBehaviour, DynamicBehaviourMixin, ExcludingBehaviourMixin, ImmediateBehaviour, LatchingBehaviour, ModeButtonBehaviour
from _Framework.ClipCreator import ClipCreator
from _Framework.Resource import PrioritizedResource
from _Framework.Util import mixin
from _Framework.ViewControlComponent import ViewControlComponent
"""Custom files, overrides, and files from other scripts"""
from _Mono_Framework.MonoBridgeElement import MonoBridgeElement
from _Mono_Framework.MonoDeviceComponent import MonoDeviceComponent
from _Mono_Framework.MonoButtonElement import *
from _Mono_Framework.MonoEncoderElement import MonoEncoderElement
from _Mono_Framework.MonoMixerComponent import MixerComponent
from _Mono_Framework.DeviceNavigator import DeviceNavigator
from _Mono_Framework.TranslationComponent import TranslationComponent
from _Mono_Framework.LividUtilities import LividSettings
from _Mono_Framework.MonoModes import SendLividSysexMode, DisplayMessageMode
from _Mono_Framework.Debug import *
from _Mono_Framework._deprecated.AutoArmComponent import AutoArmComponent
from Map import *
import _Mono_Framework.modRemixNet as RemixNet
import _Mono_Framework.modOSC
#from Push.DrumGroupComponent import DrumGroupComponent
#from Push.StepSeqComponent import StepSeqComponent
#from Push.PlayheadElement import PlayheadElement
#from Push.PlayheadComponent import PlayheadComponent
#from Push.GridResolution import GridResolution
#from Push.ConfigurableButtonElement import ConfigurableButtonElement
#from Push.LoopSelectorComponent import LoopSelectorComponent
#from Push.Actions import CreateInstrumentTrackComponent, CreateDefaultTrackComponent, CaptureAndInsertSceneComponent, DuplicateDetailClipComponent, DuplicateLoopComponent, SelectComponent, DeleteComponent, DeleteSelectedClipComponent, DeleteSelectedSceneComponent, CreateDeviceComponent
#from Push.SkinDefault import make_default_skin
ENCODER_SPEED = [0, 0, 1, 0, 2, 0, 3, 0, 4, 0, 5, 0, 6, 0, 7, 0, 8, 0, 9, 0, 10, 0, 11, 0, 12, 0, 13, 0, 14, 0, 15, 0, 16, 0, 17, 0, 18, 0, 19, 0, 20, 0, 21, 0, 22, 0, 23, 0, 24, 0, 127, 1, 26, 0, 127, 1, 127, 1]
MIDI_NOTE_TYPE = 0
MIDI_CC_TYPE = 1
MIDI_PB_TYPE = 2
MIDI_MSG_TYPES = (MIDI_NOTE_TYPE, MIDI_CC_TYPE, MIDI_PB_TYPE)
MIDI_NOTE_ON_STATUS = 144
MIDI_NOTE_OFF_STATUS = 128
MIDI_CC_STATUS = 176
MIDI_PB_STATUS = 224
def is_device(device):
    return (device is not None and isinstance(device, Live.Device.Device) and hasattr(device, 'name'))
def make_pad_translations(chan):
return tuple((x%4, int(x/4), x+16, chan) for x in range(16))
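# Illustrative note (added): for chan = 0 the generator above yields
# (0, 0, 16, 0), (1, 0, 17, 0), ... (3, 3, 31, 0), i.e. a 4x4 grid of
# (column, row, note, channel) tuples starting at MIDI note 16.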
def return_empty():
return []
debug = initialize_debug()
class SessionRecordingComponent(BaseSessionRecordingComponent):
def __init__(self, *a, **k):
super(SessionRecordingComponent, self).__init__(*a, **k)
self._automation_toggle.view_transform = lambda x: 'Recorder.AutomationOn' if x else 'Recorder.AutomationOff'
def set_record_button(self, button):
button and button.set_on_off_values('Recorder.RecordOn', 'Recorder.RecordOff')
super(SessionRecordingComponent, self).set_record_button(button)
class DS1TransportComponent(TransportComponent):
def set_record_button(self, button, *a, **k):
button and button.set_on_off_values('Transport.RecordOn', 'Transport.RecordOff')
super(DS1TransportComponent, self).set_record_button(button, *a, **k)
def set_play_button(self, button, *a, **k):
button and button.set_on_off_values('Transport.PlayOn', 'Transport.PlayOff')
super(DS1TransportComponent, self).set_play_button(button, *a, **k)
def set_stop_button(self, button, *a, **k):
button and button.set_on_off_values('Transport.StopOn', 'Transport.StopOn')
super(DS1TransportComponent, self).set_stop_button(button, *a, **k)
def set_seek_backward_button(self, button, *a, **k):
button and button.set_on_off_values('Transport.SeekBackwardOn', 'Transport.SeekBackwardOff')
super(DS1TransportComponent, self).set_seek_backward_button(button, *a, **k)
def set_loop_button(self, button, *a, **k):
button and button.set_on_off_values('Transport.LoopOn', 'Transport.LoopOff')
super(DS1TransportComponent, self).set_loop_button(button, *a, **k)
class DS1SessionComponent(SessionComponent):
def set_track_select_dial(self, dial):
self._on_track_select_dial_value.subject = dial
@subject_slot('value')
def _on_track_select_dial_value(self, value):
debug('_on_track_select_dial_value', value)
if value > 64:
self._bank_left()
else:
self._bank_right()
class ToggleModeBehaviour(ModeButtonBehaviour):
def press_immediate(self, component, mode):
debug('selected_mode:', component.selected_mode, 'mode:', mode,)
self.cycle_mode(-1)
class ToggledModesComponent(ModesComponent):
@subject_slot('value')
def _on_toggle_value(self, value):
#debug('mode is:', self.selected_mode)
if value:
self.cycle_mode(-1)
self._on_toggle_value.subject and self._on_toggle_value.subject.set_light('ModeButtons.'+self.selected_mode)
class DS1(ControlSurface):
__module__ = __name__
__doc__ = " DS1 controller script "
def __init__(self, c_instance):
super(DS1, self).__init__(c_instance)
self._connected = False
self._host_name = 'DS1'
self.oscServer = None
self._rgb = 0
self._timer = 0
self.flash_status = 1
self._touched = 0
self._update_linked_device_selection = None
self._skin = Skin(DS1Colors)
with self.component_guard():
self._setup_monobridge()
self._setup_controls()
self._setup_m4l_interface()
self._define_sysex()
self._initialize_hardware()
self._setup_mixer_control()
self._setup_session_control()
self._setup_transport_control()
self._setup_device_control()
self._setup_session_recording_component()
#self._setup_translations()
self._setup_main_modes()
#self._device.add_device_listener(self._on_new_device_set)
self.log_message("<<<<<<<<<<<<<<<<<= DS1 log opened =>>>>>>>>>>>>>>>>>>>>>")
#self.schedule_message(3, self._initialize_hardware)
"""script initialization methods"""
def _initialize_hardware(self):
self.local_control_off.enter_mode()
self.encoder_absolute_mode.enter_mode()
self.encoder_speed_sysex.enter_mode()
def _check_connection(self):
if not self._connected:
self._send_midi(QUERYSURFACE)
self.schedule_message(100, self._check_connection)
def _setup_monobridge(self):
self._monobridge = MonoBridgeElement(self)
self._monobridge.name = 'MonoBridge'
def _setup_controls(self):
is_momentary = True
self._fader = [MonoEncoderElement(MIDI_CC_TYPE, CHANNEL, DS1_FADERS[index], Live.MidiMap.MapMode.absolute, 'Fader_' + str(index), index, self) for index in range(8)]
for fader in self._fader:
fader._mapping_feedback_delay = -1
self._dial = [[MonoEncoderElement(MIDI_CC_TYPE, CHANNEL, DS1_DIALS[x][y], Live.MidiMap.MapMode.absolute, 'Dial_' + str(x) + '_' + str(y), x + (y*5), self) for x in range(8)] for y in range(5)]
for row in self._dial:
for dial in row:
dial._mapping_feedback_delay = -1
self._side_dial = [MonoEncoderElement(MIDI_CC_TYPE, CHANNEL, DS1_SIDE_DIALS[x], Live.MidiMap.MapMode.absolute, 'Side_Dial_' + str(x), x, self) for x in range(4)]
for dial in self._side_dial:
dial._mapping_feedback_delay = -1
self._encoder = [MonoEncoderElement(MIDI_CC_TYPE, CHANNEL, DS1_ENCODERS[x], Live.MidiMap.MapMode.absolute, 'Encoder_' + str(x), x, self) for x in range(4)]
for encoder in self._encoder:
encoder._mapping_feedback_delay = -1
self._encoder_button = [MonoButtonElement(is_momentary, MIDI_NOTE_TYPE, CHANNEL, DS1_ENCODER_BUTTONS[index], 'EncoderButton_' + str(index), self, skin = self._skin) for index in range(4)]
self._master_fader = MonoEncoderElement(MIDI_CC_TYPE, CHANNEL, DS1_MASTER, Live.MidiMap.MapMode.absolute, 'MasterFader', 0, self)
self._button = [MonoButtonElement(is_momentary, MIDI_NOTE_TYPE, CHANNEL, DS1_BUTTONS[index], 'Button_' + str(index), self, skin = self._skin) for index in range(16)]
self._grid = [[MonoButtonElement(is_momentary, MIDI_NOTE_TYPE, CHANNEL, DS1_GRID[x][y], 'Button_' + str(x) + '_' + str(y), self, skin = self._skin) for x in range(3)] for y in range(3)]
self._dummy = [MonoEncoderElement(MIDI_CC_TYPE, CHANNEL, 120+x, Live.MidiMap.MapMode.absolute, 'Dummy_Dial_' + str(x), x, self) for x in range(5)]
self._fader_matrix = ButtonMatrixElement(name = 'FaderMatrix', rows = [self._fader])
self._top_buttons = ButtonMatrixElement(name = 'TopButtonMatrix', rows = [self._button[:8]])
self._bottom_buttons = ButtonMatrixElement(name = 'BottomButtonMatrix', rows = [self._button[8:]])
self._dial_matrix = ButtonMatrixElement(name = 'DialMatrix', rows = self._dial)
self._side_dial_matrix = ButtonMatrixElement(name = 'SideDialMatrix', rows = [self._side_dial])
self._encoder_matrix = ButtonMatrixElement(name = 'EncoderMatrix', rows = [self._encoder])
self._encoder_button_matrix = ButtonMatrixElement(name = 'EncoderButtonMatrix', rows = [self._encoder_button])
self._grid_matrix = ButtonMatrixElement(name = 'GridMatrix', rows = self._grid)
self._selected_parameter_controls = ButtonMatrixElement(name = 'SelectedParameterControls', rows = [self._dummy + self._encoder[:1] + self._encoder[2:]])
def _define_sysex(self):
self._livid_settings = LividSettings(model = 16, control_surface = self)
self.encoder_speed_sysex = SendLividSysexMode(livid_settings = self._livid_settings, call = 'set_encoder_mapping', message = ENCODER_SPEED)
self.encoder_absolute_mode = SendLividSysexMode(livid_settings = self._livid_settings, call = 'set_encoder_encosion_mode', message = [2])
self.local_control_off = SendLividSysexMode(livid_settings = self._livid_settings, call = 'set_local_control', message = [0])
self.main_mode_message = DisplayMessageMode(self, 'Mute/Solo Mode')
self.select_mode_message = DisplayMessageMode(self, 'Arm/Select Mode')
self.clip_mode_message = DisplayMessageMode(self, 'Launch/Stop Mode')
def _setup_autoarm(self):
self._auto_arm = AutoArmComponent(name='Auto_Arm')
self._auto_arm.can_auto_arm_track = self._can_auto_arm_track
def _setup_mixer_control(self):
self._num_tracks = (8)
self._mixer = MixerComponent(num_tracks = 8, num_returns = 4, invert_mute_feedback = True, auto_name = True)
self._mixer.name = 'Mixer'
self._mixer.set_track_offset(0)
self._mixer.master_strip().set_volume_control(self._master_fader)
self._mixer.set_prehear_volume_control(self._side_dial[3])
self._mixer.layer = Layer(volume_controls = self._fader_matrix, track_select_dial = self._encoder[1])
self._strip = [self._mixer.channel_strip(index) for index in range(8)]
for index in range(8):
self._strip[index].layer = Layer(parameter_controls = self._dial_matrix.submatrix[index:index+1, :])
self._mixer.selected_strip().layer = Layer(parameter_controls = self._selected_parameter_controls)
self._mixer.master_strip().layer = Layer(parameter_controls = self._side_dial_matrix.submatrix[:3, :])
self._mixer.main_layer = AddLayerMode(self._mixer, Layer(solo_buttons = self._bottom_buttons, mute_buttons = self._top_buttons))
self._mixer.select_layer = AddLayerMode(self._mixer, Layer(arm_buttons = self._bottom_buttons, track_select_buttons = self._top_buttons))
self.song().view.selected_track = self._mixer.channel_strip(0)._track
self._mixer.set_enabled(True)
def _setup_session_control(self):
self._session = DS1SessionComponent(num_tracks = 8, num_scenes = 1, auto_name = True, enable_skinning = True)
self._session.set_offsets(0, 0)
self._session.set_mixer(self._mixer)
self._session.layer = Layer(track_select_dial = ComboElement(self._encoder[1], modifiers = [self._encoder_button[1]]), scene_bank_up_button = self._grid[0][1], scene_bank_down_button = self._grid[0][2], scene_launch_buttons = self._grid_matrix.submatrix[1:2, 1:2])
self._session.clips_layer = AddLayerMode(self._session, Layer(clip_launch_buttons = self._top_buttons, stop_track_clip_buttons = self._bottom_buttons))
self.set_highlighting_session_component(self._session)
self._session._do_show_highlight()
def _setup_transport_control(self):
self._transport = DS1TransportComponent()
self._transport.name = 'Transport'
self._transport.layer = Layer(stop_button = self._grid[1][0], play_button = self._grid[0][0], record_button = self._grid[2][0],)
self._transport.set_enabled(True)
def _setup_device_control(self):
self._device = DeviceComponent()
self._device.name = 'Device_Component'
self.set_device_component(self._device)
self._device_navigator = DeviceNavigator(self._device, self._mixer, self)
self._device_navigator.name = 'Device_Navigator'
#self._device_selection_follows_track_selection = FOLLOW
self._device.device_name_data_source().set_update_callback(self._on_device_name_changed)
def _setup_session_recording_component(self):
self._clip_creator = ClipCreator()
self._clip_creator.name = 'ClipCreator'
self._recorder = SessionRecordingComponent(self._clip_creator, ViewControlComponent())
self._recorder.set_enabled(True)
self._recorder.layer = Layer(automation_button = self._grid[1][2], record_button = self._grid[2][1],)
def _setup_m4l_interface(self):
self._m4l_interface = M4LInterfaceComponent(controls=self.controls, component_guard=self.component_guard, priority = 10)
self._m4l_interface.name = "M4LInterface"
self.get_control_names = self._m4l_interface.get_control_names
self.get_control = self._m4l_interface.get_control
self.grab_control = self._m4l_interface.grab_control
self.release_control = self._m4l_interface.release_control
def _setup_translations(self):
controls = []
for control in self.controls:
controls.append(control)
self._translations = TranslationComponent(controls, 10)
self._translations.name = 'TranslationComponent'
self._translations.set_enabled(False)
def _setup_OSC_layer(self):
self._OSC_id = 0
if hasattr(__builtins__, 'control_surfaces') or (isinstance(__builtins__, dict) and 'control_surfaces' in __builtins__.keys()):
for cs in __builtins__['control_surfaces']:
if cs is self:
break
elif isinstance(cs, DS1):
self._OSC_id += 1
self._prefix = '/Live/DS1/'+str(self._OSC_id)
self._outPrt = OSC_OUTPORT
        if self.oscServer is not None:
self.oscServer.shutdown()
self.oscServer = RemixNet.OSCServer('localhost', self._outPrt, 'localhost', 10001)
def _setup_main_modes(self):
self._main_modes = ToggledModesComponent(name = 'MainModes')
self._main_modes.add_mode('Main', [self._mixer.main_layer, self.main_mode_message],)
self._main_modes.add_mode('Select', [self._mixer.select_layer, self.select_mode_message],)
self._main_modes.add_mode('Clips', [self._session.clips_layer, self.clip_mode_message],)
self._main_modes.layer = Layer(priority = 4, toggle_button = self._grid[2][2])
self._main_modes.set_enabled(True)
self._main_modes.selected_mode = 'Main'
def _notify_descriptors(self):
if OSC_TRANSMIT:
for pad in self._pad:
self.oscServer.sendOSC(self._prefix+'/'+pad.name+'/lcd_name/', str(self.generate_strip_string(pad._descriptor)))
for touchpad in self._touchpad:
self.oscServer.sendOSC(self._prefix+'/'+touchpad.name+'/lcd_name/', str(self.generate_strip_string(touchpad._descriptor)))
for button in self._button:
self.oscServer.sendOSC(self._prefix+'/'+button.name+'/lcd_name/', str(self.generate_strip_string(button._descriptor)))
def _get_devices(self, track):
def dig(container_device):
contained_devices = []
if container_device.can_have_chains:
for chain in container_device.chains:
for chain_device in chain.devices:
for item in dig(chain_device):
contained_devices.append(item)
else:
contained_devices.append(container_device)
return contained_devices
devices = []
for device in track.devices:
for item in dig(device):
devices.append(item)
#self.log_message('appending ' + str(item))
return devices
"""called on timer"""
def update_display(self):
super(DS1, self).update_display()
self._timer = (self._timer + 1) % 256
self.flash()
def flash(self):
if(self.flash_status > 0):
for control in self.controls:
if isinstance(control, MonoButtonElement):
control.flash(self._timer)
"""m4l bridge"""
def _on_device_name_changed(self):
name = self._device.device_name_data_source().display_string()
self._monobridge._send('Device_Name', 'lcd_name', str(self.generate_strip_string('Device')))
self._monobridge._send('Device_Name', 'lcd_value', str(self.generate_strip_string(name)))
self.touched()
if OSC_TRANSMIT:
self.oscServer.sendOSC(self._prefix+'/glob/device/', str(self.generate_strip_string(name)))
def _on_device_bank_changed(self):
name = 'No Bank'
if is_device(self._device._device):
name, _ = self._device._current_bank_details()
self._monobridge._send('Device_Bank', 'lcd_name', str(self.generate_strip_string('Bank')))
self._monobridge._send('Device_Bank', 'lcd_value', str(self.generate_strip_string(name)))
self.touched()
def _on_device_chain_changed(self):
name = " "
if is_device(self._device._device) and self._device._device.canonical_parent and isinstance(self._device._device.canonical_parent, Live.Chain.Chain):
name = self._device._device.canonical_parent.name
self._monobridge._send('Device_Chain', 'lcd_name', str(self.generate_strip_string('Chain')))
self._monobridge._send('Device_Chain', 'lcd_value', str(self.generate_strip_string(name)))
self.touched()
def generate_strip_string(self, display_string):
NUM_CHARS_PER_DISPLAY_STRIP = 12
if (not display_string):
return (' ' * NUM_CHARS_PER_DISPLAY_STRIP)
else:
display_string = str(display_string)
if ((len(display_string.strip()) > (NUM_CHARS_PER_DISPLAY_STRIP - 1)) and (display_string.endswith('dB') and (display_string.find('.') != -1))):
display_string = display_string[:-2]
if (len(display_string) > (NUM_CHARS_PER_DISPLAY_STRIP - 1)):
for um in [' ',
'i',
'o',
'u',
'e',
'a']:
while ((len(display_string) > (NUM_CHARS_PER_DISPLAY_STRIP - 1)) and (display_string.rfind(um, 1) != -1)):
um_pos = display_string.rfind(um, 1)
display_string = (display_string[:um_pos] + display_string[(um_pos + 1):])
else:
display_string = display_string.center((NUM_CHARS_PER_DISPLAY_STRIP - 1))
ret = u''
for i in range((NUM_CHARS_PER_DISPLAY_STRIP - 1)):
if ((ord(display_string[i]) > 127) or (ord(display_string[i]) < 0)):
ret += ' '
else:
ret += display_string[i]
ret += ' '
ret = ret.replace(' ', '_')
assert (len(ret) == NUM_CHARS_PER_DISPLAY_STRIP)
return ret
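    # Illustrative note (added): generate_strip_string() squeezes text onto a
    # 12-character display strip. A 13-character name such as 'Reverberation'
    # first has the rightmost ' ', 'i', 'o', 'u', 'e', 'a' characters removed
    # (in that order) until 11 characters remain, is then truncated/padded to
    # 11 characters plus a trailing space, and finally spaces become underscores,
    # so the result is always exactly NUM_CHARS_PER_DISPLAY_STRIP characters.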
def notification_to_bridge(self, name, value, sender):
#self.log_message('monobridge:' + str(name) + str(value))
if isinstance(sender, MonoEncoderElement):
if OSC_TRANSMIT:
self.oscServer.sendOSC(self._prefix+'/'+sender.name+'/lcd_name/', str(self.generate_strip_string(name)))
self.oscServer.sendOSC(self._prefix+'/'+sender.name+'/lcd_value/', str(self.generate_strip_string(value)))
self._monobridge._send(sender.name, 'lcd_name', str(self.generate_strip_string(name)))
self._monobridge._send(sender.name, 'lcd_value', str(self.generate_strip_string(value)))
else:
self._monobridge._send(name, 'lcd_name', str(self.generate_strip_string(name)))
self._monobridge._send(name, 'lcd_value', str(self.generate_strip_string(value)))
if OSC_TRANSMIT:
self.oscServer.sendOSC(self._prefix+'/'+name+'/lcd_name/', str(self.generate_strip_string(name)))
self.oscServer.sendOSC(self._prefix+'/'+name+'/lcd_value/', str(self.generate_strip_string(value)))
def touched(self):
        if self._touched == 0:
self._monobridge._send('touch', 'on')
self.schedule_message(2, self.check_touch)
self._touched +=1
def check_touch(self):
if self._touched > 5:
self._touched = 5
elif self._touched > 0:
self._touched -= 1
            if self._touched == 0:
self._monobridge._send('touch', 'off')
else:
self.schedule_message(2, self.check_touch)
"""general functionality"""
def disconnect(self):
        if self.oscServer is not None:
self.oscServer.shutdown()
self.oscServer = None
self.log_message("--------------= DS1 log closed =--------------")
super(DS1, self).disconnect()
def _can_auto_arm_track(self, track):
routing = track.current_input_routing
return routing == 'Ext: All Ins' or routing == 'All Ins' or routing.startswith('DS1 Input')
#self._main_modes.selected_mode in ['Sends', 'Device'] and
def _on_selected_track_changed(self):
super(DS1, self)._on_selected_track_changed()
def handle_sysex(self, midi_bytes):
#self.log_message('sysex: ' + str(midi_bytes))
if len(midi_bytes) > 14:
if midi_bytes[3:10] == tuple([6, | |
"""This modules provides functionality in order to seed database."""
import asyncio
import logging
import gino
from app.db import get_database_dsn
LOGGER = logging.getLogger(__name__)
LOGGER.setLevel(logging.DEBUG)
ch = logging.StreamHandler()
ch.setLevel(logging.DEBUG)
LOGGER.addHandler(ch)
insert_mcc_categories = """
INSERT INTO mcc_category (id, name, info)
VALUES
(-1, 'Other', 'Other expenses'),
(1, 'Travel', 'Flights, train tickets, car rentals, hotels and much more for your vacation.'),
(2, 'Beauty & Medicine', 'Goods and services in massage or beauty salons, and pharmacies.'),
(3, 'Entertainment & Sports', 'Goods and services in the field of entertainment and sports.'),
(4, 'Cafes & Restaurants', 'Goods and services in restaurants, cafes, bars, etc.'),
(5, 'Products & Supermarkets', 'Goods and services in supermarkets and specialty stores selling food and beverages.'),
(6, 'Cinema', 'Cinema services and goods, rent and purchase of goods in related stores.'),
(7, 'Cars & Gas Stations', 'Auto parts, various types of car services and service stations, car washes, oil products, gas and other related goods and services purchased at gas stations or specialty stores.'),
(8, 'Clothes & Shoes', 'Goods and services in specialized shops of clothes, accessories, footwear.'),
(9, 'Taxi', 'Taxi services.'),
(10, 'Animals', 'Goods and services in pet stores and veterinary clinics.'),
(11, 'Books', 'Goods and services in bookstores and newsstands.'),
(12, 'Flowers', 'Goods and services in flower shops, florist shops and related stores.'),
(13, 'Household Appliances', 'Goods of household appliance in specialized shops.'),
(14, 'Gas Stations', 'Diesel fuel, gasoline, gas fuel at a gas station.'),
(15, 'Fast Food', 'Shopping in fast food restaurants such as McDonald`s, KFC, etc.'),
(16, 'Card Transfers', 'Money transfers between accounts.'),
(17, 'Communal Services', 'Electric, gas, sanitary and water utilities')
ON CONFLICT (id) DO NOTHING
RETURNING id, name, info;
"""
insert_mccs = """
INSERT INTO mcc (code, category_id)
VALUES
(-1, -1),
(3000, 1), (3001, 1), (3002, 1), (3003, 1), (3004, 1), (3005, 1), (3006, 1), (3007, 1), (3008, 1), (3009, 1),
(3010, 1), (3011, 1), (3012, 1), (3013, 1), (3014, 1), (3015, 1), (3016, 1), (3017, 1), (3018, 1), (3019, 1),
(3020, 1), (3021, 1), (3022, 1), (3023, 1), (3024, 1), (3025, 1), (3026, 1), (3027, 1), (3028, 1), (3029, 1),
(3030, 1), (3031, 1), (3032, 1), (3033, 1), (3034, 1), (3035, 1), (3036, 1), (3037, 1), (3038, 1), (3039, 1),
(3040, 1), (3041, 1), (3042, 1), (3043, 1), (3044, 1), (3045, 1), (3046, 1), (3047, 1), (3048, 1), (3049, 1),
(3050, 1), (3051, 1), (3052, 1), (3053, 1), (3054, 1), (3055, 1), (3056, 1), (3057, 1), (3058, 1), (3059, 1),
(3060, 1), (3061, 1), (3062, 1), (3063, 1), (3064, 1), (3065, 1), (3066, 1), (3067, 1), (3068, 1), (3069, 1),
(3070, 1), (3071, 1), (3072, 1), (3073, 1), (3074, 1), (3075, 1), (3076, 1), (3077, 1), (3078, 1), (3079, 1),
(3080, 1), (3081, 1), (3082, 1), (3083, 1), (3084, 1), (3085, 1), (3086, 1), (3087, 1), (3088, 1), (3089, 1),
(3090, 1), (3091, 1), (3092, 1), (3093, 1), (3094, 1), (3095, 1), (3096, 1), (3097, 1), (3098, 1), (3099, 1),
(3100, 1), (3101, 1), (3102, 1), (3103, 1), (3104, 1), (3105, 1), (3106, 1), (3107, 1), (3108, 1), (3109, 1),
(3110, 1), (3111, 1), (3112, 1), (3113, 1), (3114, 1), (3115, 1), (3116, 1), (3117, 1), (3118, 1), (3119, 1),
(3120, 1), (3121, 1), (3122, 1), (3123, 1), (3124, 1), (3125, 1), (3126, 1), (3127, 1), (3128, 1), (3129, 1),
(3130, 1), (3131, 1), (3132, 1), (3133, 1), (3134, 1), (3135, 1), (3136, 1), (3137, 1), (3138, 1), (3139, 1),
(3140, 1), (3141, 1), (3142, 1), (3143, 1), (3144, 1), (3145, 1), (3146, 1), (3147, 1), (3148, 1), (3149, 1),
(3150, 1), (3151, 1), (3152, 1), (3153, 1), (3154, 1), (3155, 1), (3156, 1), (3157, 1), (3158, 1), (3159, 1),
(3160, 1), (3161, 1), (3162, 1), (3163, 1), (3164, 1), (3165, 1), (3166, 1), (3167, 1), (3168, 1), (3169, 1),
(3170, 1), (3171, 1), (3172, 1), (3173, 1), (3174, 1), (3175, 1), (3176, 1), (3177, 1), (3178, 1), (3179, 1),
(3180, 1), (3181, 1), (3182, 1), (3183, 1), (3184, 1), (3185, 1), (3186, 1), (3187, 1), (3188, 1), (3189, 1),
(3190, 1), (3191, 1), (3192, 1), (3193, 1), (3194, 1), (3195, 1), (3196, 1), (3197, 1), (3198, 1), (3199, 1),
(3200, 1), (3201, 1), (3202, 1), (3203, 1), (3204, 1), (3205, 1), (3206, 1), (3207, 1), (3208, 1), (3209, 1),
(3210, 1), (3211, 1), (3212, 1), (3213, 1), (3214, 1), (3215, 1), (3216, 1), (3217, 1), (3218, 1), (3219, 1),
(3220, 1), (3221, 1), (3222, 1), (3223, 1), (3224, 1), (3225, 1), (3226, 1), (3227, 1), (3228, 1), (3229, 1),
(3230, 1), (3231, 1), (3232, 1), (3233, 1), (3234, 1), (3235, 1), (3236, 1), (3237, 1), (3238, 1), (3239, 1),
(3240, 1), (3241, 1), (3242, 1), (3243, 1), (3244, 1), (3245, 1), (3246, 1), (3247, 1), (3248, 1), (3249, 1),
(3250, 1), (3251, 1), (3252, 1), (3253, 1), (3254, 1), (3255, 1), (3256, 1), (3257, 1), (3258, 1), (3259, 1),
(3260, 1), (3261, 1), (3262, 1), (3263, 1), (3264, 1), (3265, 1), (3266, 1), (3267, 1), (3268, 1), (3269, 1),
(3270, 1), (3271, 1), (3272, 1), (3273, 1), (3274, 1), (3275, 1), (3276, 1), (3277, 1), (3278, 1), (3279, 1),
(3280, 1), (3281, 1), (3282, 1), (3283, 1), (3284, 1), (3285, 1), (3286, 1), (3287, 1), (3288, 1), (3289, 1),
(3290, 1), (3291, 1), (3292, 1), (3293, 1), (3294, 1), (3295, 1), (3296, 1), (3297, 1), (3298, 1), (3299, 1),
(3351, 1), (3352, 1), (3353, 1), (3354, 1), (3355, 1), (3356, 1), (3357, 1), (3358, 1), (3359, 1), (3360, 1),
(3361, 1), (3362, 1), (3363, 1), (3364, 1), (3365, 1), (3366, 1), (3367, 1), (3368, 1), (3369, 1), (3370, 1),
(3371, 1), (3372, 1), (3373, 1), (3374, 1), (3375, 1), (3376, 1), (3377, 1), (3378, 1), (3379, 1), (3380, 1),
(3381, 1), (3382, 1), (3383, 1), (3384, 1), (3385, 1), (3386, 1), (3387, 1), (3388, 1), (3389, 1), (3390, 1),
(3391, 1), (3392, 1), (3393, 1), (3394, 1), (3395, 1), (3396, 1), (3397, 1), (3398, 1), (3399, 1), (3400, 1),
(3401, 1), (3402, 1), (3403, 1), (3404, 1), (3405, 1), (3406, 1), (3407, 1), (3408, 1), (3409, 1), (3410, 1),
(3411, 1), (3412, 1), (3413, 1), (3414, 1), (3415, 1), (3416, 1), (3417, 1), (3418, 1), (3419, 1), (3420, 1),
(3421, 1), (3422, 1), (3423, 1), (3424, 1), (3425, 1), (3426, 1), (3427, 1), (3428, 1), (3429, 1), (3430, 1),
(3431, 1), (3432, 1), (3433, 1), (3434, 1), (3435, 1), (3436, 1), (3437, 1), (3438, 1), (3439, 1), (3440, 1),
(3441, 1), (3501, 1), (3502, 1), (3503, 1), (3504, 1), (3505, 1), (3506, 1), (3507, 1), (3508, 1), (3509, 1),
(3510, 1), (3511, 1), (3512, 1), (3513, 1), (3514, 1), (3515, 1), (3516, 1), (3517, 1), (3518, 1), (3519, 1),
(3520, 1), (3521, 1), (3522, 1), (3523, 1), (3524, 1), (3525, 1), (3526, 1), (3527, 1), (3528, 1), (3529, 1),
(3530, 1), (3531, 1), (3532, 1), (3533, 1), (3534, 1), (3535, 1), (3536, 1), (3537, 1), (3538, 1), (3539, 1),
(3540, 1), (3541, 1), (3542, 1), (3543, 1), (3544, 1), (3545, 1), (3546, 1), (3547, 1), (3548, 1), (3549, 1),
(3550, 1), (3551, 1), (3552, 1), (3553, 1), (3554, 1), (3555, 1), (3556, 1), (3557, 1), (3558, 1), (3559, 1),
(3560, 1), (3561, 1), (3562, 1), (3563, 1), (3564, 1), (3565, 1), (3566, 1), (3567, 1), (3568, 1), (3569, 1),
(3570, 1), (3571, 1), (3572, 1), (3573, 1), (3574, 1), (3575, 1), (3576, 1), (3577, 1), (3578, 1), (3579, 1),
(3580, 1), (3581, 1), (3582, 1), (3583, 1), (3584, 1), (3585, 1), (3586, 1), (3587, 1), (3588, 1), (3589, 1),
(3590, 1), (3591, 1), (3592, 1), (3593, 1), (3594, 1), (3595, 1), (3596, 1), (3597, 1), (3598, 1), (3599, 1),
(3600, 1), (3601, 1), (3602, 1), (3603, 1), (3604, 1), (3605, 1), (3606, 1), (3607, 1), (3608, 1), (3609, 1),
(3610, 1), (3611, 1), (3612, 1), (3613, 1), | |
"description": "Store a float into local variable 2",
"inputs": [OperandType.Float],
"outputs": [],
},
0x46: {
"name": "fstore_3",
"description": "Store a float into local variable 3",
"inputs": [OperandType.Float],
"outputs": [],
},
0x47: {
"name": "dstore_0",
"description": "Store a double into local variable 0",
"inputs": [OperandType.Double],
"outputs": [],
},
0x48: {
"name": "dstore_1",
"description": "Store a double into local variable 1",
"inputs": [OperandType.Double],
"outputs": [],
},
0x49: {
"name": "dstore_2",
"description": "Store a double into local variable 2",
"inputs": [OperandType.Double],
"outputs": [],
},
0x4a: {
"name": "dstore_3",
"description": "Store a double into local variable 3",
"inputs": [OperandType.Double],
"outputs": [],
},
0x4b: {
"name": "astore_0",
"description": "Store a reference into local variable 0",
"inputs": [OperandType.Reference],
"outputs": [],
},
0x4c: {
"name": "astore_1",
"description": "Store a reference into local variable 1",
"inputs": [OperandType.Reference],
"outputs": [],
},
0x4d: {
"name": "astore_2",
"description": "Store a reference into local variable 2",
"inputs": [OperandType.Reference],
"outputs": [],
},
0x4e: {
"name": "astore_3",
"description": "Store a reference into local variable 3",
"inputs": [OperandType.Reference],
"outputs": [],
},
0x4f: {
"name": "iastore",
"description": "Store into int array",
"inputs": [OperandType.Reference, OperandType.Integer, OperandType.Integer],
"outputs": [],
},
0x50: {
"name": "lastore",
"description": "Store into long array",
"inputs": [OperandType.Reference, OperandType.Integer, OperandType.Long],
"outputs": [],
},
0x51: {
"name": "fastore",
"description": "Store into float array",
"inputs": [OperandType.Reference, OperandType.Integer, OperandType.Float],
"outputs": [],
},
0x52: {
"name": "dastore",
"description": "Store into double array",
"inputs": [OperandType.Reference, OperandType.Integer, OperandType.Double],
"outputs": [],
},
0x53: {
"name": "aastore",
"description": "Store into reference array",
"inputs": [OperandType.Reference, OperandType.Integer, OperandType.Reference],
"outputs": [],
},
0x54: {
"name": "bastore",
"description": "Store into byte or boolean array",
"inputs": [OperandType.Reference, OperandType.Integer, OperandType.Byte],
"outputs": [],
},
0x55: {
"name": "castore",
"description": "Store into char array",
"inputs": [OperandType.Reference, OperandType.Integer, OperandType.Char],
"outputs": [],
},
0x56: {
"name": "sastore",
"description": "Store into short array",
"inputs": [OperandType.Reference, OperandType.Integer, OperandType.Short],
"outputs": [],
},
0x57: {
"name": "pop",
"description": "Pop the top operand stack value",
"inputs": [OperandType.Integer],
"outputs": [],
},
0x58: {
"name": "pop2",
"description": "Pop the top two operand stack values",
"inputs": [OperandType.Integer, OperandType.Integer],
"outputs": [],
},
0x59: {
"name": "dup",
"description": "Duplicate the top operand stack value",
"inputs": [OperandType.Integer],
"outputs": [OperandType.Integer, OperandType.Integer],
},
0x5a: {
"name": "dup_x1",
"description": "Duplicate the top operand stack value and insert beneath the second-top one",
"inputs": [OperandType.Integer, OperandType.Integer],
"outputs": [OperandType.Integer, OperandType.Integer, OperandType.Integer],
},
0x5b: {
"name": "dup_x2",
"description": "Duplicate the top operand stack value and insert beneath the one beneath it",
"inputs": [OperandType.Integer, OperandType.Integer, OperandType.Integer],
"outputs": [OperandType.Integer, OperandType.Integer, OperandType.Integer, OperandType.Integer],
},
0x5c: {
"name": "dup2",
"description": "Duplicate the top two operand stack values",
"inputs": [OperandType.Integer, OperandType.Integer],
"outputs": [OperandType.Integer, OperandType.Integer, OperandType.Integer, OperandType.Integer],
},
0x5d: {
"name": "dup2_x1",
"description": "Duplicate the top two operand stack values and insert beneath the one beneath it",
"inputs": [OperandType.Integer, OperandType.Integer, OperandType.Integer],
"outputs": [OperandType.Integer, OperandType.Integer, OperandType.Integer, OperandType.Integer,
OperandType.Integer],
},
0x5e: {
"name": "dup2_x2",
"description": "Duplicate the top two operand stack values and insert beneath the one beneath it",
"inputs": [OperandType.Integer, OperandType.Integer, OperandType.Integer, OperandType.Integer],
"outputs": [OperandType.Integer, OperandType.Integer, OperandType.Integer, OperandType.Integer,
OperandType.Integer,
OperandType.Integer],
},
0x5f: {
"name": "swap",
"description": "Swap the top two operand stack values",
"inputs": [OperandType.Integer, OperandType.Integer],
"outputs": [OperandType.Integer, OperandType.Integer],
},
0x60: {
"name": "iadd",
"description": "Add two integers",
"inputs": [OperandType.Integer, OperandType.Integer],
"outputs": [OperandType.Integer],
},
0x61: {
"name": "ladd",
"description": "Add two longs",
"inputs": [OperandType.Long, OperandType.Long],
"outputs": [OperandType.Long],
},
0x62: {
"name": "fadd",
"description": "Add two floats",
"inputs": [OperandType.Float, OperandType.Float],
"outputs": [OperandType.Float],
},
0x63: {
"name": "dadd",
"description": "Add two doubles",
"inputs": [OperandType.Double, OperandType.Double],
"outputs": [OperandType.Double],
},
0x64: {
"name": "isub",
"description": "Subtract two integers",
"inputs": [OperandType.Integer, OperandType.Integer],
"outputs": [OperandType.Integer],
},
0x65: {
"name": "lsub",
"description": "Subtract two longs",
"inputs": [OperandType.Long, OperandType.Long],
"outputs": [OperandType.Long],
},
0x66: {
"name": "fsub",
"description": "Subtract two floats",
"inputs": [OperandType.Float, OperandType.Float],
"outputs": [OperandType.Float],
},
0x67: {
"name": "dsub",
"description": "Subtract two doubles",
"inputs": [OperandType.Double, OperandType.Double],
"outputs": [OperandType.Double],
},
0x68: {
"name": "imul",
"description": "Multiply two integers",
"inputs": [OperandType.Integer, OperandType.Integer],
"outputs": [OperandType.Integer],
},
0x69: {
"name": "lmul",
"description": "Multiply two longs",
"inputs": [OperandType.Long, OperandType.Long],
"outputs": [OperandType.Long],
},
0x6a: {
"name": "fmul",
"description": "Multiply two floats",
"inputs": [OperandType.Float, OperandType.Float],
"outputs": [OperandType.Float],
},
0x6b: {
"name": "dmul",
"description": "Multiply two doubles",
"inputs": [OperandType.Double, OperandType.Double],
"outputs": [OperandType.Double],
},
0x6c: {
"name": "idiv",
"description": "Divide two integers",
"inputs": [OperandType.Integer, OperandType.Integer],
"outputs": [OperandType.Integer],
},
0x6d: {
"name": "ldiv",
"description": "Divide two longs",
"inputs": [OperandType.Long, OperandType.Long],
"outputs": [OperandType.Long],
},
0x6e: {
"name": "fdiv",
"description": "Divide two floats",
"inputs": [OperandType.Float, OperandType.Float],
"outputs": [OperandType.Float],
},
0x6f: {
"name": "ddiv",
"description": "Divide two doubles",
"inputs": [OperandType.Double, OperandType.Double],
"outputs": [OperandType.Double],
},
0x70: {
"name": "irem",
"description": "Remainder of two integers",
"inputs": [OperandType.Integer, OperandType.Integer],
"outputs": [OperandType.Integer],
},
0x71: {
"name": "lrem",
"description": "Remainder of two longs",
"inputs": [OperandType.Long, OperandType.Long],
"outputs": [OperandType.Long],
},
0x72: {
"name": "frem",
"description": "Remainder of two floats",
"inputs": [OperandType.Float, OperandType.Float],
"outputs": [OperandType.Float],
},
0x73: {
"name": "drem",
"description": "Remainder of two doubles",
"inputs": [OperandType.Double, OperandType.Double],
"outputs": [OperandType.Double],
},
0x74: {
"name": "ineg",
"description": "Negate an integer",
"inputs": [OperandType.Integer],
"outputs": [OperandType.Integer],
},
0x75: {
"name": "lneg",
"description": "Negate a long",
"inputs": [OperandType.Long],
"outputs": [OperandType.Long],
},
0x76: {
"name": "fneg",
"description": "Negate a float",
"inputs": [OperandType.Float],
"outputs": [OperandType.Float],
},
0x77: {
"name": "dneg",
"description": "Negate a double",
"inputs": [OperandType.Double],
"outputs": [OperandType.Double],
},
0x78: {
"name": "ishl",
"description": "Shift an integer left",
"inputs": [OperandType.Integer, OperandType.Integer],
"outputs": [OperandType.Integer],
},
0x79: {
"name": "lshl",
"description": "Shift a long left",
"inputs": [OperandType.Long, OperandType.Integer],
"outputs": [OperandType.Long],
},
0x7a: {
"name": "ishr",
"description": "Shift an integer right",
"inputs": [OperandType.Integer, OperandType.Integer],
"outputs": [OperandType.Integer],
},
0x7b: {
"name": "lshr",
"description": "Shift a long right",
"inputs": [OperandType.Long, OperandType.Integer],
"outputs": [OperandType.Long],
},
0x7c: {
"name": "iushr",
"description": "Shift an integer right",
"inputs": [OperandType.Integer, OperandType.Integer],
"outputs": [OperandType.Integer],
},
0x7d: {
"name": "lushr",
"description": "Shift a long right",
"inputs": [OperandType.Long, OperandType.Integer],
"outputs": [OperandType.Long],
},
0x7e: {
"name": "iand",
"description": "Bitwise and of two integers",
"inputs": [OperandType.Integer, OperandType.Integer],
"outputs": [OperandType.Integer],
},
0x7f: {
"name": "land",
"description": "Bitwise and of two longs",
"inputs": [OperandType.Long, OperandType.Long],
"outputs": [OperandType.Long],
},
0x80: {
"name": "ior",
"description": "Bitwise or of two integers",
"inputs": [OperandType.Integer, OperandType.Integer],
"outputs": [OperandType.Integer],
},
0x81: {
"name": "lor",
"description": "Bitwise or of two longs",
"inputs": [OperandType.Long, OperandType.Long],
"outputs": [OperandType.Long],
},
0x82: {
"name": "ixor",
"description": "Bitwise xor of two integers",
"inputs": [OperandType.Integer, OperandType.Integer],
"outputs": [OperandType.Integer],
},
0x83: {
"name": "lxor",
"description": "Bitwise xor of two longs",
"inputs": [OperandType.Long, OperandType.Long],
"outputs": [OperandType.Long],
},
0x84: {
"name": "iinc",
"description": "Increment an integer",
"inputs": [OperandType.Integer, OperandType.Integer],
"outputs": [],
},
0x85: {
"name": "i2l",
"description": "Convert an integer to a long",
"inputs": [OperandType.Integer],
"outputs": [OperandType.Long],
},
0x86: {
"name": "i2f",
"description": "Convert an integer to a float",
"inputs": [OperandType.Integer],
"outputs": [OperandType.Float],
},
0x87: {
"name": "i2d",
"description": "Convert an integer to a double",
"inputs": [OperandType.Integer],
"outputs": [OperandType.Double],
},
0x88: {
"name": "l2i",
"description": "Convert a long to an integer",
"inputs": [OperandType.Long],
"outputs": [OperandType.Integer],
},
0x89: {
"name": "l2f",
"description": "Convert a long to a float",
"inputs": [OperandType.Long],
"outputs": [OperandType.Float],
},
0x8a: {
"name": "l2d",
"description": "Convert a long to a double",
"inputs": [OperandType.Long],
"outputs": [OperandType.Double],
},
0x8b: {
"name": "f2i",
"description": "Convert a float to an integer",
"inputs": [OperandType.Float],
"outputs": [OperandType.Integer],
},
0x8c: {
"name": "f2l",
"description": "Convert a float to a long",
"inputs": [OperandType.Float],
"outputs": [OperandType.Long],
},
0x8d: {
"name": "f2d",
"description": "Convert a float to a double",
"inputs": [OperandType.Float],
"outputs": [OperandType.Double],
},
0x8e: {
"name": "d2i",
"description": "Convert a double to an integer",
"inputs": [OperandType.Double],
"outputs": [OperandType.Integer],
},
0x8f: {
"name": "d2l",
"description": "Convert a double to a long",
"inputs": [OperandType.Double],
"outputs": [OperandType.Long],
},
0x90: {
"name": "d2f",
"description": "Convert a double to a float",
"inputs": [OperandType.Double],
"outputs": [OperandType.Float],
},
0x91: {
"name": "i2b",
"description": "Convert an integer to a byte",
"inputs": [OperandType.Integer],
"outputs": [OperandType.Byte],
},
0x92: {
"name": "i2c",
"description": "Convert an integer to a character",
"inputs": [OperandType.Integer],
"outputs": [OperandType.Char],
},
0x93: {
"name": "i2s",
"description": "Convert an integer to a short",
"inputs": [OperandType.Integer],
"outputs": [OperandType.Short],
},
0x94: {
| |
"""
Created on Dec 7, 2016
@author: <NAME> <EMAIL>
"""
import sys, re, os.path
import time
import shutil
'''
StringSeparators is the default list of characters that are used to separate names
in a string representation of a list of names.
The following list is comma,space,semi-colon,colon
'''
StringSeparators = ", ;:"
def stringType(x):
"""
Return True if the given object x is a string type, i.e., unicode string or "regular" string.
"""
  return isinstance(x, str)
#endDef
# Found this on stackoverflow
currentTimeMillis = lambda: int(round(time.time() * 1000))
def toBoolean (arg):
"""
Return True or False depending on the value of arg.
  Canonicalize the common ways of representing a boolean into True or False.
This is convenient for use with values that ultimately originated from a user, e.g.,
from a property file where the user may enter y, yes, n, no, t, f
etc, to represent true or false.
"""
if (not arg): return False
if (arg == 1): return True
if (isinstance(arg, str)):
if (re.match('^(true|t|yes|y|1)$', arg, flags=re.IGNORECASE)): return True
if (re.match('^(false|f|no|n|0)$', arg, flags=re.IGNORECASE)): return False
#endIf
raise Exception("toBoolean: Unknown boolean value: %s" % arg)
#endDef
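def _toBooleanExamples():
  """
  Illustrative only (added): typical inputs toBoolean() understands.
  """
  examples = [ ("Yes", True), ("t", True), (1, True), ("No", False), ("f", False), (0, False) ]
  for arg, expected in examples:
    assert toBoolean(arg) == expected
  # Anything unrecognized, e.g. toBoolean("maybe"), raises an Exception.
#endDef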
def getInputArgs(argsSignature,args=None):
"""
Return a dictionary with the strings in sys.argv processed as name-value pairs or "switch" keyword args.
NOTE: When running wsadmin, sys.argv[0] is the first actual argument, not the name of the script being invoked.
However, when running with Jython directly, the first argument is the name of the Jython script. An exception
will be raised because if the name of the script is not listed in the argsSignature dictionary. The simplest
thing to do when running with Jython directly is to pass in sys.argv[1:].
Input: An args "signature" dictionary with the keyword entries in it and the expected type of the argument value.
The recognized types are:
string, integer, int, float, boolean and switch.
A "switch" type argument is one that is a keyword only and doesn't have a value. If it is present in the argv list,
a boolean true (1) is assigned to its corresponding arg name.
The keywords in the arg signature are assumed to begin with a dash (-) or a double dash (--). (The double dash is
occasionally necessary to avoid conflicts with the wsadmin command line args, e.g. --help to emit usage info. The
dashes are stripped off to create an arg name when assigning key-value pairs in the output dictionary of actual args.
If a string type keyword appears more than once in the argsv[] array, then a list of values for that keyword is created
for that entry in the output dictionary. This is handy for writing scripts where you want to be able to allow the user
repeat a particular argument multiple times so as to provide a list. The other approach to providing a list is to use a
comma or space separated string and then create the list with split. We didn't provide this capability for the other
types of arguments since we couldn't come up with a realistic scenario for passing in a list of numbers or booleans
using multiple keyword args. If we do, we'll modify the method.
NOTE: If a script has all optional arguments then the args argument may end up being the empty list. We explicitly check
for args == None to cover cases where no args at all are passed in and in that case sys.argv is used for args. Be careful
when using Jython directly, because sys.argv[0] holds the name of the Jython script. It is recommended that the caller pass
in sys.argv[1:] when running with Jython directly. When running with wsadmin, the first element in sys.argv is stripped off
by wsadmin.
"""
if (args == None):
# assume it is appropriate to default to sys.argv
args = sys.argv
#endIf
argsDict = {}
i = 0
while (i < len(args)):
keyword = args[i]
if (keyword not in argsSignature):
raise Exception("Unknown command line argument: %s" % keyword)
if (keyword.startswith("--")):
argName = keyword[2:len(keyword)] # strip the leading dashes
else:
argName = keyword[1:len(keyword)] # strip single leading dash
#endIf
if (argsSignature[keyword] == 'string'):
i += 1 # index of arg value
argValue = args[i]
# If argValue is enclosed in double-quotes, strip the double-quotes.
# This handles cases where incoming args from a shell are quoted to
# avoid evaluation of shell special characters, e.g., *
if (argValue[0] == '"' and argValue[-1] == '"'):
argValue = argValue[1:-1]
#endIf
currentValue = argsDict.get(argName)
if (currentValue != None):
if (type(currentValue) == type([])):
# current value already a list
          currentValue.append(argValue)
          argsDict[argName] = currentValue
else:
# current value is a string, so make a list
argsDict[argName] = [currentValue, argValue]
#endIf
else:
argsDict[argName] = argValue
#endIf
elif (argsSignature[keyword] == 'integer' or argsSignature[keyword] == 'int'):
i += 1
argsDict[argName] = int(args[i])
elif (argsSignature[keyword] == 'float'):
i += 1
argsDict[argName] = float(args[i])
elif (argsSignature[keyword] == 'boolean'):
i += 1
argsDict[argName] = toBoolean(args[i])
elif (argsSignature[keyword] == 'switch'):
# for a "switch" type arg, the index doesn't get advanced
argsDict[argName] = True
else:
raise Exception("Unknown argument type in command line argument signature: %s" % argsSignature[keyword])
i += 1 # index of next keyword
#endWhile
return argsDict
#endDef
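# Illustrative use of getInputArgs() (sketch; the argument names below are hypothetical):
#   argsSignature = {'-serverName': 'string', '-port': 'int', '--debug': 'switch'}
#   args = getInputArgs(argsSignature, ['-serverName', 'server1', '-port', '9080', '--debug'])
#   # args --> {'serverName': 'server1', 'port': 9080, 'debug': True}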
def getValue(args,synonyms,default=None):
"""
Return the value from the given args dictionary for the attribute name in the list of given synonym names.
args is a dictionary (A "rest args" argument dictionary can be passed in directly as well, e.g., **restArgs
may be passed in as restArgs.)
synonyms is a Jython list of names (strings) that may be used to access entries in the args dictionary.
synonyms may also be a string with separators as defined in NameSeparators.
The getValue() method is a convenience for getting a value for a dictionary where there is more than one
name that may be used for the entry in the dictionary. In the case of the application definition dictionary
there are several attributes that may be referred to with more than one name. (The argument names used by
the wsadmin AdminApp methods are often inconsistent with respect to naming conventions. This module provides
more readable aliases for many of the AdminApp method argument names.)
"""
value = None
if (type(synonyms) != type([])):
synonyms = splitString(synonyms)
#endIf
for name in synonyms:
value = args.get(name)
if (value != None):
break
#endIf
#endFor
if (value == None and default != None):
value = default
#endIf
return value
#endDef
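# Illustrative use of getValue() (sketch; dictionary keys are hypothetical):
#   appArgs = {'appName': 'MyApp'}
#   getValue(appArgs, ['name', 'appName'], default='Unknown')   # --> 'MyApp'
#   getValue(appArgs, ['contextRoot'], default='/')             # --> '/'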
def splitString(string,separators=StringSeparators):
"""
The splitString() method is used to create a Jython list from a string of whatever
is passed in the string. This could be a list of names or a list of numbers and names
or whatever. The string argument can be an string that contains an arbitrary set of
substrings separated by the given separators.
It is often convenient to use a string to represent a list, e.g., on the command line.
The separator within that string can be any of the characters provided in the separators
  argument. The separators argument defaults to those defined in the StringSeparators global
for this module.
The splitString method originated as support for things coming in on the command
line to a top level script or from a properties file where a multi-valued property
is needed.
The given string is split into a list based on the separators provided. Separators
  defaults to StringSeparators, which is a comma (,), a space ( ), or a plus (+) character
in that order of precedence. If the given string has a comma character in it, the
split is done on the comma character. If the given string has a space character in
it | |
#
# Copyright 2017 Red Hat Inc.
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
""" Proton reactive API python client options module"""
from __future__ import absolute_import
import optparse
import sys
### Python 2.x cmdline options to unicode conversion callback functions
def convert_to_unicode(value):
"""
Python 2.x: converts value to unicode
:param value: value to be converted to unicode
:type value: str
:return: unicode string
:rtype: str (unicode)
"""
try:
return value.decode(sys.getfilesystemencoding())
except AttributeError:
return value
def to_unicode(option, _, value, parser):
""" stores values of multi-value cmdline string, converts to unicode for Python 2.x
:param option: option object
:type value: optparse.Option
:param value: option value
:type value: str
:param value: option parser
:type value: related Option object from cli_proton_python.options
"""
lst = getattr(parser.values, option.dest) or []
    unicode_value = convert_to_unicode(value)
    lst.append(unicode_value)
setattr(parser.values, option.dest, lst)
def str_to_unicode(option, _, value, parser):
"""
Python 2.x: stores cmdline string, converts to unicode for Python 2.x
    :param option: option object
    :type option: optparse.Option
    :param value: option value
    :type value: str
    :param parser: option parser
    :type parser: optparse.OptionParser
"""
setattr(parser.values, option.dest, convert_to_unicode(value))
class CoreOptions(optparse.OptionParser, object):
""" Proton reactive API python core client options """
def __init__(self):
""" CoreOptions constructor """
super(CoreOptions, self).__init__()
def add_control_options(self):
""" add the control options """
group = optparse.OptionGroup(self, "Control Options")
group.add_option("-b", "--broker-url", type="string", default="localhost:5672/examples",
help="url broker to connect to (default %default)")
group.add_option("-c", "--count", type="int", default=0,
help="number of messages to be sent/received (default %default)")
group.add_option("-t", "--timeout", type="float",
help="timeout in seconds to wait before exiting (default %default)")
group.add_option("--close-sleep", type="int", default=0,
help="sleep before sender/receiver/session/connection.close() "
"(default %default)")
group.add_option("--sync-mode", type="choice",
help="synchronization mode", choices=['none', 'session', 'action'])
self.add_option_group(group)
def add_connection_options(self):
""" add the connection options """
group = optparse.OptionGroup(self, "Connection Options")
group.add_option("--conn-use-config-file", action="store_true",
help='use configuration file for connection')
group.add_option("--conn-urls", type="string",
help='define connection urls')
group.add_option("--conn-reconnect", type="choice", default='true',
help='client reconnect settings (default %default)',
choices=['true', 'false', 'True', 'False'], action='callback',
callback=lambda option, opt_str, value, parser: setattr(parser.values,
option.dest,
value.lower()))
group.add_option("--conn-reconnect-interval", type="float",
help='client reconnect interval '
'(specifying this option implies custom reconnect, default %default)')
group.add_option("--conn-reconnect-limit", type="int",
# default value set later to distinguish default from explicit
help='client reconnect limit '
'(specifying this option implies custom reconnect, default 99)')
group.add_option("--conn-reconnect-timeout", type="int",
                         help='client reconnect timeout '
'(specifying this option implies custom reconnect, default %default)')
group.add_option("--conn-heartbeat", type="int",
help='enable and set connection heartbeat (seconds)')
group.add_option("--conn-ssl-certificate", type="string",
help='path to client certificate '
'(PEM format), enables client authentication')
group.add_option("--conn-ssl-private-key", type="string",
help='path to client private key (PEM format), '
'conn-ssl-certificate must be given')
group.add_option("--conn-ssl-password", type="string",
help="client's certificate database password")
group.add_option("--conn-ssl-trust-store", type="string",
help='path to client trust store (PEM format), '
'conn-ssl-certificate must be given')
group.add_option("--conn-ssl-verify-peer", action="store_true",
help='verifies server certificate, conn-ssl-certificate '
'and trusted db path needs to be specified (PEM format)')
group.add_option("--conn-ssl-verify-peer-name", action="store_true",
help='verifies connection url against server hostname')
group.add_option("--conn-handler", type="string",
help='define custom connection handler')
group.add_option("--conn-max-frame-size", type=int,
help='define custom maximum frame size in bytes (range: 512-4294967295)')
group.add_option("--conn-sasl-enabled", type="choice", default='true',
help='enable connection SASL (default %default)',
choices=['true', 'false', 'True', 'False'], action='callback',
callback=lambda option, opt_str, value, parser: setattr(parser.values,
option.dest,
value.lower()))
group.add_option("--conn-allowed-mechs", type="string",
help='Define custom Allowed SASL mechanism list, '
'separated by space e.g. "GSSAPI PLAIN"')
self.add_option_group(group)
def add_logging_options(self):
""" add the logging options """
group = optparse.OptionGroup(self, "Logging Options")
group.add_option("--log-lib", type="choice",
help="enable client library logging (default %default)",
choices=['CRITICAL', 'ERROR', 'WARNING', 'INFO', 'DEBUG',
'TRANSPORT_RAW', 'TRANSPORT_FRM', 'TRANSPORT_DRV'])
group.add_option("--log-stats", type="choice", choices=['endpoints'],
help="report various statistic/debug information")
self.add_option_group(group)
class SRCoreOptions(CoreOptions):
""" Proton reactive API python sender/receiver client options """
def __init__(self):
""" SRCoreOptions cconstructor """
super(SRCoreOptions, self).__init__()
def add_control_options(self):
""" add the control options """
super(SRCoreOptions, self).add_control_options()
group = [group for group in self.option_groups if group.title == "Control Options"][0]
group.add_option("--duration", type="int", default=0,
help="message actions total duration "
"(defines msg-rate together with count, default %default)")
group.add_option("--duration-mode", type="choice", choices=[],
help="in use with --duration defines where to wait (default %default) "
"**NOTE: 'after-send/receive-tx-action' "
"is equal to 'after-send/receive' if not in transactional mode "
"(tx-size or tx-endloop-action given)")
group.add_option("--capacity", type="int",
help="session's capacity (default %default)")
def add_logging_options(self):
""" add the logging options """
super(SRCoreOptions, self).add_logging_options()
group = [group for group in self.option_groups if group.title == "Logging Options"][0]
group.add_option("--log-msgs", type="choice", action="store",
choices=['dict', 'body', 'upstream', 'none', 'interop', 'json'],
help="message[s] reporting style (default %default)")
def add_transaction_options(self):
""" add the transaction options """
group = optparse.OptionGroup(self, "Transaction Options")
group.add_option("--tx-size", type="int", default=0,
help="transactional mode: batch message count size")
group.add_option("--tx-action", type="choice", default='commit',
choices=['commit', 'rollback', 'none'],
help="transactional action at the end of tx batch (default %default)")
group.add_option("--tx-endloop-action", type="choice",
choices=['commit', 'rollback', 'none'],
help="transactional action after sending all messages in loop "
"(default %default)")
self.add_option_group(group)
def add_link_options(self):
""" add the link options """
group = optparse.OptionGroup(self, "Link Options")
group.add_option("--link-durable", action="store_true",
help='use durable subscription')
group.add_option("--link-at-least-once", action="store_true",
help='reliable delivery')
group.add_option("--link-at-most-once", action="store_true",
help='best-effort delivery')
self.add_option_group(group)
class ConnectorOptions(CoreOptions):
""" Proton reactive API python connector specific client options """
def __init__(self):
""" ConnectorOptions cconstructor """
super(ConnectorOptions, self).__init__()
self.add_control_options()
self.add_logging_options()
self.add_connection_options()
self.add_connector_options()
self.set_default("broker_url", "localhost:5672")
self.set_default("count", 1)
        self.get_option('--count').help = "Specify how many connections/sessions/senders/receivers" \
                                          " the connector tries to create and open (default %default)"
        self.get_option('--close-sleep').help = "Opened objects will be held" \
                                                " until the duration passes"
self.get_option("--log-stats").choices = ['connector']
def add_connector_options(self):
""" add the connector options """
group = optparse.OptionGroup(self, "Connector options")
group.add_option("--obj-ctrl", type="choice",
default='C', choices=['C', 'CE', 'CES', 'CER', 'CESR'],
help="Optional creation object control based on <object-ids> "
"syntax C/E/S/R stands for Connection, sEssion, Sender, Receiver "
"e.g. --obj-ctrl \"CES\" for creation of Connection+sEssion+Sender "
"(default: %default (address not given), 'CESR' (address specified))")
self.add_option_group(group)
class SenderOptions(SRCoreOptions):
""" Proton reactive API python sender specific client options """
def __init__(self):
""" SenderOptions cconstructor """
super(SenderOptions, self).__init__()
self.add_control_options()
self.add_logging_options()
self.add_transaction_options()
self.add_connection_options()
self.add_link_options()
self.add_message_options()
self.add_reactor_options()
self.set_default('count', 1)
self.set_default('duration_mode', 'after-send')
self.get_option('--duration-mode').choices = ['before-send', 'after-send',
'after-send-tx-action']
def add_message_options(self):
""" add the message options """
group = optparse.OptionGroup(self, "Message options")
group.add_option("-i", "--msg-id", type="string",
help="use the supplied id instead of generating one")
group.add_option("-S", "--msg-subject", type="string",
help="specify a subject")
group.add_option("--msg-address", action="store", type="string",
help="message address")
group.add_option("--msg-reply-to", type="string",
help="specify reply-to address")
group.add_option("--msg-durable", action="store", type="string", default="no",
help="send durable messages")
group.add_option("--msg-ttl", action="store", type="int",
help="message time-to-live (ms)")
group.add_option("--msg-priority", action="store", type="int",
help="message priority")
group.add_option("--msg-correlation-id", action="callback", type="string",
help="message correlation id",
callback=str_to_unicode)
group.add_option("--msg-user-id", type="string",
help="message user id")
group.add_option("--msg-group-id", type="string",
help="message group id")
group.add_option("--msg-group-seq", type="int", action="store",
help="message group sequence")
group.add_option("-P", "--msg-property", type="string",
help="specify message property ('~' enables type auto-cast)",
dest="msg_properties", default=[],
metavar="NAME=VALUE|NAME~VALUE",
action="callback", callback=to_unicode)
group.add_option("-M", "--msg-content-map-item", type="string",
help="specify map entry for message body ('~' enables type auto-cast)",
dest="msg_map_items", default=[],
metavar="NAME=VALUE|NAME~VALUE",
action="callback", callback=to_unicode)
group.add_option("-L", "--msg-content-list-item", type="string",
help="specify list entry for message body ('~' enables type auto-cast)",
dest="msg_list_items", default=[],
metavar="NAME|~NAME",
action="callback", callback=to_unicode)
group.add_option("--msg-content-from-file", action="store", type="string",
help="message content loaded from file", metavar="<filename>")
group.add_option("--msg-content", action="callback", type="string",
help="message content", metavar="<content>",
callback=str_to_unicode)
group.add_option("--msg-content-type", action="store", type="string",
help="message content type", metavar="<content-type>")
group.add_option("--content-type", type="choice",
help="typecast the string arguments in msg-content* (default %default)",
choices=['string', 'int', 'long', 'float', 'bool'])
self.add_option_group(group)
def add_reactor_options(self):
""" add receiver's options """
group = optparse.OptionGroup(self, "Reactor options")
group.add_option("--reactor-auto-settle-off", action="store_true",
help='disable auto settle mode')
group.add_option("--reactor-peer-close-is-error", action="store_true", default=False,
help="report error on peer disconnect")
self.add_option_group(group)
def add_control_options(self):
""" add the control options """
super(SenderOptions, self).add_control_options()
group = [group for group in self.option_groups if group.title == "Control Options"][0]
group.add_option("--on-release", type="choice",
help="action to take when a message is released",
choices=["ignore", "retry", "fail"], default="ignore")
class ReceiverOptions(SRCoreOptions):
""" Proton reactive API python receiver specific client options """
def __init__(self):
""" ReceiverOptions constructor """
super(ReceiverOptions, self).__init__()
self.add_control_options()
self.add_logging_options()
self.add_transaction_options()
self.add_connection_options()
self.add_link_options()
self.add_receiver_options()
self.add_reactor_options()
self.set_default('duration_mode', 'after-receive')
self.get_option("--duration-mode").choices = \
['before-receive', 'after-receive', 'after-receive-action', 'after-receive-tx-action']
def add_control_options(self):
""" add the control options """
super(ReceiverOptions, self).add_control_options()
group = [group for group in self.option_groups if group.title == "Control Options"][0]
group.add_option("--dynamic", action="store_true",
help='use dynamic source')
| |
<reponame>adambiser/agk-steam-plugin
"""
A script to download the Steamworks API documentation pages and generate the plugin's
AGK constants file (steam_constants.agc) along with the JSON conversion sources (ToJSON.h/.cpp).
"""
from bs4 import BeautifulSoup
import os
import re
import requests
OUTPUT_FILENAME = "steam_constants.agc"
JSON_H_OUTPUT_FILENAME = "ToJSON.h"
JSON_CPP_OUTPUT_FILENAME = "ToJSON.cpp"
STEAMWORKS_API_URL_BASE = "https://partner.steamgames.com/doc/api/"
# Manually ignored structs. Those with descriptions starting with "Deprecated" are ignored automatically.
IGNORED = {
# "ISteamApps": [
# "AppProofOfPurchaseKeyResponse_t",
# "RegisterActivationCodeResponse_t",
# ],
# "ISteamUser": [
# "CallbackMsg_t"
# ],
"ISteamMatchmaking": [
"LobbyKicked_t", # Currently unused
"LobbyMatchList_t", # Plugin returns json array of lobby IDs.
]
}
ENUM_VALUE_FIXES = {
# EHTMLKeyModifiers
'EHTMLKeyModifier_AltDown': '1',
'EHTMLKeyModifier_CtrlDown': '2',
'EHTMLKeyModifier_ShiftDown': '4',
# EChatSteamIDInstanceFlags
'EChatInstanceFlagClan': '0x00000800',
'EChatInstanceFlagLobby': '0x00000400',
'EChatInstanceFlagMMSLobby': '0x00000200',
# EMarketingMessageFlags
'EMarketingMessageFlagsHighPriority': '1',
'EMarketingMessageFlagsPlatformWindows': '2',
'EMarketingMessageFlagsPlatformMac': '4',
'EMarketingMessageFlagsPlatformLinux': '8',
# ERemoteStoragePlatform
'ERemoteStoragePlatformWindows': '1',
'ERemoteStoragePlatformOSX': '2',
'ERemoteStoragePlatformPS3': '4',
'ERemoteStoragePlatformLinux': '8',
'ERemoteStoragePlatformReserved2': '16',
# ESteamItemFlags
'ESteamItemNoTrade': '1',
'ESteamItemRemoved': '0x100',
'ESteamItemConsumed': '0x200',
# ESteamPartyBeaconLocationData
'ESteamPartyBeaconLocationDataIconURLSmall': '2',
'ESteamPartyBeaconLocationDataIconURLMedium': '3',
'ESteamPartyBeaconLocationDataIconURLLarge': '4',
# EUGCMatchingUGCType
'EUGCMatchingUGCType_All': '0xffffffff',
}
EXCLUDED_STRUCT_MEMBER = {
'HTML_NewWindow_t': [
'NewWindow_BrowserHandle',
],
}
MEMBER_NAME_OVERRIDE = {
'LobbyEnter_t': {
'EChatRoomEnterResponse': 'ChatRoomEnterResponse',
},
'SteamPartyBeaconLocation_t': {
'Type': 'LocationType',
},
'ScreenshotReady_t': {
'Local': 'Handle',
},
}
def check_status_code(response):
if response.status_code != 200:
print("! Received status code {}".format(response.status_code))
exit(response.status_code)
# noinspection PyShadowingNames
def get_url_filename(filename):
return filename[:filename.index(".")] if "." in filename else filename
def clean_enum_name(name):
if name.startswith("k_"):
name = name[2:]
if name.startswith("e"):
name = name[0].upper() + name[1:]
if name.startswith("EEVRHMDType"):
name = name[1:]
return name
def clean_member_name(name):
if name.startswith("m_"):
name = name[2:]
if name.startswith("rtime"):
name = name[1:]
if name == name.lower() \
or name.startswith("steam") \
or name.startswith('game') \
or name.startswith('num') \
or name.startswith('item') \
or name.startswith('time'):
name = name[0].upper() + name[1:]
else:
if re.match('^[a-z]{3,}', name) \
and not name.startswith("rgch") \
and not name.startswith("cch") \
and not name.startswith("rgf") \
and not name.startswith("pch") \
and not name.startswith("pub") \
and not name.startswith("cub") \
and not name.startswith("rot") \
and not name.startswith("pos"):
print("! Potential name truncation: " + name)
name = re.sub("^[a-z]*", "", name)
return name
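# Illustrative results of clean_member_name() (sketch):
#   clean_member_name("m_ulSteamIDLobby") --> "SteamIDLobby"
#   clean_member_name("m_bLocked")        --> "Locked"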
def clean_member_type(mtype, name):
# print(mtype)
if mtype.startswith("char") or mtype.startswith("const char"):
return "string"
if mtype in ["float", "double"]:
return "float"
if "[" in mtype or "\U0000FF3B" in mtype: # Web pages use unicode brackets.
return "integer[]"
if name.endswith("IP"): # Plugin converts IP address uint32 to string.
return "string"
return "integer"
def get_first_sentence(desc):
# First sentence only.
match = re.match("^((?![?.][ A-Z]).)*[?.]", desc)
if match is None:
# Use the whole thing.
return desc
desc = match.group()
if desc.endswith(" (e.g."):
desc = desc[:-6] + "."
if desc.endswith(" ."):
desc = desc[:-2] + "."
# print(desc)
return desc
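# Illustrative results of get_first_sentence() (sketch):
#   get_first_sentence("Gets the size. Returns 0 on failure.") --> "Gets the size."
#   get_first_sentence("See the Steamworks docs")              --> "See the Steamworks docs"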
# noinspection PyShadowingNames
def download_api_page(filename):
print("Downloading {}...".format(filename))
url = STEAMWORKS_API_URL_BASE + get_url_filename(filename)
response = requests.get(url)
check_status_code(response)
html = response.text
with open(filename + '.htm', 'w', encoding='utf-8') as f:
f.write(html)
# noinspection PyShadowingNames
def to_json_function(struct):
# Build the member list string first.
member_code = []
for member in struct['members']:
if member['exclude']:
continue
if member['old_type'] in ['CSteamID', 'SteamLeaderboard_t']:
value = 'std::to_string(GetPluginHandle(value.{old_name}))'.format(**member)
elif member['type'] == 'string':
value = 'std::string("\\"" + EscapeJSON(value.{old_name}) + "\\"")'.format(**member)
elif member['type'] in ['integer', 'float', 'uint32', 'uint16']:
value = 'std::to_string(value.{old_name})'.format(**member)
else:
print("! unsupported member type for to_json_function: " + member['type'])
value = 'do_something(value.{old_name})'.format(**member)
member_code.append('"\\"{name}\\": " + {value}'.format(value=value, **member))
member_code = ' + ", "\n\t\t'.join(member_code)
# Now the method code.
return '''std::string ToJSON({name} value)
{{
return std::string("{{"
{member_code} + "}}");
}}
'''.format(member_code=member_code, **struct)
# return utils::CreateString(std::string("{"
# "\"SteamIDLobby\": " + std::to_string(GetPluginHandle(lobbyEnter.m_ulSteamIDLobby)) + ", "
# "\"Locked\": " + std::to_string(lobbyEnter.m_bLocked) + ", "
# "\"ChatRoomEnterResponse\": " + std::to_string(lobbyEnter.m_EChatRoomEnterResponse) + ", "
# "\"ChatPermissions\": " + std::to_string(lobbyEnter.m_rgfChatPermissions) + "}"));
# noinspection PyShadowingNames
def append_text_file(filename, add_header=True, outfile=None):
if not os.path.exists(filename + ".txt"):
return False
print("+ Appending {}...".format(filename))
with open(filename + '.txt', 'r', encoding='utf-8') as f:
text = f.read()
f = open(OUTPUT_FILENAME, 'a') if outfile is None else outfile
if add_header:
f.write('/' * 80 + '\n')
f.write('// {}\n'.format(filename))
f.write('// {}\n'.format(STEAMWORKS_API_URL_BASE + get_url_filename(filename)))
f.write('/' * 80 + '\n')
f.write('\n')
f.write(text)
f.write('\n')
if outfile is None:
f.close()
return True
# noinspection PyShadowingNames
def parse_api_page(filename):
if append_text_file(filename):
return
print("Parsing {}...".format(filename))
with open(filename + '.htm', 'r', encoding='utf-8') as f:
html = f.read()
soup = BeautifulSoup(html, "html5lib")
structs = []
enums = []
for section in soup.find_all("h2", "bb_section"):
# print(section.text)
if section.text in ['Callbacks', 'Structs']:
node = section
struct = {}
while node.nextSibling is not None:
node = node.nextSibling
if node.name == "h2" and "bb_section" in node["class"]:
# Found the next section. Quit.
break
if node.name == "h2" and "bb_subsection" in node["class"]:
# Found a subsection. This contains the struct name. Also grab any description.
struct = {}
structs.append(struct)
struct["name"] = node.text
struct["desc"] = ""
while node is not None and (node.nextSibling.name is None or node.nextSibling.name == "a"):
node = node.nextSibling
if node.name is None:
struct['desc'] += node.strip() + " "
else:
struct['desc'] += node.text.strip() + " "
struct["desc"] = struct["desc"].strip()
elif node.name == "table" and "members" not in struct:
# Parse the table of member variables.
struct["members"] = []
for row in node.find_all("tr")[1:]: # First row is the header. Skip it.
cols = row.find_all("td")
# if struct['name'] == 'FriendsEnumerateFollowingList_t':
# print(cols[1].text)
struct["members"].append({
"name": cols[0].text,
"type": cols[1].text,
"desc": cols[2].text,
})
elif node.name == "strong" and node.text == "Associated Functions:":
# Keep a list of associated functions.
struct["functions"] = []
node = node.nextSibling
while node is not None and node.nextSibling.name == "a" \
and "bb_apilink" in node.nextSibling["class"]:
node = node.nextSibling
struct["functions"].append(node.text)
if section.text in ['Enums']:
# print("enums")
node = section
enum = {}
while node.nextSibling is not None:
node = node.nextSibling
if node.name == "h2" and "bb_section" in node["class"]:
# Found the next section. Quit.
break
if node.name == "h2" and "bb_subsection" in node["class"]:
# Found a subsection. This contains the struct name. Also grab any description.
enum = {}
enums.append(enum)
enum["name"] = node.text
enum["desc"] = ""
while node is not None and (node.nextSibling.name is None or node.nextSibling.name == "a"):
node = node.nextSibling
if node.name is None:
enum['desc'] += node.strip() + " "
else:
enum['desc'] += node.text.strip() + " "
enum["desc"] = enum["desc"].strip()
elif node.name == "table" and "members" not in enum:
# Parse the table of member variables.
enum["members"] = []
for row in node.find_all("tr")[1:]: # First row is the header. Skip it.
cols = row.find_all("td")
# if struct['name'] == 'FriendsEnumerateFollowingList_t':
# print(cols[1].text)
enum["members"].append({
"name": cols[0].text,
"value": cols[1].text,
"desc": cols[2].text,
})
# print([t for t in structs if t['name'] == 'FriendsEnumerateFollowingList_t'])
# noinspection PyShadowingNames
def exclude_struct(struct):
if 'members' not in struct:
print("! Type had no members: {name}".format(**struct))
return True
if filename in IGNORED and struct['name'] in IGNORED[filename]:
return True
if 'desc' in struct:
desc_lower = struct['desc'].lower()
if 'used internally' in desc_lower:
return True
return desc_lower.startswith('deprecated')
return False
# noinspection PyShadowingNames
def exclude_enum(enum):
if 'desc' in enum:
desc_lower = enum['desc'].lower()
if 'used internally' in desc_lower:
return True
if 'steam internal usage only' in desc_lower:
return True
return desc_lower.startswith('deprecated')
return False
def is_deprecated_enum_member(enum_member):
desc_lower = enum_member['desc'].lower()
if 'used internally' in desc_lower:
if enum_member['name'] != 'EUGCQuery_RankedByTotalVotesAsc':
return True
if 'deprecated' in desc_lower:
return True
if 'deprecated' in enum_member['name'].lower():
return True
if enum_member['name'] in ['EFriendRelationshipMax', 'EPersonaStateMax', 'ESteamPartyBeaconLocationType_Max',
'EMarketingMessageFlagsPlatformRestrictions']:
return True
return False
# noinspection PyShadowingNames
def cleanup_struct(struct):
struct['desc'] = get_first_sentence(struct["desc"].strip())
for x in range(len(struct['members'])):
member = struct['members'][x]
member['old_name'] = member['name']
member['old_type'] = member['type']
member['name'] = clean_member_name(member['name'])
if struct['name'] in MEMBER_NAME_OVERRIDE:
if member['name'] in MEMBER_NAME_OVERRIDE[struct['name']]:
member['name'] = MEMBER_NAME_OVERRIDE[struct['name']][member['name']]
member['desc'] = get_first_sentence(member['desc'])
if member['type'].startswith('E'):
member['desc'] = member['type'] + ". " + member['desc']
member['type'] = clean_member_type(member['type'], member['name'])
member['exclude'] = struct['name'] in EXCLUDED_STRUCT_MEMBER \
and member['name'] in EXCLUDED_STRUCT_MEMBER[struct['name']]
# noinspection PyShadowingNames
def cleanup_enum(enum):
enum['desc'] = get_first_sentence(enum["desc"].strip())
for x in range(len(enum['members'])):
member = enum['members'][x]
member['name'] = clean_enum_name(member['name'])
member['desc'] = get_first_sentence(member['desc'])
if member['name'] in ENUM_VALUE_FIXES:
member['desc'] += "\t// {}".format(member['value'])
member['value'] = ENUM_VALUE_FIXES[member['name']]
# Output to file.
with open(OUTPUT_FILENAME, 'a') as f:
f.write('/' * 80 + '\n')
f.write('// {}\n'.format(filename))
f.write('// {}\n'.format(STEAMWORKS_API_URL_BASE + get_url_filename(filename)))
f.write('/' * 80 + '\n')
f.write('\n')
structs[:] = [struct for struct in structs if not exclude_struct(struct)]
enums[:] = [enum for enum in enums if not exclude_enum(enum)]
if structs or os.path.exists(filename + "-structs-append.txt"):
f.write('//' + '-' * 78 + '\n')
f.write('// Structs\n')
f.write('//' + '-' * 78 + '\n')
f.write('\n')
for struct in structs:
cleanup_struct(struct)
if append_text_file(struct['name'] + '-replace', False, outfile=f):
continue
if struct['desc']:
f.write('// {desc}\n'.format(**struct))
if 'functions' in struct:
f.write('// Associated Functions: {0}\n'.format(' '.join(struct['functions'])))
f.write('Type {name}\n'.format(**struct))
for member in struct['members']: | |
<filename>pipeline.py<gh_stars>0
import base64
import collections
import functools
import gzip
import inspect
import itertools
import json
import os
import re
import traceback
import typing
import conducto.internal.host_detection as hostdet
from .shared import constants, log, types as t
from . import api, callback, image as image_mod
State = constants.State
class TreeError(Exception):
pass
def jsonable(obj):
try:
json.dumps(obj)
return True
except TypeError:
return False
def load_node(**kwargs):
if kwargs["type"] == "Exec":
return Exec(**kwargs)
elif kwargs["type"] == "Serial":
return Serial(**kwargs)
elif kwargs["type"] == "Parallel":
return Parallel(**kwargs)
else:
raise TypeError("Type {} not a valid node type".format(kwargs["type"]))
class Node:
"""
The node classes :py:class:`Exec`, :py:class:`Serial` and
:py:class:`Parallel` all derive from this class. The parameters here apply
directly to `Exec` nodes and as defaults on `Serial` and `Parallel` for the
sub-nodes.
:param cpu: `float`, default 1, Number of CPUs to allocate to the Node.
Must be >0 if assigned.
:param mem: `float`, default 2, GB of memory to allocate to the Node. Must
be >0 if assigned.
    :param requires_docker: `bool`, default `False`, If True, enable the Node
        to use Docker.
:param env: `dict` with keys environment variables and the values
:param image: :py:class:`conducto.Image` or `str`, Run Node in container
using the given :py:class:`conducto.Image` or image identified
by name in Docker.
:param image_name: `str`, Reference an :py:class:`conducto.Image` by
name instead of passing it explicitly. The Image must have been
registered with :py:func:`conducto.Node.register_image`.
:param same_container: See :ref:`Running Exec nodes` for details. Note this
has special inheritance rules when propagating to child nodes.
:param skip: bool, default `False`, If False the Node will be run normally.
If True execution will pass over it and it will not be run.
:param suppress_errors: bool, default `False`, If True the Node will go to
the Done state when finished, even if some children have failed. If False,
any failed children will cause it to go to the Error state.
:param name: If creating Node inside a context manager, you may pass
`name=...` instead of using normal dict assignment.
All of these arguments, except for `name`, may be set in the Node
constructor or later. For example, `n = co.Parallel(cpu=2)` and
.. code-block::
n = co.Parallel()
n.cpu = 2
are equivalent.
:ivar name: Immutable. The name of this Node must be unique among sibling
Nodes. It is most commonly set through dict assignment with
`parent['nodename'] = co.Parallel()`. It may also be set in the
constructor with `co.Parallel(name='nodename')` if you're using another
Node as a context manager. It may not contain a `/`, as `/` is reserved
as the path separator.
"""
# Enum regarding skip statuses. The naming is awkward but intentional:
# 'skip' is the namespace, but we should phrase the terms in the positive,
# i.e., how many are running.
SKIP_RUN_NONE = 0
SKIP_RUN_SOME = 1
SKIP_RUN_ALL = 2
# In AWS cloud mode, mem and cpu must fit on an EC2 instance (in EC2
# mode), and must be one of allowed pairings (in FARGATE mode).
DEFAULT_MEM = 2
DEFAULT_CPU = 1
DEFAULT_GPU = 0
_CONTEXT_STACK = []
_NUM_FILE_AND_LINE_CALLS = 0
_MAX_FILE_AND_LINE_CALLS = 50000
if api.Config().get("config", "force_debug_info") or t.Bool(
os.getenv("CONDUCTO_FORCE_DEBUG_INFO")
):
_MAX_FILE_AND_LINE_CALLS = 10 ** 20
__slots__ = (
"_name",
"id",
"id_root",
"user_set",
"_root",
"pipeline_id",
"id_generator",
"token",
"parent",
"children",
"_callbacks",
"suppress_errors",
"same_container",
"env",
"doc",
"title",
"tags",
"file",
"line",
"_repo",
"_autorun",
"_sleep_when_done",
)
def __init__(
self,
*,
env=None,
skip=False,
name=None,
cpu=None,
gpu=None,
mem=None,
requires_docker=None,
suppress_errors=False,
same_container=constants.SameContainer.INHERIT,
image: typing.Union[str, image_mod.Image] = None,
image_name=None,
doc=None,
title=None,
tags: typing.Iterable = None,
file=None,
line=None,
):
self.id_generator, self.id_root = itertools.count(), self
self.id = None
self.parent = None
self._root = self
self.children = {}
self._callbacks = []
self.token = None
assert image_name is None or image is None, "can only specify one image"
self._repo = image_mod.Repository()
# store actual values of each attribute
self.user_set = {
"skip": skip,
"cpu": cpu,
"gpu": gpu,
"mem": mem,
"requires_docker": requires_docker,
}
if image:
self.image = image
else:
self.user_set["image_name"] = image_name
self.env = env or {}
self.doc = doc
self.title = title
self.tags = self.sanitize_tags(tags)
if name is not None:
if not Node._CONTEXT_STACK:
raise ValueError(
f"Cannot assign name={name} outside of a context manager."
)
if "/" in name:
raise ValueError(
f"Disallowed character in name, may not use '/': {name}"
)
parent = Node._CONTEXT_STACK[-1]
parent[name] = self
else:
self._name = "/"
self.suppress_errors = suppress_errors
self.same_container = same_container
# These are only to be set on the root node, and only by co.main().
self._autorun = None
self._sleep_when_done = None
if file is not None:
self.file = file
self.line = line
else:
self.file, self.line = self._get_file_and_line()
def __enter__(self):
Node._CONTEXT_STACK.append(self)
return self
def __exit__(self, exc_type, exc_val, exc_tb):
if Node._CONTEXT_STACK[-1] is not self:
raise Exception(
f"Node context error: {repr(Node._CONTEXT_STACK[-1])} is not {repr(self)}"
)
Node._CONTEXT_STACK.pop()
def __str__(self):
"""
The full path of Node, computed by joining the names of this Node's ancestry with `/`.
.. code-block:: python
import conducto as co
x = co.Parallel()
x["foo"] = y = co.Parallel()
x["foo/bar"] = z = co.Exec("echo foobar")
print(f"x.name={x.name} str(x)={x}")
# x.name=/ str(x) = /
print(f"y.name={y.name} str(y)={y}")
# y.name=foo str(y) = /foo
print(f"z.name={z.name} str(z)={z}")
# z.name=bar str(z) = /foo/bar
for node in x.stream():
print(str(node))
# /
# /foo
# /foo/bar
"""
name = []
cur = self
while cur:
name.append(cur.name)
cur = cur.parent
return "/".join(name[::-1]).replace("//", "/")
@property
def name(self):
return self._name
@property
def repo(self):
return self.root._repo
@property
def _id(self):
return self.id
@property
def mem(self):
return self.user_set["mem"]
@property
def gpu(self):
return self.user_set["gpu"]
@property
def cpu(self):
return self.user_set["cpu"]
@property
def requires_docker(self):
return self.user_set.get("requires_docker")
@property
def skip(self):
return self.user_set.get("skip", False)
@mem.setter
def mem(self, val):
self.user_set["mem"] = val
@gpu.setter
def gpu(self, val):
self.user_set["gpu"] = val
@cpu.setter
def cpu(self, val):
self.user_set["cpu"] = val
@property
def image(self) -> typing.Optional[image_mod.Image]:
if self.image_name is None:
return None
return self.repo[self.image_name]
@property
def image_name(self):
return self.get_inherited_attribute("image_name")
@image.setter
def image(self, val):
if val is None:
self.user_set["image_name"] = None
return
if isinstance(val, str):
val = image_mod.Image(val)
if isinstance(val, image_mod.Image):
self.repo.add(val)
self.user_set["image_name"] = val.name
else:
raise ValueError(f"Unknown type for Node.image: {repr(val)}")
@requires_docker.setter
def requires_docker(self, val: bool):
self.user_set["requires_docker"] = val
@skip.setter
def skip(self, val: bool):
self.user_set["skip"] = val
def register_image(self, image: image_mod.Image):
"""
Register a named Image for use by descendant Nodes that specify
image_name. This is especially useful with lazy pipeline creation to
ensure that the correct base image is used.
:param image: :py:class:`conducto.Image`
"""
self.repo.add(image)
def on_done(self, cback):
assert isinstance(cback, callback.base)
self._callbacks.append((State.DONE, cback))
def on_error(self, cback):
assert isinstance(cback, callback.base)
self._callbacks.append((State.ERROR, cback))
def on_queued(self, cback):
assert isinstance(cback, callback.base)
self._callbacks.append((State.QUEUED, cback))
def on_running(self, cback):
assert isinstance(cback, callback.base)
self._callbacks.append((State.RUNNING, cback))
def _pull(self):
if self.id is None or self.root != self.id_root:
self.id_root = self.root
self.id = next(self.root.id_generator)
# get root with path compression
@property
def root(self):
if self._root != self:
self._root = self._root.root
return self._root
def __setitem__(self, name, node):
if "/" in name:
path, new = name.rsplit("/", 1)
self[path][new] = node
return
if name in self.children or node.root == self.root or node.root != node:
raise TreeError(
f"Adding node {name} violates the integrity of the pipeline"
)
self.children[name] = node
self.repo.merge(node.repo)
node.parent = self
node._root = self.root
node._name = name
def __getitem__(self, item):
# Absolute paths start with a '/' and begin at the root
if item.startswith("/"):
current = self.root
else:
current = self
for i in item.split("/"):
# Ignore consecutive delimiters: 'a/b//c' == 'a/b/c'
if not i:
continue
# Find the referenced child and iterate
current = current.children[i]
return current
def __contains__(self, item):
try:
self[item]
except KeyError:
return False
else:
return True
def describe(self):
output = {
**self.user_set,
**{"__env__" + key: value for key, value in self.env.items()},
"id": self,
"callbacks": [(event, cb.to_literal()) for event, cb in self._callbacks],
"type": self.__class__.__name__,
"file": self.file,
"line": self.line,
}
if self.doc:
output["doc"] = self.doc
if self.title:
output["title"] = self.title
if self.tags:
output["tags"] = self.tags
if self.same_container != constants.SameContainer.INHERIT:
output["same_container"] = self.same_container
if self.suppress_errors:
output["suppress_errors"] = self.suppress_errors
if isinstance(self, Serial):
output["stop_on_error"] = self.stop_on_error
if isinstance(self, Exec):
output["command"] = self.command
return output
def serialize(self, pretty=False):
def validate_env(node):
for key, value in node.env.items():
if not isinstance(key, str):
raise TypeError(
f"{node} has {type(key).__name__} in env key when | |
ocean along x [W m-1]
if ( "utau" in ncU.data_vars ):
TAUX = ncU.utau
elif ( "sozotaux" in ncU.data_vars ):
TAUX = ncU.sozotaux
else:
print(' WARNING : No data found for TAUX --> filled with NaNs')
TAUX = xr.DataArray( np.zeros((mtime,my,mx))*np.nan, dims=['time', 'y', 'x'] )
  # surface stress received by the ocean along y [N m-2]
if ( "vtau" in ncV.data_vars ):
TAUY = ncV.vtau
elif ( "sometauy" in ncV.data_vars ):
TAUY = ncV.sometauy
else:
print(' WARNING : No data found for TAUY --> filled with NaNs')
TAUY = xr.DataArray( np.zeros((mtime,my,mx))*np.nan, dims=['time', 'y', 'x'] )
# Sea surface height [m]
if ( "zos" in ncS.data_vars ):
ZOS = ncS.zos
elif ( "sossh" in ncS.data_vars ):
ZOS = ncS.sossh
elif ( "ssh" in ncS.data_vars ):
ZOS = ncS.ssh
elif ( "sossheig" in ncS.data_vars ):
ZOS = ncS.sossheig
elif ( "zos" in ncT.data_vars ):
ZOS = ncT.zos
elif ( "sossh" in ncT.data_vars ):
ZOS = ncT.sossh
elif ( "ssh" in ncT.data_vars ):
ZOS = ncT.ssh
elif ( "sossheig" in ncT.data_vars ):
ZOS = ncT.sossheig
else:
print(' WARNING : No data found for ZOS --> filled with NaNs')
ZOS = xr.DataArray( np.zeros((mtime,my,mx))*np.nan, dims=['time', 'y', 'x'] )
# mass barotropic streamfunction
# see Griffies et al. (2016, section H26): d(psi)/dy=-U (U: x-ward mass transport), d(psi)/dx=V (V: yward mass transport)
if ( "sobarstf" in ncP.data_vars ):
MSFTBAROT = ncP.sobarstf * rho0
else:
print(' WARNING : No data found for MSFTBAROT --> filled with NaNs')
MSFTBAROT = xr.DataArray( np.zeros((mtime,my,mx))*np.nan, dims=['time', 'y', 'x'] )
# ice shelf melt [kg m-2 s-1, positive for actual melting] :
if ( "fwfisf" in ncS.data_vars ):
FICESHELF = ncS.fwfisf*(-1)
elif ( "sowflisf_cav" in ncS.data_vars ):
FICESHELF = ncS.sowflisf_cav*(-1)
else:
print(' WARNING : No data found for FICESHELF --> filled with NaNs')
FICESHELF = xr.DataArray( np.zeros((mtime,my,mx))*np.nan, dims=['time', 'y', 'x'] )
# ice shelf dynamical driving (heat exchange velocity) [m s-1]:
if ( "isfgammat" in ncS.data_vars ):
DYDRFLI = ncS.isfgammat
elif ( "sogammat_cav" in ncS.data_vars ):
DYDRFLI = ncS.sogammat_cav
else:
print(' WARNING : No data found for DYDRFLI --> filled with NaNs')
DYDRFLI = xr.DataArray( np.zeros((mtime,my,mx))*np.nan, dims=['time', 'y', 'x'] )
# ice shelf thermal driving [degC]:
if ( "isfthermdr" in ncS.data_vars ):
THDRFLI = ncS.isfthermdr
elif ( "thermald_cav" in ncS.data_vars ):
THDRFLI = ncS.thermald_cav
else:
print(' WARNING : No data found for THDRFLI --> filled with NaNs')
THDRFLI = xr.DataArray( np.zeros((mtime,my,mx))*np.nan, dims=['time', 'y', 'x'] )
# ice shelf haline driving [0.001]:
if ( "isfhalindr" in ncS.data_vars ):
HADRFLI = ncS.isfhalindr
elif ( "halined_cav" in ncS.data_vars ):
HADRFLI = ncS.halined_cav
else:
print(' WARNING : No data found for HADRFLI --> filled with NaNs')
HADRFLI = xr.DataArray( np.zeros((mtime,my,mx))*np.nan, dims=['time', 'y', 'x'] )
# sea-ice concentration [0-100]
if ( "siconc" in ncI.data_vars ):
SICONC = ncI.siconc*100.0
#SICONC = SICONC.where( (~np.isnan(SICONC)) & (~np.isinf(SICONC)), 0.e0 )
else:
print(' WARNING : No data found for SICONC --> filled with NaNs')
SICONC = xr.DataArray( np.zeros((mtime,my,mx))*np.nan, dims=['time', 'y', 'x'] )
# sea-ice volume per area [m]
if ( "sivolu" in ncI.data_vars ):
SIVOL = ncI.sivolu
elif ( "sivol" in ncI.data_vars ):
SIVOL = ncI.sivol
else:
print(' WARNING : No data found for SIVOL --> filled with NaNs')
SIVOL = xr.DataArray( np.zeros((mtime,my,mx))*np.nan, dims=['time', 'y', 'x'] )
# sea-ice x-ward velocity [m/s]
if ( "sivelu" in ncI.data_vars ):
SIUX = ncI.sivelu
elif ("siu" in ncI.data_vars ):
SIUX = ncI.siu
else:
print(' WARNING : No data found for SIUX --> filled with NaNs')
SIUX = xr.DataArray( np.zeros((mtime,my,mx))*np.nan, dims=['time', 'y', 'x'] )
# sea-ice y-ward velocity [m/s]
if ( "sivelv" in ncI.data_vars ):
SIVY = ncI.sivelv
elif ("siv" in ncI.data_vars ):
SIVY = ncI.siv
else:
      print(' WARNING : No data found for SIVY --> filled with NaNs')
SIVY = xr.DataArray( np.zeros((mtime,my,mx))*np.nan, dims=['time', 'y', 'x'] )
# Total heat flux received by the ocean surface (including ice-shelf/ocean interface) [W m-2]
# see Griffies et al. (2016, section K4-K5) NB: here, including correction if any unlike Griffies (to avoid 2 variables)
if ( ("qt_oce" in ncS.data_vars) & ("qisf" in ncS.data_vars) ):
HFDS = ncS.qt_oce + ncS.qisf # ice-shelf heat flux not included in qt_oce in tested NEMO versions
elif ( ("sohefldo" in ncS.data_vars) & ("qoceisf_cav" in ncS.data_vars) ):
HFDS = ncS.sohefldo + ncS.qoceisf_cav # not included in sohefldo in tested NEMO versions
else:
print(' WARNING : No data found for HFDS --> filled with NaNs')
HFDS = xr.DataArray( np.zeros((mtime,my,mx))*np.nan, dims=['time', 'y', 'x'] )
# Water flux entering the ocean due to sea-ice (melting-freezing) and surface correction (SSS restoring)
# (= fsitherm + wfocorr in Griffies 2016 section K2) [kg m-2 s-1]
if ( ("erp" in ncS.data_vars) & ("saltflx" in ncS.data_vars) ):
WFOSICOR = ncS.erp.where( (np.abs(ncS.erp)<1.e2) & (maskT.isel(z=0) == 1), 0.e0 ) - ncS.saltflx.where( (maskT.isel(z=0) == 1), 0.e0 ) \
/ SS.isel(z=0).where( (maskT.isel(z=0) == 1), 1.e0 ) # NB: saltflx unit attribute is wrong in nico's output, it is actually in [1e-3 kg m-2 s-1]
elif ( ("erp" in ncS.data_vars) & ("sfx" in ncI.data_vars) ):
WFOSICOR = ncS.erp.where( (np.abs(ncS.erp)<1.e2) & (maskT.isel(z=0) == 1), 0.e0 ) - ncI.sfx.where( (maskT.isel(z=0) == 1), 0.e0 )/86400.0 \
/ SS.isel(z=0).where( (maskT.isel(z=0) == 1), 1.e0 )
elif ( ("sowafld" in ncS.data_vars) & ("sosfldow" in ncS.data_vars) ):
WFOSICOR = ncS.sowafld.where( (np.abs(ncS.sowafld)<1.e2) & (maskT.isel(z=0) == 1), 0.e0 ) - ncS.sosfldow.where( (maskT.isel(z=0) == 1), 0.e0 ) \
/ SS.isel(z=0).where( (maskT.isel(z=0) == 1), 1.e0 )
else:
print(' WARNING : No data found for WFOSICOR --> filled with NaNs')
WFOSICOR = xr.DataArray( np.zeros((mtime,my,mx))*np.nan, dims=['time', 'y', 'x'] )
# Water flux entering the ocean due to rainfall, snowfall, condensation - evap,
# river runoff, iceberg and ice-shelf melt [kg m-2 s-1]
# (= pr+prs+evs+ficeberg+friver+ficeshelf in Griffies 2016, section K2)
if ( "empmr" in ncS.data_vars ):
WFOATRLI = - ncS.empmr + FICESHELF
elif ( "sowaflup" in ncS.data_vars ):
WFOATRLI = - ncS.sowaflup.where( (maskT.isel(z=0) == 1), 0.e0 ) - WFOSICOR + FICESHELF
else:
print(' WARNING : No data found for WFOATRLI --> filled with NaNs')
WFOATRLI = xr.DataArray( np.zeros((mtime,my,mx))*np.nan, dims=['time', 'y', 'x'] )
#----------
# Reduce the size of ocean dataset
[lonmin,lonmax,latmin,latmax] = grid_bounds_oce(region=region)
wdeg = 1.5 * np.max([dxT.where(latT <= latmax).max(),dyT.where(latT <= latmax).max()]) / 6.37e6 * 180. / np.pi / np.cos(0.5*(latmin+latmax)*np.pi/180.)
lonmin=lonmin - wdeg # take a bit more for interpolation
lonmax=lonmax + wdeg
latmin=latmin - wdeg
latmax=latmax + wdeg
condT2d = ( (latT >= latmin) & (latT <= latmax) & (lonT >= lonmin) & (lonT <= lonmax) )
for ii in np.arange(latT.shape[1]):
if ( np.sum(condT2d.isel(x=ii).values) == 0 ):
imin=ii
else:
imin=ii
break
for ii in np.arange(latT.shape[1]-1,0,-1):
if ( np.sum(condT2d.isel(x=ii).values) == 0 ):
imax=ii
else:
imax=ii
break
for jj in np.arange(latT.shape[0]):
if ( np.sum(condT2d.isel(y=jj).values) == 0 ):
jmin=jj
else:
jmin=jj
break
for jj in np.arange(latT.shape[0]-1,0,-1):
if ( np.sum(condT2d.isel(y=jj).values) == 0 ):
jmax=jj
else:
jmax=jj
break
print('Reducing domain to useful part, i.e. : ',[imin,imax,jmin,jmax])
#----------
# Create new xarray dataset including all useful variables:
# reshaping (x,y) as 1-dimensional (sxy)
nxy=(jmax-jmin+1)*(imax-imin+1)
newdepth=depTUV.values
newdepth[0]=0.0 # so that 1st level is taken at the surface without extrapolation
time_conv=ncT.time.dtype
if ( time_conv == 'datetime64[ns]' ):
time_val = ncT.time.values # standard calendar
else:
time_val = ncT.indexes['time'].to_datetimeindex().values # to enable dealing with non-standard calendar (e.g. noleap)
ds = xr.Dataset(
{
"SO": (["time", "z", "sxy"], np.reshape( SO.isel(x=slice(imin,imax+1),y=slice(jmin,jmax+1)).values, (mtime,mz,nxy)) ),
"THETAO": (["time", "z", "sxy"], np.reshape( THETAO.isel(x=slice(imin,imax+1),y=slice(jmin,jmax+1)).values, (mtime,mz,nxy)) ),
"UX": (["time", "z", "sxy"], np.reshape( UX.isel(x=slice(imin,imax+1),y=slice(jmin,jmax+1)).values, (mtime,mz,nxy)) ),
"VY": (["time", "z", "sxy"], np.reshape( VY.isel(x=slice(imin,imax+1),y=slice(jmin,jmax+1)).values, (mtime,mz,nxy)) ),
"TAUX": (["time", "sxy"], np.reshape( TAUX.isel(x=slice(imin,imax+1),y=slice(jmin,jmax+1)).values, (mtime,nxy)) ),
"TAUY": (["time", "sxy"], np.reshape( TAUY.isel(x=slice(imin,imax+1),y=slice(jmin,jmax+1)).values, (mtime,nxy)) ),
"ZOS": (["time", "sxy"], np.reshape( ZOS.isel(x=slice(imin,imax+1),y=slice(jmin,jmax+1)).values, (mtime,nxy)) ),
"TOB": (["time", "sxy"], np.reshape( TOB.isel(x=slice(imin,imax+1),y=slice(jmin,jmax+1)).values, (mtime,nxy)) ),
"SOB": (["time", "sxy"], np.reshape( SOB.isel(x=slice(imin,imax+1),y=slice(jmin,jmax+1)).values, (mtime,nxy)) ),
"FICESHELF": (["time", "sxy"], np.reshape( FICESHELF.isel(x=slice(imin,imax+1),y=slice(jmin,jmax+1)).values, (mtime,nxy)) ),
"DYDRFLI": (["time", "sxy"], np.reshape( DYDRFLI.isel(x=slice(imin,imax+1),y=slice(jmin,jmax+1)).values, (mtime,nxy)) ),
"THDRFLI": (["time", "sxy"], np.reshape( THDRFLI.isel(x=slice(imin,imax+1),y=slice(jmin,jmax+1)).values, (mtime,nxy)) ),
"HADRFLI": (["time", "sxy"], np.reshape( HADRFLI.isel(x=slice(imin,imax+1),y=slice(jmin,jmax+1)).values, (mtime,nxy)) ),
"MSFTBAROT": (["time", "sxy"], np.reshape( MSFTBAROT.isel(x=slice(imin,imax+1),y=slice(jmin,jmax+1)).values, (mtime,nxy)) ),
"HFDS": (["time", "sxy"], np.reshape( HFDS.isel(x=slice(imin,imax+1),y=slice(jmin,jmax+1)).values, (mtime,nxy)) ),
"WFOATRLI": (["time", | |
'b'],
['permanente', 'noun', 'b'],
['permesso', 'past_part', 'b'],
['permesso', 'adjective', 'b'],
['permesso', 'noun', 'b'],
['permettere', 'verb', 'a'],
['pero', 'noun', 'c'],
['però', 'conjunction', 'a'],
['perpendicolare', 'adjective', 'c'],
['perpendicolare', 'noun', 'c'],
['perplesso', 'adjective', 'b'],
['perquisizione', 'noun', 'b'],
['perseguire', 'verb', 'b'],
['persiana', 'noun', 'c'],
['persiano', 'adjective', 'b'],
['persiano', 'noun', 'b'],
['persino', 'adverb', 'a'],
['perso', 'past_part', 'b'],
['perso', 'adjective', 'b'],
['persona', 'noun', 'a'],
['personaggio', 'noun', 'a'],
['personale', 'adjective', 'a'],
['personale', 'noun', 'a'],
['personale', 'noun', 'a'],
['personalità', 'noun', 'b'],
['personalmente', 'adverb', 'a'],
['pertanto', 'conjunction', 'b'],
['perugino', 'adjective', 'c'],
['perugino', 'noun', 'c'],
['peruviano', 'adjective', 'c'],
['peruviano', 'noun', 'c'],
['pervenire', 'verb', 'b'],
['pesante', 'pres_part', 'a'],
['pesante', 'adjective', 'a'],
['pesante', 'adverb', 'a'],
['pesare', 'verb', 'b'],
['pesca', 'noun', 'c'],
['pesca', 'adjective', 'c'],
['pesca', 'noun', 'b'],
['pescare', 'verb', 'b'],
['pescatore', 'noun', 'b'],
['pescatore', 'adjective', 'b'],
['pesce', 'noun', 'a'],
['peschereccio', 'noun', 'c'],
['peschereccio', 'adjective', 'c'],
['pescheria', 'noun', 'c'],
['pesco', 'noun', 'c'],
['peso', 'noun', 'a'],
['pessimo', 'adjective', 'b'],
['pestare', 'verb', 'c'],
['peste', 'noun', 'c'],
['pesto', 'past_part', 'c'],
['pesto', 'adjective', 'c'],
['pesto', 'noun', 'c'],
['petalo', 'noun', 'c'],
['petardo', 'noun', 'c'],
['petroliera', 'noun', 'c'],
['petrolio', 'noun', 'b'],
['pettegolezzo', 'noun', 'c'],
['pettegolo', 'adjective', 'c'],
['pettegolo', 'noun', 'c'],
['pettinare', 'verb', 'c'],
['pettinatura', 'noun', 'c'],
['pettine', 'noun', 'c'],
['pettirosso', 'noun', 'c'],
['petto', 'noun', 'a'],
['pezza', 'noun', 'c'],
['pezzetto', 'noun', 'b'],
['pezzo', 'noun', 'a'],
['pezzuola', 'noun', 'c'],
['pi', 'noun', 'c'],
['piacere', 'verb', 'a'],
['piacere', 'noun', 'a'],
['piacevole', 'adjective', 'b'],
['piadina', 'noun', 'c'],
['piaga', 'noun', 'c'],
['pialla', 'noun', 'c'],
['piallare', 'verb', 'c'],
['pianeggiante', 'pres_part', 'c'],
['pianeggiante', 'adjective', 'c'],
['pianerottolo', 'noun', 'b'],
['pianeta', 'noun', 'a'],
['piangere', 'verb', 'a'],
['piangere', 'noun', 'a'],
['piano', 'noun', 'a'],
['piano', 'noun', 'a'],
['piano', 'adjective', 'a'],
['piano', 'adverb', 'a'],
['pianoforte', 'noun', 'b'],
['pianoterra', 'noun', 'c'],
['pianta', 'noun', 'a'],
['piantare', 'verb', 'b'],
['pianto', 'noun', 'b'],
['pianura', 'noun', 'b'],
['piastra', 'noun', 'c'],
['piattaforma', 'noun', 'b'],
['piatto', 'adjective', 'a'],
['piatto', 'noun', 'a'],
['piazza', 'noun', 'a'],
['piazzale', 'noun', 'b'],
['piazzare', 'verb', 'b'],
['piccante', 'adjective', 'c'],
['picchiare', 'verb', 'b'],
['piccino', 'adjective', 'c'],
['piccino', 'noun', 'c'],
['piccione', 'noun', 'c'],
['picco', 'noun', 'b'],
['piccolo', 'adjective', 'a'],
['piccolo', 'noun', 'a'],
['piccone', 'noun', 'c'],
['picnic', 'noun', 'c'],
['pidocchio', 'noun', 'c'],
['piede', 'noun', 'a'],
['piega', 'noun', 'b'],
['piegare', 'verb', 'b'],
['pieghevole', 'adjective', 'c'],
['pieghevole', 'noun', 'c'],
['piemontese', 'adjective', 'b'],
['piemontese', 'noun', 'b'],
['piena', 'noun', 'c'],
['pienamente', 'adverb', 'b'],
['pieno', 'adjective', 'a'],
['pieno', 'noun', 'a'],
['pietà', 'noun', 'b'],
['pietra', 'noun', 'a'],
['pigiama', 'noun', 'c'],
['pigione', 'noun', 'c'],
['pigliare', 'verb', 'b'],
['pigna', 'noun', 'c'],
['pigrizia', 'noun', 'c'],
['pigro', 'adjective', 'c'],
['pigro', 'noun', 'c'],
['pila', 'noun', 'b'],
['pillola', 'noun', 'b'],
['pilota', 'noun', 'b'],
['pineta', 'noun', 'c'],
['ping-pong', 'noun', 'c'],
['pinguino', 'noun', 'c'],
['pinna', 'noun', 'c'],
['pinolo', 'noun', 'c'],
['pinza', 'noun', 'c'],
['pinzetta', 'noun', 'c'],
['pioggia', 'noun', 'a'],
['piombo', 'noun', 'b'],
['piombo', 'adjective', 'b'],
['piombo', 'noun', 'b'],
['pioppo', 'noun', 'c'],
['piovere', 'verb', 'b'],
['piovoso', 'adjective', 'c'],
['piovoso', 'noun', 'c'],
['pipì', 'noun', 'c'],
['pipistrello', 'noun', 'c'],
['pirata', 'noun', 'b'],
['piscina', 'noun', 'b'],
['pisello', 'noun', 'c'],
['pisello', 'adjective', 'c'],
['pisolino', 'noun', 'c'],
['pista', 'noun', 'b'],
['pistacchio', 'noun', 'c'],
['pistacchio', 'adjective', 'c'],
['pistola', 'noun', 'a'],
['pittare', 'verb', 'c'],
['pittore', 'noun', 'b'],
['pittore', 'adjective', 'b'],
['pittura', 'noun', 'b'],
['pitturare', 'verb', 'c'],
['più', 'adverb', 'a'],
['più', 'adjective', 'a'],
['più', 'preposition', 'a'],
['più', 'noun', 'a'],
['piuma', 'noun', 'c'],
['piumino', 'noun', 'c'],
['piuttosto', 'adverb', 'a'],
['pizza', 'noun', 'b'],
['pizzeria', 'noun', 'c'],
['pizzetta', 'noun', 'c'],
['pizzicare', 'verb', 'c'],
['pizzo', 'noun', 'c'],
['plaid', 'noun', 'c'],
['plastica', 'noun', 'b'],
['plastico', 'adjective', 'b'],
['plastico', 'noun', 'b'],
['platano', 'noun', 'c'],
['platino', 'noun', 'c'],
['platino', 'adjective', 'c'],
['plurale', 'noun', 'c'],
['plurale', 'adjective', 'c'],
['pneumatico', 'noun', 'c'],
['pochino', 'noun', 'b'],
['poco', 'adjective', 'a'],
['poco', 'pronoun', 'a'],
['poco', 'adverb', 'a'],
['podere', 'noun', 'c'],
['poema', 'noun', 'b'],
['poesia', 'noun', 'a'],
['poeta', 'noun', 'a'],
['poetico', 'adjective', 'b'],
['poetico', 'noun', 'b'],
['poggiapiedi', 'noun', 'c'],
['poggiare', 'verb', 'c'],
['poi', 'adverb', 'a'],
['poiché', 'conjunction', 'a'],
['poker', 'noun', 'b'],
['polacco', 'adjective', 'b'],
['polacco', 'noun', 'b'],
['polemica', 'noun', 'b'],
['polenta', 'noun', 'c'],
['polipo', 'noun', 'c'],
['politica', 'noun', 'a'],
['politico', 'adjective', 'a'],
['politico', 'noun', 'a'],
['polizia', 'noun', 'a'],
['poliziotto', 'noun', 'a'],
['pollaio', 'noun', 'c'],
['pollame', 'noun', 'c'],
['pollice', 'noun', 'b'],
['pollo', 'noun', 'c'],
['polmone', 'noun', 'b'],
['polo', 'noun', 'b'],
['polpa', 'noun', 'c'],
['polpastrello', 'noun', 'c'],
['polpetta', 'noun', 'c'],
['polpo', 'noun', 'c'],
['polsino', 'noun', 'c'],
['polso', 'noun', 'b'],
['poltrona', 'noun', 'b'],
['polvere', 'noun', 'a'],
['polverina', 'noun', 'c'],
['polveroso', 'adjective', 'c'],
['pomata', 'noun', 'c'],
['pomello', 'noun', 'c'],
['pomeriggio', 'noun', 'a'],
['pomodoro', 'noun', 'b'],
['pompa', 'noun', 'b'],
['pompelmo', 'noun', 'c'],
['pompiere', 'noun', 'c'],
['ponte', 'noun', 'a'],
['pony', 'noun', 'c'],
['pop', 'adjective', 'b'],
['pop', 'noun', 'b'],
['popolare', 'adjective', 'a'],
['popolare', 'noun', 'a'],
['popolare', 'verb', 'b'],
['popolarità', 'noun', 'c'],
['popolazione', 'noun', 'a'],
['popolo', 'noun', 'a'],
['porcellana', 'noun', 'c'],
['porcheria', 'noun', 'c'],
['porco', 'noun', 'b'],
['porco', 'adjective', 'b'],
['porgere', 'verb', 'b'],
['porno', 'adjective', 'b'],
['porno', 'noun', 'b'],
['porre', 'verb', 'a'],
['porta', 'noun', 'a'],
['portabagagli', 'noun', 'c'],
['portabagagli', 'adjective', 'c'],
['portacenere', 'noun', 'c'],
['portachiavi', 'noun', 'c'],
['portacipria', 'noun', 'c'],
['portaerei', 'noun', 'c'],
['portafinestra', 'noun', 'c'],
['portafoglio', 'noun', 'b'],
['portafortuna', 'noun', 'c'],
['portale', 'noun', 'b'],
['portamonete', 'noun', 'c'],
['portaombrelli', 'noun', 'c'],
['portare', 'verb', 'a'],
['portata', 'noun', 'b'],
['portatore', 'adjective', 'b'],
['portatore', 'noun', 'b'],
['portiere', 'noun', 'b'],
['portineria', 'noun', 'c'],
['porto', 'noun', 'a'],
['portoghese', 'adjective', 'b'],
['portoghese', 'noun', 'b'],
['portone', 'noun', 'b'],
['porzione', 'noun', 'b'],
['posa', 'noun', 'b'],
['posacenere', 'noun', 'c'],
['posare', 'verb', 'b'],
['posata', 'noun', 'c'],
['positivo', 'adjective', 'a'],
['positivo', 'noun', 'a'],
['positivo', 'adverb', 'a'],
['posizionare', 'verb', 'b'],
['posizione', 'noun', 'a'],
['possedere', 'verb', 'a'],
['possesso', 'noun', 'b'],
['possibile', 'adjective', 'a'],
['possibile', 'noun', 'a'],
['possibilità', 'noun', 'a'],
['post', 'noun', 'b'],
['posta', 'noun', 'a'],
['postale', 'adjective', 'b'],
['postare', 'verb', 'b'],
['posteggiatore', 'noun', 'c'],
['posteriore', 'adjective', 'b'],
['posteriore', 'noun', 'b'],
['postino', 'noun', 'c'],
['postino', 'adjective', 'c'],
['posto', 'noun', 'a'],
['potare', 'verb', 'c'],
['potente', 'pres_part', 'a'],
['potente', 'adjective', 'a'],
['potente', 'noun', 'a'],
['potentino', 'adjective', 'c'],
['potentino', 'noun', 'c'],
['potenza', 'noun', 'b'],
['potenziale', 'adjective', 'b'],
['potenziale', 'noun', 'b'],
['potere', 'verb', 'a'],
['potere', 'noun', 'a'],
['povero', 'adjective', 'a'],
['povertà', 'noun', 'b'],
['pozzanghera', 'noun', 'c'],
['pozzo', 'noun', 'b'],
['praghese', 'adjective', 'c'],
['praghese', 'noun', 'c'],
['pranzo', 'noun', 'a'],
['prassi', 'noun', 'b'],
['pratica', 'noun', 'a'],
['praticamente', 'adverb', 'a'],
['praticare', 'verb', 'b'],
['pratico', 'adjective', 'a'],
['prato', 'noun', 'b'],
['precario', 'adjective', 'b'],
['precedente', 'pres_part', 'a'],
['precedente', 'adjective', 'a'],
['precedente', 'noun', 'a'],
['precedentemente', 'adverb', 'b'],
['precedenza', 'noun', 'b'],
['precedere', 'verb', 'b'],
['precipitare', 'verb', 'b'],
['precisamente', 'adverb', 'b'],
['precisare', 'verb', 'a'],
['precisione', 'noun', 'b'],
['preciso', 'adjective', 'a'],
['preciso', 'adverb', 'a'],
['preda', 'noun', 'b'],
['predisporre', 'verb', 'b'],
['preferenza', 'noun', 'b'],
['preferire', 'verb', 'a'],
['preferito', 'past_part', 'b'],
['preferito', 'adjective', 'b'],
['preferito', 'noun', 'b'],
['pregare', 'verb', 'a'],
['preghiera', 'noun', 'b'],
['pregiato', 'past_part', 'c'],
['pregiato', 'adjective', 'c'],
['pregio', 'noun', 'b'],
['pregiudizio', 'noun', 'b'],
['prego', 'exclamation', 'a'],
['prelevare', 'verb', 'b'],
['preliminare', 'adjective', 'b'],
['preliminare', 'noun', 'b'],
['prémaman', 'adjective', 'c'],
['premere', 'verb', 'b'],
['premessa', 'noun', 'b'],
['premiare', 'verb', 'b'],
['premier', 'noun', 'b'],
['premio', 'noun', 'a'],
['premio', 'adjective', 'a'],
['prendere', 'verb', 'a'],
['prenotare', 'verb', 'b'],
['prenotazione', 'noun', 'c'],
['preoccupare', 'verb', 'a'],
['preoccupato', 'past_part', 'b'],
['preoccupato', 'adjective', 'b'],
['preoccupazione', 'noun', 'b'],
['preparare', 'verb', 'a'],
['preparazione', 'noun', 'b'],
['prepotente', 'adjective', 'c'],
['prepotente', 'noun', 'c'],
['presa', 'noun', 'a'],
['prescindere', 'verb', 'b'],
['prescrivere', 'verb', 'b'],
['prescrizione', 'noun', 'b'],
['presentare', 'verb', 'a'],
['presentazione', 'noun', 'b'],
['presente', 'adjective', 'a'],
['presente', 'noun', 'a'],
['presente', 'adverb', 'a'],
['presenza', 'noun', 'a'],
['presepe', 'noun', 'b'],
['preside', 'noun', 'c'],
['presidente', 'noun', 'a'],
['presidente', 'adjective', 'a'],
['presidenza', 'noun', 'b'],
['pressione', 'noun', 'a'],
['presso', 'adverb', 'a'],
['presso', 'preposition', 'a'],
['presso', 'noun', 'a'],
['presso', 'adjective', 'a'],
['prestare', 'verb', 'a'],
['prestazione', 'noun', 'b'],
['prestigio', 'noun', 'b'],
['prestigioso', 'adjective', 'b'],
['prestito', 'noun', 'b'],
['presto', 'adverb', 'a'],
['presto', 'exclamation', 'a'],
['presto', 'adjective', 'a'],
['presumere', 'verb', 'b'],
['presunto', 'past_part', 'b'],
['presunto', 'adjective', 'b'],
['presupposto', 'past_part', 'b'],
['presupposto', 'adjective', 'b'],
['presupposto', 'noun', 'b'],
['prete', 'noun', 'a'],
['pretendere', 'verb', 'a'],
['pretesa', 'noun', 'b'],
['pretesto', 'noun', 'b'],
['prevalentemente', 'adverb', 'b'],
['prevalere', 'verb', 'b'],
['prevedere', 'verb', 'a'],
['prevedibile', 'adjective', 'b'],
['prevenire', 'verb', 'b'],
['preventivo', 'adjective', 'b'],
['preventivo', 'noun', 'b'],
['prevenzione', 'noun', 'b'],
['previdenza', 'noun', 'c'],
['previsione', 'noun', 'b'],
['previsto', 'past_part', 'a'],
['previsto', 'adjective', 'a'],
['previsto', 'noun', 'a'],
['prezioso', 'adjective', 'a'],
['prezioso', 'noun', 'a'],
['prezzemolo', 'noun', 'c'],
['prezzo', 'noun', 'a'],
['prigione', 'noun', 'b'],
['prigioniero', 'adjective', 'b'],
['prigioniero', 'noun', 'b'],
['prima', 'adverb', 'a'],
['prima', 'adjective', 'a'],
['prima', 'noun', 'a'],
['prima', 'noun', 'a'],
['primario', 'adjective', 'b'],
['primario', 'noun', 'b'],
['primavera', 'noun', 'a'],
['primizia', 'noun', 'c'],
['primo', 'adjective', 'a'],
['primo', 'noun', 'a'],
['primo', 'adverb', 'a'],
['primula', 'noun', 'c'],
['principale', 'adjective', 'a'],
['principale', 'noun', 'a'],
['principalmente', 'adverb', 'b'],
['principe', 'noun', 'a'],
['principe', 'adjective', 'a'],
['principessa', 'noun', 'b'],
['principio', 'noun', 'a'],
['priorità', 'noun', 'b'],
['privacy', 'noun', 'b'],
['privare', 'verb', 'b'],
['privato', 'adjective', 'a'],
['privato', 'noun', 'a'],
['privilegio', 'noun', 'b'],
['privo', 'adjective', 'b'],
['privo', 'preposition', 'b'],
['privo', 'noun', 'b'],
['probabile', 'adjective', 'b'],
['probabilità', 'noun', 'b'],
['probabilmente', 'adverb', 'a'],
['problema', 'noun', 'a'],
['problematico', 'adjective', 'b'],
['procedere', 'verb', 'a'],
['procedimento', 'noun', 'b'],
['procedura', 'noun', 'a'],
['processo', 'noun', 'a'],
['proclamare', 'verb', 'b'],
['procura', 'noun', 'b'],
['procurare', 'verb', 'b'],
['procuratore', 'noun', 'b'],
['prodotto', 'past_part', 'a'],
['prodotto', 'adjective', 'a'],
['prodotto', 'noun', 'a'],
['produrre', 'verb', 'a'],
['produttivo', 'adjective', 'b'],
['produttore', 'adjective', 'b'],
['produttore', 'noun', 'b'],
['produzione', 'noun', 'a'],
['prof', 'noun', 'b'],
['professionale', 'adjective', 'a'],
['professione', 'noun', 'b'],
['professionista', 'noun', 'b'],
['professore', 'noun', 'a'],
['professoressa', 'noun', 'b'],
['profeta', 'noun', 'b'],
['profilattico', 'adjective', 'c'],
['profilattico', 'noun', 'c'],
['profilo', 'noun', 'a'],
['profitto', 'noun', 'b'],
['profondamente', 'adverb', 'b'],
['profondità', 'noun', 'b'],
['profondo', 'adjective', 'a'],
['profondo', 'noun', 'a'],
['profondo', 'adverb', 'a'],
['profumare', 'verb', 'b'],
['profumato', 'past_part', 'c'],
['profumato', 'adjective', 'c'],
['profumo', 'noun', 'b'],
['progettare', 'verb', 'b'],
['progettazione', 'noun', 'b'],
['progetto', 'noun', 'a'],
['programma', 'noun', 'a'],
['programmare', 'verb', 'b'],
['programmazione', 'noun', 'b'],
['progressista', 'adjective', 'c'],
['progressista', 'noun', 'c'],
['progressivo', 'adjective', 'b'],
['progresso', 'noun', 'b'],
['proibire', 'verb', 'b'],
['proiettare', 'verb', 'b'],
['proiettile', 'noun', 'b'],
['proiezione', 'noun', 'b'],
['prolunga', 'noun', 'c'],
['promessa', 'noun', 'b'],
['promettere', 'verb', 'a'],
['promozione', 'noun', 'b'],
['promuovere', 'verb', 'b'],
['pronto', 'adjective', 'a'],
['pronuncia', 'noun', 'c'],
['pronunciare', 'verb', 'a'],
['propaganda', 'noun', 'b'],
['propagandare', 'verb', 'c'],
['proporre', 'verb', 'a'],
['proporzione', 'noun', 'b'],
['proposito', 'noun', 'a'],
['proposizione', 'noun', 'c'],
['proposta', 'noun', 'a'],
['proprietà', 'noun', 'a'],
['proprietario', 'adjective', 'a'],
['proprietario', 'noun', 'a'],
['proprio', 'adjective', 'a'],
['proprio', 'adverb', 'a'],
['proprio', 'noun', 'a'],
['prosa', 'noun', 'b'],
['prosciugare', 'verb', 'c'],
['prosciutto', 'noun', 'b'],
['prosecco', 'noun', 'c'],
['proseguire', 'verb', 'a'],
['prospettiva', 'noun', 'b'],
['prossimo', 'adjective', 'a'],
['prossimo', 'noun', 'a'],
['prostituta', 'noun', 'b'],
['protagonista', 'adjective', 'a'],
['protagonista', 'noun', 'a'],
['proteggere', 'verb', 'a'],
['proteina', 'noun', 'b'],
['protesta', 'noun', 'b'],
['protestare', 'verb', 'b'],
['protetto', 'past_part', 'b'],
['protetto', 'adjective', 'b'],
['protetto', 'noun', 'b'],
['protezione', 'noun', 'b'],
['protocollo', 'noun', 'b'],
['prova', 'noun', 'a'],
['provare', 'verb', 'a'],
['provenienza', 'noun', 'b'],
['provenire', 'verb', 'a'],
['provincia', 'noun', 'a'],
['provinciale', 'adjective', 'b'],
['provinciale', 'noun', 'b'],
['provocare', 'verb', 'a'],
['provola', 'noun', 'c'],
['provolone', 'noun', 'c'],
['provvedere', 'verb', 'b'],
['provvedimento', 'noun', 'b'],
['provvisorio', 'adjective', 'b'],
['prudere', 'verb', 'c'],
['prugna', 'noun', 'c'],
['prugna', 'adjective', 'c'],
['prurito', 'noun', 'c'],
['pseudonimo', 'noun', 'b'],
['pseudonimo', 'adjective', 'b'],
['psichiatra', 'noun', 'b'],
['psichiatria', 'noun', 'c'],
['psichico', 'adjective', 'b'],
['psicologia', 'noun', 'b'],
['psicologico', 'adjective', 'b'],
['psicologo', 'noun', 'b'],
['pub', 'noun', 'b'],
['pubblicare', 'verb', 'a'],
['pubblicazione', 'noun', 'b'],
['pubblicità', 'noun', 'a'],
['pubblicitario', 'adjective', 'b'],
['pubblicitario', 'noun', 'b'],
['pubblico', 'adjective', 'a'],
['pubblico', 'noun', 'a'],
['pugilato', 'noun', 'c'],
['pugliese', 'adjective', 'c'],
['pugliese', 'noun', 'c'],
['pugno', 'noun', 'a'],
['pulce', 'noun', 'c'],
['pulce', 'adjective', 'c'],
['pulcino', 'noun', 'c'],
['puledro', 'noun', 'c'],
['pulire', 'verb', 'a'],
['pulito', 'past_part', 'b'],
['pulito', 'adjective', 'b'],
['pulito', 'noun', 'b'],
['pulizia', 'noun', 'b'],
['pullman', 'noun', 'b'],
['pullover', 'noun', 'c'],
['pulmino', 'noun', 'c'],
['pulsante', 'pres_part', 'b'],
['pulsante', 'adjective', 'b'],
['pulsante', 'noun', 'b'],
['puma', 'noun', 'c'],
['pungere', 'verb', 'c'],
['punire', 'verb', 'b'],
['punizione', 'noun', 'b'],
['punk', 'adjective', 'c'],
['punk', 'noun', 'c'],
['punta', 'noun', 'a'],
['puntare', 'verb', 'a'],
['puntata', 'noun', 'b'],
['puntato', 'past_part', 'b'],
['puntato', 'adjective', 'b'],
['punteggio', 'noun', 'c'],
['puntiglio', 'noun', 'c'],
['puntino', 'noun', 'b'],
['punto', 'noun', 'a'],
['puntuale', 'adjective', 'b'],
['puntura', 'noun', 'c'],
['pupa', 'noun', 'b'],
['pupazzo', 'noun', 'c'],
['pupo', 'noun', 'c'],
['purché', 'conjunction', 'b'],
['pure', 'adverb', 'a'],
['pure', 'conjunction', 'a'],
['purè', 'noun', 'c'],
['purga', 'noun', 'c'],
['puro', 'adjective', 'a'],
['puro', 'noun', 'a'],
['purtroppo', 'adverb', 'a'],
['puttana', 'noun', 'b'],
['puzza', 'noun', 'b'],
['puzzare', 'verb', 'b'],
['puzzle', 'noun', 'c'],
['qua', 'adverb', | |
# encoding: utf-8
# module Grasshopper.Kernel.Graphs calls itself Graphs
# from Grasshopper,Version=1.0.0.20,Culture=neutral,PublicKeyToken=dda4f5ec2cd80803
# by generator 1.145
""" NamespaceTracker represent a CLS namespace. """
# no imports
# functions
def GH_GraphProxyObject(n_owner): # real signature unknown; restored from __doc__
""" GH_GraphProxyObject(n_owner: IGH_Graph) """
pass
# classes
class GH_AbstractGraph(object,IGH_Graph,GH_ISerializable):
# no doc
def AddGrip(self,*args):
""" AddGrip(self: GH_AbstractGraph,Grip: GH_GraphGrip) """
pass
def ClearCaches(self):
""" ClearCaches(self: GH_AbstractGraph) """
pass
def ClearGrips(self,*args):
""" ClearGrips(self: GH_AbstractGraph) """
pass
def CreateDerivedDuplicate(self,*args):
""" CreateDerivedDuplicate(self: GH_AbstractGraph) -> GH_AbstractGraph """
pass
def CreateGrips(self,*args):
""" CreateGrips(self: GH_AbstractGraph) """
pass
def CurveToPointFArray(self,*args):
""" CurveToPointFArray(Crv: Curve,dest: RectangleF) -> Array[PointF] """
pass
def Draw_PostRenderGraph(self,g,cnt):
""" Draw_PostRenderGraph(self: GH_AbstractGraph,g: Graphics,cnt: GH_GraphContainer) """
pass
def Draw_PostRenderGrid(self,g,cnt):
""" Draw_PostRenderGrid(self: GH_AbstractGraph,g: Graphics,cnt: GH_GraphContainer) """
pass
def Draw_PostRenderGrip(self,g,cnt,index):
""" Draw_PostRenderGrip(self: GH_AbstractGraph,g: Graphics,cnt: GH_GraphContainer,index: int) """
pass
def Draw_PostRenderTags(self,g,cnt):
""" Draw_PostRenderTags(self: GH_AbstractGraph,g: Graphics,cnt: GH_GraphContainer) """
pass
def Draw_PreRenderGraph(self,g,cnt):
""" Draw_PreRenderGraph(self: GH_AbstractGraph,g: Graphics,cnt: GH_GraphContainer) -> GH_GraphDrawInstruction """
pass
def Draw_PreRenderGrid(self,g,cnt):
""" Draw_PreRenderGrid(self: GH_AbstractGraph,g: Graphics,cnt: GH_GraphContainer) -> GH_GraphDrawInstruction """
pass
def Draw_PreRenderGrip(self,g,cnt,index):
""" Draw_PreRenderGrip(self: GH_AbstractGraph,g: Graphics,cnt: GH_GraphContainer,index: int) -> GH_GraphDrawInstruction """
pass
def Draw_PreRenderTags(self,g,cnt):
""" Draw_PreRenderTags(self: GH_AbstractGraph,g: Graphics,cnt: GH_GraphContainer) -> GH_GraphDrawInstruction """
pass
def Duplicate(self):
""" Duplicate(self: GH_AbstractGraph) -> IGH_Graph """
pass
def EmitProxyObject(self):
""" EmitProxyObject(self: GH_AbstractGraph) -> IGH_GraphProxyObject """
pass
def GDI_GraphPath(self,reg):
""" GDI_GraphPath(self: GH_AbstractGraph,reg: RectangleF) -> Array[PointF] """
pass
def GHGraphToPointArray(self,*args):
"""
GHGraphToPointArray(reg: RectangleF,pix_accuracy: float,eval: GH_Evaluator) -> Array[PointF]
GHGraphToPointArray(self: GH_AbstractGraph,reg: RectangleF,pix_accuracy: float) -> Array[PointF]
"""
pass
def Internal_GripChanged(self,*args):
""" Internal_GripChanged(self: GH_AbstractGraph,grip: GH_GraphGrip,bIntermediate: bool) """
pass
def IntersectionEvaluate(self,*args):
""" IntersectionEvaluate(C: Curve,offset: float) -> float """
pass
def OnGraphChanged(self,bIntermediate):
""" OnGraphChanged(self: GH_AbstractGraph,bIntermediate: bool) """
pass
def PrepareForUse(self):
""" PrepareForUse(self: GH_AbstractGraph) """
pass
def Read(self,reader):
""" Read(self: GH_AbstractGraph,reader: GH_IReader) -> bool """
pass
def UpdateEquation(self,*args):
""" UpdateEquation(self: GH_AbstractGraph) """
pass
def ValueAt(self,t):
""" ValueAt(self: GH_AbstractGraph,t: float) -> float """
pass
def Write(self,writer):
""" Write(self: GH_AbstractGraph,writer: GH_IWriter) -> bool """
pass
def __init__(self,*args):
""" x.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signature """
pass
@staticmethod
def __new__(self,*args): #cannot find CLR constructor
""" __new__(cls: type,nName: str,nDescription: str) """
pass
def __repr__(self,*args):
""" __repr__(self: object) -> str """
pass
Description=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""Get: Description(self: GH_AbstractGraph) -> str
"""
GraphTypeID=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""Get: GraphTypeID(self: GH_AbstractGraph) -> Guid
"""
Grips=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""Get: Grips(self: GH_AbstractGraph) -> List[GH_GraphGrip]
"""
Icon_16x16=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""Get: Icon_16x16(self: GH_AbstractGraph) -> Image
"""
IsValid=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""Get: IsValid(self: GH_AbstractGraph) -> bool
"""
Name=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""Get: Name(self: GH_AbstractGraph) -> str
"""
GH_Evaluator=None
GraphChanged=None
class GH_BezierGraph(GH_AbstractGraph,IGH_Graph,GH_ISerializable):
""" GH_BezierGraph() """
def AddGrip(self,*args):
""" AddGrip(self: GH_AbstractGraph,Grip: GH_GraphGrip) """
pass
def ClearCaches(self):
""" ClearCaches(self: GH_BezierGraph) """
pass
def ClearGrips(self,*args):
""" ClearGrips(self: GH_AbstractGraph) """
pass
def CreateDerivedDuplicate(self,*args):
""" CreateDerivedDuplicate(self: GH_BezierGraph) -> GH_AbstractGraph """
pass
def CreateGrips(self,*args):
""" CreateGrips(self: GH_BezierGraph) """
pass
def Curve(self,*args):
""" Curve(self: GH_BezierGraph) -> Curve """
pass
def Draw_PreRenderGrip(self,g,cnt,index):
""" Draw_PreRenderGrip(self: GH_BezierGraph,g: Graphics,cnt: GH_GraphContainer,index: int) -> GH_GraphDrawInstruction """
pass
def GDI_GraphPath(self,reg):
""" GDI_GraphPath(self: GH_BezierGraph,reg: RectangleF) -> Array[PointF] """
pass
def GHGraphToPointArray(self,*args):
"""
GHGraphToPointArray(reg: RectangleF,pix_accuracy: float,eval: GH_Evaluator) -> Array[PointF]
GHGraphToPointArray(self: GH_AbstractGraph,reg: RectangleF,pix_accuracy: float) -> Array[PointF]
"""
pass
def Internal_GripChanged(self,*args):
""" Internal_GripChanged(self: GH_AbstractGraph,grip: GH_GraphGrip,bIntermediate: bool) """
pass
def Read(self,reader):
""" Read(self: GH_BezierGraph,reader: GH_IReader) -> bool """
pass
def UpdateEquation(self,*args):
""" UpdateEquation(self: GH_BezierGraph) """
pass
def ValueAt(self,t):
""" ValueAt(self: GH_BezierGraph,t: float) -> float """
pass
def Write(self,writer):
""" Write(self: GH_BezierGraph,writer: GH_IWriter) -> bool """
pass
def __init__(self,*args):
""" x.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signature """
pass
GraphTypeID=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""Get: GraphTypeID(self: GH_BezierGraph) -> Guid
"""
Icon_16x16=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""Get: Icon_16x16(self: GH_BezierGraph) -> Image
"""
IsValid=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""Get: IsValid(self: GH_BezierGraph) -> bool
"""
class GH_ConicGraph(GH_AbstractGraph,IGH_Graph,GH_ISerializable):
""" GH_ConicGraph() """
def AddGrip(self,*args):
""" AddGrip(self: GH_AbstractGraph,Grip: GH_GraphGrip) """
pass
def ClearGrips(self,*args):
""" ClearGrips(self: GH_AbstractGraph) """
pass
def CreateDerivedDuplicate(self,*args):
""" CreateDerivedDuplicate(self: GH_ConicGraph) -> GH_AbstractGraph """
pass
def CreateGrips(self,*args):
""" CreateGrips(self: GH_ConicGraph) """
pass
def Curve(self,*args):
""" Curve(self: GH_ConicGraph) -> NurbsCurve """
pass
def DestroyCurve(self,*args):
""" DestroyCurve(self: GH_ConicGraph) """
pass
def FitConic(self,*args):
""" FitConic(self: GH_ConicGraph,S: Point3d) -> NurbsCurve """
pass
def GDI_GraphPath(self,reg):
""" GDI_GraphPath(self: GH_ConicGraph,reg: RectangleF) -> Array[PointF] """
pass
def GHGraphToPointArray(self,*args):
"""
GHGraphToPointArray(reg: RectangleF,pix_accuracy: float,eval: GH_Evaluator) -> Array[PointF]
GHGraphToPointArray(self: GH_AbstractGraph,reg: RectangleF,pix_accuracy: float) -> Array[PointF]
"""
pass
def Internal_GripChanged(self,*args):
""" Internal_GripChanged(self: GH_AbstractGraph,grip: GH_GraphGrip,bIntermediate: bool) """
pass
def MakeConic(self,*args):
""" MakeConic(self: GH_ConicGraph,w: float) -> NurbsCurve """
pass
def Read(self,reader):
""" Read(self: GH_ConicGraph,reader: GH_IReader) -> bool """
pass
def UpdateEquation(self,*args):
""" UpdateEquation(self: GH_ConicGraph) """
pass
def ValueAt(self,t):
""" ValueAt(self: GH_ConicGraph,t: float) -> float """
pass
def Write(self,writer):
""" Write(self: GH_ConicGraph,writer: GH_IWriter) -> bool """
pass
def __init__(self,*args):
""" x.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signature """
pass
GraphTypeID=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""Get: GraphTypeID(self: GH_ConicGraph) -> Guid
"""
Icon_16x16=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""Get: Icon_16x16(self: GH_ConicGraph) -> Image
"""
IsValid=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""Get: IsValid(self: GH_ConicGraph) -> bool
"""
class GH_DoubleSineGraph(GH_AbstractGraph,IGH_Graph,GH_ISerializable):
""" GH_DoubleSineGraph() """
def AddGrip(self,*args):
""" AddGrip(self: GH_AbstractGraph,Grip: GH_GraphGrip) """
pass
def ClearCaches(self):
""" ClearCaches(self: GH_DoubleSineGraph) """
pass
def ClearGrips(self,*args):
""" ClearGrips(self: GH_AbstractGraph) """
pass
def CreateDerivedDuplicate(self,*args):
""" CreateDerivedDuplicate(self: GH_DoubleSineGraph) -> GH_AbstractGraph """
pass
def CreateGrips(self,*args):
""" CreateGrips(self: GH_DoubleSineGraph) """
pass
def Draw_PreRenderGraph(self,g,cnt):
""" Draw_PreRenderGraph(self: GH_DoubleSineGraph,g: Graphics,cnt: GH_GraphContainer) -> GH_GraphDrawInstruction """
pass
def Draw_PreRenderGrip(self,g,cnt,index):
""" Draw_PreRenderGrip(self: GH_DoubleSineGraph,g: Graphics,cnt: GH_GraphContainer,index: int) -> GH_GraphDrawInstruction """
pass
def GDI_GraphPath(self,reg):
""" GDI_GraphPath(self: GH_DoubleSineGraph,reg: RectangleF) -> Array[PointF] """
pass
def GHGraphToPointArray(self,*args):
"""
GHGraphToPointArray(reg: RectangleF,pix_accuracy: float,eval: GH_Evaluator) -> Array[PointF]
GHGraphToPointArray(self: GH_AbstractGraph,reg: RectangleF,pix_accuracy: float) -> Array[PointF]
"""
pass
def GraphAccuracy(self,*args):
""" GraphAccuracy(self: GH_DoubleSineGraph,reg: RectangleF) -> float """
pass
def Internal_GripChanged(self,*args):
""" Internal_GripChanged(self: GH_AbstractGraph,grip: GH_GraphGrip,bIntermediate: bool) """
pass
def Read(self,reader):
""" Read(self: GH_DoubleSineGraph,reader: GH_IReader) -> bool """
pass
def RecFromPoints(self,*args):
""" RecFromPoints(self: GH_DoubleSineGraph,a: PointF,b: PointF) -> Rectangle """
pass
def UpdateEquation(self,*args):
""" UpdateEquation(self: GH_DoubleSineGraph) """
pass
def ValueAt(self,t):
""" ValueAt(self: GH_DoubleSineGraph,t: float) -> float """
pass
def Write(self,writer):
""" Write(self: GH_DoubleSineGraph,writer: GH_IWriter) -> bool """
pass
def __init__(self,*args):
""" x.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signature """
pass
GraphTypeID=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""Get: GraphTypeID(self: GH_DoubleSineGraph) -> Guid
"""
Icon_16x16=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""Get: Icon_16x16(self: GH_DoubleSineGraph) -> Image
"""
IsValid=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""Get: IsValid(self: GH_DoubleSineGraph) -> bool
"""
m_eq0=None
m_eq1=None
m_path0=None
m_path1=None
class GH_GaussianGraph(GH_AbstractGraph,IGH_Graph,GH_ISerializable):
""" GH_GaussianGraph() """
def AddGrip(self,*args):
""" AddGrip(self: GH_AbstractGraph,Grip: GH_GraphGrip) """
pass
def ClearGrips(self,*args):
""" ClearGrips(self: GH_AbstractGraph) """
pass
def CreateDerivedDuplicate(self,*args):
""" CreateDerivedDuplicate(self: GH_GaussianGraph) -> GH_AbstractGraph """
pass
def CreateGrips(self,*args):
""" CreateGrips(self: GH_GaussianGraph) """
pass
def GHGraphToPointArray(self,*args):
"""
GHGraphToPointArray(reg: RectangleF,pix_accuracy: float,eval: GH_Evaluator) -> Array[PointF]
GHGraphToPointArray(self: GH_AbstractGraph,reg: RectangleF,pix_accuracy: float) -> Array[PointF]
"""
pass
def Internal_GripChanged(self,*args):
""" Internal_GripChanged(self: GH_GaussianGraph,grip: GH_GraphGrip,bIntermediate: bool) """
pass
def Read(self,reader):
""" Read(self: GH_GaussianGraph,reader: GH_IReader) -> bool """
pass
def UpdateEquation(self,*args):
""" UpdateEquation(self: GH_GaussianGraph) """
pass
def ValueAt(self,t):
""" ValueAt(self: GH_GaussianGraph,t: float) -> float """
pass
def Write(self,writer):
""" Write(self: GH_GaussianGraph,writer: GH_IWriter) -> bool """
pass
def __init__(self,*args):
""" x.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signature """
pass
GraphTypeID=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""Get: GraphTypeID(self: GH_GaussianGraph) -> Guid
"""
Icon_16x16=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""Get: Icon_16x16(self: GH_GaussianGraph) -> Image
"""
IsValid=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""Get: IsValid(self: GH_GaussianGraph) -> bool
"""
class GH_GraphContainer(object,GH_ISerializable,IGH_ResponsiveObject):
"""
GH_GraphContainer(n_graph: IGH_Graph)
GH_GraphContainer(n_graph: IGH_Graph,n_x0: float,n_x1: float,n_y0: float,n_y1: float)
"""
def ClearCaches(self):
""" ClearCaches(self: GH_GraphContainer) """
pass
def Duplicate(self):
""" Duplicate(self: GH_GraphContainer) -> GH_GraphContainer """
pass
def FromX(self,t):
""" FromX(self: GH_GraphContainer,t: float) -> float """
pass
def FromY(self,t):
""" FromY(self: GH_GraphContainer,t: float) -> float """
pass
def Internal_Render_Graph(self,*args):
""" Internal_Render_Graph(self: GH_GraphContainer,G: Graphics) """
pass
def Internal_Render_Grip(self,*args):
""" Internal_Render_Grip(self: GH_GraphContainer,g: Graphics,x: int,y: int) """
pass
def Internal_Render_Grips(self,*args):
""" Internal_Render_Grips(self: GH_GraphContainer,G: Graphics) """
pass
def Internal_Render_HorizontalConstraint(self,*args):
""" Internal_Render_HorizontalConstraint(self: GH_GraphContainer,g: Graphics,y: int) """
pass
def Internal_Render_InvalidIcon(self,*args):
""" Internal_Render_InvalidIcon(self: GH_GraphContainer,g: Graphics) """
pass
def Internal_Render_LockedIcon(self,*args):
| |
from functools import reduce
from itertools import cycle, zip_longest
from operator import mul
import numpy as np
import scipy as sp
from scipy.stats import multivariate_normal
#################################################################
# Based on functions defined in Yunus Saatci's Thesis (Ch. 5):
# http://mlg.eng.cam.ac.uk/pub/pdf/Saa11.pdf
#################################################################
def kronecker(K):
"""Return the Kronecker product of list of arrays K:
K_1 \otimes K_2 \otimes ... \otimes K_D
Parameters
----------
K: List of array-like
[K_1, K_2, ..., K_D]
"""
return reduce(np.kron, K)
def cartesian(*arrays):
"""Makes the Cartesian product of arrays.
Parameters
----------
arrays: list of 1D array-like
1D arrays where earlier arrays loop more slowly than later ones
"""
N = len(arrays)
return np.stack(np.meshgrid(*arrays, indexing='ij'), -1).reshape(-1, N)
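# Illustrative sketch (not in the original source): the ordering convention,
# with earlier arrays looping more slowly than later ones.
def _example_cartesian():
    grid = cartesian(np.array([1, 2]), np.array([3, 4]))
    # grid is [[1, 3], [1, 4], [2, 3], [2, 4]]
    assert np.array_equal(grid, np.array([[1, 3], [1, 4], [2, 3], [2, 4]]))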
def flat_mtprod(tens, mat):
"""A matrix-tensor product
Z_{i_1, ..., i_D} = \sum_k M_{i_1,k} T_{k, i_2, ..., i_D}
where tens is the vectorized version of T.
Parameters
-----------
mat : 2D array-like
tens: (N,1)- or (N,)-shaped array-like
Returns
-------
Z: column vector
A (column) vectorized version of the matrix-tensor product
"""
Nm = mat.shape[1]
Tmat = tens.reshape((Nm, -1))
Z = np.dot(mat, Tmat)
return Z.T.reshape((-1, 1))
def kron_mvprod(kron_list, b):
"""Compute the matrix-vector product of kronecker(kron_list).b
Parameters
-----------
kron_list: list of 2D array-like objects
D matrices [A_1, A_2, ..., A_D] to be Kronecker'ed:
A = A_1 \otimes A_2 \otimes ... \otimes A_D
Product of column dimensions must be N
b : array-like
Nx1 column vector
"""
return reduce(flat_mtprod, kron_list, b)
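# Illustrative sketch (not in the original source): kron_mvprod should agree
# with multiplying by the dense Kronecker product, without ever forming the
# full N x N matrix.
def _example_kron_mvprod():
    A = np.random.rand(3, 3)
    B = np.random.rand(4, 4)
    b = np.random.rand(12, 1)
    assert np.allclose(kron_mvprod([A, B], b), kronecker([A, B]) @ b)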
def kron_mmprod(kron_list, m):
"""Compute the matrix product of kronecker(kron_list).m
Parameters
-----------
kron_list: list of 2D array-like objects
D matrices [A_1, A_2, ..., A_D] to be Kronecker'ed:
A = A_1 \otimes A_2 \otimes ... \otimes A_D
Product of column dimensions must be N
m : array-like
NxM matrix
"""
if len(m.shape) == 1:
m = m[:, None] # Treat 1D array as Nx1 matrix
return np.concatenate([kron_mvprod(kron_list, b) for b in m.T], axis=1)
def flattened_outer(a, b):
return np.outer(a, b).ravel()
def kron_diag(diags):
"""Returns diagonal of kronecker product from list of diagonals.
"""
return reduce(flattened_outer, diags)
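# Illustrative sketch (not in the original source): kron_diag returns the
# diagonal of the Kronecker product of diagonal matrices without forming them.
def _example_kron_diag():
    d1, d2 = np.array([1.0, 2.0]), np.array([3.0, 4.0, 5.0])
    dense = np.diag(np.kron(np.diag(d1), np.diag(d2)))
    assert np.allclose(kron_diag([d1, d2]), dense)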
def flat_chol_solve(b, chol):
"""Solve A.x = b given cholesky decomposition of A
"""
N = chol.shape[1]
B = b.reshape((N, -1))
X = sp.linalg.cho_solve((chol, True), B)
return X.T.reshape((-1, 1))
def kron_chol_vsolve(chol_list, b):
"""Solve kronecker(kron_list).x = b where chol_list is the
cholesky decomposition of matrices to be kronecker'ed: kron_list
Parameters
-----------
chol_list: list of 2D array-like objects
Cholesky decompositions of D matrices [A_1, A_2, ..., A_D]
to be Kronecker'ed:
A = A_1 \otimes A_2 \otimes ... \otimes A_D
Product of column dimensions must be N
b : array-like
Nx1 column vector
"""
return reduce(flat_chol_solve, chol_list, b)
def kron_chol_msolve(chol_list, m):
"""Solve kronecker(kron_list).x = m where chol_list is the
cholesky decomposition of matrices to be kronecker'ed: kron_list
Parameters
-----------
chol_list: list of 2D array-like objects
Cholesky decompositions of D matrices [A_1, A_2, ..., A_D]
to be Kronecker'ed:
A = A_1 \otimes A_2 \otimes ... \otimes A_D
Product of column dimensions must be N
m : array-like
NxM matrix
"""
if len(m.shape) == 1:
m = m[:, None] # Treat 1D array as Nx1 matrix
return np.concatenate([kron_chol_vsolve(chol_list, b) for b in m.T], axis=1)
def flat_lower_solve(b, L):
"""Solve L.x = b given lower triangular matrix L
"""
N = L.shape[1]
B = b.reshape((N, -1))
X = sp.linalg.solve_triangular(L, B, lower=True)
return X.T.reshape((-1, 1))
def kron_lower_vsolve(lowers, b):
"""Solve kronecker(lowers).x = b where lowers is a list of lower
triangular matrices.
Parameters
-----------
lowers : list of 2D array-like objects
Lower triangular matrices
L = L_1 \otimes L_2 \otimes ... \otimes L_D
Product of column dimensions must be N
b : array-like
Nx1 column vector
"""
return reduce(flat_lower_solve, lowers, b)
def kron_lower_msolve(lowers, m):
"""Solve kronecker(lowers).x = m where lowers is a list of lower
triangular matrices.
Parameters
-----------
lowers : list of 2D array-like objects
Lower triangular matrices
L = L_1 \otimes L_2 \otimes ... \otimes L_D
Product of column dimensions must be N
m : array-like
NxM matrix
"""
if len(m.shape) == 1:
m = m[:, None] # Treat 1D array as Nx1 matrix
return np.concatenate([kron_lower_vsolve(lowers, b) for b in m.T], axis=1)
#################################################################
# Statistical classes for use in GP regression. Based on PyMC3's
# GP implementation and Yunus Saatci's Thesis mentioned above
#################################################################
def gaussian_kernel(x, xp, ell):
return np.exp(-np.subtract.outer(x, xp)**2/ell**2)
class KroneckerNormal:
"""A multivariate normal that makes use of Kronecker structure of covariance.
Parameters
----------
mu : array-like
covs : list of arrays
The set of covariance matrices to be Kroneckered
[K_1, K_2, ...]
such that K = K_1 \otimes K_2 \otimes ...
chols: list of arrays
The set of lower cholesky matrices to be Kroneckered
[chol_1, chol_2, ...]
such that K_i = chol_i * chol_i^T
EVDs : list of tuples
The set of eigenvalue-vector, eigenvector-matrix pairs, e.g.,
[(v1, Q1), (v2, Q2), ...]
such that K_i = Q_i^T * diag(v_i) * Q_i
noise: float
"""
def __init__(self, mu=0, covs=None, chols=None, EVDs=None, noise=None):
self._setup(covs, chols, EVDs, noise)
self.mu = mu
def _setup(self, covs, chols, EVDs, noise):
if len([i for i in [covs, chols, EVDs] if i is not None]) != 1:
raise ValueError('Incompatible parameterization. '
'Specify exactly one of covs, chols, '
'or EVDs.')
self.isEVD = False
if covs is not None:
self.covs = covs
if noise is not None and noise != 0:
# Noise requires eigendecomposition
self.isEVD = True
eigs_sep, self.Qs = zip(*map(np.linalg.eigh, covs)) # Unzip
self.QTs = list(map(np.transpose, self.Qs))
self.eigs = kron_diag(eigs_sep) # Combine separate eigs
self.eigs += noise
self.N = len(self.eigs)
else:
# Otherwise use cholesky
self.chols = list(map(np.linalg.cholesky, self.covs))
self.chol_diags = np.array(list(map(np.diag, self.chols)))
self.sizes = np.array([len(chol) for chol in self.chols])
self.N = np.prod(self.sizes)
elif chols is not None:
self.chols = chols
self.chol_diags = np.array(list(map(np.diag, self.chols)))
self.sizes = np.array([len(chol) for chol in self.chols])
self.N = np.prod(self.sizes)
else:
self.isEVD = True
eigs_sep, self.Qs = zip(*EVDs) # Unzip tuples
self.QTs = list(map(np.transpose, self.Qs))
self.eigs = kron_diag(eigs_sep) # Combine separate eigs
if noise is not None:
self.eigs += noise
self.N = len(self.eigs)
def random(self, size=None):
"""Drawn using x = mu + A.z for z~N(0,I) and
A = Q.sqrt(Lambda), if isEVD
A = chol, otherwise
Warning: EVD does not (yet) match with random draws from numpy
since A is only defined up to some unknown orthogonal transformation.
Numpy used svd while we must use eigendecomposition, which aren't
easily related due to sign ambiguities and permutations of eigenvalues.
"""
if size is None:
size = [self.N]
elif isinstance(size, int):
size = [size, self.N]
else:
raise NotImplementedError
z = np.random.standard_normal(size)
if self.isEVD:
sqrtLz = np.sqrt(self.eigs) * z
Az = kron_mmprod(self.Qs, sqrtLz.T).T
else:
Az = kron_mmprod(self.chols, z.T).T
return self.mu + Az
def _quaddist(self, value):
"""Computes the quadratic (x-mu)^T @ K^-1 @ (x-mu) and log(det(K))"""
delta = value - self.mu
if self.isEVD:
sqrt_quad = kron_mmprod(self.QTs, delta.T)
sqrt_quad = sqrt_quad/np.sqrt(self.eigs[:, None])
logdet = np.sum(np.log(self.eigs))
else:
sqrt_quad = kron_lower_msolve(self.chols, delta.T)
logchols = np.log(self.chol_diags) * self.N/self.sizes[:, None]
logdet = np.sum(2*logchols)
# Square each sample
quad = np.einsum('ij,ij->j', sqrt_quad, sqrt_quad)
# For theano: quad = tt.batched_dot(sqrt_quad.T, sqrt_quad.T)
return quad, logdet
def logp(self, value):
quad, logdet = self._quaddist(value)
return -1/2 * (quad + logdet + self.N*np.log(2*np.pi))
def update(self):
# How will updates to hyperparameters be performed?
raise NotImplementedError
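# Illustrative sketch (not in the original source): on a small 2D grid the
# Kronecker-structured log-density can be checked against scipy's dense
# multivariate normal. The lengthscales and noise level below are arbitrary
# assumptions made for the example.
def _example_kronecker_normal():
    x1, x2 = np.linspace(0, 1, 4), np.linspace(0, 1, 3)
    K1 = gaussian_kernel(x1, x1, ell=0.5) + 1e-6 * np.eye(4)
    K2 = gaussian_kernel(x2, x2, ell=0.5) + 1e-6 * np.eye(3)
    noise = 1e-3
    dist = KroneckerNormal(mu=0, covs=[K1, K2], noise=noise)
    y = np.random.rand(12)
    dense = multivariate_normal(mean=np.zeros(12),
                                cov=kronecker([K1, K2]) + noise * np.eye(12))
    assert np.allclose(dist.logp(y), dense.logpdf(y))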
class MarginalKron:
"""
"""
def __init__(self, mean_func, cov_funcs):
self.mean_func = mean_func
try:
self.cov_funcs = list(cov_funcs)
except TypeError:
self.cov_funcs = [cov_funcs]
def _build_marginal_likelihood(self, Xs):
self.X = cartesian(*Xs)
mu = self.mean_func(self.X)
# cycle() never exhausts, so pair it with the finite Xs via zip
# (zip_longest over an infinite iterator would not terminate)
covs = [f(X) for f, X in zip(cycle(self.cov_funcs), Xs)]
return mu, covs
def marginal_likelihood(self, Xs, y, noise, is_observed=True, **kwargs):
"""
Returns the marginal likelihood distribution, given the input
locations `X` and the data `y`.
"""
mu, covs = self._build_marginal_likelihood(Xs)
self.Xs = Xs
self.y = y
self.noise = noise
return KroneckerNormal(mu=mu, covs=covs, noise=noise)
def total_cov(self, X, Xs=None, diag=False):
if Xs is None:
covs = [f(x, diag) for f, x in
zip(cycle(self.cov_funcs), X.T)]
else:
covs = [f(x, xs, diag) for f, x, xs in
zip(cycle(self.cov_funcs), X.T, Xs.T)]
return reduce(mul, covs)
def _build_conditional(self, Xnew, pred_noise, diag, Xs, y, noise,
cov_total, mean_total):
# Old points
delta = y - self.mean_func(cartesian(*Xs))
Kns = [f(X) for f, X in zip(cycle(self.cov_funcs), Xs)]
eigs_sep, Qs = zip(*map(np.linalg.eigh, Kns)) # Unzip
QTs = list(map(np.transpose, Qs))
eigs = kron_diag(eigs_sep) # Combine separate eigs
if noise is not | |
#!/usr/bin/env python
#
# xnatbrowser.py - The XNATBrowserPanel class.
#
# Author: <NAME> <<EMAIL>>
#
"""This module provides the :class:`XNATBrowserPanel`, a ``wx`` panel which
allows the user to connect to an XNAT server, and browse the projects and
files contained on it. The ``xnatpy`` library is used for communication with
the XNAT server.
Another class, the :class:`.XNATBrowserDialog`, is also contained in this
module. This is a simple ``wx.Dialog`` which contains an ``XNATBrowserPanel``,
and *Download* and *Cancel* buttons.
"""
import os.path as op
import re
import fnmatch
import logging
import collections
import wx
import wx.lib.newevent as wxevent
import xnat
import fsleyes_widgets.placeholder_textctrl as pt
import fsleyes_widgets.autotextctrl as at
import fsleyes_widgets.utils.status as status
import fsleyes_widgets.utils.progress as progress
import fsleyes_widgets.widgetgrid as wgrid
import wxnat.icons as icons
log = logging.getLogger(__name__)
XNAT_HIERARCHY = {
'project' : ['subjects', 'resources'],
'projects' : ['subjects', 'resources'],
'subject' : ['experiments', 'resources'],
'subjects' : ['experiments', 'resources'],
'experiment' : ['assessors', 'scans', 'resources'],
'experiments' : ['assessors', 'scans', 'resources'],
'assessor' : ['scans', 'resources'],
'assessors' : ['scans', 'resources'],
'scan' : ['resources'],
'scans' : ['resources'],
'resource' : ['files'],
'resources' : ['files'],
}
"""This dictionary defines the hierarchy used in XNAT, allowing the panel to
traverse the hierarchy without knowing the names of the attributes on the
``xnat.Session`` instance.
"""
XNAT_NAME_ATT = {
'project' : 'name',
'subject' : 'label',
'experiment' : 'label',
'assessor' : 'label',
'scan' : 'id',
'resource' : 'label',
'file' : 'id',
}
"""This dictionary defines the attribute to use as the name for objects at
each level of the XNAT hierarchy.
"""
XNAT_INFO_ATTS = {
'project' : ['id', 'name'],
'subject' : ['id', 'label'],
'experiment' : ['id', 'label'],
'scan' : ['id', 'type'],
'assessor' : ['id'],
'resource' : ['id', 'label', 'file_size'],
'file' : ['id', 'size'],
}
"""This dictionary defines the attributes to show in the information panel for
highlighted objects at each level of the XNAT hierarchy.
"""
LABELS = {
'host' : 'Host',
'username' : 'Username',
'password' : 'Password',
'connect' : 'Connect',
'disconnect' : 'Disconnect',
'connecting' : 'Connecting to {} ...',
'refresh' : 'Refresh',
'filter' : 'Filter by',
'connected' : u'\u2022',
'disconnected' : u'\u2022',
'connect.error.title' : 'Connection error',
'connect.error.message' :
'An error occurred while trying to connect to {}',
'download.title' : 'Downloading file',
'download.startMessage' : 'Downloading {} ...',
'download.updateMessage' : 'Downloading {} ({:0.2f} of {:0.2f} MB)',
'download.exists.title' : 'File already exists',
'download.exists.message' :
'A file with the name {} already exists. What do you want to do?',
'download.exists.overwrite' : 'Overwrite',
'download.exists.newdest' : 'Choose new name',
'download.exists.skip' : 'Skip',
'download.exists.choose' : 'Choose a new destination',
'download.error.title' : 'Download error',
'download.error.message' :
'An error occurred while trying to download {}',
'expand.error.title' : 'Error downloading XNAT data',
'expand.error.message' :
'An error occurred while communicating with the XNAT server',
'projects' : 'Projects',
'project' : 'Project',
'subjects' : 'Subjects',
'subject' : 'Subject',
'experiments' : 'Experiments',
'experiment' : 'Experiment',
'scans' : 'Scans',
'scan' : 'Scan',
'assessors' : 'Assessors',
'assessor' : 'Assessor',
'resource' : 'Resource',
'resources' : 'Resources',
'files' : 'Files',
'file' : 'File',
'project.id' : 'ID',
'project.name' : 'Name',
'subject.id' : 'ID',
'subject.label' : 'Label',
'experiment.id' : 'ID',
'experiment.label' : 'Label',
'assessor.id' : 'ID',
'scan.id' : 'ID',
'scan.type' : 'Type',
'resource.id' : 'ID',
'resource.label' : 'Label',
'resource.file_size' : 'Total size',
'file.id' : 'Name',
'file.size' : 'Size',
}
"""This dictionary contains labels used for various things in the user
interface.
"""
TOOLTIPS = {
'filter.glob' :
'Items with a label that does not match these shell-style glob patterns '
'will be hidden in the browser. You can use the pipe | character to '
'specify multiple patterns - items which do not match any pattern will '
'be hidden.',
'filter.regexp' :
'Items with a label that does not match these regular expressions '
'will be hidden in the browser.',
}
"""This dictionary contains tooltips for various things in the user interface.
"""
XNAT_INFO_FORMATTERS = {
'resource.file_size' : lambda s: '{:0.2f} MB'.format(float(s) / 1048576),
'file.size' : lambda s: '{:0.2f} MB'.format(float(s) / 1048576)
}
"""This dictionary contains string formatters for some attributes that are
shown in the information panel.
"""
class XNATBrowserPanel(wx.Panel):
"""The ``XNATBrowserPanel`` allows the user to connect to and browse
a XNAT repository. It contains:
- controls allowing the user to enter a XNAT host and login
credentials, and to connect to the host
- A drop down box allowing the user to select a project on the
XNAT host
- A tree browser allowing the user to browse the contents of the
currently selected project
- A panel which displays information about the currently selected
item in the tree browser.
When the user double-clicks on a file object in the tree browser,
a :class:`XNATFileSelectEvent` is generated.
The ``XNATBrowserPanel`` has a handful of useful methods:
.. autosummary::
:nosignatures:
StartSession
EndSession
SessionActive
GetSelectedFiles
ExpandTreeItem
DownloadFile
GetHosts
GetAccounts
"""
def __init__(self,
parent,
knownHosts=None,
knownAccounts=None,
filterType=None,
filters=None):
"""Create a ``XNATBrowserPanel``.
:arg parent: ``wx`` parent object.
:arg knownHosts: A sequence of hosts to be used as auto-complete
options in the host input field.
:arg knownAccounts: A mapping of ``{ host : (username, password) }``,
which are used to automatically fill in the
login credentials when a particular host name
is entered.
:arg filterType: How the filter patterns should be applied -
either ``'regexp'`` for regular expressions, or
``'glob'`` for shell-style wildcard patterns.
Defaults to ``'regexp'``.
:arg filters: Mapping containing initial filter values. Must
be of the form ``{ level : pattern }``, where
``level`` is the name of an XNAT hierarchy level
(e.g. ``'subject'``, ``'file'``, etc.).
"""
if knownHosts is None: knownHosts = []
if knownAccounts is None: knownAccounts = {}
if filterType is None: filterType = 'regexp'
if filters is None: filters = {}
if filterType not in ('regexp', 'glob'):
raise ValueError('Unrecognised value for filterType: '
'{}. May be one of \'regexp\' or '
'\'glob\''.format(filterType))
# store hosts without
# the http[s]:// prefix
knownHosts = [re.sub(r'^https?://', '', h) for h in knownHosts]
knownAccounts = {re.sub(r'^https?://', '', h) : (u, p) for h, (u, p)
in knownAccounts.items()}
knownHosts += [h for h in knownAccounts.keys()
if h not in knownHosts]
wx.Panel.__init__(self, parent)
self.__knownHosts = knownHosts
self.__knownAccounts = knownAccounts
self.__filterType = filterType
self.__session = None
self.__filters = collections.OrderedDict([
('subject', ''),
('experiment', ''),
('file', ''),
])
self.__filters.update(**filters)
self.__host = at.AutoTextCtrl(self,
style=at.ATC_NO_PROPAGATE_ENTER)
self.__username = pt.PlaceholderTextCtrl(self,
placeholder='username',
style=wx.TE_PROCESS_ENTER)
self.__password = pt.PlaceholderTextCtrl(self,
placeholder='password',
style=(wx.TE_PASSWORD |
wx.TE_PROCESS_ENTER))
self.__connect = wx.Button(self)
self.__status = wx.StaticText(self)
self.__project = wx.Choice(self)
self.__refresh = wx.Button(self)
self.__filter = wx.Choice(self)
self.__filterText = pt.PlaceholderTextCtrl(self,
placeholder=filterType,
style=wx.TE_PROCESS_ENTER)
self.__splitter = wx.SplitterWindow(self,
style=(wx.SP_LIVE_UPDATE |
wx.SP_BORDER))
self.__info = wgrid.WidgetGrid(self.__splitter)
self.__browser = wx.TreeCtrl(self.__splitter,
style=(wx.TR_MULTIPLE |
wx.TR_NO_LINES |
wx.TR_HAS_BUTTONS |
wx.TR_TWIST_BUTTONS))
self.__splitter.SetMinimumPaneSize(50)
self.__splitter.SplitHorizontally(self.__info, self.__browser)
self.__splitter.SetSashPosition(50)
self.__splitter.SetSashGravity(0.2)
images = [icons.loadBitmap(icons.FILE_ICON),
icons.loadBitmap(icons.FOLDER_UNLOADED_ICON),
icons.loadBitmap(icons.FOLDER_LOADED_ICON)]
self.__fileImageId = 0
self.__unloadedFolderImageId = 1
self.__loadedFolderImageId = 2
imageList = wx.ImageList(16, 16)
for i in images:
imageList.Add(i)
self.__browser.AssignImageList(imageList)
self.__filter.SetItems([LABELS[f] for f in self.__filters.keys()])
self.__filter.SetSelection(0)
self.__hostLabel = wx.StaticText(self)
self.__usernameLabel = wx.StaticText(self)
self.__passwordLabel = wx.StaticText(self)
self.__projectLabel = wx.StaticText(self)
self.__filterLabel = wx.StaticText(self)
self.__status.SetFont(self.__status.GetFont().Larger().Larger())
self.__info.SetColours(border=self.__info._defaultEvenColour)
self.__host .AutoComplete(knownHosts)
self.__hostLabel .SetLabel(LABELS['host'])
self.__usernameLabel.SetLabel(LABELS['username'])
self.__passwordLabel.SetLabel(LABELS['password'])
self.__connect .SetLabel(LABELS['connect'])
self.__projectLabel .SetLabel(LABELS['project'])
self.__filterLabel .SetLabel(LABELS['filter'])
self.__refresh .SetLabel(LABELS['refresh'])
filterTooltip = TOOLTIPS['filter.{}'.format(filterType)]
self.__filterLabel.SetToolTip(filterTooltip)
self.__filter .SetToolTip(filterTooltip)
self.__filterText .SetToolTip(filterTooltip)
self.__loginSizer = wx.BoxSizer(wx.HORIZONTAL)
self.__filterSizer = wx.BoxSizer(wx.HORIZONTAL)
self.__mainSizer = wx.BoxSizer(wx.VERTICAL)
self.__loginSizer.Add((5, 1))
self.__loginSizer.Add(self.__hostLabel)
self.__loginSizer.Add((5, 1))
self.__loginSizer.Add(self.__host, proportion=1)
self.__loginSizer.Add((5, 1))
self.__loginSizer.Add(self.__usernameLabel)
self.__loginSizer.Add((5, 1))
self.__loginSizer.Add(self.__username, proportion=1)
self.__loginSizer.Add((5, 1))
self.__loginSizer.Add(self.__passwordLabel)
self.__loginSizer.Add((5, 1))
self.__loginSizer.Add(self.__password, proportion=1)
self.__loginSizer.Add((5, 1))
self.__loginSizer.Add(self.__connect)
self.__loginSizer.Add((5, 1))
self.__loginSizer.Add(self.__status)
self.__loginSizer.Add((5, 1))
self.__filterSizer.Add((5, 1))
self.__filterSizer.Add(self.__projectLabel)
self.__filterSizer.Add((5, 1))
self.__filterSizer.Add(self.__project, proportion=1)
self.__filterSizer.Add((5, 1))
self.__filterSizer.Add(self.__filterLabel)
self.__filterSizer.Add((5, 1))
self.__filterSizer.Add(self.__filter)
self.__filterSizer.Add((5, 1))
self.__filterSizer.Add(self.__filterText, proportion=1)
self.__filterSizer.Add((5, 1))
self.__filterSizer.Add(self.__refresh)
self.__filterSizer.Add((5, 1))
self.__mainSizer.Add(self.__loginSizer, flag=wx.EXPAND)
self.__mainSizer.Add((1, 10))
self.__mainSizer.Add(self.__filterSizer, flag=wx.EXPAND)
self.__mainSizer.Add((1, 10))
self.__mainSizer.Add(self.__splitter, flag=wx.EXPAND, proportion=1)
self.SetSizer(self.__mainSizer)
self.__host .Bind(at.EVT_ATC_TEXT_ENTER,self.__onHost)
self.__username .Bind(wx.EVT_TEXT_ENTER, self.__onUsername)
self.__password .Bind(wx.EVT_TEXT_ENTER, self.__onPassword)
self.__connect .Bind(wx.EVT_BUTTON, self.__onConnect)
self.__project .Bind(wx.EVT_CHOICE, self.__onProject)
self.__refresh .Bind(wx.EVT_BUTTON, self.__onRefresh)
self.__filter .Bind(wx.EVT_CHOICE, self.__onFilter)
self.__browser .Bind(wx.EVT_TREE_ITEM_ACTIVATED,
self.__onTreeSelect)
self.__browser .Bind(wx.EVT_TREE_SEL_CHANGED,
self.__onTreeHighlight)
self.__filterText.Bind(wx.EVT_TEXT_ENTER,
self.__onFilterText)
self.__updateFilter()
self.EndSession()
def GetSelectedFiles(self):
"""Returns a list of ``xnat`` objects representing all of the
files that are currently selected in the tree browser.
"""
items = self.__browser.GetSelections()
files = []
for i in items:
obj, level = self.__browser.GetItemData(i)
if level == 'file':
files.append(obj)
return files
def DownloadFile(self, fobj, dest, showProgress=True):
"""Download the given ``xnat.FileData`` file object to the path
specified by ``dest``.
See the :func:`generateFilePath` function for a quick way to
generate a unique file path.
:arg fobj: An XNAT file object, as returned by
:meth:`GetSelectedFiles`.
:arg dest: Directory to download the file to.
:arg showProgress: If ``True``, a ``wx.ProgressDialog`` is shown,
displaying the download progress.
:returns: | |
"""
Contains the base Exposure and Filename classes.
"""
import datetime
import json
import os
import threading
import time
from typing import List, Optional, Union
import azcam
import numpy
from azcam.tools.console_tools import ConsoleTools
from azcam.tools.exposure_filename import Filename
from azcam.tools.exposure_obstime import ObsTime
from azcam.tools.header import Header, ObjectHeaderMethods
from azcam.tools.image import Image
from azcam.tools.tools import Tools
class Exposure(Tools, Filename, ObjectHeaderMethods):
"""
The base exposure tool.
Usually implemented as the "exposure" tool.
Only required attributes and stub methods are defined here. Additional
methods and attributes are added as needed in exposure-specific classes
which should inherit this class.
"""
def __init__(self, tool_id="exposure", description=None):
Tools.__init__(self, tool_id, description)
Filename.__init__(self)
self.obstime = ObsTime()
self.image = Image()
# exposure flags, may be used anywhere
self.exposureflags = {
"NONE": 0,
"EXPOSING": 1,
"ABORT": 2,
"PAUSE": 3,
"RESUME": 4,
"READ": 5,
"PAUSED": 6,
"READOUT": 7,
"SETUP": 8,
"WRITING": 9,
"GUIDEERROR": 10,
"ERROR": 11,
}
azcam.db.exposureflags = self.exposureflags
self.exposureflags_rev = {v: k for k, v in self.exposureflags.items()}
# exposure flag defining state of current exposure
self.exposure_flag = self.exposureflags["NONE"]
# current image type, 'zero', 'object', 'dark', 'flat', 'ramp', etc
self.image_type = "zero"
# default imagetypes
self.image_types = ["zero", "object", "flat", "dark"]
# dictionary of shutter states for imagetypes {imagetype:ShutterState}
self.shutter_dict = {
"zero": 0,
"object": 1,
"flat": 1,
"dark": 0,
"ramp": 1,
}
# True to flush detector before exposures
self.flush_array = 1
# True to display an image after readout
self.display_image = 1
# True to send image to remote image server after readout
self.send_image = 0
self.message = "" # exposure status message
self.guide_status = 0
self.guide_image_copy = 0
# TdiMode flag, 0=not in TDI mode, 1=TDI mode
self.tdi_mode = 0
# TdiDelay mode
self.tdi_delay = 5
# ParDelay mode
self.par_delay = 5
# guide mode
self.guide_mode = 0
# True when exposure type is a comparison, to turn on comp lamps
self.comp_exposure = 0
# True when in a comparison exposure sequence so lamps stay on
self.comp_sequence = 0
# True when exposure has been aborted
self.aborted = 0
# True when exposure is completed, then toggled off
self.completed = 0
# requested exposure time in seconds
self.exposure_time = 1.0
# remaining exposure time in seconds for an exposure in progress
self.exposure_time_remaining = 0.0
# actual elapsed exposure time in seconds of last/current exposure
self.exposure_time_actual = 0.0
# exposure time saved for each exposure, used for zeros
self.exposure_time_saved = 0.0
# total time in seconds an exposure was paused
self.paused_time = 0.0
# starting clock paused time of exposure
self.paused_time_start = 0.0
# actual elapsed dark time of last/current exposure
self.dark_time = 0.0
# starting clock dark time of exposure
self.dark_time_start = 0.0
# True when in an exposure sequence
self.is_exposure_sequence = 0
# current exposure sequence number
self.exposure_sequence_number = 1
# total number of exposures in sequence
self.exposure_sequence_total = 1
# delay between sequence exposures in seconds
self.exposure_sequence_delay = 0.0
# sequence flush flag: -1 => use FlushArray, 0 => flush all, 1 => flush only first exposure, 2 => no flush
self.exposure_sequence_flush = 0
# remaining number of pixels to read for an exposure in progress
self.pixels_remaining = 0
# True to update headers in a thread to save time
self.update_headers_in_background = 0
self.updating_header = 0
# True to save image file after exposure
self.save_file = 1
# file types
self.filetypes = {"FITS": 0, "MEF": 1, "BIN": 2, "ASM": 6}
self.filetype = self.filetypes["MEF"]
# Exposure title
self.title = ""
# True to make image title the same as image type
self.auto_title = 0
# deinterlace mode; 1 = generic mode; x = ODI mode
self.deinterlace_mode = 1
# temporary image files
self.temp_image_file = ""
# filename of current image
self.last_filename = ""
# write data asynchronously
self.write_async = 0
# create the exposure header
self.header = Header("Exposure")
# flag indicating ROI has been changed
self.new_roi = 0
self.header.set_header("exposure", 1)
# data order
self.data_order = []
self.imageheaderfile = ""
self.pgress = 0 # debug
def initialize(self):
"""
Initialize exposure.
"""
# call initialize() method on other tools
for tool in azcam.db.tools_init:
azcam.db.get(tool).initialize()
self.initialized = 1
return
def reset(self):
"""
Reset exposure tool.
"""
# initialize only once
if not self.initialized:
self.initialize()
# set temporary filenames
self.set_temp_files()
# setup for exposures
self.is_exposure_sequence = 0
self.exposure_sequence_number = 1
self.set_auto_title()
azcam.db.abortflag = 0
self.save_file = 1
self.exposure_flag = self.exposureflags["NONE"]
# call reset() method on other tools
for tool in azcam.db.tools_reset:
azcam.db.get(tool).reset()
return
# **********************************************************************************************
# Exposure control
# **********************************************************************************************
def start(self):
"""
Allow custom operations at start of exposure.
"""
if azcam.db.get("instrument") is not None:
azcam.db.instrument.exposure_start()
if azcam.db.get("telescope") is not None:
azcam.db.telescope.exposure_start()
return
def finish(self):
"""
Allow custom operations at end of exposure.
"""
if azcam.db.get("instrument") is not None:
azcam.db.instrument.exposure_finish()
if azcam.db.get("telescope") is not None:
azcam.db.telescope.exposure_finish()
return
def finished(self):
if self.completed:
return 1
else:
return 0
def get_exposureflag(self):
return [self.exposure_flag, self.exposureflags_rev[self.exposure_flag]]
def test(self, exposure_time=0.0, shutter=0):
"""
Make a test exposure.
exposure_time is the exposure time in seconds.
shutter is 0 for closed and 1 for open.
"""
old_testimage = self.test_image
old_imagetype = self.image_type
old_exposuretime = self.exposure_time
self.test_image = 1
if shutter:
shutter_state = "object"
else:
shutter_state = "dark"
self.expose(exposure_time, shutter_state, "test image")
self.test_image = old_testimage
self.image_type = old_imagetype
self.exposure_time = old_exposuretime
return
def expose(self, exposure_time=-1, imagetype="", title=""):
"""
Make a complete exposure.
exposure_time is the exposure time in seconds
imagetype is the type of exposure ('zero', 'object', 'flat', ...)
title is the image title.
"""
# allow custom operations
self.start()
azcam.log("Exposure started")
# if last exposure was aborted, warn before clearing flag
if self.exposure_flag == self.exposureflags["ABORT"]:
azcam.AzcamWarning("Previous exposure was aborted")
# begin
if self.exposure_flag != self.exposureflags["ABORT"]:
self.begin(exposure_time, imagetype, title)
# integrate
if self.exposure_flag != self.exposureflags["ABORT"]:
self.integrate()
# readout
if (
self.exposure_flag != self.exposureflags["ABORT"]
and self.exposure_flag == self.exposureflags["READ"]
):
try:
self.readout()
except azcam.AzcamError:
pass
# end
if self.exposure_flag != self.exposureflags["ABORT"]:
self.end()
self.exposure_flag = self.exposureflags["NONE"]
self.completed = 1
azcam.log("Exposure finished")
# allow custom operations
self.finish()
return
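# A minimal usage sketch (not from the original source). It assumes a fully
# configured azcam environment (controller, instrument, etc. registered in
# azcam.db), which this base class relies on:
#
#     exposure = Exposure()
#     exposure.reset()
#     exposure.expose(exposure_time=10.0, imagetype="object", title="test field")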
def expose1(self, exposure_time: float = -1, image_type: str = "", image_title: str = ""):
"""
Make a complete exposure with immediate return to caller.
:param exposure_time: the exposure time in seconds
:param image_type: type of exposure ('zero', 'object', 'flat', ...)
:param image_title: image title, usually surrounded by double quotes
"""
arglist = [exposure_time, image_type, image_title]
thread = threading.Thread(target=self.expose, name="expose1", args=arglist)
thread.start()
return
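# Hypothetical usage sketch (not part of the tool itself): expose1() starts the
# exposure in a background thread and returns immediately, so a caller can poll
# finished() while the "expose1" thread runs expose():
#
#   exposure.expose1(10.0, "object", "test field")
#   while not exposure.finished():
#       time.sleep(0.5)   # assumes the caller imports `time`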
def guide(self, number_exposures=1):
"""
Make a complete guider exposure sequence.
number_exposures is the number of exposures to make; -1 means loop forever.
"""
AbortFlag = 0
number_exposures = int(number_exposures)
# system must be reset once before an exposure can be made
if not azcam.db.controller.is_reset:
azcam.db.controller.reset()
# parameters for faster operation
flusharray = self.flush_array
azcam.log("Guide started")
# this loop continues even for errors since data is sent to a separate client receiving images
LoopCount = 0
while True:
if 0:  # disabled inline exposure path kept for reference; the expose() call below is used instead
self.begin(exposure_time=-1, imagetype="object", title="guide image")
# integrate
self.integrate()
# readout
if self.exposure_flag == self.exposureflags["READ"]:
try:
self.readout()
self.guide_status = 1 # image read OK
self.guide_image_copy = self.image
except Exception:
self.guide_status = 2 # image not read OK, but don't stop guide loop
self.image = self.guide_image_copy
# image writing
self.end()
self.exposure_flag = self.exposureflags["NONE"]
else:
self.expose(-1, "object", "guide image")
AbortFlag = azcam.db.abortflag
if AbortFlag:
break
if number_exposures == -1:
continue
else:
LoopCount += 1
if LoopCount >= number_exposures:
break
# finish
self.guide_status = 0
self.flush_array = flusharray
if AbortFlag:
azcam.AzcamWarning("Guide aborted")
else:
azcam.log("Guide finished")
return
def guide1(self, number_exposures=1):
"""
Make a complete guider exposure with an immediate return.
number_exposures is the number of exposures to make; -1 means loop forever.
"""
arglist = [number_exposures]
thread = threading.Thread(target=self.guide, name="guide1", args=arglist)
thread.start()
return
def begin(self, exposure_time=-1, imagetype="", title=""):
"""
Initiates the first part of an exposure, through image flushing.
exposure_time is in seconds.
imagetype is one of zero, object, flat, dark, ramp, ...
"""
# system must be reset once before an exposure can be made
x = self.is_exposure_sequence # save this flag which is lost by reset
if not azcam.db.controller.is_reset:
self.reset()
self.is_exposure_sequence = x
# set exposure flag
self.exposure_flag | |
-1, -3]
intimidations -1.4 1.49666 [1, -2, -2, -2, -1, -1, -1, -4, 1, -3]
intimidator -1.6 0.4899 [-1, -1, -2, -2, -1, -2, -2, -1, -2, -2]
intimidators -1.6 0.8 [-1, -1, -3, -2, -1, -1, -3, -1, -2, -1]
intimidatory -1.1 1.22066 [-1, -2, -3, -1, -1, -1, -2, 2, -1, -1]
intricate 0.6 0.66332 [1, 0, 2, 1, 0, 1, 1, 0, 0, 0]
intrigues 0.9 0.9434 [2, -1, 2, 1, 2, 0, 1, 1, 0, 1]
invigorate 1.9 0.83066 [2, 2, 2, 0, 2, 3, 3, 2, 1, 2]
invigorated 0.8 1.8868 [-2, 3, 2, 2, -2, -2, 2, 2, 1, 2]
invigorates 2.1 0.53852 [3, 2, 3, 2, 1, 2, 2, 2, 2, 2]
invigorating 2.1 0.7 [2, 1, 1, 3, 3, 3, 2, 2, 2, 2]
invigoratingly 2.0 0.63246 [2, 2, 1, 2, 1, 3, 2, 2, 2, 3]
invigoration 1.5 1.36015 [2, 2, 1, -2, 1, 3, 3, 2, 1, 2]
invigorations 1.2 0.87178 [1, -1, 2, 2, 1, 1, 2, 1, 2, 1]
invigorator 1.1 1.3 [3, 1, 0, 2, 2, -2, 1, 1, 2, 1]
invigorators 1.2 0.87178 [1, 1, 1, 1, 3, 2, 0, 0, 1, 2]
invincible 2.2 1.77764 [4, 1, 3, 2, 4, 1, 4, -1, 0, 4]
invite 0.6 0.66332 [2, 1, 1, 0, 0, 0, 0, 1, 1, 0]
inviting 1.3 0.45826 [1, 1, 1, 2, 1, 2, 2, 1, 1, 1]
invulnerable 1.3 1.73494 [2, 3, 4, 2, 0, 3, 0, 1, -2, 0]
irate -2.9 0.53852 [-3, -3, -3, -2, -3, -4, -3, -3, -2, -3]
ironic -0.5 1.28452 [1, 0, 0, 0, 0, 0, -4, -1, -1, 0]
irony -0.2 1.07703 [-1, 0, -3, 0, 0, 0, 1, 0, 1, 0]
irrational -1.4 0.4899 [-1, -1, -1, -2, -2, -2, -1, -1, -2, -1]
irrationalism -1.5 0.5 [-1, -2, -1, -1, -2, -1, -2, -2, -1, -2]
irrationalist -2.1 0.9434 [-1, -4, -2, -2, -3, -3, -2, -1, -1, -2]
irrationalists -1.5 0.92195 [-2, -2, -1, -2, -2, 1, -1, -2, -2, -2]
irrationalities -1.5 0.80623 [-2, -2, 0, -1, -1, -1, -2, -3, -1, -2]
irrationality -1.7 0.9 [-3, -3, -1, -2, -1, -1, -1, -1, -3, -1]
irrationally -1.6 0.4899 [-1, -2, -1, -2, -1, -2, -2, -2, -1, -2]
irrationals -1.1 0.83066 [-2, 0, -1, 0, -1, -1, -3, -1, -1, -1]
irresistible 1.4 2.2 [2, 3, 2, 3, 4, 4, 1, -1, -2, -2]
irresolute -1.4 0.66332 [-2, -2, -1, -2, -1, -1, -1, -2, -2, 0]
irresponsible -1.9 0.3 [-2, -2, -2, -2, -2, -2, -1, -2, -2, -2]
irreversible -0.8 0.87178 [-2, -2, 0, -1, 0, 0, 0, -1, -2, 0]
irritabilities -1.7 0.64031 [-2, -2, -2, -1, -1, -1, -1, -2, -3, -2]
irritability -1.4 1.28062 [-2, -1, -2, -1, 2, -2, -2, -2, -1, -3]
irritable -2.1 0.7 [-2, -2, -3, -1, -2, -1, -3, -2, -3, -2]
irritableness -1.7 0.64031 [-2, -2, -2, -1, -2, -1, -1, -3, -1, -2]
irritably -1.8 0.6 [-2, -2, -1, -1, -3, -2, -1, -2, -2, -2]
irritant -2.3 0.78102 [-3, -3, -3, -3, -3, -1, -2, -2, -2, -1]
irritants -2.1 0.83066 [-2, -3, -1, -4, -2, -1, -2, -2, -2, -2]
irritate -1.8 0.6 [-3, -2, -2, -2, -1, -2, -1, -2, -1, -2]
irritated -2.0 0.63246 [-1, -2, -2, -2, -3, -2, -2, -3, -1, -2]
irritates -1.7 0.78102 [-1, -2, -1, -1, -3, -2, -2, -3, -1, -1]
irritating -2.0 0.63246 [-2, -2, -2, -1, -1, -3, -2, -2, -3, -2]
irritatingly -2.0 0.44721 [-2, -2, -2, -3, -1, -2, -2, -2, -2, -2]
irritation -2.3 0.78102 [-3, -2, -2, -2, -1, -3, -3, -1, -3, -3]
irritations -1.5 0.67082 [-2, -2, -1, -1, -1, -1, -1, -2, -3, -1]
irritative -2.0 0.63246 [-3, -2, -3, -2, -1, -2, -2, -2, -1, -2]
isolatable 0.2 1.249 [1, 0, -2, 0, -1, -1, 2, 0, 2, 1]
isolate -0.8 0.74833 [-1, -1, -1, 0, 0, 0, 0, -2, -1, -2]
isolated -1.3 0.64031 [-1, -1, -1, -1, -1, -1, -2, -1, -3, -1]
isolates -1.3 0.64031 [-1, -1, -1, -1, -2, -1, -3, -1, -1, -1]
isolation -1.7 0.78102 [-1, -3, -1, -2, -2, -1, -3, -1, -2, -1]
isolationism 0.4 1.62481 [2, 0, -1, -2, -1, 3, 0, 2, -1, 2]
isolationist 0.7 1.55242 [2, 0, 0, -1, -1, 3, 0, 2, -1, 3]
isolations -0.5 1.11803 [-1, -2, -2, -1, 1, 1, -1, 0, 1, -1]
isolator -0.4 0.66332 [0, 0, -1, 0, -1, 0, -2, 0, 0, 0]
isolators -0.4 1.42829 [-2, -1, -1, -2, 2, 2, -1, 1, -1, -1]
itchy -1.1 0.53852 [-1, -1, -1, -1, -2, 0, -1, -1, -2, -1]
jackass -1.8 1.07703 [-1, 0, -3, -2, -2, -3, 0, -2, -2, -3]
jackasses -2.8 0.9798 [-2, -2, -4, -3, -4, -4, -3, -1, -2, -3]
jaded -1.6 0.66332 [-1, -1, -1, -2, -2, -1, -2, -2, -1, -3]
jailed -2.2 0.87178 [-4, -3, -2, -2, -1, -2, -2, -3, -1, -2]
jaunty 1.2 0.6 [2, 1, 0, 2, 1, 1, 1, 2, 1, 1]
jealous -2.0 0.63246 [-2, -2, -3, -2, -3, -2, -1, -1, -2, -2]
jealousies -2.0 0.63246 [-2, -3, -2, -1, -2, -3, -2, -1, -2, -2]
jealously -2.0 0.89443 [-1, -3, -1, -4, -2, -2, -1, -2, -2, -2]
jealousness -1.7 0.45826 [-1, -2, -2, -2, -2, -1, -1, -2, -2, -2]
jealousy -1.3 1.73494 [-2, -3, -2, -2, -2, 2, -3, -1, 2, -2]
jeopardy -2.1 0.9434 [-3, -3, -3, -1, -1, -3, -2, -1, -1, -3]
jerk -1.4 0.8 [-1, -1, -1, -2, -3, 0, -2, -1, -1, -2]
jerked -0.8 0.74833 [0, -1, -1, 0, -2, -1, 0, -1, 0, -2]
jerks -1.1 1.51327 [-2, -2, -1, -2, -1, 0, -2, 3, -2, -2]
jewel 1.5 1.20416 [1, 3, 2, 0, 2, 1, 3, 0, 3, 0]
jewels 2.0 1.34164 [3, 1, 0, 0, 4, 3, 3, 2, 3, 1]
jocular 1.2 1.249 [0, 1, 2, -2, 1, 2, 2, 2, 2, 2]
join 1.2 0.74833 [2, 2, 1, 2, 1, 0, 1, 2, 0, 1]
joke 1.2 0.74833 [1, 1, 1, 1, 1, 1, 0, 2, 1, 3]
joked 1.3 0.64031 [1, 1, 2, 2, 0, 2, 1, 1, 2, 1]
joker 0.5 0.92195 [1, 1, -1, 1, 2, -1, 1, 1, 0, 0]
jokes 1.0 0.7746 [1, 1, -1, 1, 1, 2, 1, 1, 2, 1]
jokester 1.5 0.67082 [2, 2, 2, 2, 1, 1, 0, 1, 2, 2]
jokesters 0.9 0.83066 [0, 0, 2, 1, 1, 2, 2, 0, 1, 0]
jokey 1.1 0.3 [1, 1, 2, 1, 1, 1, 1, 1, 1, 1]
joking 0.9 0.53852 [1, 2, 1, 1, 1, 0, 1, 1, 0, 1]
jollied 2.4 0.66332 [3, 3, 2, 2, 3, 3, 2, 2, 1, 3]
jollier 2.4 0.4899 [2, 2, 3, 3, 2, 2, 2, 3, 2, 3]
jollies 2.0 0.63246 [1, 2, 3, 2, 2, 2, 1, 2, 2, 3]
jolliest 2.9 0.7 [3, 3, 2, 4, 3, 2, 3, 4, 2, 3]
jollification 2.2 0.74833 [2, 3, 3, 1, 2, 1, 3, 2, 3, 2]
jollifications 2.0 0.7746 [2, 3, 2, 2, 2, 2, 3, 2, 0, 2]
jollify 2.1 0.53852 [2, 3, 2, 2, 2, 2, 2, 3, 1, 2]
jollily 2.7 0.64031 [3, 3, 3, 3, 3, 3, 3, 1, 3, 2]
jolliness 2.5 0.67082 [3, 1, 2, 3, 2, 3, 3, 3, 2, 3]
jollities 1.7 0.64031 [2, 1, 2, 2, 1, 2, 2, 3, 1, 1]
jollity 1.8 1.6 [3, 2, 1, 1, 4, 3, 1, 2, 3, -2]
jolly 2.3 1.00499 [4, 3, 3, 1, 1, 1, 3, 2, 3, 2]
jollying 2.3 0.64031 [2, 3, 3, 1, 3, 3, 2, 2, 2, 2]
jovial 1.9 0.53852 [2, 2, 1, 2, 1, 2, 3, 2, 2, 2]
joy 2.8 0.74833 [3, 2, 3, 4, 3, 3, 3, 1, 3, 3]
joyance 2.3 0.9 [1, 3, 4, 2, 2, 1, 2, 3, 2, 3]
joyed 2.9 0.3 [3, 3, 3, 3, 3, 3, 3, 2, 3, 3]
joyful 2.9 0.53852 [3, 2, 3, 3, 2, 3, 4, 3, 3, 3]
joyfuller 2.4 0.66332 [2, 4, 3, 2, 2, 2, 3, 2, 2, 2]
joyfully 2.5 0.67082 [2, 2, 3, 3, 2, 2, 3, 4, 2, 2]
joyfulness 2.7 1.00499 [4, 3, 1, 3, 3, 3, 4, 2, 1, 3]
joying 2.5 0.67082 [2, 2, 1, 3, 3, 3, 3, 3, 3, 2]
joyless -2.5 0.67082 [-1, -2, -3, -3, -3, -3, -2, -2, -3, -3]
joylessly -1.7 1.1 [-2, -2, -3, -3, -2, -1, 1, -2, -1, -2]
joylessness -2.7 0.9 [-4, -3, -3, -3, -3, -3, -3, -1, -1, -3]
joyous 3.1 0.7 [3, 4, 3, 2, 4, 3, 3, 4, 2, 3]
joyously 2.9 0.7 [2, 3, 4, 3, 4, 2, 2, 3, 3, 3]
joyousness 2.8 0.74833 [3, 3, 1, 3, 3, 3, 4, 2, 3, 3]
joypop -0.2 1.93907 [-3, 1, 2, 2, -3, -1, -2, -1, 1, 2]
joypoppers -0.1 1.22066 [2, -1, 1, -1, -1, 1, 1, 0, -2, -1]
joyridden 0.6 1.8 [-2, -1, 4, 0, 0, 2, 1, -1, 0, 3]
joyride 1.1 1.22066 [-1, 1, 2, 0, 2, 0, 2, 2, 3, 0]
joyrider 0.7 1.26886 [2, -2, 1, 2, 0, 2, -1, 1, 1, 1]
joyriders 1.3 1.18743 [1, 0, 0, 1, 4, 3, 1, 1, 1, 1]
joyrides 0.8 1.32665 [2, -2, 1, 2, 0, 2, -1, 1, 2, 1]
joyriding 0.9 1.04403 [1, -1, 1, 1, 0, 2, 0, 1, 3, 1]
joyrode 1.0 1.48324 [4, 0, -2, 0, 2, 1, 2, 1, 1, 1]
joys 2.2 0.4 [2, 2, 2, 2, 2, 3, 2, 3, 2, 2]
joystick 0.7 0.78102 [1, 0, 2, 2, 0, 0, 1, 1, 0, 0]
joysticks 0.2 0.4 [0, 0, 0, 0, 1, 0, 0, 1, 0, 0]
jubilant 3.0 0.63246 [3, 3, 4, 3, 3, 2, 3, 4, 2, 3]
jumpy -1.0 0.63246 [0, 0, -2, -1, -1, -1, -1, -1, -2, -1]
justice 2.4 1.0198 [3, 2, 1, 2, 3, 2, 4, 4, 2, 1]
justifiably 1.0 0.7746 [0, 1, 0, 1, 1, 1, 0, 2, 2, 2]
justified 1.7 0.64031 [1, 2, 2, 3, 1, 1, 1, 2, 2, 2]
keen 1.5 0.67082 [1, 1, 3, 1, 2, 1, 1, 2, 2, 1]
keened 0.3 1.00499 [-2, 0, 1, 0, 1, 1, 0, 2, 0, 0]
keener 0.5 1.20416 [-1, -1, 0, -1, 2, 1, 2, 2, 1, 0]
keeners 0.6 0.4899 [1, 0, 0, 1, 1, 0, 1, 1, 0, 1]
keenest 1.9 0.83066 [3, 3, 1, 1, 3, 2, 2, 1, 2, 1]
keening -0.7 1.41774 [0, -3, -1, -1, -3, 1, -1, 1, 1, -1]
keenly 1.0 0.7746 [2, 1, 1, 0, 1, 0, 2, 1, 2, 0]
keenness 1.4 0.4899 [1, 1, 2, 2, 1, 1, 1, 2, 2, 1]
keens 0.1 1.22066 [1, -3, 0, 0, 0, 2, 1, 0, 0, 0]
kewl 1.3 0.45826 [2, 1, 1, 1, 2, 1, 2, 1, 1, 1]
kidding 0.4 0.8 [0, 1, 0, -1, 1, 1, 1, 1, -1, 1]
kill -3.7 0.45826 [-4, | |
from collections import OrderedDict
from hwtypes import BitVector
import os
from ..bit import VCC, GND, BitType, BitIn, BitOut, MakeBit, BitKind
from ..array import ArrayKind, ArrayType, Array
from ..tuple import TupleKind, TupleType, Tuple
from ..clock import wiredefaultclock, wireclock, ClockType, Clock, ResetType, ClockKind, EnableKind, ResetKind, AsyncResetType, AsyncResetKind
from ..bitutils import seq2int
from ..backend.verilog import find
from ..logging import error
import coreir
from ..ref import ArrayRef, DefnRef, TupleRef
from ..passes import InstanceGraphPass
from ..t import In
import logging
from .util import make_relative, get_codegen_debug_info
from ..interface import InterfaceKind
import inspect
import copy
import json
from .. import singleton
from warnings import warn
from collections import defaultdict
logger = logging.getLogger('magma').getChild('coreir_backend')
level = os.getenv("MAGMA_COREIR_BACKEND_LOG_LEVEL", "WARN")
# TODO: Factor this with magma.logging code for debug level validation
if level in ["DEBUG", "WARN", "INFO"]:
logger.setLevel(getattr(logging, level))
elif level is not None:
logger.warning("Unsupported value for MAGMA_COREIR_BACKEND_LOG_LEVEL:"
f" {level}")
# logger.setLevel(logging.DEBUG)
class CoreIRBackendError(RuntimeError):
pass
class keydefaultdict(defaultdict):
# From https://stackoverflow.com/questions/2912231/is-there-a-clever-way-to-pass-the-key-to-defaultdicts-default-factory
def __missing__(self, key):
if self.default_factory is None:
raise KeyError( key ) # pragma: no cover
else:
ret = self[key] = self.default_factory(key)
return ret
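# Usage sketch (illustrative): unlike a plain defaultdict, keydefaultdict passes
# the missing key to the factory. CoreIRBackend below relies on this, e.g.
#   libs = keydefaultdict(context.get_lib)
#   libs["coreir"]   # first access calls context.get_lib("coreir") and caches the result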
def get_top_name(name):
if isinstance(name, TupleRef):
return get_top_name(name.tuple.name)
if isinstance(name, ArrayRef):
return get_top_name(name.array.name)
return name
def magma_port_to_coreir(port):
select = repr(port)
name = port.name
if isinstance(name, TupleRef):
# Prefix integer indexes for unnamed tuples (e.g. 0, 1, 2) with "_"
if name.index.isdigit():
select = select.split(".")
select[-1] = "_" + select[-1]
select = ".".join(select)
name = get_top_name(name)
if isinstance(name, DefnRef):
if name.defn.name != "":
select_list = select.split(".")
select_list[0] = "self"
select = ".".join(select_list)
return select.replace("[", ".").replace("]", "")
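# Illustrative examples (hypothetical ports): an array select such as
# "inst0.O[3]" is rewritten to "inst0.O.3", and an unnamed-tuple select whose
# last component is a digit, e.g. "self.I.0", becomes "self.I._0", matching
# coreir's select syntax.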
# Singleton context meant to be used with coreir/magma code
@singleton
class CoreIRContextSingleton:
__instance = None
def get_instance(self):
return self.__instance
def reset_instance(self):
self.__instance = coreir.Context()
def __init__(self):
self.__instance = coreir.Context()
CoreIRContextSingleton()
class CoreIRBackend:
context_to_modules_map = {}
def __init__(self, context=None, check_context_is_default=True):
if context is None:
context = CoreIRContextSingleton().get_instance()
elif check_context_is_default and (context != CoreIRContextSingleton().get_instance()):
warn("Creating CoreIRBackend with non-singleton CoreIR context. "
"If you're sure you want to do this, set check_context_is_default "
"when initializing the CoreIRBackend.")
if context not in CoreIRBackend.context_to_modules_map:
CoreIRBackend.context_to_modules_map[context] = {}
self.modules = CoreIRBackend.context_to_modules_map[context]
self.context = context
self.libs = keydefaultdict(self.context.get_lib)
self.libs_used = set()
self.__constant_cache = {}
self.__unique_concat_id = -1
def check_interface(self, definition):
# for now only allow Bit, Array, or Tuple
def check_type(port, errorMessage=""):
if isinstance(port, ArrayKind):
check_type(port.T, errorMessage.format("Array({}, {})").format(
str(port.N), "{}"))
elif isinstance(port, TupleKind):
for (k, t) in zip(port.Ks, port.Ts):
check_type(t, errorMessage.format("Tuple({}:{})".format(k, "{}")))
elif isinstance(port, (BitKind, ClockKind, EnableKind, ResetKind, AsyncResetKind)):
return
else:
raise CoreIRBackendError(errorMessage.format(str(port)))
for name, port in definition.interface.ports.items():
check_type(type(port), 'Error: Argument {} must be comprised only of Bit, Array, or Tuple')
def get_type(self, port):
if isinstance(port, (ArrayType, ArrayKind)):
_type = self.context.Array(port.N, self.get_type(port.T))
elif isinstance(port, (TupleType, TupleKind)):
def to_string(k):
"""
Unnamed tuples have integer keys (e.g. 0, 1, 2),
we prefix them with "_" so they can be consumed by coreir's
Record type (key names are constrained such that they can't be
integers)
"""
if isinstance(k, int):
return f"_{k}"
return k
_type = self.context.Record({
to_string(k): self.get_type(t) for (k, t) in
zip(port.Ks, port.Ts)
})
elif port.isinput():
if isinstance(port, (ClockType, ClockKind)):
_type = self.context.named_types[("coreir", "clk")]
elif isinstance(port, (AsyncResetType, AsyncResetKind)):
_type = self.context.named_types[("coreir", "arst")]
else:
_type = self.context.Bit()
elif port.isoutput():
if isinstance(port, (ClockType, ClockKind)):
_type = self.context.named_types[("coreir", "clkIn")]
elif isinstance(port, (AsyncResetType, AsyncResetKind)):
_type = self.context.named_types[("coreir", "arstIn")]
else:
_type = self.context.BitIn()
else:
_type = self.context.BitInOut()
return _type
coreirNamedTypeToPortDict = {
"clk": Clock,
"coreir.clkIn": Clock
}
def get_ports(self, coreir_type, renamed_ports):
if (coreir_type.kind == "Bit"):
return BitOut
elif (coreir_type.kind == "BitIn"):
return BitIn
elif (coreir_type.kind == "Array"):
return Array[len(coreir_type), self.get_ports(coreir_type.element_type, renamed_ports)]
elif (coreir_type.kind == "Record"):
elements = {}
for item in coreir_type.items():
name = item[0]
# replace the in port with I as can't reference that
if name == "in":
name = "I"
renamed_ports[name] = "in"
elements[name] = self.get_ports(item[1], renamed_ports)
return Tuple(**elements)
elif (coreir_type.kind == "Named"):
# exception to handle clock types, since other named types not handled
if coreir_type.name in self.coreirNamedTypeToPortDict:
return In(self.coreirNamedTypeToPortDict[coreir_type.name])
else:
raise NotImplementedError("not all named types supported yet")
else:
raise NotImplementedError("Trying to convert unknown coreir type to magma type")
def get_ports_as_list(self, ports):
return [item for i in range(ports.N) for item in [ports.Ks[i], ports.Ts[i]]]
def convert_interface_to_module_type(self, interface):
args = OrderedDict()
for name, port in interface.ports.items():
args[name] = self.get_type(port)
return self.context.Record(args)
def compile_instance(self, instance, module_definition):
name = instance.__class__.coreir_name
lib = self.libs[instance.coreir_lib]
logger.debug(f"Compiling instance {(instance.name, type(instance))}")
if instance.coreir_genargs is None:
if hasattr(instance, "wrappedModule") and \
instance.wrappedModule.context == self.context:
module = instance.wrappedModule
else:
module = lib.modules[name]
args = {}
for name, value in instance.kwargs.items():
if name in {"name", "loc"}:
continue # Skip
elif isinstance(value, tuple):
args[name] = BitVector[value[1]](value[0])
else:
args[name] = value
args = self.context.new_values(args)
return module_definition.add_module_instance(instance.name, module, args)
else:
generator = lib.generators[name]
config_args = {}
for name, value in instance.coreir_configargs.items():
config_args[name] = value
config_args = self.context.new_values(config_args)
gen_args = {}
for name, value in type(instance).coreir_genargs.items():
if isinstance(value, AsyncResetKind):
value = self.context.named_types["coreir", "arst"]
elif isinstance(value, ClockKind):
value = self.context.named_types["coreir", "clk"]
gen_args[name] = value
gen_args = self.context.new_values(gen_args)
return module_definition.add_generator_instance(instance.name,
generator, gen_args, config_args)
def add_non_input_ports(self, non_input_ports, port):
if not port.isinput():
non_input_ports[port] = magma_port_to_coreir(port)
if isinstance(port, (TupleType, ArrayType)):
for element in port:
self.add_non_input_ports(non_input_ports, element)
def compile_declaration(self, declaration):
if declaration.coreir_lib is not None:
self.libs_used.add(declaration.coreir_lib)
# These libraries are already available by default in coreir, so we
# don't need declarations
if declaration.coreir_lib in ["coreir", "corebit", "commonlib"]:
if declaration.coreir_genargs is None:
return self.libs[declaration.coreir_lib].modules[declaration.coreir_name]
else:
return self.libs[declaration.coreir_lib].generators[declaration.coreir_name]
if declaration.name in self.modules:
logger.debug(f" {declaration} already compiled, skipping")
return
module_type = self.convert_interface_to_module_type(declaration.interface)
if isinstance(declaration.interface, InterfaceKind):
module_type = self.context.Flip(module_type)
coreir_module = self.context.global_namespace.new_module(declaration.coreir_name,
module_type)
if get_codegen_debug_info() and declaration.debug_info:
coreir_module.add_metadata("filename", json.dumps(make_relative(declaration.debug_info.filename)))
coreir_module.add_metadata("lineno", json.dumps(str(declaration.debug_info.lineno)))
return coreir_module
def compile_definition_to_module_definition(self, definition, module_definition):
if definition.coreir_lib is not None:
self.libs_used.add(definition.coreir_lib)
non_input_ports = {}
for name, port in definition.interface.ports.items():
logger.debug("{}, {}, {}".format(name, port, port.isoutput()))
self.add_non_input_ports(non_input_ports, port)
for instance in definition.instances:
wiredefaultclock(definition, instance)
wireclock(definition, instance)
coreir_instance = self.compile_instance(instance, module_definition)
if get_codegen_debug_info() and getattr(instance, "debug_info", False):
coreir_instance.add_metadata("filename", json.dumps(make_relative(instance.debug_info.filename)))
coreir_instance.add_metadata("lineno", json.dumps(str(instance.debug_info.lineno)))
for name, port in instance.interface.ports.items():
self.add_non_input_ports(non_input_ports, port)
for instance in definition.instances:
for name, port in instance.interface.ports.items():
self.connect_non_outputs(module_definition, port,
non_input_ports)
for port in definition.interface.ports.values():
self.connect_non_outputs(module_definition, port, non_input_ports)
def connect_non_outputs(self, module_definition, port,
non_input_ports):
# Recurse into non input types that may contain inout children
if isinstance(port, TupleType) and not port.isinput() or \
isinstance(port, ArrayType) and not port.T.isinput():
for elem in port:
self.connect_non_outputs(module_definition, elem,
non_input_ports)
elif not port.isoutput():
self.connect(module_definition, port, port.value(),
non_input_ports)
def compile_definition(self, definition):
logger.debug(f"Compiling definition {definition}")
if definition.name in self.modules:
logger.debug(f" {definition} already compiled, skipping")
return self.modules[definition.name]
self.check_interface(definition)
module_type = self.convert_interface_to_module_type(definition.interface)
coreir_module = self.context.global_namespace.new_module(definition.coreir_name, module_type)
if get_codegen_debug_info() and definition.debug_info:
coreir_module.add_metadata("filename", json.dumps(make_relative(definition.debug_info.filename)))
coreir_module.add_metadata("lineno", json.dumps(str(definition.debug_info.lineno)))
# If this module was imported from verilog, do not go through the
# general module construction flow. Instead just attach the verilog
# source as metadata and return the module. Also, we attach any
# contained instances as CoreIR instances.
if hasattr(definition, "verilogFile") and definition.verilogFile:
verilog_metadata = {"verilog_string": definition.verilogFile}
coreir_module.add_metadata("verilog", json.dumps(verilog_metadata))
module_definition = coreir_module.new_definition()
coreir_module.definition = module_definition
for instance in definition.instances:
self.compile_instance(instance, module_definition)
return coreir_module
module_definition = coreir_module.new_definition()
self.compile_definition_to_module_definition(definition, module_definition)
coreir_module.definition = module_definition
return coreir_module
def connect(self, module_definition, port, value, non_input_ports):
self.__unique_concat_id
# allow clocks or arrays of clocks to be unwired as CoreIR can wire them up
def is_clock_or_nested_clock(p):
if isinstance(p, (ClockType, ClockKind)):
return True
elif isinstance(p, (ArrayType, ArrayKind)):
return is_clock_or_nested_clock(p.T)
elif isinstance(p, (TupleType, TupleKind)):
for item in p.Ts:
if is_clock_or_nested_clock(item):
return True
return False
if value is None and is_clock_or_nested_clock(port):
return
elif value is None:
if port.isinout():
# Skip inouts because they might be connected as an input
return
raise Exception(f"Got None for port '{port.debug_name}', is it "
"connected to anything?")
elif isinstance(value, coreir.Wireable):
source = value
elif isinstance(value, ArrayType) and all(x in {VCC, GND} for x in value):
source = self.get_constant_instance(value, len(value),
module_definition)
elif value.anon() and isinstance(value, ArrayType):
for p, v in zip(port, value):
self.connect(module_definition, p, v, non_input_ports)
return
elif isinstance(value, TupleType) and value.anon():
for p, v in zip(port, value):
self.connect(module_definition, p, v, non_input_ports)
return
elif value is VCC or value is GND:
source = self.get_constant_instance(value, None, module_definition)
else:
# logger.debug((value, non_input_ports))
# logger.debug((id(value), [id(key) for key in non_input_ports]))
source = module_definition.select(non_input_ports[value])
sink = module_definition.select(magma_port_to_coreir(port))
module_definition.connect(source, sink)
if get_codegen_debug_info() and getattr(port, "debug_info", False):
module_definition.add_metadata(source, sink, "filename", json.dumps(make_relative(port.debug_info.filename)))
module_definition.add_metadata(source, sink, "lineno", json.dumps(str(port.debug_info.lineno)))
__unique_constant_id = -1
def get_constant_instance(self, constant, num_bits, module_definition):
if module_definition not in self.__constant_cache:
self.__constant_cache[module_definition] = {}
bit_type_to_constant_map | |
# -*- coding: UTF-8 -*-
'''
Created on 4 nov. 2014
@author: <NAME>
Written By:
<NAME>
@Email: < robert [--DOT--] pastor0691 (--AT--) orange [--DOT--] fr >
@http://trajectoire-predict.monsite-orange.fr/
@copyright: Copyright 2015 <NAME>
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
density at mean sea level = 1.225 kg / cubic meters
'''
import unittest
import numpy
import xlsxwriter
import os
import math
MeterPerSecond2Knots = 1.94384449
Knots2MeterPerSecond = 0.514444444
class Atmosphere():
'''
The standard sea level conditions are as follows:
Temperature (T0) = 288.15 K = 15 °C
Pressure (p0) = 101325 N/m2 = 760 mm of Hg
'''
SeaLevelTemperatureDegrees = 15.0
SeaLevelPressureNewtonsSquareMeters = 101325.0
''' MSL Mean Sea Level '''
StandardAtmosphericTemperatureMslKelvins = 288.15 # kelvins
StandardAtmosphericPressureMslPascal = 101325 # pascals
StandardAtmosphericDensityMslKgCubicMeters = 1.225 # [kg/m3]
SpeedOfSoundMslMetersSeconds = 340.294 # at mean sea level [m/s]
'''ISA temperature gradient with altitude below the tropopause :
betaT = - 0.0065 [°K/m]
'''
betaT = - 0.0065 # [°K/m]
'''
Tropopause
Tropopause is the separation between two different layers: the troposphere, which stands
below it, and the stratosphere, which is placed above. Its altitude H_p,trop is constant when
expressed in terms of geopotential pressure altitude:
H_p,trop = 11000 [m]
'''
TropopauseGeoPotentialPressureAltitude = 11000.0 # meters
className = ''
# altitude in Meters
AltitudeMeters = numpy.array( [-2000,
0, 2000, 4000, 6000, 8000, 10000,
12000, 14000, 16000, 18000, 20000,
22000, 24000, 26000, 28000, 30000,
32000, 34000, 36000, 38000, 40000,
42000, 44000, 46000, 48000, 50000,
52000, 54000, 56000, 58000, 60000,
62000, 64000, 66000, 68000, 70000,
72000, 74000, 76000, 78000, 80000,
82000, 84000, 86000 ] )
'''
alt-km sigma delta theta temp-Kelvin
pressure-N-sq-m dens-kg-cu-m a-sound-m-s viscosity-kg-m-s k-visc-sq-m-s
In this table, altitudes run from -2 to 86 km in 2 km intervals.
alt is altitude in meters.
sigma is density divided by sea-level density.
delta is pressure divided by sea-level pressure.
theta is temperature divided by sea-level temperature.
temp is temperature in kelvins.
press is pressure in newtons per square meter.
dens is density in kilograms per cubic meter.
a is the speed of sound in meters per second.
visc is viscosity in 10**(-6) kilograms per meter-second.
k.visc is kinematic viscosity in square meters per second.
'''
AtmosphereTemperatureKelvins = None
AirDensityKilogramsCubicMeters = None
SpeedOfSoundMetersPerSecond = None
TabularAtmosphere = numpy.array(
(
# sigma delta theta temp press density a visc k.visc
numpy.array([ '1.21E+00','1.26E+00','1.0451','301.2','1.28E+05','1.48E+00','347.9','18.51','1.25E-05' ]),
numpy.array([ '1.0' ,'1.0' ,'1.0' ,'288.1','1.01E+05','1.23E+00','340.3','17.89','1.46E-05' ] ),
numpy.array([ '8.22E-01','7.85E-01','0.9549','275.2','7.95E+04','1.01E+00','332.5','17.26','1.71E-05' ]),
numpy.array([ '6.69E-01','6.09E-01','0.9098','262.2','6.17E+04','8.19E-01','324.6','16.61','2.03E-05' ]),
numpy.array([ '5.39E-01','4.66E-01','0.8648','249.2','4.72E+04','6.60E-01','316.5','15.95','2.42E-05' ]),
numpy.array([ '4.29E-01','3.52E-01','0.8198','236.2','3.57E+04','5.26E-01','308.1','15.27','2.90E-05' ]),
numpy.array([ '3.38E-01','2.62E-01','0.7748','223.3','2.65E+04','4.14E-01','299.5','14.58','3.53E-05' ]),
numpy.array([ '2.55E-01','1.91E-01','0.7519','216.6','1.94E+04','3.12E-01','295.1','14.22','4.56E-05' ]),
numpy.array([ '1.86E-01','1.40E-01','0.7519','216.6','1.42E+04','2.28E-01','295.1','14.22','6.24E-05' ]),
numpy.array([ '1.36E-01','1.02E-01','0.7519','216.6','1.04E+04','1.67E-01','295.1','14.22','8.54E-05' ]),
numpy.array([ '9.93E-02','7.47E-02','0.7519','216.6','7.57E+03','1.22E-01','295.1','14.22','1.17E-04' ]),
numpy.array([ '7.26E-02','5.46E-02','0.7519','216.6','5.53E+03','8.89E-02','295.1','14.22','1.60E-04' ]),
numpy.array([ '5.27E-02','3.99E-02','0.7585','218.6','4.05E+03','6.45E-02','296.4','14.32','2.22E-04' ]),
numpy.array([ '3.83E-02','2.93E-02','0.7654','220.6','2.97E+03','4.69E-02','297.7','14.43','3.07E-04' ]),
numpy.array([ '2.80E-02','2.16E-02','0.7723','222.5','2.19E+03','3.43E-02','299.1','14.54','4.24E-04' ]),
numpy.array([ '2.05E-02','1.60E-02','0.7792','224.5','1.62E+03','2.51E-02','300.4','14.65','5.84E-04' ]),
numpy.array([ '1.50E-02','1.18E-02','0.7861','226.5','1.20E+03','1.84E-02','301.7','14.75','8.01E-04' ]),
numpy.array([ '1.11E-02','8.77E-03','0.793' ,'228.5','8.89E+02','1.36E-02','303.0','14.86','1.10E-03' ]),
numpy.array([ '8.07E-03','6.55E-03','0.8112','233.7','6.63E+02','9.89E-03','306.5','15.14','1.53E-03' ]),
numpy.array([ '5.92E-03','4.92E-03','0.8304','239.3','4.99E+02','7.26E-03','310.1','15.43','2.13E-03' ]),
numpy.array([ '4.38E-03','3.72E-03','0.8496','244.8','3.77E+02','5.37E-03','313.7','15.72','2.93E-03' ]),
numpy.array([ '3.26E-03','2.83E-03','0.8688','250.4','2.87E+02','4.00E-03','317.2','16.01','4.01E-03' ]),
numpy.array([ '2.44E-03','2.17E-03','0.888' ,'255.9','2.20E+02','3.00E-03','320.7','16.29','5.44E-03' ]),
numpy.array([ '1.84E-03','1.67E-03','0.9072','261.4','1.70E+02','2.26E-03','324.1','16.57','7.34E-03' ]),
numpy.array([ '1.40E-03','1.30E-03','0.9263','266.9','1.31E+02','1.71E-03','327.5','16.85','9.83E-03' ]),
numpy.array([ '1.07E-03','1.01E-03','0.9393','270.6','1.02E+02','1.32E-03','329.8','17.04','1.29E-02' ]),
numpy.array([ '8.38E-04','7.87E-04','0.9393','270.6','7.98E+01','1.03E-03','329.8','17.04','1.66E-02' ]),
numpy.array([ '6.58E-04','6.14E-04','0.9336','269.0','6.22E+01','8.06E-04','328.8','16.96','2.10E-02' ]),
numpy.array([ '5.22E-04','4.77E-04','0.9145','263.5','4.83E+01','6.39E-04','325.4','16.68','2.61E-02' ]),
numpy.array([ '4.12E-04','3.69E-04','0.8954','258.0','3.74E+01','5.04E-04','322.0','16.40','3.25E-02' ]),
numpy.array([ '3.23E-04','2.83E-04','0.8763','252.5','2.87E+01','3.96E-04','318.6','16.12','4.07E-02' ]),
numpy.array([ '2.53E-04','2.17E-04','0.8573','247.0','2.20E+01','3.10E-04','315.1','15.84','5.11E-02' ]),
numpy.array([ '1.96E-04','1.65E-04','0.8382','241.5','1.67E+01','2.41E-04','311.5','15.55','6.46E-02' ]),
numpy.array([ '1.52E-04','1.24E-04','0.8191','236.0','1.26E+01','1.86E-04','308.0','15.26','8.20E-02' ]),
numpy.array([ '1.17E-04','9.34E-05','0.8001','230.5','9.46E+00','1.43E-04','304.4','14.97','1.05E-01' ]),
numpy.array([ '8.91E-05','6.96E-05','0.7811','225.1','7.05E+00','1.09E-04','300.7','14.67','1.34E-01' ]),
numpy.array([ '6.76E-05','5.15E-05','0.7620','219.6','5.22E+00','8.28E-05','297.1','14.38','1.74E-01' ]),
numpy.array([ '5.09E-05','3.79E-05','0.7436','214.3','3.84E+00','6.24E-05','293.4','14.08','2.26E-01' ]),
numpy.array([ '3.79E-05','2.76E-05','0.7300','210.3','2.80E+00','4.64E-05','290.7','13.87','2.99E-01' ]),
numpy.array([ '2.80E-05','2.01E-05','0.7164','206.4','2.03E+00','3.43E-05','288.0','13.65','3.98E-01' ]),
numpy.array([ '2.06E-05','1.45E-05','0.7029','202.5','1.47E+00','2.52E-05','285.3','13.43','5.32E-01' ]),
numpy.array([ '1.51E-05','1.04E-05','0.6893','198.6','1.05E+00','1.85E-05','282.5','13.21','7.16E-01' ]),
numpy.array([ '1.10E-05','7.40E-06','0.6758','194.7','7.50E-01','1.34E-05','279.7','12.98','9.68E-01' ]),
numpy.array([ '7.91E-06','5.24E-06','0.6623','190.8','5.31E-01','9.69E-06','276.9','12.76','1.32E+00' ]),
numpy.array([ '5.68E-06','3.68E-06','0.6488','186.9','3.73E-01','6.96E-06','274.1','12.53','1.80E+00' ]) ) )
def __init__(self):
self.className = self.__class__.__name__
''' convert array of strings into floats '''
#print self.className, 'array shape= ', self.TabularAtmosphere.shape[0]
self.AtmosphereTemperatureKelvins = numpy.empty(self.TabularAtmosphere.shape[0])
self.AirDensityKilogramsCubicMeters = numpy.empty(self.TabularAtmosphere.shape[0])
self.SpeedOfSoundMetersPerSecond = numpy.empty(self.TabularAtmosphere.shape[0])
self.PressurePascals = numpy.empty(self.TabularAtmosphere.shape[0])
indexI = 0
for row in self.TabularAtmosphere:
index = 0
for item in row:
if index == 1:
self.PressurePascals[indexI] = item
elif index == 3:
self.AtmosphereTemperatureKelvins[indexI] = item
elif index == 5:
self.AirDensityKilogramsCubicMeters[indexI] = item
elif index == 6:
self.SpeedOfSoundMetersPerSecond[indexI] = item
index += 1
indexI += 1
#print self.className, "============="
#print self.AtmosphereTemperatureKelvins
'''
Does not check that the x-coordinate sequence xp is increasing.
If xp is not increasing, the results are nonsense. A simple check for increasing is:
'''
if not numpy.all(numpy.diff(self.AltitudeMeters) > 0):
raise ValueError(self.className + ': altitude table is not strictly increasing !!!')
def getAirDensitySeaLevelKilogramsPerCubicMeters(self):
return self.getAirDensityKilogramsPerCubicMeters(0.0)
def getAirDensityKilogramsPerCubicMeters(self, altitudeMeters):
assert (isinstance(altitudeMeters, float)) or isinstance(altitudeMeters, int)
if (altitudeMeters > -1999.0) and (altitudeMeters <= 86000.0):
airDensityKilogramsPerCubicMeters = numpy.interp(altitudeMeters, self.AltitudeMeters, self.AirDensityKilogramsCubicMeters)
#print self.className , ': altitude meters= ', altitudeMeters, ' air density= ', airDensityKilogramsPerCubicMeters, ' kilograms per cubic meters'
return airDensityKilogramsPerCubicMeters
else:
raise ValueError(self.className + ' altitude Meters argument out of bound: ' + str(altitudeMeters))
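# Sketch (values from the table above): at mean sea level the interpolation
# returns the standard density, e.g.
#   Atmosphere().getAirDensityKilogramsPerCubicMeters(0.0)   # ~1.225 kg/m3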
def getTemperatureDegrees(self, altitudeMeters):
assert (isinstance(altitudeMeters, float))
if (altitudeMeters > -1999.0) and (altitudeMeters <= 86000.0):
temperatureKelvins = numpy.interp(altitudeMeters, self.AltitudeMeters, self.AtmosphereTemperatureKelvins)
'''
The temperature T in degrees Celsius (°C) is equal to the temperature T in Kelvin (K) minus 273.15:
'''
temperatureDegrees = temperatureKelvins - 273.15
#print ( self.className , ': altitude= {0} meters - temperature= {1} degrees'.format(altitudeMeters, temperatureDegrees) )
return temperatureDegrees
else:
raise ValueError(self.className + ' altitude Meters argument out of bound: ' + str(altitudeMeters))
def getTemperatureKelvins(self, altitudeMeters):
assert (isinstance(altitudeMeters, float))
if (altitudeMeters > -1999.0) and (altitudeMeters <= 86000.0):
temperatureKelvins = numpy.interp(altitudeMeters, self.AltitudeMeters, self.AtmosphereTemperatureKelvins)
'''
The temperature T in degrees Celsius (°C) is equal to the temperature T in Kelvin (K) minus 273.15:
'''
#print ( self.className , ': altitude= {0} meters - temperature= {1} kelvins'.format(altitudeMeters, temperatureKelvins) )
return temperatureKelvins
else:
raise ValueError(self.className + ' altitude Meters argument out of bound: ' + str(altitudeMeters))
def getSpeedOfSoundMetersPerSecond(self, altitudeMeters):
assert (isinstance(altitudeMeters, float))
if (altitudeMeters > -1999.0) and (altitudeMeters <= 86000.0):
speedOfSound = numpy.interp(altitudeMeters, self.AltitudeMeters, self.SpeedOfSoundMetersPerSecond)
'''
speed of sound in Meters per Second
'''
#print self.className , ': altitude meters= ', altitudeMeters, ' speed of sound= ', speedOfSound, ' meters per second'
return speedOfSound
else:
raise ValueError(self.className + ' altitude Meters argument out of bound: ' + str(altitudeMeters))
def computeGeoPotentialPressureAltitude(self):
raise ValueError (self.className + ': not yet implemented')
def getPressureMeanSeaLevelPascals(self):
return self.getPressurePascals(altitudeMeters = 0.0)
def getPressurePascals(self, altitudeMeters):
assert isinstance(altitudeMeters, float) or isinstance(altitudeMeters, int)
if (altitudeMeters > -1999.0) and (altitudeMeters <= 86000.0):
pressurePascals = numpy.interp(altitudeMeters, self.AltitudeMeters, self.PressurePascals)
pressurePascals = pressurePascals * self.StandardAtmosphericPressureMslPascal
'''
atmospheric pressure in Pascals
'''
#print self.className , ': altitude meters= ', altitudeMeters, ' pressure= ', pressurePascals, ' pascals'
return pressurePascals
else:
raise ValueError(self.className + ' altitude Meters argument out of bound: ' + str(altitudeMeters))
def tas2cas(self, tas , altitude , temp='std', speed_units = 'm/s', alt_units = 'm'):
if speed_units == 'kt':
tas = tas * Knots2MeterPerSecond
elif speed_units == 'm/s':
pass
else:
raise ValueError(' BadaSpeed: tas2cas: unknown speed units= {0}'.format(speed_units))
if alt_units == 'm':
altitudeMeters = altitude
else:
raise ValueError ('not yet implemented')
''' 1.4 adiabatic '''
mu = (1.4 - 1.0 ) / 1.4
densityKgm3 = self.getAirDensityKilogramsPerCubicMeters(altitudeMeters)
pressurePascals = self.getPressurePascals(altitudeMeters)
densityMSLkgm3 = self.getAirDensitySeaLevelKilogramsPerCubicMeters()
pressureMSLpascals = self.getPressureMeanSeaLevelPascals()
cas = 1 + ( mu * densityKgm3 * tas * tas) / ( 2 * pressurePascals)
cas = math.pow(cas , 1.0 / mu) - 1.0
cas = 1 + (pressurePascals / pressureMSLpascals) * (cas)
cas = math.pow(cas, mu) - 1.0
cas = ( 2 * pressureMSLpascals)/ (mu * densityMSLkgm3) * cas
cas = math.pow(cas , 0.5)
return cas
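# Consistency check (follows from the formula above, not stated in the source):
# at mean sea level the density and pressure equal their MSL values, so the
# expression collapses and tas2cas(v, 0.0) returns v, i.e. CAS == TAS at MSL.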
def tas2mach(self, tas , altitude , speed_units = 'm/s' , alt_units = 'm'):
'''
mach = TAS / speed of sound
'''
assert speed_units == 'm/s'
assert alt_units == 'm'
a = self.getSpeedOfSoundMetersPerSecond(altitude)
| |
<filename>test/py/ganeti.hypervisor.hv_kvm_unittest.py
#!/usr/bin/python
#
# Copyright (C) 2010, 2011 Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
# TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Script for testing the hypervisor.hv_kvm module"""
import threading
import tempfile
import unittest
import socket
import os
import struct
import re
from ganeti import serializer
from ganeti import constants
from ganeti import compat
from ganeti import objects
from ganeti import errors
from ganeti import utils
from ganeti import pathutils
from ganeti.hypervisor import hv_kvm
import ganeti.hypervisor.hv_kvm.netdev as netdev
import ganeti.hypervisor.hv_kvm.monitor as monitor
import mock
import testutils
from testutils.config_mock import ConfigMock
class QmpStub(threading.Thread):
"""Stub for a QMP endpoint for a KVM instance
"""
_QMP_BANNER_DATA = {
"QMP": {
"version": {
"package": "",
"qemu": {
"micro": 50,
"minor": 13,
"major": 0,
},
"capabilities": [],
},
}
}
_EMPTY_RESPONSE = {
"return": [],
}
_SUPPORTED_COMMANDS = {
"return": [
{"name": "command"},
{"name": "query-kvm"},
{"name": "eject"},
{"name": "query-status"},
{"name": "query-name"},
]
}
def __init__(self, socket_filename, server_responses):
"""Creates a QMP stub
@type socket_filename: string
@param socket_filename: filename of the UNIX socket that will be created
by this class and used for the communication
@type server_responses: list
@param server_responses: list of responses that the server sends in response
to whatever it receives
"""
threading.Thread.__init__(self)
self.socket_filename = socket_filename
self.script = server_responses[:]
self.socket = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
self.socket.bind(self.socket_filename)
self.socket.listen(1)
def run(self):
# Hypothesis: the messages we receive contain only a complete QMP message
# encoded in JSON.
conn, addr = self.socket.accept()
# Send the banner as the first thing
conn.send(self.encode_string(self._QMP_BANNER_DATA))
# Expect qmp_capabilities and return an empty response
conn.recv(4096)
conn.send(self.encode_string(self._EMPTY_RESPONSE))
# Expect query-commands and return the list of supported commands
conn.recv(4096)
conn.send(self.encode_string(self._SUPPORTED_COMMANDS))
while True:
# We ignore the expected message, as the purpose of this object is not
# to verify the correctness of the communication but to act as a
# partner for the SUT (System Under Test, that is QmpConnection)
msg = conn.recv(4096)
if not msg:
break
if not self.script:
break
response = self.script.pop(0)
if isinstance(response, str):
conn.send(response)
elif isinstance(response, list):
for chunk in response:
conn.send(chunk)
else:
raise errors.ProgrammerError("Unknown response type for %s" % response)
conn.close()
def encode_string(self, message):
return (serializer.DumpJson(message) +
hv_kvm.QmpConnection._MESSAGE_END_TOKEN)
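# Note on framing (assumption based on the raw responses used below): each QMP
# message is serialized as a single JSON line terminated by
# QmpConnection._MESSAGE_END_TOKEN, which matches the literal '\r\n' endings in
# TestQmp.SERVER_RESPONSES, e.g. '{"return": {}}\r\n'.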
class TestQmpMessage(testutils.GanetiTestCase):
def testSerialization(self):
test_data = {
"execute": "command",
"arguments": ["a", "b", "c"],
}
message = hv_kvm.QmpMessage(test_data)
for k, v in test_data.items():
self.assertEqual(message[k], v)
serialized = str(message)
self.assertEqual(len(serialized.splitlines()), 1,
msg="Got multi-line message")
rebuilt_message = hv_kvm.QmpMessage.BuildFromJsonString(serialized)
self.assertEqual(rebuilt_message, message)
self.assertEqual(len(rebuilt_message), len(test_data))
def testDelete(self):
toDelete = "execute"
test_data = {
toDelete: "command",
"arguments": ["a", "b", "c"],
}
message = hv_kvm.QmpMessage(test_data)
oldLen = len(message)
del message[toDelete]
newLen = len(message)
self.assertEqual(oldLen - 1, newLen)
class TestQmp(testutils.GanetiTestCase):
REQUESTS = [
{"execute": "query-kvm", "arguments": []},
{"execute": "eject", "arguments": {"device": "ide1-cd0"}},
{"execute": "query-status", "arguments": []},
{"execute": "query-name", "arguments": []},
]
SERVER_RESPONSES = [
# One message, one send()
'{"return": {"enabled": true, "present": true}}\r\n',
# Message sent using multiple send()
['{"retur', 'n": {}}\r\n'],
# Multiple messages sent using one send()
'{"return": [{"name": "quit"}, {"name": "eject"}]}\r\n'
'{"return": {"running": true, "singlestep": false}}\r\n',
]
EXPECTED_RESPONSES = [
{"enabled": True, "present": True},
{},
[{"name": "quit"}, {"name": "eject"}],
{"running": True, "singlestep": False},
]
def testQmp(self):
# Set up the stub
socket_file = tempfile.NamedTemporaryFile()
os.remove(socket_file.name)
qmp_stub = QmpStub(socket_file.name, self.SERVER_RESPONSES)
qmp_stub.start()
# Set up the QMP connection
qmp_connection = hv_kvm.QmpConnection(socket_file.name)
qmp_connection.connect()
# Format the script
for request, expected_response in zip(self.REQUESTS,
self.EXPECTED_RESPONSES):
response = qmp_connection.Execute(request["execute"],
request["arguments"])
self.assertEqual(response, expected_response)
msg = hv_kvm.QmpMessage({"return": expected_response})
self.assertEqual(len(str(msg).splitlines()), 1,
msg="Got multi-line message")
self.assertRaises(monitor.QmpCommandNotSupported,
qmp_connection.Execute,
"unsupported-command")
def testQmpContextManager(self):
# Set up the stub
socket_file = tempfile.NamedTemporaryFile()
os.remove(socket_file.name)
qmp_stub = QmpStub(socket_file.name, self.SERVER_RESPONSES)
qmp_stub.start()
# Test the context manager functionality
with hv_kvm.QmpConnection(socket_file.name) as qmp:
for request, expected_response in zip(self.REQUESTS,
self.EXPECTED_RESPONSES):
response = qmp.Execute(request["execute"], request["arguments"])
self.assertEqual(response, expected_response)
class TestConsole(unittest.TestCase):
def MakeConsole(self, instance, node, group, hvparams):
cons = hv_kvm.KVMHypervisor.GetInstanceConsole(instance, node, group,
hvparams, {})
self.assertEqual(cons.Validate(), None)
return cons
def testSerial(self):
instance = objects.Instance(name="kvm.example.com",
primary_node="node6017-uuid")
node = objects.Node(name="node6017", uuid="node6017-uuid",
ndparams={})
group = objects.NodeGroup(name="group6134", ndparams={})
hvparams = {
constants.HV_SERIAL_CONSOLE: True,
constants.HV_VNC_BIND_ADDRESS: None,
constants.HV_KVM_SPICE_BIND: None,
}
cons = self.MakeConsole(instance, node, group, hvparams)
self.assertEqual(cons.kind, constants.CONS_SSH)
self.assertEqual(cons.host, node.name)
self.assertEqual(cons.command[0], pathutils.KVM_CONSOLE_WRAPPER)
self.assertEqual(cons.command[1], constants.SOCAT_PATH)
def testVnc(self):
instance = objects.Instance(name="kvm.example.com",
primary_node="node7235-uuid",
network_port=constants.VNC_BASE_PORT + 10)
node = objects.Node(name="node7235", uuid="node7235-uuid",
ndparams={})
group = objects.NodeGroup(name="group3632", ndparams={})
hvparams = {
constants.HV_SERIAL_CONSOLE: False,
constants.HV_VNC_BIND_ADDRESS: "192.0.2.1",
constants.HV_KVM_SPICE_BIND: None,
}
cons = self.MakeConsole(instance, node, group, hvparams)
self.assertEqual(cons.kind, constants.CONS_VNC)
self.assertEqual(cons.host, "192.0.2.1")
self.assertEqual(cons.port, constants.VNC_BASE_PORT + 10)
self.assertEqual(cons.display, 10)
def testSpice(self):
instance = objects.Instance(name="kvm.example.com",
primary_node="node7235",
network_port=11000)
node = objects.Node(name="node7235", uuid="node7235-uuid",
ndparams={})
group = objects.NodeGroup(name="group0132", ndparams={})
hvparams = {
constants.HV_SERIAL_CONSOLE: False,
constants.HV_VNC_BIND_ADDRESS: None,
constants.HV_KVM_SPICE_BIND: "192.0.2.1",
}
cons = self.MakeConsole(instance, node, group, hvparams)
self.assertEqual(cons.kind, constants.CONS_SPICE)
self.assertEqual(cons.host, "192.0.2.1")
self.assertEqual(cons.port, 11000)
def testNoConsole(self):
instance = objects.Instance(name="kvm.example.com",
primary_node="node24325",
network_port=0)
node = objects.Node(name="node24325", uuid="node24325-uuid",
ndparams={})
group = objects.NodeGroup(name="group9184", ndparams={})
hvparams = {
constants.HV_SERIAL_CONSOLE: False,
constants.HV_VNC_BIND_ADDRESS: None,
constants.HV_KVM_SPICE_BIND: None,
}
cons = self.MakeConsole(instance, node, group, hvparams)
self.assertEqual(cons.kind, constants.CONS_MESSAGE)
class TestVersionChecking(testutils.GanetiTestCase):
@staticmethod
def ParseTestData(name):
help = testutils.ReadTestData(name)
return hv_kvm.KVMHypervisor._ParseKVMVersion(help)
def testParseVersion112(self):
self.assertEqual(
self.ParseTestData("kvm_1.1.2_help.txt"), ("1.1.2", 1, 1, 2))
def testParseVersion10(self):
self.assertEqual(self.ParseTestData("kvm_1.0_help.txt"), ("1.0", 1, 0, 0))
def testParseVersion01590(self):
self.assertEqual(
self.ParseTestData("kvm_0.15.90_help.txt"), ("0.15.90", 0, 15, 90))
def testParseVersion0125(self):
self.assertEqual(
self.ParseTestData("kvm_0.12.5_help.txt"), ("0.12.5", 0, 12, 5))
def testParseVersion091(self):
self.assertEqual(
self.ParseTestData("kvm_0.9.1_help.txt"), ("0.9.1", 0, 9, 1))
class TestSpiceParameterList(unittest.TestCase):
def setUp(self):
self.defaults = constants.HVC_DEFAULTS[constants.HT_KVM]
def testAudioCompressionDefaultOn(self):
self.assertTrue(self.defaults[constants.HV_KVM_SPICE_AUDIO_COMPR])
def testVdAgentDefaultOn(self):
self.assertTrue(self.defaults[constants.HV_KVM_SPICE_USE_VDAGENT])
def testTlsCiphersDefaultOn(self):
self.assertTrue(self.defaults[constants.HV_KVM_SPICE_TLS_CIPHERS])
def testBindDefaultOff(self):
self.assertFalse(self.defaults[constants.HV_KVM_SPICE_BIND])
def testAdditionalParams(self):
params = compat.UniqueFrozenset(
getattr(constants, name)
for name in dir(constants)
if name.startswith("HV_KVM_SPICE_"))
fixed = set([
constants.HV_KVM_SPICE_BIND, constants.HV_KVM_SPICE_TLS_CIPHERS,
constants.HV_KVM_SPICE_USE_VDAGENT, constants.HV_KVM_SPICE_AUDIO_COMPR])
self.assertEqual(hv_kvm._SPICE_ADDITIONAL_PARAMS, params - fixed)
class TestHelpRegexps(testutils.GanetiTestCase):
"""Check _BOOT_RE
It has to match -drive.*boot=on|off except if there is another dash-option
at the beginning of the line.
"""
@staticmethod
def SearchTestData(name):
boot_re = hv_kvm.KVMHypervisor._BOOT_RE
help = testutils.ReadTestData(name)
return boot_re.search(help)
def testBootRe112(self):
self.assertFalse(self.SearchTestData("kvm_1.1.2_help.txt"))
def testBootRe10(self):
self.assertFalse(self.SearchTestData("kvm_1.0_help.txt"))
def testBootRe01590(self):
self.assertFalse(self.SearchTestData("kvm_0.15.90_help.txt"))
def testBootRe0125(self):
self.assertTrue(self.SearchTestData("kvm_0.12.5_help.txt"))
def testBootRe091(self):
self.assertTrue(self.SearchTestData("kvm_0.9.1_help.txt"))
def testBootRe091_fake(self):
self.assertFalse(self.SearchTestData("kvm_0.9.1_help_boot_test.txt"))
class TestGetTunFeatures(unittest.TestCase):
def testWrongIoctl(self):
tmpfile = tempfile.NamedTemporaryFile()
# A file does not have the right ioctls, so this must always fail
result = netdev._GetTunFeatures(tmpfile.fileno())
self.assertTrue(result is None)
def _FakeIoctl(self, features, fd, request, buf):
self.assertEqual(request, netdev.TUNGETFEATURES)
(reqno, ) = struct.unpack("I", buf)
self.assertEqual(reqno, 0)
return struct.pack("I", features)
def test(self):
tmpfile = tempfile.NamedTemporaryFile()
fd = tmpfile.fileno()
for features in [0, netdev.IFF_VNET_HDR]:
fn = compat.partial(self._FakeIoctl, features)
result = netdev._GetTunFeatures(fd, _ioctl=fn)
self.assertEqual(result, features)
class TestProbeTapVnetHdr(unittest.TestCase):
def _FakeTunFeatures(self, expected_fd, flags, fd):
self.assertEqual(fd, expected_fd)
return flags
def test(self):
tmpfile = tempfile.NamedTemporaryFile()
fd = tmpfile.fileno()
for flags in [0, netdev.IFF_VNET_HDR]:
fn = compat.partial(self._FakeTunFeatures, fd, flags)
result = netdev._ProbeTapVnetHdr(fd, _features_fn=fn)
if flags == 0:
self.assertFalse(result)
else:
self.assertTrue(result)
def testUnsupported(self):
tmpfile = tempfile.NamedTemporaryFile()
fd = tmpfile.fileno()
self.assertFalse(netdev._ProbeTapVnetHdr(fd, _features_fn=lambda _: None))
class TestGenerateDeviceKVMId(unittest.TestCase):
def test(self):
device = objects.NIC()
target = constants.HOTPLUG_TARGET_NIC
fn = hv_kvm._GenerateDeviceKVMId
device.uuid = "003fc157-66a8-4e6d-8b7e-ec4f69751396"
self.assertTrue(re.match("nic-003fc157-66a8-4e6d", fn(target, device)))
class TestGenerateDeviceHVInfo(testutils.GanetiTestCase):
def testPCI(self):
"""Test the placement of the first PCI device during startup."""
self.MockOut(mock.patch('ganeti.utils.EnsureDirs'))
hypervisor = hv_kvm.KVMHypervisor()
dev_type = constants.HOTPLUG_TARGET_NIC
kvm_devid = "nic-9e7c85f6-b6e5-4243"
hv_dev_type = constants.HT_NIC_PARAVIRTUAL
bus_slots = hypervisor._GetBusSlots()
hvinfo = hv_kvm._GenerateDeviceHVInfo(dev_type,
kvm_devid,
hv_dev_type,
bus_slots)
# NOTE: The PCI slot is zero-based, i.e. 13th slot has addr hex(12)
expected_hvinfo = {
"driver": "virtio-net-pci",
"id": kvm_devid,
"bus": "pci.0",
"addr": hex(constants.QEMU_DEFAULT_PCI_RESERVATIONS),
}
self.assertTrue(hvinfo == expected_hvinfo)
def testSCSI(self):
"""Test the placement of the first SCSI device during startup."""
self.MockOut(mock.patch('ganeti.utils.EnsureDirs'))
hypervisor = hv_kvm.KVMHypervisor()
dev_type = constants.HOTPLUG_TARGET_DISK
kvm_devid = "disk-932df160-7a22-4067"
hv_dev_type = constants.HT_DISK_SCSI_BLOCK
bus_slots = hypervisor._GetBusSlots()
hvinfo = hv_kvm._GenerateDeviceHVInfo(dev_type,
kvm_devid,
hv_dev_type,
| |
import mltk
import os
from explib.eval_methods import get_best_f1, get_adjusted_composite_metrics
from algorithm.utils import GraphNodes, get_data, time_generator, get_sliding_window_data_flow, get_score, \
get_avg_recons
import tfsnippet as spt
import tensorflow as tf
from tqdm import tqdm
from algorithm.InterFusion import MTSAD
from algorithm.InterFusion_swat import MTSAD_SWAT
import numpy as np
from typing import Optional
import pickle
from algorithm.mcmc_recons import mcmc_reconstruct, masked_reconstruct
from algorithm.cal_IPS import cal_IPS
__all__ = ['PredictConfig', 'final_testing', 'build_test_graph']
class PredictConfig(mltk.Config):
load_model_dir: Optional[str]
# evaluation params
test_n_z = 100
test_batch_size = 50
test_start = 0
max_test_size = None # `None` means full test set
save_results = True
output_dirs = 'analysis_results'
train_score_filename = 'train_score.pkl'
test_score_filename = 'test_score.pkl'
preserve_feature_dim = False # whether to preserve the feature dim in score. If `True`, the score will be a 2-dim ndarray
anomaly_score_calculate_latency = 1 # How many scores are averaged for the final score at a timestamp. `1` means use last point in each sliding window only.
plot_recons_results = True
use_mcmc = True # use mcmc on the last point for anomaly detection
mcmc_iter = 10
mcmc_rand_mask = False
n_mc_chain: int = 10
pos_mask = True
mcmc_track = True # use mcmc tracker for anomaly interpretation and calculate IPS.
def build_test_graph(chain: spt.VariationalChain, input_x, origin_chain: spt.VariationalChain=None) -> GraphNodes:
test_recons = tf.reduce_mean(chain.model['x'].log_prob(), axis=0)
logpx = chain.model['x'].log_prob()
logpz = chain.model['z2'].log_prob() + chain.model['z1'].log_prob()
logqz_x = chain.variational['z1'].log_prob() + chain.variational['z2'].log_prob()
test_lb = tf.reduce_mean(logpx + logpz - logqz_x, axis=0)
log_joint = logpx + logpz
latent_log_prob = logqz_x
test_ll = spt.importance_sampling_log_likelihood(log_joint=log_joint, latent_log_prob=latent_log_prob, axis=0)
test_nll = -test_ll
# average over sample dim
if origin_chain is not None:
full_recons_prob = tf.reduce_mean(
(chain.model['x'].distribution.base_distribution.log_prob(input_x) -
origin_chain.model['x'].distribution.base_distribution.log_prob(input_x)),
axis=0
)
else:
full_recons_prob = tf.reduce_mean(chain.model['x'].distribution.base_distribution.log_prob(input_x), axis=0)
if origin_chain is not None:
origin_log_joint = origin_chain.model['x'].log_prob() + origin_chain.model['z1'].log_prob() + origin_chain.model['z2'].log_prob()
origin_latent_log_prob = origin_chain.variational['z1'].log_prob() + origin_chain.variational['z2'].log_prob()
origin_ll = spt.importance_sampling_log_likelihood(log_joint=origin_log_joint, latent_log_prob=origin_latent_log_prob, axis=0)
test_ll_score = test_ll - origin_ll
else:
test_ll_score = test_ll
outputs = {
'test_nll': test_nll,
'test_lb': test_lb,
'test_recons': test_recons,
'test_kl': test_recons - test_lb,
'full_recons_prob': full_recons_prob,
'test_ll': test_ll_score
}
return GraphNodes(outputs)
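# Sketch of how this metric graph is consumed later in this module: the returned
# GraphNodes is passed to final_testing() together with the same placeholders, e.g.
#   test_metrics = build_test_graph(chain, input_x)
#   stats, full_recons_probs, ll = final_testing(test_metrics, input_x, input_u,
#                                                test_flow, total_batch_count, y_test)
# where `test_flow`, `total_batch_count` and `y_test` are assumed to be built by
# the surrounding evaluation script.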
def build_recons_graph(chain: spt.VariationalChain, window_length, feature_dim, unified_x_std=False) -> GraphNodes:
# average over sample dim
recons_x = tf.reduce_mean(chain.model['x'].distribution.base_distribution.mean, axis=0)
recons_x = spt.utils.InputSpec(shape=['?', window_length, feature_dim]).validate('recons', recons_x)
if unified_x_std:
recons_x_std = chain.model['x'].distribution.base_distribution.std
recons_x_std = spt.ops.broadcast_to_shape(recons_x_std, tf.shape(recons_x))
else:
recons_x_std = tf.reduce_mean(chain.model['x'].distribution.base_distribution.std, axis=0)
recons_x_std = spt.utils.InputSpec(shape=['?', window_length, feature_dim]).validate('recons_std', recons_x_std)
return GraphNodes({'recons_x': recons_x, 'recons_x_std': recons_x_std})
def get_recons_results(recons_nodes: GraphNodes, input_x, input_u, data_flow: spt.DataFlow, total_batch_count, dataset,
mask=None, rand_x=None):
data_flow = data_flow.threaded(5)
recons_collector = []
recons_std_collector = []
session = spt.utils.get_default_session_or_error()
with data_flow:
for batch_x, batch_u in tqdm(data_flow, unit='step', total=total_batch_count, ascii=True):
if mask is not None:
batch_mask = np.zeros(shape=batch_x.shape)
batch_mask[:, -1, :] = 1 # mask all dims of the last point in x
if rand_x is not None:
batch_output = recons_nodes.eval(session,
feed_dict={input_x: batch_x, input_u: batch_u, mask: batch_mask,
rand_x: np.random.random(batch_x.shape)})
else:
batch_output = recons_nodes.eval(session, feed_dict={input_x: batch_x, input_u: batch_u, mask: batch_mask})
else:
batch_output = recons_nodes.eval(session, feed_dict={input_x: batch_x, input_u: batch_u})
for k, v in batch_output.items():
if k == 'recons_x':
if dataset == 'SWaT' or dataset == 'WADI':
# idx = min(10, v.shape[1])
recons_collector.append(v[:, -10:, :])
else:
recons_collector.append(v)
elif k == 'recons_x_std':
if dataset == 'SWaT' or dataset == 'WADI':
# idx = min(10, v.shape[1])
recons_std_collector.append(v[:, -10:, :])
else:
recons_std_collector.append(v)
all_recons = np.concatenate(recons_collector, axis=0) # (data_length - window_length + 1, window_length, x_dim)
print(all_recons.shape)
all_recons_std = np.concatenate(recons_std_collector, axis=0)
return all_recons, all_recons_std
def final_testing(test_metrics: GraphNodes, input_x, input_u,
data_flow: spt.DataFlow, total_batch_count, y_test=None, mask=None, rand_x=None):
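    """
    Evaluate `test_metrics` over `data_flow`. Returns per-metric epoch
    statistics (averaged over batches), the full per-point reconstruction
    probabilities with shape (data_length - window_length + 1,
    window_length, x_dim), and the per-window log-likelihood scores.
    """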
data_flow = data_flow.threaded(5)
full_recons_collector = []
ll_collector = []
epoch_out = {}
stats = {}
session = spt.utils.get_default_session_or_error()
with data_flow:
for batch_x, batch_u in tqdm(data_flow, unit='step', total=total_batch_count, ascii=True):
if mask is not None:
batch_mask = np.zeros(shape=batch_x.shape)
batch_mask[:, -1, :] = 1 # mask all dims of the last point in x
if rand_x is not None:
batch_output = test_metrics.eval(session, feed_dict={input_x: batch_x, input_u: batch_u, mask: batch_mask,
rand_x: np.random.random(batch_x.shape)})
else:
batch_output = test_metrics.eval(session,
feed_dict={input_x: batch_x, input_u: batch_u, mask: batch_mask})
else:
batch_output = test_metrics.eval(session, feed_dict={input_x: batch_x, input_u: batch_u})
for k, v in batch_output.items():
if k == 'full_recons_prob':
full_recons_collector.append(v)
elif k == 'test_ll':
ll_collector.append(v)
if k not in epoch_out:
epoch_out[k] = []
epoch_out[k].append(v)
else:
if k not in epoch_out:
epoch_out[k] = []
epoch_out[k].append(v)
        # save the results of this epoch and compute epoch stats, averaging over both the batch and window_length dims
for k, v in epoch_out.items():
epoch_out[k] = np.concatenate(epoch_out[k], axis=0)
if k not in stats:
stats[k] = []
stats[k].append(float(np.mean(epoch_out[k])))
        # collect the full reconstruction probabilities for computing the anomaly score
full_recons_probs = np.concatenate(full_recons_collector, axis=0) # (data_length-window_length+1, window_length, x_dim)
ll = np.concatenate(ll_collector, axis=0)
if y_test is not None:
assert full_recons_probs.shape[0] + full_recons_probs.shape[1] - 1 == len(y_test)
tmp1 = []
for i in range(full_recons_probs.shape[0]):
if y_test[i + full_recons_probs.shape[1] - 1] < 0.5:
tmp1.append(np.sum(full_recons_probs[i, -1], axis=-1)) # normal point recons score
stats['normal_point_test_recons'] = [float(np.mean(tmp1))]
# calculate average statistics
for k, v in stats.items():
stats[k] = float(np.mean(v))
return stats, full_recons_probs, ll
def mcmc_tracker(flow: spt.DataFlow, baseline, model, input_x, input_u, mask, max_iter, total_window_num,
window_length, x_dim, mask_last=False, pos_mask=False, use_rand_mask=False, n_mc_chain=1):
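    """
    Anomaly-interpretation tracker: for each window, progressively mask
    the lowest-scoring dimensions (or positions, when `pos_mask` is set),
    re-impute the masked entries via MCMC reconstruction, and record how
    the window score evolves. The mask size K grows from an initial guess
    until the normalized score reaches `baseline / window_length` or the
    candidate set is exhausted; the per-window history and the best
    score / mask size are returned in a dict keyed by window index.
    """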
    # `baseline` is the average total score of a window on the training set
session = spt.utils.get_default_session_or_error()
last_x = tf.placeholder(dtype=tf.float32, shape=[None, window_length, x_dim], name='last_x')
x_r = masked_reconstruct(model.reconstruct, last_x, input_u, mask)
score, recons_mean, recons_std = model.get_score(x_embed=x_r, x_eval=input_x, u=input_u)
tot_score = tf.reduce_sum(tf.multiply(score, tf.cast((1-mask), score.dtype)))
def avg_multi_chain(x, n_chain):
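        """Average the results of `n_chain` interleaved chains (built via np.repeat) back to one result per window."""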
shape = (-1,) + (n_chain,) + x.shape[1:]
return np.mean(x.reshape(shape), axis=1)
res = {}
with flow.threaded(5) as flow:
for batch_x, batch_u, batch_score, batch_ori_recons, batch_ori_std, batch_idx \
in tqdm(flow, unit='step', total=total_window_num, ascii=True):
batch_idx = batch_idx[0]
res[batch_idx] = {'x': [batch_x], 'recons': [batch_ori_recons], 'std': [batch_ori_std], 'score': [batch_score],
'K': [0], 'iter': [-1], 'mask': [np.zeros(shape=batch_x.shape)],
'total_score': [np.mean(np.sum(batch_score, axis=-1))]}
best_score = batch_score
best_total_score = np.mean(np.sum(batch_score, axis=-1))
best_K = 0
if pos_mask:
pos_scores = np.mean(batch_score, axis=0) # (window, x_dim)
sorted_pos_idx = np.argsort(pos_scores, axis=None)
potential_dim_num = np.sum((pos_scores < (baseline/(x_dim*window_length))).astype(np.int32))
else:
dim_scores = np.mean(batch_score, axis=(-2,-3)) # (x_dim, )
sorted_dim_idx = np.argsort(dim_scores)
potential_dim_num = np.sum((dim_scores < (baseline/(x_dim*window_length))).astype(np.int32)) # num of dims whose avg score < baseline
if potential_dim_num > 0:
K_init = max(potential_dim_num//5, 1)
K_inc = max(potential_dim_num//10, 1)
else:
res[batch_idx]['best_score'] = best_score
res[batch_idx]['best_total_score'] = best_total_score
res[batch_idx]['best_K'] = best_K
continue
if use_rand_mask:
rand_x = np.random.random(size=batch_x.shape)
if pos_mask:
max_K = x_dim * window_length
else:
max_K = x_dim
for K in range(K_init, min(potential_dim_num+1, max_K), K_inc):
if pos_mask:
mask_idx = sorted_pos_idx[:K]
batch_mask = np.zeros(shape=batch_x.shape)
batch_mask = batch_mask.reshape([batch_x.shape[0], -1])
batch_mask[:, mask_idx] = 1
batch_mask = batch_mask.reshape(batch_x.shape)
else:
mask_idx = sorted_dim_idx[:K]
batch_mask = np.zeros(shape=batch_x.shape)
batch_mask[:, :, mask_idx] = 1
if mask_last:
batch_mask[:, -1, :] = 1
batch_last_x = batch_x
if use_rand_mask:
batch_last_x = np.where(batch_mask.astype(np.bool), rand_x, batch_last_x)
if n_mc_chain > 1:
init_x = np.repeat(batch_x, n_mc_chain, axis=0)
init_u = np.repeat(batch_u, n_mc_chain, axis=0)
init_mask = np.repeat(batch_mask, n_mc_chain, axis=0)
init_last_x = np.repeat(batch_last_x, n_mc_chain, axis=0)
for i in range(max_iter):
if n_mc_chain > 1:
x_mc, x_recons, x_std, x_score, x_tot_score = \
session.run([x_r, recons_mean, recons_std, score, tot_score],
feed_dict={input_x: init_x, input_u: init_u, mask: init_mask,
last_x: init_last_x})
init_last_x = x_mc
x_mc = avg_multi_chain(x_mc, n_mc_chain)
x_recons = avg_multi_chain(x_recons, n_mc_chain)
x_std = avg_multi_chain(x_std, n_mc_chain)
x_score = avg_multi_chain(x_score, n_mc_chain)
x_tot_score = float(x_tot_score) / float(n_mc_chain)
else:
x_mc, x_recons, x_std, x_score, x_tot_score = \
session.run([x_r, recons_mean, recons_std, score, tot_score],
feed_dict={input_x: batch_x, input_u: batch_u, mask: batch_mask, last_x: batch_last_x})
batch_last_x = x_mc
total_score = float(x_tot_score) / (window_length * x_dim - np.sum(batch_mask)) / batch_x.shape[0] * x_dim
res[batch_idx]['x'].append(x_mc)
res[batch_idx]['recons'].append(x_recons)
res[batch_idx]['std'].append(x_std)
res[batch_idx]['score'].append(x_score)
res[batch_idx]['K'].append(K)
res[batch_idx]['iter'].append(i)
res[batch_idx]['mask'].append(batch_mask)
res[batch_idx]['total_score'].append(total_score)
last_score = res[batch_idx]['total_score'][-1]
if last_score >= best_total_score:
best_total_score = last_score
best_score = res[batch_idx]['score'][-1]
best_K = res[batch_idx]['K'][-1]
if best_total_score >= (baseline/window_length):
break
res[batch_idx]['best_score'] = best_score
res[batch_idx]['best_total_score'] = best_total_score
res[batch_idx]['best_K'] = best_K
return res
def log_mean_exp(x, axis, keepdims=False):
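    """Numerically stable log(mean(exp(x))) along `axis`."""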
x_max = np.max(x, axis=axis, keepdims=True)
ret = x_max + np.log(np.mean(np.exp(x - x_max), axis=axis, keepdims=True))
if not keepdims:
ret = np.squeeze(ret, axis=axis)
return ret
def log_sum_exp(x, axis, keepdims=False):
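    """Numerically stable log(sum(exp(x))) along `axis`."""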
x_max = np.max(x, axis=axis, keepdims=True)
ret = x_max + np.log(np.sum(np.exp(x - x_max), axis=axis, keepdims=True))
if not keepdims:
ret = np.squeeze(ret, axis=axis)
return ret
def main(exp: mltk.Experiment[PredictConfig], test_config: PredictConfig):
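    """Entry point for offline testing: restore the training configuration from `--load_model_dir`, prepare the dataset, and evaluate with `test_config`."""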
if test_config.load_model_dir is None:
raise ValueError('`--load_model_dir` is required.')
exp_config_path = os.path.join(test_config.load_model_dir, 'config.json')
from algorithm.stack_train import ExpConfig
loader = mltk.ConfigLoader(ExpConfig())
loader.load_file(exp_config_path)
train_config = loader.get()
print(mltk.format_key_values(train_config, title='Train configurations'))
print('')
print(mltk.format_key_values(test_config, title='Test configurations'))
print('')
# set TFSnippet settings
spt.settings.enable_assertions = False
spt.settings.check_numerics = train_config.check_numerics
exp.make_dirs(test_config.output_dirs)
# prepare the data
    # load the train/test splits of the dataset
(x_train, _), (x_test, y_test) = \
get_data(train_config.dataset, train_config.train.max_train_size, train_config.test.max_test_size,
train_start=train_config.train.train_start, test_start=train_config.test.test_start,
valid_portion=train_config.train.valid_portion)
if train_config.use_time_info:
        u_train = np.asarray([time_generator(_i) for _i in