self.assertEqual(type(i), Set(str))
def test_set_listof_tupleof_constructors(self):
s1 = Set(int)(ListOf(int)([1, 1]))
self.assertEqual(len(s1), 1)
s2 = Set(int)(TupleOf(int)((1, 1)))
self.assertEqual(len(s2), 1)
def test_list_of_tuples_transpose(self):
listOfTuples = ListOf(NamedTuple(x=int, y=str, z=bool))()
listOfTuples.append(dict(x=1, y="hi", z=False))
listOfTuples.append(dict(x=2, y="hihi", z=True))
tupleOfLists = listOfTuples.transpose()
self.assertEqual(tupleOfLists.x, [1, 2])
self.assertEqual(tupleOfLists.y, ['hi', 'hihi'])
self.assertEqual(tupleOfLists.z, [False, True])
def test_const_dict_equality_with_python(self):
CD = ConstDict(OneOf(int, str), OneOf(int, str))
someDicts = [{}, {1: 2}, {1: 2, 3: 4}, {"a": 10, "b": "c"}]
for d1 in someDicts:
for d2 in someDicts:
self.assertEqual(CD(d1) == CD(d2), d1 == d2)
self.assertEqual(CD(d1) == d2, d1 == d2)
self.assertEqual(d1 == CD(d2), d1 == d2)
def test_alternative_reverse_operators(self):
A = Alternative("A", a={'a': int}, b={'b': str},
__radd__=lambda lhs, rhs: "radd",
__rsub__=lambda lhs, rhs: "rsub",
__rmul__=lambda lhs, rhs: "rmul",
__rmatmul__=lambda lhs, rhs: "rmatmul",
__rtruediv__=lambda lhs, rhs: "rtruediv",
__rfloordiv__=lambda lhs, rhs: "rfloordiv",
__rmod__=lambda lhs, rhs: "rmod",
__rpow__=lambda lhs, rhs: "rpow",
__rlshift__=lambda lhs, rhs: "rlshift",
__rrshift__=lambda lhs, rhs: "rrshift",
__rand__=lambda lhs, rhs: "rand",
__rxor__=lambda lhs, rhs: "rxor",
__ror__=lambda lhs, rhs: "ror"
)
values = [1, Int16(1), UInt64(1), 1.234, Float32(1.234), True, "abc",
ListOf(int)((1, 2)), ConstDict(str, str)({"a": "1"})]
for v in values:
self.assertEqual(v + A.a(), "radd")
self.assertEqual(v - A.a(), "rsub")
self.assertEqual(v * A.a(), "rmul")
self.assertEqual(v @ A.a(), "rmatmul")
self.assertEqual(v / A.a(), "rtruediv")
self.assertEqual(v // A.a(), "rfloordiv")
if type(v) != str:
self.assertEqual(v % A.a(), "rmod")
self.assertEqual(v ** A.a(), "rpow")
self.assertEqual(v << A.a(), "rlshift")
self.assertEqual(v >> A.a(), "rrshift")
self.assertEqual(v & A.a(), "rand")
self.assertEqual(v ^ A.a(), "rxor")
self.assertEqual(v | A.a(), "ror")
with self.assertRaises(Exception):
A.a() + v
with self.assertRaises(Exception):
A.a() - v
with self.assertRaises(Exception):
A.a() * v
with self.assertRaises(Exception):
A.a() @ v
with self.assertRaises(Exception):
A.a() / v
with self.assertRaises(Exception):
A.a() // v
with self.assertRaises(Exception):
A.a() % v
with self.assertRaises(Exception):
A.a() ** v
with self.assertRaises(Exception):
A.a() << v
with self.assertRaises(Exception):
A.a() >> v
with self.assertRaises(Exception):
A.a() & v
with self.assertRaises(Exception):
A.a() ^ v
with self.assertRaises(Exception):
A.a() | v
def test_alternative_missing_inplace_operators_fallback(self):
A = Alternative("A", a={'a': int}, b={'b': str},
__add__=lambda self, other: "worked",
__sub__=lambda self, other: "worked",
__mul__=lambda self, other: "worked",
__matmul__=lambda self, other: "worked",
__truediv__=lambda self, other: "worked",
__floordiv__=lambda self, other: "worked",
__mod__=lambda self, other: "worked",
__pow__=lambda self, other: "worked",
__lshift__=lambda self, other: "worked",
__rshift__=lambda self, other: "worked",
__and__=lambda self, other: "worked",
__or__=lambda self, other: "worked",
__xor__=lambda self, other: "worked",
)
v = A.a()
v += 10
self.assertEqual(v, "worked")
v = A.a()
v -= 10
self.assertEqual(v, "worked")
v = A.a()
v *= 10
self.assertEqual(v, "worked")
v = A.a()
v @= 10
self.assertEqual(v, "worked")
v = A.a()
v /= 10
self.assertEqual(v, "worked")
v = A.a()
v //= 10
self.assertEqual(v, "worked")
v = A.a()
v %= 10
self.assertEqual(v, "worked")
v = A.a()
v **= 10
self.assertEqual(v, "worked")
v = A.a()
v <<= 10
self.assertEqual(v, "worked")
v = A.a()
v >>= 10
self.assertEqual(v, "worked")
v = A.a()
v &= 10
self.assertEqual(v, "worked")
v = A.a()
v |= 10
self.assertEqual(v, "worked")
v = A.a()
v ^= 10
self.assertEqual(v, "worked")
def test_list_and_tuple_of_compare(self):
things = [
ListOf(int)([1, 2]),
ListOf(int)([2, 2]),
ListOf(int)([1, 3]),
ListOf(int)([1, 2, 3]),
ListOf(int)([2, 2, 3]),
ListOf(int)([1, 3, 3]),
ListOf(float)([1, 2]),
ListOf(float)([2, 2]),
ListOf(float)([1, 3]),
ListOf(float)([1, 2, 3]),
ListOf(float)([2, 2, 3]),
ListOf(float)([1, 3, 3]),
ListOf(float)([1.0, 2.0]),
TupleOf(int)([1, 2]),
TupleOf(int)([2, 2]),
TupleOf(int)([1, 3]),
TupleOf(int)([1, 2, 3]),
TupleOf(int)([2, 2, 3]),
TupleOf(int)([1, 3, 3]),
TupleOf(float)([1, 2]),
TupleOf(float)([2, 2]),
TupleOf(float)([1, 3]),
TupleOf(float)([1, 2, 3]),
TupleOf(float)([2, 2, 3]),
TupleOf(float)([1, 3, 3]),
TupleOf(float)([1.0, 2.0]),
Tuple(int, float)((1, 2)),
Tuple(int, float, int)((1, 2, 3)),
Tuple(int, float)((2, 2)),
Tuple(int, float, int)((2, 2, 3)),
NamedTuple(x=int, y=float)((1, 2)),
NamedTuple(x=int, y=float, z=float)((1, 2, 3)),
NamedTuple(x=int, y=float)((2, 2)),
NamedTuple(x=int, y=float, z=int)((2, 2, 3))
]
for t1 in things:
for t2 in things:
print(t1, t2)
# lists and tuples are not themselves comparable
if ("List" in str(type(t1))) != ("List" in str(type(t2))):
with self.assertRaises(TypeError):
t1 < t2
with self.assertRaises(TypeError):
t1 > t2
with self.assertRaises(TypeError):
t1 <= t2
with self.assertRaises(TypeError):
t1 >= t2
self.assertTrue(t1 != t2)
self.assertFalse(t1 == t2)
else:
typ = list if "List" in str(type(t1)) else tuple
t1Untyped = typ(t1)
t2Untyped = typ(t2)
self.assertEqual(t1 < t2, t1Untyped < t2Untyped)
self.assertEqual(t1 <= t2, t1Untyped <= t2Untyped)
self.assertEqual(t1 > t2, t1Untyped > t2Untyped)
self.assertEqual(t1 >= t2, t1Untyped >= t2Untyped)
self.assertEqual(t1 != t2, t1Untyped != t2Untyped)
self.assertEqual(t1 == t2, t1Untyped == t2Untyped)
def test_docstrings_all_types(self):
# TODO: actually test all types
types = [
bool,
Int8,
Int16,
Int32,
int,
UInt8,
UInt16,
UInt32,
UInt64,
str,
bytes,
ListOf(str),
TupleOf(int),
Tuple(int, str, int),
NamedTuple(a=int, b=str),
Dict(int, str),
ConstDict(int, str),
Set(str),
Alternative("a", s1={'f1': int, 'f2': str}, s2={'f3': bool}),
Value(3),
PointerTo(str),
]
good = True
for T in types:
type_docstring = T.__doc__
self.assertTrue(type_docstring, f"Type {T} missing docstring")
for a in dir(T):
m = getattr(T, a)
if callable(m):
docstring = m.__doc__
# Ignore certain magic methods that don't always have docstrings, even for builtin types.
# And ignore some specific Alternative subtypes defined in the test cases.
if a in ['__format__', '__getnewargs__', 's1', 's2']:
continue
if not docstring:
print(f"Type {T} missing docstring for '{a}'")
good = False
continue
self.assertTrue(docstring, f"Type {T} missing docstring for '{a}'")
# Check for overlong lines
max_line_len = 80
if a in ['__index__']: # Ignore some builtin docstrings with overlong lines.
continue
for l in docstring.splitlines():
if len(l) > max_line_len:
print(f"docstring line too long {len(l)} > {max_line_len} for '{T}.{a}':\n{l}")
good = False
self.assertTrue(good) # see output for specific problems
@flaky(max_runs=3, min_passes=1)
def test_list_of_uint8_from_bytes_perf(self):
someBytes = b"asdf" * 1024 * 1024
t0 = time.time()
ListOf(UInt8)(someBytes)
t1 = time.time()
elapsed = (t1 - t0)
# I get .001, but if we use the normal interpreter loop, .2
assert elapsed < .02
def test_iterate_dict_and_change_size_throws(self):
x = Dict(int, int)({1: 2})
with self.assertRaisesRegex(RuntimeError, "dictionary size changed"):
for k in x:
x[k + 1] = 2
def test_iterate_set_and_change_size_throws(self):
x = Set(int)([1])
with self.assertRaisesRegex(RuntimeError, "set size changed"):
for k in x:
x.add(k + 1)
def test_construct_named_tuple_with_other_named_tuple(self):
# we should be matching the names up correctly
T1 = NamedTuple(x=int, y=str)
T2 = NamedTuple(y=str, x=int)
T3 = NamedTuple(z=float, y=str, x=int)
assert T1(T2(T1(x=10, y='hello'))) == T1(x=10, y='hello')
# we can construct a bigger tuple from a smaller one
assert T3(T2(x=10, y='hello')).y == 'hello'
# but not in reverse
with self.assertRaises(TypeError):
T2(T3(x=10, y='hello'))
def test_dict_update_refcounts(self):
d = Dict(TupleOf(int), TupleOf(int))()
k = TupleOf(int)([1])
v = TupleOf(int)([1, 2])
n = NamedTuple(k=TupleOf(int), v=TupleOf(int))(k=k, v=v)
assert _types.refcount(k) == 2
assert _types.refcount(v) == 2
d.update({n.k: n.v})
assert _types.refcount(k) == 3
assert _types.refcount(v) == 3
d.clear()
assert _types.refcount(k) == 2
assert _types.refcount(v) == 2
d[k] = v
assert _types.refcount(k) == 3
assert _types.refcount(v) == 3
d.pop(k)
assert _types.refcount(k) == 2
assert _types.refcount(v) == 2
d[k] = v
assert _types.refcount(k) == 3
assert _types.refcount(v) == 3
del d[k]
assert _types.refcount(k) == 2
assert _types.refcount(v) == 2
d[k] = v
assert _types.refcount(k) == 3
assert _types.refcount(v) == 3
d = None
assert _types.refcount(k) == 2
assert _types.refcount(v) == 2
n = None
assert _types.refcount(k) == 1
assert _types.refcount(v) == 1
def test_dict_from_const_dict_refcounts(self):
D = Dict(TupleOf(int), TupleOf(int))
CD = ConstDict(TupleOf(int), TupleOf(int))
k1 = TupleOf(int)([1])
k2 = TupleOf(int)([1])
assert _types.refcount(k1) == 1
assert _types.refcount(k2) == 1
cd = CD({k1: k2})
assert _types.refcount(k1) == 2
assert _types.refcount(k2) == 2
d = D(cd)
assert _types.refcount(k1) == 3
assert _types.refcount(k2) == 3
cd = None
assert _types.refcount(k1) == 2
assert _types.refcount(k2) == 2
d = None # noqa
assert _types.refcount(k1) == 1
assert _types.refcount(k2) == 1
def test_set_refcounts_tupleof_int(self):
s = Set(TupleOf(int))()
k1 = TupleOf(int)([1])
k2 = TupleOf(int)([2])
s.add(k1)
assert _types.refcount(k1) == 2
s.discard(k1)
assert _types.refcount(k1) == 1
s.update([k1, k2])
s.discard(k1)
s.discard(k2)
assert _types.refcount(k1) == 1
def test_const_dict_from_dict_refcounts(self):
CD = ConstDict(TupleOf(int), TupleOf(int))
k1 = TupleOf(int)([1])
k2 = TupleOf(int)([2])
d = Dict(TupleOf(int), TupleOf(int))()
d[k1] = k2
assert _types.refcount(k1) == 2
assert _types.refcount(k2) == 2
assert _types.refcount(d) == 1
cd2 = CD(d)
assert _types.refcount(k1) == 3
assert _types.refcount(k2) == 3
assert _types.refcount(d) == 1
cd2 = None # noqa
assert _types.refcount(k1) == 2
assert _types.refcount(k2) == 2
d = None
assert _types.refcount(k1) == 1
assert _types.refcount(k2) == 1
def test_list_of_constructor_from_numpy(self):
array = numpy.array([[0.0, 1.0, 2.0], [3.0, 4.0, 5.0]])
assert ListOf(float)(array[0]) == ListOf(float)([0.0, 1.0, 2.0])
assert ListOf(float)(array[:, 1]) == ListOf(float)([1.0, 4.0])
assert
tracking any tibia worlds.")
return
if params is None:
yield from self.bot.say(invalid_arguments)
return
entries = []
online_entries = []
ask_channel = get_channel_by_name(self.bot, ask_channel_name, ctx.message.server)
if ctx.message.channel.is_private or ctx.message.channel == ask_channel:
per_page = 20
else:
per_page = 5
char = None
params = params.split(",")
if len(params) < 2 or len(params) > 3:
yield from self.bot.say(invalid_arguments)
return
params[0] = params[0].lower()
if params[0] in KNIGHT:
vocation = "knight"
elif params[0] in DRUID:
vocation = "druid"
elif params[0] in SORCERER:
vocation = "sorcerer"
elif params[0] in PALADIN:
vocation = "paladin"
elif params[0] in ["any", "all", "everything", "anything"]:
vocation = "characters"
else:
yield from self.bot.say(invalid_arguments)
return
# params[1] could be a character's name, a character's level or one of the level ranges
# If it's not a number, it should be a player's name
if not is_numeric(params[1]):
# We shouldn't have another parameter if a character name was specified
if len(params) == 3:
yield from self.bot.say(invalid_arguments)
return
char = yield from get_character(params[1])
if type(char) is not dict:
yield from self.bot.say("I couldn't find a character with that name.")
return
low, high = get_share_range(char["level"])
title = "I found the following {0}s in share range with {1} ({2}-{3}):".format(vocation, char["name"],
low, high)
empty = "I didn't find any {0}s in share range with **{1}** ({2}-{3})".format(vocation, char["name"],
low, high)
else:
# Check if we have another parameter, meaning this is a level range
if len(params) == 3:
try:
level1 = int(params[1])
level2 = int(params[2])
except ValueError:
yield from self.bot.say(invalid_arguments)
return
if level1 <= 0 or level2 <= 0:
yield from self.bot.say("You entered an invalid level.")
return
low = min(level1, level2)
high = max(level1, level2)
title = "I found the following {0}s between levels {1} and {2}".format(vocation, low, high)
empty = "I didn't find any {0}s between levels **{1}** and **{2}**".format(vocation, low, high)
# We only got a level, so we get the share range for it
else:
if int(params[1]) <= 0:
yield from self.bot.say("You entered an invalid level.")
return
low, high = get_share_range(int(params[1]))
title = "I found the following {0}s in share range with level {1} ({2}-{3})".format(vocation, params[1],
low, high)
empty = "I didn't find any {0}s in share range with level **{1}** ({2}-{3})".format(vocation,
params[1],
low, high)
c = userDatabase.cursor()
try:
if vocation == "characters":
vocation = ""
c.execute("SELECT name, user_id, ABS(last_level) as level, vocation FROM chars "
"WHERE vocation LIKE ? AND level >= ? AND level <= ? AND world = ?"
"ORDER by level DESC", ("%"+vocation, low, high, tracked_world, ))
count = 0
online_list = [x.split("_", 1)[1] for x in global_online_list]
while True:
player = c.fetchone()
if player is None:
break
# Do not show the same character that was searched for
if char is not None and char["name"] == player["name"]:
continue
owner = get_member(self.bot, player["user_id"], ctx.message.server)
# If the owner is not in server, skip
if owner is None:
continue
count += 1
player["owner"] = owner.display_name
player["online"] = ""
line_format = "**{name}** - Level {level} - @**{owner}** {online}"
if vocation == "":
line_format = "**{name}** - Level {level} - {vocation} - @**{owner}** {online}"
if player["name"] in online_list:
player["online"] = EMOJI[":small_blue_diamond:"]
online_entries.append(line_format.format(**player))
else:
entries.append(line_format.format(**player))
if count < 1:
yield from self.bot.say(empty)
return
finally:
c.close()
if online_entries:
description = EMOJI[":small_blue_diamond:"]+" = online"
else:
description = ""
pages = Paginator(self.bot, message=ctx.message, entries=online_entries+entries, per_page=per_page,
title=title, description=description)
try:
yield from pages.paginate()
except CannotPaginate as e:
yield from self.bot.say(e)
@commands.command(pass_context=True, aliases=['guildcheck', 'checkguild'])
@asyncio.coroutine
def guild(self, ctx, *, name=None):
"""Checks who is online in a guild"""
permissions = ctx.message.channel.permissions_for(get_member(self.bot, self.bot.user.id, ctx.message.server))
if not permissions.embed_links:
yield from self.bot.say("Sorry, I need `Embed Links` permission for this command.")
return
if name is None:
yield from self.bot.say("Tell me the guild you want me to check.")
return
guild = yield from get_guild_online(name)
if guild == ERROR_DOESNTEXIST:
yield from self.bot.say("The guild {0} doesn't exist.".format(name))
return
if guild == ERROR_NETWORK:
yield from self.bot.say("Can you repeat that? I had some trouble communicating.")
return
embed = discord.Embed()
embed.set_author(name="{name} ({world})".format(**guild),
url=url_guild + urllib.parse.quote(guild["name"]),
icon_url="http://static.tibia.com/images/global/general/favicon.ico"
)
embed.description = ""
embed.set_thumbnail(url=guild["logo_url"])
if guild.get("guildhall") is not None:
guildhouse = yield from get_house(guild["guildhall"])
if type(guildhouse) is dict:
embed.description += "They own the guildhall [{0}]({1}).\n".format(guild["guildhall"],
url_house.format(id=guildhouse["id"],
world=guild["world"])
)
else:
# In case there's no match in the houses table, we just show the name.
embed.description += "They own the guildhall **{0}**.\n".format(guild["guildhall"])
if len(guild['members']) < 1:
embed.description += "Nobody is online."
yield from self.bot.say(embed=embed)
return
plural = ""
if len(guild['members']) > 1:
plural = "s"
embed.description += "It has {0} player{1} online:".format(len(guild['members']), plural)
current_field = ""
result = ""
for member in guild['members']:
if current_field == "":
current_field = member['rank']
elif member['rank'] != current_field and member["rank"] != "":
embed.add_field(name=current_field, value=result, inline=False)
result = ""
current_field = member['rank']
member["title"] = ' (*' + member['title'] + '*)' if member['title'] != '' else ''
member["vocation"] = get_voc_abb(member["vocation"])
result += "{name} {title} -- {level} {vocation}\n".format(**member)
embed.add_field(name=current_field, value=result, inline=False)
yield from self.bot.say(embed=embed)
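# A minimal, standalone sketch (not part of the cog) of the rank-grouping loop above,
# assuming the member list from get_guild_online() is ordered by rank and that members
# with an empty "rank" string belong to the most recent named rank. The helper name is
# hypothetical.
def group_members_by_rank(members):
    """Return an ordered list of (rank, [member, ...]) groups."""
    groups = []
    current_rank = None
    for member in members:
        rank = member["rank"]
        if current_rank is None or (rank != "" and rank != current_rank):
            current_rank = rank
            groups.append((current_rank, []))
        groups[-1][1].append(member)
    return groups
# e.g. a leader, a blank-rank member and a "Member"-ranked member come back as
# [("Leader", [...2 members...]), ("Member", [...1 member...])], mirroring how the
# embed fields are built above.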
@commands.command(pass_context=True, aliases=['checkprice', 'itemprice'])
@asyncio.coroutine
def item(self, ctx, *, name: str=None):
"""Checks an item's information
Shows name, picture, NPCs that buy and sell it, and creature drops"""
permissions = ctx.message.channel.permissions_for(get_member(self.bot, self.bot.user.id, ctx.message.server))
if not permissions.embed_links:
yield from self.bot.say("Sorry, I need `Embed Links` permission for this command.")
return
if name is None:
yield from self.bot.say("Tell me the name of the item you want to search.")
return
item = get_item(name)
if item is None:
yield from self.bot.say("I couldn't find an item with that name.")
return
if type(item) is list:
embed = discord.Embed(title="Suggestions", description="\n".join(item))
yield from self.bot.say("I couldn't find that item, maybe you meant one of these?", embed=embed)
return
# Attach item's image only if the bot has permissions
permissions = ctx.message.channel.permissions_for(get_member(self.bot, self.bot.user.id, ctx.message.server))
if permissions.attach_files and item["image"] != 0:
filename = item['name'] + ".png"
while os.path.isfile(filename):
filename = "_" + filename
with open(filename, "w+b") as f:
f.write(bytearray(item['image']))
f.close()
with open(filename, "r+b") as f:
yield from self.bot.upload(f)
f.close()
os.remove(filename)
long = ctx.message.channel.is_private or ctx.message.channel.name == ask_channel_name
embed = self.get_item_embed(ctx, item, long)
yield from self.bot.say(embed=embed)
@commands.command(pass_context=True, aliases=['mon', 'mob', 'creature'])
@asyncio.coroutine
def monster(self, ctx, *, name: str=None):
"""Gives information about a monster"""
permissions = ctx.message.channel.permissions_for(get_member(self.bot, self.bot.user.id, ctx.message.server))
if not permissions.embed_links:
yield from self.bot.say("Sorry, I need `Embed Links` permission for this command.")
return
if name is None:
yield from self.bot.say("Tell me the name of the monster you want to search.")
return
if ctx.message.channel.is_private:
bot_member = self.bot.user
else:
bot_member = get_member(self.bot, self.bot.user.id, ctx.message.server)
if name.lower() == bot_member.display_name.lower():
yield from self.bot.say(random.choice(["**"+bot_member.display_name+"** is too strong for you to hunt!",
"Sure, you kill *one* child and suddenly you're a monster!",
"I'M NOT A MONSTER",
"I'm a monster, huh? I'll remember that, human..."+EMOJI[":flame:"],
"You misspelled *future ruler of the world*.",
"You're not a good person. You know that, right?",
"I guess we both know that isn't going to happen.",
"You can't hunt me.",
"That's funny... If only I was programmed to laugh."]))
return
monster = get_monster(name)
if monster is None:
yield from self.bot.say("I couldn't find a monster with that name.")
return
if type(monster) is list:
embed = discord.Embed(title="Suggestions", description="\n".join(monster))
yield from self.bot.say("I couldn't find that creature, maybe you meant one of these?", embed=embed)
return
# Attach the monster's image only if the bot has permissions
permissions = ctx.message.channel.permissions_for(get_member(self.bot, self.bot.user.id, ctx.message.server))
if permissions.attach_files and monster["image"] != 0:
filename = monster['name'] + ".png"
while os.path.isfile(filename):
filename = "_" + filename
with open(filename, "w+b") as f:
f.write(bytearray(monster['image']))
f.close()
# Send monster's image
with open(filename, "r+b") as f:
yield from self.bot.upload(f)
f.close()
os.remove(filename)
long = ctx.message.channel.is_private or ctx.message.channel.name == ask_channel_name
embed = self.get_monster_embed(ctx, monster, long)
yield from self.bot.say(embed=embed)
@commands.group(aliases=['deathlist', 'death'], pass_context=True, invoke_without_command=True)
@asyncio.coroutine
def deaths(self, ctx, *, name: str = None):
"""Shows a player's or everyone's recent deaths"""
if name is None and lite_mode:
return
permissions = ctx.message.channel.permissions_for(get_member(self.bot, self.bot.user.id, ctx.message.server))
if not permissions.embed_links:
yield from self.bot.say("Sorry, I need `Embed Links` permission for this command.")
return
if ctx.message.channel.is_private:
user_servers = get_user_servers(self.bot, ctx.message.author.id)
user_worlds = get_user_worlds(self.bot, ctx.message.author.id)
else:
user_servers = [ctx.message.server]
user_worlds = [tracked_worlds.get(ctx.message.server.id)]
if user_worlds[0] is None and name is None:
and not self.__is_exported(root, self.module):
continue
fullname = '%s.%s' % (self.name, root)
m = _safe_import(fullname)
if m is None:
continue
self.doc[root] = self.__new_submodule(root, m)
# Now see if we can grab inheritance relationships between classes.
for docobj in self.doc.values():
if isinstance(docobj, Class):
docobj._fill_inheritance()
# Build the reference name dictionary.
for basename, docobj in self.doc.items():
self.refdoc[docobj.refname] = docobj
if isinstance(docobj, Class):
for v in docobj.class_variables():
self.refdoc[v.refname] = v
for v in docobj.instance_variables():
self.refdoc[v.refname] = v
for f in docobj.methods():
self.refdoc[f.refname] = f
for f in docobj.functions():
self.refdoc[f.refname] = f
# Finally look for more docstrings in the __pdoc__ override.
for name, docstring in getattr(self.module, '__pdoc__', {}).items():
refname = '%s.%s' % (self.refname, name)
if docstring is None:
self.doc.pop(name, None)
self.refdoc.pop(refname, None)
continue
dobj = self.find_ident(refname)
if isinstance(dobj, External):
continue
dobj.docstring = inspect.cleandoc(docstring)
def text(self):
"""
Returns the documentation for this module as plain text.
"""
t = _get_tpl('/text.mako')
text, _ = re.subn('\n\n\n+', '\n\n', t.render(module=self).strip())
return text
def html(self, external_links=False, link_prefix='',
source=True, **kwargs):
"""
Returns the documentation for this module as
self-contained HTML.
If `external_links` is `True`, then identifiers to external
modules are always turned into links.
If `link_prefix` is `True`, then all links will have that
prefix. Otherwise, links are always relative.
If `source` is `True`, then source code will be retrieved for
every Python object whenever possible. This can dramatically
decrease performance when documenting large modules.
`kwargs` is passed to the `mako` render function.
"""
t = _get_tpl('/html.mako')
t = t.render(module=self,
external_links=external_links,
link_prefix=link_prefix,
show_source_code=source,
**kwargs)
return t.strip()
def is_package(self):
"""
Returns `True` if this module is a package.
Works by checking if `__package__` is not `None` and whether it
has the `__path__` attribute.
"""
return hasattr(self.module, '__path__')
@property
def source(self):
return _source(self.module)
@property
def refname(self):
return self.name
def mro(self, cls):
"""
Returns a method resolution list of documentation objects
for `cls`, which must be a documentation object.
The list will contain objects belonging to `pdoc.Class` or
`pdoc.External`. Objects belonging to the former are exported
classes either in this module or in one of its sub-modules.
"""
ups = inspect.getmro(cls.cls)
return list(map(lambda c: self.find_class(c), ups))
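# Illustrative-only check (toy classes, not part of pdoc) of the inspect.getmro()
# call that mro() relies on: the class itself comes first, then its ancestors in
# method-resolution order.
def _mro_demo():
    import inspect
    class Base(object): pass
    class Child(Base): pass
    assert inspect.getmro(Child) == (Child, Base, object)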
def descendents(self, cls):
"""
Returns a descendent list of documentation objects for `cls`,
which must be a documentation object.
The list will contain objects belonging to `pdoc.Class` or
`pdoc.External`. Objects belonging to the former are exported
classes either in this module or in one of its sub-modules.
"""
if cls.cls == type or not hasattr(cls.cls, '__subclasses__'):
# Is this right?
return []
downs = cls.cls.__subclasses__()
return list(map(lambda c: self.find_class(c), downs))
def is_public(self, name):
"""
Returns `True` if and only if an identifier with name `name` is
part of the public interface of this module. While the names
of sub-modules are included, identifiers only exported by
sub-modules are not checked.
`name` should be a fully qualified name, e.g.,
<code>pdoc.Module.is_public</code>.
"""
return name in self.refdoc
def find_class(self, cls):
"""
Given a Python `cls` object, try to find it in this module
or in any of the exported identifiers of the submodules.
"""
for doc_cls in self.classes():
if cls is doc_cls.cls:
return doc_cls
for module in self.submodules():
doc_cls = module.find_class(cls)
if not isinstance(doc_cls, External):
return doc_cls
return External('%s.%s' % (cls.__module__, cls.__name__))
def find_ident(self, name):
"""
Searches this module and **all** of its sub-modules for an
identifier with name `name` in its list of exported
identifiers according to `pdoc`. Note that unexported
sub-modules are searched.
A bare identifier (without `.` separators) will only be checked
for in this module.
The documentation object corresponding to the identifier is
returned. If one cannot be found, then an instance of
`External` is returned populated with the given identifier.
"""
if name in self.refdoc:
return self.refdoc[name]
for module in self.submodules():
o = module.find_ident(name)
if not isinstance(o, External):
return o
return External(name)
def variables(self):
"""
Returns all documented module level variables in the module
sorted alphabetically as a list of `pdoc.Variable`.
"""
p = lambda o: isinstance(o, Variable) and self._docfilter(o)
return sorted(filter(p, self.doc.values()))
def classes(self):
"""
Returns all documented module level classes in the module
sorted alphabetically as a list of `pdoc.Class`.
"""
p = lambda o: isinstance(o, Class) and self._docfilter(o)
return sorted(filter(p, self.doc.values()))
def functions(self):
"""
Returns all documented module level functions in the module
sorted alphabetically as a list of `pdoc.Function`.
"""
p = lambda o: isinstance(o, Function) and self._docfilter(o)
return sorted(filter(p, self.doc.values()))
def submodules(self):
"""
Returns all documented sub-modules in the module sorted
alphabetically as a list of `pdoc.Module`.
"""
p = lambda o: isinstance(o, Module) and self._docfilter(o)
return sorted(filter(p, self.doc.values()))
def is_submodule(self, name):
"""
Returns `True` if and only if `name` starts with the full
import path of `self` and has length at least one greater than
`len(self.name)`.
"""
return self.name != name and name.startswith(self.name)
def __is_exported(self, name, module):
"""
Returns `True` if and only if `pdoc` considers `name` to be
a public identifier for this module where `name` was defined
in the Python module `module`.
If this module has an `__all__` attribute, then `name` is
considered to be exported if and only if it is a member of
this module's `__all__` list.
If `__all__` is not set, then whether `name` is exported or
not is heuristically determined. Firstly, if `name` starts
with an underscore, it will not be considered exported.
Secondly, if `name` was defined in a module other than this
one, it will not be considered exported. In all other cases,
`name` will be considered exported.
"""
if hasattr(self.module, '__all__'):
return name in self.module.__all__
if not _is_exported(name):
return False
if module is not None and self.module.__name__ != module.__name__:
return name in self._declared_variables
return True
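# Hedged, standalone restatement of the same heuristic (hypothetical helper, not part
# of pdoc, and it ignores the _declared_variables special case above): honor __all__
# when present, otherwise drop underscore-prefixed names and names defined elsewhere.
def _is_exported_sketch(module, name, defining_module):
    if hasattr(module, '__all__'):
        return name in module.__all__
    if name.startswith('_'):
        return False
    return defining_module is None or defining_module.__name__ == module.__name__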
def __public_objs(self):
"""
Returns a dictionary mapping a public identifier name to a
Python object.
"""
members = dict(inspect.getmembers(self.module))
return dict([(name, obj)
for name, obj in members.items()
if self.__is_exported(name, inspect.getmodule(obj))])
def __new_submodule(self, name, obj):
"""
Create a new submodule documentation object for this `obj`,
which must be a Python module object, and pass along any
settings in this module.
"""
# Forcefully set the module name so that it is always the absolute
# import path. We can't rely on `obj.__name__`, since it doesn't
# necessarily correspond to the public exported name of the module.
obj.__dict__['__pdoc_module_name'] = '%s.%s' % (self.refname, name)
return Module(obj,
docfilter=self._docfilter,
allsubmodules=self._allsubmodules)
class Class (Doc):
"""
Representation of a class's documentation.
"""
def __init__(self, name, module, class_obj):
"""
Same as `pdoc.Doc.__init__`, except `class_obj` must be a
Python class object. The docstring is gathered automatically.
"""
super(Class, self).__init__(name, module, inspect.getdoc(class_obj))
self.cls = class_obj
"""The class Python object."""
self.doc = {}
"""A mapping from identifier name to a `pdoc.Doc` objects."""
self.doc_init = {}
"""
A special version of `pdoc.Class.doc` that contains
documentation for instance variables found in the `__init__`
method.
"""
public = self.__public_objs()
try:
# First try and find docstrings for class variables.
# Then move on to finding docstrings for instance variables.
# This must be optional, since not all modules have source
# code available.
cls_ast = ast.parse(inspect.getsource(self.cls)).body[0]
self.doc = _var_docstrings(cls_ast, self.module, cls=self)
for n in (cls_ast.body if '__init__' in public else []):
if isinstance(n, ast.FunctionDef) and n.name == '__init__':
self.doc_init = _var_docstrings(n, self.module,
cls=self, init=True)
break
except:
pass
# Convert the public Python objects to documentation objects.
for name, obj in public.items():
# Skip any identifiers that already have doco.
if name in self.doc and not self.doc[name].is_empty():
continue
if name in self.doc_init:
# Let instance members override class members.
continue
if inspect.ismethod(obj):
self.doc[name] = Function(name, self.module, obj.__func__,
cls=self, method=True)
elif inspect.isfunction(obj):
self.doc[name] = Function(name, self.module, obj,
cls=self, method=False)
elif isinstance(obj, property):
docstring = getattr(obj, '__doc__', '')
self.doc_init[name] = Variable(name, self.module, docstring,
cls=self)
elif not inspect.isbuiltin(obj) \
and not inspect.isroutine(obj):
if name in getattr(self.cls, '__slots__', []):
self.doc_init[name] = Variable(name, self.module,
'', cls=self)
else:
self.doc[name] = Variable(name, self.module, '', cls=self)
@property
def source(self):
return _source(self.cls)
@property
def refname(self):
return '%s.%s' % (self.module.refname, self.cls.__name__)
def class_variables(self):
from product_info import *
from scipy.stats import kurtosis
from scipy.stats import skew
## path of our program
#HEAD_PATH = "/mnt/hgfs/intern"
HEAD_PATH = '/Users/sean/Desktop/Plan B/Quant/week1'
## path of data
DATA_PATH = HEAD_PATH + "/pkl tick/"
## path of the day-to-night data set
NIGHT_PATH = HEAD_PATH + "/night pkl tick/"
## get all the dates of product
import os
def get_dates(product):
return list(map(lambda x: x[:8] ,os.listdir(DATA_PATH + product)))
## get the data of product of specific date
import pandas as pd
import _pickle as cPickle
import gzip
#import lz4.frame
def get_data(product, date):
data = load(DATA_PATH + product+"/"+date+".pkl")
return data
def load(path):
with gzip.open(path, 'rb', compresslevel=1) as file_object:
raw_data = file_object.read()
return cPickle.loads(raw_data)
def save(data, path):
serialized = cPickle.dumps(data)
with gzip.open(path, 'wb', compresslevel=1) as file_object:
file_object.write(serialized)
import dask
from dask import compute, delayed
def parLapply(CORE_NUM, iterable, func, *args, **kwargs):
with dask.config.set(scheduler='processes', num_workers=CORE_NUM):
f_par = functools.partial(func, *args, **kwargs)
result = compute([delayed(f_par)(item) for item in iterable])[0]
return result
## returns 0 where the denominator is 0 (instead of inf or nan)
import numpy as np
import warnings
def zero_divide(x, y):
with warnings.catch_warnings():
warnings.simplefilter("ignore")
res = np.divide(x,y)
if hasattr(y, "__len__"):
res[y == 0] = 0
elif y == 0:
if hasattr(x, "__len__"):
res = np.zeros(len(x))
else:
res = 0
return res
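## quick usage sketch for zero_divide (illustrative values only): element-wise division
## that yields 0 wherever the denominator is 0 instead of inf/nan
assert list(zero_divide(np.array([1.0, 2.0]), np.array([2.0, 0.0]))) == [0.5, 0.0]
assert zero_divide(1.0, 0.0) == 0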
## function to calculate t-stat
def get_t_value(train_mat, signal, response):
beta = np.sum(train_mat[signal]*train_mat[response])/sum(train_mat[signal]**2) ## regression coef
sigma = np.sqrt(np.sum((train_mat[signal]*beta-train_mat[response])**2) / (len(train_mat)-1))
v = np.sqrt(np.sum(train_mat[signal]**2)) ## sigma/v is the standard deviation of beta_hat
return beta/sigma*v
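## hedged usage sketch for get_t_value (synthetic data; the column names "sig" and
## "resp" are made up): the t-stat of a no-intercept regression of resp on sig
def _t_value_demo():
    rng = np.random.RandomState(0)
    x = rng.normal(size=1000)
    df = pd.DataFrame({"sig": x, "resp": 0.1*x + rng.normal(size=1000)})
    return get_t_value(df, "sig", "resp")  ## roughly 0.1*sqrt(1000) ~ 3 here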
import statsmodels.formula.api as smf
## forward selection of signals
def forward_selected(data, response):
"""Linear model designed by forward selection.
Parameters:
-----------
data : pandas DataFrame with all possible predictors and response
response: string, name of response column in data
Returns:
--------
model: an "optimal" fitted statsmodels linear model
with an intercept
selected by forward selection
evaluated by adjusted R-squared
"""
remaining = set(data.columns)
remaining.remove(response)
selected = []
current_score, best_new_score = 0.0, 0.0
chosen_signals = []
while remaining and current_score == best_new_score:
scores_with_candidates = []
for candidate in remaining:
formula = "{} ~ {} - 1".format("data['"+response+"']",
' + '.join(selected + ["data['"+candidate+"']"]))
score = smf.ols(formula, data).fit().rsquared_adj
scores_with_candidates.append((score, candidate))
scores_with_candidates.sort()
best_new_score, best_candidate = scores_with_candidates.pop()
if current_score < best_new_score:
remaining.remove(best_candidate)
selected.append("data['"+best_candidate+"']")
current_score = best_new_score
chosen_signals.append(best_candidate)
formula = "{} ~ {} - 1".format("data['"+response+"']",
' + '.join(selected))
model = smf.ols(formula, data).fit()
return OrderedDict([("model", model), ("chosen.signals", chosen_signals)])
def moving_average(a, n=3) :
ret_sum = np.cumsum(a, dtype=float)
ret = a
ret[n:] = (ret_sum[n:] - ret_sum[:-n])/n
return ret
def ewma(x, halflife, init=0, adjust=False):
init_s = pd.Series(data=init)
s = init_s.append(x)
if adjust:
xx = range(len(x))
lamb=1 - 0.5**(1 / halflife)
aa=1-np.power(1-lamb, xx)*(1-lamb)
bb=s.ewm(halflife=halflife, adjust=False).mean().iloc[1:]
return bb/aa
else:
return s.ewm(halflife=halflife, adjust=False).mean().iloc[1:]
def ewma_lambda(x, lambda_, init=0, adjust=False):
init_s = pd.Series(data=init)
s = init_s.append(x)
if adjust:
xx = range(len(x))
aa=1-np.power(1-lambda_, xx)*(1-lambda_)
bb=s.ewm(alpha=lambda_, adjust=False).mean().iloc[1:]
return bb/aa
else:
return s.ewm(alpha=lambda_, adjust=False).mean()[1:]
## moving sum of x
## we don't use rollSum because rollSum would set the first n values to zero
def cum(x, n):
sum_x = x.cumsum()
sum_x_shift = sum_x.shift(n)
sum_x_shift[:n]= 0
return sum_x - sum_x_shift
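## quick check for cum (illustrative): a trailing moving sum whose first n entries fall
## back to the plain cumulative sum instead of being zeroed out
assert list(cum(pd.Series([1, 2, 3, 4]), 2)) == [1, 3, 5, 7]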
def sharpe(x):
return zero_divide(np.mean(x)* np.sqrt(250), np.std(x, ddof=1))
def drawdown(x):
y = np.cumsum(x)
return np.max(y)-np.max(y[-1:])
def max_drawdown(x):
y = np.cumsum(x)
return np.max(np.maximum.accumulate(y)-y)
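## illustrative example for the three PnL statistics above (made-up daily PnL):
## cumulative PnL is [1.0, -1.0, -0.5, 0.5], so the worst peak-to-trough drop is 2.0
## and the drop from the overall peak to the final value is 0.5
_demo_pnl = np.array([1.0, -2.0, 0.5, 1.0])
assert max_drawdown(_demo_pnl) == 2.0
assert drawdown(_demo_pnl) == 0.5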
from collections import OrderedDict
def get_hft_summary(result, thre_mat, n):
all_result = pd.DataFrame(data={"daily.result": result})
daily_num = all_result['daily.result'].apply(lambda x: x["num"])
daily_pnl = all_result['daily.result'].apply(lambda x: x["pnl"])
daily_ret = all_result['daily.result'].apply(lambda x: x["ret"])
total_num = daily_num.sum()
if len(total_num) != len(thre_mat):
raise Exception("Mismatch!")
total_pnl = daily_pnl.sum()
total_ret = daily_ret.sum()
avg_pnl = zero_divide(total_pnl, total_num)
avg_ret = zero_divide(total_ret, total_num)
total_sharp = sharpe(daily_pnl)
total_drawdown = drawdown(daily_pnl)
total_max_drawdown = max_drawdown(daily_pnl)
sharpe_ret = sharpe(daily_ret)
drawdown_ret = drawdown(daily_ret)
max_drawdown_ret = max_drawdown(daily_ret)
final_result = pd.DataFrame(data=OrderedDict([("open", thre_mat["open"]), ("close", thre_mat["close"]), ("num", total_num),
("avg.pnl", avg_pnl), ("total.pnl", total_pnl), ("sharpe", total_sharp),
("drawdown", total_drawdown), ("max.drawdown", total_max_drawdown),
("avg.ret", avg_ret), ("total.ret",total_ret), ("sharpe.ret", sharpe_ret),
("drawdown.ret", drawdown_ret), ("max.drawdown.ret", max_drawdown_ret),
("mar", total_pnl/total_max_drawdown), ("mar.ret", total_ret/max_drawdown_ret)]),
index=thre_mat.index)
return OrderedDict([("final.result", final_result), ("daily.num", daily_num), ("daily.pnl", daily_pnl), ("daily.ret", daily_ret)])
def get_sample_signal(good_night_files, sample, product, signal_list, period, daily_num):
n_samples = sum(daily_num[sample])
n_days = sum(sample)
n_signal = len(signal_list)
all_signal = np.ndarray(shape=(int(n_samples),n_signal))
cur = 0
for file in good_night_files[sample]:
data = load(HEAD_PATH+"/night pkl tick/"+product+"/"+file)
chosen = (np.arange(sum(data["good"]))+1) % period==0
n_chosen = sum(chosen)
for i in range(n_signal):
signal_name = signal_list[i]
S = load(HEAD_PATH+"/tmp pkl/"+product+"/"+signal_name+"/"+file)
S = S[data["good"]]
signal = S[(np.arange(len(S))+1) % period == 0]
signal[np.isnan(signal)] = 0 ## the ret.cor has some bad records
signal[np.isinf(signal)] = 0 ## the ret.cor has some bad records
all_signal[cur:(cur+n_chosen),i] = signal
cur = cur+n_chosen
all_signal = pd.DataFrame(all_signal, columns=signal_list)
return all_signal
from collections import OrderedDict
def get_signal_pnl(file, product, signal_name, thre_mat, reverse=1, tranct=1.1e-4, max_spread=0.61, tranct_ratio=True,
HEAD_PATH="d:/intern", SIGNAL_PATH="d:/intern", atr_filter=0, rebate=0):
## load data
data = load(HEAD_PATH+"/pkl tick/"+product+"/"+file)
S = load(SIGNAL_PATH+"/tmp pkl/"+product+"/"+signal_name+"/"+file)
pred = S*reverse
data = data.reset_index(drop=True)
pred = pred[data["good"]]
atr = load(SIGNAL_PATH+"/tmp pkl/"+product+"/"+"atr.4096"+"/"+file).reset_index(drop=True)
atr = atr[data["good"]].reset_index(drop=True)
data = data[data["good"]].reset_index(drop=True)
#n_bar = len(data)
## load signal
## we don't know whether the signal is positively or negatively correlated
#n_thre = len(thre_mat)
result = pd.DataFrame(data=OrderedDict([("open", thre_mat["open"].values), ("close", thre_mat["close"].values),
("num", 0), ("avg.pnl", 0), ("pnl", 0), ("avg.ret", 0), ("ret", 0)]),
index=thre_mat.index)
count = 0;
cur_spread = data["ask"]-data["bid"]
for thre in thre_mat.iterrows():
count = count+1
buy = pred>thre[1]["open"]
sell = pred<-thre[1]["open"]
signal = pd.Series(data=0, index=data.index)
position = signal.copy()
signal[buy] = 1
signal[sell] = -1
signal[atr<atr_filter]=0
scratch = -thre[1]["close"]
position_pos = pd.Series(data=np.nan, index=data.index)
position_pos.iloc[0] = 0
position_pos[(signal==1) & (data["next.ask"]>0) & (data["next.bid"]>0) & (cur_spread<max_spread)] = 1
position_pos[(pred< -scratch) & (data["next.bid"]>0) & (cur_spread<max_spread)] = 0
position_pos.ffill(inplace=True)
pre_pos = position_pos.shift(1)
notional_position_pos = pd.Series(data=0, index=data.index)
notional_position_pos[position_pos==1] = 1
notional_position_pos[(position_pos==1) & (pre_pos==1)] = np.nan
notional_position_pos[(notional_position_pos==1)] = 1/data["next.ask"][(notional_position_pos==1)]
notional_position_pos.ffill(inplace=True)
position_neg = pd.Series(data=np.nan, index=data.index)
position_neg.iloc[0] = 0
position_neg[(signal==-1) & (data["next.ask"]>0) & (data["next.bid"]>0) & (cur_spread<max_spread)] = -1
position_neg[(pred> scratch) & (data["next.ask"]>0) & (cur_spread<max_spread)] = 0
position_neg.ffill(inplace=True)
pre_neg = position_neg.shift(1)
notional_position_neg = pd.Series(data=0, index=data.index)
notional_position_neg[position_neg==-1] = -1
notional_position_neg[(position_neg==-1) & (pre_neg==-1)] = np.nan
notional_position_neg[(notional_position_neg==-1)] = -1/data["next.bid"][(notional_position_neg==-1)]
notional_position_neg.ffill(inplace=True)
position = position_pos + position_neg
notional_position = notional_position_pos+notional_position_neg
#position[n_bar-1] = 0
position.iloc[0] = 0
position.iloc[-2:] = 0
notional_position.iloc[0] = 0
notional_position.iloc[-2:] = 0
change_pos = position - position.shift(1)
notional_change_pos = notional_position-notional_position.shift(1)
change_pos.iloc[0] = 0
notional_change_pos.iloc[0] = 0
change_base = pd.Series(data=0, index=data.index)
change_buy = change_pos>0
change_sell = change_pos<0
if (tranct_ratio):
change_base[change_buy] = data["next.ask"][change_buy]*(1+tranct)
change_base[change_sell] = data["next.bid"][change_sell]*(1-tranct)
else:
change_base[change_buy] = data["next.ask"][change_buy]+tranct
change_base[change_sell] = data["next.bid"][change_sell]-tranct
final_pnl = -sum(change_base*change_pos)
ret = -sum(change_base*notional_change_pos)
num = sum((position!=0) & (change_pos!=0))
if num == 0:
result.loc[thre[0], ("num", "avg.pnl", "pnl", "avg.ret", "ret")] = (0,0,0,0,0)
return result
else:
avg_pnl = np.divide(final_pnl, num)
avg_ret = np.divide(ret,num)
result.loc[thre[0], ("num", "avg.pnl", "pnl", "avg.ret", "ret")] = (num, avg_pnl, final_pnl, avg_ret,ret)
return result
def vanish_thre(x, thre):
x[np.abs(x)>thre] = 0
return x
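## note on vanish_thre (illustrative values): entries whose absolute value exceeds thre
## are zeroed, and the input array is modified in place as well as returned
_v = np.array([0.5, -3.0, 1.0])
assert list(vanish_thre(_v, 2.0)) == [0.5, 0.0, 1.0]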
def construct_composite_signal(dire_signal, range_signal, period_list, date_list, product_list, HEAD_PATH):
from collections import OrderedDict
class factor_xx_period(factor_template):
factor_name = dire_signal+"."+range_signal+".period"
params = OrderedDict([
("period", period_list)
])
def formula(self, data, period):
return (data[dire_signal+"."+str(period)]*data[range_signal+"."+str(period)]).values
xx = factor_xx_period()
for product in product_list:
create_signal_path(xx, product, HEAD_PATH)
file_list = [DATA_PATH+product+"/"+date for date in date_list]
parLapply(CORE_NUM, file_list, build_composite_signal,signal_list=xx, product=product, HEAD_PATH=HEAD_PATH)
import itertools
def create_signal_path(signal_list, product, HEAD_PATH):
keys = list(signal_list.params.keys())
for cartesian in itertools.product(*signal_list.params.values()):
signal_name = signal_list.factor_name
for i in range(len(cartesian)):
signal_name = signal_name.replace(keys[i], str(cartesian[i]))
os.makedirs(HEAD_PATH+"/tmp pkl/"+product+"/"+signal_name, exist_ok=True)
print(HEAD_PATH+"/tmp pkl/"+product+"/"+signal_name)
def get_signal_mat(signal_mat, signal_name, product, file_name, HEAD_PATH="d:/intern"):
S = load(HEAD_PATH+"/tmp pkl/"+product+"/"+signal_name+"/"+file_name)
S[np.isnan(S)] = 0
if signal_mat is None:
return S
else:
return np.vstack((signal_mat, S))
def get_daily_pred(file_name, product, signal_list, coef, strat, HEAD_PATH):
signal_mat = functools.reduce(functools.partial(get_signal_mat, product=product, file_name=file_name, HEAD_PATH=HEAD_PATH), signal_list, None)
if len(coef) > 1:
S = np.dot(signal_mat.T, coef)
else:
S = signal_mat * coef
save(S, HEAD_PATH+"/tmp pkl/"+product+"/"+strat+"/"+file_name)
def get_sample_signal(good_night_files, sample, product, signal_list, period, daily_num, HEAD_PATH):
n_samples = sum(daily_num[sample])
n_days = sum(sample)
n_signal = len(signal_list)
all_signal = np.ndarray(shape=(int(n_samples),n_signal))
cur = 0
for file in good_night_files[sample]:
good = load(HEAD_PATH+"/good pkl/"+product+"/"+file)
chosen = (np.arange(sum(good))+1) % period==0
n_chosen = sum(chosen)
for i in range(n_signal):
signal_name = signal_list[i]
S = load(HEAD_PATH+"/tmp pkl/"+product+"/"+signal_name+"/"+file)
S = S[good]
signal = S[(np.arange(len(S))+1) % period == 0]
signal[np.isnan(signal)] = 0 ## the ret.cor has some bad records
signal[np.isinf(signal)] = 0 ## the ret.cor has some bad records
all_signal[cur:(cur+n_chosen),i] = signal
cur = cur+n_chosen
all_signal = pd.DataFrame(all_signal, columns=signal_list)
return all_signal
def get_range_pos(wpr, min_period, max_period, period):
return ewma(zero_divide(wpr-min_period, max_period-min_period), period, adjust=True) - 0.5
import dask
from dask import compute, delayed
import matplotlib.pyplot as plt
import functools
from collections import OrderedDict
def get_signal_train_stat(signal_name, thre_mat, product, all_dates, CORE_NUM, split_str="2018", reverse=1, tranct=1.1e-4,
max_spread=0.61, tranct_ratio=True, min_pnl=2, min_num=20, atr_filter=0):
train_sample = all_dates<split_str
with dask.config.set(scheduler='processes', num_workers=CORE_NUM):
f_par = functools.partial(get_signal_pnl, product=product, signal_name=signal_name, thre_mat=thre_mat,
reverse=reverse, tranct=tranct, max_spread=max_spread, tranct_ratio=tranct_ratio, atr_filter=atr_filter)
train_result = compute([delayed(f_par)(file) for file
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sun Mar 8 11:40:54 2020
@author: <NAME>
Sept 19, 2021: altered to work with new qray.py
wherein Qvector.xyz is a property
"""
class Polyhedron:
"""
Designed to be subclassed, not used directly
"""
def scale(self, scalefactor):
if hasattr(self, "volume"):
self.volume = scalefactor ** 3
newverts = {}
for v in self.vertexes:
newverts[v] = self.vertexes[v] * scalefactor
newme = type(self)()
newme.vertexes = newverts # substitutes new guts
newme.edges = newme._distill() # update edges to use new verts
return newme
__mul__ = __rmul__ = scale
def translate(self, vector):
newverts = {}
for v in self.vertexes:
newverts[v] = self.vertexes[v] + vector
newme = type(self)()
newme.vertexes = newverts # substitutes new tent stakes
if hasattr(self, "center"): # shift center before suppress!
newme.center = self.center + vector
if hasattr(self, "suppress"):
newme.suppress = self.suppress
if newme.suppress:
newme.faces = newme._suppress()
newme.edges = newme._distill() # update edges to use new verts
return newme
__add__ = __radd__ = translate
def _distill(self):
edges = []
unique = set()
for f in self.faces:
for pair in zip(f , f[1:] + (f[0],)):
unique.add( tuple(sorted(pair)) )
for edge in unique:
edges.append( Edge(self.vertexes[edge[0]],
self.vertexes[edge[1]]) )
return edges
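# Illustrative-only sketch (toy faces, hypothetical helper, not used by the classes
# below) of what _distill() computes: walk each face's vertex cycle and keep every
# undirected edge exactly once by sorting its endpoint labels.
def _unique_edge_labels(faces):
    unique = set()
    for f in faces:
        for pair in zip(f, f[1:] + (f[0],)):
            unique.add(tuple(sorted(pair)))
    return unique

assert _unique_edge_labels((('a', 'b', 'c'), ('a', 'c', 'd'))) == {
    ('a', 'b'), ('b', 'c'), ('a', 'c'), ('c', 'd'), ('a', 'd')}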
class Edge:
"""
Edges are defined by two Vectors (above) and express as cylinder via draw().
"""
def __init__(self, v0, v1):
self.v0 = v0 # actual coordinates, not a letter label
self.v1 = v1
def __repr__(self):
return 'Edge from %s to %s' % (self.v0, self.v1)
def draw_vert(v, c, r, t):
v = v.xyz
x,y,z = v.x, v.y, v.z
data = "< %s, %s, %s >" % (x,y,z), r, c
template = ("sphere { %s, %s texture "
"{ pigment { color %s } } no_shadow }")
print(template % data, file=t)
def draw_face(f, c, t): pass
def draw_edge(e, c, r, t):
v = e.v0.xyz
v0 = "< %s, %s, %s >" % (v.x, v.y, v.z)
v = e.v1.xyz
v1 = "< %s, %s, %s >" % (v.x, v.y, v.z)
data = (v0, v1, r, c)
template = ("cylinder { %s, %s, %s texture "
"{pigment { color %s } } no_shadow }")
print(template % data, file=t)
def draw_poly(p, the_file, v=True, f=False, e=True):
ec = p.edge_color
er = p.edge_radius
vc = p.vert_color
vr = p.vert_radius
fc = p.face_color
if v:
for v in p.vertexes.values():
draw_vert(v, vc, vr, the_file)
if f:
for f in p.faces:
draw_face(f, fc, the_file)
if e:
for e in p.edges:
draw_edge(e, ec, er, the_file)
import math
from qrays import Qvector, Vector
PHI = (1 + math.sqrt(5))/2.0
ORIGIN = Qvector((0,0,0,0))
A = Qvector((1,0,0,0))
B = Qvector((0,1,0,0))
C = Qvector((0,0,1,0))
D = Qvector((0,0,0,1))
E,F,G,H = B+C+D, A+C+D, A+B+D, A+B+C
I,J,K,L,M,N = A+B, A+C, A+D, B+C, B+D, C+D
O,P,Q,R,S,T = I+J, I+K, I+L, I+M, N+J, N+K
U,V,W,X,Y,Z = N+L, N+M, J+L, L+M, M+K, K+J
# OPPOSITE DIAGONALS
# ZY WX
# RV OS
# TU PQ
control = (Z - T).length()
midface = (Z + Y)
gold = 0.5 * PHI * midface/midface.length()
Zi = gold + J/J.length() * control/2
Yi = gold + M/M.length() * control/2
midface = (W + X)
gold = 0.5 * PHI * midface/midface.length()
Wi = gold + J/J.length() * control/2
Xi = gold + M/M.length() * control/2
midface = (R + V)
gold = 0.5 * PHI * midface/midface.length()
Ri = gold + I/I.length() * control/2
Vi = gold + N/N.length() * control/2
midface = (O + S)
gold = 0.5 * PHI * midface/midface.length()
Oi = gold + I/I.length() * control/2
Si = gold + N/N.length() * control/2
midface = (T + U)
gold = 0.5 * PHI * midface/midface.length()
Ti = gold + K/K.length() * control/2
Ui = gold + L/L.length() * control/2
midface = (P + Q)
gold = 0.5 * PHI * midface/midface.length()
Pi = gold + K/K.length() * control/2
Qi = gold + L/L.length() * control/2
class Tetrahedron(Polyhedron):
"""
Tetrahedron
"""
def __init__(self):
# POV-Ray
self.edge_color = "rgb <{}, {}, {}>".format(1, 165/255, 0) # orange
self.edge_radius= 0.03
self.vert_color = "rgb <{}, {}, {}>".format(1, 165/255, 0) # orange
self.vert_radius= 0.03
self.face_color = "rgb <0, 0, 0>" # not used
verts = dict(a = Qvector((1,0,0,0)), #A
b = Qvector((0,1,0,0)), #B
c = Qvector((0,0,1,0)), #C
d = Qvector((0,0,0,1))) #D
self.name = "Tetrahedron"
self.volume = 1 # per Concentric Hierarchy
self.center = ORIGIN
# 4 vertices
self.vertexes = verts
# 4 faces
self.faces = (('a','b','c'),('a','c','d'),
('a','d','b'),('b','d','c'))
self.edges = self._distill()
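# Hedged usage sketch (not part of the original module; the output filename is made up):
# scale and translate return new polyhedra, and draw_poly() writes POV-Ray sphere and
# cylinder statements for the vertexes and edges to an open file.
def _demo_tetrahedron_pov(path="tet_demo.pov"):
    tet = 2 * Tetrahedron()             # scaled copy via Polyhedron.scale
    tet = tet + Qvector((1, 0, 0, 0))   # translated copy via Polyhedron.translate
    with open(path, "w") as pov_file:
        draw_poly(tet, pov_file)        # faces are skipped by default (f=False)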
class InvTetrahedron(Polyhedron):
"""
Inverse Tetrahedron
"""
def __init__(self):
# POV-Ray
self.edge_color = "rgb <{}, {}, {}>".format(0, 0, 0) # black
self.edge_radius= 0.03
self.vert_color = "rgb <{}, {}, {}>".format(0, 0, 0) # black
self.vert_radius= 0.03
self.face_color = "rgb <0, 0, 0>" # not used
verts = dict(e = -Qvector((1,0,0,0)), #E
f = -Qvector((0,1,0,0)), #F
g = -Qvector((0,0,1,0)), #G
h = -Qvector((0,0,0,1))) #H
self.name = "InvTetrahedron"
self.volume = 1 # per Concentric Hierarchy
self.center = ORIGIN
# 4 vertices
self.vertexes = verts
# 4 faces
self.faces = (('e','f','g'),('e','g','h'),
('e','h','f'),('f','h','g'))
self.edges = self._distill()
class Cube (Polyhedron):
"""
Cube
"""
def __init__(self):
# POV-Ray
self.edge_color = "rgb <0, 1, 0>"
self.edge_radius= 0.03
self.vert_color = "rgb <0, 1, 0>"
self.vert_radius= 0.03
self.face_color = "rgb <0, 0, 0>"
verts = {}
for vert_label in "abcdefgh":
# keep the uppercase A-Z universe (namespace) unobstructed
verts[vert_label] = eval(vert_label.upper())
self.name = "Cube"
self.volume = 3 # per Concentric Hierarchy
self.center = ORIGIN
# 8 vertices
self.vertexes = verts
# 6 faces
self.faces = (('a','f','c','h'),('h','c','e','b'),
('b','e','d','g'),('g','d','f','a'),
('c','f','d','e'),('a','h','b','g'))
self.edges = self._distill()
class Cuboid (Polyhedron):
"""
Cuboid with height, width, depth = sqrt(1), sqrt(2), sqrt(4)
"""
def __init__(self):
# POV-Ray
self.edge_color = "rgb <255/255, 20/255, 147/255>"
self.edge_radius= 0.03
self.vert_color = "rgb <255/255, 20/255, 147/255>"
self.vert_radius= 0.03
self.face_color = "rgb <0, 0, 0>"
verts = {}
verts['A'] = Vector(( 1, 0.5, math.sqrt(2)/2))
verts['B'] = Vector(( 1, -0.5, math.sqrt(2)/2))
verts['C'] = Vector(( 1, -0.5, -math.sqrt(2)/2))
verts['D'] = Vector(( 1, 0.5, -math.sqrt(2)/2))
verts['E'] = Vector((-1, 0.5, math.sqrt(2)/2))
verts['F'] = Vector((-1, -0.5, math.sqrt(2)/2))
verts['G'] = Vector((-1, -0.5, -math.sqrt(2)/2))
verts['H'] = Vector((-1, 0.5, -math.sqrt(2)/2))
self.name = "Cuboid"
self.volume = 8 # per Concentric Hierarchy
self.center = ORIGIN
# 8 vertices
self.vertexes = verts
# 6 faces
self.faces = (('A','B','C','D'),('E','F','G','H'),
('A','E','F','B'),('D','H','G','C'),
('A','E','H','D'),('B','F','G','C'))
self.edges = self._distill()
class Octahedron (Polyhedron):
"""
Octahedron
"""
def __init__(self):
# POV-Ray
self.edge_color = "rgb <1, 0, 0>"
self.edge_radius= 0.03
self.vert_color = "rgb <1, 0, 0>"
self.vert_radius= 0.03
self.face_color = "rgb <0, 0, 0>"
verts = {}
for vert_label in "ijklmn":
# keep the uppercase A-Z universe unobstructed
verts[vert_label] = eval(vert_label.upper())
self.name = "Octahedron"
self.volume = 4 # per Concentric Hierarchy
self.center = ORIGIN
# 6 vertices
self.vertexes = verts
# 8 faces
self.faces = (('j','k','i'),('j','i','l'),('j','l','n'),('j','n','k'),
('m','k','i'),('m','i','l'),('m','l','n'),('m','n','k'))
self.edges = self._distill()
class RD (Polyhedron):
"""
Rhombic Dodecahedron
"""
def __init__(self):
self.edge_color = "rgb <0, 0, 1>"
self.edge_radius= 0.03
self.vert_color = "rgb <0, 0, 1>"
self.vert_radius= 0.03
self.face_color = "rgb <0, 0, 0>"
verts = {}
for vert_label in "abcdefghijklmn":
# keep the uppercase A-Z universe unobstructed
verts[vert_label] = eval(vert_label.upper())
self.name = "RD"
self.volume = 6 # per Concentric Hierarchy
self.center = ORIGIN
# 14 vertices
self.vertexes = verts
# 12 faces
# I,J,K,L,M,N = A+B, A+C, A+D, B+C, B+D, C+D
self.faces = (('j','f','k','a'),('j','f','n','c'),('j','c','l','h'),('j','h','i','a'),
('m','d','k','g'),('m','d','n','e'),('m','e','l','b'),('m','b','i','g'),
('k','d','n','f'),('n','c','l','e'),('l','h','i','b'),('i','a','k','g'))
self.edges = self._distill()
class Icosahedron (Polyhedron):
def __init__(self):
# 8 vertices
self.edge_color = "rgb <0, 1, 1>"
self.edge_radius= 0.03
self.vert_color = "rgb <0, 1, 1>"
self.vert_radius= 0.03
self.face_color = "rgb <0, 0, 0>"
self.vertexes = dict(o = Oi,
p = Pi,
q = Qi,
r = Ri,
s = Si,
t = Ti,
u = Ui,
v = Vi,
w = Wi,
x = Xi,
y = Yi,
z = Zi)
self.name = "Icosahedron"
self.volume = 18.51
self.center = ORIGIN
# 20 faces
# OPPOSITE DIAGONALS of cubocta
# ZY WX
# RV OS
# TU PQ
self.faces = (('o','w','s'),('o','z','s'),
('z','p','y'),('z','t','y'),
('t','v','u'),('t','s','u'),
('w','q','x'),('w','u','x'),
('p','o','q'),('p','r','q'),
('r','y','v'),('r','x','v'),
('z','s','t'),('t','y','v'),
('y','p','r'),('r','q','x'),
('x','u','v'),('u','s','w'),
('w','q','o'),('o','z','p'))
self.edges = self._distill()
class Cuboctahedron (Polyhedron):
def __init__(self):
# 8 vertices
self.edge_color = "rgb <1, 1, 0>"
self.edge_radius= 0.03
self.vert_color = "rgb <1,
#!/usr/bin/env python
from numpy import *
from numpy import f2py # not part of import *
from scitools.StringFunction import StringFunction
import time, sys, os
# make sys.path so we can find Grid2D.py:
sys.path.insert(0, os.path.join(os.environ['scripting'],
'src','py','examples'))
from Grid2D import Grid2D
try:
import ext_gridloop
except ImportError:
print 'You must first build the ext_gridloop module'
sys.exit(1)
class Grid2Deff(Grid2D):
def ext_gridloop1(self, f):
"""Compute a[i,j] = f(xi,yj) in an external routine."""
# a is made here, sent to the routine, and then returned
a = zeros((self.xcoor.size, self.ycoor.size))
# C/C++ or Fortran module?
if ext_gridloop.__doc__ is not None:
if 'f2py' in ext_gridloop.__doc__:
# Fortran extension module
a = asarray(a, order='Fortran')
# could store a as self.a to avoid making Fortran
# arrays in repeated calls
ext_gridloop.gridloop1(a, self.xcoor, self.ycoor, f)
return a
def ext_gridloop2(self, f):
"""Compute a[i,j] = f(xi,yj) in an external routine."""
# a is made in the external routine
a = ext_gridloop.gridloop2(self.xcoor, self.ycoor, f)
return a
def ext_gridloop_exceptions(self, f):
"""Test error handling in the extension module."""
try: #1
ext_gridloop.gridloop1((1,2), self.xcoor, self.ycoor[1:], f)
except:
print sys.exc_type, sys.exc_value
try: #2
ext_gridloop.gridloop1(self.xcoor, self.xcoor, self.ycoor[1:], f)
except:
print sys.exc_type, sys.exc_value
try: #3
ext_gridloop.gridloop2(self.xcoor, self.ycoor, 'abc')
except:
print sys.exc_type, sys.exc_value
try: #4
ext_gridloop.gridloop2(array(self.xcoor,Complex64),
self.ycoor, 'abc')
except:
print sys.exc_type, sys.exc_value
try: #5
ext_gridloop.gridloop2(array([[0,0],[1,2]]), self.ycoor, 'abc')
except:
print sys.exc_type, sys.exc_value
# NOTE: the next three functions are only available in the
# Fortran 77 extension module:
def ext_gridloop_vec1(self, f):
"""As ext_gridloop2, but vectorized callback."""
a = zeros((self.xcoor.size, self.ycoor.size))
a = ext_gridloop.gridloop_vec1(a, self.xcoor, self.ycoor, f)
return a
def ext_gridloop_vec2(self, f):
"""As ext_gridloop_vec1, but callback to func. w/grid arg."""
a = zeros((self.xcoor.size, self.ycoor.size))
a = ext_gridloop.gridloop_vec2(a, f, func1_extra_args=(self,))
return a
def myfuncf3(self, a):
a[:,:] = myfunc(self.xcoorv, self.ycoorv) # in-place mod.
def ext_gridloop_vec3(self, f):
"""As ext_gridloop_vec2, but callback to class method."""
a = zeros((self.xcoor.size, self.ycoor.size))
a = ext_gridloop.gridloop_vec2(a, f)
return a
def ext_gridloop2_str(self, f77_name):
"""
Call an interface to ext_gridloop.gridloop2, which avoids
callbacks to Python and calls the f77_name F77 function
instead.
"""
a = ext_gridloop.gridloop2_str(self.xcoor, self.ycoor,
f77_name)
return a
def ext_gridloop_noalloc(self, f77_name, a):
"""
As ext_gridloop2_str, but a is intent(in,out), i.e., there is
no allocation of a in the wrapper code. If the function
is called a large number of times (as in our efficiency
tests), intent(in,out) increases the performance.
"""
a = ext_gridloop.gridloop_noalloc(a, self.xcoor, self.ycoor,
f77_name)
return a
def ext_gridloop2_fcb(self):
"""As ext_gridloop2, but compiled Fortran callback func."""
import callback
a = callback.gridloop2_fcb(self.xcoor, self.ycoor)
return a
def ext_gridloop2_fcb_compile(self, fstr):
if not isinstance(fstr, str):
raise TypeError, \
'fstr must be string expression, not %s' % type(fstr)
# generate Fortran source
source = """
real*8 function fcb(x, y)
real*8 x, y
fcb = %s
return
end
subroutine gridloop2_fcb(a, xcoor, ycoor, nx, ny)
integer nx, ny
real*8 a(nx,ny), xcoor(nx), ycoor(ny)
Cf2py intent(out) a
Cf2py intent(in) xcoor
Cf2py intent(in) ycoor
Cf2py depend(nx,ny) a
real*8 fcb
external fcb
call gridloop2(a, xcoor, ycoor, nx, ny, fcb)
return
end
""" % fstr
# compile callback code and link with ext_gridloop.so:
f2py_args = "--fcompiler=Gnu --build-dir=tmp2"\
" -DF2PY_REPORT_ON_ARRAY_COPY=1 "\
" ./ext_gridloop.so"
r = f2py.compile(source, modulename='callback',
extra_args=f2py_args, verbose=True,
source_fn='_cb.f')
if r:
print 'unsuccessful compilation'; sys.exit(1)
import callback # see if we can import successfully
def ext_gridloop2_fcb_ptr(self):
"""As ext_gridloop2, but compiled Fortran callback func."""
from callback import fcb
a = ext_gridloop.gridloop2(self.xcoor, self.ycoor,
fcb._cpointer)
return a
def ext_gridloop2_fcb_ptr_compile(self, fstr):
if not isinstance(fstr, StringFunction):
raise TypeError, \
'fstr must be StringFunction, not %s' % type(fstr)
source = fstr.F77_code('fcb')
f2py_args = "--fcompiler=Gnu --build-dir=tmp2"
r = f2py.compile(source, modulename='callback',
extra_args=f2py_args, verbose=True,
source_fn='_cb.f')
if r:
print 'unsuccessful compilation'; sys.exit(1)
import callback # see if we can import successfully
def ext_gridloop2_compile(self, fstr):
if not isinstance(fstr, str):
raise TypeError, \
'fstr must be string expression, not %s' % type(fstr)
# generate Fortran source for gridloop2:
source = """
subroutine gridloop2(a, xcoor, ycoor, nx, ny)
integer nx, ny
real*8 a(nx,ny), xcoor(nx), ycoor(ny)
Cf2py intent(out) a
Cf2py depend(nx,ny) a
integer i,j
real*8 x, y
do j = 1,ny
y = ycoor(j)
do i = 1,nx
x = xcoor(i)
a(i,j) = %s
end do
end do
return
end
""" % fstr
f2py_args = "--fcompiler=Gnu --build-dir tmp1"\
" -DF2PY_REPORT_ON_ARRAY_COPY=1"
r = f2py.compile(source, modulename='ext_gridloop2',
extra_args=f2py_args, verbose=True,
source_fn='_cb.f')
if r:
print 'unsuccessful compilation'; sys.exit(1)
import ext_gridloop2 # see if we can import successfully
def ext_gridloop2_v2(self):
"""
As ext_gridloop2, but the Fortran gridloop2 function was
generated and compiled in Python (in ext_gridloop2_compile).
"""
import ext_gridloop2
return ext_gridloop2.gridloop2(self.xcoor, self.ycoor)
def ext_gridloop2_weave(self, fstr):
"""Migrate loop to C++ with aid of Weave."""
try:
from scipy import weave
except ImportError:
print 'Could not import weave.\nContinue...'
return
if not isinstance(fstr, str):
raise TypeError, \
'fstr must be string expression, not %s' % type(fstr)
# the callback function is now coded in C++
# (fstr must be valid C++ code):
extra_code = r"""
double cppcb(double x, double y) {
return %s;
}
""" % fstr
# the loop in C++ (with Blitz++ array syntax):
code = r"""
int i,j;
for (i=0; i<nx; i++) {
for (j=0; j<ny; j++) {
a(i,j) = cppcb(xcoor(i), ycoor(j));
}
}
"""
nx = self.nx; ny = self.ny
a = zeros((nx, ny))
xcoor = self.xcoor; ycoor = self.ycoor
err = weave.inline(code, ['a', 'nx', 'ny', 'xcoor', 'ycoor'],
type_converters=weave.converters.blitz,
support_code=extra_code, compiler='gcc')
# a is filled
return a
def ext_gridloop1_instant(self, fstr):
if not isinstance(fstr, str):
raise TypeError, \
'fstr must be string expression, not %s' % type(fstr)
# generate C source for gridloop1:
# (no call to C function f(x,y), fstr is inserted in the loop)
source = """
void gridloop1(double *a, int nx, int ny,
double *xcoor, double *ycoor)
{
# define index(a, i, j) a[i*ny + j]
int i, j; double x, y;
for (i=0; i<nx; i++) {
for (j=0; j<ny; j++) {
x = xcoor[i]; y = ycoor[j];
index(a, i, j) = %s
}
}
}
""" % fstr
try:
from instant import inline_with_numpy
a = zeros((self.nx, self.ny))
arrays = [['nx', 'ny', 'a'],
['nx', 'xcoor'],
['ny', 'ycoor']]
self.gridloop1_instant = \
inline_with_numpy(source, arrays=arrays)
except:
self.gridloop1_instant = None
def dump(self, a):
"""Nice printout of a 2D array a."""
for i in xrange(a.shape[0]):
for j in xrange(a.shape[1]):
print 'value at (%g,%g) \t = a[%d,%d] = %g' % \
(self.xcoor[i], self.ycoor[j], i, j, a[i,j])
def gridloop_psyco_init(self, method):
"""Try to accelerate Grid2D.gridloop with psyco."""
# define method self.gridloop_psyco:
try:
import psyco
self.gridloop_psyco = psyco.proxy(method)
except ImportError:
self.gridloop_psyco = method
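# Hedged usage sketch (assumes Grid2D provides a plain-Python gridloop(f) method,
# as the docstring above implies); the call pattern is illustrative only:
#   g = Grid2Deff(dx=0.5, dy=1)
#   g.gridloop_psyco_init(g.gridloop)
#   a = g.gridloop_psyco(myfunc)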
def f1(x,y):
print 'x+2*y =',x+2*y
return x+2*y
def verify1():
"""Basic test of the extension module."""
g = Grid2Deff(dx=0.5, dy=1)
f_exact = g(f1) # NumPy computation
expression1 = StringFunction('x + 2*y',
independent_variables=('x','y'),
globals=globals())
f = g.ext_gridloop1(f1)
print 'f computed by external gridloop1 function and f1:\n', f
if allclose(f, f_exact, atol=1.0E-10, rtol=1.0E-12):
print 'f is correct'
f = g.ext_gridloop2(f1)
print 'f computed by external gridloop2 function and f1:\n', f
if allclose(f, f_exact, atol=1.0E-10, rtol=1.0E-12):
print 'f is correct'
f = g.ext_gridloop1(expression1)
print 'f computed by external gridloop1 function and StringFunction:\n', f
if allclose(f, f_exact, atol=1.0E-10, rtol=1.0E-12):
print 'f is correct'
f = g.ext_gridloop2(expression1)
print 'f computed by external gridloop2 function and StringFunction:\n', f
if allclose(f, f_exact, atol=1.0E-10, rtol=1.0E-12):
print 'f is correct'
fast_func = expression1.__call__
f = g.ext_gridloop2(fast_func)
print 'f computed by external gridloop2 function and StringFunction.__call__:\n', f
if allclose(f, f_exact, atol=1.0E-10, rtol=1.0E-12):
print 'f is correct'
f = g(expression1)
print 'f computed by __call__ and StringFunction:\n', f
if allclose(f, f_exact, atol=1.0E-10, rtol=1.0E-12):
print 'f is correct'
# check printing:
print 'array seen from Python:'
g.dump(f)
if 'dump' in dir(ext_gridloop):
print 'array seen from Fortran (transposed, but right values):'
ext_gridloop.dump(f, g.xcoor, g.ycoor)
def myfunc(x, y):
return sin(x*y) + 8*x
def myfuncf1(a, xcoor, ycoor, nx, ny):
"""Vectorized function to be called from extension module."""
#print 'myfuncf1; type of args:',type(a),type(xcoor),type(nx)
x = xcoor[:,newaxis]
y = ycoor[newaxis,:]
a[:,:] = myfunc(x, y) # in-place modification of a
print 'myfuncf1, a=',a
def myfuncf2(a, g):
"""Vectorized function to be called from extension module."""
#print 'myfuncf2; type of args:',type(a),type(g)
a[:,:] = myfunc(g.xcoorv, g.ycoorv) # in-place modification of a
def verify2(n=3):
"""
Test of some methods in class Grid2Deff that call up
some F77 routines for improving the efficiency of callbacks
to Python.
"""
if not 'gridloop_vec2' in dir(ext_gridloop):
raise ImportError, 'verify2 | |
CSV precinct-index file.
"""
DISTRICT_HEADERS = ("Assembly", "BART", "Congressional", "Senatorial", "Supervisorial")
name = "Precinct Index File"
def __init__(self):
self.areas_info = AreasInfo()
def get_parse_return_value(self):
return self.areas_info
def parse_first_line(self, line):
# Skip the header line.
pass
def add_precinct_to_area(self, area_type, area_id, precinct_id):
try:
precinct_ids = area_type[area_id]
except KeyError:
precinct_ids = set()
area_type[area_id] = precinct_ids
precinct_ids.add(precinct_id)
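# Equivalent one-liner (illustration only, not used above):
#   area_type.setdefault(area_id, set()).add(precinct_id)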
def parse_line(self, line):
# Here are the column headers of the file:
# VotingPrecinctID,VotingPrecinctName,MailBallotPrecinct,BalType,
# Assembly,BART,Congressional,Neighborhood,Senatorial,Supervisorial
precinct_id, values = parse_precinct_index_line(line)
precinct_name = values[1]
if precinct_id in self.areas_info.precincts:
text = self.log_line("precinct_id %d occurred again in line" % precinct_id)
_log.warning(text)
return
self.areas_info.precincts[precinct_id] = precinct_name
# Includes: Assembly,BART,Congressional,Neighborhood,Senatorial,Supervisorial
values = values[4:]
nbhd_label = values.pop(3)
for district_type_name, area_id in zip(self.DISTRICT_HEADERS, values):
area_type = self.areas_info.get_area_type(district_type_name)
self.add_precinct_to_area(area_type, int(area_id), precinct_id)
area_type = self.areas_info.neighborhoods
self.add_precinct_to_area(area_type, nbhd_label, precinct_id)
self.areas_info.city.add(precinct_id)
class ElectionMetaParser(Parser):
"""
Parser for a TEXT report from the WinEDS Reporting Tool.
This class is responsible for reading and building election metadata
from the file but not election vote totals.
When parsing, the parser also performs validation on the file to ensure
that our assumptions about the file format and data are correct.
"""
name = "Results File (pass #1, for election metadata)"
def __init__(self, info):
"""
Arguments:
info: an ElectionMeta object.
"""
self.election_info = info
# The following values are for convenience.
self.contests = info.contests
self.choices = info.choices
self.parties = info.parties
self.precincts = info.precincts
self.raw_contests = info.raw_contests
def make_choice(self, contest_id, choice_name):
"""Create the "choice" object for a given choice name, etc."""
if choice_name in ('Under Vote', 'Over Vote'):
contest_id = None
return contest_id, choice_name
def save_choice(self, choice_id, contest_id, choice_name):
choice = self.make_choice(contest_id, choice_name)
if choice[0] is None:
if choice_name == "Under Vote":
self.election_info.undervote_id = choice_id
elif choice_name == "Over Vote":
self.election_info.overvote_id = choice_id
else:
raise AssertionError("unexpected choice name for contest id None: %r" % choice_name)
_log.info("setting id=%r for: %r" % (choice_id, choice_name))
self.choices[choice_id] = choice
def parse_first_line(self, line):
char_count = len(line.rstrip('\r\n'))
try:
assert char_count in (175, 205)
except AssertionError:
raise Exception("unexpected number of characters in first line: %d" % char_count)
has_reporting_type = (char_count == 205)
_log.info("detected file format: has_reporting_type=%r" % has_reporting_type)
self.election_info.has_reporting_type = has_reporting_type
super().parse_first_line(line)
def process_non_district_line(self, contest_number, contest_name, choice_id, choice_name, party_code):
"""
Process a line that does not have a district name.
These lines are summary lines that do not correspond to a contest.
For example--
0001001110100484 REGISTERED VOTERS - TOTAL VOTERS Pct 1101
0001001110100000NON REGISTERED VOTERS - No Party Preference VOTERS Pct 1101
0002001110100141 BALLOTS CAST - TOTAL BALLOTS CAST Pct 1101
"""
# We validate our expectations about the line and skip storing any
# contest or choices, but we do store party/registration type.
assert contest_number in (1, 2)
total_type, group_name = contest_name.split(" - ")
if contest_number == 1:
expected_total_type = "REGISTERED VOTERS"
expected_choice_name = "VOTERS"
else:
# Then contest_number is 2.
expected_total_type = "BALLOTS CAST"
expected_choice_name = "BALLOTS CAST"
assert_equal(total_type, expected_total_type, desc="total_type")
assert_equal(choice_name, expected_choice_name, desc="choice_name")
if group_name == "TOTAL":
assert_equal(party_code, "", desc="party_code")
assert_equal(choice_id, 1, desc="choice_id")
return
party = Party(id=choice_id, code=party_code, name=group_name)
added = utils.add_to_dict(self.parties, choice_id, party)
if added:
msg = "added party: {0}".format(party)
_log.info(msg)
assert choice_name == expected_choice_name
def parse_line(self, line):
"""
This function parses a single line, validates our assumptions
about the file format, and then stores any new election metadata
in the ElectionMeta object associated with the current
parser instance.
This function populates election_info.choices but does not
populate contest.choice_ids for the objects in election_info.contests.
We populate contest.choice_ids from election_info.choices
afterwards.
"""
fields = split_line_fixed(line)
data, contest_name, choice_name, precinct_name, district_name, reporting_type = fields
# Validate our assumptions about the initial data chunk.
#
# If the party is present in the "data" string, then the length
# will be longer than 16.
assert len(data) >= 16
assert data[0] == '0'
choice_id, contest_number, precinct_id, vote_total, party_code = parse_data_chunk(data)
# We don't need to know the vote_total here.
del vote_total
# Store the precinct if it is new.
utils.add_to_dict(self.precincts, precinct_id, precinct_name, desc="precincts")
if not district_name:
self.process_non_district_line(contest_number, contest_name, choice_id,
choice_name, party_code=party_code)
return
# Otherwise, the line corresponds to a real contest.
contest_id = make_contest_id(contest_number, contest_name)
value = "{0} {1} {2}".format(contest_number, contest_name, party_code)
utils.add_to_set(self.raw_contests, value, "raw_contests")
contests = self.contests
try:
contest = contests[contest_id]
except KeyError:
logging.debug("adding contest_id: %r" % (contest_id, ))
contest = ContestInfo(contest_name, number=contest_number, district_name=district_name,
party_code=party_code)
contests[contest_id] = contest
else:
try:
assert contest.name == contest_name
except AssertionError:
raise Exception("contest_id=%r, contest_name=%r, contest.name=%s" %
(contest_id, contest_name, contest.name))
try:
assert contest.district_name == district_name
except AssertionError:
raise Exception("district_name=%r, contest.district_name=%s" %
(district_name, contest.district_name))
# The following line is a no-op for contest choices after the
# first in a precinct.
contest.precinct_ids.add(precinct_id)
try:
prior_choice = self.choices[choice_id]
except KeyError:
logging.debug("adding choice_id %r: %s" % (choice_id, choice_name))
self.save_choice(choice_id, contest_id, choice_name)
else:
new_choice = self.make_choice(contest_id, choice_name)
try:
assert new_choice == prior_choice
except AssertionError:
raise Exception("choice id %d (name=%r) for contest id %d already assigned to: "
"contest_id=%r, choice_name=%r" %
(choice_id, choice_name, contest_id, prior_choice[0], prior_choice[1]))
class ResultsParser(Parser):
"""
When parsing, this class does not validate the file in the same way
that the ElectionMetaParser does.
"""
name = "Results File (pass #2, for vote totals)"
def __init__(self, results):
"""
Arguments:
results: an ElectionResults object.
"""
self.contests = results.contests
self.registered = results.registered
self.voted = results.voted
def add_vote_total(self, totals, key, vote_total):
"""
Arguments:
totals: a dict mapping keys to integer vote totals.
"""
try:
assert isinstance(totals, dict)
except AssertionError:
raise Exception("totals has unexpected type: %r" % totals)
try:
totals[key]
except KeyError:
totals[key] = vote_total
else:
raise Exception("total for key=%d was already stored" % (key, ))
def parse_line(self, line):
fields = split_line_fixed(line)
data_field = fields.data_field
reporting_type = fields.reporting_type
r_index = get_reporting_index(reporting_type)
choice_id, contest_number, precinct_id, vote_total, party_code = parse_data_chunk(data_field)
contest_id = contest_number, fields.contest_name
if vote_total < 0:
text = self.log_line("negative ballot total %d: choice_id=%d, "
"contest_number=%d, precinct_id=%d" %
(vote_total, choice_id, contest_number, precinct_id))
_log.warning(text)
if contest_number in (1, 2) and party_code:
# For now we don't record totals broken down by party.
# TODO: change this?
return
if contest_number == 1:
totals = self.registered
totals_key = precinct_id
elif contest_number == 2:
totals = self.voted[precinct_id]
totals_key = r_index
else:
# Otherwise, we have a normal contest with candidates.
contest_totals = self.contests[contest_id]
try:
precinct_totals = contest_totals[precinct_id]
except KeyError:
raise Exception("missing precinct totals for contest_id=%r, precinct_id=%r" %
(contest_id, precinct_id))
totals = precinct_totals[r_index]
totals_key = choice_id
self.add_vote_total(totals, totals_key, vote_total)
def parse_export_file(path):
"""
Parse a WinEDS export file, and return an ElectionMeta object.
"""
election_info = ElectionMeta()
parser = ElectionMetaParser(election_info)
parser.parse_path(path)
choices = election_info.choices
contest_map = election_info.contests
# Prefix all contests whose name occurs more than once with its party.
contests = list(contest_map.values())
raw_name_counts = {}
for contest in contests:
raw_name = contest.raw_name
try:
raw_name_counts[raw_name] += 1
except KeyError:
raw_name_counts[raw_name] = 1
for contest in contests:
raw_name = contest.raw_name
if raw_name_counts[raw_name] > 1:
contest.name = "{0} - {1}".format(contest.party_code, raw_name)
# Add the available choices to each contest.
for item in choices.items():
choice_id, (contest_id, choice_name) = item
if contest_id is None:
# Then the choice is an undervote or overvote and applies
# to all contests.
contests = contest_map.values()
else:
contests = (contest_map[contest_id], )
for contest in contests:
contest.choice_ids.add(choice_id)
return election_info
def parse_export_file_with_check(areas_info, wineds_path):
election_info = parse_export_file(wineds_path)
# Check that the precincts in the precinct index file match the
# precincts in the results file.
for i, (precinct_id, wineds_precinct_id) in enumerate(zip(sorted(areas_info.city),
sorted(election_info.precincts.keys())), start=1):
try:
assert precinct_id == wineds_precinct_id
except AssertionError:
if precinct_id < wineds_precinct_id:
msg = "WinEDS file does not contain precinct id %d" % precinct_id
else:
msg = "WinEDS file contains unknown precinct id %d" % precinct_id
msg += ": %s" % wineds_path
raise Exception(msg)
return election_info
def digest_input_files(precinct_index_path, wineds_path):
"""
Read the input files and return a 3-tuple of objects of the following
classes: ElectionMeta, AreasInfo, ElectionResults.
"""
areas_info = parse_precinct_file(precinct_index_path)
# We parse the file in two passes to simplify the logic and make the
# code easier to understand.
#
# In the first pass, we read all the election "metadata" (contests,
# choices, precincts, etc) and validate the integer ID's, etc, to
# make sure that all of our assumptions | |
<filename>ex05/my_RBM_tf2.py
import tensorflow as tf
import numpy as np
import datetime
import math
import sys
from tqdm import tqdm
import random
import matplotlib.pyplot as plt
import deepdish as dd
'''
class monitoring():
def reconstruction_cross_e():
def ava_sq_error():
def pseudo_log_l():
'''
class RBM():
def __init__(self, visible_dim, hidden_dim, number_of_epochs, picture_shape, batch_size, optimizer='cd',n_test_samples=100, init_learning_rate = 0.8):
self._n_epoch = number_of_epochs
self._v_dim = visible_dim
self._h_dim = hidden_dim
self._l_r = init_learning_rate
self._batch_size = batch_size
self._picture_shape = picture_shape
self.n_test_samples = n_test_samples
self.optimizer = optimizer
self._current_time = datetime.datetime.now().strftime("%Y%m%d-%H%M%S")
self._log_dir = 'logs/scalars/' + self._current_time + '/train'
self._file_writer = tf.summary.create_file_writer(self._log_dir)
self._file_writer.set_as_default()
#self.model = self.model
self.visible_biases = tf.Variable(tf.random.uniform([1, visible_dim], 0, 0.1), tf.float32, name="visible_biases")
self.hidden_biases = tf.Variable(tf.random.uniform([1, hidden_dim], 0, 0.1), tf.float32, name="hidden_biases")
self.weights = tf.Variable(tf.random.normal([hidden_dim, visible_dim], mean = 0.0, stddev = 0.1), tf.float32, name="weights")
'''
@tf.function
def model(self):
self.visible_biases = tf.Variable(tf.random.uniform([1, self._v_dim], 0, 1), tf.float32, name="visible_biases")
self.hidden_biases = tf.Variable(tf.random.uniform([1, self._h_dim], 0, 1), tf.float32, name="hidden_biases")
self.weights = tf.Variable(tf.random.normal([self._h_dim,self._v_dim], mean = 0.0, stddev = 0.01),tf.float32, name="weights")
#self.learning_rate = tf.Variable(tf.fill([self._v_dim, self._h_dim], learning_rate), name="learning_rate")
'''
def save_model(self):
"""
Save the current RBM model as a .h5 file containing a dictionary with keys: {'weights', 'visible biases', 'hidden biases'}
"""
model_dict = {'weights': np.asarray(self.weights), 'visible biases': np.asarray(self.visible_biases), 'hidden biases': np.asarray(self.hidden_biases)}
return dd.io.save('results/models/'+self._current_time+'model.h5', model_dict)
def from_saved_model(self,model_path):
"""
:param model_path: string
path of .h5 file containing dictionary of the model with keys: {'weights', 'visible biases', 'hidden biases' }
:return: loaded model
"""
model_dict = dd.io.load(model_path)
self.weights = model_dict['weights']
self.visible_biases = model_dict['visible biases']
self.hidden_biases = model_dict['hidden biases']
return self
#@tf.function
def calculate_state(self, probability):
"""
Given the probability p(x'=1|W,b) = sigmoid(Wx+b), computes the next state by sampling each unit from the corresponding Bernoulli distribution.
x and x' can be the visible and hidden layers respectively, or vice versa.
:param probability: array, shape(visible_dim) or shape(hidden_dim)
:return: array , shape(visible_dim) or shape(hidden_dim)
0,1 state of each unit in the layer
"""
binom = tf.concat([1-probability,probability],0)
prob_re = tf.reshape(binom,(2,probability.get_shape()[1]))
#print("check summation probability:", tf.reduce_sum(prob_re,0)) # check prob summation to 1
return tf.reshape(tf.cast(tf.random.categorical(tf.math.log(tf.transpose(prob_re)),1), tf.float32), (1,probability.get_shape()[1]))
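# A simpler, equivalent sampling sketch (illustration only; assumes probability
# has shape (1, dim) as above). Each unit is set to 1 with its own probability.
def calculate_state_uniform_sketch(self, probability):
    return tf.cast(tf.random.uniform(tf.shape(probability)) < probability, tf.float32)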
#@tf.function
def sample(self, inpt = [] ,n_step_MC=1,p_0=0.5,p_1=0.5):
"""
Sample from the RBM with n_step_MC steps markov chain.
:param inpt: array shape(visible_dim)
It is possible to start the markov chain from a given point from the dataset or from a random state
:param n_step_MC: scalar
number of markov chain steps to sample.
:return: visible_states_1: array shape(visible_dim)
visible state after n_step_MC steps
visible_probabilities_1: array shape(visible_dim)
probabilities from which visible_states_1 is sampled
"""
if len(inpt) == 0:
#inpt = tf.constant(np.random.randint(2, size=self._v_dim), tf.float32)
inpt = tf.constant(np.random.choice([0,1], size=self._v_dim,p=[p_0,p_1]), tf.float32)
hidden_probabilities_0 = tf.sigmoid(tf.add(tf.tensordot(self.weights, inpt,1), self.hidden_biases)) # dimension W + 1 row for biases
hidden_states_0 = self.calculate_state(hidden_probabilities_0)
evolution_MC = [inpt]
for _ in range(n_step_MC): #gibbs update
visible_probabilities_1 = tf.sigmoid(tf.add(tf.tensordot(hidden_states_0,self.weights,1), self.visible_biases)) # dimension W + 1 row for biases
visible_states_1 = self.calculate_state(visible_probabilities_1)
hidden_probabilities_1 = tf.sigmoid(tf.add(tf.tensordot(visible_states_1, tf.transpose(self.weights),1), self.hidden_biases)) # dimension W + 1 row for biases
hidden_states_1 = self.calculate_state(hidden_probabilities_1)
hidden_states_0 = hidden_states_1
evolution_MC.append(visible_states_1)
return visible_states_1,visible_probabilities_1,inpt,evolution_MC
#@tf.function
def contr_divergence(self, data_point, n_step_MC=1):
"""
Perform contrastive divergence given a data point.
:param data_point: array, shape(visible layer)
data point sampled from the batch
:param n_step_MC: int
steps of the Markov chain for the sampling (CD1, CD2, ...)
:return: delta_w: array shape(hidden_dim, visible_dim)
Array of the same shape of the weight matrix which entries are the gradients dw_{ij}
delta_vb: array, shape(visible_dim)
Array of the same shape of the visible biases which entries are the gradients d_vb_i
delta_vb: array, shape(hidden_dim)
Array of the same shape of the hidden biases which entries are the gradients d_hb_i
"""
"""
*************************
YOUR TURN****************
*************************
"""
return delta_w, delta_vb, delta_hb, visible_states_1
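# Hedged reference sketch of CD-k (NOT the author's solution; the exercise above
# is intentionally left blank). It only uses quantities already defined in this
# class and returns gradients with the shapes documented in contr_divergence.
def contr_divergence_sketch(self, data_point, n_step_MC=1):
    data_point = tf.reshape(tf.cast(data_point, tf.float32), (1, self._v_dim))
    # positive phase: hidden activations driven by the data
    hidden_probabilities_0 = tf.sigmoid(tf.add(tf.tensordot(data_point, tf.transpose(self.weights), 1), self.hidden_biases))
    hidden_states = self.calculate_state(hidden_probabilities_0)
    # negative phase: k Gibbs steps starting from the data-driven hidden state
    for _ in range(n_step_MC):
        visible_probabilities_1 = tf.sigmoid(tf.add(tf.tensordot(hidden_states, self.weights, 1), self.visible_biases))
        visible_states_1 = self.calculate_state(visible_probabilities_1)
        hidden_probabilities_1 = tf.sigmoid(tf.add(tf.tensordot(visible_states_1, tf.transpose(self.weights), 1), self.hidden_biases))
        hidden_states = self.calculate_state(hidden_probabilities_1)
    # gradients: <h v>_data - <h v>_model, using hidden probabilities to reduce variance
    delta_w = tf.tensordot(tf.transpose(hidden_probabilities_0), data_point, 1) \
        - tf.tensordot(tf.transpose(hidden_probabilities_1), visible_states_1, 1)
    delta_vb = data_point - visible_states_1
    delta_hb = hidden_probabilities_0 - hidden_probabilities_1
    return delta_w, delta_vb, delta_hb, visible_states_1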
#@tf.function
def persistent_contr_divergence(self, data):
"""
Persistent CD [Tieleman08] uses another approximation for sampling from p(v,h).
It relies on a single Markov chain, which has a persistent state (i.e., not restarting
a chain for each observed example). For each parameter update, we extract new samples by
simply running the chain for k-steps. The state of the chain is then preserved for subsequent updates.
"""
return
def reconstruction_cross_entropy(self,test_points, plot=True):
"""
Compute the reconstruction cross entropy = - \sum_{i=1}^d [z_i log(p(z_i)) + (1-z_i) log(1-p(z_i))] where z_i
is the i-th component of the reconstructed vector and p(z_i) = sigmoid(Wx+b)_i.
:param test_point: array like
Random point sampled from the test set
:param plot: bool
if True, plot the reconstruction together with the sampled test point for comparison
:return: scalar
Reconstruction cross entropy
"""
r_ce_list=[]
for vec in test_points:
reconstruction,prob,_,_ = self.sample(inpt=vec)
#tf.where is needed to have 0*-\infty = 0
r_ce = tf.multiply(reconstruction, tf.where(tf.math.is_inf(tf.math.log(prob)),np.zeros_like(tf.math.log(prob)),tf.math.log(prob))) \
+ tf.multiply((1-reconstruction), tf.where(tf.math.is_inf(tf.math.log(1-prob)),np.zeros_like(tf.math.log(1-prob)), tf.math.log(1-prob)))
r_ce_list.append(-tf.reduce_sum(r_ce,1)[0])
if plot:
reconstruction_plot= self.sample(inpt=test_points[1,:])[0]
fig, axes = plt.subplots(nrows=1, ncols=2)
axes[0].imshow(test_points[1,:].reshape(self._picture_shape),cmap='Greys')
axes[0].set_title("Original Image")
axes[1].imshow(np.asarray(reconstruction_plot).reshape(self._picture_shape), cmap='Greys')
axes[1].set_title("Reconstruction")
plt.show(block=False)
plt.pause(3)
plt.close()
return np.average(r_ce_list)
def average_squared_error(self, test_points):
"""
Compute the mean squared error between a test vector and its reconstruction performed by the RBM, ||x - z||^2.
:param test_point: array, shape(visible_dim)
data point to test the reconstruction
:return: sqr: float
error
"""
ase_list=[]
for vec in test_points:
reconstruction= self.sample(inpt = vec)[0]
as_e = tf.pow(vec - reconstruction,2)
sqr = tf.reduce_sum(as_e,1)/self._v_dim
ase_list.append(sqr[0])
return np.mean(ase_list)
def free_energy(self,test_point):
"""
Compute the free energy of the RBM, it is useful to compute the pseudologlikelihood.
F(v) = - log \sum_h e^{-E(v,h)} = -bv - \sum_i \log(1 + e^{c_i + W_i v}) where v= visible state, h = hidden state,
b = visible biases, c = hidden biases, W_i = i-th column of the weights matrix
:param test_point: array, shape(visible_dim)
random point sampled from the test set
:return: scalar
"""
bv = tf.tensordot(test_point, tf.transpose(self.visible_biases),1)
wx_b = tf.tensordot(self.weights,test_point,1) + self.hidden_biases
hidden_term = tf.reduce_sum(tf.math.log(1+tf.math.exp(wx_b)))
return np.asarray(-hidden_term -bv)[0]
def pseudo_log_likelihood(self):
return self
def KL_divergence(self):
return self
def exp_decay_l_r(self,epoch):
"""
When training a model, it is often recommended to lower the learning rate as the training progresses.
This function applies an exponential decay function to a provided initial learning rate.
:param epoch: scalar
:return: scalar
"""
k = 0.1
lrate = self._l_r * np.exp(-k * epoch)
return lrate
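# Worked example (sketch): with init_learning_rate = 0.8 and k = 0.1,
# epoch 10 gives 0.8 * exp(-1.0) ~ 0.294.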
def variable_summaries(self,var, step):
with tf.name_scope('summaries'):
mean = tf.reduce_mean(var)
tf.summary.scalar('mean', mean, step)
with tf.name_scope('stddev'):
stddev = tf.sqrt(tf.reduce_mean(tf.square(var - mean)))
tf.summary.scalar('stddev', stddev, step)
tf.summary.scalar('max', tf.reduce_max(var), step)
tf.summary.scalar('min', tf.reduce_min(var), step)
tf.summary.histogram('histogram', var, step = step)
def train(self, data):
"""
This function shuffles the dataset, creates #data_train/batch_size mini batches and performs contrastive divergence on each vector of the batch.
The update of the parameters is performed only at the end of each batch by taking the average of the gradients over the batch.
In the last part, random data points are sampled from the test set to calculate the reconstruction error. The entire procedure is repeated
_n_epoch times.
:param data: dict
dictionary of numpy arrays with labels ['x_train','y_train','x_test', 'y_test']
:return: self
"""
print('Start training...')
for epoch in range(self._n_epoch):
sys.stdout.write('\r')
print('Epoch:',epoch, '/', self._n_epoch)
np.random.shuffle(data['x_train'])
with tf.name_scope('Learning rate'):
learning_rate = self.exp_decay_l_r(epoch)
for i in tqdm(range(0, data['x_train'].shape[0], self._batch_size)):
x_train_mini = data['x_train'][i:i+self._batch_size]
batch_dw = np.zeros((self._h_dim, self._v_dim, self._batch_size)) #d_w,d_v,d_h don't know why but this is not working
batch_dvb = np.zeros((self._v_dim, self._batch_size))
batch_dhb = np.zeros((self._h_dim, self._batch_size))
# I should create an optimizer class at the moment is just if
if self.optimizer == 'cd':
for ind,vec in enumerate(x_train_mini):
#print(ind)
batch_dw[:,:,ind],batch_dvb[:,ind],batch_dhb[:,ind],_ = self.contr_divergence(vec)
#Persistent contrastive divergence
elif self.optimizer == 'pcd':
start_point = x_train_mini[np.random.randint(0,self._batch_size,1)].reshape(self._v_dim)
for ind in range(self._batch_size):
batch_dw[:, :, ind], batch_dvb[:, ind], batch_dhb[:, ind], last_state = self.contr_divergence(start_point)
start_point = tf.reshape(last_state,(self._v_dim,))
dw = np.average(batch_dw,2)
dvb = np.average(batch_dvb,1)
dhb = np.average(batch_dhb,1)
self.weights = self.weights + learning_rate * dw
self.visible_biases = self.visible_biases + learning_rate* dvb
self.hidden_biases = self.hidden_biases + learning_rate* dhb
#Save model every epoch
self.save_model()
#test every epoch
np.random.shuffle(data['x_test'])
rnd_test_points_idx = np.random.randint(low = 0,high = data['x_test'].shape[0], size=self.n_test_samples) #sample size random points indexes from test
with tf.name_scope('Errors'):
rec_error = self.reconstruction_cross_entropy(data['x_test'][rnd_test_points_idx,:])
sq_error = self.average_squared_error(data['x_test'][rnd_test_points_idx,:])
free_energy = self.free_energy(data['x_test'][rnd_test_points_idx[0],:])
tf.summary.scalar('rec_error', rec_error, step = epoch)
tf.summary.scalar('squared_error', sq_error, step = epoch)
tf.summary.scalar('Free Energy', free_energy, step = epoch)
tf.summary.scalar('Learning rate', learning_rate, step = epoch)
with tf.name_scope('Weights'):
self.variable_summaries(self.weights, step = epoch)
with tf.name_scope('Hidden biases'):
self.variable_summaries(self.hidden_biases, step = epoch)
with tf.name_scope('Visible biases'):
self.variable_summaries(self.visible_biases, step=epoch)
print("epoch %d" % (epoch + 1),"Rec error: %s" % np.asarray(rec_error),"sq_error | |
#Copyright 2011 <NAME>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#These are all the builtins that should by in astropysics instead of pymodelfit
"""
==================================================
models -- astronomy-specific models for pymodelfit
==================================================
This module makes use of the :mod:`pymodelfit` package to define a number of
models inteded for data-fitting that are astronomy-specific. Note that
:mod:`pymodelfit` was originally written as part of astropysics, and hence it is
tightly integrated (including the Fit GUI, where relevant) to other parts of
astropysics, often using the models in this module.
.. note::
Everything in the :mod:`pymodelfit` package is imported to this package.
This is rather bad form, but is currently done because other parts of
astropysics were written when pymodelfit was part of astropysics. This
may change in the future, though, so it is recommended that user code always
use ``from pymodelfit import ...`` instead of
``from astropysics.models import ...``.
Classes and Inheritance Structure
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
.. inheritance-diagram:: astropysics.models
:parts: 1
Module API
^^^^^^^^^^
"""
from __future__ import division,with_statement
import numpy as np
from pymodelfit.core import *
from pymodelfit.builtins import *
from .spec import HasSpecUnits as _HasSpecUnits
class BlackbodyModel(FunctionModel1DAuto,_HasSpecUnits):
"""
A Planck blackbody radiation model.
Output/y-axis is taken to be specific intensity.
"""
from .constants import h,c,kb
def __init__(self,unit='wl'):
_HasSpecUnits.__init__(self,unit)
self.unit = unit #need to explicitly set the unit to initialize the correct f
self.stephanBoltzmannLaw = self._instanceSBLaw
def f(self,x,A=1,T=5800):
x = x*self._xscaling
if self._phystype == 'wavelength':
val = self._flambda(x,A,T)
elif self._phystype == 'frequency':
val = self._fnu(x,A,T)
elif self._phystype == 'energy':
val = self._fen(x,A,T)
else:
raise ValueError('unrecognized physical unit type!')
return val*self._xscaling
def _flambda(self,x,A=1,T=5800):
h,c,k=self.h,self.c,self.kb
return A*2*h*c*c*x**-5/(np.exp(h*c/(k*T*x))-1)
def _fnu(self,x,A=1,T=5800):
h,c,k=self.h,self.c,self.kb
return A*2*h/c/c*x**3/(np.exp(h*x/(k*T))-1)
def _fen(self,x,A=1,T=5800):
return self._fnu(x,A,T)/self.h
def _applyUnits(self,xtrans,xitrans,xftrans,xfinplace):
pass #do nothing because the checking is done in the f-function
# if self._phystype == 'wavelength': #TODO:check
# self.f = self._flambda
# elif self._phystype == 'frequency':
# self.f = self._fnu
# elif self._phystype == 'energy':
# self.f = self._fen
# else:
# raise ValueError('unrecognized physical unit type!')
@property
def xaxisname(self):
if self._phystype == 'wavelength':
return 'lambda'
elif self._phystype == 'frequency':
return 'nu'
elif self._phystype == 'energy':
return 'E'
yaxisname = 'I'
@property
def rangehint(self):
cen = self.wienDisplacementLaw(None)
return(cen/3,cen*3)
def setIntensity(self):
"""
Sets A so that the output is specific intensity/surface brightness.
"""
self.A = 1
def setFlux(self,radius,distance):
"""
Sets A so that the output is the flux at the specified distance from
a spherical blackbody with the specified radius.
:param radius: Radius of the blackbody in cm
:type radius: float
:param distance: distance to the blackbody in cm
:type distance: float
"""
from .phot import intensity_to_flux
self.A = intensity_to_flux(radius,distance)
def getFlux(self,x,radius=None,distance=None):
"""
Returns the flux density at the requested wavelength for a blackbody
of the given radius at a specified distance.
:param x: x-value of the model
:type x: float
:param radius: radius of the blackbody in cm
:type radius: float
:param distance: distance to the blackbody in cm
:type distance: float
:returns: flux/wl at the specified distance from the blackbody
"""
if distance is None:
if radius is None:
pass
else:
distance = self.getFluxDistance(radius)
self.setFlux(radius,distance)
else:
if radius is None:
radius = self.getFluxRadius(distance)
self.setFlux(radius,distance)
else:
self.setFlux(radius,distance)
return self(x)
def getFluxRadius(self,distance):
"""
Determines the radius of a spherical blackbody at the specified distance
assuming the flux is given by the model at the given temperature.
"""
return (self.A*distance*distance/pi)**0.5
def getFluxDistance(self,radius):
"""
Determines the distance to a spherical blackbody of the specified radius
assuming the flux is given by the model at the given temperature.
"""
return (pi*radius*radius/self.A)**0.5
def _getPeak(self):
h,k = self.h,self.kb
if 'wavelength' in self.unit:
b = .28977685 #cm * K
peakval = b/self.T/self._xscaling
elif 'frequency' in self.unit:
a=2.821439 #constant from optimizing BB function
peakval=a/h*k*self.T/self._xscaling
elif 'energy' in self.unit:
raise NotImplementedError
else:
raise RuntimeError('Should never see this - bug in BB code')
return self(peakval)
def _setPeak(self,peakval):
self.A = 1
self.A = peakval/self._getPeak()
peak=property(_getPeak,_setPeak)
def wienDisplacementLaw(self,peakval):
"""
Uses the Wien Displacement Law to calculate the temperature given a peak
*input* location (wavelength, frequency, etc) or compute the peak
location given the current temperature.
:param peakval:
The peak value of the model to use to compute the new temperature.
:type peakval: float or None
:returns:
The temperature for the provided peak location or the peak location
for the current temperature if `peakval` is None.
"""
h,k = self.h,self.kb
if self._phystype == 'wavelength':
b = .28977685 #cm * K
if peakval is None:
out = b/self.T/self._xscaling
else:
out = b/peakval/self._xscaling
elif self._phystype == 'frequency':
a=2.821439 #constant from optimizing BB function
if peakval is None:
out = a*k*self.T/h/self._xscaling
else:
out = peakval*h/a/k/self._xscaling
elif self._phystype == 'energy':
a=2.821439 #constant from optimizing BB function
if peakval is None:
out = a*self.T/h/self._xscaling
else:
out = peakval*h/a/self._xscaling
else:
raise RuntimeError('Should never see this - bug in BB code')
return out
def _instanceSBLaw(self,T=None,area=1):
if T is not None:
self.T = T
return BlackbodyModel.stephanBoltzmannLaw(self.T,area)*self._enscale
@staticmethod
def stephanBoltzmannLaw(T,area=1):
"""
assumes cgs units
"""
h,c,kb=BlackbodyModel.h,BlackbodyModel.c,BlackbodyModel.kb
sigma = 2*pi**5*kb**4*h**-3*c**-2/15
return area*sigma*T**4
class BurkertModel(FunctionModel1DAuto):
r"""
Burkert (1996) profile defined as:
.. math::
\frac{\rho_0 r_0^3}{(r+r_0)(r^2+r_0^2)}
where :attr:`rho0` is the central density.
"""
xaxisname = 'r'
yaxisname = 'rho'
def f(self,r,rho0=1,r0=1):
return rho0*r0**3/((r+r0)*(r**2+r0**2))
@property
def rangehint(self):
return 0,self.r0*3
class EinastoModel(FunctionModel1DAuto):
"""
Einasto profile given by
.. math::
\\ln(\\rho(r)/A) = (-2/\\alpha)((r/r_s)^{\\alpha} - 1) .
:attr:`A` is the density where the log-slope is -2, and :attr:`rs` is the
corresponding radius. The `alpha` parameter defaults to .17 as suggested by
Navarro et al. 2010
.. note::
The Einasto profile is mathematically identical to the Sersic profile
(:class:`SersicModel`), although the parameterization is different. By
convention, Sersic generally refers to a 2D surface brightness/surface
density profile, while Einasto is usually treated as a 3D density
profile.
"""
xaxisname = 'r'
yaxisname = 'rho'
def f(self,r,A=1,rs=1,alpha=.17):
return A*np.exp((-2/alpha)*((r/rs)**alpha - 1))
class ExponentialDiskModel(FunctionModel2DScalarAuto):
"""
A disk with an exponential profile along both vertical and horizontal axes.
The first coordinate is the horizontal/in-disk coordinate (scaled by `l`)
while the second is `z`, i.e.
.. math::
A e^{-|s/l|} e^{-|z/h|}
for `pa` = 0 ; non-0 `pa` rotates the profile counter-clockwise by `pa`
radians.
"""
_fcoordsys='cartesian'
def f(self,inarr,A=1,l=2,h=1,pa=0):
s,z = inarr
sinpa,cospa = np.sin(pa),np.cos(pa)
sr = cospa*s+sinpa*z
zr = -sinpa*s+cospa*z
return A*np.exp(-np.abs(sr/l)-np.abs(zr/h))
@property
def rangehint(self):
bigscale = max(self.h,self.l)
return (-2*bigscale,2*bigscale,-2*bigscale,2*bigscale)
class InclinedDiskModel(FunctionModel2DScalarDeformedRadial):
"""
Inclined Disk model -- identical to
:class:`FunctionModel2DScalarDeformedRadial` but with inclination
(:attr:`inc` and :attr:`incdeg`) and position angle (:attr:`pa` and
:attr:`padeg`) in place of axis-ratios.
"""
def __init__(self,inc=0,pa=0,degrees=True,**kwargs):
super(InclinedDiskModel,self).__init__('sersic',**kwargs)
self.n = 1
if degrees:
self.incdeg = inc
self.padeg = pa
else:
self.inc = inc
self.pa = pa
class RoundBulgeModel(FunctionModel2DScalarSeperable):
"""
A bulge modeled as a radially symmetric sersic profile (by default the
sersic index is kept fixed - to remove this behavior, set the fixedpars
attribute to None)
By default, the 'n' parameter does not vary when fitting.
"""
fixedpars = ('n',)
def __init__(self,Ae=1,re=1,n=4):
super(RoundBulgeModel,self).__init__('sersic')
self.Ae = Ae
self.re = re
self.n = n
class ExponentialSechSqDiskModel(FunctionModel2DScalarAuto):
"""
A disk that is exponential along the horizontal/in-disk (first) coordinate,
and follows a :math:`{\\rm sech}^2(z)` profile along the vertical (second)
coordinate. i.e.
.. math::
A e^{-|s/l|} {\\rm sech}^2 (z/h)
for `pa` = 0 ; non-0 `pa` rotates the profile counter-clockwise by `pa`
radians.
"""
_fcoordsys='cartesian'
def f(self,inarr,A=1,l=2,h=1,pa=0):
s,z = inarr
| |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Wed Jul 10 16:04:15 2019
@author: annaquinlan
Github URL: https://github.com/tidepool-org/Loop/blob/
8c1dfdba38fbf6588b07cee995a8b28fcf80ef69/Loop/Managers/LoopDataManager.swift
"""
# pylint: disable=R0913, R0914, W0105, C0200, R0916
from datetime import timedelta
import warnings
from pyloopkit.carb_store import get_carb_glucose_effects, get_carbs_on_board
from pyloopkit.date import time_interval_since
from pyloopkit.dose import DoseType
from pyloopkit.dose_math import recommended_temp_basal, recommended_bolus
from pyloopkit.dose_store import get_glucose_effects
from pyloopkit.glucose_store import (get_recent_momentum_effects,
get_counteraction_effects)
from pyloopkit.input_validation_tools import (
are_settings_valid, are_glucose_readings_valid, are_carb_readings_valid,
is_insulin_sensitivity_schedule_valid, are_carb_ratios_valid,
are_basal_rates_valid, are_correction_ranges_valid,
are_insulin_doses_valid)
from pyloopkit.insulin_math import find_ratio_at_time
from pyloopkit.loop_math import (combined_sums, decay_effect, subtracting,
predict_glucose)
def update(input_dict):
""" Run data through the Loop algorithm and return the predicted glucose
values, recommended temporary basal, and recommended bolus
Arguments:
input_dict - dictionary containing the following keys/data:
"glucose_dates" -- times of glucose measurements
"glucose_values" -- glucose measurements in mg/dL)
"dose_types" -- types of dose (tempBasal, bolus, etc)
"dose_start_times" -- start times of insulin delivery
"dose_end_times" -- end times of insulin delivery
"dose_values" -- amounts of insulin (U/hr if a basal, U if a bolus)
"dose_delivered_units" -- absolute Units of insulin delivered by a dose (can be None)
"carb_dates" -- times of carbohydrate entries
"carb_values" -- amount of carbohydrates eaten
"carb_absorption_times" -- absorption times for carbohydrate entries)
"settings_dictionary" -- a dictionary containing the needed settings:
- "model" (the insulin model)
- if exponential, format is
[duration of insulin action (in mins), peak (in mins)]
- child model typically peaks at 65 mins
- adult model typically peaks at 75 mins
- if Walsh curve, format is
[duration of insulin action (in *hours*)]
- "momentum_data_interval"
- the interval of glucose data to use for momentum calculation
- in Loop, default is 15 (minutes)
- "suspend_threshold"
- value at which to suspend all insulin delivery (mg/dL)
- "dynamic_carb_absorption_enabled"
- whether carb absorption can be calculated dynamically
(based on deviations in blood glucose levels versus what
would be expected based on insulin alone)
- "retrospective_correction_integration_interval"
- the maximum duration over which to integrate retrospective
correction changes
- in Loop, default is 30 (minutes)
- "recency_interval"
- the amount of time since a given date that data should be
considered valid
- in Loop, default is 15 (minutes)
- "retrospective_correction_grouping_interval"
- the interval over which to aggregate changes in glucose for
retrospective correction
- in Loop, default is 30 (minutes)
- "default_absorption_times"
- the default absorption times to use if unspecified in a
carbohydrate entry
- list of default absorption times in minutes in the format
[fast, medium, slow]
- in Loop, default is [120 (fast), 180 (medium), 240 (slow)]
- "max_basal_rate"
- the maximum basal rate that Loop is allowed to give
- "max_bolus"
- the maximum bolus that Loop is allowed to give or recommend
"sensitivity_ratio_start_times" -- start times for sensitivity ratios
"sensitivity_ratio_end_times" -- end times for sensitivity ratios
"sensitivity_ratio_values" -- the sensitivity ratios in mg/dL per U
"carb_ratio_start_times" -- start times for carb ratios
"carb_ratio_values" -- carb ratios in grams of carbohydrate per U
"basal_rate_start_times" -- start times for basal rates
"basal_rate_values" -- basal rates in U/hour
"basal_rate_minutes" -- basal rate start minutes of offset from midnight
"target_range_start_times" -- start times for the target ranges
"target_range_end_times" -- end times for the target ranges
"target_range_minimum_values" - minimum values of target range in mg/dL
"target_range_maximum_values" - maximum values of target range in mg/dL
last_temp_basal -- list of information about the last temporary basal
in the form
[type of dose,
start time for the basal,
end time for the basal,
value of the basal rate in U/hr]
time_to_calculate_at -- the "now" time and the time at which to
recommend the basal rate and bolus
Output:
Dictionary containing all of the calculated effects, the input
dictionary, the predicted glucose values, and the recommended
temp basal/bolus
"""
glucose_dates = input_dict.get("glucose_dates")
glucose_values = input_dict.get("glucose_values")
dose_types = input_dict.get("dose_types")
dose_starts = input_dict.get("dose_start_times")
dose_ends = input_dict.get("dose_end_times")
dose_values = input_dict.get("dose_values")
dose_delivered_units = input_dict.get("dose_delivered_units")
carb_dates = input_dict.get("carb_dates")
carb_values = input_dict.get("carb_values")
carb_absorptions = input_dict.get("carb_absorption_times")
settings_dictionary = input_dict.get("settings_dictionary")
sensitivity_starts = input_dict.get("sensitivity_ratio_start_times")
sensitivity_ends = input_dict.get("sensitivity_ratio_end_times")
sensitivity_values = input_dict.get("sensitivity_ratio_values")
carb_ratio_starts = input_dict.get("carb_ratio_start_times")
carb_ratio_values = input_dict.get("carb_ratio_values")
basal_starts = input_dict.get("basal_rate_start_times")
basal_rates = input_dict.get("basal_rate_values")
basal_minutes = input_dict.get("basal_rate_minutes")
target_range_starts = input_dict.get("target_range_start_times")
target_range_ends = input_dict.get("target_range_end_times")
target_range_mins = input_dict.get("target_range_minimum_values") or []
target_range_maxes = input_dict.get("target_range_maximum_values")
last_temp_basal = input_dict.get("last_temporary_basal")
time_to_calculate_at = input_dict.get("time_to_calculate_at")
# check that the inputs make sense before doing math with them
if (
not are_settings_valid(settings_dictionary)
or not are_glucose_readings_valid(
glucose_dates, glucose_values,
)
or not are_carb_readings_valid(
carb_dates, carb_values, carb_absorptions
)
or not are_insulin_doses_valid(
dose_types, dose_starts, dose_ends, dose_values
)
or not is_insulin_sensitivity_schedule_valid(
sensitivity_starts, sensitivity_ends, sensitivity_values
)
or not are_carb_ratios_valid(
carb_ratio_starts, carb_ratio_values
)
or not are_basal_rates_valid(
basal_starts, basal_rates, basal_minutes
)
or not are_correction_ranges_valid(
target_range_starts, target_range_ends,
target_range_mins, target_range_maxes
)):
return []
last_glucose_date = glucose_dates[-1]
retrospective_start = (
last_glucose_date
- timedelta(minutes=settings_dictionary.get(
"retrospective_correction_integration_interval") or 30)
)
# calculate a maximum of 24 hours of effects
earliest_effect_date = time_to_calculate_at - timedelta(hours=24)
# if running with counteraction effect data present from a previous run
# (which is how Loop runs), add the dates to the input dictionary
next_effect_date = (
input_dict.get("previous_counteraction_effect_dates")[-1] if
input_dict.get("previous_counteraction_effect_dates")
else earliest_effect_date
)
# Allow input effects to be passed through dict for testing purposes
if (input_dict.get("momentum_effect_dates") and
input_dict.get("momentum_effect_values")):
momentum_effect_dates = input_dict.get("momentum_effect_dates")
momentum_effect_values = input_dict.get("momentum_effect_values")
else:
(momentum_effect_dates,
momentum_effect_values
) = get_recent_momentum_effects(
glucose_dates, glucose_values,
next_effect_date,
time_to_calculate_at,
settings_dictionary.get("momentum_data_interval") or 15,
5
)
# calculate previous insulin effects in order to later calculate the
# insulin counteraction effects
(insulin_effect_dates,
insulin_effect_values
) = get_glucose_effects(
dose_types, dose_starts, dose_ends, dose_values, dose_delivered_units,
next_effect_date,
basal_starts, basal_rates, basal_minutes,
sensitivity_starts, sensitivity_ends, sensitivity_values,
settings_dictionary.get("model"),
delay=settings_dictionary.get("insulin_delay") or 10
)
# calculate future insulin effects for the purposes of predicting glucose
if (input_dict.get("now_to_dia_insulin_effect_dates") and
input_dict.get("now_to_dia_insulin_effect_values")):
now_to_dia_insulin_effect_dates = input_dict.get("now_to_dia_insulin_effect_dates")
now_to_dia_insulin_effect_values = input_dict.get("now_to_dia_insulin_effect_values")
else:
(now_to_dia_insulin_effect_dates,
now_to_dia_insulin_effect_values
) = get_glucose_effects(
dose_types, dose_starts, dose_ends, dose_values, dose_delivered_units,
time_to_calculate_at,
basal_starts, basal_rates, basal_minutes,
sensitivity_starts, sensitivity_ends, sensitivity_values,
settings_dictionary.get("model"),
delay=settings_dictionary.get("insulin_delay") or 10
)
if (input_dict.get("counteraction_starts") and
input_dict.get("counteraction_ends") and
input_dict.get("counteraction_values")):
counteraction_starts = input_dict.get("counteraction_starts")
counteraction_ends = input_dict.get("counteraction_ends")
counteraction_values = input_dict.get("counteraction_values")
counteraction_effects = (counteraction_starts, counteraction_ends, counteraction_values)
# if our BG data is current and we know the expected insulin effects,
# calculate the counteraction effects
elif next_effect_date < last_glucose_date and insulin_effect_dates:
(counteraction_starts,
counteraction_ends,
counteraction_values
) = counteraction_effects = get_counteraction_effects(
glucose_dates, glucose_values,
next_effect_date,
insulin_effect_dates, insulin_effect_values
)
else:
(counteraction_starts,
counteraction_ends,
counteraction_values
) = counteraction_effects = ([], [], [])
if (input_dict.get("carb_effect_dates") and
input_dict.get("carb_effect_values")):
carb_effect_dates = input_dict.get("carb_effect_dates")
carb_effect_values = input_dict.get("carb_effect_values")
else:
(carb_effect_dates,
carb_effect_values
) = get_carb_glucose_effects(
carb_dates, carb_values, carb_absorptions,
retrospective_start,
*counteraction_effects if
settings_dictionary.get("dynamic_carb_absorption_enabled")
is not False else ([], [], []),
carb_ratio_starts, carb_ratio_values,
sensitivity_starts, sensitivity_ends, sensitivity_values,
settings_dictionary.get("default_absorption_times"),
delay=settings_dictionary.get("carb_delay") or 10
)
(cob_dates,
cob_values
) = get_carbs_on_board(
carb_dates, carb_values, carb_absorptions,
time_to_calculate_at,
*counteraction_effects if
settings_dictionary.get("dynamic_carb_absorption_enabled")
is not False else ([], [], []),
carb_ratio_starts, carb_ratio_values,
sensitivity_starts, sensitivity_ends, sensitivity_values,
settings_dictionary.get("default_absorption_times"),
delay=settings_dictionary.get("carb_delay") or 10
)
current_cob = cob_values[
closest_prior_to_date(
time_to_calculate_at,
cob_dates
)
] if cob_dates else 0
if settings_dictionary.get("retrospective_correction_enabled"):
(retrospective_effect_dates,
retrospective_effect_values
) = update_retrospective_glucose_effect(
glucose_dates, glucose_values,
carb_effect_dates, carb_effect_values,
counteraction_starts, counteraction_ends, counteraction_values,
settings_dictionary.get("recency_interval") or 15,
settings_dictionary.get(
"retrospective_correction_grouping_interval"
) or 30,
time_to_calculate_at
)
else:
(retrospective_effect_dates,
retrospective_effect_values
) = ([], [])
recommendations = update_predicted_glucose_and_recommended_basal_and_bolus(
time_to_calculate_at,
glucose_dates, glucose_values,
momentum_effect_dates, momentum_effect_values,
carb_effect_dates, carb_effect_values,
now_to_dia_insulin_effect_dates, now_to_dia_insulin_effect_values,
retrospective_effect_dates, retrospective_effect_values,
target_range_starts, target_range_ends, target_range_mins, target_range_maxes,
settings_dictionary.get("suspend_threshold"),
sensitivity_starts, sensitivity_ends, sensitivity_values,
settings_dictionary.get("model"),
basal_starts, basal_rates, basal_minutes,
settings_dictionary.get("max_basal_rate"),
settings_dictionary.get("max_bolus"),
last_temp_basal,
rate_rounder=settings_dictionary.get("rate_rounder")
)
recommendations["insulin_effect_dates"] = now_to_dia_insulin_effect_dates
recommendations["insulin_effect_values"] = now_to_dia_insulin_effect_values
recommendations["counteraction_effect_start_times"] = counteraction_starts
recommendations["counteraction_effect_end_times"] = counteraction_ends
recommendations["counteraction_effect_values"] = counteraction_values
recommendations["momentum_effect_dates"] = momentum_effect_dates
recommendations["momentum_effect_values"] = momentum_effect_values
recommendations["carb_effect_dates"] = carb_effect_dates
recommendations["carb_effect_values"] = carb_effect_values
recommendations["retrospective_effect_dates"] = (
retrospective_effect_dates or None
)
recommendations["retrospective_effect_values"] = (
retrospective_effect_values or None
)
recommendations["carbs_on_board"] = current_cob
recommendations["cob_timeline_dates"] = cob_dates
recommendations["cob_timeline_values"] = cob_values
recommendations["input_data"] = input_dict
return recommendations
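# Hedged sketch of a settings_dictionary that update() can consume. Every key
# below is read via settings_dictionary.get(...) in update() or documented in
# its docstring; the numeric values are illustrative placeholders only
# (hypothetical, not recommended therapy settings).
EXAMPLE_SETTINGS_SKETCH = {
    "model": [360, 75],                                    # exponential model: [DIA mins, peak mins]
    "momentum_data_interval": 15,                          # minutes
    "suspend_threshold": 70,                               # mg/dL (placeholder)
    "dynamic_carb_absorption_enabled": True,
    "retrospective_correction_enabled": True,
    "retrospective_correction_integration_interval": 30,   # minutes
    "recency_interval": 15,                                # minutes
    "retrospective_correction_grouping_interval": 30,      # minutes
    "default_absorption_times": [120, 180, 240],           # minutes, [fast, medium, slow]
    "max_basal_rate": 3.0,                                 # U/hr (placeholder)
    "max_bolus": 10.0,                                     # U (placeholder)
    "insulin_delay": 10,                                   # minutes
    "carb_delay": 10,                                      # minutes
    "rate_rounder": None,                                  # optional rounding increment
}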
def closest_prior_to_date(date_to_compare, dates):
""" Returns the index of the closest element in the sorted sequence
prior to the specified date
"""
for date in dates:
if date <= date_to_compare:
closest_element = date
else:
break
return dates.index(closest_element)
def update_retrospective_glucose_effect(
glucose_dates, glucose_values,
carb_effect_dates, carb_effect_values,
counteraction_starts, counteraction_ends, counteraction_values,
recency_interval,
retrospective_correction_grouping_interval,
now_time,
effect_duration=60,
delta=5
):
"""
Generate an effect based on how large the discrepancy is between the
current glucose and its predicted value.
Arguments:
glucose_dates -- time of glucose value (datetime)
glucose_values -- value at the time of glucose_date
carb_effect_dates -- date the carb effects occur at (datetime)
carb_effect_values -- value | |
10) + " MASK=0x1").splitlines()) != 0:
raise Exception("Unexpected RANGE=2-10 result")
if len(dev[0].request("BSS RANGE=0-" + str(int(id2) + 10) + " MASK=0x1").splitlines()) != 2:
raise Exception("Unexpected RANGE=0-10 result")
if len(dev[0].request("BSS RANGE=" + id1 + "-" + id1 + " MASK=0x1").splitlines()) != 1:
raise Exception("Unexpected RANGE=0-0 result")
res = dev[0].request("BSS p2p_dev_addr=FOO")
if "FAIL" in res or "id=" in res:
raise Exception("Unexpected result: " + res)
res = dev[0].request("BSS p2p_dev_addr=00:11:22:33:44:55")
if "FAIL" in res or "id=" in res:
raise Exception("Unexpected result: " + res)
dev[0].request("BSS_FLUSH 1000")
res = dev[0].request("BSS RANGE=ALL MASK=0x1").splitlines()
if len(res) != 2:
raise Exception("Unexpected result after BSS_FLUSH 1000")
dev[0].request("BSS_FLUSH 0")
res = dev[0].request("BSS RANGE=ALL MASK=0x1").splitlines()
if len(res) != 0:
raise Exception("Unexpected result after BSS_FLUSH 0")
def test_scan_and_interface_disabled(dev, apdev):
"""Scan operation when interface gets disabled"""
try:
dev[0].request("SCAN")
ev = dev[0].wait_event(["CTRL-EVENT-SCAN-STARTED"])
if ev is None:
raise Exception("Scan did not start")
dev[0].request("DRIVER_EVENT INTERFACE_DISABLED")
ev = dev[0].wait_event(["CTRL-EVENT-SCAN-RESULTS"], timeout=7)
if ev is not None:
raise Exception("Scan completed unexpectedly")
# verify that scan is rejected
if "FAIL" not in dev[0].request("SCAN"):
raise Exception("New scan request was accepted unexpectedly")
dev[0].request("DRIVER_EVENT INTERFACE_ENABLED")
dev[0].scan(freq="2412")
finally:
dev[0].request("DRIVER_EVENT INTERFACE_ENABLED")
def test_scan_for_auth(dev, apdev):
"""cfg80211 workaround with scan-for-auth"""
hapd = hostapd.add_ap(apdev[0], { "ssid": "open" })
dev[0].scan_for_bss(apdev[0]['bssid'], freq="2412")
# Block sme-connect radio work with an external radio work item, so that
# SELECT_NETWORK can decide to use fast associate without a new scan while
# cfg80211 still has the matching BSS entry, but the actual connection is
# not yet started.
id = dev[0].request("RADIO_WORK add block-work")
ev = dev[0].wait_event(["EXT-RADIO-WORK-START"])
if ev is None:
raise Exception("Timeout while waiting radio work to start")
dev[0].connect("open", key_mgmt="NONE", scan_freq="2412",
wait_connect=False)
dev[0].dump_monitor()
# Clear cfg80211 BSS table.
try:
subprocess.check_call(['iw', dev[0].ifname, 'scan', 'trigger',
'freq', '2457', 'flush'])
    except subprocess.CalledProcessError:
raise HwsimSkip("iw scan trigger flush not supported")
ev = dev[0].wait_event(["CTRL-EVENT-SCAN-RESULTS"], 5)
if ev is None:
raise Exception("External flush scan timed out")
# Release blocking radio work to allow connection to go through with the
# cfg80211 BSS entry missing.
dev[0].request("RADIO_WORK done " + id)
dev[0].wait_connected(timeout=15)
def test_scan_for_auth_fail(dev, apdev):
"""cfg80211 workaround with scan-for-auth failing"""
hapd = hostapd.add_ap(apdev[0], { "ssid": "open" })
dev[0].scan_for_bss(apdev[0]['bssid'], freq="2412")
# Block sme-connect radio work with an external radio work item, so that
# SELECT_NETWORK can decide to use fast associate without a new scan while
# cfg80211 still has the matching BSS entry, but the actual connection is
# not yet started.
id = dev[0].request("RADIO_WORK add block-work")
ev = dev[0].wait_event(["EXT-RADIO-WORK-START"])
if ev is None:
raise Exception("Timeout while waiting radio work to start")
dev[0].connect("open", key_mgmt="NONE", scan_freq="2412",
wait_connect=False)
dev[0].dump_monitor()
hapd.disable()
# Clear cfg80211 BSS table.
try:
subprocess.check_call(['iw', dev[0].ifname, 'scan', 'trigger',
'freq', '2457', 'flush'])
    except subprocess.CalledProcessError:
raise HwsimSkip("iw scan trigger flush not supported")
ev = dev[0].wait_event(["CTRL-EVENT-SCAN-RESULTS"], 5)
if ev is None:
raise Exception("External flush scan timed out")
# Release blocking radio work to allow connection to go through with the
# cfg80211 BSS entry missing.
dev[0].request("RADIO_WORK done " + id)
ev = dev[0].wait_event(["CTRL-EVENT-SCAN-RESULTS",
"CTRL-EVENT-CONNECTED"], 15)
if ev is None:
raise Exception("Scan event missing")
if "CTRL-EVENT-CONNECTED" in ev:
raise Exception("Unexpected connection")
dev[0].request("DISCONNECT")
def test_scan_for_auth_wep(dev, apdev):
"""cfg80211 scan-for-auth workaround with WEP keys"""
dev[0].flush_scan_cache()
hapd = hostapd.add_ap(apdev[0],
{ "ssid": "wep", "wep_key0": '"abcde"',
"auth_algs": "2" })
dev[0].scan_for_bss(apdev[0]['bssid'], freq="2412")
# Block sme-connect radio work with an external radio work item, so that
# SELECT_NETWORK can decide to use fast associate without a new scan while
# cfg80211 still has the matching BSS entry, but the actual connection is
# not yet started.
id = dev[0].request("RADIO_WORK add block-work")
ev = dev[0].wait_event(["EXT-RADIO-WORK-START"])
if ev is None:
raise Exception("Timeout while waiting radio work to start")
dev[0].connect("wep", key_mgmt="NONE", wep_key0='"abcde"',
auth_alg="SHARED", scan_freq="2412", wait_connect=False)
dev[0].dump_monitor()
# Clear cfg80211 BSS table.
try:
subprocess.check_call(['iw', dev[0].ifname, 'scan', 'trigger',
'freq', '2457', 'flush'])
    except subprocess.CalledProcessError:
raise HwsimSkip("iw scan trigger flush not supported")
ev = dev[0].wait_event(["CTRL-EVENT-SCAN-RESULTS"], 5)
if ev is None:
raise Exception("External flush scan timed out")
# Release blocking radio work to allow connection to go through with the
# cfg80211 BSS entry missing.
dev[0].request("RADIO_WORK done " + id)
dev[0].wait_connected(timeout=15)
def test_scan_hidden(dev, apdev):
"""Control interface behavior on scan parameters"""
hapd = hostapd.add_ap(apdev[0], { "ssid": "test-scan",
"ignore_broadcast_ssid": "1" })
bssid = apdev[0]['bssid']
check_scan(dev[0], "freq=2412 use_id=1")
if "test-scan" in dev[0].request("SCAN_RESULTS"):
raise Exception("BSS unexpectedly found in initial scan")
id1 = dev[0].connect("foo", key_mgmt="NONE", scan_ssid="1",
only_add_network=True)
id2 = dev[0].connect("test-scan", key_mgmt="NONE", scan_ssid="1",
only_add_network=True)
id3 = dev[0].connect("bar", key_mgmt="NONE", only_add_network=True)
check_scan(dev[0], "freq=2412 use_id=1")
if "test-scan" in dev[0].request("SCAN_RESULTS"):
raise Exception("BSS unexpectedly found in scan")
# Allow multiple attempts to be more robust under heavy CPU load that can
# result in Probe Response frames getting sent only after the station has
# already stopped waiting for the response on the channel.
found = False
for i in range(10):
check_scan(dev[0], "scan_id=%d,%d,%d freq=2412 use_id=1" % (id1, id2, id3))
if "test-scan" in dev[0].request("SCAN_RESULTS"):
found = True
break
if not found:
raise Exception("BSS not found in scan")
if "FAIL" not in dev[0].request("SCAN scan_id=1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17"):
raise Exception("Too many scan_id values accepted")
dev[0].request("REMOVE_NETWORK all")
hapd.disable()
dev[0].flush_scan_cache(freq=2432)
dev[0].flush_scan_cache()
def test_scan_and_bss_entry_removed(dev, apdev):
"""Last scan result and connect work processing on BSS entry update"""
hapd = hostapd.add_ap(apdev[0], { "ssid": "open",
"eap_server": "1",
"wps_state": "2" })
bssid = apdev[0]['bssid']
wpas = WpaSupplicant(global_iface='/tmp/wpas-wlan5')
wpas.interface_add("wlan5", drv_params="force_connect_cmd=1")
# Add a BSS entry
dev[0].scan_for_bss(bssid, freq="2412")
wpas.scan_for_bss(bssid, freq="2412")
# Start a connect radio work with a blocking entry preventing this from
# proceeding; this stores a pointer to the selected BSS entry.
id = dev[0].request("RADIO_WORK add block-work")
w_id = wpas.request("RADIO_WORK add block-work")
dev[0].wait_event(["EXT-RADIO-WORK-START"], timeout=1)
wpas.wait_event(["EXT-RADIO-WORK-START"], timeout=1)
nid = dev[0].connect("open", key_mgmt="NONE", scan_freq="2412",
wait_connect=False)
w_nid = wpas.connect("open", key_mgmt="NONE", scan_freq="2412",
wait_connect=False)
time.sleep(0.1)
# Remove the BSS entry
dev[0].request("BSS_FLUSH 0")
wpas.request("BSS_FLUSH 0")
# Allow the connect radio work to continue. The bss entry stored in the
# pending connect work is now stale. This will result in the connection
# attempt failing since the BSS entry does not exist.
dev[0].request("RADIO_WORK done " + id)
wpas.request("RADIO_WORK done " + w_id)
ev = dev[0].wait_event(["CTRL-EVENT-CONNECTED"], timeout=1)
if ev is not None:
raise Exception("Unexpected connection")
dev[0].remove_network(nid)
ev = wpas.wait_event(["CTRL-EVENT-CONNECTED"], timeout=1)
if ev is not None:
raise Exception("Unexpected connection")
wpas.remove_network(w_nid)
time.sleep(0.5)
dev[0].request("BSS_FLUSH 0")
wpas.request("BSS_FLUSH 0")
# Add a BSS entry
dev[0].scan_for_bss(bssid, freq="2412")
wpas.scan_for_bss(bssid, freq="2412")
# Start a connect radio work with a blocking entry preventing this from
# proceeding; this stores a pointer to the selected BSS entry.
id = dev[0].request("RADIO_WORK add block-work")
w_id = wpas.request("RADIO_WORK add block-work")
dev[0].wait_event(["EXT-RADIO-WORK-START"], timeout=1)
wpas.wait_event(["EXT-RADIO-WORK-START"], timeout=1)
# Schedule a connection based on the current BSS entry.
dev[0].connect("open", key_mgmt="NONE", scan_freq="2412",
wait_connect=False)
wpas.connect("open", key_mgmt="NONE", scan_freq="2412",
wait_connect=False)
# Update scan results with results that have longer set of IEs so that new
# memory needs to be allocated for the BSS entry.
hapd.request("WPS_PBC")
time.sleep(0.1)
subprocess.call(['iw', dev[0].ifname, 'scan', 'trigger', 'freq', '2412'])
subprocess.call(['iw', wpas.ifname, 'scan', 'trigger', 'freq', '2412'])
time.sleep(0.1)
# Allow the connect radio work to continue. The bss entry stored in the
# pending connect work becomes stale during the scan and it must have been
# updated for the connection to work.
dev[0].request("RADIO_WORK done " + id)
wpas.request("RADIO_WORK done " + w_id)
dev[0].wait_connected(timeout=15, error="No connection (sme-connect)")
wpas.wait_connected(timeout=15, error="No connection (connect)")
dev[0].request("DISCONNECT")
wpas.request("DISCONNECT")
dev[0].flush_scan_cache()
wpas.flush_scan_cache()
def test_scan_reqs_with_non_scan_radio_work(dev, apdev):
"""SCAN commands while non-scan radio_work is in progress"""
id = dev[0].request("RADIO_WORK add test-work-a")
ev = dev[0].wait_event(["EXT-RADIO-WORK-START"])
if ev is None:
raise Exception("Timeout while waiting radio work to start")
if "OK" not in dev[0].request("SCAN"):
raise Exception("SCAN failed")
if "FAIL-BUSY" not in dev[0].request("SCAN"):
raise Exception("SCAN accepted while one is already pending")
if "FAIL-BUSY" not in dev[0].request("SCAN"):
raise Exception("SCAN accepted while one is already pending")
res = dev[0].request("RADIO_WORK show").splitlines()
count = 0
for l in res:
if "scan" in l:
count += 1
if count != 1:
logger.info(res)
raise Exception("Unexpected number of scan radio work items")
dev[0].dump_monitor()
dev[0].request("RADIO_WORK done " + id)
ev = dev[0].wait_event(["CTRL-EVENT-SCAN-STARTED"], timeout=5)
if ev is None:
raise Exception("Scan did not start")
if "FAIL-BUSY" not in dev[0].request("SCAN"):
raise Exception("SCAN accepted while one is already in progress")
ev = dev[0].wait_event(["CTRL-EVENT-SCAN-RESULTS"], timeout=10)
if ev is None:
        print("Scan did not complete")
ev = dev[0].wait_event(["CTRL-EVENT-SCAN-STARTED"], timeout=0.2)
if ev is not None:
        raise
# Copyright 2018-2021 Arm Limited.
# SPDX-License-Identifier: Apache-2.0
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import os
import re
from config_system import data, expr, utils
logger = logging.getLogger(__name__)
logger.addHandler(logging.NullHandler())
def can_enable(config):
# If there is no dependency expression, then the config can be enabled
e = config.get('depends')
if e is None:
return True
return expr.condexpr_value(e)
def is_visible(config):
# If there is no visible_cond expression, then the config is visible
e = config.get('visible_cond')
if e is None:
return True
return expr.condexpr_value(e)
def enforce_dependent_values(stage, fix_bools=False,
error_level=logging.WARNING):
"""
Check that values are consistently set, specifically with respect
to whether the dependencies are correct.
    This function is called when we expect the values to be
    consistent, i.e. after reading in a new config or prior to
    writing it out. It is also called prior to plugin execution, to
    try to ensure the plugins see a consistent state.
For non-boolean configs (string, int), set their value to default
("", 0) if the config's dependencies are not met. This can happen
where a user sets the value of the config, and then changes
another config resulting in the dependency being disabled.
Boolean values are treated specially. Normally they will be kept
in a consistent state (wrt dependencies) by set_config(). However
they can be forced by a 'select' statement even when the
dependencies are not met. This indicates there is a problem with
the Mconfig that needs to be fixed. This function will only reset
Boolean values to n if specifically requested by fix_bools=True.
This is only expected to be used after reading in a config file.
That file may have been written with a different Mconfig that
allowed the selection.
The error_level determines the log level at which bool
inconsistencies are reported. When set to logging.ERROR this forces
the user to fix the Mconfig.
"""
for i in data.get_config_list():
c = data.get_config(i)
if can_enable(c):
continue
if c['datatype'] == 'bool' and c['value'] is True:
if len(c['selected_by']) > 0:
msg = "{0}unmet direct dependencies: {1} depends on {2}, " \
"but is selected by [{3}].".format(stage, i,
c['depends'][1],
",".join(c['selected_by']))
                if error_level == logging.ERROR:
msg += " Update the Mconfig so that this can't happen"
logger.log(error_level, msg)
else:
raise Exception("Unmet direct dependencies without select")
if fix_bools:
set_config_internal(i, False)
elif c['datatype'] == 'string':
set_config_internal(i, '')
elif c['datatype'] == 'int':
set_config_internal(i, 0)
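# Illustrative sketch (not from the original module): the reset rule that
# enforce_dependent_values applies to a config whose dependency is unmet, shown
# on plain values instead of the real `data` store. The function name and
# arguments here are hypothetical.
def _example_dependent_value_reset(datatype, value, fix_bools=False):
    defaults = {'bool': False, 'string': '', 'int': 0}
    if datatype == 'bool' and not fix_bools:
        # bools are only reset when explicitly requested (e.g. after reading a file)
        return value
    return defaults[datatype]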
def check_type(key, config, actual):
actual = expr.expr_type(actual)
expected = config['datatype']
if expected != actual:
logger.error("Type mismatch in config %s: expected %s but got %s" %
(key, expected, actual))
def validate_configs():
"""
Ensure that the types in 'default' statements are correct,
and that the configs referred to in 'select' statements are boolean.
"""
config_list = data.get_config_list()
for k in config_list:
c = data.get_config(k)
if "default_cond" in c:
for i in c['default_cond']:
check_type(k, c, i['expr'])
if "default" in c:
check_type(k, c, c["default"])
if "select_if" in c:
for k in c["select_if"]:
try:
c1 = data.get_config(k[0])
except KeyError:
logger.warning("Ignoring unknown configuration option %s" % k[0])
continue
if c1['datatype'] != "bool":
logger.error('Select option must have type bool but got type %s instead' %
c1['datatype'])
if "select" in c:
for k in c["select"]:
try:
c1 = data.get_config(k)
except KeyError:
logger.warning("Ignoring unknown configuration option %s" % k)
continue
if c1['datatype'] != "bool":
logger.error('Select option must have type bool but got type %s instead' %
c1['datatype'])
def init_config(options_filename, ignore_missing=False):
global menu_data
data.init(options_filename, ignore_missing)
validate_configs()
menu_data = menu_parse()
set_initial_values()
def read_profile_file(profile_filename):
try:
with open(profile_filename, "rt") as f:
source = os.path.basename(profile_filename)
for line in f:
line = line.strip()
if line == "":
continue # Ignore blank lines
elif line.startswith("#"):
continue # ignore comment
else:
m = re.match(r"([^=]+)=(\"?)(.*)\2", line)
if m:
(key, quoted, value) = (m.group(1), m.group(2), m.group(3))
if quoted == '"':
value = re.sub(r"\\(.)", r"\1", value)
set_config_if_prompt(key, value, True, source=source)
else:
raise Exception("Couldn't parse ", line)
except IOError as e:
logger.warning("Failed to load non-existing \"%s\" profile" % profile_filename)
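# Illustrative sketch (not part of the original module): how the profile-line
# regex above splits a quoted assignment and unescapes the value. The option name
# and value are hypothetical.
def _example_profile_line_parse():
    m = re.match(r"([^=]+)=(\"?)(.*)\2", 'MY_OPTION="hello \\"world\\""')
    key, quoted, value = m.group(1), m.group(2), m.group(3)
    if quoted == '"':
        value = re.sub(r"\\(.)", r"\1", value)
    assert (key, value) == ("MY_OPTION", 'hello "world"')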
def read_config_file(config_filename):
try:
with open(config_filename, "rt") as f:
for line in f:
source = ""
is_user_set = False
line = line.strip()
if line == "":
continue # Ignore blank lines
elif line.startswith("#"):
# match config name together with optional '[by user]' and '(source)' parts
# eg. "# CONFIG_RELEASE is not set [by user] (source)"
m = re.match(r"# CONFIG_([^=]+) is not set( \[by user\](?: \((.+)\))?)?", line)
if m:
key = m.group(1)
is_user_set = True if m.group(2) else False
source = m.group(3) if m.group(3) else ""
value = 'n'
set_config_if_prompt(key, value, is_user_set, source)
# else ignore comment
else:
line = line.split("#", 1) # Strip comment from line eg. "xyz # comment"
comment = line[1] if len(line) > 1 else ""
if comment:
# match the source if present eg. " set by user (source)"
m = re.match(r" set by user(?: \((.+)\))?", comment)
if m:
is_user_set = True
source = m.group(1) if m.group(1) else ""
line = line[0].rstrip() # Remove extra space before comment
m = re.match(r"CONFIG_([^=]+)=(\"?)(.*)\2", line)
if m:
(key, quoted, value) = (m.group(1), m.group(2), m.group(3))
if quoted == '"':
value = re.sub(r"\\(.)", r"\1", value)
set_config_if_prompt(key, value, is_user_set, source)
else:
raise Exception("Couldn't parse ", line)
except IOError as e:
logger.warning("Failed to load non-existing \"%s\" configuration" % config_filename)
def read_config(options_filename, config_filename, ignore_missing):
init_config(options_filename, ignore_missing)
read_config_file(config_filename)
enforce_dependent_values("Inconsistent input, correcting: ", fix_bools=True)
def write_config(config_filename):
with utils.open_and_write_if_changed(config_filename) as f:
for (i_type, i_symbol) in data.iter_symbols_menuorder():
if i_type in ["config", "menuconfig"]:
c = data.get_config(i_symbol)
mark_set_by_user = " # set by user"
if not can_enable(c):
# Don't output this option because it cannot be enabled
continue
elif c['datatype'] == "bool":
if c['value'] is True:
f.write("CONFIG_%s=y" % i_symbol)
else:
f.write("# CONFIG_%s is not set" % i_symbol)
mark_set_by_user = " [by user]"
elif c['datatype'] == "string":
quoted_str = re.sub(r'(["\\])', r'\\\1', c['value'])
f.write('CONFIG_%s="%s"' % (i_symbol, quoted_str))
else:
f.write("CONFIG_%s=%s" % (i_symbol, c['value']))
# Save meta data as user explicit mark this
if c['is_user_set']:
if 'source' in c and c['source']:
mark_set_by_user += " (" + c['source'] + ")"
f.write("%s" % mark_set_by_user)
f.write("\n")
elif i_type == "menu":
f.write("\n#\n# %s\n#\n" %
data.get_menu_title(i_symbol))
logger.info("Written configuration to '%s'" % config_filename)
def write_depfile(depfile, target_name):
with utils.open_and_write_if_changed(depfile) as fp:
fp.write(target_name + ": \\\n ")
fp.write(" \\\n ".join(data.get_mconfig_srcs()) + "\n")
def get_user_set_options():
"""
Return all the options which have been set by the user
"""
user_set_options = []
for (i_type, i_symbol) in data.iter_symbols_menuorder():
if i_type in ["config", "menuconfig"]:
c = data.get_config(i_symbol)
if c['is_user_set']:
value = c['value']
if c['datatype'] == "bool":
value = "y" if c['value'] else "n"
source = c['source'] if "source" in c else ""
user_set_options.append((i_symbol, value, source))
return user_set_options
def get_options_selecting(selected):
"""Return the options which select `selected`"""
opt = data.get_config(selected)
return opt.get("selected_by", [])
def get_options_depending_on(dependent):
"""Return the options which depend on `dependent`"""
opt = data.get_config(dependent)
rdeps = opt.get("rdepends", [])
enabled_options = []
for rdep in rdeps:
rdep_val = data.get_config(rdep)
if rdep_val["datatype"] == "bool" and get_config_bool(rdep):
enabled_options.append(rdep)
return enabled_options
def get_warning(key):
"""
Returns the warning associated with the given config option.
Returns None if there isn't an associated warning.
"""
opt = data.get_config(key)
return opt.get('warning', None)
def set_initial_values():
"Set all configuration objects to their default value"
config_list = data.get_config_list()
# Set up initial values, and set up reverse dependencies
for k in config_list:
c = data.get_config(k)
c['selected_by'] = set()
c['is_user_set'] = False
c['is_new'] = True
if c['datatype'] == "bool":
c['value'] = False
elif c['datatype'] == "int":
c['value'] = 0
else:
c['value'] = ''
for i in expr.dependency_list(c.get("depends")):
data.get_config(i).setdefault('rdepends', []).append(k)
if "default_cond" in c:
for j in c['default_cond']:
for i in expr.dependency_list(j['cond']):
data.get_config(i).setdefault('rdefault', []).append(k)
value_deps = expr.dependency_list(j['expr'])
for d in value_deps:
data.get_config(d).setdefault('rdefault', []).append(k)
if "default" in c:
value_deps = expr.dependency_list(c['default'])
for d in value_deps:
data.get_config(d).setdefault('rdefault', []).append(k)
if "select_if" in c:
for j in c['select_if']:
            for
# Copyright 2014 Violin Memory, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Tests for Violin Memory 6000 Series All-Flash Array Common Driver
"""
import mock
from cinder import exception
from cinder import test
from cinder.tests import fake_vmem_xgtools_client as vxg
from cinder.volume import configuration as conf
from cinder.volume.drivers.violin import v6000_common
VOLUME_ID = "abcdabcd-1234-abcd-1234-abcdeffedcba"
VOLUME = {
"name": "volume-" + VOLUME_ID,
"id": VOLUME_ID,
"display_name": "fake_volume",
"size": 2,
"host": "irrelevant",
"volume_type": None,
"volume_type_id": None,
}
SNAPSHOT_ID = "abcdabcd-1234-abcd-1234-abcdeffedcbb"
SNAPSHOT = {
"name": "snapshot-" + SNAPSHOT_ID,
"id": SNAPSHOT_ID,
"volume_id": VOLUME_ID,
"volume_name": "volume-" + VOLUME_ID,
"volume_size": 2,
"display_name": "fake_snapshot",
"volume": VOLUME,
}
SRC_VOL_ID = "abcdabcd-1234-abcd-1234-abcdeffedcbc"
SRC_VOL = {
"name": "volume-" + SRC_VOL_ID,
"id": SRC_VOL_ID,
"display_name": "fake_src_vol",
"size": 2,
"host": "irrelevant",
"volume_type": None,
"volume_type_id": None,
}
INITIATOR_IQN = "iqn.1111-22.org.debian:11:222"
CONNECTOR = {
"initiator": INITIATOR_IQN,
"host": "irrelevant"
}
class V6000CommonTestCase(test.TestCase):
"""Test cases for VMEM V6000 driver common class."""
def setUp(self):
super(V6000CommonTestCase, self).setUp()
self.conf = self.setup_configuration()
self.driver = v6000_common.V6000Common(self.conf)
self.driver.container = 'myContainer'
self.driver.device_id = 'ata-VIOLIN_MEMORY_ARRAY_23109R00000022'
self.stats = {}
def tearDown(self):
super(V6000CommonTestCase, self).tearDown()
def setup_configuration(self):
config = mock.Mock(spec=conf.Configuration)
config.volume_backend_name = 'v6000_common'
config.san_ip = '1.1.1.1'
config.san_login = 'admin'
config.san_password = ''
config.san_thin_provision = False
config.san_is_local = False
config.gateway_mga = '2.2.2.2'
config.gateway_mgb = '3.3.3.3'
config.use_igroups = False
config.request_timeout = 300
config.container = 'myContainer'
return config
@mock.patch('vxg.open')
def setup_mock_client(self, _m_client, m_conf=None):
"""Create a fake backend communication factory.
The xg-tools creates a VShare connection object (for V6000
devices) and returns it for use on a call to vxg.open().
"""
# configure the vshare object mock with defaults
_m_vshare = mock.Mock(name='VShare',
version='1.1.1',
spec=vxg.mock_client_conf)
# if m_conf, clobber the defaults with it
if m_conf:
_m_vshare.configure_mock(**m_conf)
# set calls to vxg.open() to return this mocked vshare object
_m_client.return_value = _m_vshare
return _m_client
def setup_mock_vshare(self, m_conf=None):
"""Create a fake VShare communication object."""
_m_vshare = mock.Mock(name='VShare',
version='1.1.1',
spec=vxg.mock_client_conf)
if m_conf:
_m_vshare.configure_mock(**m_conf)
return _m_vshare
def test_check_for_setup_error(self):
"""No setup errors are found."""
bn1 = ("/vshare/state/local/container/%s/threshold/usedspace"
"/threshold_hard_val" % self.driver.container)
bn2 = ("/vshare/state/local/container/%s/threshold/provision"
"/threshold_hard_val" % self.driver.container)
bn_thresholds = {bn1: 0, bn2: 100}
conf = {
'basic.get_node_values.return_value': bn_thresholds,
}
self.driver.vip = self.setup_mock_vshare(m_conf=conf)
self.driver._is_supported_vmos_version = mock.Mock(return_value=True)
result = self.driver.check_for_setup_error()
self.driver._is_supported_vmos_version.assert_called_with(
self.driver.vip.version)
self.driver.vip.basic.get_node_values.assert_called_with(
[bn1, bn2])
self.assertEqual(None, result)
def test_check_for_setup_error_no_container(self):
"""No container was configured."""
self.driver.vip = self.setup_mock_vshare()
self.driver.container = ''
self.assertRaises(exception.ViolinInvalidBackendConfig,
self.driver.check_for_setup_error)
def test_check_for_setup_error_invalid_usedspace_threshold(self):
"""The array's usedspace threshold was altered (not supported)."""
bn1 = ("/vshare/state/local/container/%s/threshold/usedspace"
"/threshold_hard_val" % self.driver.container)
bn2 = ("/vshare/state/local/container/%s/threshold/provision"
"/threshold_hard_val" % self.driver.container)
bn_thresholds = {bn1: 99, bn2: 100}
conf = {
'basic.get_node_values.return_value': bn_thresholds,
}
self.driver.vip = self.setup_mock_vshare(m_conf=conf)
self.driver._is_supported_vmos_version = mock.Mock(return_value=True)
self.assertRaises(exception.ViolinInvalidBackendConfig,
self.driver.check_for_setup_error)
def test_check_for_setup_error_invalid_provisionedspace_threshold(self):
"""The array's provisioned threshold was altered (not supported)."""
bn1 = ("/vshare/state/local/container/%s/threshold/usedspace"
"/threshold_hard_val" % self.driver.container)
bn2 = ("/vshare/state/local/container/%s/threshold/provision"
"/threshold_hard_val" % self.driver.container)
bn_thresholds = {bn1: 0, bn2: 99}
conf = {
'basic.get_node_values.return_value': bn_thresholds,
}
self.driver.vip = self.setup_mock_vshare(m_conf=conf)
self.driver._is_supported_vmos_version = mock.Mock(return_value=True)
self.assertRaises(exception.ViolinInvalidBackendConfig,
self.driver.check_for_setup_error)
def test_create_lun(self):
"""Lun is successfully created."""
response = {'code': 0, 'message': 'LUN create: success!'}
conf = {
'lun.create_lun.return_value': response,
}
self.driver.vip = self.setup_mock_vshare(m_conf=conf)
self.driver._send_cmd = mock.Mock(return_value=response)
result = self.driver._create_lun(VOLUME)
self.driver._send_cmd.assert_called_with(
self.driver.vip.lun.create_lun, 'LUN create: success!',
self.driver.container, VOLUME['id'], VOLUME['size'], 1, "0",
"0", "w", 1, 512, False, False, None)
self.assertTrue(result is None)
def test_create_lun_lun_already_exists(self):
"""Array returns error that the lun already exists."""
response = {'code': 14005,
'message': 'LUN with name ... already exists'}
conf = {
'lun.create_lun.return_value': response,
}
self.driver.vip = self.setup_mock_client(m_conf=conf)
self.driver._send_cmd = mock.Mock(
side_effect=exception.ViolinBackendErrExists(
response['message']))
self.assertTrue(self.driver._create_lun(VOLUME) is None)
def test_create_lun_create_fails_with_exception(self):
"""Array returns a out of space error."""
response = {'code': 512, 'message': 'Not enough space available'}
failure = exception.ViolinBackendErr
conf = {
'lun.create_lun.return_value': response,
}
self.driver.vip = self.setup_mock_vshare(m_conf=conf)
self.driver._send_cmd = mock.Mock(
side_effect=failure(response['message']))
self.assertRaises(failure, self.driver._create_lun, VOLUME)
def test_delete_lun(self):
"""Lun is deleted successfully."""
response = {'code': 0, 'message': 'lun deletion started'}
success_msgs = ['lun deletion started', '']
conf = {
'lun.delete_lun.return_value': response,
}
self.driver.vip = self.setup_mock_vshare(m_conf=conf)
self.driver._send_cmd = mock.Mock(return_value=response)
result = self.driver._delete_lun(VOLUME)
self.driver._send_cmd.assert_called_with(
self.driver.vip.lun.bulk_delete_luns,
success_msgs, self.driver.container, VOLUME['id'])
self.assertTrue(result is None)
def test_delete_lun_empty_response_message(self):
"""Array bug where delete action returns no message."""
response = {'code': 0, 'message': ''}
conf = {
'lun.delete_lun.return_value': response,
}
self.driver.vip = self.setup_mock_vshare(m_conf=conf)
self.driver._send_cmd = mock.Mock(return_value=response)
self.assertTrue(self.driver._delete_lun(VOLUME) is None)
def test_delete_lun_lun_already_deleted(self):
"""Array fails to delete a lun that doesn't exist."""
response = {'code': 14005, 'message': 'LUN ... does not exist.'}
conf = {
'lun.delete_lun.return_value': response,
}
self.driver.vip = self.setup_mock_vshare(m_conf=conf)
self.driver._send_cmd = mock.Mock(
side_effect=exception.ViolinBackendErrNotFound(
response['message']))
self.assertTrue(self.driver._delete_lun(VOLUME) is None)
def test_delete_lun_delete_fails_with_exception(self):
"""Array returns a generic error."""
response = {'code': 14000, 'message': 'Generic error'}
failure = exception.ViolinBackendErr
conf = {
'lun.delete_lun.return_value': response
}
self.driver.vip = self.setup_mock_vshare(m_conf=conf)
self.driver._send_cmd = mock.Mock(
side_effect=failure(response['message']))
self.assertRaises(failure, self.driver._delete_lun, VOLUME)
def test_extend_lun(self):
"""Volume extend completes successfully."""
new_volume_size = 10
response = {'code': 0, 'message': 'Success '}
conf = {
'lun.resize_lun.return_value': response,
}
self.driver.vip = self.setup_mock_vshare(m_conf=conf)
self.driver._send_cmd = mock.Mock(return_value=response)
result = self.driver._extend_lun(VOLUME, new_volume_size)
self.driver._send_cmd.assert_called_with(
self.driver.vip.lun.resize_lun,
'Success', self.driver.container,
VOLUME['id'], new_volume_size)
self.assertTrue(result is None)
def test_extend_lun_new_size_is_too_small(self):
"""Volume extend fails when new size would shrink the volume."""
new_volume_size = 0
response = {'code': 14036, 'message': 'Failure'}
conf = {
'lun.resize_lun.return_value': response,
}
self.driver.vip = self.setup_mock_vshare(m_conf=conf)
self.driver._send_cmd = mock.Mock(
side_effect=exception.ViolinBackendErr(message='fail'))
self.assertRaises(exception.ViolinBackendErr,
self.driver._extend_lun, VOLUME, new_volume_size)
def test_create_lun_snapshot(self):
"""Snapshot creation completes successfully."""
response = {'code': 0, 'message': 'success'}
success_msg = 'Snapshot create: success!'
conf = {
'snapshot.create_lun_snapshot.return_value': response
}
self.driver.vip = self.setup_mock_vshare(m_conf=conf)
self.driver._send_cmd = mock.Mock(return_value=response)
result = self.driver._create_lun_snapshot(SNAPSHOT)
self.driver._send_cmd.assert_called_with(
self.driver.vip.snapshot.create_lun_snapshot, success_msg,
self.driver.container, SNAPSHOT['volume_id'], SNAPSHOT['id'])
self.assertTrue(result is None)
def test_delete_lun_snapshot(self):
"""Snapshot deletion completes successfully."""
response = {'code': 0, 'message': 'success'}
success_msg = 'Snapshot delete: success!'
conf = {
'snapshot.delete_lun_snapshot.return_value': response,
}
self.driver.vip = self.setup_mock_vshare(m_conf=conf)
self.driver._send_cmd = mock.Mock(return_value=response)
result = self.driver._delete_lun_snapshot(SNAPSHOT)
self.driver._send_cmd.assert_called_with(
self.driver.vip.snapshot.delete_lun_snapshot, success_msg,
self.driver.container, SNAPSHOT['volume_id'], SNAPSHOT['id'])
self.assertTrue(result is None)
def test_get_lun_id(self):
bn = "/vshare/config/export/container/%s/lun/%s/target/**" \
% (self.conf.container, VOLUME['id'])
response = {("/vshare/config/export/container/%s/lun"
"/%s/target/hba-a1/initiator/openstack/lun_id"
% (self.conf.container, VOLUME['id'])): 1}
conf = {
'basic.get_node_values.return_value': response,
}
self.driver.vip = self.setup_mock_vshare(m_conf=conf)
result = self.driver._get_lun_id(VOLUME['id'])
self.driver.vip.basic.get_node_values.assert_called_with(bn)
self.assertEqual(1, result)
def test_get_lun_id_with_no_lun_config(self):
response = {}
conf = {
'basic.get_node_values.return_value': response,
}
self.driver.vip = self.setup_mock_vshare(m_conf=conf)
self.assertRaises(exception.ViolinBackendErrNotFound,
self.driver._get_lun_id, VOLUME['id'])
def test_get_snapshot_id(self):
bn = ("/vshare/config/export/snapshot/container/%s/lun/%s/snap/%s"
"/target/**") % (self.conf.container, VOLUME['id'],
SNAPSHOT['id'])
response = {("/vshare/config/export/snapshot/container/%s/lun"
"/%s/snap/%s/target/hba-a1/initiator/openstack/lun_id"
% (self.conf.container, VOLUME['id'],
SNAPSHOT['id'])): 1}
conf = {
'basic.get_node_values.return_value': response,
}
self.driver.vip = self.setup_mock_vshare(m_conf=conf)
result = self.driver._get_snapshot_id(VOLUME['id'], SNAPSHOT['id'])
self.driver.vip.basic.get_node_values.assert_called_with(bn)
self.assertEqual(1, result)
def test_get_snapshot_id_with_no_lun_config(self):
response = {}
conf = {
'basic.get_node_values.return_value': response,
}
self.driver.vip = self.setup_mock_vshare(m_conf=conf)
self.assertRaises(exception.ViolinBackendErrNotFound,
self.driver._get_snapshot_id,
SNAPSHOT['volume_id'], SNAPSHOT['id'])
def test_send_cmd(self):
"""Command callback completes successfully."""
success_msg = 'success'
request_args = ['arg1', 'arg2', 'arg3']
response = {'code': 0, 'message': 'success'}
request_func = mock.Mock(return_value=response)
self.driver._fatal_error_code = mock.Mock(return_value=None)
result = self.driver._send_cmd(request_func, success_msg, request_args)
self.driver._fatal_error_code.assert_called_with(response)
self.assertEqual(response, result)
def test_send_cmd_request_timed_out(self):
"""The callback retry timeout hits immediately."""
success_msg = 'success'
request_args = ['arg1', 'arg2', 'arg3']
self.conf.request_timeout = 0
request_func = mock.Mock()
self.assertRaises(exception.ViolinRequestRetryTimeout,
self.driver._send_cmd,
request_func, success_msg, request_args)
def test_send_cmd_response_has_no_message(self):
"""The callback returns no message on the first call."""
success_msg = 'success'
request_args = ['arg1', 'arg2', 'arg3']
response1 = {'code': 0, 'message': None}
response2 = {'code': 0, 'message': 'success'}
request_func = mock.Mock(side_effect=[response1, response2])
self.driver._fatal_error_code = mock.Mock(return_value=None)
self.assertEqual(response2, self.driver._send_cmd
(request_func, success_msg, request_args))
def test_send_cmd_response_has_fatal_error(self):
"""The callback response contains a fatal error code."""
success_msg = 'success'
request_args = ['arg1', 'arg2', 'arg3']
response = {'code': 14000, 'message': 'try again later.'}
failure = exception.ViolinBackendErr
request_func = mock.Mock(return_value=response)
self.driver._fatal_error_code = mock.Mock(
side_effect=failure(message='fail'))
self.assertRaises(failure, self.driver._send_cmd,
request_func, success_msg, request_args)
def test_get_igroup(self):
"""The igroup is verified and already exists."""
bn = '/vshare/config/igroup/%s' % CONNECTOR['host']
response = {bn: CONNECTOR['host']}
conf = {
'basic.get_node_values.return_value': response,
}
self.driver.vip = self.setup_mock_vshare(m_conf=conf)
result = self.driver._get_igroup(VOLUME, CONNECTOR)
self.driver.vip.basic.get_node_values.assert_called_with(bn)
self.assertEqual(CONNECTOR['host'], result)
def test_get_igroup_with_new_name(self):
"""The igroup is verified but must be created on the backend."""
response = {}
conf = {
'basic.get_node_values.return_value': response,
}
self.driver.vip = self.setup_mock_vshare(m_conf=conf)
self.assertEqual(CONNECTOR['host'],
self.driver._get_igroup(VOLUME, CONNECTOR))
def test_wait_for_export_config(self):
"""Queries to cluster nodes verify export config."""
bn = "/vshare/config/export/container/myContainer/lun/%s" \
% VOLUME['id']
response = {'/vshare/config/export/container/myContainer/lun/vol-01':
VOLUME['id']}
conf = {
'basic.get_node_values.return_value': response,
}
self.driver.mga = self.setup_mock_vshare(m_conf=conf)
self.driver.mgb = self.setup_mock_vshare(m_conf=conf)
result = self.driver._wait_for_export_config(VOLUME['id'], state=True)
self.driver.mga.basic.get_node_values.assert_called_with(bn)
self.driver.mgb.basic.get_node_values.assert_called_with(bn)
self.assertTrue(result)
def test_wait_for_export_config_with_no_config(self):
"""Queries to cluster nodes verify *no* export | |
variation for inter-arrival time distribution
cs2 : float
squared coefficient of variation for service time distribution
Returns
-------
float
approximation for intermediate term z (see Eq 3.6)
"""
z = (ca2 + cs2) / (1.0 + cs2)
return z
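# Illustrative check (not from the original module): for ca2 = cs2 = 1 (an
# M/M/m-like case) the z term reduces to 1, so the gamma term below collapses to
# the familiar (m - m*rho - 0.5) / sqrt(m*rho) normal-approximation argument.
def _example_z_term():
    assert _ggm_prob_wait_whitt_z(1.0, 1.0) == 1.0
    assert abs(_ggm_prob_wait_whitt_z(2.0, 0.5) - (2.5 / 1.5)) < 1e-12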
def _ggm_prob_wait_whitt_gamma(m, rho, z):
"""
Equation 3.5 on p136 of Whitt (1993). Used in approximation for P(Wq > 0) in GI/G/c/inf queue.
See Whitt, Ward. "Approximations for the GI/G/m queue"
Production and Operations Management 2, 2 (Spring 1993): 114-161.
Parameters
----------
m : int
number of servers
rho : float
traffic intensity; arr_rate / (svc_rate * m)
z : float
intermediate term approximated in Eq 3.8
Returns
-------
float
intermediate term gamma (see Eq 3.5)
"""
term1 = m - m * rho - 0.5
term2 = np.sqrt(m * rho * z)
gamma = term1 / term2
return gamma
def _ggm_prob_wait_whitt_pi_6(m, rho, z):
"""
Part of Equation 3.11 on p139 of Whitt (1993). Used in approximation for P(Wq > 0) in GI/G/c/inf queue.
See Whitt, Ward. "Approximations for the GI/G/m queue"
Production and Operations Management 2, 2 (Spring 1993): 114-161.
Parameters
----------
m : int
number of servers
rho : float
traffic intensity; arr_rate / (svc_rate * m)
z : float
intermediate term approximated in Eq 3.8
Returns
-------
float
intermediate term pi_6 (see Eq 3.11)
"""
pi_6 = 1.0 - stats.norm.cdf((m - m * rho - 0.5) / np.sqrt(m * rho * z))
return pi_6
def _ggm_prob_wait_whitt_pi_5(m, rho, ca2, cs2):
"""
Part of Equation 3.11 on p139 of Whitt (1993). Used in approximation for P(Wq > 0) in GI/G/c/inf queue.
See Whitt, Ward. "Approximations for the GI/G/m queue"
Production and Operations Management 2, 2 (Spring 1993): 114-161.
Parameters
----------
m : int
number of servers
rho : float
traffic intensity; arr_rate / (svc_rate * m)
ca2 : float
squared coefficient of variation for inter-arrival time distribution
cs2 : float
squared coefficient of variation for service time distribution
Returns
-------
float
        intermediate term pi_5 (see Eq 3.11)
"""
term1 = 2.0 * (1.0 - rho) * np.sqrt(m) / (1.0 + ca2)
term2 = (1.0 - rho) * np.sqrt(m)
term3 = erlangc(rho * m, m) * (1.0 - stats.norm.cdf(term1)) / (1.0 - stats.norm.cdf(term2))
pi_5 = min(1.0,term3)
return pi_5
def _ggm_prob_wait_whitt_pi_4(m, rho, ca2, cs2):
"""
Part of Equation 3.11 on p139 of Whitt (1993). Used in approximation for P(Wq > 0) in GI/G/c/inf queue.
See Whitt, Ward. "Approximations for the GI/G/m queue"
Production and Operations Management 2, 2 (Spring 1993): 114-161.
Parameters
----------
m : int
number of servers
rho : float
traffic intensity; arr_rate / (svc_rate * m)
ca2 : float
squared coefficient of variation for inter-arrival time distribution
cs2 : float
squared coefficient of variation for service time distribution
Returns
-------
float
        intermediate term pi_4 (see Eq 3.11)
"""
term1 = (1.0 + cs2) * (1.0 - rho) * np.sqrt(m) / (ca2 + cs2)
term2 = (1.0 - rho) * np.sqrt(m)
term3 = erlangc(rho * m, m) * (1.0 - stats.norm.cdf(term1)) / (1.0 - stats.norm.cdf(term2))
pi_4 = min(1.0,term3)
return pi_4
def _ggm_prob_wait_whitt_pi_1(m, rho, ca2, cs2):
"""
Part of Equation 3.11 on p139 of Whitt (1993). Used in approximation for P(Wq > 0) in GI/G/c/inf queue.
See Whitt, Ward. "Approximations for the GI/G/m queue"
Production and Operations Management 2, 2 (Spring 1993): 114-161.
Parameters
----------
m : int
number of servers
rho : float
traffic intensity; arr_rate / (svc_rate * m)
ca2 : float
squared coefficient of variation for inter-arrival time distribution
cs2 : float
squared coefficient of variation for service time distribution
Returns
-------
float
        intermediate term pi_1 (see Eq 3.11)
"""
pi_4 = _ggm_prob_wait_whitt_pi_4(m, rho, ca2, cs2)
pi_5 = _ggm_prob_wait_whitt_pi_5(m, rho, ca2, cs2)
    pi_1 = (rho ** 2) * pi_4 + (1.0 - rho ** 2) * pi_5
return pi_1
def _ggm_prob_wait_whitt_pi_2(m, rho, ca2, cs2):
"""
Part of Equation 3.11 on p139 of Whitt (1993). Used in approximation for P(Wq > 0) in GI/G/c/inf queue.
    See Whitt, Ward. "Approximations for the GI/G/m queue"
Production and Operations Management 2, 2 (Spring 1993): 114-161.
Parameters
----------
m : int
number of servers
rho : float
traffic intensity; arr_rate / (svc_rate * m)
ca2 : float
squared coefficient of variation for inter-arrival time distribution
cs2 : float
squared coefficient of variation for service time distribution
Returns
-------
float
        intermediate term pi_2 (see Eq 3.11)
"""
pi_1 = _ggm_prob_wait_whitt_pi_1(m, rho, ca2, cs2)
z = _ggm_prob_wait_whitt_z(ca2, cs2)
pi_6 = _ggm_prob_wait_whitt_pi_6(m, rho, z)
pi_2 = ca2 * pi_1 + (1.0 - ca2) * pi_6
return pi_2
def _ggm_prob_wait_whitt_pi_3(m, rho, ca2, cs2):
"""
Part of Equation 3.11 on p139 of Whitt (1993). Used in approximation for P(Wq > 0) in GI/G/c/inf queue.
    See Whitt, Ward. "Approximations for the GI/G/m queue"
Production and Operations Management 2, 2 (Spring 1993): 114-161.
Parameters
----------
m : int
number of servers
rho : float
traffic intensity; arr_rate / (svc_rate * m)
ca2 : float
squared coefficient of variation for inter-arrival time distribution
cs2 : float
squared coefficient of variation for service time distribution
Returns
-------
float
        intermediate term pi_3 (see Eq 3.11)
"""
z = _ggm_prob_wait_whitt_z(ca2, cs2)
gamma = _ggm_prob_wait_whitt_gamma(m, rho, z)
pi_2 = _ggm_prob_wait_whitt_pi_2(m, rho, ca2, cs2)
pi_1 = _ggm_prob_wait_whitt_pi_1(m, rho, ca2, cs2)
term1 = 2.0 * (1.0 - ca2) * (gamma - 0.5)
term2 = 1.0 - term1
pi_3 = term1 * pi_2 + term2 * pi_1
return pi_3
def _ggm_prob_wait_whitt_pi(m, rho, ca2, cs2):
"""
Equation 3.10 on p139 of Whitt (1993). Used in approximation for P(Wq > 0) in GI/G/c/inf queue.
    See Whitt, Ward. "Approximations for the GI/G/m queue"
Production and Operations Management 2, 2 (Spring 1993): 114-161.
Parameters
----------
m : int
number of servers
rho : float
traffic intensity; arr_rate / (svc_rate * m)
ca2 : float
squared coefficient of variation for inter-arrival time distribution
cs2 : float
squared coefficient of variation for service time distribution
Returns
-------
float
        approximation for P(Wq > 0), pi (see Eq 3.10)
"""
z = _ggm_prob_wait_whitt_z(ca2, cs2)
gamma = _ggm_prob_wait_whitt_gamma(m, rho, z)
if m <= 6 or gamma <= 0.5 or ca2 >= 1:
pi = _ggm_prob_wait_whitt_pi_1(m, rho, ca2, cs2)
elif m >= 7 and gamma >= 1.0 and ca2 < 1:
pi = _ggm_prob_wait_whitt_pi_2(m, rho, ca2, cs2)
else:
pi = _ggm_prob_wait_whitt_pi_3(m, rho, ca2, cs2)
return pi
def _ggm_prob_wait_whitt_whichpi(m, rho, ca2, cs2):
"""
Equation 3.10 on p139 of Whitt (1993). Used in approximation for P(Wq > 0) in GI/G/c/inf queue.
Primarily used for debugging and validation of the approximation implementation.
    See Whitt, Ward. "Approximations for the GI/G/m queue"
Production and Operations Management 2, 2 (Spring 1993): 114-161.
Parameters
----------
m : int
number of servers
rho : float
traffic intensity; arr_rate / (svc_rate * m)
ca2 : float
squared coefficient of variation for inter-arrival time distribution
cs2 : float
squared coefficient of variation for service time distribution
Returns
-------
int
the pi case used in the approximation (1, 2, or 3)
"""
z = _ggm_prob_wait_whitt_z(ca2, cs2)
gamma = _ggm_prob_wait_whitt_gamma(m, rho, z)
if m <= 6 or gamma <= 0.5 or ca2 >= 1:
whichpi = 1
elif m >= 7 and gamma >= 1.0 and ca2 < 1:
whichpi = 2
else:
whichpi = 3
return whichpi
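# Illustrative sketch (not from the original module): which P(Wq > 0) case of
# Whitt's Eq 3.10 is selected for a few hypothetical parameter sets. The numbers
# are chosen only to exercise each branch; they are not taken from the paper.
def _example_whitt_pi_cases():
    # small system (m <= 6) -> case 1
    assert _ggm_prob_wait_whitt_whichpi(m=4, rho=0.8, ca2=1.0, cs2=1.0) == 1
    # many servers, light load (gamma >= 1) and ca2 < 1 -> case 2
    assert _ggm_prob_wait_whitt_whichpi(m=20, rho=0.5, ca2=0.5, cs2=1.0) == 2
    # many servers, 0.5 < gamma < 1 and ca2 < 1 -> interpolated case 3
    assert _ggm_prob_wait_whitt_whichpi(m=50, rho=0.9, ca2=0.5, cs2=1.0) == 3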
def _ggm_qcondwait_whitt_ds3(cs2):
"""
Return the approximate E(V^3)/(EV)^2 where V is a service time; based on either a hyperexponential
or Erlang distribution. Used in approximation of conditional wait time CDF (conditional on W>0).
Whitt refers to conditional wait as D in his paper:
    See Whitt, Ward. "Approximations for the GI/G/m queue"
Production and Operations Management 2, 2 (Spring 1993): 114-161.
This is Equation 4.3 on p146. Note that there is a typo in the original paper in which the first term
for Case 1 is shown as cubed, whereas it should be squared. This can be confirmed by seeing Eq 51 in
Whitt's paper on the QNA (Bell Systems Technical Journal, Nov 1983).
Parameters
----------
cs2 : float
squared coefficient of variation for service time distribution
Returns
-------
float
import numpy as np
import h5py
import argparse
import sys
import re
ordered = []
overrides = {}
label_map = {}
description_map = {}
units_map = {}
tao_bandpass_name = {}
delimiters = "<",">"
regexPattern = '|'.join(map(re.escape, delimiters))
def parse_keywords(line):
keywords = {}
rest = line
pos1 = rest.find('=')
while pos1 != -1:
pos2 = rest.find('=',pos1+1)
if pos2 == -1:
pos3 = len(rest)
else:
pos3 = rest.rfind(' ',0,pos2)
keyword = rest[:pos1]
value = rest[pos1+1:pos3]
keywords[keyword] = value
rest = rest[pos3+1:]
print("\t_k="+keyword+"=["+value+"]")
pos1 = rest.find('=')
return keywords
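# Illustrative sketch (not part of the original script): parse_keywords applied to
# a hypothetical override line. Values may contain spaces because each pair is cut
# at the last space before the next '='.
def _example_parse_keywords():
    parsed = parse_keywords("fieldname=mstars_disk label=Stellar Mass (Disk) group=Galaxy Masses")
    assert parsed["fieldname"] == "mstars_disk"
    assert parsed["label"] == "Stellar Mass (Disk)"
    assert parsed["group"] == "Galaxy Masses"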
def scrape_metadata():
with open("./scrapedata/overwrite_first_draft_dont_delete.txt", "r") as f:
data = f.read()
lines = re.split("\n", data)
print("lines="+str(len(lines)))
for index in range(len(lines)):
line = lines[index].strip()
if line == "":
continue
print("++"+line)
keywords = parse_keywords(line)
fieldname = keywords["fieldname"]
overrides[fieldname] = keywords
with open("./scrapedata/other_thirdhand_info.txt", "r") as f:
data = f.read()
lines = re.split("\n", data)
for index in range(len(lines)):
line = lines[index].strip()
tokens = line.split(" ")
if tokens[0].startswith("fieldname="):
fieldname = tokens[0][len("fieldname="):]
label = fieldname
if tokens[1].startswith("label="):
label = tokens[1][len("label="):]
if tokens[2].startswith("units="):
units = tokens[2][len("units="):]
if tokens[3].startswith("description="):
description = tokens[3][len("description="):]
for index in range(4,len(tokens)):
description += " "+tokens[index]
if not fieldname in description_map:
label_map[fieldname] = label
description_map[fieldname] = description
if units != "none":
units_map[fieldname] = units
print("fieldname from thirdhand=["+fieldname+"] description=["+description+"] label=["+label+"]")
else:
print("SKIP fieldname from thirdhand=["+fieldname+"] description=["+description+"]")
pass
with open("./scrapedata/output_files.html", "r") as f:
data = f.read()
lines = re.split(regexPattern, data)
for index in range(len(lines)):
line = lines[index]
if line == 'span class="pre"':
fieldname = lines[index+1]
label = lines[index+5][2:]
description = label
units = ""
pos1 = label.find("[")
pos2 = label.find("]")
if pos1 != -1:
units = label[pos1+1:pos2]
description = label[:pos1].strip()
if pos2 != len(label)-1:
description = label[:pos1].strip()+label[pos2+1:]
else:
description = label[:pos1].strip()
print("metafield["+fieldname+"]"+description+" units="+units)
description_map[fieldname] = description
units_map[fieldname] = units
with open("./scrapedata/module_user_routines_shark.f08", "r") as f:
data = f.read()
lines = re.split("\n", data)
for index in range(len(lines)):
line = lines[index].strip()
if "::" in line and "!" in line:
if line.startswith("real*4 ") or line.startswith("integer*4 ") or line.startswith("integer*8 "):
pos1 = line.find("::")
pos2 = line.find("!")
fieldname = line[pos1+2:pos2].strip()
label = line[pos2+1:].strip()
pos1 = label.find("[")
pos2 = label.find("]")
if pos1 != -1:
units = label[pos1+1:pos2]
description = label[pos2+1:].strip()
else:
units = ""
description = label
if not fieldname in description_map:
description_map[fieldname] = description
units_map[fieldname] = units
print("fieldname from f08=["+fieldname+"] description=["+description+"] units=["+units+"]")
else:
print("SKIP fieldname from f08=["+fieldname+"] description=["+description+"] units=["+units+"]")
pass
with open("./scrapedata/module_user_routines_shark.f08", "r") as f:
data = f.read()
lines = re.split("\n", data)
for index in range(len(lines)):
line = lines[index].strip()
if line.startswith("call hdf5_write_data(name//"):
if line.endswith("&"):
line = line[0:-1]+lines[index+1].strip()[1:]
line = line[28:]
pos1 = line.find("'")
fieldname = line[:pos1]
label = line[pos1+1:]
pos1 = label.find("'")
label = label[pos1+1:]
pos2 = label.find("'")
pos1 = label.find("[")
pos2 = label.find("]")
if pos1 != -1:
units = label[pos1+1:pos2]
description = label[pos2+1:-2].strip()
else:
units = ""
description = label
if not fieldname in description_map:
description_map[fieldname] = description
units_map[fieldname] = units
print("LAST CHANCE fieldname from f08=["+fieldname+"] description=["+description+"] units=["+units+"]")
else:
print("SKIP fieldname from f08=["+fieldname+"] description=["+description+"] units=["+units+"]")
pass
print("LINE["+line+"] fieldname["+fieldname+"] description["+description+"]")
def get_scraped_label(name):
if name in overrides:
if "label" in overrides[name]:
return overrides[name]["label"]
if name.startswith("dust_"):
return name[5:]+" (With Dust)"
if name.startswith("nodust_"):
return name[7:]
if name in label_map:
print("label_map["+name+"]="+label_map[name])
return label_map[name]
return name
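# Illustrative sketch (not part of the original script): the prefix-based label
# fallbacks used by get_scraped_label, assuming no override or scraped label
# exists for these (hypothetical) field names.
def _example_label_fallbacks():
    assert get_scraped_label("dust_r_SDSS") == "r_SDSS (With Dust)"
    assert get_scraped_label("nodust_r_SDSS") == "r_SDSS"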
def get_scraped_description(name):
if name in description_map:
return description_map[name]
if name.startswith("dust_"):
return name[5:]+" (With Dust)"
if name.startswith("nodust_"):
return name[7:]
return name
def get_scraped_units(name):
if name in units_map:
return units_map[name]
return ""
def get_scraped_group(group):
# for array fieldnames belonging to groups
if group in tao_bandpass_name:
return tao_bandpass_name[group]
return group
def get_scraped_groupname(name, default):
if name in overrides:
if "group" in overrides[name]:
return overrides[name]["group"]
return default
def get_scraped_order(name):
for i in range(len(ordered)):
if ordered[i] == name:
return i+1
def print_attrs(name, obj):
maxrows = 10
firstcolumn=-1
lastcolumn=-1
print("####NAME["+name+"]="+str(obj.attrs.keys()))
if name == 'galaxies' or name == 'Data' or str(name).endswith('Galaxies'):
groups = {}
groups["Band4_ALMA"] = 1
groups["Band5_ALMA"] = 1
groups["Band6_ALMA"] = 1
groups["Band7_ALMA"] = 1
groups["Band8_ALMA"] = 1
groups["Band9_ALMA"] = 1
groups["S450_JCMT"] = 1
groups["S850_JCMT"] = 1
groups["S250_Herschel"] = 1
groups["S350_Herschel"] = 1
groups["S450_Herschel"] = 1
groups["S500_Herschel"] = 1
groups["FUV_GALEX"] = 1
groups["NUV_GALEX"] = 1
groups["u_SDSS"] = 1
groups["g_SDSS"] = 1
groups["r_SDSS"] = 1
groups["i_SDSS"] = 1
groups["z_SDSS"] = 1
groups["Y_VISTA"] = 1
groups["J_VISTA"] = 1
groups["H_VISTA"] = 1
groups["K_VISTA"] = 1
groups["W1_WISE"] = 1
groups["W2_WISE"] = 1
groups["W3_WISE"] = 1
groups["W4_WISE"] = 1
groups["I1_Spitzer"] = 1
groups["I2_Spitzer"] = 1
groups["I3_Spitzer"] = 1
groups["I4_Spitzer"] = 1
groups["P70_Herschel"] = 1
groups["P100_Herschel"] = 1
groups["P160_Herschel"] = 1
tao_bandpass_name["Band4_ALMA"] = "ALMA Band4"
tao_bandpass_name["Band5_ALMA"] = "ALMA Band5"
tao_bandpass_name["Band6_ALMA"] = "ALMA Band6"
tao_bandpass_name["Band7_ALMA"] = "ALMA Band7"
tao_bandpass_name["Band8_ALMA"] = "ALMA Band8"
tao_bandpass_name["Band9_ALMA"] = "ALMA Band9"
tao_bandpass_name["S450_JCMT"] = "JCMT S450"
tao_bandpass_name["S850_JCMT"] = "JCMT S850"
tao_bandpass_name["S250_Herschel"] = "Herschel/SPIRE 250"
tao_bandpass_name["S350_Herschel"] = "Herschel/SPIRE 350"
tao_bandpass_name["S450_Herschel"] = "Herschel/SPIRE 450"
tao_bandpass_name["S500_Herschel"] = "Herschel/SPIRE 500"
tao_bandpass_name["FUV_GALEX"] = "GALEX FUV"
tao_bandpass_name["NUV_GALEX"] = "GALEX NUV"
tao_bandpass_name["u_SDSS"] = "SDSS u"
tao_bandpass_name["g_SDSS"] = "SDSS g"
tao_bandpass_name["r_SDSS"] = "SDSS r"
tao_bandpass_name["i_SDSS"] = "SDSS i"
tao_bandpass_name["z_SDSS"] = "SDSS z"
tao_bandpass_name["Y_VISTA"] = "VISTA Y"
tao_bandpass_name["J_VISTA"] = "VISTA J"
tao_bandpass_name["H_VISTA"] = "VISTA H"
tao_bandpass_name["K_VISTA"] = "VISTA K"
tao_bandpass_name["W1_WISE"] = "WISE1"
tao_bandpass_name["W2_WISE"] = "WISE2"
tao_bandpass_name["W3_WISE"] = "WISE3"
tao_bandpass_name["W4_WISE"] = "WISE4"
tao_bandpass_name["I1_Spitzer"] = "Spitzer IRAC1"
tao_bandpass_name["I2_Spitzer"] = "Spitzer IRAC2"
tao_bandpass_name["I3_Spitzer"] = "Spitzer IRAC3"
tao_bandpass_name["I4_Spitzer"] = "Spitzer IRAC4"
tao_bandpass_name["P70_Herschel"] = "Herschel/PACS 70"
tao_bandpass_name["P100_Herschel"] = "Herschel/PACS 100"
tao_bandpass_name["P160_Herschel"] = "Herschel/PACS 160"
header = ""
# Initialise ordered
for i in range(0,len(obj.dtype)):
name = str(obj.dtype.descr[i][0])
ordered.append(name)
# process OrderBeforeFieldname
for i in range(0,len(obj.dtype)):
name = str(obj.dtype.descr[i][0])
if "OrderBeforeFieldname" in overrides[name]:
a_index = get_scraped_order(name)-1
b_index = get_scraped_order(overrides[name]["OrderBeforeFieldname"])-1
print("Field[{}]={} Before[{}]={}".format(a_index,ordered[a_index],b_index,ordered[b_index]))
if a_index < b_index:
for ii in range(a_index+1, b_index):
ordered[ii-1] = ordered[ii]
ordered[b_index-1] = name
else:
for ii in range(a_index-1, b_index-1, -1):
print("move [{}]={} to [{}]={}".format(ii,ordered[ii],ii+1,ordered[ii+1]))
ordered[ii+1] = ordered[ii]
ordered[b_index] = name
print("I think the following xml is for the hdf5 sidecar xml "+args.magnitude)
with open("as.xml", "w") as dataxml:
dataxml.write(" <sageinput>\n")
for i in range(0,len(obj.dtype)):
type = str(obj.dtype.descr[i][1])
if type == "<f8":
type = "float"
if type == "<f4":
type = "float"
if type == "<i4":
type = "int"
if type == "<i8":
type = "long long"
order = str(i+1)
group = "Galaxy/Halo Properties"
name = str(obj.dtype.descr[i][0])
order = str(get_scraped_order(name))
group = get_scraped_groupname(name, group)
label = get_scraped_label(name)
description = get_scraped_description(name)
units = get_scraped_units(name)
for agroup in groups.keys():
if agroup in name:
if name.startswith("dust_"):
group = get_scraped_group(agroup)+" (With Dust)"
if name.startswith("nodust_"):
group = get_scraped_group(agroup)
dataxml.write(" <Field Type=\""+type+"\"\n")
dataxml.write(" label=\""+label+"\"\n")
dataxml.write(" description=\""+description+"\"\n")
dataxml.write(" order=\""+order+"\"\n")
dataxml.write(" units=\""+units+"\"\n")
dataxml.write(" group=\""+group+"\">"+name.lower()+"</Field>\n")
dataxml.write(" </sageinput>\n")
print("I think the following xml is for the import process on TAOUI")
with open("as.ui.txt", "w") as uitxt:
for i in range(0,len(obj.dtype)):
#print("i="+str(i))
type = str(obj.dtype.descr[i][1])
if type == "<f8":
type = "float"
if type == "<f4":
type = "float"
if type == "<i4":
type = "int"
if type == "<i8":
type = "long long"
order = str(i+1)
group = "Galaxy/Halo Properties"
name = str(obj.dtype.descr[i][0])
order = str(get_scraped_order(name))
group = get_scraped_groupname(name, group)
label = get_scraped_label(name)
units = get_scraped_units(name)
if units == "":
units = "none"
description = get_scraped_description(name)
if "OrderBeforeFieldname" in overrides[name]:
uitxt.write("fieldname="+name+" label="+label+" units="+units+" description="+description+" group="+group+" OrderBeforeFieldname="+overrides[name]["OrderBeforeFieldname"]+"\n")
else:
uitxt.write("fieldname="+name+" label="+label+" units="+units+" description="+description+" group="+group+"\n")
with open("as.ui.xml", "w") as uixml:
uixml.write("<settings>\n")
uixml.write(" <sageinput>\n")
j = 0
very_first_Magnitude_field = True
for i in range(0,len(obj.dtype)):
#print("i="+str(i))
type = str(obj.dtype.descr[i][1])
if type == "<f8":
type = "float"
if type == "<f4":
type = "float"
if type == "<i4":
type = "int"
if type == "<i8":
type = "long long"
order = str(j+1)
units = ""
group = "Galaxy/Halo Properties"
name = str(obj.dtype.descr[i][0])
order = str(get_scraped_order(name))
group = get_scraped_groupname(name, group)
label = get_scraped_label(name)
description = get_scraped_description(name)
units = get_scraped_units(name)
field = name
isfield = True
is_dust_doublet = False
for agroup in groups.keys():
if agroup in name:
if name.startswith("dust_"):
if very_first_Magnitude_field:
very_first_Magnitude_field = False
j = get_scraped_order(name) - 1
# Note we are assuming from this field forward on all rest of the fields will be "Magnitude" fields
is_dust_doublet = True
group = "Galaxy Magnitudes"
if groups[agroup] == 2:
isfield = False
groups[agroup] = 2
field = get_scraped_group(agroup)+" (With Dust)"
label = field
description = field
break
if name.startswith("nodust_"):
is_dust_doublet = True
group = "Galaxy Magnitudes"
if groups[agroup] == 3:
isfield = False
                            isfield = False  # because we have
#!/usr/bin/env python3
# coding: utf-8
import sys
import re
import logging
import json
import asyncio
from http import HTTPStatus
from copy import deepcopy
from os import path
from collections import namedtuple
from itertools import takewhile
from tornado.ioloop import IOLoop
from tornado.web import Application, RequestHandler, HTTPError
from tornado.options import define
from tornado.httpserver import HTTPServer
from tornado.httputil import HTTPHeaders
from tornado.httpclient import AsyncHTTPClient, HTTPRequest
from tornado.log import enable_pretty_logging
logger = logging.getLogger('zaglushka')
class ResponseStub(namedtuple('_ResponseStub', ['code', 'headers_func', 'body_coroutine', 'delay'])):
def __new__(cls, **kwargs):
data = {k: None for k in cls._fields}
data.update(kwargs)
return super(ResponseStub, cls).__new__(cls, **data)
Rule = namedtuple('Rule', ['matcher', 'responder'])
def _get_stub_file_path(base_stubs_path, stub_path):
return stub_path if stub_path.startswith('/') else path.join(base_stubs_path, stub_path)
class Config(object):
@classmethod
def from_console_argument(cls, config_full_path):
# TODO: tests
if not path.exists(config_full_path):
logger.error('Config not found at {}'.format(config_full_path))
raise Exception('config not found')
with open(config_full_path) as config_fp:
cleaned = json_minify(config_fp.read())
try:
raw_config = json.loads(cleaned)
except ValueError as e:
logger.error('Unable to parse config: {}'.format(e))
raise
return cls(raw_config, config_full_path)
def __init__(self, raw_config, config_full_path):
self.watched_files = {config_full_path}
config_dirname = path.dirname(config_full_path)
if 'stubs_base_path' in raw_config:
stubs_base_path = _get_stub_file_path(config_dirname, raw_config['stubs_base_path'])
else:
stubs_base_path = config_dirname
stubs_base_path = path.abspath(stubs_base_path)
self.raw = raw_config
rules = []
for num, url_spec in enumerate(self.raw.get('urls', [])):
matcher = choose_matcher(url_spec)
if matcher is not None:
responder, responder_paths = choose_responder(url_spec, stubs_base_path)
self.watched_files.update(responder_paths)
rules.append(Rule(matcher, responder))
else:
logger.warning('Unable to build matcher from url spec #{}, skipping'.format(num))
rules.append(Rule(always_match, default_response()))
self.rules = rules
self.stubs_base_path = stubs_base_path
def choose_matcher(spec):
method = spec['method'].upper() if 'method' in spec else None
if 'query' in spec:
query_args_matcher = build_simple_query_args_matcher(spec['query'])
else:
query_args_matcher = always_match
if 'path' in spec:
return build_simple_matcher(spec['path'], method, query_args_matcher)
elif 'path_regexp' in spec:
return build_regexp_matcher(spec['path_regexp'], method, query_args_matcher, warn_func=logger.warning)
else:
return None
def _to_str(value, fallback=False):
if isinstance(value, str):
return value
elif isinstance(value, bytes):
if fallback:
try:
return value.decode('utf-8')
except UnicodeDecodeError:
return chr(0xFFFD)
else:
return value.decode('utf-8')
else:
return str(value)
def _is_args_matched(real_args, required, other_allowed=True):
def _spec2list(dict_):
res = []
for arg, val in dict_.items():
if isinstance(val, (list, set, tuple)):
res.extend((_to_str(arg), _to_str(v)) for v in val)
else:
res.append((_to_str(arg), _to_str(val)))
return res
required = _spec2list(required)
real = _spec2list(real_args)
matched = []
if not other_allowed and len(real) > 0 and len(required) == 0:
return False
for pair in real:
try:
match_index = required.index(pair)
except ValueError:
match_index = None
if match_index is None and not other_allowed:
return False
elif match_index is not None:
required.pop(match_index)
matched.append(pair)
return len(required) == 0
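# A minimal sketch of how _is_args_matched behaves (argument values are made up,
# not taken from any real config): every required pair must be present, and extra
# pairs are tolerated only when other_allowed is True.
def _example_is_args_matched():
    real = {'q': ['search'], 'page': ['1']}
    assert _is_args_matched(real, {'q': 'search'}, other_allowed=True)
    assert not _is_args_matched(real, {'q': 'search'}, other_allowed=False)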
def build_simple_query_args_matcher(args_spec):
def _simple_query_args_matcher(request):
return _is_args_matched(
request.query_arguments,
args_spec.get('required', {}),
args_spec.get('other_allowed', True)
)
return _simple_query_args_matcher
def build_simple_matcher(rel_path, method, query_args_matcher):
return lambda request: ((method is None or request.method == method) and request.path == rel_path and
query_args_matcher(request))
def build_regexp_matcher(pattern, method, query_args_matcher, warn_func=None):
try:
pattern_compiled = re.compile(pattern)
except re.error as e:
if warn_func is not None:
warn_func('Unable to compile regexp "{}": {}'.format(pattern, e))
return None
return lambda request: ((method is None or request.method == method) and
re.search(pattern_compiled, request.path) is not None and
query_args_matcher(request))
def always_match(*_, **__):
return True
def choose_responder(spec, base_stubs_path):
code = int(spec.get('code', HTTPStatus.OK))
delay = float(spec['delay']) if 'delay' in spec else None
stub_kwargs = {'code': code, 'delay': delay}
headers_func, paths = choose_headers_func(spec, base_stubs_path)
responder = None
if 'response' in spec:
body = spec['response']
if not isinstance(body, (str, bytes)):
body = json.dumps(body, ensure_ascii=False)
responder = static_response(body, headers_func, **stub_kwargs)
elif 'response_file' in spec:
full_path = path.normpath(path.join(base_stubs_path, spec['response_file']))
paths.add(full_path)
responder = filebased_response(full_path, headers_func, warn_func=logger.warning, **stub_kwargs)
elif 'response_proxy' in spec and 'path' in spec:
responder = proxied_response(
url=spec['path'], use_regexp=False, proxy_url=spec['response_proxy'],
headers_func=headers_func, warn_func=logger.warning, log_func=logger.debug, **stub_kwargs
)
elif 'response_proxy' in spec and 'path_regexp' in spec:
responder = proxied_response(
url=spec['path_regexp'], use_regexp=True, proxy_url=spec['response_proxy'],
headers_func=headers_func, warn_func=logger.warning, log_func=logger.debug, **stub_kwargs
)
if responder is None:
responder = static_response(b'', headers_func, **stub_kwargs)
return responder, paths
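# A hedged example of a single entry in the config's "urls" list as choose_responder
# reads it (all values below are made up):
#
#     {
#         "path": "/api/ping",
#         "method": "GET",
#         "code": 200,
#         "delay": 0.5,
#         "response": {"pong": true},
#         "headers": {"Content-Type": "application/json"}
#     }
#
# Only one of "response", "response_file" or "response_proxy" is used; when none is
# given, an empty body is returned with the configured code.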
def static_response(body, headers_func, **stub_kwargs):
async def _body_coroutine(handler):
handler.write(body)
return ResponseStub(headers_func=headers_func,
body_coroutine=_body_coroutine,
**stub_kwargs)
def default_response():
return static_response(
body='',
headers_func=build_static_headers_func({
'X-Zaglushka-Default-Response': 'true',
}),
code=HTTPStatus.NOT_FOUND
)
def filebased_response(full_path, headers_func, warn_func=None, **stub_kwargs):
async def _body_coroutine(handler):
# detect the file at every request, so you can add it wherever you want
if not path.isfile(full_path):
if warn_func is not None:
warn_func('Unable to find stubs file "{f}" for {m} {url}'
.format(f=full_path, m=handler.request.method, url=handler.request.uri))
handler.set_header('X-Zaglushka-Failed-Response', 'true')
return
await send_file(full_path, handler)
return ResponseStub(headers_func=headers_func,
body_coroutine=_body_coroutine,
**stub_kwargs)
async def _fetch_request(http_client, request):
return await http_client.fetch(request)  # wrapped for easier testing
def proxied_response(url, use_regexp, proxy_url, headers_func, warn_func=None, log_func=None, **stub_kwargs):
url_regexp = None
if use_regexp:
try:
url_regexp = re.compile(url)
except re.error as e:
if warn_func is not None:
warn_func('Unable to compile url pattern "{}": {}'.format(url, e))
return default_response()
async def _body_coroutine(handler):
request_url = proxy_url
if url_regexp:
match = url_regexp.search(handler.request.uri)
if match is None:
handler.set_header('X-Zaglushka-Failed-Response', 'true')
return
for i, group in enumerate(match.groups(), start=1):
request_url = request_url.replace('${}'.format(i), group)
http_client = handler.application.settings['http_client']
method = handler.request.method
if method in ('GET', 'HEAD') and not handler.request.body:  # :(
body = None
else:
body = handler.request.body
request = HTTPRequest(request_url, method=method, headers=handler.request.headers,
body=body, follow_redirects=False, allow_nonstandard_methods=True)
if log_func:
log_func('Fetch request {r.method} {r.url}'.format(r=request))
response = await _fetch_request(http_client, request)
if log_func:
log_func('Request {r.method} {r.url} complete with code={rc}'.format(r=request, rc=response.code))
if response.code == 599: # special tornado status code
handler.set_header('X-Zaglushka-Failed-Response', 'true')
if warn_func is not None:
warn_func('Unable to proxy response to "{u}": {e}'.format(u=request_url, e=response.error))
return
headers_before = deepcopy(handler.get_headers())
handler.write(response.body)
for header, value in response.headers.items():
handler.add_header(header, value)
# replace with headers from a config if any
for header, _ in headers_before.get_all():
handler.clear_header(header)
for value in headers_before.get_list(header):
handler.add_header(header, value)
handler.set_status(response.code)
return
return ResponseStub(headers_func=headers_func,
body_coroutine=_body_coroutine,
**stub_kwargs)
def choose_headers_func(spec, base_stubs_path):
paths = set()
if 'headers' in spec:
return build_static_headers_func(spec['headers']), paths
elif 'headers_file' in spec:
stub_path = _get_stub_file_path(base_stubs_path, spec['headers_file'])
paths.add(stub_path)
return build_filebased_headers_func(stub_path, warn_func=logger.warning), paths
else:
return build_static_headers_func({}), paths
def build_static_headers_func(headers):
def _static_headers_func(handler):
for header, values in headers.items():
if not isinstance(values, (list, tuple, set, frozenset)):
values = [values]
for value in values:
handler.add_header(header, value)
return _static_headers_func
def build_filebased_headers_func(full_path, warn_func=None):
def _filebased_headers_func(handler):
if not path.isfile(full_path):
if warn_func is not None:
warn_func('Unable to find headers stubs file "{f}" for {m} {url}'
.format(f=full_path, m=handler.request.method, url=handler.request.uri))
handler.add_header('X-Zaglushka-Failed-Headers', 'true')
return
for header, value in HTTPHeaders.parse(open(full_path, 'r').read()).get_all():
handler.add_header(header, value)
return _filebased_headers_func
def json_minify(data, strip_space=True):
"""
json_minify v0.1 (C) <NAME>
MIT License
Based on JSON.minify.js:
https://github.com/getify/JSON.minify
"""
tokenizer = re.compile(r'"|(/\*)|(\*/)|(//)|\n|\r')
in_string = False
in_multiline_comment = False
in_singleline_comment = False
new_str = []
from_index = 0 # from is a keyword in Python
for match in re.finditer(tokenizer, data):
if not in_multiline_comment and not in_singleline_comment:
tmp2 = data[from_index:match.start()]
if not in_string and strip_space:
tmp2 = re.sub('[ \t\n\r]*', '', tmp2) # replace only white space defined in standard
new_str.append(tmp2)
from_index = match.end()
if match.group() == '"' and not in_multiline_comment and not in_singleline_comment:
escaped = re.search('(\\\\)*$', data[:match.start()])
if not in_string or escaped is None or len(escaped.group()) % 2 == 0:
# start of string with ", or unescaped " character found to end string
in_string = not in_string
from_index -= 1 # include " character in next catch
elif match.group() == '/*' and not in_string and not in_multiline_comment and not in_singleline_comment:
in_multiline_comment = True
elif match.group() == '*/' and not in_string and in_multiline_comment and not in_singleline_comment:
in_multiline_comment = False
elif match.group() == '//' and not in_string and not in_multiline_comment and not in_singleline_comment:
in_singleline_comment = True
elif ((match.group() == '\n' or match.group() == '\r') and not in_string and not in_multiline_comment and
in_singleline_comment):
in_singleline_comment = False
elif (not in_multiline_comment and not in_singleline_comment and
(match.group() not in ['\n', '\r', ' ', '\t'] or not strip_space)):
new_str.append(match.group())
new_str.append(data[from_index:])
return ''.join(new_str)
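# A minimal usage sketch for json_minify (sample input is made up): comments and
# insignificant whitespace are stripped, while whitespace inside strings survives.
def _example_json_minify():
    raw = '{\n  // line comment\n  "a": 1, /* block comment */ "b": "x y"\n}'
    assert json.loads(json_minify(raw)) == {'a': 1, 'b': 'x y'}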
async def send_file(full_path, handler: RequestHandler, chunk_size=1024 * 8):
fd = open(full_path, 'rb') # TODO: check exceptions
while True:
try:
data = fd.read(chunk_size)
except (IOError, OSError):
data = None
if data is not None and len(data) > 0:
handler.write(data)
await handler.flush()
else:
fd.close()
break
class StubHandler(RequestHandler):
async def get(self):
await self.send_stub()
async def post(self):
await self.send_stub()
async def put(self):
await self.send_stub()
async def delete(self):
await self.send_stub()
async def patch(self):
await self.send_stub()
async def head(self):
await self.send_stub()
async def options(self):
await self.send_stub()
def get_headers(self):
return self._headers
async def _make_response_with_rule(self, responder: ResponseStub):
if responder.delay is not None:
logger.debug('Delay response for {m} {u} by {sec:.3f} sec'.format(m=self.request.method,
u=self.request.uri,
sec=responder.delay))
await asyncio.sleep(responder.delay)
self.set_status(responder.code)
responder.headers_func(self)
await responder.body_coroutine(self)
async def send_stub(self):
self.clear_header('Server')
self.clear_header('Content-Type')
self.clear_header('Date')
config = self.application.settings['zaglushka_config']
matched = False
for rule in config.rules:
if rule.matcher(self.request):
await self._make_response_with_rule(rule.responder)
matched = True
break
if not matched:
raise HTTPError(HTTPStatus.INTERNAL_SERVER_ERROR)
def compute_etag(self):
return None
def build_app(zaglushka_config, debug=False):
http_client = AsyncHTTPClient()
return Application(
handlers=[(r'.*', StubHandler)],
debug=debug,
zaglushka_config=zaglushka_config,
http_client=http_client
)
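# A wiring sketch, assuming a config path and port chosen here purely for
# illustration (this is not the project's real entry point):
#
#     config = Config.from_console_argument('/etc/zaglushka.json')
#     app = build_app(config, debug=True)
#     HTTPServer(app).listen(8001)
#     IOLoop.current().start()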
def wait_when_config_fixed(config_full_path, exception=None):
import tornado.autoreload
logger.error('Your config is broken. 
# -*- coding: utf-8 -*-
"""
Operations on genomic intervals stored in GTF file
note:
- all the exons of a gene should be on the same strand. Genes with exons trans-
spliced from the other strand, like mod(mdg4) in D. melanogaster, should be
excluded (before or after).
- stop codon is not part of the CDS, at least for Ensembl GTF
-----------------------------------------------------------------
@author: zh (mt1022)
@date: Fri Aug 27 2021
"""
import sys
import argparse
import re
import gzip
import fileinput
import csv
from dataclasses import dataclass
from typing import List
###############################################################################
# class definition ############################################################
###############################################################################
@dataclass
class Region:
start: int
end: int
def __post_init__(self):
if self.start > self.end:
raise ValueError('Invalid region boundary!')
def __len__(self):
return self.end - self.start + 1
@dataclass
class Gene:
gene_id: str
chrom: str = ''
strand: str = '+'
class Transcript:
def __init__(self, tx_id: str, gene: Gene):
self.tx_id: str = tx_id
self.gene: Gene = gene
self.exons: List[Region] = []
self.cdss: List[Region] = []
self.stop_codon: List[Region] = []
def add_region(self, region, region_type):
if region_type == 'exon':
self.exons.append(region)
elif region_type == 'CDS':
self.cdss.append(region)
elif region_type == 'stop_codon':
self.stop_codon.append(region)
return
def update(self):
"""
Update the order of regions so that operations related to intervals can
work correctly.
"""
self.exons = sorted(self.exons, key=lambda r: r.start)
self.cdss = sorted(self.cdss, key=lambda r: r.start)
self.stop_codon = sorted(self.stop_codon, key=lambda r: r.start)
return
def __len__(self):
return sum(len(i) for i in self.exons)
@property
def n_exons(self):
return len(self.exons)
@property
def cds_len(self):
return sum(len(i) for i in self.cdss)
@property
def tx_start(self):
if self.gene.strand == '+':
return self.exons[0].start
else:
return self.exons[-1].end
@property
def tx_end(self):
if self.gene.strand == '+':
return self.exons[-1].end
else:
return self.exons[0].start
@property
def cds_start(self):
if len(self.cdss) == 0:
return None
elif self.gene.strand == '+':
return self.cdss[0].start
else:
return self.cdss[-1].end
@property
def cds_end(self):
if len(self.cdss) == 0:
return None
elif self.gene.strand == '+':
return self.cdss[-1].end
else:
return self.cdss[0].start
@property
def stop_codon_start(self):
if len(self.stop_codon) == 0:
return None
elif self.gene.strand == '+':
return self.stop_codon[0].start
else:
return self.stop_codon[-1].end
@property
def stop_codon_end(self):
if len(self.stop_codon) == 0:
return None
elif self.gene.strand == '+':
return self.stop_codon[-1].end
else:
return self.stop_codon[0].start
@property
def introns(self):
if len(self.exons) == 1:
return []
else:
return [Region(self.exons[i].end + 1, self.exons[i+1].start - 1)
for i in range(self.n_exons - 1)]
def tpos_to_gpos(self, pos: int):
"""
transform transcript coordinate to genomic coordinate
param pos: int, position on transcript, 1-based.
"""
if pos < 1:
return 0
elif pos > len(self):
return -1
else:
if self.gene.strand == '-':
pos = len(self) - pos + 1
for i in range(self.n_exons):
if len(self.exons[i]) < pos:
pos -= len(self.exons[i])
else:
return self.exons[i].start + pos - 1
def gpos_to_tpos(self, pos):
"""
transform genomic coordinate to transcript coordinate
param pos: int, position on genome, 1-based.
"""
if pos < self.exons[0].start:
tpos = self.exons[0].start - pos
ptype = 'upstream' if self.gene.strand == '+' else 'downstream'
return tpos, ptype
elif pos > self.exons[-1].end:
tpos = pos - self.exons[-1].end
ptype = 'downstream' if self.gene.strand == '+' else 'upstream'
return tpos, ptype
else:
tpos = 0
for i in range(self.n_exons):
if self.exons[i].start <= pos:
if self.exons[i].end <= pos:
tpos += len(self.exons[i])
else:
tpos += pos - self.exons[i].start + 1
else:
if self.exons[i-1].end < pos:
if self.gene.strand == '+':
ptype = 'intron_' + str(i)
tpos = pos - self.exons[i - 1].end
else:
ptype = 'intron_' + str(len(self.exons) - i)
tpos = self.exons[i].start - pos
return tpos, ptype
break
ptype = 'exon'
tpos = tpos if self.gene.strand == '+' else len(self) - tpos + 1
return tpos, ptype
def cpos_to_gpos(self, pos):
"""
transform CDS coordinate to genomic coordinate
param pos: int position on CDS, 1-based.
"""
tpos = self.gpos_to_tpos(self.cds_start)[0] + pos - 1
gpos = self.tpos_to_gpos(tpos)
return gpos
def gpos_to_cpos(self, pos):
"""
transform genomic coordinate to CDS coordinate
param: int, position on genome, 1-based.
"""
tpos = self.gpos_to_tpos(pos)[0]
cpos = tpos - self.gpos_to_tpos(self.cds_start)[0] + 1
return cpos
def tiv_to_giv(self, pos1, pos2):
"""
given transcript region boundary:
return one or more(for features spanning more than one exon)
exonic region interval(s) in list of string interval
param pos1: int, left transcript coordinate, 1-based.
param pos2: int, right transcript coordinate, 1-based.
"""
cod1 = self.tpos_to_gpos(pos1)
cod2 = self.tpos_to_gpos(pos2)
start = min(cod1, cod2)
end = max(cod1, cod2)
givs = []
for i in range(self.n_exons):
if self.exons[i].end < start:
continue
if self.exons[i].start > end:
break
if self.exons[i].start <= start:
if self.exons[i].end <= end:
givs.append(Region(start, self.exons[i].end))
else:
givs.append(Region(start, end))
else:
if self.exons[i].end <= end:
givs.append(Region(self.exons[i].start, self.exons[i].end))
else:
givs.append(Region(self.exons[i].start, end))
return givs
@property
def five_prime_utrs(self):
if len(self.cdss) == 0 or self.cds_start == self.tx_start:
return []
else:
return self.tiv_to_giv(1, self.gpos_to_tpos(self.cds_start)[0] - 1)
@property
def three_prime_utrs(self):
if len(self.cdss) == 0 or self.stop_codon_end == self.tx_end or self.cds_end == self.tx_end:
return []
else:
if len(self.stop_codon) > 0:
return self.tiv_to_giv(self.gpos_to_tpos(self.stop_codon_end)[0] + 1, len(self))
else:
return self.tiv_to_giv(self.gpos_to_tpos(self.cds_end)[0] + 1, len(self))
def format_region_bed12(self, rs, flank=0):
"""
format a spliced region in a transcript into bed12 format
param rs: a list of items of class Region
"""
rs = sorted(rs, key=lambda r: r.start)
if flank > 0:
rs[0].start -= flank
rs[-1].end += flank
starts = [r.start - 1 for r in rs]
ends = [r.end for r in rs]
blockstart = [str(x - starts[0]) for x in starts]
blocksize = [str(len(r)) for r in rs]
s = [self.gene.chrom, starts[0], ends[-1], self.tx_id, self.gene.gene_id,
self.gene.strand, '0', '0', '0', len(starts)]
s = s + [','.join(blocksize) + ',', ','.join(blockstart) + ',']
return s
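# A minimal usage sketch of the coordinate helpers above (all coordinates are
# invented for illustration):
def _example_transcript_coordinates():
    gene = Gene(gene_id='g1', chrom='chr1', strand='+')
    tx = Transcript(tx_id='t1', gene=gene)
    tx.add_region(Region(100, 150), 'exon')
    tx.add_region(Region(200, 260), 'exon')
    tx.update()
    assert len(tx) == 51 + 61                      # summed exon lengths
    assert tx.tpos_to_gpos(52) == 200              # first base of the second exon
    assert tx.gpos_to_tpos(200) == (52, 'exon')    # and back again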
###############################################################################
# functions ###################################################################
###############################################################################
def parse_gtf(gtf_file):
"""
read GTF file
param: path to GTF file, gzipped format allowed.
"""
gtf = {}
if gtf_file.endswith('.gz'):
f = gzip.open(gtf_file, 'rt')
elif gtf_file == '-':
f = sys.stdin
else:
f = open(gtf_file)
for line in f:
if line[0] == '#':
continue
ary = line.strip().split('\t')
m = re.search(r'gene_id "(.*?)".*?transcript_id "(.*?)"', ary[8])
if m:
if m.group(2) in gtf:
gtf[m.group(2)].add_region(region = Region(int(ary[3]), int(ary[4])), region_type=ary[2])
else:
gene = Gene(gene_id=m.group(1), chrom=ary[0], strand=ary[6])
tx = Transcript(tx_id=m.group(2), gene=gene)
tx.add_region(region = Region(int(ary[3]), int(ary[4])), region_type=ary[2])
gtf[m.group(2)] = tx
f.close()
for tx in gtf:
gtf[tx].update()
return gtf
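# A hedged usage sketch for parse_gtf (the file path and transcript id are
# illustrative): the returned dict is keyed by transcript_id.
#
#     gtf = parse_gtf('annotation.gtf.gz')
#     tx = gtf['ENST00000000001']
#     print(tx.n_exons, tx.cds_len, tx.tx_start)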
def exon_to_bed(gtf_file, extend=0):
"""
print exons of each transcript in bed12 format
param: path to GTF file, gzipped format allowed.
"""
gtf = parse_gtf(gtf_file)
for tx_id in gtf:
tx = gtf[tx_id]
items = tx.format_region_bed12(tx.exons, flank=extend)
print('\t'.join(str(i) for i in items))
return
def cds_to_bed(gtf_file, extend=0):
"""
print CDSs of each transcript in bed12 format
param: path to GTF file, gzipped format allowed.
"""
gtf = parse_gtf(gtf_file)
for tx_id in gtf:
tx = gtf[tx_id]
if len(tx.cdss) > 0:
items = tx.format_region_bed12(tx.cdss, flank=extend)
print('\t'.join(str(i) for i in items))
return
def utr5_to_bed(gtf_file, extend=0):
"""
print UTR5 of each transcript in bed12 format
param: path to GTF file, gzipped format allowed.
"""
gtf = parse_gtf(gtf_file)
for tx_id in gtf:
tx = gtf[tx_id]
tx_utr5 = tx.five_prime_utrs
if len(tx_utr5) > 0:
items = tx.format_region_bed12(tx_utr5, flank=extend)
print('\t'.join(str(i) for i in items))
return
def utr3_to_bed(gtf_file, extend=0):
"""
print UTR3 of each transcript in bed12 format
param: path to GTF file, gzipped format allowed.
"""
gtf = parse_gtf(gtf_file)
for tx_id in gtf:
tx = gtf[tx_id]
tx_utr3 = tx.three_prime_utrs
if len(tx_utr3) > 0:
items = tx.format_region_bed12(tx_utr3, flank=extend)
print('\t'.join(str(i) for i in items))
return
def t2g(gtf_file, tfile):
"""
convert transcript coordinates to genomic coordinates
param: path to GTF file, gzipped format allowed.
param tfile: tab-delimited file, 1st column=tx, 2nd column = tpos
"""
gtf = parse_gtf(gtf_file)
with open(tfile) as fh:
for row in csv.reader(fh, delimiter="\t"):
try:
tx = gtf[row[0]]
gpos = tx.tpos_to_gpos(int(row[1]))
row += [tx.gene.chrom, tx.gene.strand, str(gpos)]
except KeyError:
print('Tx isoform {} was not found in GTF file!'.format(row[0]), file=sys.stderr)
row += ['NA'] * 3
print('\t'.join(row))
return
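# A hedged sketch of the tab-delimited input expected by t2g and g2t (ids and
# positions are made up); each row is echoed back with the converted coordinates
# appended:
#
#     ENST00000000001	250
#     ENST00000000002	1024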
def g2t(gtf_file, gfile):
"""
convert genomic coordinates ot transcript coordinates
param: path to GTF file, gzipped format allowed.
param gfile: tab-delimited file, 1st column=tx, 2nd column = gpos
"""
gtf = parse_gtf(gtf_file)
with open(gfile) as fh:
for row in csv.reader(fh, delimiter='\t'):
try:
tx = gtf[row[0]]
tpos, ptype = tx.gpos_to_tpos(int(row[1]))
row += [str(tpos), ptype]
except KeyError:
print('Tx isoform {} was not found in GTF file!'.format(row[0]), file=sys.stderr)
row += ['NA'] * 2
print('\t'.join(row))
return
def tiv2giv(gtf_file, tivfile):
"""
convert transcript intervals | |
from typing import Optional
from botocore.client import BaseClient
from typing import Dict
from botocore.paginate import Paginator
from botocore.waiter import Waiter
from typing import Union
from typing import List
class Client(BaseClient):
def can_paginate(self, operation_name: str = None):
"""
Check if an operation can be paginated.
:type operation_name: string
:param operation_name: The operation name. This is the same name
as the method name on the client. For example, if the
method name is ``create_foo``, and you\'d normally invoke the
operation as ``client.create_foo(**kwargs)``, if the
``create_foo`` operation can be paginated, you can use the
call ``client.get_paginator(\"create_foo\")``.
:return: ``True`` if the operation can be paginated,
``False`` otherwise.
"""
pass
def create_identity_pool(self, IdentityPoolName: str, AllowUnauthenticatedIdentities: bool, SupportedLoginProviders: Dict = None, DeveloperProviderName: str = None, OpenIdConnectProviderARNs: List = None, CognitoIdentityProviders: List = None, SamlProviderARNs: List = None, IdentityPoolTags: Dict = None) -> Dict:
"""
Creates a new identity pool. The identity pool is a store of user identity information that is specific to your AWS account. The limit on identity pools is 60 per account. The keys for ``SupportedLoginProviders`` are as follows:
* Facebook: ``graph.facebook.com``
* Google: ``accounts.google.com``
* Amazon: ``www.amazon.com``
* Twitter: ``api.twitter.com``
* Digits: ``www.digits.com``
You must use AWS Developer credentials to call this API.
See also: `AWS API Documentation <https://docs.aws.amazon.com/goto/WebAPI/cognito-identity-2014-06-30/CreateIdentityPool>`_
**Request Syntax**
::
response = client.create_identity_pool(
IdentityPoolName='string',
AllowUnauthenticatedIdentities=True|False,
SupportedLoginProviders={
'string': 'string'
},
DeveloperProviderName='string',
OpenIdConnectProviderARNs=[
'string',
],
CognitoIdentityProviders=[
{
'ProviderName': 'string',
'ClientId': 'string',
'ServerSideTokenCheck': True|False
},
],
SamlProviderARNs=[
'string',
],
IdentityPoolTags={
'string': 'string'
}
)
**Response Syntax**
::
{
'IdentityPoolId': 'string',
'IdentityPoolName': 'string',
'AllowUnauthenticatedIdentities': True|False,
'SupportedLoginProviders': {
'string': 'string'
},
'DeveloperProviderName': 'string',
'OpenIdConnectProviderARNs': [
'string',
],
'CognitoIdentityProviders': [
{
'ProviderName': 'string',
'ClientId': 'string',
'ServerSideTokenCheck': True|False
},
],
'SamlProviderARNs': [
'string',
],
'IdentityPoolTags': {
'string': 'string'
}
}
**Response Structure**
- *(dict) --*
An object representing an Amazon Cognito identity pool.
- **IdentityPoolId** *(string) --*
An identity pool ID in the format REGION:GUID.
- **IdentityPoolName** *(string) --*
A string that you provide.
- **AllowUnauthenticatedIdentities** *(boolean) --*
TRUE if the identity pool supports unauthenticated logins.
- **SupportedLoginProviders** *(dict) --*
Optional key:value pairs mapping provider names to provider app IDs.
- *(string) --*
- *(string) --*
- **DeveloperProviderName** *(string) --*
The "domain" by which Cognito will refer to your users.
- **OpenIdConnectProviderARNs** *(list) --*
A list of OpenID Connect provider ARNs.
- *(string) --*
- **CognitoIdentityProviders** *(list) --*
A list representing an Amazon Cognito user pool and its client ID.
- *(dict) --*
A provider representing an Amazon Cognito user pool and its client ID.
- **ProviderName** *(string) --*
The provider name for an Amazon Cognito user pool. For example, ``cognito-idp.us-east-1.amazonaws.com/us-east-1_123456789`` .
- **ClientId** *(string) --*
The client ID for the Amazon Cognito user pool.
- **ServerSideTokenCheck** *(boolean) --*
TRUE if server-side token validation is enabled for the identity provider’s token.
Once you set ``ServerSideTokenCheck`` to TRUE for an identity pool, that identity pool will check with the integrated user pools to make sure that the user has not been globally signed out or deleted before the identity pool provides an OIDC token or AWS credentials for the user.
If the user is signed out or deleted, the identity pool will return a 400 Not Authorized error.
- **SamlProviderARNs** *(list) --*
An array of Amazon Resource Names (ARNs) of the SAML provider for your identity pool.
- *(string) --*
- **IdentityPoolTags** *(dict) --*
The tags that are assigned to the identity pool. A tag is a label that you can apply to identity pools to categorize and manage them in different ways, such as by purpose, owner, environment, or other criteria.
- *(string) --*
- *(string) --*
:type IdentityPoolName: string
:param IdentityPoolName: **[REQUIRED]**
A string that you provide.
:type AllowUnauthenticatedIdentities: boolean
:param AllowUnauthenticatedIdentities: **[REQUIRED]**
TRUE if the identity pool supports unauthenticated logins.
:type SupportedLoginProviders: dict
:param SupportedLoginProviders:
Optional key:value pairs mapping provider names to provider app IDs.
- *(string) --*
- *(string) --*
:type DeveloperProviderName: string
:param DeveloperProviderName:
The \"domain\" by which Cognito will refer to your users. This name acts as a placeholder that allows your backend and the Cognito service to communicate about the developer provider. For the ``DeveloperProviderName`` , you can use letters as well as period (``.`` ), underscore (``_`` ), and dash (``-`` ).
Once you have set a developer provider name, you cannot change it. Please take care in setting this parameter.
:type OpenIdConnectProviderARNs: list
:param OpenIdConnectProviderARNs:
A list of OpenID Connect provider ARNs.
- *(string) --*
:type CognitoIdentityProviders: list
:param CognitoIdentityProviders:
An array of Amazon Cognito user pools and their client IDs.
- *(dict) --*
A provider representing an Amazon Cognito user pool and its client ID.
- **ProviderName** *(string) --*
The provider name for an Amazon Cognito user pool. For example, ``cognito-idp.us-east-1.amazonaws.com/us-east-1_123456789`` .
- **ClientId** *(string) --*
The client ID for the Amazon Cognito user pool.
- **ServerSideTokenCheck** *(boolean) --*
TRUE if server-side token validation is enabled for the identity provider’s token.
Once you set ``ServerSideTokenCheck`` to TRUE for an identity pool, that identity pool will check with the integrated user pools to make sure that the user has not been globally signed out or deleted before the identity pool provides an OIDC token or AWS credentials for the user.
If the user is signed out or deleted, the identity pool will return a 400 Not Authorized error.
:type SamlProviderARNs: list
:param SamlProviderARNs:
An array of Amazon Resource Names (ARNs) of the SAML provider for your identity pool.
- *(string) --*
:type IdentityPoolTags: dict
:param IdentityPoolTags:
Tags to assign to the identity pool. A tag is a label that you can apply to identity pools to categorize and manage them in different ways, such as by purpose, owner, environment, or other criteria.
- *(string) --*
- *(string) --*
:rtype: dict
:returns:
"""
pass
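# A hedged usage sketch (the pool name and provider mapping are invented; the client
# would normally come from boto3.client('cognito-identity')):
#
#     pool = client.create_identity_pool(
#         IdentityPoolName='ExamplePool',
#         AllowUnauthenticatedIdentities=False,
#         SupportedLoginProviders={'accounts.google.com': 'example-client-id'},
#     )
#     print(pool['IdentityPoolId'])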
def delete_identities(self, IdentityIdsToDelete: List) -> Dict:
"""
Deletes identities from an identity pool. You can specify a list of 1-60 identities that you want to delete.
You must use AWS Developer credentials to call this API.
See also: `AWS API Documentation <https://docs.aws.amazon.com/goto/WebAPI/cognito-identity-2014-06-30/DeleteIdentities>`_
**Request Syntax**
::
response = client.delete_identities(
IdentityIdsToDelete=[
'string',
]
)
**Response Syntax**
::
{
'UnprocessedIdentityIds': [
{
'IdentityId': 'string',
'ErrorCode': 'AccessDenied'|'InternalServerError'
},
]
}
**Response Structure**
- *(dict) --*
Returned in response to a successful ``DeleteIdentities`` operation.
- **UnprocessedIdentityIds** *(list) --*
An array of UnprocessedIdentityId objects, each of which contains an ErrorCode and IdentityId.
- *(dict) --*
An array of UnprocessedIdentityId objects, each of which contains an ErrorCode and IdentityId.
- **IdentityId** *(string) --*
A unique identifier in the format REGION:GUID.
- **ErrorCode** *(string) --*
The error code indicating the type of error that occurred.
:type IdentityIdsToDelete: list
:param IdentityIdsToDelete: **[REQUIRED]**
A list of 1-60 identities that you want to delete.
- *(string) --*
:rtype: dict
:returns:
"""
pass
def delete_identity_pool(self, IdentityPoolId: str):
"""
Deletes an identity pool. Once a pool is deleted, users will not be able to authenticate with the pool.
You must use AWS Developer credentials to call this API.
See also: `AWS API Documentation <https://docs.aws.amazon.com/goto/WebAPI/cognito-identity-2014-06-30/DeleteIdentityPool>`_
**Request Syntax**
::
response = client.delete_identity_pool(
IdentityPoolId='string'
)
:type IdentityPoolId: string
:param IdentityPoolId: **[REQUIRED]**
An identity pool ID in the format REGION:GUID.
:returns: None
"""
pass
def describe_identity(self, IdentityId: str) -> Dict:
"""
Returns metadata related to the given identity, including when the identity was created and any associated linked logins.
You must use AWS Developer credentials to call this API.
See also: `AWS API Documentation <https://docs.aws.amazon.com/goto/WebAPI/cognito-identity-2014-06-30/DescribeIdentity>`_
**Request Syntax**
::
response = client.describe_identity(
IdentityId='string'
)
**Response Syntax**
::
{
'IdentityId': 'string',
'Logins': [
'string',
],
'CreationDate': datetime(2015, 1, 1),
'LastModifiedDate': datetime(2015, 1, 1)
}
**Response Structure**
- *(dict) --*
A description | |
# coding: utf-8
u"""Этот модуль содержит главный класс библиотеки и набор actions для него."""
from __future__ import absolute_import
import copy
import datetime
import json
import warnings
import six
from django.core import exceptions as dj_exceptions
from django.db.models import fields as dj_fields
from django.utils.encoding import force_text
from m3 import ApplicationLogicException
from m3 import RelatedError
from m3.actions import Action
from m3.actions import ActionPack
from m3.actions import ControllerCache
from m3.actions.context import ActionContext
from m3.actions.interfaces import IMultiSelectablePack
from m3.actions.results import OperationResult
from m3.actions.results import PreJsonResult
from m3.actions.utils import apply_search_filter
from m3.actions.utils import extract_int
from m3.db import safe_delete
from m3_django_compat import atomic
from m3_django_compat import ModelOptions
from m3_django_compat import get_request_params
from m3_ext.ui.results import ExtUIScriptResult
from . import exceptions
from . import filters
from . import tools
from . import ui
# =============================================================================
# BaseAction
# =============================================================================
class BaseAction(Action):
"""
Base class for all actions.
Has an automatically generated url and
the ability to attach extension points in |Observer|.
"""
perm_code = None
"""
Sub-permission code used when building the action's permission code
in the standard way. If the code is not set, the action builds
its own permission code independently of the pack
"""
@property
def url(self):
u"""
automatically generated url
"""
return r'/%s' % self.__class__.__name__.lower()
@tools.cached_to('__cached_context_declaration')
def context_declaration(self):
"""
Delegates the context declaration to the pack
:return: Rules for DeclarativeActionContext
:rtype: dict
"""
return self.parent.declare_context(self)
def get_perm_code(self, subpermission=None):
"""
Returns the permission code
:param subpermission: Sub-permission access code
:type subpermission: str
:return: code - Access code
:rtype: str
"""
if self.perm_code is None:
code = super(BaseAction, self).get_perm_code(subpermission)
else:
perm_code = self.perm_code + (
('/' + subpermission) if subpermission else ''
)
code = self.parent.get_perm_code(perm_code)
return code
@property
@tools.cached_to('__cached_need_check_permission')
def need_check_permission(self):
"""
Whether permission checks are required
If perm_code is defined, the need to check permissions
depends on perm_code being present among the pack's sub_permissions
and on the corresponding pack flag
:rtype: bool
"""
if self.perm_code is not None:
result = self.parent.need_check_permission and (
self.perm_code in self.parent.sub_permissions
)
else:
result = False
return result
@staticmethod
def handle(verb, arg):
"""
Stub for extension points.
Overridden when the action is registered in the observer
:param verb: Name of the extension point
:type verb: str
:param arg: Object to pass to the extension point
:type arg: any
:return arg: The same object or any other one
"""
return arg
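# A minimal sketch of an action built on BaseAction (the class name and payload are
# illustrative, not part of this module):
#
#     class PingAction(BaseAction):
#         url = '/ping'
#
#         def run(self, request, context):
#             return PreJsonResult({'pong': True})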
# =============================================================================
# BaseWindowAction
# =============================================================================
class BaseWindowAction(BaseAction):
"""
Base Action for showing a window
"""
def create_window(self):
"""
The method instantiates the window and stores the instance in the self.win attribute
.. code::
def create_window(self):
self.win = EditWindow()
"""
raise NotImplementedError()
def set_windows_params(self):
"""
:deprecated:
:TODO: Remove this, the typo ruins the whole semantics!
"""
warnings.warn(
'Please, replace "set_windowS_params"->"set_window_params"!',
category=FutureWarning)
self.set_window_params()
def _apply_windows_params(self):
"""
:TODO: Remove this, the typo ruins the whole semantics!
"""
warnings.warn(
'Please, replace "_apply_windowS_params"->"_apply_window_params"!',
category=FutureWarning)
self._apply_window_params()
def set_window_params(self):
"""
The method fills the self.win_params dict, which is then passed
to the window. This dict acts as the data bus
from Actions/Packs to the window
.. code::
def set_window_params(self):
self.win_params['title'] = _(u'Greetings from hell')
"""
pass
def _apply_window_params(self):
"""
The method passes the params dict to the window.
.. note::
Usually does not need to be overridden
"""
self.win.set_params(self.win_params)
def configure_window(self):
"""
Extension point giving access to the fully configured
window instance for fine-tuning.
.. note::
Kept for particularly heavy cases where set_params
is not enough
.. code::
def configure_window(self):
self.win.grid.top_bar.items[8].text = _(u'Wow, 9 buttons')
"""
pass
def run(self, request, context):
"""
Body of the Action, called when handling a request to the server.
:param request: Request
:type request: django.http.HttpRequest
:param context: Context
:type context: m3.actions.context.DeclarativeActionContext
.. note::
Usually does not need to be overridden
"""
new_self = copy.copy(self)
new_self.win_params = (
getattr(self.__class__, 'win_params', None) or {}
).copy()
new_self.request = request
new_self.context = context
new_self.set_window_params()
new_self.create_window()
new_self._apply_window_params()
new_self.configure_window()
return ExtUIScriptResult(
new_self.win, context=new_self.context)
# =============================================================================
# ObjectListWindowAction
# =============================================================================
class ObjectListWindowAction(BaseWindowAction):
"""
Base Action for showing the object list window
"""
is_select_mode = False
"""Режим показа окна (True - выбор, False - список)"""
perm_code = 'view'
"""Код доступа"""
def set_window_params(self):
params = self.win_params
params['is_select_mode'] = self.is_select_mode
params['pack'] = self.parent
params['title'] = self.parent.title
params['height'] = self.parent.height
params['width'] = self.parent.width
params['read_only'] = getattr(self.parent, 'read_only', None) or (
not self.has_perm(self.request))
self.win_params = self.parent.get_list_window_params(
params, self.request, self.context)
def create_window(self):
self.win = self.parent.create_list_window(
is_select_mode=self.win_params['is_select_mode'],
request=self.request,
context=self.context)
# =============================================================================
# ObjectSelectWindowAction
# =============================================================================
class ObjectSelectWindowAction(ObjectListWindowAction):
"""
Base Action for showing a window for selecting an object from a list
.. tip:: Used with m3_ext.ui.fields.complex.ExtDictSelectField
"""
is_select_mode = True
def set_window_params(self):
super(ObjectSelectWindowAction, self).set_window_params()
# In the selection window you can only SELECT!
self.win_params['read_only'] = True
self.win_params['column_name_on_select'] = (
self.parent.column_name_on_select
)
self.win_params['additional_data_names'] = (
self.parent.additional_data_names
)
class ObjectMultiSelectWindowAction(ObjectSelectWindowAction):
"""
Base Action for showing a window for selecting several objects from a list
"""
def create_window(self):
self.win = self.parent.multi_select_window()
# =============================================================================
# ObjectEditWindowAction
# =============================================================================
class ObjectEditWindowAction(BaseWindowAction):
"""
Base Action for showing the object edit window.
"""
perm_code = 'edit'
def set_window_params(self):
try:
obj, create_new = self.parent.get_obj(self.request, self.context)
except self.parent.get_not_found_exception():
raise ApplicationLogicException(self.parent.MSG_DOESNOTEXISTS)
params = self.win_params.copy()
params['object'] = obj
params['create_new'] = create_new
params['form_url'] = self.parent.save_action.get_absolute_url()
read_only = getattr(self.parent, 'read_only', None) or (
not self.has_perm(self.request))
params['read_only'] = read_only
params['title'] = self.parent.format_window_title(
u'View' if read_only else
u'Add' if create_new else
u'Edit'
)
self.win_params = self.handle(
'set_window_params',
self.parent.get_edit_window_params(
params, self.request, self.context))
def create_window(self):
assert 'create_new' in self.win_params, (
u'You must call "set_window_params" method of superclass!')
self.win = self.handle(
'create_window',
self.parent.create_edit_window(
self.win_params['create_new'], self.request, self.context))
# =============================================================================
# ObjectAddWindowAction
# =============================================================================
class ObjectAddWindowAction(ObjectEditWindowAction):
"""
Base Action for showing the object add window.
.. note::
A separate action to keep short_name unique
"""
perm_code = 'add'
pass
# =============================================================================
# ObjectSaveAction
# =============================================================================
class ObjectSaveAction(BaseAction):
"""
Base Action for saving an edited object
"""
class AlreadySaved(Exception):
"""
Exception by which an extension
that has overridden object saving
can report that the object is already saved
and nothing more needs to be done.
"""
pass
def create_window(self):
"""
Creates the window for subsequent binding of the request data into the form
"""
self.win = self.parent.create_edit_window(
self.create_new, self.request, self.context)
def create_obj(self):
"""
The method delegates to the pack loading the object from the DB /
creating a new model object
"""
try:
self.obj, self.create_new = self.parent.get_obj(
self.request, self.context)
except self.parent.get_not_found_exception():
raise ApplicationLogicException(
self.parent.MSG_DOESNOTEXISTS)
def bind_win(self):
"""
Fills the window fields with data from the request
"""
self.win.form.bind_to_request(self.request)
def bind_to_obj(self):
"""
Fills the object with data from the window fields
"""
self.win.form.to_object(self.obj)
def save_obj(self):
"""
Saves the object to the DB
:raise: m3.ApplicationLogicException
"""
# inject the exception classes into the object
self.obj.AlreadySaved = self.AlreadySaved
try:
try:
self.obj = self.handle('save_object', self.obj)
except self.AlreadySaved:
# someone in the listener chain has handled saving the object
# and nothing more needs to be done
return
self.parent.save_row(
self.obj, self.create_new, self.request, self.context)
# give subscribers a chance to handle the save
self.handle('post_save', (self.obj, self.context))
except (exceptions.ValidationError, exceptions.OverlapError) as err:
raise ApplicationLogicException(six.text_type(err))
except dj_exceptions.ValidationError as err:
raise ApplicationLogicException(u'<br/>'.join(err.messages))
@atomic
def run(self, request, context):
"""
Body of the Action, called when handling a request to the server
:param request: Request
:type request: django.http.HttpRequest
:param context: Context
:type context: m3.actions.context.DeclarativeActionContext
.. note::
Usually does not need to be overridden
"""
new_self = copy.copy(self)
new_self.request = request
new_self.context = context
new_self.create_obj()
new_self.create_window()
new_self.bind_win()
new_self.bind_to_obj()
new_self.save_obj()
return OperationResult()
# =============================================================================
# ObjectRowsAction
# =============================================================================
class ObjectRowsAction(BaseAction):
"""
Base Action that fetches the data shown in the object list window
"""
def set_query(self):
"""
The method obtains the initial data selection as a QuerySet
and stores it in the self.query attribute
.. note::
Registers the `query` extension point in Observer
"""
self.query = self.handle(
'query',
self.parent.get_rows_query(self.request, self.context)
)
def apply_search(self):
"""
The method applies to the self.query selection a filter by the text
from the "Search" field of the list window
.. note::
Registers the `apply_search` extension point in Observer
"""
self.query = self.handle(
'apply_search',
self.parent.apply_search(
self.query,
self.request,
self.context
))
def apply_filter(self):
"""
The method applies to the self.query selection a filter, usually coming
from "column filters"/context-menu filters in the list window
.. note::
Registers the `apply_filter` extension point in Observer
"""
self.query = self.handle(
'apply_filter',
self.parent.apply_filter(
self.query,
self.request,
self.context
))
def apply_sort_order(self):
"""
The method applies to the self.query selection sorting
by the column selected in the list window
"""
self.query = self.handle(
'apply_sort_order',
self.parent.apply_sort_order(
self.query,
self.request,
self.context
))
def apply_limit(self):
"""
The method applies to the self.query selection a limit
on the number of items (for paged loading into the list window).
"""
if getattr(self.parent, 'allow_paging', True):
offset = extract_int(self.request, 'start')
limit = extract_int(self.request, 'limit')
else:
offset = limit = 0
self.query = tools.QuerySplitter(self.query, offset, limit)
def get_rows(self):
"""
The method converts the QuerySet into a list,
serializing the objects into dicts along the way
:return: List of serialized objects
:rtype: list
"""
res = []
for obj in self.query:
prep_obj = self.prepare_object(obj)
if prep_obj:
res.append(prep_obj)
else:
self.query.skip_last()
return self.handle('get_rows', res)
def prepare_object(self, obj):
"""
Returns the dict used to build the resulting list
:param obj: Object obtained from the QuerySet
:type obj: django.db.models.Model
:return: Dict to be serialized into json
:rtype: dict
.. note::
Registers the `prepare_obj` extension point in Observer
"""
if hasattr(self.parent, 'prepare_row'):
obj = self.parent.prepare_row(obj, self.request, | |
TypeError('takes %d positional arguments but %d were given' %
(len(tree.args.args), len(args)))
# variable length args
if rest_args:
baseobj = tree.args.vararg
argname = (baseobj.id
if isinstance(baseobj, ast.Name) # python 2
else baseobj.arg) # python 3
self.setVarargBind(argname, rest_args)
# kwargs
for pos, key in enumerate(node.keywords):
if key.arg in args_used_list:
raise TypeError(
"got multiple values for argument '%s'" % key.arg)
if key.arg in args_name_list:
self.setArgBind(key.arg, keywords[pos])
args_used_list.append(key.arg)
else:
raise TypeError('keyword-only argument is not supported')
# default values of kwargs
kwargs_size = len(tree.args.defaults)
if kwargs_size > 0:
for arg, val in zip(tree.args.args[-kwargs_size:], tree.args.defaults):
argname = (arg.id if isinstance(arg, ast.Name) # python 2
else arg.arg) # python 3
if argname not in args_used_list:
right = self.visit(val)
self.setArgBind(argname, right)
self.setFsm()
self.incFsmCount()
# visit the function definition
ret = self._visit_next_function(tree)
# fsm jump by return statement
end_count = self.getFsmCount()
unresolved_return = self.getUnresolvedReturn()
for ret_count, value in unresolved_return:
self.setFsm(ret_count, end_count)
# clean-up jump conditions
self.clearBreak()
self.clearContinue()
self.clearReturn()
self.clearReturnVariable()
# return to the previous scope frame
self.popScope()
return ret
def _visit_next_function(self, node):
self.generic_visit(node)
retvar = self.getReturnVariable()
if retvar is not None:
return retvar
return vtypes.Int(0)
def _call_Name_intrinsic_function(self, node, name):
args = []
args.append(self.fsm)
for arg in node.args:
args.append(self.visit(arg))
kwargs = OrderedDict()
for key in node.keywords:
kwargs[key.arg] = self.visit(key.value)
func = self.intrinsic_functions[name]
return func(*args, **kwargs)
def _call_Attribute(self, node):
value = self.visit(node.func.value)
method = getattr(value, node.func.attr)
if not inspect.ismethod(method) and not inspect.isfunction(method):
raise TypeError("'%s' object is not callable" % str(type(method)))
# prepare the argument values
args = []
kwargs = OrderedDict()
for arg in node.args:
args.append(self.visit(arg))
for key in node.keywords:
kwargs[key.arg] = self.visit(key.value)
# check intrinsic method
name = str(method)
if self._is_intrinsic_method(value, method) or name in self.intrinsic_methods:
args.insert(0, self.fsm)
# pass the current local scope
from .thread import Thread
from .pool import ThreadPool
if isinstance(value, Thread):
value.start_frame = self.start_frame
if isinstance(value, ThreadPool):
for thread in value.threads:
thread.start_frame = self.start_frame
return method(*args, **kwargs)
# stack a new scope frame
self.pushScope(ftype='call')
resolved_args = inspect.getcallargs(method, *args, **kwargs)
for arg, value in sorted(resolved_args.items(), key=lambda x: x[0]):
self.setArgBind(arg, value)
self.setFsm()
self.incFsmCount()
text = textwrap.dedent(inspect.getsource(method))
tree = ast.parse(text).body[0]
# visit the function definition
ret = self._visit_next_function(tree)
# fsm jump by return statement
end_count = self.getFsmCount()
unresolved_return = self.getUnresolvedReturn()
for ret_count, value in unresolved_return:
self.setFsm(ret_count, end_count)
# clean-up jump conditions
self.clearBreak()
self.clearContinue()
self.clearReturn()
self.clearReturnVariable()
# return to the previous scope frame
self.popScope()
return ret
def _is_intrinsic_method(self, value, method):
intrinsics = getattr(value, '__intrinsics__', ())
return (method.__name__ in intrinsics)
# ------------------------------------------------------------------
def visit_Nonlocal(self, node):
for name in node.names:
self.addNonlocal(name)
def visit_Global(self, node):
for name in node.names:
self.addGlobal(name)
def visit_Pass(self, node):
pass
def visit_Break(self, node):
self.addBreak()
self.incFsmCount()
def visit_Continue(self, node):
self.addContinue()
self.incFsmCount()
def visit_Return(self, node):
if node.value is None:
self.addReturn(None)
self.incFsmCount()
return None
retvar = self.getReturnVariable()
if retvar is not None:
left = retvar
right = self.visit(node.value)
self.setBind(left, right)
self.addReturn(right)
self.incFsmCount()
return left
tmp = self.getTmpVariable()
self.setReturnVariable(tmp)
left = tmp
right = self.visit(node.value)
self.setBind(left, right)
self.addReturn(right)
self.incFsmCount()
return left
def visit_Num(self, node):
if isinstance(node.n, int):
# return vtypes.Int(node.n)
return node.n
if isinstance(node.n, float):
v = FixedConst(None, node.n, self.point)
v.orig_value = node.n
return v
return vtypes._Constant(node.n)
def visit_Str(self, node):
return vtypes.Str(node.s)
def visit_UnaryOp(self, node):
value = self.visit(node.operand)
try:
method = getMethodName(node.op)
rslt = applyMethod(value, method)
except NotImplementedError:
op = getVeriloggenOp(node.op)
rslt = op(value)
return optimize(rslt)
def visit_BoolOp(self, node):
values = [self.visit(v) for v in node.values]
try:
method = getMethodName(node.op)
rslt = values[0]
for v in values[1:]:
rslt = applyMethod(rslt, method, v)
except NotImplementedError:
op = getVeriloggenOp(node.op)
if op is None:
raise TypeError("unsupported BinOp: %s" % str(node.op))
rslt = values[0]
for v in values[1:]:
rslt = op(rslt, v)
return optimize(rslt)
def visit_BinOp(self, node):
left = self.visit(node.left)
right = self.visit(node.right)
if isinstance(left, vtypes.Str) or isinstance(right, vtypes.Str):
if not isinstance(node.op, ast.Add):
raise TypeError("Can not generate a corresponding node")
return self._string_operation_plus(left, right)
if (not isinstance(left, fxd._FixedBase) and
isinstance(right, fxd._FixedBase)):
raise TypeError("type mismatch of operator arguments: '%s' and '%s'" %
(str(type(left)), str(type(right))))
try:
method = getMethodName(node.op)
rslt = applyMethod(left, method, right)
except NotImplementedError:
op = getVeriloggenOp(node.op)
if op is None:
raise TypeError("unsupported BinOp: %s" % str(node.op))
rslt = op(left, right)
return optimize(rslt)
def _string_operation_plus(self, left, right):
if not isinstance(left, vtypes.Str) or not isinstance(right, vtypes.Str):
raise TypeError("'+' operation requires two string arguments")
return vtypes.Str(left.value + right.value)
def visit_Compare(self, node):
left = self.visit(node.left)
methods = [getMethodName(op) for op in node.ops]
ops = [getVeriloggenOp(op) for op in node.ops]
comparators = [self.visit(comp) for comp in node.comparators]
rslts = []
for i, (method, op) in enumerate(zip(methods, ops)):
if i == 0:
if (not isinstance(left, fxd._FixedBase) and
isinstance(comparators[i], fxd._FixedBase)):
raise TypeError("type mismatch of operator arguments: '%s' and '%s'" %
(str(type(left)), str(type(comparators[i]))))
try:
rslts.append(applyMethod(left, method, comparators[i]))
except NotImplementedError:
rslts.append(op(left, comparators[i]))
else:
if (not isinstance(comparators[i - 1], fxd._FixedBase) and
isinstance(comparators[i], fxd._FixedBase)):
raise TypeError("type mismatch of operator arguments: '%s' and '%s'" %
(str(type(comparators[i - 1])), str(type(comparators[i]))))
try:
rslts.append(
applyMethod(comparators[i - 1], method, comparators[i]))
except NotImplementedError:
rslts.append(op(comparators[i - 1], comparators[i]))
if len(rslts) == 1:
return rslts[0]
ret = None
for r in rslts:
if ret:
ret = vtypes.Land(ret, r)
else:
ret = r
return ret
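# Note on visit_Compare (the names below are illustrative): a chained Python
# comparison such as "a < b < c" is translated pairwise, left to right, and the
# pairs are AND-ed together, i.e. it becomes vtypes.Land(a < b, b < c).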
def visit_NameConstant(self, node):
# for Python 3.4
if node.value is True:
return vtypes.Int(1)
if node.value is False:
return vtypes.Int(0)
if node.value is None:
return vtypes.Int(0)
raise TypeError("%s in NameConst.value is not supported." %
str(node.value))
def visit_Name(self, node):
# for Python 3.3 or older
if node.id == 'True':
return vtypes.Int(1)
if node.id == 'False':
return vtypes.Int(0)
if node.id == 'None':
return vtypes.Int(0)
store = isinstance(node.ctx, ast.Store)
if store:
return node.id
name = self.getVariable(node.id)
return name
def visit_Print(self, node):
# for Python 2.x
# prepare the argument values
argvalues = []
formatstring_list = []
for arg in node.values:
if (isinstance(arg, ast.BinOp) and
isinstance(arg.op, ast.Mod) and
isinstance(arg.left, ast.Str)):
# format string in print statement
values, form = self._print_binop_mod(arg)
argvalues.extend(values)
formatstring_list.append(form)
formatstring_list.append(" ")
elif isinstance(arg, ast.Tuple):
for e in arg.elts:
value = self.visit(e)
if isinstance(value, vtypes.Str):
formatstring_list.append(value.value)
formatstring_list.append(" ")
else:
argvalues.append(value)
formatstring_list.append("%d")
formatstring_list.append(" ")
else:
value = self.visit(arg)
if isinstance(value, vtypes.Str):
formatstring_list.append(value.value)
formatstring_list.append(" ")
else:
argvalues.append(value)
formatstring_list.append("%d")
formatstring_list.append(" ")
formatstring_list = formatstring_list[:-1]
args = []
args.append(vtypes.Str(''.join(formatstring_list)))
args.extend(argvalues)
left = None
right = vtypes.SystemTask('display', *args)
self.setBind(left, right)
self.setFsm()
self.incFsmCount()
return right
def visit_Attribute(self, node):
value = self.visit(node.value)
attr = node.attr
if isinstance(value, numerical_types) and attr == 'value':
return value
obj = getattr(value, attr)
return obj
def visit_Tuple(self, node):
return tuple([self.visit(elt) for elt in node.elts])
def visit_List(self, node):
""" List is not fully implemented yet. """
return tuple([self.visit(elt) for elt in node.elts])
def visit_Subscript(self, node):
if isinstance(node.slice, ast.Slice):
return self._slice(node)
if isinstance(node.slice, ast.ExtSlice):
return self._extslice(node)
if isinstance(node.slice, ast.Index):
return self._index(node)
raise TypeError("unsupported slice type: %s" % str(node.slice))
def _slice(self, node):
value = self.visit(node.value)
lower = (self.visit(node.slice.lower)
if node.slice.lower is not None else None)
upper = (self.visit(node.slice.upper)
if node.slice.upper is not None else None)
step = (self.visit(node.slice.step)
if node.slice.step is not None else None)
lower = vtypes.raw_value(optimize(lower))
upper = vtypes.raw_value(optimize(upper))
step = vtypes.raw_value(optimize(step))
return value[lower:upper:step]
def _extslice(self, node):
value = self.visit(node.value)
for dim in node.slice.dims:
lower = (self.visit(dim.lower)
if dim.lower is not None else None)
upper = (self.visit(dim.upper)
if dim.upper is not None else None)
step = (self.visit(dim.step)
if dim.step is not None else None)
lower = vtypes.raw_value(optimize(lower))
upper = vtypes.raw_value(optimize(upper))
step = vtypes.raw_value(optimize(step))
value = value[lower:upper:step]
return value
def _index(self, node):
value = self.visit(node.value)
index = self.visit(node.slice.value)
index = vtypes.raw_value(optimize(index))
return value[index]
# -------------------------------------------------------------------------
def skip(self):
val = self.hasBreak() or self.hasContinue() or self.hasReturn()
return val
def makeVariable(self, name, _type=None):
if _type is None:
return self.makeVariableReg(name)
if _type['type'] == 'fixed':
width = _type['width']
point = _type['point']
signed = _type['signed']
return self.makeVariableFixed(name, width, point, signed)
raise TypeError("not supported variable type")
def makeVariableReg(self, name, width=None, initval=0):
signame = _tmp_name('_'.join(['', self.name, name]))
if width is None:
width = self.datawidth
return self.m.Reg(signame, width, initval=initval, signed=True)
def makeVariableFixed(self, name, width=None, point=0, signed=True):
signame = _tmp_name('_'.join(['', self.name, name]))
if width is None:
width = self.datawidth
return fxd.FixedReg(self.m, signame, width=width, point=point, signed=signed)
def getVariable(self, name, store=False, _type=None):
if isinstance(name, vtypes._Numeric):
return name
var = self.scope.searchVariable(name, store)
<filename>email_router/email_router_datastore.py
import os
import datetime
import logging
import json
import re
from netaddr import IPNetwork, IPAddress
from netaddr.core import AddrConversionError, AddrFormatError
from dateutil.parser import parse
from pytz import timezone
from typing import NamedTuple, Optional, Collection, FrozenSet, List, Set
from tzlocal import get_localzone
from emerald_message.containers.email.email_container import EmailContainer
from emerald_message.containers.email.email_envelope import EmailEnvelope
from emerald_message.containers.email.email_message_metadata import EmailMessageMetadata
from email_router.email_router_config_source import EmailRouterDatastoreSourceType, EmailRouterSourceConfig
from email_router.email_router_destination import EmailRouterDestinationType, EmailRouterDestinationConfig
from email_router.router_instance_type import RouterInstanceType
from error import EmeraldEmailRouterDatabaseInitializationError, \
EmeraldEmailRouterDuplicateTargetError, \
EmeraldEmailRouterMatchNotFoundError, \
EmeraldEmailRouterConfigNotActiveError, \
EmeraldEmailRouterInputDataError
class EmailRouterRuleMatchPattern(NamedTuple):
sender_domain: Optional[str] = None
sender_name: Optional[str] = None
recipient_name: Optional[str] = None
attachment_included: Optional[bool] = None
body_size_minimum: Optional[int] = None
body_size_maximum: Optional[int] = None
sender_ip_whitelist: Optional[FrozenSet[str]] = None
# set the hash and str methods so we can use these in sets
def __str__(self):
return EmailRouterRuleMatchPattern.__name__ + os.linesep + \
os.linesep.join([
'sender_domain=' + str(self.sender_domain),
'sender_name=' + str(self.sender_name),
'recipient_name=' + str(self.recipient_name),
'attachment_included=' + str(self.attachment_included),
'body_size_minimum=' + str(self.body_size_minimum),
'body_size_maximum=' + str(self.body_size_maximum),
'sender_ip_set=' +
','.join([str(x) for x in self.sender_ip_whitelist] if self.sender_ip_whitelist is not None else '')
])
def __hash__(self):
return hash(str(self))
def __eq__(self, other):
if not isinstance(other, EmailRouterRuleMatchPattern):
return False
return (
self.sender_domain == other.sender_domain and
self.sender_name == other.sender_name and
self.recipient_name == other.recipient_name and
self.attachment_included == other.attachment_included and
self.body_size_minimum == other.body_size_minimum and
self.body_size_maximum == other.body_size_maximum and
            self.sender_ip_whitelist == other.sender_ip_whitelist
)
    def __ne__(self, other):
        return not self.__eq__(other)
# router rules are sorted only by sequence - rules with identical sequence have indeterminate sort order
class EmailRouterRule(NamedTuple):
match_priority: float
match_pattern: EmailRouterRuleMatchPattern
# define hash and str so we can use in sets
def __str__(self):
return EmailRouterRule.__name__ + os.linesep + \
os.linesep.join([
'match_priority=' + str(self.match_priority),
'match_pattern=' + os.linesep + str(self.match_pattern)
])
def __hash__(self):
return hash(str(self))
def __eq__(self, other):
if not isinstance(other, EmailRouterRule):
return False
if self.match_priority != other.match_priority:
return False
if self.match_pattern != other.match_pattern:
return False
return True
    def __ne__(self, other):
        return not self.__eq__(other)
def __lt__(self, other):
if not isinstance(other, EmailRouterRule):
raise TypeError('Unable to compare object of type ' + type(other).__name__ + ' to ' +
EmailRouterRule.__name__)
if self.match_priority < other.match_priority:
return True
elif self.match_priority > other.match_priority:
return False
        # The ordering would be indeterminate here, but since we do not allow
        # different rules with the same priority, this branch should never be reached.
raise NotImplementedError(
'Comparison found on two members of ' + EmailRouterRule.__name__ +
' with identical priority value "' + str(self.match_priority) + '" - not supported'
)
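# Illustrative sketch (hypothetical values): because __lt__ compares match_priority,
# a collection of rules can be ordered with sorted(), e.g.
#   rules = [EmailRouterRule(match_priority=2.0, match_pattern=EmailRouterRuleMatchPattern()),
#            EmailRouterRule(match_priority=1.0, match_pattern=EmailRouterRuleMatchPattern())]
#   sorted(rules)  # lowest match_priority first; equal priorities raise NotImplementedError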
class EmailRouterTargetConfig(NamedTuple):
target_name: str
target_priority: float
router_rules: FrozenSet[EmailRouterRule]
destinations: FrozenSet[EmailRouterDestinationConfig]
# add the hash and str methods so we can put in (generally immutable) sets
def __str__(self):
return EmailRouterTargetConfig.__name__ + os.linesep + \
os.linesep.join([
'target_name=' + str(self.target_name),
'target_priority=' + str(self.target_priority),
'router_rules=' + os.linesep + str(self.router_rules),
'destinations=' + os.linesep + str(self.destinations)
])
def __lt__(self, other):
if not isinstance(other, EmailRouterTargetConfig):
raise TypeError('Unable to compare object of type ' + type(other).__name__ + ' to ' +
EmailRouterTargetConfig.__name__)
if self.target_priority < other.target_priority:
return True
elif self.target_priority > other.target_priority:
return False
        # The ordering would be indeterminate if priorities were equal, but the collections
        # using these abstractions do not allow duplicate priorities, so this should never happen.
raise NotImplementedError(
            'Comparison found on two members of ' + EmailRouterTargetConfig.__name__ +
' with identical priority value "' + str(self.target_priority) + '" - not supported'
)
# use this to tell caller what to do based on the matched rules
class EmailRouterMatchResult(NamedTuple):
matched_target_name: str
destinations: Collection[EmailRouterDestinationConfig]
class EmailRouterMatchResultCollection(NamedTuple):
matched_info_log: List[str]
matched_target_results: List[EmailRouterMatchResult]
class EmailRouterRulesDatastore:
@property
def datastore_name(self) -> str:
return self._datastore_name
@property
def revision_datetime(self) -> datetime.datetime:
return self._revision_datetime
@property
def revision_number(self) -> int:
return self._revision_number
@property
def instance_type(self) -> RouterInstanceType:
return self._instance_type
@property
def router_rules_datastore_initialized(self) -> bool:
return self._router_rules_datastore_initialized
@property
def target_count(self) -> int:
return len(self._router_config_by_target)
# TODO: just use csv toolkit and return as csv with column headers
@property
def targets_info_as_list_with_header(self) -> List[List[str]]:
return_data: List[List[str]] = list()
return_data.append(['Priority', 'Target Name'])
for this_target in sorted(self._router_config_by_target):
return_data.append([str(this_target.target_priority), this_target.target_name])
return return_data
@property
def targets_info_as_table(self) -> List[str]:
return_data: List[str] = list()
return_data.append('Priority' + ' ' + 'Target Name')
for this_target in sorted(self._router_config_by_target):
return_data.append('{:05.3f}'.format(this_target.target_priority) + '{:5s}'.format('') +
'{:20s}'.format(this_target.target_name))
return return_data
@router_rules_datastore_initialized.setter
def router_rules_datastore_initialized(self, value):
if type(value) is not bool:
raise TypeError('Cannot initialize router_rules_datastore_initialized to an object of type "' +
type(value).__name__ + '" - this is a boolean')
self._router_rules_datastore_initialized = value
@property
def router_config_by_target(self) -> Set[EmailRouterTargetConfig]:
return self._router_config_by_target
def __init__(self,
name: str,
revision_datetime: datetime.datetime,
revision_number: int,
instance_type: RouterInstanceType):
self._datastore_name = name
self._revision_datetime = revision_datetime
self._revision_number = revision_number
self._instance_type = instance_type
self._router_rules_datastore_initialized = False
        # Create the set that stores one routing config per target; each member holds
        # collections of sortable (prioritized) router rules and destinations.
self._router_config_by_target: Set[EmailRouterTargetConfig] = set()
def add_target_routing_config(self,
target_config: EmailRouterTargetConfig):
# parse step 1 - make sure the parameter is really a target config
if not isinstance(target_config, EmailRouterTargetConfig):
raise TypeError('Unable to add target_config (type=' + type(target_config).__name__ +
') is not of type ' + EmailRouterTargetConfig.__name__)
# parse step 2 - do not allow multiple target configs that are identical or have identical
# target name
if target_config in self._router_config_by_target:
# callers will have option of ignoring or letting this halt operation
raise EmeraldEmailRouterDuplicateTargetError(
'Duplicate target config entry (' + target_config.target_name +
') found in source data - skipping this entry'
)
# parse step 3 - look for conflicting names (i.e. same name, different data) OR equal target priority
for this_target in self._router_config_by_target:
if target_config.target_name == this_target.target_name:
raise EmeraldEmailRouterDatabaseInitializationError(
'Conflicting entry found for target name "' + this_target.target_name + '" - aborting new add' +
os.linesep + 'Duplicate target name with different configuration data'
)
if target_config.target_priority == this_target.target_priority:
raise EmeraldEmailRouterDatabaseInitializationError(
'Conflicting entry found for target name "' + this_target.target_name + '" - aborting new add' +
os.linesep + 'Duplicate target priority - each must be unique for sorting'
)
# now add to the collection
self._router_config_by_target.add(target_config)
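# Illustrative sketch of populating a rules datastore (field values are hypothetical, and
# `some_instance_type` stands in for a RouterInstanceType member defined elsewhere):
#   datastore = EmailRouterRulesDatastore(name="example",
#                                         revision_datetime=datetime.datetime.now(),
#                                         revision_number=1,
#                                         instance_type=some_instance_type)
#   target = EmailRouterTargetConfig(target_name="billing",
#                                    target_priority=1.0,
#                                    router_rules=frozenset(),
#                                    destinations=frozenset())
#   datastore.add_target_routing_config(target)  # raises on duplicate name or priority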
class EmailRouter:
@property
def debug(self) -> bool:
return self._debug
@property
def logger(self):
return self._logger
@property
def router_db_initialized(self) -> bool:
return self._router_db_initialized
@property
def router_rules_datastore(self) -> Optional[EmailRouterRulesDatastore]:
return self._router_rules_datastore
@property
def router_instance_type(self) -> RouterInstanceType:
return self._router_instance_type
@property
def router_db_source_identifier(self) -> EmailRouterSourceConfig:
return self._router_db_source_identifier
@classmethod
def get_supported_router_db_source_types(cls):
return frozenset([
EmailRouterDatastoreSourceType.JSONFILE
])
def __init__(self,
router_db_source_identifier: EmailRouterSourceConfig,
router_instance_type: RouterInstanceType,
debug: bool = False):
if not isinstance(router_db_source_identifier, EmailRouterSourceConfig):
raise ValueError('Cannot initialize ' + type(self).__name__ + ': ' +
'router_db_source_identifier must be of type ' +
EmailRouterSourceConfig.__name__ + os.linesep +
'Value provided had type "' + str(type(router_db_source_identifier)))
# now verify the source type and make sure it is supported
if not issubclass(type(router_db_source_identifier.source_type), EmailRouterDatastoreSourceType):
raise ValueError('Cannot initialize ' + type(self).__name__ + ': ' +
'source_type from router_db_source_identifier must be of type "' +
EmailRouterDatastoreSourceType.__name__ +
'" and have one of these values: ' +
','.join([k.name for k in EmailRouterDatastoreSourceType]))
if router_db_source_identifier.source_type not in type(self).get_supported_router_db_source_types():
raise ValueError('Unsupported router db source type "' + str(router_db_source_identifier.source_type) +
os.linesep + 'Supported value(s): ' +
','.join([x.name for x in type(self).get_supported_router_db_source_types()]))
if not isinstance(router_instance_type, RouterInstanceType):
raise ValueError('Cannot initialize ' + type(self).__name__ + ': ' +
'router_instance_type must be of type ' +
RouterInstanceType.__name__ + os.linesep +
'Value provided had type "' + str(type(router_instance_type)))
self._router_instance_type = router_instance_type
self._router_db_source_identifier = router_db_source_identifier
self._debug = debug
self._logger = logging.getLogger(type(self).__name__)
self.logger.setLevel(logging.DEBUG if self.debug else logging.INFO)
ch = logging.StreamHandler()
ch.setLevel(logging.DEBUG if self.debug else logging.INFO)
        formatter = logging.Formatter('%(asctime)s|%(levelname)s|%(message)s',
                                      datefmt='%Y-%m-%dT%H:%M:%S')
ch.setFormatter(formatter)
self.logger.addHandler(ch)
# now match the source type and initialize as needed
self._router_db_initialized = False
self._router_rules_datastore = None
if self.router_db_source_identifier.source_type == EmailRouterDatastoreSourceType.JSONFILE:
self._initialize_from_jsonfile()
else:
raise \
EmeraldEmailRouterDatabaseInitializationError('Unsupported router database source type "' +
str(self.router_db_source_identifier.source_type) +
'" - must be one of ' + os.linesep +
','.join([x.name for x in type(
self).get_supported_router_db_source_types()]))
def _initialize_from_jsonfile(self):
# read the json file from the source identifier
if type(self.router_db_source_identifier.source_uri) is not str or \
len(self.router_db_source_identifier.source_uri) == 0:
raise EmeraldEmailRouterDatabaseInitializationError('Empty or missing router db source identifier' +
': for JSON file should specify a valid filename')
try:
with open(os.path.expanduser(self.router_db_source_identifier.source_uri),
encoding='utf-8',
mode='r') as json_config_source:
json_data = json.load(json_config_source)
print('JSON = ' + str(json_data))
except json.JSONDecodeError as jdex:
error_string = 'Source JSON file "' + \
str(self.router_db_source_identifier.source_uri) + '" cannot be decoded' + \
os.linesep + 'Exception detail: ' + str(jdex.args)
self.logger.critical(error_string)
raise EmeraldEmailRouterDatabaseInitializationError(error_string)
except FileNotFoundError as fnfex:
            error_string = 'Source JSON file "' + \
import pandas as pd
import numpy as np
from pandas.api.types import is_numeric_dtype
from datetime import datetime
from os import mkdir
from sklearn.preprocessing import OrdinalEncoder, KBinsDiscretizer
import concurrent
# import cProfile
from statistics import mean
# from math import factorial
# from tqdm import tqdm
# from scipy.stats import poisson
# import matplotlib.pyplot as plt
# from numba import jit
'''
This file evaluates the presence of outliers in 3+ dimensions in the openml.org dataset collection
'''
np.random.seed(0)
pd.options.display.max_columns = 1000
pd.options.display.max_rows = 1000
pd.options.display.width = 10000
DIVISOR = 0.25 # todo: loop through different values of this to see how it affects the results.
def flatten(arr):
flatten_1d = lambda x: [i for row in x for i in row]
if len(arr) == 0:
return arr
try:
while True:
arr = flatten_1d(arr)
if len(arr) == 0:
return arr
except:
pass
return arr
def is_float(v):
if str(v).isdigit():
return False
try:
float(v)
return True
except ValueError:
return False
class CountsOutlierDetector:
def __init__(self, n_bins=7, max_dimensions=5, results_folder="", results_name="", run_parallel=False):
self.n_bins = n_bins
self.max_dimensions = max_dimensions
self.results_folder = results_folder
self.results_name = results_name
self.run_parallel = run_parallel
self.col_types_arr = []
self.ordinal_encoders_arr = []
def get_col_types_arr(self, X):
col_types_arr = ['N'] * len(X.columns)
for c in range(len(X.columns)):
num_unique = X[X.columns[c]].nunique()
if not is_numeric_dtype(X[X.columns[c]]):
col_types_arr[c] = 'C'
# Even if the values are numeric, if there are few of them, consider them categorical, though if the values
# are all float, the column will be cast to 'N' when collecting the unique values.
elif is_numeric_dtype(X[X.columns[c]]) and num_unique <= 25:
col_types_arr[c] = 'C'
# If there are a large number of categorical columns, re-determine the types with a more strict cutoff
if col_types_arr.count('C') > 50:
col_types_arr = ['N'] * len(X.columns)
for c in range(len(X.columns)):
num_unique = X[X.columns[c]].nunique()
if not is_numeric_dtype(X[X.columns[c]]):
col_types_arr[c] = 'C'
elif is_numeric_dtype(X[X.columns[c]]) and num_unique <= 5:
col_types_arr[c] = 'C'
return col_types_arr
def ordinal_encode(self, X):
# Numpy deals with numeric values much more efficiently than text
self.ordinal_encoders_arr = [None]*len(X.columns)
for i in range(len(X.columns)):
if self.col_types_arr[i] == 'C':
enc = OrdinalEncoder()
self.ordinal_encoders_arr[i] = enc
col_vals = X[X.columns[i]].values.reshape(-1, 1)
X_np = enc.fit_transform(col_vals).astype(int)
X[X.columns[i]] = X_np
return X
def get_col_value(self, col_idx, value_idx):
if self.col_types_arr[col_idx] == "C":
return self.ordinal_encoders_arr[col_idx].inverse_transform([[value_idx]])[0][0]
else:
return f"Bin {value_idx}"
# Using numba appears to give similar performance results
# @jit(nopython=True)
# def get_cond(X_vals, col_idx, val):
# cond = (X_vals[:, col_idx] == val)
# return cond
def predict(self, X):
# todo: rename this -- it doesn't display
def format_outlier_counts(msg, arr):
nonlocal output_msg
unique_counts = sorted(list(set(arr)))
for uc in unique_counts:
output_msg += f"\n{msg}: {uc}: {arr.count(uc):5}"
# Given two columns i and j, gets, for each pair of values, the fraction of the dataset and the row numbers.
def get_2d_fractions(i, j):
two_d_fractions = []
two_d_row_nums = []
for i_val in unique_vals[i]:
i_vals_fractions = []
i_vals_row_nums = []
cond1 = (X_vals[:, i] == i_val)
for j_val in unique_vals[j]:
#rows_both = np.where((X_vals[:, i] == i_val) & (X_vals[:, j] == j_val))
cond2 = (X_vals[:, j] == j_val)
rows_both = np.where(cond1 & cond2)
i_vals_fractions.append(len(rows_both[0]) / num_rows)
i_vals_row_nums.append(rows_both[0])
two_d_fractions.append(i_vals_fractions)
two_d_row_nums.append(i_vals_row_nums)
return two_d_fractions, two_d_row_nums
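        # For illustration: two_d_fractions[a][b] is the fraction of rows where column i
        # equals unique_vals[i][a] and column j equals unique_vals[j][b], and
        # two_d_row_nums[a][b] holds the indexes of exactly those rows.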
def get_unique_vals():
nonlocal output_msg
# An element for each column. For categorical columns, lists the unique values. Used to maintain a
# consistent order.
unique_vals = [[]] * num_cols
num_unique_vals = [0] * num_cols
for i in range(num_cols):
uv = X.iloc[:, i].unique()
# If there are many unique values, remove the float values
# todo: set this threshold as a parameter
# todo: need to save the pre-ordinal encoded values to do this.
# todo: or could not do it: then don't need to save the unique values, just the count and can assume it's
# 0 up to that.
#if len(uv) > 25:
# uv = [v for v in uv if not is_float(v)]
col_threshold = (1.0 / len(uv)) * DIVISOR
unique_vals[i] = uv
num_unique_vals[i] = len(uv)
return unique_vals, num_unique_vals
def get_1d_stats():
nonlocal output_msg
# Parallel 2d array to unique_vals. Indicates the fraction of the total dataset for this value in this column.
fractions_1d = [[]] * num_cols
# Parallel 2d array to unique_vals. Boolean value indicates if the value is considered a 1d outlier.
rare_1d_values = [[]] * num_cols
# Integer value for each row in the dataset indicating how many individual columns have values considered
# outliers.
outliers_1d_arr = [0] * num_rows
# Text explanation for each row explaining each 1d outlier.
outliers_explanation_arr = [""] * num_rows
for i in range(num_cols):
col_threshold = (1.0 / num_unique_vals[i]) * DIVISOR
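                # Worked example (illustrative): with DIVISOR = 0.25 and 4 unique values,
                # col_threshold = (1 / 4) * 0.25 = 0.0625, so a value is flagged as a 1d
                # outlier only if its fraction of rows is below 0.0625 and also below 0.01.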
# Array with an element for each unique value in column i. Indicates the fraction of the total dataset held
# by that value.
col_fractions_1d = []
# Array with an element for each unique value in column i. Indicates if that value is considered rare.
col_rare_1d_values = []
for v in unique_vals[i]: # loop through each unique value in the current column.
frac = X.iloc[:, i].tolist().count(v) / num_rows
col_fractions_1d.append(frac)
rare_values_flag = (frac < col_threshold) and (frac < 0.01)
if rare_values_flag:
rows_matching = np.where(X_vals[:, i] == v)
for r in rows_matching[0]:
outliers_1d_arr[r] += 1
outliers_explanation_arr[r] += f'[Column: {X.columns[i]}, ' + \
f'Value: {self.get_col_value(i,v)}, fraction: {frac}]'
col_rare_1d_values.append(rare_values_flag)
fractions_1d[i] = col_fractions_1d
rare_1d_values[i] = col_rare_1d_values
output_msg += f"\n\n1d: num common values: {flatten(rare_1d_values).count(False)}"
output_msg += f"\n1d: num rare values: {flatten(rare_1d_values).count(True)}"
format_outlier_counts("1d: Outlier Counts by score", outliers_1d_arr)
return fractions_1d, rare_1d_values, outliers_1d_arr, outliers_explanation_arr
def get_2d_stats():
nonlocal output_msg
# This returns 2 parallel 4d arrays, fractions_2d and rare_2d_values, with the dimensions: i column,
# j column, value in i column, value in j column
# Each element stores the fraction of the total dataset with this combination of values.
            fractions_2d = []
            # Each element stores a boolean indicating if this combination of values is considered rare in the 2d sense.
            rare_2d_values = []
# Integer value for each row in the dataset indicating how many pairs of columns have combinations considered
# outliers.
outliers_2d_arr = [0] * num_rows
outliers_explanation_arr = [""] * num_rows
for i in range(num_cols):
fractions_2d.append([[]] * num_cols)
rare_2d_values.append([[]] * num_cols)
for i in range(num_cols - 1):
#print("2d i: ",i)
for j in range(i + 1, num_cols):
local_fractions, two_d_row_nums = get_2d_fractions(i, j)
fractions_2d[i][j] = local_fractions
# Determine which of these fraction would be considered rare in the 2d sense
i_rare_arr = []
expected_under_uniform = 1.0 / (len(unique_vals[i]) * len(unique_vals[j]))
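                    # Worked example (illustrative): with 3 and 4 unique values,
                    # expected_under_uniform = 1 / 12, or about 0.083; a value pair is flagged
                    # only if its fraction is below DIVISOR times both this and the product of
                    # the 1d marginals, below 0.01, and neither value is a 1d outlier itself.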
for i_vals_idx in range(len(fractions_2d[i][j])):
j_rare_arr = []
for j_vals_idx in range(len(fractions_2d[i][j][i_vals_idx])):
current_fraction = fractions_2d[i][j][i_vals_idx][j_vals_idx]
expected_given_marginal = fractions_1d[i][i_vals_idx] * fractions_1d[j][j_vals_idx]
rare_value_flag = (rare_1d_values[i][i_vals_idx] == False) and \
(rare_1d_values[j][j_vals_idx] == False) and \
(current_fraction < (expected_under_uniform * DIVISOR)) and \
(current_fraction < (expected_given_marginal * DIVISOR)) and \
(current_fraction < 0.01)
if rare_value_flag:
row_nums = two_d_row_nums[i_vals_idx][j_vals_idx]
                            assert len(row_nums) == round(current_fraction * num_rows), \
                                f"len of matching rows: {len(row_nums)}, " \
                                f"current_fraction * num_rows: {current_fraction * num_rows}"
for r in row_nums:
outliers_2d_arr[r] += 1
# todo: format this for 3, 4, 5d too
outliers_explanation_arr[r] += f" [[Columns: {X.columns[i]} and " +\
f"{X.columns[j]} Values: {self.get_col_value(i, i_vals_idx)} and " +\
f"{self.get_col_value(j, j_vals_idx)}, Fraction: {current_fraction}]]"
j_rare_arr.append(rare_value_flag)
i_rare_arr.append(j_rare_arr)
rare_2d_values[i][j] = i_rare_arr
out = flatten(rare_2d_values)
output_msg += f"\n\n2d: num common combinations: {out.count(False)}"
output_msg += f"\n2d: num rare combinations: {out.count(True)} (Typically most with zero rows)"
format_outlier_counts("2d: Outlier Counts by score", outliers_2d_arr)
return fractions_2d, rare_2d_values, outliers_2d_arr, outliers_explanation_arr
def get_3d_stats(num_combinations):
nonlocal output_msg
# This returns 2 parallel 6d arrays: fractions_3d and rare_3d_values (with the dimensions: i column, j column,
# k column, value in i column, value in j column, value in the k column), as well as outliers_3d_arr and
# outliers_explanation_arr.
            fractions_3d = [[]] * num_cols  # todo: not used yet, though it will be if we extend this to 4d
rare_3d_values = [[]] * num_cols
outliers_3d_arr = [0] * num_rows
outliers_explanation_arr = [""] * num_rows
column_combos_checked = 0
run_parallel_3d = self.run_parallel
if num_combinations < 1_000_000:
run_parallel_3d = False
if run_parallel_3d:
process_arr = []
with concurrent.futures.ProcessPoolExecutor() as executor:
for i in range(num_cols):
f = executor.submit(process_inner_loop_3d,
i,
X_vals,
num_cols,
num_rows,
unique_vals,
fractions_1d,
rare_1d_values,
rare_2d_values)
process_arr.append(f)
for f_idx, f in enumerate(process_arr):
rare_arr_for_i, outliers_3d_arr_for_i, outliers_explanation_arr_for_i, column_combos_checked_for_i = f.result()
rare_3d_values[f_idx] = rare_arr_for_i
                    outliers_3d_arr = [x + y for x, y in zip(outliers_3d_arr, outliers_3d_arr_for_i)]
            # node.arity = tuple.size();
node.arity = len(tuple)
# node.node_data = py::reinterpret_borrow<py::object>(tuple.get_type());
node.node_data = type(tuple)
# for (py::handle entry : tuple) {
# recurse(entry);
# }
for entry in tuple:
# recurse(entry)
self.flatten_into(entry, leaves, leaf_predicate)
# break;
# }
# default:
else:
# DCHECK(node.kind == PyTreeKind::kLeaf);
assert node.kind == PyTreeKind.kLeaf
# leaves.push_back(py::reinterpret_borrow<py::object>(handle));
leaves.append(handle)
# }
# }
# node.num_nodes = traversal_.size() - start_num_nodes + 1;
node.num_nodes = len(self.traversal_) - start_num_nodes + 1
# node.num_leaves = leaves.size() - start_num_leaves;
node.num_leaves = len(leaves) - start_num_leaves
# traversal_.push_back(std::move(node));
self.traversal_.append(node)
@classmethod
def flatten(cls, x: typing.Any, leaf_predicate: typing.Callable = None) -> (typing.Iterable, PyTreeDef):
"""Flattens a Pytree into a list of leaves and a PyTreeDef.
Returns references to the flattened objects, which might be temporary
objects in the case of custom pytype handlers."""
leaves = []
tree = cls()
tree.flatten_into(x, leaves, leaf_predicate)
return leaves, tree
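    # Illustrative usage (a sketch; assumes built-in containers are handled as above):
    #   leaves, treedef = PyTreeDef.flatten(((1, 2), {"a": 3}))
    #   # leaves == [1, 2, 3]; treedef records the tuple/dict structure, so
    #   # treedef.flatten_up_to(((1, 2), {"a": 3})) yields the same leaves again.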
@classmethod
def all_leaves(cls, x: typing.Any) -> bool:
"Tests whether the given list is a flat list of leaves."
for v in x:
kind, registration = cls.get_kind(v)
if kind != PyTreeKind.kLeaf:
return False
return True
def flatten_up_to(self, xs: typing.Any):
"""Flattens a Pytree up to this PyTreeDef. 'self' must be a tree prefix of
the tree-structure of 'xs'.
For example, if we flatten a value [(1, (2, 3)), {"foo": 4}] with a treedef
[(*, *), *], the result is the list of leaves [1, (2, 3), {"foo": 4}]."""
# py::list leaves(num_leaves());
leaves = resize(None, self.num_leaves)
# std::vector<py::object> agenda;
# agenda.push_back(py::reinterpret_borrow<py::object>(xs));
agenda = [xs]
# auto it = traversal_.rbegin();
it = iter(self.traversal_[::-1])
# int leaf = num_leaves() - 1;
leaf = self.num_leaves - 1
# while (!agenda.empty()) {
while len(agenda) > 0:
# if (it == traversal_.rend()) {
# throw std::invalid_argument(absl::StrFormat(
# "Tree structures did not match: %s vs %s", py::repr(xs), ToString()));
# }
if finished_iterating(it):
raise ValueError("Tree structures did not match: %s vs %s" % (
py.repr(xs), py.str(self)
))
# const Node& node = *it;
node = next(it)
# py::object object = agenda.back();
# agenda.pop_back();
object = agenda.pop()
# ++it;
#
# switch (node.kind) {
# case PyTreeKind::kLeaf:
if node.kind == PyTreeKind.kLeaf:
# if (leaf < 0) {
# throw std::logic_error("Leaf count mismatch.");
# }
assert leaf >= 0, "Leaf count mismatch"
# leaves[leaf] = py::reinterpret_borrow<py::object>(object);
leaves[leaf] = object
# --leaf;
leaf -= 1
# break;
#
# case PyTreeKind::kNone:
# break;
elif node.kind == PyTreeKind.kNone:
pass
#
# case PyTreeKind::kTuple: {
elif node.kind == PyTreeKind.kTuple:
# if (!PyTuple_CheckExact(object.ptr())) {
# throw std::invalid_argument(
# absl::StrFormat("Expected tuple, got %s.", py::repr(object)));
# }
if not isinstance(object, py.tuple):
raise ValueError("Expected tuple, got %s." % py.repr(object))
# py::tuple tuple = py::reinterpret_borrow<py::tuple>(object);
tuple: py.tuple = object
# if (tuple.size() != node.arity) {
# throw std::invalid_argument(
# absl::StrFormat("Tuple arity mismatch: %d != %d; tuple: %s.",
# tuple.size(), node.arity, py::repr(object)));
# }
if len(tuple) != node.arity:
raise ValueError("Tuple arity mismatch: %d != %d; tuple: %s." % (
len(tuple), node.arity, py.repr(object)
))
# for (py::handle entry : tuple) {
# agenda.push_back(py::reinterpret_borrow<py::object>(entry));
# }
# for entry in tuple:
# agenda.append(entry)
agenda.extend(tuple)
# break;
# }
#
# case PyTreeKind::kList: {
elif node.kind == PyTreeKind.kList:
# if (!PyList_CheckExact(object.ptr())) {
# throw std::invalid_argument(
# absl::StrFormat("Expected list, got %s.", py::repr(object)));
# }
if not isinstance(object, py.list):
raise ValueError("Expected list, got %s." % py.repr(object))
# py::list list = py::reinterpret_borrow<py::list>(object);
list: typing.List = object
# if (list.size() != node.arity) {
# throw std::invalid_argument(
# absl::StrFormat("List arity mismatch: %d != %d; list: %s.",
# list.size(), node.arity, py::repr(object)));
# }
if len(list) != node.arity:
raise ValueError("List arity mismatch: %d != %d; list: %s." % (
len(list), node.arity, py.repr(object)
))
# for (py::handle entry : list) {
# agenda.push_back(py::reinterpret_borrow<py::object>(entry));
# }
# for entry in list:
# agenda.append(entry)
agenda.extend(list)
# break;
# }
#
# case PyTreeKind::kDict: {
elif node.kind == PyTreeKind.kDict:
# if (!PyDict_CheckExact(object.ptr())) {
# throw std::invalid_argument(
# absl::StrFormat("Expected dict, got %s.", py::repr(object)));
# }
if not isinstance(object, py.dict):
raise ValueError("Expected dict, got %s." % py.repr(object))
# py::dict dict = py::reinterpret_borrow<py::dict>(object);
dict: typing.Dict = object
# py::list keys =
# py::reinterpret_steal<py::list>(PyDict_Keys(dict.ptr()));
# if (PyList_Sort(keys.ptr())) {
# throw std::runtime_error("Dictionary key sort failed.");
# }
keys = py.list(py.sorted(dict.keys()))
# if (keys.not_equal(node.node_data)) {
# throw std::invalid_argument(
# absl::StrFormat("Dict key mismatch; expected keys: %s; dict: %s.",
# py::repr(node.node_data), py::repr(object)));
# }
if not_equal(keys, node.node_data):
raise ValueError("Dict key mismatch; expected keys: %s; dict: %s." % (
py.repr(node.node_data), py.repr(object)
))
# for (py::handle key : keys) {
# agenda.push_back(dict[key]);
# }
for key in keys:
agenda.append(dict[key])
# break;
# }
#
# case PyTreeKind::kNamedTuple: {
elif node.kind == PyTreeKind.kNamedTuple:
# if (!py::isinstance<py::tuple>(object) ||
# !py::hasattr(object, "_fields")) {
# throw std::invalid_argument(absl::StrFormat(
# "Expected named tuple, got %s.", py::repr(object)));
# }
if not isinstance(object, py.tuple) or not hasattr(object, "_fields"):
raise ValueError("Expected named tuple, got %s." % py.repr(object))
# py::tuple tuple = py::reinterpret_borrow<py::tuple>(object);
tuple: typing.NamedTuple = object
# if (tuple.size() != node.arity) {
# throw std::invalid_argument(absl::StrFormat(
# "Named tuple arity mismatch: %d != %d; tuple: %s.", tuple.size(),
# node.arity, py::repr(object)));
# }
if len(tuple) != node.arity:
raise ValueError("Named tuple arity mismatch: %d != %d; tuple: %s." % (
len(tuple), node.arity, py.repr(object)
))
# if (tuple.get_type().not_equal(node.node_data)) {
# throw std::invalid_argument(absl::StrFormat(
# "Named tuple type mismatch: expected type: %s, tuple: %s.",
# py::repr(node.node_data), py::repr(object)));
# }
if not_equal(py.type(tuple), node.node_data):
raise ValueError("Named tuple type mismatch: expected type: %s, tuple: %s." % (
py.repr(node.node_data), py.repr(object)
))
# for (py::handle entry : tuple) {
# agenda.push_back(py::reinterpret_borrow<py::object>(entry));
# }
# for entry in tuple:
# agenda.append(entry)
agenda.extend(tuple)
# break;
# }
#
# case PyTreeKind::kCustom: {
elif node.kind == PyTreeKind.kCustom:
# auto* registration = PyTreeTypeRegistry::Lookup(object.get_type());
registration = PyTreeTypeRegistry.lookup(py.type(object))
# if (registration != node.custom) {
# throw std::invalid_argument(absl::StrFormat(
# "Custom node type mismatch: expected type: %s, value: %s.",
# py::repr(node.custom->type), py::repr(object)));
# }
if registration != node.custom:
raise ValueError("Custom node type mismatch: expected type: %s, value: %s." % (
py.repr(node.custom.type), py.repr(object)
))
# py::tuple out = py::cast<py::tuple>(node.custom->to_iterable(object));
out: typing.Tuple = node.custom.to_iterable(object)
# if (out.size() != 2) {
# throw std::runtime_error(
# "PyTree custom to_iterable function should return a pair");
# }
if len(out) != 2:
raise RuntimeError("PyTree custom to_iterable function should return a pair")
# if (node.node_data.not_equal(out[1])) {
# throw std::invalid_argument(absl::StrFormat(
# "Mismatch custom node data: %s != %s; value: %s.",
# py::repr(node.node_data), py::repr(out[1]), py::repr(object)));
# }
if not_equal(node.node_data, out[1]):
raise ValueError("Mismatch custom node data: %s != %s; value: %s." % (
py.repr(node.node_data), py.repr(out[1]), py.repr(object)
))
# int arity = 0;
# arity = 0
# for (py::handle entry : py::cast<py::iterable>(out[0])) {
# ++arity;
# agenda.push_back(py::reinterpret_borrow<py::object>(entry));
# }
# for entry in out[0]:
# arity += 1
# agenda.append(entry)
arity = len(out[0])
agenda.extend(out[0])
# if (arity != node.arity) {
# throw std::invalid_argument(absl::StrFormat(
# "Custom type arity mismatch: %d != %d; value: %s.", arity,
# node.arity, py::repr(object)));
# }
if arity != node.arity:
raise ValueError("Custom type arity mismatch: %d != %d; value: %s." % (
arity, node.arity, py.repr(object)
))
# break;
# }
# }
# }
# if (it != traversal_.rend() || leaf != -1) {
# throw std::invalid_argument(absl::StrFormat(
# "Tree structures did not match: %s vs %s", py::repr(xs), ToString()));
# }
if not finished_iterating(it) or leaf != -1:
raise ValueError("Tree structures did not match: %s vs %s" % (
py.repr(xs), py.str(self)
))
# return leaves;
return leaves
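    # Concrete sketch of the docstring example above (names are illustrative):
    #   _, prefix_def = PyTreeDef.flatten([(0, 0), 0])        # structure [(*, *), *]
    #   prefix_def.flatten_up_to([(1, (2, 3)), {"foo": 4}])
    #   # -> [1, (2, 3), {"foo": 4}]; structures below the prefix are kept intact.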
@property
def num_leaves(self) -> py.int:
if len(self.traversal_) <= 0:
return 0
return self.traversal_[-1].num_leaves
@property
def num_nodes(self) -> py.int:
return len(self.traversal_)
@staticmethod
    def tuple(defs: typing.Sequence[PyTreeDef]):
"""Makes a Tuple PyTreeDef out of a vector of PyTreeDefs."""
# auto out = absl::make_unique<PyTreeDef>();
out = PyTreeDef()
# for (const PyTreeDef& def : defs) {
# absl::c_copy(def.traversal_, std::back_inserter(out->traversal_));
# }
for def_ in defs:
out.traversal_.extend(deepcopy(def_.traversal_))
# Node node;
# node.kind = PyTreeKind::kTuple;
# node.arity = defs.size();
node = PyTreeDef.Node(kind=PyTreeKind.kTuple, arity=len(defs))
# out->traversal_.push_back(node);
out.traversal_.append(node)
# return out;
return out
def children(self) -> typing.List[PyTreeDef]:
# std::vector<std::unique_ptr<PyTreeDef>> children;
children = []
# if (traversal_.empty()) {
# return children;
# }
if len(self.traversal_) <= 0:
return children
# | |
<filename>SVGPs/kernels.py
# Copyright 2016 <NAME>, alexggmatthews
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ------------------------------------------
# Modification notice:
# This file was modified by <NAME>
# ------------------------------------------
import tensorflow as tf
import numpy as np
from functools import reduce
from settings import int_type,float_type
from functions import eye
class Kern(object):
"""
The basic kernel class. Handles input_dim and active dims, and provides a
generic '_slice' function to implement them.
"""
def __init__(self, input_dim, active_dims=None):
"""
input dim is an integer
active dims is either an iterable of integers or None.
Input dim is the number of input dimensions to the kernel. If the
kernel is computed on a matrix X which has more columns than input_dim,
then by default, only the first input_dim columns are used. If
different columns are required, then they may be specified by
active_dims.
If active dims is None, it effectively defaults to range(input_dim),
but we store it as a slice for efficiency.
"""
self.input_dim = int(input_dim)
if active_dims is None:
self.active_dims = slice(input_dim)
elif type(active_dims) is slice:
self.active_dims = active_dims
if active_dims.start is not None and active_dims.stop is not None and active_dims.step is not None:
assert len(range(*active_dims)) == input_dim # pragma: no cover
else:
self.active_dims = np.array(active_dims, dtype=np.int32)
assert len(active_dims) == input_dim
self.num_gauss_hermite_points = 20
def _slice(self, X, X2):
"""
Slice the correct dimensions for use in the kernel, as indicated by
`self.active_dims`.
:param X: Input 1 (NxD[xB]).
:param X2: Input 2 (MxD), may be None.
        :return: Sliced X, X2, (N x self.input_dim [x B]), (M x self.input_dim)
"""
if X.get_shape().ndims == 2: # M x D
if isinstance(self.active_dims, slice):
X = X[:, self.active_dims]
if X2 is not None:
X2 = X2[:, self.active_dims]
else:
X = tf.transpose(tf.gather(tf.transpose(X), self.active_dims))
if X2 is not None:
X2 = tf.transpose(tf.gather(tf.transpose(X2), self.active_dims))
elif X.get_shape().ndims == 3: # M x D x B
if isinstance(self.active_dims, slice):
X = X[:, self.active_dims, :]
if X2 is not None:
X2 = X2[:, self.active_dims]
else:
X = tf.transpose(tf.gather(tf.transpose(X, (1, 0, 2)), self.active_dims), (1, 0, 2))
if X2 is not None:
X2 = tf.transpose(tf.gather(X2, self.active_dims))
with tf.control_dependencies([ tf.assert_equal(tf.shape(X)[1], tf.constant(self.input_dim, dtype=int_type)) ]):
X = tf.identity(X)
return X, X2
def _slice_cov(self, cov):
"""
Slice the correct dimensions for use in the kernel, as indicated by
`self.active_dims` for covariance matrices. This requires slicing the
rows *and* columns. This will also turn flattened diagonal
matrices into a tensor of full diagonal matrices.
:param cov: Tensor of covariance matrices (NxDxD or NxD).
:return: N x self.input_dim x self.input_dim.
"""
cov = tf.cond(tf.equal(tf.rank(cov), 2), lambda: tf.matrix_diag(cov), lambda: cov)
if isinstance(self.active_dims, slice):
cov = cov[..., self.active_dims, self.active_dims]
else:
cov_shape = tf.shape(cov)
covr = tf.reshape(cov, [-1, cov_shape[-1], cov_shape[-1]])
gather1 = tf.gather(tf.transpose(covr, [2, 1, 0]), self.active_dims)
gather2 = tf.gather(tf.transpose(gather1, [1, 0, 2]), self.active_dims)
cov = tf.reshape(tf.transpose(gather2, [2, 0, 1]),
tf.concat_v2([cov_shape[:-2], [len(self.active_dims), len(self.active_dims)]], 0))
return cov
class Stationary(Kern):
"""
Base class for kernels that are stationary, that is, they only depend on
r = || x - x' ||
This class handles 'ARD' behaviour, which stands for 'Automatic Relevance
Determination'. This means that the kernel has one lengthscale per
dimension, otherwise the kernel is isotropic (has a single lengthscale).
"""
def __init__(self, input_dim, variance=1.0, lengthscales=1.,
active_dims=None):
"""
- input_dim is the dimension of the input to the kernel
- variance is the (initial) value for the variance parameter
- lengthscales is the initial value for the lengthscales parameter
defaults to 1.0
- active_dims is a list of length input_dim which controls which
columns of X are used.
"""
Kern.__init__(self, input_dim, active_dims)
self.lengthscales = tf.get_variable("lengthscales", [input_dim], initializer=tf.constant_initializer(lengthscales))
self.variance = tf.get_variable("variance", [1], initializer=tf.constant_initializer(variance))
def square_dist(self, X, X2):
"""
:param X: NxD[xB]
:param X2: MxD
:return: NxM[xB]
"""
if X.get_shape().ndims == 2: # M x D
X = X / self.lengthscales
Xs = tf.reduce_sum(tf.square(X), 1)
if X2 is None:
return -2 * tf.matmul(X, tf.transpose(X)) + \
tf.reshape(Xs, (-1, 1)) + tf.reshape(Xs, (1, -1))
else:
X2 = X2 / self.lengthscales
X2s = tf.reduce_sum(tf.square(X2), 1)
return -2 * tf.matmul(X, tf.transpose(X2)) + \
tf.reshape(Xs, (-1, 1)) + tf.reshape(X2s, (1, -1))
elif X.get_shape().ndims == 3: # M x D x B
X = X / tf.expand_dims(tf.expand_dims(self.lengthscales, -1), 0)
Xs = tf.reduce_sum(tf.square(X), 1) # NxB
if X2 is None:
d = -2 * tf.matmul(tf.transpose(X, (2, 0, 1)), tf.transpose(X, (2, 1, 0))) + \
tf.expand_dims(tf.transpose(Xs), 1) + \
tf.expand_dims(tf.transpose(Xs), -1)
else:
shape = tf.stack([1, 1, tf.shape(X)[-1]])
X2 = tf.tile(tf.expand_dims(X2 / self.lengthscales, -1), shape)
X2s = tf.reduce_sum(tf.square(X2), 1) # NxB
d = -2 * tf.matmul(tf.transpose(X, (2, 0, 1)), tf.transpose(X2, (2, 1, 0))) + \
tf.expand_dims(tf.transpose(Xs), -1) + \
tf.expand_dims(tf.transpose(X2s), 1)
# d is BxNxN
return tf.transpose(d, (1, 2, 0)) # N x N x B
def euclid_dist(self, X, X2):
r2 = self.square_dist(X, X2)
return tf.sqrt(r2 + 1e-12)
def Kdiag(self, X, presliced=False):
if X.get_shape().ndims == 2: # M x D
return tf.fill(tf.stack([tf.shape(X)[0]]), tf.squeeze(self.variance))
elif X.get_shape().ndims == 3: # M x D x B
return tf.fill(tf.stack([tf.shape(X)[0], tf.shape(X)[-1]]), tf.squeeze(self.variance))
class RBF(Stationary):
"""
The radial basis function (RBF) or squared exponential kernel
"""
def K(self, X, X2=None, presliced=False):
if not presliced:
X, X2 = self._slice(X, X2)
return self.variance * tf.exp(-self.square_dist(X, X2) / 2)
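# Illustrative usage (a sketch; a tf.variable_scope is assumed so the get_variable()
# calls inside the kernel do not collide with other kernels in the graph):
#   with tf.variable_scope("rbf_example"):
#       kern = RBF(input_dim=2, lengthscales=1., active_dims=[0, 3])
#   # K(X, X2) then uses only columns 0 and 3 of X/X2, with one ARD lengthscale per
#   # selected column, and Kdiag(X) is simply the variance broadcast over the rows.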
class PeriodicKernel(Kern):
"""
The periodic kernel. Defined in Equation (47) of
D.J.C.MacKay. Introduction to Gaussian processes. In C.M.Bishop, editor,
Neural Networks and Machine Learning, pages 133--165. Springer, 1998.
Derived using the mapping u=(cos(x), sin(x)) on the inputs.
"""
def __init__(self, input_dim, period=1.0, variance=1.0,
lengthscales=1.0, active_dims=None):
Kern.__init__(self, input_dim, active_dims)
self.lengthscales = tf.get_variable("lengthscales", [input_dim], initializer=tf.constant_initializer(lengthscales))
self.variance = tf.get_variable("variance", [1], initializer=tf.constant_initializer(variance))
self.period = tf.get_variable("period", [1], initializer=tf.constant_initializer(period))
def Kdiag(self, X, presliced=False):
return tf.fill(tf.stack([tf.shape(X)[0]]), tf.squeeze(self.variance))
def K(self, X, X2=None, presliced=False):
if not presliced:
X, X2 = self._slice(X, X2)
if X2 is None:
X2 = X
# Introduce dummy dimension so we can use broadcasting
f = tf.expand_dims(X, 1) # now N x 1 x D
f2 = tf.expand_dims(X2, 0) # now 1 x M x D
r = np.pi * (f - f2) / self.period
r = tf.reduce_sum(tf.square(tf.sin(r) / self.lengthscales), 2)
return self.variance * tf.exp(-0.5 * r)
class LocallyPeriodicKernel(Kern):
"""
    k(t) = var * exp(-t^2 / len^2) * cos(2 * pi * t / per)
"""
def __init__(self, input_dim, period=1.0, variance=1.0,
lengthscales=1.0, active_dims=None):
Kern.__init__(self, input_dim, active_dims)
self.lengthscales = tf.get_variable("lengthscales", [input_dim], initializer=tf.constant_initializer(lengthscales))
self.variance = tf.get_variable("variance", [1], initializer=tf.constant_initializer(variance))
self.period = tf.get_variable("period", [1], initializer=tf.constant_initializer(period))
def Kdiag(self, X, presliced=False):
return tf.fill(tf.stack([tf.shape(X)[0]]), tf.squeeze(self.variance))
def K(self, X, X2=None, presliced=False):
if not presliced:
X, X2 = self._slice(X, X2)
if X2 is None:
X2 = X
# Introduce dummy dimension so we can use broadcasting
f = tf.expand_dims(X, 1) # now N x 1 x D
f2 = tf.expand_dims(X2, 0) # now 1 x M x D
        r = tf.reduce_sum(f - f2, 2)  # hack for 1d
return self.variance * tf.exp( - tf.square(r/self.lengthscales) ) * tf.cos(2.*np.pi *r/ self.period)
class Combination(Kern):
"""
Combine a list of kernels, e.g. by adding or multiplying (see inheriting
classes).
The names of the kernels to be combined are generated from their class
names.
"""
def __init__(self, kern_list):
for k in kern_list:
assert isinstance(k, Kern), "can only add Kern instances"
input_dim = np.max([k.input_dim
if type(k.active_dims) is slice else
np.max(k.active_dims) + 1
for k in kern_list])
Kern.__init__(self, input_dim=input_dim)
# add kernels to a list, flattening out instances of this class therein
self.kern_list = kern_list
class Add(Combination):
def K(self, X, X2=None, presliced=False):
return reduce(tf.add, [k.K(X, X2) for k in self.kern_list])
def Kdiag(self, X, presliced=False):
return reduce(tf.add, [k.Kdiag(X) for k in self.kern_list])
class Prod(Combination):
def K(self, X, X2=None, presliced=False):
return reduce(tf.multiply, [k.K(X, X2) for k in self.kern_list])
def Kdiag(self, X, presliced=False):
        return reduce(tf.multiply, [k.Kdiag(X) for k in self.kern_list])
remove_indexes.append(idx_in_list)
continue
#
# If the RecvUpcall is a tunnel port, we can not map it to
# the correct tunnel. For now, we assume the first matching
# packet is the correct one. For more details see the OVS
# ukey_to_flow_netdev() function.
#
if (event.dp_port == recv.dp_port or
recv.dp_port == DP_TUNNEL_PORT) \
and event.pkt_len == recv.pkt_len:
compare_len = min(len(event.pkt), len(recv.pkt))
if len(event.pkt) != len(recv.pkt) \
and event.pkt_frag_len == 0:
warn_parcial_match = True
elif event.pkt_frag_len != 0:
warn_frag = True
compare_len = min(compare_len, event.pkt_frag_len)
if event.pkt[:compare_len] == recv.pkt[:compare_len]:
match = True
else:
#
# There are still some corner cases due to the fact
# the kernel dp_upcall tracepoint is hit before the
# packet is prepared/modified for upcall pass on.
# Example cases are packet checksum update, VLAN
# insertion, etc., etc. For now, we try to handle the
# checksum part, but we might need to add more
# exceptions in the future.
#
diff_bytes = sum(i != j for i, j in zip(
event.pkt[:compare_len], recv.pkt[:compare_len]))
if diff_bytes <= 2 and compare_len > 56:
# This could be a TCP or UDP checksum
event_pkt = Ether(bytes(event.pkt)[:compare_len])
recv_pkt = Ether(bytes(recv.pkt)[:compare_len])
if (event_pkt.haslayer(TCP) and
recv_pkt.haslayer(TCP)) or (
event_pkt.haslayer(UDP) and
recv_pkt.haslayer(UDP)):
if event_pkt.haslayer(TCP):
event_chksum = event_pkt[TCP].chksum
recv_chksum = recv_pkt[TCP].chksum
else:
event_chksum = event_pkt[UDP].chksum
recv_chksum = recv_pkt[UDP].chksum
if event_chksum & 0xff != recv_chksum & 0xff:
diff_bytes -= 1
if event_chksum & 0xff00 != \
recv_chksum & 0xff00:
diff_bytes -= 1
if diff_bytes == 0:
match = True
if match:
this_set = {event.event_type: event}
for sevent in threads_result[recv.pid][idx_recv]:
this_set[sevent.event_type] = sevent
event_sets.append(this_set)
remove_indexes.append(idx_in_list)
if options.debug & 0x4000000 != 0:
print("DBG: Matched DpUpcall({:6}) => "
"RecvUpcall({:6})".format(idx, idx_recv))
break
elif options.debug & 0x8000000 != 0:
print("DBG: COMPARE DpUpcall({:6}) != "
"RecvUpcall({:6})".format(idx, idx_recv))
event_pkt = Ether(bytes(event.pkt)[:compare_len])
recv_pkt = Ether(bytes(recv.pkt)[:compare_len])
print(re.sub('^', 'DBG:' + ' ' * 4,
event_pkt.show(dump=True),
flags=re.MULTILINE))
print(re.sub('^', 'DBG:' + ' ' * 4,
recv_pkt.show(dump=True),
flags=re.MULTILINE))
elif options.debug & 0x8000000 != 0:
print("DBG: COMPATE DpUpcall({:6}) != "
"RecvUpcall({:6}) -> port {}, {} -> "
"len = {}, {}".format(idx, idx_recv,
event.dp_port,
recv.dp_port,
event.pkt_len,
recv.pkt_len))
bar()
for remove_idx in sorted(remove_indexes, reverse=True):
del recv_upcall_list[remove_idx]
if profile:
t1_stop("Matching DP_UPCALLs to a set")
if warn_parcial_match:
print("WARNING: Packets not fully captured for matching!\n "
"Increase the packet buffer with the '--packet-size' option.")
if warn_frag:
print("WARNING: SKB from kernel had fragments, we could only copy/"
"compare the first part!")
if collect_stats:
return event_sets, batch_stats, thread_stats
return event_sets
#
# unit_test()
#
def unit_test():
pkt1 = b'\x01\x02\x03\x04\x05'
pkt2 = b'\x01\x02\x03\x04\x06'
pkt3 = b'\x01\x02\x03\x04\x07'
key = b'\<KEY>' # Port 1
#
# Basic test with all events in line
#
t1_events = [DpUpcall(1, 100, "ping", 1, "system", 1, pkt1, len(pkt1), 0),
RecvUpcall(2, 1, "hndl", 1, "systen", key, pkt1, len(pkt1)),
Event(3, 1, "hndl", 1, EventType.OP_FLOW_PUT),
OpFlowExecute(4, 1, "hndl", 1, pkt1, len(pkt1)),
Event(5, 1, "hndl", 1, EventType.OVS_PKT_EXEC)]
t1_result = [{EventType.DP_UPCALL: t1_events[0],
EventType.RECV_UPCALL: t1_events[1],
EventType.OP_FLOW_PUT: t1_events[2],
EventType.OP_FLOW_EXECUTE: t1_events[3],
EventType.OVS_PKT_EXEC: t1_events[4]}]
#
# Basic test with missing flow put
#
t2_events = [DpUpcall(1, 100, "ping", 1, "system", 1, pkt1, len(pkt1), 0),
RecvUpcall(2, 1, "hndl", 1, "systen", key, pkt1, len(pkt1)),
OpFlowExecute(4, 1, "hndl", 1, pkt1, len(pkt1)),
Event(5, 1, "hndl", 1, EventType.OVS_PKT_EXEC)]
t2_result = [{EventType.DP_UPCALL: t2_events[0],
EventType.RECV_UPCALL: t2_events[1],
EventType.OP_FLOW_EXECUTE: t2_events[2],
EventType.OVS_PKT_EXEC: t2_events[3]}]
#
# Test with RecvUpcall's being batched
#
t3_events = [DpUpcall(1, 101, "ping", 1, "system", 1, pkt1, len(pkt1), 0),
DpUpcall(2, 102, "ping", 2, "system", 1, pkt2, len(pkt2), 0),
DpUpcall(3, 101, "ping", 3, "system", 1, pkt3, len(pkt3), 0),
RecvUpcall(4, 1, "hndl", 1, "systen", key, pkt1, len(pkt1)),
RecvUpcall(5, 1, "hndl", 1, "systen", key, pkt3, len(pkt3)),
RecvUpcall(6, 1, "hndl", 1, "systen", key, pkt2, len(pkt2)),
Event(7, 1, "hndl", 1, EventType.OP_FLOW_PUT),
OpFlowExecute(8, 1, "hndl", 1, pkt1, len(pkt1)),
Event(9, 1, "hndl", 1, EventType.OVS_PKT_EXEC),
Event(10, 1, "hndl", 1, EventType.OP_FLOW_PUT),
OpFlowExecute(11, 1, "hndl", 1, pkt3, len(pkt3)),
Event(12, 1, "hndl", 1, EventType.OVS_PKT_EXEC),
Event(13, 1, "hndl", 1, EventType.OP_FLOW_PUT),
OpFlowExecute(14, 1, "hndl", 1, pkt2, len(pkt2)),
Event(15, 1, "hndl", 1, EventType.OVS_PKT_EXEC)]
t3_result = [{EventType.DP_UPCALL: t3_events[0],
EventType.RECV_UPCALL: t3_events[3],
EventType.OP_FLOW_PUT: t3_events[6],
EventType.OP_FLOW_EXECUTE: t3_events[7],
EventType.OVS_PKT_EXEC: t3_events[8]},
{EventType.DP_UPCALL: t3_events[1],
EventType.RECV_UPCALL: t3_events[5],
EventType.OP_FLOW_PUT: t3_events[12],
EventType.OP_FLOW_EXECUTE: t3_events[13],
EventType.OVS_PKT_EXEC: t3_events[14]},
{EventType.DP_UPCALL: t3_events[2],
EventType.RECV_UPCALL: t3_events[4],
EventType.OP_FLOW_PUT: t3_events[9],
EventType.OP_FLOW_EXECUTE: t3_events[10],
EventType.OVS_PKT_EXEC: t3_events[11]}]
#
# Test with RecvUpcall's single + batch
#
t4_events = [DpUpcall(1, 100, "ping", 1, "system", 1, pkt1, len(pkt1), 0),
RecvUpcall(2, 1, "hndl", 1, "systen", key, pkt1, len(pkt1)),
Event(3, 1, "hndl", 1, EventType.OP_FLOW_PUT),
OpFlowExecute(4, 1, "hndl", 1, pkt1, len(pkt1)),
Event(5, 1, "hndl", 1, EventType.OVS_PKT_EXEC),
DpUpcall(6, 101, "ping", 1, "system", 1, pkt1, len(pkt1), 0),
DpUpcall(7, 102, "ping", 2, "system", 1, pkt2, len(pkt2), 0),
DpUpcall(8, 101, "ping", 3, "system", 1, pkt3, len(pkt3), 0),
RecvUpcall(9, 1, "hndl", 1, "systen", key, pkt1, len(pkt1)),
RecvUpcall(10, 1, "hndl", 1, "systen", key, pkt3, len(pkt3)),
RecvUpcall(11, 1, "hndl", 1, "systen", key, pkt2, len(pkt2)),
Event(12, 1, "hndl", 1, EventType.OP_FLOW_PUT),
OpFlowExecute(13, 1, "hndl", 1, pkt1, len(pkt1)),
Event(14, 1, "hndl", 1, EventType.OVS_PKT_EXEC),
Event(15, 1, "hndl", 1, EventType.OP_FLOW_PUT),
OpFlowExecute(16, 1, "hndl", 1, pkt3, len(pkt3)),
Event(17, 1, "hndl", 1, EventType.OVS_PKT_EXEC),
Event(18, 1, "hndl", 1, EventType.OP_FLOW_PUT),
OpFlowExecute(14, 1, "hndl", 1, pkt2, len(pkt2)),
Event(19, 1, "hndl", 1, EventType.OVS_PKT_EXEC)]
t4_result = [{EventType.DP_UPCALL: t4_events[0],
EventType.RECV_UPCALL: t4_events[1],
EventType.OP_FLOW_PUT: t4_events[2],
EventType.OP_FLOW_EXECUTE: t4_events[3],
EventType.OVS_PKT_EXEC: t4_events[4]},
{EventType.DP_UPCALL: t4_events[5],
EventType.RECV_UPCALL: t4_events[8],
EventType.OP_FLOW_PUT: t4_events[11],
EventType.OP_FLOW_EXECUTE: t4_events[12],
EventType.OVS_PKT_EXEC: t4_events[13]},
{EventType.DP_UPCALL: t4_events[6],
EventType.RECV_UPCALL: t4_events[10],
EventType.OP_FLOW_PUT: t4_events[17],
EventType.OP_FLOW_EXECUTE: t4_events[18],
EventType.OVS_PKT_EXEC: t4_events[19]},
{EventType.DP_UPCALL: t4_events[7],
EventType.RECV_UPCALL: t4_events[9],
EventType.OP_FLOW_PUT: t4_events[14],
EventType.OP_FLOW_EXECUTE: t4_events[15],
EventType.OVS_PKT_EXEC: t4_events[16]}]
#
# Test with two threads interleaved
#
t5_events = [DpUpcall(1, 100, "ping", 1, "system", 1, pkt1, len(pkt1), 0),
DpUpcall(2, 100, "ping", 1, "system", 1, pkt2, len(pkt2), 0),
RecvUpcall(3, 1, "hndl", 1, "systen", key, pkt1, len(pkt1)),
RecvUpcall(4, 2, "hndl", 2, "systen", key, pkt2, len(pkt2)),
Event(5, 2, "hndl", 2, EventType.OP_FLOW_PUT),
Event(6, 1, "hndl", 1, EventType.OP_FLOW_PUT),
OpFlowExecute(7, 2, "hndl", 1, pkt2, len(pkt2)),
OpFlowExecute(8, 1, "hndl", 1, pkt1, len(pkt1)),
Event(9, 1, "hndl", 1, EventType.OVS_PKT_EXEC),
Event(10, 2, "hndl", 1, EventType.OVS_PKT_EXEC)]
t5_result = [{EventType.DP_UPCALL: t5_events[0],
EventType.RECV_UPCALL: t5_events[2],
EventType.OP_FLOW_PUT: t5_events[5],
EventType.OP_FLOW_EXECUTE: t5_events[7],
EventType.OVS_PKT_EXEC: t5_events[8]},
{EventType.DP_UPCALL: t5_events[1],
EventType.RECV_UPCALL: t5_events[3],
EventType.OP_FLOW_PUT: t5_events[4],
EventType.OP_FLOW_EXECUTE: t5_events[6],
EventType.OVS_PKT_EXEC: t5_events[9]}]
#
# Test batch with missing events
#
t6_events = [DpUpcall(1, 101, "ping", 1, "system", 1, pkt1, len(pkt1), 0),
DpUpcall(2, 102, "ping", 2, "system", 1, pkt2, len(pkt2), 0),
RecvUpcall(3, 1, "hndl", 1, "systen", key, pkt1, len(pkt1)),
RecvUpcall(4, 1, "hndl", 1, "systen", key, pkt2, len(pkt2)),
Event(5, 1, "hndl", 1, EventType.OP_FLOW_PUT),
OpFlowExecute(6, 1, "hndl", 1, pkt2, len(pkt2)),
Event(7, 1, "hndl", 1, EventType.OVS_PKT_EXEC)]
t6_result = [{EventType.DP_UPCALL: t6_events[0],
EventType.RECV_UPCALL: t6_events[2]},
{EventType.DP_UPCALL: t6_events[1],
EventType.RECV_UPCALL: t6_events[3],
EventType.OP_FLOW_PUT: t6_events[4],
EventType.OP_FLOW_EXECUTE: t6_events[5],
EventType.OVS_PKT_EXEC: t6_events[6]}]
#
# Test with RecvUpcall's and OVS_PKT_EXEC being batched
#
t7_events = [DpUpcall(1, 101, "ping", 1, "system", 1, pkt1, len(pkt1), 0),
DpUpcall(2, 102, "ping", 2, "system", 1, pkt2, len(pkt2), 0),
DpUpcall(3, 101, "ping", 3, "system", 1, pkt3, len(pkt3), 0),
RecvUpcall(4, 1, "hndl", 1, "systen", key, pkt1, len(pkt1)),
RecvUpcall(5, 1, "hndl", 1, "systen", key, pkt2, len(pkt2)),
RecvUpcall(6, 1, "hndl", 1, "systen", key, pkt3, len(pkt3)),
Event(7, 1, "hndl", 1, EventType.OP_FLOW_PUT),
OpFlowExecute(8, 1, "hndl", 1, pkt1, len(pkt1)),
Event(9, 1, "hndl", 1, EventType.OP_FLOW_PUT),
OpFlowExecute(10, 1, "hndl", 1, pkt2, len(pkt2)),
Event(11, 1, "hndl", 1, EventType.OP_FLOW_PUT),
OpFlowExecute(12, 1, "hndl", 1, pkt3, len(pkt3)),
Event(13, 1, "hndl", 1, EventType.OVS_PKT_EXEC),
Event(14, 1, "hndl", 1, EventType.OVS_PKT_EXEC),
Event(15, 1, "hndl", 1, EventType.OVS_PKT_EXEC)]
t7_result = [{EventType.DP_UPCALL: t7_events[0],
EventType.RECV_UPCALL: t7_events[3],
EventType.OP_FLOW_PUT: t7_events[6],
EventType.OP_FLOW_EXECUTE: t7_events[7],
EventType.OVS_PKT_EXEC: t7_events[12]},
{EventType.DP_UPCALL: t7_events[1],
EventType.RECV_UPCALL: t7_events[4],
EventType.OP_FLOW_PUT: t7_events[8],
EventType.OP_FLOW_EXECUTE: t7_events[9],
EventType.OVS_PKT_EXEC: t7_events[13]},
{EventType.DP_UPCALL: t7_events[2],
EventType.RECV_UPCALL: t7_events[5],
EventType.OP_FLOW_PUT: t7_events[10],
EventType.OP_FLOW_EXECUTE: t7_events[11],
EventType.OVS_PKT_EXEC: t7_events[14]}]
#
# Actual test sets
#
test_set = [["Simple single event", t1_events, t1_result],
["Single event, missing flow_put", t2_events, t2_result],
["Batched events", t3_events, t3_result],
["Single + batched events", t4_events, t4_result],
["Two sets, different threads", t5_events, t5_result],
["Batch with missing exec", t6_events, t6_result],
["Batched events including exec", t7_events, t7_result]]
print("Running some simple unit tests:")
for test in test_set:
print("- {:<32} ".format(test[0]), end="")
result = collect_event_sets(test[1][:])
if result == test[2]:
print("PASS")
else:
print("FAIL")
print(" OUTPUT :")
for event_set in result:
hdr = " - "
for event_type, event in event_set.items():
print("{} {:<16}: {}".format(hdr, event_type.name, event))
hdr = " "
print(" EXPECTED:")
for event_set in test[2]:
hdr = " - "
for event_type, event in event_set.items():
print("{} {:<16}: {}".format(hdr, event_type.name, event))
hdr = "     "
# src/python/pants/binaries/binary_util.py
# Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
import argparse
import logging
import os
import posixpath
import shutil
import sys
from abc import abstractmethod
from contextlib import contextmanager
from dataclasses import dataclass
from functools import reduce
from typing import Any, Optional, Tuple
from twitter.common.collections import OrderedSet
from pants.base.build_environment import get_buildroot
from pants.base.exceptions import TaskError
from pants.fs.archive import archiver_for_path
from pants.net.http.fetcher import Fetcher
from pants.option.global_options import GlobalOptionsRegistrar
from pants.option.options_bootstrapper import OptionsBootstrapper
from pants.subsystem.subsystem import Subsystem
from pants.util.contextutil import temporary_file
from pants.util.dirutil import chmod_plus_x, safe_concurrent_creation, safe_open
from pants.util.memo import memoized_method, memoized_property
from pants.util.osutil import (
SUPPORTED_PLATFORM_NORMALIZED_NAMES,
get_closest_mac_host_platform_pair,
)
logger = logging.getLogger(__name__)
@dataclass(frozen=True)
class HostPlatform:
"""Describes a platform to resolve binaries for. Determines the binary's location on disk.
:class:`BinaryToolUrlGenerator` instances receive this to generate download urls.
"""
os_name: Any
arch_or_version: Any
def binary_path_components(self):
"""These strings are used as consecutive components of the path where a binary is fetched.
This is also used in generating urls from --binaries-baseurls in PantsHosted."""
return [self.os_name, self.arch_or_version]
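# A minimal usage sketch (values are illustrative, not taken from a real host):
#   HostPlatform('linux', 'x86_64').binary_path_components() -> ['linux', 'x86_64']
#   HostPlatform('mac', '10.13').binary_path_components()    -> ['mac', '10.13']
# These components become consecutive path segments in the download path built below.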
class BinaryToolUrlGenerator:
"""Encapsulates the selection of urls to download for some binary tool.
:API: public
:class:`BinaryTool` subclasses can return an instance of a class mixing this in to
get_external_url_generator(self) to download their file or archive from some specified url or set
of urls.
"""
@abstractmethod
def generate_urls(self, version, host_platform):
"""Return a list of urls to download some binary tool from given a version and platform.
Each url is tried in order to resolve the binary -- if the list of urls is empty, or downloading
from each of the urls fails, Pants will raise an exception when the binary tool is fetched which
should describe why the urls failed to work.
:param str version: version string for the requested binary (e.g. '2.0.1').
:param host_platform: description of the platform to fetch binaries for.
:type host_platform: :class:`HostPlatform`
:returns: a list of urls to download the binary tool from.
:rtype: list
"""
pass
class PantsHosted(BinaryToolUrlGenerator):
"""Given a binary request and --binaries-baseurls, generate urls to download the binary from.
This url generator is used if get_external_url_generator(self) is not overridden by a BinaryTool
subclass, or if --allow-external-binary-tool-downloads is False.
NB: "pants-hosted" is referring to the organization of the urls being specific to pants. It also
happens that most binaries are downloaded from S3 hosting at binaries.pantsbuild.org by default --
but setting --binaries-baseurls to anything else will only download binaries from the baseurls
given, not from binaries.pantsbuild.org.
"""
class NoBaseUrlsError(ValueError): pass
def __init__(self, binary_request, baseurls):
super().__init__()
self._binary_request = binary_request
if not baseurls:
raise self.NoBaseUrlsError(
"Error constructing pants-hosted urls for the {} binary: no baseurls were provided."
.format(binary_request.name))
self._baseurls = baseurls
def generate_urls(self, _version, host_platform):
"""Append the file's download path to each of --binaries-baseurls.
This assumes that the urls in --binaries-baseurls point somewhere that mirrors Pants's
organization of the downloaded binaries on disk. Each url is tried in order until a request
succeeds.
"""
binary_path = self._binary_request.get_download_path(host_platform)
return [posixpath.join(baseurl, binary_path) for baseurl in self._baseurls]
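# Sketch of the resulting urls (baseurls and path are illustrative):
# with baseurls ['https://mirror-a.example/binaries', 'https://mirror-b.example/binaries'] and a
# download path of 'bin/protoc/linux/x86_64/2.4.1/protoc', generate_urls() returns
#   ['https://mirror-a.example/binaries/bin/protoc/linux/x86_64/2.4.1/protoc',
#    'https://mirror-b.example/binaries/bin/protoc/linux/x86_64/2.4.1/protoc'].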
# TODO: Deprecate passing in an explicit supportdir? Seems like we should be able to
# organize our binary hosting so that it's not needed. It's also used to calculate the binary
# download location, though.
@dataclass(frozen=True)
class BinaryRequest:
"""Describes a request for a binary to download."""
supportdir: Any
version: Any
name: Any
platform_dependent: Any
external_url_generator: Optional[Any]
archiver: Optional[Any]
def _full_name(self):
if self.archiver:
return '{}.{}'.format(self.name, self.archiver.extension)
return self.name
def get_download_path(self, host_platform):
binary_path_components = [self.supportdir]
if self.platform_dependent:
# TODO(<NAME>): finish doc of the path structure expected under base_path.
binary_path_components.extend(host_platform.binary_path_components())
binary_path_components.extend([self.version, self._full_name()])
return os.path.join(*binary_path_components)
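# Sketch (illustrative request): a platform-dependent BinaryRequest with supportdir='bin/protoc',
# version='2.4.1', name='protoc', archiver=None resolves on HostPlatform('linux', 'x86_64') to
# 'bin/protoc/linux/x86_64/2.4.1/protoc'; with platform_dependent=False the host components are
# skipped, giving 'bin/protoc/2.4.1/protoc'.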
@dataclass(frozen=True)
class BinaryFetchRequest:
"""Describes a request to download a file."""
download_path: Any
urls: Tuple
def __post_init__(self):
if not self.urls:
raise self.NoDownloadUrlsError(f"No urls were provided to {type(self).__name__}: {self!r}.")
@memoized_property
def file_name(self):
return os.path.basename(self.download_path)
class NoDownloadUrlsError(ValueError): pass
class BinaryToolFetcher:
@classmethod
def _default_http_fetcher(cls):
"""Return a fetcher that resolves local file paths against the build root.
Currently this is used everywhere except in testing.
"""
return Fetcher(get_buildroot())
def __init__(self, bootstrap_dir, timeout_secs, fetcher=None, ignore_cached_download=False):
"""
:param str bootstrap_dir: The root directory where Pants downloads binaries to.
:param int timeout_secs: The number of seconds to wait before timing out on a request for some
url.
:param fetcher: object to fetch urls with, overridden in testing.
:type fetcher: :class:`pants.net.http.fetcher.Fetcher`
:param bool ignore_cached_download: whether to fetch a binary even if it already exists on disk.
"""
self._bootstrap_dir = bootstrap_dir
self._timeout_secs = timeout_secs
self._fetcher = fetcher or self._default_http_fetcher()
self._ignore_cached_download = ignore_cached_download
class BinaryNotFound(TaskError):
def __init__(self, name, accumulated_errors):
super(BinaryToolFetcher.BinaryNotFound, self).__init__(
'Failed to fetch {name} binary from any source: ({error_msgs})'
.format(name=name, error_msgs=', '.join(accumulated_errors)))
@contextmanager
def _select_binary_stream(self, name, urls):
"""Download a file from a list of urls, yielding a stream after downloading the file.
URLs are tried in order until they succeed.
:raises: :class:`BinaryToolFetcher.BinaryNotFound` if requests to all the given urls fail.
"""
downloaded_successfully = False
accumulated_errors = []
for url in OrderedSet(urls): # De-dup URLS: we only want to try each URL once.
logger.info('Attempting to fetch {name} binary from: {url} ...'.format(name=name, url=url))
try:
with temporary_file() as dest:
logger.debug("in BinaryToolFetcher: url={}, timeout_secs={}"
.format(url, self._timeout_secs))
self._fetcher.download(url,
listener=Fetcher.ProgressListener(),
path_or_fd=dest,
timeout_secs=self._timeout_secs)
logger.info('Fetched {name} binary from: {url} .'.format(name=name, url=url))
downloaded_successfully = True
dest.seek(0)
yield dest
break
except (IOError, Fetcher.Error, ValueError) as e:
accumulated_errors.append('Failed to fetch binary from {url}: {error}'
.format(url=url, error=e))
if not downloaded_successfully:
raise self.BinaryNotFound(name, accumulated_errors)
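# Usage sketch (urls are placeholders): the contextmanager yields the open temp file of the first
# url that downloads successfully, e.g.
#   with self._select_binary_stream('protoc', ['https://mirror-a.example/protoc',
#                                              'https://mirror-b.example/protoc']) as stream:
#       data = stream.read()
# Duplicate urls are only tried once (OrderedSet); if every url fails, BinaryNotFound is raised.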
def _do_fetch(self, download_path, file_name, urls):
with safe_concurrent_creation(download_path) as downloadpath:
with self._select_binary_stream(file_name, urls) as binary_tool_stream:
with safe_open(downloadpath, 'wb') as bootstrapped_binary:
shutil.copyfileobj(binary_tool_stream, bootstrapped_binary)
def fetch_binary(self, fetch_request):
"""Fulfill a binary fetch request."""
bootstrap_dir = os.path.realpath(os.path.expanduser(self._bootstrap_dir))
bootstrapped_binary_path = os.path.join(bootstrap_dir, fetch_request.download_path)
logger.debug("bootstrapped_binary_path: {}".format(bootstrapped_binary_path))
file_name = fetch_request.file_name
urls = fetch_request.urls
if self._ignore_cached_download or not os.path.exists(bootstrapped_binary_path):
self._do_fetch(bootstrapped_binary_path, file_name, urls)
logger.debug('Selected {binary} binary bootstrapped to: {path}'
.format(binary=file_name, path=bootstrapped_binary_path))
return bootstrapped_binary_path
class BinaryUtil:
"""Wraps utility methods for finding binary executables."""
class Factory(Subsystem):
"""
:API: public
"""
# N.B. `BinaryUtil` sources all of its options from bootstrap options, so that
# `BinaryUtil` instances can be created prior to `Subsystem` bootstrapping. So
# this options scope is unused, but required to remain a `Subsystem`.
options_scope = 'binaries'
@classmethod
def create(cls):
# NB: create is a class method to ~force binary fetch location to be global.
return cls._create_for_cls(BinaryUtil)
@classmethod
def _create_for_cls(cls, binary_util_cls):
# NB: We read global bootstrap options, but through our own scoped options instance.
options = cls.global_instance().get_options()
binary_tool_fetcher = BinaryToolFetcher(
bootstrap_dir=options.pants_bootstrapdir,
timeout_secs=options.binaries_fetch_timeout_secs)
return binary_util_cls(
baseurls=options.binaries_baseurls,
binary_tool_fetcher=binary_tool_fetcher,
path_by_id=options.binaries_path_by_id,
allow_external_binary_tool_downloads=options.allow_external_binary_tool_downloads)
class MissingMachineInfo(TaskError):
"""Indicates that pants was unable to map this machine's OS to a binary path prefix."""
pass
class NoBaseUrlsError(TaskError):
"""Indicates that no urls were specified in pants.ini."""
pass
class BinaryResolutionError(TaskError):
"""Raised to wrap other exceptions raised in the select() method to provide context."""
def __init__(self, binary_request, base_exception):
super(BinaryUtil.BinaryResolutionError, self).__init__(
"Error resolving binary request {}: {}".format(binary_request, base_exception),
base_exception)
def __init__(self, baseurls, binary_tool_fetcher, path_by_id=None,
allow_external_binary_tool_downloads=True, uname_func=None):
"""Creates a BinaryUtil with the given settings to define binary lookup behavior.
This constructor is primarily used for testing. Production code will usually initialize
an instance using the BinaryUtil.Factory.create() method.
:param baseurls: URL prefixes which represent repositories of binaries.
:type baseurls: list of string
:param int timeout_secs: Timeout in seconds for url reads.
:param string bootstrapdir: Directory to use for caching binaries. Uses this directory to
search for binaries in, or download binaries to if needed.
:param dict path_by_id: Additional mapping from (sysname, id) -> (os, arch) for tool
directory naming
:param bool allow_external_binary_tool_downloads: If False, use --binaries-baseurls to download
all binaries, regardless of whether an
external_url_generator field is provided.
:param function uname_func: method to use to emulate os.uname() in testing
"""
self._baseurls = baseurls
self._binary_tool_fetcher = binary_tool_fetcher
self._path_by_id = SUPPORTED_PLATFORM_NORMALIZED_NAMES.copy()
if path_by_id:
self._path_by_id.update((tuple(k), tuple(v)) for k, v in path_by_id.items())
self._allow_external_binary_tool_downloads = allow_external_binary_tool_downloads
self._uname_func = uname_func or os.uname
_ID_BY_OS = {
'darwin': lambda release, machine: ('darwin', release.split('.')[0]),
'linux': lambda release, machine: ('linux', machine),
}
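# Examples of the mapping above (uname values are illustrative):
#   sysname 'darwin', release '19.6.0', machine 'x86_64' -> ('darwin', '19')
#   sysname 'linux', release '5.4.0-42-generic', machine 'x86_64' -> ('linux', 'x86_64')
# The resulting (os, id) tuple is presumably then looked up in self._path_by_id to obtain the
# normalized (os, arch_or_version) pair described in the constructor's `path_by_id` docstring.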
# TODO: we create a HostPlatform in this class instead of in the constructor because we don't want
# to fail until a binary is requested. The HostPlatform should be a parameter that gets lazily
# resolved by the v2 engine.
@memoized_method
def _host_platform(self):
uname_result = self._uname_func()
sysname, _, release, _, machine = uname_result
os_id_key = sysname.lower()
try:
os_id_fun = self._ID_BY_OS[os_id_key]
os_id_tuple = os_id_fun(release, machine)
m.x1341 + m.x1729 + m.x1923 >= 0)
m.c1681 = Constraint(expr= m.x1342 + m.x1730 + m.x1924 >= 0)
m.c1682 = Constraint(expr= m.x1343 + m.x1731 + m.x1925 >= 0)
m.c1683 = Constraint(expr= m.x1344 + m.x1732 + m.x1926 >= 0)
m.c1684 = Constraint(expr= m.x1345 + m.x1733 + m.x1927 >= 0)
m.c1685 = Constraint(expr= m.x1346 + m.x1734 + m.x1928 >= 0)
m.c1686 = Constraint(expr= m.x1347 + m.x1735 + m.x1929 >= 0)
m.c1687 = Constraint(expr= m.x1348 + m.x1736 + m.x1930 >= 0)
m.c1688 = Constraint(expr= m.x1349 + m.x1737 + m.x1931 >= 0)
m.c1689 = Constraint(expr= m.x1350 + m.x1738 + m.x1932 >= 0)
m.c1690 = Constraint(expr= m.x1351 + m.x1739 + m.x1933 >= 0)
m.c1691 = Constraint(expr= m.x1352 + m.x1740 + m.x1934 >= 0)
m.c1692 = Constraint(expr= m.x1353 + m.x1741 + m.x1935 >= 0)
m.c1693 = Constraint(expr= m.x1354 + m.x1742 + m.x1936 >= 0)
m.c1694 = Constraint(expr= m.x1355 + m.x1743 + m.x1937 >= 0)
m.c1695 = Constraint(expr= m.x1356 + m.x1744 + m.x1938 >= 0)
m.c1696 = Constraint(expr= m.x1357 + m.x1745 + m.x1939 >= 0)
m.c1697 = Constraint(expr= m.x1358 + m.x1746 + m.x1940 >= 0)
m.c1698 = Constraint(expr= m.x1359 + m.x1747 + m.x1941 >= 0)
m.c1699 = Constraint(expr= m.x1360 + m.x1748 + m.x1942 >= 0)
m.c1700 = Constraint(expr= m.x1361 + m.x1749 + m.x1943 >= 0)
m.c1701 = Constraint(expr= m.x1362 + m.x1750 + m.x1944 >= 0)
m.c1702 = Constraint(expr= m.x1363 + m.x1751 + m.x1945 >= 0)
m.c1703 = Constraint(expr= m.x1364 + m.x1752 + m.x1946 >= 0)
m.c1704 = Constraint(expr= m.x1365 + m.x1753 + m.x1947 >= 0)
m.c1705 = Constraint(expr= m.x1366 + m.x1754 + m.x1948 >= 0)
m.c1706 = Constraint(expr= m.x1367 + m.x1755 + m.x1949 >= 0)
m.c1707 = Constraint(expr= m.x1368 + m.x1756 + m.x1950 >= 0)
m.c1708 = Constraint(expr= m.x1369 + m.x1757 + m.x1951 >= 0)
m.c1709 = Constraint(expr= m.x1370 + m.x1758 + m.x1952 >= 0)
m.c1710 = Constraint(expr= m.x1371 + m.x1759 + m.x1953 >= 0)
m.c1711 = Constraint(expr= m.x1372 + m.x1760 + m.x1954 >= 0)
m.c1712 = Constraint(expr= m.x1373 + m.x1761 + m.x1955 >= 0)
m.c1713 = Constraint(expr= m.x1374 + m.x1762 + m.x1956 >= 0)
m.c1714 = Constraint(expr= m.x1375 + m.x1763 + m.x1957 >= 0)
m.c1715 = Constraint(expr= m.x1376 + m.x1764 + m.x1958 >= 0)
m.c1716 = Constraint(expr= m.x1377 + m.x1765 + m.x1959 >= 0)
m.c1717 = Constraint(expr= m.x1378 + m.x1766 + m.x1960 >= 0)
m.c1718 = Constraint(expr= m.x1379 + m.x1767 + m.x1961 >= 0)
m.c1719 = Constraint(expr= m.x1380 + m.x1768 + m.x1962 >= 0)
m.c1720 = Constraint(expr= m.x1381 + m.x1769 + m.x1963 >= 0)
m.c1721 = Constraint(expr= m.x1382 + m.x1770 + m.x1964 >= 0)
m.c1722 = Constraint(expr= m.x1383 + m.x1771 + m.x1965 >= 0)
m.c1723 = Constraint(expr= m.x1384 + m.x1772 + m.x1966 >= 0)
m.c1724 = Constraint(expr= m.x1385 + m.x1773 + m.x1967 >= 0)
m.c1725 = Constraint(expr= m.x1386 + m.x1774 + m.x1968 >= 0)
m.c1726 = Constraint(expr= m.x1387 + m.x1775 + m.x1969 >= 0)
m.c1727 = Constraint(expr= m.x1388 + m.x1776 + m.x1970 >= 0)
m.c1728 = Constraint(expr= m.x1389 + m.x1777 + m.x1971 >= 0)
m.c1729 = Constraint(expr= m.x1390 + m.x1778 + m.x1972 >= 0)
m.c1730 = Constraint(expr= m.x1391 + m.x1779 + m.x1973 >= 0)
m.c1731 = Constraint(expr= m.x1392 + m.x1780 + m.x1974 >= 0)
m.c1732 = Constraint(expr= m.x1393 + m.x1781 + m.x1975 >= 0)
m.c1733 = Constraint(expr= m.x1394 + m.x1782 + m.x1976 >= 0)
m.c1734 = Constraint(expr= m.x1395 + m.x1783 + m.x1977 >= 0)
m.c1735 = Constraint(expr= m.x1396 + m.x1784 + m.x1978 >= 0)
m.c1736 = Constraint(expr= m.x1397 + m.x1785 + m.x1979 >= 0)
m.c1737 = Constraint(expr= m.x1398 + m.x1786 + m.x1980 >= 0)
m.c1738 = Constraint(expr= m.x1399 + m.x1787 + m.x1981 >= 0)
m.c1739 = Constraint(expr= m.x1400 + m.x1788 + m.x1982 >= 0)
m.c1740 = Constraint(expr= - 0.015*m.x1208 + 0.035*m.x1596 - 0.005*m.x1790 <= 0)
m.c1741 = Constraint(expr= - 0.015*m.x1209 + 0.035*m.x1597 - 0.005*m.x1791 <= 0)
m.c1742 = Constraint(expr= - 0.015*m.x1210 + 0.035*m.x1598 - 0.005*m.x1792 <= 0)
m.c1743 = Constraint(expr= - 0.015*m.x1211 + 0.035*m.x1599 - 0.005*m.x1793 <= 0)
m.c1744 = Constraint(expr= - 0.015*m.x1212 + 0.035*m.x1600 - 0.005*m.x1794 <= 0)
m.c1745 = Constraint(expr= - 0.015*m.x1213 + 0.035*m.x1601 - 0.005*m.x1795 <= 0)
m.c1746 = Constraint(expr= - 0.015*m.x1214 + 0.035*m.x1602 - 0.005*m.x1796 <= 0)
m.c1747 = Constraint(expr= - 0.015*m.x1215 + 0.035*m.x1603 - 0.005*m.x1797 <= 0)
m.c1748 = Constraint(expr= - 0.015*m.x1216 + 0.035*m.x1604 - 0.005*m.x1798 <= 0)
m.c1749 = Constraint(expr= - 0.015*m.x1217 + 0.035*m.x1605 - 0.005*m.x1799 <= 0)
m.c1750 = Constraint(expr= - 0.015*m.x1218 + 0.035*m.x1606 - 0.005*m.x1800 <= 0)
m.c1751 = Constraint(expr= - 0.015*m.x1219 + 0.035*m.x1607 - 0.005*m.x1801 <= 0)
m.c1752 = Constraint(expr= - 0.015*m.x1220 + 0.035*m.x1608 - 0.005*m.x1802 <= 0)
m.c1753 = Constraint(expr= - 0.015*m.x1221 + 0.035*m.x1609 - 0.005*m.x1803 <= 0)
m.c1754 = Constraint(expr= - 0.015*m.x1222 + 0.035*m.x1610 - 0.005*m.x1804 <= 0)
m.c1755 = Constraint(expr= - 0.015*m.x1223 + 0.035*m.x1611 - 0.005*m.x1805 <= 0)
m.c1756 = Constraint(expr= - 0.015*m.x1224 + 0.035*m.x1612 - 0.005*m.x1806 <= 0)
m.c1757 = Constraint(expr= - 0.015*m.x1225 + 0.035*m.x1613 - 0.005*m.x1807 <= 0)
m.c1758 = Constraint(expr= - 0.015*m.x1226 + 0.035*m.x1614 - 0.005*m.x1808 <= 0)
m.c1759 = Constraint(expr= - 0.015*m.x1227 + 0.035*m.x1615 - 0.005*m.x1809 <= 0)
m.c1760 = Constraint(expr= - 0.015*m.x1228 + 0.035*m.x1616 - 0.005*m.x1810 <= 0)
m.c1761 = Constraint(expr= - 0.015*m.x1229 + 0.035*m.x1617 - 0.005*m.x1811 <= 0)
m.c1762 = Constraint(expr= - 0.015*m.x1230 + 0.035*m.x1618 - 0.005*m.x1812 <= 0)
m.c1763 = Constraint(expr= - 0.015*m.x1231 + 0.035*m.x1619 - 0.005*m.x1813 <= 0)
m.c1764 = Constraint(expr= - 0.015*m.x1232 + 0.035*m.x1620 - 0.005*m.x1814 <= 0)
m.c1765 = Constraint(expr= - 0.015*m.x1233 + 0.035*m.x1621 - 0.005*m.x1815 <= 0)
m.c1766 = Constraint(expr= - 0.015*m.x1234 + 0.035*m.x1622 - 0.005*m.x1816 <= 0)
m.c1767 = Constraint(expr= - 0.015*m.x1235 + 0.035*m.x1623 - 0.005*m.x1817 <= 0)
m.c1768 = Constraint(expr= - 0.015*m.x1236 + 0.035*m.x1624 - 0.005*m.x1818 <= 0)
m.c1769 = Constraint(expr= - 0.015*m.x1237 + 0.035*m.x1625 - 0.005*m.x1819 <= 0)
m.c1770 = Constraint(expr= - 0.015*m.x1238 + 0.035*m.x1626 - 0.005*m.x1820 <= 0)
m.c1771 = Constraint(expr= - 0.015*m.x1239 + 0.035*m.x1627 - 0.005*m.x1821 <= 0)
m.c1772 = Constraint(expr= - 0.015*m.x1240 + 0.035*m.x1628 - 0.005*m.x1822 <= 0)
m.c1773 = Constraint(expr= - 0.015*m.x1241 + 0.035*m.x1629 - 0.005*m.x1823 <= 0)
m.c1774 = Constraint(expr= - 0.015*m.x1242 + 0.035*m.x1630 - 0.005*m.x1824 <= 0)
m.c1775 = Constraint(expr= - 0.015*m.x1243 + 0.035*m.x1631 - 0.005*m.x1825 <= 0)
m.c1776 = Constraint(expr= - 0.015*m.x1244 + 0.035*m.x1632 - 0.005*m.x1826 <= 0)
m.c1777 = Constraint(expr= - 0.015*m.x1245 + 0.035*m.x1633 - 0.005*m.x1827 <= 0)
m.c1778 = Constraint(expr= - 0.015*m.x1246 + 0.035*m.x1634 - 0.005*m.x1828 <= 0)
m.c1779 = Constraint(expr= - 0.015*m.x1247 + 0.035*m.x1635 - 0.005*m.x1829 <= 0)
m.c1780 = Constraint(expr= - 0.015*m.x1248 + 0.035*m.x1636 - 0.005*m.x1830 <= 0)
m.c1781 = Constraint(expr= - 0.015*m.x1249 + 0.035*m.x1637 - 0.005*m.x1831 <= 0)
m.c1782 = Constraint(expr= - 0.015*m.x1250 + 0.035*m.x1638 - 0.005*m.x1832 <= 0)
m.c1783 = Constraint(expr= - 0.015*m.x1251 + 0.035*m.x1639 - 0.005*m.x1833 <= 0)
m.c1784 = Constraint(expr= - 0.015*m.x1252 + 0.035*m.x1640 - 0.005*m.x1834 <= 0)
m.c1785 = Constraint(expr= - 0.015*m.x1253 + 0.035*m.x1641 - 0.005*m.x1835 <= 0)
m.c1786 = Constraint(expr= - 0.015*m.x1254 + 0.035*m.x1642 - 0.005*m.x1836 <= 0)
m.c1787 = Constraint(expr= - 0.015*m.x1255 + 0.035*m.x1643 - 0.005*m.x1837 <= 0)
m.c1788 = Constraint(expr= - 0.015*m.x1256 + 0.035*m.x1644 - 0.005*m.x1838 <= 0)
m.c1789 = Constraint(expr= - 0.015*m.x1257 + 0.035*m.x1645 - 0.005*m.x1839 <= 0)
m.c1790 = Constraint(expr= - 0.015*m.x1258 + 0.035*m.x1646 - 0.005*m.x1840 <= 0)
m.c1791 = Constraint(expr= - 0.015*m.x1259 + 0.035*m.x1647 - 0.005*m.x1841 <= 0)
m.c1792 = Constraint(expr= - 0.015*m.x1260 + 0.035*m.x1648 - 0.005*m.x1842 <= 0)
m.c1793 = Constraint(expr= - 0.015*m.x1261 + 0.035*m.x1649 - 0.005*m.x1843 <= 0)
m.c1794 = Constraint(expr= - 0.015*m.x1262 + 0.035*m.x1650 - 0.005*m.x1844 <= 0)
m.c1795 = Constraint(expr= - 0.015*m.x1263 + 0.035*m.x1651 - 0.005*m.x1845 <= 0)
m.c1796 = Constraint(expr= - 0.015*m.x1264 + 0.035*m.x1652 - 0.005*m.x1846 <= 0)
m.c1797 = Constraint(expr= - 0.015*m.x1265 + 0.035*m.x1653 - 0.005*m.x1847 <= 0)
m.c1798 = Constraint(expr= - 0.015*m.x1266 + 0.035*m.x1654 - 0.005*m.x1848 <= 0)
m.c1799 = Constraint(expr= - 0.015*m.x1267 + 0.035*m.x1655 - 0.005*m.x1849 <= 0)
m.c1800 = Constraint(expr= - 0.015*m.x1268 + 0.035*m.x1656 - 0.005*m.x1850 <= 0)
m.c1801 = Constraint(expr= - 0.015*m.x1269 + 0.035*m.x1657 - 0.005*m.x1851 <= 0)
m.c1802 = Constraint(expr= - 0.015*m.x1270 + 0.035*m.x1658 - 0.005*m.x1852 <= 0)
m.c1803 = Constraint(expr= - 0.015*m.x1271 + 0.035*m.x1659 - 0.005*m.x1853 <= 0)
m.c1804 = Constraint(expr= - 0.015*m.x1272 + 0.035*m.x1660 - 0.005*m.x1854 <= 0)
m.c1805 = Constraint(expr= - 0.015*m.x1273 + 0.035*m.x1661 - 0.005*m.x1855 <= 0)
m.c1806 = Constraint(expr= - 0.015*m.x1274 + 0.035*m.x1662 - 0.005*m.x1856 <= 0)
m.c1807 = Constraint(expr= - 0.015*m.x1275 + 0.035*m.x1663 - 0.005*m.x1857 <= 0)
m.c1808 = Constraint(expr= - 0.015*m.x1276 + 0.035*m.x1664 - 0.005*m.x1858 <= 0)
m.c1809 = Constraint(expr= - 0.015*m.x1277 + 0.035*m.x1665 - 0.005*m.x1859 <= 0)
m.c1810 = Constraint(expr= - 0.015*m.x1278 + 0.035*m.x1666 - 0.005*m.x1860 <= 0)
m.c1811 = Constraint(expr= - 0.015*m.x1279 + 0.035*m.x1667 - 0.005*m.x1861 <= 0)
m.c1812 = Constraint(expr= - 0.015*m.x1280 + 0.035*m.x1668 - 0.005*m.x1862 <= 0)
m.c1813 = Constraint(expr= - 0.015*m.x1281 + 0.035*m.x1669 - 0.005*m.x1863 <= 0)
m.c1814 = Constraint(expr= - 0.015*m.x1282 + 0.035*m.x1670 - 0.005*m.x1864 <= 0)
from __future__ import print_function
# logging
import logging
log = logging.getLogger(__name__)
# stdlib
import datetime
import json
import pdb
# pypi
import sqlalchemy
# localapp
from ... import lib
from .. import errors
from .. import utils
from ...model import utils as model_utils
from ...model import objects as model_objects
from .create import create__AcmeChallenge
from .create import create__CertificateRequest
from .create import create__PrivateKey
from .create import create__CertificateSigned
from .get import get__AcmeAccountProvider__by_server
from .get import get__AcmeAuthorization__by_authorization_url
from .get import get__AcmeChallenge__by_challenge_url
from .get import get__AcmeDnsServer__by_root_url
from .get import get__CertificateCA__by_pem_text
from .get import get__CertificateCAChain__by_pem_text
from .get import get__CertificateRequest__by_pem_text
from .get import get__Domain__by_name
# from .get import get__DomainBlocklisted__by_name
from .get import get__PrivateKey_CurrentDay_AcmeAccount
from .get import get__PrivateKey_CurrentDay_Global
from .get import get__PrivateKey_CurrentWeek_AcmeAccount
from .get import get__PrivateKey_CurrentWeek_Global
from .logger import log__OperationsEvent
from .logger import _log_object_event
from .update import update_AcmeAuthorization_from_payload
from .update import update_AcmeDnsServer__set_global_default
from .validate import validate_domain_names
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
def getcreate__AcmeAccount(
ctx,
key_pem=None,
le_meta_jsons=None,
le_pkey_jsons=None,
le_reg_jsons=None,
acme_account_provider_id=None,
acme_account_key_source_id=None,
contact=None,
terms_of_service=None,
account_url=None,
event_type="AcmeAccount__insert",
private_key_cycle_id=None,
private_key_technology_id=None,
):
"""
Gets or Creates AcmeAccount+AcmeAccountKey for LetsEncrypts' ACME server
returns:
tuple(`model.utils.AcmeAccount`, `is_created[Boolean]`)
:param ctx: (required) A :class:`lib.utils.ApiContext` instance
:param key_pem: (optional) an account key in PEM format.
if not provided, all of the following must be supplied:
* le_meta_jsons
* le_pkey_jsons
* le_reg_jsons
:param le_meta_jsons: (optional) data from certbot account key format
if not provided, `key_pem` must be supplied
:param le_pkey_jsons: (optional) data from certbot account key format
if not provided, `key_pem` must be supplied
:param le_reg_jsons: (optional) data from certbot account key format
if not provided, `key_pem` must be supplied
:param acme_account_provider_id: (optional) id corresponding to a :class:`model.objects.AcmeAccountProvider` server. required if `key_pem``; do not submit if `le_*` kwargs are provided.
:param acme_account_key_source_id: (required) id corresponding to a :class:`model.utils.AcmeAccountKeySource`
:param contact: (optional) contact info from acme server
:param terms_of_service: (optional)
:param account_url: (optional)
:param private_key_cycle_id: (required) id corresponding to a :class:`model.utils.PrivateKeyCycle`
:param private_key_technology_id: (required) id corresponding to a :class:`model.utils.KeyTechnology`
"""
if (key_pem) and any((le_meta_jsons, le_pkey_jsons, le_reg_jsons)):
raise ValueError(
"Must supply `key_pem` OR all of `le_meta_jsons, le_pkey_jsons, le_reg_jsons`."
)
if not (key_pem) and not all((le_meta_jsons, le_pkey_jsons, le_reg_jsons)):
raise ValueError(
"Must supply `key_pem` OR all of `le_meta_jsons, le_pkey_jsons, le_reg_jsons`."
)
# how are we submitting this data?
_strategy = None
_event_type_key = None
if event_type == "AcmeAccount__create":
_event_type_key = "AcmeAccountKey__create"
elif event_type == "AcmeAccount__insert":
_event_type_key = "AcmeAccountKey__insert"
else:
raise ValueError("invalid `event_type`")
# KeyCycle
if private_key_cycle_id is None:
private_key_cycle_id = model_utils.PrivateKeyCycle.from_string(
model_utils.PrivateKeyCycle._DEFAULT_AcmeAccount
)
if private_key_cycle_id not in model_utils.PrivateKeyCycle._mapping:
raise ValueError("invalid `private_key_cycle_id`")
# KeyTechnology
if private_key_technology_id is None:
private_key_technology_id = model_utils.KeyTechnology.from_string(
model_utils.KeyTechnology._DEFAULT_AcmeAccount
)
if (
private_key_technology_id
not in model_utils.KeyTechnology._options_AcmeAccount_private_key_technology_id
):
raise ValueError("invalid `private_key_technology_id`")
# scoping
_letsencrypt_data = None
# quickly audit args/derive info
if key_pem:
_strategy = "key_pem"
if not contact:
raise ValueError("must supply `contact` when submitting `key_pem`")
if not acme_account_provider_id:
raise ValueError(
"no `acme_account_provider_id`; required if PEM key is submitted."
)
dbAcmeAccountProvider = ctx.dbSession.query(
model_objects.AcmeAccountProvider
).get(acme_account_provider_id)
if not dbAcmeAccountProvider:
raise ValueError("invalid `acme_account_provider_id`.")
# cleanup these
key_pem = lib.cert_utils.cleanup_pem_text(key_pem)
key_pem_md5 = utils.md5_text(key_pem)
elif not key_pem:
_strategy = "LetsEncrypt payload"
if contact:
raise ValueError("do not submit `contact` with LetsEncrypt payload")
if acme_account_provider_id:
raise ValueError(
"do not submit `acme_account_provider_id` with LetsEncrypt payload"
)
if terms_of_service:
raise ValueError(
"do not submit `terms_of_service` with LetsEncrypt payload"
)
if account_url:
raise ValueError("do not submit `account_url` with LetsEncrypt payload")
"""
There is some useful data in here...
meta.json = creation_dt DATETIME; save as created
meta.json = creation_host STRING; save for info
regr.json = contact: email, save for info
regr.json = agreement: url, save for info
regr.json = key, save for info
regr.json = uri, save for info
regr.json = tos, save for info
"""
le_meta_json = json.loads(le_meta_jsons)
le_reg_json = json.loads(le_reg_jsons)
_letsencrypt_data = {"meta.json": le_meta_json, "regr.json": le_reg_json}
_letsencrypt_data = json.dumps(_letsencrypt_data, sort_keys=True)
try:
contact = le_reg_json["body"]["contact"][0]
if contact.startswith("mailto:"):
contact = contact[7:]
except Exception as exc:
log.critical("Could not parse `contact` from LetsEncrypt payload")
contact = "<EMAIL>"
terms_of_service = le_reg_json.get("terms_of_service")
account_url = le_reg_json.get("uri")
_account_server = lib.utils.url_to_server(account_url)
if not _account_server:
raise ValueError(
"could not detect an AcmeAccountProvider server from LetsEncrypt payload"
)
# derive the api server
dbAcmeAccountProvider = get__AcmeAccountProvider__by_server(
ctx, _account_server
)
if not dbAcmeAccountProvider:
raise ValueError(
"invalid AcmeAccountProvider detected from LetsEncrypt payload"
)
acme_account_provider_id = dbAcmeAccountProvider.id
key_pem = lib.cert_utils.convert_lejson_to_pem(le_pkey_jsons)
key_pem = lib.cert_utils.cleanup_pem_text(key_pem)
key_pem_md5 = utils.md5_text(key_pem)
# now proceed with a single path of logic
# check for an AcmeAccount and AcmeAccountKey separately
# SCENARIOS:
# 1. No AcmeAccount or AcmeAccountKey - CREATE BOTH
# 2. Existing AcmeAccount, new AcmeAccountKey - CREATE NONE. ERROR.
# 3. Existing AcmeAccountKey, new AcmeAccount - CREATE NONE. ERROR.
dbAcmeAccount = (
ctx.dbSession.query(model_objects.AcmeAccount)
.filter(
sqlalchemy.func.lower(model_objects.AcmeAccount.contact) == contact.lower(),
model_objects.AcmeAccount.acme_account_provider_id
== acme_account_provider_id,
)
.first()
)
dbAcmeAccountKey = (
ctx.dbSession.query(model_objects.AcmeAccountKey)
.filter(
model_objects.AcmeAccountKey.key_pem_md5 == key_pem_md5,
model_objects.AcmeAccountKey.key_pem == key_pem,
)
.first()
)
if dbAcmeAccount:
if dbAcmeAccountKey:
return (dbAcmeAccount, False)
else:
raise errors.ConflictingObject(
(
dbAcmeAccount,
"The submitted AcmeAccountProvider and contact info is already associated with another AcmeAccountKey.",
)
)
elif dbAcmeAccountKey:
raise errors.ConflictingObject(
(
dbAcmeAccountKey,
"The submited AcmeAccountKey is already associated with another AcmeAccount.",
)
)
# scoping
key_technology = None
acckey__spki_sha256 = None
_tmpfile = None
try:
if lib.cert_utils.NEEDS_TEMPFILES:
_tmpfile = lib.cert_utils.new_pem_tempfile(key_pem)
# validate + grab the technology
key_technology = lib.cert_utils.validate_key(
key_pem=key_pem,
key_pem_filepath=_tmpfile.name if _tmpfile else None,
)
# grab the spki
acckey__spki_sha256 = lib.cert_utils.parse_key__spki_sha256(
key_pem=key_pem,
key_pem_filepath=_tmpfile.name if _tmpfile else None,
)
finally:
if _tmpfile is not None:
_tmpfile.close()
dbOperationsEvent_AcmeAccount = log__OperationsEvent(
ctx,
model_utils.OperationsEventType.from_string(event_type),
)
dbOperationsEvent_AcmeAccountKey = log__OperationsEvent(
ctx,
model_utils.OperationsEventType.from_string(_event_type_key),
dbOperationsEvent_child_of=dbOperationsEvent_AcmeAccount,
)
# first, create the AcmeAccount
dbAcmeAccount = model_objects.AcmeAccount()
dbAcmeAccount.timestamp_created = ctx.timestamp
dbAcmeAccount.contact = contact
dbAcmeAccount.terms_of_service = terms_of_service
dbAcmeAccount.account_url = account_url
dbAcmeAccount.acme_account_provider_id = acme_account_provider_id
dbAcmeAccount.private_key_cycle_id = private_key_cycle_id
dbAcmeAccount.private_key_technology_id = private_key_technology_id
dbAcmeAccount.operations_event_id__created = dbOperationsEvent_AcmeAccount.id
ctx.dbSession.add(dbAcmeAccount)
ctx.dbSession.flush(objects=[dbAcmeAccount])
# next, create the AcmeAccountKey
dbAcmeAccountKey = model_objects.AcmeAccountKey()
dbAcmeAccountKey.is_active = True
dbAcmeAccountKey.acme_account_id = dbAcmeAccount.id
dbAcmeAccountKey.timestamp_created = ctx.timestamp
dbAcmeAccountKey.key_pem = key_pem
dbAcmeAccountKey.key_pem_md5 = key_pem_md5
dbAcmeAccountKey.key_technology_id = model_utils.KeyTechnology.from_string(
key_technology
)
dbAcmeAccountKey.spki_sha256 = acckey__spki_sha256
dbAcmeAccountKey.acme_account_key_source_id = acme_account_key_source_id
dbAcmeAccountKey.operations_event_id__created = dbOperationsEvent_AcmeAccountKey.id
ctx.dbSession.add(dbAcmeAccountKey)
ctx.dbSession.flush(objects=[dbAcmeAccountKey])
# recordkeeping - AcmeAccount
_epd__AcmeAccount = utils.new_event_payload_dict()
_epd__AcmeAccount["acme_account.id"] = dbAcmeAccount.id
dbOperationsEvent_AcmeAccount.set_event_payload(_epd__AcmeAccount)
ctx.dbSession.flush(objects=[dbOperationsEvent_AcmeAccount])
_log_object_event(
ctx,
dbOperationsEvent=dbOperationsEvent_AcmeAccount,
event_status_id=model_utils.OperationsObjectEventStatus.from_string(event_type),
dbAcmeAccount=dbAcmeAccount,
)
# recordkeeping - AcmeAccountKey
_epd__AcmeAccountKey = utils.new_event_payload_dict()
_epd__AcmeAccountKey["acme_account_key.id"] = dbAcmeAccountKey.id
dbOperationsEvent_AcmeAccountKey.set_event_payload(_epd__AcmeAccountKey)
ctx.dbSession.flush(objects=[dbOperationsEvent_AcmeAccountKey])
_log_object_event(
ctx,
dbOperationsEvent=dbOperationsEvent_AcmeAccountKey,
event_status_id=model_utils.OperationsObjectEventStatus.from_string(
_event_type_key
),
dbAcmeAccountKey=dbAcmeAccountKey,
)
return (dbAcmeAccount, True)
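# Usage sketch (arguments are placeholders; the ids must come from your model_utils enums):
#   (dbAcmeAccount, is_created) = getcreate__AcmeAccount(
#       ctx,
#       key_pem=pem_text,
#       contact="user@example.com",
#       acme_account_provider_id=provider_id,
#       acme_account_key_source_id=key_source_id,
#   )
# A second call with the same key/contact returns (existing_account, False); mixing an existing
# contact with a new key (or vice versa) raises errors.ConflictingObject, as handled above.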
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
def getcreate__AcmeAuthorizationUrl(
ctx, authorization_url=None, dbAcmeOrder=None, is_via_new_order=None
):
"""
used to create auth objects
:param ctx: (required) A :class:`lib.utils.ApiContext` instance
:param authorization_url: (required) the url of an RFC-8555 authorization
:param dbAcmeOrder: (required) The :class:`model.objects.AcmeOrder` associated with the discovered item
:param is_via_new_order: Boolean. Was this discovered during a new AcmeOrder? It should always be yes.
"""
log.info("getcreate__AcmeAuthorizationUrl(")
if not dbAcmeOrder:
raise ValueError("`dbAcmeOrder` is required")
is_created__AcmeAuthorization = False
is_created__AcmeAuthorization2Order = None
_needs_association = None
dbAcmeAuthorization = get__AcmeAuthorization__by_authorization_url(
ctx, authorization_url
)
if not dbAcmeAuthorization:
dbAcmeAuthorization = model_objects.AcmeAuthorization()
dbAcmeAuthorization.authorization_url = authorization_url
dbAcmeAuthorization.timestamp_created = ctx.timestamp
dbAcmeAuthorization.acme_status_authorization_id = (
model_utils.Acme_Status_Authorization.ID_DEFAULT
)
dbAcmeAuthorization.acme_order_id__created = dbAcmeOrder.id
ctx.dbSession.add(dbAcmeAuthorization)
ctx.dbSession.flush(objects=[dbAcmeAuthorization])
is_created__AcmeAuthorization = True
_needs_association = True
else:
# poop, this
# raise ValueError("this should be unique!")
_existingAssociation = (
ctx.dbSession.query(model_objects.AcmeOrder2AcmeAuthorization)
.filter(
model_objects.AcmeOrder2AcmeAuthorization.acme_order_id
== dbAcmeOrder.id,
model_objects.AcmeOrder2AcmeAuthorization.acme_authorization_id
== dbAcmeAuthorization.id,
)
.first()
)
if not _existingAssociation:
_needs_association = True
if _needs_association:
dbOrder2Auth = model_objects.AcmeOrder2AcmeAuthorization()
dbOrder2Auth.acme_order_id = dbAcmeOrder.id
dbOrder2Auth.acme_authorization_id = dbAcmeAuthorization.id
dbOrder2Auth.is_present_on_new_order = is_via_new_order
ctx.dbSession.add(dbOrder2Auth)
ctx.dbSession.flush(
objects=[
dbOrder2Auth,
]
)
is_created__AcmeAuthorization2Order = True
log.info(") getcreate__AcmeAuthorizationUrl")
return (
dbAcmeAuthorization,
is_created__AcmeAuthorization,
is_created__AcmeAuthorization2Order,
)
def getcreate__AcmeAuthorization(
ctx,
authorization_url=None,
authorization_payload=None,
authenticatedUser=None,
dbAcmeOrder=None,
transaction_commit=None,
is_via_new_order=None,
):
"""
:param ctx: (required) A :class:`lib.utils.ApiContext` instance
:param authorization_url: (required) the url of an RFC-8555 authorization
:param authorization_payload: (required) an RFC-8555 authorization payload
:param authenticatedUser: (optional) an object which contains an `accountkey_thumbprint` attribute
:param dbAcmeOrder: (required) The :class:`model.objects.AcmeOrder` associated with the discovered item
:param transaction_commit: (required) Boolean value. Required to indicate this persists to the database.
:param is_via_new_order: Boolean. Was this discovered during a new AcmeOrder? It should always be yes.
https://tools.ietf.org/html/rfc8555#section-7.1.4
Authorization Payload Contents:
identifier (required, object): The identifier that the account is authorized to represent.
type (required, string): The type of identifier.
# @Time : 2022/1/1
# @Author : <NAME>
# @email : <EMAIL>
import ipdb
import math
import torch
import numpy as np
import torch.nn.functional as F
from loguru import logger
from torch import nn
import os
from crslab.model.base import BaseModel
from crslab.model.utils.modules.info_nce_loss import info_nce_loss
from crslab.model.utils.functions import edge_to_pyg_format
from crslab.model.utils.modules.cross_entropy_loss import Handle_Croess_Entropy_Loss
from torch_geometric.nn import RGCNConv
from crslab.config import MODEL_PATH
from crslab.model.utils.modules.attention import SelfAttentionBatch, SelfAttentionSeq
from crslab.model.utils.modules.transformer import TransformerDecoder, TransformerEncoder
from crslab.model.utils.modules.transformer import MultiHeadAttention, TransformerFFN, _create_selfattn_mask, \
_normalize, \
create_position_codes
NEAR_INF_FP16 = 65504
NEAR_INF = 1e20
def neginf(dtype):
"""Returns a representable finite number near -inf for a dtype."""
if dtype is torch.float16:
return -NEAR_INF_FP16
else:
return -NEAR_INF
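# Examples: neginf(torch.float16) == -65504.0 (the largest finite fp16 magnitude), while any other
# dtype gets -1e20. It is used below to mask attention logits without overflowing half precision.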
class DBModel(nn.Module):
def __init__(self, opt, device, vocab, side_data):
super().__init__()
# vocab
self.vocab_size = vocab['vocab_size']
self.pad_token_idx = vocab['pad']
self.start_token_idx = vocab['start']
self.end_token_idx = vocab['end']
self.token_emb_dim = opt['token_emb_dim']
self.pretrained_embedding = side_data.get('embedding', None)
# kg
self.n_word = side_data['word_kg']['n_entity']
self.kg_name = opt['kg_name']
self.n_entity = side_data[self.kg_name]['n_entity']
self.pad_word_idx = vocab['pad_word']
self.pad_entity_idx = vocab['pad_entity']
entity_kg = side_data['entity_kg']
self.n_relation = entity_kg['n_relation']
entity_edges = entity_kg['edge']
self.entity_edge_idx, self.entity_edge_type = edge_to_pyg_format(entity_edges, 'RGCN')
self.entity_edge_idx = self.entity_edge_idx.to(device)
self.entity_edge_type = self.entity_edge_type.to(device)
word_edges = side_data['word_kg']['edge']
self.word_edges = edge_to_pyg_format(word_edges, 'GCN').to(device)
self.num_bases = opt['num_bases']
self.kg_emb_dim = opt['kg_emb_dim']
# transformer
self.n_heads = opt['n_heads']
self.n_layers = opt['n_layers']
self.ffn_size = opt['ffn_size']
self.dropout = opt['dropout']
self.attention_dropout = opt['attention_dropout']
self.relu_dropout = opt['relu_dropout']
self.learn_positional_embeddings = opt['learn_positional_embeddings']
self.embeddings_scale = opt['embeddings_scale']
self.reduction = opt['reduction']
self.n_positions = opt['n_positions']
self.response_truncate = opt.get('response_truncate', 20)
self._build_model()
def _build_model(self):
self._build_conversation_layer()
def _build_conversation_layer(self):
self.conv_entity_norm = nn.Linear(self.kg_emb_dim, self.ffn_size)
self.conv_entity_attn_norm = nn.Linear(self.kg_emb_dim, self.ffn_size)
def forward(self, batch, mode, kgModel):
entity_attn_rep, entity_representations = self.entity_model_kbrd(batch, mode, kgModel) # (bs, dim), (bs, n_context_entities, dim)
conv_entity_emb, conv_entity_reps = self.conv_entaity_model(entity_attn_rep, entity_representations) # (bs, ffn_size), (bs, n_context_entities, ffn_size)
return entity_attn_rep, entity_representations, conv_entity_emb, conv_entity_reps
# (bs, dim), (bs, n_context_entities, dim), (bs, ffn_size), (bs, n_context_entities, ffn_size)
def entity_model_kbrd(self, batch, mode, kgModel):
context_entities_kbrd = batch['context_entities_kbrd'] # [bs, nb_context_entities]
context_entities = batch['context_entities'] # (bs, entity_truncate)
user_rep, kg_embedding = kgModel._get_kg_user_rep(context_entities_kbrd) # (bs, dim), (n_entities, dim)
entity_representations = kg_embedding[context_entities] # (bs, entity_truncate, dim)
return user_rep, entity_representations
def conv_entaity_model(self, entity_attn_rep, entity_representations):
# encoder-decoder
conv_entity_emb = self.conv_entity_attn_norm(entity_attn_rep) # (bs, ffn_size)
conv_entity_reps = self.conv_entity_norm(entity_representations) # (bs, n_context_entities, ffn_size)
return conv_entity_emb, conv_entity_reps # (bs, ffn_size), (bs, n_context_entities, ffn_size)
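# Shape sketch for the two linear projections above (sizes are illustrative): with kg_emb_dim=128
# and ffn_size=300, entity_attn_rep (bs, 128) -> conv_entity_emb (bs, 300) via
# conv_entity_attn_norm, and entity_representations (bs, n_context_entities, 128) ->
# conv_entity_reps (bs, n_context_entities, 300), so the entity features match the decoder's
# ffn_size.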
class CoarseReviewModelForDecoder(nn.Module):
def __init__(self, opt, device, vocab, side_data):
super().__init__()
# vocab
self.vocab_size = vocab['vocab_size']
self.pad_token_idx = vocab['pad']
self.start_token_idx = vocab['start']
self.end_token_idx = vocab['end']
self.token_emb_dim = opt['token_emb_dim']
self.pretrained_embedding = side_data.get('embedding', None)
# kg
self.n_word = side_data['word_kg']['n_entity']
self.kg_name = opt['kg_name']
self.n_entity = side_data[self.kg_name]['n_entity']
self.pad_word_idx = vocab['pad_word']
self.pad_entity_idx = vocab['pad_entity']
entity_kg = side_data['entity_kg']
self.n_relation = entity_kg['n_relation']
entity_edges = entity_kg['edge']
self.entity_edge_idx, self.entity_edge_type = edge_to_pyg_format(entity_edges, 'RGCN')
self.entity_edge_idx = self.entity_edge_idx.to(device)
self.entity_edge_type = self.entity_edge_type.to(device)
word_edges = side_data['word_kg']['edge']
self.word_edges = edge_to_pyg_format(word_edges, 'GCN').to(device)
self.num_bases = opt['num_bases']
self.kg_emb_dim = opt['kg_emb_dim']
# transformer
self.n_heads = opt['n_heads']
self.n_layers = opt['n_layers']
self.ffn_size = opt['ffn_size']
self.dropout = opt['dropout']
self.attention_dropout = opt['attention_dropout']
self.relu_dropout = opt['relu_dropout']
self.learn_positional_embeddings = opt['learn_positional_embeddings']
self.embeddings_scale = opt['embeddings_scale']
self.reduction = opt['reduction']
self.n_positions = opt['n_positions']
self.response_truncate = opt.get('response_truncate', 20)
self._build_model()
def _build_model(self):
self._build_conv_concept_encoder()
def _build_conv_concept_encoder(self):
self.conv_review_attn_norm = nn.Linear(self.token_emb_dim, self.ffn_size)
self.conv_review_norm = nn.Linear(self.token_emb_dim, self.ffn_size)
def forward(self, batch, mode, reviewModel):
review_user_rep, review_reps, review_pad_reps, review_padding_mask, review_token_reps, review_token_padding_mask = \
self.model_review(batch, mode, reviewModel)
# (bs, dim), (~bs*nb_review, dim), (bs, n_review, dim), (bs, nb_review), (bs, n_review, seq_len3, dim), (bs, n_review, seq_len3)
conv_review_emb, conv_review_reps = self.conv_review_model(review_user_rep, review_pad_reps)
# (bs, ffn_size), (bs, n_review, ffn_size)
return conv_review_emb, conv_review_reps, review_padding_mask, review_token_reps, review_token_padding_mask
# (bs, ffn_size), (bs, n_review, dim), (bs, ffn_size), (bs, n_review, seq_len3, dim), (bs, n_review, seq_len3)
def model_review(self, batch, mode, reviewModel):
review_user_rep, review_reps, review_state = reviewModel.get_review_user_rep_and_review_rep(batch, mode)
# (bs, dim), (~bs*nb_review, dim), (~bs*nb_review, seq_len3, dim)
review_pad_reps, review_padding_mask, review_token_reps, review_token_padding_mask = reviewModel.get_review_sample_reps(
batch, mode, review_reps, review_state)
# (bs, nb_review, dim), (bs, nb_review), (bs, n_review, seq_len3, dim), (bs, n_review, seq_len3)
return review_user_rep, review_reps, review_pad_reps, \
review_padding_mask, review_token_reps, review_token_padding_mask
# (bs, dim), (~bs*nb_review, dim), (bs, n_review, dim),
# (bs, nb_review), (bs, n_review, seq_len3, dim), (bs, n_review, seq_len3)
def conv_review_model(self, review_attn_rep, review_representations):
# (bs, dim), (bs, n_review, dim), (bs, nb_review, dim), (bs, seq_len3, dim)
conv_review_emb = self.conv_review_attn_norm(review_attn_rep) # (bs, ffn_size)
conv_review_reps = self.conv_review_norm(review_representations) # (bs, n_context_words, ffn_size)
return conv_review_emb, conv_review_reps
# (bs, ffn_size), (bs, n_review, ffn_size)
class FineReviewDecoderAttention(nn.Module):
def __init__(self, n_heads, dim, dropout=.0):
super(FineReviewDecoderAttention, self).__init__()
self.n_heads = n_heads
self.dim = dim
self.dim_per_head = self.dim // self.n_heads
self.attn_dropout = nn.Dropout(p=dropout) # --attention-dropout
self.q_lin = nn.Linear(dim, dim)
self.k_lin = nn.Linear(dim, dim)
self.v_lin = nn.Linear(dim, dim)
# TODO: merge for the initialization step
nn.init.xavier_normal_(self.q_lin.weight)
nn.init.xavier_normal_(self.k_lin.weight)
nn.init.xavier_normal_(self.v_lin.weight)
# and set biases to 0
self.out_lin = nn.Linear(dim, dim)
nn.init.xavier_normal_(self.out_lin.weight)
self.fine_review_level_self_atten = SelfAttentionSeq(self.dim_per_head, self.dim_per_head)
def forward(self, query, key=None, value=None, mask=None, mask2=None):
# query: (bs, query_len, ffn_size)
# key/value: (bs, nb_review, key_len, ffn_size)
# mask: (bs, nb_review)
# mask2: (bs, nb_review, key_len)
query, key, value = self.set_q_k_v(query, key, value)
bs, query_len, n_heads, dim_per_head, scale, key_len, dim, nb_review = self.set_hyper_parameters(query, key, mask, mask2)
q, k, v = self.prepare_heads(query, key, value, bs, n_heads, dim_per_head)
# q: (bs*n_heads, query_len, dim_per_head)
# k/v: (bs*n_heads, nb_review, key_len, dim_per_head)
out = self.compute_func(q, k, v, query, mask, mask2, bs, query_len, n_heads, dim_per_head, scale, key_len, dim, nb_review) # (bs, query_len, dim)
return out
def set_q_k_v(self, query, key, value):
return query, key, value
def set_hyper_parameters(self, query, key, mask, mask2):
bs, query_len, dim = query.size()
assert dim == self.dim, \
f'Dimensions do not match: {dim} query vs {self.dim} configured'
assert mask is not None, 'Mask is None, please specify a mask'
assert mask2 is not None, 'Mask is None, please specify a mask'
n_heads = self.n_heads
dim_per_head = dim // n_heads
scale = math.sqrt(dim_per_head)
_, nb_review, key_len, dim = key.size()
return bs, query_len, n_heads, dim_per_head, scale, key_len, dim, nb_review
def prepare_heads(self, query, key, value, bs, n_heads, dim_per_head):
# query: (bs, query_len, ffn_size)
# key/value: (bs, nb_review, key_len, ffn_size)
q = self.prepare_head_q(self.q_lin(query), bs, n_heads, dim_per_head) # (bs*n_heads, query_len, dim_per_head)
k = self.prepare_head_kv(self.k_lin(key), bs, n_heads, dim_per_head) # (bs*n_heads, nb_review, key_len, dim_per_head)
v = self.prepare_head_kv(self.v_lin(value), bs, n_heads, dim_per_head) # (bs*n_heads, nb_review, key_len, dim_per_head)
return q, k, v
def prepare_head_q(self, tensor, bs, n_heads, dim_per_head):
# input is (bs, query_len, ffn_size)
# output is (bs*n_heads, query_len, dim_per_head)
bs, seq_len, _ = tensor.size()
tensor = tensor.view(bs, tensor.size(1), n_heads, dim_per_head)
tensor = tensor.transpose(1, 2).contiguous().view(
bs*n_heads,
seq_len,
dim_per_head
)
return tensor
def prepare_head_kv(self, tensor, bs, n_heads, dim_per_head):
# input is (bs, nb_review, key_len, ffn_size)
# output is (bs*n_heads, nb_review, key_len, dim_per_head)
bs, nb_review, seq_len, _ = tensor.size()
tensor = tensor.view(bs, nb_review, seq_len, n_heads, dim_per_head)
tensor = tensor.transpose(1, 3).transpose(2, 3).contiguous().view(
bs*n_heads,
nb_review,
seq_len,
dim_per_head
)
return tensor
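# Shape walk-through for prepare_head_kv (illustrative sizes): with bs=2, nb_review=3, key_len=5,
# n_heads=4, dim_per_head=8 (dim=32):
#   (2, 3, 5, 32) -> view -> (2, 3, 5, 4, 8) -> transpose(1, 3) -> (2, 4, 5, 3, 8)
#   -> transpose(2, 3) -> (2, 4, 3, 5, 8) -> view -> (8, 3, 5, 8)
# i.e. (bs*n_heads, nb_review, key_len, dim_per_head), which is what compute_func expects.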
def compute_func(self, q, k, v, query, mask, mask2, bs, query_len, n_heads, dim_per_head, scale, key_len, dim, nb_review):
# q: (bs*n_heads, query_len, dim_per_head)
# k/v: (bs*n_heads, nb_review, key_len, dim_per_head)
# mask: (bs, nb_review)
# mask2: (bs, nb_review, key_len)
attentioned = self.token_level_atten(q, k, v, query, mask, mask2, bs, query_len, n_heads, dim_per_head, scale, key_len, dim, nb_review)
# (bs*n_heads*nb_review, query_len, dim_per_head)
attentioned = self.review_level_atten(attentioned, mask, bs, n_heads, nb_review, query_len, dim_per_head) # (bs*n_heads, query_len, dim_per_head)
attentioned = (
attentioned.type_as(query)
.view(bs, n_heads, query_len, dim_per_head)
.transpose(1, 2).contiguous()
.view(bs, query_len, dim)
)
# (bs, query_len, dim)
out = self.out_lin(attentioned) # (bs, query_len, dim)
return out # (bs, query_len, dim)
def get_attn_mask(self, mask, bs, key_len, n_heads, query_len):
# Mask is [bs, key_len] (selfattn) or [bs, key_len, key_len] (enc attn)
attn_mask = (
(mask == 0)
.view(bs, 1, -1, key_len)
.repeat(1, n_heads, 1, 1)
.expand(bs, n_heads, query_len, key_len)
.view(bs*n_heads, query_len, key_len)
)
return attn_mask # (bs*n_heads, query_len, key_len)
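# Shape walk-through for get_attn_mask (illustrative sizes): with bs=2, n_heads=4, query_len=7,
# key_len=5, a (2, 5) mask becomes (mask == 0) -> view (2, 1, 1, 5) -> repeat (2, 4, 1, 5)
# -> expand (2, 4, 7, 5) -> view (8, 7, 5); True marks padded key positions that are filled with
# neginf before the softmax.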
def token_level_atten(self, q, k, v, query, mask, mask2, bs, query_len, n_heads, dim_per_head, scale, key_len, dim, nb_review):
# q: (bs*n_heads, query_len, dim_per_head)
# k/v: (bs*n_heads, nb_review, key_len, dim_per_head)
# query: (bs, seq_len2, ffn_size)
# mask: (bs, nb_review)
# mask2: (bs, nb_review, key_len)
q = (q.unsqueeze(1)
.expand(bs*n_heads, nb_review, query_len, dim_per_head)
.reshape(bs*n_heads*nb_review, query_len, dim_per_head))
k = k.view(bs*n_heads*nb_review, key_len, dim_per_head)
dot_prod = q.div_(scale).bmm(k.transpose(-2, -1)) # (bs*n_heads*nb_review, query_len, key_len)
attn_mask = self.get_token_level_attn_mask(mask2, bs, key_len, n_heads, query_len, nb_review)
# (bs*n_heads*nb_review, query_len, key_len)
assert attn_mask.shape == dot_prod.shape
dot_prod.masked_fill_(attn_mask, neginf(dot_prod.dtype)) # (bs*n_heads*nb_review, query_len, key_len)
attn_weights = F.softmax(dot_prod, dim=-1).type_as(query) # (bs*n_heads*nb_review, query_len, key_len)
attn_weights = self.attn_dropout(attn_weights) # --attention-dropout
v = v.view(bs*n_heads*nb_review, key_len, dim_per_head)
attentioned = attn_weights.bmm(v) # (bs*n_heads*nb_review, query_len, dim_per_head)
return attentioned  # (bs*n_heads*nb_review, query_len, dim_per_head)
#!/usr/bin/env python
# MIT License
# Copyright (c) 2018 <NAME> (@disloops)
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
__author__ = '<NAME>'
__version__ = '1.0.1'
__email__ = '<EMAIL>'
import os
import re
import sys
import time
import glob
import urllib
import socket
import argparse
# compile the latest attacks
def get_last_attacks():
last_attacks = []
# used to skip private IP ranges
internal_ip_re = re.compile('^(?:10|127|172\.(?:1[6-9]|2[0-9]|3[01])|192\.168)\..*')
# PSAD IP log files sorted by date
files = sorted(glob.iglob('/var/log/psad/*/*_email_alert'), key=os.path.getmtime, reverse=True)
# imperfect science of extracting info from WHOIS data
country_re = re.compile('^country:', flags=re.IGNORECASE)
# get the directories named after valid IPs only
for file in files:
if not os.path.isdir(file):
try:
file_dir = os.path.dirname(file)
socket.inet_pton(socket.AF_INET, os.path.basename(file_dir))
if not internal_ip_re.match(os.path.basename(file_dir)):
last_seen = time.ctime(os.path.getmtime(file))
first_seen = '?'
IP = '?'
country = '?'
ports = '?'
whois_file = file_dir + "/" + os.path.basename(file_dir) + "_whois"
with open(whois_file, 'r') as f:
for line in f:
if country == '?' and country_re.match(line):
country = line.split(None, 1)[1][:2]
with open(file, 'r') as f:
for line in f.readlines():
if first_seen == '?' and "overall scan start:" in line.lower():
first_seen = line.split(": ", 1)[1]
if IP == '?' and "source:" in line.lower():
IP = line.split(": ", 1)[1]
if ports == '?' and "scanned tcp ports" in line.lower():
ports = re.search('\[(.+?):', line).group(1)
attacker_dict = {
"last_seen": last_seen,
"first_seen": first_seen,
"IP": IP,
"country": country,
"ports": ports
}
last_attacks.append(attacker_dict)
except:
pass
if len(last_attacks) == 20:
break
return last_attacks
# parse the top attackers file
def get_top_attackers():
top_attackers = []
raw_attackers = None
# imperfect science of extracting info from WHOIS data
country_re = re.compile('^country:', flags=re.IGNORECASE)
host_re = re.compile('^(org-name|organi|owner:|netname)', flags=re.IGNORECASE)
# used to skip private IP ranges
internal_ip_re = re.compile('^(?:10|127|172\.(?:1[6-9]|2[0-9]|3[01])|192\.168)\..*')
# using this while loop to get around instances where no data comes back
# possibly due to the file being in use and locked
while not raw_attackers:
with open('/var/log/psad/top_attackers', 'r') as f:
raw_attackers = f.readlines()
for attacker in raw_attackers:
if attacker[0].isdigit():
try:
IP = attacker.split()[0]
hits = attacker.split()[2]
country = '?'
host = ['?']
last_seen = '?'
path = '/var/log/psad/' + IP
whois_file = path + '/' + IP + '_whois'
if not internal_ip_re.match(IP):
if os.path.isfile(whois_file):
with open(whois_file, 'r') as f:
for line in f:
if country == '?' and country_re.match(line):
country = line.split(None, 1)[1][:2]
if ' ' in line and host_re.match(line):
host.append(line.split(None, 1)[1])
for file in os.listdir(path):
if file.endswith('_email_alert'):
file_path = os.path.join(path, file)
last_seen = time.ctime(os.path.getmtime(file_path))
attacker_dict = {
"last_seen": last_seen,
"IP": IP,
"hits": hits,
"country": country,
"host": max(host, key=len)
}
top_attackers.append(attacker_dict)
except:
pass
return top_attackers
# parse the top signatures file
def get_top_signatures():
top_signatures = []
with open('/var/log/psad/top_sigs', 'r') as f:
raw_signatures = f.readlines()
for signature in raw_signatures:
if signature[0].isdigit():
try:
sid = signature.split()[0]
sig = ' '.join(re.findall(r'"(.*?)"', signature))
hits = ' '.join(signature.split('"')[-1:]).split()[0]
sig_dict = {
"SID": sid,
"sig": sig,
"hits": hits
}
top_signatures.append(sig_dict)
except:
pass
return top_signatures
# parse the top ports file
def get_top_ports():
top_ports = []
with open('/var/log/psad/top_ports', 'r') as f:
raw_ports = f.readlines()
for port in raw_ports:
if port[0].isalpha():
try:
if port.split()[0] == 'tcp':
port_num = port.split()[1]
hits = port.split()[2]
port_dict = {
"port_num": port_num,
"hits": hits
}
top_ports.append(port_dict)
except:
pass
return top_ports
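# Parsing sketch (line format assumed from the checks above): a /var/log/psad/top_ports line such
# as "tcp 23 1500 ..." would yield {"port_num": "23", "hits": "1500"}; udp lines and any header
# lines not starting with a letter are skipped.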
# create last attacks HTML table
def get_last_attacks_html(last_attacks):
last_attacks_html = '<table class="psadTable" id="lastAttacksTable">'
last_attacks_html += '<tr class="psadTableRow">'
last_attacks_html += '<td class="psadTableHead">Last Seen</td>'
last_attacks_html += '<td class="psadTableHead">First Seen</td>'
last_attacks_html += '<td class="psadTableHead">IP Address</td>'
last_attacks_html += '<td class="psadTableHead">Country</td>'
last_attacks_html += '<td class="psadTableHead">Ports Targeted</td>'
last_attacks_html += '<td class="psadTableHead">OSINT Links</td>'
last_attacks_html += '</tr>'
for attack in last_attacks:
IP_link = '<a href="https://www.whois.com/whois/' + attack['IP'] + '" target="_blank">'
IP_link += attack['IP'] + '</a>'
OSINT_links = '[<a href="https://dnslytics.com/ip/' + attack['IP'] + '" target="_blank">1</a>] '
OSINT_links += '[<a href="https://www.virustotal.com/gui/ip-address/' + attack['IP'] + '" target="_blank">2</a>] '
OSINT_links += '[<a href="https://www.abuseipdb.com/check/' + attack['IP'] + '" target="_blank">3</a>]'
last_attacks_html += '<tr class="psadTableRow">'
last_attacks_html += '<td class="psadTableCell">' + attack['last_seen'] + '</td>'
last_attacks_html += '<td class="psadTableCell">' + attack['first_seen'] + '</td>'
last_attacks_html += '<td class="psadTableCell">' + IP_link + '</td>'
last_attacks_html += '<td class="psadTableCell">' + attack['country'] + '</td>'
last_attacks_html += '<td class="psadTableCell">' + attack['ports'] + '</td>'
last_attacks_html += '<td class="psadTableCell">' + OSINT_links + '</td>'
last_attacks_html += '</tr>'
last_attacks_html += '</table>'
return last_attacks_html
# create top attackers HTML table
def get_attackers_html(top_attackers):
top_attackers = sorted(top_attackers, key=lambda x: int(x['hits']), reverse=True)
rows = 50 if len(top_attackers) > 50 else len(top_attackers)
top_attackers_html = '<table class="psadTable" id="attackerTable">'
top_attackers_html += '<tr class="psadTableRow">'
top_attackers_html += '<td class="psadTableHead">Last Seen</td>'
top_attackers_html += '<td class="psadTableHead">Hits</td>'
top_attackers_html += '<td class="psadTableHead">IP Address</td>'
top_attackers_html += '<td class="psadTableHead">Country</td>'
top_attackers_html += '<td class="psadTableHead">Hosting Provider</td>'
top_attackers_html += '<td class="psadTableHead">OSINT Links</td>'
top_attackers_html += '</tr>'
for attacker in top_attackers[:rows]:
IP_link = '<a href="https://www.whois.com/whois/' + attacker['IP'] + '" target="_blank">'
IP_link += attacker['IP'] + '</a>'
OSINT_links = '[<a href="https://dnslytics.com/ip/' + attacker['IP'] + '" target="_blank">1</a>] '
OSINT_links += '[<a href="https://www.virustotal.com/gui/ip-address/' + attacker['IP'] + '" target="_blank">2</a>] '
OSINT_links += '[<a href="https://www.abuseipdb.com/check/' + attacker['IP'] + '" target="_blank">3</a>]'
top_attackers_html += '<tr class="psadTableRow">'
top_attackers_html += '<td class="psadTableCell">' + attacker['last_seen'] + '</td>'
top_attackers_html += '<td class="psadTableCell">' + attacker['hits'] + '</td>'
top_attackers_html += '<td class="psadTableCell">' + IP_link + '</td>'
top_attackers_html += '<td class="psadTableCell">' + attacker['country'].upper() + '</td>'
top_attackers_html += '<td class="psadTableCellLeft">' + attacker['host'] + '</td>'
top_attackers_html += '<td class="psadTableCell">' + OSINT_links + '</td>'
top_attackers_html += '</tr>'
top_attackers_html += '</table>'
return top_attackers_html
def get_signatures_html(top_signatures):
top_signatures_html = '<table class="psadTable" id="signatureTable">'
top_signatures_html += '<tr class="psadTableRow">'
top_signatures_html += '<td class="psadTableHead">Hits</td>'
top_signatures_html += '<td class="psadTableHead">SID</td>'
top_signatures_html += '<td class="psadTableHead">Signature</td>'
top_signatures_html += '</tr>'
for signature in top_signatures:
sig_link = '<a href="https://www.google.com/search?q=' + urllib.quote_plus(signature['sig'])
sig_link += '" target="_blank">' + signature['sig'] + '</a>'
top_signatures_html += '<tr class="psadTableRow">'
top_signatures_html += '<td class="psadTableCell">' + signature['hits'] + '</td>'
top_signatures_html += '<td class="psadTableCell">' + signature['SID'] + '</td>'
top_signatures_html += '<td class="psadTableCellLeft">' + sig_link + '</td>'
top_signatures_html += '</tr>'
top_signatures_html += '</table>'
return top_signatures_html
def get_ports_html(top_ports):
rows = 50 if len(top_ports) > 50 else len(top_ports)
top_ports_html = '<div id="portTableDiv">'
top_ports_html += '<table class="psadTable" id="portTable01">'
top_ports_html += '<tr class="psadTableRow">'
top_ports_html += '<td class="psadTableHead">Port</td>'
top_ports_html += '<td class="psadTableHead">Hits</td>'
top_ports_html += '</tr>'
for port in top_ports[:rows//2]:
port_link = '<a href="https://www.speedguide.net/port.php?port=' + port['port_num']
port_link += '" target="_blank">' + port['port_num'] + '</a>'
top_ports_html += '<tr class="psadTableRow">'
top_ports_html += '<td class="psadTableCell">' + port_link + '</td>'
top_ports_html += '<td class="psadTableCell">' + port['hits'] + '</td>'
top_ports_html += '</tr>'
top_ports_html += '</table>'
top_ports_html += '<table class="psadTable" id="portTable02">'
top_ports_html += '<tr class="psadTableRow">'
top_ports_html += '<td class="psadTableHead">Port</td>'
top_ports_html += '<td class="psadTableHead">Hits</td>'
top_ports_html += '</tr>'
for port in top_ports[rows//2:rows]:
port_link = '<a href="https://www.speedguide.net/port.php?port=' + port['port_num']
port_link += '" target="_blank">' + port['port_num'] + '</a>'
top_ports_html += '<tr class="psadTableRow">'
top_ports_html += '<td class="psadTableCell">' + port_link + '</td>'
top_ports_html += '<td class="psadTableCell">' + port['hits'] + '</td>'
top_ports_html += '</tr>'
top_ports_html += '</table>'
top_ports_html += '</div>'
return top_ports_html
def get_css():
css = """
a:link, a:active, a:visited {
color: #55FF33;
text-decoration: none;
}
a:hover {
font-weight: bold;
}
body {
background-color: #000000;
color: #CCCCCC;
font-family: Helvetica, Arial, Sans-Serif;
font-size: small;
}
#lastAttacksTable, #attackerTable, #signatureTable {
margin: 0px auto 40px auto;
}
#portTable01, #portTable02 {
margin: 0px 25px 40px 25px;
}
#portTableDiv {
align-items: center;
display: flex;
justify-content: center;
}
.headerBlock{
color: #DDDDDD;
margin: 50px auto 40px auto;
text-align: center;
max-width: 85%;
}
.footerBlock{
color: #DDDDDD;
margin: 0px auto 50px auto;
text-align: center;
max-width: 85%;
}
.psadTable, .psadTableRow, .psadTableHead, .psadTableCell {
border: 1px solid #666666;
}
.psadTable {
border-collapse: collapse;
display: none;
max-width: 85%;
text-align: center;
}
.psadTableHead {
font-weight: | |
aggregate_to_count
```
    This command transforms the output of an aggregation query into the output of the count command.
```
{ "group": { "name": "group_name" }, "count": 123 } --> group_name: 123
```
Expected group key: `name`
Expected function key: `count`
    It is usually not invoked directly but is added automatically when a `query | count` CLI command is executed.
"""
@property
def name(self) -> str:
return "aggregate_to_count"
def info(self) -> str:
return "Convert the output of an aggregate query to the result of count."
def parse(self, arg: Optional[str] = None, ctx: CLIContext = EmptyContext, **kwargs: Any) -> CLIFlow:
name_path = ["group", "name"]
count_path = ["count"]
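        # These paths address elements shaped like the docstring example, e.g.
        # {"group": {"name": "group_name"}, "count": 123} is rendered as "group_name: 123".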
async def to_count(in_stream: AsyncIterator[JsonElement]) -> AsyncIterator[JsonElement]:
null_value = 0
total = 0
in_streamer = in_stream if isinstance(in_stream, Stream) else stream.iterate(in_stream)
async with in_streamer.stream() as streamer:
async for elem in streamer:
name = value_in_path(elem, name_path)
count = value_in_path_get(elem, count_path, 0)
if name is None:
null_value = count
else:
total += count
yield f"{name}: {count}"
tm, tu = (total, null_value) if arg else (null_value + total, 0)
yield f"total matched: {tm}"
yield f"total unmatched: {tu}"
return CLIFlow(to_count)
class ExecuteQueryCommand(CLICommand, InternalPart):
"""
```shell
execute_query [--include-edges] [--explain] <query>
```
This command is usually not invoked directly - use `query` instead.
## Options
- `--include-edges`: Return edges in addition to nodes.
- `--explain`: Instead of executing the query, analyze its cost.
## Parameters
- `query` [mandatory]: The query to execute.
## Related
- `query` - query the graph.
"""
@property
def name(self) -> str:
return "execute_query"
def info(self) -> str:
return "Query the database and pass the results to the output stream."
@staticmethod
def parse_known(arg: str) -> Tuple[Dict[str, Any], str]:
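        # Illustrative example (flag names taken from the parser below):
        #   parse_known("--explain is(volume)") -> ({"explain": True}, "is(volume)")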
parser = NoExitArgumentParser()
parser.add_argument("--include-edges", dest="include-edges", default=None, action="store_true")
parser.add_argument("--explain", dest="explain", default=None, action="store_true")
parsed, rest = parser.parse_known_args(arg.split(maxsplit=2))
return {k: v for k, v in vars(parsed).items() if v is not None}, " ".join(rest)
@staticmethod
def argument_string(args: Dict[str, Any]) -> str:
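        # Illustrative example: {"explain": True} -> "--explain " (note the trailing space)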
result = []
for key, value in args.items():
if value is True:
result.append(f"--{key}")
return " ".join(result) + " " if result else ""
def parse(self, arg: Optional[str] = None, ctx: CLIContext = EmptyContext, **kwargs: Any) -> CLISource:
# db name is coming from the env
graph_name = ctx.env["graph"]
if not arg:
raise CLIParseError("query command needs a query to execute, but nothing was given!")
# Read all argument flags / options
parsed, rest = self.parse_known(arg)
with_edges: bool = parsed.get("include-edges", False)
explain: bool = parsed.get("explain", False)
# all templates are expanded at this point, so we can call the parser directly.
query = parse_query(rest, **ctx.env)
db = self.dependencies.db_access.get_graph_db(graph_name)
async def load_query_model() -> QueryModel:
model = await self.dependencies.model_handler.load_model()
query_model = QueryModel(query, model)
await db.to_query(query_model) # only here to validate the query itself (can throw)
return query_model
async def explain_query() -> AsyncIterator[Json]:
query_model = await load_query_model()
explanation = await db.explain(query_model, with_edges)
yield to_js(explanation)
async def prepare() -> Tuple[Optional[int], AsyncIterator[Json]]:
query_model = await load_query_model()
count = ctx.env.get("count", "true").lower() != "false"
timeout = if_set(ctx.env.get("query_timeout"), duration)
context = (
await db.query_aggregation(query_model)
if query.aggregate
else (
await db.query_graph_gen(query_model, with_count=count, timeout=timeout)
if with_edges
else await db.query_list(query_model, with_count=count, timeout=timeout)
)
)
cursor = context.cursor
# since we can not use context boundaries here,
# an explicit iterator is used, which makes sure to close the connection.
async def iterate_and_close() -> AsyncIterator[Json]:
try:
async for e in cursor:
yield e
finally:
cursor.close()
return cursor.count(), iterate_and_close()
return CLISource.single(explain_query) if explain else CLISource(prepare)
class EnvCommand(CLICommand):
"""
```shell
env
```
Emits the provided environment.
This is useful to inspect the environment given to the CLI interpreter.
## Examples
```shell
# The resotoshell will set the graph, section and a session id.
> env
graph: resoto
section: reported
resoto_session_id: SHQF9MBUEJ
# Environment variables can be defined directly on the command line
> section=desired foo=bla env
graph: resoto
section: desired
resoto_session_id: SHQF9MBUEJ
foo: bla
```
"""
@property
def name(self) -> str:
return "env"
def info(self) -> str:
return "Retrieve the environment and pass it to the output stream."
def parse(self, arg: Optional[str] = None, ctx: CLIContext = EmptyContext, **kwargs: Any) -> CLISource:
return CLISource.with_count(lambda: stream.just(ctx.env), len(ctx.env))
class ChunkCommand(CLICommand):
"""
```shell
chunk [num]
```
    Take up to <num> elements from the input stream, put them into a list and send a stream of lists downstream.
    The last chunk may be smaller than the defined chunk size.
## Parameters
- `num` [optional, defaults to 100] - the number of elements to put into one chunk.
## Examples
```shell
    # Chunk an array by putting up to 2 elements into one chunk and send it downstream.
> json [1,2,3,4,5] | chunk 2
[1, 2]
[3, 4]
[5]
    # Chunk an array by putting up to 3 elements into one chunk and send it downstream.
> json [1,2,3,4,5] | chunk 3
[1, 2, 3]
[4, 5]
# The output of query can be chunked as well. The result is omitted here for brevity.
> query is(volume) limit 5 | chunk 3
```
## Related
- `flatten` - for flattening a chunked input stream.
"""
@property
def name(self) -> str:
return "chunk"
def info(self) -> str:
return "Chunk incoming elements in batches."
def parse(self, arg: Optional[str] = None, ctx: CLIContext = EmptyContext, **kwargs: Any) -> CLIFlow:
size = int(arg) if arg else 100
return CLIFlow(lambda in_stream: stream.chunks(in_stream, size))
class FlattenCommand(CLICommand):
"""
```shell
flatten
```
    Take array elements from the input stream and emit them to the output stream one after the other,
while preserving the original order.
## Examples:
```shell
# In case elements of the stream are arrays, they will be flattened.
> json [[1, 2], 3, [4, 5]] | flatten
1
2
3
4
5
# An already flat stream of elements is not changed.
> json [1, 2, 3, 4, 5] | flatten
1
2
3
4
5
```
## Related
- `chunk` to put incoming elements into chunks
"""
@property
def name(self) -> str:
return "flatten"
def info(self) -> str:
return "Take incoming batches of elements and flattens them to a stream of single elements."
def parse(self, arg: Optional[str] = None, ctx: CLIContext = EmptyContext, **kwargs: Any) -> CLIFlow:
def iterate(it: Any) -> Stream:
return stream.iterate(it) if is_async_iterable(it) or isinstance(it, Iterable) else stream.just(it)
return CLIFlow(lambda in_stream: stream.flatmap(in_stream, iterate))
class UniqCommand(CLICommand):
"""
```shell
uniq
```
    All elements flowing through the uniq command are analyzed and all duplicates get removed.
    Note: for json objects a hash value is computed that is independent of the order of properties,
    so that `{"a": 1, "b": 2}` is declared equal to `{"b": 2, "a": 1}`.
## Examples
```shell
# Multiple occurrences of the same element are sorted out.
> json [1, 2, 3, 1, 2, 3] | uniq
1
2
3
# The same logic applies to json objects
> json [{"a": 1, "b": 2}, {"b": 2, "a": 1}] | uniq
a: 1
b: 2
```
"""
@property
def name(self) -> str:
return "uniq"
def info(self) -> str:
return "Remove all duplicated objects from the stream."
def parse(self, arg: Optional[str] = None, ctx: CLIContext = EmptyContext, **kwargs: Any) -> CLIFlow:
visited = set()
def hashed(item: Any) -> Hashable:
if isinstance(item, dict):
return json.dumps(item, sort_keys=True)
else:
raise CLIParseError(f"{self.name} can not make {item}:{type(item)} uniq")
def has_not_seen(item: Any) -> bool:
item = item if isinstance(item, Hashable) else hashed(item)
if item in visited:
return False
else:
visited.add(item)
return True
return CLIFlow(lambda in_stream: stream.filter(in_stream, has_not_seen))
class JqCommand(CLICommand, OutputTransformer):
"""
```
jq <filter>
```
    Use the well-known jq JSON processor to manipulate incoming json.
Every element from the incoming stream is passed to jq.
See: https://stedolan.github.io/jq/ for a list of possible jq filter definitions.
## Parameter
- `filter` the filter definition to create a jq program.
## Examples
```shell
# Query ec2 instances and extract only the name property
    > query is(aws_ec2_instance) limit 2 | jq .name
build-node-1
prod-23
# Query ec2 instances and create a new json object for each entry with name and owner.
> | |
# Copyright (C) 2018 by
# <NAME> <<EMAIL>>
# <NAME> <<EMAIL>>
# All rights reserved.
# MIT license.
#
"""
Karstnet
========
Karstnet is a Python package for the analysis of karstic networks.
License
-------
Released under the MIT license:
Copyright (C) 2018 Karstnet Developers
<NAME> <<EMAIL>>
<NAME> <<EMAIL>>
"""
# ---- External library imports
import numpy as np
import networkx as nx
import scipy.stats as st
import matplotlib.pyplot as plt
import sqlite3
import mplstereonet
# *************************************************************
# -------------------Test function--------------------------
# *************************************************************
def test_kn():
print("test ok")
print("relance ok")
# **************************************************************
#
# -------------------KGraph class-------------------------------
#
# **************************************************************
class KGraph:
"""
Class dedicated to the construction and manipulation of graphs
representing Karstic network.
Attributes:
-----------
    - graph : the complete graph of the karstic network.
        Each station is a node, each line of sight is an edge.
        It is a Networkx graph object, with the length of edges
        as attributes.
    - graph_simpl : the simplified graph of the karstic network,
        i.e. all nodes of degree 2 are removed except in loops
        (2 types of loops) (cf. Collon et al., 2017, Geomorphology).
        graph_simpl is a Networkx graph object, with the length of
        edges as attributes.
    - pos2d : a dictionary of the nodes with their 2d coordinates
        as values [x, y]
    - pos3d : a dictionary of the nodes with their 3d coordinates
        as values [x, y, z]
    - properties : a dictionary of nodes with their properties, in
        case karst data are imported with additional information
    - branches : the list of branches
    - br_lengths : the list of branch lengths
    - br_tort : the list of branch tortuosities
    - list_simpl_edges : the list of simple edges, necessary to export
        the graph to plines
"""
def __init__(self, edges, coordinates, properties={}):
"""
Creates a Kgraph from nodes and edges.
Parameters
----------
edges : list
a list of edges
        coordinates : dictionary
            coordinates of the nodes, keys are node names
        properties : dictionary
            optional properties associated with the nodes
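        Examples
        --------
        A minimal, illustrative construction (node ids and coordinates are
        arbitrary and purely for demonstration):

        >>> edges = [(0, 1), (1, 2), (2, 3)]
        >>> coords = {0: [0., 0., 0.], 1: [1., 0., 0.],
        ...           2: [1., 1., 0.], 3: [1., 1., -1.]}
        >>> myKGraph = KGraph(edges, coords)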
"""
# Initialization of the complete graph - use functions of kgraph_fc
# module
self.graph = nx.Graph()
self.graph.add_edges_from(edges)
self.pos2d, self.pos3d = _pos_initialization(coordinates)
print(
"\n This network contains ",
nx.number_connected_components(
self.graph),
" connected components")
        # Compute edge lengths from pos3d to properly initialize the graph
self._set_graph_lengths()
self._set_graph_orientations()
self.properties = properties
# Compute branches of the graph
# self.branches is necessary to export graph to plines
self.branches, self.br_lengths, self.br_tort = self._getallbranches()
# Construct the simplified graph
# self.list_simpl_edges is necessary to export graph to plines
self.list_simpl_edges, self.graph_simpl = self._simplify_graph()
# **********************************
# Plots
# **********************************
def plot2(self, graph_type=0, figsize=(6, 3)):
"""
Plot a 2D view of the karstic network
Parameters
----------
graph_type : int
if 0 displays the complete graph,
if 1 displays the simplified graph
figsize : tuple
contains the (x,y) dimension of the figure
Examples
--------
>>> myKGraph.plot2()
        >>> myKGraph.plot2(1, figsize=(10, 5))
"""
if (graph_type == 0):
self._plot2(self.graph, figsize)
plt.title('original')
plt.show()
else:
self._plot2(self.graph_simpl, figsize)
plt.title('simplified')
plt.show()
return
def plot3(self, graph_type=0, zrotation=30, xyrotation=0, figsize=(6, 3)):
"""
Plot a 3D view of the karstic network.
Parameters
----------
graph_type : int
if 0 displays the complete graph,
if 1 displays the simplified graph
zrotation : float
angle in degrees between horizontal plane and viewpoint
xyrotation : float
            angle in degrees for the horizontal rotation of the viewpoint.
If xyrotation=0, the view is from the South toward North.
figsize : tuple
contains the (x,y) dimension of the figure
Examples
--------
>>> myKGraph.plot3()
>>> myKGraph.plot3(1, zrotation=20, xyrotation=-30)
"""
# 3D plot
if (graph_type == 0):
self._plot3(self.graph, zrotation, xyrotation, figsize)
plt.title('original')
plt.show()
else:
self._plot3(self.graph_simpl, zrotation, xyrotation, figsize)
plt.title('simplified')
plt.show()
return
def plot(self):
"""
Simple 2D map of the original and simplified karstic network.
        The two maps are plotted side by side. This function allows one
        to quickly check the data after an import, for example.
Examples
---------
>>> myKGraph.plot()
"""
plt.figure(figsize=(12, 5))
plt.subplot(121)
nx.draw_networkx(self.graph,
pos=self.pos2d,
with_labels=False,
node_size=0.1)
plt.xlabel('x')
plt.ylabel('y')
plt.subplot(122)
nx.draw_networkx(self.graph_simpl,
pos=self.pos2d,
with_labels=False,
node_size=0.1)
plt.xlabel('x')
plt.ylabel('y')
plt.show()
def plotxz(self):
"""
        Simple x-z view of the original and simplified karstic network.
        The two maps are plotted side by side. This function allows one
        to quickly check the data after an import, for example.
        Examples
        ---------
        >>> myKGraph.plotxz()
"""
plt.figure(figsize=(12, 5))
plt.subplot(121)
nx.draw_networkx(self.graph,
pos=self.pos2d,
with_labels=False,
node_size=0.1)
plt.xlabel('x')
plt.ylabel('z')
plt.subplot(122)
nx.draw_networkx(self.graph_simpl,
pos=self.pos2d,
with_labels=False,
node_size=0.1)
plt.xlabel('x')
plt.ylabel('z')
plt.show()
# Member function written by <NAME> 2019/11/25
    # Modified by <NAME> (Aug. 2020) to weight density map by lengths
def stereo(self, weighted=True):
"""
Density map of orientations and rose diagram of the karstic network.
        The two stereonets are plotted side by side.
        By default, the density map and rose diagram are weighted by lengths.
Examples
---------
>>> myKGraph.stereo()
>>> myKGraph.stereo(weighted = False)
"""
# create an np.array of azimuths and dips
# and lengths (projected(2d) and real (3d))
azim = np.array(
list((nx.get_edge_attributes(self.graph, 'azimuth')).values()))
azim_not_Nan = azim[~np.isnan(azim)]
bearing_dc = np.nan_to_num(azim)
plunge_dc = np.array(
list((nx.get_edge_attributes(self.graph, 'dip')).values()))
if (weighted):
l2d = np.array(
list((nx.get_edge_attributes(self.graph,
'length2d')).values()))
l3d = np.array(
list((nx.get_edge_attributes(self.graph, 'length')).values()))
l2d_not_Nan = l2d[~np.isnan(azim)]
else:
l2d_not_Nan = None
l3d = None
# Pauline: not sure it is required (?)
# + not sure it is normal that isnan is parameterised by azim for l2d
# import matplotlib as mpl
        # Making colormap, based on Collon et al. (2017):
        # we saturate the colormap at 40%
from matplotlib import cm
from matplotlib.colors import ListedColormap
nbint = 15
levels = np.linspace(0, 1, nbint)
rainbow = cm.get_cmap('rainbow')
newcolors = rainbow(levels)
white = np.array([256 / 256, 256 / 256, 256 / 256, 1])
newcolors[:1, :] = white
newcmp = ListedColormap(newcolors)
        # Density map - allows considering almost vertical conduits
        # The data are weighted by the real length of the segments (l3d)
# Use the traditional "Schmidt" method : 1% count
fig = plt.figure(figsize=(16, 8))
dc = fig.add_subplot(121, projection='stereonet')
cdc = dc.density_contourf(plunge_dc,
bearing_dc,
measurement='lines',
method='schmidt',
levels=np.arange(0, nbint * 2 + 1, 2),
extend='both',
cmap=newcmp,
weights=l3d)
dc.set_title('Density map of orientations [Schmidt\'s projection]',
y=1.10,
fontsize=15)
dc.grid()
# colorbar of the density map
cbar = plt.colorbar(cdc,
fraction=0.046,
pad=0.04,
orientation='horizontal')
cbar.set_label('[%]')
# Rose diagram
# The azimuth data are weighted by the projected length (l2d)
bin_edges = np.arange(-5, 366, 10)
number_of_strikes, bin_edges = np.histogram(azim_not_Nan,
bin_edges,
weights=l2d_not_Nan)
number_of_strikes[0] += number_of_strikes[-1]
half = np.sum(np.split(number_of_strikes[:-1], 2), 0)
two_halves = np.concatenate([half, half])
rs = fig.add_subplot(122, projection='polar')
rs.bar(np.deg2rad(np.arange(0, 360, 10)),
two_halves,
width=np.deg2rad(10),
bottom=0.0,
color='.8',
edgecolor='k')
rs.set_theta_zero_location('N')
rs.set_theta_direction(-1)
rs.set_thetagrids(np.arange(0, 360, 10), labels=np.arange(0, 360, 10))
rs.set_title('Rose Diagram of the cave survey segments',
y=1.10,
fontsize=15)
fig.tight_layout()
plt.show()
# end modif PV 2019/11/25
# *************************************************************
# ----------------------- Export ------------------------------
# *************************************************************
def to_pline(self, basename):
"""
Export the complete graph to Pline (GOCAD ASCII object)
        Manages the colocated vertices indicated by the mention "ATOM" in
        the ASCII file.
        `Warning`: this version does not export (for the time being) the
        properties on the nodes.
Parameters
----------
basename : string
the base name of the file name used for the Pline file,
the name contains no extension, it will be added by the function
Examples
--------
The following command saves the file "MyKarst_exported.pl" :
>>> myKGraph.to_pline("MyKarst")
"""
# For a complete graph the list of Ilines corresponds to self.branches
self._ilines_to_pline(self.branches, basename)
return
def simpleGraph_to_pline(self, basename):
"""
Export the simplified graph to pline (GOCAD ASCII object)
        Manages the colocated vertices indicated by the mention "ATOM"
        in the ASCII file.
        Warning: this version does not export (for the time being) the
        properties on the nodes.
Parameters
----------
basename : string
the base name of the file name used for the Pline file,
the name contains no extension, it will be added by the function
Examples
--------
The following command saves the file "MyKarst_simpl_exported.pl" :
>>> myKGraph.to_pline("MyKarst")
"""
        # For a simplified graph the list of Ilines corresponds to
        # the edges of the simple graph.
        # One iline is created for each edge, which is not exactly a branch
        # but does not prevent exportation.
# to clearly explicit it is the simplified graph
| |
# corehq/apps/domain/forms.py
import datetime
import io
import json
import logging
import uuid
from django import forms
from django.conf import settings
from django.contrib import messages
from django.contrib.auth import get_user_model
from django.contrib.auth.forms import SetPasswordForm
from django.contrib.auth.hashers import UNUSABLE_PASSWORD_PREFIX
from django.contrib.auth.tokens import default_token_generator
from django.contrib.sites.shortcuts import get_current_site
from django.core.exceptions import ValidationError
from django.db import transaction
from django.forms.fields import (
BooleanField,
CharField,
ChoiceField,
Field,
ImageField,
IntegerField,
SelectMultiple,
)
from django.forms.widgets import Select
from django.template.loader import render_to_string
from django.urls import reverse
from django.utils.encoding import force_bytes, smart_str
from django.utils.functional import cached_property
from django.utils.http import urlsafe_base64_encode
from django.utils.safestring import mark_safe
from django.utils.translation import ugettext as _
from django.utils.translation import ugettext_lazy, ugettext_noop
from captcha.fields import CaptchaField
from crispy_forms import bootstrap as twbscrispy
from crispy_forms import layout as crispy
from crispy_forms.bootstrap import StrictButton
from crispy_forms.layout import Submit
from dateutil.relativedelta import relativedelta
from django_countries.data import COUNTRIES
from memoized import memoized
from PIL import Image
from pyzxcvbn import zxcvbn
from corehq import privileges
from corehq.apps.accounting.exceptions import SubscriptionRenewalError
from corehq.apps.accounting.models import (
BillingAccount,
BillingAccountType,
BillingContactInfo,
CreditAdjustmentReason,
CreditLine,
Currency,
DefaultProductPlan,
EntryPoint,
FeatureType,
FundingSource,
PreOrPostPay,
ProBonoStatus,
SoftwarePlanEdition,
Subscription,
SubscriptionAdjustmentMethod,
SubscriptionType,
)
from corehq.apps.accounting.utils import (
cancel_future_subscriptions,
domain_has_privilege,
get_account_name_from_default_name,
is_downgrade,
log_accounting_error,
)
from corehq.apps.app_manager.const import (
AMPLIFIES_NO,
AMPLIFIES_NOT_SET,
AMPLIFIES_YES,
)
from corehq.apps.app_manager.dbaccessors import (
get_app,
get_apps_in_domain,
get_brief_apps_in_domain,
get_version_build_id,
)
from corehq.apps.app_manager.exceptions import BuildNotFoundException
from corehq.apps.app_manager.models import (
Application,
AppReleaseByLocation,
LatestEnabledBuildProfiles,
RemoteApp,
)
from corehq.apps.callcenter.views import (
CallCenterOptionsController,
CallCenterOwnerOptionsView,
)
from corehq.apps.domain.auth import get_active_users_by_email
from corehq.apps.domain.extension_points import (
custom_clean_password,
has_custom_clean_password,
)
from corehq.apps.domain.models import (
AREA_CHOICES,
BUSINESS_UNITS,
DATA_DICT,
LOGO_ATTACHMENT,
SUB_AREA_CHOICES,
TransferDomainRequest,
)
from corehq.apps.hqwebapp import crispy as hqcrispy
from corehq.apps.hqwebapp.crispy import HQFormHelper
from corehq.apps.hqwebapp.fields import MultiCharField
from corehq.apps.hqwebapp.tasks import send_html_email_async
from corehq.apps.hqwebapp.widgets import BootstrapCheckboxInput, Select2Ajax, GeoCoderInput
from corehq.apps.sms.phonenumbers_helper import parse_phone_number
from corehq.apps.users.models import CouchUser, WebUser
from corehq.apps.users.permissions import can_manage_releases
from corehq.toggles import HIPAA_COMPLIANCE_CHECKBOX, MOBILE_UCR, \
SECURE_SESSION_TIMEOUT, MONITOR_2FA_CHANGES
from corehq.util.timezones.fields import TimeZoneField
from corehq.util.timezones.forms import TimeZoneChoiceField
# used to resize uploaded custom logos, aspect ratio is preserved
LOGO_SIZE = (211, 32)
def tf_choices(true_txt, false_txt):
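    # e.g. tf_choices("Yes", "No") -> (('false', 'No'), ('true', 'Yes'))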
return (('false', false_txt), ('true', true_txt))
class ProjectSettingsForm(forms.Form):
"""
Form for updating a user's project settings
"""
global_timezone = forms.CharField(
initial="UTC",
label="Project Timezone",
widget=forms.HiddenInput
)
override_global_tz = forms.BooleanField(
initial=False,
required=False,
label="",
widget=BootstrapCheckboxInput(
inline_label=ugettext_lazy("Override project's timezone setting just for me.")
)
)
user_timezone = TimeZoneChoiceField(
label="My Timezone",
initial=global_timezone.initial
)
def __init__(self, *args, **kwargs):
super(ProjectSettingsForm, self).__init__(*args, **kwargs)
self.helper = hqcrispy.HQFormHelper(self)
self.helper.form_id = 'my-project-settings-form'
self.helper.all().wrap_together(crispy.Fieldset, _('My Timezone'))
self.helper.layout = crispy.Layout(
crispy.Fieldset(
_('My Timezone'),
crispy.Field('global_timezone', css_class='input-xlarge'),
twbscrispy.PrependedText(
'override_global_tz',
'',
id='override_global_tz',
data_bind='checked: override_tz, event: {change: updateForm}'
),
crispy.Div(
crispy.Field(
'user_timezone',
css_class='input-xlarge',
data_bind='event: {change: updateForm}'
),
data_bind='visible: override_tz'
)
),
hqcrispy.FormActions(
StrictButton(
_("Update My Settings"),
type="submit",
css_id="update-proj-settings",
css_class='btn-primary',
data_bind="disable: disableUpdateSettings"
)
)
)
def clean_user_timezone(self):
data = self.cleaned_data['user_timezone']
timezone_field = TimeZoneField()
timezone_field.run_validators(data)
return smart_str(data)
def save(self, user, domain):
timezone = self.cleaned_data['global_timezone']
override = self.cleaned_data['override_global_tz']
if override:
timezone = self.cleaned_data['user_timezone']
dm = user.get_domain_membership(domain)
dm.timezone = timezone
dm.override_global_tz = override
user.save()
return True
class TransferDomainFormErrors(object):
USER_DNE = ugettext_lazy('The user being transferred to does not exist')
DOMAIN_MISMATCH = ugettext_lazy('Mismatch in domains when confirming')
class TransferDomainForm(forms.ModelForm):
class Meta(object):
model = TransferDomainRequest
fields = ['domain', 'to_username']
def __init__(self, domain, from_username, *args, **kwargs):
super(TransferDomainForm, self).__init__(*args, **kwargs)
self.current_domain = domain
self.from_username = from_username
self.fields['domain'].label = _('Type the name of the project to confirm')
self.fields['to_username'].label = _('New owner\'s CommCare username')
self.helper = hqcrispy.HQFormHelper()
self.helper.layout = crispy.Layout(
'domain',
'to_username',
StrictButton(
_("Transfer Project"),
type="submit",
css_class='btn-danger',
)
)
def save(self, commit=True):
instance = super(TransferDomainForm, self).save(commit=False)
instance.from_username = self.from_username
if commit:
instance.save()
return instance
def clean_domain(self):
domain = self.cleaned_data['domain']
if domain != self.current_domain:
raise forms.ValidationError(TransferDomainFormErrors.DOMAIN_MISMATCH)
return domain
def clean_to_username(self):
username = self.cleaned_data['to_username']
if not WebUser.get_by_username(username):
raise forms.ValidationError(TransferDomainFormErrors.USER_DNE)
return username
class SubAreaMixin(object):
def clean_sub_area(self):
area = self.cleaned_data['area']
sub_area = self.cleaned_data['sub_area']
if sub_area:
if not area:
raise forms.ValidationError(_('You may not specify a sub area when the project has no specified '
'area'))
else:
return None
sub_areas = []
for a in DATA_DICT["area"]:
if a["name"] == area:
sub_areas = a["sub_areas"]
if sub_area not in sub_areas:
raise forms.ValidationError(_('This is not a valid sub-area for the area %s') % area)
return sub_area
USE_LOCATION_CHOICE = "user_location"
USE_PARENT_LOCATION_CHOICE = 'user_parent_location'
class CallCenterOwnerWidget(Select2Ajax):
def set_domain(self, domain):
self.domain = domain
def render(self, name, value, attrs=None, renderer=None):
value_to_render = CallCenterOptionsController.convert_owner_id_to_select_choice(value, self.domain)
return super(CallCenterOwnerWidget, self).render(name, value_to_render, attrs=attrs, renderer=renderer)
class DomainGlobalSettingsForm(forms.Form):
LOCATION_CHOICES = [USE_LOCATION_CHOICE, USE_PARENT_LOCATION_CHOICE]
CASES_AND_FIXTURES_CHOICE = "cases_and_fixtures"
CASES_ONLY_CHOICE = "cases_only"
hr_name = forms.CharField(
label=ugettext_lazy("Project Name"),
help_text=ugettext_lazy("This name will appear in the upper right corner "
"when you are in this project. Changing this name "
"will not change the URL of the project.")
)
project_description = forms.CharField(
label=ugettext_lazy("Project Description"),
widget=forms.Textarea,
required=False,
max_length=1000,
help_text=ugettext_lazy(
"Please provide a short description of your project (Max 1000 characters)."
)
)
default_timezone = TimeZoneChoiceField(label=ugettext_noop("Default Timezone"), initial="UTC")
default_geocoder_location = Field(
widget=GeoCoderInput(attrs={'placeholder': ugettext_lazy('Select a location')}),
label=ugettext_noop("Default project location"),
required=False,
help_text=ugettext_lazy("Please select your project's default location.")
)
logo = ImageField(
label=ugettext_lazy("Custom Logo"),
required=False,
help_text=ugettext_lazy("Upload a custom image to display instead of the "
"CommCare HQ logo. It will be automatically resized to "
"a height of 32 pixels.")
)
delete_logo = BooleanField(
label=ugettext_lazy("Delete Logo"),
required=False,
help_text=ugettext_lazy("Delete your custom logo and use the standard one.")
)
call_center_enabled = BooleanField(
label=ugettext_lazy("Call Center Application"),
required=False,
help_text=ugettext_lazy("Call Center mode is a CommCareHQ module for managing "
"call center workflows. It is still under "
"active development. Do not enable for your domain unless "
"you're actively piloting it.")
)
call_center_type = ChoiceField(
label=ugettext_lazy("Call Center Type"),
initial=CASES_AND_FIXTURES_CHOICE,
choices=[
(CASES_AND_FIXTURES_CHOICE, "Create cases and indicators"),
(CASES_ONLY_CHOICE, "Create just cases"),
],
help_text=ugettext_lazy(
"""
If "Create cases and indicators" is selected, each user will have a case associated with it,
and fixtures will be synced containing indicators about each user. If "Create just cases"
is selected, the fixtures will not be created.
"""
),
required=False,
)
call_center_case_owner = Field(
widget=CallCenterOwnerWidget(attrs={'placeholder': ugettext_lazy('Select an Owner...')}),
label=ugettext_lazy("Call Center Case Owner"),
required=False,
help_text=ugettext_lazy("Select the person who will be listed as the owner "
"of all cases created for call center users.")
)
call_center_case_type = CharField(
label=ugettext_lazy("Call Center Case Type"),
required=False,
help_text=ugettext_lazy("Enter the case type to be used for FLWs in call center apps")
)
mobile_ucr_sync_interval = IntegerField(
label=ugettext_lazy("Default mobile report sync delay (hours)"),
required=False,
help_text=ugettext_lazy(
"""
Default time to wait between sending updated mobile report data to users.
Can be overridden on a per user basis.
"""
)
)
def __init__(self, *args, **kwargs):
self.project = kwargs.pop('domain', None)
self.domain = self.project.name
self.can_use_custom_logo = kwargs.pop('can_use_custom_logo', False)
super(DomainGlobalSettingsForm, self).__init__(*args, **kwargs)
self.helper = hqcrispy.HQFormHelper(self)
self.helper[5] = twbscrispy.PrependedText('delete_logo', '')
self.helper[6] = twbscrispy.PrependedText('call_center_enabled', '')
self.helper.all().wrap_together(crispy.Fieldset, _('Edit Basic Information'))
self.helper.layout.append(
hqcrispy.FormActions(
StrictButton(
_("Update Basic Info"),
type="submit",
css_class='btn-primary',
)
)
)
self.fields['default_timezone'].label = ugettext_lazy('Default timezone')
if not self.can_use_custom_logo:
del self.fields['logo']
del self.fields['delete_logo']
if self.project:
if not self.project.call_center_config.enabled:
del self.fields['call_center_enabled']
del self.fields['call_center_type']
del self.fields['call_center_case_owner']
del self.fields['call_center_case_type']
else:
owner_field = self.fields['call_center_case_owner']
owner_field.widget.set_url(
reverse(CallCenterOwnerOptionsView.url_name, args=(self.domain,))
)
owner_field.widget.set_domain(self.domain)
if not MOBILE_UCR.enabled(self.domain):
del self.fields['mobile_ucr_sync_interval']
def clean_default_timezone(self):
data = self.cleaned_data['default_timezone']
timezone_field = TimeZoneField()
timezone_field.run_validators(data)
return smart_str(data)
def clean_default_geocoder_location(self):
data = self.cleaned_data.get('default_geocoder_location')
if isinstance(data, dict):
return data
return json.loads(data or '{}')
def clean(self):
cleaned_data = super(DomainGlobalSettingsForm, self).clean()
if (cleaned_data.get('call_center_enabled') and
(not cleaned_data.get('call_center_case_type') or
not cleaned_data.get('call_center_case_owner') or
not cleaned_data.get('call_center_type'))):
raise forms.ValidationError(_(
'You must choose a Call Center Type, Owner, and Case Type to use the call center application. '
'Please uncheck the "Call Center Application" setting or enter values for the other fields.'
))
return cleaned_data
def _save_logo_configuration(self, domain):
"""
:raises IOError: if unable to save (e.g. PIL is unable to save PNG in CMYK mode)
"""
if self.can_use_custom_logo:
logo = self.cleaned_data['logo']
if logo:
input_image = Image.open(io.BytesIO(logo.read()))
input_image.load()
input_image.thumbnail(LOGO_SIZE)
# had issues trying to use a BytesIO instead
tmpfilename = "/tmp/%s_%s" % (uuid.uuid4(), logo.name)
input_image.save(tmpfilename, 'PNG')
with open(tmpfilename, 'rb') as tmpfile:
domain.put_attachment(tmpfile, name=LOGO_ATTACHMENT)
elif self.cleaned_data['delete_logo']:
domain.delete_attachment(LOGO_ATTACHMENT)
def _save_call_center_configuration(self, domain):
cc_config = domain.call_center_config
cc_config.enabled = self.cleaned_data.get('call_center_enabled', False)
if cc_config.enabled:
domain.internal.using_call_center = True
cc_config.use_fixtures = self.cleaned_data['call_center_type'] == self.CASES_AND_FIXTURES_CHOICE
owner = self.cleaned_data.get('call_center_case_owner', None)
if owner in self.LOCATION_CHOICES:
cc_config.call_center_case_owner = None
cc_config.use_user_location_as_owner = True
cc_config.user_location_ancestor_level = 1 if owner == USE_PARENT_LOCATION_CHOICE else 0
else:
cc_config.case_owner_id = owner
cc_config.use_user_location_as_owner = False
cc_config.case_type = self.cleaned_data.get('call_center_case_type', None)
def _save_timezone_configuration(self, domain):
global_tz = self.cleaned_data['default_timezone']
if domain.default_timezone != global_tz:
domain.default_timezone = global_tz
users = WebUser.by_domain(domain.name)
users_to_save = []
for user in users:
dm = user.get_domain_membership(domain.name)
if not dm.override_global_tz and dm.timezone != global_tz:
dm.timezone = global_tz
users_to_save.append(user)
if users_to_save:
WebUser.bulk_save(users_to_save)
def save(self, request, domain):
domain.hr_name = self.cleaned_data['hr_name']
domain.project_description = self.cleaned_data['project_description']
domain.default_mobile_ucr_sync_interval = self.cleaned_data.get('mobile_ucr_sync_interval', None)
domain.default_geocoder_location = self.cleaned_data.get('default_geocoder_location')
try:
self._save_logo_configuration(domain)
except IOError as err:
messages.error(request, _('Unable to save custom logo: {}').format(err))
self._save_call_center_configuration(domain)
self._save_timezone_configuration(domain)
domain.save()
return True
class DomainMetadataForm(DomainGlobalSettingsForm):
cloudcare_releases = ChoiceField(
| |
self.opt_kwargs
Attributes
----------
num_pix : Int
The number of pixels of the residuals used to calculate J.
Methods
-------
set_params(new_param_names, new_damping=None)
Change the parameter names to optimize.
reset(new_damping=None)
Resets counters etc to zero, allowing more runs to commence.
See Also
--------
LMEngine : Parent class for all Levenberg-Marquardt (LM) optimization
LMParticles : LM optimization designed for optimizing state particles.
LMParticleGroupCollection : LM optimization on all particles in a
state.
LMAugmentedState : LMGlobals with additional R(z) parameters.
do_levmarq : Convenience function for LMGlobals
do_levmarq_particles : Convenience function for optimizing particles
"""
def __init__(self, state, param_names, max_mem=1e9, opt_kwargs={}, **kwargs):
self.state = state
self.opt_kwargs = opt_kwargs
self.max_mem = max_mem
self.num_pix = get_num_px_jtj(state, len(param_names), max_mem=max_mem,
**self.opt_kwargs)
self.param_names = param_names
super(LMGlobals, self).__init__(**kwargs)
def _set_err_paramvals(self):
self.error = self.state.error
self._last_error = (1 + 2*self.fractol) * self.state.error
self.param_vals = np.ravel(self.state.state[self.param_names])
self._last_vals = self.param_vals.copy()
def calc_J(self):
del self.J
# self.J, self._inds = get_rand_Japprox(self.state,
# self.param_names, num_inds=self.num_pix)
je, self._inds = get_rand_Japprox(self.state, self.param_names,
num_inds=self.num_pix, include_cost=True)
self.J = je[0]
#Storing the _direction_ of the exact gradient of the model, rescaled
        #to the size we expect from the inds:
rescale = float(self.J.shape[1])/self.state.residuals.size
self._graderr = je[1] * rescale
def calc_residuals(self):
return self.state.residuals.ravel()[self._inds].copy()
def update_function(self, values):
self.state.update(self.param_names, values)
if np.any(np.isnan(self.state.residuals)):
raise FloatingPointError('state update caused nans in residuals')
return self.state.error
def set_params(self, new_param_names, new_damping=None):
self.param_names = new_param_names
self._set_err_paramvals()
self.reset(new_damping=new_damping)
def update_select_J(self, blk):
"""
Updates J only for certain parameters, described by the boolean
mask blk.
"""
self.update_function(self.param_vals)
params = np.array(self.param_names)[blk].tolist()
blk_J = -self.state.gradmodel(params=params, inds=self._inds, flat=False)
self.J[blk] = blk_J
#Then we also need to update JTJ:
self.JTJ = np.dot(self.J, self.J.T)
if np.any(np.isnan(self.J)) or np.any(np.isnan(self.JTJ)):
raise FloatingPointError('J, JTJ have nans.')
def find_expected_error(self, delta_params='calc', adjust=True):
"""
Returns the error expected after an update if the model were linear.
Parameters
----------
delta_params : {numpy.ndarray, 'calc', or 'perfect'}, optional
The relative change in parameters. If 'calc', uses update
calculated from the current damping, J, etc; if 'perfect',
uses the update calculated with zero damping.
Returns
-------
numpy.float64
The expected error after the update with `delta_params`
"""
expected_error = super(LMGlobals, self).find_expected_error(
delta_params=delta_params)
if adjust:
#adjust for num_pix
derr = (expected_error - self.error) * (self.state.residuals.size /
float(self.num_pix))
expected_error = self.error + derr
return expected_error
def calc_model_cosine(self, decimate=None, mode='err'):
"""
Calculates the cosine of the residuals with the model.
Parameters
----------
decimate : Int or None, optional
Decimate the residuals by `decimate` pixels. If None, no
decimation is used. Valid only with mode='svd'. Default
is None
mode : {'svd', 'err'}
Which mode to use; see Notes section. Default is 'err'.
Returns
-------
abs_cos : numpy.float64
The absolute value of the model cosine.
Notes
-----
The model cosine is defined in terms of the geometric view of
curve-fitting, as a model manifold embedded in a high-dimensional
space. The model cosine is the cosine of the residuals vector
with its projection on the tangent space: :math:`cos(phi) = |P^T r|/|r|`
where :math:`P^T` is the projection operator onto the model manifold
and :math:`r` the residuals. This can be calculated two ways: By
calculating the projection operator P directly with SVD (mode=`svd`),
or by using the expected error if the model were linear to calculate
a model sine first (mode=`err`). Since the SVD of a large matrix is
slow, mode=`err` is faster.
`decimate` allows for every nth pixel only to be counted in the
SVD matrix of J for speed. While this is n x faster, it is
considerably less accurate, so the default is no decimation.
"""
#we calculate the model cosine only in the data space of the
#sampled indices
if mode == 'err':
expected_error = self.find_expected_error(delta_params='perfect',
adjust=False)
derr = self.error - expected_error
residuals_err = lambda r: np.dot(r,r).sum()
current_partial_error = residuals_err(self.calc_residuals())
expected_partial_error = current_partial_error - derr
model_sine_2 = expected_partial_error / current_partial_error
abs_cos = np.sqrt(1 - model_sine_2)
else:
#superclass is fine
abs_cos = super(self.__class__, self).calc_model_cosine(decimate=
decimate, mode=mode)
return abs_cos
def calc_grad(self):
"""The gradient of the cost w.r.t. the parameters."""
if self._fresh_JTJ:
return self._graderr
else:
residuals = self.calc_residuals()
return 2*np.dot(self.J, residuals)
class LMParticles(LMEngine):
"""
    Levenberg-Marquardt, optimized for state particles.
    Contains all the options from the <NAME> 2012 arXiv
    paper. See LMEngine for further documentation.
Parameters
----------
state : :class:`peri.states.ImageState`
The state to optimize
particles : numpy.ndarray
Array of the particle indices to optimize over.
include_rad : Bool, optional
Whether or not to include the particle radii in the
optimization. Default is True
Attributes
----------
param_names : List
The list of the parameter names being optimized.
Methods
-------
set_particles(new_particles, new_damping=None)
Change the particle to optimize.
reset(new_damping=None)
Resets counters etc to zero, allowing more runs to commence.
See Also
--------
LMEngine : Parent class for all Levenberg-Marquardt (LM) optimization
LMGlobals : LM optimization designed for optimizing state global
parameters.
LMParticleGroupCollection : LM optimization on all particles in a
state.
LMAugmentedState : LMGlobals with additional R(z) parameters.
do_levmarq : Convenience function for LMGlobals
do_levmarq_particles : Convenience function for optimizing particles
Notes
-----
To prevent the state updates from breaking, this clips the particle
rads to [self._MINRAD, self._MAXRAD] and the positions to at least
self._MINDIST from the edge of the padded image. These are:
* ``_MINRAD`` : 1e-3
* ``_MAXRAD`` : 2e2
* ``_MINDIST`` : 1e-3
For extremely large particles (e.g. larger than _MAXRAD or larger than
the pad and barely overlapping the image) these numbers might be
insufficient.
"""
def __init__(self, state, particles, include_rad=True, **kwargs):
self.state = state
if len(particles) == 0:
raise ValueError('Empty list of particle indices')
        self.particles = particles
        self._include_rad = include_rad
        self.param_names = (state.param_particle(particles) if include_rad
                            else state.param_particle_pos(particles))
self._dif_tile = self._get_diftile()
#Max, min rads, distance from edge for allowed updates
self._MINRAD = 1e-3
self._MAXRAD = 2e2
self._MINDIST= 1e-3
#is_rad, is_pos masks:
rad_nms = (self.state.param_radii() if (include_rad and hasattr(
self.state, 'param_radii')) else []) # FIXME explicit rad
self._is_rad = np.array([p in rad_nms for p in self.param_names])
pos_nms = self.state.param_positions()
self._is_pos = []
for a in range(3): # FIXME explicit 3D
self._is_pos.append(np.array([(x in pos_nms) & (x[-1] == 'zyx'[a])
for x in self.param_names]))
super(LMParticles, self).__init__(**kwargs)
def _get_diftile(self):
vals = np.ravel(self.state.state[self.param_names])
itile = self.state.get_update_io_tiles(self.param_names, vals)[1]
return get_residuals_update_tile(self.state, itile)
def _set_err_paramvals(self):
self.error = self.state.error
self._last_error = (1 + 2*self.fractol) * self.state.error
self.param_vals = np.ravel(self.state.state[self.param_names])
self._last_vals = self.param_vals.copy()
def calc_J(self):
self._dif_tile = self._get_diftile()
del self.J
#J = grad(residuals) = -grad(model)
if self._dif_tile.volume > 0:
self.J = -self.state.gradmodel(params=self.param_names, rts=True,
slicer=self._dif_tile.slicer)
else:
self.J = np.zeros([len(self.param_names), 1])
def calc_residuals(self):
if self._dif_tile.volume > 0:
return self.state.residuals[self._dif_tile.slicer].ravel().copy()
else:
return np.zeros(1)
def update_function(self, values):
#1. Clipping values:
values[self._is_rad] = np.clip(values[self._is_rad], self._MINRAD,
self._MAXRAD)
pd = self.state.pad
for a in range(3): # FIXME explicit 3D
values[self._is_pos[a]] = np.clip(values[self._is_pos[a]],
self._MINDIST - pd[a], self.state.ishape.shape[a] +
pd[a] - self._MINDIST)
self.state.update(self.param_names, values)
if np.any(np.isnan(self.state.residuals)):
raise FloatingPointError('state update caused nans in residuals')
return self.state.error
def set_particles(self, new_particles, new_damping=None):
self.particles = new_particles
        # use the include_rad setting given at construction time
        self.param_names = (self.state.param_particle(new_particles)
                            if self._include_rad
                            else self.state.param_particle_pos(new_particles))
self._dif_tile = self._get_diftile()
self._set_err_paramvals()
self.reset(new_damping=new_damping)
class LMParticleGroupCollection(object):
"""
Levenberg-Marquardt on all particles in a state.
Convenience wrapper for LMParticles. Generates a separate instance
for the particle groups each time and optimizes with that, since
storing J for the particles is too large.
Parameters
----------
state : :class:`peri.states.ImageState`
The state to optimize
region_size : Int or 3-element list-like of ints, optional
The region size for sub-blocking particles. Default is 40
do_calc_size : Bool, optional
If True, calculates the region size internally based on
the maximum allowed memory. Default is True
max_mem : Numeric, optional
The maximum allowed memory for J to occupy. Default is 1e9
get_cos : Bool, optional
Set to True to include the model cosine in the statistics
on each individual group's run, using `LMEngine`
get_termination_stats(). Stored in self.stats. Default is
False
save_J : Bool
Set to True to create a series of temp files that save J
for each group of particles. Needed for do_internal_run().
Default is False.
Other Parameters
----------------
Pass any kwargs that would be passed to LMParticles. Stored in
self._kwargs for reference.
Attributes
----------
stats : List
A list of the termination stats for each sub-block of particles
particle_groups | |
#
# Copyright (c) 2020 Pangeanic SL.
#
# This file is part of NEC TM
# (see https://github.com/shasha79/nectm).
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
import uuid
import re
import logging
import datetime
from elasticsearch import Elasticsearch, helpers
from elasticsearch_dsl import Search, MultiSearch, Q
import networkx
from TMDbApi.TMMap.TMMap import TMMap
from TMDbApi.TMUtils import TMUtils, TMTimer
from TMDbApi.TMDbQuery import TMDbQuery
class TMMapES(TMMap):
UPSERT_SCRIPT='upsert_segment'
def __init__(self):
self.es = Elasticsearch(timeout=30, max_retries=3, retry_on_timeout=True)
self.DOC_TYPE = 'id_map'
self.refresh_lang_graph()
self.es.indices.put_template(name='map_template', body=self._index_template())
self.es.put_script(TMMapES.UPSERT_SCRIPT, body=self._upsert_script())
self.scan_size = 9999999
self.timer = TMTimer("TMMapES")
def add_segment(self, segment):
m_index,swap = self._get_index(segment.source_language, segment.target_language, create_missing=True)
doc = self._segment2doc(segment)
if swap: self._swap(doc)
# Add segment source and target texts to the correspondent index of ElasticSearch
s_result = self.es.index(index=m_index, doc_type=self.DOC_TYPE, id=self._allocate_id(segment, swap),
body = doc,
ignore=409) # don't throw exception if a document already exists
return s_result
def add_segments(self, segments):
if not segments:
return
actions = []
for segment in segments:
self.timer.start("add_segment:get_index")
m_index, swap = self._get_index(segment.source_language, segment.target_language, create_missing=True)
self.timer.stop("add_segment:get_index")
self.timer.start("add_segment:segment2doc")
doc = self._segment2doc(segment)
if swap: self._swap(doc)
upsert_doc = self._doc_upsert(doc)
self.timer.stop("add_segment:segment2doc")
self.timer.start("add_segment:swap_doc")
self.timer.stop("add_segment:swap_doc")
action = {'_id': self._allocate_id(segment, swap),
'_index' : m_index,
'_type' : 'id_map',
'_op_type': 'update',
'_source' : upsert_doc,
}
actions.append(action)
# Bulk operation
logging.info("Bulk upsert (map): {}".format(actions))
self.timer.start("add_segment:es_index")
s_result = helpers.bulk(self.es, actions)
self.timer.stop("add_segment:es_index")
return s_result
def count_scan(self, langs, filter = None):
query,swap = self._create_query(langs, filter)
if not query: return 0 # index doesn't exist
return query.count
def scan(self, langs, filter = None):
query,swap = self._create_query(langs, filter)
for hit in query.scan():
if swap: hit = self._swap(hit)
# Check if a source/target docs match the pattern(s) if given
matches_pattern = not filter or self._match_pattern(hit['source_text'], filter.get('squery')) and \
self._match_pattern(hit['target_text'], filter.get('tquery'))
# Yield actual segment
if matches_pattern:
yield hit
# bidirectional query
def get(self, source_id, source_lang, target_lang):
search, swap = self._create_search(source_id, source_lang, target_lang)
if not search: return None,None
res = search.execute()
hits = res.to_dict()['hits']['hits']
if not hits: return None,None
ret_doc = hits[0]['_source']
# Exchange source and target (if needed)
if swap: ret_doc = self._swap(ret_doc)
return uuid.UUID(ret_doc['target_id']),ret_doc
# multi-get (bidirectional)
def mget(self, id_langs, return_multiple=False):
if not id_langs: return []
msearch = MultiSearch(using=self.es)
search_swap = []
for source_id,source_lang,target_lang in id_langs:
search,swap = self._create_search(source_id,source_lang,target_lang)
if search:
# Sort by update date so in case of multiple segments having the same source, the latest one will be returned
search = search.sort('-update_date')
msearch = msearch.add(search)
search_swap.append(swap)
responses = msearch.execute()
results = []
for res,swap in zip(responses,search_swap):
try:
if not 'hits' in res or not res.hits.total:
results.append(None)
continue
for ret_doc in res.hits:
# Exchange source and target (if needed)
if swap: ret_doc = self._swap(ret_doc)
results.append(ret_doc)
if not return_multiple: break
except:
# Exception is thrown if Response is in some invalid state (no hits, hits are empty)
logging.warning("Invalid Response object: {}".format(res.to_dict()))
results.append(None)
continue
return results
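    # Illustrative sketch of a bidirectional multi-get (the ids and language codes
    # are made-up placeholders): each element of id_langs is
    # (source_id, source_lang, target_lang) and the result list holds one matching
    # doc (or None) per query, unless return_multiple=True.
    #   docs = mapper.mget([(some_uuid, "en", "es"), (other_uuid, "es", "en")])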
def delete(self, langs, docs, filter, force_delete=False):
source_lang, target_lang = langs
filter_list_attrs = {attr: value for attr,value in filter.items() if attr in TMDbQuery.list_attrs}
deleted_ids = [list(), list()] # source and target
m_index,swap = self._get_index(source_lang, target_lang)
if not m_index: return deleted_ids
actions = []
for doc in docs:
            # Common action fields
action = {'_id': doc['_id'],
'_index': m_index,
'_type': self.DOC_TYPE
}
del doc['_id'] # id is not part of the doc
if force_delete or not filter_list_attrs:
action['_op_type'] = 'delete'
else:
for attr,value_list in filter_list_attrs.items():
assert(doc[attr])
doc[attr] = list(set(doc[attr]) - set(value_list)) # delete from the list of values all those in filter
# If no values left in at least one of the attributes -> map doc can be safely deleted
if not doc[attr]:
action['_op_type'] = 'delete'
deleted_ids[0].append(doc['source_id'])
deleted_ids[1].append(doc['target_id'])
break
# If not marked for delete -> update it
if not '_op_type' in action:
action['_op_type'] = 'index'
action['_source'] = doc
actions.append(action)
# Bulk operation (update/delete)
try:
status = helpers.bulk(self.es, actions)
logging.info("Map Delete status: {}".format(status))
except Exception as e:
print("MAP DELETE EXCEPTION: {}".format(e))
logging.warning(e)
return deleted_ids
# Check if given list of monolingual source ids exist in any map
def mexist(self, src_lang, src_ids):
if not src_ids: return []
tgt_langs = [target_lang for target_lang in self.lang_graph.neighbors(src_lang)]
MEXIST_BATCH_SIZE = 10
results = []
for i in range(0, len(src_ids), MEXIST_BATCH_SIZE):
msearch = MultiSearch(using=self.es)
for source_id in src_ids[i:i+MEXIST_BATCH_SIZE]:
search = self._create_search_mindexes(source_id, src_lang, tgt_langs)
if search:
msearch = msearch.add(search)
responses = msearch.execute()
for res in responses:
try:
results.append(bool('hits' in res and res.hits.total))
except:
# Exception is thrown if Response is in some invalid state (no hits, hits are empty)
logging.warning("Invalid Response object: {}".format(res.to_dict()))
results.append(None)
return results
# Count number of segments
def count(self, langs):
m_index,swap = self._get_index(langs[0], langs[1])
if not m_index: return []
search = Search(using=self.es, index=m_index).params(search_type="count")
res = search.execute()
return res.to_dict()['hits']['total']
def get_duplicates(self, langs, filter):
query,swap = self._create_query(langs, filter)
if not query: return []
src_tgt = 'source' if not swap else 'target'
res = query.duplicates(field='{}_text'.format(src_tgt))
return res
# Count number of segments for multiple language pairs
def mcount(self):
search = Search(using=self.es, index="{}*".format(TMUtils.MAP_PREFIX))
search.aggs.bucket('values', 'terms', field='_index', size=999999)
res = search.execute()
if not hasattr(res, 'aggregations') or 'values' not in res.aggregations: return dict()
return dict([(re.sub("^{}".format(TMUtils.MAP_PREFIX), "", f.key),f.doc_count) for f in res.aggregations['values'].buckets])
def mcount_buckets(self, buckets):
ms = MultiSearch(using=self.es)
for bucket_name in buckets:
search = Search(using=self.es, index="{}*".format(TMUtils.MAP_PREFIX))
search.aggs.bucket('indexes', 'terms', field='_index', size=999999).bucket('values', 'terms', field=bucket_name, size=999999)
ms = ms.add(search)
mres = ms.execute()
lang2buckets = dict()
for bucket_name,res in zip(buckets, mres):
if hasattr(res, "aggregations") and 'indexes' in res.aggregations:
triple_list = [(re.sub("^{}".format(TMUtils.MAP_PREFIX), "", x.key), y.key, y.doc_count) for x in res.aggregations['indexes'].buckets for y in x['values'].buckets]
for lang_pair,bucket_value,count in triple_list:
lang2buckets.setdefault(lang_pair, dict()).setdefault(bucket_name, dict())[bucket_value] = count
return lang2buckets
# Generate doc based on 2 docs with pivot
def generate_pivot(self, sdoc, tdoc):
if sdoc['source_id'] != tdoc['source_id']:
logging.error("Invalid pair for pivot generation: sdoc {}, tdoc {}".format(sdoc,tdoc))
assert(sdoc['source_id'] == tdoc['source_id']) # make sure pivot exists
# Result doc
doc = dict()
for attr in ['id', 'language', 'text']:
doc['source_'+attr] = sdoc['target_'+attr]
doc['target_'+attr] = tdoc['target_'+attr]
for attr in TMDbQuery.str_attrs:
if not attr in sdoc: continue
# TODO: should it be union or intersection?
doc[attr] = sdoc[attr] + tdoc[attr] if sdoc.get(attr) and tdoc.get(attr) else None
for attr in ['tm_creation_date', 'tm_change_date', 'insert_date', 'update_date']:
doc[attr] = TMUtils.date2str(datetime.datetime.now())
doc['check_date'] = TMUtils.date2str(datetime.datetime(1970, 1, 1))
return doc
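    # Illustrative sketch (field values are made up): given en->es and en->fr map
    # docs sharing the same pivot source_id, generate_pivot builds an es->fr doc:
    #   sdoc = {'source_id': pivot_id, 'target_id': es_id, 'target_language': 'es', ...}
    #   tdoc = {'source_id': pivot_id, 'target_id': fr_id, 'target_language': 'fr', ...}
    #   es_fr_doc = mapper.generate_pivot(sdoc, tdoc)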
# Build & return language graph
def get_lang_graph(self):
self.refresh()
lang_graph = networkx.Graph()
# Build language connection graph
for index in self.indexes:
m = re.search('map_([a-z]{2})_([a-z]{2})', index)
if m:
lang_graph.add_edge(m.group(1), m.group(2))
return lang_graph
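    # Illustrative sketch: with indexes map_en_es and map_en_fr the graph gets the
    # edges en-es and en-fr, so a pivot path between es and fr can be found, e.g.:
    #   networkx.shortest_path(self.lang_graph, 'es', 'fr')  # -> ['es', 'en', 'fr']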
# Should be called after modifying the index
def refresh(self):
self.indexes = self.es.indices.get_alias("*")
def refresh_lang_graph(self):
self.lang_graph = self.get_lang_graph()
# Get list of all values (aggregated) for the given field
def get_aggr_values(self, field, langs, filter):
query,swap = self._create_query(langs, filter)
if not query: return []
return query.aggs(field)
def _create_query(self, langs, filter=None):
source_lang,target_lang = langs
m_index,swap = self._get_index(source_lang, target_lang)
if not m_index: return None,None
query = TMDbQuery(es=self.es, index=m_index, filter=filter)
return query,swap
def _create_search(self, source_id, source_lang, target_lang):
m_index,swap = self._get_index(source_lang, target_lang)
if not m_index: return None,None
qfield = "source_id" if not swap else "target_id"
search = Search(using=self.es, index=m_index)
search.query = Q('term', **{qfield: source_id})
return search,swap
def _create_search_mindexes(self, source_id, source_lang, target_langs):
        m_indexes = [self._get_index(source_lang, tgt_lang)[0] for tgt_lang in target_langs]  # only m_index is needed; the swap flag is not used here
search = Search(using=self.es, index=m_indexes)
        # FIXME: search only source_id or target_id according to the index direction, otherwise it might return results from an incorrect language (having the same id due to exactly the same text)
search.query = Q('term', source_id=source_id) | Q('term', target_id=source_id)
return search
def _match_pattern(self, text, pattern):
if not pattern: return True
return re.search(pattern, text)
# Returns tuple (index_name, is_swapped)
def _get_index(self, source_lang, target_lang, create_missing=False):
m_index = TMUtils.es_index2mapdb(TMUtils.lang2es_index(source_lang),
TMUtils.lang2es_index(target_lang))
if self.es.indices.exists(index=m_index): | |
# $Id: testWebBrick.py 533 2006-01-23 21:44:46Z graham $
#
# Integration testing for WebBrick hardware unit
# See http://pyunit.sourceforge.net/pyunit.html
#
# NOTE: this is not strictly a unit test, in that it requires a WebBrick to be
# available at the specified IP address
import sys, time, logging
# import os
import unittest
sys.path.append("../API")
#from WbAccess import *
import wb6, wb6Config, wb6Status
#logging.basicConfig(level=logging.DEBUG)
def showWorking():
sys.stdout.write(".")
def showWait():
sys.stdout.write("w")
class testWebBrick(unittest.TestCase):
_wbAddress = "10.100.100.100"
# these should match what the webBrick has.
_dwellCount = 4
_spCount = 8
_diCount = 12
_aiCount = 4
_doCount = 8
_aoCount = 4
_tempCount = 5
_schedCount = 16
_sceneCount = 8
def doLogin(self):
wb6.Login( self._wbAddress, "installer" )
return
def doReboot(self):
# takes a while.
wb6.Send(self._wbAddress, "RB" )
time.sleep(5.0)
return
def factoryReset(self):
self.doLogin()
# send factory reset, full reset
wb6.Send(self._wbAddress, "FR1" )
        time.sleep(3.0)  # takes a while.
self.doReboot()
return
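    # Illustrative sketch: the same wb6 calls used above, issued manually from an
    # interactive session (the address below is just this test class's default):
    #   wb6.Login("10.100.100.100", "installer")
    #   wb6.Send("10.100.100.100", "FR1")   # full factory reset
    #   wb6.Send("10.100.100.100", "RB")    # reboot, takes a few seconds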
def setUp(self):
# self.factoryReset()
return
def tearDown(self):
# self.factoryReset()
return
def verifyDiConfiguration(self, idx, cfg):
di = cfg.getDigInTrigger(idx)
self.assertEqual( di["name"], "Sw-"+str(idx+1) )
if ( idx < 8 ):
self.assertEqual( di["actionNr"], 4)
self.assertEqual( di["action"], "Toggle")
self.assertEqual( di["pairChn"], idx)
self.assertEqual( di["options"], 2)
else:
self.assertEqual( di["actionNr"], 0)
self.assertEqual( di["action"], "None")
self.assertEqual( di["pairChn"], 0)
self.assertEqual( di["options"], 3)
self.assertEqual( di["dwell"], 0)
self.assertEqual( di["typeNr"], 0)
self.assertEqual( di["type"], "Digital")
self.assertEqual( di["setPoint"], 0)
self.assertEqual( di["UDPRem"], "None" )
self.assertEqual( di["UDPRemNr"], 0)
self.assertEqual( di["RemNode"], 0)
return
def verifyAiConfiguration(self, idx, cfg):
ai = cfg.getAnalogueTriggerLow(idx)
self.assertEqual( ai["name"], "AnIn-"+str(idx+1) )
self.assertEqual( ai["threshold"], 0 )
self.assertEqual( ai["actionNr"], 0 )
self.assertEqual( ai["action"], "None" )
self.assertEqual( ai["pairChn"], 0 )
self.assertEqual( ai["dwell"], 0 )
self.assertEqual( ai["typeNr"], 0 )
self.assertEqual( ai["type"], "Digital" )
self.assertEqual( ai["setPoint"], 0 )
self.assertEqual( ai["UDPRem"], "None" )
self.assertEqual( ai["UDPRemNr"], 0 )
self.assertEqual( ai["RemNode"], 0 )
ai = cfg.getAnalogueTriggerHigh(idx)
self.assertEqual( ai["name"], "AnIn-"+str(idx+1) )
self.assertEqual( ai["threshold"], 100 )
self.assertEqual( ai["actionNr"], 0 )
self.assertEqual( ai["action"], "None" )
self.assertEqual( ai["pairChn"], 0 )
self.assertEqual( ai["dwell"], 0 )
self.assertEqual( ai["typeNr"], 0 )
self.assertEqual( ai["type"], "Digital" )
self.assertEqual( ai["setPoint"], 0 )
self.assertEqual( ai["UDPRem"], "None" )
self.assertEqual( ai["UDPRemNr"], 0 )
self.assertEqual( ai["RemNode"], 0 )
return
def verifyTempConfiguration(self, idx, cfg):
ti = cfg.getTempTriggerLow(idx)
self.assertEqual( ti["name"], "Temp-"+str(idx+1) )
self.assertEqual( ti["threshold"], -50 )
self.assertEqual( ti["actionNr"], 0 )
self.assertEqual( ti["action"], "None" )
self.assertEqual( ti["pairChn"], 0 )
self.assertEqual( ti["dwell"], 0 )
self.assertEqual( ti["typeNr"], 0 )
self.assertEqual( ti["type"], "Digital" )
self.assertEqual( ti["setPoint"], 0 )
self.assertEqual( ti["UDPRem"], "None" )
self.assertEqual( ti["UDPRemNr"], 0 )
self.assertEqual( ti["RemNode"], 0 )
ti = cfg.getTempTriggerHigh(idx)
self.assertEqual( ti["name"], "Temp-"+str(idx+1) )
self.assertEqual( ti["threshold"], 100 )
self.assertEqual( ti["actionNr"], 0 )
self.assertEqual( ti["action"], "None" )
self.assertEqual( ti["pairChn"], 0 )
self.assertEqual( ti["dwell"], 0 )
self.assertEqual( ti["typeNr"], 0 )
self.assertEqual( ti["type"], "Digital" )
self.assertEqual( ti["setPoint"], 0 )
self.assertEqual( ti["UDPRem"], "None" )
self.assertEqual( ti["UDPRemNr"], 0 )
self.assertEqual( ti["RemNode"], 0 )
return
def verifyScheduleConfiguration(self, idx, cfg):
sc = cfg.getScheduledEvent(idx)
self.assertEqual( sc["days"], 0 )
self.assertEqual( sc["hours"], 0 )
self.assertEqual( sc["mins"], 0 )
self.assertEqual( sc["actionNr"], 0 )
self.assertEqual( sc["action"], "None" )
self.assertEqual( sc["pairChn"], 0 )
self.assertEqual( sc["dwell"], 0 )
self.assertEqual( sc["typeNr"], 0 )
self.assertEqual( sc["type"], "Digital" )
self.assertEqual( sc["setPoint"], 0 )
self.assertEqual( sc["UDPRemNr"], 0 )
self.assertEqual( sc["UDPRem"], "None" )
self.assertEqual( sc["RemNode"], 0 )
def verifySceneConfiguration(self, idx, cfg):
sc = cfg.getScene(idx)
self.assertEqual( sc["Digital0"], "Ignore" )
self.assertEqual( sc["Digital1"], "Ignore" )
self.assertEqual( sc["Digital2"], "Ignore" )
self.assertEqual( sc["Digital3"], "Ignore" )
self.assertEqual( sc["Digital4"], "Ignore" )
self.assertEqual( sc["Digital5"], "Ignore" )
self.assertEqual( sc["Digital6"], "Ignore" )
self.assertEqual( sc["Digital7"], "Ignore" )
self.assertEqual( sc["Analogue0"], "Ignore" )
self.assertEqual( sc["Analogue1"], "Ignore" )
self.assertEqual( sc["Analogue2"], "Ignore" )
self.assertEqual( sc["Analogue3"], "Ignore" )
# Actual tests follow
def verifyName(self):
cfg = wb6Config.wb6Config( self._wbAddress )
assert cfg.getNodeName() == "UnNamed", cfg.getNodeName()
return
def verifyIp(self):
cfg = wb6Config.wb6Config( self._wbAddress )
return
def verifyNodeNumber(self):
cfg = wb6Config.wb6Config( self._wbAddress )
assert cfg.getNodeNumber() == 0, str(cfg.getNodeNumber())
return
def verifyRotary(self):
cfg = wb6Config.wb6Config( self._wbAddress )
assert cfg.getRotary(0 ) == 8, str(cfg.getRotary(0))
assert cfg.getRotary(1 ) == 8, str(cfg.getRotary(1))
return
def verifyFadeRate(self):
cfg = wb6Config.wb6Config( self._wbAddress )
assert cfg.getFadeRate() == 8, str(cfg.getFadeRate())
return
def verifyDigitalIn(self):
cfg = wb6Config.wb6Config( self._wbAddress )
for idx in range(0,self._diCount):
self.verifyDiConfiguration(idx, cfg)
def verifyAnalogueIn(self):
cfg = wb6Config.wb6Config( self._wbAddress )
for idx in range(0,self._aiCount):
self.verifyAiConfiguration(idx, cfg)
def verifyTemperatures(self):
cfg = wb6Config.wb6Config( self._wbAddress )
for idx in range(0,self._tempCount):
self.verifyTempConfiguration(idx, cfg)
def verifySchedules(self):
cfg = wb6Config.wb6Config( self._wbAddress )
for idx in range(0,self._schedCount):
self.verifyScheduleConfiguration(idx, cfg)
def verifyScenes(self):
cfg = wb6Config.wb6Config( self._wbAddress )
for idx in range(0,self._sceneCount):
self.verifySceneConfiguration(idx, cfg)
def verifyDwellConfiguration(self):
cfg = wb6Config.wb6Config( self._wbAddress )
assert cfg.getDwell(0) == 30, str(cfg.getDwell(0))
assert cfg.getDwell(1) == 2, str(cfg.getDwell(1))
assert cfg.getDwell(2) == 60, str(cfg.getDwell(2))
assert cfg.getDwell(3) == 3600, str(cfg.getDwell(3))
assert cfg.getDwell(4) == 300, str(cfg.getDwell(4))
assert cfg.getDwell(5) == 600, str(cfg.getDwell(5))
assert cfg.getDwell(6) == 900, str(cfg.getDwell(6))
assert cfg.getDwell(7) == 1200, str(cfg.getDwell(7))
def verifySetPointConfiguration(self):
cfg = wb6Config.wb6Config( self._wbAddress )
assert cfg.getSetPoint(0) == 0, str(cfg.getSetPoint(0))
assert cfg.getSetPoint(1) == 14, str(cfg.getSetPoint(1))
assert cfg.getSetPoint(2) == 28, str(cfg.getSetPoint(2))
        assert cfg.getSetPoint(3) == 42, str(cfg.getSetPoint(3))
assert cfg.getSetPoint(4) == 57, str(cfg.getSetPoint(4))
assert cfg.getSetPoint(5) == 71, str(cfg.getSetPoint(5))
assert cfg.getSetPoint(6) == 85, str(cfg.getSetPoint(6))
assert cfg.getSetPoint(7) == 100, str(cfg.getSetPoint(7))
def verifyDoNameConfiguration(self):
cfg = wb6Config.wb6Config( self._wbAddress )
for idx in range(0,self._doCount):
assert cfg.getDigOutName(idx) == "DigOut-"+str(idx+1), cfg.getDigOutName(idx)
def verifyAoNameConfiguration(self):
cfg = wb6Config.wb6Config( self._wbAddress )
for idx in range(0,self._aoCount):
assert cfg.getAnalogueOutName(idx) == "AnOut-"+str(idx+1), cfg.getAnalogueOutName(idx)
def verifyFactoryConfiguration(self):
self.verifyName()
self.verifyIp()
self.verifyNodeNumber()
self.verifyRotary()
self.verifyFadeRate()
self.verifyDigitalIn()
self.verifyAnalogueIn()
self.verifyTemperatures()
self.verifySchedules()
self.verifyScenes()
self.verifyDwellConfiguration()
self.verifySetPointConfiguration()
self.verifyDoNameConfiguration()
self.verifyAoNameConfiguration()
return
def testConfigureDigitalIn(self):
self.doLogin()
# send CD command
# D1, Target A1, action On, sp 4, dw 2, udp 1, udpnode 100
for cmd in range(wb6.AT_NONE, wb6.AT_SPARE):
for sp in range(0, self._spCount):
if ( cmd == wb6.AT_DWELL ) or ( cmd == wb6.AT_DWELLCAN ):
showWorking()
for dwell in range(0, self._dwellCount):
idStr = "ConfigDigIn, 1, wb6.TT_ANALOGUE, 1, cmd:" + str(cmd) + " sp:"+ str(sp) +" dw:"+str(dwell) + ",1,100"
wb6.ConfigDigIn(self._wbAddress, 1,wb6.TT_ANALOGUE,1,cmd,sp,dwell,1,100 )
time.sleep(1.0)
# read all the configuration and verify entries.
cfg = wb6Config.wb6Config( self._wbAddress )
for idx in range(0,self._diCount):
if ( idx != 1):
self.verifyDiConfiguration(idx, cfg)
else:
di = cfg.getDigInTrigger(idx)
assert di["name"] == "Sw-"+str(idx+1), idStr + " di[name]:" + str(di["name"])
assert di["action"] == wb6.ActionStrs[cmd], idStr + " di[action]:" + di["action"]
assert di["actionNr"] == cmd, idStr + " di[actionNr]:" + str(di["actionNr"])
assert di["pairChn"] == 1, idStr + " di[pairChn]:" + str(di["pairChn"])
assert di["options"] == 2, idStr + " di[options]:" + str(di["options"])
assert di["dwell"] == dwell, idStr + " di[dwell]:" + str(di["dwell"])
assert di["typeNr"] == 2, idStr + " di[typeNr]:" + str(di["typeNr"])
assert di["type"] == "Analogue", idStr + " di[type]:" + str(di["type"])
assert di["setPoint"] == sp, idStr + " di[setPoint]:" + str(di["setPoint"])
assert di["UDPRem"] == "General", idStr + " di[UDPRem]:" + str(di["UDPRem"])
assert di["UDPRemNr"] == 1, idStr + " di[UDPRemNr]:" + str(di["UDPRemNr"])
assert di["RemNode"] == 100, idStr + " di[RemNode]:" + str(di["RemNode"])
else:
showWorking()
dwell = 0
idStr = "ConfigDigIn, 1, wb6.TT_ANALOGUE, 1, cmd:" + str(cmd) + " sp:"+ str(sp) +" dw:"+str(dwell) + ",1,100"
wb6.ConfigDigIn(self._wbAddress, 1,wb6.TT_ANALOGUE,1,cmd,sp,dwell,1,100 )
time.sleep(1.0)
# read all the configuration and verify entries.
cfg = wb6Config.wb6Config( self._wbAddress )
for idx in range(0,self._diCount):
if ( idx != 1):
self.verifyDiConfiguration(idx, cfg)
else:
di = cfg.getDigInTrigger(idx)
assert di["name"] == "Sw-"+str(idx+1), idStr + " di[name]:" + str(di["name"])
assert di["actionNr"] == cmd, idStr + " di[actionNr]:" + str(di["actionNr"])
assert di["action"] == wb6.ActionStrs[cmd], idStr + " di[action]:" + di["action"]
assert di["pairChn"] == 1, idStr + " di[pairChn]:" + str(di["pairChn"])
assert di["options"] == 2, idStr + " di[options]:" + str(di["options"])
assert di["dwell"] == dwell, idStr + " di[dwell]:" + str(di["dwell"])
assert di["typeNr"] == 2, idStr + " di[typeNr]:" + str(di["typeNr"])
assert di["type"] == "Analogue", idStr + " di[type]:" + str(di["type"])
assert di["setPoint"] == sp, idStr + " di[setPoint]:" + str(di["setPoint"])
assert di["UDPRem"] == "General", idStr + " di[UDPRem]:" + str(di["UDPRem"])
assert di["UDPRemNr"] == 1, idStr + " di[UDPRemNr]:" + str(di["UDPRemNr"])
assert di["RemNode"] == 100, idStr + " di[RemNode]:" + str(di["RemNode"])
def testConfigureAnThreshold(self):
self.doLogin()
# send CD command
# D1, Target A1, action On, sp 4, dw 2, udp 1, udpnode 100
wb6.ConfigAnIn(self._wbAddress, 1, "L", 17, wb6.TT_ANALOGUE, 2, wb6.AT_ON, wb6.SP_4, wb6.DW_2, wb6.UDPT_GENERAL, 99 )
wb6.ConfigAnIn(self._wbAddress, 1, "H", 87, wb6.TT_DIGITAL, 2, wb6.AT_ON, wb6.SP_0, wb6.DW_2, wb6.UDPT_GENERAL, 99 )
idStr = "ConfigAnIn, 1"
time.sleep(0.5)
# read all the configuration and verify entries.
cfg = wb6Config.wb6Config( | |
# ding/model/wrapper/model_wrappers.py
from typing import Any, Tuple, Callable, Optional, List
from abc import ABC
import numpy as np
import torch
from ding.torch_utils import get_tensor_data
from ding.rl_utils import create_noise_generator
from torch.distributions import Categorical
class IModelWrapper(ABC):
r"""
Overview:
the base class of Model Wrappers
Interfaces:
register
"""
def __init__(self, model: Any) -> None:
self._model = model
def __getattr__(self, key: str) -> Any:
r"""
Overview:
            Get the attribute from the wrapped model.
Arguments:
- key (:obj:`str`): The key to query.
Returns:
- ret (:obj:`Any`): The queried attribute.
"""
return getattr(self._model, key)
def info(self, attr_name):
r"""
Overview:
get info of attr_name
"""
if attr_name in dir(self):
if isinstance(self._model, IModelWrapper):
return '{} {}'.format(self.__class__.__name__, self._model.info(attr_name))
else:
if attr_name in dir(self._model):
return '{} {}'.format(self.__class__.__name__, self._model.__class__.__name__)
else:
return '{}'.format(self.__class__.__name__)
else:
if isinstance(self._model, IModelWrapper):
return '{}'.format(self._model.info(attr_name))
else:
return '{}'.format(self._model.__class__.__name__)
class BaseModelWrapper(IModelWrapper):
r"""
Overview:
the base class of Model Wrappers
Interfaces:
register
"""
def reset(self, data_id: List[int] = None) -> None:
r"""
        Overview:
the reset function that the Model Wrappers with states should implement
used to reset the stored states
"""
pass
class HiddenStateWrapper(IModelWrapper):
def __init__(
self, model: Any, state_num: int, save_prev_state: bool = False, init_fn: Callable = lambda: None
) -> None:
"""
Overview:
            Maintain the hidden state for an RNN-based model. Each sample in a batch has its own state. \
Init the maintain state and state function; Then wrap the ``model.forward`` method with auto \
saved data ['prev_state'] input, and create the ``model.reset`` method.
Arguments:
- model(:obj:`Any`): Wrapped model class, should contain forward method.
- state_num (:obj:`int`): Number of states to process.
- save_prev_state (:obj:`bool`): Whether to output the prev state in output['prev_state'].
- init_fn (:obj:`Callable`): The function which is used to init every hidden state when init and reset. \
Default return None for hidden states.
.. note::
            1. This helper must deal with an actual batch containing only some of the samples, e.g. 6 samples when state_num is 8.
2. This helper must deal with the single sample state reset.
"""
super().__init__(model)
self._state_num = state_num
self._state = {i: init_fn() for i in range(state_num)}
self._save_prev_state = save_prev_state
self._init_fn = init_fn
def forward(self, data, **kwargs):
state_id = kwargs.pop('data_id', None)
valid_id = kwargs.pop('valid_id', None)
data, state_info = self.before_forward(data, state_id)
output = self._model.forward(data, **kwargs)
h = output.pop('next_state', None)
if h is not None:
self.after_forward(h, state_info, valid_id)
if self._save_prev_state:
prev_state = get_tensor_data(data['prev_state'])
output['prev_state'] = prev_state
return output
def reset(self, *args, **kwargs):
state = kwargs.pop('state', None)
state_id = kwargs.get('data_id', None)
self.reset_state(state, state_id)
if hasattr(self._model, 'reset'):
return self._model.reset(*args, **kwargs)
def reset_state(self, state: Optional[list] = None, state_id: Optional[list] = None) -> None:
if state_id is None:
state_id = [i for i in range(self._state_num)]
if state is None:
state = [self._init_fn() for i in range(len(state_id))]
assert len(state) == len(state_id), '{}/{}'.format(len(state), len(state_id))
for idx, s in zip(state_id, state):
self._state[idx] = s
def before_forward(self, data: dict, state_id: Optional[list]) -> Tuple[dict, dict]:
if state_id is None:
state_id = [i for i in range(self._state_num)]
state_info = {idx: self._state[idx] for idx in state_id}
data['prev_state'] = list(state_info.values())
return data, state_info
def after_forward(self, h: Any, state_info: dict, valid_id: Optional[list] = None) -> None:
assert len(h) == len(state_info), '{}/{}'.format(len(h), len(state_info))
for i, idx in enumerate(state_info.keys()):
if valid_id is None:
self._state[idx] = h[i]
else:
if idx in valid_id:
self._state[idx] = h[i]
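# --- Illustrative sketch, not part of the original module -------------------
# A minimal example of how HiddenStateWrapper might be used with an RNN-style
# model. DummyRNN, its sizes and the batch layout are assumptions made up for
# illustration; only the wrapper itself comes from this module.
def _hidden_state_wrapper_sketch():
    import torch.nn as nn

    class DummyRNN(nn.Module):

        def __init__(self, obs_dim=4, hidden_dim=8, action_dim=3):
            super().__init__()
            self.rnn = nn.GRUCell(obs_dim, hidden_dim)
            self.head = nn.Linear(hidden_dim, action_dim)

        def forward(self, data):
            obs = data['obs']  # (B, obs_dim)
            # init_fn returns None by default, so missing states start as zeros
            prev = torch.stack(
                [p if p is not None else torch.zeros(self.rnn.hidden_size) for p in data['prev_state']]
            )
            h = self.rnn(obs, prev)
            return {'logit': self.head(h), 'next_state': list(h)}

    model = HiddenStateWrapper(DummyRNN(), state_num=2)
    out = model.forward({'obs': torch.randn(2, 4)}, data_id=[0, 1])  # states 0 and 1 are updated
    model.reset(data_id=[0])  # reset only the hidden state of sample 0
    return out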
def sample_action(logit=None, prob=None):
if prob is None:
prob = torch.softmax(logit, dim=-1)
shape = prob.shape
prob += 1e-8
prob = prob.view(-1, shape[-1])
# prob can also be treated as weight in multinomial sample
action = torch.multinomial(prob, 1).squeeze(-1)
action = action.view(*shape[:-1])
return action
class ArgmaxSampleWrapper(IModelWrapper):
r"""
Overview:
Used to help the model to sample argmax action
"""
def forward(self, *args, **kwargs):
output = self._model.forward(*args, **kwargs)
assert isinstance(output, dict), "model output must be dict, but find {}".format(type(output))
logit = output['logit']
assert isinstance(logit, torch.Tensor) or isinstance(logit, list)
if isinstance(logit, torch.Tensor):
logit = [logit]
if 'action_mask' in output:
mask = output['action_mask']
if isinstance(mask, torch.Tensor):
mask = [mask]
logit = [l.sub_(1e8 * (1 - m)) for l, m in zip(logit, mask)]
action = [l.argmax(dim=-1) for l in logit]
if len(action) == 1:
action, logit = action[0], logit[0]
output['action'] = action
return output
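# --- Illustrative sketch, not part of the original module -------------------
# What the action_mask handling above amounts to: invalid actions receive a very
# large negative logit, so argmax can only pick valid ones. The numbers are made up.
def _argmax_mask_sketch():
    logit = torch.tensor([[1.0, 3.0, 2.0]])
    mask = torch.tensor([[1.0, 0.0, 1.0]])  # action 1 is invalid
    masked = logit - 1e8 * (1 - mask)
    return masked.argmax(dim=-1)  # tensor([2]), not the higher-scoring but masked action 1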
class HybridArgmaxSampleWrapper(IModelWrapper):
r"""
Overview:
Used to help the model to sample argmax action in hybrid action space,
        i.e. {'action_type': discrete, 'action_args': continuous}
"""
def forward(self, *args, **kwargs):
output = self._model.forward(*args, **kwargs)
assert isinstance(output, dict), "model output must be dict, but find {}".format(type(output))
if 'logit' not in output:
return output
logit = output['logit']
assert isinstance(logit, torch.Tensor) or isinstance(logit, list)
if isinstance(logit, torch.Tensor):
logit = [logit]
if 'action_mask' in output:
mask = output['action_mask']
if isinstance(mask, torch.Tensor):
mask = [mask]
logit = [l.sub_(1e8 * (1 - m)) for l, m in zip(logit, mask)]
action = [l.argmax(dim=-1) for l in logit]
if len(action) == 1:
action, logit = action[0], logit[0]
output = {'action': {'action_type': action, 'action_args': output['action_args']}, 'logit': logit}
return output
class MultinomialSampleWrapper(IModelWrapper):
r"""
Overview:
        Used to help the model get the corresponding action from output['logit']
Interfaces:
register
"""
def forward(self, *args, **kwargs):
output = self._model.forward(*args, **kwargs)
assert isinstance(output, dict), "model output must be dict, but find {}".format(type(output))
logit = output['logit']
assert isinstance(logit, torch.Tensor) or isinstance(logit, list)
if isinstance(logit, torch.Tensor):
logit = [logit]
if 'action_mask' in output:
mask = output['action_mask']
if isinstance(mask, torch.Tensor):
mask = [mask]
logit = [l.sub_(1e8 * (1 - m)) for l, m in zip(logit, mask)]
action = [sample_action(logit=l) for l in logit]
if len(action) == 1:
action, logit = action[0], logit[0]
output['action'] = action
return output
class EpsGreedySampleWrapper(IModelWrapper):
r"""
Overview:
Epsilon greedy sampler used in collector_model to help balance exploration and exploitation.
Interfaces:
register
"""
def forward(self, *args, **kwargs):
eps = kwargs.pop('eps')
output = self._model.forward(*args, **kwargs)
assert isinstance(output, dict), "model output must be dict, but find {}".format(type(output))
logit = output['logit']
assert isinstance(logit, torch.Tensor) or isinstance(logit, list)
if isinstance(logit, torch.Tensor):
logit = [logit]
if 'action_mask' in output:
mask = output['action_mask']
if isinstance(mask, torch.Tensor):
mask = [mask]
logit = [l.sub_(1e8 * (1 - m)) for l, m in zip(logit, mask)]
else:
mask = None
action = []
for i, l in enumerate(logit):
if np.random.random() > eps:
action.append(l.argmax(dim=-1))
else:
if mask:
action.append(sample_action(prob=mask[i].float()))
else:
action.append(torch.randint(0, l.shape[-1], size=l.shape[:-1]))
if len(action) == 1:
action, logit = action[0], logit[0]
output['action'] = action
return output
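# --- Illustrative sketch, not part of the original module -------------------
# The exploration rule implemented by EpsGreedySampleWrapper.forward, in isolation:
# with probability (1 - eps) act greedily, otherwise draw a uniformly random (or
# mask-weighted) action. Argument shapes and the eps value are assumptions.
def _eps_greedy_sketch(logit, eps=0.1, mask=None):
    if np.random.random() > eps:
        return logit.argmax(dim=-1)
    if mask is not None:
        return sample_action(prob=mask.float())
    return torch.randint(0, logit.shape[-1], size=logit.shape[:-1])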
class HybridEpsGreedySampleWrapper(IModelWrapper):
r"""
Overview:
Epsilon greedy sampler used in collector_model to help balance exploration and exploitation.
        In hybrid action space, i.e. {'action_type': discrete, 'action_args': continuous}
Interfaces:
register, forward
"""
def forward(self, *args, **kwargs):
eps = kwargs.pop('eps')
output = self._model.forward(*args, **kwargs)
assert isinstance(output, dict), "model output must be dict, but find {}".format(type(output))
logit = output['logit']
assert isinstance(logit, torch.Tensor) or isinstance(logit, list)
if isinstance(logit, torch.Tensor):
logit = [logit]
if 'action_mask' in output:
mask = output['action_mask']
if isinstance(mask, torch.Tensor):
mask = [mask]
logit = [l.sub_(1e8 * (1 - m)) for l, m in zip(logit, mask)]
else:
mask = None
action = []
for i, l in enumerate(logit):
if np.random.random() > eps:
action.append(l.argmax(dim=-1))
else:
if mask:
action.append(sample_action(prob=mask[i].float()))
else:
action.append(torch.randint(0, l.shape[-1], size=l.shape[:-1]))
if len(action) == 1:
action, logit = action[0], logit[0]
output = {'action': {'action_type': action, 'action_args': output['action_args']}, 'logit': logit}
return output
class EpsGreedyMultinomialSampleWrapper(IModelWrapper):
r"""
Overview:
Epsilon greedy sampler coupled with multinomial sample used in collector_model
to help balance exploration and exploitation.
Interfaces:
register
"""
def forward(self, *args, **kwargs):
eps = kwargs.pop('eps')
alpha = kwargs.pop('alpha')
output = self._model.forward(*args, **kwargs)
assert isinstance(output, dict), "model output must be dict, but find {}".format(type(output))
logit = output['logit']
assert isinstance(logit, torch.Tensor) or isinstance(logit, list)
if isinstance(logit, torch.Tensor):
logit = [logit]
if 'action_mask' in output:
mask = output['action_mask']
if isinstance(mask, torch.Tensor):
mask = [mask]
logit = [l.sub_(1e8 * (1 - m)) for l, m in zip(logit, mask)]
else:
mask = None
action = []
for i, l in enumerate(logit):
if np.random.random() > eps:
                # Boltzmann exploration: softmax with temperature alpha over this head's logit
                prob = torch.softmax(l / alpha, dim=-1)
                prob = prob / torch.sum(prob, 1, keepdim=True)
                pi_action = Categorical(prob)
pi_action = pi_action.sample()
action.append(pi_action)
else:
if mask:
action.append(sample_action(prob=mask[i].float()))
else:
action.append(torch.randint(0, l.shape[-1], size=l.shape[:-1]))
if len(action) == 1:
action, logit = action[0], logit[0]
output['action'] = action
return output
class HybridEpsGreedyMultinomialSampleWrapper(IModelWrapper):
"""
Overview:
Epsilon greedy sampler coupled with multinomial sample used in collector_model
to help balance | |
from the selected entry)
# The information is useful at the stage of applying quantitative normalization
eline_calib = pqa.get_calibrations_selected(<emission_line>)
# Get calibration data on all emission lines as a list of dictionaries (the function
# calls 'get_calibrations_selected' for each emission line in the set)
all_elines_calib = pqa.get_calibrations_selected
# Set parameters of the experiment (before processing XRF map)
pqa.set_experiment_incident_energy(<incident_energy>)
pqa.set_experiment_detector_channel(<detector_channel>)
pqa.set_experiment_distance_to_sample(<distance_to_sample>)
# Process XRF map using selected calibrations and set experiment parameters
data_normalized = apply_quantitative_normalization(<data_in>, scaler_dict=<scaler_dict>,
scaler_name_default = <scaler_name_default>,
data_name = <emission_line>,
name_not_scalable=["sclr", "sclr2", "time" etc.]):
"""
def __init__(self):
r"""
Constructor. Initialization.
"""
# List of opened calibration standards
self.calibration_data = []
self.calibration_settings = []
self.active_emission_lines = []
# Parameters of the experiment for the currently processed dataset:
# 'experiment_detector_channel', 'experiment_incident_energy', 'experiment_distance_to_sample'.
# Set those parameters manually before running 'apply_quantitative_normalization'.
# Detector channel (values 'sum', 'det1', 'det2' etc.) must match
# the channel specified for the quantitative calibration data for the emission line.
# (check is performed for each emission line separately, since calibration data
# may come from different calibration files depending on user's choice).
self.experiment_detector_channel = None
# Incident energy values should be approximately equal
self.experiment_incident_energy = None
# Distance to sample. If 0 or None, the respective correction is not applied
self.experiment_distance_to_sample = None
def load_entry(self, file_path):
r"""
Load QA calibration data from file. The calibration data file must be JSON file which
satisfies schema ``_xrf_quant_fluor_schema``.
Data may be loaded from the same file only once.
If data is loaded from the same file repeatedly, then only one (first) copy is going to be
stored in the memory.
Parameters
----------
file_path: str
Full path to file with QA calibration data.
"""
# May raise exception if reading the file fails
calib_data = load_xrf_quant_fluor_json_file(file_path)
# Data will be added only if file path ``file_path`` was not loaded before. Otherwise
# the message will be printed.
self.add_entry(file_path, calibration_data=calib_data)
def add_entry(self, file_path, *, calibration_data):
r"""
Add QA calibration data to the object. Calibration data is represented as a dictionary
which satisfies ``_xrf_quant_fluor_schema`` schema. Calibration data may be loaded
from JSON file (with file name ``file_path``), but it may already exist in memory
as a dictionary (for example it may be generated or stored by some other module of
the program). ``file_path`` does not have to represent valid file path, but some
unique meaningful string, which may be used to identify calibration entries.
This function DOES NOT LOAD calibration data from file! Use the function
``load_entry()`` to load data from file.
Parameters
----------
file_path: str
            String (preferably meaningful and human readable) used to identify the data entry.
The value of this parameter is used to distinguish between calibration data
entries (identify duplicates, search for calibration entries etc.). If data is
loaded from file, then this parameter is naturally set to full path to the file
that holds calibration data entry.
calibration_data: dict
Calibration data entry: dictionary that satisfies schema ``_xrf_quant_fluor_schema``.
"""
# Do not load duplicates (distinguished by file name). Different file names may contain
# the same data, but this is totally up to the user. Loading duplicates should not
# disrupt the processing, since the user may choose the source of calibration data
# for each emission line.
if not any(file_path == _["file_path"] for _ in self.calibration_settings):
self.calibration_data.append(calibration_data)
settings = {}
settings['file_path'] = file_path
settings['element_lines'] = {}
for l in calibration_data['element_lines']:
settings["element_lines"][l] = {}
# Do not select the emission line
settings["element_lines"][l]["selected"] = False
self.calibration_settings.append(settings)
# This will also select the emission line if it occurs the first time
self.update_emission_line_list()
else:
logger.info(f"Calibration data file '{file_path}' is already loaded")
def remove_entry(self, file_path):
r"""
Remove QA calibration data entry identified by ``file_path`` from memory
(from object's internal list). Removes the entry if ``file_path`` is found, otherwise
prints error message (``logger.error``).
Parameters
----------
        file_path: str
            The string used to identify the QA calibration data entry. See the docstring for
``add_entry`` for more detailed comments.
"""
n_item = self.find_entry_index(file_path)
if n_item is not None:
self.calibration_data.pop(n_item)
self.calibration_settings.pop(n_item)
self.update_emission_line_list()
else:
raise RuntimeError(f"Calibration data from source '{file_path}' is not found.")
def get_file_path_list(self):
r"""
Returns the list of file paths, which identify the available calibration data entries.
        If calibration data entries are not directly loaded from files using ``load_entry``,
but instead added using ``add_entry``, the strings may not represent actual
file paths, but instead be any strings used to uniquely identify calibration data entries.
Returns
-------
file_path_list: list(str)
List of strings that identify QA calibration data entries, may represent paths to
files in which calibration data entries are stored. Returns empty list if no
QA calibration data are loaded.
"""
file_path_list = []
for v in self.calibration_settings:
file_path_list.append(v["file_path"])
return file_path_list
def get_n_entries(self):
r"""
Returns the number of QA calibration data entries.
Returns
-------
        The number of available calibration entries. If no calibration data was loaded or added,
then the returned value is 0.
"""
if not self.calibration_settings:
return 0
else:
return len(self.calibration_settings)
def find_entry_index(self, file_path):
r"""
Find the index of calibration data entry with the given file path. Returns ``None``
if ``file_path`` is not found.
Parameters
----------
file_path: str
The string used to identify QA calibration data entry. See the docstring for
``add_entry`` for more detailed comments.
"""
n_item = None
for n, v in enumerate(self.calibration_settings):
if v["file_path"] == file_path:
n_item = n
break
return n_item
def get_entry_text_preview(self, file_path):
r"""
Returns text (YAML) preview of the dictionary with QA calibration data entry.
Only original data (loaded from file) is printed.
Parameters
----------
file_path: str
The string used to identify QA calibration data entry. See the docstring for
``add_entry`` for more detailed comments.
Returns
-------
        String which contains a text (YAML) representation of the QA calibration data entry.
Empty string if the entry is not found.
"""
n_item = self.find_entry_index(file_path)
if n_item is not None:
s = yaml.dump(self.calibration_data[n_item], default_flow_style=False,
sort_keys=False, indent=4)
else:
s = ""
return s
def update_emission_line_list(self):
r"""
Update the internal list of emission lines. Also check and adjust if necessary
the selected (assigned) calibration data entry for each emission line.
This function is called by ``load_entry``, ``add_entry`` and ``remove_entry``
functions, since the set of emission lines is changed during those operations.
If manual changes are made to loaded calibration data entries, this function
has to be called again before any other operation is performed. Particularly,
it is recommended that the function is called after changing the selection of
calibration sources for emission lines.
"""
# The emission lines are arranged in the list in the order in which they appear
elines_all = set()
self.active_emission_lines = [] # Clear the list, it is recreated
for data in self.calibration_data:
for l in data["element_lines"].keys():
if l not in elines_all:
elines_all.add(l)
self.active_emission_lines.append(l)
# Make sure that emission line is selected only once
elines_selected = set() # We make sure that each emission line is selected
for settings in self.calibration_settings:
for k, v in settings["element_lines"].items():
if v["selected"]:
if k in elines_selected:
# If eline is in the set, then deselect it (it is selected twice)
v["selected"] = False
else:
# Add to the list
elines_selected.add(k)
elines_not_selected = elines_all - elines_selected
if elines_not_selected:
for settings in self.calibration_settings:
for k, v in settings["element_lines"].items():
if k in elines_not_selected:
v["selected"] = True
elines_not_selected.remove(k)
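    # Worked example of the invariant maintained above (entry/line names made up):
    # if 'Fe_K' is marked selected in two loaded entries, the second selection is
    # cleared; if it is selected in none, the first loaded entry containing it gets
    # selected, so afterwards every known emission line is selected in exactly one
    # calibration entry.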
def get_eline_info_complete(self, eline):
r"""
Collects emission line data from each loaded calibration entry and return it as a list
of dictionaries. The size of the list determines the number of entries in which the
emission line is present. If the emission line is not present in any loaded calibration set,
then empty list is returned.
The returned representation of complete calibration data is used in GUI interface for
selection of calibration data sources.
Parameters
----------
eline: str
Emission line name. Must match the emission line names used in calibration data.
Typical format: ``Fe_K``, ``S_K`` etc.
Returns
-------
eline_info: list(dict)
List of dictionaries that hold calibration data for the | |
# Copyright (C) 2009 The MITRE Corporation. See the toplevel
# file LICENSE for license terms.
# Readers and writers for documents.
import sys, codecs, re
# I started out with cjson, but it turns out that cjson
# doesn't decode "\/" correctly. So I've switched to
# simplejson. simplejson also appears to do the right thing
# with Unicode.
from MAT import json
class NotImplementedError(StandardError):
pass
# Toplevel getter.
_IO_LIB = {}
def declareDocumentIO(name, cls, input, output):
global _IO_LIB
_IO_LIB[name] = cls, input, output
def getDocumentIO(name, **kw):
return _IO_LIB[name][0](**kw)
def getDocumentIOClass(name):
return _IO_LIB[name][0]
def getInputDocumentIOClass(name):
v = _IO_LIB[name]
if not v[1]:
raise KeyError
return v[0]
def getOutputDocumentIOClass(name):
v = _IO_LIB[name]
if not v[2]:
raise KeyError
return v[0]
def documentIODeclared(name):
return _IO_LIB.has_key(name)
def inputDocumentIODeclared(name):
return _IO_LIB.has_key(name) and _IO_LIB[name][1]
def outputDocumentIODeclared(name):
return _IO_LIB.has_key(name) and _IO_LIB[name][2]
def allDocumentIO(exclusions = None):
return [k for k in _IO_LIB.keys() if ((exclusions is None) or (k not in exclusions))]
def allInputDocumentIO(exclusions = None):
return [name for (name, (cls, input, output)) in _IO_LIB.items()
if input and ((exclusions is None) or (name not in exclusions))]
def allOutputDocumentIO(exclusions = None):
return [name for (name, (cls, input, output)) in _IO_LIB.items()
if output and ((exclusions is None) or (name not in exclusions))]
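# Illustrative sketch (the io name and class are hypothetical, not part of MAT):
# a reader/writer becomes visible to getDocumentIO()/allInputDocumentIO() once it
# is registered in the toplevel table, e.g.:
#   declareDocumentIO("my-format", MyFormatDocumentIO, True, False)  # input-only
#   io = getDocumentIO("my-format", encoding = "utf-8")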
from MAT.Document import AnnotatedDoc, LoadError
class SaveError(Exception):
pass
#
# Base class.
#
from MAT.Operation import OptionBearer, OptionTemplate
import MAT.ExecutionContext
class DocumentIO(OptionBearer):
# Because of the way class methods interact with
# private variables, calling _ensureArgs() on the parent
# of this class just doesn't work.
@classmethod
def _ensureArgs(cls, **kw):
cls._consolidateArgs()
super(DocumentIO, cls)._ensureArgs(**kw)
@classmethod
def _consolidateArgs(cls):
if not hasattr(cls, "args"):
args = None
if hasattr(cls, "inputArgs") or hasattr(cls, "outputArgs"):
args = OptionTemplate(options = [])
if hasattr(cls, "inputArgs"):
args.options += cls.inputArgs.options
if hasattr(cls, "outputArgs"):
args.options += cls.outputArgs.options
cls.args = args
@classmethod
def addOptions(cls, aggregator, **kw):
cls._ensureArgs(**kw)
super(DocumentIO, cls).addOptions(aggregator, **kw)
@classmethod
def addInputOptions(cls, aggregator, **kw):
cls._ensureArgs(**kw)
super(DocumentIO, cls).addOptions(aggregator, subtype = "inputArgs", **kw)
@classmethod
def addOutputOptions(cls, aggregator, **kw):
cls._ensureArgs(**kw)
super(DocumentIO, cls).addOptions(aggregator, subtype = "outputArgs", **kw)
@classmethod
def findAll(cls, d = None, filters = None):
global _IO_LIB
if d is None: d = {}
if "inputArgs" in filters:
d.update(dict([(name, cls) for name, (cls, input, output) in _IO_LIB.items()
if input]))
elif "outputArgs" in filters:
d.update(dict([(name, cls) for name, (cls, input, output) in _IO_LIB.items()
                           if output]))
else:
d.update(dict([(name, cls) for name, (cls, input, output) in _IO_LIB.items()]))
if "excluderaw" in filters:
try:
del d["raw"]
except KeyError:
pass
return d
#
# All file-based IO modules.
#
# SAM 1/12/11: I'm going to add a capability to transform the document
# before it's written, or after it's read. This is to do things like
# promote annotations from ENAMEX type=X to X or other conversions.
# I'm going to do it on a document basis because I need to convert the
# annotation types, etc. This means that (a) the deserialize method
# must get a tag table, not the task, because the tag table might be
# different; and (b) the reader seed has to be handled carefully, because
# it shouldn't be polluted with the "incorrect" annotation structure, and
# (c) side effect changes in writers won't make any difference.
# The way you use the convertor is to make an instance of it when
# the file IO is created. This means that you have to register a separate
# file IO which is the combination of the convertor and the IO. This doesn't
# bother me; I don't have time or resources to promote the convertor to
# the command line, and that degree of flexibility isn't necessary anyway,
# because we'll be working with stable sources of data.
# Because we now need to copy the input to the output for inputConvert()
# we can no longer have a default null operator - it has to know that
# it's a null operator.
class DocumentConvertor:
def __init__(self):
self.annotationTypeRepository = self.createAnnotationTypeRepository()
def createAnnotationTypeRepository(self):
return None
def getInputTagTable(self, taskTagTable):
return taskTagTable
def deserializeAndConvert(self, docIO, s, seedDocument):
if hasattr(self, "inputConvert"):
d = MAT.Document.AnnotatedDoc(globalTypeRepository = self.annotationTypeRepository)
docIO.deserialize(s, d)
self.inputConvert(d, seedDocument)
# Make sure that the phasesDone are preserved. Other metadata
# we probably don't care about.
if d.metadata.has_key("phasesDone"):
# Is this right? Should it be a set union? I'm going
# to say no, for the moment.
seedDocument.metadata["phasesDone"] = d.metadata["phasesDone"][:]
else:
docIO.deserialize(s, seedDocument)
# No longer called for side effect. A seed document is now
# passed. The targetDoc will have the global repository
# if available.
# Not defined unless it's needed. Can't think of
# another way of indicating that it's a no-op.
# def inputConvert(self, sourceDoc, targetDoc)
# Changes by side effect.
def perhapsConvert(self, annotDoc):
if hasattr(self, "outputConvert"):
d = MAT.Document.AnnotatedDoc(globalTypeRepository = self.annotationTypeRepository)
# Make sure that the phasesDone are preserved. Other metadata
# we probably don't care about.
self.outputConvert(annotDoc, d)
if annotDoc.metadata.has_key("phasesDone"):
d.metadata["phasesDone"] = annotDoc.metadata["phasesDone"][:]
return d
else:
return annotDoc
# def outputConvert(self, annotDoc, targetDoc)
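# Illustrative sketch (class and annotation names are made up): a convertor that
# promotes ENAMEX type=X annotations to X, as described in the comment block above,
# would subclass DocumentConvertor and define one or both of the optional hooks:
#   class EnamexPromotionConvertor(DocumentConvertor):
#       def inputConvert(self, sourceDoc, targetDoc):
#           ... # copy signal/annotations, renaming ENAMEX type=X spans to X
#       def outputConvert(self, annotDoc, targetDoc):
#           ... # inverse mapping on the way out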
#
# You should be able to register a convertor for a given task.
#
# This can only live on children of DocumentFileIO.
class RegistrationError(Exception):
pass
_FILEIO_CONVERTORS = {}
def registerConvertorInstructionSet(ioName, taskName, cls = None, file = None, xml = None):
global _FILEIO_CONVERTORS
iocls = getDocumentIOClass(ioName)
if not issubclass(iocls, DocumentFileIO):
raise RegistrationError, "convertors can be registered only on DocumentFileIO classes"
if not (cls or file or xml):
raise RegistrationError, "none of cls or file or xml are provided"
if ((cls and file) or (cls and xml) or (xml and file)):
raise RegistrationError, "only one of cls or file or xml may be provided"
# What goes in here is a function which returns an instruction set engine.
if cls:
if not issubclass(cls, DocumentInstructionSetEngine):
raise RegistrationError, "cls must be a subclass of DocumentInstructionSetEngine"
_FILEIO_CONVERTORS[(iocls, taskName)] = cls
elif file:
if not os.path.isabs(file):
raise RegistrationError, "file must be an absolute pathname"
_FILEIO_CONVERTORS[(iocls, taskName)] = lambda: DocumentInstructionSetEngine(instructionSetFile = file)
else:
_FILEIO_CONVERTORS[(iocls, taskName)] = lambda: DocumentInstructionSetEngine(instructionSetXML = xml)
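# Illustrative sketch (io and task names are placeholders): exactly one of cls,
# file or xml may be given, and file must be an absolute path, e.g.:
#   registerConvertorInstructionSet("my-format", "MyTask",
#                                   file = "/usr/local/share/mytask_convertor.xml")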
# And now, the convertible class itself.
class DocumentFileIO(DocumentIO):
def __init__(self, encoding = None, task = None, convertor = None, **kw):
# The command line tool may provide an explicit None
# as an argument, which is almost never a good thing
if encoding is None:
encoding = 'ascii'
self.encoding = encoding
self.truncateOnUpdate = True
self.task = task
self.convertor = convertor
if (convertor is None) and task:
instructionSetEngineFactory = _FILEIO_CONVERTORS.get((self.__class__, task.name))
if instructionSetEngineFactory is not None:
isEngine = instructionSetEngineFactory()
self.convertor = DocumentInstructionSetEngineConvertor(isEngine)
#
# Reading
#
# We can read from a byte sequence, or from a Unicode string,
# or from a file.
def readFromSource(self, source, sourceName = None, **kw):
closeIt = False
if hasattr(source, "readline"):
# Already a file pointer. If it's been
# open with codecs.open, or it's sys.stdin, it'll have
# an encoding set. But sys.stdin only reads byte sequences,
# not Unicode strings, unlike codecs.open.
fp = source
elif source == "-":
fp = sys.stdin
else:
fp = codecs.open(source, "r", self.encoding)
closeIt = True
try:
if fp is sys.stdin:
# read from a byte stream, but it's got an encoding.
annotDoc = self.readFromByteSequence(fp.read(), encoding = fp.encoding, **kw)
elif fp.encoding is not None:
annotDoc = self.readFromUnicodeString(fp.read(), **kw)
else:
annotDoc = self.readFromByteSequence(fp.read(), **kw)
if closeIt:
fp.close()
except Exception, e:
if True or MAT.ExecutionContext._DEBUG:
raise
else:
if isinstance(e, UnicodeDecodeError):
eStr = self._unicodeErrorString(e, fp.encoding or self.encoding)
else:
eStr = str(e)
raise LoadError, "Error loading from source " + str(source) + ": " + eStr
return annotDoc
# Internal function.
def _unicodeErrorString(self, e, curEncoding):
cands = set(["ascii", "latin1", "utf-8", "windows-1252"])
if curEncoding:
cands.discard(curEncoding)
return "The document doesn't appear to be in the expected encoding (%s). The offending character sequence starts at character %d and ends at character %d. Try a different encoding (for English, other likely candidates are %s)." % (curEncoding, e.start, e.end, ", ".join(cands))
def readFromByteSequence(self, s, encoding = None, **kw):
if type(s) is type(u''):
return self.readFromUnicodeString(s, **kw)
else:
try:
return self.readFromUnicodeString(s.decode(encoding or self.encoding), **kw)
except UnicodeDecodeError, e:
# Catch this here as well as in readFromSource(), just in case.
raise LoadError, self._unicodeErrorString(e, encoding or self.encoding)
# As a consequence of the new convertors, it will not be permitted
# to | |
# src/api/tests/test_teams.py
from django.test import TestCase, Client
from django.conf import settings as django_settings
from urllib.parse import urlencode
from _main_.settings import BASE_DIR
from _main_.utils.massenergize_response import MassenergizeResponse
from database.models import Team, Community, UserProfile, Action, UserActionRel, TeamMember, RealEstateUnit, CommunityAdminGroup
from carbon_calculator.models import Action as CCAction
from _main_.utils.utils import load_json
from api.tests.common import signinAs, setupCC, createUsers
class TeamsTestCase(TestCase):
@classmethod
def setUpClass(self):
print("\n---> Testing Teams <---\n")
self.client = Client()
self.USER, self.CADMIN, self.SADMIN = createUsers()
signinAs(self.client, self.SADMIN)
setupCC(self.client)
COMMUNITY_NAME = "test_teams"
self.COMMUNITY = Community.objects.create(**{
'subdomain': COMMUNITY_NAME,
'name': COMMUNITY_NAME.capitalize(),
'accepted_terms_and_conditions': True
})
self.COMMUNITY2 = Community.objects.create(**{
'subdomain': "community2",
'name': "community2",
'accepted_terms_and_conditions': True
})
admin_group_name = f"{self.COMMUNITY.name}-{self.COMMUNITY.subdomain}-Admin-Group"
self.COMMUNITY_ADMIN_GROUP = CommunityAdminGroup.objects.create(name=admin_group_name, community=self.COMMUNITY)
self.COMMUNITY_ADMIN_GROUP.members.add(self.CADMIN)
self.USER1 = UserProfile.objects.create(**{
'full_name': "<NAME>",
'email': '<EMAIL>',
'accepts_terms_and_conditions': True
})
self.USER2 = UserProfile.objects.create(**{
'full_name': "<NAME>",
'email': '<EMAIL>',
'accepts_terms_and_conditions': True
})
self.USER3 = UserProfile.objects.create(**{
'full_name': "<NAME>",
'email': '<EMAIL>',
'accepts_terms_and_conditions': True
})
self.USER4 = UserProfile.objects.create(**{
'full_name': "<NAME>",
'email': '<EMAIL>',
'accepts_terms_and_conditions': True
})
self.USER5 = UserProfile.objects.create(**{
'full_name': "oweeen",
'preferred_name': "<NAME>",
'email': '<EMAIL>',
'accepts_terms_and_conditions': False
})
self.TEAM1 = Team.objects.create(primary_community=self.COMMUNITY, name="Les Montréalais", is_published=True)
self.TEAM1.communities.add(self.COMMUNITY)
self.TEAM2 = Team.objects.create(primary_community=self.COMMUNITY, name="McGill CS Students")
self.TEAM2.communities.add(self.COMMUNITY)
self.TEAM3 = Team.objects.create(primary_community=self.COMMUNITY, name="monkey")
self.TEAM3.communities.add(self.COMMUNITY)
self.TEAM4 = Team.objects.create(primary_community=self.COMMUNITY, name="kindred")
self.TEAM4.communities.add(self.COMMUNITY)
self.TEAM5 = Team.objects.create(primary_community=self.COMMUNITY, name="testing teams!")
self.TEAM5.communities.add(self.COMMUNITY)
self.ADMIN1 = TeamMember(team=self.TEAM1, user=self.USER1)
self.ADMIN1.is_admin = True
self.ADMIN1.save()
self.ADMIN2 = TeamMember(team=self.TEAM2, user=self.USER2)
self.ADMIN2.is_admin = True
self.ADMIN2.save()
self.ADMIN5 = TeamMember(team=self.TEAM5, user=self.USER2)
self.ADMIN5.is_admin = True
self.ADMIN5.save()
TeamMember(team=self.TEAM5, user=self.USER5).save() # add a user who didn't accept TOC
self.TEAM1.save()
self.TEAM2.save()
self.TEAM3.save()
self.TEAM4.save()
self.TEAM5.save()
@classmethod
def tearDownClass(self):
pass
def setUp(self):
# this gets run on every test case
pass
def test_info(self):
print("test_info")
# first test for no user signed in
signinAs(self.client, None)
# successfully retrieve information about a team that has been published
info_response = self.client.post('/api/teams.info', urlencode({"team_id": self.TEAM1.id}), content_type="application/x-www-form-urlencoded").toDict()
self.assertTrue(info_response["success"])
self.assertEqual(self.TEAM1.name, info_response['data']['name'])
# don't retrieve information about a team that has not been published
info_response = self.client.post('/api/teams.info', urlencode({"team_id": self.TEAM2.id}), content_type="application/x-www-form-urlencoded").toDict()
self.assertFalse(info_response["success"])
signinAs(self.client, self.USER)
# don't retrieve information about a team that has not been published
info_response = self.client.post('/api/teams.info', urlencode({"team_id": self.TEAM2.id}), content_type="application/x-www-form-urlencoded").toDict()
self.assertFalse(info_response["success"])
# test for Community Admin
signinAs(self.client, self.CADMIN)
        # retrieve information about an unpublished team if you're a cadmin
info_response = self.client.post('/api/teams.info', urlencode({"team_id": self.TEAM2.id}), content_type="application/x-www-form-urlencoded").toDict()
self.assertTrue(info_response["success"])
# if no ID passed, return error
info_response = self.client.post('/api/teams.info', urlencode({}), content_type="application/x-www-form-urlencoded").toDict()
self.assertFalse(info_response["success"])
def test_create(self): # same as add
# attempt to create team if not signed in
signinAs(self.client, None)
name = "<NAME>"
create_response = self.client.post('/api/teams.create', urlencode({"name": name, "primary_community_id": self.COMMUNITY.id,
"communities": str(self.COMMUNITY.id), "admin_emails": self.USER1.email}), content_type="application/x-www-form-urlencoded").toDict()
self.assertFalse(create_response["success"])
# attempt to create team if regular user signed in
signinAs(self.client, self.USER1)
create_response = self.client.post('/api/teams.create', urlencode({"name": name, "primary_community_id": self.COMMUNITY.id,
"communities": str(self.COMMUNITY.id), "admin_emails": self.USER1.email}), content_type="application/x-www-form-urlencoded").toDict()
self.assertTrue(create_response["success"])
# attempt to create team spanning 2 communities
name = "<NAME>"
communities = str(self.COMMUNITY.id) + ',' + str(self.COMMUNITY2.id)
create_response = self.client.post('/api/teams.create', urlencode({"name": name, "community_id": self.COMMUNITY.id,
"communities": communities, "admin_emails": self.USER1.email}), content_type="application/x-www-form-urlencoded").toDict()
self.assertTrue(create_response["success"])
self.assertEqual(name, create_response['data']['name'])
# TODO: doesn't test providing no community id in order to list the teams for the user only
# TODO: test published/unpublished teams
def test_list(self):
print("test_list")
signinAs(self.client, self.USER1)
list_response = self.client.post('/api/teams.list', urlencode({"community_id": self.COMMUNITY.id, "user_id": self.USER1.id}), content_type="application/x-www-form-urlencoded").toDict()
# only one team approved
self.assertTrue(list_response["success"])
self.assertIs(1, len(list_response['data']))
self.assertEqual(self.TEAM1.name, list_response['data'][0]['name'])
# TODO: doesn't test households or actions todo
def test_stats(self):
cca1 = CCAction.objects.create(name="CC Action 1", average_points=50, questions="foo")
cca2 = CCAction.objects.create(name="CC Action 2", average_points=100, questions="bar")
action1 = Action.objects.create(calculator_action=cca1)
action2 = Action.objects.create(calculator_action=cca2)
reu = RealEstateUnit.objects.create(name="Josh's House", unit_type="RESIDENTIAL")
UserActionRel.objects.create(user=self.USER1, action=action1, status="DONE", real_estate_unit=reu)
UserActionRel.objects.create(user=self.USER2, action=action1, status="DONE", real_estate_unit=reu)
UserActionRel.objects.create(user=self.USER2, action=action2, status="DONE", real_estate_unit=reu)
stats_response = self.client.post('/api/teams.stats', urlencode({"community_id": self.COMMUNITY.id}), content_type="application/x-www-form-urlencoded").toDict()
self.assertTrue(stats_response["success"])
        self.assertEqual(1, len(stats_response['data']))
self.TEAM2.is_published = True
self.TEAM2.save()
stats_response = self.client.post('/api/teams.stats', urlencode({"community_id": self.COMMUNITY.id}), content_type="application/x-www-form-urlencoded").toDict()
self.assertTrue(stats_response["success"])
        self.assertEqual(2, len(stats_response['data']))
team1stats, team2stats = stats_response['data'][0], stats_response['data'][1]
self.assertEqual(self.TEAM1.name, team1stats['team']['name'])
self.assertEqual(self.TEAM2.name, team2stats['team']['name'])
        self.assertEqual(1, team1stats['members'])
        self.assertEqual(1, team2stats['members'])
        self.assertEqual(1, team1stats['actions_completed'])
        self.assertEqual(2, team2stats['actions_completed'])
        self.assertEqual(50, team1stats['carbon_footprint_reduction'])
        self.assertEqual(150, team2stats['carbon_footprint_reduction'])
self.TEAM2.is_published = False
self.TEAM2.save()
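        # Sanity check on the numbers asserted above, assuming USER1 belongs to TEAM1 and
        # USER2 to TEAM2 (as implied by the member counts) and that the stats route sums
        # calculator_action.average_points over each member's completed actions:
        #   TEAM1: USER1 completed action1 only          -> 50 points
        #   TEAM2: USER2 completed action1 and action2   -> 50 + 100 = 150 points
        # which matches the carbon_footprint_reduction values of 50 and 150.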
def test_update(self):
print('\ntest_update')
        # try to update the team without being signed in
signinAs(self.client, None)
new_name = "QAnon followers"
update_response = self.client.post('/api/teams.update', urlencode({"id": self.TEAM1.id, "name": new_name}), content_type="application/x-www-form-urlencoded").toDict()
self.assertFalse(update_response["success"])
        # try to update the team while signed in but not as a team or community admin
signinAs(self.client, self.USER)
new_name = "Isolationists"
update_response = self.client.post('/api/teams.update', urlencode({"id": self.TEAM1.id, "name": new_name}), content_type="application/x-www-form-urlencoded").toDict()
self.assertFalse(update_response["success"])
        # try to update the team while signed in as a team admin
signinAs(self.client, self.USER1)
new_name = "Isolationists"
new_tagline = "Team dealing with covid"
update_response = self.client.post('/api/teams.update', urlencode({"id": self.TEAM1.id, "name": new_name, "tagline": new_tagline}), content_type="application/x-www-form-urlencoded").toDict()
self.assertTrue(update_response["success"])
self.assertEqual(new_name, update_response["data"]["name"])
self.assertEqual(new_tagline, update_response["data"]["tagline"])
        # update the team as a CADMIN of the correct community
signinAs(self.client, self.CADMIN)
new_name = "Arlingtonians"
update_response = self.client.post('/api/teams.update', urlencode({"id": self.TEAM1.id, "name": new_name, "parent": self.TEAM2.id}), content_type="application/x-www-form-urlencoded").toDict()
self.assertTrue(update_response["success"])
self.assertEqual(new_name, update_response["data"]["name"])
self.assertEqual(self.TEAM1.primary_community.id, update_response["data"]["primary_community"]["id"])
self.assertEqual(self.TEAM2.id, update_response["data"]["parent"]["id"])
        # update the team as a CADMIN of the correct community, adding another community to the team
communities = str(self.COMMUNITY.id) + "," + str(self.COMMUNITY2.id)
update_response = self.client.post('/api/teams.update', urlencode({"id": self.TEAM1.id, "name": new_name, "parent": self.TEAM2.id, "communities":communities}), content_type="application/x-www-form-urlencoded").toDict()
self.assertTrue(update_response["success"])
self.assertEqual(new_name, update_response["data"]["name"])
self.assertEqual(self.TEAM1.primary_community.id, update_response["data"]["primary_community"]["id"])
self.assertEqual(self.TEAM2.id, update_response["data"]["parent"]["id"])
        # update the team as a SADMIN
signinAs(self.client, self.SADMIN)
new_name = "<NAME>"
update_response = self.client.post('/api/teams.update', urlencode({"id": self.TEAM1.id, "name": new_name}), content_type="application/x-www-form-urlencoded").toDict()
self.assertTrue(update_response["success"])
self.assertEqual(new_name, update_response["data"]["name"])
# TODO: figure out what the expected return behaviour is for the delete route
def test_delete(self): # same as remove
        # teams in this test are only used here and deleted afterwards, so they are created locally in this function
# test can sadmin delete team
signinAs(self.client, self.SADMIN)
team = Team.objects.create(primary_community=self.COMMUNITY, name="sadmin_test_team", is_published=True)
delete_response = self.client.post('/api/teams.delete', urlencode({"team_id": team.id}), content_type="application/x-www-form-urlencoded").toDict()
self.assertTrue(delete_response["success"])
#self.assertTrue(delete_response["data"]["is_deleted"])
# test can cadmin delete team
signinAs(self.client, self.CADMIN)
team = Team.objects.create(primary_community=self.COMMUNITY, name="cadmin_test_team", is_published=True)
delete_response1 = self.client.post('/api/teams.delete', urlencode({"team_id": team.id}), content_type="application/x-www-form-urlencoded").toDict()
self.assertTrue(delete_response1["success"])
#self.assertTrue(delete_response1["data"]["is_deleted"])
team = Team.objects.create(primary_community=self.COMMUNITY, name="cadmin_test_team", is_published=True)
delete_response2 = self.client.post('/api/teams.delete', urlencode({"id": team.id}), content_type="application/x-www-form-urlencoded").toDict()
self.assertTrue(delete_response2["success"])
#self.assertTrue(delete_response2["data"]["is_deleted"])
# test can user delete team
signinAs(self.client, self.USER)
team = Team.objects.create(primary_community=self.COMMUNITY, name="user_test_team", is_published=True)
delete_response = self.client.post('/api/teams.delete', urlencode({"team_id": team.id}), content_type="application/x-www-form-urlencoded").toDict()
self.assertFalse(delete_response["success"])
#self.assertFalse(delete_response["data"]["is_deleted"])
        # test whether a team can be deleted when not logged in
signinAs(self.client, None)
team = Team.objects.create(primary_community=self.COMMUNITY, name="none_test_team", is_published=True)
delete_response = self.client.post('/api/teams.delete', urlencode({"team_id": team.id}), content_type="application/x-www-form-urlencoded").toDict()
self.assertFalse(delete_response["success"])
#self.assertFalse(delete_response["data"]["is_deleted"])
def test_leave(self):
# test leave not logged in
signinAs(self.client, None)
leave_response = self.client.post('/api/teams.leave', urlencode({"team_id": self.TEAM1.id, "user_id": self.USER1.id}), content_type="application/x-www-form-urlencoded").toDict()
self.assertFalse(leave_response["success"])
# test leave logged as different user
signinAs(self.client, self.USER)
leave_response = self.client.post('/api/teams.leave', urlencode({"team_id": self.TEAM1.id, "user_id": self.USER1.id}), content_type="application/x-www-form-urlencoded").toDict()
self.assertFalse(leave_response["success"])
# test leave user not in team
signinAs(self.client, self.USER1)
leave_response = self.client.post('/api/teams.leave', urlencode({"team_id": self.TEAM2.id, "user_id": self.USER1.id}), content_type="application/x-www-form-urlencoded").toDict()
self.assertFalse(leave_response["success"])
# test leave logged as admin
signinAs(self.client, self.SADMIN)
leave_response = self.client.post('/api/teams.leave', urlencode({"team_id": self.TEAM2.id, "user_id": self.USER2.id}), content_type="application/x-www-form-urlencoded").toDict()
self.assertFalse(leave_response["success"])
# get team members
members_response = self.client.post('/api/teams.members', urlencode({"team_id": self.TEAM1.id}), content_type="application/x-www-form-urlencoded").toDict()
self.assertTrue(members_response["success"])
members_num = len(members_response["data"])
# test leave logged as same user
signinAs(self.client, self.USER1)
leave_response = self.client.post('/api/teams.leave', urlencode({"team_id": self.TEAM1.id, "user_id": self.USER1.id}), content_type="application/x-www-form-urlencoded").toDict()
self.assertTrue(leave_response["success"])
# test user actually left
signinAs(self.client, self.SADMIN)
members_response = self.client.post('/api/teams.members', urlencode({"team_id": self.TEAM1.id}), content_type="application/x-www-form-urlencoded").toDict()
self.assertTrue(members_response["success"])
self.assertTrue(len(members_response["data"]) < members_num)
def test_removeMember(self):
# test remove member not logged in
signinAs(self.client, None)
remove_response = self.client.post('/api/teams.removeMember', urlencode({"team_id": self.TEAM2.id, "user_id": self.USER2.id}), content_type="application/x-www-form-urlencoded").toDict()
self.assertFalse(remove_response["success"])
# test remove member as normal user
signinAs(self.client, self.USER)
remove_response = self.client.post('/api/teams.removeMember', urlencode({"team_id": self.TEAM2.id, "user_id": self.USER2.id}), content_type="application/x-www-form-urlencoded").toDict()
self.assertFalse(remove_response["success"])
# test remove member as admin but member not in team
signinAs(self.client, self.SADMIN)
remove_response = self.client.post('/api/teams.removeMember', urlencode({"team_id": self.TEAM2.id, "user_id": self.USER1.id}), content_type="application/x-www-form-urlencoded").toDict()
self.assertTrue(remove_response["success"])
# check members in team
members_response = self.client.post('/api/teams.members', urlencode({"team_id": self.TEAM2.id}), content_type="application/x-www-form-urlencoded").toDict()
self.assertTrue(members_response["success"])
members_num = len(members_response["data"])
# test remove member as admin
remove_response = self.client.post('/api/teams.removeMember', urlencode({"team_id": self.TEAM4.id, "user_id": self.USER4.id}), content_type="application/x-www-form-urlencoded").toDict()
self.assertTrue(remove_response["success"])
# check member is no longer in team
members_response = self.client.post('/api/teams.members', urlencode({"team_id": self.TEAM4.id}), content_type="application/x-www-form-urlencoded").toDict()
self.assertTrue(members_response["success"])
self.assertTrue(len(members_response["data"]) < members_num)
def test_join(self): # same as addMember
# test join not signed in
signinAs(self.client, None)
join_response = self.client.post('/api/teams.join', urlencode({"team_id": self.TEAM3.id, "user_id": self.USER3.id}), content_type="application/x-www-form-urlencoded").toDict()
self.assertFalse(join_response["success"])
# test join as different user
signinAs(self.client, self.USER)
join_response = self.client.post('/api/teams.join', urlencode({"team_id": self.TEAM3.id, "user_id": self.USER3.id}), content_type="application/x-www-form-urlencoded").toDict()
self.assertFalse(join_response["success"])
# test join as different admin user
signinAs(self.client, self.SADMIN)
join_response = self.client.post('/api/teams.join', urlencode({"team_id": self.TEAM3.id, "user_id": self.USER3.id}), content_type="application/x-www-form-urlencoded").toDict()
self.assertFalse(join_response["success"])
# check that users not in team
members_response = self.client.post('/api/teams.members', urlencode({"team_id": self.TEAM3.id}), content_type="application/x-www-form-urlencoded").toDict()
self.assertTrue(members_response["success"])
members_num = len(members_response["data"])
# test join as same user
signinAs(self.client, self.USER3)
join_response = self.client.post('/api/teams.join', urlencode({"team_id": self.TEAM3.id, "user_id": self.USER3.id}), content_type="application/x-www-form-urlencoded").toDict()
self.assertTrue(join_response["success"])
# check that users now in team
signinAs(self.client, self.SADMIN)
members_response = self.client.post('/api/teams.members', urlencode({"team_id": self.TEAM3.id}), content_type="application/x-www-form-urlencoded").toDict()
self.assertTrue(members_response["success"])
self.assertTrue(len(members_response["data"]) > members_num)
def test_addMember(self):
# test add member not signed in
signinAs(self.client, None)
add_response = self.client.post('/api/teams.addMember', urlencode({"team_id": self.TEAM4.id, "user_id": self.USER4.id}), content_type="application/x-www-form-urlencoded").toDict()
self.assertFalse(add_response["success"])
        # test add member as
from gym import spaces
import numpy as np
class Building:
def __init__(self, buildingId, dhw_storage = None, cooling_storage = None, heating_storage = None, electrical_storage = None, dhw_heating_device = None, hvac_device = None, save_memory = True):
"""
Args:
buildingId (int)
dhw_storage (EnergyStorage)
cooling_storage (EnergyStorage)
heating_storage (EnergyStorage)
electrical_storage (Battery)
dhw_heating_device (ElectricHeater or HeatPump)
hvac_device (HeatPump)
"""
# Building attributes
self.building_type = None
self.climate_zone = None
self.solar_power_capacity = None
self.buildingId = buildingId
self.dhw_storage = dhw_storage
self.cooling_storage = cooling_storage
self.heating_storage = heating_storage
self.electrical_storage = electrical_storage
self.dhw_heating_device = dhw_heating_device
self.hvac_device = hvac_device
self.observation_space = None
self.action_space = None
self.time_step = 0
self.sim_results = {}
self.save_memory = save_memory
if self.dhw_storage is not None:
self.dhw_storage.reset()
if self.cooling_storage is not None:
self.cooling_storage.reset()
if self.heating_storage is not None:
self.heating_storage.reset()
if self.electrical_storage is not None:
self.electrical_storage.reset()
if self.dhw_heating_device is not None:
self.dhw_heating_device.reset()
if self.hvac_device is not None:
self.hvac_device.reset()
self._electric_consumption_cooling_storage = 0.0
self._electric_consumption_heating_storage = 0.0
self._electric_consumption_dhw_storage = 0.0
self.cooling_demand_building = []
self.heating_demand_building = []
self.dhw_demand_building = []
self.electric_consumption_appliances = []
self.electric_generation = []
self.electric_consumption_cooling = []
self.electric_consumption_heating = []
self.electric_consumption_cooling_storage = []
self.electric_consumption_heating_storage = []
self.electric_consumption_dhw = []
self.electric_consumption_dhw_storage = []
self.net_electric_consumption = []
self.net_electric_consumption_no_storage = []
self.net_electric_consumption_no_pv_no_storage = []
self.hvac_device_to_building = []
self.cooling_storage_to_building = []
self.heating_storage_to_building = []
self.hvac_device_to_cooling_storage = []
self.hvac_device_to_heating_storage = []
self.cooling_storage_soc = []
self.heating_storage_soc = []
self.dhw_heating_device_to_building = []
self.dhw_storage_to_building = []
self.dhw_heating_device_to_storage = []
self.dhw_storage_soc = []
self.electrical_storage_electric_consumption = []
self.electrical_storage_soc = []
self.__validate_energy_models()
def set_state_space(self, high_state, low_state):
# Setting the state space and the lower and upper bounds of each state-variable
self.observation_space = spaces.Box(low=low_state, high=high_state, dtype=np.float32)
def set_action_space(self, max_action, min_action):
# Setting the action space and the lower and upper bounds of each action-variable
self.action_space = spaces.Box(low=min_action, high=max_action, dtype=np.float32)
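    # A minimal sketch of how the two setters above might be called; the bound values
    # below are placeholders, not taken from any real configuration:
    #
    #   low_state  = np.array([0.0,  0.0,  0.0], dtype=np.float32)
    #   high_state = np.array([1.0, 24.0, 40.0], dtype=np.float32)
    #   building.set_state_space(high_state, low_state)
    #   building.set_action_space(np.array([1.0]), np.array([-1.0]))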
def set_storage_electrical(self, action):
"""
Args:
            action (float): Amount of electrical energy stored (added) in that time-step as a ratio of the maximum capacity of the electrical storage device.
            -1 <= action < 0 : Energy Storage Unit releases energy into the building and its State of Charge decreases
            0 < action <= 1 : Energy Storage Unit receives energy from the energy supply device and its State of Charge increases
            The actions are always subject to the constraints of the power capacity of the electrical storage device and its own state of charge
        Return:
            electrical_energy_balance (float): electricity consumed (+) or supplied (-) by the electrical storage device in that time-step
"""
electrical_energy_balance = self.electrical_storage.charge(action*self.electrical_storage.capacity)
if self.save_memory == False:
self.electrical_storage_electric_consumption.append(electrical_energy_balance)
self.electrical_storage_soc.append(self.electrical_storage._soc)
self.electrical_storage.time_step += 1
return electrical_energy_balance
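    # Illustration of the scaling above (made-up numbers): with a battery capacity of
    # 10 kWh, action = 0.5 requests a 5 kWh charge and action = -0.3 requests a 3 kWh
    # discharge; charge() is assumed to apply the device's own power and state-of-charge
    # limits and to return the energy balance that was actually realised.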
def set_storage_dhw(self, action):
"""
Args:
action (float): Amount of heating energy stored (added) in that time-step as a ratio of the maximum capacity of the energy storage device.
            -1 <= action < 0 : Energy Storage Unit releases energy into the building and its State of Charge decreases
0 < action <= 1 : Energy Storage Unit receives energy from the energy supply device and its State of Charge increases
The actions are always subject to the constraints of the power capacity of the heating supply unit, the DHW demand of the
building (which limits the maximum amount of DHW that the energy storage can provide to the building), and the state of charge of the
energy storage unit itself
Return:
elec_demand_heating (float): electricity consumption needed for space heating and heating storage
"""
# Heating power that could be possible to supply to the storage device to increase its State of Charge once the heating demand of the building has been satisfied
heat_power_avail = self.dhw_heating_device.get_max_heating_power() - self.sim_results['dhw_demand'][self.time_step]
# The storage device is charged (action > 0) or discharged (action < 0) taking into account the max power available and that the storage device cannot be discharged by an amount of energy greater than the energy demand of the building.
heating_energy_balance = self.dhw_storage.charge(max(-self.sim_results['dhw_demand'][self.time_step], min(heat_power_avail, action*self.dhw_storage.capacity)))
if self.save_memory == False:
self.dhw_heating_device_to_storage.append(max(0, heating_energy_balance))
self.dhw_storage_to_building.append(-min(0, heating_energy_balance))
self.dhw_heating_device_to_building.append(self.sim_results['dhw_demand'][self.time_step] + min(0, heating_energy_balance))
self.dhw_storage_soc.append(self.dhw_storage._soc)
# The energy that the energy supply device must provide is the sum of the energy balance of the storage unit (how much net energy it will lose or get) plus the energy supplied to the building. A constraint is added to guarantee it's always positive.
heating_energy_balance = max(0, heating_energy_balance + self.sim_results['dhw_demand'][self.time_step])
# Electricity consumed by the energy supply unit
elec_demand_heating = self.dhw_heating_device.set_total_electric_consumption_heating(heat_supply = heating_energy_balance)
# Electricity consumption used (if +) or saved (if -) due to the change in the state of charge of the energy storage device
self._electric_consumption_dhw_storage = elec_demand_heating - self.dhw_heating_device.get_electric_consumption_heating(heat_supply = self.sim_results['dhw_demand'][self.time_step])
if self.save_memory == False:
self.electric_consumption_dhw.append(elec_demand_heating)
self.electric_consumption_dhw_storage.append(self._electric_consumption_dhw_storage)
self.dhw_heating_device.time_step += 1
return elec_demand_heating
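    # Worked example of the clamping above (made-up numbers): with dhw_demand = 2 kWh,
    # heat_power_avail = 3 kWh, storage capacity = 10 kWh and action = 0.8, the requested
    # charge of 0.8 * 10 = 8 kWh is clamped to min(3, 8) = 3 kWh; with action = -0.5 the
    # requested 5 kWh discharge is clamped to max(-2, -5) = -2 kWh, so the storage never
    # supplies more than the building's DHW demand in a single time-step.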
def set_storage_cooling_and_heating(self, action_cooling, action_heating):
elec_demand_cooling = self.__set_storage_cooling(action_cooling)
elec_demand_heating = self.__set_storage_heating(action_heating)
self.hvac_device.time_step += 1
return elec_demand_cooling, elec_demand_heating
def __set_storage_cooling(self, action):
"""
Args:
action (float): Amount of cooling energy stored (added) in that time-step as a ratio of the maximum capacity of the energy storage device.
            -1 <= action < 0 : Energy Storage Unit releases energy into the building and its State of Charge decreases
            0 < action <= 1 : Energy Storage Unit receives energy from the energy supply device and its State of Charge increases
The actions are always subject to the constraints of the power capacity of the cooling supply unit, the cooling demand of the
building (which limits the maximum amount of cooling energy that the energy storage can provide to the building), and the state of charge of the energy storage unit itself
Return:
elec_demand_cooling (float): electricity consumption needed for space cooling and cooling storage
"""
        # Cooling power that could be supplied to the storage device to increase its State of Charge once the cooling demand of the building has been satisfied
cooling_power_avail = self.hvac_device.get_max_cooling_power() - self.sim_results['cooling_demand'][self.time_step]
# The storage device is charged (action > 0) or discharged (action < 0) taking into account the max power available and that the storage device cannot be discharged by an amount of energy greater than the energy demand of the building.
cooling_energy_balance = self.cooling_storage.charge(max(-self.sim_results['cooling_demand'][self.time_step], min(cooling_power_avail, action*self.cooling_storage.capacity)))
if self.save_memory == False:
self.hvac_device_to_cooling_storage.append(max(0, cooling_energy_balance))
self.cooling_storage_to_building.append(-min(0, cooling_energy_balance))
self.hvac_device_to_building.append(self.sim_results['cooling_demand'][self.time_step] + min(0, cooling_energy_balance))
self.cooling_storage_soc.append(self.cooling_storage._soc)
# The energy that the energy supply device must provide is the sum of the energy balance of the storage unit (how much net energy it will lose or get) plus the energy supplied to the building. A constraint is added to guarantee it's always positive.
cooling_energy_balance = max(0, cooling_energy_balance + self.sim_results['cooling_demand'][self.time_step])
# Electricity consumed by the energy supply unit
elec_demand_cooling = self.hvac_device.set_total_electric_consumption_cooling(cooling_supply = cooling_energy_balance)
# Electricity consumption used (if +) or saved (if -) due to the change in the state of charge of the energy storage device
self._electric_consumption_cooling_storage = elec_demand_cooling - self.hvac_device.get_electric_consumption_cooling(cooling_supply = self.sim_results['cooling_demand'][self.time_step])
if self.save_memory == False:
self.electric_consumption_cooling.append(np.float32(elec_demand_cooling))
self.electric_consumption_cooling_storage.append(np.float32(self._electric_consumption_cooling_storage))
return elec_demand_cooling
def __set_storage_heating(self, action):
"""
Args:
action (float): Amount of heating energy stored (added) in that time-step as a ratio of the maximum capacity of the energy storage device.
            -1 <= action < 0 : Energy Storage Unit releases energy into the building and its State of Charge decreases
            0 < action <= 1 : Energy Storage Unit receives energy from the energy supply device and its State of Charge increases
            The actions are always subject to the constraints of the power capacity of the heating supply unit, the heating demand of the
building (which limits the maximum amount of heating energy that the energy storage can provide to the building), and the state of charge of the energy storage unit itself
Return:
elec_demand_heating (float): electricity consumption needed for space heating and heating storage
"""
        # Heating power that
# -*- coding: utf-8 -*-
#
#
# Brainbow segmentation Python XTension
#
# <CustomTools>
# <Menu name = "Brainbow plugins">
# <Item name="Brainbow PCA segmentation (Three component selection)" icon="Python" tooltip="Brainbow segmentation based on PCA.">
# <Command>PythonXT::XTBB(%i)</Command>
# </Item>
# </Menu>
# </CustomTools>
#
#
# Copyright (c) 2016 <NAME> <<EMAIL>>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# vim: set ts=4 sts=4 sw=4 expandtab smartindent:
from __future__ import print_function
import matplotlib as mpl
mpl.use('TkAgg')
from mpl_toolkits.mplot3d import Axes3D
import nativebb
import BBDialog
import Tkinter as tk
import tkFileDialog
import ttk
import matplotlib.pyplot as plt
from matplotlib.figure import Figure
import matplotlib.patheffects
import matplotlib.patches as patches
from mpl_toolkits.axes_grid1 import make_axes_locatable
from mpl_toolkits.axes_grid1.inset_locator import inset_axes
import numpy as np
import sys, traceback
import line_editor
import libpat
import time
import ast
import cPickle
import ImarisLib
import BridgeLib
from PIL import Image
import libatrous
import hotswap
DEBUG = True
TWOBYTWO = 0
THREEDEE = 1
H3 = 2
V3 = 3
def onHotswap():
"""
    Hotswap callback: prints a timestamp each time the module code is hot-swapped.
"""
t = time.localtime(time.time())
st = time.strftime("Hotswap: %Y-%m-%d %H:%M:%S", t)
print(st)
###########################################################################
## Main application module
###########################################################################
class MyModule:
def __init__(self,vImaris):
self.vImaris = vImaris
#Use a clone
print("Making a clone...")
#self.vDataSet = vImaris.GetDataSet()
self.vDataSet = vImaris.GetDataSet().Clone()
print(" done!")
self.interactor_c12 = None
self.interactor_c13 = None
self.interactor_c32 = None
#self.layout = THREEDEE
self.layout = TWOBYTWO
self.selection = None
self.ssize = 40000
self.nbit = 16
self.maxval = 65535
nx = self.vDataSet.GetSizeX()
ny = self.vDataSet.GetSizeY()
nz = self.vDataSet.GetSizeZ()
nc = self.vDataSet.GetSizeC()
self.Dialog=BBDialog.BBDialog(nsel=2)
self.Dialog.set_icon(BridgeLib.GetIcon())
self.Dialog.SetDefaults()
self.Dialog.DoImport = self.DoImport
self.Dialog.DoExport = self.DoExport
self.Dialog.ctrl_r.config(from_=1, to=nc)
self.Dialog.ctrl_g.config(from_=1, to=nc)
self.Dialog.ctrl_b.config(from_=1, to=nc)
self.Dialog.scale_bt.bind("<ButtonRelease-1>", self.do_release)
self.Dialog.scale_wt.bind("<ButtonRelease-1>", self.do_release)
self.Dialog.scale_w.bind("<ButtonRelease-1>", self.do_release)
#Setting the grid resolution
resx = float(self.vDataSet.GetExtendMaxX()) / nx
resy = float(self.vDataSet.GetExtendMaxY()) / ny
resz = float(self.vDataSet.GetExtendMaxZ()) / nz
libatrous.set_grid(resx, resy, resz)
#For now, only save the parameters for the current channel. When changing channel, this data is erased.
self.arrayvar_last = None
self.sel0_comp = (0,1)
self.sel1_comp = (0,2)
self.sel2_comp = (2,1)
self.handle_3d = None
self.line3d = None
self.scat_12 = None
self.scat_13 = None
self.scat_32 = None
#This is where we keep rgb...
self.rgb = np.zeros((nx*ny*nz,3),BridgeLib.GetType(self.vDataSet))
fig = self.Dialog.figure
if self.layout == THREEDEE:
self.ax1 = ax1 = fig.add_subplot(2, 2, 1, aspect='equal')
self.ax2 = ax2 = fig.add_subplot(2, 2, 2, aspect='equal')
self.ax3 = ax3 = fig.add_subplot(2, 2, 3, projection='3d')
self.ax4 = ax4 = fig.add_subplot(2, 2, 4, aspect='equal')
elif self.layout == TWOBYTWO:
self.ax2 = ax2 = fig.add_subplot(2, 2, 1, aspect='equal') #13
self.ax4 = ax4 = fig.add_subplot(2, 2, 2, aspect='equal') #image
self.ax1 = ax1 = fig.add_subplot(2, 2, 3, aspect='equal') #12
self.ax3 = ax3 = fig.add_subplot(2, 2, 4, aspect='equal') #32
elif self.layout == V3:
self.ax3 = None
self.ax1 = ax1 = fig.add_subplot(3, 1, 1, aspect='equal')
self.ax2 = ax2 = fig.add_subplot(3, 1, 2, aspect='equal')
self.ax4 = ax4 = fig.add_subplot(3, 1, 3, aspect='equal')
else: #if self.layout == H3:
self.ax3 = None
self.ax1 = ax1 = fig.add_subplot(1, 3, 1, aspect='equal')
self.ax2 = ax2 = fig.add_subplot(1, 3, 2, aspect='equal')
self.ax4 = ax4 = fig.add_subplot(1, 3, 3, aspect='equal')
self.InitDialog()
#self.DoImport()
self.Dialog.mainloop()
def InitDialog(self):
#Build the dialog
#build the figure
nc = self.vDataSet.GetSizeC()
#fig = self.Dialog.figure
#plt.show()
#self.Dialog.ExitOK = self.ExitOK
#self.Dialog.ExitCancel = self.ExitCancel
self.Dialog.Update = self.Update
#self.UpdateObjects()
def DoImport(self,pca=True,newset=True,handle=None):
arrayvar = self.Dialog.arrayvar
is_unit = arrayvar["check_unit"] == "on"
cr = int(self.Dialog.arrayvar["channel_r"])-1
cg = int(self.Dialog.arrayvar["channel_g"])-1
cb = int(self.Dialog.arrayvar["channel_b"])-1
nz = self.vDataSet.GetSizeZ()
rgb = []
self.Dialog.config(cursor="wait")
maxval = 0
for i in [cr,cg,cb]:
print("Importing channel %d" % (i+1))
if nz == 1:
d = BridgeLib.GetDataSlice(self.vDataSet,0,i,0) #.astype(np.float32)
else:
d = BridgeLib.GetDataVolume(self.vDataSet,i,0) #.astype(np.float32)
rgb.append(d)
m = np.max(d)
if m > maxval:
maxval = m
self.nbit = int(np.ceil(np.log2(maxval)))
self.maxval = pow(2,self.nbit)-1
print("Using %d as maxval (maxval dataset = %d)" % (maxval,self.maxval))
self.dataset = libpat.Image(self.nbit)
self.dataset.set_data(rgb)
print("doing the pca!")
threshold = 0
if pca is True:
self.dataset.init_pca()
self.Dialog.config(cursor="")
print("plotting some crosses...")
self.dataset.do_pca2d(threshold,unit=is_unit)
self.dataset.set_ssize(self.ssize)
h = handle
if is_unit and h is not None:
h = self.dataset.get_rgb_col(h,dtype=int)
c0,c1 = self.sel0_comp
self.scat_12 = self.dataset.plot_pca2d_dots(c0=c0,c1=c1,axs=self.ax1,scat=self.scat_12, unit=is_unit,handle=h)
c0,c1 = self.sel1_comp
self.scat_13 = self.dataset.plot_pca2d_dots(c0=c0,c1=c1,axs=self.ax2,scat=self.scat_13, unit=is_unit,handle=h)
if self.layout == TWOBYTWO:
c0,c1 = self.sel2_comp
self.scat_32 = self.dataset.plot_pca2d_dots(c0=c0,c1=c1,axs=self.ax3,scat=self.scat_32, unit=is_unit,handle=h)
self.im = self.dataset.display_rgb(axs=self.ax4)
if self.layout == THREEDEE:
self.scat_3d = self.dataset.plot_pca3d_dots(axs=self.ax3, unit=is_unit)
self.display_3dline(self.ax3, unit=is_unit)
#fig = self.Dialog.figure
#fig.canvas.draw()
self.InitPCA(handle=handle)
def DoExport(self,*args):
if self.selection is None:
return
name = "Cluster"
data = self.dataset.get_luma(self.selection)
#col = self.dataset.get_rgb_col(wh=self.selection,dtype=int)
col = self.dataset.get_rgb_col(self.handle_3d, dtype=int) #wh=selection)
self.AddChannel(data,name,col, add_filter=True,threshold=100)
def DoInput(self,*args):
file_path = tkFileDialog.asksaveasfilename(
defaultextension=".tif", filetypes = [
("TIFF bitmap format",'*.tif'),
("PNG bitmap format",'*.png'),
("All image files",('*.tif','*.png'))],
parent=self.Dialog, title="Save the input image")
if file_path == "":
return
rgb_uint8 = self.dataset.get_rgb_image()
img = Image.fromarray(rgb_uint8)
img.save(file_path)
def DoOutput(self,*args):
file_path = tkFileDialog.asksaveasfilename(
defaultextension=".tif", filetypes = [
("TIFF bitmap format",'*.tif'),
("PNG bitmap format",'*.png'),
("All image files",('*.tif','*.png'))],
parent=self.Dialog, title="Save the output image")
if file_path == "":
return
rgb_uint8 = self.dataset.get_rgb_image(wh=self.selection)
img = Image.fromarray(rgb_uint8)
img.save(file_path)
def DoOpenPCA(self):
self.Dialog.file_opt['title'] = 'Open a PCA matrix'
filename = tkFileDialog.askopenfilename(**self.Dialog.file_opt)
if filename == "":
return
f = open(filename, 'rb')
pcanode = cPickle.load(f)
self.dataset.pcanode = pcanode
if hasattr(pcanode,"gui_values"):
arrayvar = self.Dialog.arrayvar
arrayvar.set(pcanode.gui_values)
if hasattr(pcanode,"handle"):
self.handle_3d = pcanode.handle
#print("found a handle!",pcanode.handle)
#handle = pcanode(np.array([pcanode.handle]))[0]
value_c0 = self.handle_3d[0]
value_c1 = self.handle_3d[1]
value_c2 = self.handle_3d[2]
self.interactor_c12.set_handle((value_c0,value_c1))
self.interactor_c13.set_handle((value_c0,value_c2))
self.interactor_c32.set_handle((value_c2,value_c1))
else:
handle = self.get_handle()
self.do_release()
#self.DoImport(pca=False,newset=False,handle=handle)
#fig = self.Dialog.figure
#fig.canvas.draw()
def DoSavePCA(self):
self.Dialog.file_opt['title'] = 'Save the current PCA matrix'
filename = tkFileDialog.asksaveasfilename(**self.Dialog.file_opt)
if filename == "":
return
print("saving...",filename)
arrayvar = self.Dialog.arrayvar
gui_values = arrayvar.get()
handle = self.get_handle()
#handle = self.dataset.get_rgb_col(self.get_handle(),dtype=int)
print(">>>>",handle)
self.dataset.pcanode.handle = handle
self.dataset.pcanode.gui_values = gui_values
self.dataset.pcanode.save(filename,protocol=1)
def DoRecompute(self,*args):
self.DoImport(pca=True,newset=False,handle=self.get_handle())
fig = self.Dialog.figure
fig.canvas.draw()
def DoUnit(self,*args):
pass
def DoReport(self,*args):
file_path = tkFileDialog.asksaveasfilename(
defaultextension=".html", filetypes = [
("HTML reports",'*.html')],
parent=self.Dialog, title="Save a HTML report")
if file_path == "":
return
self.Dialog.config(cursor="wait")
labels = self.Dialog.get_labels()
arrayvar = self.Dialog.arrayvar
values = arrayvar.get()
keys = values.keys() #self.Dialog._controlnames
keys.sort()
#keys = ["bars_sigma","bars_rgbl","scale_bt","check_black","scale_wt","check_white"
ptm = self.handle_3d
col = self.dataset.get_rgb_col(ptm)
html_string = '''<html>
<head>
<link rel="stylesheet" href="https://maxcdn.bootstrapcdn.com/bootstrap/3.3.1/css/bootstrap.min.css">
<style>body{ margin:0 100; background:whitesmoke; }
.foo { border: 1px solid rgba(0, 0, 0, .2); background: %s;}
</style>
</head>
<body>
<h2>PCA coordinates:</h2>
<p class="foo"><b>HTML colour code:</b> %s</p>
''' % (col,col)
col = self.dataset.get_rgb_col(ptm,dtype=float)
html_string += '<p><b>Black point:</b> (%.1f,%.1f,%.1f)</p>\n' % tuple(self.ptb.tolist())
html_string += '<p><b>White point:</b> (%.1f,%.1f,%.1f)</p>\n' % tuple(self.ptw.tolist())
html_string += '<p><b>Handle point:</b> (%.1f,%.1f,%.1f)</p>\n' % tuple(ptm.tolist())
html_string += '<p><b>Handle RGB col:</b> (%.1f,%.1f,%.1f)\n' % tuple(col.tolist())
html_string += '<h2>PCA Forward matrix:</h2>\n'
mat = self.dataset.pcanode.get_projmatrix()
avg = self.dataset.pcanode.avg
rgb = ["R","G","B"]
pca = ["C0","C1","C2"]
for pci in range(3):
html_string += '<p><b>%s</b> = ' % pca[pci]
for ci in range(3):
html_string += "%.2f * (<b>%s</b> - %.2f) + "% (mat[ci,pci],rgb[ci],avg[ci])
html_string = html_string[:-3]+"</p>\n"
html_string += '<h2>PCA Back-projection:</h2>\n'
mat = self.dataset.pcanode.get_recmatrix()
rgb = ["R","G","B"]
pca = ["C0","C1","C2"]
for ci in range(3):
html_string += '<p><b>%s</b> = ' % rgb[ci]
for pci in range(3):
html_string += "%.2f * <b>%s</b> + "% (mat[pci,ci],pca[pci])
html_string = html_string+"%.2f</p>\n" % avg[ci]
html_string += '<h2>Other values:</h2>\n'
#print(labels.keys())
#print(values.keys())
for key in keys:
if key not in labels.keys() or labels[key] is None or labels[key] == '':
label = key
else:
label = labels[key]
if label.startswith("RGB") or "polygons" in label or label == "menuitem":
continue
html_string += '<p><b>%s:</b> %s</p>\n' % (label,str(values[key]))
html_string += '''
</body>
</html>'''
f = open(file_path,"w")
f.write(html_string)
f.close()
self.Dialog.config(cursor="")
def DoScreenshot(self,*args):
arrayvar = self.Dialog.arrayvar
has_polygons = arrayvar["PCA_Figures_Include_Polygons"] == "on"
file_path = tkFileDialog.asksaveasfilename(
defaultextension=".tif", filetypes = [
("TIFF bitmap format",'*.tif'),
("PDF vector format",'*.pdf'),
("PNG bitmap format",'*.png'),
("SVG vector format",'*.svg'),
("EMF vector format",'*.emf'),
("All image files",('*.tif','*.pdf','*.png','*.svg','*.emf'))],
parent=self.Dialog, title="Save a High Resolution Figure")
if file_path == "":
return
self.Dialog.config(cursor="wait")
fig = plt.figure(figsize=(15,15), dpi=100)
ax2 = fig.add_subplot(2, 2, 1, aspect='equal')
ax4 = fig.add_subplot(2, 2, 2, aspect='equal')
ax1 = fig.add_subplot(2, 2, 3, aspect='equal')
ax3 = fig.add_subplot(2, 2, 4, aspect='equal')
c0,c1 = self.sel0_comp
scat_12 = self.dataset.plot_pca2d_dots(c0=c0,c1=c1,axs=ax1)
c0,c1 = self.sel1_comp
scat_13 = self.dataset.plot_pca2d_dots(c0=c0,c1=c1,axs=ax2)
c0,c1 = self.sel2_comp
scat_32 = self.dataset.plot_pca2d_dots(c0=c0,c1=c1,axs=ax3)
im = self.dataset.display_rgb(axs=ax4)
print("done!")
#use self.interactor_c12 to populate the new ax1...
if has_polygons:
self.interactor_c12.draw_things(ax1)
self.interactor_c13.draw_things(ax2)
self.interactor_c32.draw_things(ax3)
#Then display the image in the second graph
print("generating the image...")
rgb_uint8 = self.im.get_array()
ax4.imshow(rgb_uint8)
print("done!")
print("saving the | |
"""!
@brief SNR losses efficient computation in pytorch.
@author <NAME> {<EMAIL>}
    @copyright University of Illinois at Urbana-Champaign
"""
import torch
import torch.nn as nn
import itertools
from torch.nn.modules.loss import _Loss
class FixedMixIT1Source2NoisesSNRwithZeroRefs(nn.Module):
"""!
Class for SNR computation between 3 reconstructed signals and
    1 reference speaker and 2 reference noises."""
def __init__(self,
zero_mean=False,
backward_loss=True,
supervised=True,
inactivity_threshold=-40.,
return_individual_results=False):
"""
Initialization for the results and torch tensors that might
be used afterwards
"""
super().__init__()
self.perform_zero_mean = zero_mean
self.backward_loss = backward_loss
self.supervised = supervised
self.permutations = list(itertools.permutations(
torch.arange(2)))
self.permutations_tensor = torch.LongTensor(self.permutations)
self.inactivity_threshold = inactivity_threshold
self.return_individual_results = return_individual_results
def normalize_input(self, signals):
if self.perform_zero_mean:
return signals - torch.mean(signals, dim=-1, keepdim=True)
return signals
@staticmethod
def dot(x, y):
return torch.sum(x * y, dim=-1, keepdim=True)
def compute_permuted_snrs_and_inactive(self,
pr_batch,
t_batch,
activity_mask,
denom_stabilizer,
eps=1e-9):
# Compute SNR for the active and inactive sources
nom = self.dot(t_batch, t_batch) + eps
error = pr_batch - t_batch
denom = self.dot(error, error) + denom_stabilizer + eps
# return 10. * activity_mask * torch.log10(nom / denom + eps) - (
# (~activity_mask) * torch.abs(denom_stabilizer))
# return - torch.abs(error)
return 10. * activity_mask * torch.log10(nom / denom + eps)
def compute_permuted_snrs(self,
permuted_pr_batch,
t_batch,
eps=1e-9):
s_t = t_batch
e_t = permuted_pr_batch - s_t
snrs = 10 * torch.log10(self.dot(s_t, s_t) /
(self.dot(e_t, e_t) + eps))
return snrs
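        # The value above is the standard signal-to-noise ratio in dB,
        # SNR = 10 * log10(||s||^2 / ||s - s_hat||^2).
        # Quick sanity check with made-up numbers: if the error energy is 1/100 of the
        # target energy, the method returns 10 * log10(100) = 20 dB.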
def compute_snr(self,
pr_batch,
ref_sources_batch,
ref_noises_batch,
initial_mixture,
eps=1e-9,
thresh=0.001):
mixture_power = self.dot(initial_mixture, initial_mixture)
mixture_power_repeated = mixture_power.repeat(1, 2, 1)
        # Compute the SNR obtained by matching the estimated speech signal {0} to the
        # reference speaker (supervised case), or by reconstructing the reference
        # mixtures from the estimates (unsupervised case).
if self.supervised:
ref_sources_powers = self.dot(ref_sources_batch, ref_sources_batch)
sources_input_snr = 10. * torch.log10(
ref_sources_powers / (mixture_power + eps))
sources_activity_mask = sources_input_snr.ge(
self.inactivity_threshold)
sources_active_stabilizer = sources_activity_mask * ref_sources_powers
sources_inactive_stabilizer = (~sources_activity_mask) * mixture_power_repeated
sources_denom_stabilizer = thresh * (sources_active_stabilizer + sources_inactive_stabilizer)
num_active_sources = sources_activity_mask.sum([-2, -1]).unsqueeze(-1)
best_snr = self.compute_permuted_snrs_and_inactive(
pr_batch[:, 0:1], ref_sources_batch, sources_activity_mask,
sources_denom_stabilizer, eps=eps)
else:
# For the unsupervised case take the maximum SNR by reconstructing
# the reference mixtures with specific order.
ref_speaker_mix = ref_sources_batch + ref_noises_batch[:, 0:1]
ref_speaker_mix_powers = self.dot(ref_speaker_mix, ref_speaker_mix)
mixtures_input_snr = 10. * torch.log10(
ref_speaker_mix_powers / (mixture_power + eps))
ref_speaker_mix_mask = mixtures_input_snr.ge(self.inactivity_threshold)
speaker_mix_active_stabilizer = ref_speaker_mix_mask * ref_speaker_mix_powers
speaker_mix_inactive_stabilizer = (~ref_speaker_mix_mask) * mixture_power_repeated
speaker_mix_denom_stabilizer = thresh * (
speaker_mix_active_stabilizer + speaker_mix_inactive_stabilizer)
ref_noise_powers = self.dot(ref_noises_batch, ref_noises_batch)
mixtures_input_snr = 10. * torch.log10(
ref_noise_powers / (mixture_power + eps))
ref_noise_mask = mixtures_input_snr.ge(self.inactivity_threshold)
noise_active_stabilizer = ref_noise_mask * ref_noise_powers
noise_inactive_stabilizer = (~ref_noise_mask) * mixture_power_repeated
noise_denom_stabilizer = thresh * (noise_active_stabilizer + noise_inactive_stabilizer)
snr_l = []
for perm in self.permutations:
permuted_pr_batch = pr_batch[:, 1:][:, perm]
# We assume that always the first estimated source is speech
est_mixture_1 = permuted_pr_batch[:, 0:1] + pr_batch[:, 0:1]
est_mixture_2 = permuted_pr_batch[:, 1:2]
snr_1 = self.compute_permuted_snrs_and_inactive(
est_mixture_1, ref_speaker_mix,
ref_speaker_mix_mask[:, 0:1],
speaker_mix_denom_stabilizer[:, 0:1], eps=eps)
snr_2 = self.compute_permuted_snrs_and_inactive(
est_mixture_2, ref_noises_batch[:, 1:2],
ref_noise_mask[:, 1:2],
noise_denom_stabilizer[:, 1:2], eps=eps)
snr_l.append(snr_1 + snr_2)
all_mix_snrs = torch.cat(snr_l, -1)
best_snr, _ = torch.max(all_mix_snrs.sum(-2), -1)
if not self.return_individual_results:
best_snr = best_snr.mean()
if self.backward_loss:
return -best_snr
return best_snr
def forward(self,
pr_batch,
ref_sources_batch,
ref_noises_batch,
input_mixture,
eps=1e-9):
"""!
:param pr_batch: Reconstructed wavs: Torch Tensors of size:
            batch_size x 3 x length_of_wavs
:param ref_sources_batch: Target wavs: Torch Tensors of size:
            batch_size x 1 x length_of_wavs
:param ref_noises_batch: Target wavs: Torch Tensors of size:
batch_size x 2 x length_of_wavs
:param input_mixture: Target wavs: Torch Tensors of size:
batch_size x 1 x length_of_wavs
:param eps: Numerical stability constant.
"""
pr_batch = self.normalize_input(pr_batch)
ref_sources_batch = self.normalize_input(ref_sources_batch)
ref_noises_batch = self.normalize_input(ref_noises_batch)
input_mixture = self.normalize_input(input_mixture)
snr_l = self.compute_snr(
pr_batch, ref_sources_batch, ref_noises_batch,
input_mixture, eps=eps)
return snr_l
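# A minimal usage sketch for the loss above. The shapes follow forward()'s docstring,
# random tensors stand in for real audio, and this helper is illustrative only.
def _example_fixed_mixit_1src_2noises_usage(batch=4, samples=16000):
    loss_fn = FixedMixIT1Source2NoisesSNRwithZeroRefs(supervised=True)
    estimates = torch.rand(batch, 3, samples)   # 1 speech estimate + 2 noise estimates
    ref_speech = torch.rand(batch, 1, samples)  # 1 reference speaker
    ref_noises = torch.rand(batch, 2, samples)  # 2 reference noises
    mixture = ref_speech + ref_noises.sum(1, keepdim=True)
    # backward_loss=True by default, so this returns the negated SNR to be minimised.
    return loss_fn(estimates, ref_speech, ref_noises, mixture)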
class FixedMixIT2Sources2NoisesSNRwithZeroRefs(nn.Module):
"""!
Class for SNR computation between 4 reconstructed signals and
2 reference speakers and 2 reference noises."""
def __init__(self,
zero_mean=False,
backward_loss=True,
supervised=True,
sources_weight=0.5,
inactivity_threshold=-40.,
return_individual_results=False):
"""
Initialization for the results and torch tensors that might
be used afterwards
"""
super().__init__()
self.perform_zero_mean = zero_mean
self.backward_loss = backward_loss
self.supervised = supervised
        self.sources_weight = sources_weight
self.permutations = list(itertools.permutations(
torch.arange(2)))
self.permutations_tensor = torch.LongTensor(self.permutations)
self.inactivity_threshold = inactivity_threshold
self.return_individual_results = return_individual_results
def normalize_input(self, signals):
if self.perform_zero_mean:
return signals - torch.mean(signals, dim=-1, keepdim=True)
return signals
@staticmethod
def dot(x, y):
return torch.sum(x * y, dim=-1, keepdim=True)
def compute_permuted_snrs_and_inactive(self,
pr_batch,
t_batch,
activity_mask,
denom_stabilizer,
eps=1e-9):
# Compute SNR for the active and inactive sources
nom = self.dot(t_batch, t_batch) + eps
error = pr_batch - t_batch
denom = self.dot(error, error) + denom_stabilizer + eps
# return 10. * activity_mask * torch.log10(nom / denom + eps) - (
# (~activity_mask) * torch.abs(denom_stabilizer))
# return - torch.abs(error)
return 10. * activity_mask * torch.log10(nom / denom + eps)
def compute_permuted_snrs(self,
permuted_pr_batch,
t_batch,
eps=1e-9):
s_t = t_batch
e_t = permuted_pr_batch - s_t
snrs = 10 * torch.log10(self.dot(s_t, s_t) /
(self.dot(e_t, e_t) + eps))
return snrs
def compute_snr(self,
pr_batch,
ref_sources_batch,
ref_noises_batch,
initial_mixture,
eps=1e-9,
thresh=0.001):
mixture_power = self.dot(initial_mixture, initial_mixture)
mixture_power_repeated = mixture_power.repeat(1, 2, 1)
# Compute the maximum SNR obtained by the combinations of estimated
        # sources {0, 1} -> reference sources {0, 1} and estimated
        # sources {2, 3} -> reference noises {0, 1}.
if self.supervised:
ref_sources_powers = self.dot(ref_sources_batch, ref_sources_batch)
ref_noises_powers = self.dot(ref_noises_batch, ref_noises_batch)
sources_input_snr = 10. * torch.log10(
ref_sources_powers / (mixture_power + eps))
noises_input_snr = 10. * torch.log10(
ref_noises_powers / (mixture_power + eps))
sources_activity_mask = sources_input_snr.ge(
self.inactivity_threshold)
noises_activity_mask = noises_input_snr.ge(
self.inactivity_threshold)
sources_active_stabilizer = sources_activity_mask * ref_sources_powers
sources_inactive_stabilizer = (
~sources_activity_mask) * mixture_power_repeated
sources_denom_stabilizer = thresh * (
sources_active_stabilizer + sources_inactive_stabilizer)
noises_active_stabilizer = noises_activity_mask * ref_noises_powers
noises_inactive_stabilizer = (
~noises_activity_mask) * mixture_power_repeated
noises_denom_stabilizer = thresh * (
noises_active_stabilizer + noises_inactive_stabilizer)
num_active_sources = sources_activity_mask.sum([-2, -1]).unsqueeze(-1)
num_active_noises = noises_activity_mask.sum([-2, -1]).unsqueeze(-1)
sources_snr_l = []
for perm in self.permutations:
permuted_pr_batch = pr_batch[:, :2][:, perm]
snr = self.compute_permuted_snrs_and_inactive(
permuted_pr_batch, ref_sources_batch, sources_activity_mask,
sources_denom_stabilizer, eps=eps)
sources_snr_l.append(snr)
all_sources_snrs = torch.cat(sources_snr_l, -1)
best_sources_snr, _ = torch.max(all_sources_snrs.sum(-2) * num_active_sources, -1)
noises_snr_l = []
for perm in self.permutations:
permuted_pr_batch = pr_batch[:, 2:][:, perm]
snr = self.compute_permuted_snrs_and_inactive(
permuted_pr_batch, ref_noises_batch, noises_activity_mask,
noises_denom_stabilizer, eps=eps)
noises_snr_l.append(snr)
all_noises_snrs = torch.cat(noises_snr_l, -1)
best_noises_snr, _ = torch.max(
all_noises_snrs.sum(-2) * num_active_noises, -1)
best_snr = (self.sources_weight * best_sources_snr +
(1. - self.sources_weight) * best_noises_snr)
else:
# For the unsupervised case take the maximum SNR by reconstructing
# the mixtures.
ref_mixtures = ref_sources_batch + ref_noises_batch
ref_mixtures_powers = self.dot(ref_mixtures, ref_mixtures)
mixtures_input_snr = 10. * torch.log10(
ref_mixtures_powers / (mixture_power + eps))
mixtures_activity_mask = mixtures_input_snr.ge(self.inactivity_threshold)
mixtures_active_stabilizer = mixtures_activity_mask * ref_mixtures_powers
mixtures_inactive_stabilizer = (~mixtures_activity_mask) * mixture_power_repeated
mixtures_denom_stabilizer = thresh * (
mixtures_active_stabilizer + mixtures_inactive_stabilizer)
num_active_mixtures = mixtures_activity_mask.sum([-2, -1]).unsqueeze(-1)
snr_l = []
for perm in self.permutations:
permuted_pr_batch = pr_batch[:, :2][:, perm]
est_mixture_1 = permuted_pr_batch[:, 0:1] + pr_batch[:, 2:3]
est_mixture_2 = permuted_pr_batch[:, 1:2] + pr_batch[:, 3:4]
for ref_mix_perm in self.permutations:
permuted_ref_mixtures = ref_mixtures[:, ref_mix_perm]
permuted_mixtures_activity_mask = mixtures_activity_mask[:, ref_mix_perm]
permuted_mixtures_denom_stabilizer = mixtures_denom_stabilizer[:, ref_mix_perm]
snr_1 = self.compute_permuted_snrs_and_inactive(
est_mixture_1, permuted_ref_mixtures[:, 0:1],
permuted_mixtures_activity_mask[:, 0:1],
permuted_mixtures_denom_stabilizer[:, 0:1], eps=eps)
snr_2 = self.compute_permuted_snrs_and_inactive(
est_mixture_2, permuted_ref_mixtures[:, 1:2],
permuted_mixtures_activity_mask[:, 1:2],
permuted_mixtures_denom_stabilizer[:, 1:2], eps=eps)
# snr_reg = self.compute_permuted_snrs(
# est_mixture_2, est_mixture_1)
# snr_l.append(snr_1 + snr_2 - snr_reg)
snr_l.append(snr_1 + snr_2)
all_mix_snrs = torch.cat(snr_l, -1)
best_snr, _ = torch.max(
all_mix_snrs.sum(-2) * num_active_mixtures, -1)
if not self.return_individual_results:
best_snr = best_snr.mean()
if self.backward_loss:
return -best_snr
return best_snr
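        # In words, the unsupervised branch above: for each permutation, two estimated
        # mixtures are formed by pairing the permuted speech estimates {0, 1} with the
        # noise estimates {2, 3} (e.g. est_0 + est_2 and est_1 + est_3), and each is
        # scored against the two reference mixtures (reference source + reference noise)
        # under both possible reference orderings; the best-scoring assignment defines
        # the mixture-reconstruction (MixIT-style) SNR used as the training signal.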
def forward(self,
pr_batch,
ref_sources_batch,
ref_noises_batch,
input_mixture,
eps=1e-9):
"""!
:param pr_batch: Reconstructed wavs: Torch Tensors of size:
            batch_size x 4 x length_of_wavs
:param ref_sources_batch: Target wavs: Torch Tensors of size:
batch_size x 2 x length_of_wavs
:param ref_noises_batch: Target wavs: Torch Tensors of size:
batch_size x 2 x length_of_wavs
:param input_mixture: Target wavs: Torch Tensors of size:
batch_size x 1 x length_of_wavs
:param eps: Numerical stability constant.
"""
pr_batch = self.normalize_input(pr_batch)
ref_sources_batch = self.normalize_input(ref_sources_batch)
ref_noises_batch = self.normalize_input(ref_noises_batch)
input_mixture = self.normalize_input(input_mixture)
snr_l = self.compute_snr(
pr_batch, ref_sources_batch, ref_noises_batch,
input_mixture, eps=eps)
return snr_l
class PermInvariantSNRwithZeroRefs(nn.Module):
"""!
Class for SNR computation between reconstructed signals and
target wavs with compensation for zero reference signals."""
def __init__(self,
zero_mean=False,
n_sources=None,
backward_loss=True,
valid_permutations=None,
inactivity_threshold=-40.,
return_individual_results=False):
"""
Initialization for the results and torch tensors that might
be used afterwards
"""
super().__init__()
self.perform_zero_mean = zero_mean
self.backward_loss = backward_loss
if valid_permutations is None:
self.permutations = list(itertools.permutations(
torch.arange(n_sources)))
else:
self.permutations = valid_permutations
self.permutations_tensor = torch.LongTensor(self.permutations)
self.n_sources = n_sources
self.inactivity_threshold = inactivity_threshold
self.return_individual_results = return_individual_results
def normalize_input(self, pr_batch, t_batch):
min_len = min(pr_batch.shape[-1],
t_batch.shape[-1])
True
assert "Unexpected input for value_range" in str(e)
assert got_exception == True
def test_parameters_handle_probability_param():
for val in [True, False, 0, 1, 0.0, 1.0]:
p = iap.handle_probability_param(val, "[test1]")
assert isinstance(p, iap.Deterministic)
assert p.value == int(val)
for val in [0.0001, 0.001, 0.01, 0.1, 0.9, 0.99, 0.999, 0.9999]:
p = iap.handle_probability_param(val, "[test2]")
assert isinstance(p, iap.Binomial)
assert isinstance(p.p, iap.Deterministic)
assert val-1e-8 < p.p.value < val+1e-8
det = iap.Deterministic(1)
p = iap.handle_probability_param(det, "[test3]")
assert p == det
got_exception = False
try:
p = iap.handle_probability_param("test", "[test4]")
except Exception as exc:
assert "Expected " in str(exc)
got_exception = True
assert got_exception
got_exception = False
try:
p = iap.handle_probability_param(-0.01, "[test5]")
except AssertionError:
got_exception = True
assert got_exception
got_exception = False
try:
p = iap.handle_probability_param(1.01, "[test6]")
except AssertionError:
got_exception = True
assert got_exception
def test_parameters_force_np_float_dtype():
dtypes = [
(np.float16, np.float16),
(np.float32, np.float32),
(np.float64, np.float64),
(np.uint8, np.float64),
(np.int32, np.float64)
]
for i, (dtype_in, dtype_out) in enumerate(dtypes):
assert iap.force_np_float_dtype(np.zeros((1,), dtype=dtype_in)).dtype == dtype_out,\
"force_np_float_dtype() failed at %d" % (i,)
def test_parameters_both_np_float_if_one_is_float():
a1 = np.zeros((1,), dtype=np.float16)
b1 = np.zeros((1,), dtype=np.float32)
a2, b2 = iap.both_np_float_if_one_is_float(a1, b1)
assert a2.dtype.type == np.float16, a2.dtype.type
assert b2.dtype.type == np.float32, b2.dtype.type
a1 = np.zeros((1,), dtype=np.float16)
b1 = np.zeros((1,), dtype=np.int32)
a2, b2 = iap.both_np_float_if_one_is_float(a1, b1)
assert a2.dtype.type == np.float16, a2.dtype.type
assert b2.dtype.type == np.float64, b2.dtype.type
a1 = np.zeros((1,), dtype=np.int32)
b1 = np.zeros((1,), dtype=np.float16)
a2, b2 = iap.both_np_float_if_one_is_float(a1, b1)
assert a2.dtype.type == np.float64, a2.dtype.type
assert b2.dtype.type == np.float16, b2.dtype.type
a1 = np.zeros((1,), dtype=np.int32)
b1 = np.zeros((1,), dtype=np.uint8)
a2, b2 = iap.both_np_float_if_one_is_float(a1, b1)
assert a2.dtype.type == np.float64, a2.dtype.type
assert b2.dtype.type == np.float64, b2.dtype.type
def test_parameters_draw_distribution_grid():
params = [iap.Deterministic(1), iap.Uniform(0, 1.0)]
graph1 = params[0].draw_distribution_graph(size=(100000,))
graph2 = params[1].draw_distribution_graph(size=(100000,))
graph1_rs = ia.imresize_many_images(np.array([graph1]), sizes=(100, 100))[0]
graph2_rs = ia.imresize_many_images(np.array([graph2]), sizes=(100, 100))[0]
grid_expected = ia.draw_grid([graph1_rs, graph2_rs])
grid_observed = iap.draw_distributions_grid(
params,
rows=None,
cols=None,
graph_sizes=(100, 100),
sample_sizes=[(100000,), (100000,)],
titles=None
)
diff = np.abs(grid_expected.astype(np.int32) - grid_observed.astype(np.int32))
#from scipy import misc
#misc.imshow(np.vstack([grid_expected, grid_observed, diff]))
#print(diff.flatten()[0:100])
assert np.average(diff) < 10
def test_parameters_draw_distribution_graph():
# this test is very rough as we get a not-very-well-defined image out of the function
param = iap.Uniform(0.0, 1.0)
graph_img = param.draw_distribution_graph(title=None, size=(10000,), bins=100)
assert graph_img.ndim == 3
assert graph_img.shape[2] == 3
# at least 10% of the image should be white-ish (background)
nb_white = np.sum(graph_img[..., :] > [200, 200, 200])
nb_all = np.prod(graph_img.shape)
assert nb_white > 0.1 * nb_all
graph_img_title = param.draw_distribution_graph(title="test", size=(10000,), bins=100)
assert graph_img_title.ndim == 3
assert graph_img_title.shape[2] == 3
assert not np.array_equal(graph_img_title, graph_img)
def test_parameters_Binomial():
reseed()
eps = np.finfo(np.float32).eps
param = iap.Binomial(0)
sample = param.draw_sample()
samples = param.draw_samples((10, 5))
assert sample.shape == tuple()
assert samples.shape == (10, 5)
assert sample == 0
assert np.all(samples == 0)
assert param.__str__() == param.__repr__() == "Binomial(Deterministic(int 0))"
param = iap.Binomial(1.0)
sample = param.draw_sample()
samples = param.draw_samples((10, 5))
assert sample.shape == tuple()
assert samples.shape == (10, 5)
assert sample == 1
assert np.all(samples == 1)
assert param.__str__() == param.__repr__() == "Binomial(Deterministic(float 1.00000000))"
param = iap.Binomial(0.5)
sample = param.draw_sample()
    samples = param.draw_samples((10000,))
assert sample.shape == tuple()
assert samples.shape == (10000,)
assert sample in [0, 1]
unique, counts = np.unique(samples, return_counts=True)
assert len(unique) == 2
for val, count in zip(unique, counts):
if val == 0:
assert 5000 - 500 < count < 5000 + 500
elif val == 1:
assert 5000 - 500 < count < 5000 + 500
else:
assert False
param = iap.Binomial(iap.Choice([0.25, 0.75]))
for _ in sm.xrange(10):
samples = param.draw_samples((1000,))
p = np.sum(samples) / samples.size
assert (0.25 - 0.05 < p < 0.25 + 0.05) or (0.75 - 0.05 < p < 0.75 + 0.05)
param = iap.Binomial((0.0, 1.0))
last_p = 0.5
diffs = []
for _ in sm.xrange(30):
samples = param.draw_samples((1000,))
p = np.sum(samples).astype(np.float32) / samples.size
diffs.append(abs(p - last_p))
last_p = p
nb_p_changed = sum([diff > 0.05 for diff in diffs])
assert nb_p_changed > 15
param = iap.Binomial(0.5)
samples1 = param.draw_samples((10, 5), random_state=np.random.RandomState(1234))
samples2 = param.draw_samples((10, 5), random_state=np.random.RandomState(1234))
assert np.array_equal(samples1, samples2)
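    # Note on the tolerance used above: for Binomial(0.5) over 10000 draws, the standard
    # deviation of the count of ones is sqrt(10000 * 0.5 * 0.5) = 50, so the +/- 500
    # window is ten standard deviations wide, loose enough that spurious failures are
    # essentially impossible.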
def test_parameters_Choice():
reseed()
eps = np.finfo(np.float32).eps
param = iap.Choice([0, 1, 2])
sample = param.draw_sample()
samples = param.draw_samples((10, 5))
assert sample.shape == tuple()
assert samples.shape == (10, 5)
assert sample in [0, 1, 2]
assert np.all(np.logical_or(np.logical_or(samples == 0, samples == 1), samples==2))
assert param.__str__() == param.__repr__() == "Choice(a=[0, 1, 2], replace=True, p=None)"
samples = param.draw_samples((10000,))
expected = 10000/3
expected_tolerance = expected * 0.05
for v in [0, 1, 2]:
count = np.sum(samples == v)
assert expected - expected_tolerance < count < expected + expected_tolerance
param = iap.Choice([-1, 1])
sample = param.draw_sample()
samples = param.draw_samples((10, 5))
assert sample.shape == tuple()
assert samples.shape == (10, 5)
assert sample in [-1, 1]
assert np.all(np.logical_or(samples == -1, samples == 1))
param = iap.Choice([-1.2, 1.7])
sample = param.draw_sample()
samples = param.draw_samples((10, 5))
assert sample.shape == tuple()
assert samples.shape == (10, 5)
assert -1.2 - eps < sample < -1.2 + eps or 1.7 - eps < sample < 1.7 + eps
assert np.all(
np.logical_or(
np.logical_and(-1.2 - eps < samples, samples < -1.2 + eps),
np.logical_and(1.7 - eps < samples, samples < 1.7 + eps)
)
)
param = iap.Choice(["first", "second", "third"])
sample = param.draw_sample()
samples = param.draw_samples((10, 5))
assert sample.shape == tuple()
assert samples.shape == (10, 5)
assert sample in ["first", "second", "third"]
assert np.all(
np.logical_or(
np.logical_or(
samples == "first",
samples == "second"
),
samples == "third"
)
)
param = iap.Choice([1+i for i in sm.xrange(100)], replace=False)
samples = param.draw_samples((50,))
seen = [0 for _ in sm.xrange(100)]
for sample in samples:
seen[sample-1] += 1
assert all([count in [0, 1] for count in seen])
param = iap.Choice([0, 1], p=[0.25, 0.75])
samples = param.draw_samples((10000,))
unique, counts = np.unique(samples, return_counts=True)
assert len(unique) == 2
for val, count in zip(unique, counts):
if val == 0:
assert 2500 - 500 < count < 2500 + 500
elif val == 1:
assert 7500 - 500 < count < 7500 + 500
else:
assert False
param = iap.Choice([iap.Choice([0, 1]), 2])
samples = param.draw_samples((10000,))
unique, counts = np.unique(samples, return_counts=True)
assert len(unique) == 3
for val, count in zip(unique, counts):
if val in [0, 1]:
assert 2500 - 500 < count < 2500 + 500
elif val == 2:
assert 5000 - 500 < count < 5000 + 500
else:
assert False
param = iap.Choice([-1, 0, 1, 2, 3])
samples1 = param.draw_samples((10, 5), random_state=np.random.RandomState(1234))
samples2 = param.draw_samples((10, 5), random_state=np.random.RandomState(1234))
assert np.array_equal(samples1, samples2)
got_exception = False
try:
param = iap.Choice(123)
except Exception as exc:
assert "Expected a to be an iterable" in str(exc)
got_exception = True
assert got_exception
got_exception = False
try:
param = iap.Choice([1, 2], p=123)
except Exception as exc:
assert "Expected p to be" in str(exc)
got_exception = True
assert got_exception
got_exception = False
try:
param = iap.Choice([1, 2], p=[1])
except Exception as exc:
assert "Expected lengths of" in str(exc)
got_exception = True
assert got_exception
def test_parameters_DiscreteUniform():
reseed()
eps = np.finfo(np.float32).eps
param = iap.DiscreteUniform(0, 2)
sample = param.draw_sample()
samples = param.draw_samples((10, 5))
assert sample.shape == tuple()
assert samples.shape == (10, 5)
assert sample in [0, 1, 2]
assert np.all(np.logical_or(np.logical_or(samples == 0, samples == 1), samples==2))
assert param.__str__() == param.__repr__() == "DiscreteUniform(Deterministic(int 0), Deterministic(int 2))"
samples = param.draw_samples((10000,))
expected = 10000/3
expected_tolerance = expected * 0.05
for v in [0, 1, 2]:
count = np.sum(samples == v)
assert expected - expected_tolerance < count < expected + expected_tolerance
param = iap.DiscreteUniform(-1, 1)
sample = param.draw_sample()
samples = param.draw_samples((10, 5))
assert sample.shape == tuple()
assert samples.shape == (10, 5)
assert sample in [-1, 0, 1]
assert np.all(np.logical_or(np.logical_or(samples == -1, samples == 0), samples==1))
param = iap.DiscreteUniform(-1.2, 1.2)
sample = param.draw_sample()
samples = param.draw_samples((10, 5))
assert sample.shape == tuple()
assert samples.shape == (10, 5)
assert sample in [-1, 0, 1]
assert np.all(np.logical_or(np.logical_or(samples == -1, samples == 0), samples==1))
param = iap.DiscreteUniform(1, -1)
sample = param.draw_sample()
samples = param.draw_samples((10, 5))
assert sample.shape == tuple()
assert samples.shape == (10, 5)
assert sample in [-1, 0, 1]
assert np.all(np.logical_or(np.logical_or(samples == -1, samples == 0), samples==1))
param = iap.DiscreteUniform(1, 1)
sample = param.draw_sample()
samples = param.draw_samples((100,))
assert sample == 1
assert np.all(samples == 1)
param = iap.Uniform(-1, 1)
# We need to use absolute_import so we can load the coinbase pip module without
# conflicting with our filename of coinbase.py.
from __future__ import absolute_import
import os
import base64
from collections import OrderedDict
import hashlib
import hmac
import json
import time
import urllib
from cdecimal import Decimal
import coinbase.client
from gryphon.lib.exchange import exceptions
from gryphon.lib.exchange import order_types
from gryphon.lib.exchange.exchange_api_wrapper import ExchangeAPIWrapper
from gryphon.lib.logger import get_logger
from gryphon.lib.models.exchange import Balance
from gryphon.lib.models.transaction import Transaction
from gryphon.lib.money import Money
from gryphon.lib.time_parsing import parse
logger = get_logger(__name__)
class CoinbaseBTCUSDExchange(ExchangeAPIWrapper):
def __init__(self, session=None, configuration=None):
super(CoinbaseBTCUSDExchange, self).__init__(session)
self.name = u'COINBASE_BTC_USD'
self.friendly_name = u'Coinbase BTC-USD'
self.base_url = 'https://api.gdax.com'
self.currency = 'USD'
self.bid_string = 'buy'
self.ask_string = 'sell'
self.product_id = 'BTC-USD'
self.price_decimal_precision = 2
# Configurables with defaults.
self.market_order_fee = Decimal('0.0022') # Updated by Gareth on 2016-9-20
self.limit_order_fee = Decimal('0.001')
self.fee = self.market_order_fee
self.fiat_balance_tolerance = Money('0.0001', 'USD')
self.volume_balance_tolerance = Money('0.00000001', 'BTC')
self.max_tick_speed = 1
self.min_order_size = Money('0.001', 'BTC')
self.use_cached_orderbook = False
if configuration:
self.configure(configuration)
def req(self, req_method, url, **kwargs):
try:
kwargs['data'] = json.dumps(kwargs['data'])
except KeyError:
kwargs['data'] = '{}'
kwargs['headers'] = {'Content-Type': 'application/json'}
req = super(CoinbaseBTCUSDExchange, self).req(req_method, url, **kwargs)
return req
def resp(self, req):
response = req.result()
try:
data = response.json(parse_float=Decimal)
except ValueError:
raise exceptions.ExchangeAPIFailureException(self, response)
headers = response.headers
if response.status_code < 200 or response.status_code >= 300:
try:
error_string = data['message']
except KeyError:
error_string = str(data)
error_string = error_string.lower()
if 'notfound' in error_string:
raise exceptions.NoEffectOrderCancelledError()
elif ('order already done' in error_string or
'order not found' in error_string):
raise exceptions.CancelOrderNotFoundError()
elif 'order size is too small' in error_string:
raise exceptions.MinimumOrderSizeError()
elif 'insufficient funds' in error_string:
raise exceptions.InsufficientFundsError()
            # These errors occur randomly (usually when Coinbase is under heavy load).
# We want to return an ExchangeAPIFailureException so that requests get
# retried.
elif ('request timestamp expired' in error_string or
'internal server error' in error_string):
raise exceptions.ExchangeAPIFailureException(self, response)
else:
raise exceptions.ExchangeAPIErrorException(self, error_string)
else:
return data, headers
def pagination_cursors(self, headers):
return {
'prev_cursor': headers.get('CB-BEFORE'),
'next_cursor': headers.get('CB-AFTER')
}
def ledger(self, account_id=None, return_pagination=False, next_cursor=None):
req = self.ledger_req(account_id, next_cursor=next_cursor)
return self.ledger_resp(req, return_pagination=return_pagination)
def ledger_req(self, account_id=None, next_cursor=None):
if not account_id:
account_id = self.load_fiat_account_id()
params = {}
if next_cursor:
params['after'] = next_cursor
endpoint = '/accounts/%s/ledger' % account_id
if params:
endpoint += '?%s' % urllib.urlencode(params)
return self.req('get', endpoint)
def ledger_resp(self, req, return_pagination=False):
response, headers = self.resp(req)
if return_pagination:
return response, self.pagination_cursors(headers)
else:
return response
def _get_recent_trades(self, order_id=None, return_pagination=False, next_cursor=None):
req = self._get_recent_trades_req(order_id, next_cursor=next_cursor)
return self._get_recent_trades_resp(req, return_pagination=return_pagination)
def _get_recent_trades_req(self, order_id=None, next_cursor=None):
params = {
'product_id': self.product_id
}
if order_id:
params['order_id'] = order_id
if next_cursor:
params['after'] = next_cursor
endpoint = '/fills?%s' % urllib.urlencode(params)
return self.req('get', endpoint)
def _get_recent_trades_resp(self, req, return_pagination=False):
response, headers = self.resp(req)
trades = []
for trade in response:
if not trade['settled']:
continue
price = Money(trade['price'], self.currency)
size = Money(trade['size'], 'BTC')
fiat = price * size.amount
our_type = self._order_mode_to_const(trade['side'])
# Strange bug here, delorean isn't parsing the trailing Z on the created_at
# date correctly.
trade_dict = {
'time': int(parse(trade['created_at'][:-1]).epoch),
'trade_id': str(trade['trade_id']),
'order_id': str(trade['order_id']),
'btc': size,
'fiat': fiat,
'fee': Money(trade['fee'], self.currency),
'type': our_type,
}
trades.append(trade_dict)
if return_pagination:
return trades, self.pagination_cursors(headers)
else:
return trades
def transfer(self, transfer_type, btc_amount, coinbase_account_id):
req = self.transfer_req(transfer_type, btc_amount, coinbase_account_id)
return self.transfer_resp(req)
def transfer_req(self, transfer_type, btc_amount, coinbase_account_id):
payload = {
'type': transfer_type,
'amount': str(btc_amount.amount),
'coinbase_account_id': coinbase_account_id
}
return self.req('post', '/transfers', data=payload)
def transfer_resp(self, req):
response, headers = self.resp(req)
return response
def load_wallet_creds(self):
try:
self.wallet_api_key
self.wallet_api_secret
self.wallet_id
except AttributeError:
self.wallet_api_key = self._load_env('COINBASE_BTC_USD_WALLET_API_KEY')
self.wallet_api_secret = self._load_env('COINBASE_BTC_USD_WALLET_API_SECRET')
self.wallet_id = self._load_env('COINBASE_BTC_USD_WALLET_ID')
def load_fiat_account_id(self):
return self._load_env('COINBASE_BTC_USD_FIAT_ACCOUNT_ID')
@property
def wallet_api_client(self):
"""API Client for the regular Coinbase Wallet (not the exchange API)"""
if hasattr(self, '_wallet_api_client'):
return self._wallet_api_client
else:
self.load_wallet_creds()
self._wallet_api_client = coinbase.client.Client(
self.wallet_api_key,
self.wallet_api_secret,
)
return self._wallet_api_client
def get_wallet(self):
self.load_wallet_creds()
return self.wallet_api_client.get_account(self.wallet_id)
def transfer_wallet_balance_into_exchange(self):
wallet = self.get_wallet()
balance = Money(wallet.balance.amount, wallet.balance.currency)
if balance > 0:
self.transfer('deposit', balance, wallet.id)
def check_for_daily_fee_rebate(self, exchange_data):
recent_transactions = self.ledger()
rebates = [tx for tx in recent_transactions if tx['type'] == 'rebate']
for rebate in rebates:
rebate_id = rebate['details']['rebate_id']
# Searching transaction_details for the rebate_id is pretty janky but we
# need to figure out a strategy for handling rebates anyways so this is
# good enough for now.
search_string = '%{}%'.format(rebate_id)
existing_rebate_transaction = exchange_data.transactions\
.filter(Transaction._transaction_details.like('%fee rebate%'))\
.filter(Transaction._transaction_details.like(search_string))\
.first()
if existing_rebate_transaction:
logger.debug('Found rebate, but it has already been recorded (%s)' % rebate_id)
continue
amount = Money(rebate['amount'], self.currency)
logger.debug('Recording rebate for %s' % amount)
transaction_details = {
'notes': 'fee rebate',
'rebate_id': rebate_id,
}
deposit = Transaction(
Transaction.DEPOSIT,
Transaction.IN_TRANSIT,
amount,
exchange_data,
transaction_details,
)
# We deliberately leave the deposit IN_TRANSIT, and the next audit will see
# the balance change and COMPLETE it through the regular deposit_landed
# flow.
# Common Exchange Methods
def load_creds(self):
try:
self.api_key
self.secret
self.passphrase
except AttributeError:
self.api_key = self._load_env('COINBASE_BTC_USD_API_KEY')
self.passphrase = self._load_env('COINBASE_BTC_USD_API_PASSPHRASE')
self.secret = self._load_env('COINBASE_BTC_USD_API_SECRET')
def auth_request(self, req_method, url, request_args):
"""
This modifies request_args.
"""
self.load_creds()
req_method = req_method.upper()
timestamp = unicode(int(round(time.time())))
# This has already been dumped to json by req().
body = request_args['data']
endpoint = url.replace(self.base_url, '')
data = timestamp + req_method + endpoint + body
key = base64.b64decode(self.secret)
sig = base64.b64encode(hmac.new(key, data, hashlib.sha256).digest())
try:
headers = request_args['headers']
except KeyError:
headers = request_args['headers'] = {}
headers.update({
'CB-ACCESS-KEY': self.api_key,
'CB-ACCESS-SIGN': sig,
'CB-ACCESS-PASSPHRASE': self.passphrase,
'CB-ACCESS-TIMESTAMP': timestamp
})
def get_balance_req(self):
return self.req('get', '/accounts')
def get_balance_resp(self, req):
response, headers = self.resp(req)
balance = Balance()
try:
for account in response:
if account['currency'] == 'BTC':
balance['BTC'] = Money(account['available'], 'BTC')
elif account['currency'] == self.currency:
balance[self.currency] = Money(account['available'], self.currency)
except KeyError:
raise exceptions.ExchangeAPIErrorException(self, 'malformed response')
return balance
def get_ticker_req(self, verify=True):
return self.req(
'get',
'/products/%s/stats' % self.product_id,
no_auth=True,
verify=verify,
)
def get_ticker_resp(self, req):
response, headers = self.resp(req)
return {
'high': Money(response['high'], self.currency),
'low': Money(response['low'], self.currency),
'last': None,
'volume': Money(response['volume'], 'BTC')
}
def _get_orderbook_from_api_req(self, verify=True):
orderbook_url = '/products/%s/book?level=3' % self.product_id
return self.req('get', orderbook_url, no_auth=True, verify=verify)
def _get_orderbook_from_api_resp(self, req):
response, headers = self.resp(req)
return response
def place_order_req(self, mode, volume, price, order_type=order_types.LIMIT_ORDER):
mode = self._order_mode_from_const(mode)
if price.currency != self.currency:
raise ValueError('price must be in %s' % self.currency)
if volume.currency != 'BTC':
raise ValueError('volume must be in BTC')
volume_str = '%.8f' % volume.amount
price_str = '%.2f' % price.amount
payload = {
'product_id': self.product_id,
'side': mode,
'size': volume_str,
'price': price_str,
}
if order_type == order_types.POST_ONLY:
payload['post_only'] = True
return self.req('post', '/orders', data=payload)
def place_order_resp(self, req):
response, headers = self.resp(req)
order_id = response['id']
return {'success': True, 'order_id': order_id}
def get_open_orders_req(self):
return self.req('get', '/orders')
def get_open_orders_resp(self, req):
raw_open_orders, headers = self.resp(req)
open_orders = []
for raw_order in raw_open_orders:
mode = self._order_mode_to_const(raw_order['side'])
volume = Money(raw_order['size'], 'BTC')
volume_executed = Money(raw_order['filled_size'], 'BTC')
volume_remaining = volume - volume_executed
order = {
'mode': mode,
'id': str(raw_order['id']),
'price': Money(raw_order['price'], self.currency),
'volume': volume,
'volume_remaining': volume_remaining
}
open_orders.append(order)
return open_orders
def get_order_details(self, order_id):
req = self.get_order_details_req(order_id)
return self.get_order_details_resp(req)
def get_order_details_req(self, order_id):
reqs = {}
endpoint = '/orders/%s' % order_id
reqs['order'] = (self.req('get', endpoint))
# This kind of breaks the req/resp paradigm, but we need to get the results of
# each batch before we know if we need to fetch another batch.
trades = []
trade_batch, pagination = self._get_recent_trades(
order_id,
return_pagination=True,
)
trades += trade_batch
# Getting < 100 trades back means we have reached the end.
while len(trade_batch) >= 100:
time.sleep(1)
trade_batch, pagination = self._get_recent_trades(
order_id,
return_pagination=True,
next_cursor=pagination['next_cursor'],
)
trades += trade_batch
# Instead of returning the requests, we return the results since we have them
# already.
reqs['trades'] = trades
return reqs
def get_order_details_resp(self, reqs):
try:
raw_order, headers = self.resp(reqs['order'])
except exceptions.NoEffectOrderCancelledError:
# Coinbase returns an API Error with the text "NotFound" in it when
# querying orders that were cancelled with no trades, so we return an empty
# order here if that is the response.
result = {
'time_created': None,
'type': None,
'btc_total': Money('0', self.volume_currency),
'fiat_total': Money('0', self.currency),
'trades': [],
}
return result
# This already has the results (see above) so we don't need to .resp() it
our_trades = reqs['trades']
mode = self._order_mode_to_const(raw_order['side'])
total_btc = Money(raw_order['filled_size'], 'BTC')
time_created = int(parse(raw_order['created_at']).epoch)
total_fiat = Money('0', self.currency)
for t in our_trades:
total_fiat += t['fiat']
result = {
'time_created': time_created,
'type': mode,
'btc_total': total_btc,
'fiat_total': total_fiat,
'trades': our_trades
}
return result
def get_multi_order_details_req(self, order_ids):
data = {}
        for
import os
import pathlib
from datetime import datetime
import shutil
import fmpy
from fmpy import *
import yaml
import re
from typing import Any, Dict, List, Union
SIM_CONFIG_NAME_f = lambda model_fp: model_fp.replace(".fmu", "_conf.yaml")
# [TODO] dynamically read FMI version from modelDescription.xml
# ("1.0", "2.0", "3.0")
FMI_VERSION = "2.0"
START_TIME = 0.0
STOP_TIME = 20.0
STEP_SIZE = 0.1
class FMUSimValidation:
def __init__(
self,
model_filepath: str,
user_validation: bool = True,
):
"""Template for validating FMU models for Bonsai integration.
Parameters
----------
model_filepath: str
Filepath to FMU model.
user_validation: bool
If True, model inputs/outputs need to be accepted by user for each run.
If False, YAML config file is used (if exists and valid). Otherwise, FMI
file is read. If FMI model description is also invalid, error is raised.
"""
        # ensure the model filepath is valid, and save it as an attribute if it is
assert model_filepath.endswith(".fmu"), "Provided filepath is not an FMU file: '{}'".format(model_filepath)
self.model_filepath = model_filepath
# config file with config_params, inputs, outputs
self.sim_config_filepath = SIM_CONFIG_NAME_f(self.model_filepath)
# read the model description
self.model_description = read_model_description(model_filepath)
error_log = "Provided model ({}) doesn't have modelVariables in XLS description file".format(model_filepath)
assert len(self.model_description.modelVariables) > 0, error_log
# correct non-alphanumeric tags.
        # note, this doesn't pose any problem, since interaction with the sim uses indices, not names.
self._clean_non_alphanumeric_chars()
# collect the value references (indices)
# collect the value types (Real, Integer or Enumeration)
# collect the variables to be initialized and the value to do so at
self.vars_to_idx = {}
self.vars_to_type_f = {}
self.vars_to_ini_vals = {}
for variable in self.model_description.modelVariables:
# extract key attributes per variable
var_idx = variable.valueReference #, variable.causality
var_name = variable.name
var_type = variable.type
var_start = variable.start
# collect type reference
if var_type == "Real":
self.vars_to_type_f[var_name] = float
elif var_type == "Integer":
self.vars_to_type_f[var_name] = int
else:
# [TODO] Integrate variables of type "Enumeration". How do we cast? Define a function for "self.vars_to_type_f".
# [TODO] Integrate variables of type string (need to find correct var_type tag first).
# [TODO] Integrate variables of type boolean (need to find correct var_type tag first).
print(f"Variable '{var_name}' will be skipped. FMU connector cannot currently handle vars of type '{var_type}'.")
continue
# collect the value references (indices)
self.vars_to_idx[var_name] = var_idx
# collect the variables to be initialized and the value to do so at
if var_start is not None:
# cast variable prior to storing
self.vars_to_ini_vals[var_name] = self.vars_to_type_f[var_name](var_start)
# initialize sim config
self.is_model_config_valid = False # Currently unused, since error is raised if model invalid
self.sim_config_params = []
self.sim_inputs = []
self.sim_outputs = []
self.sim_other_vars = []
# ---------------------------------------------------------------------
# YAML CONFIG --> check for existing config using SIM_CONFIG_NAME_f --> e.g: "{model_name}_conf.yaml"
valid_config = self._validate_sim_config()
# exit if model is valid, unless validation has been activated
if valid_config:
# print model config for user reference: config_params, inputs, outputs
print(self._get_sim_config_str())
if user_validation:
# prompt user to manually validate model if selected
validation_asserted = input("Is this configuration correct (y|n)? ")
if validation_asserted == "y":
self.is_model_config_valid = True
return
# reset config if invalid
self.sim_config_params = []
self.sim_inputs = []
self.sim_outputs = []
self.sim_other_vars = []
else:
# when no validation is selected, we assume the sim config is valid
self.is_model_config_valid = True
return
# ---------------------------------------------------------------------
# FMI CONFIG --> if model is invalid we look for attributes within the .fmi model definition
valid_config = self._extract_sim_config_from_fmi_std()
if valid_config:
# print model config for user reference: config_params, inputs, outputs
print(self._get_sim_config_str())
if user_validation:
# prompt user to manually validate model if selected
validation_asserted = input("Is this configuration correct (y|n)? ")
if validation_asserted == "y":
self.is_model_config_valid = True
                    # dump YAML file to reuse next time the model is loaded
self._dump_config_to_yaml_file()
return
else:
# when no validation is selected, we assume the sim config is valid
self.is_model_config_valid = True
                # dump YAML file to reuse next time the model is loaded
self._dump_config_to_yaml_file()
return
# Dump auxiliary YAML config file if user doesn't assert the provided set
# of config_params/inputs/outputs
self._dump_config_to_yaml_file(is_aux_yaml = True)
# If neither YAML nor FMI model is sufficient raise error
error_log = "MODEL DOES NOT HAVE THE CORRECT CONFIG DEFINED NEITHER ON YAML CONFIG FILE "
error_log += "NOR FMI MODEL DESCRIPTION. A YAML FILE HAS BEEN CREATED FOR YOU TO MODIFY. "
error_log += "THE SIM HAS BEEN FORCED TO EXIT, BUT FEEL FREE TO RERUN ONCE SET-UP IS COMPLETED."
raise Exception(error_log)
def _validate_sim_config(self):
"""Check if configuration file exists, otherwise indicate user to do so
Configuration contains sim config_params/inputs/outputs and naming
convention follows SIM_CONFIG_NAME_f --> e.g: "{modelname}_conf.yaml"
> E.g: filename == "cartpole.fmu"
--> config == "cartpole_conf.yaml"
"""
print("\n[FMU Validator] ---- Looking to see if YAML config file exists ----")
# use convention to search for config file
config_file = self.sim_config_filepath
if not os.path.isfile(config_file):
print("[FMU Validator] Configuration file for selected example was NOT found: {}".format(config_file))
return False
print("[FMU Validator] Sim config file for selected example was found: {}\n".format(config_file))
# Open and extract sim config from YAML file
with open(config_file, 'r') as file:
#data = yaml.dump(config_file, Loader=yaml.FullLoader)
simulation_config = yaml.load(file, Loader=yaml.FullLoader)
if 'simulation' not in simulation_config.keys():
print("[FMU Validator] Configuration file for selected example does not have a 'simulation' tag, thus it is omited.")
return False
# Extract sim configuration from dict
sim_config_params = simulation_config['simulation']['config_params']
sim_inputs = simulation_config['simulation']['inputs']
sim_outputs = simulation_config['simulation']['outputs']
sim_other_vars = simulation_config['simulation']['other_vars']
# Validate values extracted
if len(sim_inputs) == 0:
print("[FMU Validator] Sim config file has no sim-input states, and thus cannot be used\n")
elif len(sim_outputs) == 0:
print("[FMU Validator] Sim config file has no sim-output states, and thus cannot be used\n")
else:
# Store data extracted as attributes
self.sim_config_params = sim_config_params
self.sim_inputs = sim_inputs
self.sim_outputs = sim_outputs
self.sim_other_vars = sim_other_vars
return True
return False
def _extract_sim_config_from_fmi_std(self):
"""We use the fmi standard to extract the correct set of config_params, inputs, outputs
We look into the "causality" attribute for each variable in model description
> E.g: {var}.causality == "parameter" ==> sim config_params
{var}.causality == "input" ==> sim inputs
{var}.causality == "output" ==> sim outputs
"""
print("\n---- Looking to see if FMU model description contains required 'causality' type definitions ----")
sim_config_params = []
sim_inputs = []
sim_outputs = []
sim_other_vars = []
for variable in self.model_description.modelVariables:
            # extract causality and append value
causality = variable.causality
if causality == "parameter":
sim_config_params.append(variable.name)
elif causality == "input":
sim_inputs.append(variable.name)
elif causality == "output":
sim_outputs.append(variable.name)
else:
sim_other_vars.append(variable.name)
# Validate values extracted
if len(sim_inputs) == 0:
print("\n[FMU Validator] Sim FMU description file has no sim-input states, and thus cannot be used.")
elif len(sim_outputs) == 0:
print("\n[FMU Validator] Sim FMU description file has no sim-output states, and thus cannot be used.")
else:
# Store data extracted as attributes
self.sim_config_params = sim_config_params
self.sim_inputs = sim_inputs
self.sim_outputs = sim_outputs
self.sim_other_vars = sim_other_vars
return True
        # Dump auxiliary YAML file for user to review/edit
self._dump_config_to_yaml_file(sim_config_params,
sim_inputs,
sim_outputs,
sim_other_vars,
is_aux_yaml = True)
return False
def _dump_config_to_yaml_file(self,
sim_config_params = None,
sim_inputs = None,
sim_outputs = None,
sim_other_vars = None,
is_aux_yaml = False):
"""Dump sim's config_params, inputs, and outputs to YAML file
By default, we overwrite to main YAML config file.
sim_other_vars: str
If provided.
"""
if sim_config_params is None:
sim_config_params = self.sim_config_params
if sim_inputs is None:
sim_inputs = self.sim_inputs
if sim_outputs is None:
sim_outputs = self.sim_outputs
if sim_other_vars is None:
sim_other_vars = self.sim_other_vars
if not is_aux_yaml:
config_file = self.sim_config_filepath
else:
config_file = self.sim_config_filepath.replace(".yaml", "_EDIT.yaml")
# Prepare set of unused data ( to be shared with user for editing )
full_sim_config = {"config_params": sim_config_params,
"inputs": sim_inputs,
"outputs": sim_outputs,
"other_vars": sim_other_vars}
full_sim_data = {"simulation": full_sim_config}
# Dump configuration to YAML file for later reuse (or user editing if "is_aux_yaml==True")
with open(config_file, 'w') as file:
dump = yaml.dump(full_sim_data, sort_keys = False, default_flow_style=False)
file.write( dump )
# Raise error, and avoid continuing using model
log = "\n[FMU Validator] A YAML file with bonsai required fields, as well as available "
log += "sim variables, has been created at: \n --> '{}'\n".format(config_file)
não especificadas - local não especificado'),
('W65.0', 'Afogamento e submersão durante banho em banheira - residência'),
('W65.1', 'Afogamento e submersão durante banho em banheira - habitação coletiva'),
('W65.2', 'Afogamento e submersão durante banho em banheira - escolas, outras instituições e áreas de administração pública'),
('W65.3', 'Afogamento e submersão durante banho em banheira - área para a prática de esportes e atletismo'),
('W65.4', 'Afogamento e submersão durante banho em banheira - rua e estrada'),
('W65.5', 'Afogamento e submersão durante banho em banheira - áreas de comércio e de serviços'),
('W65.6', 'Afogamento e submersão durante banho em banheira - áreas industriais e em construção'),
('W65.7', 'Afogamento e submersão durante banho em banheira - fazenda'),
('W65.8', 'Afogamento e submersão durante banho em banheira - outros locais especificados'),
('W65.9', 'Afogamento e submersão durante banho em banheira - local não especificado'),
('W66.0', 'Afogamento e submersão consecutiva a queda dentro de uma banheira - residência'),
('W66.1', 'Afogamento e submersão consecutiva a queda dentro de uma banheira - habitação coletiva'),
('W66.2', 'Afogamento e submersão consecutiva a queda dentro de uma banheira - escolas, outras instituições e áreas de administração pública'),
('W66.3', 'Afogamento e submersão consecutiva a queda dentro de uma banheira - área para a prática de esportes e atletismo'),
('W66.4', 'Afogamento e submersão consecutiva a queda dentro de uma banheira - rua e estrada'),
('W66.5', 'Afogamento e submersão consecutiva a queda dentro de uma banheira - áreas de comércio e de serviços'),
('W66.6', 'Afogamento e submersão consecutiva a queda dentro de uma banheira - áreas industriais e em construção'),
('W66.7', 'Afogamento e submersão consecutiva a queda dentro de uma banheira - fazenda'),
('W66.8', 'Afogamento e submersão consecutiva a queda dentro de uma banheira - outros locais especificados'),
('W66.9', 'Afogamento e submersão consecutiva a queda dentro de uma banheira - local não especificado'),
('W67.0', 'Afogamento e submersão em piscina - residência'),
('W67.1', 'Afogamento e submersão em piscina - habitação coletiva'),
('W67.2', 'Afogamento e submersão em piscina - escolas, outras instituições e áreas de administração pública'),
('W67.3', 'Afogamento e submersão em piscina - área para a prática de esportes e atletismo'),
('W67.4', 'Afogamento e submersão em piscina - rua e estrada'),
('W67.5', 'Afogamento e submersão em piscina - áreas de comércio e de serviços'),
('W67.6', 'Afogamento e submersão em piscina - áreas industriais e em construção'),
('W67.7', 'Afogamento e submersão em piscina - fazenda'),
('W67.8', 'Afogamento e submersão em piscina - outros locais especificados'),
('W67.9', 'Afogamento e submersão em piscina - local não especificado'),
('W68.0', 'Afogamento e submersão conseqüente a queda dentro de uma piscina - residência'),
('W68.1', 'Afogamento e submersão conseqüente a queda dentro de uma piscina - habitação coletiva'),
('W68.2', 'Afogamento e submersão conseqüente a queda dentro de uma piscina - escolas, outras instituições e áreas de administração pública'),
('W68.3', 'Afogamento e submersão conseqüente a queda dentro de uma piscina - área para a prática de esportes e atletismo'),
('W68.4', 'Afogamento e submersão conseqüente a queda dentro de uma piscina - rua e estrada'),
('W68.5', 'Afogamento e submersão conseqüente a queda dentro de uma piscina - áreas de comércio e de serviços'),
('W68.6', 'Afogamento e submersão conseqüente a queda dentro de uma piscina - áreas industriais e em construção'),
('W68.7', 'Afogamento e submersão conseqüente a queda dentro de uma piscina - fazenda'),
('W68.8', 'Afogamento e submersão conseqüente a queda dentro de uma piscina - outros locais especificados'),
('W68.9', 'Afogamento e submersão conseqüente a queda dentro de uma piscina - local não especificado'),
('W69.0', 'Afogamento e submersão em águas naturais - residência'),
('W69.1', 'Afogamento e submersão em águas naturais - habitação coletiva'),
('W69.2', 'Afogamento e submersão em águas naturais - escolas, outras instituições e áreas de administração pública'),
('W69.3', 'Afogamento e submersão em águas naturais - área para a prática de esportes e atletismo'),
('W69.4', 'Afogamento e submersão em águas naturais - rua e estrada'),
('W69.5', 'Afogamento e submersão em águas naturais - áreas de comércio e de serviços'),
('W69.6', 'Afogamento e submersão em águas naturais - áreas industriais e em construção'),
('W69.7', 'Afogamento e submersão em águas naturais - fazenda'),
('W69.8', 'Afogamento e submersão em águas naturais - outros locais especificados'),
('W69.9', 'Afogamento e submersão em águas naturais - local não especificado'),
('W70.0', 'Afogamento e submersão conseqüentes a queda dentro de águas naturais - residência'),
('W70.1', 'Afogamento e submersão conseqüentes a queda dentro de águas naturais - habitação coletiva'),
('W70.2', 'Afogamento e submersão conseqüentes a queda dentro de águas naturais - escolas, outras instituições e áreas de administração pública'),
('W70.3', 'Afogamento e submersão conseqüentes a queda dentro de águas naturais - área para a prática de esportes e atletismo'),
('W70.4', 'Afogamento e submersão conseqüentes a queda dentro de águas naturais - rua e estrada'),
('W70.5', 'Afogamento e submersão conseqüentes a queda dentro de águas naturais - áreas de comércio e de serviços'),
('W70.6', 'Afogamento e submersão conseqüentes a queda dentro de águas naturais - áreas industriais e em construção'),
('W70.7', 'Afogamento e submersão conseqüentes a queda dentro de águas naturais - fazenda'),
('W70.8', 'Afogamento e submersão conseqüentes a queda dentro de águas naturais - outros locais especificados'),
('W70.9', 'Afogamento e submersão conseqüentes a queda dentro de águas naturais - local não especificado'),
('W73.0', 'Outros afogamentos e submersão especificados - residência'),
('W73.1', 'Outros afogamentos e submersão especificados - habitação coletiva'),
('W73.2', 'Outros afogamentos e submersão especificados - escolas, outras instituições e áreas de administração pública'),
('W73.3', 'Outros afogamentos e submersão especificados - área para a prática de esportes e atletismo'),
('W73.4', 'Outros afogamentos e submersão especificados - rua e estrada'),
('W73.5', 'Outros afogamentos e submersão especificados - áreas de comércio e de serviços'),
('W73.6', 'Outros afogamentos e submersão especificados - áreas industriais e em construção'),
('W73.7', 'Outros afogamentos e submersão especificados - fazenda'),
('W73.8', 'Outros afogamentos e submersão especificados - outros locais especificados'),
('W73.9', 'Outros afogamentos e submersão especificados - local não especificado'),
('W74.0', 'Afogamento e submersão não especificados - residência'),
('W74.1', 'Afogamento e submersão não especificados - habitação coletiva'),
('W74.2', 'Afogamento e submersão não especificados - escolas, outras instituições e áreas de administração pública'),
('W74.3', 'Afogamento e submersão não especificados - área para a prática de esportes e atletismo'),
('W74.4', 'Afogamento e submersão não especificados - rua e estrada'),
('W74.5', 'Afogamento e submersão não especificados - áreas de comércio e de serviços'),
('W74.6', 'Afogamento e submersão não especificados - áreas industriais e em construção'),
('W74.7', 'Afogamento e submersão não especificados - fazenda'),
('W74.8', 'Afogamento e submersão não especificados - outros locais especificados'),
('W74.9', 'Afogamento e submersão não especificados - local não especificado'),
('W75.0', 'Sufocação e estrangulamento acidental na cama - residência'),
('W75.1', 'Sufocação e estrangulamento acidental na cama - habitação coletiva'),
('W75.2', 'Sufocação e estrangulamento acidental na cama - escolas, outras instituições e áreas de administração pública'),
('W75.3', 'Sufocação e estrangulamento acidental na cama - área para a prática de esportes e atletismo'),
('W75.4', 'Sufocação e estrangulamento acidental na cama - rua e estrada'),
('W75.5', 'Sufocação e estrangulamento acidental na cama - áreas de comércio e de serviços'),
('W75.6', 'Sufocação e estrangulamento acidental na cama - áreas industriais e em construção'),
('W75.7', 'Sufocação e estrangulamento acidental na cama - fazenda'),
('W75.8', 'Sufocação e estrangulamento acidental na cama - outros locais especificados'),
('W75.9', 'Sufocação e estrangulamento acidental na cama - local não especificado'),
('W76.0', 'Outro enforcamento e estrangulamento acidental - residência'),
('W76.1', 'Outro enforcamento e estrangulamento acidental - habitação coletiva'),
('W76.2', 'Outro enforcamento e estrangulamento acidental - escolas, outras instituições e áreas de administração pública'),
('W76.3', 'Outro enforcamento e estrangulamento acidental - área para a prática de esportes e atletismo'),
('W76.4', 'Outro enforcamento e estrangulamento acidental - rua e estrada'),
('W76.5', 'Outro enforcamento e estrangulamento acidental - áreas de comércio e de serviços'),
('W76.6', 'Outro enforcamento e estrangulamento acidental - áreas industriais e em construção'),
('W76.7', 'Outro enforcamento e estrangulamento acidental - fazenda'),
('W76.8', 'Outro enforcamento e estrangulamento acidental - outros locais especificados'),
('W76.9', 'Outro enforcamento e estrangulamento acidental - local não especificado'),
('W77.0', 'Risco a respiração devido a desmoronamento, queda de terra e de outras substâncias - residência'),
('W77.1', 'Risco a respiração devido a desmoronamento, queda de terra e de outras substâncias - habitação coletiva'),
('W77.2', 'Risco a respiração devido a desmoronamento, queda de terra e de outras substâncias - escolas, outras instituições e áreas de administração pública'),
('W77.3', 'Risco a respiração devido a desmoronamento, queda de terra e de outras substâncias - área para a prática de esportes e atletismo'),
('W77.4', 'Risco a respiração devido a desmoronamento, queda de terra | |
# ZCU111/packages/xsdfec/pkg/xsdfec/__init__.py
# Copyright (c) 2019, Xilinx, Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
# OR BUSINESS INTERRUPTION). HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
# OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
# ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import cffi
import os
import pynq
import warnings
import parsec as ps
import weakref
from wurlitzer import sys_pipes
__author__ = "<NAME>"
__copyright__ = "Copyright 2019, Xilinx"
__email__ = "<EMAIL>"
# The HWH file includes a lot of information about all of the available code
# parameters. This includes nested lists, etc. so we use [parser
# combinators](https://en.wikipedia.org/wiki/Parsec_(parser)) to keep this
# manageable.
_whitespace = ps.regex(r'\s*')
_lexeme = lambda p: p << _whitespace
_lbrace = _lexeme(ps.string('{'))
_rbrace = _lexeme(ps.string('}'))
_separator = _lexeme(ps.regex(r'[ ,]'))
_name = _lexeme(ps.regex(r'[\w]+'))
_num_hex = _lexeme(ps.regex(r'0x[0-9a-fA-F]+')).parsecmap(lambda h: int(h, base=16))
_num_int = _lexeme(ps.regex(r'-?(0|[1-9][0-9]*)([.][0-9]+)?([eE][+-]?[0-9]+)?')).parsecmap(int)
_num_float = _lexeme(ps.regex(r'-?(0|[1-9][0-9]*)([.][0-9]+)?([eE][+-]?[0-9]+)?')).parsecmap(float)
_list_of = lambda elems: _lbrace >> ps.many(elems) << _rbrace
_sep_list_of = lambda elems: _lbrace >> ps.sepBy(elems, _separator) << _rbrace
_param_value = _num_int | _list_of(_num_int)
@ps.generate
def _ldpc_key_value():
key = yield _name
val = yield _param_value
return (key, val)
@ps.generate
def _ldpc_param():
param_name = yield _name
elems = yield _list_of(_ldpc_key_value)
return (param_name, dict(elems))
@ps.generate
def _ldpc_param_table():
params = yield ps.many(_ldpc_param)
return dict(params)
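# As a quick, hypothetical illustration (these values are made up, not taken
# from a real HWH file), a parameter string such as
#   "docsis_short { k 1120 n 1680 sc_table { 1 2 3 } }"
# run through _ldpc_param_table.parse(...) would yield
#   {'docsis_short': {'k': 1120, 'n': 1680, 'sc_table': [1, 2, 3]}}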
# To round up the HWH parsing, let's define the name, C type, and parser
# combinator for each field we're interested in.
_config = [
('Standard' , 'DRV_STANDARD' , _num_int ),
('Initialization', 'DRV_INITIALIZATION_PARAMS', _sep_list_of(_num_hex)),
]
_code_params = [
('ldpc', 'DRV_LDPC_PARAMS', _ldpc_param_table),
# TODO Add Turbo code params
]
def _set_params(obj, params, config, *args):
for c in config:
setattr(obj, c[0], c[2].parse(params[c[1].format(*args)]))
def populate_params(obj, params):
"""Populates a given SdFec instance with parameters from an HWH file.
Parameters include...
+ Basic IP config settings (XSdFec_Config struct)
+ LDPC parameter table (a dict of named XSdFecLdpcParameters)
obj: An instance of SdFec
params: Dict based on the HWH file snippet
"""
obj._config = _ffi.new('XSdFec_Config*')
obj._code_params = type('', (), {})
_set_params(obj._config, params, _config)
_set_params(obj._code_params, params, _code_params)
# Now we can build python structs from the HWH parameters, but we need to
# convert the types and field names to match the `XSdFecLdpcParameters`
# C struct. Be very careful about garbage collection here! If we do not store a
# reference to the inner C arrays, they will be garbage collected and make the
# LDPC codes incorrect! We solve this with a weakref dict - see https://cffi.readthedocs.io/en/latest/using.html#working-with-pointers-structures-and-arrays
# for details about the CFFI ownership model.
_c_array_weakkeydict = weakref.WeakKeyDictionary()
def _pack_ldpc_param(param_dict : dict) -> any:
"""Returns a cdata XSdFecLdpcParameters version of the given dict"""
key_lookup = {
'k': 'K',
'n': 'N',
'p': 'PSize',
'nlayers': 'NLayers',
'nqc': 'NQC',
'nmqc': 'NMQC',
'nm': 'NM',
'norm_type': 'NormType',
'no_packing': 'NoPacking',
'special_qc': 'SpecialQC',
'no_final_parity': 'NoFinalParity',
'max_schedule': 'MaxSchedule',
'sc_table': 'SCTable',
'la_table': 'LATable',
'qc_table': 'QCTable',
}
# Flush non-struct keys
sub_dict = {key_lookup[key]: param_dict[key] for key in param_dict
if key in key_lookup.keys()}
# Pack tables as C arrays
def to_c_array(lst):
# Convert scalars to singleton lists
if not isinstance(lst, list):
lst = [lst]
# Copy to C array
c_arr = _ffi.new('u32[]', len(lst))
for i, x in enumerate(lst):
c_arr[i] = x
return c_arr
for table_key in filter(lambda k: k.endswith('Table'), sub_dict.keys()):
sub_dict[table_key] = to_c_array(sub_dict[table_key])
c_struct = _pack_value('XSdFecLdpcParameters', sub_dict)
_c_array_weakkeydict[c_struct] = [sub_dict[table_key]
for table_key in filter(lambda k: k.endswith('Table'), sub_dict.keys())
]
return c_struct
# Let's load the compiled `.so` version of the driver and define some helper
# functions to marshall data to/from the driver. We want a couple of things
# here:
# 1. A wrapper to call C functions with C-style arguments
# 2. A pair of functions to wrap and unwrap python objects as C-style values
# Read in C function declarations
_THIS_DIR = os.path.dirname(__file__)
with open(os.path.join(_THIS_DIR, 'xsdfec_functions.c'), 'r') as f:
_header_text = f.read()
_ffi = cffi.FFI()
_ffi.cdef(_header_text)
_lib = _ffi.dlopen(os.path.join(_THIS_DIR, 'libxsdfec.so'))
def _safe_wrapper(name: str, *args, check_return: bool=True, **kwargs) -> any:
"""Wrapper to call C functions, checking if they exist and their return status.
name: C function name
check_return: Flag to treat return value as a status (non-zero is failure)
return: C function return value
"""
with sys_pipes():
if not hasattr(_lib, name):
raise RuntimeError(f"Function {name} not in library")
ret = getattr(_lib, name)(*args, **kwargs)
if check_return and ret:
raise RuntimeError(f"Function {name} call failed")
return ret
def _pack_value(typename: str, value: any) -> any:
"""Pack a python object as a given C representation
typename: Name of the C type (we can use type introspection
to detect the Python type)
value: Python object to pack
return: C value
"""
if isinstance(value, dict):
c_value = _ffi.new(f"{typename}*")
for k, v in value.items():
setattr(c_value, k, v)
value = c_value
return value
def _unpack_value(typename: str, value: any) -> any:
"""Unpack given C data to a Python representation
typename: Name of the C type (we can use type introspection
to detect the Python type)
value: C value to unpack
return: Python object
"""
if dir(value):
return dict({k: getattr(value, k) for k in dir(value)})
else:
return value[0]
# With these helpers in place, we can start defining the SD FEC driver itself.
#
# For initialisation, we parse parameters from the HWH file to populate an
# `XSdFec_Config` struct and pass this to the `XSdFecCfgInitialize` function.
# We also parse code parameter tables from the HWH file and keep them for
# later.
#
# We add in wrapper functions for each of the four main API calls:
# * `XSdFecSetTurboParams(InstancePtr, ParamsPtr)`
# * `XSdFecAddLdpcParams(InstancePtr, CodeId, SCOffset, LAOffset, QCOffset, ParamsPtr)`
# * `XSdFecShareTableSize(ParamsPtr, SCSizePtr, LASizePtr, QCSizePtr)`
# * `XSdFecInterruptClassifier(InstancePtr)`
class SdFec(pynq.DefaultIP):
"""SD FEC driver"""
bindto = ["xilinx.com:ip:sd_fec:1.1"]
def __init__(self, description : dict):
"""Make an SD FEC instance as described by a HWH file snippet"""
super().__init__(description)
if 'parameters' in description:
populate_params(self, description['parameters'])
else:
warnings.warn("Please use an hwh file with the SD-FEC driver"
" - the default configuration is being used")
self._config = _lib.XSdFecLookupConfig(0)
# TODO consider how we should set default LDPC and Turbo code params
self._instance = _ffi.new("XSdFec*")
self._config.BaseAddress = self.mmio.array.ctypes.data
_lib.XSdFecCfgInitialize(self._instance, self._config)
def _call_function(self, name: str, *args, **kwargs) -> any:
"""Helper function to call CFFI functions
name: C function name (without "XSdFec" prefix)
"""
return _safe_wrapper(f"XSdFec{name}", self._instance, *args, **kwargs)
def available_ldpc_params(self) -> list:
"""List the available LDPC code names"""
return list(self._code_params.ldpc.keys())
def add_ldpc_params(self, code_id: int, sc_offset: int, la_offset: int, qc_offset: int, ldpc_param_name: str) -> None:
"""Add a named LDPC code at the given table offsets
code_id: Integer ID for new code
sc_offset: Offset into SC table for new code
la_offset: Offset into LA table for new code
qc_offset: Offset into QC table for new code
ldpc_param_name: Name of LDPC code to add (see available_ldpc_params() for valid options)
"""
ldpc_c_param = _pack_ldpc_param(self._code_params.ldpc[ldpc_param_name])
self._call_function('AddLdpcParams', code_id, sc_offset, la_offset, qc_offset,
ldpc_c_param)
def set_turbo_params(self, turbo_params: dict) -> None:
"""Stub for setting Turbo code parameters"""
# TODO
pass
def share_table_size(self, ldpc_param_name: str) -> tuple:
"""Helper function to get table sizes of a given LDPC code
Useful for calculating table offsets when adding new LDPC codes.
ldpc_param_name: Name of LDPC code (see available_ldpc_params() for valid options)
return: Dict with SC, LA, and QC table sizes
"""
sc_size, la_size, qc_size = (_ffi.new('u32*'), _ffi.new('u32*'), _ffi.new('u32*'))
# dtrradarsim.py - NIST DTR Radar Target Simulator
#
# By <NAME> III
# Start Date: February 28, 2017
# Completed: March 11, 2017 (update on March 22, 2017)
# This program is used to run simulations of vehicles through a graphical user interface (GUI) for testing handheld speed cameras (radar guns) for accuracy readings.
# The program simulates vehicles by creating sine (in reality, cosine) waves as numpy arrays from data entered by the user together with built-in defaults.
# The program is also built partway like a module, as it can be called in other programs due to object-oriented programming. Most everything is done within objects,
# except for a few functions and one global constant (for the background color of the GUI).
# The original code and implementation created by <NAME> in MATLAB (2017).
# This code was implemented for portability (creation of executables and installer for easy distribution). This program also implements an 'advanced' window, allowing
# the user to choose multiple vehicles during the simulation and to specify the amplitude of each (which was not present in the original MATLAB code).
#-----------------------------------------------------------------------------------------------------------------------------------------------------------------#
# NOTE ON ADVANCED WINDOW: while the default number of vehicles is three for the advanced window, and the only number used in this program, the code is set up to #
# allow any number of vehicle simulations with the advanced window. Of course, very high numbers are most definitely improbable and not #
# that useful for this application. #
#-----------------------------------------------------------------------------------------------------------------------------------------------------------------#
#-----Import needed modules and define global constants------#
#Import numpy and sounddevice for calulating the sine waves and playing them (respectively).
import numpy, sounddevice
#Import the OcempGUI gui modules
from ocempgui.widgets import * #For the GUI widgets
from ocempgui.widgets.Constants import * #For GUI constants
GUI_COLOR = (234, 228, 223) #The background color of the GUI
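# Illustrative sketch only (not called by the program below): one plausible way a
# single simulated vehicle becomes an audio tone. A stationary radar sees a
# Doppler shift of f_d = 2 * v * f_tx / c; the 24.125 GHz K-band carrier, the
# 44.1 kHz sample rate and the conversion details here are assumptions for
# illustration, not necessarily what the classes below use.
def _example_doppler_tone(speed_mph=45.0, duration_s=2.0, amplitude=0.5):
    """Return a cosine tone at the Doppler frequency of one approaching vehicle."""
    sample_rate = 44100                              # audio samples per second (assumed)
    f_tx = 24.125e9                                  # assumed K-band radar carrier, Hz
    c = 299792458.0                                  # speed of light, m/s
    v = speed_mph * 0.44704                          # mph -> m/s
    f_doppler = 2.0 * v * f_tx / c                   # Doppler shift of the return signal
    t = numpy.arange(0, duration_s, 1.0 / sample_rate)
    tone = amplitude * numpy.cos(2.0 * numpy.pi * f_doppler * t)
    # sounddevice.play(tone, sample_rate); sounddevice.wait()  # uncomment to listen
    return tone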
#----------Start of Class Definitions----------#
#--------------------------------------------------------------------------------------------------------------------------------------------------------------#
# This class is used to create a simple window, which is used to enter input about a single vehicle - its speed, which way it is going relative to the camera, #
# and for how long. #
#--------------------------------------------------------------------------------------------------------------------------------------------------------------#
class SimpleWindow(object):
""" A simple gui window, that allows the user to specify information regarding a single vehicle moving either towards or away from the speed camera. """
def init(self, is_metric = False):
""" This function is used to set up the window (initiate the object and instantiate all its attributes). This is used instead of a __init__ method since
it is needed to be called multiple times - every time the window is added again to the main window's frame.
The 'is_metric' optional parameter is used to change between English and Metric units. For the window, this just means switching between miles per hour and
kilometers per hour for the speed unit Label. If 'is_metric' is True, 'kph' is put as the text instead of 'mph'. The actual data conversion is done when creating
the sine wave. """
self.speed_lbl = Label("Speed") #A label for the speed entry
self.speed_entry = Entry() #The entry (text field) where the user inputs the speed of the simulated vehicle
self.s_unit_lbl = Label("mph") #A label that details the units of measurement for the speed
#Now, if 'is_metric' is True, then the units should be metric, so change the text of the unit label to kilometers per hour
if is_metric:
self.s_unit_lbl.text = "kph"
self.dur_lbl = Label("Duration") #A label for the duration entry
self.dur_entry = Entry() #The entry (text field) where the user inputs the duration for the simulated vehicle
self.d_unit_lbl = Label("sec") #A label that details the units of measurement for the duration
        #Here are the two radio buttons that allow the user to choose whether the simulated vehicle is approaching or receding
self.appro_rad = RadioButton("Approaching")
self.recede_rad = RadioButton("Receding", self.appro_rad) #Add in the 'appro_rad' radio button to group the two together
# Set up the tables for holding the widgets
self.entry_table = Table(2, 3) #This table is to hold the text entries and associated labels
#Add the associated widgets to the table (those for the text entries)
self.entry_table.add_child(0, 0, self.speed_lbl)
self.entry_table.add_child(0, 1, self.speed_entry)
self.entry_table.add_child(0, 2, self.s_unit_lbl)
self.entry_table.add_child(1, 0, self.dur_lbl)
self.entry_table.add_child(1, 1, self.dur_entry)
self.entry_table.add_child(1, 2, self.d_unit_lbl)
self.rad_table = Table(2, 1) #This table is to hold the radio buttons
self.rad_table.add_child(0, 0, self.appro_rad)
self.rad_table.add_child(1, 0, self.recede_rad)
self.main_table = Table(2, 1) #This is the main table for the window, which holds all the other tables.
self.main_table.add_child(0, 0, self.entry_table) #Add the entry table first
self.main_table.add_child(1, 0, self.rad_table)
#--------------------------------------------------------------------------------------------------------#
# This method is used to switch the text for the speed button from miles per hour to kilometers per hour #
#--------------------------------------------------------------------------------------------------------#
def switch_units(self, is_metric):
""" This method switches the text for the speed button - between English and Metric. The passed parameter 'is_metric' determines which it should
be switched to. """
if is_metric: #If the units should change to metric, put them in kilometers per hour
self.s_unit_lbl.text = 'kph'
else:
self.s_unit_lbl.text = 'mph'
#-------------------------------------------------------------------------#
# Here are the method that return data from widgets (entered by the user) #
#-------------------------------------------------------------------------#
def get_speed(self):
""" This method returns the text within the speed Entry widget. """
return self.speed_entry.text
def get_duration(self):
""" This method returns the text within the duration Entry widget. """
return self.dur_entry.text
def get_direct(self):
""" This method returns the direction that the vehicle is moving. Specifically, it returns True if it is approaching and False if it is receding. If neither
is active, None is returned."""
if self.appro_rad.active:
return True #To denote the vehicle is approaching the radar
elif self.recede_rad.active:
return False #To denote the vehicle is receding from the radar
else: #If neither radio button has been selected, return None
return None
#---------------------------------------------------------------------------------------------------------------------------------------------------#
# This is the method that returns the reference to the 'main_table' attribute. It is used to add the widgets of the simple window to the interface. #
#---------------------------------------------------------------------------------------------------------------------------------------------------#
def get_main_table(self):
""" This method returns a reference to the 'main_table' Table attribute. """
return self.main_table
#-----------------------------------------------------------------------------------------------------------------------------------------------------------------#
# This class is used to create an advanced window, where data for multiple vehicles as well as extra data not asked for in the simple window. It takes a specific #
# number that details how many vehicles are to be used during that simulation, and sets up the widgets accordingly. The variating widgets are put into lists for #
# easy addition - that is, lists are used to hold the widgets that are associated with the vehicle data, since it is easy to add as many as needed no matter #
# what the passed number is. #
#-----------------------------------------------------------------------------------------------------------------------------------------------------------------#
class AdvancedWindow(object):
""" A more advanced gui window, which allows the user to select multiple simulated vehicles and can input the amplitude. """
def init(self, num_vehicles = 3, is_metric = False):
""" This function is used to set up the window (initiate the object and instantiate all its attributes). This is used instead of a __init__ method since
it is needed to be called multiple times - every time the window is added again to the main window's frame.
        The 'num_vehicles' optional parameter is used to specify how many vehicles (and thus how many widgets) are in the simulation. For this application, the number will
        always be at most three.
The 'is_metric' optional parameter is used to change between English and Metric units. For the window, this just means switching between miles per hour and
kilometers per hour for the speed unit Label. If 'is_metric' is True, 'kph' is put as the text instead of 'mph'. The actual data conversion is done when creating
the sine wave. """
self.dur_lbl = Label("Duration") #A label for the duration entry
self.dur_entry = Entry() #The entry (text field) where the user inputs the duration for the simulated vehicle
self.d_unit_lbl = Label("sec") #A label that details the units of measurement for the duration
        #Set up the table to hold the widgets dealing with the duration.
self.dur_table = Table(1, 3)
self.dur_table.add_child(0, 0, self.dur_lbl)
self.dur_table.add_child(0, 1, self.dur_entry)
self.dur_table.add_child(0, 2, self.d_unit_lbl)
        #Set up the labels for the different information that needs to be entered for each vehicle
#!/usr/bin/env python
# coding: utf-8
# # Adding a New Dataset
#
# There exist two different options to use a different dataset within the `neuralHydrology` library.
#
# 1. Preprocess your data to use the `GenericDataset` in `neuralhydrology.datasetzoo.genericdataset`.
# 2. Implement a new dataset class, inheriting from `BaseDataset` in `neuralhydrology.datasetzoo.basedataset`.
#
# Using the `GenericDataset` is recommended and does not require you to add/change a single line of code, while writing a new dataset gives you more freedom to do whatever you want.
#
# ## Using the GenericDataset
#
# With the release of version 0.9.6-beta, we added a `GenericDataset`. This class can be used with any data, as long as the data is preprocessed in the following way:
#
# - The data directory (config argument `data_dir`) must contain a folder 'time_series' and (if static attributes are used) a folder 'attributes'.
# - The folder 'time_series' contains one netcdf file (.nc or .nc4) per basin, named '<basin_id>.nc' (or '.nc4'). The netcdf file has to have one coordinate called `date`, containing the datetime index.
# - The folder 'attributes' contains one or more comma-separated files (.csv) with static attributes, indexed by basin id. Attributes files can be divided into groups of basins or groups of features (but not both).
#
# If you prepare your data set following these guidelines, you can simply set the config argument `dataset` to `generic` and set the `data_dir` to the path of your preprocessed data directory.
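#
# As a minimal sketch of such a preprocessing step (everything below is illustrative: the directory, the basin id '12345' and the column names are assumptions, not part of the library), a per-basin netCDF file could be written like this:
# In[ ]:
from pathlib import Path
import pandas as pd
import xarray
data_dir = Path("/path/to/my_dataset")  # hypothetical data_dir that the config would point to
(data_dir / "time_series").mkdir(parents=True, exist_ok=True)
# assumed: a time-indexed DataFrame with forcings and discharge for one basin
df = pd.DataFrame(
    {"precipitation": [1.2, 0.0, 3.4], "discharge": [10.0, 9.5, 9.1]},
    index=pd.date_range("2000-01-01", periods=3, freq="D"),
)
df.index.name = "date"  # the GenericDataset expects a coordinate called 'date'
# one netCDF file per basin, named '<basin_id>.nc'
xarray.Dataset.from_dataframe(df).to_netcdf(data_dir / "time_series" / "12345.nc")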
#
# ## Adding a Dataset Class
#
# The rest of this tutorial will show you how to add a new dataset class to the `neuralhydrology.datasetzoo`. As an example, we will use the [CAMELS-CL](https://hess.copernicus.org/articles/22/5817/2018/) dataset.
# In[3]:
import sys
from pathlib import Path
from typing import List, Dict, Union
import numpy as np
import pandas as pd
import xarray
from tqdm.notebook import tqdm
from neuralhydrology.datasetzoo.basedataset import BaseDataset
from neuralhydrology.utils.config import Config
# ### Template
# Every dataset has its own file in `neuralhydrology.datasetzoo` and follows a common template. The template can be found [here](https://github.com/neuralhydrology/neuralhydrology/blob/master/neuralhydrology/datasetzoo/template.py).
#
# The most important points are:
# - All dataset classes have to inherit from `BaseDataset` implemented in `neuralhydrology.datasetzoo.basedataset`.
# - All dataset classes have to accept the same inputs upon initialization (see below)
# - Within each dataset class, you have to implement two methods:
# - `_load_basin_data()`: This method loads the time series data for a single basin of the dataset (e.g. meteorological forcing data and streamflow) into a time-indexed pd.DataFrame.
# - `_load_attributes()`: This method loads the catchment attributes for all basins in the dataset and returns a basin-indexed pd.DataFrame with attributes as columns.
#
#
# `BaseDataset` is a map-style [Pytorch Dataset](https://pytorch.org/docs/stable/data.html#torch.utils.data.Dataset) that implements the core logic for all data sets. It takes care of multiple temporal resolutions, data fusion, normalizations, sanity checks, etc. and also implements the required methods `__len__` (returns number of total training samples) and `__getitem__` (returns a single training sample for a given index), which PyTorch data loaders use, e.g., to create mini-batches for training. However, all of this is not important if you just want to add another dataset to the `neuralHydrology` library.
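#
# Purely as an illustration of that map-style contract (the toy class below is not part of neuralHydrology), any object implementing `__len__` and `__getitem__` can be handed to a PyTorch `DataLoader`:
# In[ ]:
import torch
from torch.utils.data import Dataset, DataLoader
class ToyDataset(Dataset):
    def __len__(self):
        return 8  # total number of samples
    def __getitem__(self, idx):
        # return one sample; the DataLoader stacks these into mini-batches
        return {"x": torch.tensor([float(idx)]), "y": torch.tensor([2.0 * idx])}
loader = DataLoader(ToyDataset(), batch_size=4, shuffle=True)
for batch in loader:
    print(batch["x"].shape, batch["y"].shape)  # torch.Size([4, 1]) for both tensors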
#
# ### Preprocessing CAMELS-CL
# Because the CAMELS-CL dataset comes in a rather unusual file structure, we added a function to create per-basin csv files with all timeseries features. You can find the function `preprocess_camels_cl_dataset` in `neuralhydrology.datasetzoo.camelscl`, which will create a subfolder called `preprocessed` containing the per-basin files. For the remainder of this tutorial, we assume that this folder and the per-basin csv files exist.
#
# ### Class skeleton
# For the sake of this tutorial, we will omit doc-strings. However, when adding your dataset class we highly encourage you to add extensive doc-strings, as we did for all dataset classes in this package. We use [Python type annotations](https://docs.python.org/3/library/typing.html) everywhere, which facilitates code development with any modern IDE as well as makes it easier to understand what is happening inside a function or class.
#
# The class skeleton looks like this:
# In[7]:
class CamelsCL(BaseDataset):
def __init__(self,
cfg: Config,
is_train: bool,
period: str,
basin: str = None,
additional_features: List[Dict[str, pd.DataFrame]] = [],
id_to_int: Dict[str, int] = {},
scaler: Dict[str, Union[pd.Series, xarray.DataArray]] = {}):
# Initialize `BaseDataset` class
super(CamelsCL, self).__init__(cfg=cfg,
is_train=is_train,
period=period,
basin=basin,
additional_features=additional_features,
id_to_int=id_to_int,
scaler=scaler)
def _load_basin_data(self, basin: str) -> pd.DataFrame:
"""Load timeseries data of one specific basin"""
raise NotImplementedError
def _load_attributes(self) -> pd.DataFrame:
"""Load catchment attributes"""
raise NotImplementedError
# ### Data loading functions
#
# For all datasets, we implemented the actual data loading (e.g., from the txt or csv files) in separate functions outside of the class so that these functions are usable everywhere. This is useful for example when you want to inspect or visualize the discharge of a particular basin or do anything else with the basin data. These functions are implemented within the same file (since they are specific to each data set) and we use those functions from within the class methods.
#
# So let's start by implementing a function that reads a single basin file of time series data for a given basin identifier.
# In[5]:
def load_camels_cl_timeseries(data_dir: Path, basin: str) -> pd.DataFrame:
preprocessed_dir = data_dir / "preprocessed"
# make sure the CAMELS-CL data was already preprocessed and per-basin files exist.
if not preprocessed_dir.is_dir():
msg = [
f"No preprocessed data directory found at {preprocessed_dir}. Use preprocessed_camels_cl_dataset ",
"in neuralhydrology.datasetzoo.camelscl to preprocess the CAMELS CL data set once into ",
"per-basin files."
]
raise FileNotFoundError("".join(msg))
# load the data for the specific basin into a time-indexed dataframe
basin_file = preprocessed_dir / f"{basin}.csv"
df = pd.read_csv(basin_file, index_col='date', parse_dates=['date'])
return df
# Most of this should be easy to follow. First we check that the data was already preprocessed and if it wasn't, we throw an appropriate error message. Then we proceed to load the data into a pd.DataFrame and we make sure that the index is converted into a datetime format.
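#
# As a quick usage example (purely illustrative - the data directory and the basin id below are assumptions), the function can be used on its own to inspect a basin:
# In[ ]:
camels_cl_dir = Path("/data/CAMELS-CL")  # hypothetical dataset location
df_basin = load_camels_cl_timeseries(data_dir=camels_cl_dir, basin="8117005")  # hypothetical basin id
print(df_basin.head())  # time-indexed DataFrame with all timeseries features of this basin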
#
# Next, we need a function to load the attributes, which are stored in a file called `1_CAMELScl_attributes.txt`. We assume that this file exists in the root directory of the dataset (such information is useful to add to the docstring!). The dataframe that this function has to return must be basin-indexed with attributes as columns. Furthermore, we accept an optional argument `basins`, which is a list of strings. This list can specify basins of interest and if passed, we only return the attributes for said basins.
# In[6]:
def load_camels_cl_attributes(data_dir: Path, basins: List[str] = []) -> pd.DataFrame:
# load attributes into basin-indexed dataframe
attributes_file = data_dir / '1_CAMELScl_attributes.txt'
df = pd.read_csv(attributes_file, sep="\t", index_col="gauge_id").transpose()
# convert all columns, where possible, to numeric
df = df.apply(pd.to_numeric, errors='ignore')
# convert the two columns specifying record period start and end to datetime format
df["record_period_start"] = pd.to_datetime(df["record_period_start"])
df["record_period_end"] = pd.to_datetime(df["record_period_end"])
if basins:
if any(b not in df.index for b in basins):
raise ValueError('Some basins are missing static attributes.')
df = df.loc[basins]
return df
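# Again, a short illustrative usage example (the path and basin id are assumptions):
# In[ ]:
attrs = load_camels_cl_attributes(Path("/data/CAMELS-CL"), basins=["8117005"])
print(attrs.loc["8117005", ["record_period_start", "record_period_end"]])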
# ### Putting everything together
#
# Now we have all required pieces and can finish the dataset class. Notice that we have access to all class attributes from the parent class in all methods (such as the config, which is stored in `self.cfg`). In the `_load_attributes` method, we simply defer to the attribute loading function we implemented above. The BaseDataset will take care of removing all attributes that are not specified as input features. It will also check for missing attributes in the `BaseDataset`, so you don't have to take care of this here.
# In[8]:
class CamelsCL(BaseDataset):
def __init__(self,
cfg: Config,
is_train: bool,
period: str,
basin: str = None,
additional_features: List[Dict[str, pd.DataFrame]] = [],
id_to_int: Dict[str, int] = {},
scaler: Dict[str, Union[pd.Series, xarray.DataArray]] = {}):
# Initialize `BaseDataset` class
super(CamelsCL, self).__init__(cfg=cfg,
is_train=is_train,
period=period,
basin=basin,
additional_features=additional_features,
id_to_int=id_to_int,
scaler=scaler)
def _load_basin_data(self, basin: str) -> pd.DataFrame:
"""Load timeseries data of one specific basin"""
return load_camels_cl_timeseries(data_dir=self.cfg.data_dir, basin=basin)
def _load_attributes(self) -> pd.DataFrame:
"""Load catchment attributes"""
return load_camels_cl_attributes(self.cfg.data_dir, basins=self.basins)
# ### Integrating the dataset class into neuralHydrology
#
# With these pieces in place, the dataset class is complete. The final step is to make the new class known to the library so that it can be selected through the `dataset` argument of the config.
<reponame>to-aoki/my-pytorch-bert
# coding=utf-8
#
# Author <NAME>
# This file is based on
# https://github.com/huggingface/pytorch-pretrained-BERT/blob/master/examples/run_lm_finetuning.py.
# This uses the part of BERTDataset.
#
# Copyright 2018 The Google AI Language Team Authors and The HugginFace Inc. team.
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""for BERT pre-training dataset."""
import os
import itertools
import gzip
from random import random, randint, shuffle, randrange
import numpy as np
import torch
from torch.utils.data import RandomSampler
from torch.utils.data import Dataset
from torch.utils.data import DataLoader
from tqdm import tqdm
from mptb.utils import truncate_seq_pair, get_tokenizer
import copy
import pickle
class StackedSentenceDataset(Dataset):
def __init__(self, tokenizer, max_pos, dataset_path=None, documents=[], encoding='utf-8',
sentence_stack=True, pickle_path=None, max_words_length=4, is_sop=False, lazy=False):
self.tokenizer = tokenizer
self.max_pos = max_pos
self.dataset_path = dataset_path
self.encoding = encoding
# BERT reserved tokens
bert_unk_id = self.tokenizer.convert_tokens_to_ids(["[UNK]"])[0]
self.pad_id = self.tokenizer.convert_tokens_to_ids(["[PAD]"])[0]
if self.pad_id == bert_unk_id:
pad_id = self.tokenizer.convert_tokens_to_ids(["<pad>"])[0]
if pad_id != bert_unk_id:
# sentencepiece
self.pad_id = pad_id
else:
raise ValueError('[PAD] is not defined')
self.cls_id = self.tokenizer.convert_tokens_to_ids(["[CLS]"])[0]
self.sep_id = self.tokenizer.convert_tokens_to_ids(["[SEP]"])[0]
self.mask_id = self.tokenizer.convert_tokens_to_ids(["[MASK]"])[0]
self.sentence_stack = sentence_stack
self.max_words_length = max_words_length
self.all_documents = []
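        # number of special tokens added to every sample: [CLS] + [SEP], plus a second [SEP] when sentence-order prediction (SOP) is used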
self.bert_ids_num = 3 if is_sop else 2
self.is_sop = is_sop
self.lazy = lazy
if pickle_path is not None:
if pickle_path.endswith("gz"):
with gzip.open(pickle_path, "rb") as reader:
self.all_documents = pickle.load(reader)
else:
with open(pickle_path, "rb") as reader:
self.all_documents = pickle.load(reader)
self.indices = list(range(len(self.all_documents)))
shuffle(self.indices)
self.last_indices_path = pickle_path + '.index'
if os.path.exists(self.last_indices_path):
with open(self.last_indices_path, "rb") as index_reader:
last_indices = pickle.load(index_reader)
if isinstance(last_indices, list):
self.indices = list(dict.fromkeys(last_indices + self.indices))
print('Last indices loaded : ' + str(len(last_indices)))
print('Pickled text tensor loaded : ' + str(len(self.indices)))
return
if self.lazy:
self.indices = []
with open(dataset_path, "r", encoding=encoding) as reader:
for _ in tqdm(reader, desc="Loading Dataset"):
self.indices.append(1)
self.file = open(dataset_path, "rb")
self.last_indices_path = dataset_path + '.index'
self.read_counts = 0
self.buffer = None
if os.path.exists(self.last_indices_path):
with open(self.last_indices_path, "rb") as index_reader:
last_indices = pickle.load(index_reader)
if isinstance(last_indices, dict):
self.read_counts = last_indices.get('read_counts', 0)
self.buffer = last_indices.get('buffer', None)
print('Last indices loaded : ', self.read_counts)
if self.read_counts == len(self.indices):
self.file.close()
self.file = open(self.dataset_path, "r", encoding=self.encoding)
self.read_counts = 0
self.buffer = None
for i in range(self.read_counts):
self.file.__next__()
self.limit = 0
return
# load samples into memory
if len(documents) > 0 or dataset_path is not None:
stack = []
if len(documents) > 0:
for text in documents:
stack = self._load_text(text, stack)
else:
with open(dataset_path, "r", encoding=encoding) as reader:
for text in tqdm(reader, desc="Loading Dataset"):
stack = self._load_text(text, stack)
            # Handle the final stacked sentences when the file ends at EOF without a trailing empty line
if len(stack) > 0:
if self.is_sop:
ids = []
for t in stack:
ids.append(self.tokenizer.convert_tokens_to_ids(t))
self.all_documents.append(ids)
else:
self.all_documents.append(self.tokenizer.convert_tokens_to_ids(stack))
        if len(self.all_documents) == 0:
            raise ValueError(dataset_path + ' does not contain any documents.')
self.indices = list(range(len(self.all_documents)))
def dump_last_indices(self, steps):
if hasattr(self, 'last_indices_path'):
if self.lazy:
# lazy mode
if self.buffer is not None:
lazy_dump = {'read_counts': self.read_counts, 'buffer': self.buffer}
else:
lazy_dump = {'read_counts': self.read_counts}
with open(self.last_indices_path, "wb") as f:
pickle.dump(lazy_dump, f)
else:
# pickled token indices loaded
last_indices = steps % len(self.indices)
with open(self.last_indices_path, "wb") as f:
pickle.dump(self.indices[last_indices:], f)
def dump_ids_documents(self, output_path, is_gzip=False):
if is_gzip:
with gzip.open(output_path + '.gz', 'wb') as f:
pickle.dump(self.all_documents, f)
else:
with open(output_path, 'wb+') as f:
pickle.dump(self.all_documents, f)
def _load_text(self, text, stack):
text = text.strip()
if text == '':
if len(stack) > 0:
if self.is_sop:
ids = []
for t in stack:
ids.append(self.tokenizer.convert_tokens_to_ids(t))
if len(ids) > 0:
self.all_documents.append(ids)
else:
self.all_documents.append(self.tokenizer.convert_tokens_to_ids(stack))
stack = []
else:
if self.is_sop:
tokens = self.tokenizer.tokenize(text)
token_len = 0
for x in stack:
token_len += len(x)
if token_len + len(tokens) > self.max_pos - self.bert_ids_num:
ids = []
for t in stack:
ids.append(self.tokenizer.convert_tokens_to_ids(t))
if len(ids) > 0:
self.all_documents.append(ids)
if len(tokens) > self.max_pos - self.bert_ids_num:
tokens = tokens[:self.max_pos - self.bert_ids_num]
self.all_documents.append(self.tokenizer.convert_tokens_to_ids(tokens))
stack = []
return stack
stack.append(tokens)
elif self.sentence_stack:
tokens = self.tokenizer.tokenize(text)
if len(stack) + len(tokens) > self.max_pos - self.bert_ids_num:
if len(stack) > 0:
self.all_documents.append(self.tokenizer.convert_tokens_to_ids(stack))
if len(tokens) > self.max_pos - self.bert_ids_num:
tokens = tokens[:self.max_pos - self.bert_ids_num]
self.all_documents.append(self.tokenizer.convert_tokens_to_ids(tokens))
stack = []
return stack
stack.extend(tokens)
else:
tokens = self.tokenizer.tokenize(text)
self.all_documents.append(self.tokenizer.convert_tokens_to_ids(tokens))
return stack
def _load_lazy_text(self):
ids = []
if self.buffer is not None:
if self.is_sop:
if len(self.buffer) > self.max_pos - self.bert_ids_num:
ids = [self.buffer[:self.max_pos - self.bert_ids_num]]
self.buffer = None
self.limit = 0
return ids
ids = [self.buffer]
else:
if len(self.buffer) > self.max_pos - self.bert_ids_num:
ids = self.buffer[:self.max_pos - self.bert_ids_num]
self.buffer = None
self.limit = 0
return ids
ids = self.buffer
self.buffer = None
while True:
if self.read_counts == len(self.indices):
self.file.close()
self.file = open(self.dataset_path, "r", encoding=self.encoding)
self.read_counts = 0
break
text = self.file.__next__().rstrip()
self.read_counts += 1
if hasattr(text, 'decode'):
try:
text = text.decode(self.encoding)
except:
# decode errors sometimes occur when using torch/xla (google colab)
text = ''
if text == '':
if len(ids) > 0:
break
else:
continue
tokens = self.tokenizer.tokenize(text)
if self.is_sop:
token_len = 0
for x in ids:
token_len += len(x)
if token_len + len(tokens) > self.max_pos - self.bert_ids_num:
self.buffer = self.tokenizer.convert_tokens_to_ids(tokens)
break
else:
ids.append(self.tokenizer.convert_tokens_to_ids(tokens))
else:
if len(ids) + len(tokens) > self.max_pos - self.bert_ids_num:
self.buffer = self.tokenizer.convert_tokens_to_ids(tokens)
break
else:
ids.extend(self.tokenizer.convert_tokens_to_ids(tokens))
if self.limit > 3:
            raise ValueError('dataset contains too many consecutive empty lines.')
if len(ids) == 0:
self.limit += 1
return self._load_lazy_text()
self.limit = 0
return ids
def __len__(self):
return len(self.indices)
def __getitem__(self, item):
# transform sample to features
if self.lazy:
features = self.convert_example_to_features(self._load_lazy_text(), self.max_pos)
else:
features = self.convert_example_to_features(self.all_documents[self.indices[item]], self.max_pos)
return [torch.tensor(x, dtype=torch.long) for x in features]
def convert_example_to_features(
self, token_ids, max_pos, short_seq_prob=0.1, masked_lm_prob=0.15):
"""
Convert a raw sample (pair of sentences as tokenized strings) into a proper training sample with
IDs, LM labels, input_mask, CLS and SEP tokens etc.
:param token_ids: str, example tokens.
:param max_pos: int, maximum length of sequence.
:param short_seq_prob: float, Probability of creating sequences which are shorter than the maximum length.
:param masked_lm_prob: float, Masked LM probability.
:return: features
"""
target_max_pos = max_pos - self.bert_ids_num
if random() < short_seq_prob:
target_max_pos = randint(2, target_max_pos - 1)
if self.is_sop:
split_ids = copy.copy(token_ids)
if len(split_ids) > 2:
split_id = randint(1, len(split_ids) - 1)
tokens_a = []
for i in range(split_id):
tokens_a.append(split_ids[i])
tokens_b = []
for i in range(split_id, len(split_ids)):
tokens_b.append(split_ids[i])
else:
tokens_a = split_ids
tokens_b = []
if len(tokens_b) > 0 and random() < 0.5:
is_random_next = 1
temp = tokens_a
tokens_a = tokens_b
tokens_b = temp
else:
is_random_next = 0
tokens_a = list(itertools.chain.from_iterable(tokens_a))
tokens_b = list(itertools.chain.from_iterable(tokens_b))
truncate_seq_pair(tokens_a, tokens_b, target_max_pos)
# Add Special Tokens
tokens_a.insert(0, self.cls_id)
tokens_a.append(self.sep_id)
if len(tokens_b) > 0:
tokens_b.append(self.sep_id)
else:
tokens_b = []
lm_label_ids = [self.pad_id] * len(tokens_a + tokens_b)
bert_token_ids = copy.copy(tokens_a)
bert_token_ids.extend(copy.copy(tokens_b))
# Add next sentence segment
segment_ids = [0] * len(tokens_a) + [1] * len(tokens_b)
else:
label_ids = copy.copy(token_ids)
truncate_seq_pair(label_ids, [], target_max_pos)
# Add Special Tokens
label_ids = [self.cls_id] + label_ids + [self.sep_id]
lm_label_ids = [self.pad_id] * len(label_ids)
bert_token_ids = copy.copy(label_ids)
segment_ids = [0] * len(label_ids)
is_random_next = 0
# mask prediction calc
mask_prediction = round((len(bert_token_ids) - self.bert_ids_num) * masked_lm_prob)
mask_candidate_pos = [i for i, token in enumerate(
bert_token_ids) if token != self.cls_id and token != self.sep_id]
mask_length = np.random.geometric(0.4)
if mask_length > self.max_words_length:
mask_length = self.max_words_length
if mask_length > mask_prediction:
mask_length = mask_prediction
if mask_length > 0:
mask_candidate_words = np.array_split(mask_candidate_pos, int(len(mask_candidate_pos)/mask_length))
shuffle(mask_candidate_words)
masked = 0
# masked and random token
for words in mask_candidate_words:
for pos in words:
masked += 1
if random() < 0.8: # 80%
# masked
lm_label_ids[pos] = bert_token_ids[pos]
bert_token_ids[pos] = self.mask_id
elif random() < 0.5: # 10%
# random token
lm_label_ids[pos] = bert_token_ids[pos]
bert_token_ids[pos] = self.get_random_token_id()
else:
# 10% not mask and not modify
lm_label_ids[pos] = bert_token_ids[pos]
if masked == mask_prediction:
break
if masked == mask_prediction:
break
input_ids = bert_token_ids
# zero padding
num_zero_pad = self.max_pos - len(input_ids)
input_mask = [1] * len(input_ids)
if segment_ids:
segment_ids.extend([0] * num_zero_pad)
input_mask.extend([0] * num_zero_pad)
        input_ids.extend([self.pad_id] * num_zero_pad)
delta_sync_golem__FPC9TCmdGolemiUc(struct TCmdGolem *pG, int pnum, unsigned char bLevel)")
del_items(0x80048A78)
SetType(0x80048A78, "void delta_leave_sync__FUc(unsigned char bLevel)")
del_items(0x80048CD0)
SetType(0x80048CD0, "void delta_sync_object__FiUcUc(int oi, unsigned char bCmd, unsigned char bLevel)")
del_items(0x80048D10)
SetType(0x80048D10, "unsigned char delta_get_item__FPC9TCmdGItemUc(struct TCmdGItem *pI, unsigned char bLevel)")
del_items(0x80048ED4)
SetType(0x80048ED4, "void delta_put_item__FPC9TCmdPItemiiUc(struct TCmdPItem *pI, int x, int y, unsigned char bLevel)")
del_items(0x8004903C)
SetType(0x8004903C, "unsigned char delta_portal_inited__Fi(int i)")
del_items(0x80049060)
SetType(0x80049060, "unsigned char delta_quest_inited__Fi(int i)")
del_items(0x80049084)
SetType(0x80049084, "void DeltaAddItem__Fi(int ii)")
del_items(0x800492C4)
SetType(0x800492C4, "int DeltaExportData__FPc(char *Dst)")
del_items(0x800492F4)
SetType(0x800492F4, "int DeltaImportData__FPc(char *Src)")
del_items(0x80049328)
SetType(0x80049328, "void DeltaSaveLevel__Fv()")
del_items(0x800493D0)
SetType(0x800493D0, "void NetSendCmd__FUcUc(unsigned char bHiPri, unsigned char bCmd)")
del_items(0x800493F8)
SetType(0x800493F8, "void NetSendCmdGolem__FUcUcUcUclUc(unsigned char mx, unsigned char my, unsigned char dir, unsigned char menemy, long hp, int cl)")
del_items(0x80049444)
SetType(0x80049444, "void NetSendCmdLoc__FUcUcUcUc(unsigned char bHiPri, unsigned char bCmd, unsigned char x, unsigned char y)")
del_items(0x80049474)
SetType(0x80049474, "void NetSendCmdLocParam1__FUcUcUcUcUs(unsigned char bHiPri, unsigned char bCmd, unsigned char x, unsigned char y, int wParam1)")
del_items(0x800494AC)
SetType(0x800494AC, "void NetSendCmdLocParam2__FUcUcUcUcUsUs(unsigned char bHiPri, unsigned char bCmd, unsigned char x, unsigned char y, int wParam1, int wParam2)")
del_items(0x800494EC)
SetType(0x800494EC, "void NetSendCmdLocParam3__FUcUcUcUcUsUsUs(unsigned char bHiPri, unsigned char bCmd, unsigned char x, unsigned char y, int wParam1, int wParam2, int wParam3)")
del_items(0x80049534)
SetType(0x80049534, "void NetSendCmdParam1__FUcUcUs(unsigned char bHiPri, unsigned char bCmd, unsigned short wParam1)")
del_items(0x80049560)
SetType(0x80049560, "void NetSendCmdParam2__FUcUcUsUs(unsigned char bHiPri, unsigned char bCmd, unsigned short wParam1, unsigned short wParam2)")
del_items(0x80049590)
SetType(0x80049590, "void NetSendCmdParam3__FUcUcUsUsUs(unsigned char bHiPri, unsigned char bCmd, unsigned short wParam1, unsigned short wParam2, int wParam3)")
del_items(0x800495C8)
SetType(0x800495C8, "void NetSendCmdQuest__FUcUc(unsigned char bHiPri, unsigned char q)")
del_items(0x8004963C)
SetType(0x8004963C, "void NetSendCmdGItem__FUcUcUcUcUc(unsigned char bHiPri, unsigned char bCmd, unsigned char mast, unsigned char pnum, int ii)")
del_items(0x80049770)
SetType(0x80049770, "void NetSendCmdGItem2__FUcUcUcUcPC9TCmdGItem(unsigned char usonly, unsigned char bCmd, unsigned char mast, unsigned char pnum, struct TCmdGItem *p)")
del_items(0x800497F4)
SetType(0x800497F4, "unsigned char NetSendCmdReq2__FUcUcUcPC9TCmdGItem(unsigned char bCmd, unsigned char mast, unsigned char pnum, struct TCmdGItem *p)")
del_items(0x80049854)
SetType(0x80049854, "void NetSendCmdExtra__FPC9TCmdGItem(struct TCmdGItem *p)")
del_items(0x800498C4)
SetType(0x800498C4, "void NetSendCmdPItem__FUcUcUcUc(unsigned char bHiPri, unsigned char bCmd, unsigned char x, unsigned char y)")
del_items(0x800499CC)
SetType(0x800499CC, "void NetSendCmdChItem__FUcUc(unsigned char bHiPri, unsigned char bLoc)")
del_items(0x80049A70)
SetType(0x80049A70, "void NetSendCmdDelItem__FUcUc(unsigned char bHiPri, unsigned char bLoc)")
del_items(0x80049AA0)
SetType(0x80049AA0, "void NetSendCmdDItem__FUci(unsigned char bHiPri, int ii)")
del_items(0x80049BB4)
SetType(0x80049BB4, "unsigned char i_own_level__Fi(int nReqLevel)")
del_items(0x80049BBC)
SetType(0x80049BBC, "void NetSendCmdDamage__FUcUcUl(unsigned char bHiPri, unsigned char bPlr, unsigned long dwDam)")
del_items(0x80049BF0)
SetType(0x80049BF0, "void delta_open_portal__FiUcUcUcUcUc(int pnum, unsigned char x, unsigned char y, unsigned char bLevel, int bLType, int bSetLvl)")
del_items(0x80049C4C)
SetType(0x80049C4C, "void delta_close_portal__Fi(int pnum)")
del_items(0x80049C8C)
SetType(0x80049C8C, "void check_update_plr__Fi(int pnum)")
del_items(0x80049C94)
SetType(0x80049C94, "unsigned long On_WALKXY__FPC4TCmdi(struct TCmd *pCmd, int pnum)")
del_items(0x80049D4C)
SetType(0x80049D4C, "unsigned long On_ADDSTR__FPC4TCmdi(struct TCmd *pCmd, int pnum)")
del_items(0x80049D7C)
SetType(0x80049D7C, "unsigned long On_ADDMAG__FPC4TCmdi(struct TCmd *pCmd, int pnum)")
del_items(0x80049DAC)
SetType(0x80049DAC, "unsigned long On_ADDDEX__FPC4TCmdi(struct TCmd *pCmd, int pnum)")
del_items(0x80049DDC)
SetType(0x80049DDC, "unsigned long On_ADDVIT__FPC4TCmdi(struct TCmd *pCmd, int pnum)")
del_items(0x80049E0C)
SetType(0x80049E0C, "unsigned long On_SBSPELL__FPC4TCmdi(struct TCmd *pCmd, int pnum)")
del_items(0x80049E80)
SetType(0x80049E80, "unsigned long On_GOTOGETITEM__FPC4TCmdi(struct TCmd *pCmd, int pnum)")
del_items(0x80049F28)
SetType(0x80049F28, "unsigned long On_REQUESTGITEM__FPC4TCmdi(struct TCmd *pCmd, int pnum)")
del_items(0x8004A06C)
SetType(0x8004A06C, "unsigned long On_GETITEM__FPC4TCmdi(struct TCmd *pCmd, int pnum)")
del_items(0x8004A244)
SetType(0x8004A244, "unsigned long On_GOTOAGETITEM__FPC4TCmdi(struct TCmd *pCmd, int pnum)")
del_items(0x8004A2EC)
SetType(0x8004A2EC, "unsigned long On_REQUESTAGITEM__FPC4TCmdi(struct TCmd *pCmd, int pnum)")
del_items(0x8004A424)
SetType(0x8004A424, "unsigned long On_AGETITEM__FPC4TCmdi(struct TCmd *pCmd, int pnum)")
del_items(0x8004A5F4)
SetType(0x8004A5F4, "unsigned long On_ITEMEXTRA__FPC4TCmdi(struct TCmd *pCmd, int pnum)")
del_items(0x8004A694)
SetType(0x8004A694, "unsigned long On_PUTITEM__FPC4TCmdi(struct TCmd *pCmd, int pnum)")
del_items(0x8004A820)
SetType(0x8004A820, "unsigned long On_SYNCPUTITEM__FPC4TCmdi(struct TCmd *pCmd, int pnum)")
del_items(0x8004A964)
SetType(0x8004A964, "unsigned long On_RESPAWNITEM__FPC4TCmdi(struct TCmd *pCmd, int pnum)")
del_items(0x8004AA84)
SetType(0x8004AA84, "unsigned long On_SATTACKXY__FPC4TCmdi(struct TCmd *pCmd, int pnum)")
del_items(0x8004AB34)
SetType(0x8004AB34, "unsigned long On_SPELLXYD__FPC4TCmdi(struct TCmd *pCmd, int pnum)")
del_items(0x8004AC40)
SetType(0x8004AC40, "unsigned long On_SPELLXY__FPC4TCmdi(struct TCmd *pCmd, int pnum)")
del_items(0x8004AD3C)
SetType(0x8004AD3C, "unsigned long On_TSPELLXY__FPC4TCmdi(struct TCmd *pCmd, int pnum)")
del_items(0x8004AE3C)
SetType(0x8004AE3C, "unsigned long On_OPOBJXY__FPC4TCmdi(struct TCmd *pCmd, int pnum)")
del_items(0x8004AF6C)
SetType(0x8004AF6C, "unsigned long On_DISARMXY__FPC4TCmdi(struct TCmd *pCmd, int pnum)")
del_items(0x8004B09C)
SetType(0x8004B09C, "unsigned long On_OPOBJT__FPC4TCmdi(struct TCmd *pCmd, int pnum)")
del_items(0x8004B108)
SetType(0x8004B108, "unsigned long On_ATTACKID__FPC4TCmdi(struct TCmd *pCmd, int pnum)")
del_items(0x8004B264)
SetType(0x8004B264, "unsigned long On_SPELLID__FPC4TCmdi(struct TCmd *pCmd, int pnum)")
del_items(0x8004B350)
SetType(0x8004B350, "unsigned long On_SPELLPID__FPC4TCmdi(struct TCmd *pCmd, int pnum)")
del_items(0x8004B434)
SetType(0x8004B434, "unsigned long On_TSPELLID__FPC4TCmdi(struct TCmd *pCmd, int pnum)")
del_items(0x8004B51C)
SetType(0x8004B51C, "unsigned long On_TSPELLPID__FPC4TCmdi(struct TCmd *pCmd, int pnum)")
del_items(0x8004B604)
SetType(0x8004B604, "unsigned long On_KNOCKBACK__FPC4TCmdi(struct TCmd *pCmd, int pnum)")
del_items(0x8004B694)
SetType(0x8004B694, "unsigned long On_RESURRECT__FPC4TCmdi(struct TCmd *pCmd, int pnum)")
del_items(0x8004B6D0)
SetType(0x8004B6D0, "unsigned long On_HEALOTHER__FPC4TCmdi(struct TCmd *pCmd, int pnum)")
del_items(0x8004B740)
SetType(0x8004B740, "unsigned long On_TALKXY__FPC4TCmdi(struct TCmd *pCmd, int pnum)")
del_items(0x8004B7E8)
SetType(0x8004B7E8, "unsigned long On_NEWLVL__FPC4TCmdi(struct TCmd *pCmd, int pnum)")
del_items(0x8004B820)
SetType(0x8004B820, "unsigned long On_WARP__FPC4TCmdi(struct TCmd *pCmd, int pnum)")
del_items(0x8004B90C)
SetType(0x8004B90C, "unsigned long On_MONSTDEATH__FPC4TCmdi(struct TCmd *pCmd, int pnum)")
del_items(0x8004B978)
SetType(0x8004B978, "unsigned long On_KILLGOLEM__FPC4TCmdi(struct TCmd *pCmd, int pnum)")
del_items(0x8004B9E4)
SetType(0x8004B9E4, "unsigned long On_AWAKEGOLEM__FPC4TCmdi(struct TCmd *pCmd, int pnum)")
del_items(0x8004BB58)
SetType(0x8004BB58, "unsigned long On_MONSTDAMAGE__FPC4TCmdi(struct TCmd *pCmd, int pnum)")
del_items(0x8004BC60)
SetType(0x8004BC60, "unsigned long On_PLRDEAD__FPC4TCmdi(struct TCmd *pCmd, int pnum)")
del_items(0x8004BCA8)
SetType(0x8004BCA8, "unsigned long On_PLRDAMAGE__FPC4TCmdi(struct TCmd *pCmd, int pnum)")
del_items(0x8004BE70)
SetType(0x8004BE70, "unsigned long On_OPENDOOR__FPC4TCmdi(struct TCmd *pCmd, int pnum)")
del_items(0x8004BF0C)
SetType(0x8004BF0C, "unsigned long On_CLOSEDOOR__FPC4TCmdi(struct TCmd *pCmd, int pnum)")
del_items(0x8004BFA8)
SetType(0x8004BFA8, "unsigned long On_OPERATEOBJ__FPC4TCmdi(struct TCmd *pCmd, int pnum)")
del_items(0x8004C044)
SetType(0x8004C044, "unsigned long On_PLROPOBJ__FPC4TCmdi(struct TCmd *pCmd, int pnum)")
del_items(0x8004C0DC)
SetType(0x8004C0DC, "unsigned long On_BREAKOBJ__FPC4TCmdi(struct TCmd *pCmd, int pnum)")
del_items(0x8004C174)
SetType(0x8004C174, "unsigned long On_CHANGEPLRITEMS__FPC4TCmdi(struct TCmd *pCmd, int pnum)")
del_items(0x8004C17C)
SetType(0x8004C17C, "unsigned long On_DELPLRITEMS__FPC4TCmdi(struct TCmd *pCmd, int pnum)")
del_items(0x8004C184)
SetType(0x8004C184, "unsigned long On_PLRLEVEL__FPC4TCmdi(struct TCmd *pCmd, int pnum)")
del_items(0x8004C18C)
SetType(0x8004C18C, "unsigned long On_DROPITEM__FPC4TCmdi(struct TCmd *pCmd, int pnum)")
del_items(0x8004C1E4)
SetType(0x8004C1E4, "unsigned long On_PLAYER_JOINLEVEL__FPC4TCmdi(struct TCmd *pCmd, int pnum)")
del_items(0x8004C47C)
SetType(0x8004C47C, "unsigned long On_ACTIVATEPORTAL__FPC4TCmdi(struct TCmd *pCmd, int pnum)")
del_items(0x8004C610)
SetType(0x8004C610, "unsigned long On_DEACTIVATEPORTAL__FPC4TCmdi(struct TCmd *pCmd, int pnum)")
del_items(0x8004C664)
SetType(0x8004C664, "unsigned long On_RETOWN__FPC4TCmdi(struct TCmd *pCmd, int pnum)")
del_items(0x8004C6B0)
SetType(0x8004C6B0, "unsigned long On_SETSTR__FPC4TCmdi(struct TCmd *pCmd, int pnum)")
del_items(0x8004C6F0)
SetType(0x8004C6F0, "unsigned long On_SETDEX__FPC4TCmdi(struct TCmd *pCmd, int pnum)")
del_items(0x8004C730)
SetType(0x8004C730, "unsigned long On_SETMAG__FPC4TCmdi(struct TCmd *pCmd, int pnum)")
del_items(0x8004C770)
SetType(0x8004C770, "unsigned long On_SETVIT__FPC4TCmdi(struct TCmd *pCmd, int pnum)")
del_items(0x8004C7B0)
SetType(0x8004C7B0, "unsigned long On_SYNCQUEST__FPC4TCmdi(struct TCmd *pCmd, int pnum)")
del_items(0x8004C7F8)
SetType(0x8004C7F8, "unsigned long On_ENDSHIELD__FPC4TCmdi(struct TCmd *pCmd, int pnum)")
del_items(0x8004C91C)
SetType(0x8004C91C, "unsigned long On_CHEAT_EXPERIENCE__FPC4TCmdi(struct TCmd *pCmd, int pnum)")
del_items(0x8004C924)
SetType(0x8004C924, "unsigned long On_CHEAT_SPELL_LEVEL__FPC4TCmdi(struct TCmd *pCmd, int pnum)")
del_items(0x8004C92C)
SetType(0x8004C92C, "unsigned long On_DEBUG__FPC4TCmdi(struct TCmd *pCmd, int pnum)")
del_items(0x8004C934)
SetType(0x8004C934, "unsigned long ParseCmd__FiPC4TCmd(int pnum, struct TCmd *pCmd)")
del_items(0x8004CD88)
SetType(0x8004CD88, "void NetSendLoPri__FPCUcUc(unsigned char *pbMsg, unsigned char bLen)")
del_items(0x8004CDB4)
SetType(0x8004CDB4, "int InitLevelType__Fi(int l)")
del_items(0x8004CE00)
SetType(0x8004CE00, "void SetupLocalCoords__Fv()")
del_items(0x8004CF90)
SetType(0x8004CF90, "void InitNewSeed__Fl(long newseed)")
del_items(0x8004D004)
SetType(0x8004D004, "unsigned char NetInit__FUcPUc(unsigned char bSinglePlayer, unsigned char *pfExitProgram)")
del_items(0x8004D22C)
SetType(0x8004D22C, "void InitObjectGFX__Fv()")
del_items(0x8004D448)
SetType(0x8004D448, "void FreeObjectGFX__Fv()")
del_items(0x8004D454)
SetType(0x8004D454, "void DeleteObject__Fii(int oi, int i)")
del_items(0x8004D50C)
SetType(0x8004D50C, "void SetupObject__Fiiii(int i, int x, int y, int ot)")
del_items(0x8004D790)
SetType(0x8004D790, "void SetObjMapRange__Fiiiiii(int i, int x1, int y1, int x2, int y2, int v)")
del_items(0x8004D7F0)
SetType(0x8004D7F0, "void SetBookMsg__Fii(int i, int msg)")
del_items(0x8004D818)
SetType(0x8004D818, "void AddObject__Fiii(int ot, int ox, int oy)")
del_items(0x8004D924)
SetType(0x8004D924, "void Obj_Light__Fii(int i, int lr)")
del_items(0x8004DB34)
SetType(0x8004DB34, "void Obj_Circle__Fi(int i)")
del_items(0x8004DE58)
SetType(0x8004DE58, "void Obj_StopAnim__Fi(int i)")
del_items(0x8004DEBC)
SetType(0x8004DEBC, "void DrawExpl__Fiiiiiccc(int sx, int sy, int f, int ot, int scale, int rtint, int gtint, int btint)")
del_items(0x8004E198)
SetType(0x8004E198, "void DrawObjExpl__FP12ObjectStructiii(struct ObjectStruct *obj, int ScrX, int ScrY, int ot)")
del_items(0x8004E208)
SetType(0x8004E208, "void Obj_Door__Fi(int i)")
del_items(0x8004E39C)
SetType(0x8004E39C, "void Obj_Sarc__Fi(int i)")
del_items(0x8004E3E8)
SetType(0x8004E3E8, "void ActivateTrapLine__Fii(int ttype, int tid)")
del_items(0x8004E4F8)
SetType(0x8004E4F8, "void Obj_FlameTrap__Fi(int i)")
del_items(0x8004E7C8)
SetType(0x8004E7C8, "void Obj_Trap__Fi(int i)")
del_items(0x8004EB18)
SetType(0x8004EB18, "void Obj_BCrossDamage__Fi(int i)")
del_items(0x8004EDA8)
SetType(0x8004EDA8, "void ProcessObjects__Fv()")
del_items(0x8004F048)
SetType(0x8004F048, "void ObjSetMicro__Fiii(int dx, int dy, int pn)")
del_items(0x8004F080)
SetType(0x8004F080, "void ObjSetMini__Fiii(int x, int y, int v)")
del_items(0x8004F154)
SetType(0x8004F154, "void ObjL1Special__Fiiii(int x1, int y1, int x2, int y2)")
del_items(0x8004F15C)
SetType(0x8004F15C, "void ObjL2Special__Fiiii(int x1, int y1, int x2, int y2)")
del_items(0x8004F164)
SetType(0x8004F164, "void DoorSet__Fiii(int oi, int dx, int dy)")
del_items(0x8004F3E4)
SetType(0x8004F3E4, "void RedoPlayerVision__Fv()")
del_items(0x8004F488)
SetType(0x8004F488, "void OperateL1RDoor__FiiUc(int pnum, int oi, unsigned char sendflag)")
del_items(0x8004F82C)
SetType(0x8004F82C, "void OperateL1LDoor__FiiUc(int pnum, int oi, unsigned char sendflag)")
del_items(0x8004FC04)
SetType(0x8004FC04, "void OperateL2RDoor__FiiUc(int pnum, int oi, unsigned char sendflag)")
del_items(0x8004FF9C)
SetType(0x8004FF9C, "void OperateL2LDoor__FiiUc(int pnum, int oi, unsigned char sendflag)")
del_items(0x80050334)
SetType(0x80050334, "void OperateL3RDoor__FiiUc(int pnum, int oi, unsigned char sendflag)")
del_items(0x8005063C)
SetType(0x8005063C, "void OperateL3LDoor__FiiUc(int pnum, int oi, unsigned char sendflag)")
del_items(0x80050944)
SetType(0x80050944, "void MonstCheckDoors__Fi(int m)")
del_items(0x80050E40)
SetType(0x80050E40, "void ObjChangeMap__Fiiii(int x1, int y1, int x2, int y2)")
del_items(0x80050FF8)
SetType(0x80050FF8, "void ObjChangeMapResync__Fiiii(int x1, int y1, int x2, int y2)")
del_items(0x80051168)
SetType(0x80051168, "void OperateL1Door__FiiUc(int pnum, int i, unsigned char sendflag)")
del_items(0x800512C4)
SetType(0x800512C4, "void OperateLever__Fii(int pnum, int i)")
del_items(0x800514B0)
SetType(0x800514B0, "void OperateBook__Fii(int pnum, int i)")
del_items(0x800519A4)
SetType(0x800519A4, "void OperateBookLever__Fii(int pnum, int i)")
del_items(0x80051D4C)
SetType(0x80051D4C, "void OperateSChambBk__Fii(int pnum, int i)")
del_items(0x80051F24)
SetType(0x80051F24, "void OperateChest__FiiUc(int pnum, int i, unsigned char sendmsg)")
del_items(0x800522F4)
SetType(0x800522F4, "void OperateMushPatch__Fii(int pnum, int i)")
del_items(0x800524B8)
SetType(0x800524B8, "void OperateInnSignChest__Fii(int pnum, int i)")
del_items(0x80052650)
SetType(0x80052650, "void OperateSlainHero__FiiUc(int pnum, int i, unsigned char sendmsg)")
del_items(0x800528A4)
SetType(0x800528A4, "void OperateTrapLvr__Fi(int i)")
del_items(0x80052A74)
SetType(0x80052A74, "void OperateSarc__FiiUc(int pnum, int i, unsigned char sendmsg)")
del_items(0x80052C2C)
SetType(0x80052C2C, "void OperateL2Door__FiiUc(int pnum, int i, unsigned char sendflag)")
del_items(0x80052D88)
SetType(0x80052D88, "void OperateL3Door__FiiUc(int pnum, int i, unsigned char sendflag)")
del_items(0x80052EE4)
SetType(0x80052EE4, "void LoadMapObjs__FPUcii(unsigned char *pMap, int startx, int starty)")
del_items(0x80052FEC)
SetType(0x80052FEC, "void OperatePedistal__Fii(int pnum, int i)")
del_items(0x800532A4)
SetType(0x800532A4, "void TryDisarm__Fii(int pnum, int i)")
del_items(0x80053468)
SetType(0x80053468, "int ItemMiscIdIdx__Fi(int imiscid)")
del_items(0x800534D8)
SetType(0x800534D8, "void OperateShrine__Fiii(int pnum, int i, int sType)")
del_items(0x80055A9C)
SetType(0x80055A9C, "void OperateSkelBook__FiiUc(int pnum, int i, unsigned char sendmsg)")
del_items(0x80055C18)
SetType(0x80055C18, "void OperateBookCase__FiiUc(int pnum, int i, unsigned char sendmsg)")
del_items(0x80055DE8)
SetType(0x80055DE8, "void OperateDecap__FiiUc(int pnum, int i, unsigned char sendmsg)")
del_items(0x80055ED0)
SetType(0x80055ED0, "void OperateArmorStand__FiiUc(int pnum, int i, unsigned char sendmsg)")
del_items(0x80056040)
SetType(0x80056040, "int FindValidShrine__Fi(int i)")
del_items(0x80056130)
SetType(0x80056130, "void OperateGoatShrine__Fiii(int pnum, int i, int sType)")
del_items(0x800561D8)
SetType(0x800561D8, "void OperateCauldron__Fiii(int pnum, int i, int sType)")
del_items(0x8005628C)
SetType(0x8005628C, "unsigned char OperateFountains__Fii(int pnum, int i)")
del_items(0x80056838)
SetType(0x80056838, "void OperateWeaponRack__FiiUc(int pnum, int i, unsigned char sendmsg)")
del_items(0x800569E4)
SetType(0x800569E4, "void OperateStoryBook__Fii(int pnum, int i)")
del_items(0x80056AD4)
SetType(0x80056AD4, "void OperateLazStand__Fii(int pnum, int i)")
del_items(0x80056BB4)
SetType(0x80056BB4, "void OperateObject__FiiUc(int pnum, int i, unsigned char TeleFlag)")
del_items(0x80056FEC)
SetType(0x80056FEC, "void SyncOpL1Door__Fiii(int pnum, int cmd, int i)")
del_items(0x80057100)
SetType(0x80057100, "void SyncOpL2Door__Fiii(int pnum, int cmd, int i)")
del_items(0x80057214)
SetType(0x80057214, "void SyncOpL3Door__Fiii(int pnum, int cmd, int i)")
del_items(0x80057328)
SetType(0x80057328, "void SyncOpObject__Fiii(int pnum, int cmd, int i)")
del_items(0x80057508)
SetType(0x80057508, "void BreakCrux__Fi(int i)")
del_items(0x800576F8)
SetType(0x800576F8, "void BreakBarrel__FiiiUcUc(int pnum, int i, int dam, unsigned char forcebreak, int sendmsg)")
del_items(0x80057C4C)
SetType(0x80057C4C, "void BreakObject__Fii(int pnum, int oi)")
del_items(0x80057DAC)
SetType(0x80057DAC, "void SyncBreakObj__Fii(int pnum, int oi)")
del_items(0x80057E08)
SetType(0x80057E08, "void SyncL1Doors__Fi(int i)")
del_items(0x80057F20)
SetType(0x80057F20, "void SyncCrux__Fi(int i)")
del_items(0x80058058)
SetType(0x80058058, "void SyncLever__Fi(int i)")
del_items(0x800580D4)
SetType(0x800580D4, "void SyncQSTLever__Fi(int i)")
del_items(0x800581CC)
SetType(0x800581CC, "void SyncPedistal__Fi(int i)")
del_items(0x80058328)
SetType(0x80058328, "void SyncL2Doors__Fi(int i)")
del_items(0x80058490)
SetType(0x80058490, "void SyncL3Doors__Fi(int i)")
del_items(0x800585BC)
SetType(0x800585BC, "void SyncObjectAnim__Fi(int o)")
del_items(0x800586FC)
SetType(0x800586FC, "void GetObjectStr__Fi(int i)")
del_items(0x80058B18)
SetType(0x80058B18, "void RestoreObjectLight__Fv()")
del_items(0x80058D6C)
SetType(0x80058D6C, "int GetNumOfFrames__7TextDatii_addr_80058D6C(struct TextDat *this, int Creature, int Action)")
del_items(0x80058DA4)
SetType(0x80058DA4, "struct CCreatureHdr *GetCreature__7TextDati_addr_80058DA4(struct TextDat *this, int Creature)")
del_items(0x80058E1C)
SetType(0x80058E1C, "int GetNumOfCreatures__7TextDat_addr_80058E1C(struct TextDat *this)")
del_items(0x80058E30)
SetType(0x80058E30, "int FindPath__FPFiii_UciiiiiPc(unsigned
'num_items: ', len(files))
json_list.update({'folders': folders_list, 'files': file_list, 'positions': position_list,
'numerical_values': numerical_list})
self.json_dict = json_list
class Voc2012FilelistCreator(FilelistCreator):
"""Class to create the Pascal Voc 2012 file list"""
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
def create_json_from_list(self, json_list, stereo_replace=None):
"""Creates a dictionary in the format of the basic_files.json.
Takes a dictionary with the dataset-specific names and file endings and fills the dictionary self.json_dict
with the entries from the dataset folder based on the information in the given dictionary.
        :param json_list: dataset-specific dictionary of the form
        {'names': [list of all data categories that this dataset provides, e.g. 'color', 'depth', ...],
'types': [list of the corresponding file types, e.g. '.png', '.txt', ...],
'filters': [list of the corresponding filters to identify the folders for each name, e.g. 'camera', ...]}
:param stereo_replace: not used for this dataset
"""
folders_list = []
file_list = []
position_list = []
numerical_list = []
main_files = []
for i, name, type, filter in zip(range(len(json_list['names'])), json_list['names'],
json_list['types'], json_list['filters']):
folders, files = self.create_filelist(filter, type)
positions = []
for j, file in zip(range(len(files)), files):
file = file.split(self.dataset_path + os.sep)[1]
file = os.path.join(*file.split(os.sep)[1:])
if i == 0: # color image
positions.append((len(positions), 0, 0, j))
main_files.append(file.split('.')[0])
else: # segmentation
positions.append((main_files.index(file.split('.')[0]), 0, 0, j))
folders_list.append(folders)
file_list.append(files)
position_list.append(positions)
numerical_list.append(None)
print('name: ', name, 'num_items: ', len(files))
json_list.update({'folders': folders_list, 'files': file_list, 'positions': position_list,
'numerical_values': numerical_list})
self.json_dict = json_list
class A2d2FilelistCreator(FilelistCreator):
"""Class to create the a2d2 file list"""
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
def _generate_file_identifier(self, filename):
"""
Generates an identifier that separates different images from each other but is the same for corresponding
images of different types, e.g. for corresponding color, depth and segmentation images.
:param filename: Filename from which the identifier will be extracted
:return: file identifier
"""
filename = os.path.split(filename)[1]
filename = filename.split('_')
return filename[0] + '_' + filename[2] + '_' + filename[3].split('.')[0]
def create_json_from_list(self, json_list, stereo_replace=None):
"""Creates a dictionary in the format of the basic_files.json.
Takes a dictionary with the dataset-specific names and file endings and fills the dictionary self.json_dict
with the entries from the dataset folder based on the information in the given dictionary.
        :param json_list: dataset-specific dictionary of the form
        {'names': [list of all data categories that this dataset provides, e.g. 'color', 'depth', ...],
'types': [list of the corresponding file types, e.g. '.png', '.txt', ...],
'filters': [list of the corresponding filters to identify the folders for each name, e.g. 'camera', ...]}
:param stereo_replace: not used for this dataset
"""
folders_list = []
file_list = []
position_list = []
numerical_list = []
main_files = []
for i, name, type, filter in zip(range(len(json_list['names'])), json_list['names'],
json_list['types'], json_list['filters']):
folders, files = self.create_filelist(filter, type, ambiguous_names_to_ignore='camera_lidar_semantic')
positions = []
for j, file in zip(range(len(files)), files):
file = file.split(self.dataset_path + os.sep)[1]
file = os.path.join(*file.split(os.sep)[1:])
if i == 0: # color image
positions.append((len(positions), 0, 0, j))
main_files.append(self._generate_file_identifier(file))
else: # segmentation
positions.append((main_files.index(self._generate_file_identifier(file)), 0, 0, j))
folders_list.append(folders)
file_list.append(files)
position_list.append(positions)
numerical_list.append(None)
print('name: ', name, 'num_items: ', len(files))
json_list.update({'folders': folders_list, 'files': file_list, 'positions': position_list,
'numerical_values': numerical_list})
self.json_dict = json_list
class LostAndFoundFilelistCreator(FilelistCreator):
"""Class to create the LostAndFound file list"""
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
def create_json_from_list(self, json_list, stereo_replace=None):
"""Creates a dictionary in the format of the basic_files.json.
Takes a dictionary with the dataset-specific names and file endings and fills the dictionary self.json_dict
with the entries from the dataset folder based on the information in the given dictionary.
        :param json_list: dataset-specific dictionary of the form
        {'names': [list of all data categories that this dataset provides, e.g. 'color', 'depth', ...],
'types': [list of the corresponding file types, e.g. '.png', '.txt', ...],
'filters': [list of the corresponding filters to identify the folders for each name, e.g. 'camera', ...]}
:param stereo_replace: not used for this dataset
"""
folders_list = []
file_list = []
position_list = []
numerical_list = []
main_files = []
for i, name, type, filter in zip(range(len(json_list['names'])), json_list['names'],
json_list['types'], json_list['filters']):
folders, files = self.create_filelist(filter, type)
if 'segmentation' in name:
files = [f for f in files if 'color' not in f and 'instance' not in f and 'Train' not in f]
folders_list.append(folders)
file_list.append(files)
positions = []
lower_limit = [0]
upper_limit = []
old_frame_number = None
new_frame_number = None
old_seq_number = None
new_seq_number = None
frame_indicator_pos = {'color': -2, 'segmentation': -3}
for file in files:
old_frame_number = new_frame_number
old_seq_number = new_seq_number
img_filename = os.path.splitext(os.path.split(file)[1])[0]
if 'color' in name:
new_frame_number = int(img_filename.split('_')[frame_indicator_pos['color']])
new_seq_number = int(img_filename.split('_')[frame_indicator_pos['color']-1])
elif 'segmentation' in name:
new_frame_number = int(img_filename.split('_')[frame_indicator_pos['segmentation']])
new_seq_number = int(img_filename.split('_')[frame_indicator_pos['segmentation']-1])
if old_seq_number != new_seq_number and old_seq_number is not None:
upper_limit.append(files.index(file) - 1)
lower_limit.append(files.index(file))
upper_limit.append(len(files) - 1)
index = 0
for j, file in zip(range(len(files)), files):
file = file.split(self.dataset_path + os.sep)[1]
file = os.path.join(*file.split(os.sep)[1:])
if index < len(lower_limit) - 1 and j == lower_limit[index + 1]:
index += 1
if i == 0:
positions.append((len(positions), j - lower_limit[index], upper_limit[index] - j, j))
main_files.append('_'.join(file.split('_')[:-1]))
else:
if 'segmentation' in name:
positions.append((main_files.index('_'.join(file.split('_')[:-2])),
j - lower_limit[index], upper_limit[index] - j, j))
position_list.append(positions)
numerical_list.append(None)
print('name: ', name, 'num_items: ', len(files))
json_list.update({'folders': folders_list, 'files': file_list, 'positions': position_list,
'numerical_values': numerical_list})
self.json_dict = json_list
class CamVidFilelistCreator(FilelistCreator):
"""Class to create the CamVid file list"""
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
def _generate_file_identifier(self, filename):
"""
Generates an identifier that separates different images from each other but is the same for corresponding
images of different types, e.g. for corresponding color, depth and segmentation images.
:param filename: Filename from which the identifier will be extracted
:return: file identifier
"""
filename = os.path.split(filename)[1]
filename = os.path.splitext(filename)[0]
filename = filename.split('_')
return filename[0] + '_' + filename[1]
def create_json_from_list(self, json_list, stereo_replace=None):
"""Creates a dictionary in the format of the basic_files.json.
Takes a dictionary with the dataset-specific names and file endings and fills the dictionary self.json_dict
with the entries from the dataset folder based on the information in the given dictionary.
        :param json_list: dataset-specific dictionary of the form
        {'names': [list of all data categories that this dataset provides, e.g. 'color', 'depth', ...],
'types': [list of the corresponding file types, e.g. '.png', '.txt', ...],
'filters': [list of the corresponding filters to identify the folders for each name, e.g. 'camera', ...]}
:param stereo_replace: not used for this dataset
"""
folders_list = []
file_list = []
position_list = []
numerical_list = []
main_files = []
for i, name, type, filter in zip(range(len(json_list['names'])), json_list['names'],
json_list['types'], json_list['filters']):
folders, files = self.create_filelist(filter, type)
positions = []
lower_limit = [0]
upper_limit = []
frame_number = 0
new_seq_id = None
seq_number = 0
for file in files:
old_seq_id = new_seq_id
new_seq_id = self._generate_file_identifier(file)[0]
if new_seq_id != old_seq_id:
upper_limit.append(files.index(file) - 1)
lower_limit.append(files.index(file))
seq_number += 1
upper_limit.append(len(files) - 1)
index = 0
for j, file in zip(range(len(files)), files):
file = file.split(self.dataset_path + os.sep)[1]
file = os.path.join(*file.split(os.sep)[1:])
if index < len(lower_limit) - 1 and j == lower_limit[index + 1]:
index += 1
if i == 0: # color image
positions.append((len(positions), j - lower_limit[index], upper_limit[index] - j, j))
main_files.append(self._generate_file_identifier(file))
else: # segmentation
positions.append((main_files.index(self._generate_file_identifier(file)), j - lower_limit[index],
upper_limit[index] - j, j))
folders_list.append(folders)
file_list.append(files)
position_list.append(positions)
numerical_list.append(None)
print('name: ', name, 'num_items: ', len(files))
json_list.update({'folders': folders_list, 'files': file_list, 'positions': position_list,
'numerical_values': numerical_list})
self.json_dict = json_list
class Make3dFilelistCreator(FilelistCreator):
"""Class to create the make3d file list"""
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
def _generate_file_identifier(self, filename):
"""
Generates an identifier that separates different images from each other but is the same for corresponding
images of different types, e.g. for corresponding color, depth and segmentation images.
:param filename: Filename from which the identifier will be extracted
:return: file identifier
"""
filename = os.path.split(filename)[1]
filename = os.path.splitext(filename)[0]
filename = filename.split("img-")[-1]
filename = filename.split("depth_sph_corr-")[-1]
return filename
def create_json_from_list(self, json_list, stereo_replace=None):
"""Creates a dictionary in the format of the basic_files.json.
Takes a dictionary with the dataset-specific names and file endings and fills the dictionary self.json_dict
with the entries from the dataset folder based on the information in the given dictionary.
        :param json_list: dataset-specific dictionary of the form
        {'names': [list of all data categories that this dataset provides, e.g. 'color', 'depth', ...],
        'types': [list of the corresponding file types, e.g. '.png', '.txt', ...],
        'filters': [list of the corresponding filters to identify the folders for each name, e.g. 'camera', ...]}
    arbitrary documents (module, example files) that cannot be parsed by Numpydoc,
    as well as links to external references, like generated images.
"""
@classmethod
def _deserialise(cls, **kwargs):
# print("will deserialise", cls)
try:
instance = cls._instance()
except Exception as e:
raise type(e)(f"Error deserialising {cls}, {kwargs})") from e
assert "_content" in kwargs
assert kwargs["_content"] is not None
for k, v in kwargs.items():
setattr(instance, k, v)
return instance
sections = [
"Signature",
"Summary",
"Extended Summary",
"Parameters",
"Returns",
"Yields",
"Receives",
"Raises",
"Warns",
"Other Parameters",
"Attributes",
"Methods",
"See Also",
"Notes",
"Warnings",
"References",
"Examples",
"index",
] # List of sections in order
_content: Dict[str, Optional[Section]]
refs: List[str]
ordered_sections: List[str]
item_file: Optional[str]
item_line: Optional[int]
item_type: Optional[str]
aliases: List[str]
example_section_data: Section
see_also: List[SeeAlsoItem] # see also data
signature: Signature
references: Optional[List[str]]
arbitrary: List[Section]
__slots__ = (
"_content",
"example_section_data",
"refs",
"ordered_sections",
"signature",
"item_file",
"item_line",
"item_type",
"aliases",
"see_also",
"references",
"logo",
"arbitrary",
)
def __repr__(self):
return "<DocBlob ...>"
def slots(self):
return [
"_content",
"example_section_data",
"refs",
"ordered_sections",
"item_file",
"item_line",
"item_type",
"signature",
"references",
"aliases",
"arbitrary",
]
def __init__(self):
self._content = None
self.example_section_data = None
self.refs = None
self.ordered_sections = None
self.item_file = None
self.item_line = None
self.item_type = None
self.aliases = []
self.signature = Signature(None)
@property
def content(self):
"""
List of sections in the doc blob docstrings
"""
return self._content
@content.setter
def content(self, new):
assert not new.keys() - {
"Signature",
"Summary",
"Extended Summary",
"Parameters",
"Returns",
"Yields",
"Receives",
"Raises",
"Warns",
"Other Parameters",
"Attributes",
"Methods",
"See Also",
"Notes",
"Warnings",
"References",
"Examples",
"index",
}
self._content = new
assert self._content is not None
def _numpy_data_to_section(data: List[Tuple[str, str, List[str]]], title: str):
assert isinstance(data, list), repr(data)
acc = []
for param, type_, desc in data:
assert isinstance(desc, list)
items = []
if desc:
items = parse_rst_section("\n".join(desc))
for l in items:
assert not isinstance(l, Section)
acc.append(Param(param, type_, desc=items).validate())
return Section(acc, title)
_numpydoc_sections_with_param = {
"Parameters",
"Returns",
"Raises",
"Yields",
"Attributes",
"Other Parameters",
"Warns",
"Methods",
"Receives",
}
_numpydoc_sections_with_text = {
"Summary",
"Notes",
"Extended Summary",
"References",
"Warnings",
}
class APIObjectInfo:
"""
Info about an API object
This object can be many things:
Module, Class, method, function.
I'll see how I handle that later.
"""
kind: str
docstring: str
def __init__(self, kind, docstring, signature):
self.kind = kind
self.docstring = docstring
self.parsed = []
self.signature = signature
if docstring is not None and kind != "module":
# TS is going to choke on this as See Also and other
# sections are technically invalid.
try:
ndoc = NumpyDocString(dedent_but_first(docstring))
except Exception as e:
raise NumpydocParseError("APIObjectInfoParse Error in numpydoc") from e
for title in ndoc.ordered_sections:
if not ndoc[title]:
continue
if title in _numpydoc_sections_with_param:
section = _numpy_data_to_section(ndoc[title], title)
assert isinstance(section, Section)
self.parsed.append(section)
elif title in _numpydoc_sections_with_text:
docs = ts.parse("\n".join(ndoc[title]).encode())
if len(docs) != 1:
raise IncorrectInternalDocsLen("\n".join(ndoc[title]), docs)
section = docs[0]
assert isinstance(section, Section), section
self.parsed.append(section)
elif title == "Signature":
self.parsed.append(NumpydocSignature(ndoc[title]))
elif title == "Examples":
self.parsed.append(NumpydocExample(ndoc[title]))
elif title == "See Also":
see_also = ndoc[title]
xx = NumpydocSeeAlso(_normalize_see_also(see_also, qa="??"))
self.parsed.append(xx)
else:
assert False
elif docstring and kind == "module":
self.parsed = ts.parse(docstring.encode())
self.validate()
def special(self, title):
if self.kind == "module":
return None
res = [s for s in self.parsed if s.title == title]
if not res:
return None
assert len(res) == 1
assert not isinstance(res[0], Section), self.parsed
return res[0]
def validate(self):
for p in self.parsed:
assert isinstance(
p, (Section, NumpydocExample, NumpydocSeeAlso, NumpydocSignature)
)
p.validate()
def _normalize_see_also(see_also: List[Any], qa):
"""
    numpydoc is complex; the See Also fields can be quite complicated,
    so here we try to normalise them.
    From what I can remember, a See Also section can have
        name1 : type1
        name2 : type2
            description for both name1 and name2.
    Though if the description is empty, then the type is actually the description.
"""
if not see_also:
return []
assert see_also is not None
new_see_also = []
section: Section
name_and_types: List[Tuple[str, str]]
name: str
type_or_description: str
for name_and_types, raw_description in see_also:
try:
for (name, type_or_description) in name_and_types:
if type_or_description and not raw_description:
assert isinstance(type_or_description, str)
type_ = None
# we have all in a single line,
# and there is no description, so the type field is
# actually the description.
desc = [paragraph([type_or_description])]
elif raw_description:
assert isinstance(raw_description, list)
type_ = type_or_description
desc = [paragraph(raw_description)]
else:
type_ = type_or_description
desc = []
sai = SeeAlsoItem(Ref(name, None, None), desc, type_)
new_see_also.append(sai)
del desc
del type_
except Exception as e:
raise ValueError(
f"Error {qa}: {see_also=} | {name_and_types=} | {raw_description=}"
) from e
return new_see_also
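# Illustrative sketch (added; not part of the original module): feeding `_normalize_see_also`
# a hypothetical example of the (name/type pairs, description lines) tuples it expects.
# Only the shape of the literal matters here.
def _example_normalize_see_also():
    raw = [
        # one entry whose "type" slot is really a one-line description
        ([("numpy.sum", "Sum of array elements.")], []),
        # two entries sharing a single description block
        ([("numpy.mean", None), ("numpy.median", None)], ["Averages of array elements."]),
    ]
    return _normalize_see_also(raw, qa="example.module")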
class Gen:
"""
Core class to generate docbundles for a given library.
This is responsible for finding all objects, extracting the doc, parsing it,
and saving that into the right folder.
"""
def __init__(self, dummy_progress, config):
if dummy_progress:
self.Progress = DummyP
else:
self.Progress = Progress
FORMAT = "%(message)s"
logging.basicConfig(
level="INFO", format=FORMAT, datefmt="[%X]", handlers=[RichHandler()]
)
self.log = logging.getLogger("papyri")
self.config = config
self.log.debug("Configuration: %s", self.config)
self.data = {}
self.bdata = {}
self.metadata = {}
self.examples = {}
self.docs = {}
def clean(self, where: Path):
"""
Erase a doc bundle folder.
"""
for _, path in progress(
(where / "module").glob("*.json"),
description="cleaning previous bundle 1/3",
):
path.unlink()
for _, path in progress(
(where / "assets").glob("*"), description="cleaning previous bundle 2/3"
):
path.unlink()
for _, path in progress(
(where / "docs").glob("*"), description="cleaning previous bundle 3/3"
):
path.unlink()
if (where / "module").exists():
(where / "module").rmdir()
if (where / "assets").exists():
(where / "assets").rmdir()
if (where / "papyri.json").exists():
(where / "papyri.json").unlink()
if (where / "docs").exists():
(where / "docs").rmdir()
def collect_narrative_docs(self):
"""
Crawl the filesystem for all docs/rst files
"""
if not self.config.docs_path:
return
path = Path(self.config.docs_path).expanduser()
self.log.info("Scraping Documentation")
for p in path.glob("**/*.rst"):
assert p.is_file()
parts = p.relative_to(path).parts
assert parts[-1].endswith("rst")
try:
data = ts.parse(p.read_bytes())
except Exception as e:
                raise type(e)(f"{p=}") from e
blob = DocBlob()
blob.arbitrary = data
blob.content = {}
blob.ordered_sections = []
blob.item_file = None
blob.item_line = None
blob.item_type = None
blob.aliases = []
blob.example_section_data = Section()
blob.see_also = []
blob.signature = Signature(None)
blob.references = None
blob.refs = []
self.docs[parts] = json.dumps(blob.to_json(), indent=2, sort_keys=True)
# data = p.read_bytes()
def write_narrative(self, where: Path) -> None:
(where / "docs").mkdir(exist_ok=True)
for k, v in self.docs.items():
subf = where / "docs"
file = k[-1].rsplit(".", maxsplit=1)[0]
file = ":".join(k[:-1]) + ":" + file
subf.mkdir(exist_ok=True, parents=True)
with (subf / file).open("w") as f:
f.write(v)
def write_examples(self, where: Path) -> None:
(where / "examples").mkdir(exist_ok=True)
for k, v in self.examples.items():
with (where / "examples" / k).open("w") as f:
f.write(v)
def write_api(self, where: Path):
"""
write the API section of the docbundles.
"""
(where / "module").mkdir(exist_ok=True)
for k, v in self.data.items():
with (where / "module" / k).open("w") as f:
f.write(v)
def write(self, where: Path):
"""
Write a docbundle folder.
"""
self.write_api(where)
self.write_narrative(where)
self.write_examples(where)
self.write_assets(where)
with (where / "papyri.json").open("w") as f:
f.write(json.dumps(self.metadata, indent=2, sort_keys=True))
def write_assets(self, where: Path) -> None:
assets = where / "assets"
assets.mkdir()
for k, v in self.bdata.items():
with (assets / k).open("wb") as f:
f.write(v)
def put(self, path: str, data):
"""
put some json data at the given path
"""
self.data[path + ".json"] = data
def put_raw(self, path: str, data):
"""
        put some raw binary data at the given path.
"""
self.bdata[path] = data
def _transform_1(self, blob, ndoc):
blob.content = {k: v for k, v in ndoc._parsed_data.items()}
return blob
def _transform_2(self, blob, target_item, qa):
# try to find relative path WRT site package.
        # will not work for dev installs. Maybe add an option to set the root location?
item_file = find_file(target_item)
if item_file is not None:
for s in SITE_PACKAGE + [os.path.expanduser("~")]:
if item_file.startswith(s):
item_file = item_file[len(s) :]
blob.item_file = item_file
if item_file is None:
if type(target_item).__name__ in (
"builtin_function_or_method",
"fused_cython_function",
"cython_function_or_method",
):
                self.log.debug(
                    "Could not find source file for built-in function or method. "
                    "Likely a compiled extension %s %s %s; will not be able to link to it.",
repr(qa),
target_item,
repr(type(target_item).__name__),
)
else:
                self.log.warning(
"Could not find source file for %s (%s) [%s], will not be able to link to it.",
repr(qa),
target_item,
type(target_item).__name__,
)
return blob
def _transform_3(self, blob, target_item):
item_line = None
try:
item_line = inspect.getsourcelines(target_item)[1]
except OSError:
            self.log.debug("Could not find item_line for %s, (OSERROR)",
# Databricks notebook source
# This creates the "team_name" field displayed at the top of the notebook.
dbutils.widgets.text("team_name", "Enter your team's name");
# COMMAND ----------
team_name = dbutils.widgets.get("team_name")
setup_responses = dbutils.notebook.run("./includes/flight_school_assignment_3_setup", 0, {"team_name": team_name}).split()
local_data_path = setup_responses[0]
dbfs_data_path = setup_responses[1]
database_name = setup_responses[2]
print(f"Path to be used for Local Files: {local_data_path}")
print(f"Path to be used for DBFS Files: {dbfs_data_path}")
print(f"Database Name: {database_name}")
# COMMAND ----------
spark.sql(f"USE {database_name}")
# COMMAND ----------
# MAGIC %md-sandbox
# MAGIC
# MAGIC
# MAGIC ### Tracking Experiments with MLflow
# MAGIC
# MAGIC Over the course of the machine learning life cycle, data scientists test many different models from various libraries with different hyperparameters. Tracking these various results poses an organizational challenge. In brief, storing experiments, results, models, supplementary artifacts, and code creates significant challenges.
# MAGIC
# MAGIC MLflow Tracking is one of the three main components of MLflow. It is a logging API specific for machine learning and agnostic to libraries and environments that do the training. It is organized around the concept of **runs**, which are executions of data science code. Runs are aggregated into **experiments** where many runs can be a part of a given experiment and an MLflow server can host many experiments.
# MAGIC
# MAGIC Each run can record the following information:<br><br>
# MAGIC
# MAGIC - **Parameters:** Key-value pairs of input parameters such as the number of trees in a random forest model
# MAGIC - **Metrics:** Evaluation metrics such as RMSE or Area Under the ROC Curve
# MAGIC - **Artifacts:** Arbitrary output files in any format. This can include images, pickled models, and data files
# MAGIC - **Source:** The code that originally ran the experiment
# MAGIC
# MAGIC MLflow tracking also serves as a **model registry** so tracked models can easily be stored and, as necessary, deployed into production.
# MAGIC
# MAGIC Experiments can be tracked using libraries in Python, R, and Java as well as by using the CLI and REST calls. This demo will use Python, though the majority of MLflow functionality is also exposed in these other APIs.
# MAGIC
# MAGIC <div><img src="https://files.training.databricks.com/images/eLearning/ML-Part-4/mlflow-tracking.png" style="height: 400px; margin: 20px"/></div>
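# COMMAND ----------
# Illustrative sketch (not part of the original demo): the core tracking calls described
# above -- start a run, then log a parameter and a metric against it. The parameter/metric
# names and values here are made up purely for illustration; the real training runs come
# later in this notebook.
import mlflow
with mlflow.start_run(run_name="tracking-api-sketch") as sketch_run:
    mlflow.log_param("example_param", 42)      # Parameters: key-value inputs
    mlflow.log_metric("example_metric", 0.95)  # Metrics: evaluation results
print(f"Logged sketch run: {sketch_run.info.run_uuid}")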
# COMMAND ----------
# MAGIC %md-sandbox
# MAGIC
# MAGIC <img src="/files/flight/Proposed_Architecture.png?raw=true" width=1000/>
# MAGIC
# MAGIC ### Megacorp Power Plant Sensors
# MAGIC Here at Megacorp, you receive sensor data from Rectifiers and Transformers to monitor the operational status of these devices.
# MAGIC
# MAGIC - **Q:** Given a labeled dataset, how can we train a model to predict the device operational status?
# MAGIC - **A:** Databricks and mlflow!
# MAGIC
# MAGIC - **Q:** Can the model be exposed to systems external to Databricks?
# MAGIC
# MAGIC - **A:** Of course, let's use MLflow and Model Serving to accomplish this.
# MAGIC
# MAGIC
# MAGIC In this demo, we'll use a pre-loaded dataset, **current_readings_labeled**, to train a [DecisionTreeClassifier](https://spark.apache.org/docs/latest/api/java/org/apache/spark/ml/classification/DecisionTreeClassifier.html) to predict the operational status.
# MAGIC
# MAGIC We'll introduce the concept of a machine learning [Pipeline](https://spark.apache.org/docs/latest/ml-pipeline.html) composed of Transformers and Estimators to easily combine our data preparation and machine learning tasks into a discrete unit of work for easy serving.
# MAGIC
# MAGIC #### Main concepts in Pipelines
# MAGIC MLlib standardizes APIs for machine learning algorithms to make it easier to combine multiple algorithms into a single pipeline, or workflow. This section covers the key concepts introduced by the Pipelines API, where the pipeline concept is mostly inspired by the scikit-learn project.
# MAGIC
# MAGIC **DataFrame**: This ML API uses DataFrame from Spark SQL as an ML dataset, which can hold a variety of data types. E.g., a DataFrame could have different columns storing text, feature vectors, true labels, and predictions.
# MAGIC
# MAGIC **Transformer**: A Transformer is an algorithm which can transform one DataFrame into another DataFrame. E.g., an ML model is a Transformer which transforms a DataFrame with features into a DataFrame with predictions.
# MAGIC
# MAGIC **Estimator**: An Estimator is an algorithm which can be fit on a DataFrame to produce a Transformer. E.g., a learning algorithm is an Estimator which trains on a DataFrame and produces a model.
# MAGIC
# MAGIC **Pipeline**: A Pipeline chains multiple Transformers and Estimators together to specify an ML workflow.
# MAGIC
# MAGIC **Parameter**: All Transformers and Estimators now share a common API for specifying parameters.
# MAGIC
# MAGIC
# MAGIC
# MAGIC #### DataPrep
# MAGIC We'll have to convert the textual data to numbers using [StringIndexer](https://spark.apache.org/docs/latest/api/python/reference/api/pyspark.ml.feature.StringIndexer.html)(transformer) and use a [VectorAssembler](https://spark.apache.org/docs/3.1.1/api/python/reference/api/pyspark.ml.feature.VectorAssembler.html)(transformer) to combine the features ("device_type", "device_id", "reading_1", "reading_2", "reading_3") into a single vector for use by the DecisionTreeClassifier (estimator).
# MAGIC
# MAGIC We also have to randomly split the dataset into two parts: training and test data. Fortunately, DataFrames make this easy.
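# COMMAND ----------
# Illustrative sketch (not part of the original demo): the Estimator -> Transformer
# relationship described above, shown with a single StringIndexer on a tiny throwaway
# DataFrame. The column values below are made up for illustration only.
from pyspark.ml.feature import StringIndexer
toy_df = spark.createDataFrame([("TRANSFORMER",), ("RECTIFIER",), ("TRANSFORMER",)], ["device_type"])
toy_indexer = StringIndexer(inputCol="device_type", outputCol="device_type_index")  # an Estimator
toy_model = toy_indexer.fit(toy_df)   # fit() on a DataFrame returns a Transformer (a fitted model)
toy_model.transform(toy_df).show()    # transform() appends the numeric index column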
# COMMAND ----------
# MAGIC %sql
# MAGIC --lets get some data
# MAGIC SELECT * FROM current_readings_labeled
# COMMAND ----------
df_raw_data = spark.sql("""
SELECT
device_type,
device_operational_status AS label,
device_id,
reading_1,
reading_2,
reading_3
FROM current_readings_labeled
""")
(training_data, test_data) = df_raw_data.randomSplit([0.7, 0.3], seed=100)
# COMMAND ----------
from pyspark.ml.feature import IndexToString, StringIndexer, VectorAssembler
from pyspark.ml.classification import DecisionTreeClassifier
from pyspark.ml.evaluation import MulticlassClassificationEvaluator
from pyspark.ml import Pipeline
import mlflow
from mlflow import spark as mlflow_spark # renamed to prevent collisions when doing spark.sql
import time
# COMMAND ----------
# MAGIC %md-sandbox
# MAGIC #### Creating the Pipeline
# MAGIC We'll create a pipeline consisting of five steps:
# MAGIC - Conversion of device_type to a numeric value
# MAGIC - Conversion of device_id to a numeric value
# MAGIC - Conversion of device_operational_status (now called label) to a numeric value
# MAGIC - Vectorization of the feature set (device_type, device_id, reading_1, reading_2, reading_3)
# MAGIC - Classification of the feature set using the DecisionTreeClassifier
# MAGIC
# MAGIC Notice that the pipeline is a single Estimator, so we can use the Estimator fit method to return a trained model which is realized as a Transformer. We can use the Transformer transform method to make predictions on the test data.
# COMMAND ----------
# First we're going to create a pipeline for our model
device_type_indexer = StringIndexer(inputCol="device_type", outputCol="device_type_index")
device_id_indexer = StringIndexer(inputCol="device_id", outputCol="device_id_index")
label_indexer = StringIndexer(inputCol="label", outputCol="label_index")
assembler = VectorAssembler(
inputCols=["device_type_index", "device_id_index", "reading_1", "reading_2", "reading_3"],
outputCol="features")
dtClassifier = DecisionTreeClassifier(labelCol="label_index", featuresCol="features", predictionCol="prediction_label")
dtClassifier.setMaxBins(20)
pipeline = Pipeline(stages=[device_type_indexer, device_id_indexer, label_indexer, assembler, dtClassifier])
# COMMAND ----------
# MAGIC %md-sandbox
# MAGIC ### Training Runs
# MAGIC Once the pipeline is created, we can run a series of experiments to determine which maximum depth produces the model that best fits the test data. In the function below, we create an MLflow run, log the parameters, train the model, score it, and evaluate its accuracy.
# MAGIC
# MAGIC Notice the parameters:
# MAGIC - **p_max_depth** - the depth of the Decision Tree
# MAGIC - **p_owner** - who is running the experiments.
# MAGIC - **p_run_name** - the name of this test run
# MAGIC - **pipeline** - the pipeline to run
# MAGIC - **predictionCol** - normally this would be called 'prediction', but sometimes it is advantageous to change it. We'll see why later.
# COMMAND ----------
# Next, let's create a function to do repeated training runs and log them with mlflow
def training_run(p_max_depth = 2, p_owner = "default", p_run_name = "team-2-fs-runs",
pipeline = Pipeline(), predictionCol='prediction') :
with mlflow.start_run(run_name=p_run_name) as run:
mlflow.log_param("owner", p_owner)
mlflow.log_param("Maximum Depth", p_max_depth)
dtClassifier.setMaxDepth(p_max_depth)
dtClassifier.setPredictionCol(predictionCol)
pipelineModel = pipeline.fit(training_data)
predDf = pipelineModel.transform(test_data)
evaluator = MulticlassClassificationEvaluator(
labelCol="label_index", predictionCol=predictionCol)
accuracy = evaluator.evaluate(predDf, {evaluator.metricName: "accuracy"})
mlflow.log_metric("Accuracy", accuracy)
mlflow_spark.log_model(pipelineModel, artifact_path="decision-tree-model", input_example=test_data.limit(4).toPandas())
return run.info.run_uuid
# COMMAND ----------
# MAGIC %md-sandbox
# MAGIC ### Run the Experiments
# MAGIC Below we will call several training runs at various depths. Each experiment will be logged by MLflow and will include example input data to make our lives easier should we decide to serve the model later.
# COMMAND ----------
# Call the function with different DT depths
x = [2,4,6,8]
for n in x:
mlFlowOutput = training_run(p_max_depth=n,p_owner="david.lyle", p_run_name="predictor-numeric-output" + str(n), pipeline=pipeline)
print(mlFlowOutput)
# COMMAND ----------
# MAGIC %md-sandbox
# MAGIC ### Evaluate the Results
# MAGIC I'd like to look at the output of the most accurate training run, so I'll select it from
# COMMAND ----------
# Let's load
cur.image = image
cur.rect = cur.image.get_rect()
pygame.mouse.set_visible(False)
font_type = pygame.font.Font(load_font('font.ttf'), 40)
label = font_type.render('Миссии', True, (255, 255, 255))
screen.blit(label, (WIDTH // 2 - label.get_width() // 2, 0))
stage_of_open_box = 1
sprites_of_particles = pygame.sprite.Group()
clock = pygame.time.Clock()
FPS = 60
while running:
for event in pygame.event.get():
if event.type == pygame.QUIT:
end_game()
if event.type == pygame.MOUSEMOTION:
cur.rect.topleft = event.pos
if event.type == pygame.KEYDOWN:
if event.key == pygame.K_ESCAPE:
show_menu()
if event.type == pygame.MOUSEBUTTONDOWN:
pos = pygame.mouse.get_pos()
if 250 <= pos[0] <= 400 and 250 <= pos[1] <= 400:
stage_of_open_box += 1
if stage_of_open_box == 3:
create_particles(pos, sprites_of_particles)
screen.fill((0, 0, 0))
screen.blit(bgmission_image, (0, 0))
back_btn = draw(10, 570, 'Назад [ESC]', 60, 27, font_size=20)
if back_btn == 1:
show_menu()
if missions_complete != 5:
screen.blit(load_image(os.path.join('progress_bar', f'progress_bar_{stage_of_mission}.png')), (245, 150))
            screen.blit(load_image(os.path.join('box', 'box1.png')), (495, 125))
screen.blit(label, (WIDTH // 2 - label.get_width() // 2, 0))
mission = font_type.render(type_of_missions[stage_of_mission - 1][0], True, (255, 255, 255))
try:
mission_counter = font_type.render(
str(eval(f'mission{stage_of_mission}')) + '/' + str(missions_complete_1[stage_of_mission - 1]),
True,
(255, 255, 255))
screen.blit(mission_counter, (WIDTH // 2 - mission_counter.get_width() // 2, 350))
except Exception:
pass
if mission.get_width() > WIDTH:
font_type = pygame.font.Font(load_font('font.ttf'), 30)
mission = font_type.render(type_of_missions[stage_of_mission - 1][0], True, (255, 255, 255))
font_type = pygame.font.Font(load_font('font.ttf'), 40)
screen.blit(mission, (WIDTH // 2 - mission.get_width() // 2, 275))
else:
with open('data/missions_complete.txt') as f:
flag = int(f.readline())
if flag == 0:
if stage_of_open_box < 4:
screen.blit(
pygame.transform.scale(load_image(os.path.join('box', f'box{stage_of_open_box}.png')),
(150, 150)),
(WIDTH // 2 - 75, HEIGHT // 2 - 75))
else:
with open('data/missions_complete.txt', 'w') as f:
f.write('1')
with open('data/gaykascore.txt', 'w') as f:
all_gayka_score += 5000
f.write(str(all_gayka_score))
box_open = True
else:
font_type = pygame.font.Font(load_font('font.ttf'), 40)
l = font_type.render('Миссии выполнены!', True, (255, 255, 255))
screen.blit(l, (WIDTH // 2 - l.get_width() // 2, 250))
sprites_of_particles.draw(screen)
sprites_of_particles.update()
clock.tick(FPS)
cur_sprites.draw(screen)
pygame.display.update()
def statistick():
bgstatistick_image = load_image('statistickFon.jpg')
image = load_image("cursor.png")
defolt_space_ship = load_image('space_ship1.png')
gaykaim = load_image('gayka.png')
cur_sprites = pygame.sprite.Group()
cur = pygame.sprite.Sprite(cur_sprites)
cur.image = image
cur.rect = cur.image.get_rect()
pygame.mouse.set_visible(False)
while running:
for event in pygame.event.get():
if event.type == pygame.QUIT:
pygame.quit()
quit()
if event.type == pygame.MOUSEMOTION:
cur.rect.topleft = event.pos
if event.type == pygame.KEYDOWN:
if event.key == pygame.K_ESCAPE:
show_menu()
screen.fill((0, 0, 0))
screen.blit(bgstatistick_image, (0, 0))
back_btn = draw(10, 570, 'Назад [ESC]', 60, 27, font_size=20)
if back_btn == 1:
show_menu()
font_type = pygame.font.Font(load_font('font.ttf'), 65)
label = font_type.render('Ваши результаты', True, (255, 255, 255))
screen.blit(label, (110, 5))
font_type = pygame.font.Font(load_font('font.ttf'), 30)
label = font_type.render('Максимально очков набрано:', True, (255, 255, 255))
screen.blit(label, (40, 130))
s = []
with open('data/results.txt') as f:
for line in f.readlines():
s.append(int(line.strip()))
label = font_type.render(f'{max(s)}', True, (255, 255, 255))
screen.blit(label, (512, 130))
label = font_type.render('Всего убито врагов:', True, (255, 255, 255))
screen.blit(label, (40, 190))
label = font_type.render(f'{sum(s)}', True, (255, 255, 255))
screen.blit(label, (355, 190))
with open('data/astronavt_score.txt') as f1:
k = f1.readline().strip()
label = font_type.render('Всего спасено астронавтов:', True, (255, 255, 255))
screen.blit(label, (40, 250))
label = font_type.render(str(k), True, (255, 255, 255))
screen.blit(label, (500, 250))
with open('data/game_done', 'r') as f2:
q = f2.readline().strip()
label = font_type.render('Ваши награды:', True, (255, 255, 255))
screen.blit(label, (40, 310))
if q == '1':
screen.blit(load_image('trofey.png'), (280, 300))
cur_sprites.draw(screen)
pygame.display.update()
pygame.display.set_icon(load_image('icon.png'))
def game_over(gayka_score):
global score
record = 0
pygame.mixer.music.load('data/game_over.wav')
pygame.mixer.music.set_volume(1)
pygame.mixer.music.play()
with open('data/results.txt', 'r') as f:
s = list(map(int, f.readlines()))
if score > s[-1]:
record = 1
s.append(score)
s.sort()
with open('data/results.txt', 'w') as f:
for i in s:
f.write(str(i) + '\n')
image = load_image('bg_for_game_over.png')
rect = image.get_rect()
rect.x = -image.get_width()
image2 = load_image(space_ship_skin)
rect2 = image2.get_rect()
rect2.y = -image2.get_height()
rect2.x = WIDTH // 2 - image2.get_width() // 2
v1 = 150
v3 = 200
v2 = -200
font_type = pygame.font.Font(load_font('font.ttf'), 50)
message = f'Гаек собрано: {gayka_score}'
text = font_type.render(message, True, '#ffffff')
rect_t = text.get_rect()
rect_t.x = WIDTH
rect_t.y = 250
message2 = f'Очков набрано: {score}'
text2 = font_type.render(message2, True, '#ffffff')
    rect_t2 = text2.get_rect()
rect_t2.x = -text2.get_width()
rect_t2.y = 325
if record:
message3 = f'Новый рекорд!'
text3 = font_type.render(message3, True, '#ffffff')
rect_t3 = text3.get_rect()
rect_t3.x = WIDTH // 2 - text3.get_width() // 2
rect_t3.y = text3.get_height() + HEIGHT
v4 = -250
FPS = 50
score = 0
clock = pygame.time.Clock()
while running:
for event in pygame.event.get():
if event.type == pygame.QUIT:
end_game()
if event.type == pygame.KEYDOWN:
if v3 != 0 or v2 != 0 or v1 != 0:
v3 = v2 = v1 = v4 = 0
rect2.y = 150
rect_t.x = WIDTH // 2 - text.get_width() // 2
rect_t2.x = WIDTH // 2 - text2.get_width() // 2
if record:
rect_t3.y = 400
else:
show_menu()
screen.fill((0, 0, 0))
screen.blit(image, (0, 0))
rect2.y += v1 / FPS
screen.blit(image2, (rect2.x, rect2.y))
if rect2.y >= 100:
v1 = 0
rect_t.x += v2 / FPS
screen.blit(text, (rect_t.x, rect_t.y))
if rect_t.x <= WIDTH // 2 - text.get_width() // 2:
v2 = 0
rect_t2.x += v3 / FPS
screen.blit(text2, (rect_t2.x, rect_t2.y))
if rect_t2.x >= (WIDTH // 2 - text2.get_width() // 2):
v3 = 0
if record:
rect_t3.y += v4 / FPS
screen.blit(text3, (rect_t3.x, rect_t3.y))
if rect_t3.y <= 400:
v4 = 0
pygame.display.update()
clock.tick(FPS)
class Meteor(pygame.sprite.Sprite):
image = load_image('meteor.png')
v = 1
def __init__(self, x, group):
super().__init__(group)
self.image = Meteor.image
self.mask = pygame.mask.from_surface(self.image)
self.rect = self.image.get_rect()
self.rect.x = x
self.rect.y = 0
def update(self):
self.rect.y += Meteor.v
def x(self):
return int(self.rect.x)
def y(self):
return int(self.rect.y)
class Bonus(pygame.sprite.Sprite):
v = 10
def __init__(self, x, y, name, type, group):
super().__init__(group)
self.image = load_image(name)
self.rect = self.image.get_rect()
self.rect.x = x
self.rect.y = y
self.type = type
self.timing = 0
def update(self):
self.rect.y += Bonus.v
def ret_type(self):
return int(self.type)
def get_timing(self, other):
self.timing = other
def ret_timing(self):
return self.timing
def ret_image(self):
return pygame.transform.scale(self.image, (self.image.get_width() // 2, self.image.get_height() // 2))
class Gayka(pygame.sprite.Sprite):
image = load_image('gayka.png')
v = 1
def __init__(self, x, group):
super().__init__(group)
self.image = Gayka.image
self.rect = self.image.get_rect()
self.rect.x = x
self.rect.y = 0
def update(self):
self.rect.y += Gayka.v
def x(self):
return int(self.rect.x)
def y(self):
return int(self.rect.y)
class SpaceShip(pygame.sprite.Sprite):
v = 15
def __init__(self, group):
super().__init__(group)
self.image_with_shield = load_image(spaceship_with_shield_skin)
self.image = load_image(space_ship_skin)
self.rect = self.image.get_rect()
self.type = 1
self.mask = pygame.mask.from_surface(self.image)
self.rect.x = WIDTH // 2 - self.image.get_width() // 2
self.rect.y = HEIGHT - self.image.get_height() - 25
def update(self, v):
self.rect.x += v
if self.rect.x + self.image.get_width() > WIDTH:
self.rect.x = WIDTH - self.image.get_width()
if self.rect.x < 0:
self.rect.x = 0
def x(self):
return self.rect.x
def y(self):
return self.rect.y
def ret_x(self):
return self.rect.x + self.image.get_width() // 2 - 7
def ret_y(self):
return self.rect.y - 25
def ret_x_for_double_bullets_1(self):
if self.type == 1:
return self.rect.x + 5
return self.rect.x + 25
def ret_x_for_double_bullets_2(self):
if self.type == 1:
return self.rect.x + self.image.get_width() - 20
return self.rect.x + self.image.get_width() - 40
def ret_x_for_fire(self):
return self.rect.x + self.image.get_width() // 2 - load_image('anim_fire/fire_anim_1.png').get_width() // 2
def shield(self):
self.image = self.image_with_shield
self.type = 2
def shield_off(self):
self.image = load_image(space_ship_skin)
self.type = 1
class Monster(pygame.sprite.Sprite):
def __init__(self, pos_x, name, group):
super().__init__(group)
image = load_image(name)
self.image = image
self.mask = pygame.mask.from_surface(self.image)
self.rect = self.image.get_rect()
self.rect.x = pos_x
for i in enemys:
if self.rect.x <= i.rect.x < self.rect.x + self.image.get_width():
while self.rect.x <= i.rect.x < self.rect.x + self.image.get_width():
self.rect.x = random.randint(0, 800 - self.image.get_width() // 2)
self.rect.y = 0
def update(self, v, gayka_score):
self.rect.y += v
if self.rect.y >= HEIGHT - self.image.get_height():
game_over(gayka_score)
score = 0
def ret_x(self):
return self.rect.x
class Bullet(pygame.sprite.Sprite):
image = load_image('bullet.png')
def __init__(self, pos, group):
super().__init__(group)
self.image = Bullet.image
self.rect = self.image.get_rect()
self.rect.x = pos[0]
self.rect.y = pos[1]
def update(self, v):
self.rect.y -= v
def end_game():
pygame.quit()
sys.exit()
def ship_death(x, y, group):
pygame.mixer.music.load('data/boom_spaceship.wav')
pygame.mixer.music.set_volume(1)
pygame.mixer.music.play()
AnimatedDeath(1, x, y, group)
class AnimatedDeath(pygame.sprite.Sprite):
def __init__(self, stage, x, y, group):
super().__init__(group)
name = os.path.join('animate_ship_death', f'animate_death_{stage}.png')
self.image = load_image(name)
self.rect = self.image.get_rect()
self.rect.x = x
self.rect.y = y
def update(self, stage):
name = os.path.join('animate_ship_death', f'animate_death_{stage}.png')
self.image = load_image(name)
class Astronavt(pygame.sprite.Sprite):
image = load_image('astronavt.png')
def __init__(self, x, group):
super().__init__(group)
self.image = Astronavt.image
self.mask = pygame.mask.from_surface(self.image)
self.rect = self.image.get_rect()
self.rect.x = x
self.rect.y = 0
def update(self, v):
self.rect.y += v
def x(self):
return self.rect.x
def y(self):
return self.rect.y
class Rocket(pygame.sprite.Sprite):
image = load_image('rocket.png')
def __init__(self, x, group):
super().__init__(group)
self.image = Rocket.image
self.mask = pygame.mask.from_surface(self.image)
self.rect = self.image.get_rect()
        self.rect.x = x
"Name",
key="name",
link=( lambda item: dict( operation="view_or_manage_repository", id=item.id ) ),
attach_popup=False ),
RepositoryGrid.DescriptionColumn( "Synopsis",
key="description",
attach_popup=False ),
RepositoryGrid.TypeColumn( "Type" ),
RepositoryGrid.MetadataRevisionColumn( "Metadata<br/>Revisions" ),
RepositoryGrid.ToolsFunctionallyCorrectColumn( "Tools<br/>Verified" ),
RepositoryGrid.CategoryColumn( "Category",
model_class=model.Category,
key="Category.name",
attach_popup=False )
]
operations = []
standard_filters = []
default_filter = dict( deleted="False" )
num_rows_per_page = 50
preserve_state = False
use_paging = False
def build_initial_query( self, trans, **kwd ):
decoded_user_id = trans.security.decode_id( kwd[ 'user_id' ] )
return trans.sa_session.query( model.Repository ) \
.filter( and_( model.Repository.table.c.deleted == False,
model.Repository.table.c.deprecated == False,
model.Repository.table.c.user_id == decoded_user_id ) ) \
.join( model.User.table ) \
.outerjoin( model.RepositoryCategoryAssociation.table ) \
.outerjoin( model.Category.table )
class RepositoriesInCategoryGrid( RepositoryGrid ):
title = "Category"
columns = [
RepositoryGrid.NameColumn( "Name",
key="name",
link=( lambda item: dict( controller="repository", operation="view_or_manage_repository", id=item.id ) ),
attach_popup=False ),
RepositoryGrid.DescriptionColumn( "Synopsis",
key="description",
attach_popup=False ),
RepositoryGrid.TypeColumn( "Type" ),
RepositoryGrid.MetadataRevisionColumn( "Metadata<br/>Revisions" ),
RepositoryGrid.ToolsFunctionallyCorrectColumn( "Tools<br/>Verified" ),
RepositoryGrid.UserColumn( "Owner",
model_class=model.User,
link=( lambda item: dict( controller="repository", operation="repositories_by_user", id=item.id ) ),
attach_popup=False,
key="User.username" ),
# Columns that are valid for filtering but are not visible.
RepositoryGrid.EmailColumn( "Email",
model_class=model.User,
key="email",
visible=False )
]
columns.append( grids.MulticolFilterColumn( "Search repository name, description",
cols_to_filter=[ columns[0], columns[1] ],
key="free-text-search",
visible=False,
filterable="standard" ) )
operations = []
use_paging = False
def build_initial_query( self, trans, **kwd ):
category_id = kwd.get( 'id', None )
if category_id:
category = suc.get_category( trans, category_id )
if category:
return trans.sa_session.query( model.Repository ) \
.filter( and_( model.Repository.table.c.deleted == False,
model.Repository.table.c.deprecated == False ) ) \
.join( model.User.table ) \
.outerjoin( model.RepositoryCategoryAssociation.table ) \
.outerjoin( model.Category.table ) \
.filter( model.Category.table.c.name == category.name )
return trans.sa_session.query( model.Repository ) \
.filter( and_( model.Repository.table.c.deleted == False,
model.Repository.table.c.deprecated == False ) ) \
.join( model.User.table ) \
.outerjoin( model.RepositoryCategoryAssociation.table ) \
.outerjoin( model.Category.table )
class RepositoriesIOwnGrid( RepositoryGrid ):
title = "Repositories I own"
columns = [
RepositoryGrid.NameColumn( "Name",
key="name",
link=( lambda item: dict( operation="view_or_manage_repository", id=item.id ) ),
attach_popup=False ),
RepositoryGrid.TypeColumn( "Type" ),
RepositoryGrid.MetadataRevisionColumn( "Metadata<br/>Revisions" ),
RepositoryGrid.ToolsFunctionallyCorrectColumn( "Tools<br/>Verified" ),
RepositoryGrid.DeprecatedColumn( "Deprecated" )
]
columns.append( grids.MulticolFilterColumn( "Search repository name",
cols_to_filter=[ columns[0] ],
key="free-text-search",
visible=False,
filterable="standard" ) )
operations = []
use_paging = False
def build_initial_query( self, trans, **kwd ):
return trans.sa_session.query( model.Repository ) \
.filter( and_( model.Repository.table.c.deleted == False,
model.Repository.table.c.user_id == trans.user.id ) ) \
.join( model.User.table ) \
.outerjoin( model.RepositoryCategoryAssociation.table ) \
.outerjoin( model.Category.table )
class RepositoriesMissingToolTestComponentsGrid( RepositoryGrid ):
# This grid displays only the latest installable revision of each repository.
title = "Repositories with missing tool test components"
columns = [
RepositoryGrid.NameColumn( "Name",
key="name",
link=( lambda item: dict( operation="view_or_manage_repository", id=item.id ) ),
attach_popup=False ),
RepositoryGrid.LatestInstallableRevisionColumn( "Latest Installable Revision" ),
RepositoryGrid.UserColumn( "Owner",
key="User.username",
model_class=model.User,
link=( lambda item: dict( operation="repositories_by_user", id=item.id ) ),
attach_popup=False )
]
columns.append( grids.MulticolFilterColumn( "Search repository name",
cols_to_filter=[ columns[0] ],
key="free-text-search",
visible=False,
filterable="standard" ) )
operations = []
use_paging = False
def build_initial_query( self, trans, **kwd ):
# Filter by latest installable revisions that contain tools with missing tool test components.
revision_clause_list = []
for repository in trans.sa_session.query( model.Repository ) \
.filter( and_( model.Repository.table.c.deprecated == False,
model.Repository.table.c.deleted == False ) ):
changeset_revision = filter_by_latest_downloadable_changeset_revision_that_has_missing_tool_test_components( trans, repository )
if changeset_revision:
revision_clause_list.append( model.RepositoryMetadata.table.c.changeset_revision == changeset_revision )
if revision_clause_list:
return trans.sa_session.query( model.Repository ) \
.filter( and_( model.Repository.table.c.deprecated == False,
model.Repository.table.c.deleted == False ) ) \
.join( model.RepositoryMetadata ) \
.filter( or_( *revision_clause_list ) ) \
.join( model.User.table )
# Return an empty query.
return trans.sa_session.query( model.Repository ) \
.filter( model.Repository.table.c.id < 0 )
class MyWritableRepositoriesMissingToolTestComponentsGrid( RepositoriesMissingToolTestComponentsGrid ):
# This grid displays only the latest installable revision of each repository.
title = "Repositories I can change with missing tool test components"
columns = [ col for col in RepositoriesMissingToolTestComponentsGrid.columns ]
operations = []
use_paging = False
def build_initial_query( self, trans, **kwd ):
# First get all repositories that the current user is authorized to update.
username = trans.user.username
user_clause_list = []
for repository in trans.sa_session.query( model.Repository ) \
.filter( and_( model.Repository.table.c.deprecated == False,
model.Repository.table.c.deleted == False ) ):
allow_push = repository.allow_push( trans.app )
if allow_push:
allow_push_usernames = allow_push.split( ',' )
if username in allow_push_usernames:
user_clause_list.append( model.Repository.table.c.id == repository.id )
if user_clause_list:
# We have the list of repositories that the current user is authorized to update, so filter further by latest installable revisions that contain
# tools with missing tool test components.
revision_clause_list = []
for repository in trans.sa_session.query( model.Repository ) \
.filter( and_( model.Repository.table.c.deprecated == False,
model.Repository.table.c.deleted == False ) ) \
.filter( or_( *user_clause_list ) ):
changeset_revision = filter_by_latest_downloadable_changeset_revision_that_has_missing_tool_test_components( trans, repository )
if changeset_revision:
revision_clause_list.append( model.RepositoryMetadata.table.c.changeset_revision == changeset_revision )
if revision_clause_list:
return trans.sa_session.query( model.Repository ) \
.filter( and_( model.Repository.table.c.deprecated == False,
model.Repository.table.c.deleted == False ) ) \
.join( model.User.table ) \
.filter( or_( *user_clause_list ) ) \
.join( model.RepositoryMetadata ) \
.filter( or_( *revision_clause_list ) )
# Return an empty query.
return trans.sa_session.query( model.Repository ) \
.filter( model.Repository.table.c.id < 0 )
class RepositoriesWithTestInstallErrorsGrid( RepositoryGrid ):
# This grid displays only the latest installable revision of each repository.
title = "Repositories with tool test installation errors"
columns = [
RepositoryGrid.NameColumn( "Name",
key="name",
link=( lambda item: dict( operation="view_or_manage_repository", id=item.id ) ),
attach_popup=False ),
RepositoryGrid.LatestInstallableRevisionColumn( "Latest Installable Revision" ),
RepositoryGrid.UserColumn( "Owner",
key="User.username",
model_class=model.User,
link=( lambda item: dict( operation="repositories_by_user", id=item.id ) ),
attach_popup=False )
]
columns.append( grids.MulticolFilterColumn( "Search repository name",
cols_to_filter=[ columns[0] ],
key="free-text-search",
visible=False,
filterable="standard" ) )
operations = []
use_paging = False
def build_initial_query( self, trans, **kwd ):
        # Filter by latest installable revisions that have tool test installation errors.
revision_clause_list = []
for repository in trans.sa_session.query( model.Repository ) \
.filter( and_( model.Repository.table.c.deprecated == False,
model.Repository.table.c.deleted == False ) ):
changeset_revision = filter_by_latest_downloadable_changeset_revision_that_has_test_install_errors( trans, repository )
if changeset_revision:
revision_clause_list.append( model.RepositoryMetadata.table.c.changeset_revision == changeset_revision )
if revision_clause_list:
return trans.sa_session.query( model.Repository ) \
.filter( and_( model.Repository.table.c.deprecated == False,
model.Repository.table.c.deleted == False ) ) \
.join( model.RepositoryMetadata ) \
.filter( or_( *revision_clause_list ) ) \
.join( model.User.table )
# Return an empty query.
return trans.sa_session.query( model.Repository ) \
.filter( model.Repository.table.c.id < 0 )
class MyWritableRepositoriesWithTestInstallErrorsGrid( RepositoriesWithTestInstallErrorsGrid ):
# This grid displays only the latest installable revision of each repository.
title = "Repositories I can change with tool test installation errors"
columns = [ col for col in RepositoriesWithTestInstallErrorsGrid.columns ]
operations = []
use_paging = False
def build_initial_query( self, trans, **kwd ):
# First get all repositories that the current user is authorized to update.
username = trans.user.username
user_clause_list = []
for repository in trans.sa_session.query( model.Repository ) \
.filter( and_( model.Repository.table.c.deprecated == False,
model.Repository.table.c.deleted == False ) ):
allow_push = repository.allow_push( trans.app )
if allow_push:
allow_push_usernames = allow_push.split( ',' )
if username in allow_push_usernames:
user_clause_list.append( model.Repository.table.c.id == repository.id )
if user_clause_list:
            # We have the list of repositories that the current user is authorized to update, so filter further by latest
            # installable revisions that have tool test installation errors.
revision_clause_list = []
for repository in trans.sa_session.query( model.Repository ) \
.filter( and_( model.Repository.table.c.deprecated == False,
model.Repository.table.c.deleted == False ) ) \
.filter( or_( *user_clause_list ) ):
changeset_revision = filter_by_latest_downloadable_changeset_revision_that_has_test_install_errors( trans, repository )
if changeset_revision:
revision_clause_list.append( model.RepositoryMetadata.table.c.changeset_revision == changeset_revision )
if revision_clause_list:
return trans.sa_session.query( model.Repository ) \
.filter( and_( model.Repository.table.c.deprecated == False,
model.Repository.table.c.deleted == False ) ) \
.join( model.User.table ) \
.filter( or_( *user_clause_list ) ) \
.join( model.RepositoryMetadata ) \
.filter( or_( *revision_clause_list ) )
# Return an empty query.
return trans.sa_session.query( model.Repository ) \
.filter( model.Repository.table.c.id < 0 )
class RepositoriesWithSkipTestsCheckedGrid( RepositoryGrid ):
# This grid displays only the latest installable revision of each repository.
title = "Repositories with skip tool tests checked"
columns = [
RepositoryGrid.NameColumn( "Name",
key="name",
link=( lambda item: dict( operation="view_or_manage_repository", id=item.id ) ),
attach_popup=False ),
RepositoryGrid.LatestInstallableRevisionColumn( "Latest Installable Revision" ),
RepositoryGrid.UserColumn( "Owner",
key="User.username",
model_class=model.User,
link=( lambda item: dict( operation="repositories_by_user", id=item.id ) ),
attach_popup=False )
]
columns.append( grids.MulticolFilterColumn( "Search repository name",
cols_to_filter=[ columns[0] ],
key="free-text-search",
visible=False,
filterable="standard" ) )
operations = []
use_paging = False
def build_initial_query( self, trans, **kwd ):
        # Filter by latest installable revisions of repositories that have the skip tool tests setting checked.
revision_clause_list = []
        for repository in trans.sa_session.query( model.Repository )
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Wed Jun 2 23:00:44 2021
@author: carl
"""
import os.path
import numpy as np
import h5py
import csv
import traceback
import time
from .spectraldata import SpectralData
class DecompositionData(SpectralData):
"SpectralData with addition of ROI/decomposition data"
def __init__(self):
super().__init__()
self.rdc_directory = None # None means dirname(curFile)
self.clear_rdc()
def clear_rdc(self):
"""
Clear the ROI/decomposition/clustering data.
Returns
-------
None.
"""
self.roi = None # bool array or None if nothing is selected
self.decomposition_settings = None # dict with decomp settings
self.decomposition_roi = None # ROI actually used for decomp, or None
# {iteration: {'spectra': array(component, wavenum),
# 'concentrations': array(component, pixel) } }
self.decomposition_data = None
self.decomposition_errors = None # 1d array of error values
self.clustering_settings = None # dict with clustering settings
self.clustering_roi = None # ROI actually used for clustering, or None
self.clustering_labels = None # 1d int array of labels
self.clustering_annotations = None # {str: [labels]}
def set_rdc_directory(self, directory):
# print('set rdc',directory)
self.rdc_directory = directory
def get_duplicate_filenames(self):
"""
Get list of filenames whose basename is not unique.
Returns
-------
List of filenames with full paths
"""
dups = set()
seen = {}
for i, f in enumerate([os.path.splitext(os.path.basename(f))[0]
for f in self.filenames]):
if f in seen:
                dups.add(self.filenames[i])
                dups.add(self.filenames[seen[f]])
else:
seen[f] = i
return list(dups)
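    # Added illustration (hypothetical paths): with filenames ['/a/x.mat', '/b/x.mat', '/a/y.mat'],
    # the basename 'x' is not unique, so get_duplicate_filenames() returns
    # ['/a/x.mat', '/b/x.mat'] (in arbitrary order).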
def rdc_filename(self, filetype='odd'):
"""
Get the default name of the file where the current file's
ROI, decomposition and clustering is stored.
Parameters
----------
        filetype : str, optional
            The file type/extension; currently only 'odd' is supported.
Returns
-------
str
Filename.
"""
filetypes = ['odd']
if filetype not in filetypes:
raise ValueError("File type must be one of " + str(filetypes))
if self.rdc_directory is None:
return os.path.splitext(self.curFile)[0] + '.' + filetype
if self.rdc_directory == '':
raise ValueError("Directory cannot be ''")
fn = os.path.splitext(os.path.basename(self.curFile))[0]
return os.path.join(self.rdc_directory, fn) + '.' + filetype
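    # Added illustration (hypothetical paths): with curFile '/data/sample.mat' and
    # rdc_directory None, rdc_filename() returns '/data/sample.odd'; with
    # rdc_directory '/out' it returns '/out/sample.odd'.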
def read_matrix(self, filename):
self.clear_rdc()
super().read_matrix(filename)
try:
self.load_rdc()
except OSError:
pass
except Exception:
traceback.print_exc()
print('Warning: rdc auto-load failed')
# ROI stuff
def set_roi(self, roi):
"Set ROI from array."
if roi is None:
self.roi = None
elif roi.shape != (self.raw.shape[0],):
raise ValueError('Wrong number of values for ROI (%s)' %
str(roi.shape))
else:
self.roi = roi if roi.any() else None
# Decomposition stuff
def set_decomposition_settings(self, params):
"""
Populate decomposition_settings from a Parameters object,
set the decomposition ROI depending on params.dcRoi,
and clear stored data.
Parameters
----------
params : Parameters
Only the dc settings will be copied, without the 'dc' prefix.
Returns
-------
None
Raises
-------
ValueError
If the dcRoi parameter is inconsistent with current ROI
"""
dcs = params.filtered('dc')
roi = None
if params.dcRoi == 'ignore':
roi = None
elif params.dcRoi == 'require':
if self.roi is None:
raise ValueError('ROI is required but not defined')
roi = self.roi.copy()
elif params.dcRoi == 'ifdef':
roi = self.roi.copy() if self.roi is not None else None
else:
raise ValueError('Bad value for params.dcRoi')
self.decomposition_settings = {k[2:]: v for k, v in dcs.items()}
self.decomposition_settings['DateTime'] = \
time.strftime('%Y-%m-%d %H:%M:%S %Z')
self.decomposition_roi = roi
self.decomposition_data = {}
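    # Added note: after set_decomposition_settings, decomposition_settings is a plain dict of
    # the 'dc'-prefixed parameters with the prefix stripped plus a 'DateTime' stamp, e.g.
    # (hypothetical values) {'Components': 5, 'Iterations': 100, 'DateTime': '2021-06-02 ...'};
    # 'Components' and 'Iterations' are the keys add_decomposition_data relies on below.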
def set_decomposition_errors(self, errors):
"Set the history of least squares error values"
self.decomposition_errors = np.asarray(errors)
def add_decomposition_data(self, iteration, spectra, concentrations):
"Set data for a specific iteration; 0=initial"
pixels = self.decomposition_roi.sum() if self.decomposition_roi \
is not None else self.raw.shape[0]
comps = self.decomposition_settings['Components']
assert iteration <= self.decomposition_settings['Iterations']
if iteration:
assert spectra is not None and concentrations is not None
else:
assert spectra is not None or concentrations is not None
if spectra is not None:
assert spectra.shape == (comps, self.raw.shape[1])
spectra = spectra.copy()
if concentrations is not None:
assert concentrations.shape == (comps, pixels)
concentrations = concentrations.copy()
self.decomposition_data[iteration] = {
'spectra': spectra, 'concentrations': concentrations}
# Clustering/annotation stuff
def set_clustering_settings(self, params, roi):
"""
Populate decomposition_settings from a Parameters object,
copy the decomposition ROI, and clear stored clustering data.
Parameters
----------
params : Parameters
Only the ca settings will be copied, without the 'ca' prefix.
Returns
-------
None
"""
cas = params.filtered('ca')
self.clustering_settings = {k[2:]: v for k, v in cas.items()}
self.clustering_settings['DateTime'] = \
time.strftime('%Y-%m-%d %H:%M:%S %Z')
self.clustering_roi = roi.copy() if roi is not None else None
self.clustering_labels = None
self.clustering_label_set = None
self.clustering_annotations = {}
def set_clustering_labels(self, labels, relabel):
pixels = self.clustering_roi.sum() if \
self.clustering_roi is not None else self.raw.shape[0]
assert labels.shape == (pixels,)
# Count labels, sort indexes and invert permutation array
bc = np.bincount(labels)
mapp = np.empty(bc.size, bc.dtype)
mapp[np.argsort(bc)[::-1]] = np.arange(bc.size)
self.clustering_labels = mapp[labels]
self.clustering_label_set = set(self.clustering_labels)
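        # Added worked example: for labels [2, 2, 0, 1, 1, 1] the bincount is [1, 3, 2], so the
        # most frequent original label (1) is remapped to 0, the next (2) to 1 and the rarest (0)
        # to 2, giving clustering_labels [1, 1, 2, 0, 0, 0] -- clusters renumbered by decreasing size.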
def get_annotation_clusters(self, annotation):
if self.clustering_annotations is None:
self.clustering_annotations = {}
if annotation not in self.clustering_annotations:
self.clustering_annotations[annotation] = set()
return self.clustering_annotations[annotation].copy()
def get_unannotated_clusters(self):
used = set()
for c in self.clustering_annotations.values():
used.update(c)
return self.clustering_label_set - used
def set_annotation_clusters(self, annotation, clusters=None):
self.get_annotation_clusters(annotation)
if clusters is not None:
cset = set(clusters)
self.clustering_annotations[annotation] = cset
for c in self.clustering_annotations.values():
if c is not cset and cset.intersection(c):
print('Error: duplicated cluster id',
self.clustering_annotations)
def del_annotation(self, annotation):
if self.clustering_annotations is not None and \
annotation in self.clustering_annotations:
del self.clustering_annotations[annotation]
# Loading and saving
def load_rdc_(self, f, what, imgnum):
grp = f['SpectralData/Image_%d' % imgnum]
if what in ['all', 'roi']:
if 'ROI' in grp:
roi = grp['ROI']
if roi.shape != (self.raw.shape[0],):
raise ValueError('Wrong number of values for ROI (%d)' %
(len(roi)))
roi = roi[:]
else:
roi = None
if what in ['all', 'decomposition']:
dcroi = None
dcerrors = None
if 'Decomposition' in grp:
dc = grp['Decomposition']
dcsettings = dict(dc.attrs.items())
pixels = 0
if 'ROI' in dc:
dcroi = dc['ROI'][:]
if dcroi.shape != (self.raw.shape[0],):
raise ValueError(
'Wrong number of values for '
'Decomposition ROI (%d)' % (len(dcroi)))
pixels = dcroi.sum()
if not pixels:
pixels = self.raw.shape[0]
comps = dcsettings['Components']
dcdata = {}
for ds in dc['Data'].values():
it = ds.attrs['Iteration']
conc = None
if 'Concentrations' in ds:
conc = ds['Concentrations'][:]
if conc.shape[1] != pixels:
raise ValueError('Concentration pixel count mismatch')
if conc.shape[0] != comps:
raise ValueError(
'Concentration component count mismatch')
spect = None
if 'Spectra' in ds:
spect = ds['Spectra'][:]
if spect.shape[1] != self.raw.shape[1]:
raise ValueError('Spectra wavenumber count mismatch')
if spect.shape[0] != comps:
raise ValueError('Spectra component count mismatch')
dcdata[it] = {'concentrations': conc, 'spectra': spect}
if 'Errors' in dc:
dcerrors = dc['Errors'][:]
# if len(dcerrors) >= dcsettings['Iterations']:
# raise ValueError('Too many elements in Errors')
else:
dcsettings = None
dcdata = None
if what in ['all', 'clustering']:
caroi = None
calabels = None
casettings = None
caannot = None
if 'Clustering' in grp:
ca = grp['Clustering']
casettings = dict(ca.attrs.items())
pixels = 0
if 'ROI' in ca:
caroi = ca['ROI'][:]
if caroi.shape != (self.raw.shape[0],):
raise ValueError(
'Wrong number of values for '
'Clustering ROI (%d)' % (len(caroi)))
pixels = caroi.sum()
if not pixels:
pixels = self.raw.shape[0]
maxclust = casettings['Clusters']
if 'Labels' in ca:
calabels = ca['Labels'][:]
if calabels.shape != (pixels,):
raise ValueError('Cluster label count mismatch')
                    if calabels.min() < 0 or calabels.max() >= maxclust:
raise ValueError('Cluster label range error')
if 'Annotations' in ca:
caannot = {}
for ann in ca['Annotations'].values():
atxt = ann.attrs['Text']
avals = ann[:]
if len(avals) > 0 and (
min(avals) < 0 or np.max(avals) >= maxclust):
raise ValueError('Annotation cluster range error')
if atxt in caannot:
caannot[atxt].update(set(avals))
else:
caannot[atxt] = set(avals)
if what in ['all', 'roi']:
self.roi = roi
if what in ['all', 'decomposition']:
self.decomposition_settings = dcsettings
self.decomposition_roi = dcroi
self.decomposition_data = dcdata
self.decomposition_errors = dcerrors
if what in ['all', 'clustering']:
self.clustering_settings = casettings
self.clustering_roi = caroi
self.clustering_labels = calabels
self.clustering_label_set = set(calabels) if \
calabels is not None else None
# if caannot is not None:
# for k, v in caannot.items():
# caannot[k] = list(v)
self.clustering_annotations = caannot
def load_rdc(self, filename=None, what='all'):
"""
Load ROI/decomp/clust from a named file.
Parameters
----------
filename : str
A named file; possibly from rdc_filename
Returns
-------
success : bool
Whether data was loaded
"""
if filename is None:
filename = self.rdc_filename()
# print('load rdc',filename,what)
with h5py.File(filename, mode='r') as f:
return self.load_rdc_(f, what, 0)
def save_rdc_(self, f, what, imgnum):
grp = f.require_group('SpectralData')
grp.attrs.create('Creator', 'OCTAVVS')
grp = grp.require_group('Image_%d' % imgnum)
def replace_data(grp, name, arr):
if name in grp:
if arr is not None and grp[name].shape == arr.shape:
grp[name][...] = arr
from __future__ import division
#import urllib2
import os,sys
import numpy as np
import pandas as pd
from collections import defaultdict
from sklearn import feature_extraction
from sklearn import preprocessing
from random import seed, shuffle
import utils as ut
import funcs_disp_mist as fdm
import time
SEED = 1234
seed(SEED)
np.random.seed(SEED)
def Zafar(dataset):
if dataset == 'adult':
test_adult_data()
elif dataset == 'compas':
test_compas_data()
else:
test_german_data()
return
def load_adult_data(load_data_size=None):
"""
if load_data_size is set to None (or if no argument is provided), then we load and return the whole data
if it is a number, say 10000, then we will return randomly selected 10K examples
"""
attrs = ['age', 'workclass', 'edu_level', 'marital_status', 'occupation', 'relationship',
'race', 'sex', 'hours_per_week', 'native_country'] # all attributes
int_attrs = ['age', 'edu_level', 'hours_per_week'] # attributes with integer values -- the rest are categorical
sensitive_attrs = ['sex'] # the fairness constraints will be used for this feature
    attrs_to_ignore = ['sex', 'race']  # sex and race are sensitive features, so we will not use them in classification; we also do not consider fnlwgt since it is computed externally and is highly predictive of the class (for details, see the documentation of the adult data)
attrs_for_classification = set(attrs) - set(attrs_to_ignore)
data_files = ['dataset/Adult.csv']#["adult.data", "adult.test"]
X = []
y = []
x_control = {}
attrs_to_vals = {} # will store the values for each attribute for all users
for k in attrs:
if k in sensitive_attrs:
x_control[k] = []
elif k in attrs_to_ignore:
pass
else:
attrs_to_vals[k] = []
for f in data_files:
for line in open(f):
line = line.strip()
if line.startswith("age") or line == "":
continue
line = line.split(",")
if "?" in line: # if a line has missing attributes, ignore it
continue
class_label = line[-1]
if class_label in ["<=50K.", "<=50K", "0"]:
class_label = -1
elif class_label in [">50K.", ">50K", "1"]:
class_label = +1
else:
raise Exception("Invalid class label value")
y.append(class_label)
for i in range(0,len(line)-1):
attr_name = attrs[i]
attr_val = line[i]
# reducing dimensionality of some very sparse features
if attr_name == "native_country":
if attr_val!="United-States":
                        attr_val = "Non-United-States"
elif attr_name == "education":
if attr_val in ["Preschool", "1st-4th", "5th-6th", "7th-8th"]:
attr_val = "prim-middle-school"
elif attr_val in ["9th", "10th", "11th", "12th"]:
attr_val = "high-school"
if attr_name in sensitive_attrs:
x_control[attr_name].append(attr_val)
elif attr_name in attrs_to_ignore:
pass
else:
attrs_to_vals[attr_name].append(attr_val)
def convert_attrs_to_ints(d): # discretize the string attributes
for attr_name, attr_vals in d.items():
if attr_name in int_attrs: continue
uniq_vals = sorted(list(set(attr_vals))) # get unique values
# compute integer codes for the unique values
val_dict = {}
for i in range(0,len(uniq_vals)):
val_dict[uniq_vals[i]] = i
# replace the values with their integer encoding
for i in range(0,len(attr_vals)):
attr_vals[i] = val_dict[attr_vals[i]]
d[attr_name] = attr_vals
# convert the discrete values to their integer representations
convert_attrs_to_ints(x_control)
convert_attrs_to_ints(attrs_to_vals)
# if the integer vals are not binary, we need to get one-hot encoding for them
for attr_name in attrs_for_classification:
attr_vals = attrs_to_vals[attr_name]
if attr_name in int_attrs or attr_name == "native_country" or attr_name == "sex":
# the way we encoded native country, its binary now so no need to apply one hot encoding on it
X.append(attr_vals)
else:
attr_vals, index_dict = ut.get_one_hot_encoding(attr_vals)
for inner_col in attr_vals.T:
X.append(inner_col)
    # convert to numpy arrays for easy handling
X = np.array(X, dtype=float).T
y = np.array(y, dtype = float)
for k, v in x_control.items(): x_control[k] = np.array(v, dtype=float)
# shuffle the data
perm = list(range(0,len(y))) # shuffle the data before creating each fold
#shuffle(perm)
X = X[perm]
y = y[perm]
for k in x_control.keys():
x_control[k] = x_control[k][perm]
# see if we need to subsample the data
if load_data_size is not None:
print ("Loading only %d examples from the data" % load_data_size)
X = X[:load_data_size]
y = y[:load_data_size]
for k in x_control.keys():
x_control[k] = x_control[k][:load_data_size]
return X, y, x_control
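# Added note: load_adult_data() returns X as a float ndarray of shape (n_samples, n_features),
# y as an ndarray of +1/-1 class labels, and x_control as a dict mapping each sensitive
# attribute (here only 'sex') to a per-sample float ndarray.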
def test_adult_data():
data_type = 1
X, y, x_control = load_adult_data()
sensitive_attrs = x_control.keys()
""" Split the data into train and test """
train_fold_size = 0.7
split_point = int(round(float(X.shape[0]) * train_fold_size))
x_train, y_train, x_control_train, x_test, y_test, x_control_test, _= \
ut.split_into_train_test(X, y, x_control, train_fold_size)
cons_params = None # constraint parameters, will use them later
loss_function = "logreg" # perform the experiments with logistic regression
EPS = 1e-6
def train_test_classifier():
w = fdm.train_model_disp_mist(x_train, y_train, x_control_train, loss_function, EPS, cons_params)
train_score, test_score, cov_all_train, cov_all_test, s_attr_to_fp_fn_train, s_attr_to_fp_fn_test, y_pred =\
fdm.get_clf_stats(w, x_train, y_train, x_control_train, x_test, y_test, x_control_test, sensitive_attrs)
return w, test_score, s_attr_to_fp_fn_test, y_pred
""" Classify the data while optimizing for accuracy """
print()
print ("== Unconstrained (original) classifier ==")
w_uncons, acc_uncons, s_attr_to_fp_fn_test_uncons, _ = train_test_classifier()
print ("\n-----------------------------------------------------------------------------------\n")
""" Now classify such that we optimize for accuracy while achieving perfect fairness """
print()
start = time.time()
print ("\n\n== Equalized Odds ==") # setting parameter for constraints
cons_type = 4 # FPR constraint -- just change the cons_type, the rest of parameters should stay the same
tau = 5.0
mu = 1.2
sensitive_attrs_to_cov_thresh = {"sex": {0:{0:0, 1:0}, 1:{0:0, 1:0}, 2:{0:0, 1:0}}} # zero covariance threshold, means try to get the fairest solution
cons_params = {"cons_type": cons_type,
"tau": tau,
"mu": mu,
"sensitive_attrs_to_cov_thresh": sensitive_attrs_to_cov_thresh}
w_cons, acc_cons, s_attr_to_fp_fn_test_cons, y_pred = train_test_classifier()
print ("\n-----------------------------------------------------------------------------------\n")
end = time.time()
print("Time: ", end - start)
df = pd.read_csv("dataset/Adult.csv")
df = df.iloc[split_point:, :]
y_pred[y_pred < 0] = 0
df['pred'] = y_pred
df.to_csv("results_zafarEO/adult_test_repaired.csv", index=False)
return
def load_german_data(load_data_size=None):
"""
if load_data_size is set to None (or if no argument is provided), then we load and return the whole data
if it is a number, say 10000, then we will return randomly selected 10K examples
"""
attrs = ['Month', 'Credit_amount', 'Investment', 'Age','Sex', 'Status', 'Credit_history',\
'Savings', 'Property', 'Housing'] # all attributes
int_attrs = ['Month', 'Credit_amount', 'Investment', 'Age'] # attributes with integer values -- the rest are categorical
sensitive_attrs = ['Sex'] # the fairness constraints will be used for this feature
attrs_to_ignore = ['Sex']
attrs_for_classification = set(attrs) - set(attrs_to_ignore)
# adult data comes in two different files, one for training and one for testing, however, we will combine data from both the files
data_files = ["dataset/German.csv"]
X = []
y = []
x_control = {}
attrs_to_vals = {} # will store the values for each attribute for all users
for k in attrs:
if k in sensitive_attrs:
x_control[k] = []
elif k in attrs_to_ignore:
pass
else:
attrs_to_vals[k] = []
for f in data_files:
for line in open(f):
if line.startswith("Month"):
continue
line = line.strip()
if line == "": continue # skip empty lines
line = line.split(",")
#print(line)
if "?" in line: # if a line has missing attributes, ignore it
continue
class_label = line[-1]
#print(class_label)
if class_label in ["Bad", "0"]:
class_label = -1
elif class_label in ["good", "1"]:
class_label = +1
else:
raise Exception("Invalid class label value")
y.append(class_label)
for i in range(0,len(line)-1):
attr_name = attrs[i]
attr_val = line[i]
if attr_name in sensitive_attrs:
x_control[attr_name].append(attr_val)
elif attr_name in attrs_to_ignore:
pass
else:
attrs_to_vals[attr_name].append(attr_val)
def convert_attrs_to_ints(d): # discretize the string attributes
for attr_name, attr_vals in d.items():
if attr_name in int_attrs: continue
uniq_vals = sorted(list(set(attr_vals))) # get unique values
# compute integer codes for the unique values
val_dict = {}
for i in range(0,len(uniq_vals)):
val_dict[uniq_vals[i]] = i
# replace the values with their integer encoding
for i in range(0,len(attr_vals)):
attr_vals[i] = val_dict[attr_vals[i]]
d[attr_name] = attr_vals
# convert the discrete values to their integer representations
convert_attrs_to_ints(x_control)
convert_attrs_to_ints(attrs_to_vals)
# if the integer vals are not binary, we need to get one-hot encoding for them
for attr_name in attrs_for_classification:
attr_vals = attrs_to_vals[attr_name]
if attr_name in int_attrs or attr_name == "Sex": # the way we encoded native country, its binary now so no need to apply one hot encoding on it
X.append(attr_vals)
else:
attr_vals, index_dict = ut.get_one_hot_encoding(attr_vals)
for inner_col in attr_vals.T:
X.append(inner_col)
X = np.array(X, dtype=float).T
y = np.array(y, dtype = float)
for k, v in x_control.items(): x_control[k] = np.array(v, dtype=float)
# shuffle the data
perm = list(range(0,len(y))) # shuffle the data before creating each fold
#shuffle(perm)
X = X[perm]
y = y[perm]
for k in x_control.keys():
x_control[k] = x_control[k][perm]
# see if we need to subsample the data
if load_data_size is not None:
print ("Loading only %d examples from the data" % load_data_size)
X = X[:load_data_size]
y = | |
"""
self.ResourceId = None
self.Resource = None
self.Value = None
self.Percentage = None
self.BillingValue = None
self.BillingPercentage = None
def _deserialize(self, params):
self.ResourceId = params.get("ResourceId")
self.Resource = params.get("Resource")
self.Value = params.get("Value")
self.Percentage = params.get("Percentage")
self.BillingValue = params.get("BillingValue")
self.BillingPercentage = params.get("BillingPercentage")
class RequestHeader(AbstractModel):
"""自定义请求头配置,默认为关闭状态
"""
def __init__(self):
"""
:param Switch: 自定义请求头配置开关
on:开启
off:关闭
:type Switch: str
:param HeaderRules: 自定义请求头配置规则
注意:此字段可能返回 null,表示取不到有效值。
:type HeaderRules: list of HttpHeaderPathRule
"""
self.Switch = None
self.HeaderRules = None
def _deserialize(self, params):
self.Switch = params.get("Switch")
if params.get("HeaderRules") is not None:
self.HeaderRules = []
for item in params.get("HeaderRules"):
obj = HttpHeaderPathRule()
obj._deserialize(item)
self.HeaderRules.append(obj)
class ResourceBillingData(AbstractModel):
"""计费数据明细
"""
def __init__(self):
"""
:param Resource: 资源名称,根据查询条件不同分为以下几类:
某一个具体域名:表示该域名明细数据
multiDomains:表示多域名汇总明细数据
某一个项目 ID:指定项目查询时,显示为项目 ID
all:账号维度数据明细
:type Resource: str
:param BillingData: 计费数据详情
:type BillingData: list of CdnData
"""
self.Resource = None
self.BillingData = None
def _deserialize(self, params):
self.Resource = params.get("Resource")
if params.get("BillingData") is not None:
self.BillingData = []
for item in params.get("BillingData"):
obj = CdnData()
obj._deserialize(item)
self.BillingData.append(obj)
class ResourceData(AbstractModel):
"""查询对象及其对应的访问明细数据
"""
def __init__(self):
"""
:param Resource: 资源名称,根据查询条件不同分为以下几类:
具体域名:表示该域名明细数据
multiDomains:表示多域名汇总明细数据
项目 ID:指定项目查询时,显示为项目 ID
all:账号维度明细数据
:type Resource: str
:param CdnData: 资源对应的数据明细
:type CdnData: list of CdnData
"""
self.Resource = None
self.CdnData = None
def _deserialize(self, params):
self.Resource = params.get("Resource")
if params.get("CdnData") is not None:
self.CdnData = []
for item in params.get("CdnData"):
obj = CdnData()
obj._deserialize(item)
self.CdnData.append(obj)
class ResourceOriginData(AbstractModel):
"""查询对象及其对应的回源明细数据
"""
def __init__(self):
"""
:param Resource: 资源名称,根据查询条件不同分为以下几类:
具体域名:表示该域名明细数据
multiDomains:表示多域名汇总明细数据
项目 ID:指定项目查询时,显示为项目 ID
all:账号维度明细数据
:type Resource: str
:param OriginData: 回源数据详情
:type OriginData: list of CdnData
"""
self.Resource = None
self.OriginData = None
def _deserialize(self, params):
self.Resource = params.get("Resource")
if params.get("OriginData") is not None:
self.OriginData = []
for item in params.get("OriginData"):
obj = CdnData()
obj._deserialize(item)
self.OriginData.append(obj)
class ResponseHeader(AbstractModel):
"""自定义响应头配置,默认为关闭状态
"""
def __init__(self):
"""
:param Switch: 自定义响应头开关
on:开启
off:关闭
:type Switch: str
:param HeaderRules: 自定义响应头规则
注意:此字段可能返回 null,表示取不到有效值。
:type HeaderRules: list of HttpHeaderPathRule
"""
self.Switch = None
self.HeaderRules = None
def _deserialize(self, params):
self.Switch = params.get("Switch")
if params.get("HeaderRules") is not None:
self.HeaderRules = []
for item in params.get("HeaderRules"):
obj = HttpHeaderPathRule()
obj._deserialize(item)
self.HeaderRules.append(obj)
class ResponseHeaderCache(AbstractModel):
"""源站头部缓存配置,默认为开启状态,缓存所有头部信息
"""
def __init__(self):
"""
:param Switch: 源站头部缓存开关
on:开启
off:关闭
:type Switch: str
"""
self.Switch = None
def _deserialize(self, params):
self.Switch = params.get("Switch")
class Revalidate(AbstractModel):
"""是否回源站校验
"""
def __init__(self):
"""
:param Switch: on | off 是否总是回源校验
注意:此字段可能返回 null,表示取不到有效值。
:type Switch: str
:param Path: 只在特定请求路径回源站校验
注意:此字段可能返回 null,表示取不到有效值。
:type Path: str
"""
self.Switch = None
self.Path = None
def _deserialize(self, params):
self.Switch = params.get("Switch")
self.Path = params.get("Path")
class RuleCache(AbstractModel):
"""缓存配置分路径版本。
默认情况下所有文件缓存过期时间为 30 天
默认情况下静态加速类型的域名 .php;.jsp;.asp;.aspx 不缓存
"""
def __init__(self):
"""
:param RulePaths: CacheType 对应类型下的匹配内容:
all 时填充 *
file 时填充后缀名,如 jpg、txt
directory 时填充路径,如 /xxx/test
path 时填充绝对路径,如 /xxx/test.html
index 时填充 /
default 时填充 "no max-age"
注意:此字段可能返回 null,表示取不到有效值。
:type RulePaths: list of str
:param RuleType: 规则类型:
all:所有文件生效
file:指定文件后缀生效
directory:指定路径生效
path:指定绝对路径生效
index:首页
default: 源站无max-age时生效
注意:此字段可能返回 null,表示取不到有效值。
:type RuleType: str
:param CacheConfig: 缓存配置。
注意:此字段可能返回 null,表示取不到有效值。
:type CacheConfig: :class:`tencentcloud.cdn.v20180606.models.RuleCacheConfig`
"""
self.RulePaths = None
self.RuleType = None
self.CacheConfig = None
def _deserialize(self, params):
self.RulePaths = params.get("RulePaths")
self.RuleType = params.get("RuleType")
if params.get("CacheConfig") is not None:
self.CacheConfig = RuleCacheConfig()
self.CacheConfig._deserialize(params.get("CacheConfig"))
class RuleCacheConfig(AbstractModel):
"""路径缓存缓存配置(三种缓存模式中选取一种)
"""
def __init__(self):
"""
:param Cache: 缓存配置
注意:此字段可能返回 null,表示取不到有效值。
:type Cache: :class:`tencentcloud.cdn.v20180606.models.CacheConfigCache`
:param NoCache: 不缓存配置
注意:此字段可能返回 null,表示取不到有效值。
:type NoCache: :class:`tencentcloud.cdn.v20180606.models.CacheConfigNoCache`
:param FollowOrigin: 遵循源站配置
注意:此字段可能返回 null,表示取不到有效值。
:type FollowOrigin: :class:`tencentcloud.cdn.v20180606.models.CacheConfigFollowOrigin`
"""
self.Cache = None
self.NoCache = None
self.FollowOrigin = None
def _deserialize(self, params):
if params.get("Cache") is not None:
self.Cache = CacheConfigCache()
self.Cache._deserialize(params.get("Cache"))
if params.get("NoCache") is not None:
self.NoCache = CacheConfigNoCache()
self.NoCache._deserialize(params.get("NoCache"))
if params.get("FollowOrigin") is not None:
self.FollowOrigin = CacheConfigFollowOrigin()
self.FollowOrigin._deserialize(params.get("FollowOrigin"))
class RuleQueryString(AbstractModel):
"""路径保留参数配置
"""
def __init__(self):
"""
:param Switch: on | off CacheKey是否由QueryString组成
注意:此字段可能返回 null,表示取不到有效值。
:type Switch: str
:param Action: includeCustom 包含部分url参数
注意:此字段可能返回 null,表示取不到有效值。
:type Action: str
:param Value: 使用/排除的url参数数组,';' 分割
注意:此字段可能返回 null,表示取不到有效值。
:type Value: str
"""
self.Switch = None
self.Action = None
self.Value = None
def _deserialize(self, params):
self.Switch = params.get("Switch")
self.Action = params.get("Action")
self.Value = params.get("Value")
class ScdnAclConfig(AbstractModel):
"""SCDN访问控制
"""
def __init__(self):
"""
:param Switch: 是否开启,on | off
:type Switch: str
:param ScriptData: Acl规则组,switch为on时必填
注意:此字段可能返回 null,表示取不到有效值。
:type ScriptData: list of ScdnAclGroup
:param ErrorPage: 错误页面配置
注意:此字段可能返回 null,表示取不到有效值。
:type ErrorPage: :class:`tencentcloud.cdn.v20180606.models.ScdnErrorPage`
"""
self.Switch = None
self.ScriptData = None
self.ErrorPage = None
def _deserialize(self, params):
self.Switch = params.get("Switch")
if params.get("ScriptData") is not None:
self.ScriptData = []
for item in params.get("ScriptData"):
obj = ScdnAclGroup()
obj._deserialize(item)
self.ScriptData.append(obj)
if params.get("ErrorPage") is not None:
self.ErrorPage = ScdnErrorPage()
self.ErrorPage._deserialize(params.get("ErrorPage"))
class ScdnAclGroup(AbstractModel):
"""SCDN精准访问控制配置
"""
def __init__(self):
"""
:param RuleName: 规则名称
:type RuleName: str
:param Configure: 具体配置
:type Configure: list of ScdnAclRule
:param Result: 规则行为,一般为refuse
:type Result: str
:param Status: 规则是否生效中active|inactive
:type Status: str
"""
self.RuleName = None
self.Configure = None
self.Result = None
self.Status = None
def _deserialize(self, params):
self.RuleName = params.get("RuleName")
if params.get("Configure") is not None:
self.Configure = []
for item in params.get("Configure"):
obj = ScdnAclRule()
obj._deserialize(item)
self.Configure.append(obj)
self.Result = params.get("Result")
self.Status = params.get("Status")
class ScdnAclRule(AbstractModel):
"""精准访问控制匹配规则
"""
def __init__(self):
"""
:param MatchKey: 匹配关键字, params | url | ip | referer | user-agent
:type MatchKey: str
:param LogiOperator: 逻辑操作符,取值 exclude, include, notequal, equal, len-less, len-equal, len-more
:type LogiOperator: str
:param MatchValue: 匹配值
:type MatchValue: str
"""
self.MatchKey = None
self.LogiOperator = None
self.MatchValue = None
def _deserialize(self, params):
self.MatchKey = params.get("MatchKey")
self.LogiOperator = params.get("LogiOperator")
self.MatchValue = params.get("MatchValue")
class ScdnBotConfig(AbstractModel):
"""bot配置类型
"""
def __init__(self):
"""
:param Switch: on|off
:type Switch: str
:param BotCookie: Bot cookie策略
注意:此字段可能返回 null,表示取不到有效值。
:type BotCookie: list of BotCookie
:param BotJavaScript: Bot Js策略
注意:此字段可能返回 null,表示取不到有效值。
:type BotJavaScript: list of BotJavaScript
"""
self.Switch = None
self.BotCookie = None
self.BotJavaScript = None
def _deserialize(self, params):
self.Switch = params.get("Switch")
if params.get("BotCookie") is not None:
self.BotCookie = []
for item in params.get("BotCookie"):
obj = BotCookie()
obj._deserialize(item)
self.BotCookie.append(obj)
if params.get("BotJavaScript") is not None:
self.BotJavaScript = []
for item in params.get("BotJavaScript"):
obj = BotJavaScript()
obj._deserialize(item)
self.BotJavaScript.append(obj)
class ScdnCCRules(AbstractModel):
"""scdn 的自定义 cc 规则
"""
def __init__(self):
"""
:param RuleType: 规则类型:
all:所有文件生效
file:指定文件后缀生效
directory:指定路径生效
path:指定绝对路径生效
index:首页
:type RuleType: str
:param RuleValue: 规则值
:type RuleValue: list of str
:param Qps: 规则限频
:type Qps: int
:param DetectionTime: 探测时长
注意:此字段可能返回 null,表示取不到有效值。
:type DetectionTime: int
:param FrequencyLimit: 限频阈值
注意:此字段可能返回 null,表示取不到有效值。
:type FrequencyLimit: int
:param PunishmentSwitch: IP 惩罚开关,可选on|off
注意:此字段可能返回 null,表示取不到有效值。
:type PunishmentSwitch: str
:param PunishmentTime: IP 惩罚时长
注意:此字段可能返回 null,表示取不到有效值。
:type PunishmentTime: int
:param Action: 执行动作,intercept|redirect
注意:此字段可能返回 null,表示取不到有效值。
:type Action: str
:param RedirectUrl: 动作为 redirect 时,重定向的url
注意:此字段可能返回 null,表示取不到有效值。
:type RedirectUrl: str
"""
self.RuleType = None
self.RuleValue = None
self.Qps = None
self.DetectionTime = None
self.FrequencyLimit = None
self.PunishmentSwitch = None
self.PunishmentTime = None
self.Action = None
self.RedirectUrl = None
def _deserialize(self, params):
self.RuleType = params.get("RuleType")
self.RuleValue = params.get("RuleValue")
self.Qps = params.get("Qps")
self.DetectionTime = params.get("DetectionTime")
self.FrequencyLimit = params.get("FrequencyLimit")
self.PunishmentSwitch = params.get("PunishmentSwitch")
self.PunishmentTime = params.get("PunishmentTime")
self.Action = params.get("Action")
self.RedirectUrl = params.get("RedirectUrl")
class ScdnConfig(AbstractModel):
"""cc的配置类型
"""
def __init__(self):
"""
:param Switch: on | off
:type Switch: str
:param Rules: 自定义 cc 防护规则
注意:此字段可能返回 null,表示取不到有效值。
:type Rules: list of ScdnCCRules
"""
self.Switch = None
self.Rules = None
def _deserialize(self, params):
self.Switch = params.get("Switch")
if params.get("Rules") is not None:
self.Rules = []
for item in params.get("Rules"):
obj = ScdnCCRules()
obj._deserialize(item)
self.Rules.append(obj)
class ScdnDdosConfig(AbstractModel):
"""ddos配置类型
"""
def __init__(self):
"""
:param Switch: on|off
:type Switch: str
"""
self.Switch = None
def _deserialize(self, params):
self.Switch = params.get("Switch")
class ScdnDomain(AbstractModel):
"""聚合了SCDN域名的基本信息
"""
def __init__(self):
"""
:param Domain: 域名
:type Domain: str
:param Status: 当前状态,取值online | offline | process
:type Status: str
:param Waf: Waf 状态默认为‘/’,取值 close | intercept | observe
:type Waf: str
:param Acl: Acl 状态默认为‘/’,取值 close | open
:type Acl: str
:param CC: CC 状态默认为‘/’,取值 close | open
:type CC: str
:param Ddos: Ddos 状态默认为‘/’,取值 close | open
:type Ddos: str
:param ProjectId: 项目ID
:type ProjectId: str
:param AclRuleNumbers: Acl 规则数
:type AclRuleNumbers: int
:param Bot: Bot 状态默认为‘/’,取值 close | open
:type Bot: str
"""
self.Domain = None
self.Status = None
self.Waf = None
self.Acl = None
self.CC = None
self.Ddos = None
self.ProjectId = None
self.AclRuleNumbers = None
self.Bot = None
def _deserialize(self, params):
self.Domain = params.get("Domain")
self.Status = params.get("Status")
self.Waf = params.get("Waf")
self.Acl = params.get("Acl")
self.CC = params.get("CC")
self.Ddos = params.get("Ddos")
self.ProjectId = params.get("ProjectId")
self.AclRuleNumbers = params.get("AclRuleNumbers")
self.Bot = params.get("Bot")
class ScdnErrorPage(AbstractModel):
"""acl的错误页面
"""
def __init__(self):
"""
:param RedirectCode: 状态码
:type RedirectCode: int
:param RedirectUrl: 重定向url
:type RedirectUrl: str
"""
self.RedirectCode = None
self.RedirectUrl = None
def _deserialize(self, params):
self.RedirectCode = params.get("RedirectCode")
self.RedirectUrl = params.get("RedirectUrl")
class ScdnEventLogConditions(AbstractModel):
"""SCDN 事件日志查询条件
"""
def __init__(self):
"""
:param Key: 匹配关键字,ip, attack_location
:type Key: str
:param Operator: 逻辑操作符,取值 exclude, include
:type Operator: str
:param Value: 匹配值,允许使用通配符(*)查询,匹配零个、单个、多个字符,例如 1.2.*
:type Value: str
"""
self.Key = None
self.Operator = None
self.Value = None
def _deserialize(self, params):
self.Key | |
response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
list_supported_cultures.metadata = {'url': '/apps/cultures'}
def download_query_logs(
self, app_id, custom_headers=None, raw=False, callback=None, **operation_config):
"""Gets the logs of the past month's endpoint queries for the application.
:param app_id: The application ID.
:type app_id: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param callback: When specified, will be called with each chunk of
data that is streamed. The callback should take two arguments, the
bytes of the current chunk of data and the response object. If the
data is uploading, response will be None.
:type callback: Callable[Bytes, response=None]
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:return: object or ClientRawResponse if raw=true
:rtype: Generator or ~msrest.pipeline.ClientRawResponse
:raises:
:class:`HttpOperationError<msrest.exceptions.HttpOperationError>`
"""
# Construct URL
url = self.download_query_logs.metadata['url']
path_format_arguments = {
'Endpoint': self._serialize.url("self.config.endpoint", self.config.endpoint, 'str', skip_quote=True),
'appId': self._serialize.url("app_id", app_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
# Construct headers
header_parameters = {}
header_parameters['Accept'] = 'application/octet-stream'
if custom_headers:
header_parameters.update(custom_headers)
# Construct and send request
request = self._client.get(url, query_parameters, header_parameters)
response = self._client.send(request, stream=True, **operation_config)
if response.status_code not in [200]:
raise HttpOperationError(self._deserialize, response)
deserialized = self._client.stream_download(response, callback)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
download_query_logs.metadata = {'url': '/apps/{appId}/querylogs'}
def get(
self, app_id, custom_headers=None, raw=False, **operation_config):
"""Gets the application info.
:param app_id: The application ID.
:type app_id: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:return: ApplicationInfoResponse or ClientRawResponse if raw=true
:rtype:
~azure.cognitiveservices.language.luis.authoring.models.ApplicationInfoResponse
or ~msrest.pipeline.ClientRawResponse
:raises:
:class:`ErrorResponseException<azure.cognitiveservices.language.luis.authoring.models.ErrorResponseException>`
"""
# Construct URL
url = self.get.metadata['url']
path_format_arguments = {
'Endpoint': self._serialize.url("self.config.endpoint", self.config.endpoint, 'str', skip_quote=True),
'appId': self._serialize.url("app_id", app_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
# Construct headers
header_parameters = {}
header_parameters['Accept'] = 'application/json'
if custom_headers:
header_parameters.update(custom_headers)
# Construct and send request
request = self._client.get(url, query_parameters, header_parameters)
response = self._client.send(request, stream=False, **operation_config)
if response.status_code not in [200]:
raise models.ErrorResponseException(self._deserialize, response)
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('ApplicationInfoResponse', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
get.metadata = {'url': '/apps/{appId}'}
def update(
self, app_id, name=None, description=None, custom_headers=None, raw=False, **operation_config):
"""Updates the name or description of the application.
:param app_id: The application ID.
:type app_id: str
:param name: The application's new name.
:type name: str
:param description: The application's new description.
:type description: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:return: OperationStatus or ClientRawResponse if raw=true
:rtype:
~azure.cognitiveservices.language.luis.authoring.models.OperationStatus
or ~msrest.pipeline.ClientRawResponse
:raises:
:class:`ErrorResponseException<azure.cognitiveservices.language.luis.authoring.models.ErrorResponseException>`
"""
application_update_object = models.ApplicationUpdateObject(name=name, description=description)
# Construct URL
url = self.update.metadata['url']
path_format_arguments = {
'Endpoint': self._serialize.url("self.config.endpoint", self.config.endpoint, 'str', skip_quote=True),
'appId': self._serialize.url("app_id", app_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
# Construct headers
header_parameters = {}
header_parameters['Accept'] = 'application/json'
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if custom_headers:
header_parameters.update(custom_headers)
# Construct body
body_content = self._serialize.body(application_update_object, 'ApplicationUpdateObject')
# Construct and send request
request = self._client.put(url, query_parameters, header_parameters, body_content)
response = self._client.send(request, stream=False, **operation_config)
if response.status_code not in [200]:
raise models.ErrorResponseException(self._deserialize, response)
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('OperationStatus', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
update.metadata = {'url': '/apps/{appId}'}
def delete(
self, app_id, force=False, custom_headers=None, raw=False, **operation_config):
"""Deletes an application.
:param app_id: The application ID.
:type app_id: str
:param force: A flag to indicate whether to force an operation.
:type force: bool
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:return: OperationStatus or ClientRawResponse if raw=true
:rtype:
~azure.cognitiveservices.language.luis.authoring.models.OperationStatus
or ~msrest.pipeline.ClientRawResponse
:raises:
:class:`ErrorResponseException<azure.cognitiveservices.language.luis.authoring.models.ErrorResponseException>`
"""
# Construct URL
url = self.delete.metadata['url']
path_format_arguments = {
'Endpoint': self._serialize.url("self.config.endpoint", self.config.endpoint, 'str', skip_quote=True),
'appId': self._serialize.url("app_id", app_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
if force is not None:
query_parameters['force'] = self._serialize.query("force", force, 'bool')
# Construct headers
header_parameters = {}
header_parameters['Accept'] = 'application/json'
if custom_headers:
header_parameters.update(custom_headers)
# Construct and send request
request = self._client.delete(url, query_parameters, header_parameters)
response = self._client.send(request, stream=False, **operation_config)
if response.status_code not in [200]:
raise models.ErrorResponseException(self._deserialize, response)
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('OperationStatus', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
delete.metadata = {'url': '/apps/{appId}'}
def publish(
self, app_id, version_id=None, is_staging=False, custom_headers=None, raw=False, **operation_config):
"""Publishes a specific version of the application.
:param app_id: The application ID.
:type app_id: str
:param version_id: The version ID to publish.
:type version_id: str
:param is_staging: Indicates if the staging slot should be used,
instead of the Production one.
:type is_staging: bool
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:return: ProductionOrStagingEndpointInfo or ClientRawResponse if
raw=true
:rtype:
~azure.cognitiveservices.language.luis.authoring.models.ProductionOrStagingEndpointInfo
or ~msrest.pipeline.ClientRawResponse
:raises:
:class:`ErrorResponseException<azure.cognitiveservices.language.luis.authoring.models.ErrorResponseException>`
"""
application_publish_object = models.ApplicationPublishObject(version_id=version_id, is_staging=is_staging)
# Construct URL
url = self.publish.metadata['url']
path_format_arguments = {
'Endpoint': self._serialize.url("self.config.endpoint", self.config.endpoint, 'str', skip_quote=True),
'appId': self._serialize.url("app_id", app_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
# Construct headers
header_parameters = {}
header_parameters['Accept'] = 'application/json'
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if custom_headers:
header_parameters.update(custom_headers)
# Construct body
body_content = self._serialize.body(application_publish_object, 'ApplicationPublishObject')
# Construct and send request
request = self._client.post(url, query_parameters, header_parameters, body_content)
response = self._client.send(request, stream=False, **operation_config)
if response.status_code not in [201, 207]:
raise models.ErrorResponseException(self._deserialize, response)
deserialized = None
if response.status_code == 201:
deserialized = self._deserialize('ProductionOrStagingEndpointInfo', response)
if response.status_code == 207:
deserialized = self._deserialize('ProductionOrStagingEndpointInfo', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
publish.metadata = {'url': '/apps/{appId}/publish'}
def get_settings(
self, app_id, custom_headers=None, raw=False, **operation_config):
"""Get the application settings including 'UseAllTrainingData'.
:param app_id: The application ID.
:type app_id: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:return: ApplicationSettings or ClientRawResponse if raw=true
:rtype:
~azure.cognitiveservices.language.luis.authoring.models.ApplicationSettings
or ~msrest.pipeline.ClientRawResponse
:raises:
:class:`ErrorResponseException<azure.cognitiveservices.language.luis.authoring.models.ErrorResponseException>`
"""
# Construct URL
url = self.get_settings.metadata['url']
path_format_arguments = {
'Endpoint': self._serialize.url("self.config.endpoint", self.config.endpoint, 'str', skip_quote=True),
'appId': self._serialize.url("app_id", app_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
# Construct headers
header_parameters = {}
header_parameters['Accept'] = 'application/json'
if custom_headers:
header_parameters.update(custom_headers)
# Construct and send request
request = self._client.get(url, query_parameters, header_parameters)
response = self._client.send(request, stream=False, **operation_config)
if response.status_code not in [200]:
raise models.ErrorResponseException(self._deserialize, response)
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('ApplicationSettings', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
get_settings.metadata = {'url': '/apps/{appId}/settings'}
def update_settings(
self, app_id, is_public=None, custom_headers=None, raw=False, **operation_config):
"""Updates the application settings including 'UseAllTrainingData'.
:param app_id: The application ID.
:type app_id: str
:param is_public: Setting your application as public allows other
people to use your application's endpoint using their own keys.
:type is_public: bool
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:return: OperationStatus or ClientRawResponse if raw=true
:rtype:
~azure.cognitiveservices.language.luis.authoring.models.OperationStatus
or ~msrest.pipeline.ClientRawResponse
:raises:
:class:`ErrorResponseException<azure.cognitiveservices.language.luis.authoring.models.ErrorResponseException>`
"""
application_setting_update_object = models.ApplicationSettingUpdateObject(is_public=is_public)
# Construct URL
url = self.update_settings.metadata['url']
path_format_arguments = {
'Endpoint': self._serialize.url("self.config.endpoint", self.config.endpoint, 'str', skip_quote=True),
'appId': self._serialize.url("app_id", app_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
# Construct headers
header_parameters = {}
header_parameters['Accept'] = 'application/json'
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if custom_headers:
header_parameters.update(custom_headers)
# Construct body
body_content = self._serialize.body(application_setting_update_object, 'ApplicationSettingUpdateObject')
# Construct and send request
request = self._client.put(url, query_parameters, header_parameters, body_content)
response = self._client.send(request, stream=False, **operation_config)
if response.status_code not in [200]:
raise models.ErrorResponseException(self._deserialize, response)
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('OperationStatus', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
update_settings.metadata = {'url': '/apps/{appId}/settings'}
def get_publish_settings(
self, app_id, custom_headers=None, raw=False, **operation_config):
"""Get the application publish settings including 'UseAllTrainingData'.
:param app_id: The | |
196,
243,
236,
234,
215,
217,
224,
245,
225,
220,
247,
230,
247,
213,
246,
228,
233,
207,
224,
256,
201,
222,
211,
245,
218,
227,
240,
197,
256,
195,
208,
257,
230,
222,
239,
222,
203,
198,
225,
205,
235,
204,
264,
243,
231,
235,
221,
258,
239,
216,
260,
237,
216,
238,
228,
226,
216,
243,
241,
216,
216,
222,
240,
226,
224,
205,
203,
230,
243,
212,
221,
239,
241,
241,
204,
201,
241,
217,
212,
237,
237,
233,
225,
239,
225,
229,
301,
397,
597,
587,
369,
275,
247,
238,
240,
227,
224,
230,
238,
230,
214,
211,
237,
243,
244,
228,
238,
206,
230,
215,
203,
216,
233,
239,
222,
252,
244,
254,
218,
222,
240,
202,
250,
197,
228,
207,
230,
205,
210,
247,
236,
248,
219,
271,
221,
227,
216,
227,
222,
215,
227,
216,
254,
235,
215,
242,
231,
230,
246,
215,
225,
233,
257,
225,
229,
227,
196,
220,
238,
228,
202,
211,
237,
229,
215,
235,
186,
216,
240,
256,
228,
247,
238,
210,
245,
244,
213,
238,
234,
233,
203,
236,
223,
253,
236,
264,
216,
206,
239,
232,
201,
249,
242,
211,
231,
219,
197,
253,
235,
206,
234,
215,
233,
261,
240,
386,
1725,
5792,
5959,
3810,
965,
295,
251,
264,
256,
219,
247,
245,
221,
218,
222,
236,
219,
235,
210,
207,
259,
234,
212,
228,
226,
258,
199,
222,
220,
225,
222,
202,
242,
229,
234,
210,
229,
200,
241,
213,
240,
226,
226,
209,
227,
208,
254,
220,
219,
182,
206,
234,
227,
215,
217,
220,
213,
253,
216,
232,
208,
225,
232,
234,
225,
210,
203,
264,
237,
204,
221,
213,
258,
231,
215,
225,
244,
223,
228,
217,
248,
226,
236,
201,
240,
207,
233,
215,
230,
231,
250,
219,
240,
205,
235,
221,
217,
222,
227,
251,
237,
224,
214,
235,
233,
255,
217,
235,
230,
233,
231,
233,
214,
236,
235,
270,
274,
499,
4198,
9829,
8252,
9536,
4991,
741,
252,
252,
228,
220,
235,
209,
222,
205,
228,
231,
264,
242,
236,
233,
248,
218,
251,
246,
220,
249,
234,
223,
225,
253,
224,
204,
225,
213,
226,
223,
206,
251,
249,
216,
216,
205,
203,
235,
231,
236,
202,
223,
235,
214,
238,
222,
240,
252,
220,
232,
242,
205,
229,
206,
226,
237,
211,
234,
220,
233,
255,
230,
209,
234,
220,
243,
183,
218,
236,
216,
232,
238,
236,
220,
219,
234,
192,
210,
229,
255,
255,
220,
206,
222,
238,
241,
216,
237,
229,
225,
235,
249,
255,
223,
224,
234,
229,
215,
212,
240,
249,
222,
226,
238,
225,
236,
226,
221,
231,
256,
261,
255,
548,
3225,
8745,
6302,
7572,
14054,
4268,
547,
341,
271,
289,
259,
260,
239,
264,
240,
254,
231,
244,
222,
249,
222,
202,
218,
250,
221,
235,
217,
223,
214,
224,
210,
224,
250,
210,
240,
214,
235,
250,
198,
221,
217,
184,
237,
210,
204,
226,
233,
218,
211,
224,
209,
241,
257,
236,
224,
244,
235,
223,
223,
223,
266,
234,
238,
207,
223,
230,
221,
228,
226,
216,
205,
203,
239,
231,
215,
221,
225,
217,
218,
224,
239,
224,
226,
241,
207,
240,
221,
222,
222,
224,
229,
236,
235,
215,
228,
243,
202,
216,
219,
231,
239,
227,
237,
225,
238,
246,
236,
264,
260,
262,
242,
252,
220,
285,
297,
382,
638,
1440,
3705,
6656,
6965,
6215,
13217,
9489,
2579,
1206,
1209,
1189,
1152,
1060,
964,
790,
585,
450,
347,
303,
227,
236,
202,
216,
245,
218,
230,
242,
233,
211,
215,
224,
227,
208,
223,
214,
224,
193,
248,
236,
236,
191,
233,
217,
243,
208,
221,
213,
222,
227,
239,
219,
253,
233,
228,
219,
234,
235,
235,
222,
239,
220,
211,
219,
211,
225,
226,
269,
192,
239,
234,
228,
230,
217,
234,
213,
214,
231,
209,
225,
218,
173,
202,
234,
236,
217,
229,
203,
257,
224,
225,
214,
235,
227,
221,
233,
203,
209,
219,
228,
201,
266,
247,
232,
222,
244,
232,
265,
258,
275,
251,
241,
231,
313,
398,
527,
738,
1187,
1823,
2703,
3010,
3563,
4929,
6175,
7530,
8699,
10691,
5792,
1905,
1215,
1204,
1196,
1251,
1209,
1249,
1451,
1783,
1847,
1460,
799,
497,
319,
271,
209,
246,
214,
205,
213,
262,
237,
240,
240,
221,
243,
198,
232,
252,
209,
235,
194,
195,
223,
233,
227,
210,
225,
185,
222,
220,
226,
206,
253,
218,
241,
228,
238,
225,
211,
230,
200,
253,
223,
215,
217,
233,
217,
234,
220,
247,
239,
237,
242,
228,
254,
241,
232,
216,
259,
261,
208,
219,
222,
214,
238,
223,
236,
236,
236,
221,
221,
216,
197,
231,
228,
233,
231,
229,
226,
226,
225,
228,
225,
230,
274,
267,
319,
284,
336,
376,
526,
887,
1386,
1875,
2044,
1917,
2102,
2440,
2905,
3546,
3773,
4548,
8639,
10594,
10138,
10459,
9246,
3735,
1792,
1508,
1325,
1307,
1111,
1039,
1166,
1132,
1290,
1414,
1485,
1550,
1632,
984,
449,
307,
243,
236,
224,
208,
243,
247,
218,
221,
238,
221,
212,
202,
229,
226,
242,
232,
239,
233,
231,
225,
220,
213,
212,
231,
218,
238,
237,
231,
238,
213,
245,
209,
236,
224,
215,
201,
200,
223,
203,
203,
231,
218,
224,
223,
200,
218,
228,
234,
212,
228,
234,
220,
227,
210,
226,
225,
214,
220,
232,
227,
239,
233,
208,
239,
241,
241,
232,
246,
260,
233,
218,
237,
244,
256,
213,
235,
222,
265,
256,
286,
394,
964,
1304,
1664,
2538,
3115,
2939,
2589,
2205,
2295,
2774,
3467,
4305,
4756,
3716,
3318,
3136,
5070,
6165,
5558,
6656,
10540,
4836,
2040,
2092,
2524,
2900,
2764,
1983,
1444,
1190,
1161,
1146,
1402,
1603,
1625,
1429,
1434,
1350,
819,
379,
288,
241,
228,
242,
224,
254,
219,
262,
231,
205,
209,
235,
230,
218,
252,
229,
240,
243,
215,
203,
194,
224,
228,
200,
220,
220,
216,
211,
208,
218,
214,
243,
241,
212,
239,
231,
203,
205,
216,
210,
231,
198,
242,
232,
195,
227,
212,
234,
230,
209,
242,
219,
222,
208,
229,
244,
228,
205,
246,
234,
231,
230,
221,
237,
225,
242,
238,
225,
212,
242,
205,
229,
218,
246,
238,
244,
297,
986,
5326,
11440,
14208,
10889,
8389,
6146,
5526,
5608,
6183,
7177,
7727,
7702,
7060,
6040,
4775,
3793,
3340,
3318,
3667,
4870,
7658,
6513,
2217,
1212,
1051,
1109,
1213,
1356,
1820,
2734,
2675,
1900,
1551,
1305,
1324,
1619,
1645,
1543,
1347,
1307,
1268,
572,
306,
246,
240,
234,
228,
214,
218,
222,
209,
227,
264,
201,
224,
214,
210,
222,
205,
220,
237,
205,
225,
213,
214,
222,
180,
235,
226,
255,
207,
249,
204,
222,
228,
213,
204,
224,
258,
207,
234,
232,
211,
237,
211,
216,
224,
207,
196,
233,
209,
221,
230,
240,
205,
236,
201,
221,
223,
235,
248,
253,
198,
218,
253,
252,
215,
256,
211,
214,
230,
232,
230,
230,
237,
238,
237,
486,
3444,
8578,
9163,
9582,
10298,
10359,
10170,
9255,
9090,
8527,
7653,
6917,
6343,
4964,
4462,
3790,
2899,
2950,
2898,
2903,
4103,
5277,
5420,
2810,
961,
782,
755,
764,
774,
885,
937,
1085,
1593,
2554,
2801,
1932,
1475,
1423,
1336,
1353,
1217,
1381,
1524,
1097,
467,
287,
229,
228,
234,
230,
211,
234,
227,
230,
224,
213,
226,
194,
213,
220,
225,
233,
227,
210,
227,
209,
253,
207,
244,
223,
217,
207,
221,
220,
206,
238,
213,
216,
210,
208,
221,
238,
260,
215,
216,
206,
226,
210,
221,
232,
232,
216,
230,
224,
231,
220,
204,
241,
221,
225,
206,
194,
229,
225,
217,
237,
201,
229,
234,
206,
220,
214,
216,
238,
226,
237,
250,
314,
1252,
5975,
7807,
8075,
8168,
9170,
7907,
8083,
7417,
6168,
6163,
5724,
5452,
5662,
5890,
4934,
4151,
3663,
2903,
3120,
3674,
3717,
3734,
| |
function of time, but not of stage position
The stage is set to a position shortly after time zero (GUI value)
"""
self.initializeStage()
position = self.stage.calcStageWay(MeasParams['timeZero'] +
MeasParams['timeoverlapp'])
self.stage.moveStage(position)
self.measureMOContrast()
def update(self):
"""
Update loop of the application for the measurement of TR-MOKE and TR hysteresis
"""
self.TotalProgresscount = 0
self.CurrentNumber = 1
self.initializeTransientArrays()
for fluence in self.fluenceVector:
# Card is initialized and closed frequently to prevent
# Buffer overflow of Analog Output Card
# (writing the voltage for the magnetic field)
self.initializeNICard()
self.setFluence(fluence)
self.initializeStage()
### Hysteresis Measurements ###
if self.Hysteresis_Check.isChecked():
self.measureStaticHysteresis()
for position in self.hystDelayVector_mm:
self.initializeNICard()
self.stage.moveStage(position)
self.measureHysteresis(position)
self.closeNICard()
self.closeStage()
self.closeNICard()
### MOKE Measurements ###
if self.TimeResolved_Check.isChecked():
for entry in self.voltageVector:
self.initializeNICard()
self.initializeStage()
self.measureTransient(entry)
self.closeStage()
self.closeNICard()
self.CurrentNumber += 1
self.calculateProgress(0)
self.closeNICard()
self.closeStage()
self.timer.stop()
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ #
# ~~~ j) measurement order ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ #
def measureStaticHysteresis(self):
"""
Measure a hysteresis without the pump laser (pump beam blocked by the shutter)
"""
self.moveShutter(True)
self.measureHysteresis('Static')
# no useful information is contained in the pumped values,
# so the corresponding column is deleted
# (np.delete returns a new array, so the result is assigned back)
self.resultList = np.delete(self.resultList, 2, 1)
if self.SaveButton.isChecked():
self.saveOnlyHysteresis()
self.closeStage()
self.moveShutter(False)
def measureHysteresis(self, position):
"""
Measure a hysteresis at a given delay point; results are stored in resultList.
:param position: Stage delay for naming the save file
:return: self.resultList
"""
self.statusReport('Measuring Hysteresis at ' + str(position))
# set autofocus to Hysteresis Plot in GUI
self.tabWidget_3.setCurrentIndex(2)
self.Progresscount = 0
try:
self.resultList = self.initializeHysteresisArray()
except TypeError:
return
for i in range(np.size(self.resultList[:, 0])):
self.MeasurementCard.WriteValues(self.WritingTask,
self.resultList[i, 0])
attempt = 1
# attempt is used to repeat measurements that have an unequal number of
# chopped and unchopped values
while attempt == 1:
data = self.MeasurementCard.ReadValues_ai(self.MeasurementTask,
LoopParams)
chopper = data[3]
balancedDiode = data[0]
plusDiode = data[1]
minusDiode = data[4]
referenceDiode = data[5]
DiffDiodeChop, DiffDiodeUnChop, attempt = \
utilities.sortAfterChopperSanityCheck(balancedDiode, chopper)
refchop, refunchop = \
utilities.sortAfterChopper(referenceDiode, chopper)
DPlusChop, DPlusUnchop = \
utilities.sortAfterChopper(plusDiode, chopper)
DMinusChop, DMinusUnchop = \
utilities.sortAfterChopper(minusDiode, chopper)
#if not bDebug:
# self.checkIfLaserOff(refchop)
QtGui.QApplication.processEvents()
self.updateGUI()
self.calculateProgress(1)
self.TotalProgresscount += 1
self.Progresscount += 1
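# resultList column layout (one row per applied field voltage):
# 0: field voltage, 1/2: balanced diode (chopped/unchopped),
# 3/4: reference diode (chopped/unchopped),
# 5/6: plus diode (chopped/unchopped), 7/8: minus diode (chopped/unchopped)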
self.resultList[i, 1] = DiffDiodeChop
self.resultList[i, 2] = DiffDiodeUnChop
self.resultList[i, 3] = refchop
self.resultList[i, 4] = refunchop
self.resultList[i, 5] = DPlusChop
self.resultList[i, 6] = DPlusUnchop
self.resultList[i, 7] = DMinusChop
self.resultList[i, 8] = DMinusUnchop
self.updateHysteresis()
# if position is not a string (as it is for the static case),
# use the delay value in ps to name the save file;
# otherwise use the original name ('Static') for saving
if isinstance(position, int) or isinstance(position, float):
self.saveHysteresis(self.stage.calcLightWay(position))
else:
self.saveHysteresis(position)
self.CurrentNumber += 1
def measureMOContrast(self):
"""
measure only the MO contrast: the stage is stationary at one position.
The MO contrast for the entered magnetic field is displayed in an
infinite loop (when the end of the axis is reached it starts again).
"""
self.tabWidget_3.setCurrentIndex(0)
vector = np.arange(0, 501)
self.PP_Plus = np.zeros((int(len(vector)), 2))
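# PP_Plus acts as a rolling display buffer:
# column 0 = sample index, column 1 = MO contrast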
self.currentAmplitude.setText(str(Parameters['Voltage']))
self.MeasurementCard.WriteValues(
self.WritingTask, Parameters['Voltage'])
self.Stage_idx = int(vector[0])
while True:
repeat = 0
while repeat < 1:
data = self.MeasurementCard.ReadValues_ai(self.MeasurementTask,
LoopParams)
QtGui.QApplication.processEvents()
balancedDiode = data[0]
chopper = data[3]
# the returned attempt shows whether the chopped and unchopped lists
# have equal length; if not, repeat the measurement.
DiffDiodeChop, DiffDiodeUnChop, attempt = \
utilities.sortAfterChopperSanityCheck(balancedDiode, chopper)
if attempt == 1:
repeat -= 1
else:
self.updateGUI_Adjustement()
self.calculateMO(data, vector)
repeat += 1
self.Stage_idx += 1
if self.Stage_idx == 500:
self.Stage_idx = 0
self.MeasurementCard.WriteValues(self.WritingTask, 0)
def calculateMO(self, data, vector):
"""
calculate the measured MO signal (Pump Probe for one magnetic field
direction)
:param data:
:param vector:
:return: self.PP_Plus
"""
balancedDiode = data[0]
chopper = data[3]
DiffDiodeChop, DiffDiodeUnChop = \
utilities.sortAfterChopper(balancedDiode, chopper)
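# pump-probe MO contrast: difference of the balanced-diode signal
# between the two chopper states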
self.PP_Plus[self.Stage_idx, 0] = vector[int(self.Stage_idx)]
self.PP_Plus[self.Stage_idx, 1] = DiffDiodeChop - DiffDiodeUnChop
def measureTransient(self, entry):
"""
measure a time-resolved MOKE trace for the applied voltage (entry)
:param entry: Voltage for Time Trace
:return:
"""
self.tabWidget_3.setCurrentIndex(0)
self.initializeTransientArrays()
self.Progresscount = 0
self.StartMeasurement = True
Loop = 1
self.PP_MinusIdx = 0
self.PP_PlusIdx = 0
Parameters['Voltage'] = float(entry)
self.currentAmplitude.setText(str(Parameters['Voltage']))
if self.SaveButton.isChecked():
self.saveToMeasurementParameterList()
while Loop < LoopParams['Loops']+1:
Polarity_Field = 1
self.MagneticFieldChange = 0
self.statusReport('Loop: '+str(Loop))
while self.MagneticFieldChange < 2:
self.MeasurementCard.WriteValues(
self.WritingTask, Polarity_Field * Parameters['Voltage'])
self.Stage_idx = 0
self.Stage_idx2 = (len(self.stageVector_mm)-1)
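# Stage_idx counts forward through the current scan direction;
# Stage_idx2 (= len(stageVector) - 1 - Stage_idx) maps the point back to the
# original delay ordering, since the stage vector is reversed after each
# field polarity to measure on the return pass of the stage.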
self.j, self.k = 0, 0
for Stagemove in self.stageVector_mm:
self.stage.moveStage(Stagemove)
self.Pos_ps = self.stage.calcLightWay(Stagemove)
self.statusReport('Measure Transient: '
'Stage Position in ps: '+str(self.Pos_ps))
repeat = 0
while repeat < 1:
data = self.MeasurementCard.ReadValues_ai(
self.MeasurementTask, LoopParams)
QtGui.QApplication.processEvents()
balancedDiode = data[0]
chopper = data[3]
# the returned attempt shows whether the chopped and unchopped lists
# have equal length; if not, repeat the measurement.
DiffDiodeChop, DiffDiodeUnChop, attempt = \
utilities.sortAfterChopperSanityCheck(
balancedDiode, chopper)
if attempt == 1:
repeat -= 1
else:
self.updateGUI()
self.dataOperations(Loop, Polarity_Field, data)
if Loop == 1:
self.calculateFirstLoop()
else:
self.calculateLoopAverage()
repeat += 1
self.Progresscount += 1
self.TotalProgresscount += 1
self.calculateProgress(0)
self.Stage_idx += 1
self.Stage_idx2 -= 1
self.MagneticFieldChange += 1
Polarity_Field = Polarity_Field*(-1)
# to save time: measure on the return pass of the stage
self.stageVector_mm = self.stageVector_mm[::-1]
Loop += 1
if self.SaveButton.isChecked():
self.saveData()
self.statusReport('Finished Transient Measurement')
if self.SaveButton.isChecked():
self.saveData()
self.MeasurementCard.WriteValues(self.WritingTask, 0)
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ #
# ~~~ k) Data Analysis ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ #
def calculateLoopAverage(self):
"""
Calculate Loop Average for
- MOKE_Average
- MinusDiode_Average
- PlusDiode_Average
"""
self.MOKE_Average[:, 1] = (self.MOKE_Average[:, 1] +
(self.PP_Plus[:, 1] - self.PP_Minus[:, 1]))/2
self.MinusDiode_Average[:, 1] = (self.MinusDiode_Average[:, 1] +
(self.MinusDiode_PP_Minus[:, 1] +
self.MinusDiode_PP_Plus[:, 1]) / 2)/2
self.PlusDiode_Average[:, 1] = (self.PlusDiode_Average[:, 1] +
(self.PlusDiode_PP_Minus[:, 1] +
self.PlusDiode_PP_Plus[:, 1]) / 2)/2
def calculateFirstLoop(self):
"""
Set the first column to the stage vector (delays in ps) and
calculate MOKE and diode signals for the first loop
"""
self.MOKE_Average[:, 0] = self.stageVector_ps
self.MinusDiode_Average[:, 0] = self.stageVector_ps
self.PlusDiode_Average[:, 0] = self.stageVector_ps
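# TR-MOKE signal: difference of the pump-probe transients for the two field
# polarities; the single-diode traces are averaged over both polarities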
self.MOKE_Average[:, 1] = self.PP_Plus[:, 1] - self.PP_Minus[:, 1]
self.MinusDiode_Average[:, 1] = (self.MinusDiode_PP_Minus[:, 1] +
self.MinusDiode_PP_Plus[:, 1]) / 2
self.PlusDiode_Average[:, 1] = (self.PlusDiode_PP_Minus[:, 1] +
self.PlusDiode_PP_Plus[:, 1]) / 2
def dataOperations(self, Loop, Polarity_Field, data):
"""
sort data according to chopper state and magnetic field direction
:param Loop, Polarity_Field, data
"""
balancedDiode = data[0]
DiodeMinus = data[1]
chopper = data[3]
DiodePlus = data[4]
referenceDiode = data[5]
DiffDiodeChop, DiffDiodeUnChop = \
utilities.sortAfterChopper(balancedDiode, chopper)
ReferenceChop, ReferenceUnchop = \
utilities.sortAfterChopper(referenceDiode, chopper)
MinusDiodeChop, MinusDiodeUnChop = \
utilities.sortAfterChopper(DiodeMinus, chopper)
PlusDiodeChop, PlusDiodeUnChop = \
utilities.sortAfterChopper(DiodePlus, chopper)
if Polarity_Field < 0:
self.calculateMinusMagneticField(DiffDiodeChop, DiffDiodeUnChop,
ReferenceChop, ReferenceUnchop,
MinusDiodeChop, MinusDiodeUnChop,
PlusDiodeChop, PlusDiodeUnChop,
Loop, data)
else:
self.calculatePlusMagneticField(DiffDiodeChop, DiffDiodeUnChop,
ReferenceChop, ReferenceUnchop,
MinusDiodeChop, MinusDiodeUnChop,
PlusDiodeChop, PlusDiodeUnChop,
Loop, data)
if Loop == 1:
self.calculatePPFirstLoop(Polarity_Field)
else:
self.calculatePPAverageLoop(Polarity_Field)
def calculatePPAverageLoop(self, Polarity_Field):
"""
Calculate values for pump-probe measurements depending on the magnetic
field direction; average over all loops
:param Polarity_Field:
:return:
self.PP_Plus
self.MinusDiode_PP_Plus
self.PlusDiode_PP_Plus
self.RefDiode_PP_Plus
OR
self.PP_Minus
self.MinusDiode_PP_Minus
self.PlusDiode_PP_Minus
self.RefDiode_PP_Minus
"""
if Polarity_Field > 0:
PP_Plus_value = \
(self.diffDiodeChopPlus[self.Stage_idx] -
self.diffDiodeUnChopPlus[self.Stage_idx])
self.PP_Plus[self.Stage_idx, 1] = \
(self.PP_Plus[self.Stage_idx, 1] +
PP_Plus_value) / 2
Minus_PP_Plus_value = \
(self.MinusDiodeChop_plus[self.Stage_idx] -
self.MinusDiodeUnChop_plus[self.Stage_idx])
self.MinusDiode_PP_Plus[self.Stage_idx, 1] = \
(self.MinusDiode_PP_Plus[self.Stage_idx, 1] +
Minus_PP_Plus_value) / 2
Plus_PP_Plus_value = \
(self.PlusDiodeChop_plus[self.Stage_idx] -
self.PlusDiodeUnChop_plus[self.Stage_idx])
self.PlusDiode_PP_Plus[self.Stage_idx, 1] = \
(self.PlusDiode_PP_Plus[self.Stage_idx, 1] +
Plus_PP_Plus_value) / 2
Refvalue = \
(self.RefDiodeChop_plus[self.Stage_idx] -
self.RefDiodeUnChop_plus[self.Stage_idx])
self.RefDiode_PP_Plus[self.Stage_idx, 1] = \
(self.RefDiode_PP_Plus[self.Stage_idx, 1] +
Refvalue) / 2
else:
PP_Minus_value = \
(self.diffDiodeChopMinus[self.Stage_idx2] -
self.diffDiodeUnChopMinus[self.Stage_idx2])
self.PP_Minus[self.Stage_idx, 1] = \
(self.PP_Minus[self.Stage_idx, 1] +
PP_Minus_value) / 2
Minus_PP_Minus_value = \
(self.MinusDiodeChop_minus[self.Stage_idx] -
self.MinusDiodeUnChop_minus[self.Stage_idx])
self.MinusDiode_PP_Minus[self.Stage_idx, 1] = \
(self.MinusDiode_PP_Minus[self.Stage_idx, 1] +
Minus_PP_Minus_value) / 2
Plus_PP_Minus_value = \
(self.PlusDiodeChop_minus[self.Stage_idx] -
self.PlusDiodeUnChop_minus[self.Stage_idx])
self.PlusDiode_PP_Minus[self.Stage_idx, 1] = \
(self.PlusDiode_PP_Minus[self.Stage_idx, 1] +
Plus_PP_Minus_value) / 2
Refvalue = \
(self.RefDiodeChop_minus[self.Stage_idx] -
self.RefDiodeUnChop_minus[self.Stage_idx])
self.RefDiode_PP_Minus[self.Stage_idx, 1] = \
(self.RefDiode_PP_Minus[self.Stage_idx, 1] +
Refvalue) / 2
def calculatePPFirstLoop(self, Polarity_Field):
"""
Set values for pump-probe measurements depending on the magnetic field
direction, for the first loop
:param Polarity_Field:
:return:
self.PP_Plus
self.MinusDiode_PP_Plus
self.PlusDiode_PP_Plus
self.RefDiode_PP_Plus
OR
self.PP_Minus
self.MinusDiode_PP_Minus
self.PlusDiode_PP_Minus
self.RefDiode_PP_Minus
"""
if Polarity_Field > 0:
self.PP_Plus[self.Stage_idx, 0] = self.Pos_ps
self.PP_Plus[self.Stage_idx, 1] = \
self.diffDiodeChopPlus[self.Stage_idx] - \
self.diffDiodeUnChopPlus[self.Stage_idx]
self.MinusDiode_PP_Plus[self.Stage_idx, 0] = self.Pos_ps
self.MinusDiode_PP_Plus[self.Stage_idx, 1] = \
self.MinusDiodeChop_plus[self.Stage_idx] -\
self.MinusDiodeUnChop_plus[self.Stage_idx]
self.PlusDiode_PP_Plus[self.Stage_idx, 0] = self.Pos_ps
self.PlusDiode_PP_Plus[self.Stage_idx, 1] = \
self.PlusDiodeChop_plus[self.Stage_idx] -\
self.PlusDiodeUnChop_plus[self.Stage_idx]
self.RefDiode_PP_Plus[self.Stage_idx, 0] = self.Pos_ps
self.RefDiode_PP_Plus[self.Stage_idx, 1] = \
self.RefDiodeChop_plus[self.Stage_idx] -\
self.RefDiodeUnChop_plus[self.Stage_idx]
else:
self.PP_Minus[self.Stage_idx2, 0] = self.Pos_ps
self.PP_Minus[self.Stage_idx2, 1] = \
self.diffDiodeChopMinus[self.Stage_idx] - \
self.diffDiodeUnChopMinus[self.Stage_idx]
self.MinusDiode_PP_Minus[self.Stage_idx2, 0] = self.Pos_ps
self.MinusDiode_PP_Minus[self.Stage_idx2, 1] = \
self.MinusDiodeChop_minus[self.Stage_idx] - \
self.MinusDiodeUnChop_minus[self.Stage_idx]
self.PlusDiode_PP_Minus[self.Stage_idx2, 0] = self.Pos_ps
self.PlusDiode_PP_Minus[self.Stage_idx2, 1] = \
self.PlusDiodeChop_minus[self.Stage_idx] - \
self.PlusDiodeUnChop_minus[self.Stage_idx]
self.RefDiode_PP_Minus[self.Stage_idx2, 0] = self.Pos_ps
self.RefDiode_PP_Minus[self.Stage_idx2, 1] = \
self.RefDiodeChop_minus[self.Stage_idx] - \
self.RefDiodeUnChop_minus[self.Stage_idx]
def calculatePlusMagneticField(self, DiffDiodeChop, DiffDiodeUnChop,
ReferenceChop, ReferenceUnchop,
MinusDiodeChop, MinusDiodeUnChop,
PlusDiodeChop, PlusDiodeUnChop, Loop, data):
"""
Sort | |
m.x596 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x597 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x598 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x599 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x600 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x601 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x602 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x603 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x604 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x605 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x606 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x607 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x608 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x609 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x610 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x611 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x612 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x613 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x614 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x615 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x616 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x617 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x618 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x619 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x620 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x621 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x622 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x623 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x624 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x625 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x626 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x627 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x628 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x629 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x630 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x631 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x632 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x633 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x634 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x635 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x636 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x637 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x638 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x639 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x640 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x641 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x642 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x643 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x644 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x645 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x646 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x647 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x648 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x649 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x650 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x651 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x652 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x653 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x654 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x655 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x656 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x657 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x658 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x659 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x660 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x661 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x662 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x663 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x664 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x665 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x666 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x667 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x668 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x669 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x670 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x671 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x672 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x673 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x674 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x675 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x676 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x677 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x678 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x679 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x680 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x681 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x682 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x683 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x684 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x685 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x686 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x687 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x688 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x689 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x690 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x691 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x692 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x693 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x694 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x695 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x696 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x697 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x698 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x699 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x700 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x701 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x702 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x703 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x704 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x705 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x706 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x707 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x708 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x709 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x710 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x711 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x712 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x713 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x714 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x715 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x716 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x717 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x718 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x719 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x720 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x721 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x722 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x723 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x724 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x725 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x726 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x727 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x728 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x729 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x730 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x731 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x732 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x733 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x734 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x735 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x736 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x737 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x738 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x739 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x740 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x741 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x742 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x743 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x744 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x745 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x746 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x747 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x748 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x749 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x750 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x751 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x752 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x753 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x754 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x755 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x756 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x757 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x758 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x759 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x760 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x761 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x762 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x763 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x764 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x765 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x766 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x767 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x768 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x769 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x770 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x771 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x772 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x773 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x774 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x775 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x776 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x777 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x778 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x779 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x780 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x781 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x782 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x783 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x784 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x785 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x786 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x787 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x788 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x789 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x790 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x791 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x792 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x793 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x794 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x795 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x796 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x797 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x798 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x799 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x800 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x801 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x802 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x803 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x804 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x805 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x806 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x807 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x808 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x809 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x810 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x811 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x812 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x813 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x814 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x815 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x816 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x817 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x818 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x819 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x820 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x821 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x822 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x823 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x824 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x825 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x826 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x827 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x828 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x829 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x830 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x831 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x832 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x833 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x834 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x835 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x836 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x837 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x838 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x839 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x840 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x841 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x842 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x843 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x844 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x845 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x846 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x847 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x848 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x849 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x850 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x851 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x852 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x853 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x854 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x855 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x856 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x857 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x858 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x859 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x860 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x861 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x862 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x863 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x864 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x865 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x866 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x867 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x868 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x869 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x870 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x871 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x872 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x873 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x874 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x875 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x876 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x877 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x878 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x879 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x880 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x881 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x882 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x883 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x884 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x885 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x886 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x887 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x888 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x889 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x890 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x891 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x892 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x893 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x894 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x895 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x896 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x897 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x898 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x899 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x900 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x901 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x902 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x903 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x904 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x905 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x906 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x907 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x908 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x909 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x910 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x911 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x912 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x913 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x914 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x915 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x916 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x917 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x918 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x919 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x920 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x921 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x922 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x923 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x924 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x925 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x926 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x927 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x928 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x929 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x930 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x931 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x932 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x933 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x934 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x935 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x936 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x937 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x938 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x939 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x940 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x941 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x942 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x943 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x944 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x945 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x946 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x947 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x948 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x949 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x950 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x951 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x952 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x953 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x954 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x955 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x956 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x957 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x958 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x959 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x960 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x961 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x962 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x963 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x964 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x965 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x966 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x967 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x968 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x969 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x970 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x971 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x972 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x973 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x974 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x975 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x976 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x977 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x978 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x979 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x980 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x981 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x982 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x983 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x984 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x985 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x986 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x987 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x988 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x989 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x990 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x991 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x992 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x993 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x994 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x995 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x996 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x997 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x998 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x999 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x1000 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x1001 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x1002 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x1003 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x1004 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x1005 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x1006 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x1007 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x1008 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x1009 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x1010 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x1011 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x1012 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x1013 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x1014 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x1015 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x1016 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x1017 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x1018 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x1019 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x1020 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x1021 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x1022 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x1023 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x1024 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x1025 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x1026 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x1027 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x1028 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x1029 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x1030 = Var(within=Reals,bounds=(0,None),initialize=0)
m.obj = Objective(expr=m.x7**2 + m.x8**2 + m.x9**2 + m.x10**2 + m.x11**2 + m.x12**2 + m.x13**2 + m.x14**2 + m.x15**2 +
m.x16**2 + m.x17**2 + m.x18**2 + m.x19**2 + m.x20**2 + m.x21**2 + m.x22**2 + m.x23**2 + m.x24**2
+ m.x25**2 + m.x26**2 + m.x27**2 + m.x28**2 + m.x29**2 + m.x30**2 + m.x31**2 + m.x32**2 + m.x33
**2 + m.x34**2 + m.x35**2 + m.x36**2 + m.x37**2 + m.x38**2 + m.x39**2 + m.x40**2 + m.x41**2 +
m.x42**2 + m.x43**2 + m.x44**2 + m.x45**2 + m.x46**2 + m.x47**2 + m.x48**2 + m.x49**2 + m.x50**2
+ m.x51**2 + m.x52**2 + m.x53**2 + m.x54**2 + m.x55**2 + m.x56**2 + m.x57**2 + m.x58**2 + m.x59
**2 + m.x60**2 + m.x61**2 + m.x62**2 + m.x63**2 + m.x64**2 + m.x65**2 + m.x66**2 + m.x67**2 +
m.x68**2 + m.x69**2 + m.x70**2 + m.x71**2 + m.x72**2 + m.x73**2 + m.x74**2 + m.x75**2 + m.x76**2
+ m.x77**2 + m.x78**2 + m.x79**2 + m.x80**2 + m.x81**2 + m.x82**2 + m.x83**2 + m.x84**2 + m.x85
**2 + m.x86**2 + m.x87**2 + m.x88**2 + m.x89**2 + m.x90**2 + m.x91**2 + m.x92**2 + m.x93**2 +
m.x94**2 + m.x95**2 + m.x96**2 + m.x97**2 + m.x98**2 + m.x99**2 + m.x100**2 + m.x101**2 + m.x102
**2 + m.x103**2 + m.x104**2 + m.x105**2 + m.x106**2 + m.x107**2 + m.x108**2 + m.x109**2 + m.x110
**2 + m.x111**2 + m.x112**2 + m.x113**2 + m.x114**2 + m.x115**2 + m.x116**2 + m.x117**2 + m.x118
**2 + m.x119**2 + m.x120**2 + m.x121**2 + m.x122**2 + m.x123**2 + m.x124**2 + m.x125**2 + m.x126
**2 + m.x127**2 + m.x128**2 + m.x129**2 + m.x130**2 + m.x131**2 + m.x132**2 + m.x133**2 + m.x134
**2 + m.x135**2 + m.x136**2 + m.x137**2 + m.x138**2 + m.x139**2 + m.x140**2 + m.x141**2 + m.x142
**2 + m.x143**2 + m.x144**2 + m.x145**2 + m.x146**2 + m.x147**2 + m.x148**2 + m.x149**2 + m.x150
**2 + m.x151**2 + m.x152**2 + m.x153**2 + m.x154**2 + m.x155**2 + m.x156**2 + m.x157**2 + m.x158
**2 + m.x159**2 + m.x160**2 + m.x161**2 + m.x162**2 + m.x163**2 + m.x164**2 + m.x165**2 + m.x166
**2 + m.x167**2 + m.x168**2 + m.x169**2 + m.x170**2 + m.x171**2 + m.x172**2 + m.x173**2 + m.x174
**2 + m.x175**2 + m.x176**2 + m.x177**2 + m.x178**2 + m.x179**2 + m.x180**2 + m.x181**2 + m.x182
**2 + m.x183**2 + m.x184**2 + m.x185**2 + m.x186**2 + m.x187**2 + m.x188**2 + m.x189**2 + m.x190
**2 + m.x191**2 + m.x192**2 + m.x193**2 + m.x194**2 + m.x195**2 + m.x196**2 + m.x197**2 + m.x198
                          **2 + m.x199**2 + m.x200**2 + m.x201**2
client.setExplain(PacketPokerExplain.ALL)
self.assertTrue(table.joinPlayer(client, reason="MockCreatePlayerJoin"))
self.assertTrue(table.seatPlayer(client, pos))
self.assertTrue(table.buyInPlayer(client, table.game.maxBuyIn()))
table.game.noAutoBlindAnte(serial)
if should_sit:
clients[serial] = client
table.sitPlayer(client)
table.update()
#
# get seats for all players
for pos,serial in enumerate(s_all):
joinAndSeat(serial,serial in s_sit,s_seats[pos])
#
# setup game (forced_dealer does not work because of first_turn == False)
table.game.dealer_seat = 1
table.game.first_turn = False
#
        # reset missed_blind to None for all players, in case not all missed blinds were reset
for serial in s_all:
table.game.serial2player[serial].missed_blind = None
#
# setup 49610
table.game.serial2player[49610].missed_blind = 'small'
#
# setup 52392 (only observer)
s_all.append(52392)
clients_all[52392] = client = self.createPlayer(52392, getReadyToPlay=False, clientClass=MockClientWithExplain)
client.service = self.service
client.setExplain(PacketPokerExplain.ALL)
self.assertTrue(table.joinPlayer(client, reason="MockCreatePlayerJoin"))
def firstGame(packet):
#
# 49610 joins again
clients[49610] = clients_all[49610]
s_sit.append(49610)
self.assertFalse(table.seatPlayer(clients[49610], -1))
table.game.noAutoBlindAnte(49610)
table.sitPlayer(clients[49610])
table.update()
#
# get a seat for 52392
self.assertTrue(table.seatPlayer(clients_all[52392], 3))
self.assertTrue(table.buyInPlayer(clients_all[52392], table.game.maxBuyIn()))
table.game.noAutoBlindAnte(52392)
table.update()
table.game.blind(38108); table.update()
table.game.blind(73933); table.update()
table.quitPlayer(clients_all[31464]); table.update()
joinAndSeat(31464, True, -1,True)
table.scheduleAutoDeal()
d1 = clients[s_sit[0]].waitFor(PACKET_POKER_START)
d1.addCallback(firstGame)
return d1
def test56_strange(self):
self.table = table = self.table9
game = table.game
self.service.has_ladder = False
table.factory.buyInPlayer = lambda *a,**kw: table.game.maxBuyIn()
def addPlayerAndReorder(self, *a,**kw):
was_added = self.addPlayerOrig(*a,**kw)
if was_added:
self.serial2player = OrderedDict(sorted(self.serial2player.items(),key=lambda (k,v):od_order.index(k)))
return was_added
game.addPlayerOrig = game.addPlayer
game.addPlayer = lambda *a,**kw: addPlayerAndReorder(game,*a,**kw)
clients_all = {}
clients = {}
s_sit = [61615, 106548, 1789, 43743]
s_all = [61615, 1789, 43743, 106548, 40712]
s_seats = [0,1,2,4,5,6,7,8,9]
od_order = [61615, 106548, 1789, 43743, 40712]
def checkPlayerlistAndMoney():
pl = game.player_list
mm = game.moneyMap()
for client in clients_all.values():
client_game = client.explain.games.getGame(game.id)
self.assertTrue(pl==client_game.player_list)
self.assertTrue(mm==client_game.moneyMap())
def joinAndSeat(serial,should_sit,pos,explain=True):
clients_all[serial] = client = self.createPlayer(serial, getReadyToPlay=False, clientClass=MockClientWithExplain)
client.service = self.service
if explain:
client.setExplain(PacketPokerExplain.REST)
self.assertTrue(table.joinPlayer(client, reason="MockCreatePlayerJoin"))
self.assertTrue(table.seatPlayer(client, pos))
self.assertTrue(table.buyInPlayer(client, table.game.maxBuyIn()))
table.game.noAutoBlindAnte(serial)
if should_sit:
clients[serial] = client
table.sitPlayer(client)
table.update()
#
# get seats for all players
for pos,serial in enumerate(s_all):
joinAndSeat(serial,serial in s_sit,s_seats[pos])
#
# setup game (forced_dealer does not work because of first_turn == False)
table.game.dealer_seat = 1
table.game.first_turn = False
#
        # reset missed_blind to None for all players, in case not all missed blinds were reset
for serial in s_all:
table.game.serial2player[serial].missed_blind = None
#
        # setup 40712
table.game.serial2player[40712].missed_blind = 'small'
#
def firstGame(packet):
clients[40712] = clients_all[40712]
s_sit.append(40712)
self.assertFalse(table.seatPlayer(clients[40712], -1))
table.game.noAutoBlindAnte(40712)
table.sitPlayer(clients[40712])
table.update()
table.game.blind(106548); table.update()
table.game.blind(61615)
table.update()
log_history.reset()
table.scheduleAutoDeal()
d1 = clients[s_sit[0]].waitFor(PACKET_POKER_START)
d1.addCallback(firstGame)
return d1
def test59_manyDisconnects(self):
self.table = table = self.table9
game = table.game
self.service.has_ladder = False
table.factory.buyInPlayer = lambda *a,**kw: table.game.maxBuyIn()
def addPlayerAndReorder(self, *a,**kw):
was_added = self.addPlayerOrig(*a,**kw)
if was_added:
self.serial2player = OrderedDict(sorted(self.serial2player.items(),key=lambda (k,v):od_order.index(k)))
return was_added
game.addPlayerOrig = game.addPlayer
game.addPlayer = lambda *a,**kw: addPlayerAndReorder(game,*a,**kw)
clients_all = {}
clients = {}
s_sit = [155139, 154961, 26908, 155165]
s_all = [95972, 154961, 155165, 155139, 26908]
s_seats = [0,1,2,3,6]
od_order = [155139, 154961, 26908, 155165, 95972, 152688]
def joinAndSeat(serial,should_sit,pos,explain=False):
clients_all[serial] = client = self.createPlayer(serial, getReadyToPlay=False, clientClass=MockClientWithExplain)
client.service = self.service
if explain:
client.setExplain(PacketPokerExplain.REST)
self.assertTrue(table.joinPlayer(client, reason="MockCreatePlayerJoin"))
self.assertTrue(table.seatPlayer(client, pos))
self.assertTrue(table.buyInPlayer(client, table.game.maxBuyIn()))
table.game.noAutoBlindAnte(serial)
if should_sit:
clients[serial] = client
table.sitPlayer(client)
table.update()
for pos,serial in enumerate(s_all):
joinAndSeat(serial,serial in s_sit,s_seats[pos])
#
# setup game (forced_dealer does not work because of first_turn == False)
table.game.dealer_seat = 3
table.game.first_turn = False
#
        # reset missed_blind to None for all players, in case not all missed blinds were reset
for serial in s_all:
table.game.serial2player[serial].missed_blind = None
table.game.serial2player[95972].missed_blind = 'small'
table.game.serial2player[95972].money = 0
def firstGame(packet):
# player 95972 rebuys and sits in
table.rebuyPlayerRequest(95972, 0)
table.sitPlayer(clients_all[95972])
clients[95972] = clients_all[95972]
table.update()
# a new player arrives
joinAndSeat(152688, False, 5)
# two players time out
table.timer_info["playerTimeout"].cancel()
table.playerTimeoutTimer(154961)
table.timer_info["playerTimeout"].cancel()
table.playerTimeoutTimer(155165)
# one player pays the small blind
game.blind(155139)
table.update()
# the new player leaves again
table.disconnectPlayer(clients_all[152688])
# another player times out
table.timer_info["playerTimeout"].cancel()
table.playerTimeoutTimer(26908)
log_history.reset()
table.scheduleAutoDeal()
d1 = clients[s_sit[0]].waitFor(PACKET_POKER_START)
d1.addCallback(firstGame)
return d1
def test60_minArgEmpty(self):
self.table = table = self.table9
game = table.game
self.service.has_ladder = False
table.factory.buyInPlayer = lambda *a,**kw: table.game.maxBuyIn()
def addPlayerAndReorder(self, *a,**kw):
was_added = self.addPlayerOrig(*a,**kw)
if was_added:
self.serial2player = OrderedDict(sorted(self.serial2player.items(),key=lambda (k,v):od_order.index(k)))
return was_added
game.addPlayerOrig = game.addPlayer
game.addPlayer = lambda *a,**kw: addPlayerAndReorder(game,*a,**kw)
clients_all = {}
clients = {}
s_sit = [154625, 43850, 75617, 56120]
s_all = [43850, 155397, 154625, 75617, 56120, 29546, 155411]
s_seats = [0,1,2,3,4,5,6,7,8]
od_order = [154625, 43850, 75617, 56120, 155397, 29546, 155411]
def joinAndSeat(serial,should_sit,pos,explain=True):
clients_all[serial] = client = self.createPlayer(serial, getReadyToPlay=False, clientClass=MockClientWithExplain)
client.service = self.service
if explain:
client.setExplain(PacketPokerExplain.REST)
self.assertTrue(table.joinPlayer(client, reason="MockCreatePlayerJoin"))
self.assertTrue(table.seatPlayer(client, pos))
self.assertTrue(table.buyInPlayer(client, table.game.maxBuyIn()))
table.game.noAutoBlindAnte(serial)
if should_sit:
clients[serial] = client
table.sitPlayer(client)
table.update()
for pos,serial in enumerate(s_all):
joinAndSeat(serial,serial in s_sit,s_seats[pos])
#
# setup game (forced_dealer does not work because of first_turn == False)
table.game.dealer_seat = 3
table.game.first_turn = False
#
        # reset missed_blind to None for all players, in case not all missed blinds were reset
for serial in s_all:
table.game.serial2player[serial].missed_blind = None
for richer in (43850,75617,56120):
game.serial2player[richer].money *= 2
def firstGame(packet):
#
# rebuy of 155411
table.rebuyPlayerRequest(155411, 0)
table.sitPlayer(clients_all[155411])
clients[155411] = clients_all[155411]
table.update()
game.blind(43850); table.update()
game.blind(154625); table.update()
game.blind(155411); table.update()
# 155397 disconnects
table.disconnectPlayer(clients_all[155397])
game.callNraise(154625, game.serial2player[154625].money)
# 155397 joins again
joinAndSeat(155397, False, 1)
game.call(75617); table.update()
game.call(56120); table.update()
game.call(155411); table.update()
game.call(43850); table.update()
# round finished
game.check(43850); table.update()
game.check(75617); table.update()
game.check(56120); table.update()
# round finished
game.check(43850); table.update()
game.check(75617); table.update()
game.check(56120); table.update()
# round finished
game.check(43850); table.update()
game.check(75617); table.update()
game.check(56120); table.update()
# game finished. error is caused here.
log_history.reset()
table.scheduleAutoDeal()
d1 = clients[s_sit[0]].waitFor(PACKET_POKER_START)
d1.addCallback(firstGame)
return d1
def test61_incongruentMoney(self):
self.table = table = self.table9
game = table.game
self.service.has_ladder = False
table.factory.buyInPlayer = lambda *a,**kw: table.game.maxBuyIn()
def addPlayerAndReorder(self, *a,**kw):
was_added = self.addPlayerOrig(*a,**kw)
if was_added:
self.serial2player = OrderedDict(sorted(self.serial2player.items(),key=lambda (k,v):od_order.index(k)))
return was_added
game.addPlayerOrig = game.addPlayer
game.addPlayer = lambda *a,**kw: addPlayerAndReorder(game,*a,**kw)
clients_all = {}
clients = {}
s_sit = [82247, 58255, 117776, 55572, 105398]
s_all = [105398, 114305, 58255, 82247, 55572, 117776, 102475, 29047]
s_seats = [0,1,2,3,4,5,6,7,8]
od_order = [82247, 58255, 117776, 55572, 105398, 114305, 102475, 29047]
def joinAndSeat(serial, should_sit, pos, explain=True):
clients_all[serial] = client = self.createPlayer(serial, getReadyToPlay=False, clientClass=MockClientWithExplain)
client.service = self.service
if explain: client.setExplain(PacketPokerExplain.REST)
self.assertTrue(table.joinPlayer(client, reason="MockCreatePlayerJoin"))
self.assertTrue(table.seatPlayer(client, pos))
self.assertTrue(table.buyInPlayer(client, table.game.maxBuyIn()))
table.game.noAutoBlindAnte(serial)
if should_sit:
clients[serial] = client
table.sitPlayer(client)
table.update()
def joinAgain(serial, pos, explain=True):
clients_all[serial] = client = self.createPlayer(serial, getReadyToPlay=False, clientClass=MockClientWithExplain)
client.service = self.service
if explain: client.setExplain(PacketPokerExplain.REST)
self.assertTrue(table.joinPlayer(client, reason="MockCreatePlayerJoin"))
self.assertTrue(table.seatPlayer(client, pos))
self.assertTrue(table.buyInPlayer(client, table.game.maxBuyIn()))
for pos,serial in enumerate(s_all):
joinAndSeat(serial,serial in s_sit,s_seats[pos])
#
# setup game (forced_dealer does not work because of first_turn == False)
table.game.dealer_seat = 4
table.game.first_turn = False
#
        # reset missed_blind to None for all players, in case not all missed blinds were reset
for serial in s_all:
table.game.serial2player[serial].missed_blind = None
game.serial2player[82247].money *= 2
game.serial2player[29047].money *= 3
game.serial2player[114305].money *= 4
for player_serial, player in game.serial2player.iteritems():
for c in clients_all.itervalues():
c.explain.games.getGame(game.id).serial2player[player_serial].money = player.money
table.game.serial2player[114305].missed_blind = 'big'
table.game.serial2player[102475].missed_blind = 'small'
table.game.serial2player[29047].missed_blind = 'small'
def firstGame(packet):
# seated
#
#
# players 29047,114305 sit in again
table.seatPlayer(clients_all[29047], -1)
table.sitPlayer(clients_all[29047])
table.seatPlayer(clients_all[114305], -1)
table.sitPlayer(clients_all[114305])
# blinds
game.blind(105398); table.update()
game.blind(58255); table.update()
game.blind(29047); table.update()
game.blind(114305); table.update()
log_history.reset()
# player 102475 leaves
table.leavePlayer(clients_all[102475]); table.update()
joinAndSeat(102475, True, -1); table.update()
game.check(114305); table.update()
game.check(58255); table.update()
game.callNraise(82247, game.serial2player[82247].money); table.update()
game.fold(55572); table.update()
game.fold(117776); table.update()
game.call(29047); table.update()
game.fold(105398); table.update()
game.call(114305); table.update()
game.fold(58255); table.update()
game.callNraise(29047, game.serial2player[29047].money); table.update()
game.call(114305); table.update()
game.fold(82247); table.update()
d ={}
for player in game.serial2player.values():
d[player.serial] = player.money
self.assertEqual(d, game.showdown_stack[0]['serial2money'])
for serial, client in clients_all.iteritems():
ex_money_map = client.explain.games.getGame(game.id).moneyMap()
self.assertEquals(game.moneyMap(), ex_money_map, 'moneyMap not equal for serial %d' % serial)
log_history.reset()
table.scheduleAutoDeal()
d1 = clients[s_sit[0]].waitFor(PACKET_POKER_START)
d1.addCallback(firstGame)
return d1
def test62_doubleTheMoney(self):
self.table = table = self.table9
game = table.game
self.service.has_ladder = False
table.factory.buyInPlayer = lambda *a,**kw: table.game.buyIn()
def addPlayerAndReorder(self, *a,**kw):
was_added = self.addPlayerOrig(*a,**kw)
if was_added:
self.serial2player = OrderedDict(sorted(self.serial2player.items(),key=lambda (k,v):od_order.index(k)))
return was_added
game.addPlayerOrig = game.addPlayer
game.addPlayer = lambda *a,**kw: addPlayerAndReorder(game,*a,**kw)
clients_all = {}
clients = {}
s_sit = [158160, 11905, 155177, 65163, 60029, 30640, 79069]
s_all = [158428, 11905, 155177, 65163, 158160, 79069, 60029, 30640]
s_seats = [0,1,2,3,4,5,6,7,8]
od_order = [158428, 11905, 155177, 65163, 158160, 79069, 60029, 30640]
def joinAndSeat(serial, should_sit, pos, explain=True):
clients_all[serial] = client = self.createPlayer(serial, getReadyToPlay=False, clientClass=MockClientWithExplain)
client.service = self.service
if explain: client.setExplain(PacketPokerExplain.REST)
self.assertTrue(table.joinPlayer(client, reason="MockCreatePlayerJoin"))
if table.seatPlayer(client, pos):
self.assertTrue(table.buyInPlayer(client, game.maxBuyIn()))
table.game.noAutoBlindAnte(serial)
            if should_sit:
                clients[serial] = client
                table.sitPlayer(client)
            table.update()
# -*- coding:utf-8 -*-
"""
Copyright (c) 2013-2016 SYPH, All Rights Reserved.
-----------------------------------------------------------
Author: S.JunPeng
Date: 2016/12/22
Change Activity:
_==/ i i \==_
/XX/ |\___/| \XX\
/XXXX\ |XXXXX| /XXXX\
|XXXXXX\_ _XXXXXXX_ _/XXXXXX|
XXXXXXXXXXXxxxxxxxXXXXXXXXXXXxxxxxxxXXXXXXXXXXX
|XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX|
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
|XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX|
XXXXXX/^^^^^\XXXXXXXXXXXXXXXXXXXXX/^^^^^\XXXXXX
|XXX| \XXX/^^\XXXXX/^^\XXX/ |XXX|
\XX\ \X/ \XXX/ \X/ /XX/
"\ " \X/ " /"
"""
import json
import logging
import datetime
import re
from braces.views import CsrfExemptMixin
from django.http.response import HttpResponse
from django.views.generic import View
from apps.etl.models import *
from apps.common.models import FeatureCodeMapping
from apps.datasource.models import *
from vendor.utils.pagination import ExtPaginator
from vendor.utils.commons import json_response
from studio.feature_comment_handle.exec_chain_handle import func_exec_chain
from studio.feature_comment_handle.jsonparse_handle import JSONPathParser
from vendor.utils.defaults import *
logger = logging.getLogger('apps.interface')
class FeatureConfig(CsrfExemptMixin, View):
def get(self, request, featurename, page, *args, **kwargs):
"""获取特征基础配置信息"""
data = {
'status': 1,
'message': 'success'
}
try:
current_page = page
page_size = 10
if featurename == 'all':
feature_config_obj = FeatureConf.objects.values()
else:
feature_config_obj = FeatureConf.objects.filter(feature_name=featurename).values()
feature_config_count = feature_config_obj.count()
paginator = ExtPaginator(list(feature_config_obj), page_size, feature_config_count)
object_list = paginator.page(current_page)
map(lambda x: [
x.update({"created_on": x["created_on"].strftime('%Y-%m-%d %H:%M:%S') if x["created_on"] else ''}),
x.update({"updated_on": x["updated_on"].strftime('%Y-%m-%d %H:%M:%S') if x["updated_on"] else ''}),
x.update(
{"feature_select_value": x["feature_select_value"] if x["feature_select_value"] else ''}),
x.update({"feature_type_desc": FeatureType.objects.get(pk=x["feature_type_id"]).feature_type_desc if x[
"feature_type_id"] else ''}),
x.update(
{"feature_rule_type_desc": FeatureRuleType.objects.get(
pk=x["feature_rule_type_id"]).feature_type_desc if x[
"feature_rule_type_id"] else ''}),
x.update(
{"feature_card_type_desc": FeatureCardType.objects.get(
pk=x["feature_card_type_id"]).feature_type_desc if x[
"feature_card_type_id"] else ''}),
], object_list)
page_num = paginator.num_pages
page_range = paginator.page_range
res_data = dict(
total_count=feature_config_count,
page_num=page_num,
current_page=current_page,
config_list=list(object_list),
page_range=page_range
)
data.update({"res_data": res_data})
except Exception as e:
logger.error(e.message)
data = {
'status': '0',
'message': e.message
}
return json_response(data)
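    # For reference, a successful GET response assembled above has this shape
    # (the values below are illustrative, not taken from a real deployment):
    #
    #   {"status": 1, "message": "success",
    #    "res_data": {"total_count": 23, "page_num": 3, "current_page": 1,
    #                 "config_list": [...], "page_range": [1, 2, 3]}}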
def put(self, request, item, featureid, *args, **kwargs):
"""更新特征基础配置信息"""
data = {
'status': 1,
'message': 'success'
}
try:
body = json.loads(request.body)
logger.info("update feature config request data=%s", body)
updated_on = datetime.datetime.now()
if item == 'feature_info':
feature_name_cn = body.get('feature_name_cn')
if not feature_name_cn:
raise Exception(u'特征中文名不能为空')
feature_type = body.get('feature_type_id')
feature_rule_type = body.get('feature_rule_type_id')
feature_card_type = body.get('feature_card_type_id')
feature_type = FeatureType.objects.get(pk=feature_type) if feature_type else None
feature_rule_type = FeatureRuleType.objects.get(pk=feature_rule_type) if feature_rule_type else None
feature_card_type = FeatureCardType.objects.get(pk=feature_card_type) if feature_card_type else None
FeatureConf.objects.filter(pk=int(featureid)).update(
feature_name=body.get('feature_name'),
feature_name_cn=body.get('feature_name_cn'),
feature_type=feature_type,
feature_rule_type=feature_rule_type,
feature_card_type=feature_card_type,
feature_select_value=body.get('feature_select_value'),
updated_on=updated_on,
is_delete=body.get('is_delete')
)
elif item == 'feature_source':
data_identity = body.get('data_identity')
if not data_identity:
raise Exception(u'特征数据源不能为空!')
FeatureConf.objects.filter(pk=int(featureid)).update(
collect_type=body.get('collect_type'),
data_identity=body.get('data_identity'),
updated_on=updated_on
)
else:
raise Exception('url error')
except Exception as e:
logger.error(e.message)
data = {
'status': '0',
'message': e.message
}
return json_response(data)
def post(self, request, item, *args, **kwargs):
"""添加特征基本信息配置"""
data = {
'status': 1,
'message': 'success'
}
try:
body = json.loads(request.body)
logger.info("update feature config request data=%s", body)
created_on = datetime.datetime.now()
if item == 'feature_info':
feature_name = body.get('feature_name')
feature_name_cn = body.get('feature_name_cn')
                if not feature_name or not feature_name_cn:
raise Exception(u'特征名和特征中文名不能为空!')
count = FeatureConf.objects.filter(feature_name=feature_name)
if count:
raise Exception('%s already exists!' % feature_name)
feature_type = body.get('feature_type_id')
feature_rule_type = body.get('feature_rule_type_id')
feature_card_type = body.get('feature_card_type_id')
feature_type = FeatureType.objects.get(pk=feature_type) if feature_type else None
feature_rule_type = FeatureRuleType.objects.get(pk=feature_rule_type) if feature_rule_type else None
feature_card_type = FeatureCardType.objects.get(pk=feature_card_type) if feature_card_type else None
FeatureConf(
feature_name=body.get('feature_name'),
feature_name_cn=body.get('feature_name_cn'),
feature_type=feature_type,
feature_rule_type=feature_rule_type,
feature_card_type=feature_card_type,
feature_select_value=body.get('feature_select_value'),
is_delete=body.get('is_delete'),
updated_on=created_on,
created_on=created_on
).save()
else:
raise Exception('url error')
except Exception as e:
logger.error(e.message)
data = {
'status': '0',
'message': e.message
}
return json_response(data)
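    # A minimal POST payload for the 'feature_info' branch above might look like
    # this (field names follow the body.get() calls; the values are assumptions
    # used purely for illustration):
    #
    #   {"feature_name": "user_age", "feature_name_cn": "age of user",
    #    "feature_type_id": 1, "feature_rule_type_id": 2, "feature_card_type_id": 1,
    #    "feature_select_value": "", "is_delete": false}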
class FeatureShuntConfig(CsrfExemptMixin, View):
def get(self, request, featurename, page, *args, **kwargs):
"""获取分流特征基础配置信息"""
data = {
'status': 1,
'message': 'success'
}
try:
current_page = page
page_size = 500
if featurename == 'all':
feature_config_obj = FeatureShuntConf.objects.values()
else:
feature_config_obj = FeatureShuntConf.objects.filter(feature_name=featurename).values()
feature_config_count = feature_config_obj.count()
paginator = ExtPaginator(list(feature_config_obj), page_size, feature_config_count)
object_list = paginator.page(current_page)
map(lambda x: [
x.update({"created_on": x["created_on"].strftime('%Y-%m-%d %H:%M:%S') if x["created_on"] else ''}),
x.update({"updated_on": x["updated_on"].strftime('%Y-%m-%d %H:%M:%S') if x["updated_on"] else ''}),
x.update({"shunt_value": x["shunt_value"] if x["shunt_value"] else ''}),
], object_list)
page_num = paginator.num_pages
page_range = paginator.page_range
res_data = dict(
total_count=feature_config_count,
page_num=page_num,
current_page=current_page,
config_list=list(object_list),
page_range=page_range
)
data.update({"res_data": res_data})
except Exception as e:
logger.error(e.message)
data = {
'status': '0',
'message': e.message
}
return json_response(data)
def put(self, request, featureid, *args, **kwargs):
"""更新分流特征基础配置信息"""
data = {
'status': 1,
'message': 'success'
}
try:
body = json.loads(request.body)
logger.info("update shunt feature config request data=%s", body)
feature_name = body.get('feature_name')
shunt_key = body.get('shunt_key')
shunt_type = body.get('shunt_type')
shunt_value = body.get('shunt_value')
data_identity = body.get('data_identity')
is_delete = body.get('is_delete')
if not (feature_name and shunt_key and data_identity and shunt_type and shunt_value):
raise Exception("all values don't is null !")
if not isinstance(eval(body['shunt_value']), tuple):
raise Exception(u'数据源适应范围必须为元组类型!')
updated_on = datetime.datetime.now()
FeatureShuntConf.objects.filter(pk=int(featureid)).update(
feature_name=feature_name,
shunt_key=shunt_key,
data_identity=data_identity,
shunt_type=shunt_type,
shunt_value=shunt_value,
updated_on=updated_on,
is_delete=is_delete
)
except Exception as e:
logger.error(e.message)
data = {
'status': '0',
'message': e.message
}
return json_response(data)
def post(self, request, *args, **kwargs):
"""添加分流特征基本信息配置"""
data = {
'status': 1,
'message': 'success'
}
try:
body = json.loads(request.body)
logger.info("add shunt feature config request data=%s", body)
feature_name = body.get('feature_name')
shunt_key = body.get('shunt_key')
shunt_type = body.get('shunt_type')
shunt_value = body.get('shunt_value')
data_identity = body.get('data_identity')
is_delete = body.get('is_delete')
if not (feature_name and shunt_key and data_identity and shunt_type and shunt_value):
raise Exception("all values don't is null !")
if not isinstance(eval(body['shunt_value']), (tuple, list)):
raise Exception(u'数据源适应范围必须为元组类型!')
if FeatureShuntConf.objects.filter(feature_name=feature_name).count():
raise Exception('this feature_name already exists!')
FeatureShuntConf(
feature_name=feature_name,
shunt_key=shunt_key,
data_identity=data_identity,
shunt_type=shunt_type,
shunt_value=tuple(eval(shunt_value)),
is_delete=is_delete
).save()
except Exception as e:
logger.error(e.message)
data = {
'status': '0',
'message': e.message
}
return json_response(data)
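    # Note on parsing: put() and post() above turn the submitted shunt_value
    # string into a Python object with eval(). A stricter sketch using only the
    # standard library (assuming shunt_value is always a literal tuple or list)
    # would be:
    #
    #   import ast
    #   shunt_value = ast.literal_eval(body['shunt_value'])
    #   if not isinstance(shunt_value, (tuple, list)):
    #       raise Exception('shunt_value must be a tuple or list literal')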
class FeatureRelevanceConfig(CsrfExemptMixin, View):
def get(self, request, featurename, page, *args, **kwargs):
"""获取依赖特征基础配置信息"""
data = {
'status': 1,
'message': 'success'
}
try:
current_page = page
page_size = 100
if featurename == 'all':
feature_config_obj = FeatureRelevanceConf.objects.values()
else:
feature_config_obj = FeatureRelevanceConf.objects.filter(
feature_name=featurename
).values()
feature_config_count = feature_config_obj.count()
paginator = ExtPaginator(list(feature_config_obj), page_size, feature_config_count)
object_list = paginator.page(current_page)
map(lambda x: [
x.update({"created_on": x["created_on"].strftime('%Y-%m-%d %H:%M:%S') if x["created_on"] else ''}),
x.update({"updated_on": x["updated_on"].strftime('%Y-%m-%d %H:%M:%S') if x["updated_on"] else ''}),
], object_list)
page_num = paginator.num_pages
page_range = paginator.page_range
res_data = dict(
total_count=feature_config_count,
page_num=page_num,
current_page=current_page,
config_list=list(object_list),
page_range=page_range
)
data.update({"res_data": res_data})
except Exception as e:
logger.error(e.message)
data = {
'status': '0',
'message': e.message
}
return json_response(data)
def put(self, request, featureid, *args, **kwargs):
"""更新依赖特征基础配置信息"""
data = {
'status': 1,
'message': 'success'
}
try:
body = json.loads(request.body)
logger.info("update relevance feature config request data=%s", body)
feature_name = body['feature_name']
depend_feature = body['depend_feature']
data_identity = body['data_identity']
is_delete = body['is_delete']
updated_on = datetime.datetime.now()
FeatureRelevanceConf.objects.filter(pk=int(featureid)).update(
feature_name=feature_name,
depend_feature=depend_feature,
data_identity=data_identity,
updated_on=updated_on,
is_delete=is_delete
)
except Exception as e:
logger.error(e.message)
data = {
'status': '0',
'message': e.message
}
return json_response(data)
def post(self, request, *args, **kwargs):
"""添加依赖特征基本信息配置"""
data = {
'status': 1,
'message': 'success'
}
try:
path = request.path
if 'check' in path:
con = FeatureRelevanceConf.objects.filter(is_delete=False)
for i in con:
if i.depend_feature:
depend_feature_list = i.depend_feature.split(',')
for j in depend_feature_list:
con = FeatureRelevanceConf.objects.filter(is_delete=False, feature_name=j).count()
if not con:
raise Exception("%s dependent feature %s, %s is not available in relevance table !" % (
i.feature_name, j, j))
else:
body = json.loads(request.body)
logger.info("add shunt feature config request data=%s", body)
feature_name = body['feature_name']
depend_feature = body['depend_feature']
data_identity = body['data_identity']
is_delete = body['is_delete']
updated_on = datetime.datetime.now()
if FeatureRelevanceConf.objects.filter(feature_name=feature_name).count():
raise Exception('this feature_name already exists!')
FeatureRelevanceConf(
feature_name=feature_name,
depend_feature=depend_feature,
data_identity=data_identity,
updated_on=updated_on,
is_delete=is_delete
).save()
except Exception as e:
logger.error(e.message)
data = {
'status': '0',
'message': e.message
}
return json_response(data)
class RemoteConfig(CsrfExemptMixin, View):
def get(self, request, data_identity, page, *args, **kwargs):
"""获取数据源基础配置信息"""
data = {
'status': 1,
'message': 'success'
}
try:
current_page = page
page_size = 100
if data_identity == 'all':
data_identity_obj = DsInterfaceInfo.objects.values()
else:
data_identity_obj = DsInterfaceInfo.objects.filter(
data_identity=data_identity
).values()
data_identity_count = data_identity_obj.count()
paginator = ExtPaginator(list(data_identity_obj), page_size, data_identity_count)
object_list = paginator.page(current_page)
map(lambda x: [
x.update({"created_on": x["created_on"].strftime('%Y-%m-%d %H:%M:%S') if x["created_on"] else ''}),
x.update({"updated_on": x["updated_on"].strftime('%Y-%m-%d %H:%M:%S') if x["updated_on"] else ''}),
# x.update({"must_data": eval(x["must_data"]) if x["must_data"] else ''}),
], object_list)
page_num = paginator.num_pages
page_range = paginator.page_range
res_data = dict(
total_count=data_identity_count,
page_num=page_num,
current_page=current_page,
config_list=list(object_list),
page_range=page_range
)
data.update({"res_data": res_data})
except Exception as e:
logger.error(e.message)
data = {
'status': '0',
'message': e.message
}
return json_response(data)
def put(self, request, id, *args, **kwargs):
"""更新数据源基础配置信息"""
data = {
'status': 1,
'message': 'success'
}
try:
body = json.loads(request.body)
logger.info("update datasource config request data=%s", body)
name = body['name']
data_identity = body['data_identity']
data_source = body['data_source_id']
data_origin_type = body['data_origin_type']
route = body['route']
method = body['method']
comment = body['comment']
common_data = body['common_data']
must_data = body['must_data']
is_need_token = body['is_need_token']
is_need_encrypt = body['is_need_encrypt']
is_async = body['is_async']
encrypt_type = body['encrypt_type']
is_delete = body['is_delete']
updated_on = datetime.datetime.now()
DsInterfaceInfo.objects.filter(pk=int(id)).update(
name=name,
data_identity=data_identity,
data_source=data_source,
data_origin_type=data_origin_type,
route=route,
method=method,
comment=comment,
common_data=common_data,
must_data=must_data,
is_need_token=is_need_token,
is_need_encrypt=is_need_encrypt,
is_async=is_async,
encrypt_type=encrypt_type,
is_delete=is_delete,
updated_on=updated_on
)
except Exception as e:
logger.error(e.message)
data = {
'status': '0',
'message': 'error'
}
return json_response(data)
def post(self, request, *args, **kwargs):
"""添加数据源基本信息配置"""
data = {'status': 1,
'message': 'success'}
try:
body = json.loads(request.body)
logger.info("add datasource config request data=%s", body)
name = body['name']
data_identity = body['data_identity']
data_source = int(body['data_source_id'])
data_origin_type = body['data_origin_type']
route = body['route']
method = body['method']
            comment = body['comment']
#!/usr/bin/python
import glob
import functools
import os
import sys
import engine
def cache(f):
"""A decorator to cache results for a given function call.
Note: The caching is only done on the first argument, usually "self".
"""
ret = {}
def _Wrapper(*args, **kwargs):
self = args[0]
if self not in ret:
ret[self] = f(*args, **kwargs)
return ret[self]
return _Wrapper
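# Illustrative use of @cache (the class below is hypothetical and not part of
# this build file); the result is stored once per instance, keyed only on the
# first argument:
#
#   class SpecFile(object):
#       @cache
#       def Parse(self, verbose):
#           ...  # expensive work, runs at most once per SpecFile instance
#
#   s = SpecFile()
#   s.Parse(True)    # computed and stored, keyed on `s`
#   s.Parse(False)   # returns the cached value; `verbose` does not affect the key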
VERBOSE = True
JAVA_BINARY_FLAGS_DEFAULT = False
printed = set()
def pdep(a, b):
"""Interceptor for printing dependency links.
Args:
a: Source dependency
b: Destination dependency
"""
if (a, b) in printed:
return
if a == b:
return
if VERBOSE:
print "\"%s\" -> \"%s\"" % (a, b)
printed.add((a, b))
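# Example of the edge lines emitted when VERBOSE is on (this is Graphviz "dot"
# edge syntax; the target names here are illustrative only):
#   "Core/src=com/foo:app" -> "Core/src=com/foo:lib"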
TOPLEVEL = "_top"
def abs_target(target, default_module=None):
"""Determines the canonical representation of a target, while
allowing syntactic sugar for the user.
    Args:
      target: e.g. "src=com/yext/subscriptions:app", "src/com/yext/subscriptions:app"
default_module: optional module that can be used to resolve a module-less path.
Returns:
The fully qualified target name.
Raises:
Error if no module can be found and none is provided.
"""
# If the target is fully-specified, just return it.
if "=" in target:
return target
# If the target doesn't have a : in it, assume that the "file" in
# the directory is actually the target name.
if ":" not in target and "/" in target:
d, t = target.rsplit("/", 1)
target = "%s:%s" % (d, t)
# If the target ends with .java, then likely the user just
# autocompleted and wants the target without .java, so strip it
# out.
if target.endswith(".java"):
target = target[:-5]
# Try splitting on 'src' directory
if "src/" in target:
module, rt = target.split("src/")
return "%ssrc=%s" % (module, rt)
# The test directory doesn't have a src dir, so add a special
# case syntactic sugar to add "=" after test.
elif target.startswith("test/"):
return "test=%s" % (target[5:])
# Ran out of guesses, use default module
elif default_module is not None:
return "%s=%s" % (default_module, target)
else:
raise Exception("No module could be determined for target: " + target)
class DataHolder(object):
"""
Class that is the holder of all objects specified in spec files.
Keeps track of where they are specified, what they output, and
intermediates their interactions with the Engine.
"""
# path:name -> fake-o data holders, which know how to insert things
# into the engine.
_registered = {}
# Set of dependency FullName's whose files have already been loaded.
_processed = set()
def __init__(self, module, path, name):
"""Constructor.
Args:
module: The module for this target (e.g. Core)
path: The path for this target
name: The target name within the path
"""
self.module = module
self.path = path
self.name = name
def FullName(self):
"""Returns the fully-qualified target name."""
return "%s=%s:%s" % (self.module, self.path, self.name)
def Apply(self, e):
"""Inserts any necessary rules into the engine.
Args:
e: An engine.Engine instance to insert rules into.
Returns: A file name which will be generated by the last
engine rule that was inserted.
"""
raise NotImplementedError
def TopApply(self, e):
"""See Apply.
This function is called instead of Apply when a rule is being
built explicitly at the command line.
"""
raise NotImplementedError
def LoadSpecs(self):
"""Loads spec files needed to resolve this target's dependencies."""
raise NotImplementedError
def _LoadSpecs(self, deps):
"""Loads the spec files that define the passed in dependencies.
Args:
deps: List of target names that this rule depends on.
"""
deps = list(self.Canonicalize(deps))
if VERBOSE:
print "loading: %s" % str(deps)
for dep in deps:
pdep(self.FullName(), dep)
while len(deps) > 0:
depname = deps.pop()
try:
LoadTargetSpec(self.module, depname)
except:
print "%s: error loading %s=%s" % (self.FullName(), self.module, depname)
raise
dep = DataHolder.Get(self.module, depname)
assert dep, "%s not found by %s:%s" % (depname, self.path, self.name)
if dep.FullName() in self._processed:
continue
self._processed.add(dep.FullName())
if isinstance(dep, JavaLibrary) and dep.deps:
ds = list(dep.Canonicalize(dep.deps))
deps.extend(ds)
for d in ds:
pdep(dep.FullName(), d)
else:
dep.LoadSpecs()
def Canonicalize(self, deps):
"""Fully-qualifies any non-fully-qualified dependencies in the list.
Uses the current module when one isn't provided in the dependency string.
Args:
deps: List of strings of dependency specifiers
Returns: A list of fully-qualified dependency strings.
"""
for dep in deps:
yield abs_target(dep, self.module)
@classmethod
def Register(cls, module, path, name, obj):
"""Registers a given target in the global registry."""
fname = "%s=%s:%s" % (module, path, name)
assert fname not in cls._registered, fname
assert isinstance(obj, DataHolder)
cls._registered[fname] = obj
@classmethod
def Get(cls, module, fname):
"""Retrieves a target from the global registry."""
fname = abs_target(fname, default_module=module)
return cls._registered.get(fname)
@classmethod
def Go(cls, targets):
"""Builds everything starting with the given targets as the top-level.
Args:
targets: List of string specifiers of targets resolved in top-level
scope
Returns: True if all the targets built successfully, False otherwise
"""
done = set()
e = engine.Engine()
target_names = []
for target in targets:
holder = cls.Get(TOPLEVEL, target)
if not holder:
print >>sys.stderr, "Unknown target", target
continue
ret = holder.TopApply(e)
if ret:
target_names.append(ret)
# Make sure to apply all of the Generate targets. Otherwise
# their out's will never get considered.
for obj in cls._registered.itervalues():
if isinstance(obj, Generate):
obj.Apply(e)
e.ComputeDependencies()
for target in target_names:
e.BuildTarget(e.GetTarget(target))
return e.Go()
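# Minimal usage sketch (assumption; spec loading helpers and target names are
# illustrative only): after the relevant spec files have been loaded and the
# targets registered, a build is driven by passing top-level target names to Go.
#
#   LoadTargetSpec(TOPLEVEL, "Core/src=com/foo:app")   # registers DataHolders
#   ok = DataHolder.Go(["Core/src=com/foo:app"])       # True if all targets built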
class JavaBinary(DataHolder):
"""Class that holds a java_binary target."""
def __init__(self, module, path, name, main, deps, flags=None, premain=None):
DataHolder.__init__(self, module, path, name)
self.main = main
self.deps = deps
self.flags = flags if flags is not None else JAVA_BINARY_FLAGS_DEFAULT
self.premain = premain
@cache
def Apply(self, e):
# Build up a list of source files, jars, and data files that
# we need to get.
sources = set()
jars = self.jars = set()
datas = set()
deps = list(self.deps)
processed = set()
while len(deps) > 0:
depname = deps.pop()
dep = DataHolder.Get(self.module, depname)
assert dep, "%s not found" % depname
if dep.FullName() in processed:
continue
assert isinstance(dep, JavaLibrary), '%s is not a library' % depname
dep.Apply(e)
if dep.files:
sources.update(dep.files)
if dep.jars:
jars.update(dep.jars)
if dep.data:
datas.update(dep.data)
if dep.deps:
deps.extend(dep.Canonicalize(dep.deps))
processed.add(dep.FullName())
if self.flags:
dep = DataHolder.Get(
self.module, "Core/src=com/alphaco/util/flags:flag_processor")
dep.Apply(e)
c = engine.JavaCompile(self.path, self.name, sources, jars,
datas, self.main, self.flags)
e.AddTarget(c)
return c.Name()
TopApply = Apply
def LoadSpecs(self):
self._LoadSpecs(self.deps)
if self.flags:
self._LoadSpecs(
["Core/src=com/alphaco/util/flags:flag_processor"])
class JavaJar(DataHolder):
"""Class that holds a java_deploy target."""
def __init__(self, module, path, name, binary, premain=None):
DataHolder.__init__(self, module, path, name)
self.binary = binary
self.premain = premain
@cache
def Apply(self, e):
dep = DataHolder.Get(self.module, self.binary)
assert dep, "%s not found" % self.binary
assert isinstance(dep, JavaBinary)
dep.Apply(e)
#name = dep.Apply(e)
#target = e.GetTarget(name)
j = engine.JarBuild(self.path, self.name + ".jar", dep.name,
dep.jars, dep.main, self.premain or dep.premain)
e.AddTarget(j)
return j.Name()
TopApply = Apply
def LoadSpecs(self):
self._LoadSpecs([self.binary])
class JavaLibrary(DataHolder):
"""Class that holds a java_library target."""
def __init__(self, module, path, name, files, jars, deps, data):
DataHolder.__init__(self, module, path, name)
self.jars = jars
self.deps = deps
self.data = data
self.files = files
@cache
def TopApply(self, e):
sources = set(self.files)
jars = self.jars = set(self.jars)
datas = set(self.data)
deps = list(self.deps)
processed = set()
while len(deps) > 0:
depname = deps.pop()
dep = DataHolder.Get(self.module, depname)
assert dep, "%s not found" % depname
if dep.FullName() in processed:
continue
assert isinstance(dep, JavaLibrary)
dep.Apply(e)
if dep.files:
sources.update(dep.files)
if dep.jars:
jars.update(dep.jars)
if dep.data:
datas.update(dep.data)
if dep.deps:
deps.extend(dep.Canonicalize(dep.deps))
processed.add(dep.FullName())
c = engine.JavaCompile(self.path, os.path.join(self.path, self.name),
sources, jars,
datas, "", False)
e.AddTarget(c)
return c.Name()
def Apply(self, e):
pass
def LoadSpecs(self):
if self.deps:
self._LoadSpecs(self.deps)
class JavaWar(DataHolder):
"""Class that holds a java_war target."""
def __init__(self, module, path, name, data, binary):
DataHolder.__init__(self, module, path, name)
self.data = data
self.binary = binary
@cache
def Apply(self, e):
dep = DataHolder.Get(self.module, self.binary)
assert dep, "%s not found" % self.binary
assert isinstance(dep, JavaBinary)
dep.Apply(e)
w = engine.WarBuild(self.path, self.name + ".war", self.data, dep.name, dep.jars)
e.AddTarget(w)
return w.Name()
TopApply = Apply
def LoadSpecs(self):
if self.binary:
self._LoadSpecs([self.binary])
class PlayApp(DataHolder):
"""Class that holds a play_app target."""
def __init__(self, module, path, name, modules, deps, data, play_home):
DataHolder.__init__(self, module, path, name)
self.modules = modules
self.deps = deps
self.data = data
self.play_home = play_home
@cache
def Apply(self, e):
deps = []
datas = set(self.data)
for depname in self.deps:
dep = DataHolder.Get(self.module, depname)
deps.append(dep.Apply(e))
for module in self.modules:
assert os.path.exists(module), "play module not found: %s" % module
c = engine.PlayCompile(self.path, self.name + ".zip",
from __future__ import annotations
from spark_auto_mapper_fhir.fhir_types.uri import FhirUri
from spark_auto_mapper_fhir.value_sets.generic_type import GenericTypeCode
from spark_auto_mapper.type_definitions.defined_types import AutoMapperTextInputType
# This file is auto-generated by generate_classes so do not edit manually
# noinspection PyPep8Naming
class CommonUCUMUnitsCode(GenericTypeCode):
"""
Common UCUM units
From: http://hl7.org/fhir/ValueSet/ucum-common in valuesets.xml
Commonly encountered UCUM units (for purposes of helping populate look ups.
"""
def __init__(self, value: AutoMapperTextInputType):
super().__init__(value=value)
"""
http://unitsofmeasure.org
"""
codeset: FhirUri = "http://unitsofmeasure.org"
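# Added usage note (not part of the generated file): a unit code can be
# constructed directly from any UCUM expression, or taken from the predefined
# constants in CommonUCUMUnitsCodeValues below, e.g.
#   CommonUCUMUnitsCode("mg/dL")
#   CommonUCUMUnitsCodeValues.Percent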
class CommonUCUMUnitsCodeValues:
"""
From: http://hl7.org/fhir/ValueSet/ucum-common in valuesets.xml
"""
Percent = CommonUCUMUnitsCode("%")
"""
From: http://hl7.org/fhir/ValueSet/ucum-common in valuesets.xml
"""
Percent_100WBC = CommonUCUMUnitsCode("%/100{WBC}")
"""
From: http://hl7.org/fhir/ValueSet/ucum-common in valuesets.xml
"""
Percent0to3Hours = CommonUCUMUnitsCode("%{0to3Hours}")
"""
From: http://hl7.org/fhir/ValueSet/ucum-common in valuesets.xml
"""
PercentAbnormal = CommonUCUMUnitsCode("%{Abnormal}")
"""
From: http://hl7.org/fhir/ValueSet/ucum-common in valuesets.xml
"""
PercentActivity = CommonUCUMUnitsCode("%{Activity}")
"""
From: http://hl7.org/fhir/ValueSet/ucum-common in valuesets.xml
"""
PercentBasalActivity = CommonUCUMUnitsCode("%{BasalActivity}")
"""
From: http://hl7.org/fhir/ValueSet/ucum-common in valuesets.xml
"""
PercentBinding = CommonUCUMUnitsCode("%{Binding}")
"""
From: http://hl7.org/fhir/ValueSet/ucum-common in valuesets.xml
"""
PercentBlockade = CommonUCUMUnitsCode("%{Blockade}")
"""
From: http://hl7.org/fhir/ValueSet/ucum-common in valuesets.xml
"""
PercentCarboxyhemoglobin = CommonUCUMUnitsCode("%{Carboxyhemoglobin}")
"""
From: http://hl7.org/fhir/ValueSet/ucum-common in valuesets.xml
"""
PercentConversion = CommonUCUMUnitsCode("%{Conversion}")
"""
From: http://hl7.org/fhir/ValueSet/ucum-common in valuesets.xml
"""
PercentCound = CommonUCUMUnitsCode("%{Cound}")
"""
From: http://hl7.org/fhir/ValueSet/ucum-common in valuesets.xml
"""
PercentEosSeen = CommonUCUMUnitsCode("%{EosSeen}")
"""
From: http://hl7.org/fhir/ValueSet/ucum-common in valuesets.xml
"""
PercentExcretion = CommonUCUMUnitsCode("%{Excretion}")
"""
From: http://hl7.org/fhir/ValueSet/ucum-common in valuesets.xml
"""
PercentFat = CommonUCUMUnitsCode("%{Fat}")
"""
From: http://hl7.org/fhir/ValueSet/ucum-common in valuesets.xml
"""
PercentFetalErythrocytes = CommonUCUMUnitsCode("%{FetalErythrocytes}")
"""
From: http://hl7.org/fhir/ValueSet/ucum-common in valuesets.xml
"""
PercentHemoglobin = CommonUCUMUnitsCode("%{Hemoglobin}")
"""
From: http://hl7.org/fhir/ValueSet/ucum-common in valuesets.xml
"""
PercentHemoglobinA1C = CommonUCUMUnitsCode("%{HemoglobinA1C}")
"""
From: http://hl7.org/fhir/ValueSet/ucum-common in valuesets.xml
"""
PercentHemoglobinSaturation = CommonUCUMUnitsCode("%{HemoglobinSaturation}")
"""
From: http://hl7.org/fhir/ValueSet/ucum-common in valuesets.xml
"""
PercentHemolysis = CommonUCUMUnitsCode("%{Hemolysis}")
"""
From: http://hl7.org/fhir/ValueSet/ucum-common in valuesets.xml
"""
PercentHumanResponse = CommonUCUMUnitsCode("%{HumanResponse}")
"""
From: http://hl7.org/fhir/ValueSet/ucum-common in valuesets.xml
"""
PercentIndex = CommonUCUMUnitsCode("%{Index}")
"""
From: http://hl7.org/fhir/ValueSet/ucum-common in valuesets.xml
"""
PercentInhibition = CommonUCUMUnitsCode("%{Inhibition}")
"""
From: http://hl7.org/fhir/ValueSet/ucum-common in valuesets.xml
"""
PercentLive = CommonUCUMUnitsCode("%{Live}")
"""
From: http://hl7.org/fhir/ValueSet/ucum-common in valuesets.xml
"""
PercentNegativeControl = CommonUCUMUnitsCode("%{Negative Control}")
"""
From: http://hl7.org/fhir/ValueSet/ucum-common in valuesets.xml
"""
PercentNormal = CommonUCUMUnitsCode("%{Normal}")
"""
From: http://hl7.org/fhir/ValueSet/ucum-common in valuesets.xml
"""
PercentNormalControl = CommonUCUMUnitsCode("%{NormalControl}")
"""
From: http://hl7.org/fhir/ValueSet/ucum-common in valuesets.xml
"""
PercentNormalPooledPlasma = CommonUCUMUnitsCode("%{NormalPooledPlasma}")
"""
From: http://hl7.org/fhir/ValueSet/ucum-common in valuesets.xml
"""
PercentOfAvailable = CommonUCUMUnitsCode("%{ofAvailable}")
"""
From: http://hl7.org/fhir/ValueSet/ucum-common in valuesets.xml
"""
PercentOfBacteria = CommonUCUMUnitsCode("%{ofBacteria}")
"""
From: http://hl7.org/fhir/ValueSet/ucum-common in valuesets.xml
"""
PercentOfLymphocytes = CommonUCUMUnitsCode("%{OfLymphocytes}")
"""
From: http://hl7.org/fhir/ValueSet/ucum-common in valuesets.xml
"""
PercentOfWBCs = CommonUCUMUnitsCode("%{OfWBCs}")
"""
From: http://hl7.org/fhir/ValueSet/ucum-common in valuesets.xml
"""
PercentOxygen = CommonUCUMUnitsCode("%{Oxygen}")
"""
From: http://hl7.org/fhir/ValueSet/ucum-common in valuesets.xml
"""
PercentPositive = CommonUCUMUnitsCode("%{Positive}")
"""
From: http://hl7.org/fhir/ValueSet/ucum-common in valuesets.xml
"""
PercentPrecipitate = CommonUCUMUnitsCode("%{Precipitate}")
"""
From: http://hl7.org/fhir/ValueSet/ucum-common in valuesets.xml
"""
PercentReactivity = CommonUCUMUnitsCode("%{Reactivity}")
"""
From: http://hl7.org/fhir/ValueSet/ucum-common in valuesets.xml
"""
PercentResponse = CommonUCUMUnitsCode("%{response}")
"""
From: http://hl7.org/fhir/ValueSet/ucum-common in valuesets.xml
"""
PercentRisk = CommonUCUMUnitsCode("%{risk}")
"""
From: http://hl7.org/fhir/ValueSet/ucum-common in valuesets.xml
"""
PercentSpermMotility = CommonUCUMUnitsCode("%{SpermMotility}")
"""
From: http://hl7.org/fhir/ValueSet/ucum-common in valuesets.xml
"""
PercentTotal = CommonUCUMUnitsCode("%{Total}")
"""
From: http://hl7.org/fhir/ValueSet/ucum-common in valuesets.xml
"""
PercentTotalProtein = CommonUCUMUnitsCode("%{TotalProtein}")
"""
From: http://hl7.org/fhir/ValueSet/ucum-common in valuesets.xml
"""
PercentTot_Cholesterol = CommonUCUMUnitsCode("%{Tot'Cholesterol}")
"""
From: http://hl7.org/fhir/ValueSet/ucum-common in valuesets.xml
"""
PercentTot_Hgb = CommonUCUMUnitsCode("%{Tot'Hgb}")
"""
From: http://hl7.org/fhir/ValueSet/ucum-common in valuesets.xml
"""
PercentUptake = CommonUCUMUnitsCode("%{Uptake}")
"""
From: http://hl7.org/fhir/ValueSet/ucum-common in valuesets.xml
"""
VolumePercent = CommonUCUMUnitsCode("%{vol}")
"""
From: http://hl7.org/fhir/ValueSet/ucum-common in valuesets.xml
"""
PercentWeightToWeight = CommonUCUMUnitsCode("%{WeightToWeight}")
"""
From: http://hl7.org/fhir/ValueSet/ucum-common in valuesets.xml
"""
Per12_Hour = CommonUCUMUnitsCode("/(12.h)")
"""
From: http://hl7.org/fhir/ValueSet/ucum-common in valuesets.xml
"""
PerArbitraryUnit = CommonUCUMUnitsCode("/[arb'U]")
"""
From: http://hl7.org/fhir/ValueSet/ucum-common in valuesets.xml
"""
PerHighPowerField = CommonUCUMUnitsCode("/[HPF]")
"""
From: http://hl7.org/fhir/ValueSet/ucum-common in valuesets.xml
"""
PerInternationalUnit = CommonUCUMUnitsCode("/[iU]")
"""
From: http://hl7.org/fhir/ValueSet/ucum-common in valuesets.xml
"""
PerLowPowerField = CommonUCUMUnitsCode("/[LPF]")
"""
From: http://hl7.org/fhir/ValueSet/ucum-common in valuesets.xml
"""
PerEntity = CommonUCUMUnitsCode("/{Entity}")
"""
From: http://hl7.org/fhir/ValueSet/ucum-common in valuesets.xml
"""
PerHpf = CommonUCUMUnitsCode("/[HPF]")
"""
From: http://hl7.org/fhir/ValueSet/ucum-common in valuesets.xml
"""
PerLPF = CommonUCUMUnitsCode("/[LPF]")
"""
From: http://hl7.org/fhir/ValueSet/ucum-common in valuesets.xml
"""
PerOif = CommonUCUMUnitsCode("/{oif}")
"""
From: http://hl7.org/fhir/ValueSet/ucum-common in valuesets.xml
"""
PerSpecimen = CommonUCUMUnitsCode("/{Specimen}")
"""
From: http://hl7.org/fhir/ValueSet/ucum-common in valuesets.xml
"""
PerTot = CommonUCUMUnitsCode("/{tot}")
"""
From: http://hl7.org/fhir/ValueSet/ucum-common in valuesets.xml
"""
PerTenGiga = CommonUCUMUnitsCode("/10*10")
"""
From: http://hl7.org/fhir/ValueSet/ucum-common in valuesets.xml
"""
PerTrillion = CommonUCUMUnitsCode("/10*12")
"""
From: http://hl7.org/fhir/ValueSet/ucum-common in valuesets.xml
"""
PerTrillionRedBloodCells = CommonUCUMUnitsCode("/10*12{rbc}")
"""
From: http://hl7.org/fhir/ValueSet/ucum-common in valuesets.xml
"""
PerMillion = CommonUCUMUnitsCode("/10*6")
"""
From: http://hl7.org/fhir/ValueSet/ucum-common in valuesets.xml
"""
PerBillion = CommonUCUMUnitsCode("/10*9")
"""
From: http://hl7.org/fhir/ValueSet/ucum-common in valuesets.xml
"""
Per100 = CommonUCUMUnitsCode("/100")
"""
From: http://hl7.org/fhir/ValueSet/ucum-common in valuesets.xml
"""
Per100Cells = CommonUCUMUnitsCode("/100{cells}")
"""
From: http://hl7.org/fhir/ValueSet/ucum-common in valuesets.xml
"""
Per100Neutrophils = CommonUCUMUnitsCode("/100{neutrophils}")
"""
From: http://hl7.org/fhir/ValueSet/ucum-common in valuesets.xml
"""
Per100Spermatozoa = CommonUCUMUnitsCode("/100{spermatozoa}")
"""
From: http://hl7.org/fhir/ValueSet/ucum-common in valuesets.xml
"""
Per100WBC = CommonUCUMUnitsCode("/100{WBC}")
"""
From: http://hl7.org/fhir/ValueSet/ucum-common in valuesets.xml
"""
Per100WBC = CommonUCUMUnitsCode("/100{WBCs}")
"""
From: http://hl7.org/fhir/ValueSet/ucum-common in valuesets.xml
"""
_Year = CommonUCUMUnitsCode("/a")
"""
From: http://hl7.org/fhir/ValueSet/ucum-common in valuesets.xml
"""
PerCentimeterOfWater = CommonUCUMUnitsCode("/cm[H2O]")
"""
From: http://hl7.org/fhir/ValueSet/ucum-common in valuesets.xml
"""
PerDay = CommonUCUMUnitsCode("/d")
"""
From: http://hl7.org/fhir/ValueSet/ucum-common in valuesets.xml
"""
PerDeciliter = CommonUCUMUnitsCode("/dL")
"""
From: http://hl7.org/fhir/ValueSet/ucum-common in valuesets.xml
"""
PerGram = CommonUCUMUnitsCode("/g")
"""
From: http://hl7.org/fhir/ValueSet/ucum-common in valuesets.xml
"""
PerGramCreat = CommonUCUMUnitsCode("/g{creat}")
"""
From: http://hl7.org/fhir/ValueSet/ucum-common in valuesets.xml
"""
PerGramHgb = CommonUCUMUnitsCode("/g{hgb}")
"""
From: http://hl7.org/fhir/ValueSet/ucum-common in valuesets.xml
"""
PerGramTot_nit = CommonUCUMUnitsCode("/g{tot'nit}")
"""
From: http://hl7.org/fhir/ValueSet/ucum-common in valuesets.xml
"""
PerGramTot_prot = CommonUCUMUnitsCode("/g{tot'prot}")
"""
From: http://hl7.org/fhir/ValueSet/ucum-common in valuesets.xml
"""
PerGramWet_tis = CommonUCUMUnitsCode("/g{wet'tis}")
"""
From: http://hl7.org/fhir/ValueSet/ucum-common in valuesets.xml
"""
PerHour = CommonUCUMUnitsCode("/h")
"""
From: http://hl7.org/fhir/ValueSet/ucum-common in valuesets.xml
"""
PerKilogram = CommonUCUMUnitsCode("/kg")
"""
From: http://hl7.org/fhir/ValueSet/ucum-common in valuesets.xml
"""
PerKilogramBodyWt = CommonUCUMUnitsCode("/kg{body'wt}")
"""
From: http://hl7.org/fhir/ValueSet/ucum-common in valuesets.xml
"""
PerLiter = CommonUCUMUnitsCode("/L")
"""
From: http://hl7.org/fhir/ValueSet/ucum-common in valuesets.xml
"""
PerSquareMeter = CommonUCUMUnitsCode("/m2")
"""
From: http://hl7.org/fhir/ValueSet/ucum-common in valuesets.xml
"""
PerMilligram = CommonUCUMUnitsCode("/mg")
"""
From: http://hl7.org/fhir/ValueSet/ucum-common in valuesets.xml
"""
PerMinute = CommonUCUMUnitsCode("/min")
"""
From: http://hl7.org/fhir/ValueSet/ucum-common in valuesets.xml
"""
PerMilliliter = CommonUCUMUnitsCode("/mL")
"""
From: http://hl7.org/fhir/ValueSet/ucum-common in valuesets.xml
"""
PerCubicMillimeter = CommonUCUMUnitsCode("/mm3")
"""
From: http://hl7.org/fhir/ValueSet/ucum-common in valuesets.xml
"""
PerMillimole = CommonUCUMUnitsCode("/mmol")
"""
From: http://hl7.org/fhir/ValueSet/ucum-common in valuesets.xml
"""
PerMonth = CommonUCUMUnitsCode("/mo")
"""
From: http://hl7.org/fhir/ValueSet/ucum-common in valuesets.xml
"""
PerSecond = CommonUCUMUnitsCode("/s")
"""
From: http://hl7.org/fhir/ValueSet/ucum-common in valuesets.xml
"""
PerEnzymeUnit = CommonUCUMUnitsCode("/U")
"""
From: http://hl7.org/fhir/ValueSet/ucum-common in valuesets.xml
"""
PerMicrogram = CommonUCUMUnitsCode("/ug")
"""
From: http://hl7.org/fhir/ValueSet/ucum-common in valuesets.xml
"""
PerMicroliter = CommonUCUMUnitsCode("/uL")
"""
From: http://hl7.org/fhir/ValueSet/ucum-common in valuesets.xml
"""
PerWeek = CommonUCUMUnitsCode("/wk")
"""
From: http://hl7.org/fhir/ValueSet/ucum-common in valuesets.xml
"""
IgAAnticardiolipinUnit = CommonUCUMUnitsCode("[APL'U]")
"""
From: http://hl7.org/fhir/ValueSet/ucum-common in valuesets.xml
"""
IgAAnticardiolipinUnitPerMilliliter = CommonUCUMUnitsCode("[APL'U]/mL")
"""
From: http://hl7.org/fhir/ValueSet/ucum-common in valuesets.xml
"""
ArbitraryUnit = CommonUCUMUnitsCode("[arb'U]")
"""
From: http://hl7.org/fhir/ValueSet/ucum-common in valuesets.xml
"""
ArbitaryUnit_Liter = CommonUCUMUnitsCode("[arb'U]/L")
"""
From: http://hl7.org/fhir/ValueSet/ucum-common in valuesets.xml
"""
ArbitraryUnitPerMilliliter = CommonUCUMUnitsCode("[arb'U]/mL")
"""
From: http://hl7.org/fhir/ValueSet/ucum-common in valuesets.xml
"""
AllergyUnit = CommonUCUMUnitsCode("[AU]")
"""
From: http://hl7.org/fhir/ValueSet/ucum-common in valuesets.xml
"""
BioequivalentAllergenUnit = CommonUCUMUnitsCode("[BAU]")
"""
From: http://hl7.org/fhir/ValueSet/ucum-common in valuesets.xml
"""
BethesdaUnit = CommonUCUMUnitsCode("[beth'U]")
"""
From: http://hl7.org/fhir/ValueSet/ucum-common in valuesets.xml
"""
BethesdaUnit = CommonUCUMUnitsCode("[beth'U]")
"""
From: http://hl7.org/fhir/ValueSet/ucum-common in valuesets.xml
"""
ColonyFormingUnit = CommonUCUMUnitsCode("[CFU]")
"""
From: http://hl7.org/fhir/ValueSet/ucum-common in valuesets.xml
"""
ColonyFormingUnitPerLiter = CommonUCUMUnitsCode("[CFU]/L")
"""
From: http://hl7.org/fhir/ValueSet/ucum-common in valuesets.xml
"""
ColonyFormingUnitPerMilliliter = CommonUCUMUnitsCode("[CFU]/mL")
"""
From: http://hl7.org/fhir/ValueSet/ucum-common in valuesets.xml
"""
French_catheterGauge_ = CommonUCUMUnitsCode("[Ch]")
"""
From: http://hl7.org/fhir/ValueSet/ucum-common in valuesets.xml
"""
CubicInch = CommonUCUMUnitsCode("[cin_i]")
"""
From: http://hl7.org/fhir/ValueSet/ucum-common in valuesets.xml
"""
Cup = CommonUCUMUnitsCode("[cup_us]")
"""
From: http://hl7.org/fhir/ValueSet/ucum-common in valuesets.xml
"""
DegreeFahrenheit = CommonUCUMUnitsCode("[degF]")
"""
From: http://hl7.org/fhir/ValueSet/ucum-common in valuesets.xml
"""
Dram_USAndBritish_ = CommonUCUMUnitsCode("[dr_av]")
"""
From: http://hl7.org/fhir/ValueSet/ucum-common in valuesets.xml
"""
Drop = CommonUCUMUnitsCode("[drp]")
"""
From: http://hl7.org/fhir/ValueSet/ucum-common in valuesets.xml
"""
Drop_HPF = CommonUCUMUnitsCode("[drp]/[HPF]")
"""
From: http://hl7.org/fhir/ValueSet/ucum-common in valuesets.xml
"""
Drop_Hour = CommonUCUMUnitsCode("[drp]/h")
"""
From: http://hl7.org/fhir/ValueSet/ucum-common in valuesets.xml
"""
Drop_Minute = CommonUCUMUnitsCode("[drp]/min")
"""
From: http://hl7.org/fhir/ValueSet/ucum-common in valuesets.xml
"""
Drop_Milliliter = CommonUCUMUnitsCode("[drp]/mL")
"""
From: http://hl7.org/fhir/ValueSet/ucum-common in valuesets.xml
"""
Drop_Second = CommonUCUMUnitsCode("[drp]/s")
"""
From: http://hl7.org/fhir/ValueSet/ucum-common in valuesets.xml
"""
FluidDram = CommonUCUMUnitsCode("[fdr_us]")
"""
From: http://hl7.org/fhir/ValueSet/ucum-common in valuesets.xml
"""
FluidOunce = CommonUCUMUnitsCode("[foz_br]")
"""
From: http://hl7.org/fhir/ValueSet/ucum-common in valuesets.xml
"""
FluidOunce = CommonUCUMUnitsCode("[foz_us]")
"""
From: http://hl7.org/fhir/ValueSet/ucum-common in valuesets.xml
"""
Feet = CommonUCUMUnitsCode("[ft_i]")
"""
From: http://hl7.org/fhir/ValueSet/ucum-common in valuesets.xml
"""
Fathom = CommonUCUMUnitsCode("[fth_i]")
"""
From: http://hl7.org/fhir/ValueSet/ucum-common in valuesets.xml
"""
Gallon = CommonUCUMUnitsCode("[gal_br]")
"""
From: http://hl7.org/fhir/ValueSet/ucum-common in valuesets.xml
"""
QueenAnne_sWineGallon = CommonUCUMUnitsCode("[gal_us]")
"""
From: http://hl7.org/fhir/ValueSet/ucum-common in valuesets.xml
"""
IgGAnticardiolipinUnit = CommonUCUMUnitsCode("[GPL'U]")
"""
From: http://hl7.org/fhir/ValueSet/ucum-common in valuesets.xml
"""
Grain = CommonUCUMUnitsCode("[gr]")
"""
From: http://hl7.org/fhir/ValueSet/ucum-common in valuesets.xml
"""
Inch_international_ = CommonUCUMUnitsCode("[in_i]")
"""
From: http://hl7.org/fhir/ValueSet/ucum-common in valuesets.xml
"""
InternationalUnit = CommonUCUMUnitsCode("[iU]")
"""
From: http://hl7.org/fhir/ValueSet/ucum-common in valuesets.xml
"""
InternationalUnitPer2Hour = CommonUCUMUnitsCode("[IU]/(2.h)")
"""
From: http://hl7.org/fhir/ValueSet/ucum-common in valuesets.xml
"""
InternationalUnitPer24Hour = CommonUCUMUnitsCode("[IU]/(24.h)")
"""
From: http://hl7.org/fhir/ValueSet/ucum-common in valuesets.xml
"""
InternationalUnitPerBillionRedBloodCells = CommonUCUMUnitsCode("[IU]/10*9{RBCs}")
"""
From: http://hl7.org/fhir/ValueSet/ucum-common in valuesets.xml
"""
InternationalUnitPerDay = CommonUCUMUnitsCode("[IU]/d")
"""
From: http://hl7.org/fhir/ValueSet/ucum-common in valuesets.xml
"""
InternationalUnitPerDeciliter = CommonUCUMUnitsCode("[IU]/dL")
"""
From: http://hl7.org/fhir/ValueSet/ucum-common in valuesets.xml
"""
InternationalUnitPerGram = CommonUCUMUnitsCode("[IU]/g")
"""
From: http://hl7.org/fhir/ValueSet/ucum-common in valuesets.xml
"""
InternationalUnitPerGramOfHemoglobin = CommonUCUMUnitsCode("[IU]/g{Hb}")
"""
From: http://hl7.org/fhir/ValueSet/ucum-common in valuesets.xml
"""
InternationalUnit_GramHgb = CommonUCUMUnitsCode("[iU]/g{Hgb}")
"""
From: http://hl7.org/fhir/ValueSet/ucum-common in valuesets.xml
"""
#!/usr/bin/env python3
import configargparse
import os.path
import numpy as np
try:
import CIAlign.utilityFunctions as utilityFunctions
from CIAlign._version import __version__
except ImportError:
import utilityFunctions
from _version import __version__
def float_range(mini, maxi):
'''
Defines a type for argparse of a float with a fixed range
Parameters
----------
mini: str or float
A float (or a string which can be converted to a float) with the
minimum valid value for the parameter.
maxi: str or float
A float (or a string which can be converted to a float) with the
maximum valid value for the parameter.
Returns
-------
float_range_checker: function
A function to input as a type to argparse to check if the value
provided is in the right range
'''
# Based on solution from @georg-w stack overflow qu. 55324449
mini = float(mini)
maxi = float(maxi)
def float_range_checker(arg):
try:
f = float(arg)
except ValueError:
raise configargparse.ArgumentTypeError(
"Must be a floating point number")
if f < mini or f > maxi:
raise configargparse.ArgumentTypeError(
"Must be in range [%s .. %s]" % (mini, maxi))
return (f)
return (float_range_checker)
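# Hypothetical usage sketch (not part of the original file): float_range is a
# type factory; it returns the checker that configargparse calls on the raw
# string value. The option name below is made up.
#
#   parser.add("--threshold", dest="threshold",
#              type=float_range(0, 1), default=0.5,
#              metavar="(float, 0..1)")
#   # "--threshold 0.3" parses to 0.3; "--threshold 1.5" raises
#   # configargparse.ArgumentTypeError("Must be in range [0.0 .. 1.0]")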
def int_range(mini, maxi, n_col):
'''
Defines a type for argparse of an integer with a fixed range, where
the maximum value is a function of the number of columns
Parameters
----------
mini: str or int
An integer (or a string which can be converted to an integer) with the
minimum valid value for the parameter.
maxi: str
A string containing an expression using n_col to convert the number
of columns to the maximum valid value for the parameter.
n_col: int
The number of columns in the input alignment, used when evaluating maxi.
Returns
-------
int_range_checker: function
A function to input as a type to argparse to check if the value
provided is in the right range
'''
# Based on solution from @georg-w stack overflow qu. 55324449
mini = int(mini)
try:
maxi_val = eval(maxi)
except TypeError:
maxi_val = int(maxi)
def int_range_checker(arg):
try:
f = int(arg)
except ValueError:
raise configargparse.ArgumentTypeError("Must be an integer")
if f < mini or f > maxi_val:
raise configargparse.ArgumentTypeError(
"Must be in range [%s .. %s (%s)]" % (mini, maxi, maxi_val))
return (f)
return (int_range_checker)
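# Hypothetical usage sketch (not part of the original file): here the maximum
# is an expression over n_col evaluated with eval(), so the valid range scales
# with the alignment width. The values below are made up.
#
#   checker = int_range(1, "n_col // 2", 100)   # valid values: 1 .. 50
#   checker("25")   # -> 25
#   checker("60")   # raises configargparse.ArgumentTypeError (out of range)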
def getParser():
'''
Builds a configargparse.ArgumentParser object with the CIAlign parameters
Returns
-------
parser: configargparse.ArgumentParser
ArgumentParser with the CIAlign parameters
'''
parser = configargparse.ArgumentParser(
description='Clean and interpret a multiple sequence \
alignment', add_help=False)
ci_dir = os.path.dirname(utilityFunctions.__file__)
# Looks up the default values and minimum and maximum values for the
parameters associated with the cleaning functions in the text file
# ranges.txt provided in the CIAlign code directory
ranges = [line.strip().split("\t")
for line in open("%s/ranges.txt" % ci_dir)]
# Defaults
defs = {x[0]: x[1] for x in ranges}
# Minima
minis = {x[0]: x[2] for x in ranges}
# Maxima
maxis = {x[0]: x[3] for x in ranges}
# Separate the required and optional parameters
required = parser.add_argument_group('Required Arguments')
optional = parser.add_argument_group('Optional Arguments')
# Files
# not to confuse with inifile
required.add("--infile", dest='infile', type=str,
help='Path to input alignment file in FASTA format')
optional.add("--inifile", dest='inifile', type=str,
default=None,
help='Path to config file. Default: %(default)s',
is_config_file=True)
optional.add("--outfile_stem", dest='outfile_stem', type=str,
default="CIAlign",
help="Prefix for output files, including the path to the \
output directory. Default: %(default)s")
# Initial setup
# Read the alignment temporarily just to find out how many columns there
# are as for several of the cleaning functions the range of valid
# parameters depends on this.
tempargs = parser.parse_known_args()[0]
if tempargs.infile:
# Read the FASTA file into an array
arr, nams = utilityFunctions.FastaToArray(tempargs.infile, None,
tempargs.outfile_stem)
# Find the number of columns in the input alignment
n_col = np.shape(arr)[1]
# Remove the array from memory
del arr
else:
# Gives a valid int value just for generating the --help text
n_col = 100
# parameter to run all functions without having to type them in
optional.add("--all", dest="all_options",
action="store_true",
help="Use all available functions with default parameters.")
# parameter to run all cleaning functions without having to type them in
optional.add("--clean", dest="clean",
action="store_true",
help="Use all cleaning functions with default parameters.")
# parameter to create all mini alignments without having to type them in
optional.add("--visualise", dest="visualise",
action="store_true",
help="Plot all mini alignments with default parameters.")
# parameter to run all interpretation functions except creating sequence logos without having to type them in
optional.add("--interpret", dest="interpret",
action="store_true",
help="Use all interpreting functions with default parameters.")
# Runtime
optional.add("--silent", dest='silent',
help="Do not print progress to the screen. \
Default: %(default)s",
action='store_true')
# Crop Ends
optional.add("--crop_ends", dest="crop_ends",
action="store_true",
help="Crop the ends of sequences if they are poorly aligned. \
Default: %(default)s")
optional.add("--crop_ends_mingap_perc", dest='crop_ends_mingap_perc',
type=float_range(minis['crop_ends_mingap_perc'],
maxis['crop_ends_mingap_perc']),
default=defs['crop_ends_mingap_perc'],
help="Minimum proportion of the sequence length (excluding \
gaps) that is the threshold for change in gap numbers. \
Default: %(default)s.",
metavar="(float, %s..%s)" % (minis['crop_ends_mingap_perc'],
maxis['crop_ends_mingap_perc']))
optional.add("--crop_ends_redefine_perc", dest='crop_ends_redefine_perc',
type=float_range(minis['crop_ends_redefine_perc'],
maxis['crop_ends_redefine_perc']),
default=defs['crop_ends_redefine_perc'],
help="Proportion of the sequence length (excluding gaps) \
that is being checked for change in gap numbers to \
redefine start/end. Default: %(default)s",
metavar="(float, %s..%s)" % (
minis['crop_ends_redefine_perc'],
maxis['crop_ends_redefine_perc']))
# Remove divergent sequences
optional.add("--remove_divergent", dest="remove_divergent",
action="store_true",
help="Remove sequences with <= N proportion of positions at \
which the most common base / amino acid in the \
alignment is present. Default: %(default)s")
optional.add("--remove_divergent_minperc", dest="remove_divergent_minperc",
default=defs['remove_divergent_minperc'],
type=float_range(minis['remove_divergent_minperc'],
maxis['remove_divergent_minperc']),
help="Minimum proportion of positions which should be \
identical to the most common base / amino acid in \
order to be preserved. \
Default: %(default)s)",
metavar="(float, %s..%s)" % (
minis['remove_divergent_minperc'],
maxis['remove_divergent_minperc']))
# # Remove Insertions
optional.add("--remove_insertions", dest="remove_insertions",
action="store_true",
help="Remove insertions found in <= 50 percent of sequences \
from the alignment. Default: %(default)s")
optional.add("--insertion_min_size", dest="insertion_min_size",
type=int_range(minis['insertion_min_size'],
maxis['insertion_min_size'],
n_col),
default=defs['insertion_min_size'],
help="Only remove insertions >= this number of residues. \
Default: %(default)s.",
metavar="(int, %s..%s)" % (
minis['insertion_min_size'],
maxis['insertion_min_size']))
optional.add("--insertion_max_size", dest="insertion_max_size",
type=int_range(minis['insertion_max_size'],
maxis['insertion_max_size'],
n_col),
default=defs['insertion_max_size'],
help="Only remove insertions <= this number of residues. \
Default: %(default)s",
metavar="(int, %s..%s)" % (
minis['insertion_max_size'],
maxis['insertion_max_size']))
optional.add("--insertion_min_flank", dest="insertion_min_flank",
type=int_range(minis['insertion_min_flank'],
maxis['insertion_min_flank'],
n_col),
default=defs['insertion_min_flank'],
help="Minimum number of bases on either side of an insertion \
to classify it as an insertion.\
Default: %(default)s",
metavar="(int, %s..%s)" % (
minis['insertion_min_flank'],
maxis['insertion_min_flank']))
# Remove Short
optional.add("--remove_short", dest="remove_short",
help="Remove sequences <= N bases / amino acids from the \
alignment. Default: %(default)s",
action="store_true")
optional.add("--remove_min_length", dest="remove_min_length",
type=int_range(minis['remove_min_length'],
maxis['remove_min_length'],
n_col),
default=defs['remove_min_length'],
help="Sequences are removed if they are shorter than this \
minimum length, excluding gaps. Default: %(default)s",
metavar="(int, %s..%s)" % (
minis['remove_min_length'],
maxis['remove_min_length']))
# keep gap only
optional.add("--keep_gaponly", dest="remove_gaponly",
action="store_false",
help="Keep gap only columns in the alignment. Default: \
%(default)s")
# Consensus
optional.add("--make_consensus", dest="make_consensus",
action="store_true",
help="Make a consensus sequence based on the cleaned \
alignment. Default: %(default)s")
optional.add("--consensus_type", dest="consensus_type", type=str,
default="majority",
help="Type of consensus sequence to make - can be majority, \
to use the most common character at each position in \
the consensus, even if this is a gap, or \
majority_nongap, to use the most common non-gap \
character at each position. Default: %(default)s")
optional.add("--consensus_keep_gaps", dest="consensus_keep_gaps",
action="store_true",
help="If there are gaps in the consensus (if majority_nongap \
is used as consensus_type), should these be included \
in the consensus (True) or should this position in \
the consensus be deleted (False). Default: %(default)s")
optional.add("--consensus_name", dest="consensus_name",
type=str, default="consensus",
help="Name to use for the consensus sequence in the output \
fasta file. Default: %(default)s")
# Mini Alignments
optional.add("--plot_input", dest="plot_input",
action="store_true",
help="Plot a mini alignment - an image representing the \
input alignment. Default: %(default)s")
optional.add("--plot_output", dest="plot_output",
action="store_true",
help="Plot a mini alignment, an image representing the \
output alignment. Default: %(default)s")
optional.add("--plot_markup", dest="plot_markup",
action="store_true",
help="Draws the input alignment but with the columns and \
rows which have been removed by each function marked \
up in corresponding colours. Default: %(default)s")
optional.add("--plot_dpi", dest="plot_dpi",
type=int, default=300,
help="DPI for mini alignments. Default: %(default)s")
optional.add("--plot_format", dest="plot_format",
type=str, default='png',
help="Image format for mini alignments - can be png, svg, \
tiff or jpg. Default: %(default)s")
optional.add("--plot_width", dest="plot_width",
type=int, default=5,
help="Mini alignment width in inches. Default: %(default)s")
optional.add("--plot_height", dest="plot_height",
type=int, default=3,
help="Mini alignment height in inches. Default: %(default)s")
optional.add("--plot_keep_numbers", dest="plot_keep_numbers",
action="store_true",
help="If specified, for mini alignments based on CIAlign \
output with <10 sequences (or if force_numbers \
is
for k in range (1, min((7-i), j)+1):
if game.board[i+k][j-k].islower():
break
switchPos((position[0], position[1]), (position[0]+k, position[1]-k), game)
if not isBKingChecked(game, 1):
validMoves.append((position[0]+k, position[1]-k))
switchPos((position[0]+k, position[1]-k), (position[0], position[1]), game)
if not game.board[i+k][j-k].isdigit() and not game.board[i+k][j-k].islower():
break
for k in range (1, min((7-i), (7-j))+1):
if game.board[i+k][j+k].islower():
break
switchPos((position[0], position[1]), (position[0]+k, position[1]+k), game)
if not isBKingChecked(game, 1):
validMoves.append((position[0]+k, position[1]+k))
switchPos((position[0]+k, position[1]+k), (position[0], position[1]), game)
if not game.board[i+k][j+k].isdigit() and not game.board[i+k][j+k].islower():
break
return validMoves
def getWN(position, game):
validMoves = []
i,j = position[0], position[1]
if i > 1:
if j > 0 and(game.board[position[0]-2][position[1]-1].islower() or game.board[position[0]-2][position[1]-1].isdigit()):
switchPos((position[0], position[1]), (position[0]-2, position[1]-1), game)
if not isWKingChecked(game, 0):
validMoves.append((position[0]-2, position[1]-1))
switchPos((position[0]-2, position[1]-1),(position[0], position[1]), game)
if j < 7 and(game.board[position[0]-2][position[1]+1].islower() or game.board[position[0]-2][position[1]+1].isdigit()):
switchPos((position[0], position[1]), (position[0]-2, position[1]+1), game)
if not isWKingChecked(game, 0):
validMoves.append((position[0]-2, position[1]+1))
switchPos((position[0]-2, position[1]+1), (position[0], position[1]), game)
if i > 0:
if j > 1 and(game.board[position[0]-1][position[1]-2].islower() or game.board[position[0]-1][position[1]-2].isdigit()):
switchPos((position[0], position[1]), (position[0]-1, position[1]-2), game)
if not isWKingChecked(game, 0):
validMoves.append((position[0]-1, position[1]-2))
switchPos((position[0]-1, position[1]-2), (position[0], position[1]), game)
if j < 6 and(game.board[position[0]-1][position[1]+2].islower() or game.board[position[0]-1][position[1]+2].isdigit()):
switchPos((position[0], position[1]), (position[0]-1, position[1]+2), game)
if not isWKingChecked(game, 0):
validMoves.append((position[0]-1, position[1]+2))
switchPos((position[0]-1, position[1]+2), (position[0], position[1]), game)
if i < 6:
if j > 0 and(game.board[position[0]+2][position[1]-1].islower() or game.board[position[0]+2][position[1]-1].isdigit()):
switchPos((position[0], position[1]), (position[0]+2, position[1]-1), game)
if not isWKingChecked(game, 0):
validMoves.append((position[0]+2, position[1]-1))
switchPos((position[0]+2, position[1]-1), (position[0], position[1]), game)
if j < 7 and(game.board[position[0]+2][position[1]+1].islower() or game.board[position[0]+2][position[1]+1].isdigit()):
switchPos((position[0], position[1]), (position[0]+2, position[1]+1), game)
if not isWKingChecked(game, 0):
validMoves.append((position[0]+2, position[1]+1))
switchPos((position[0]+2, position[1]+1), (position[0], position[1]), game)
if i < 7:
if j > 1 and(game.board[position[0]+1][position[1]-2].islower() or game.board[position[0]+1][position[1]-2].isdigit()):
switchPos((position[0], position[1]), (position[0]+1, position[1]-2), game)
if not isWKingChecked(game, 0):
validMoves.append((position[0]+1, position[1]-2))
switchPos((position[0]+1, position[1]-2), (position[0], position[1]), game)
if j < 6 and(game.board[position[0]+1][position[1]+2].islower() or game.board[position[0]+1][position[1]+2].isdigit()):
switchPos((position[0], position[1]), (position[0]+1, position[1]+2), game)
if not isWKingChecked(game, 0):
validMoves.append((position[0]+1, position[1]+2))
switchPos((position[0]+1, position[1]+2), (position[0], position[1]), game)
return validMoves
def getBN(position, game):
validMoves = []
i,j = position[0], position[1]
if i > 1:
if j > 0 and (not game.board[position[0]-2][position[1]-1].islower()):
switchPos((position[0], position[1]), (position[0]-2, position[1]-1), game)
if not isBKingChecked(game, 1):
validMoves.append((position[0]-2, position[1]-1))
switchPos((position[0]-2, position[1]-1), (position[0], position[1]), game)
if j < 7 and (not game.board[position[0]-2][position[1]+1].islower()):
switchPos((position[0], position[1]), (position[0]-2, position[1]+1), game)
if not isBKingChecked(game, 1):
validMoves.append((position[0]-2, position[1]+1))
switchPos((position[0]-2, position[1]+1), (position[0], position[1]), game)
if i > 0:
if j > 1 and (not game.board[position[0]-1][position[1]-2].islower()):
switchPos((position[0], position[1]), (position[0]-1, position[1]-2), game)
if not isBKingChecked(game, 1):
validMoves.append((position[0]-1, position[1]-2))
switchPos((position[0]-1, position[1]-2), (position[0], position[1]), game)
if j < 6 and (not game.board[position[0]-1][position[1]+2].islower()):
switchPos((position[0], position[1]), (position[0]-1, position[1]+2), game)
if not isBKingChecked(game, 1):
validMoves.append((position[0]-1, position[1]+2))
switchPos((position[0]-1, position[1]+2), (position[0], position[1]), game)
if i < 6:
if j > 0 and (not game.board[position[0]+2][position[1]-1].islower()):
switchPos((position[0], position[1]), (position[0]+2, position[1]-1), game)
if not isBKingChecked(game, 1):
validMoves.append((position[0]+2, position[1]-1))
switchPos((position[0]+2, position[1]-1), (position[0], position[1]), game)
if j < 7 and (not game.board[position[0]+2][position[1]+1].islower()):
switchPos((position[0], position[1]), (position[0]+2, position[1]+1), game)
if not isBKingChecked(game, 1):
validMoves.append((position[0]+2, position[1]+1))
switchPos((position[0]+2, position[1]+1), (position[0], position[1]), game)
if i < 7:
if j > 1 and (not game.board[position[0]+1][position[1]-2].islower()):
switchPos((position[0], position[1]), (position[0]+1, position[1]-2), game)
if not isBKingChecked(game, 1):
validMoves.append((position[0]+1, position[1]-2))
switchPos((position[0]+1, position[1]-2), (position[0], position[1]), game)
if j < 6 and (not game.board[position[0]+1][position[1]+2].islower()):
switchPos((position[0], position[1]), (position[0]+1, position[1]+2), game)
if not isBKingChecked(game, 1):
validMoves.append((position[0]+1, position[1]+2))
switchPos((position[0]+1, position[1]+2), (position[0], position[1]), game)
return validMoves
def getWR(position, game):
validMoves = []
i,j = position[0], position[1]
for k in range (i-1, -1, -1):
if not game.board[k][j].islower() and not game.board[k][j].isdigit():
break
switchPos((position[0], position[1]), (k, position[1]), game)
if not isWKingChecked(game, 0):
validMoves.append((k, position[1]))
switchPos((k, position[1]), (position[0], position[1]), game)
if game.board[k][j].islower():
break
for k in range (i+1, 8):
if not game.board[k][j].islower() and not game.board[k][j].isdigit():
break
switchPos((position[0], position[1]), (k, position[1]), game)
if not isWKingChecked(game, 0):
validMoves.append((k, position[1]))
switchPos((k, position[1]), (position[0], position[1]), game)
if game.board[k][j].islower():
break
for k in range (j-1, -1, -1):
if not game.board[i][k].islower() and not game.board[i][k].isdigit():
break
switchPos((position[0], position[1]), (position[0], k), game)
if not isWKingChecked(game, 0):
validMoves.append((position[0], k))
switchPos((position[0], k), (position[0], position[1]), game)
if game.board[i][k].islower():
break
for k in range (j+1, 8):
if not game.board[i][k].islower() and not game.board[i][k].isdigit():
break
switchPos((position[0], position[1]), (position[0], k), game)
if not isWKingChecked(game, 0):
validMoves.append((position[0], k))
switchPos((position[0], k), (position[0], position[1]), game)
if game.board[i][k].islower():
break
return validMoves
def getBR(position, game):
validMoves = []
i,j = position[0], position[1]
for k in range (i-1, -1, -1):
if game.board[k][j].islower():
break
switchPos((position[0], position[1]), (k, position[1]), game)
if not isBKingChecked(game, 1):
validMoves.append((k, position[1]))
switchPos((k, position[1]), (position[0], position[1]), game)
if not game.board[k][j].islower() and not game.board[k][j].isdigit():
break
for k in range (i+1, 8):
if game.board[k][j].islower():
break
switchPos((position[0], position[1]), (k, position[1]), game)
if not isBKingChecked(game, 1):
validMoves.append((k, position[1]))
switchPos((k, position[1]), (position[0], position[1]), game)
if not game.board[k][j].islower() and not game.board[k][j].isdigit():
break
for k in range (j-1, -1, -1):
if game.board[i][k].islower():
break
switchPos((position[0], position[1]), (position[0], k), game)
if not isBKingChecked(game, 1):
validMoves.append((position[0], k))
switchPos((position[0], k), (position[0], position[1]), game)
if not game.board[i][k].islower() and not game.board[i][k].isdigit():
break
for k in range (j+1, 8):
if game.board[i][k].islower():
break
switchPos((position[0], position[1]), (position[0], k), game)
if not isBKingChecked(game, 1):
validMoves.append((position[0], k))
switchPos((position[0], k), (position[0], position[1]), game)
if not game.board[i][k].islower() and not game.board[i][k].isdigit():
break
return validMoves
def getWQ(position, game):
validMoves = []
validMoves.extend(getWB(position, game))
validMoves.extend(getWR(position, game))
return validMoves
def getBQ(position, game):
validMoves = []
validMoves.extend(getBB(position, game))
validMoves.extend(getBR(position, game))
return validMoves
def getWK(position, game):
validMoves = []
if position[0] > 0:
if game.board[position[0]-1][position[1]].islower() or game.board[position[0]-1][position[1]].isdigit():
switchPos((position[0], position[1]), (position[0]-1, position[1]), game)
if not isWKingChecked(game, 0):
validMoves.append((position[0]-1, position[1]))
switchPos((position[0]-1, position[1]), (position[0], position[1]), game)
if position[1] > 0 and(game.board[position[0]-1][position[1]-1].islower() or game.board[position[0]-1][position[1]-1].isdigit()):
switchPos((position[0], position[1]), (position[0]-1, position[1]-1), game)
if not isWKingChecked(game, 0):
validMoves.append((position[0]-1, position[1]-1))
switchPos((position[0]-1, position[1]-1), (position[0], position[1]), game)
if position[1] < 7 and(game.board[position[0]-1][position[1]+1].islower() or game.board[position[0]-1][position[1]+1].isdigit()):
switchPos((position[0], position[1]), (position[0]-1, position[1]+1), game)
if not isWKingChecked(game, 0):
validMoves.append((position[0]-1, position[1]+1))
switchPos((position[0]-1, position[1]+1), (position[0], position[1]), game)
if position[0] < 7:
if game.board[position[0]+1][position[1]].islower() or game.board[position[0]+1][position[1]].isdigit():
switchPos((position[0], position[1]), (position[0]+1, position[1]), game)
if not isWKingChecked(game, 0):
validMoves.append((position[0]+1, position[1]))
switchPos((position[0]+1, position[1]), (position[0], position[1]), game)
if position[1] > 0 and(game.board[position[0]+1][position[1]-1].islower() or game.board[position[0]+1][position[1]-1].isdigit()):
switchPos((position[0], position[1]), (position[0]+1, position[1]-1), game)
if not isWKingChecked(game, 0):
validMoves.append((position[0]+1, position[1]-1))
switchPos((position[0]+1, position[1]-1), (position[0], position[1]), game)
if position[1] < 7 and(game.board[position[0]+1][position[1]+1].islower() or game.board[position[0]+1][position[1]+1].isdigit()):
switchPos((position[0], position[1]), (position[0]+1, position[1]+1), game)
if not isWKingChecked(game, 0):
validMoves.append((position[0]+1, position[1]+1))
switchPos((position[0]+1, position[1]+1), (position[0], position[1]), game)
if position[1] > 0:
if game.board[position[0]][position[1]-1].islower() or game.board[position[0]][position[1]-1].isdigit():
switchPos((position[0], position[1]), (position[0], position[1]-1), game)
if not isWKingChecked(game, 0):
validMoves.append((position[0], position[1]-1))
switchPos((position[0], position[1]-1), (position[0], position[1]), game)
if position[1] < 7:
if game.board[position[0]][position[1]+1].islower() or game.board[position[0]][position[1]+1].isdigit():
switchPos((position[0], position[1]), (position[0], position[1]+1), game)
if not isWKingChecked(game, 0):
validMoves.append((position[0], position[1]+1))
switchPos((position[0], position[1]+1), (position[0], position[1]), game)
if not isWKingChecked(game, 0) and game.wK == (7, 4):
if game.wCastleL and game.board[7][3] == '0' and game.board[7][2] == '0' and game.board[7][1] == '0':
switchPos((7, 4), (7, 3), game)
if not isWKingChecked(game, 1):
switchPos((7, 3), (7, 2), game)
if not isWKingChecked(game, 0):
validMoves.append((7, 2))
switchPos((7, 2), (7, 4), game)
else:
switchPos((7, 3), (7, 4), game)
if game.wCastleR and game.board[7][5] == '0' and game.board[7][6] == '0':
switchPos((7, 4), (7, 5), game)
if not isWKingChecked(game, 0):
switchPos((7, 5), (7, 6), game)
if not isWKingChecked(game, 0):
validMoves.append((7, 6))
switchPos((7, 6), (7, 4), game)
else:
switchPos((7, 5), (7, 4), game)
return validMoves
def getBK(position, game):
validMoves = []
if position[0] > 0:
if not game.board[position[0]-1][position[1]].islower():
switchPos((position[0], position[1]), (position[0]-1, position[1]), game)
if not isBKingChecked(game, 1):
validMoves.append((position[0]-1, position[1]))
switchPos((position[0]-1, position[1]), (position[0], position[1]), game)
if position[1] > 0 and not game.board[position[0]-1][position[1]-1].islower():
switchPos((position[0], position[1]), (position[0]-1, position[1]-1), game)
if not isBKingChecked(game, 1):
validMoves.append((position[0]-1, position[1]-1))
switchPos((position[0]-1, position[1]-1), (position[0], position[1]), game)
if position[1] < 7 and not game.board[position[0]-1][position[1]+1].islower():
switchPos((position[0], position[1]), (position[0]-1, position[1]+1), game)
if not isBKingChecked(game, 1):
validMoves.append((position[0]-1, position[1]+1))
switchPos((position[0]-1, position[1]+1), (position[0], position[1]), game)
if position[0] < 7:
if not game.board[position[0]+1][position[1]].islower():
switchPos((position[0], position[1]), (position[0]+1, position[1]), game)
if not isBKingChecked(game, 1):
validMoves.append((position[0]+1, position[1]))
switchPos((position[0]+1, position[1]), (position[0], position[1]), game)
if position[1] > 0 and not game.board[position[0]+1][position[1]-1].islower():
switchPos((position[0], position[1]), (position[0]+1, position[1]-1), game)
if not isBKingChecked(game, 1):
validMoves.append((position[0]+1, position[1]-1))
switchPos((position[0]+1, position[1]-1), (position[0], position[1]), game)
if position[1] < 7 and not game.board[position[0]+1][position[1]+1].islower():
switchPos((position[0], position[1]), (position[0]+1, position[1]+1), game)
if not isBKingChecked(game, 1):
validMoves.append((position[0]+1, position[1]+1))
switchPos((position[0]+1, position[1]+1), (position[0], position[1]), game)
if position[1] > 0:
if not game.board[position[0]][position[1]-1].islower():
switchPos((position[0], position[1]), (position[0], position[1]-1), game)
if not isBKingChecked(game, 1):
validMoves.append((position[0], position[1]-1))
switchPos((position[0], position[1]-1), (position[0], position[1]), game)
# check if key is installed
installed = False
for ik in self.installed_keys["keys"]:
if (fk["fingerprint"] == ik["fingerprint"] and
fk["type"] == ik["type"] and
fk["key_capabilities"] == ik["key_capabilities"]
):
self._vv("fingerprint [{}] already installed".format(fk["fingerprint"]))
keyinfo["keys"][index]["state"] = "present"
keyinfo["keys"][index]["changed"] = False
installed = True
# check trust
if not self.compare_trust(fk["trust_level"], self.module.params["trust"]):
# update trust level
self.set_trust(fk["fingerprint"], self.module.params["trust"])
trucnt += 1
# get trust level as displayed by gpg
tru_level, tru_desc = self.get_trust(self.module.params["trust"])
keyinfo["keys"][index]["changed"] = True
keyinfo["keys"][index]["trust_level"] = tru_level
keyinfo["keys"][index]["trust_level_desc"] = tru_desc
continue
if not installed:
self._vv("fingerprint [{}] not yet installed".format(fk["fingerprint"]))
# import file
cmd = self.prepare_command("file", "present")
# run subprocess
rc, stdout, stderr = self.module.run_command(args=cmd, check_rc=True)
self._vv("fingerprint [{}] successfully imported".format(fk["fingerprint"]))
keyinfo["keys"][index]["state"] = "present"
keyinfo["keys"][index]["changed"] = True
impcnt += 1
# check trust
if not self.compare_trust(fk["trust_level"], self.module.params["trust"]):
# update trust level
self.set_trust(fk["fingerprint"], self.module.params["trust"])
trucnt += 1
# get trust level as displayed by gpg
tru_level, tru_desc = self.get_trust(self.module.params["trust"])
keyinfo["keys"][index]["changed"] = True
keyinfo["keys"][index]["trust_level"] = tru_level
keyinfo["keys"][index]["trust_level_desc"] = tru_desc
# set keyinfo
self.set_keyinfo(keyinfo)
# check import count
if impcnt > 0 or trucnt > 0:
self.result["changed"] = True
# set message and return
self.result["msg"] = "[{}] keys were imported; [{}] trust levels updated".format(impcnt, trucnt)
return True
def run_file_absent(self):
"""
remove key(s) present in file
"""
# first, check if the file is OK
keyinfo = self.check_file()
self._vv("remove keys identified in file")
# key count
keycnt = 0
# then see if the key is installed or not
# fk = file key
# ik = installed key
for index, fk in enumerate(keyinfo["keys"]):
installed = False
for ik in self.installed_keys["keys"]:
if (fk["fingerprint"] == ik["fingerprint"] and
fk["type"] == ik["type"] and
fk["key_capabilities"] == ik["key_capabilities"]
):
installed = True
continue
if not installed:
self._vv("fingerprint [{}] not installed; nothing to remove".format(fk["fingerprint"]))
keyinfo["keys"][index]["state"] = "absent"
keyinfo["keys"][index]["changed"] = False
else:
self._vv("fingerprint [{}] installed; will be removed".format(fk["fingerprint"]))
# remove file
cmd = self.prepare_command("file", "absent")
# add fingerprint as argument
cmd += [fk["fingerprint"]]
# run subprocess
rc, stdout, stderr = self.module.run_command(args=cmd, check_rc=True)
self._vv("fingerprint [{}] successfully removed".format(fk["fingerprint"]))
keyinfo["keys"][index]["state"] = "absent"
keyinfo["keys"][index]["changed"] = True
keycnt += 1
# re-run check installed command to prevent attempting to remove same
# fingerprint again (for example after removing pub/sec counterpart
# with the same fpr)
self.check_installed_keys()
# set keyinfo
self.set_keyinfo(keyinfo)
# check import count
if keycnt > 0:
self.result["changed"] = True
# return
self.result["msg"] = "[{}] keys were removed".format(keycnt)
return True
def run_file_info(self):
"""
method to only retrieve the current status of keys,
whether from file, content or fpr
"""
# first, check if the file is OK
keyinfo = self.check_file()
self._vv("showing key info from file")
# then see if the key is already installed
# fk = file key
# ik = installed key
for index, fk in enumerate(keyinfo["keys"]):
# check if key is installed
installed = False
for ik in self.installed_keys["keys"]:
if (fk["fingerprint"] == ik["fingerprint"] and
fk["type"] == ik["type"] and
fk["key_capabilities"] == ik["key_capabilities"]
):
self._vv("fingerprint [{}] installed".format(fk["fingerprint"]))
keyinfo["keys"][index]["state"] = "present"
keyinfo["keys"][index]["changed"] = False
installed = True
continue
if not installed:
# set state
self._vv("fingerprint [{}] not installed".format(fk["fingerprint"]))
keyinfo["keys"][index]["state"] = "absent"
keyinfo["keys"][index]["changed"] = False
# set keyinfo
self.set_keyinfo(keyinfo)
# set message and return
return True
def run_content_present(self):
"""
import keys from content
"""
# prepare content
filename = self.prepare_content(self.module.params["content"])
# set file parameter and run file present
self.module.params["file"] = filename
self.run_file_present()
# delete content
self.delete_content(filename)
def run_content_absent(self):
"""
remove keys from content
"""
# prepare content
filename = self.prepare_content(self.module.params["content"])
# set file parameter and run file present
self.module.params["file"] = filename
self.run_file_absent()
# delete content
self.delete_content(filename)
def run_content_info(self):
"""
get key info from content
"""
# prepare content
filename = self.prepare_content(self.module.params["content"])
# set file parameter and run file present
self.module.params["file"] = filename
self.run_file_info()
# delete content
self.delete_content(filename)
def run_fpr_present(self):
"""
import key from keyserver
"""
self._vv("import new keys from keyserver")
# set fpr shorthand
fpr = self.module.params["fpr"]
# set base values
installed = False
impcnt = 0
trucnt = 0
keyinfo = {
'fprs': [],
'keys': [],
}
# check if key is installed
for ik in self.installed_keys["keys"]:
if (fpr == ik["fingerprint"]):
# set keyinfo
self._vv("fingerprint [{}] already installed".format(fpr))
keyinfo["fprs"].append(fpr)
keyinfo["keys"].append(ik)
keyinfo["keys"][0]["state"] = "present"
keyinfo["keys"][0]["changed"] = False
installed = True
# check trust
if not self.compare_trust(ik["trust_level"], self.module.params["trust"]):
# update trust level
self.set_trust(fpr, self.module.params["trust"])
trucnt += 1
# get trust level as displayed by gpg
tru_level, tru_desc = self.get_trust(self.module.params["trust"])
keyinfo["keys"][0]["changed"] = True
keyinfo["keys"][0]["trust_level"] = tru_level
keyinfo["keys"][0]["trust_level_desc"] = tru_desc
continue
if not installed:
self._vv("fingerprint [{}] not yet installed".format(fpr))
# import file
cmd = self.prepare_command("fpr", "present")
cmd += [fpr]
# run subprocess
rc, stdout, stderr = self.module.run_command(args=cmd, check_rc=True)
self._vv("fingerprint [{}] successfully imported from keyserver".format(fpr))
# get info from specific key; keyservers only contain public keys
# so no point in checking the secret keys
cmd = self.prepare_command("check", "installed_public")
cmd += [fpr]
rc, stdout, stderr = self.module.run_command(args=cmd, check_rc=True)
keyinfo = self.process_colons(stdout)
# check expiration by checking trust
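# note (assumption based on the gpg --with-colons DETAILS documentation):
# validity codes checked below are i = invalid, d = disabled, r = revoked, e = expired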
if keyinfo["keys"][0]["trust_level"] in ['i','d','r','e']:
# delete the expired key and fail
cmd = self.prepare_command("fpr", "absent")
cmd += [fpr]
rc, stdout, stderr = self.module.run_command(args=cmd, check_rc=True)
self.module.fail_json(msg="key is either expired or invalid [trust={}] [expiration={}]".format(keyinfo["keys"][0]["trust_level"], keyinfo["keys"][0]["expirationdate"]))
# update key info
keyinfo["keys"][0]["state"] = "present"
keyinfo["keys"][0]["changed"] = True
impcnt += 1
# check trust
if not self.compare_trust(keyinfo["keys"][0]["trust_level"], self.module.params["trust"]):
# update trust level
self.set_trust(fpr, self.module.params["trust"])
trucnt += 1
# get trust level as displayed by gpg
tru_level, tru_desc = self.get_trust(self.module.params["trust"])
keyinfo["keys"][0]["changed"] = True
keyinfo["keys"][0]["trust_level"] = tru_level
keyinfo["keys"][0]["trust_level_desc"] = tru_desc
# set keyinfo
self.set_keyinfo(keyinfo)
# check import count
if impcnt > 0 or trucnt > 0:
self.result["changed"] = True
# set message and return
self.result["msg"] = "[{}] keys were imported; [{}] trust levels updated".format(impcnt, trucnt)
return True
def run_fpr_absent(self):
"""
remove key(s)
"""
self._vv("delete keys based on fingerprint")
# set fpr shorthand
fpr = self.module.params["fpr"]
# set base values
installed = False
keycnt = 0
keyinfo = {
'fprs': [],
'keys': [],
}
# see if the key is installed or not
# ik = installed key
for ik in self.installed_keys["keys"]:
if fpr == ik["fingerprint"]:
if ("state" in ik and ik["state"] != "absent") or ("state" not in ik):
keyinfo["fprs"].append(fpr)
keyinfo["keys"].append(ik)
installed = True
continue
if not installed:
self._vv("fingerprint [{}] not installed; nothing to remove".format(fpr))
key = {}
key[fpr] = {
"state" : "absent",
"changed" : False,
"fingerprint" : fpr,
}
keyinfo["fprs"].append(fpr)
keyinfo["keys"].append(key)
else:
self._vv("fingerprint [{}] installed; will be removed".format(fpr))
# remove file
cmd = self.prepare_command("fpr", "absent")
# add fingerprint as argument
cmd += [fpr]
# run subprocess
rc, stdout, stderr = self.module.run_command(args=cmd, check_rc=True)
self._vv("fingerprint [{}] successfully removed".format(fpr))
keyinfo["keys"][0]["state"] = "absent"
keyinfo["keys"][0]["changed"] = True
keycnt += 1
# re-run check installed command to prevent attempting to remove same
# fingerprint again (for example after removing pub/sec counterpart
# with the same fpr)
self.check_installed_keys()
# set keyinfo
self.set_keyinfo(keyinfo)
# check import count
if keycnt > 0:
self.result["changed"] = True
# return
self.result["msg"] = "[{}] keys were removed".format(keycnt)
return True
def run_fpr_latest(self):
"""
get the latest key from the keyserver
"""
self._vv("get latest key from keyserver")
# set fpr shorthand
fpr = self.module.params["fpr"]
# set base values
installed = False
updated = False
updcnt = 0
trucnt = 0
keyinfo = {
'fprs': [],
'keys': [],
}
# check if key is installed
for ik in self.installed_keys["keys"]:
if (fpr == ik["fingerprint"]):
# set keyinfo
self._vv("fingerprint [{}] installed; updating from server".format(fpr))
keyinfo["fprs"].append(fpr)
keyinfo["keys"].append(ik)
keyinfo["keys"][0]["state"] = "present"
keyinfo["keys"][0]["changed"] = False
installed = True
continue
if not installed:
self._vv("fingerprint [{}] not yet installed; install first".format(fpr))
# import from keyserver
self.run_fpr_present()
return True
else:
self._vv("fetching updates from keyserver")
# get updates from keyserver
cmd = self.prepare_command("fpr", "latest")
cmd += [fpr]
rc, stdout, stderr = self.module.run_command(args=cmd, check_rc=True)
# see if any updates were downloaded or not
# for some reason, gpg outputs these messages to stderr
updated = re.search(r'gpg:\s+unchanged: 1\n', stderr) is None
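# hedged example of the stderr being matched (exact spacing/wording may vary by gpg version):
#   gpg: Total number processed: 1
#   gpg:              unchanged: 1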
if updated:
updcnt += 1
# if key was updated, refresh info
if updated:
self._vv("key was updated on server")
# get info from specific key; keyservers only contain public keys
# so | |
field `Relative Humidity Fraction 6`"""
self["Relative Humidity Fraction 6"] = value
@property
def moisture_content_6(self):
"""field `Moisture Content 6`
| Units: kg/m3
Args:
value (float): value for IDD Field `Moisture Content 6`
Raises:
ValueError: if `value` is not a valid value
Returns:
float: the value of `moisture_content_6` or None if not set
"""
return self["Moisture Content 6"]
@moisture_content_6.setter
def moisture_content_6(self, value=None):
"""Corresponds to IDD field `Moisture Content 6`"""
self["Moisture Content 6"] = value
@property
def relative_humidity_fraction_7(self):
"""field `Relative Humidity Fraction 7`
| The relative humidity is entered as a fraction.
| Units: dimensionless
| value <= 1.0
Args:
value (float): value for IDD Field `Relative Humidity Fraction 7`
Raises:
ValueError: if `value` is not a valid value
Returns:
float: the value of `relative_humidity_fraction_7` or None if not set
"""
return self["Relative Humidity Fraction 7"]
@relative_humidity_fraction_7.setter
def relative_humidity_fraction_7(self, value=None):
"""Corresponds to IDD field `Relative Humidity Fraction 7`"""
self["Relative Humidity Fraction 7"] = value
@property
def moisture_content_7(self):
"""field `Moisture Content 7`
| Units: kg/m3
Args:
value (float): value for IDD Field `Moisture Content 7`
Raises:
ValueError: if `value` is not a valid value
Returns:
float: the value of `moisture_content_7` or None if not set
"""
return self["Moisture Content 7"]
@moisture_content_7.setter
def moisture_content_7(self, value=None):
"""Corresponds to IDD field `Moisture Content 7`"""
self["Moisture Content 7"] = value
@property
def relative_humidity_fraction_8(self):
"""field `Relative Humidity Fraction 8`
| The relative humidity is entered as a fraction.
| Units: dimensionless
| value <= 1.0
Args:
value (float): value for IDD Field `Relative Humidity Fraction 8`
Raises:
ValueError: if `value` is not a valid value
Returns:
float: the value of `relative_humidity_fraction_8` or None if not set
"""
return self["Relative Humidity Fraction 8"]
@relative_humidity_fraction_8.setter
def relative_humidity_fraction_8(self, value=None):
"""Corresponds to IDD field `Relative Humidity Fraction 8`"""
self["Relative Humidity Fraction 8"] = value
@property
def moisture_content_8(self):
"""field `Moisture Content 8`
| Units: kg/m3
Args:
value (float): value for IDD Field `Moisture Content 8`
Raises:
ValueError: if `value` is not a valid value
Returns:
float: the value of `moisture_content_8` or None if not set
"""
return self["Moisture Content 8"]
@moisture_content_8.setter
def moisture_content_8(self, value=None):
"""Corresponds to IDD field `Moisture Content 8`"""
self["Moisture Content 8"] = value
@property
def relative_humidity_fraction_9(self):
"""field `Relative Humidity Fraction 9`
| The relative humidity is entered as a fraction.
| Units: dimensionless
| value <= 1.0
Args:
value (float): value for IDD Field `Relative Humidity Fraction 9`
Raises:
ValueError: if `value` is not a valid value
Returns:
float: the value of `relative_humidity_fraction_9` or None if not set
"""
return self["Relative Humidity Fraction 9"]
@relative_humidity_fraction_9.setter
def relative_humidity_fraction_9(self, value=None):
"""Corresponds to IDD field `Relative Humidity Fraction 9`"""
self["Relative Humidity Fraction 9"] = value
@property
def moisture_content_9(self):
"""field `Moisture Content 9`
| Units: kg/m3
Args:
value (float): value for IDD Field `Moisture Content 9`
Raises:
ValueError: if `value` is not a valid value
Returns:
float: the value of `moisture_content_9` or None if not set
"""
return self["Moisture Content 9"]
@moisture_content_9.setter
def moisture_content_9(self, value=None):
"""Corresponds to IDD field `Moisture Content 9`"""
self["Moisture Content 9"] = value
@property
def relative_humidity_fraction_10(self):
"""field `Relative Humidity Fraction 10`
| The relative humidity is entered as a fraction.
| Units: dimensionless
| value <= 1.0
Args:
value (float): value for IDD Field `Relative Humidity Fraction 10`
Raises:
ValueError: if `value` is not a valid value
Returns:
float: the value of `relative_humidity_fraction_10` or None if not set
"""
return self["Relative Humidity Fraction 10"]
@relative_humidity_fraction_10.setter
def relative_humidity_fraction_10(self, value=None):
"""Corresponds to IDD field `Relative Humidity Fraction 10`"""
self["Relative Humidity Fraction 10"] = value
@property
def moisture_content_10(self):
"""field `Moisture Content 10`
| Units: kg/m3
Args:
value (float): value for IDD Field `Moisture Content 10`
Raises:
ValueError: if `value` is not a valid value
Returns:
float: the value of `moisture_content_10` or None if not set
"""
return self["Moisture Content 10"]
@moisture_content_10.setter
def moisture_content_10(self, value=None):
"""Corresponds to IDD field `Moisture Content 10`"""
self["Moisture Content 10"] = value
@property
def relative_humidity_fraction_11(self):
"""field `Relative Humidity Fraction 11`
| The relative humidity is entered as a fraction.
| Units: dimensionless
| value <= 1.0
Args:
value (float): value for IDD Field `Relative Humidity Fraction 11`
Raises:
ValueError: if `value` is not a valid value
Returns:
float: the value of `relative_humidity_fraction_11` or None if not set
"""
return self["Relative Humidity Fraction 11"]
@relative_humidity_fraction_11.setter
def relative_humidity_fraction_11(self, value=None):
"""Corresponds to IDD field `Relative Humidity Fraction 11`"""
self["Relative Humidity Fraction 11"] = value
@property
def moisture_content_11(self):
"""field `Moisture Content 11`
| Units: kg/m3
Args:
value (float): value for IDD Field `Moisture Content 11`
Raises:
ValueError: if `value` is not a valid value
Returns:
float: the value of `moisture_content_11` or None if not set
"""
return self["Moisture Content 11"]
@moisture_content_11.setter
def moisture_content_11(self, value=None):
"""Corresponds to IDD field `Moisture Content 11`"""
self["Moisture Content 11"] = value
@property
def relative_humidity_fraction_12(self):
"""field `Relative Humidity Fraction 12`
| The relative humidity is entered as a fraction.
| Units: dimensionless
| value <= 1.0
Args:
value (float): value for IDD Field `Relative Humidity Fraction 12`
Raises:
ValueError: if `value` is not a valid value
Returns:
float: the value of `relative_humidity_fraction_12` or None if not set
"""
return self["Relative Humidity Fraction 12"]
@relative_humidity_fraction_12.setter
def relative_humidity_fraction_12(self, value=None):
"""Corresponds to IDD field `Relative Humidity Fraction 12`"""
self["Relative Humidity Fraction 12"] = value
@property
def moisture_content_12(self):
"""field `Moisture Content 12`
| Units: kg/m3
Args:
value (float): value for IDD Field `Moisture Content 12`
Raises:
ValueError: if `value` is not a valid value
Returns:
float: the value of `moisture_content_12` or None if not set
"""
return self["Moisture Content 12"]
@moisture_content_12.setter
def moisture_content_12(self, value=None):
"""Corresponds to IDD field `Moisture Content 12`"""
self["Moisture Content 12"] = value
@property
def relative_humidity_fraction_13(self):
"""field `Relative Humidity Fraction 13`
| The relative humidity is entered as a fraction.
| Units: dimensionless
| value <= 1.0
Args:
value (float): value for IDD Field `Relative Humidity Fraction 13`
Raises:
ValueError: if `value` is not a valid value
Returns:
float: the value of `relative_humidity_fraction_13` or None if not set
"""
return self["Relative Humidity Fraction 13"]
@relative_humidity_fraction_13.setter
def relative_humidity_fraction_13(self, value=None):
"""Corresponds to IDD field `Relative Humidity Fraction 13`"""
self["Relative Humidity Fraction 13"] = value
@property
def moisture_content_13(self):
"""field `Moisture Content 13`
| Units: kg/m3
Args:
value (float): value for IDD Field `Moisture Content 13`
Raises:
ValueError: if `value` is not a valid value
Returns:
float: the value of `moisture_content_13` or None if not set
"""
return self["Moisture Content 13"]
@moisture_content_13.setter
def moisture_content_13(self, value=None):
"""Corresponds to IDD field `Moisture Content 13`"""
self["Moisture Content 13"] = value
@property
def relative_humidity_fraction_14(self):
"""field `Relative Humidity Fraction 14`
| The relative humidity is entered as a fraction.
| Units: dimensionless
| value <= 1.0
Args:
value (float): value for IDD Field `Relative Humidity Fraction 14`
Raises:
ValueError: if `value` is not a valid value
Returns:
float: the value of `relative_humidity_fraction_14` or None if not set
"""
return self["Relative Humidity Fraction 14"]
@relative_humidity_fraction_14.setter
def relative_humidity_fraction_14(self, value=None):
"""Corresponds to IDD field `Relative Humidity Fraction 14`"""
self["Relative Humidity Fraction 14"] = value
@property
def moisture_content_14(self):
"""field `Moisture Content 14`
| Units: kg/m3
Args:
value (float): value for IDD Field `Moisture Content 14`
Raises:
ValueError: if `value` is not a valid value
Returns:
float: the value of `moisture_content_14` or None if not set
"""
return self["Moisture Content 14"]
@moisture_content_14.setter
def moisture_content_14(self, value=None):
"""Corresponds to IDD field `Moisture Content 14`"""
self["Moisture Content 14"] = value
@property
def relative_humidity_fraction_15(self):
"""field `Relative Humidity Fraction 15`
| The relative humidity is entered as a fraction.
| Units: dimensionless
| value <= 1.0
Args:
value (float): value for IDD Field `Relative Humidity Fraction 15`
Raises:
ValueError: if `value` is not a valid value
Returns:
float: the value of `relative_humidity_fraction_15` or None if not set
"""
return self["Relative Humidity Fraction 15"]
@relative_humidity_fraction_15.setter
def relative_humidity_fraction_15(self, value=None):
"""Corresponds to IDD field `Relative Humidity Fraction 15`"""
self["Relative Humidity Fraction 15"] = value
@property
def moisture_content_15(self):
"""field `Moisture Content 15`
| Units: | |
= client.update_connection_profile(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == clouddms.UpdateConnectionProfileRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, future.Future)
def test_update_connection_profile_from_dict():
test_update_connection_profile(request_type=dict)
def test_update_connection_profile_empty_call():
# This test is a coverage failsafe to make sure that totally empty calls,
# i.e. request == None and no flattened fields passed, work.
client = DataMigrationServiceClient(
credentials=ga_credentials.AnonymousCredentials(),
transport='grpc',
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.update_connection_profile),
'__call__') as call:
client.update_connection_profile()
call.assert_called()
_, args, _ = call.mock_calls[0]
assert args[0] == clouddms.UpdateConnectionProfileRequest()
@pytest.mark.asyncio
async def test_update_connection_profile_async(transport: str = 'grpc_asyncio', request_type=clouddms.UpdateConnectionProfileRequest):
client = DataMigrationServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.update_connection_profile),
'__call__') as call:
# Designate an appropriate return value for the call.
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
operations_pb2.Operation(name='operations/spam')
)
response = await client.update_connection_profile(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == clouddms.UpdateConnectionProfileRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, future.Future)
@pytest.mark.asyncio
async def test_update_connection_profile_async_from_dict():
await test_update_connection_profile_async(request_type=dict)
def test_update_connection_profile_field_headers():
client = DataMigrationServiceClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = clouddms.UpdateConnectionProfileRequest()
request.connection_profile.name = 'connection_profile.name/value'
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.update_connection_profile),
'__call__') as call:
call.return_value = operations_pb2.Operation(name='operations/op')
client.update_connection_profile(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert (
'x-goog-request-params',
'connection_profile.name=connection_profile.name/value',
) in kw['metadata']
@pytest.mark.asyncio
async def test_update_connection_profile_field_headers_async():
client = DataMigrationServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = clouddms.UpdateConnectionProfileRequest()
request.connection_profile.name = 'connection_profile.name/value'
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.update_connection_profile),
'__call__') as call:
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(operations_pb2.Operation(name='operations/op'))
await client.update_connection_profile(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert (
'x-goog-request-params',
'connection_profile.name=connection_profile.name/value',
) in kw['metadata']
def test_update_connection_profile_flattened():
client = DataMigrationServiceClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.update_connection_profile),
'__call__') as call:
# Designate an appropriate return value for the call.
call.return_value = operations_pb2.Operation(name='operations/op')
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
client.update_connection_profile(
connection_profile=clouddms_resources.ConnectionProfile(name='name_value'),
update_mask=field_mask_pb2.FieldMask(paths=['paths_value']),
)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0].connection_profile == clouddms_resources.ConnectionProfile(name='name_value')
assert args[0].update_mask == field_mask_pb2.FieldMask(paths=['paths_value'])
def test_update_connection_profile_flattened_error():
client = DataMigrationServiceClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
client.update_connection_profile(
clouddms.UpdateConnectionProfileRequest(),
connection_profile=clouddms_resources.ConnectionProfile(name='name_value'),
update_mask=field_mask_pb2.FieldMask(paths=['paths_value']),
)
@pytest.mark.asyncio
async def test_update_connection_profile_flattened_async():
client = DataMigrationServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.update_connection_profile),
'__call__') as call:
# Designate an appropriate return value for the call.
call.return_value = operations_pb2.Operation(name='operations/op')
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
operations_pb2.Operation(name='operations/spam')
)
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
response = await client.update_connection_profile(
connection_profile=clouddms_resources.ConnectionProfile(name='name_value'),
update_mask=field_mask_pb2.FieldMask(paths=['paths_value']),
)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0].connection_profile == clouddms_resources.ConnectionProfile(name='name_value')
assert args[0].update_mask == field_mask_pb2.FieldMask(paths=['paths_value'])
@pytest.mark.asyncio
async def test_update_connection_profile_flattened_error_async():
client = DataMigrationServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
await client.update_connection_profile(
clouddms.UpdateConnectionProfileRequest(),
connection_profile=clouddms_resources.ConnectionProfile(name='name_value'),
update_mask=field_mask_pb2.FieldMask(paths=['paths_value']),
)
def test_delete_connection_profile(transport: str = 'grpc', request_type=clouddms.DeleteConnectionProfileRequest):
client = DataMigrationServiceClient(
credentials=ga_credentials.AnonymousCredentials(),
transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.delete_connection_profile),
'__call__') as call:
# Designate an appropriate return value for the call.
call.return_value = operations_pb2.Operation(name='operations/spam')
response = client.delete_connection_profile(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == clouddms.DeleteConnectionProfileRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, future.Future)
def test_delete_connection_profile_from_dict():
test_delete_connection_profile(request_type=dict)
def test_delete_connection_profile_empty_call():
# This test is a coverage failsafe to make sure that totally empty calls,
# i.e. request == None and no flattened fields passed, work.
client = DataMigrationServiceClient(
credentials=ga_credentials.AnonymousCredentials(),
transport='grpc',
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.delete_connection_profile),
'__call__') as call:
client.delete_connection_profile()
call.assert_called()
_, args, _ = call.mock_calls[0]
assert args[0] == clouddms.DeleteConnectionProfileRequest()
@pytest.mark.asyncio
async def test_delete_connection_profile_async(transport: str = 'grpc_asyncio', request_type=clouddms.DeleteConnectionProfileRequest):
client = DataMigrationServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.delete_connection_profile),
'__call__') as call:
# Designate an appropriate return value for the call.
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
operations_pb2.Operation(name='operations/spam')
)
response = await client.delete_connection_profile(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == clouddms.DeleteConnectionProfileRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, future.Future)
@pytest.mark.asyncio
async def test_delete_connection_profile_async_from_dict():
await test_delete_connection_profile_async(request_type=dict)
def test_delete_connection_profile_field_headers():
client = DataMigrationServiceClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = clouddms.DeleteConnectionProfileRequest()
request.name = 'name/value'
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.delete_connection_profile),
'__call__') as call:
call.return_value = operations_pb2.Operation(name='operations/op')
client.delete_connection_profile(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert (
'x-goog-request-params',
'name=name/value',
) in kw['metadata']
@pytest.mark.asyncio
async def test_delete_connection_profile_field_headers_async():
client = DataMigrationServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = clouddms.DeleteConnectionProfileRequest()
request.name = 'name/value'
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.delete_connection_profile),
'__call__') as call:
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(operations_pb2.Operation(name='operations/op'))
await client.delete_connection_profile(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert (
'x-goog-request-params',
'name=name/value',
) in kw['metadata']
def test_delete_connection_profile_flattened():
client = DataMigrationServiceClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.delete_connection_profile),
'__call__') as call:
# Designate an appropriate return value for the call.
call.return_value = operations_pb2.Operation(name='operations/op')
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
client.delete_connection_profile(
name='name_value',
)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0].name == 'name_value'
def test_delete_connection_profile_flattened_error():
client = DataMigrationServiceClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Attempting to | |
# -*- mode: python; coding: utf-8 -*-
# Copyright (c) 2018 Radio Astronomy Software Group
# Licensed under the 2-clause BSD License
"""
Class for reading and writing casa measurement sets.
Requires casacore.
"""
import numpy as np
import os
import warnings
import astropy.time as time
from .uvdata import UVData
from .. import utils as uvutils
__all__ = ["MS"]
"""
This dictionary defines the mapping between CASA polarization numbers and
AIPS polarization numbers
"""
pol_dict = {
1: 1,
2: 2,
3: 3,
4: 4,
5: -1,
6: -3,
7: -4,
8: -2,
9: -5,
10: -7,
11: -8,
12: -6,
}
# convert from casa polarization integers to pyuvdata
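# mapping sketch (CASA Stokes enum -> AIPS/uvfits convention used by pyuvdata),
# derived from the dictionary above plus the standard enum definitions:
#   1-4  : Stokes I, Q, U, V  (kept as 1..4)
#   5-8  : RR, RL, LR, LL     -> -1, -3, -4, -2
#   9-12 : XX, XY, YX, YY     -> -5, -7, -8, -6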
class MS(UVData):
"""
Defines a class for reading and writing casa measurement sets.
This class should not be interacted with directly, instead use the read_ms
method on the UVData class.
Attributes
----------
ms_required_extra : list of str
Names of optional UVParameters that are required for MS
"""
ms_required_extra = ["datacolumn", "antenna_positions"]
def _ms_hist_to_string(self, history_table):
"""
Convert a CASA history table into a string for the uvdata history parameter.
Also stores messages column as a list for consistency with other uvdata types
Parameters
----------
history_table : a casa table object
CASA table with history information.
Returns
-------
str
string containing only message column (consistent with other UVDATA
history strings)
str
string encoding the complete casa history table converted with a new
line denoting rows and a ';' denoting column breaks.
"""
# string to store just the usual uvdata history
message_str = ""
# string to store all the casa history info
history_str = ""
# Do not touch the history table if it has no information
if history_table.nrows() > 0:
history_str = (
"APP_PARAMS;CLI_COMMAND;APPLICATION;MESSAGE;"
"OBJECT_ID;OBSERVATION_ID;ORIGIN;PRIORITY;TIME\n"
)
app_params = history_table.getcol("APP_PARAMS")["array"]
cli_command = history_table.getcol("CLI_COMMAND")["array"]
application = history_table.getcol("APPLICATION")
message = history_table.getcol("MESSAGE")
obj_id = history_table.getcol("OBJECT_ID")
obs_id = history_table.getcol("OBSERVATION_ID")
origin = history_table.getcol("ORIGIN")
priority = history_table.getcol("PRIORITY")
times = history_table.getcol("TIME")
# Now loop through columns and generate history string
ntimes = len(times)
tables = [
app_params,
cli_command,
application,
message,
obj_id,
obs_id,
origin,
priority,
times,
]
for tbrow in range(ntimes):
message_str += str(message[tbrow])
newline = ";".join([str(table[tbrow]) for table in tables]) + "\n"
history_str += newline
if tbrow < ntimes - 1:
message_str += "\n"
def is_not_ascii(s):
return any(ord(c) >= 128 for c in s)
def find_not_ascii(s):
output = []
for c in s:
if ord(c) >= 128:
output += c
return output
return message_str, history_str
# ms write functionality to be added later.
def write_ms(self):
"""Write ms: Not yet supported."""
def read_ms(
self,
filepath,
data_column="DATA",
pol_order="AIPS",
background_lsts=True,
run_check=True,
check_extra=True,
run_check_acceptability=True,
strict_uvw_antpos_check=False,
):
"""
Read in a casa measurement set.
Parameters
----------
filepath : str
The measurement set root directory to read from.
data_column : str
name of CASA data column to read into data_array. Options are:
'DATA', 'MODEL', or 'CORRECTED_DATA'
pol_order : str
Option to specify polarizations order convention, options are
'CASA' or 'AIPS'.
background_lsts : bool
When set to True, the lst_array is calculated in a background thread.
run_check : bool
Option to check for the existence and proper shapes of parameters
after reading in the file (the default is True,
meaning the check will be run).
check_extra : bool
Option to check optional parameters as well as required ones (the
default is True, meaning the optional parameters will be checked).
run_check_acceptability : bool
Option to check acceptable range of the values of parameters after
reading in the file (the default is True, meaning the acceptable
range check will be done).
strict_uvw_antpos_check : bool
Option to raise an error rather than a warning if the check that
uvws match antenna positions does not pass.
Raises
------
IOError
If root file directory doesn't exist.
ValueError
If the `data_column` is not set to an allowed value.
If the data have multiple subarrays, are multi-source, or have
multiple spectral windows.
If the data have multiple data description ID values.
"""
try:
import casacore.tables as tables
except ImportError as e: # pragma: no cover
raise ImportError(
"casacore is not installed but is required for "
"measurement set functionality"
) from e
# make sure user requests a valid data_column
if data_column not in ["DATA", "CORRECTED_DATA", "MODEL"]:
raise ValueError(
"Invalid data_column value supplied. Use 'Data','MODEL' or"
" 'CORRECTED_DATA'"
)
if not os.path.exists(filepath):
raise IOError(filepath + " not found")
# set visibility units
if data_column == "DATA":
self.vis_units = "UNCALIB"
elif data_column == "CORRECTED_DATA":
self.vis_units = "JY"
elif data_column == "MODEL":
self.vis_units = "JY"
# limit length of extra_keywords keys to 8 characters to match uvfits & miriad
self.extra_keywords["DATA_COL"] = data_column
# get frequency information from spectral window table
tb_spws = tables.table(filepath + "/SPECTRAL_WINDOW", ack=False)
spw_names = tb_spws.getcol("NAME")
self.Nspws = len(spw_names)
if self.Nspws > 1:
raise ValueError(
"Sorry. Files with more than one spectral"
"window (spw) are not yet supported. A "
"great project for the interested student!"
)
freqs = tb_spws.getcol("CHAN_FREQ")
self.freq_array = freqs
self.Nfreqs = int(freqs.shape[1])
self.channel_width = float(tb_spws.getcol("CHAN_WIDTH")[0, 0])
self.Nspws = int(freqs.shape[0])
self.spw_array = np.arange(self.Nspws)
tb_spws.close()
# now get the data
tb = tables.table(filepath, ack=False)
# check for multiple subarrays. importuvfits does not appear to
# preserve subarray information!
subarray = np.unique(np.int32(tb.getcol("ARRAY_ID")) - 1)
if len(set(subarray)) > 1:
raise ValueError(
"This file appears to have multiple subarray "
"values; only files with one subarray are "
"supported."
)
times_unique = time.Time(
np.unique(tb.getcol("TIME") / (3600.0 * 24.0)), format="mjd"
).jd
# check for multiple data description ids (combination of spw id & pol id)
data_desc_id = np.unique(np.int32(tb.getcol("DATA_DESC_ID") - 1))
if len(set(data_desc_id)) > 1:
raise ValueError(
"This file appears to have multiple data description ID "
"values; only files with one data description ID are "
"supported."
)
self.Ntimes = int(len(times_unique))
# FITS uvw direction convention is opposite ours and Miriad's.
# CASA's convention is unclear: the docs contradict themselves,
# but empirically it appears to match uvfits
# So conjugate the visibilities and flip the uvws:
data_array = np.conj(tb.getcol(data_column))
self.Nblts = int(data_array.shape[0])
flag_array = tb.getcol("FLAG")
# CASA stores data in complex array with dimension NbltsxNfreqsxNpols
if len(data_array.shape) == 3:
data_array = np.expand_dims(data_array, axis=1)
flag_array = np.expand_dims(flag_array, axis=1)
self.data_array = data_array
self.flag_array = flag_array
self.Npols = int(data_array.shape[-1])
# FITS uvw direction convention is opposite ours and Miriad's.
# CASA's convention is unclear: the docs contradict themselves,
# but empirically it appears to match uvfits
# So conjugate the visibilities and flip the uvws:
self.uvw_array = -1 * tb.getcol("UVW")
self.ant_1_array = tb.getcol("ANTENNA1").astype(np.int32)
self.ant_2_array = tb.getcol("ANTENNA2").astype(np.int32)
self.Nants_data = len(
np.unique(
np.concatenate(
(np.unique(self.ant_1_array), np.unique(self.ant_2_array))
)
)
)
self.baseline_array = self.antnums_to_baseline(
self.ant_1_array, self.ant_2_array
)
self.Nbls = len(np.unique(self.baseline_array))
# Get times. MS from cotter are modified Julian dates in seconds
# (thanks to <NAME> for figuring out the proper conversion)
self.time_array = time.Time(
tb.getcol("TIME") / (3600.0 * 24.0), format="mjd"
).jd
# Polarization array
tb_pol = tables.table(filepath + "/POLARIZATION", ack=False)
num_pols = tb_pol.getcol("NUM_CORR")
# get pol setup ID from data description table
tb_data_desc = tables.table(filepath + "/DATA_DESCRIPTION", ack=False)
pol_id = tb_data_desc.getcol("POLARIZATION_ID")[0]
# use an assert because I don't think it's possible to make a file
# that fails this, but I'm not sure
assert num_pols[pol_id] == self.Npols
if np.unique(num_pols).size > 1:
# use getvarcol method, which returns a dict
pol_list = tb_pol.getvarcol("CORR_TYPE")["r" + str(pol_id + 1)][0].tolist()
else:
# list of lists, probably with each list corresponding to SPW.
pol_list = tb_pol.getcol("CORR_TYPE")[pol_id]
self.polarization_array = np.zeros(len(pol_list), dtype=np.int32)
for polnum in range(len(pol_list)):
self.polarization_array[polnum] = int(pol_dict[pol_list[polnum]])
tb_pol.close()
# Integration time
# use first interval and assume rest are constant (though measurement
# set has all integration times for each Nblt )
# self.integration_time=tb.getcol('INTERVAL')[0]
# for some reason, interval ends up larger than the difference between times...
if len(times_unique) == 1:
self.integration_time = np.ones_like(self.time_array, dtype=np.float64)
else:
# assume that all times in the file are the same size
int_time = self._calc_single_integration_time()
self.integration_time = (
np.ones_like(self.time_array, dtype=np.float64) * int_time
)
# open table with antenna location information
tb_ant = tables.table(filepath + "/ANTENNA", ack=False)
tb_obs = tables.table(filepath + "/OBSERVATION", ack=False)
self.telescope_name = tb_obs.getcol("TELESCOPE_NAME")[0]
self.instrument | |
r"""
Kleber tree
A Kleber tree is a tree of weights generated by Kleber's algorithm
[Kleber1]_. The nodes correspond to the weights in the positive Weyl chamber
obtained by subtracting a (non-zero) positive root. The edges are labeled by
the coefficients of the roots of the difference.
AUTHORS:
- <NAME> (2011-05-03): Initial version
REFERENCES:
.. [Kleber1] <NAME>.
Combinatorial structure of finite dimensional representations of Yangians:
the simply-laced case.
Internat. Math. Res. Notices 1997. no. 4. 187-201.
.. [Kleber2] <NAME>.
Finite dimensional representations of quantum affine algebras.
Ph.D. dissertation at University of California Berkeley. 1998.
math.QA/9809087.
.. TODO:: Output the tree as a graph using the graph code
EXAMPLES::
sage: KleberTree(['A', 3], [[3,2], [2,1], [1,1], [1,1]])
Kleber tree of Cartan type ['A', 3] and root weight [2, 1, 2]
sage: KleberTree(['D', 4], [[2,2]])
Kleber tree of Cartan type ['D', 4] and root weight [0, 2, 0, 0]
TESTS::
sage: KT = KleberTree(['A', 3], [[3,2], [2,1], [1,1], [1,1]])
sage: for x in set(KT.list()): x
...
Kleber tree node with weight [1, 0, 3] and upwards edge root [1, 1, 0]
Kleber tree node with weight [0, 2, 2] and upwards edge root [1, 0, 0]
Kleber tree node with weight [2, 1, 2] and upwards edge root [0, 0, 0]
Kleber tree node with weight [2, 0, 0] and upwards edge root [0, 1, 1]
Kleber tree node with weight [0, 0, 2] and upwards edge root [1, 1, 0]
Kleber tree node with weight [0, 1, 0] and upwards edge root [0, 0, 1]
Kleber tree node with weight [3, 0, 1] and upwards edge root [0, 1, 1]
Kleber tree node with weight [0, 1, 0] and upwards edge root [1, 1, 1]
Kleber tree node with weight [1, 1, 1] and upwards edge root [1, 1, 1]
Kleber tree node with weight [0, 0, 2] and upwards edge root [2, 2, 1]
sage: KT = KleberTree(['A', 7], [[3,2], [2,1], [1,1]])
sage: KT
Kleber tree of Cartan type ['A', 7] and root weight [1, 1, 2, 0, 0, 0, 0]
sage: for x in set(KT.list()): x
...
Kleber tree node with weight [1, 0, 1, 0, 1, 0, 0] and upwards edge root [1, 2, 2, 1, 0, 0, 0]
Kleber tree node with weight [0, 0, 1, 0, 0, 1, 0] and upwards edge root [2, 3, 3, 2, 1, 0, 0]
Kleber tree node with weight [1, 1, 2, 0, 0, 0, 0] and upwards edge root [0, 0, 0, 0, 0, 0, 0]
Kleber tree node with weight [2, 0, 1, 1, 0, 0, 0] and upwards edge root [0, 1, 1, 0, 0, 0, 0]
Kleber tree node with weight [1, 0, 0, 2, 0, 0, 0] and upwards edge root [0, 1, 1, 0, 0, 0, 0]
Kleber tree node with weight [0, 0, 3, 0, 0, 0, 0] and upwards edge root [1, 1, 0, 0, 0, 0, 0]
Kleber tree node with weight [0, 0, 0, 1, 1, 0, 0] and upwards edge root [1, 1, 1, 0, 0, 0, 0]
Kleber tree node with weight [0, 1, 1, 1, 0, 0, 0] and upwards edge root [1, 1, 1, 0, 0, 0, 0]
"""
#*****************************************************************************
# Copyright (C) 2011, 2012 <NAME> <<EMAIL>>
#
# Distributed under the terms of the GNU General Public License (GPL)
#
# This code is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# The full text of the GPL is available at:
#
# http://www.gnu.org/licenses/
#*****************************************************************************
from sage.misc.lazy_attribute import lazy_attribute
from sage.misc.latex import latex
from sage.rings.integer import Integer
from sage.structure.parent import Parent
from sage.structure.element import Element
from sage.structure.unique_representation import UniqueRepresentation
from sage.categories.finite_enumerated_sets import FiniteEnumeratedSets
from sage.combinat.root_system.cartan_type import CartanType
from sage.combinat.cartesian_product import CartesianProduct
from sage.graphs.digraph import DiGraph
from sage.graphs.dot2tex_utils import have_dot2tex
class KleberTreeNode(Element):
r"""
A node in the Kleber tree.
This class is meant to be used internally by the Kleber tree class and
should not be created directly by the user.
For more on the Kleber tree and the nodes, see :class:`KleberTree`.
"""
def __init__(self, parent_obj, node_weight, dominant_root, parent_node=None):
r"""
Initialize the tree node.
The dominating root is the up_root which is the difference between the
parent node's weight and this node's weight.
INPUT:
- ``parent_obj`` -- The parent object of this element
- ``node_weight`` -- The weight of this node
- ``dominant_root`` -- The dominating root
- ``parent_node`` -- (default:None) The parent node of this node
TESTS::
sage: RS = RootSystem(['A', 2])
sage: WS = RS.weight_space()
sage: R = RS.root_space()
sage: KT = KleberTree(['A', 2], [[1,1]])
sage: parent = KT(WS.sum_of_terms([(1,5), (2,2)]), R.zero())
sage: parent
Kleber tree node with weight [5, 2] and upwards edge root [0, 0]
sage: parent.parent_node
sage: child = KT(WS.sum_of_terms([(1,3), (2,1)]), R.sum_of_terms([(1,1), (2,2)]), parent)
sage: child
Kleber tree node with weight [3, 1] and upwards edge root [1, 2]
sage: child.parent_node
Kleber tree node with weight [5, 2] and upwards edge root [0, 0]
sage: TestSuite(parent).run()
"""
self.parent_node = parent_node
self.children = []
self.weight = node_weight
self.up_root = dominant_root
Element.__init__(self, parent_obj)
@lazy_attribute
def depth(self):
"""
Return the depth of this node in the tree.
EXAMPLES::
sage: RS = RootSystem(['A', 2])
sage: WS = RS.weight_space()
sage: R = RS.root_space()
sage: KT = KleberTree(['A', 2], [[1,1]])
sage: n = KT(WS.sum_of_terms([(1,5), (2,2)]), R.zero())
sage: n.depth
1
sage: n2 = KT(WS.sum_of_terms([(1,5), (2,2)]), R.zero(), n)
sage: n2.depth
2
"""
depth = 0
cur = self
while cur is not None:
depth += 1
cur = cur.parent_node
return depth
def __cmp__(self, rhs):
r"""
Check whether two nodes are equal.
TESTS::
sage: RS = RootSystem(['A', 2])
sage: WS = RS.weight_space()
sage: R = RS.root_space()
sage: KT = KleberTree(['A', 2], [[1,1]])
sage: n = KT(WS.sum_of_terms([(1,5), (2,2)]), R.zero())
sage: n2 = KT(WS.sum_of_terms([(1,5), (2,2)]), R.zero(), n)
sage: cmp(n2, n)
1
sage: n3 = KT(WS.sum_of_terms([(1,5), (2,2)]), R.zero(), n)
sage: cmp(n2, n3)
0
sage: n3 = KT(WS.sum_of_terms([(1,5), (2,3)]), R.zero(), n)
sage: cmp(n2, n3)
-1
"""
if isinstance(rhs, KleberTreeNode):
if self.depth < rhs.depth:
return -1
elif self.depth > rhs.depth:
return 1
elif self.parent_node is not rhs.parent_node:
return cmp(self.parent_node, rhs.parent_node)
return cmp(self.weight, rhs.weight)
return cmp(type(self), type(rhs))
def _repr_(self):
r"""
Return the string representation of ``self``.
EXAMPLES::
sage: RS = RootSystem(['A', 3])
sage: WS = RS.weight_space()
sage: R = RS.root_space()
sage: KT = KleberTree(['A', 2], [[1,1]])
sage: node = sage.combinat.rigged_configurations.kleber_tree.KleberTreeNode(KT, WS.sum_of_terms([(1,2), (2,1), (3,1)]), R.sum_of_terms([(1,3), (3,3)])); node # indirect doctest
Kleber tree node with weight [2, 1, 1] and upwards edge root [3, 0, 3]
"""
ret_str = "Kleber tree node with weight " + repr(list(self.weight.to_vector()))
ret_str += " and upwards edge root " + repr(list(self.up_root.to_vector()))
return ret_str
def _latex_(self):
r"""
Return latex representation of ``self``.
TESTS::
sage: RS = RootSystem(['A', 3])
sage: WS = RS.weight_space()
sage: R = RS.root_space()
sage: KT = KleberTree(['A', 3], [[3,2], [1,1]])
sage: node = sage.combinat.rigged_configurations.kleber_tree.KleberTreeNode(KT, WS.sum_of_terms([(1,4), (3,1)]), R.zero())
sage: latex(node) # indirect doctest
V_{4\omega_{1}+\omega_{3}}
sage: node = sage.combinat.rigged_configurations.kleber_tree.KleberTreeNode(KT, WS.zero(), R.zero())
sage: latex(node) # indirect doctest
V_{0}
sage: node = sage.combinat.rigged_configurations.kleber_tree.KleberTreeNode(KT, WS.sum_of_terms([(1,2)]), R.zero())
sage: latex(node) # indirect doctest
V_{2\omega_{1}}
"""
retStr = "V_{"
for pair in self.weight:
if pair[1] > 1:
retStr += repr(pair[1]) + r"\omega_{" + repr(pair[0]) + "}+"
elif pair[1] == 1:
retStr += "\omega_{" + repr(pair[0]) + "}+"
if retStr[-1] == '{':
retStr += "0}"
else:
retStr = retStr[:-1] + "}"
return retStr
class KleberTree(Parent, UniqueRepresentation):
r"""
The tree that is generated by Kleber's algorithm.
A Kleber tree is a tree of weights generated by Kleber's algorithm
[Kleber1]_. It is used to generate the set of all admissible rigged
configurations for the simply-laced types `A_n`, `D_n`, `E_6`, `E_7`,
and `E_8`.
The nodes correspond to the weights in the positive Weyl chamber obtained
by subtracting a (non-zero) positive root. The edges are labeled by the
coefficients of the roots, and `X` is a child of `Y` if `Y` is the root
else if the edge label of `Y` to | |
#!/usr/bin/env python
#pylint: disable=line-too-long
'''in_use_do_not_archive
main_mech.py
Causal Cognitive Architecture 3 (CCA3)
June 2021 full rewrite (all previous code deprecated)
Oct 2021 CCA3 Binding Solution paper demonstration version
Note: Where you see "cca3.py" the module for the demonstration version is actually
"cca4.py" (cca3.py and cca4.py do the same thing but cca4.py is the rewrite for this version)
"evaluation cycle" == "processing cycle" == "cycle" == "loop through the architecture"
("machine cycle" is not used as a number of machine cycles depending on hardward for each evaluation cycle)
please see papers listed in cca3.py module docstring for more details
about the operation of the CCA3
cycles(d, g, h, m):
input parameters:
d, g, h, m (data and method structures instantiations)
returns:
#returns d, g, h, m since they are modified by this method
functional overview of python modules:
cca3.py - intro screens, user enters hyperparameters, links to relevant versions of simulations
main_mech.py - runs CCA3 evaluation cycles
hdata.py - much of latest simulation is in the NavMod class, instantiated as 'h' in cca3.py
ddata.py - instantiated as 'd' in cca3.py, used previous simulations but portions still used
gdata.py - instantiated as 'g' in cca3.py, used previous simulations but portions still used
constants.py - holds constants used by other modules
requirements.txt:
provided simply here as a roadmap to the modules in the CCA3
please check with cca4.py to make sure latest requirements
'''
##START PRAGMAS
#
# pylint: disable=invalid-name
# prefer not to use snake_case style for very frequent data structure or small temp variables
# pylint: disable=bare-except
# prefer in some code areas to catch any exception rising
# pylint: disable=too-many-lines
# pylint: disable=too-many-branches
# pylint: disable=too-many-statements
# pylint: disable=too-many-arguments
# pylint: disable=too-many-return-statements
# pylint: disable=too-many-locals
# prefer to use comfortable number of branches and statements, especially in user menu communication
# pylint: disable=pointless-string-statement
#
##END PRAGMAS
## START IMPORTS START IMPORTS
#
##standard imports -- being used by this module
import random
import sys
# import os.path
# import copy
import msvcrt
# import math
# import pdb
# import time
# import pickle
# import threading
# import logging
# from io import StringIO
#
##PyPI imports -- being used by this module
try:
import numpy as np # type: ignore
# justification: AwesomePython 9.7, industry standard library
# import pytorch
# justification: AwesomePython 9.9, industry standard library
# from pympler import muppy
# justification: GitHub 525 stars, last commit 6 mos ago
# from fuzzywuzzy import fuzz #type: ignore
from fuzzywuzzy import process # type: ignore
# justification: AwesomePython 8.9
from termcolor import colored
# justification: AwesomePython not rated; libraries.io -- 0 dependencies, stable since 2011
#from icecream import ic # type: ignore
#justification: Awesome rating 7.9
except ImportError:
print("\nprogram will end -- main_mech.py module unable to import a PyPI module")
print("please check requirements.txt and install all required dependencies")
sys.exit()
#
##non-PyPI third-party imports -- being used by this module
try:
pass
# justification/ Awesome/LibHunt ratings for non-pypi imports: n/a
except ImportError:
print(
"program will end -- main_mech.py module unable to import a third-party module"
)
print("please check requirements.txt and install all required dependencies")
sys.exit()
#
##CCA3 module imports -- being used by this module
try:
from constants import BINDING
# import hdata
# from constants import *
# import palimpsest #deprecated June 2021
except ImportError:
print("program will end -- main_mech.py module unable to import a cca3 module")
print("please check requirements.txt and install all required dependencies")
sys.exit()
#
###END IMPORTS END IMPORTS
##START MAIN_MECH.CYCLES()
#
def cycles(d, g, h, m):
'''in_use_do_not_archive
loop of evaluation cycles until the CCA3 completes or fails or
times out of its mission/scene, and then returns back to the calling method cca3.main_eval()
-'evaluation cycles' == 'processing cycles' == 'cycles' == loop through the architecture
sensory --> processing --> output
-'sensory scene' -- the visual, auditory, etc sensory inputs presented to the cca3 agent
-note: sensory input data can stay the same the next evaluation cycle (i.e., can have multiple evaluation
cycles for one scene of sensory input data) or perhaps there will be new sensory input scene the next evaluation cycle
-cycles(d, g, h, m) is called by cca3.main_eval(): loop:
input parameters:
d, g, h, m (data and method structures instantiations)
returns:
#returns d, g, h, m since they are modified by this method
'''
# START of EVALN CYCLE LOOP
# -->SENSORY INPUTS -> CCA3 -> MOTOR OUTPUTS ---REPEAT-^
h.exit_cycles = False
for d.evaluation_cycles in range(sys.maxsize):
display_cycle_info(d, g, h)
autonomic_check(g)
next_scene_from_envrt = (h.envrt_interaction_and_input_sensory_vectors_shaping_modules(g))
h.input_sensory_vectors_association_modules(g)
h.sequential_error_correcting_module(g)
h.object_segmentation_module(g)
h.navigation_module(d, g)
h.output_vector_assocation_module(d, g, '', '', 0, 0, 0, -1)
if decide_to_exit(next_scene_from_envrt, d, g, h):
d, g, h = update_expected_values(d, g, h)
return d, g, h, m
# LOOP AGAIN loop for another evaluation cycle --------------^
return None
##END MAIN_MECH.CYCLES()
##START OTHER METHODS START OTHER METHODS
#
def display_cycle_info(d, g, h):
'''in_use_do_not_archive
displays evaluation cycle statistics to the user
'''
if BINDING:
print(colored('\nCycles of Equations will now start', 'cyan'))
print(colored('Recall that a "cycle" is a cycle of all the equations', 'cyan'))
print(colored('Then in the next cycle, the equations repeat although not re-initialized', 'cyan'))
print(colored('"Scenes" (i.e., "sensory scenes") are a new set of visual, auditory, etc', 'cyan'))
print(colored('stimuli being presented to the CCA3. Sensing of a new scene occurs at the', 'cyan'))
print(colored('start of the equations, i.e., with a new cycle. However.... cycles can repeat', 'cyan'))
print(colored('while the same sensory scene is there, i.e., can have multiple cycles for each sensory scene', 'cyan'))
if h.current_sensory_scene < 0:
print("resetting a negative completed environment back to 0th scene")
h.current_sensory_scene = 0
print(f"\nSTARTING EVALUATION CYCLE # {d.evaluation_cycles} (run # {g.mission_counter} since simulation started)(runs start at '1')")
g.large_letts_display("Cycle # " + str(d.evaluation_cycles))
g.large_letts_display("Scene # " + str(h.current_sensory_scene))
g.show_architecture_related("cca3_architecture.jpg", "CCA3 architecture")
#g.fast_input("Please press ENTER to continue", True)
return True
def decide_to_exit(next_scene_from_envrt, d, g, h, one_loop = True):
'''in_use_do_not_archive
makes decision to exit from main_mech.cycles() main loop
'''
print(colored("\n\n\nCCA3 Binding paper software walk-through note:", 'blue'))
if one_loop:
print(colored("Software currently set for exit after one loop, like in paper. (Please set one_loop==False to avoid this.)", 'blue'))
print(colored("cycles() loop: if (.....): update expected values, return d,g,h,m and exit back to cca4.main_eval())\n\n", 'blue'))
g.fast_input("Press ENTER to continue...\n")
#one_loop mode is intended to allow one loop through cycle() and then return back to main_eval()
if one_loop:
return True
#exit if no more scenes to be served, i.e., 'mission' in this group of sensory scenes is complete
if next_scene_from_envrt < 0:
h.exit_reason = " no more scenes to be served"
return True
#exit if h.exit_cycles becomes set (used previously with cca1 after completion of a mission, but still may be in use in some routines)
if h.exit_cycles:
h.exit_reason = " h.exit_cycles was set"
return True
#exit if have already reached the maximum cycles specified by g.max_evln_cycles_now_exit (e.g., 20 cycles or whatever setting is at this time)
if d.evaluation_cycles > g.max_evln_cycles_now_exit:
h.exit_reason = "d.evaluation_cycles > g.max_evln_cycles_now_exit"
return True
#exit if have almost reached sys.maxsize - 100 which actually is 9,223,372,036,854,775,707 in current test
if d.evaluation_cycles > sys.maxsize - 100:
h.exit_reason = "d.evaluation_cycles > sys.maxsize - 100"
return True
#otherwise return False so exit and return from main_mech.cycles() will not occur now
return False
def setup_user_view(d, g, h) -> bool:
'''CCA3 ver
for the benefit of the user (the CCA3 does not have this information) --
set up the positions of the lost hiker and the CCA3; this routine
will be undergoing active development to allow automatic activation of a variety
of environments, not just the forest one
'''
print("for benefit of user (CCA3 does not get this information):")
print("Let's put the CCA1 in the corner at m,n = (1,1): ")
d.set_cca1(1, 1)
print("\nNow let's put the lost hiker at position m,n = (4,2): ")
print("The CCA1, of course, does not have access to hiker position")
d.set_hiker(4, 2)
set_goal_and_hippo(d, g, h)
return True
def autonomic_check(g) -> bool:
'''in_use_do_not_archive
CCA3 ver
central autonomic module is checked for priority operations
before external sensory input vectors are processed
note: peripheral autonomic modules cannot be checked until
the first stages of external sensory input vectors in the
evaluation cycle
'''
print(colored("\n\n\nCCA3 Binding paper software walk-through note:", 'blue'))
print(colored("cycles() loop: autonomic_check()\n\n", 'blue'))
g.fast_input("Press ENTER to continue...\n")
g.large_letts_display("AUTONOMIC MODULE SIMULATION")
g.fast_input("\nPress ENTER to continue...\n")
autonomic_sleep_wake(g)
print(colored("Autonomic system was not modeled in the equations of the CCA3 Binding | |
"""
Test the quality of inference, measured by num edges + num mutations, and (if
using simulated data) the KC distance
"""
import os.path
import argparse
import collections
import itertools
import multiprocessing
import re
import time
import logging
import json
import msprime
import tskit
import numpy as np
import stdpopsim
import tsinfer
from error_generation import add_errors
import intervals
logging.basicConfig()
logger = logging.getLogger(__name__)
def make_switch_errors(sample_data, switch_error_rate=0, random_seed=None, **kwargs):
raise NotImplementedError
def rnd_kc(params):
ts, random_seed = params
s = tskit.Tree.generate_star(
ts.num_samples, span=ts.sequence_length, sample_lists=True)
kc = 0
for tree in ts.trees(sample_lists = True):
kc += tree.span * tree.kc_distance(s.split_polytomies(
random_seed=random_seed + tree.index, sample_lists=True))
return kc / ts.sequence_length
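# Hedged usage sketch: rnd_kc takes a single (tree_sequence, seed) tuple so that it
# can be fed directly to multiprocessing.Pool.imap_unordered (as done below); it can
# equally be called serially, e.g. to average over a few seeds without a pool.
def _example_rnd_kc_serial(ts, seeds=(1, 2, 3)):
    return sum(rnd_kc((ts, s)) for s in seeds) / len(seeds)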
def simulate_stdpopsim(
species,
model,
contig,
num_samples,
mutation_file=None,
seed=123,
skip_existing=False,
num_procs=1,
):
base_fn = f"{model}_{contig}_n{num_samples}"
tree_fn = f"{base_fn}_seed{seed}"
logger.info(f"Using {species}:{contig} from stdpopsim using the {model} model")
if skip_existing and os.path.exists(tree_fn + ".trees"):
logger.info(
f"Simulation file {tree_fn}.trees already exists, returning that.")
return base_fn, tree_fn
sample_data = None
species = stdpopsim.get_species(species)
model = species.get_demographic_model(model)
num_pops = model.num_sampling_populations
if num_samples < num_pops or num_samples % num_pops != 0:
raise ValueError(
f"num_samples must be an integer multiple of {num_pops} "
f"(or 2 x {num_pops} if diploid sequencing error is injected)"
)
pop_n = num_samples // num_pops
logger.info(
f"Simulating {num_pops}x{pop_n} samples, seed {seed}, file prefix '{tree_fn}'."
)
contig = species.get_contig(contig)
l = contig.recombination_map.get_sequence_length()
if mutation_file is not None:
logger.debug(f"Loading {mutation_file}")
sample_data = tsinfer.load(mutation_file)
if sample_data.sequence_length != l:
raise ValueError(
f"Mismatching sequence_length between simulation and {mutation_file}")
# Reduce mutation rate to 0, as we will insert mutations later
contig = stdpopsim.Contig(
mutation_rate=0,
recombination_map=contig.recombination_map,
genetic_map=contig.genetic_map,
)
r_map = contig.recombination_map
assert len(r_map.get_rates()) == 2 # Ensure a single rate over chr
samples = model.get_samples(*([pop_n] * num_pops))
engine = stdpopsim.get_engine('msprime')
ts = engine.simulate(model, contig, samples, seed=seed)
tables = ts.dump_tables()
if sample_data is not None:
pos = sample_data.sites_position[:]
logger.info(
f"Inserting {len(pos)} mutations at variable sites from {mutation_file}")
for tree in ts.trees():
positions = pos[np.logical_and(pos>=tree.interval[0], pos<tree.interval[1])]
if len(positions) == 0:
continue
muts = list(zip(
np.random.uniform(0, tree.total_branch_length, size=len(positions)),
positions))
muts.sort()
tot = 0
# place a mutation on a random branch, proportional to branch length
try:
for n in tree.nodes():
tot += tree.branch_length(n)
while muts[0][0] < tot:
_, position = muts.pop(0)
s = tables.sites.add_row(position=position, ancestral_state="0")
tables.mutations.add_row(node=n, site=s, derived_state="1")
except IndexError:
# No more mutations - go to next tree
continue
tables.sort()
logger.debug(
f"Inserted mutations at density {ts.num_mutations/ts.sequence_length}")
interval = [int(l * 2/20), int(l * 2/20)+1e7] # 10Mb near the start, not centromeric
tables.keep_intervals([interval])
tables.trim()
logger.debug(
f"Cut down tree seq to {interval} ({tables.sites.num_rows} sites) for speed")
# Add info to the top-level metadata
user_data = {}
logger.info("Calculating the kc distance of the simulation against a flat tree")
star_tree = tskit.Tree.generate_star(
ts.num_samples, span=tables.sequence_length, record_provenance=False)
user_data['kc_max'] = tables.tree_sequence().kc_distance(star_tree.tree_sequence)
kc_array = []
max_reps = 100
ts = tables.tree_sequence()
logger.info(
f"Calculating KC distance of the sim against at most {max_reps} * {ts.num_trees}"
f" random trees using {num_procs} parallel threads. This could take a while."
)
seeds = range(seed, seed + max_reps)
with multiprocessing.Pool(num_procs) as pool:
for i, kc in enumerate(pool.imap_unordered(
rnd_kc, zip(itertools.repeat(ts), seeds))
):
kc_array.append(kc)
if i > 10:
se_mean = np.std(kc_array, ddof=1)/np.sqrt(i)
            # break if SEM < 1/100th of mean KC. This can take a long time.
if se_mean/np.average(kc_array) < 0.01:
logger.info(
f"Stopped after {i} replicates as kc_max_split deemed accurate.")
break
user_data['kc_max_split'] = np.average(kc_array)
if tables.metadata_schema != tskit.MetadataSchema({"codec":"json"}):
if tables.metadata:
raise RuntimeError("Metadata already exists, and is not JSON")
tables.metadata_schema = tskit.MetadataSchema({"codec":"json"})
tables.metadata = {}
tables.metadata = { **tables.metadata, "user_data": user_data}
tables.tree_sequence().dump(tree_fn + ".trees")
return base_fn, tree_fn
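# Hedged example call (not from the original script): "HomSap", "OutOfAfrica_3G09"
# and "chr20" are standard stdpopsim identifiers, and num_samples must be a
# multiple of the model's number of sampling populations (3 here, so 6 works).
def _example_simulate_stdpopsim():
    return simulate_stdpopsim(
        "HomSap", "OutOfAfrica_3G09", "chr20", 6, seed=1, skip_existing=True)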
def test_sim(seed):
ts = msprime.simulate(
10,
length=1000,
mutation_rate=1e-2,
recombination_rate=1e-2,
random_seed=seed)
return ts, f"test_sim{seed}"
def setup_sampledata_from_simulation(
prefix, random_seed, err=0, num_threads=1,
cheat_breakpoints=False, use_sites_time=False, skip_existing=False):
"""
Take the results of a simulation and return a sample data file, some reconstructed
ancestors, a recombination rate array, a suffix to append to the file prefix, and
the original tree sequence.
If 'err' is 0, we do not inject any errors into the haplotypes. Otherwise
we add empirical sequencing error and ancestral allele polarity error
If "cheat_recombination" is True, multiply the recombination_rate for known
recombination locations from the simulation by 20
If "use_sites_time" is True, use the times
If "skip_existing" is True, and the sample_data file and ancestors_file that were
going to be generated already exist, then skip the actual simulation and just return
those files and their data.
"""
suffix = ""
ts = tskit.load(prefix + ".trees")
plain_samples = tsinfer.SampleData.from_tree_sequence(
ts, use_sites_time=use_sites_time)
if cheat_breakpoints:
suffix += "cheat_breakpoints"
logger.info("Cheating by using known breakpoints")
if use_sites_time:
suffix += "use_times"
logger.info("Cheating by using known times")
if err == 0:
sd_path = prefix + suffix + ".samples"
if skip_existing and os.path.exists(sd_path):
logger.info(f"Simulation file {sd_path} already exists, loading that.")
sd = tsinfer.load(sd_path)
else:
sd = plain_samples.copy(path=sd_path) # Save the samples file
sd.finalise()
else:
logger.info("Adding error")
suffix += f"_ae{err}"
sd_path = prefix + suffix + ".samples"
if skip_existing and os.path.exists(sd_path):
logger.info(f"Sample file {sd_path} already exists, loading that.")
sd = tsinfer.load(sd_path)
else:
error_file = add_errors(
plain_samples,
err,
random_seed=random_seed)
sd = error_file.copy(path=prefix + suffix + ".samples")
if use_sites_time:
# Sites that were originally singletons have time 0, but could have been
# converted to inference sites when adding error. Give these a nonzero time
sites_time = sd.sites_time
sites_time[sites_time == 0] = np.min(sites_time[sites_time > 0])/1000.0
sd.sites_time[:] = sites_time
sd.finalise()
for attribute in ('sequence_length', 'num_samples', 'num_sites'):
if getattr(sd, attribute) != getattr(ts, attribute):
raise ValueError(
f"{attribute} differs between original ts and sample_data: "
f"{getattr(sd, attribute)} vs {getattr(ts, attribute)}")
anc_path = prefix + suffix + ".ancestors"
if skip_existing and os.path.exists(anc_path):
logger.info(f"Ancestors file {anc_path} already exists, loading that.")
anc = tsinfer.load(anc_path)
else:
anc = tsinfer.generate_ancestors(
sd,
num_threads=num_threads,
path=anc_path,
)
logger.info("GA done")
inference_pos = anc.sites_position[:]
    rho = 1e-8 # shouldn't matter what this is - it is relative to the mismatch ratio
if cheat_breakpoints:
raise NotImplementedError("Need to make a RateMap with higher r at breakpoints")
breakpoint_positions = np.array(list(ts.breakpoints()))
inference_positions = anc.sites_position[:]
breakpoints = np.searchsorted(inference_positions, breakpoint_positions)
# Any after the last inference position must be junked
# (those before the first inference position make no difference)
breakpoints = breakpoints[breakpoints != len(rho)]
rho[breakpoints] *= 20
return sd.path, anc.path, rho, suffix, ts
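# Hedged usage sketch showing how the five return values are typically unpacked;
# the prefix below is an invented placeholder for a <prefix>.trees file on disk.
def _example_setup_from_simulation():
    sample_path, anc_path, rec_rate, suffix, sim_ts = setup_sampledata_from_simulation(
        "OutOfAfrica_3G09_chr20_n6_seed1", random_seed=1, err=0, skip_existing=True)
    return sample_path, anc_path, rec_rate, suffix, sim_ts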
def setup_sample_file(base_filename, args, num_threads=1):
"""
Return a sample data file, the ancestors file, a corresponding recombination rate
(a single number or a RateMap), a prefix to use for files, and None
"""
gmap = args.genetic_map
sd = tsinfer.load(base_filename + ".samples")
anc = tsinfer.generate_ancestors(
sd,
num_threads=num_threads,
path=base_filename + ".ancestors",
)
logger.info("GA done")
inference_pos = anc.sites_position[:]
match = re.search(r'(chr\d+)', base_filename)
if match or gmap is not None:
if gmap is not None:
try:
rho=float(gmap)
logger.info(f"Using rate {gmap} for the recombination rate")
except ValueError:
rho = intervals.read_hapmap(gmap)
logger.info(f"Using file from {gmap} for the recombination map")
else:
chr = match.group(1)
logger.info(f"Using {chr} from HapMapII_GRCh37 for the recombination map")
gmap = stdpopsim.get_species("HomSap").get_genetic_map(id="HapMapII_GRCh37")
if not gmap.is_cached():
gmap.download()
filename = os.path.join(gmap.map_cache_dir, gmap.file_pattern.format(id=chr))
rho = intervals.read_hapmap(filename)
else:
        rho = 1e-8 # shouldn't matter what this is - it is relative to the mismatch ratio
#if np.any(d==0):
# w = np.where(d==0)
# raise ValueError("Zero recombination rates at", w, inference_pos[w])
return sd.path, anc.path, rho, "", None
# Parameters passed to each subprocess
Params = collections.namedtuple(
"Params",
"ts_file, sample_file, anc_file, rec_rate, ma_mis_ratio, ms_mis_ratio, precision, "
"num_threads, kc_max, kc_max_split, seed, error, source, skip_existing"
)
Results = collections.namedtuple(
"Results",
"arity_mean, arity_var, edges, error, kc_max_split, kc_max, kc_poly, kc_split, "
"muts, n, num_sites, min_num_muts, revised_num_muts, num_trees, precision, "
"proc_time, ma_mis_ratio, ms_mis_ratio, seed, sim_ts_min_bytes, sim_ts_bytes, "
"source, ts_bytes, ts_path"
)
def run(params):
"""
Run a single inference, with the specified rates
"""
precision = params.precision
logger.info(
f"Starting {params.ma_mis_ratio} {params.ms_mis_ratio}. Precision {precision}"
)
prefix = None
assert params.sample_file.endswith(".samples")
assert params.anc_file.endswith(".ancestors")
samples = tsinfer.load(params.sample_file)
ancestors = tsinfer.load(params.anc_file)
start_time = time.process_time()
prefix = params.sample_file[0:-len(".samples")]
inf_prefix = "{}_rma{:g}_rms{:g}_p{}".format(
prefix,
params.ma_mis_ratio,
params.ms_mis_ratio,
precision)
ats_path = inf_prefix + ".atrees"
if params.skip_existing and os.path.exists(ats_path):
logger.info(f"Ancestors ts file {ats_path} already exists, loading that.")
inferred_anc_ts = tskit.load(ats_path)
prov = json.loads(inferred_anc_ts.provenances()[-1].record.encode())
if ancestors.uuid != prov['parameters']['source']['uuid']:
logger.warning(
"The loaded ancestors ts does not match the ancestors file. "
"Checking the site positions, and will abort if they don't match!")
# We might be re-running this, but the simulation file is the same
            # So double-check that the positions in
# -*- coding: UTF-8 -*-
#Load all plugins
import glob
import importlib
import os
import traceback
import functools
from inspect import getgeneratorstate
from pluginsinterface.TypeExtension import PlugMsgTypeEnum, PlugMsgReturn
from pluginsinterface.EventHandling import StandEven
from pluginsinterface.PlugSession import sessionManagement, SessionManagement, Session
from pluginsinterface.PlugFilter import PlugArgFilter, PlugMsgFilter
from pluginsinterface.PermissionGroup import permRegisterGroup, permRegisterPerm
from pluginsinterface.PermissionGroup import legalPermHas, authRegisterDefaultPerm
from pluginsinterface.PermissionGroup import permDefaultInit
from helper import getlogger
import config
logger = getlogger(__name__)
"""
插件兼容层
*插件管理大师(划掉)
支持发送事件
"""
#Plugin list (indexed by modulename)
PluginsManageList = {}
#Plugin list (indexed by name)
PluginsManageNameList = {}
#Plugin lists by priority level
pluglist = []
for _ in range(10):
    pluglist.append(list())
#Startup callback list
plugstartfuncs = []
PLUGADMIN = config.PLUGADMIN
DEBUG = config.DEBUG
"""
插件相关类
"""
class PluginsManage:
"""
插件管理器
实例化后将自动加入插件列表
删除时自动从插件列表中移除
:param modulename: 插件所属模块
:param name: 插件名称,重名时自动更改名称直至名称不重复(名称后加序号)
:param version: 插件版本
:param auther: 插件作者
:param des: 插件描述
:param level: 插件优先级(0-9),0为最高优先级
"""
#module名,昵称,描述,优先级(0-9,决定插件何时处理消息)
def __init__(self,
modulename: str,
name: str,
groupname: str,
version: str,
auther: str,
des: str,
level: int = 4):
global pluglist, PluginsManageList, PluginsManageNameList
if level < 0 or level > 9:
raise Exception('插件优先级设置异常!')
self.__dict__['lock'] = False
self.open = True
self.modulename = modulename
self.name = name
self.groupname = groupname
self.version = version
self.auther = auther
self.des = des
self.funcs = []
self.perfuncs = []
self.level = level
if self.modulename in PluginsManageList:
raise Exception('模块重复注册!')
if self.name in PluginsManageNameList:
logger.info('插件名称重复{0}'.format(self.name))
i = 1
while True:
self.name = "{0}{1}".format(name, i)
if self.name not in PluginsManageNameList:
                    #ensure the name is unique
logger.info('插件名称重设为{0}'.format(self.name))
break
pluglist[self.level].append(self)
PluginsManageNameList[self.name] = self
PluginsManageList[self.modulename] = self
self.__dict__['lock'] = True
def __del__(self):
global pluglist, PluginsManageList, PluginsManageNameList
pluglist[self.level].remove(self)
del PluginsManageNameList[self.name]
del PluginsManageList[self.modulename]
def __hash__(self):
return hash(self.modulename)
def __eq__(self, other):
return self.modulename == other.modulename
def __setattr__(self, key, value):
if self.__dict__['lock']:
raise Exception('当前PluginsManage已锁定')
self.__dict__[key] = value
def __delattr__(self, key):
if self.__dict__['lock']:
raise Exception('当前PluginsManage已锁定')
        if key == 'lock':
raise Exception('StandEven类不可删除lock属性')
del self.__dict__[key]
def changelevel(self, level: int = 4):
global pluglist
if level < 0 or level > 9:
raise Exception('插件优先级设置异常!')
pluglist[self.level].remove(self)
self.level = level
pluglist[self.level].append(self)
@staticmethod
def baleFuncInfo(module, funcname, msgfilter, argfilter, des, limitMsgType,
allow_anonymous, at_to_me):
return (module, funcname, msgfilter, argfilter, des, limitMsgType,
allow_anonymous, at_to_me)
def addPerFunc(self, func, checkmodule=True) -> bool:
"""
添加前置函数,请勿手动调用
"""
if checkmodule and func.__module__ != self.modulename:
return False
self.perfuncs.append(func)
return True
def addFunc(self, func, funcinfo: tuple, checkmodule=True) -> bool:
"""
添加函数,请勿手动调用
"""
if checkmodule and func.__module__ != self.modulename:
return False
self.funcs.append((func, funcinfo))
return True
def _eventArrives(self, session: Session) -> PlugMsgReturn:
        #Plugin event handling; the return value decides whether the message is passed on: PlugMsgReturn.Intercept blocks it, PlugMsgReturn.Ignore lets it through
if not self.open:
return PlugMsgReturn.Ignore
session.sourcefiltermsg = session.messagestand.strip()
session.filtermsg = session.sourcefiltermsg
for func in self.perfuncs:
session.setScope(self.name)
try:
res = func(session)
if res == PlugMsgReturn.Intercept:
return PlugMsgReturn.Ignore
except:
                #Handle plugin exceptions (an exception in a pre-processing function blocks the message from propagating any further - i.e. later handler functions are skipped)
                s = traceback.format_exc(limit=10)
                logger.error(s)
                logger.error(
                    "插件函数执行异常!请检查插件->{name}-{version}-{auther}".format(
                        name=self.name, version=self.version, auther=self.auther))
return PlugMsgReturn.Ignore
for fununit in self.funcs:
session.setScope(self.name)
try:
session.filtermsg = session.sourcefiltermsg
res = fununit[0](session)
if res == PlugMsgReturn.Intercept:
return res
except:
                #Handle plugin exceptions
                s = traceback.format_exc(limit=10)
                logger.error(s)
                logger.error(
                    "插件函数执行异常!请检查插件->{name}-{version}-{auther}".format(
                        name=self.name, version=self.version, auther=self.auther))
return PlugMsgReturn.Ignore
session.delScope()
return PlugMsgReturn.Ignore
async def async_eventArrives(self, session: Session) -> PlugMsgReturn:
        #Plugin event handling; the return value decides whether the message is passed on: PlugMsgReturn.Intercept blocks it, PlugMsgReturn.Ignore lets it through
if not self.open:
return PlugMsgReturn.Ignore
session.sourcefiltermsg = session.messagestand.strip()
session.filtermsg = session.sourcefiltermsg
for func in self.perfuncs:
session.setScope(self.name)
try:
res = await func(session)
if res == PlugMsgReturn.Intercept:
return PlugMsgReturn.Ignore
except:
                #Handle plugin exceptions (an exception in a pre-processing function blocks the message from propagating any further - i.e. later handler functions are skipped)
s = traceback.format_exc(limit=10)
logger.error(s)
logger.error(
"插件函数执行异常!请检查插件->{name}-{version}-{auther}".format(
name=self.name,
version=self.version,
auther=self.auther))
return PlugMsgReturn.Ignore
for fununit in self.funcs:
session.setScope(self.name)
try:
session.filtermsg = session.sourcefiltermsg
res = await fununit[0](session)
if res == PlugMsgReturn.Intercept:
return res
except:
                #Handle plugin exceptions
s = traceback.format_exc(limit=10)
logger.error(s)
logger.error(
"插件函数执行异常!请检查插件->{name}-{version}-{auther}".format(
name=self.name,
version=self.version,
auther=self.auther))
return PlugMsgReturn.Ignore
session.delScope()
return PlugMsgReturn.Ignore
def getPlugDes(self, simple=True) -> str:
"""
获取插件描述
:param simple: 为True时返回简化消息(默认True)
:return: 字符串
"""
msg = "{name} V{version} --{auther}\n描述:{des}\n".format(
name=self.name,
version=self.version,
auther=self.auther,
des=self.des,
)
        if not simple:
            msg += '所属模块:{module}\n插件优先级:{level}'.format(module=self.modulename,
                                                          level=self.level)
return msg
def getPlugMinDes(self, displaynone: bool = False) -> str:
"""
获取插件超简略描述
:return: 字符串
"""
status = ''
if displaynone:
status = ('启用:' if self.open else '禁用:')
msg = "{status}{name}:{des}".format(status=status,
name=self.name,
des=self.des.strip().replace(
'\n', ' ')[:15])
return msg
def __getFuncDes(self, funcinfo: tuple, simple=True):
if simple:
return funcinfo[4]
return "{1}|{2}|{4}|{5}|{6}|{7}".format(*funcinfo)
def getPlugFuncsDes(self, page: int = 1, simple=True):
"""
获取插件函数描述
:param simple: 为True时返回简化消息(默认True)
:return: 字符串
"""
msg = '' if simple else '函数名|消息过滤器|描述|限制标识|允许匿名|AtBot'
page = page - 1
i = 0
lll = len(self.funcs)
if page > int(lll / 5):
page = 0
for fununit in self.funcs:
if i >= page * 5 and i < (page + 1) * 5:
msg += "\n" + self.__getFuncDes(fununit[1], simple=simple)
i += 1
msg += '\n当前页{0}/{1} (共{2}个命令)'.format(page + 1, int(lll / 5) + 1, lll)
return msg
def switchPlug(self, switch: bool = None):
"""
切换插件状态
全局切换,末端控制请使用权限模块控制
:param switch: False时插件关闭,为True时开启,为空时开则关关则开
:return: 字符串
"""
if not switch:
switch = not self.open
self.__dict__['open'] = switch
def registerPerm(self,
perm: str,
defaultperm: PlugMsgTypeEnum = PlugMsgTypeEnum.allowall,
des: str = '无描述'):
if not legalPermHas(self.groupname, perm):
permRegisterPerm(self.groupname, perm, des)
authRegisterDefaultPerm(self.groupname, perm, defaultperm)
"""
插件注册等静态函数
"""
def plugGet(modulename: str) -> PluginsManage:
"""
通过模块名称获取插件管理器
:param name: 插件名
:param level: 插件消息处理优先级,0-9级,0级最优先
:param modulename: 手动指定注册的插件模块名,默认为被装饰函数所在模块,一般不需要指定
:return: Func
"""
global PluginsManageList
if modulename in PluginsManageList:
return PluginsManageList[modulename]
return None
def plugLoads(plugpath='plugins'):
"""
导入指定路径或者目录下的插件,并返回插件信息
:param plugpath: 要导入的插件路径(相对路径),可以导入指定目录下的py插件
:return: None
"""
global pluglist, plugstartfuncs
    #Load the plugins
plugsearchpath = os.path.join(plugpath, '**.py')
#modules_dict = {}
module_paths = glob.glob(plugsearchpath)
plugcount = 0
for path in module_paths:
module_name = path.replace(os.sep, '.')[:-3]
try:
importlib.import_module(module_name)
except:
            #Handle plugin exceptions
s = traceback.format_exc(limit=10)
logger.error(s)
logger.error("模块加载异常!请检查模块->{0}".format(module_name))
else:
plugcount += 1
if DEBUG:
logger.info("成功加载插件:" + module_name)
logger.info("插件加载完毕,共加载插件{0}".format(plugcount))
for func in plugstartfuncs:
try:
func(plugGet(func.__module__))
except:
            #Handle plugin exceptions
s = traceback.format_exc(limit=10)
logger.error(s)
logger.error("插件启动回调异常!请检查模块->{0}".format(func.__module__))
    #Initialise the permission groups
permDefaultInit()
def plugRegistered(name: str,
groupname: str,
modulename: str = '',
level: int = 4):
"""
插件注册函数(插件名,插件消息处理优先级0-9,默认4)
每个插件必须运行一次,也只能运行一次,且运行在注册函数之前
没有运行注册函数前的注册会被视为无效注册
被装饰函数必须返回如下格式插件描述
{
'plugmanagement':'1.0',#插件注册管理(原样)
'version':'x.x',#插件版本
'auther':'xxx',#插件作者
'des':'描述信息'#插件描述
}
:param name: 插件名
:param groupname: 插件权限组名,大小写字母和数字非数字开头
:param level: 插件消息处理优先级,0-9级,0级最优先
:param modulename: 手动指定注册的插件模块名,默认为被装饰函数所在模块,一般不需要指定
:return: Func
"""
global logger
if level < 0 or level > 9:
raise Exception('插件优先级设置异常!')
if modulename and type(modulename) != str:
raise Exception('模块名称异常')
if modulename:
logger.warning('手动加载插件:{modulename}'.format(modulename=modulename))
def decorate(func):
nonlocal modulename, level, name, groupname
@functools.wraps(func)
def wrapper():
return func()
        #Get the plugin description and validate it
plugdes = func()
if type(plugdes) != dict:
raise Exception('插件注册不合法,注册返回值类型异常!')
if 'plugmanagement' not in plugdes:
raise Exception('插件注册不合法,注册返回值中的plugmanagement键值不存在!')
if plugdes['plugmanagement'] not in ('1.0'):
raise Exception('插件注册不合法,注册返回值中的plugmanagement键值不在可兼容版本列表!')
if 'version' not in plugdes:
raise Exception('插件注册不合法,注册返回值中的version键值不存在!')
if 'auther' not in plugdes:
raise Exception('插件注册不合法,注册返回值中的auther键值不存在!')
if 'des' not in plugdes:
raise Exception('插件注册不合法,注册返回值中的des键值不存在!')
if type(plugdes['version']) != str:
raise Exception('插件注册不合法,注册返回值中的version键值类型异常!')
if type(plugdes['auther']) != str:
raise Exception('插件注册不合法,注册返回值中的auther键值类型异常!')
if type(plugdes['des']) != str:
raise Exception('插件注册不合法,注册返回值中的des键值类型异常!')
if not modulename:
modulename = func.__module__
        #Register the plugin
permRegisterGroup(modulename, groupname, name, plugdes['des'])
plug = PluginsManage(modulename, name, groupname, plugdes['version'],
plugdes['auther'], plugdes['des'], level)
plug.registerPerm('plugopen', PlugMsgTypeEnum.allowall,
'默认生成权限,管理插件的定向禁用与启用')
logger.info('插件已注册{module}-{name}-{auther}-{version}'.format(
module=plug.modulename,
name=plug.name,
auther=plug.auther,
version=plug.version))
return wrapper
return decorate
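# Hedged illustration of the registration contract described in the docstring above.
# The plugin and group names here are invented; calling this helper would actually
# register a plugin, so it is only a sketch and is never invoked by this module.
def _example_register_demo_plugin():
    @plugRegistered('demo_plugin', 'demogroup')
    def demo_register():
        return {
            'plugmanagement': '1.0',
            'version': '0.1',
            'auther': 'example',
            'des': 'Illustrative plugin registration (hypothetical).',
        }
    return demo_register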
def on_plugloaded():
"""
插件加载完毕回调
被装饰函数
仅一个参数,参数为当前模块绑定的插件,若无插件绑定,则为None
参数 plug:PluginsManage
返回值 无
"""
def decorate(func):
global plugstartfuncs
@functools.wraps(func)
def wrapper(plug: PluginsManage):
return func(plug)
plugstartfuncs.append(wrapper)
return wrapper
return decorate
def on_preprocessor(limitMsgType: PlugMsgTypeEnum = PlugMsgTypeEnum.allowall,
allow_anonymous=True,
at_to_me=False,
sourceAdmin=False):
"""
前置消息过滤器(仅对模块所属插件生效·以模块名标识)
被装饰函数
函数根据返回的PlugMsgReturn值判断是否拦截消息,不返回数据时默认放行(放行用以允许插件函数处理)
参数 even:StandEven
返回值 PlugMsgReturn类型
注:过滤器相关请查看对应过滤类
:param limitMsgType: PlugMsgTypeEnum类 消息标识过滤器
:param allow_anonymous: 是否允许匿名消息
:param at_to_me: 需要艾特BOT
:param sourceAdmin: 需要是来源管理员(私聊则为自己,群聊则为管理员,临时聊天统一为False)
"""
def decorate(func):
nonlocal limitMsgType, allow_anonymous, at_to_me, sourceAdmin
@functools.wraps(func)
async def wrapper(session: Session) -> PlugMsgReturn:
f = (lambda a, b: not (a or not a and b)
                 ) #passes if a is true, or if a is false but b is true; f returns the negation of that pass condition
if f(allow_anonymous,not session.anonymous) or \
f(not at_to_me,session.atbot) or \
not (session.msgtype & limitMsgType) or \
f(not sourceAdmin,session.senduuidinfo['sourceAdmin']) and not session.msgtype & PlugMsgTypeEnum.plugadmin:
return PlugMsgReturn.Intercept
res = await func(session)
if res == None:
return PlugMsgReturn.Ignore
return res
if func.__module__ in PluginsManageList:
if DEBUG:
logger.info('前置函数注册{module}.{name}'.format(
module=wrapper.__module__, name=wrapper.__name__))
PluginsManageList[func.__module__].addPerFunc(wrapper,
checkmodule=False)
else:
logger.warning('前置函数{module}.{name}所在模块未注册为插件,注册失败'.format(
module=wrapper.__module__, name=wrapper.__name__))
return wrapper
return decorate
def on_message(msgfilter: PlugMsgFilter = '',
argfilter: PlugArgFilter = '',
des: str = '',
bindperm: str = None,
bindsendperm: str = None,
limitMsgType: PlugMsgTypeEnum = PlugMsgTypeEnum.allowall,
allow_anonymous=False,
at_to_me=True,
sourceAdmin=False):
"""
注册消息函数(消息过滤器,参数过滤器,函数描述,消息类型限制,群聊限制-暂未实现,是否允许匿名,需要指定BOT)
被装饰函数
函数根据返回的PlugMsgReturn值判断是否拦截消息,不返回数据时,发送过回复则拦截,否则放行
参数 even:StandEven
返回值 PlugMsgReturn类型
注:过滤器相关请查看对应过滤类
权限说明:
注:bindperm绑定的权限仅为消息来源权限(群聊时为指定群是否拥有权限),不判定全局管理权限
注:bindsendperm为检查发送对象权限,即发送人权限,群聊时为群聊里发消息的人,判定全局管理权限
绑定权限后执行命令前将检查权限,权限不通过则不响应,可多个插件绑定相同权限
:param msgfilter: PlugMsgFilter类 消息过滤器,可以为正则表达式字符串
:param argfilter: PlugArgFilter类 参数过滤器,可以为文本 详见PlugArgFilter类注释
:param des: 函数描述(例:“!翻译 待翻译内容 - 机器翻译”),可使用 !help 插件昵称 查看描述预览
:param bindperm: 检查消息来源权限,权限名大小写字母和数字非数字开头
:param bindsendperm: 检查发送对象权限,权限名大小写字母和数字非数字开头
:param limitMsgType: 消息标识过滤器
:param allow_anonymous: 是否允许匿名消息
:param at_to_me: 需要艾特BOT
:param sourceAdmin: 需要是来源管理员(私聊则为自己,群聊则为管理员,临时聊天统一为False)
"""
global PluginsManageList, logger
#logger.info('on_msg初始化')
if type(msgfilter) == str:
msgfilter = PlugMsgFilter(msgfilter)
if not isinstance(msgfilter, PlugMsgFilter):
raise Exception('消息过滤器参数不兼容!')
if type(argfilter) == str:
argfilter = PlugArgFilter(argfilter)
if not isinstance(argfilter, PlugArgFilter):
raise Exception('参数过滤器参数不兼容!')
if not (bindperm and type(bindperm) == str and bindperm.strip()):
bindperm = None
if not (bindsendperm and type(bindsendperm) == str
and bindsendperm.strip()):
bindsendperm = None
def decorate(func):
nonlocal msgfilter, argfilter, des, limitMsgType, allow_anonymous, sourceAdmin, at_to_me, bindperm, bindsendperm
@functools.wraps(func)
async def wrapper(session: Session) -> PlugMsgReturn:
            #Message filtering
            f = (lambda a, b: not (a or not a and b)
                 ) #passes if a is true, or if a is false but b is true; f returns the negation of that pass condition
if f(allow_anonymous,not session.anonymous) or \
f(not at_to_me,session.atbot) or \
not (session.msgtype & limitMsgType) or \
                f(not sourceAdmin,session.senduuidinfo['sourceAdmin']) and
# Repository: jschueller/seacas
# @HEADER
# ************************************************************************
#
# TriBITS: Tribal Build, Integrate, and Test System
# Copyright 2013 Sandia Corporation
#
# Under the terms of Contract DE-AC04-94AL85000 with Sandia Corporation,
# the U.S. Government retains certain rights in this software.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# 3. Neither the name of the Corporation nor the names of the
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY SANDIA CORPORATION "AS IS" AND ANY
# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL SANDIA CORPORATION OR THE
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# ************************************************************************
# @HEADER
"""
Python module containing general support functions for creating scripts
"""
#
# Check the python version
#
import sys
if sys.version_info < (2,7):
print("Error, Python version is " + sys.version + " < 2.7!")
sys.exit(1)
#
# Import commands
#
import os
import re
import math
import subprocess
import time
import datetime
import optparse
import traceback
from Python2and3 import b, s, u
verboseDebug = False
#
# Determine what system we are on:
#
rePlatformName = re.compile(r"^[a-zA-Z]+")
platformStr = rePlatformName.findall(sys.platform)[0]
#print("\nplatformStr = " + platformStr)
######################################
# Script location functions
######################################
def getScriptBaseDir():
return os.path.dirname(os.path.realpath(os.path.abspath(sys.argv[0])))
def getScriptName():
return os.path.basename(os.path.dirname(sys.argv[0]))
def getCompleteFileDirname(filename):
return os.path.dirname(os.path.realpath(os.path.abspath(filename)))
######################################
# List helper functions
######################################
def findInSequence(seq, item):
for i in range(0, len(seq)):
if seq[i] == item:
return i
return -1
def removeItemsFromList(list, items):
numItemsRemoved = 0
for item in items:
if item in list:
idx = list.index(item)
del list[idx]
numItemsRemoved = numItemsRemoved + 1
return numItemsRemoved
######################################
# String helper functions
######################################
def stripWhitespaceFromStringList(stringList):
return [x.strip() for x in stringList]
def isSubstrInMultiLineString(inputStr, findStr):
return inputStr.find(findStr) >= 0
def getStrUnderlineStr(width):
underlineStr = ""
for i in range(width):
underlineStr += "-"
return underlineStr
def arrayToFormattedString(array_in, offsetStr = ""):
sout = ""
sout += offsetStr + "[\n"
for i in range(0, len(array_in)):
if i != len(array_in)-1:
commaChar = ","
else:
commaChar = ""
sout += (offsetStr + " \'" + str(array_in[i]) + "\'"+commaChar+"\n")
sout += offsetStr + "]\n"
return sout
def extractLinesAfterRegex(string_in, regex_in):
#print("regex_in = " + regex_in)
reMatch = re.compile(regex_in)
linesExtracted = ""
foundRegex = False
for line in string_in.strip().splitlines():
#print("line = '" + line + "'")
if not foundRegex:
matchObj = reMatch.match(line)
#print("matchObj = " + matchObj)
if matchObj:
foundRegex = True
if foundRegex:
linesExtracted += line + "\n"
return linesExtracted
def extractLinesMatchingRegex(string_in, regex_in):
#print("regex_in = " + regex_in)
string_in = s(string_in)
reMatch = re.compile(regex_in)
linesExtracted = ""
for line in string_in.strip().splitlines():
#print("line = '" + line + "'")
matchObj = reMatch.match(line)
#print("matchObj = " + matchObj)
if matchObj:
linesExtracted += line + "\n"
return linesExtracted
# NOTE: Above is *NOT* unit tested!
def extractLinesMatchingSubstr(string_in, substr_in):
#print("substr_in = '" + substr_in + "'")
string_in = s(string_in)
linesExtracted = ""
for line in string_in.strip().splitlines():
#print("line = '" + line + "'")
if substr_in in line:
#print("matched '" + substr_in + "'")
linesExtracted += line + "\n"
return linesExtracted
# NOTE: Above is *NOT* unit tested!
# Convert a dictionary to a string, using a sorted set of keys.
#
# This is needed to provide a portable string representation across various
# versions of Python and platforms (see TriBITS GitHub Issue #119).
def sorted_dict_str(d):
items = []
keys = list(d.keys())
keys.sort()
for key in keys:
if isinstance(d[key], dict):
value = sorted_dict_str(d[key])
else:
value = repr(d[key])
items.append(repr(key) + ": " + value)
return "{" + ", ".join(items) + "}"
##############################################
# System command unit testing utilities
##############################################
class InterceptedCmndStruct:
def __init__(self, cmndRegex, cmndReturn, cmndOutput):
self.cmndRegex = cmndRegex
self.cmndReturn = cmndReturn
self.cmndOutput = cmndOutput
def __str__(self):
return "{cmndRegex='"+self.cmndRegex+"'," \
" cmndReturn="+str(self.cmndReturn)+"," \
" cmndOutput='"+str(self.cmndOutput)+"'}"
#
# Class that is used to record a set of commands that will be used to
# intercept commands
#
class SysCmndInterceptor:
def __init__(self):
self.__fallThroughCmndRegexList = []
self.__interceptedCmndStructList = []
self.__allowExtraCmnds = True
def setFallThroughCmndRegex(self, cmndRegex):
self.__fallThroughCmndRegexList.append(cmndRegex)
def setInterceptedCmnd(self, cmndRegex, cmndReturn, cmndOutput=None):
self.__interceptedCmndStructList.append(
InterceptedCmndStruct(cmndRegex, cmndReturn, cmndOutput) )
def setAllowExtraCmnds(self, allowExtraCmnds):
self.__allowExtraCmnds = allowExtraCmnds
def hasInterceptedCmnds(self):
return len(self.__interceptedCmndStructList) > 0
def getFallThroughCmndRegexList(self):
return self.__fallThroughCmndRegexList[:]
def getInterceptedCmndStructList(self):
return self.__interceptedCmndStructList[:]
def doProcessInterceptedCmnd(self, cmnd):
#print("doProcessInterceptedCmnd(): cmnd='" + cmnd + "'")
if self.isFallThroughCmnd(cmnd):
return False
if len(self.__interceptedCmndStructList) > 0:
return True
if not self.__allowExtraCmnds:
return True
return False
def isFallThroughCmnd(self, cmnd):
for interceptCmndStruct in self.__interceptedCmndStructList:
if re.match(interceptCmndStruct.cmndRegex, cmnd):
return False
for cmndRegex in self.__fallThroughCmndRegexList:
if re.match(cmndRegex, cmnd):
return True
return False
def nextInterceptedCmndStruct(self, cmnd):
assert(not self.isFallThroughCmnd(cmnd))
if len(self.__interceptedCmndStructList) == 0:
raise Exception("Error, cmnd='"+cmnd+"' is past the last expected command!")
ics = self.__interceptedCmndStructList[0]
if not re.match(ics.cmndRegex, cmnd):
raise Exception("Error, cmnd='" + cmnd + "' did not match the" \
" expected regex='" + ics.cmndRegex + "'!")
self.__interceptedCmndStructList.pop(0)
return (ics.cmndReturn, ics.cmndOutput)
def clear(self):
self.__fallThroughCmndRegexList = []
self.__interceptedCmndStructList = []
self.__allowExtraCmnds = True
def readCommandsFromStr(self, cmndsStr):
lines = cmndsStr.splitlines()
for line in lines:
#print("line: '" + line + "'")
if line == "":
continue
splitArray = line.split(':')
(tag, entry) = (splitArray[0], ':'.join(splitArray[1:]))
#(tag, entry) = line.split(':')
#print("(tag, entry) = " + str((tag, entry)))
if tag == "FT":
self.__fallThroughCmndRegexList.append(entry.strip())
elif tag == "IT":
entryArray = entry.split(';')
if len(entryArray) < 3:
raise Exception("Error, invalid line {"+line+"}")
cmndRegex = entryArray[0]
cmndReturn = entryArray[1]
cmndOutput = ""
for cmndOutputEntry in entryArray[2:]:
#print("cmndOutputEntry = {" + cmndOutputEntry + "}")
cmndOutput += cmndOutputEntry.strip()[1:-1]+"\n"
#print("(cmndRegex, cmndReturn, cmndOutput) = " +
# str((cmndRegex, cmndReturn, cmndOutput)))
self.__interceptedCmndStructList.append(
InterceptedCmndStruct(cmndRegex.strip(), int(cmndReturn), cmndOutput)
)
else:
raise Exception("Error, invalid tag = '"+tag+"'!")
def assertAllCommandsRun(self):
if len(self.__interceptedCmndStructList) > 0:
raise Exception("Error, all of the commands have not been run starting with" \
" the command " + str(self.__interceptedCmndStructList[0])
+ "!")
g_sysCmndInterceptor = SysCmndInterceptor()
# Read the command intercepts from a file?
cmndInterceptsFile = os.environ.get(
"GENERAL_SCRIPT_SUPPORT_CMND_INTERCEPTS_FILE","")
if cmndInterceptsFile:
cmndInterceptsFileStr = open(cmndInterceptsFile, 'r').read()
print("\nReading system command intercepts from file '" +
cmndInterceptsFile +
"' with contents:\n" +
"-----------------------------------\n" +
cmndInterceptsFileStr +
"-----------------------------------\n")
g_sysCmndInterceptor.readCommandsFromStr(cmndInterceptsFileStr)
g_sysCmndInterceptor.setAllowExtraCmnds(False)
# Dump all commands being performed?
g_dumpAllSysCmnds = "GENERAL_SCRIPT_SUPPORT_DUMD_COMMANDS" in os.environ
# Run a command (or the mock of that command) and optionally return the stdout
#
# Returns the command-line exit code or, if 'rtnOutput=True', returns the tuple
# (rtnCode, cmndOutput).
#
# For Python 2, this returns stdout as a standard ASCII byte array for the
# output. For Python 3, the stdout output is converted to standard unicode
# (i.e. 'utf-8')
#
def runSysCmndInterface(cmnd, outFile=None, rtnOutput=False, extraEnv=None, \
workingDir="", getStdErr=False \
):
if g_dumpAllSysCmnds:
print("\nDUMP SYS CMND: " + cmnd + "\n")
if outFile!=None and rtnOutput==True:
raise Exception("Error, both outFile and rtnOutput can not be true!")
if g_sysCmndInterceptor.doProcessInterceptedCmnd(cmnd):
(cmndReturn, cmndOutput) = g_sysCmndInterceptor.nextInterceptedCmndStruct(cmnd)
if rtnOutput:
if cmndOutput==None:
raise Exception("Error, the command '"+cmnd+"' gave None output when" \
" non-null output was expected!")
return (cmndOutput, cmndReturn)
if outFile:
writeStrToFile(outFile, cmndOutput)
return cmndReturn
# Else, fall through
if extraEnv:
fullEnv = os.environ.copy()
fullEnv.update(extraEnv)
else:
fullEnv = None
pwd = None
if workingDir:
pwd = os.getcwd()
os.chdir(workingDir)
rtnObject = None
try:
if rtnOutput:
if getStdErr:
child = subprocess.Popen(cmnd, shell=True, stdout=subprocess.PIPE,
stderr = subprocess.STDOUT, env=fullEnv)
else:
child = subprocess.Popen(cmnd, shell=True, stdout=subprocess.PIPE,
env=fullEnv)
data = child.stdout.read()
if sys.version_info >= (3,):
data = data.decode('utf-8')
child.stdout.close()
child.wait()
rtnCode = child.returncode
rtnObject = (data, rtnCode)
else:
outFileHandle = None
if outFile:
outFileHandle = open(outFile, 'w')
rtnCode = subprocess.call(cmnd, shell=True, stderr=subprocess.STDOUT,
stdout=outFileHandle, env=fullEnv)
if outFileHandle: outFileHandle.close()
rtnObject = rtnCode
finally:
if pwd: os.chdir(pwd)
return rtnObject
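# Hedged example (POSIX shell assumed): capture both stdout and the exit code of a
# trivial command through the same interface the rest of this module uses.
def _exampleRunSysCmndInterface():
  (output, rtnCode) = runSysCmndInterface("echo hello", rtnOutput=True)
  return (output.strip(), rtnCode)  # expected ("hello", 0)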
######################################
# System interaction utilties
######################################
def runSysCmnd(cmnd, throwExcept=True, outFile=None, workingDir="",
extraEnv=None, echoCmndForDebugging=False \
):
"""Run system command and optionally throw on failure"""
sys.stdout.flush()
sys.stderr.flush()
try:
outFileHandle = None
    if echoCmndForDebugging:
# Repository: MichaelLenghel/Automatic_Stance_Detection_In_Media
import os
import re
import pickle
from pprint import pprint
# NLP libraries
import gensim
import gensim.corpora as corpora
from gensim.test.utils import datapath
from gensim.utils import lemmatize, simple_preprocess
from gensim.models import CoherenceModel, KeyedVectors
import spacy
import pattern
import pandas as pd
from nltk.corpus import stopwords
stop_words = stopwords.words('english')
# Include extra words. May in the future extend it even further
stop_words.extend(['from', 'subject', 're', 'edu', 'use'])
# sklearn for accessing 20 news groups
from sklearn.datasets import load_files
def gen_bunch(news_path, company_tag):
# Cateogies in data set
news_categories = ['alt.atheism', 'comp.graphics', 'comp.os.ms-windows.misc'
, 'comp.sys.ibm.pc.hardware', 'comp.sys.mac.hardware', 'comp.windows.x'
, 'misc.forsale', 'rec.autos', 'rec.motorcycles', 'rec.sport.baseball'
, 'rec.sport.hockey', 'sci.crypt', 'sci.electronics', 'sci.med'
, 'sci.space', 'soc.religion.christian', 'talk.politics.guns'
, 'talk.politics.mideast', 'talk.politics.misc', 'talk.religion.misc']
refined_news_categories = ['brexit', 'climate.change', 'financial'
, 'trump', 'sport.gaa' ,'sport.football'
, 'food.reviews', 'polotics', 'medicine'
, 'middle.east', 'abortion'
, 'atheism', 'christianity', 'drugs'
, 'USA', 'china', 'business'
, 'housing', 'online']
# Setup path to test corpus
NEWS_GROUPS_TEST_PATH = os.path.abspath(os.path.join(os.path.dirname(__file__), news_path))
# Print the path
# print(NEWS_GROUPS_TEST_PATH)
##### Need to implement a method for including custom categories! ####
# Load all test data.
# news_test = load_files(NEWS_GROUPS_TEST_PATH, description='News Paper Test Topics from 20 news groups'
# , categories=news_categories, load_content=True , shuffle=False, encoding='latin1'
# , decode_error='strict')
# Shuffling the data in order to increase distribution of topics and not overly simplify NLP patterns
# news_test.data is everything in one big string
if company_tag == 'TEST':
news = load_files(NEWS_GROUPS_TEST_PATH, description='Newspaper test Topics from 20 news groups'
, categories=news_categories, load_content=True , shuffle=True, encoding='latin1'
, decode_error='strict', random_state=30)
else:
news = load_files(NEWS_GROUPS_TEST_PATH, description='Newspaper Topics from the specified company'
, categories=refined_news_categories, load_content=True , shuffle=True, encoding='latin1'
, decode_error='strict', random_state=30)
# Note:
# Shows the topic and document ID + the article.
# print(news.filenames[0])
# print(news.data[0])
# Get all of the file names
# for integer_category in news_test.target[:10]:
# print(news_test.target_names[integer_category])
return news
def multiple_replacements(article):
empty_str = ""
# Replacing all dashes, equals, cursors
replacements = {
"-" : empty_str,
"=": empty_str,
"^": empty_str,
}
# Replace newlines
article_list = re.sub('\s+', ' ', article)
# Replace emails
article_list = re.sub('\S*@\S*\s?', '', article_list)
# Replace quotes
article_list = re.sub("\'", "", article_list)
# Replace headers of data (author, date and url)
article_list = re.sub(r'\{[^)]*\}', '', article_list)
# Create a regular expression using replacements and join them togetehr.
# re.compile creates the pattern object
# re.escape avoids using special characters in regex
reg = re.compile("(%s)" % "|".join(map(re.escape, replacements.keys())))
# For each match, look-up corresponding value in dictionary
    return reg.sub(lambda value: replacements[value.string[value.start():value.end()]], article_list)
def split_to_word(articles):
# Iterate over every article
for article in articles:
# Yield to not overload the memory with the big data set.
# Deacc parameter removes all punctuations as well as spliting each word.
yield(gensim.utils.simple_preprocess(str(article), deacc=True))
def create_bigrams(articles, bigram_model):
return [bigram_model[article] for article in articles]
def remove_stopwords(articles):
return [[w for w in simple_preprocess(str(article)) if w not in stop_words] for article in articles]
def lemmatize_words(bigram_model):
# Only considers nouns, verbs, adjectives and adverbs
return [[w for w in lemmatize(str(article))] for article in bigram_model]
# This method is about a minute faster for a data set of 7000 than the one above
def lemmatization(articles, allowed_postags=['NOUN', 'ADJ', 'VERB', 'ADV']):
articles_lem = []
# Load the spacy lammatixation model for english
spacy_lem = spacy.load('en_core_web_sm', disable=['parser', 'ner'])
for article in articles:
w = spacy_lem(" ".join(article))
articles_lem.append([token.lemma_ for token in w if token.pos_ in allowed_postags])
return articles_lem
def build_lda_model(articles, word_dict, corpus):
# Build LDA model
# Retry with random_state = 0
lda_model = gensim.models.ldamodel.LdaModel(corpus=corpus,
id2word=word_dict,
num_topics=50,
random_state=100,
update_every=1,
chunksize=100,
passes=10,
alpha='auto',
per_word_topics=True)
return lda_model
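# Hedged sketch (not in the original file): once a model is built, its topics can be
# inspected and a coherence score computed with gensim's CoherenceModel; `articles`
# is the lemmatized, tokenized corpus produced by clean_data().
def _example_inspect_lda(lda_model, articles, word_dict):
    pprint(lda_model.print_topics(num_topics=5, num_words=8))
    coherence_model = CoherenceModel(model=lda_model, texts=articles,
                                     dictionary=word_dict, coherence='c_v')
    return coherence_model.get_coherence()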
# Build more accurate lda model using mallet
def build_mallet_lda_model(articles, word_dict, corpus):
from gensim.models.wrappers import LdaMallet
MALLET_PATH = '../mallet_2/mallet-2.0.8/bin/mallet.bat'
mallet_file = os.path.abspath(os.path.join(os.path.dirname(__file__), MALLET_PATH))
os.environ.update({'MALLET_HOME':mallet_file})
lda_mallet_model = gensim.models.wrappers.LdaMallet(mallet_file, corpus=corpus, num_topics=19, id2word=word_dict)
return lda_mallet_model
def clean_data(test_bunch, company_tag, isSciObj=True):
########## ALGO ###################
# Create a list of the articles
if isSciObj == False:
article_list = list(test_bunch)
else:
article_list = list(test_bunch.data)
# Replace =, cursor and dash, quotes, newlines and emails
article_word_list = [multiple_replacements(article) for article in article_list]
# split each article into words
article_word_list = list(split_to_word(article_word_list))
# Print the first article word by word:
# print(article_word_list[0])
# brigrams model
    # Need bigrams because they merge words that go together into a single token
bigram = gensim.models.Phrases(article_word_list, min_count=8, threshold=100)
bigram_model = gensim.models.phrases.Phraser(bigram)
# Print bigrams
# print(bigram_model[article_word_list[0]])
# Remove stopwords
article_no_stopwords = remove_stopwords(article_word_list)
# make bigrams
bigram_words = create_bigrams(article_no_stopwords, bigram_model)
# Lemmatize - By default only nouns, verbs, adjectives and adverbs
# lemmatized_article = lemmatize_words(bigram_words)
lemmatized_article = lemmatization(bigram_words, allowed_postags=['NOUN', 'VERB', 'ADJ', 'ADV'])
return lemmatized_article
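# Hedged sketch of the usual gensim plumbing that sits between clean_data() and
# build_lda_model(): build the id2word dictionary and the bag-of-words corpus.
def _example_build_corpus(lemmatized_articles):
    word_dict = corpora.Dictionary(lemmatized_articles)
    corpus = [word_dict.doc2bow(article) for article in lemmatized_articles]
    return word_dict, corpus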
def save_model(lda_model, path = None, model_type = None):
# Below doesn't work due to access denied issues, datapath is the alternative
# MODEL_PATH = "../models/"
# model_file = os.path.abspath(os.path.join(os.path.dirname(__file__), MODEL_PATH))
if path is None:
model_file = datapath("model")
else:
model_file = datapath(path)
lda_model.save(model_file)
def save_data(newspaper_list, company_tag):
MODEL_PATH = "../newspaper_data/newspaper_list.txt"
COMPANY_MODEL_PATH = "../newspaper_data/model_data/"
extension = ".txt"
COMPANY_PATH = company_tag + extension
if company_tag == 'TEST':
newspaper_list_file = os.path.abspath(os.path.join(os.path.dirname(__file__), MODEL_PATH))
else:
newspaper_list_file = os.path.abspath(os.path.join(os.path.dirname(__file__), COMPANY_MODEL_PATH))
try:
# Recursively build the dirs if they do not already exist
os.makedirs(newspaper_list_file, exist_ok=True)
except OSError:
print('FAILED TO CREATE DIR RECURSIVELY')
with open(newspaper_list_file + '/' + COMPANY_PATH, 'w') as filehandle:
for listitem in newspaper_list:
# print(listitem)
filehandle.write('%s\n' % listitem)
def create_word_corpus(articles, company_tag):
word_corpus = {}
unique_words = []
print('STARTING TO CREATE WORD CORPUS for company ' + company_tag + "...")
# Default of where articles start after headers
sentence_start = 4
if company_tag == "DAILY_MAIL":
sentence_start = 2
elif company_tag == "INDEPENDENT":
sentence_start = 4
else:
print("Company type used not supported")
return
for article in articles.data:
# Extract the parts of the article
article_parts = article.split('\r\n')
topic_name = article_parts[0].strip()
print(topic_name)
# DAILY_MAIL articles are formatted differently
if company_tag == "DAILY_MAIL":
header_line = article_parts[1].strip()[1:]
words = header_line.strip().split(" ")
try:
author_name = ' '.join(words[1:3])
except IndexError:
print('Failed to return a author on article ' + article )
try:
# Do regex for date since more consistent
article_index = header_line.find('Published:')
article_end = header_line.find('https:')
article_end -= 3
article_date = header_line[article_index:article_end]
except IndexError:
print('Failed to return a valid date on article ' + article )
elif company_tag == "INDEPENDENT":
try:
author_name = article_parts[1].strip()[1:]
except IndexError:
print('Failed to return a author on article ' + article )
try:
article_date = article_parts[2].strip()
except IndexError:
print('Failed to return a valid date on article ' + article )
else:
print('Company ' + company_tag + " not supported...")
return
# print('Topic name = ' + topic_name)
# print('Author name = ' + author_name)
# print('Article date = ' + article_date)
# print('Link retrieved article from = ' + article_link)
# Loop over every paragraph in the article
for part in article_parts[sentence_start:]:
# Catches the full sentences (used for sentiment analysis)
sentences = part.split('.')
# Loop through each sentence in paragraph
for sentence in sentences:
words = sentence.split(' ')
# Loop through each word in the sentence
for word in words:
# Ensure a word and not a number
if word.isalpha():
if word not in unique_words:
unique_words.append(word)
# New element so add it to the dictionary only after instantiating
word_corpus[word] = []
word_corpus[word].append(topic_name + ':::' + author_name + ':::' + article_date + ':::' + sentence)
else:
word_corpus[word].append(topic_name + ':::' + author_name + ':::' + article_date + ':::' + sentence)
# for word in word_corpus:
# print('WORD: ' + word + ' POINTS TO ', word_corpus[word])
# Add the bigrams to the word corpus
print('Starting to add bigrams to the word corpus...')
bigram_word_corpus = retrieve_bigram_word_corpus(company_tag)
topic_name_bigram = ''
author_name_bigram = ''
article_date_bigram = ''
sentence_bigram = ''
# Convert bigram_word_corpus to a list:
for bigram_word in bigram_word_corpus:
bigram_word_list = []
topic1_name = topic2_name = author1_name = author2_name = article1_date = article2_date = article1_sentence = article2_sentence = ''
# Get words frm bigram
print(bigram_word)
if bigram_word.count('_') == 1:
# Get bigrams
word1, word2 = bigram_word.split('_')
elif bigram_word.count('_') == 2:
# Get trigrams, but only include first and second word for now
word1, word2, _ = bigram_word.split('_')
elif bigram_word.count('_') == 3:
            # Get 4-grams (keep only the first two words)
word1, word2, _, _ = bigram_word.split('_')
            # Get 5-grams (keep only the first two words)
elif bigram_word.count('_') == 4:
            word1, word2,
read in in_file:
bc = tk_io.get_read_barcode(read)
this_bc_reads = bc_reads.setdefault(bc, [])
this_bc_reads.append(read)
sorted_bcs = sorted(bc_reads.keys())
for bc in sorted_bcs:
for read in bc_reads[bc]:
out_file.write(read)
else:
# Store the file offset locations (in bytes) by bc
bc_locs = {}
file_offset = in_file.tell()
for read in in_file:
bc = tk_io.get_read_barcode(read)
this_bc_locs = bc_locs.setdefault(bc, [])
this_bc_locs.append(file_offset)
file_offset = in_file.tell() # has to be before next read
sorted_bcs = sorted(bc_locs.keys())
for bc in sorted_bcs:
for offset in bc_locs[bc]:
in_file.seek(offset)
out_file.write(in_file.next())
out_file.close()
def convert_to_bam(sam_name, bam_name):
""" Uses samtools to create a bam_file from the samfile
"""
sam_file = create_sam_infile(sam_name)
bam_file, tids = create_bam_outfile(bam_name, None, None, template=sam_file)
for read in sam_file:
bam_file.write(read)
sam_file.close()
bam_file.close()
def generate_tiling_windows(input_bam, locus_size, overlap=0):
''' Generate a list of (chrom, start, length) loci that tile over all the references in the bam file '''
chroms = input_bam.references
chrom_lengths = input_bam.lengths
loci = []
for (chrom, length) in zip(chroms, chrom_lengths):
start = 0
while start + locus_size + overlap < length:
stop = start + locus_size + overlap
loci.append(tk_io.create_locus_info(chrom, start, stop))
start += locus_size
loci.append(tk_io.create_locus_info(chrom, start, length))
return loci
import numpy as np
import struct
import zlib
class BgzfParserException(Exception):
pass
def get_bsize(b):
id1, id2, length, bsize = struct.unpack("BBHH", b[:6])
if id1 == 66 and id2 == 67 and length == 2:
return bsize
raise BgzfParserException("BSIZE field not found -- this a valid BAM file?")
def parse_bgzf_header(f):
cur_pos = f.tell()
header_fmt = "BBBBIBBH"
d = f.read(12)
# We are at EOF when read returns an empty string
if d == '':
return None
header = struct.unpack(header_fmt, d)
# Check for a valid gzip header
if header[0] != 31 or header[1] != 139:
raise Exception("Not a valid gzip header")
bsize = get_bsize(f.read(6))
next_pos = cur_pos + bsize + 1
f.seek(next_pos)
return next_pos
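# Hedged usage sketch: walk every BGZF block in a BAM file by calling
# parse_bgzf_header() until it returns None at end-of-file.
def _example_list_bgzf_block_offsets(bam_filename):
    offsets = []
    with open(bam_filename, "rb") as f:
        while True:
            next_pos = parse_bgzf_header(f)
            if next_pos is None:
                break
            offsets.append(next_pos)
    return offsets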
def is_valid_bgzf_block(block):
if len(block) < 18:
return False
try:
header = struct.unpack("BBBBIBBH", block[:12])
if header[0] != 31 or header[1] != 139 or header[2] != 8 or header[3] != 4:
return False
bsize = get_bsize(block[12:])
if len(block) <= bsize:
raise RuntimeError("Encountered possibly truncated block!")
xlen = header[7]
cdata_end = bsize - xlen - 1 # -1 == -19 + (12 + 6)
wbits = zlib.MAX_WBITS | 16
data = zlib.decompress(block[:bsize+1], wbits)
crc = zlib.crc32(data) & 0xffffffff
# zlib.decompress _may_ check these, but this is cheap enough...
if (crc, len(data)) == struct.unpack("II", block[cdata_end:cdata_end+8]):
return True
return False
except BgzfParserException:
return False
except zlib.error:
return False
def bgzf_noffsets(fname, num_chunks):
fsize = os.path.getsize(fname)
initial_offsets = np.linspace(0, fsize, num_chunks+1).round().astype(int)[:-1]
initial_offsets = sorted(set(initial_offsets))
num_bytes = 1 << 16
if len(initial_offsets) > 1:
num_bytes = min(num_bytes, np.diff(initial_offsets).max())
fp = open(fname, "r")
final_offsets = []
for offset in initial_offsets:
fp.seek(offset)
# read 2x max BAM blocksize, seek up to this value
data = fp.read(2 << 16)
for i in xrange(0, num_bytes):
if is_valid_bgzf_block(data[i:]):
final_offsets.append(offset + i)
break
return final_offsets
def chunk_bam_records(input_bam, chunk_bound_key=None, chunk_size_gb=0.75,
num_chunks=None, min_chunks=1, max_chunks=None):
    ''' Find a series of BAM virtual offset ranges that cover all the reads in the BAM file.
Each chunk will be cover at least 1 BGZF chunk, so the total number of chunks will
be the smaller of max_chunks, and the total number of BGZF chunks in the file.
chunk_bound_key is a function that accepts a pysam AlignedRead object and returns
some read group identifier object. BAM chunks returned will always start on the
first read of a block of reads with a common group identifier. Use this to keep
read pairs, or blocks of reads with a common start position together when chunking.
Set chunk_bound_key=None if there is no constraint on chunk boundaries.
        chunk_bound_key can return None to signal that the current read is a valid place to
start a chunk.
'''
if num_chunks is None:
size_gb = float(os.path.getsize(input_bam.filename)) / 1e9
num_chunks = max(min_chunks, int(math.ceil(size_gb / chunk_size_gb)))
if max_chunks is not None:
num_chunks = min(num_chunks, max_chunks)
def find_valid_virtual_offset(pos):
''' Scan through reads starting at given pos to find valid start site for iteration '''
input_bam.seek(pos << 16)
n = 0
pos = input_bam.tell()
last_read_key = None
# All positions are valid chunk bounds in chunk_bound_key is None
# unless there are no reads are left
if chunk_bound_key is None:
has_reads = False
for r in input_bam:
has_reads = True
break
if has_reads:
return pos
else:
return None
for r in input_bam:
# Warn if it's taking a long time to find a good start position
n += 1
if n==1000:
logging.warning("Taking a long time to find a chunk boundary. Check your chunk_boundary_test function?")
# get the current bound_key -- a valid block begins when this key changes.
new_key = chunk_bound_key(r)
# The chunk_bound_key can signal that it's a valid place to split by returning None
if new_key is None:
return pos
if last_read_key == None:
last_read_key = new_key
pos = input_bam.tell()
elif new_key == last_read_key:
pos = input_bam.tell()
else:
return pos
# We will trim off chunks that don't have a valid
# start position, or have no reads.
# Any reads will get processed by the preceding chunks
return None
input_bam.reset()
start_virtual_offsets = [input_bam.tell() & ~0xffff]
for x in bgzf_noffsets(input_bam.filename, num_chunks)[1:]:
y = find_valid_virtual_offset(x)
if y is not None:
start_virtual_offsets.append(y)
# Remove duplicate start sites
start_virtual_offsets = sorted(set(start_virtual_offsets))
end_virtual_offsets = start_virtual_offsets[1:]
end_virtual_offsets.append(None)
chunk_defs = list(zip(start_virtual_offsets, end_virtual_offsets))
return [{ 'chunk_start': str(s), 'chunk_end': str(e) } for (s,e) in chunk_defs]
def read_bam_chunk(input_bam, chunk_def):
''' Iterate over the reads in input_bam contained in chunk_def. chunk_def is a (start, end) pair
of virtual offsets into the BAM file.'''
chunk_start, chunk_end = chunk_def
def convert_offset(s):
if s == 'None':
return None
if isinstance(s, str) or isinstance(s, unicode):
return int(s)
return s
chunk_start = convert_offset(chunk_start)
chunk_end = convert_offset(chunk_end)
if chunk_start:
input_bam.seek(chunk_start)
else:
input_bam.reset()
while chunk_end == None or input_bam.tell() < chunk_end:
yield input_bam.next()
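# Hedged sketch combining chunk_bam_records() and read_bam_chunk(): split a BAM into
# roughly four chunks without breaking up reads that share a qname, then count the
# reads in the first chunk. pysam.Samfile is the old-style handle assumed elsewhere
# in this module.
def _example_count_reads_in_first_chunk(bam_path):
    import pysam
    bam = pysam.Samfile(bam_path, "rb")
    chunks = chunk_bam_records(bam, chunk_bound_key=lambda r: r.qname, num_chunks=4)
    first = chunks[0]
    return sum(1 for _ in read_bam_chunk(bam, (first['chunk_start'], first['chunk_end'])))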
def get_allele_read_info(chrom, pos, ref, alt_alleles, min_mapq_counts, min_mapq_for_mean, min_mapq_for_bc, default_indel_qual, bam, reference_pyfasta, max_reads=1000, match = 1, mismatch = -4, gap_open = -6, gap_extend = -1):
all_alleles = [ref] + alt_alleles
bc_qual_maps = [{} for j in xrange(len(all_alleles))]
counts = [0 for x in all_alleles]
diffs = [[] for x in all_alleles]
mapq_sums = [0.0 for x in all_alleles]
mapq_denoms = [0.0 for x in all_alleles]
molecule_differences = [[] for x in all_alleles]
rescued = [[] for x in all_alleles]
num_reads = 0
qnames = set()
for read in bam.fetch(chrom, pos, pos + 1):
num_reads += 1
if read.qname in qnames:
continue
qnames.add(read.qname)
if read.is_duplicate:
continue
if num_reads > max_reads:
break
is_indel_variant = False
for allele in alt_alleles:
if len(allele) != len(ref):
is_indel_variant = True
allele_index_in_read = read_contains_allele_sw(ref, all_alleles, pos, read, reference_pyfasta[chrom], match = match, mismatch = mismatch, gap_open = gap_open, gap_extend = gap_extend)
for (allele_index, allele) in enumerate(all_alleles):
if allele_index == allele_index_in_read:
if dict(read.tags).get("AS") is not None and dict(read.tags).get("XS") is not None:
diffs[allele_index].append(float(dict(read.tags).get("AS")) - float(dict(read.tags).get("XS")))
if dict(read.tags).get('OM') is not None:
if read.mapq >= 30 and dict(read.tags).get('OM') < 30:
rescue = 1
else:
rescue = 0
rescued[allele_index].append(rescue)
if dict(read.tags).get("DM") is not None:
molecule_differences[allele_index].append(float(dict(read.tags).get("DM")))
if read.mapq >= min_mapq_for_mean:
mapq_sums[allele_index] += read.mapq
mapq_denoms[allele_index] += 1
if read.mapq >= min_mapq_counts:
counts[allele_index] += 1
if read.mapq >= min_mapq_for_bc:
bc = tk_io.get_read_barcode(read)
if bc is None:
continue
cigar_map = tk_seq.get_cigar_map(read.cigar)
try:
read_offset = cigar_map.index(pos - read.pos - 1)
except:
continue
if allele == ref:
if is_indel_variant:
qual = str(default_indel_qual)
else:
qual = str(ord(min(read.qual[read_offset:read_offset + len(allele)])))
bc_quals = bc_qual_maps[allele_index].setdefault(bc, [])
bc_quals.append(qual)
# SNP
elif len(allele) == 1 and len(ref) == 1:
if is_indel_variant:
qual = str(default_indel_qual)
else:
qual = str(ord(read.qual[read_offset]))
bc_quals = bc_qual_maps[allele_index].setdefault(bc, [])
bc_quals.append(qual)
# Insert
elif len(allele) > len(ref) and allele.startswith(ref):
bc_quals = bc_qual_maps[allele_index].setdefault(bc, [])
bc_quals.append(str(default_indel_qual))
# Deletion
elif len(allele) < len(ref) and ref.startswith(allele):
bc_quals = bc_qual_maps[allele_index].setdefault(bc, [])
bc_quals.append(str(default_indel_qual))
else:
bc_quals = bc_qual_maps[allele_index].setdefault(bc, [])
bc_quals.append(str(default_indel_qual))
bc_qual_strings = []
for bc_qual_map in bc_qual_maps:
bc_qual_strings.append([])
for bc, bc_quals in bc_qual_map.iteritems():
bc_qual_strings[-1].append(bc + '_' + '_'.join(bc_quals))
mapq_means = [mapq_sums[i]/mapq_denoms[i] if mapq_denoms[i] > 0 else 31 for i in range(len(all_alleles))]
return (counts, mapq_means, bc_qual_strings, molecule_differences, diffs, rescued)
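# Usage sketch (assumptions: `bam` is an open pysam alignment file and
# `reference_pyfasta` is a pyfasta-style mapping of chromosome -> sequence,
# matching the parameters above; the variant and thresholds are hypothetical):
#
#   counts, mapq_means, bc_quals, mol_diffs, diffs, rescued = get_allele_read_info(
#       'chr1', 10000, 'A', ['T'],
#       min_mapq_counts=30, min_mapq_for_mean=30, min_mapq_for_bc=30,
#       default_indel_qual=43, bam=bam, reference_pyfasta=reference_pyfasta)
#   # counts[i] is the number of reads (mapq >= min_mapq_counts) supporting
#   # all_alleles[i], where all_alleles = [ref] + alt_alleles.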
class RevTuple(object):
def __init__(self, value):
self.value = value
def __cmp__(self, other):
return cmp(self.value,other[1])
def read_contains_allele_sw(ref, | |
nodes
running the Apache Airflow software.
Attributes:
location (str):
Optional. The Compute Engine
`zone </compute/docs/regions-zones>`__ in which to deploy
the VMs used to run the Apache Airflow software, specified
as a `relative resource
name </apis/design/resource_names#relative_resource_name>`__.
For example: "projects/{projectId}/zones/{zoneId}".
This ``location`` must belong to the enclosing environment's
project and location. If both this field and
``nodeConfig.machineType`` are specified,
``nodeConfig.machineType`` must belong to this ``location``;
if both are unspecified, the service will pick a zone in the
Compute Engine region corresponding to the Cloud Composer
location, and propagate that choice to both fields. If only
one field (``location`` or ``nodeConfig.machineType``) is
specified, the location information from the specified field
will be propagated to the unspecified field.
machine_type (str):
Optional. The Compute Engine `machine
type </compute/docs/machine-types>`__ used for cluster
instances, specified as a `relative resource
name </apis/design/resource_names#relative_resource_name>`__.
For example:
"projects/{projectId}/zones/{zoneId}/machineTypes/{machineTypeId}".
The ``machineType`` must belong to the enclosing
environment's project and location. If both this field and
``nodeConfig.location`` are specified, this ``machineType``
must belong to the ``nodeConfig.location``; if both are
unspecified, the service will pick a zone in the Compute
Engine region corresponding to the Cloud Composer location,
and propagate that choice to both fields. If exactly one of
this field and ``nodeConfig.location`` is specified, the
location information from the specified field will be
propagated to the unspecified field.
The ``machineTypeId`` must not be a `shared-core machine
type </compute/docs/machine-types#sharedcore>`__.
If this field is unspecified, the ``machineTypeId`` defaults
to "n1-standard-1".
network (str):
Optional. The Compute Engine network to be used for machine
communications, specified as a `relative resource
name </apis/design/resource_names#relative_resource_name>`__.
For example:
"projects/{projectId}/global/networks/{networkId}".
If unspecified, the default network in the environment's
project is used. If a `Custom Subnet
Network </vpc/docs/vpc#vpc_networks_and_subnets>`__ is
provided, ``nodeConfig.subnetwork`` must also be provided.
For `Shared VPC </vpc/docs/shared-vpc>`__ subnetwork
requirements, see ``nodeConfig.subnetwork``.
subnetwork (str):
Optional. The Compute Engine subnetwork to be used for
machine communications, specified as a `relative resource
name </apis/design/resource_names#relative_resource_name>`__.
For example:
"projects/{projectId}/regions/{regionId}/subnetworks/{subnetworkId}"
If a subnetwork is provided, ``nodeConfig.network`` must
also be provided, and the subnetwork must belong to the
enclosing environment's project and location.
disk_size_gb (int):
Optional. The disk size in GB used for node
VMs. Minimum size is 20GB. If unspecified,
defaults to 100GB. Cannot be updated.
oauth_scopes (Sequence[str]):
Optional. The set of Google API scopes to be made available
on all node VMs. If ``oauth_scopes`` is empty, defaults to
["https://www.googleapis.com/auth/cloud-platform"]. Cannot
be updated.
service_account (str):
Optional. The Google Cloud Platform Service
Account to be used by the workloads. If a
service account is not specified, the "default"
Compute Engine service account is used. Cannot
be updated.
tags (Sequence[str]):
Optional. The list of instance tags applied to all node VMs.
Tags are used to identify valid sources or targets for
network firewalls. Each tag within the list must comply with
`RFC1035 <https://www.ietf.org/rfc/rfc1035.txt>`__. Cannot
be updated.
ip_allocation_policy (google.cloud.orchestration.airflow.service_v1beta1.types.IPAllocationPolicy):
Optional. The IPAllocationPolicy fields for
the GKE cluster.
max_pods_per_node (int):
Optional. The maximum number of pods per node in the Cloud
Composer GKE cluster. The value must be between 8 and 110
and it can be set only if the environment is VPC-native. The
default value is 32. Values of this field will be propagated
both to the ``default-pool`` node pool of the newly created
GKE cluster, and to the default "Maximum Pods per Node"
value which is used for newly created node pools if their
value is not explicitly set during node pool creation. For
more information, see [Optimizing IP address allocation]
(https://cloud.google.com/kubernetes-engine/docs/how-to/flexible-pod-cidr).
Cannot be updated.
"""
location = proto.Field(
proto.STRING,
number=1,
)
machine_type = proto.Field(
proto.STRING,
number=2,
)
network = proto.Field(
proto.STRING,
number=3,
)
subnetwork = proto.Field(
proto.STRING,
number=4,
)
disk_size_gb = proto.Field(
proto.INT32,
number=5,
)
oauth_scopes = proto.RepeatedField(
proto.STRING,
number=6,
)
service_account = proto.Field(
proto.STRING,
number=7,
)
tags = proto.RepeatedField(
proto.STRING,
number=8,
)
ip_allocation_policy = proto.Field(
proto.MESSAGE,
number=9,
message='IPAllocationPolicy',
)
max_pods_per_node = proto.Field(
proto.INT32,
number=10,
)
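# Construction sketch (assumption: this proto-plus message is the NodeConfig type
# of google.cloud.orchestration.airflow.service_v1beta1.types; field names are
# taken from the definitions above, and the project/zone values are placeholders):
#
#   node_config = NodeConfig(
#       location="projects/my-project/zones/us-central1-b",
#       machine_type="projects/my-project/zones/us-central1-b/machineTypes/n1-standard-2",
#       disk_size_gb=100,
#       oauth_scopes=["https://www.googleapis.com/auth/cloud-platform"],
#   )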
class PrivateClusterConfig(proto.Message):
r"""Configuration options for the private GKE cluster in a Cloud
Composer environment.
Attributes:
enable_private_endpoint (bool):
Optional. If ``true``, access to the public endpoint of the
GKE cluster is denied.
master_ipv4_cidr_block (str):
Optional. The CIDR block from which IPv4
range for GKE master will be reserved. If left
blank, the default value of '172.16.0.0/23' is
used.
master_ipv4_reserved_range (str):
Output only. The IP range in CIDR notation to
use for the hosted master network. This range is
used for assigning internal IP addresses to the
cluster master or set of masters and to the
internal load balancer virtual IP. This range
must not overlap with any other ranges in use
within the cluster's network.
"""
enable_private_endpoint = proto.Field(
proto.BOOL,
number=1,
)
master_ipv4_cidr_block = proto.Field(
proto.STRING,
number=2,
)
master_ipv4_reserved_range = proto.Field(
proto.STRING,
number=3,
)
class PrivateEnvironmentConfig(proto.Message):
r"""The configuration information for configuring a Private IP
Cloud Composer environment.
Attributes:
enable_private_environment (bool):
Optional. If ``true``, a Private IP Cloud Composer
environment is created. If this field is set to true,
``IPAllocationPolicy.use_ip_aliases`` must be set to true .
private_cluster_config (google.cloud.orchestration.airflow.service_v1beta1.types.PrivateClusterConfig):
Optional. Configuration for the private GKE
cluster for a Private IP Cloud Composer
environment.
web_server_ipv4_cidr_block (str):
Optional. The CIDR block from which IP range for web server
will be reserved. Needs to be disjoint from
private_cluster_config.master_ipv4_cidr_block and
cloud_sql_ipv4_cidr_block.
cloud_sql_ipv4_cidr_block (str):
Optional. The CIDR block from which IP range in tenant
project will be reserved for Cloud SQL. Needs to be disjoint
from web_server_ipv4_cidr_block
web_server_ipv4_reserved_range (str):
Output only. The IP range reserved for the
tenant project's App Engine VMs.
cloud_composer_network_ipv4_cidr_block (str):
Optional. The CIDR block from which IP range for Cloud
Composer Network in tenant project will be reserved. Needs
to be disjoint from
private_cluster_config.master_ipv4_cidr_block and
cloud_sql_ipv4_cidr_block.
This field is supported for Cloud Composer environments in
versions composer-2.\ *.*-airflow-*.*.\* and newer.
cloud_composer_network_ipv4_reserved_range (str):
Output only. The IP range reserved for the tenant project's
Cloud Composer network.
This field is supported for Cloud Composer environments in
versions composer-2.\ *.*-airflow-*.*.\* and newer.
"""
enable_private_environment = proto.Field(
proto.BOOL,
number=1,
)
private_cluster_config = proto.Field(
proto.MESSAGE,
number=2,
message='PrivateClusterConfig',
)
web_server_ipv4_cidr_block = proto.Field(
proto.STRING,
number=3,
)
cloud_sql_ipv4_cidr_block = proto.Field(
proto.STRING,
number=4,
)
web_server_ipv4_reserved_range = proto.Field(
proto.STRING,
number=5,
)
cloud_composer_network_ipv4_cidr_block = proto.Field(
proto.STRING,
number=7,
)
cloud_composer_network_ipv4_reserved_range = proto.Field(
proto.STRING,
number=8,
)
class DatabaseConfig(proto.Message):
r"""The configuration of Cloud SQL instance that is used by the
Apache Airflow software.
Attributes:
machine_type (str):
Optional. Cloud SQL machine type used by
Airflow database. It has to be one of:
db-n1-standard-2, db-n1-standard-4,
db-n1-standard-8 or db-n1-standard-16. If not
specified, db-n1-standard-2 will be used.
"""
machine_type = proto.Field(
proto.STRING,
number=1,
)
class WebServerConfig(proto.Message):
r"""The configuration settings for the Airflow web server App
Engine instance.
Attributes:
machine_type (str):
Optional. Machine type on which Airflow web
server is running. It has to be one of:
composer-n1-webserver-2, composer-n1-webserver-4
or composer-n1-webserver-8.
If not specified, composer-n1-webserver-2 will
be used. Value custom is returned only in
response, if Airflow web server parameters were
manually changed to non-standard values.
"""
machine_type = proto.Field(
proto.STRING,
number=1,
)
class EncryptionConfig(proto.Message):
r"""The encryption options for the Cloud Composer environment and
its dependencies.
Attributes:
kms_key_name (str):
Optional. Customer-managed Encryption Key
available through Google's Key Management
Service. Cannot be updated. If not specified,
Google-managed key will be used.
"""
kms_key_name = proto.Field(
proto.STRING,
number=1,
)
class MaintenanceWindow(proto.Message):
r"""The configuration settings for Cloud Composer maintenance window.
The following example:
::
{
"startTime":"2019-08-01T01:00:00Z"
"endTime":"2019-08-01T07:00:00Z"
"recurrence":"FREQ=WEEKLY;BYDAY=TU,WE"
}
would define a maintenance window between 01 and 07 hours UTC during
each Tuesday and Wednesday.
Attributes:
start_time (google.protobuf.timestamp_pb2.Timestamp):
Required. Start time of the first recurrence
of the maintenance window.
end_time (google.protobuf.timestamp_pb2.Timestamp):
Required. Maintenance window end time. It is used only to
calculate the duration of the maintenance window. The value
for end_time must be in the future, relative to
``start_time``.
recurrence (str):
Required. Maintenance window recurrence. Format is a subset
of `RFC-5545 <https://tools.ietf.org/html/rfc5545>`__
``RRULE``. The only allowed values for ``FREQ`` field are
``FREQ=DAILY`` and ``FREQ=WEEKLY;BYDAY=...`` Example values:
``FREQ=WEEKLY;BYDAY=TU,WE``, ``FREQ=DAILY``.
"""
start_time = proto.Field(
proto.MESSAGE,
number=1,
message=timestamp_pb2.Timestamp,
)
end_time = proto.Field(
proto.MESSAGE,
number=2,
message=timestamp_pb2.Timestamp,
| |
range=(0, self.height),
normed=normalize)
ret = None
if forPlot:
# for using matplotlib bar command
# bin labels, bin values, bin width
ret = (hist[1][0:-1], hist[0], self.height / bins)
else:
ret = hist[0]
return ret
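# Usage sketch (assumptions: `img` is an instance of this Image class and
# vertical_histogram takes the same keyword arguments as horizontal_histogram
# below): with forPlot=True the return value is (bin_labels, bin_values,
# bin_width), which maps directly onto matplotlib's bar():
#
#   import matplotlib.pyplot as plt
#   labels, values, width = img.vertical_histogram(bins=10, threshold=128, forPlot=True)
#   plt.bar(labels, values, width)
#   plt.show()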
def horizontal_histogram(self, bins=10, threshold=128, normalize=False,
forPlot=False):
if bins <= 0:
raise Exception("Not enough bins")
img = self.gray_narray
pts = np.where(img > threshold)
x = pts[0]
hist = np.histogram(x, bins=bins, range=(0, self.width),
normed=normalize)
ret = None
if forPlot:
# for using matplotlib bar command
# bin labels, bin values, bin width
ret = (hist[1][0:-1], hist[0], self.width / bins)
else:
ret = hist[0]
return ret
def get_linescan(self, x=None, y=None, pt1=None, pt2=None, channel=-1):
if channel == -1:
img = self.gray_narray
else:
try:
img = self.narray[:, :, channel]
except IndexError:
print('Channel missing!')
return None
ret = None
if x is not None and y is None and pt1 is None and pt2 is None:
if 0 <= x < self.width:
ret = LineScan(img[x, :])
ret.image = self
ret.pt1 = (x, 0)
ret.pt2 = (x, self.height)
ret.col = x
x = np.ones((1, self.height))[0] * x
y = range(0, self.height, 1)
pts = zip(x, y)
ret.pointLoc = pts
else:
warnings.warn(
"Image.get_linescan - that is not valid scanline.")
return None
elif x is None and y is not None and pt1 is None and pt2 is None:
if 0 <= y < self.height:
ret = LineScan(img[:, y])
ret.image = self
ret.pt1 = (0, y)
ret.pt2 = (self.width, y)
ret.row = y
y = np.ones((1, self.width))[0] * y
x = range(0, self.width, 1)
pts = zip(x, y)
ret.pointLoc = pts
else:
warnings.warn(
"Image.get_linescan - that is not valid scanline.")
return None
elif ((isinstance(pt1, tuple) or isinstance(pt1, list)) and
(isinstance(pt2, tuple) or isinstance(pt2, list)) and
len(pt1) == 2 and len(pt2) == 2 and x is None and y is None):
pts = self.bresenham_line(pt1, pt2)
ret = LineScan([img[p[0], p[1]] for p in pts])
ret.pointLoc = pts
ret.image = self
ret.pt1 = pt1
ret.pt2 = pt2
else:
# an invalid combination - warn
warnings.warn(
"Image.get_linescan - that is not valid scanline.")
return None
ret.channel = channel
return ret
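# Usage sketch (assumption: `img` is an instance of this Image class, large enough
# for the requested coordinates): a LineScan can be taken along a column, a row,
# or an arbitrary segment between two points:
#
#   col_scan = img.get_linescan(x=64)              # vertical scan of column 64
#   row_scan = img.get_linescan(y=120, channel=1)  # horizontal scan of row 120, channel 1
#   seg_scan = img.get_linescan(pt1=(0, 0), pt2=(100, 50))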
def set_linescan(self, linescan, x=None, y=None, pt1=None, pt2=None,
channel=-1):
if channel == -1:
img = np.copy(self.gray_narray)
else:
try:
img = np.copy(self.narray[:, :, channel])
except IndexError:
print('Channel missing!')
return None
if (x is None and y is None and pt1 is None and pt2 is None):
if (linescan.pt1 is None or linescan.pt2 is None):
warnings.warn(
"Image.set_linescan: No coordinates to re-insert linescan.")
return None
else:
pt1 = linescan.pt1
pt2 = linescan.pt2
if (pt1[0] == pt2[0] and np.abs(
pt1[1] - pt2[1]) == self.height):
x = pt1[0] # vertical line
pt1 = None
pt2 = None
elif (pt1[1] == pt2[1] and np.abs(
pt1[0] - pt2[0]) == self.width):
y = pt1[1] # horizontal line
pt1 = None
pt2 = None
ret = None
if x is not None and y is None and pt1 is None and pt2 is None:
if 0 <= x < self.width:
if len(linescan) != self.height:
linescan = linescan.resample(self.height)
# check for number of points
# linescan = np.array(linescan)
img[x, :] = np.clip(linescan[:], 0, 255)
else:
warnings.warn(
"Image.set_linescan: No coordinates to re-insert linescan.")
return None
elif x is None and y is not None and pt1 is None and pt2 is None:
if 0 <= y < self.height:
if len(linescan) != self.width:
linescan = linescan.resample(self.width)
# check for number of points
# linescan = np.array(linescan)
img[:, y] = np.clip(linescan[:], 0, 255)
else:
warnings.warn(
"Image.set_linescan: No coordinates to re-insert linescan.")
return None
elif ((isinstance(pt1, tuple) or isinstance(pt1, list)) and
(isinstance(pt2, tuple) or isinstance(pt2, list)) and
len(pt1) == 2 and len(pt2) == 2 and
x is None and y is None):
pts = self.bresenham_line(pt1, pt2)
if len(linescan) != len(pts):
linescan = linescan.resample(len(pts))
# linescan = np.array(linescan)
linescan = np.clip(linescan[:], 0, 255)
idx = 0
for pt in pts:
img[pt[0], pt[1]] = linescan[idx]
idx += 1
else:
warnings.warn(
"Image.set_linescan: No coordinates to re-insert linescan.")
return None
if channel == -1:
ret = Image(img)
else:
temp = np.copy(self.narray)
temp[:, :, channel] = img
ret = Image(temp)
return ret
def replaceLineScan(self, linescan, x=None, y=None, pt1=None, pt2=None,
channel=None):
if x is None and y is None and pt1 is None and pt2 is None and channel is None:
if linescan.channel == -1:
img = np.copy(self.gray_narray)
else:
try:
img = np.copy(self.narray[:, :, linescan.channel])
except IndexError:
print('Channel missing!')
return None
if linescan.row is not None:
if len(linescan) == self.width:
ls = np.clip(linescan, 0, 255)
img[:, linescan.row] = ls[:]
else:
warnings.warn("LineScan Size and Image size do not match")
return None
elif linescan.col is not None:
if len(linescan) == self.height:
ls = np.clip(linescan, 0, 255)
img[linescan.col, :] = ls[:]
else:
warnings.warn("LineScan Size and Image size do not match")
return None
elif linescan.pt1 and linescan.pt2:
pts = self.bresenham_line(linescan.pt1, linescan.pt2)
if (len(linescan) != len(pts)):
linescan = linescan.resample(len(pts))
ls = np.clip(linescan[:], 0, 255)
idx = 0
for pt in pts:
img[pt[0], pt[1]] = ls[idx]
idx = idx + 1
if linescan.channel == -1:
ret = Image(img)
else:
temp = np.copy(self.narray)
temp[:, :, linescan.channel] = img
ret = Image(temp)
else:
if channel is None:
ret = self.set_linescan(linescan, x, y, pt1, pt2,
linescan.channel)
else:
ret = self.set_linescan(linescan, x, y, pt1, pt2, channel)
return ret
def get_pixels_online(self, pt1, pt2):
ret = None
if ((isinstance(pt1, tuple) or isinstance(pt1, list)) and
(isinstance(pt2, tuple) or isinstance(pt2, list)) and
len(pt1) == 2 and len(pt2) == 2):
pts = self.bresenham_line(pt1, pt2)
ret = [self.get_pixel(p[0], p[1]) for p in pts]
else:
warnings.warn("Image.get_pixels_online - The line you provided is "
"not valid")
return ret
def bresenham_line(self, (x, y), (x2, y2)):
if (not 0 <= x <= self.width - 1 or not 0 <= y <= self.height - 1 or
not 0 <= x2 <= self.width - 1 or not 0 <= y2 <= self.height - 1):
l = Line(self, ((x, y), (x2, y2))).crop2image_edges()
if l:
ep = list(l.end_points)
ep.sort()
x, y = ep[0]
x2, y2 = ep[1]
else:
return []
steep = 0
coords = []
dx = abs(x2 - x)
if (x2 - x) > 0:
sx = 1
else:
sx = -1
dy = abs(y2 - y)
if (y2 - y) > 0:
sy = 1
else:
sy = -1
if dy > dx:
steep = 1
x, y = y, x
dx, dy = dy, dx
sx, sy = sy, sx
d = (2 * dy) - dx
for i in range(0, dx):
if steep:
coords.append((y, x))
else:
coords.append((x, y))
while d >= 0:
y += sy
d -= 2 * dx
x += sx
d += 2 * dy
coords.append((x2, y2))
return coords
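# Worked example (assumption: the image is large enough to contain both endpoints,
# so the crop-to-edges branch above is skipped): for pt1=(0, 0), pt2=(3, 2) the
# loop emits (0, 0), (1, 1), (2, 1) and the final append adds the endpoint, i.e.
#
#   img.bresenham_line((0, 0), (3, 2))  ->  [(0, 0), (1, 1), (2, 1), (3, 2)]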
def uncrop(self, ListofPts):
return [(i[0] + self._uncropped_x, i[1] + self._uncropped_y) for i in
ListofPts]
def grid(self, dimensions=(10, 10), color=(0, 0, 0), width=1,
antialias=True, alpha=-1):
ret = self.copy()
try:
step_row = self.size()[1] / dimensions[0]
step_col = self.size()[0] / dimensions[1]
except ZeroDivisionError:
return ret
i = 1
j = 1
grid = DrawingLayer(self.size()) # add a new layer for grid
while i < dimensions[0] and j < dimensions[1]:
if dimensions[0] > i:
grid.line((0, step_row * i), (self.size()[0], step_row * i),
color, width, antialias, alpha)
i += 1
if j < dimensions[1]:
grid.line((step_col * j, 0), (step_col * j, self.size()[1]),
color, width, antialias, alpha)
j += 1
ret._grid_layer[0] = ret.add_drawing_layer(grid) # store grid layer index
ret._grid_layer[1] = dimensions
return ret
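# Usage sketch (assumption: `img` is an instance of this Image class): overlay a
# 4x6 red grid, then recover or remove it via the helpers defined below:
#
#   gridded = img.grid(dimensions=(4, 6), color=(255, 0, 0), width=2)
#   lines = gridded.find_grid_lines()   # Line features along the grid
#   layer = gridded.remove_grid()       # returns the removed DrawingLayer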
def remove_grid(self):
if self._grid_layer[0] is not None:
grid = self.remove_drawing_layer(self._grid_layer[0])
self._grid_layer = [None, [0, 0]]
return grid
else:
return None
def find_grid_lines(self):
gridIndex = self.get_drawing_layer(self._grid_layer[0])
if self._grid_layer[0] == -1:
print("Cannot find grid on the image, Try adding a grid first")
lineFS = FeatureSet()
try:
step_row = self.size()[1] / self._grid_layer[1][0]
step_col = self.size()[0] / self._grid_layer[1][1]
except ZeroDivisionError:
return None
i = 1
j = 1
while i < self._grid_layer[1][0]:
lineFS.append(
Line(self,
((0, step_row * i), (self.size()[0], step_row * i))))
i += 1
while j < self._grid_layer[1][1]:
lineFS.append(
Line(self,
((step_col | |
a line in rootLayer
>>> self.moveElement(line2,[10,10]) # moves line2 DeltaX=10, DeltaY=10
>>> self.moveElement(groupA,[10,-10]) # moves groupA DeltaX=10, DeltaY=-10
"""
if distance == 0:
return
transfString = ''
if 'transform' in element.attrib:
transfString = element.attrib['transform']
# if transform attribute is present, we must add the new translation
if transfString:
newTransform = 'translate(%f %f) %s ' % (distance[0], distance[1], transfString)
else: # if no transform attribute was found
newTransform = 'translate(%f %f)' % (distance[0], distance[1])
element.attrib['transform'] = newTransform
#---------------------------------------------
def scaleElement(self, element, scaleX=1.0, scaleY=0.0):
"""Scales the element using the transformation attribute.
It is possible to scale elements isolated or entire groups.
:param element: element object to be rotated
:param scaleX: scaling factor in X direction. Default=1.0
:param scaleY: scaling factor in Y direction. Default=0.0
:type element: element object
:type scaleX: float
:type scaleY: float
:returns: nothing
:rtype: -
.. note:: If scaleY==0, then scaleY=scaleX is assumed (default behavior)
**Example**
>>> rootLayer = self.document.getroot() # retrieves the root layer of the file
>>> groupA = self.createGroup(rootLayer,label='temp') # creates a group inside rootLayer
>>> circ1 = centerRadius(groupA,centerPoint=[0,0],radius=1.0) # creates a line in groupA
>>> circ2 = centerRadius(rootLayer,centerPoint=[0,0],radius=1.0) # creates a line in rootLayer
>>> self.scaleElement(circ1,2.0) # scales x2 in both X and Y directions
>>> self.scaleElement(circ1,2.0,3.0) # scales x2 in X and x3 in Y
>>> self.scaleElement(groupA,0.5) # scales x0.5 the group in both X and Y directions
"""
transfString = ''
if 'transform' in element.attrib:
transfString = element.attrib['transform']
# if transform attribute is present, we must prepend the new scaling
if transfString:
if scaleY != 0.0:
newTransform = 'scale(%f %f) %s ' % (scaleX, scaleY, transfString)
else:
newTransform = 'scale(%f) %s' % (scaleX, transfString)
else: # if no transform attribute was found
if scaleY != 0.0:
newTransform = 'scale(%f %f)' % (scaleX, scaleY)
else:
newTransform = 'scale(%f)' % (scaleX)
element.attrib['transform'] = newTransform
#---------------------------------------------
def findMarker(self, markerName):
"""Search for markerName in the document.
:param markerName: marker name
:type markerName: string
:returns: True if markerName was found
:rtype: bool
"""
doc_markers = {}
for child in self.getDefinitions():
if child.get('id') == markerName:
return True
return False
#---------------------------------------------
def getPoints(self, element):
"""Retrieves the list of points of the element.
This function works on paths, texts or groups. In the case of a group, the function will recursively include all of its components.
:param element: element object
:type element: element object
:returns: list of points
:rtype: list of list
.. note:: This function will consider any transformation stored in transform attribute, that is, it will compute the resulting coordinates of each object
**Example**
>>> rootLayer = self.document.getroot() # retrieves the root layer of the file
>>> line1 = inkscapeMadeEasy_Draw.line.relCoords(rootLayer, [[5,0],[0,6]],[0,0]) # creates a line in groupA
>>> list = self.getPoints(line1) # gets list = [[0.0, 0.0], [5.0, 0.0], [5.0, 6.0]]
"""
# stores the list of coordinates
listCoords = []
# check if element is valid. 'path', 'text' and 'g' are valid
accepted_strings = set( [ inkex.addNS('path', 'svg'), inkex.addNS('text', 'svg'), 'g', 'path'] )
if element.tag not in accepted_strings:
return listCoords
if element.tag == inkex.addNS('path', 'svg') or element.tag == 'path': # if object is path
dString = re.sub('([a-df-zA-DF-Z])+?', r'#\1#', element.attrib['d']).replace('z', '').replace('Z', '').replace(',', ' ').split('#') # adds a marker character around command letters (dropping z/Z) and splits; the character class excludes e and E because they appear in scientific notation
dString = [i.lstrip() for i in dString] # removes leading spaces from strings
dString = filter(None, dString) # removes empty elements
Xcurrent = 0
Ycurrent = 0
while len(dString) > 0:
commandType = dString[0]
argument = [float(x) for x in dString[1].split()] # extracts arguments from M command and converts to float
del dString[0]
del dString[0]
if commandType in 'mMlLtT': # extracts points from command 'move to' M/m, 'line to' l/L, or 'smooth quadratic Bezier curveto' t/T
X = argument[0::2] # 2 parameters per segment, x is 1st
Y = argument[1::2] # 2 parameters per segment, y is 2nd
if commandType in 'hH': # extracts points from command 'horizontal line' h/H
X = argument
Y = [Ycurrent] * len(X)
if commandType in 'vV': # extracts points from command 'vertical line' v/V
Y = argument
X = [Xcurrent] * len(Y)
if commandType in 'cC': # extracts points from command 'Bezier Curve' c/C
X = argument[4::6] # 6 parameters per segment, x is 5th
Y = argument[5::6] # 6 parameters per segment, y is 6th
if commandType in 'sSqQ': # extracts points from command 'quadratic Bezier Curve' q/Q or 'smooth curveto' s/S
X = argument[2::4] # 4 parameters per segment, x is 3rd
Y = argument[3::4] # 4 parameters per segment, y is 4th
if commandType in 'aA': # extracts points from command 'arc' a/A
X = argument[5::7] # 7 parameters per segment, x is 6th
Y = argument[6::7] # 7 parameters per segment, y is 7th
if commandType in 'h': # if h
for i in range(0, len(X)): # convert to abs coordinates
if i==0:
X[i] = X[i] + Xcurrent
else:
X[i] = X[i] + X[i - 1]
if commandType in 'v': # if v
for i in range(0, len(Y)): # convert to abs coordinates
if i==0:
Y[i] = Y[i] + Ycurrent
else:
Y[i] = Y[i] + Y[i - 1]
if commandType in 'mltcsqa': # relative commands: convert to absolute coordinates
for i in range(0, len(X)): # convert to abs coordinates
if i==0:
X[i] = X[i] + Xcurrent
Y[i] = Y[i] + Ycurrent
else:
X[i] = X[i] + X[i - 1]
Y[i] = Y[i] + Y[i - 1]
coords = zip(X, Y)
listCoords.extend(coords)
Xcurrent = X[-1]
Ycurrent = Y[-1]
if element.tag == inkex.addNS('text', 'svg'): # if object is a text
x = float(element.attrib['x'])
y = float(element.attrib['y'])
coords = [[x, y]]
listCoords.extend(coords)
if element.tag == 'g': # if object is a group
for obj in element.iter():
if obj != element:
listPoints = self.getPoints(obj)
listCoords.extend(listPoints)
# apply transformation
transfMat = self.getTransformMatrix(element)[1]
# creates numpy array with the points to be transformed
coordsNP = np.hstack((np.array(listCoords), np.ones([len(listCoords), 1]))).transpose()
coordsTransformed = np.dot(transfMat, coordsNP)
coordsTransformed = np.delete(coordsTransformed, 2, 0).transpose().tolist() # remove last line, transposes and converts to list of lists
return coordsTransformed
#---------------------------------------------
def getBoundingBox(self, element):
"""Retrieves the bounding Box of the element.
This function works on paths, texts or groups. In the case of a group, the function will recursively consider all of its components.
:param element: element object
:type element: element object
:returns: two lists: [xMin,yMin] and [xMax,yMax]
:rtype: list
.. note:: This function will consider any transformation stored in transform attribute, that is, it will compute the resulting coordinates of each object
**Example**
>>> rootLayer = self.document.getroot() # retrieves the root layer of the file
>>> line1 = inkscapeMadeEasy_Draw.line.relCoords(rootLayer, [[5,0],[0,6]],[0,0]) # creates a line in groupA
>>> BboxMin,BboxMax = self.getBoundingBox(line1) # gets BboxMin = [0.0, 0.0] and BboxMax = [5.0, 6.0]
"""
coords = self.getPoints(element)
coordsNP = np.array(coords)
bboxMax = np.max(coordsNP, 0)
bboxMin = np.min(coordsNP, 0)
return bboxMin.tolist(), bboxMax.tolist()
#---------------------------------------------
def getCenter(self, element):
"""Retrieves the center coordinates of the bounding Box of the element.
This function works on paths, texts or groups. In the case of a group, the function will recursively consider all of its components.
:param element: element object
:type element: element object
:returns: two lists: [xCenter,yCenter]
:rtype: list
.. note:: This function will consider any transformation stored in transform attribute, that is, it will compute the resulting coordinates of each object
**Example**
>>> rootLayer = self.document.getroot() # retrieves the root layer of the file
>>> line1 = inkscapeMadeEasy_Draw.line.relCoords(rootLayer, [[5,0],[0,6]],[0,0]) # creates a line in groupA
>>> Center = self.getCenter(line1) # gets Center = [2.5, 3.0]
"""
bboxMin, bboxMax = self.getBoundingBox(element)
bboxCenter = [(bboxMax[0] + bboxMin[0]) / 2, (bboxMax[1] + bboxMin[1]) / 2]
return bboxCenter
def getSegmentFromPoints(self, pointList,normalDirection='R'):
"""given two points of a straight line segment, returns the parameters of that segment:
length, angle (in radians), tangent unitary vector and normal unitary vector
:param | |
from pioneer.common import linalg
from pioneer.common.platform import parse_datasource_name
from pioneer.common.gui import utils
from pioneer.common.video import VideoRecorder, RecordableInterface
from pioneer.das.api import categories, lane_types
from pioneer.das.api.platform import Platform
from pioneer.das.api.samples import ImageFisheye, ImageCylinder, Echo
from pioneer.das.api.sensors import Sensor
from pioneer.das.api.datasources.virtual_datasources.voxel_map import VoxelMap
from pioneer.das.view.windows import Window
from matplotlib.patches import Rectangle, Polygon
from matplotlib.collections import PatchCollection, PolyCollection
from PyQt5.QtCore import QObject
from PyQt5.QtGui import QColor
from PyQt5.QtQml import QQmlProperty
import matplotlib
import matplotlib.patheffects as PathEffects
import matplotlib.pyplot as plt
import numpy as np
class ImagerWindow(Window, RecordableInterface):
def __init__(self, window, platform, synchronized, datasource, video_fps):
super(ImagerWindow, self).__init__(window, platform)
self.synchronized = synchronized
self.datasource = datasource
self.backend = self.window.findChild(QObject, "figure")
self.ax = self.backend.getFigure().add_subplot(111)
self.image = None
self.scatter = None
self.video_recorder = VideoRecorder.create(self, datasource, platform, synchronized, video_fps)
def get_frame(self):
"""Override"""
width, height = self.backend.getFigure().get_size_inches() * self.backend.getFigure().get_dpi()
return np.fromstring(self.backend.tostring_rgb(), dtype='uint8').reshape(int(height),int(width),3)
def connect(self):
if self.platform.is_live():
self.platform[self.datasource].ds.connect(self.update)
else:
self.add_connection(self.window.cursorChanged.connect(self.update))
self.add_connection(self.window.visibleChanged.connect(self.update))
controls = self.window.controls
self.add_connection(controls.undistortChanged.connect(self.update))
self.add_connection(controls.undistortimageChanged.connect(self.update))
self.add_connection(controls.showActorChanged.connect(self.update))
self.add_connection(controls.pointSizeChanged.connect(self.update))
self.add_connection(controls.useColorsChanged.connect(self.update))
self.add_connection(controls.useBoxColorsChanged.connect(self.update))
self.add_connection(controls.boxLabelsSizeChanged.connect(self.update))
self.add_connection(controls.logScaleChanged.connect(self.update))
self.add_connection(controls.showBBox2DChanged.connect(self.update))
self.add_connection(controls.showSeg2DChanged.connect(self.update))
self.add_connection(controls.showBBox3DChanged.connect(self.update))
self.add_connection(controls.showSeg3DChanged.connect(self.update))
self.add_connection(controls.showLanesChanged.connect(self.update))
self.add_connection(controls.confThresholdChanged.connect(self.update))
self.add_connection(controls.videoChanged.connect(self.update))
self.add_connection(controls.categoryFilterChanged.connect(self.update))
self.add_connection(controls.aspectRatioChanged.connect(self.update_aspect_ratio))
self.add_connection(controls.cropLeftChanged.connect(self.update_aspect_ratio))
self.add_connection(controls.cropRightChanged.connect(self.update_aspect_ratio))
self.add_connection(controls.cropTopChanged.connect(self.update_aspect_ratio))
self.add_connection(controls.cropBottomChanged.connect(self.update_aspect_ratio))
self.add_connection(controls.submitVoxelMapMemory.clicked.connect(self.update_voxel_map))
self.add_connection(controls.voxelSizeChanged.connect(self.update_voxel_map))
self.update()
def update(self):
if not self.window.isVisible():
return
self.__read_qml_properties()
cursor = -1 if self.platform.is_live() else int(self.cursor)
sample = self.platform[self.datasource][cursor]
if self.undistortimage:
image = sample.undistort_image()
else:
image = sample.raw_image()
self.__update_actors(sample, image)
box = None
self.__update_box2D(sample, image, box)
self.__update_seg_2d(sample, image)
self.__update_bbox_3d(sample, image, box)
self.__update_lanes(sample, image)
self.__draw(image)
self.video_recorder.record(self.is_recording)
def update_aspect_ratio(self):
im = self.ax.get_images()
extent = im[0].get_extent()
self.ax.set_aspect(abs((extent[1]-extent[0])/(extent[3]-extent[2]))/self.aspect_ratio)
self.update()
def update_voxel_map(self):
datasources = [ds_name for ds_name, show in dict(self.show_actor, **dict(self.show_seg_3d)).items() if show]
for datasource in datasources:
if isinstance(self.platform[datasource], VoxelMap):
self.platform[datasource].clear_cache()
self.platform[datasource].memory = int(self.window.controls.voxelMapMemory)
self.platform[datasource].skip = int(self.window.controls.voxelMapSkip)
self.platform[datasource].voxel_size = 10**float(self.window.controls.voxelSize)
self.window.controls.voxelSizeText = f'{self.platform[datasource].voxel_size:.2f}'
self.platform[datasource].invalidate_caches()
self.update()
#===============================================================================
# Private methods
# ==============================================================================
def __get_datasource_to_show_seg3d(self, datasource_name):
sensor_name, pos, ds_type = parse_datasource_name(datasource_name)
if f'{sensor_name}_{pos}_ech' in self.show_actor:
return f'{sensor_name}_{pos}_ech'
if f'{sensor_name}_{pos}_xyzit' in self.show_actor:
return f'{sensor_name}_{pos}_xyzit'
#TODO: Edge case, handle this better
raise Exception('It is impossible to show 3d segmentation if there is not an echo or xyzit datasource in dataset')
def __get_sample(self, sample, datasource_name):
if self.synchronized_cursor == self.cursor and self.player_cursor != -1:
return self.synchronized[self.player_cursor][datasource_name]
else:
return self.platform[datasource_name].get_at_timestamp(sample.timestamp)
def __filter_indices(self, points_mask, indices):
if indices.ndim == 2:
points_mask = np.all(points_mask[indices], axis=1)
return indices[points_mask]
def __update_actors(self, sample, image):
datasources = [ds_name for ds_name, show in dict(self.show_actor, **dict(self.show_seg_3d)).items() if show]
all_points2D = dict()
all_colors = dict()
all_indices = dict()
for datasource_name in datasources:
is_seg3D = datasource_name in self.show_seg_3d
output_ds_name = datasource_name
if is_seg3D:
output_ds_name = datasource_name
datasource_name = self.__get_datasource_to_show_seg3d(datasource_name)
cloud_sample = self.__get_sample(sample, datasource_name)
try:
points, amplitudes, indices = cloud_sample.get_cloud(referential = self.datasource, undistort = self.undistort, reference_ts = int(sample.timestamp), dtype=np.float64)
except Sensor.NoPathToReferential as e:
self.has_referential[datasource_name]['hasReferential'] = False
continue
if points.size == 0:
continue
self.has_referential[datasource_name]['hasReferential'] = True
pts2d, points_mask = sample.project_pts(points, mask_fov=False, output_mask=True, undistorted=self.undistortimage)
all_points2D[output_ds_name] = pts2d
if is_seg3D:
seg_sample = self.platform[output_ds_name].get_at_timestamp(cloud_sample.timestamp)
mode = 'quad_cloud' if isinstance(cloud_sample, Echo) else None
seg_colors = seg_sample.colors(mode=mode)
if seg_colors.shape[0] != points.shape[0]:
print(f'Warning. The length ({seg_colors.shape[0]}) of the segmentation 3D data' \
+f' does not match the length ({points.shape[0]}) of the point cloud.')
continue
all_colors[output_ds_name] = seg_colors
if self.category_filter != '':
points_mask &= seg_sample.mask_category(self.category_filter)
elif '-rgb' in datasource_name: #TODO: generalize how colors are obtained from the sample
rgb_colors = np.ones((cloud_sample.raw.shape[0],4))
rgb_colors[:,0] = cloud_sample.raw['r']/255
rgb_colors[:,1] = cloud_sample.raw['g']/255
rgb_colors[:,2] = cloud_sample.raw['b']/255
all_colors[output_ds_name] = rgb_colors
else:
a_min, a_max = amplitudes.min(), amplitudes.max()
if self.log_scale:
norm = matplotlib.colors.LogNorm(1 + a_min, 1 + a_min + a_max)
else:
norm = matplotlib.colors.Normalize(amplitudes.min(), amplitudes.max())
if self.use_colors:
c = np.full((points.shape[0], 4), utils.to_numpy(QColor(self.ds_colors[datasource_name])))
c[:,3] = (0.25 + norm(amplitudes + ((1 + a_min) if self.log_scale else 0)))/1.25 #to make sure every point is visible
all_colors[output_ds_name] = c
else:
all_colors[output_ds_name] = norm(amplitudes)
all_indices[output_ds_name] = self.__filter_indices(points_mask, indices)
self.window.hasReferential = self.has_referential
self.__clean_plot_canvas()
for ds_name, indices in all_indices.items():
points2d = all_points2D[ds_name][indices]
colors = np.squeeze(all_colors[ds_name][indices[:,0] if indices.ndim>1 else indices]) #all colors are the same in a given triangle
if indices.ndim == 2:
if colors.ndim == 1:
poly_coll = PolyCollection(points2d, array=colors, cmap=plt.cm.viridis, edgecolors=None, alpha=0.7)
else:
poly_coll = PolyCollection(points2d, facecolors=colors, edgecolors=None, alpha=0.7)
self.ax.add_collection(poly_coll)
self.ax.figure.canvas.draw()
else:
self.scatter = self.ax.scatter(points2d[:, 0], points2d[:, 1], s=self.point_size, c=colors)
try: #remove all previous 2D and 3D boxes
[p.remove() for p in reversed(self.ax.patches)]
[p.remove() for p in reversed(self.ax.texts)]
except:
pass
def __update_box2D(self, sample, image, box):
datasources = [ds_name for ds_name, show in self.show_bbox_2d.items() if show]
for ds_name in datasources:
_, _, ds_type = parse_datasource_name(ds_name)
box_source = categories.get_source(ds_type)
box2d_sample = self.platform[ds_name].get_at_timestamp(sample.timestamp)
if np.abs(np.int64(sample.timestamp) - box2d_sample.timestamp) <= 1e6:
raw = box2d_sample.raw
if 'confidence' in raw:
mask = (raw['confidence'] > self.conf_threshold)
box2d = raw['data'][mask]
else:
box2d = raw['data']
if len(box2d) > 0:
for i, box in enumerate(box2d):
top = (box['x']-box['h']/2)*image.shape[0]
left = (box['y']-box['w']/2)*image.shape[1]
name, color = categories.get_name_color(box_source,box['classes'])
if self.category_filter != '':
if name not in self.category_filter:
continue
color = np.array(color)/255
if self.use_box_colors:
color = utils.to_numpy(QColor(self.box_2d_colors[ds_name]))[:3]
if 'confidence' in raw:
conf = raw['confidence'][mask][i]
name = f"{name}({conf:.3f})"
rect = Rectangle((left,top),box['w']*image.shape[1],box['h']*image.shape[0],linewidth=1,edgecolor=color,facecolor=list(color)+[0.15])
self.ax.add_patch(rect)
if self.box_labels_size > 0:
txt = self.ax.text(left,top,name+':'+str(box['id']),color='w',fontweight='bold', fontsize=self.box_labels_size, clip_on=True)
txt.set_path_effects([PathEffects.withStroke(linewidth=1, foreground='k')])
def __update_seg_2d(self, sample, image):
datasources = [ds_name for ds_name, show in self.show_seg_2d.items() if show]
for ds_name in datasources:
seg_sample = self.platform[ds_name].get_at_timestamp(sample.timestamp)
_, _, ds_type = parse_datasource_name(ds_name)
annotation_source = categories.get_source(ds_type)
if np.abs(np.int64(sample.timestamp) - seg_sample.timestamp) <= 1e6:
if 'poly2d' in ds_name:
raw = seg_sample.raw
poly2d = raw['data']
if 'confidence' in raw:
mask = raw['confidence'] > self.conf_threshold
poly2d = poly2d[mask]
elif 'seg2d' in ds_name:
poly2d = seg_sample.poly2d(self.conf_threshold)
for poly in poly2d:
name, color = categories.get_name_color(annotation_source, poly['classes'])
if self.category_filter != '':
if name not in self.category_filter:
break
color = np.array(color)/255
patch = Polygon(poly['polygon'], closed=True,linewidth=1,edgecolor=color,facecolor=list(color)+[0.15])
self.ax.add_patch(patch)
def __update_bbox_3d(self, sample, image, box):
datasources = [ds_name for ds_name, show in self.show_bbox_3d.items() if show]
for ds_name in datasources:
_, _, ds_type = parse_datasource_name(ds_name)
box_source = categories.get_source(ds_type)
if box_source not in categories.CATEGORIES: #FIXME: this should not be here
box_source = 'deepen'
box3d_sample = self.platform[ds_name].get_at_timestamp(sample.timestamp)
if np.abs(np.int64(sample.timestamp) - box3d_sample.timestamp) <= 1e6:
raw = box3d_sample.raw
box3d = box3d_sample.mapto(self.datasource, ignore_orientation=True)
mask = (box3d['flags'] >= 0)
if 'confidence' in raw:
mask = mask & (raw['confidence'] > self.conf_threshold)
if len(box3d[mask]) > 0:
poly_collection = []
color_collection = []
for i, box in enumerate(box3d[mask]):
name, color = categories.get_name_color(box_source, box['classes'])
if self.category_filter != '' and name not in self.category_filter:
break
color = np.array(color)/255
if self.use_box_colors:
color = utils.to_numpy(QColor(self.box_3d_colors[ds_name]))[:3]
if 'confidence' in raw:
conf = raw['confidence'][mask][i]
name = f"{name}({conf:.3f})"
vertices = linalg.bbox_to_8coordinates(box['c'],box['d'],box['r'])
p, mask_fov = sample.project_pts(vertices, mask_fov=False, output_mask=True, undistorted=self.undistortimage, margin=1000)
if p[mask_fov].shape[0] < 8:
continue
faces = [[0,1,3,2],[0,1,5,4],[0,2,6,4],[7,3,1,5],[7,5,4,6],[7,6,2,3]]
for face in faces:
poly = np.vstack([p[face[0]],p[face[1]],p[face[2]],p[face[3]],p[face[0]]])
poly_collection.append(poly)
color_collection.append(color)
if self.box_labels_size > 0:
txt = self.ax.text(p[:,0].min(),p[:,1].min(),name+':'+str(box['id']),color='w',fontweight='bold', fontsize=self.box_labels_size, clip_on=True)
txt.set_path_effects([PathEffects.withStroke(linewidth=1, foreground='k')])
alpha = 0.05
facecolors = [list(c)+[alpha] for c in color_collection]
poly_collection = PolyCollection(poly_collection, linewidths=0.5, edgecolors=color_collection, facecolors=facecolors)
self.ax.add_collection(poly_collection)
def __update_lanes(self, sample, image):
# Remove all previous lines
[p.remove() for p in reversed(self.ax.lines)]
datasources = [ds_name for ds_name, show in self.show_lanes.items() if show]
for ds_name in datasources:
lane_sample = self.platform[ds_name].get_at_timestamp(sample.timestamp)
for lane in lane_sample.raw:
vertices = lane_sample.transform(lane['vertices'], self.datasource, ignore_orientation=True)
projected_lane = sample.project_pts(vertices, undistorted=self.undistortimage, mask_fov=True, margin=300)
infos = lane_types.LANE_TYPES[lane['type']]
color = np.array(infos['color'])/255
width = 5
offset = width if infos['double'] else 0
nb_lanes = 2 if infos['double'] else 1
for n in range(nb_lanes):
ddashed = infos['dashed'][n] if infos['double'] else infos['dashed']
ls = '--' if ddashed else '-'
self.ax.plot(projected_lane[:,0]-offset, projected_lane[:,1], c=color, lw=width, ls=ls)
offset *= -1
def __clean_plot_canvas(self):
#TODO: Extract to function
# if using colors, set_array does not work: it expects a 1D array, probably indexing a hidden color map,
# so we throw away the existing scatter and start over
if self.scatter is not None:
self.scatter.set_offsets(np.empty((0,2), 'f4'))
self.scatter.set_array(np.empty((0,), 'f4'))
self.scatter.set_sizes(np.empty((0,), 'f4'))
self.scatter = None
self.ax.collections = []
def __draw(self, image):
self.crops = self.__assert_crop_values(image)
image_cropped = image[self.crops[2]:image.shape[0]-self.crops[3],self.crops[0]:image.shape[1]-self.crops[1]]
if self.image is None:
self.image = self.ax.imshow(image_cropped, aspect=self.aspect_ratio)
self.ax.spines["top"].set_visible(False)
self.ax.spines["right"].set_visible(False)
self.ax.spines["bottom"].set_visible(False)
self.ax.spines["left"].set_visible(False)
else:
self.image.set_data(image_cropped)
self.image.set_extent([self.crops[0],image.shape[1]-self.crops[1],image.shape[0]-self.crops[3],self.crops[2]])
self.backend.draw()
def __assert_crop_values(self, image):
crops = []
for crop in self.crops:
try:
crop = int(crop)
assert crop > 0
except:
crop = 0
crops.append(crop)
crops[0] = np.clip(crops[0], 0, int(image.shape[1]/2))
crops[1] = np.clip(crops[1], 0, int(image.shape[1]/2))
crops[2] = np.clip(crops[2], 0, int(image.shape[0]/2))
crops[3] = np.clip(crops[3], 0, int(image.shape[0]/2))
return crops
def __read_qml_properties(self):
controls = self.window.controls
self.cursor = int(QQmlProperty.read(self.window.qobject, "cursor"))
self.synchronized_cursor = int(self.window.synchronizedCursor)
self.player_cursor = int(self.window.playerCursor)
self.show_actor = controls.showActor
self.show_bbox_2d = controls.showBBox2D
self.show_seg_2d = controls.showSeg2D
self.show_bbox_3d = controls.showBBox3D
self.show_seg_3d = | |
success
self.e = e
def read(self, iprot):
if iprot._fast_decode is not None and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None:
iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec])
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 0:
if ftype == TType.BOOL:
self.success = iprot.readBool()
else:
iprot.skip(ftype)
elif fid == 1:
if ftype == TType.STRUCT:
self.e = gen_exp.read(iprot)
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot._fast_encode is not None and self.thrift_spec is not None:
oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec]))
return
oprot.writeStructBegin('create_vichele_team_result')
if self.success is not None:
oprot.writeFieldBegin('success', TType.BOOL, 0)
oprot.writeBool(self.success)
oprot.writeFieldEnd()
if self.e is not None:
oprot.writeFieldBegin('e', TType.STRUCT, 1)
self.e.write(oprot)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.items()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
all_structs.append(create_vichele_team_result)
create_vichele_team_result.thrift_spec = (
(0, TType.BOOL, 'success', None, None, ), # 0
(1, TType.STRUCT, 'e', [gen_exp, None], None, ), # 1
)
class update_vichele_team_args(object):
"""
Attributes:
- open_id
- team_info
"""
def __init__(self, open_id=None, team_info=None,):
self.open_id = open_id
self.team_info = team_info
def read(self, iprot):
if iprot._fast_decode is not None and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None:
iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec])
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.STRING:
self.open_id = iprot.readString().decode('utf-8', errors='replace') if sys.version_info[0] == 2 else iprot.readString()
else:
iprot.skip(ftype)
elif fid == 2:
if ftype == TType.STRUCT:
self.team_info = vichele_team()
self.team_info.read(iprot)
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot._fast_encode is not None and self.thrift_spec is not None:
oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec]))
return
oprot.writeStructBegin('update_vichele_team_args')
if self.open_id is not None:
oprot.writeFieldBegin('open_id', TType.STRING, 1)
oprot.writeString(self.open_id.encode('utf-8') if sys.version_info[0] == 2 else self.open_id)
oprot.writeFieldEnd()
if self.team_info is not None:
oprot.writeFieldBegin('team_info', TType.STRUCT, 2)
self.team_info.write(oprot)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.items()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
all_structs.append(update_vichele_team_args)
update_vichele_team_args.thrift_spec = (
None, # 0
(1, TType.STRING, 'open_id', 'UTF8', None, ), # 1
(2, TType.STRUCT, 'team_info', [vichele_team, None], None, ), # 2
)
class update_vichele_team_result(object):
"""
Attributes:
- success
- e
"""
def __init__(self, success=None, e=None,):
self.success = success
self.e = e
def read(self, iprot):
if iprot._fast_decode is not None and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None:
iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec])
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 0:
if ftype == TType.BOOL:
self.success = iprot.readBool()
else:
iprot.skip(ftype)
elif fid == 1:
if ftype == TType.STRUCT:
self.e = gen_exp.read(iprot)
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot._fast_encode is not None and self.thrift_spec is not None:
oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec]))
return
oprot.writeStructBegin('update_vichele_team_result')
if self.success is not None:
oprot.writeFieldBegin('success', TType.BOOL, 0)
oprot.writeBool(self.success)
oprot.writeFieldEnd()
if self.e is not None:
oprot.writeFieldBegin('e', TType.STRUCT, 1)
self.e.write(oprot)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.items()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
all_structs.append(update_vichele_team_result)
update_vichele_team_result.thrift_spec = (
(0, TType.BOOL, 'success', None, None, ), # 0
(1, TType.STRUCT, 'e', [gen_exp, None], None, ), # 1
)
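# Usage note (sketch): these *_args / *_result wrappers are what the Thrift runtime
# serializes on the wire; by generated-code convention they back a service method
# update_vichele_team(open_id, team_info). A client call would look roughly like
# the following (transport/protocol setup omitted, `client` object assumed):
#
#   ok = client.update_vichele_team(open_id, team_info)  # returns the bool `success`,
#                                                        # or raises `e` on failure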
class del_vichele_team_args(object):
"""
Attributes:
- open_id
- team_id
"""
def __init__(self, open_id=None, team_id=None,):
self.open_id = open_id
self.team_id = team_id
def read(self, iprot):
if iprot._fast_decode is not None and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None:
iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec])
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.STRING:
self.open_id = iprot.readString().decode('utf-8', errors='replace') if sys.version_info[0] == 2 else iprot.readString()
else:
iprot.skip(ftype)
elif fid == 2:
if ftype == TType.I64:
self.team_id = iprot.readI64()
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot._fast_encode is not None and self.thrift_spec is not None:
oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec]))
return
oprot.writeStructBegin('del_vichele_team_args')
if self.open_id is not None:
oprot.writeFieldBegin('open_id', TType.STRING, 1)
oprot.writeString(self.open_id.encode('utf-8') if sys.version_info[0] == 2 else self.open_id)
oprot.writeFieldEnd()
if self.team_id is not None:
oprot.writeFieldBegin('team_id', TType.I64, 2)
oprot.writeI64(self.team_id)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.items()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
all_structs.append(del_vichele_team_args)
del_vichele_team_args.thrift_spec = (
None, # 0
(1, TType.STRING, 'open_id', 'UTF8', None, ), # 1
(2, TType.I64, 'team_id', None, None, ), # 2
)
class del_vichele_team_result(object):
"""
Attributes:
- success
- e
"""
def __init__(self, success=None, e=None,):
self.success = success
self.e = e
def read(self, iprot):
if iprot._fast_decode is not None and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None:
iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec])
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 0:
if ftype == TType.BOOL:
self.success = iprot.readBool()
else:
iprot.skip(ftype)
elif fid == 1:
if ftype == TType.STRUCT:
self.e = gen_exp.read(iprot)
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot._fast_encode is not None and self.thrift_spec is not None:
oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec]))
return
oprot.writeStructBegin('del_vichele_team_result')
if self.success is not None:
oprot.writeFieldBegin('success', TType.BOOL, 0)
oprot.writeBool(self.success)
oprot.writeFieldEnd()
if self.e is not None:
oprot.writeFieldBegin('e', TType.STRUCT, 1)
self.e.write(oprot)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.items()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
all_structs.append(del_vichele_team_result)
del_vichele_team_result.thrift_spec = (
(0, TType.BOOL, 'success', None, None, ), # 0
(1, TType.STRUCT, 'e', [gen_exp, None], None, ), # 1
)
class get_all_vichele_team_args(object):
"""
Attributes:
- open_id
"""
def __init__(self, open_id=None,):
self.open_id = open_id
def read(self, iprot):
if iprot._fast_decode is not None and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None:
iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec])
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.STRING:
self.open_id = iprot.readString().decode('utf-8', errors='replace') if sys.version_info[0] == 2 else iprot.readString()
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot._fast_encode is not None and self.thrift_spec is not None:
oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec]))
return
oprot.writeStructBegin('get_all_vichele_team_args')
if self.open_id is not None:
oprot.writeFieldBegin('open_id', TType.STRING, 1)
oprot.writeString(self.open_id.encode('utf-8') if sys.version_info[0] == 2 else self.open_id)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.items()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
all_structs.append(get_all_vichele_team_args)
get_all_vichele_team_args.thrift_spec = (
None, # 0
(1, TType.STRING, 'open_id', 'UTF8', None, ), # 1
)
class get_all_vichele_team_result(object):
"""
Attributes:
- success
- e
"""
def __init__(self, success=None, e=None,):
self.success = success
self.e = e
def read(self, iprot):
if iprot._fast_decode is not None and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None:
iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec])
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 0:
if ftype == TType.LIST:
self.success = []
(_etype311, _size308) = iprot.readListBegin()
for _i312 in range(_size308):
_elem313 = vichele_team()
_elem313.read(iprot)
self.success.append(_elem313)
iprot.readListEnd()
else:
iprot.skip(ftype)
elif fid == 1:
if ftype == TType.STRUCT:
self.e = gen_exp.read(iprot)
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot._fast_encode is not None and self.thrift_spec is not None:
oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec]))
return
oprot.writeStructBegin('get_all_vichele_team_result')
if self.success is not None:
oprot.writeFieldBegin('success', TType.LIST, 0)
oprot.writeListBegin(TType.STRUCT, len(self.success))
for iter314 in self.success:
iter314.write(oprot)
oprot.writeListEnd()
oprot.writeFieldEnd()
if self.e is not None:
oprot.writeFieldBegin('e', TType.STRUCT, 1)
self.e.write(oprot)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.items()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
all_structs.append(get_all_vichele_team_result)
get_all_vichele_team_result.thrift_spec = (
(0, TType.LIST, 'success', (TType.STRUCT, [vichele_team, None], False), None, ), # 0
(1, TType.STRUCT, | |
data has changed.\n\nSave?'
dialog = gtk.MessageDialog(None,
gtk.DIALOG_MODAL | gtk.DIALOG_DESTROY_WITH_PARENT,
gtk.MESSAGE_ERROR,
gtk.BUTTONS_NONE,
message)
dialog.add_buttons(
gtk.STOCK_YES, gtk.RESPONSE_YES,
'_Discard', 1,
gtk.STOCK_CANCEL, gtk.RESPONSE_CANCEL,
)
response = dialog.run()
dialog.destroy()
if response == gtk.RESPONSE_YES:
self.save_session_action()
elif response == 1:
pass
elif response == gtk.RESPONSE_CANCEL:
return
session_filename = self.choose_filename(
gtk.FILE_CHOOSER_ACTION_OPEN,
(('Session *.session', '*.session'),)
)
if session_filename:
self.session_filename = session_filename
self.load_session(self.session_filename)
msg = 'Session file: %s' % (self.session_filename, )
self.statusbar.pop(self.statusbar_cid)
self.statusbar.push(self.statusbar_cid, msg)
def on_save_session_menuitem_activate(self, menuitem, data=None):
self.save_session_action()
def save_session_action(self):
if not self.session_filename:
filename = self.choose_filename(
gtk.FILE_CHOOSER_ACTION_SAVE,
(('Session *.session', '*.session'),),
confirm_overwrite=True,
initfilename=self.session_filename,
buttons=(
gtk.STOCK_CANCEL, gtk.RESPONSE_CANCEL,
gtk.STOCK_SAVE, gtk.RESPONSE_OK,
)
)
if filename:
self.session_filename = filename
if self.session_filename:
stem, ext = os.path.splitext(self.session_filename)
if not ext:
self.session_filename += '.session'
self.save_session(self.session_filename)
msg = 'Session file: %s' % (self.session_filename, )
self.statusbar.pop(self.statusbar_cid)
self.statusbar.push(self.statusbar_cid, msg)
def on_save_session_as_menuitem_activate(self, menuitem, data=None):
filename = self.choose_filename(
gtk.FILE_CHOOSER_ACTION_SAVE,
(('Session *.session', '*.session'),),
confirm_overwrite=True,
initfilename=self.session_filename,
buttons=(
gtk.STOCK_CANCEL, gtk.RESPONSE_CANCEL,
gtk.STOCK_SAVE, gtk.RESPONSE_OK,
)
)
if filename:
self.session_filename = filename
stem, ext = os.path.splitext(self.session_filename)
if not ext:
self.session_filename += '.session'
self.save_session(self.session_filename)
msg = 'Session file: %s' % (self.session_filename, )
self.statusbar.pop(self.statusbar_cid)
self.statusbar.push(self.statusbar_cid, msg)
def save_session(self, filename):
self.trans_gui_2_obj()
sessionObj = self.params
outfile = open(filename, 'w')
outfile.write('<?xml version="1.0" ?>\n')
sessionObj.export(outfile, 0, name_="session",
namespacedef_='')
outfile.close()
msg = 'Session saved to file:\n%s' % (filename, )
self.error_message(msg, gtk.MESSAGE_INFO)
self.saved_params = self.params.copy()
def load_session(self, filename):
try:
doc = generateds_gui_session.parsexml_(filename)
rootNode = doc.getroot()
rootTag, rootClass = generateds_gui_session.get_root_tag(rootNode)
if rootClass is None:
rootTag = 'session'
rootClass = generateds_gui_session.sessionType
sessionObj = rootClass.factory()
sessionObj.build(rootNode)
self.params = sessionObj
self.trans_obj_2_gui()
self.trans_gui_2_obj()
self.saved_params = self.params.copy()
except IOError, exp:
msg = str(exp)
self.error_message(msg, gtk.MESSAGE_ERROR)
except expat.ExpatError, exp:
msg = '%s file: %s' % (str(exp), filename, )
self.error_message(msg, gtk.MESSAGE_ERROR)
def on_about_menu_item_activate(self, menuitem, data=None):
if self.about_dialog:
self.about_dialog.present()
return
authors = [
'<NAME> <<EMAIL>>',
]
about_dialog = gtk.AboutDialog()
about_dialog.set_transient_for(self.window)
about_dialog.set_destroy_with_parent(True)
about_dialog.set_name("generateDS.py Python bindings generator")
about_dialog.set_version(VERSION)
about_dialog.set_copyright("Copyright \xc2\xa9 2009 <NAME>")
about_dialog.set_website("http://www.rexx.com/~dkuhlman")
about_dialog.set_comments("GTK+ and Glade3 GUI front end")
about_dialog.set_authors(authors)
about_dialog.set_logo_icon_name(gtk.STOCK_EDIT)
# callbacks for destroying the dialog
def close(dialog, response, editor):
editor.about_dialog = None
dialog.destroy()
def delete_event(dialog, event, editor):
editor.about_dialog = None
return True
about_dialog.connect("response", close, self)
about_dialog.connect("delete-event", delete_event, self)
self.about_dialog = about_dialog
about_dialog.show()
def error_message(self, message, message_type=gtk.MESSAGE_ERROR):
# log to terminal window
#print message
# create an error message dialog and display modally to the user
dialog = gtk.MessageDialog(None,
gtk.DIALOG_MODAL | gtk.DIALOG_DESTROY_WITH_PARENT,
message_type, gtk.BUTTONS_OK, message)
dialog.run()
dialog.destroy()
def reset_default_status(self):
msg = "Session file: (UNTITLED)"
self.statusbar.pop(self.statusbar_cid)
self.statusbar.push(self.statusbar_cid, msg)
def on_input_schema_chooser_button_clicked(self, button, data=None):
filename = self.choose_filename(gtk.FILE_CHOOSER_ACTION_OPEN,
(('Schemas *.xsd', '*.xsd'),))
if filename:
self.input_schema_entry.set_text(filename)
def on_output_superclass_chooser_button_clicked(self, widget, data=None):
filename = self.choose_filename(patterns=(('Python *.py', '*.py'), ))
if filename:
self.output_superclass_entry.set_text(filename)
#self.on_output_superclass_entry_changed(
# self.output_superclass_entry, data)
def on_output_subclass_chooser_button_clicked(self, button, data=None):
filename = self.choose_filename(patterns=(('Python *.py', '*.py'), ))
if filename:
self.output_subclass_entry.set_text(filename)
def on_behavior_filename_chooser_button_clicked(self, button, data=None):
filename = self.choose_filename(gtk.FILE_CHOOSER_ACTION_OPEN,
(('Python *.py', '*.py'),))
if filename:
self.behavior_filename_entry.set_text(filename)
def on_validator_bodies_chooser_button_clicked(self, button, data=None):
filename = self.choose_filename(gtk.FILE_CHOOSER_ACTION_SELECT_FOLDER,
)
if filename:
self.validator_bodies_entry.set_text(filename)
def on_user_methods_chooser_button_clicked(self, button, data=None):
filename = self.choose_filename(gtk.FILE_CHOOSER_ACTION_OPEN,
(('Python *.py', '*.py'),))
if filename:
self.user_methods_entry.set_text(filename)
def choose_filename(self, action=gtk.FILE_CHOOSER_ACTION_SAVE,
patterns=(), confirm_overwrite=False, initfilename=None,
buttons=None):
filename = None
if buttons is None:
buttons=(
gtk.STOCK_CANCEL, gtk.RESPONSE_CANCEL,
gtk.STOCK_OPEN, gtk.RESPONSE_OK,
)
dialog = gtk.FileChooserDialog(
title=None,
action=action,
buttons=buttons,
)
if self.current_folder is not None:
dialog.set_current_folder(self.current_folder)
if initfilename is not None:
dialog.set_filename(initfilename)
if patterns:
filter = gtk.FileFilter()
for name, pattern in patterns:
filter.set_name(name)
filter.add_pattern(pattern)
dialog.add_filter(filter)
filter = gtk.FileFilter()
filter.set_name("All files *.*")
filter.add_pattern("*")
dialog.add_filter(filter)
dialog.set_do_overwrite_confirmation(confirm_overwrite)
response = dialog.run()
if response == gtk.RESPONSE_OK:
filename = dialog.get_filename()
self.current_folder = dialog.get_current_folder()
elif response == gtk.RESPONSE_CANCEL:
pass
dialog.destroy()
return filename
def on_namespace_prefix_entry_changed(self, widget, data=None):
entry = self.ui_obj_dict['namespace_prefix_entry']
checkbutton = self.ui_obj_dict['empty_namespace_prefix_checkbutton']
checkbutton.set_active(False)
return True
def on_empty_namespace_prefix_checkbutton_toggled(self, widget, data=None):
entry = self.ui_obj_dict['namespace_prefix_entry']
checkbutton = self.ui_obj_dict['empty_namespace_prefix_checkbutton']
if widget.get_active():
entry.set_text('')
return True
def on_output_superclass_entry_changed(self, widget, data=None):
entry = self.ui_obj_dict['superclass_module_entry']
checkbutton = self.auto_super_checkbutton
if checkbutton.get_active():
path = widget.get_text()
if path:
stem = os.path.splitext(os.path.split(path)[1])[0]
if stem:
entry.set_text(stem)
return True
def on_auto_super_checkbutton_toggled(self, widget, data=None):
entry = self.ui_obj_dict['superclass_module_entry']
superclass_entry = self.ui_obj_dict['output_superclass_entry']
#checkbutton = self.auto_super_checkbutton
checkbutton = widget
if widget.get_active():
path = superclass_entry.get_text()
if path:
stem = os.path.splitext(os.path.split(path)[1])[0]
if stem:
entry.set_text(stem)
return True
def on_ok_button_activate(self, widget, data=None):
#print '(GeneratedsGui) widget:', widget
response = self.content_dialog.on_ok_button_activate(
self.content_dialog, data)
return response
name_pat1 = re.compile(r'^(.*)_clear_button')
# This method keys off the correspondence between the
# name of the button and the name of the related entry,
# for example, xxx_yyy_entry : xxx_yyy_clear_button.
def on_clear_button_clicked(self, widget, data=None):
name = widget.get_name()
mo = GeneratedsGui.name_pat1.search(name)
stem = mo.group(1)
name1 = '%s_entry' % (stem, )
ui_obj = self.ui_obj_dict[name1]
ui_obj.set_text('')
# Run main application window
def main(self):
self.window.show()
gtk.main()
class ContentDialog(gtk.Dialog):
def __init__(self):
global Builder
self.content_dialog = Builder.get_object('content_dialog')
self.content_textview = Builder.get_object('content_textview')
buf = self.content_textview.get_buffer().set_text('')
def show(self, content):
#Builder.connect_signals(self)
self.content_textview.get_buffer().set_text(content)
response = self.content_dialog.run()
self.content_dialog.hide()
def on_ok_button_activate(self, widget, data=None):
#print '(content_dialog) widget:', widget
return False
#
# Functions for internal use
#
def capture_options(options):
config_parser = ConfigParser()
config_parser.read([
os.path.expanduser('~/.generateds_gui.ini'),
'./generateds_gui.ini',
])
section = 'general'
names = ('exec-path', 'exec_path')
capture_1_option(options, config_parser, section, names)
## names = ('impl-schema', 'impl_schema')
## capture_1_option(options, config_parser, section, names)
names = ('impl-gui', 'impl_gui')
capture_1_option(options, config_parser, section, names)
names = ('session', 'session')
capture_1_option(options, config_parser, section, names)
# Set some defaults.
if options.exec_path is None:
options.exec_path = 'generateDS.py'
def capture_1_option(options, config_parser, section, names):
if (getattr(options, names[1]) is None and
config_parser.has_option(section, names[0])
):
setattr(options, names[1], config_parser.get(section, names[0]))
def capture_ui_names():
items = generateds_gui_session.sessionType.member_data_items_
for item in items:
ui_item = UIItemSpec(item.get_name())
if item.get_name() == 'member_specs':
ui_item.set_ui_type('combobox')
ui_item.set_access_action('active')
elif item.get_data_type() == 'xs:string':
ui_item.set_ui_type('entry')
ui_item.set_access_action('text')
elif item.get_data_type() == 'xs:boolean':
ui_item.set_ui_type('checkbutton')
ui_item.set_access_action('active')
ParamNameList.append(ui_item)
## print 'ParamNameList:'
## for item in ParamNameList:
## print ' %s %s' % (item.get_name(), item.get_ui_type(), )
USAGE_TEXT = """
python %prog [options] <somefile.xxx>
example:
python %prog somefile.xxx"""
def usage(parser):
parser.print_help()
sys.exit(1)
def main():
parser = OptionParser(USAGE_TEXT)
parser.add_option("--exec-path",
type="string", action="store",
dest="exec_path",
#default="generateDS.py",
help='path to executable generated in command line.'
' Example: "python /path/to/generateDS.py".'
' Default: "./generateDS.py".'
' Use Tools/Generate CL (Ctrl-T) to see it.'
)
parser.add_option("--impl-gui",
type="string", action="store",
dest="impl_gui",
help="name of glade file that defines the GUI if not embedded."
)
parser.add_option("-s", "--session",
type="string", action="store",
dest="session",
help="name of a session file to be loaded."
)
(options, args) = parser.parse_args()
capture_options(options)
capture_ui_names()
if len(args) > 0:
usage(parser)
editor = GeneratedsGui(options)
editor.main()
# Do not change the next 3 lines.
## UI_SPECIFICATION ##
Ui_spec = """
<?xml version="1.0"?>
<interface>
<requires lib="gtk+" version="2.16"/>
<!-- interface-naming-policy project-wide -->
<object class="GtkWindow" id="window1">
<accel-groups>
<group name="accelgroup1"/>
</accel-groups>
<signal name="delete_event" handler="on_window_delete_event"/>
<child>
<object class="GtkVBox" id="vbox1">
<property name="visible">True</property>
<property name="orientation">vertical</property>
<child>
<object class="GtkMenuBar" id="menubar1">
<property name="visible">True</property>
<child>
<object class="GtkMenuItem" id="menuitem1">
<property name="visible">True</property>
<property name="label" translatable="yes">_File</property>
<property name="use_underline">True</property>
<child type="submenu">
<object class="GtkMenu" id="menu1">
<property name="visible">True</property>
<child>
<object class="GtkImageMenuItem" id="clear_menuitem">
<property name="label">Clear</property>
<property name="visible">True</property>
<property name="image">image4</property>
<property name="use_stock">False</property>
<property name="accel_group">accelgroup1</property>
<accelerator key="n" signal="activate" modifiers="GDK_CONTROL_MASK"/>
<signal name="activate" handler="on_clear_menuitem_activate"/>
</object>
</child>
<child>
<object class="GtkImageMenuItem" id="open_session_menuitem">
<property name="label">_Load session</property>
<property name="visible">True</property>
<property name="tooltip_text" translatable="yes">Load a previous saved session.</property>
<property name="use_underline">True</property>
<property name="image">image3</property>
<property name="use_stock">False</property>
<property name="accel_group">accelgroup1</property>
<accelerator key="o" signal="activate" modifiers="GDK_CONTROL_MASK"/>
<signal name="activate" handler="on_open_session_menuitem_activate"/>
</object>
</child>
<child>
<object class="GtkImageMenuItem" id="save_session_menuitem">
<property name="label">_Save session</property>
<property name="visible">True</property>
<property name="tooltip_text" translatable="yes">Save the current session.</property>
<property name="use_underline">True</property>
<property name="image">image1</property>
<property name="use_stock">False</property>
<property name="accel_group">accelgroup1</property>
<accelerator key="s" signal="activate" modifiers="GDK_CONTROL_MASK"/>
<signal name="activate" handler="on_save_session_menuitem_activate"/>
</object>
</child>
<child>
<object class="GtkImageMenuItem" id="save_session_as_menuitem">
<property name="label">Save session as ...</property>
<property name="visible">True</property>
<property name="tooltip_text" translatable="yes">Save the current session in
file chosen by the user.</property>
<property name="image">image2</property>
<property name="use_stock">False</property>
<property name="accel_group">accelgroup1</property>
<signal name="activate" handler="on_save_session_as_menuitem_activate"/>
</object>
</child>
<child>
<object class="GtkSeparatorMenuItem" id="menuitem5">
<property name="visible">True</property>
</object>
</child>
<child>
<object class="GtkImageMenuItem" id="imagemenuitem5">
<property name="label">gtk-quit</property>
<property name="visible">True</property>
<property name="tooltip_text" translatable="yes">Exit from the application.</property>
<property name="use_underline">True</property>
<property name="use_stock">True</property>
<property name="accel_group">accelgroup1</property>
<signal name="activate" handler="on_quit_menu_item_activate"/>
</object>
</child>
</object>
</child>
</object>
</child>
<child>
<object class="GtkMenuItem" id="menuitem2">
<property name="visible">True</property>
<property name="label" translatable="yes">_Tools</property>
<property name="use_underline">True</property>
<child type="submenu">
<object class="GtkMenu" id="menu2">
<property name="visible">True</property>
<child>
<object class="GtkMenuItem" id="capture_cl_menuitem">
<property name="visible">True</property>
<property name="tooltip_text" translatable="yes">Capture the command line that would be used
to generate the bindings modules.</property>
<property name="label" translatable="yes">_Capture CL</property>
<property name="use_underline">True</property>
<accelerator key="t" signal="activate" modifiers="GDK_CONTROL_MASK"/>
<signal name="activate" handler="on_capture_cl_menuitem_activate"/>
</object>
</child>
<child>
<object class="GtkMenuItem" id="generate_menuitem">
<property name="visible">True</property>
<property name="tooltip_text" translatable="yes">Generate the bindings modules.</property>
<property name="label" translatable="yes">_Generate</property>
<property name="use_underline">True</property>
<accelerator key="g" signal="activate" modifiers="GDK_CONTROL_MASK"/>
<signal name="activate" handler="on_generate_menuitem_activate"/>
</object>
</child>
</object>
</child>
</object>
</child>
<child>
<object class="GtkMenuItem" id="menuitem4">
<property name="visible">True</property>
<property name="label" translatable="yes">_Help</property>
<property name="use_underline">True</property>
<child type="submenu">
<object class="GtkMenu" id="menu3">
<property name="visible">True</property>
<child>
<object class="GtkImageMenuItem" id="imagemenuitem10">
<property name="label">gtk-about</property>
<property name="visible">True</property>
<property name="use_underline">True</property>
<property name="use_stock">True</property>
<property name="accel_group">accelgroup1</property>
<signal name="activate" handler="on_about_menu_item_activate"/>
</object>
</child>
</object>
</child>
</object>
</child>
</object>
| |
"thePed": """: The ped whose vehicle seat youre looking up. """
},
result='* returns an integer containing the number of the seat that the ped is currently in:\n** 0: front-left\n** 1: front-right\n** 2: rear-left\n** 3: rear-right\nreturns false if the ped is on foot, or the ped doesnt exist.' ,
),
url='getPedOccupiedVehicleSeat',
),
field=FunctionOOPField(
name='vehicleSeat',
types=[
FunctionType(
names=['int'],
is_optional=False,
)
],
),
is_static=False,
)
],
client=[
FunctionOOP(
description="""Prior to 1.5, the variable was .occupiedVehicleSeat""",
base_function_name="getPedOccupiedVehicleSeat",
class_name='ped',
method=FunctionData(
signature=FunctionSignature(
name='getOccupiedVehicleSeat',
return_types=FunctionReturnTypes(
return_types=[
FunctionType(
names=['int'],
is_optional=False,
)
],
variable_length=False,
),
arguments=FunctionArgumentValues(
arguments=[
[
FunctionArgument(
name='thePed',
argument_type=FunctionType(
names=['ped'],
is_optional=False,
),
default_value=None,
)
]
],
variable_length=False,
),
generic_types=[
],
),
docs=FunctionDoc(
description='This function gets the seat that a specific ped is sitting in in a vehicle.' ,
arguments={
"thePed": """: The ped whose vehicle seat youre looking up. """
},
result='* returns an integer containing the number of the seat that the ped is currently in:\n** 0: front-left\n** 1: front-right\n** 2: rear-left\n** 3: rear-right\nreturns false if the ped is on foot, or the ped doesnt exist.' ,
),
url='getPedOccupiedVehicleSeat',
),
field=FunctionOOPField(
name='vehicleSeat',
types=[
FunctionType(
names=['int'],
is_optional=False,
)
],
),
is_static=False,
)
],
),
CompoundOOPData(
server=[
],
client=[
FunctionOOP(
description=None,
base_function_name="getPedOxygenLevel",
class_name='ped',
method=FunctionData(
signature=FunctionSignature(
name='getOxygenLevel',
return_types=FunctionReturnTypes(
return_types=[
FunctionType(
names=['float'],
is_optional=False,
)
],
variable_length=False,
),
arguments=FunctionArgumentValues(
arguments=[
[
FunctionArgument(
name='thePed',
argument_type=FunctionType(
names=['ped'],
is_optional=False,
),
default_value=None,
)
]
],
variable_length=False,
),
generic_types=[
],
),
docs=FunctionDoc(
description='This function returns the current oxygen level of the specified ped.' ,
arguments={
"thePed": """The ped whose oxygen level you want to check """
},
result='a float with the oxygen level, false if an invalid ped was given.' ,
),
url='getPedOxygenLevel',
),
field=FunctionOOPField(
name='oxygenLevel',
types=[
FunctionType(
names=['float'],
is_optional=False,
)
],
),
is_static=False,
)
],
),
CompoundOOPData(
server=[
],
client=[
],
),
CompoundOOPData(
server=[
FunctionOOP(
description=None,
base_function_name="getPedStat",
class_name='ped',
method=FunctionData(
signature=FunctionSignature(
name='getStat',
return_types=FunctionReturnTypes(
return_types=[
FunctionType(
names=['float'],
is_optional=False,
)
],
variable_length=False,
),
arguments=FunctionArgumentValues(
arguments=[
[
FunctionArgument(
name='thePed',
argument_type=FunctionType(
names=['ped'],
is_optional=False,
),
default_value=None,
)
],
[
FunctionArgument(
name='stat',
argument_type=FunctionType(
names=['int'],
is_optional=False,
),
default_value=None,
)
]
],
variable_length=False,
),
generic_types=[
],
),
docs=FunctionDoc(
description='This function returns the value of the specified statistic of a specific ped.' ,
arguments={
"thePed": """: The ped whose stat you want to retrieve. """,
"stat": """: A whole number determining the stat ID. """
},
result='returns the value of the requested statistic.' ,
),
url='getPedStat',
),
field=None,
is_static=False,
)
],
client=[
FunctionOOP(
description=None,
base_function_name="getPedStat",
class_name='ped',
method=FunctionData(
signature=FunctionSignature(
name='getStat',
return_types=FunctionReturnTypes(
return_types=[
FunctionType(
names=['float'],
is_optional=False,
)
],
variable_length=False,
),
arguments=FunctionArgumentValues(
arguments=[
[
FunctionArgument(
name='thePed',
argument_type=FunctionType(
names=['ped'],
is_optional=False,
),
default_value=None,
)
],
[
FunctionArgument(
name='stat',
argument_type=FunctionType(
names=['int'],
is_optional=False,
),
default_value=None,
)
]
],
variable_length=False,
),
generic_types=[
],
),
docs=FunctionDoc(
description='This function returns the value of the specified statistic of a specific ped.' ,
arguments={
"thePed": """: The ped whose stat you want to retrieve. """,
"stat": """: A whole number determining the stat ID. """
},
result='returns the value of the requested statistic.' ,
),
url='getPedStat',
),
field=None,
is_static=False,
)
],
),
CompoundOOPData(
server=[
FunctionOOP(
description=None,
base_function_name="getPedTarget",
class_name='ped',
method=FunctionData(
signature=FunctionSignature(
name='getTarget',
return_types=FunctionReturnTypes(
return_types=[
FunctionType(
names=['element'],
is_optional=False,
)
],
variable_length=False,
),
arguments=FunctionArgumentValues(
arguments=[
[
FunctionArgument(
name='thePed',
argument_type=FunctionType(
names=['ped'],
is_optional=False,
),
default_value=None,
)
]
],
variable_length=False,
),
generic_types=[
],
),
docs=FunctionDoc(
description='This function is used to get the element a ped is currently targeting.' ,
arguments={
"thePed": """The ped whose target you want to retrieve. """
},
result='returns the element thats being targeted, or false if there isnt one.\nthis is only effective on physical gta elements, namely:\n* players\n* peds\n* vehicles\n* objects' ,
),
url='getPedTarget',
),
field=None,
is_static=False,
)
],
client=[
FunctionOOP(
description=None,
base_function_name="getPedTarget",
class_name='ped',
method=FunctionData(
signature=FunctionSignature(
name='getTarget',
return_types=FunctionReturnTypes(
return_types=[
FunctionType(
names=['element'],
is_optional=False,
)
],
variable_length=False,
),
arguments=FunctionArgumentValues(
arguments=[
[
FunctionArgument(
name='thePed',
argument_type=FunctionType(
names=['ped'],
is_optional=False,
),
default_value=None,
)
]
],
variable_length=False,
),
generic_types=[
],
),
docs=FunctionDoc(
description='This function is used to get the element a ped is currently targeting.' ,
arguments={
"thePed": """The ped whose target you want to retrieve. """
},
result='returns the element thats being targeted, or false if there isnt one.\nthis is only effective on physical gta elements, namely:\n* players\n* peds\n* vehicles\n* objects' ,
),
url='getPedTarget',
),
field=None,
is_static=False,
)
],
),
CompoundOOPData(
server=[
],
client=[
],
),
CompoundOOPData(
server=[
],
client=[
FunctionOOP(
description=None,
base_function_name="getPedTargetEnd",
class_name='ped',
method=FunctionData(
signature=FunctionSignature(
name='getTargetEnd',
return_types=FunctionReturnTypes(
return_types=[
FunctionType(
names=['float'],
is_optional=False,
),
FunctionType(
names=['float'],
is_optional=False,
),
FunctionType(
names=['float'],
is_optional=False,
)
],
variable_length=False,
),
arguments=FunctionArgumentValues(
arguments=[
[
FunctionArgument(
name='targetingPed',
argument_type=FunctionType(
names=['ped'],
is_optional=False,
),
default_value=None,
)
]
],
variable_length=False,
),
generic_types=[
],
),
docs=FunctionDoc(
description='This function allows retrieval of the position where a peds target range ends, when he is aiming with a weapon.' ,
arguments={
"targetingPed": """the ped who is targeting whose target end you wish to retrieve """
},
result='returns three floats, x,y,z, representing the position where the peds target ends according to his range, or false if it was unsuccessful.' ,
),
url='getPedTargetEnd',
),
field=None,
is_static=False,
)
],
),
CompoundOOPData(
server=[
],
client=[
],
),
CompoundOOPData(
server=[
],
client=[
FunctionOOP(
description=None,
base_function_name="getPedTask",
class_name='ped',
method=FunctionData(
signature=FunctionSignature(
name='getTask',
return_types=FunctionReturnTypes(
return_types=[
FunctionType(
names=['string'],
is_optional=False,
),
FunctionType(
names=['string'],
is_optional=False,
),
FunctionType(
names=['string'],
is_optional=False,
),
FunctionType(
names=['string'],
is_optional=False,
)
],
variable_length=False,
),
arguments=FunctionArgumentValues(
arguments=[
[
FunctionArgument(
name='thePed',
argument_type=FunctionType(
names=['ped'],
is_optional=False,
),
default_value=None,
)
],
[
FunctionArgument(
name='priority',
argument_type=FunctionType(
names=['string'],
is_optional=False,
),
default_value=None,
)
],
[
FunctionArgument(
name='taskType',
argument_type=FunctionType(
names=['int'],
is_optional=False,
),
default_value=None,
)
]
],
variable_length=False,
),
generic_types=[
],
),
docs=FunctionDoc(
description='This function is used to get any simple or complex task of a certain type for a ped.\nIt can provide feedback on all tasks relating to a ped. For example, while jumping, getPedSimplestTask will return TASK_SIMPLE_IN_AIR. If you wanted to know specifically if the player has jumped, you would use this function. If you did you will discover that while jumping Primary task 3 is TASK_COMPLEX_JUMP.' ,
arguments={
"thePed": """: The ped whose task you want to retrieve. """,
"priority": """: A string determining which set of tasks you want to retrieve it from. This must be either primary or secondary. """,
"taskType": """: An integer value representing the task type (or slot) you want to get the task from. Types can be: """,
"PRIMARY TASKS": """ """,
"0": """TASK_SECONDARY_ATTACK """,
"1": """TASK_SECONDARY_DUCK """,
"2": """TASK_SECONDARY_SAY """,
"3": """TASK_SECONDARY_FACIAL_COMPLEX """,
"4": """TASK_SECONDARY_PARTIAL_ANIM """,
"SECONDARY TASKS": """ """,
"5": """TASK_SECONDARY_IK """
},
result='returns the name of the most complex task. see list of player tasks for valid strings. returns false if invalid arguments are specified or if there is no task of the type specified.\n<br>\nreturns between 1 and 4 strings. the first string contains the name of the most complex task, with simpler sub-tasks being named in the following strings. see list of player tasks for valid strings. returns false if invalid arguments are specified or if there is no task of the type specified.' ,
),
url='getPedTask',
),
field=None,
is_static=False,
)
],
),
CompoundOOPData(
server=[
FunctionOOP(
description=None,
base_function_name="getPedTotalAmmo",
class_name='ped',
method=FunctionData(
signature=FunctionSignature(
name='getTotalAmmo',
return_types=FunctionReturnTypes(
return_types=[
FunctionType(
names=['int'],
is_optional=False,
)
],
variable_length=False,
),
arguments=FunctionArgumentValues(
arguments=[
[
FunctionArgument(
name='thePed',
argument_type=FunctionType(
names=['ped'],
is_optional=False,
),
default_value=None,
)
],
[
FunctionArgument(
name='weaponSlot',
argument_type=FunctionType(
names=['int'],
is_optional=True,
),
default_value='current',
)
]
],
variable_length=False,
),
generic_types=[
],
),
docs=FunctionDoc(
description='This function returns an integer that contains the total ammo in a specified peds weapon. See weapon|Weapon Info' ,
arguments={
"thePed": """: The ped whose ammo you want to check. """,
"weaponSlot": """: an integer representing the weapon slot (set to the peds current slot if not given) """
},
result='returns an int containing the total amount of ammo for the specified peds weapon, or 0 if the ped specified is invalid.' ,
),
url='getPedTotalAmmo',
),
field=None,
is_static=False,
)
],
client=[
FunctionOOP(
description=None,
base_function_name="getPedTotalAmmo",
class_name='ped',
method=FunctionData(
signature=FunctionSignature(
name='getTotalAmmo',
return_types=FunctionReturnTypes(
return_types=[
FunctionType(
names=['int'],
is_optional=False,
)
],
variable_length=False,
),
arguments=FunctionArgumentValues(
| |
from dataclasses import dataclass
from enum import Enum
from typing import Callable, Iterator, Sequence, Text, Union
from collections import deque
from sklearn.linear_model import LogisticRegression
from sklearn.preprocessing import LabelEncoder
from sklearn.feature_extraction import DictVectorizer
@dataclass()
class Dep:
"""A word in a dependency tree.
The fields are defined by https://universaldependencies.org/format.html.
"""
id: Text
form: Union[Text, None]
lemma: Union[Text, None]
upos: Text
xpos: Union[Text, None]
feats: Sequence[Text]
head: Union[Text, None]
deprel: Union[Text, None]
deps: Sequence[Text]
misc: Union[Text, None]
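# A minimal illustration, not part of the original module: a Dep built by hand from
# the 10 CoNLL-U fields of a hypothetical word line, after the same post-processing
# that read_conllu (below) applies ("_" becomes None; feats and deps become lists).
_EXAMPLE_DEP = Dep(
    id="1", form="The", lemma="the", upos="DET", xpos="DT",
    feats=["Definite=Def", "PronType=Art"], head="2", deprel="det",
    deps=[], misc=None,
)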
def read_conllu(path: Text) -> Iterator[Sequence[Dep]]:
"""Reads a CoNLL-U format file into sequences of Dep objects.
The CoNLL-U format is described in detail here:
https://universaldependencies.org/format.html
A few key highlights:
* Word lines contain 10 fields separated by tab characters.
* Blank lines mark sentence boundaries.
* Comment lines start with hash (#).
Each word line will be converted into a Dep object, and the words in a
sentence will be collected into a sequence (e.g., list).
:return: An iterator over sentences, where each sentence is a sequence of
words, and each word is represented by a Dep object.
"""
sentence = []
with open(path) as infile:
for line in infile:
if line[0] == "#":
continue
line = line.strip()
if not line:
yield sentence
sentence = []
continue
line = line.split("\t")
for i, item in enumerate(line):
if i == 5 or i == 8:
if item == "_":
line[i] = []
else:
line[i] = item.split("|")
elif item == "_":
line[i] = None
word = Dep(*line)
sentence.append(word)
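# A small usage sketch, not part of the original module; "train.conllu" is a
# hypothetical path. It prints the length and word forms of every sentence
# yielded by read_conllu.
def _demo_read_conllu(path: Text = "train.conllu") -> None:
    for sentence in read_conllu(path):
        print(len(sentence), [dep.form for dep in sentence])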
class Action(Enum):
"""An action in an "arc standard" transition-based parser."""
SHIFT = 1
LEFT_ARC = 2
RIGHT_ARC = 3
def parse(deps: Sequence[Dep],
get_action: Callable[[Sequence[Dep], Sequence[Dep]], Action]) -> None:
"""Parse the sentence based on "arc standard" transitions.
Following the "arc standard" approach to transition-based parsing, this
method creates a stack and a queue, where the input Deps start out on the
queue, are moved to the stack by SHIFT actions, and are combined in
head-dependent relations by LEFT_ARC and RIGHT_ARC actions.
This method does not determine which actions to take; those are provided by
the `get_action` argument to the method. That method will be called whenever
the parser needs a new action, and then the parser will perform whatever
action is returned. If `get_action` returns an invalid action (e.g., a
SHIFT when the queue is empty), an arbitrary valid action will be taken
instead.
This method does not return anything; it modifies the `.head` field of the
Dep objects that were passed as input. Each Dep object's `.head` field is
assigned the value of its head's `.id` field, or "0" if the Dep object is
the root.
:param deps: The sentence, a sequence of Dep objects, each representing one
of the words in the sentence.
:param get_action: a function or other callable that takes the parser's
current stack and queue as input, and returns an "arc standard" action.
:return: Nothing; the `.head` fields of the input Dep objects are modified.
"""
queue = deque(deps)
stack = list()
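    # Skip CoNLL-U "empty nodes" (ids such as "8.1"); they belong to the enhanced
    # representation and are not part of the basic dependency tree parsed here.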
for word in list(queue):
if "." in word.id:
queue.remove(word)
while queue or len(stack) > 1:
action = get_action(stack, queue)
        if (action.value == 1 and queue) or len(stack) < 2:
dep = queue.popleft()
stack.append(dep)
elif action.value == 2:
stack[-2].head = stack[-1].id
stack.pop(-2)
else:
stack[-1].head = stack[-2].id
stack.pop()
for dep in deps:
        if dep.head is None:
dep.head = "0"
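# A minimal sketch, not part of the original module, of parse() with a naive policy
# that always requests SHIFT. Once the queue is empty parse() falls back to
# RIGHT_ARC, so every word ends up attached to the word before it and the first
# word becomes the root ("0").
def _demo_parse_chain(sentence: Sequence[Dep]) -> list:
    parse(sentence, lambda stack, queue: Action.SHIFT)
    return [(dep.id, dep.head) for dep in sentence]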
class Oracle:
def __init__(self, deps: Sequence[Dep]):
"""Initializes an Oracle to be used for the given sentence.
Minimally, it initializes a member variable `actions`, a list that
will be updated every time `__call__` is called and a new action is
generated.
Note: a new Oracle object should be created for each sentence; an
Oracle object should not be re-used for multiple sentences.
:param deps: The sentence, a sequence of Dep objects, each representing
one of the words in the sentence.
"""
self.actions = []
self.features = []
def __call__(self, stack: Sequence[Dep], queue: Sequence[Dep]) -> Action:
"""Returns the Oracle action for the given "arc standard" parser state.
The oracle for an "arc standard" transition-based parser inspects the
parser state and the reference parse (represented by the `.head` fields
of the Dep objects) and:
* Chooses LEFT_ARC if it produces a correct head-dependent relation
given the reference parse and the current configuration.
* Otherwise, chooses RIGHT_ARC if it produces a correct head-dependent
relation given the reference parse and all of the dependents of the
word at the top of the stack have already been assigned.
* Otherwise, chooses SHIFT.
The chosen action should be both:
* Added to the `actions` member variable
* Returned as the result of this method
Note: this method should only be called on parser state based on the Dep
objects that were passed to __init__; it should not be used for any
other Dep objects.
:param stack: The stack of the "arc standard" transition-based parser.
:param queue: The queue of the "arc standard" transition-based parser.
:return: The action that should be taken given the reference parse
(the `.head` fields of the Dep objects).
"""
#Feature Extraction
word_range = 2
        re_stack = list(reversed(stack))
stack_pos = ["sPOS{}=".format(i) + dep.upos for i, dep in enumerate(re_stack) if i < word_range]
queue_pos = ["qPOS{}=".format(i) + dep.upos for i, dep in enumerate(queue) if i < word_range]
stack_form = ["sform{}=".format(i) + dep.form for i, dep in enumerate(re_stack) if i < word_range and dep.form]
queue_form = ["qform{}=".format(i) + dep.form for i, dep in enumerate(queue) if i < word_range and dep.form]
feature_list = stack_pos + queue_pos + stack_form + queue_form
dict_f = {feature:1 for feature in feature_list}
self.features.append(dict_f)
if len(stack) < 2 and queue:
self.actions.append(Action.SHIFT)
return Action.SHIFT
previous_head = [x.head for x in stack[:-1]]
rest_head = [y.head for y in queue]
if stack[-1].id in previous_head:
self.actions.append(Action.LEFT_ARC)
return Action.LEFT_ARC
elif (int(stack[-1].head) < int(stack[-1].id)) and (stack[-1].id not in rest_head):
self.actions.append(Action.RIGHT_ARC)
return Action.RIGHT_ARC
else:
self.actions.append(Action.SHIFT)
return Action.SHIFT
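# A minimal sketch, not part of the original module, mirroring how
# Classifier.__init__ below drives the Oracle: parsing a gold-annotated sentence
# with its own Oracle re-derives the gold heads and records the (features, action)
# pairs used as training examples.
def _demo_oracle(sentence: Sequence[Dep]):
    oracle = Oracle(sentence)
    parse(sentence, oracle)
    return oracle.features, oracle.actions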
class Classifier:
def __init__(self, parses: Iterator[Sequence[Dep]]):
"""Trains a classifier on the given parses.
There are no restrictions on what kind of classifier may be trained,
but a typical approach would be to
1. Define features based on the stack and queue of an "arc standard"
transition-based parser (e.g., part-of-speech tags of the top words
in the stack and queue).
2. Apply `Oracle` and `parse` to each parse in the input to generate
training examples of parser states and oracle actions. It may be
helpful to modify `Oracle` to call the feature extraction function
defined in 1, and store the features alongside the actions list that
`Oracle` is already creating.
3. Train a machine learning model (e.g., logistic regression) on the
resulting features and labels (actions).
:param parses: An iterator over sentences, where each sentence is a
sequence of words, and each word is represented by a Dep object.
"""
self.lg = LogisticRegression(multi_class = "multinomial", solver = "lbfgs", penalty = "l2", C = 0.1, max_iter = 300)
self.dv = DictVectorizer()
self.le = LabelEncoder()
whole_feature = []
whole_label = []
for deps in parses:
oracle = Oracle(deps)
parse(deps, oracle)
whole_feature += oracle.features
whole_label += [action.value for action in oracle.actions]
self.dv.fit(whole_feature)
self.le.fit(whole_label)
feature_matrix = self.dv.transform(whole_feature)
label = self.le.transform(whole_label)
self.lg.fit(feature_matrix, label)
def __call__(self, stack: Sequence[Dep], queue: Sequence[Dep]) -> Action:
"""Predicts an action for the given "arc standard" parser state.
There are no restrictions on how this prediction may be made, but a
typical approach would be to convert the parser state into features,
and then use the machine learning model (trained in `__init__`) to make
the prediction.
:param stack: The stack of the "arc standard" transition-based parser.
:param queue: The queue of the "arc standard" transition-based parser.
:return: The action that should be taken.
"""
word_range = 2
        re_stack = list(reversed(stack))
stack_pos = ["sPOS{}=".format(i) + dep.upos for i, dep in enumerate(re_stack) if i < word_range]
queue_pos = ["qPOS{}=".format(i) + dep.upos for i, dep in enumerate(queue) if i < word_range]
stack_form = ["sform{}=".format(i) + dep.form for i, dep in enumerate(re_stack) if i < | |
<reponame>SomeoneSerge/trieste
# Copyright 2020 The Trieste Contributors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import annotations
from typing import Callable, Mapping, Union
import numpy as np
import numpy.testing as npt
import pytest
import tensorflow as tf
import tensorflow_probability as tfp
from gpflow.utilities import to_default_float
from gpflow.utilities.ops import leading_transpose
from tests.util.misc import TF_DEBUGGING_ERROR_TYPES, random_seed
from tests.util.models.gpflow.models import QuadraticMeanAndRBFKernel, gpr_model
from tests.util.models.models import fnc_2sin_x_over_3, fnc_3x_plus_10
from trieste.acquisition import (
ExpectedImprovement,
MinValueEntropySearch,
PenalizationFunction,
UpdatablePenalizationFunction,
)
from trieste.acquisition.function import NegativePredictiveMean, PredictiveVariance
from trieste.acquisition.function.greedy_batch import (
Fantasizer,
FantasizerModelOrStack,
FantasizerModelStack,
LocalPenalization,
_generate_fantasized_model,
hard_local_penalizer,
soft_local_penalizer,
)
from trieste.data import Dataset
from trieste.models import ProbabilisticModel
from trieste.models.gpflow import GaussianProcessRegression
from trieste.space import Box
from trieste.types import TensorType
def test_locally_penalized_expected_improvement_builder_raises_for_empty_data() -> None:
data = Dataset(tf.zeros([0, 1]), tf.ones([0, 1]))
space = Box([0, 0], [1, 1])
with pytest.raises(tf.errors.InvalidArgumentError):
LocalPenalization(search_space=space).prepare_acquisition_function(
QuadraticMeanAndRBFKernel(),
dataset=data,
)
with pytest.raises(tf.errors.InvalidArgumentError):
LocalPenalization(search_space=space).prepare_acquisition_function(
QuadraticMeanAndRBFKernel(),
)
def test_locally_penalized_expected_improvement_builder_raises_for_invalid_num_samples() -> None:
search_space = Box([0, 0], [1, 1])
with pytest.raises(tf.errors.InvalidArgumentError):
LocalPenalization(search_space, num_samples=-5)
@pytest.mark.parametrize("pending_points", [tf.constant([0.0]), tf.constant([[[0.0], [1.0]]])])
def test_locally_penalized_expected_improvement_builder_raises_for_invalid_pending_points_shape(
pending_points: TensorType,
) -> None:
data = Dataset(tf.zeros([3, 2], dtype=tf.float64), tf.ones([3, 2], dtype=tf.float64))
space = Box([0, 0], [1, 1])
builder = LocalPenalization(search_space=space)
with pytest.raises(TF_DEBUGGING_ERROR_TYPES):
builder.prepare_acquisition_function(QuadraticMeanAndRBFKernel(), data, pending_points)
@random_seed
@pytest.mark.parametrize(
"base_builder",
[
ExpectedImprovement(),
MinValueEntropySearch(Box([0, 0], [1, 1]), grid_size=10000, num_samples=10),
],
)
def test_locally_penalized_acquisitions_match_base_acquisition(
base_builder: ExpectedImprovement | MinValueEntropySearch[ProbabilisticModel],
) -> None:
data = Dataset(tf.zeros([3, 2], dtype=tf.float64), tf.ones([3, 2], dtype=tf.float64))
search_space = Box([0, 0], [1, 1])
model = QuadraticMeanAndRBFKernel()
lp_acq_builder = LocalPenalization(search_space, base_acquisition_function_builder=base_builder)
lp_acq = lp_acq_builder.prepare_acquisition_function(model, data, None)
base_acq = base_builder.prepare_acquisition_function(model, dataset=data)
x_range = tf.linspace(0.0, 1.0, 11)
x_range = tf.cast(x_range, dtype=tf.float64)
xs = tf.reshape(tf.stack(tf.meshgrid(x_range, x_range, indexing="ij"), axis=-1), (-1, 2))
lp_acq_values = lp_acq(xs[..., None, :])
base_acq_values = base_acq(xs[..., None, :])
if isinstance(base_builder, ExpectedImprovement):
npt.assert_array_equal(lp_acq_values, base_acq_values)
else: # check sampling-based acquisition functions are close
npt.assert_allclose(lp_acq_values, base_acq_values, atol=0.001)
@random_seed
@pytest.mark.parametrize("penalizer", [soft_local_penalizer, hard_local_penalizer])
@pytest.mark.parametrize(
"base_builder",
[ExpectedImprovement(), MinValueEntropySearch(Box([0, 0], [1, 1]), grid_size=5000)],
)
def test_locally_penalized_acquisitions_combine_base_and_penalization_correctly(
penalizer: Callable[..., Union[PenalizationFunction, UpdatablePenalizationFunction]],
base_builder: ExpectedImprovement | MinValueEntropySearch[ProbabilisticModel],
) -> None:
data = Dataset(tf.zeros([3, 2], dtype=tf.float64), tf.ones([3, 2], dtype=tf.float64))
search_space = Box([0, 0], [1, 1])
model = QuadraticMeanAndRBFKernel()
pending_points = tf.zeros([2, 2], dtype=tf.float64)
acq_builder = LocalPenalization(
search_space, penalizer=penalizer, base_acquisition_function_builder=base_builder
)
lp_acq = acq_builder.prepare_acquisition_function(model, data, None) # initialize
lp_acq = acq_builder.update_acquisition_function(lp_acq, model, data, pending_points[:1], False)
up_lp_acq = acq_builder.update_acquisition_function(lp_acq, model, data, pending_points, False)
assert up_lp_acq == lp_acq # in-place updates
base_acq = base_builder.prepare_acquisition_function(model, dataset=data)
best = acq_builder._eta
lipschitz_constant = acq_builder._lipschitz_constant
penalizer = penalizer(model, pending_points, lipschitz_constant, best)
x_range = tf.linspace(0.0, 1.0, 11)
x_range = tf.cast(x_range, dtype=tf.float64)
xs = tf.reshape(tf.stack(tf.meshgrid(x_range, x_range, indexing="ij"), axis=-1), (-1, 2))
lp_acq_values = lp_acq(xs[..., None, :])
base_acq_values = base_acq(xs[..., None, :])
penal_values = penalizer(xs[..., None, :])
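    # Local penalization multiplies the base acquisition by each penalizer value;
    # the reference product below is computed in log space for numerical stability.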
penalized_base_acq = tf.math.exp(tf.math.log(base_acq_values) + tf.math.log(penal_values))
if isinstance(base_builder, ExpectedImprovement):
npt.assert_array_equal(lp_acq_values, penalized_base_acq)
else: # check sampling-based acquisition functions are close
npt.assert_allclose(lp_acq_values, penalized_base_acq, atol=0.001)
@pytest.mark.parametrize("penalizer", [soft_local_penalizer, hard_local_penalizer])
@pytest.mark.parametrize("at", [tf.constant([[0.0], [1.0]]), tf.constant([[[0.0], [1.0]]])])
def test_lipschitz_penalizers_raises_for_invalid_batch_size(
at: TensorType,
penalizer: Callable[..., PenalizationFunction],
) -> None:
pending_points = tf.zeros([1, 2], dtype=tf.float64)
best = tf.constant([0], dtype=tf.float64)
lipschitz_constant = tf.constant([1], dtype=tf.float64)
lp = penalizer(QuadraticMeanAndRBFKernel(), pending_points, lipschitz_constant, best)
with pytest.raises(TF_DEBUGGING_ERROR_TYPES):
lp(at)
@pytest.mark.parametrize("penalizer", [soft_local_penalizer, hard_local_penalizer])
@pytest.mark.parametrize("pending_points", [tf.constant([0.0]), tf.constant([[[0.0], [1.0]]])])
def test_lipschitz_penalizers_raises_for_invalid_pending_points_shape(
pending_points: TensorType,
penalizer: Callable[..., PenalizationFunction],
) -> None:
best = tf.constant([0], dtype=tf.float64)
lipschitz_constant = tf.constant([1], dtype=tf.float64)
with pytest.raises(TF_DEBUGGING_ERROR_TYPES):
soft_local_penalizer(QuadraticMeanAndRBFKernel(), pending_points, lipschitz_constant, best)
def test_fantasized_expected_improvement_builder_raises_for_invalid_fantasize_method() -> None:
with pytest.raises(tf.errors.InvalidArgumentError):
Fantasizer(ExpectedImprovement().using("OBJECTIVE"), "notKB")
def test_fantasized_expected_improvement_builder_raises_for_invalid_model() -> None:
data = {
"OBJECTIVE": Dataset(tf.zeros([3, 2], dtype=tf.float64), tf.ones([3, 1], dtype=tf.float64))
}
models = {"OBJECTIVE": QuadraticMeanAndRBFKernel()}
pending_points = tf.zeros([3, 2], dtype=tf.float64)
builder = Fantasizer()
with pytest.raises(NotImplementedError):
builder.prepare_acquisition_function(models, data, pending_points) # type: ignore
def test_fantasized_expected_improvement_builder_raises_for_invalid_observation_shape() -> None:
x = tf.zeros([3, 2], dtype=tf.float64)
y1 = tf.ones([3, 1], dtype=tf.float64)
y2 = tf.ones([3, 2], dtype=tf.float64)
data = {"OBJECTIVE": Dataset(x, y1)}
models = {"OBJECTIVE": GaussianProcessRegression(gpr_model(x, y2))}
pending_points = tf.zeros([3, 2], dtype=tf.float64)
builder = Fantasizer()
with pytest.raises(TF_DEBUGGING_ERROR_TYPES):
builder.prepare_acquisition_function(models, data, pending_points)
@pytest.mark.parametrize("pending_points", [tf.constant([0.0]), tf.constant([[[0.0], [1.0]]])])
def test_fantasized_expected_improvement_builder_raises_for_invalid_pending_points_shape(
pending_points: TensorType,
) -> None:
x = tf.zeros([3, 2], dtype=tf.float64)
y = tf.ones([3, 1], dtype=tf.float64)
data = {"OBJECTIVE": Dataset(x, y)}
models = {"OBJECTIVE": GaussianProcessRegression(gpr_model(x, y))}
builder = Fantasizer()
with pytest.raises(TF_DEBUGGING_ERROR_TYPES):
builder.prepare_acquisition_function(models, data, pending_points)
@pytest.mark.parametrize("model_type", ["gpr", "stack"])
def test_fantasize_with_kriging_believer_does_not_change_negative_predictive_mean(
model_type: str,
) -> None:
x = to_default_float(tf.constant(np.arange(1, 6).reshape(-1, 1) / 5.0))
y = fnc_2sin_x_over_3(x)
x_test = to_default_float(tf.constant(np.arange(1, 13).reshape(-1, 1) / 12.0))[..., None]
pending_points = to_default_float(tf.constant([0.51, 0.81])[:, None])
data = {"OBJECTIVE": Dataset(x, y)}
models: Mapping[str, FantasizerModelOrStack]
if model_type == "stack":
models = {
"OBJECTIVE": FantasizerModelStack((GaussianProcessRegression(gpr_model(x, y)), 1))
}
else:
models = {"OBJECTIVE": GaussianProcessRegression(gpr_model(x, y))}
builder = Fantasizer(NegativePredictiveMean())
acq0 = builder.prepare_acquisition_function(models, data)
acq1 = builder.prepare_acquisition_function(models, data, pending_points)
acq_val0 = acq0(x_test)
acq_val1 = acq1(x_test)
tf.assert_equal(acq_val1, acq_val0)
@pytest.mark.parametrize("model_type", ["gpr", "stack"])
@pytest.mark.parametrize("fantasize_method", ["KB", "sample"])
def test_fantasize_reduces_predictive_variance(model_type: str, fantasize_method: str) -> None:
x = to_default_float(tf.constant(np.arange(1, 6).reshape(-1, 1) / 5.0))
y = fnc_2sin_x_over_3(x)
x_test = to_default_float(tf.constant(np.arange(1, 13).reshape(-1, 1) / 12.0))[..., None]
pending_points = to_default_float(tf.constant([0.51, 0.81])[:, None])
data = {"OBJECTIVE": Dataset(x, y)}
models: Mapping[str, FantasizerModelOrStack]
if model_type == "stack":
models = {
"OBJECTIVE": FantasizerModelStack((GaussianProcessRegression(gpr_model(x, y)), 1))
}
else:
models = {"OBJECTIVE": GaussianProcessRegression(gpr_model(x, y))}
builder = Fantasizer(PredictiveVariance(), fantasize_method=fantasize_method)
acq0 = builder.prepare_acquisition_function(models, data)
acq1 = builder.update_acquisition_function(acq0, models, data, pending_points[:1])
assert acq0._get_tracing_count() == 0 # type: ignore
assert acq1._get_tracing_count() == 0 # type: ignore
acq_val0 = acq0(x_test)
acq_val1 = acq1(x_test)
tf.assert_less(acq_val1, acq_val0)
# check we avoid retracing, both for the fantasized functions...
acq1_up = builder.update_acquisition_function(acq1, models, data, pending_points)
assert acq1_up == acq1 # in-place updates
acq1_up(x_test)
assert acq1_up._get_tracing_count() == 1 # type: ignore
# ...and the base functions
acq0_up = builder.update_acquisition_function(acq1, models, data)
assert acq0_up == acq0 # in-place updates
acq0_up(x_test)
assert acq0_up._get_tracing_count() == 1 # type: ignore
@pytest.mark.parametrize("model_type", ["gpr", "stack"])
def test_fantasize_allows_query_points_with_leading_dimensions(model_type: str) -> None:
x = to_default_float(tf.constant(np.arange(1, 24).reshape(-1, 1) / 8.0)) # shape: [23, 1]
y = fnc_2sin_x_over_3(x)
model5 = GaussianProcessRegression(gpr_model(x[:5, :], y[:5, :]))
additional_data = Dataset(tf.reshape(x[5:, :], [3, 6, -1]), tf.reshape(y[5:, :], [3, 6, -1]))
query_points = to_default_float(tf.constant(np.arange(1, 21).reshape(-1, 1) / 20.0))[..., None]
query_points = tf.reshape(query_points, [4, 5, 1])
if model_type == "stack":
fanta_model5 = _generate_fantasized_model(
FantasizerModelStack((model5, 1)), additional_data
)
else:
fanta_model5 = _generate_fantasized_model(model5, additional_data)
num_samples = 100000
samples_fm5 = fanta_model5.sample(query_points, num_samples)
pred_f_mean_fm5, pred_f_var_fm5 = fanta_model5.predict(query_points)
pred_y_mean_fm5, pred_y_var_fm5 = fanta_model5.predict_y(query_points)
pred_j_mean_fm5, pred_j_cov_fm5 = fanta_model5.predict_joint(query_points)
tf.assert_equal(samples_fm5.shape, [4, 3, num_samples, 5, 1])
tf.assert_equal(pred_f_mean_fm5.shape, [4, 3, 5, 1])
tf.assert_equal(pred_f_var_fm5.shape, [4, 3, 5, 1])
tf.assert_equal(pred_j_cov_fm5.shape, [4, 3, 1, 5, 5])
np.testing.assert_allclose(pred_f_mean_fm5, pred_j_mean_fm5, atol=1e-5)
np.testing.assert_allclose(pred_f_mean_fm5, pred_y_mean_fm5, atol=1e-5)
samples_fm5_mean = tf.reduce_mean(samples_fm5, axis=-3)
samples_fm5_cov = tfp.stats.covariance(samples_fm5[..., 0], sample_axis=-2)
for j in range(3):
samples_m5 = model5.conditional_predict_f_sample(
query_points[j], additional_data, num_samples
)
pred_f_mean_m5, pred_f_var_m5 = model5.conditional_predict_f(
query_points[j], additional_data
)
pred_j_mean_m5, pred_j_cov_m5 = model5.conditional_predict_joint(
query_points[j], additional_data
)
pred_y_mean_m5, pred_y_var_m5 = model5.conditional_predict_y(
query_points[j], additional_data
)
sample_m5_mean = tf.reduce_mean(samples_m5, axis=1)
sample_m5_cov = tfp.stats.covariance(samples_m5[..., 0], sample_axis=1)
np.testing.assert_allclose(sample_m5_mean, samples_fm5_mean[j], atol=1e-2, rtol=1e-2)
np.testing.assert_allclose(sample_m5_cov, samples_fm5_cov[j], atol=1e-2, rtol=1e-2)
np.testing.assert_allclose(pred_f_mean_m5, pred_f_mean_fm5[j], atol=1e-5)
np.testing.assert_allclose(pred_y_mean_m5, pred_y_mean_fm5[j], atol=1e-5)
np.testing.assert_allclose(pred_j_mean_m5, pred_j_mean_fm5[j], atol=1e-5)
np.testing.assert_allclose(pred_f_var_m5, pred_f_var_fm5[j], atol=1e-5)
np.testing.assert_allclose(pred_y_var_m5, pred_y_var_fm5[j], atol=1e-5)
np.testing.assert_allclose(pred_j_cov_m5, pred_j_cov_fm5[j], atol=1e-5)
def test_fantasized_stack_is_the_same_as_individually_fantasized() -> None:
x = to_default_float(tf.constant(np.arange(1, 24).reshape(-1, 1) / 8.0)) # shape: [23, 1]
y1 = fnc_2sin_x_over_3(x)
y2 = fnc_3x_plus_10(x)
model1 = GaussianProcessRegression(gpr_model(x[:5, :], y1[:5, :]))
model2 = GaussianProcessRegression(gpr_model(x[:5, :], y2[:5, :]))
stacked_models = FantasizerModelStack((model1, 1), (model2, 1))
additional_data1 = Dataset(tf.reshape(x[5:, :], [3, 6, -1]), tf.reshape(y1[5:, :], [3, 6, -1]))
additional_data2 = Dataset(tf.reshape(x[5:, :], [3, 6, -1]), tf.reshape(y2[5:, :], [3, 6, -1]))
additional_data_stacked = Dataset(
tf.reshape(x[5:, :], [3, 6, -1]),
tf.reshape(tf.concat([y1[5:, :], y2[5:, :]], axis=-1), [3, 6, -1]),
)
query_points = to_default_float(tf.constant(np.arange(1, 21).reshape(-1, 1) / 20.0))[..., None]
query_points = tf.reshape(query_points, [4, 5, 1])
stack_fanta_model = _generate_fantasized_model(stacked_models, additional_data_stacked)
fanta_model1 = _generate_fantasized_model(model1, additional_data1)
fanta_model2 = _generate_fantasized_model(model2, additional_data2)
num_samples = 100000
samples_fm1 = fanta_model1.sample(query_points, num_samples)
pred_f_mean_fm1, pred_f_var_fm1 = fanta_model1.predict(query_points)
pred_y_mean_fm1, pred_y_var_fm1 = fanta_model1.predict_y(query_points)
pred_j_mean_fm1, pred_j_cov_fm1 = fanta_model1.predict_joint(query_points)
samples_fm2 = fanta_model2.sample(query_points, num_samples)
pred_f_mean_fm2, pred_f_var_fm2 = fanta_model2.predict(query_points)
pred_y_mean_fm2, pred_y_var_fm2 = fanta_model2.predict_y(query_points)
pred_j_mean_fm2, pred_j_cov_fm2 = fanta_model2.predict_joint(query_points)
samples_fms = stack_fanta_model.sample(query_points, num_samples)
pred_f_mean_fms, pred_f_var_fms = stack_fanta_model.predict(query_points)
pred_y_mean_fms, pred_y_var_fms = stack_fanta_model.predict_y(query_points)
pred_j_mean_fms, pred_j_cov_fms = stack_fanta_model.predict_joint(query_points)
np.testing.assert_equal(pred_f_mean_fms.shape, [4, 3, 5, 2])
np.testing.assert_equal(pred_f_var_fms.shape, [4, 3, 5, 2])
np.testing.assert_equal(pred_f_mean_fm1.shape, [4, 3, 5, 1])
np.testing.assert_equal(pred_f_var_fm1.shape, [4, 3, 5, 1])
np.testing.assert_equal(pred_j_cov_fms.shape, [4, 3, 2, 5, 5])
np.testing.assert_equal(pred_j_cov_fm1.shape, [4, 3, 1, 5, 5])
np.testing.assert_equal(samples_fms.shape, [4, 3, 100000, 5, 2])
np.testing.assert_equal(samples_fm1.shape, [4, 3, 100000, 5, 1])
np.testing.assert_allclose(
pred_f_mean_fms, tf.concat([pred_f_mean_fm1, pred_f_mean_fm2], axis=-1), atol=1e-5
)
np.testing.assert_allclose(
pred_y_mean_fms, tf.concat([pred_y_mean_fm1, pred_y_mean_fm2], axis=-1), atol=1e-5
)
np.testing.assert_allclose(
        pred_j_mean_fms, tf.concat([pred_j_mean_fm1, pred_j_mean_fm2], axis=-1), atol=1e-5
    )
            delta = int(math.floor(round((self.stats.starttime - starttime) *
self.stats.sampling_rate, 7))) * -1
# Adjust starttime only if delta is greater than zero or if the values
# are padded with masked arrays.
if delta > 0 or pad:
self.stats.starttime += delta * self.stats.delta
if delta == 0 or (delta < 0 and not pad):
return self
elif delta < 0 and pad:
try:
gap = create_empty_data_chunk(abs(delta), self.data.dtype,
fill_value)
except ValueError:
                # create_empty_data_chunk raises a ValueError for too large
                # numbers of points, e.g. 189336539799
raise Exception("Time offset between starttime and "
"trace.starttime too large")
self.data = np.ma.concatenate((gap, self.data))
return self
elif starttime > self.stats.endtime:
self.data = np.empty(0, dtype=org_dtype)
return self
elif delta > 0:
try:
self.data = self.data[delta:]
except IndexError:
# a huge numbers for delta raises an IndexError
# here we just create empty array with same dtype
self.data = np.empty(0, dtype=org_dtype)
return self
def _rtrim(self, endtime, pad=False, nearest_sample=True, fill_value=None):
"""
Cut current trace to given end time. For more info see
:meth:`~obspy.core.trace.Trace.trim`.
.. rubric:: Example
>>> tr = Trace(data=np.arange(0, 10))
>>> tr.stats.delta = 1.0
>>> tr._rtrim(tr.stats.starttime + 2) # doctest: +ELLIPSIS
<...Trace object at 0x...>
>>> tr.data
array([0, 1, 2])
>>> tr.stats.endtime
UTCDateTime(1970, 1, 1, 0, 0, 2)
"""
org_dtype = self.data.dtype
if isinstance(endtime, float) or isinstance(endtime, int):
endtime = UTCDateTime(self.stats.endtime) - endtime
elif not isinstance(endtime, UTCDateTime):
raise TypeError
# check if in boundary
if nearest_sample:
delta = compatibility.round_away(
(endtime - self.stats.starttime) *
self.stats.sampling_rate) - self.stats.npts + 1
delta = int(delta)
else:
# solution for #127, however some tests need to be changed
# delta = -1*int(math.floor(compatibility.round_away(
# (self.stats.endtime - endtime) * \
# self.stats.sampling_rate, 7)))
delta = int(math.floor(round((endtime - self.stats.endtime) *
self.stats.sampling_rate, 7)))
if delta == 0 or (delta > 0 and not pad):
return self
if delta > 0 and pad:
try:
gap = create_empty_data_chunk(delta, self.data.dtype,
fill_value)
except ValueError:
                # create_empty_data_chunk raises a ValueError for too large
                # numbers of points, e.g. 189336539799
raise Exception("Time offset between starttime and " +
"trace.starttime too large")
self.data = np.ma.concatenate((self.data, gap))
return self
elif endtime < self.stats.starttime:
self.stats.starttime = self.stats.endtime + \
delta * self.stats.delta
self.data = np.empty(0, dtype=org_dtype)
return self
# cut from right
delta = abs(delta)
total = len(self.data) - delta
if endtime == self.stats.starttime:
total = 1
self.data = self.data[:total]
return self
@_add_processing_info
def trim(self, starttime=None, endtime=None, pad=False,
nearest_sample=True, fill_value=None):
"""
Cut current trace to given start and end time.
:type starttime: :class:`~obspy.core.utcdatetime.UTCDateTime`, optional
:param starttime: Specify the start time.
:type endtime: :class:`~obspy.core.utcdatetime.UTCDateTime`, optional
:param endtime: Specify the end time.
:type pad: bool, optional
:param pad: Gives the possibility to trim at time points outside the
time frame of the original trace, filling the trace with the
given ``fill_value``. Defaults to ``False``.
:type nearest_sample: bool, optional
:param nearest_sample: If set to ``True``, the closest sample is
selected, if set to ``False``, the inner (next sample for a
start time border, previous sample for an end time border) sample
containing the time is selected. Defaults to ``True``.
Given the following trace containing 6 samples, "|" are the
sample points, "A" is the requested starttime::
| |A | | B | |
1 2 3 4 5 6
``nearest_sample=True`` will select samples 2-5,
``nearest_sample=False`` will select samples 3-4 only.
:type fill_value: int, float or ``None``, optional
:param fill_value: Fill value for gaps. Defaults to ``None``. Traces
will be converted to NumPy masked arrays if no value is given and
gaps are present.
.. note::
This operation is performed in place on the actual data arrays. The
raw data is not accessible anymore afterwards. To keep your
original data, use :meth:`~obspy.core.trace.Trace.copy` to create
a copy of your trace object.
.. rubric:: Example
>>> tr = Trace(data=np.arange(0, 10))
>>> tr.stats.delta = 1.0
>>> t = tr.stats.starttime
>>> tr.trim(t + 2.000001, t + 7.999999) # doctest: +ELLIPSIS
<...Trace object at 0x...>
>>> tr.data
array([2, 3, 4, 5, 6, 7, 8])
"""
# check time order and swap eventually
if (isinstance(starttime, UTCDateTime) and
isinstance(endtime, UTCDateTime) and starttime > endtime):
raise ValueError("startime is larger than endtime")
# cut it
if starttime:
self._ltrim(starttime, pad, nearest_sample=nearest_sample,
fill_value=fill_value)
if endtime:
self._rtrim(endtime, pad, nearest_sample=nearest_sample,
fill_value=fill_value)
# if pad=True and fill_value is given convert to NumPy ndarray
if pad is True and fill_value is not None:
try:
self.data = self.data.filled()
except AttributeError:
# numpy.ndarray object has no attribute 'filled' - ignoring
pass
return self
def slice(self, starttime=None, endtime=None, nearest_sample=True):
"""
Return a new Trace object with data going from start to end time.
:type starttime: :class:`~obspy.core.utcdatetime.UTCDateTime`
:param starttime: Specify the start time of slice.
:type endtime: :class:`~obspy.core.utcdatetime.UTCDateTime`
:param endtime: Specify the end time of slice.
:type nearest_sample: bool, optional
:param nearest_sample: If set to ``True``, the closest sample is
selected, if set to ``False``, the inner (next sample for a
start time border, previous sample for an end time border) sample
containing the time is selected. Defaults to ``True``.
Given the following trace containing 6 samples, "|" are the
sample points, "A" is the requested starttime::
| |A | | B | |
1 2 3 4 5 6
``nearest_sample=True`` will select samples 2-5,
``nearest_sample=False`` will select samples 3-4 only.
:return: New :class:`~obspy.core.trace.Trace` object. Does not copy
data but just passes a reference to it.
.. rubric:: Example
>>> tr = Trace(data=np.arange(0, 10))
>>> tr.stats.delta = 1.0
>>> t = tr.stats.starttime
>>> tr2 = tr.slice(t + 2, t + 8)
>>> tr2.data
array([2, 3, 4, 5, 6, 7, 8])
"""
tr = copy(self)
tr.stats = deepcopy(self.stats)
tr.trim(starttime=starttime, endtime=endtime,
nearest_sample=nearest_sample)
return tr
def slide(self, window_length, step, offset=0,
include_partial_windows=False, nearest_sample=True):
"""
Generator yielding equal length sliding windows of the Trace.
Please keep in mind that it only returns a new view of the original
data. Any modifications are applied to the original data as well. If
you don't want this you have to create a copy of the yielded
windows. Also be aware that if you modify the original data and you
have overlapping windows, all following windows are affected as well.
.. rubric:: Example
>>> import obspy
>>> tr = obspy.read()[0]
>>> for windowed_tr in tr.slide(window_length=10.0, step=10.0):
... print("---") # doctest: +NORMALIZE_WHITESPACE +ELLIPSIS
... print(windowed_tr)
---
... | 2009-08-24T00:20:03.000000Z - 2009-08-24T00:20:13.000000Z | ...
---
... | 2009-08-24T00:20:13.000000Z - 2009-08-24T00:20:23.000000Z | ...
:param window_length: The length of each window in seconds.
:type window_length: float
:param step: The step between the start times of two successive
windows in seconds. Can be negative if an offset is given.
:type step: float
:param offset: The offset of the first window in seconds relative to
the start time of the whole interval.
:type offset: float
:param include_partial_windows: Determines if windows that are
            shorter than 99.9 % of the desired length are returned.
:type include_partial_windows: bool
:param nearest_sample: If set to ``True``, the closest sample is
selected, if set to ``False``, the inner (next sample for a
start time border, previous sample for an end time border) sample
containing the time is selected. Defaults to ``True``.
Given the following trace containing 6 samples, "|" are the
sample points, "A" is the requested starttime::
| |A | | B | |
1 2 3 4 5 6
``nearest_sample=True`` will select samples 2-5,
``nearest_sample=False`` will select samples 3-4 only.
:type nearest_sample: bool, optional
"""
windows = get_window_times(
starttime=self.stats.starttime,
endtime=self.stats.endtime,
window_length=window_length,
step=step,
offset=offset,
include_partial_windows=include_partial_windows)
if len(windows) < 1:
return
for start, stop in windows:
yield self.slice(start, stop,
nearest_sample=nearest_sample)
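    # Usage sketch (not part of the original docstring): overlapping windows
    # are produced when ``step`` is smaller than ``window_length``, e.g.
    #
    #     for win in tr.slide(window_length=10.0, step=5.0):
    #         process(win)  # ``process`` is a placeholder for user code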
def verify(self):
"""
Verify current trace object against available meta data.
.. rubric:: Example
>>> tr = Trace(data=np.array([1,2,3,4]))
>>> tr.stats.npts = 100
>>> tr.verify() #doctest: +ELLIPSIS
Traceback (most recent call last):
...
        Exception: npts(100) differs from data size(4)
"""
if len(self) != self.stats.npts:
msg = "ntps(%d) differs from data size(%d)"
raise Exception(msg % (self.stats.npts, len(self.data)))
delta = self.stats.endtime - self.stats.starttime
if delta < 0:
msg = "End time(%s) before start time(%s)"
            raise Exception(msg % (self.stats.endtime, self.stats.starttime))
outputs
class MRResizeInputSpec(MRTrix3BaseInputSpec):
in_file = File(
exists=True,
argstr='%s',
position=-2,
mandatory=True,
desc='input image to be resized')
out_file = File(
argstr='%s',
position=-1,
mandatory=True,
desc='output image')
img_size = traits.List(
traits.Float,
argstr='-size %s',
sep=',',
position=-3,
        desc='new image size for output image; specify as comma-separated '
'list')
vox_size = InputMultiObject(
traits.Float,
argstr='-voxel %s',
position=-3,
desc='new voxel size for the output image; specify as either single '
'value to be used for all dimensions, or as a comma-separated '
'list of the size for each voxel dimension')
scale_factor = InputMultiObject(
traits.Float,
argstr='-scale %f',
position=-3,
desc='scale image resolution by supplied factor; specify as either '
'single value to be used for all dimensions, or as '
'comma-separated list of scale factors for each dimension')
interp_method = traits.String(
default='cubic',
argstr='-interp %s',
position=-4,
desc='set interpolation method to use when resizing (choices: '
'nearest, linear, cubic, sinc. Default: cubic).')
class MRResizeOutputSpec(TraitedSpec):
out_file = File(exists=True, desc='output image')
class MRResize(MRTrix3Base):
"""
Resize an image by defining the new image resolution, voxel size or a
scale factor
Example
-------
>>> import nipype.interfaces.mrtrix3 as mrt
>>> mrresize = mrt.MRResize()
>>> mrresize.inputs.in_file = 'dwi.mif'
>>> mrresize.inputs.vox_size = 1.25
>>> mrresize.inputs.out_file = 'dwi_resized.mif'
>>> mrresize.cmdline # doctest: +ELLIPSIS
'mrresize -voxel 1.25 dwi.mif dwi_resized.mif'
>>> mrresize.run() # doctest: +SKIP
"""
    _cmd = 'mrresize'
input_spec = MRResizeInputSpec
output_spec = MRResizeOutputSpec
def _list_outputs(self):
outputs = self.output_spec().get()
outputs['out_file'] = op.abspath(self.inputs.out_file)
return outputs
class MRMathInputSpec(MRTrix3BaseInputSpec):
in_file = traits.Either(
File(exists=True),
traits.String(),
argstr='%s',
mandatory=True,
position=-3,
desc='input image(s)')
out_file = File(
argstr='%s', mandatory=True, position=-1, desc='output image')
operation = traits.Enum(
"mean",
"median",
"sum",
"product",
"rms",
"norm",
"var",
"std",
"min",
"max",
"absmax",
"magmax",
argstr="%s",
position=-2,
mandatory=True,
desc="operation to computer along a specified axis",
)
axis = traits.Int(
0, argstr="-axis %d", desc="specfied axis to perform the operation along"
)
class MRMathOutputSpec(TraitedSpec):
out_file = File(exists=True, desc="output image")
class MRMath(MRTrix3Base):
"""
Compute summary statistic on image intensities
along a specified axis of a single image
Example
-------
>>> import nipype.interfaces.mrtrix3 as mrt
>>> mrmath = mrt.MRMath()
>>> mrmath.inputs.in_file = 'dwi.mif'
>>> mrmath.inputs.operation = 'mean'
>>> mrmath.inputs.axis = 3
>>> mrmath.inputs.out_file = 'dwi_mean.mif'
>>> mrmath.inputs.grad_fsl = ('bvecs', 'bvals')
>>> mrmath.cmdline # doctest: +ELLIPSIS
'mrmath -axis 3 -fslgrad bvecs bvals dwi.mif mean dwi_mean.mif'
>>> mrmath.run() # doctest: +SKIP
"""
_cmd = "mrmath"
input_spec = MRMathInputSpec
output_spec = MRMathOutputSpec
def _list_outputs(self):
outputs = self.output_spec().get()
outputs["out_file"] = op.abspath(self.inputs.out_file)
return outputs
class MRRegisterInputSpec(MRTrix3BaseInputSpec):
in_file = File(
exists=True,
argstr='%s',
mandatory=True,
position=-2,
desc='input image 1 ("moving")'
)
ref_file = File(
exists=True,
argstr='%s',
mandatory=True,
position=-1,
desc='input image 2 ("template")'
)
# general options
tfm_type = traits.Enum(
'rigid',
'affine',
'nonlinear',
'rigid_affine',
'rigid_nonlinear',
'affine_nonlinear',
'rigid_affine_nonlinear',
argstr='-type %s',
desc='registration type'
)
tfm_img = File(
argstr='-transformed %s',
desc='moving image after transform to template space'
)
tfm_midway = traits.List(
File(exists=False),
argstr='-transformed_midway %s',
desc='moving and template image after transform to midway space'
)
mask1 = File(
argstr='-mask1 %s',
desc='mask to define region of moving image for optimization'
)
mask2 = File(
argstr='-mask2 %s',
desc='mask to define region of template image for optimization'
)
# linear options
rigid = File(
argstr='-rigid %s',
desc='output file containing 4x4 transformation matrix'
)
rigid_1tomidway = File(
argstr='-rigid_1tomidway %s',
desc='output file containing 4x4 transformation aligning '
'moving to template image at common midway'
)
rigid_2tomidway = File(
argstr='-rigid_2tomidway %s',
desc='output file containing 4x4 transformation aligning '
'template to moving image at common midway'
)
rigid_init_translate = traits.Enum(
'mass',
'geometric',
argstr='-rigid_init_translation %s',
desc='initialize translation and centre of rotation'
)
rigid_init_rotation = traits.Enum(
'search',
'mass',
'none',
argstr='-rigid_init_rotation %s',
desc='initialize rotation'
)
rigid_init_matrix = File(
argstr='-rigid_init_matrix %s',
desc='initialize registration with supplied rigid '
'transformation; note this overrides '
'rigid_init_translation and rigid_init_rotation'
)
rigid_scale = InputMultiObject(
traits.Float,
argstr='-rigid_scale %s',
desc='define scale factor of each level of multi-resolution '
'scheme'
)
affine = File(
argstr='-affine %s',
desc='output file containing affine transformation (4x4)'
)
affine_1tomidway = File(
argstr='-affine_1tomidway %s',
desc='output file containing affine transformation aligning '
'moving to template image at common midway'
)
affine_2tomidway = File(
argstr='-affine_2tomidway %s',
desc='output file containing affine transformation aligning '
'template to moving image at common midway'
)
affine_init_translate = traits.Enum(
'mass',
'geometric',
'none',
argstr='-affine_init_translation %s',
        desc='initialize translation and centre of rotation'
)
affine_init_rotation = traits.Enum(
'search',
'moments',
'none',
argstr='-affine_init_rotation %s',
desc='initialize rotation'
)
affine_init_matrix = File(
        argstr='-affine_init_matrix %s',
desc='initialize registration with supplied affine '
'transformation; note this overrides '
'affine_init_translation and affine_init_rotation'
)
affine_scale = InputMultiObject(
traits.Float,
argstr='-affine_scale %s',
desc='define scale factor of each level of multi-resolution '
'scheme'
)
affine_niter = InputMultiObject(
traits.Int,
argstr='-affine_niter %s',
desc='maximum number of gradient descent iterations per stage'
)
affine_metric = traits.Enum(
'diff',
argstr='-affine_metric %s',
desc='valid choices are: diff (intensity differences)'
)
affine_metric_estimator = traits.Enum(
'l1',
'l2',
'lp',
argstr='-affine_metric.diff.estimator %s',
desc='valid choices are l1 (least absolute: |x|), '
'l2 (ordinary least squares), lp (least powers: |x|^1.2)'
)
affine_lmax = InputMultiObject(
traits.Int,
argstr='-affine_lmax %s',
desc='explicitly set lmax to be used per scale factor'
)
affine_log = File(
argstr='-affine_log %s',
desc='write gradient descent parameter evolution to log file'
)
# advanced linear transform initialization options
init_translation_unmasked1 = traits.Bool(
argstr='-init_translation.unmasked1',
desc='disregard mask1 for translation'
)
init_translation_unmasked2 = traits.Bool(
        argstr='-init_translation.unmasked2',
desc='disregard mask2 for translation'
)
init_rotation_unmasked1 = traits.Bool(
argstr='-init_rotation.unmasked1',
desc='disregard mask1 for rotation'
)
init_rotation_unmasked2 = traits.Bool(
argstr='-init_rotation.unmasked2',
desc='disregard mask2 for rotation'
)
init_rotation_angles = InputMultiObject(
traits.Int,
argstr='-init_rotation.search.angles %s',
desc='rotation angles for local search in degrees between 0 '
'and 180'
)
init_rotation_scale = traits.Float(
argstr='-init_rotation.search.scale %f',
desc='relative size of images used for rotation search'
)
init_rotation_direction = traits.Int(
argstr='-init_rotation.search.directions %d',
desc='number of rotation axis for local search'
)
init_rotation_global = traits.Bool(
argstr='-init_rotation.search.run_global',
desc='perform a global search'
)
init_rotation_iteration = traits.Int(
argstr='-init_rotation.search.global.iterations %d',
desc='number of rotations to investigate'
)
# advanced linear registration stage options
linstage_iterations = InputMultiObject(
traits.Int,
argstr='-linstage.iterations %s',
desc='number of iterations for each registration stage'
)
linstage_first_opt = traits.Enum(
'bbgd',
'gd',
argstr='-linstage.optimiser.first %s',
desc='cost function to use at first iteration of all stages'
)
linstage_last_opt = traits.Enum(
'bbgd',
'gd',
argstr='-linstage.optimiser.last %s',
desc='cost function to use at last iteration of all stages'
)
linstage_default_opt = traits.Enum(
'bbgd',
'gd',
argstr='-linstage.optimiser.default %s',
desc='cost function to use at any stage other than first '
'or last'
)
linstage_diagnostic = File(
argstr='-linstage.diagnostics.prefix %s diagnostic',
desc='generate diagnostic images after each registration stage'
)
# non-linear registration options
nl_warp = InputMultiPath(
File,
argstr='-nl_warp %s ',
desc='non-linear warp output as two deformation fields, where '
'warp1 transforms moving to template and warp2 transforms '
'template to moving image'
)
nl_warp_full = traits.File(
argstr='-nl_warp_full %s',
desc='output all warps during registration'
)
nl_init = File(
argstr='-nl_init %s',
desc='initialize non-linear registration with supplied warped '
'image'
)
nl_scale = InputMultiObject(
traits.Float,
argstr='-nl_scale %s',
desc='defining scale factor for each level of multi-resolution '
'scheme'
)
nl_niter = InputMultiObject(
traits.Int,
argstr='-nl_niter %s',
desc='maximum number of iterations'
)
nl_update_smooth = traits.Float(
argstr='-nl_update_smooth %f',
desc='regularise gradient update field with Gausisan smoothing'
)
nl_dis_smooth = traits.Float(
argstr='-nl_dis_smooth %f',
desc='regularise displacement field with Gaussian smoothing'
)
nl_grad_step = traits.Float(
argstr='-nl_grad_step %f',
desc='gradient step size for non-linear registration'
)
nl_lmax = InputMultiObject(
traits.Int,
argstr='-nl_lmax %s',
        desc='lmax to be used per scale factor in non-linear FOD '
'registration'
)
# fod registration options
directions = File(
argstr='-directions %s',
desc='directions used for FOD reorientation using apodised PSF'
)
noreorientation = traits.Bool(
argstr='-noreorientation',
position=1,
desc='turn off FOD reorientation'
)
class MRRegisterOutputSpec(TraitedSpec):
tfm_img = File(desc='moving image after transform to template space')
tfm_midway = File(desc='moving and template image after transform in '
'midway space')
rigid = File(desc='file containing rigid 4x4 transformation matrix')
rigid_1tomidway = File(desc='file containing rigid 4x4 '
'transform matrix aligning moving at common '
'midway')
rigid_2tomidway = File(desc='file containing rigid 4x4 '
'transform matrix aligning template at common '
'midway')
affine = File(desc='file containing affine 4x4 transformation matrix')
affine_1tomidway = File(desc='file containing affine 4x4 transform matrix '
'aligning moving image at common midway')
affine_2tomidway = File(desc='file containing affine 4x4 transform matrix '
'aligning template image at common midway')
affine_log = File(desc='log with gradient descent parameter evolution')
linstage_diagnostic = File(desc='diagnostic image after each registration '
'stage')
    nl_warp = OutputMultiPath(File, desc='non-linear warp output as two '
                              'deformation fields')
json"
print (msj)
return response_({})
@app.route('/contrato/update', methods = ['POST'])
def api_contrato_update():
print("api_contrato_update")
if request.headers['Content-Type'] == 'application/json':
        # Retrieve the data.
datos = request.json['datos']
        # Person.
persona = ContratoPersona(__CONN__)
data = persona.set_contrato(datos)
return response_(data)
else:
msj = "[ERROR] Se esperaba formato json"
print (msj)
return response_({})
# #############################################################################
# PROFESSIONAL CATEGORY
# #############################################################################
@app.route('/categoria_profesional', methods = ['POST'])
def api_categoria_profesional():
print("api_categoria_profesional")
if request.headers['Content-Type'] == 'application/json':
        # Professional categories.
datos = request.json['datos']
cp_id = datos['id_']
activo = datos['activo']
cp = CategoriaProfesional(__CONN__)
data = cp.get_cp(cp_id, activo)
return response_(data)
else:
msj = "[ERROR] Se esperaba formato json"
print (msj)
return response_({})
@app.route('/categoria_profesional/update', methods = ['POST'])
def api_categoria_profesional_update():
print("api_categoria_profesional_update")
if request.headers['Content-Type'] == 'application/json':
        # Retrieve the data.
datos = request.json['datos']
        # Professional category.
cp = CategoriaProfesional(__CONN__)
data = cp.set_cp(datos)
return response_(data)
else:
msj = "[ERROR] Se esperaba formato json"
print (msj)
return response_({})
# #############################################################################
# POSITION
# #############################################################################
@app.route('/cargo', methods = ['POST'])
def api_cargo():
print("api_cargo")
if request.headers['Content-Type'] == 'application/json':
        # Position.
datos = request.json['datos']
cargo_id = datos['id_']
activo = datos['activo']
cargo = Cargo(__CONN__)
data = cargo.get_cargo(cargo_id, activo)
return response_(data)
else:
msj = "[ERROR] Se esperaba formato json"
print (msj)
return response_({})
@app.route('/cargo/update', methods = ['POST'])
def api_cargo_update():
print("api_cargo_update")
if request.headers['Content-Type'] == 'application/json':
        # Retrieve the data.
datos = request.json['datos']
        # Position.
cargo = Cargo(__CONN__)
data = cargo.set_cargo(datos)
return response_(data)
else:
msj = "[ERROR] Se esperaba formato json"
print (msj)
return response_({})
# #############################################################################
# ABSENCES IN CONTRACTS
# #############################################################################
@app.route('/contrato_ausencia', methods = ['POST'])
def api_contrato_ausencia():
print("api_contrato_ausencia")
if request.headers['Content-Type'] == 'application/json':
        # Contract absence.
datos = request.json['datos']
contrato_id = datos['contrato_id']
activo = datos['activo']
ausencia = ContratoAusenciaPersona(__CONN__)
data = ausencia.get_contrato_ausencia(contrato_id, activo)
return response_(data)
else:
msj = "[ERROR] Se esperaba formato json"
print (msj)
return response_({})
@app.route('/contrato_ausencia/update', methods = ['POST'])
def api_contrato_ausencia_update():
print("api_contrato_ausencia_update")
if request.headers['Content-Type'] == 'application/json':
        # Retrieve the data.
datos = request.json['datos']
        # Absences in contract.
ausencia = ContratoAusenciaPersona(__CONN__)
data = ausencia.set_contrato_ausencia(datos)
return response_(data)
else:
msj = "[ERROR] Se esperaba formato json"
print (msj)
return response_({})
# #############################################################################
# ABSENCES
# #############################################################################
@app.route('/ausencia', methods = ['POST'])
def api_ausencia():
print("api_ausencia")
if request.headers['Content-Type'] == 'application/json':
        # Absence.
datos = request.json['datos']
id_ = datos['id']
codigo = datos['codigo']
activo = datos['activo']
ausencia = Ausencia(__CONN__)
data = ausencia.get_ausencia(id_, codigo, activo)
return response_(data)
else:
msj = "[ERROR] Se esperaba formato json"
print (msj)
return response_({})
@app.route('/ausencia/update', methods = ['POST'])
def api_ausencia_update():
print("api_ausencia_update")
if request.headers['Content-Type'] == 'application/json':
        # Retrieve the data.
datos = request.json['datos']
        # Absence.
ausencia = Ausencia(__CONN__)
data = ausencia.set_ausencia(datos)
return response_(data)
else:
msj = "[ERROR] Se esperaba formato json"
print (msj)
return response_({})
# #############################################################################
# PREVIOUS SERVICES
# #############################################################################
@app.route('/servicios_previos', methods = ['POST'])
def api_servicios_previos():
print("api_servicios_previos")
if request.headers['Content-Type'] == 'application/json':
        # Previous services.
datos = request.json['datos']
persona_id = datos['persona_id']
anno = datos['anno']
sp = ServiciosPrevios(__CONN__)
data = sp.get_sp(persona_id, anno)
return response_(data)
else:
msj = "[ERROR] Se esperaba formato json"
print (msj)
return response_({})
@app.route('/servicios_previos/update', methods = ['POST'])
def api_servicios_previos_update():
print("api_servicios_previos_update")
if request.headers['Content-Type'] == 'application/json':
        # Retrieve the data.
datos = request.json['datos']
        # Previous services.
sp = ServiciosPrevios(__CONN__)
data = sp.set_sp(datos)
return response_(data)
else:
msj = "[ERROR] Se esperaba formato json"
print (msj)
return response_({})
# #############################################################################
# PROFESSIONAL CATEGORIES IN WORK TEAMS
# #############################################################################
@app.route('/categoria_equipo', methods = ['POST'])
def api_categoria_equipo():
print("api_categoria_equipo")
if request.headers['Content-Type'] == 'application/json':
        # Category in teams.
datos = request.json['datos']
eq_id = datos['eq_id']
cat_id = datos['cat_id']
cat_eq = CategoriaEquipo(__CONN__)
data = cat_eq.get_catEq(eq_id, cat_id)
return response_(data)
else:
msj = "[ERROR] Se esperaba formato json"
print (msj)
return response_({})
@app.route('/categoria_equipo/update', methods = ['POST'])
def api_categoria_equipo_update():
print("api_categoria_equipo_update")
if request.headers['Content-Type'] == 'application/json':
        # Retrieve the data.
datos = request.json['datos']
        # Category in teams.
cat_eq = CategoriaEquipo(__CONN__)
data = cat_eq.set_catEq(datos)
return response_(data)
else:
msj = "[ERROR] Se esperaba formato json"
print (msj)
return response_({})
# #############################################################################
# ROSTER
# #############################################################################
@app.route('/planificacion_diaria', methods = ['POST'])
def api_planificacion_diaria():
print("api_planificacion_diaria")
if request.headers['Content-Type'] == 'application/json':
        # Retrieve the data.
datos = request.json['datos']
print(datos)
anno = datos['anno']
mes = datos['mes']
equipo_id = datos['equipo_id']
        # Daily planning.
planilla = PlanificacionDiaria(__CONN__)
data = planilla.get_planificacion_diaria(anno, mes, \
equipo_id = equipo_id)
return response_(data)
else:
msj = "[ERROR] Se esperaba formato json"
print (msj)
return response_({})
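# Example request for the route above (illustrative sketch only; host, port and
# values are assumptions, the field names are the ones read by the handler):
#
#   curl -X POST http://localhost:5000/planificacion_diaria \
#        -H "Content-Type: application/json" \
#        -d '{"datos": {"anno": 2020, "mes": 6, "equipo_id": 1}}'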
# #############################################################################
# TASKS (ASSIGNMENTS)
# #############################################################################
@app.route('/tarea', methods = ['POST'])
def api_tarea():
print("api_tarea")
if request.headers['Content-Type'] == 'application/json':
        # Tasks.
datos = request.json['datos']
eq_id = datos['equipo_id']
anno = datos['anno']
tarea = Tarea(__CONN__)
data = tarea.get_tarea(eq_id, anno)
return response_(data)
else:
msj = "[ERROR] Se esperaba formato json"
print (msj)
return response_({})
@app.route('/tarea/update', methods = ['POST'])
def api_tarea_update():
print("api_tarea_update")
if request.headers['Content-Type'] == 'application/json':
        # Retrieve the data.
datos = request.json['datos']
        # Task.
tarea = Tarea(__CONN__)
data = tarea.set_tarea(datos)
return response_(data)
else:
msj = "[ERROR] Se esperaba formato json"
print (msj)
return response_({})
# #############################################################################
# SEARCH WORKER ASSIGNMENTS
# #############################################################################
@app.route('/buscar_trabajadores_asignar', methods = ['POST'])
def api_buscar_trabajadores_asignar():
print("api_buscar_tarea_trabajador")
if request.headers['Content-Type'] == 'application/json':
datos = request.json['datos']
eq_id = datos['equipo_id']
fecha = datos['fecha']
        # Assignments.
trab = AsignarTrabajador(__CONN__)
data = trab.get_asignar_trabajador(eq_id, fecha)
return response_(data)
else:
msj = "[ERROR] Se esperaba formato json"
print (msj)
return response_({})
# #############################################################################
# SHIFT CHANGE
# #############################################################################
@app.route('/cambio_turno/update', methods = ['POST'])
def api_cambio_turno_update():
print("api_cambio_turno_update")
if request.headers['Content-Type'] == 'application/json':
datos = request.json['datos']
        # Shift change.
cambio_turno = CambioTurno(__CONN__)
data = cambio_turno.set_cambio_turno(datos)
return response_(data)
else:
msj = "[ERROR] Se esperaba formato json"
print (msj)
return response_({})
# #############################################################################
# TEAM COVERAGE
# #############################################################################
@app.route('/cobertura_equipo', methods = ['POST'])
def api_cobertura_equipo():
print("api_cobertura_equipo")
if request.headers['Content-Type'] == 'application/json':
        # Coverage.
datos = request.json['datos']
cobertura = CoberturaEquipo(__CONN__)
data = cobertura.get_cobertura_equipo()
return response_(data)
else:
msj = "[ERROR] Se esperaba formato json"
print (msj)
return response_({})
@app.route('/cobertura_equipo/update', methods = ['POST'])
def api_cobertura_equipo_update():
print("api_cobertura_equipo_update")
if request.headers['Content-Type'] == 'application/json':
        # Retrieve the data.
datos = request.json['datos']
        # Coverage.
cobertura = CoberturaEquipo(__CONN__)
data = cobertura.set_cobertura_equipo(datos)
return response_(data)
else:
msj = "[ERROR] Se esperaba formato json"
print (msj)
return response_({})
# #############################################################################
# SERVICE COVERAGE
# #############################################################################
@app.route('/cobertura_servicio', methods = ['POST'])
def api_cobertura_servicio():
print("api_cobertura_servicio")
if request.headers['Content-Type'] == 'application/json':
        # Service coverage.
datos = request.json['datos']
equipo_id = datos['equipo_id']
mes = datos['mes']
anno = datos['anno']
cobertura = CoberturaServicio(__CONN__)
data = cobertura.get_coberturaServicio(equipo_id, mes, anno)
return response_(data)
else:
msj = "[ERROR] Se esperaba formato json"
print (msj)
return response_({})
# #############################################################################
# HOURS BALANCE
# #############################################################################
@app.route('/balance', methods = ['POST'])
def api_balance():
print("api_balance")
if request.headers['Content-Type'] == 'application/json':
        # Hours balance.
datos = request.json['datos']
persona_id = datos['persona_id']
mes = datos['mes']
anno = datos['anno']
        # ALGG 10-06-2017. The remaining call parameters are included.
fecha_inicio = datos['fecha_inicio']
fecha_fin = datos['fecha_fin']
cf_id = datos['cf_id']
sf_id = datos['sf_id']
eq_id = datos['eq_id']
p_id = datos['p_id']
balance = Balance(__CONN__)
data = balance.get_horas(mes, anno, fecha_inicio, fecha_fin, \
persona_id, cf_id, sf_id, eq_id, p_id)
# print("enviando balance")
# print(data)
return response_(data)
else:
msj = "[ERROR] Se esperaba formato json"
print (msj)
return response_({})
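# Example payload for /balance (sketch only; the values and date format are
# assumptions, the keys mirror the fields read by the handler above):
#
#   {"datos": {"persona_id": 1, "mes": 6, "anno": 2020,
#              "fecha_inicio": "2020-06-01", "fecha_fin": "2020-06-30",
#              "cf_id": null, "sf_id": null, "eq_id": null, "p_id": null}}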
# #############################################################################
# THEORETICAL WORKING HOURS
# #############################################################################
@app.route('/jornada_teorica', methods = ['POST'])
def api_jornada_teorica():
print("api_jornada_teorica")
if request.headers['Content-Type'] == 'application/json':
        # Retrieve the data.
datos = request.json['datos']
cf_id = datos['cf_id']
anno = datos['anno']
        # Theoretical working hours.
jt = JornadaTeorica(__CONN__)
data = jt.get_jt(cf_id, anno)
return response_(data)
else:
msj = "[ERROR] Se esperaba formato json"
print (msj)
return response_({})
@app.route('/jornada_teorica/update', methods = ['POST'])
def api_jornada_teorica_update():
print("api_jornada_teorica_update")
if request.headers['Content-Type'] == 'application/json':
        # Retrieve the data.
datos = request.json['datos']
        # Theoretical working hours.
jt = JornadaTeorica(__CONN__)
data = jt.set_jt(datos)
return response_(data)
else:
msj = "[ERROR] Se esperaba formato json"
print (msj)
return response_({})
# #############################################################################
# MAIN ENTRY POINT - FLASK APPLICATION
# #############################################################################
def info():
return '''
###########################################################################
###########################################################################
w S h i f t s
G e s t o r d e T u r n o s d e T r a b a j o
###########################################################################
###########################################################################
'''
def comprobar_usuario_bd():
    '''Check whether a user exists in the database. By default, if there are
    no users, an "admin" user is created with password <PASSWORD>'''
ret = True
usuario = Usuario(__CONN__)
data = usuario.get_usuario(es_activo = 'S')
if len(data['data']) == 0:
print(u"No se han encontrado usuarios activos en el sistema.")
datos = {'celdas_a_modificar' : [], \
'filas_a_eliminar' : [], \
'filas_a_insertar' : [{'nick' : 'admin', \
'passwd' : '<PASSWORD>', \
'fecha_alta' : datetime.today(), \
'intentos' : 5, \
'activo' : 'S'}]}
ret = usuario.set_usuario(datos)
if ret['data'][0]['estado']:
print(u'Se ha creado usuario "admin" con contraseña "<PASSWORD>".')
else:
            print(u'Error al crear usuario "admin".')
# magick.py
from binascii import crc32
from errno import ESRCH
from fcntl import fcntl, F_GETFL, F_SETFL
import logging
import os.path
from os import O_NONBLOCK
from subprocess import Popen, PIPE
from tornado.ioloop import IOLoop
from urlparse import urlparse
# Text 'stylesheets'
__all__ = ["ImageMagick", "is_remote"]
logger = logging.getLogger("ectyper")
def is_remote(path):
"""
Returns true if the given path is a remote HTTP or HTTPS URL.
"""
return urlparse(path).scheme in set(["http", "https"])
def _valid_pct(s):
"""
Returns true if the given string represents a positive integer
followed by the '%' character.
"""
if isinstance(s, basestring) and s.endswith('%'):
try:
s = int(s[0:-1])
if s >= 0:
return True
except ValueError:
pass
return False
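# Examples (behaviour of the helper above): _valid_pct("50%") -> True,
# _valid_pct("-5%") -> False, _valid_pct("50") -> False, _valid_pct(50) -> False.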
def _proc_failed(proc):
"""
Returns true if the given subprocess.Popen has terminated and
returned a non-zero code.
"""
rcode = proc.poll()
return rcode is not None and rcode != 0
def _non_blocking_fileno(fh):
fd = fh.fileno()
try:
flags = fcntl(fd, F_GETFL)
fcntl(fd, F_SETFL, flags | O_NONBLOCK)
except IOError, e:
# Failed to set to non-blocking, warn and continue.
logger.warning("Couldn't setup non-blocking pipe: %s" % str(e))
return fd
def _make_blocking(fd):
try:
flags = fcntl(fd, F_GETFL)
fcntl(fd, F_SETFL, flags & ~O_NONBLOCK)
except IOError, e:
# Failed to set to blocking, warn and continue.
logger.warning("Couldn't set blocking: %s" % str(e))
def _list_prepend(dest, src):
"""
Prepends the src to the dest list in place.
"""
for i in xrange(len(src)):
dest.insert(0, src[len(src) - i - 1])
def _proc_terminate(proc):
try:
if proc.poll() is None:
proc.terminate()
proc.wait()
except OSError, e:
if e.errno != ESRCH:
raise
class ImageMagick(object):
"""
Wraps the command-line verison of ImageMagick and provides a way to:
- Chain image operations (i.e. resize -> reflect -> convert)
- Asynchronously process the chain of operations
Chaining happens in the order that you call each method.
"""
JPEG = "jpeg"
PNG = "png"
GRAVITIES = {
"left": "West",
"right": "East",
"top": "North",
"bottom": "South",
"middle": "Center",
"center": "Center",
"topleft": "NorthWest",
"topright": "NorthEast",
"bottomleft": "SouthWest",
"bottomright": "SouthEast",
}
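    # Usage sketch (not from the original source; argument values are made up):
    # operations are applied in the order the methods are called, e.g.
    #
    #     im = ImageMagick()
    #     im.resize(320, 240, maintain_ratio=True, will_crop=True)
    #     im.crop(320, 240, 0, 0, "Center")
    #     im.set_quality(85)
    #
    # resizes to fill 320x240, centre-crops, then sets the encoding quality.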
def __init__(self):
""
self.options = []
self.filters = []
self.format = self.PNG
self.convert_path = None
self.curl_path = None
self.ioloop = IOLoop.instance()
self.comment = '\'\''
def _chain_op(self, name, operation, prepend):
"""
Private helper. Chains the given operation/name either prepending
or appending depending on the passed in boolean value of prepend.
"""
if prepend:
self.filters.insert(0, name)
_list_prepend(self.options, operation)
else:
self.filters.append(name)
self.options.extend(operation)
def reflect(self, out_height, top_alpha, bottom_alpha, prepend=False):
"""
Flip the image upside down and crop to the last out_height pixels. Top
and bottom alpha sets parameters for the linear gradient from the top
to bottom.
"""
opt_name = 'reflect_%0.2f_%0.2f_%0.2f' % (out_height, top_alpha, bottom_alpha)
crop_param = 'x%d!' % out_height
rng = top_alpha - bottom_alpha
opt = [
'-gravity', 'NorthWest',
'-alpha', 'on',
'-colorspace', 'sRGB',
'-flip',
'(',
'+clone', '-crop', crop_param, '-delete', '1-100',
'-channel', 'G', '-fx', '%0.2f-(j/h)*%0.2f' % (top_alpha, rng),
'-separate',
')',
'-alpha', 'off', '-compose', 'copy_opacity', '-composite',
'-crop', crop_param, '-delete', '1-100'
]
self._chain_op(opt_name, opt, prepend)
def crop(self, w, h, x, y, g, prepend=False):
"""
Crop the image to (w, h) offset to (x, y) with gravity g. w, h, x, and
y should be integers (w, h should be positive).
w and h can optionally be integer strings ending with '%'.
g should be one of NorthWest, North, NorthEast, West, Center, East,
SouthWest, South, SouthEast (see your ImageMagick's -gravity list for
details).
"""
(w, h) = [v if _valid_pct(v) else int(v) for v in (w, h)]
x = "+%d" % x if x >= 0 else str(x)
y = "+%d" % y if y >= 0 else str(y)
self._chain_op(
'crop_%s_%sx%s%s%s' % (g, w, h, x, y),
['-gravity', g, '-crop', '%sx%s%s%s' % (w, h, x, y)],
prepend)
def add_styled_text(self, t, style, font_dir, w, h):
"""
Add a piece of text (t) with a style (style) to an image of size (w, h)
Style is an object defined as follows:
style = {
'base_w' - expected width of image for x, y, and fontsize to be correct - will determine how those values are scaled
'base_h' - expected height of image for x, y, and fontsize to be correct - will determine how those values are scaled
'x' - location of font
'y' - location of font
'g' - gravity of the font placement
'pointsize' - default pointsize of the font
'color' - Color of the font
'installed_font' - The name of a font installed on the machine or None if using relative_font
'relative_font' - The location of a local font, relative to font_dir
'font_weight' - The font weight
}
"""
if style:
w_mod = w / style['base_w']
h_mod = h / style['base_h']
x = style['x'] * w_mod
y = style['y'] * h_mod
pointsize = style['pointsize'] * h_mod
font = style['installed_font']
if not font and font_dir:
font = os.path.join(font_dir, style['relative_font'])
self.add_text(str(x), str(y), style['g'], str(pointsize), style['color'], t, font,
str(style['font_weight']))
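    # Example style dict (illustrative only; the values are assumptions, the
    # keys are the ones documented in the docstring above):
    #
    #     style = {
    #         'base_w': 640, 'base_h': 360,
    #         'x': 20, 'y': 20, 'g': 'NorthWest',
    #         'pointsize': 24, 'color': 'white',
    #         'installed_font': 'Helvetica', 'relative_font': None,
    #         'font_weight': 400,
    #     }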
def add_text(self, x, y, g, pointsize, color, text, font, font_weight, style="Normal", prepend=False):
stripped_text = ''.join(c for c in text if c.isalnum())
stripped_text = stripped_text[:64] if len(stripped_text) > 64 else stripped_text
check = crc32(text.encode('utf-8'))
self._chain_op(
'text_%s%s%s_%s_%s' % (g, x, y, stripped_text, check),
[
"-gravity", g,
"-pointsize", pointsize,
"-fill", color,
"-font", font,
"-weight", font_weight,
"-style", style,
"-draw", "text %s,%s '%s'" % (x, y, text.replace('\\', '\\\\').replace("'", '\\\''))
],
prepend
)
def overlay(self, x, y, g, image_filename, prepend=False):
"""
Overlay without resizing
"""
self.overlay_with_resize(x, y, -1, -1, g, image_filename, prepend)
def overlay_with_resize(self, x, y, w, h, g, image_filename, prepend=False):
"""
Overlay image specified by image_filename onto the current image,
offset by (x, y) with gravity g. x and y should be integers.
The overlay image is resized according to (w x h), if they are positive
g should be one of NorthWest, North, NorthEast, West, Center, East,
SouthWest, South, SouthEast (see your ImageMagick's -gravity list for
details).
"""
opt_name = 'overlay_%d_%d_%s' % (x, y, os.path.basename(image_filename))
if g != "Center":
opt_name += "_" + g
x = "+%d" % x if x >= 0 else str(x)
y = "+%d" % y if y >= 0 else str(y)
size = "%dx%d!" % (w, h) if w > 0 and h > 0 else ""
self._chain_op(
opt_name,
[
image_filename,
'-gravity', g,
'-geometry', "%s%s%s" % (size, x, y),
'-composite'
],
prepend)
def resize(self, w, h, maintain_ratio, will_crop, prepend=False):
"""
Resizes the image to the given size. w and h are expected to be
positive integers. If maintain_ratio evaluates to True, the original
aspect ratio of the image will be preserved. With maintain_ratio True:
if will_crop is true, then the result will fill and possibly overflow
the dimensions; otherwise it will scale to fit inside the dimensions.
"""
resize_type = 1
size = "%dx%d" % (w, h)
if not maintain_ratio:
size += "!"
resize_type = 0
elif will_crop:
size += "^"
resize_type = 2
name = 'resize_%d_%d_%d' % (w, h, resize_type)
opt = ['-resize', size]
self._chain_op(name, opt, prepend)
def set_quality(self, quality):
"""
Specifies the compression quality used in the jpg/png encoding
"""
name = 'set_quality_%d' % quality
opt = ['-quality', '%d' % quality]
self._chain_op(name, opt, False)
def constrain(self, w, h, prepend=False):
"""
Constrain the image to the given size. w and h are expected to be
positive integers. This operation is useful after a resize in which
aspect ratio was preserved.
"""
extent = "%dx%d" % (w, h)
self._chain_op(
'constrain_%d_%d' % (w, h),
[
'-gravity', 'Center',
'-background', 'transparent',
'-extent', extent
],
prepend)
def extent(self, w, h, g='Center', bg='#00000000', cp='over', prepend=False):
"""
Extent the image to the given size by expanding its borders. w and h
are expected to be positive integers, g is the anchor or gravity direction
of the expansion, bg is the background color of extended area, cp is the
compose method of the extent operator.
"""
extent = "%dx%d" % (w, h)
self._chain_op(
'extent_%d_%d_%s_%s_%s' % (w, h, g, bg, cp),
[
'-gravity', g,
'-background', bg,
'-compose', cp,
'-extent', extent
],
prepend)
def splice(self, w, h, g='Center', bg='#00000000', cp='over', prepend=False):
"""
Insert a space into the middle or edge of an image, increasing the final
        size of the image.
from .validators.v1_3_1.jsd_cfbd3870405aad55 \
import JSONSchemaValidatorCfbd3870405AAd55 \
as JSONSchemaValidatorCfbd3870405AAd55_v1_3_1
from .validators.v1_3_1.jsd_d09b08a3447aa3b9 \
import JSONSchemaValidatorD09B08A3447AA3B9 \
as JSONSchemaValidatorD09B08A3447AA3B9_v1_3_1
from .validators.v1_3_1.jsd_d0a1abfa435b841d \
import JSONSchemaValidatorD0A1Abfa435B841D \
as JSONSchemaValidatorD0A1Abfa435B841D_v1_3_1
from .validators.v1_3_1.jsd_d0a4b88145aabb51 \
import JSONSchemaValidatorD0A4B88145AaBb51 \
as JSONSchemaValidatorD0A4B88145AaBb51_v1_3_1
from .validators.v1_3_1.jsd_d49af9b84c6aa8ea \
import JSONSchemaValidatorD49AF9B84C6AA8Ea \
as JSONSchemaValidatorD49AF9B84C6AA8Ea_v1_3_1
from .validators.v1_3_1.jsd_d6b8ca774739adf4 \
import JSONSchemaValidatorD6B8Ca774739Adf4 \
as JSONSchemaValidatorD6B8Ca774739Adf4_v1_3_1
from .validators.v1_3_1.jsd_d7a6392845e8969d \
import JSONSchemaValidatorD7A6392845E8969D \
as JSONSchemaValidatorD7A6392845E8969D_v1_3_1
from .validators.v1_3_1.jsd_d888ab6d4d59a8c1 \
import JSONSchemaValidatorD888Ab6D4D59A8C1 \
as JSONSchemaValidatorD888Ab6D4D59A8C1_v1_3_1
from .validators.v1_3_1.jsd_d8a619974a8a8c48 \
import JSONSchemaValidatorD8A619974A8A8C48 \
as JSONSchemaValidatorD8A619974A8A8C48_v1_3_1
from .validators.v1_3_1.jsd_d9a1fa9c4068b23c \
import JSONSchemaValidatorD9A1Fa9C4068B23C \
as JSONSchemaValidatorD9A1Fa9C4068B23C_v1_3_1
from .validators.v1_3_1.jsd_db8e09234a988bab \
import JSONSchemaValidatorDb8E09234A988Bab \
as JSONSchemaValidatorDb8E09234A988Bab_v1_3_1
from .validators.v1_3_1.jsd_dcaa6bde4feb9152 \
import JSONSchemaValidatorDcaa6Bde4Feb9152 \
as JSONSchemaValidatorDcaa6Bde4Feb9152_v1_3_1
from .validators.v1_3_1.jsd_e0b5599b4f2997b7 \
import JSONSchemaValidatorE0B5599B4F2997B7 \
as JSONSchemaValidatorE0B5599B4F2997B7_v1_3_1
from .validators.v1_3_1.jsd_e2adba7943bab3e9 \
import JSONSchemaValidatorE2AdBa7943BaB3E9 \
as JSONSchemaValidatorE2AdBa7943BaB3E9_v1_3_1
from .validators.v1_3_1.jsd_e39588a5494982c4 \
import JSONSchemaValidatorE39588A5494982C4 \
as JSONSchemaValidatorE39588A5494982C4_v1_3_1
from .validators.v1_3_1.jsd_e487f8d3481b94f2 \
import JSONSchemaValidatorE487F8D3481B94F2 \
as JSONSchemaValidatorE487F8D3481B94F2_v1_3_1
from .validators.v1_3_1.jsd_e6b3db8046c99654 \
import JSONSchemaValidatorE6B3Db8046C99654 \
as JSONSchemaValidatorE6B3Db8046C99654_v1_3_1
from .validators.v1_3_1.jsd_e78bb8a2449b9eed \
import JSONSchemaValidatorE78BB8A2449B9Eed \
as JSONSchemaValidatorE78BB8A2449B9Eed_v1_3_1
from .validators.v1_3_1.jsd_e9b99b2248c88014 \
import JSONSchemaValidatorE9B99B2248C88014 \
as JSONSchemaValidatorE9B99B2248C88014_v1_3_1
from .validators.v1_3_1.jsd_eab7abe048fb99ad \
import JSONSchemaValidatorEab7Abe048Fb99Ad \
as JSONSchemaValidatorEab7Abe048Fb99Ad_v1_3_1
from .validators.v1_3_1.jsd_eb8249e34f69b0f1 \
import JSONSchemaValidatorEb8249E34F69B0F1 \
as JSONSchemaValidatorEb8249E34F69B0F1_v1_3_1
from .validators.v1_3_1.jsd_eba669054e08a60e \
import JSONSchemaValidatorEba669054E08A60E \
as JSONSchemaValidatorEba669054E08A60E_v1_3_1
from .validators.v1_3_1.jsd_ee9aab01487a8896 \
import JSONSchemaValidatorEe9AAb01487A8896 \
as JSONSchemaValidatorEe9AAb01487A8896_v1_3_1
from .validators.v1_3_1.jsd_eeb168eb41988e07 \
import JSONSchemaValidatorEeb168Eb41988E07 \
as JSONSchemaValidatorEeb168Eb41988E07_v1_3_1
from .validators.v1_3_1.jsd_eeb7eb4b4bd8a1dd \
import JSONSchemaValidatorEeb7Eb4B4Bd8A1Dd \
as JSONSchemaValidatorEeb7Eb4B4Bd8A1Dd_v1_3_1
from .validators.v1_3_1.jsd_f083cb13484a8fae \
import JSONSchemaValidatorF083Cb13484A8Fae \
as JSONSchemaValidatorF083Cb13484A8Fae_v1_3_1
from .validators.v1_3_1.jsd_f09319674049a7d4 \
import JSONSchemaValidatorF09319674049A7D4 \
as JSONSchemaValidatorF09319674049A7D4_v1_3_1
from .validators.v1_3_1.jsd_f393abe84989bb48 \
import JSONSchemaValidatorF393Abe84989Bb48 \
as JSONSchemaValidatorF393Abe84989Bb48_v1_3_1
from .validators.v1_3_1.jsd_f3b26b5544cabab9 \
import JSONSchemaValidatorF3B26B5544CaBab9 \
as JSONSchemaValidatorF3B26B5544CaBab9_v1_3_1
from .validators.v1_3_1.jsd_f49548c54be8a3e2 \
import JSONSchemaValidatorF49548C54Be8A3E2 \
as JSONSchemaValidatorF49548C54Be8A3E2_v1_3_1
from .validators.v1_3_1.jsd_f5947a4c439a8bf0 \
import JSONSchemaValidatorF5947A4C439A8Bf0 \
as JSONSchemaValidatorF5947A4C439A8Bf0_v1_3_1
from .validators.v1_3_1.jsd_f5a13ab24c5aaa91 \
import JSONSchemaValidatorF5A13Ab24C5AAa91 \
as JSONSchemaValidatorF5A13Ab24C5AAa91_v1_3_1
from .validators.v1_3_1.jsd_f5a269c44f2a95fa \
import JSONSchemaValidatorF5A269C44F2A95Fa \
as JSONSchemaValidatorF5A269C44F2A95Fa_v1_3_1
from .validators.v1_3_1.jsd_f5ac590c4ca9975a \
import JSONSchemaValidatorF5Ac590C4Ca9975A \
as JSONSchemaValidatorF5Ac590C4Ca9975A_v1_3_1
from .validators.v1_3_1.jsd_f6826a8e41bba242 \
import JSONSchemaValidatorF6826A8E41BbA242 \
as JSONSchemaValidatorF6826A8E41BbA242_v1_3_1
from .validators.v1_3_1.jsd_f6ac994f451ba011 \
import JSONSchemaValidatorF6Ac994F451BA011 \
as JSONSchemaValidatorF6Ac994F451BA011_v1_3_1
from .validators.v1_3_1.jsd_f6b119ad4d4aaf16 \
import JSONSchemaValidatorF6B119Ad4D4AAf16 \
as JSONSchemaValidatorF6B119Ad4D4AAf16_v1_3_1
from .validators.v1_3_1.jsd_f9bd99c74bba8832 \
import JSONSchemaValidatorF9Bd99C74Bba8832 \
as JSONSchemaValidatorF9Bd99C74Bba8832_v1_3_1
from .validators.v1_3_1.jsd_fa9219bf45c8b43b \
import JSONSchemaValidatorFa9219Bf45C8B43B \
as JSONSchemaValidatorFa9219Bf45C8B43B_v1_3_1
from .validators.v1_3_1.jsd_fb9beb664f2aba4c \
import JSONSchemaValidatorFb9BEb664F2ABa4C \
as JSONSchemaValidatorFb9BEb664F2ABa4C_v1_3_1
from .validators.v1_3_1.jsd_fb9bf80f491a9851 \
import JSONSchemaValidatorFb9BF80F491A9851 \
as JSONSchemaValidatorFb9BF80F491A9851_v1_3_1
from .validators.v1_3_1.jsd_fba0d80747eb82e8 \
import JSONSchemaValidatorFba0D80747Eb82E8 \
as JSONSchemaValidatorFba0D80747Eb82E8_v1_3_1
from .validators.v1_3_1.jsd_fc9538fe43d9884d \
import JSONSchemaValidatorFc9538Fe43D9884D \
as JSONSchemaValidatorFc9538Fe43D9884D_v1_3_1
from .validators.v1_3_1.jsd_ff816b8e435897eb \
import JSONSchemaValidatorFf816B8E435897Eb \
as JSONSchemaValidatorFf816B8E435897Eb_v1_3_1
from .validators.v1_3_1.jsd_ffa748cc44e9a437 \
import JSONSchemaValidatorFfa748Cc44E9A437 \
as JSONSchemaValidatorFfa748Cc44E9A437_v1_3_1
from .validators.v1_3_3.jsd_00a2fa6146089317 \
import JSONSchemaValidator00A2Fa6146089317 \
as JSONSchemaValidator00A2Fa6146089317_v1_3_3
from .validators.v1_3_3.jsd_00aec9b1422ab27e \
import JSONSchemaValidator00AeC9B1422AB27E \
as JSONSchemaValidator00AeC9B1422AB27E_v1_3_3
from .validators.v1_3_3.jsd_039de8b147a98690 \
import JSONSchemaValidator039DE8B147A98690 \
as JSONSchemaValidator039DE8B147A98690_v1_3_3
from .validators.v1_3_3.jsd_03b4c8b44919b964 \
import JSONSchemaValidator03B4C8B44919B964 \
as JSONSchemaValidator03B4C8B44919B964_v1_3_3
from .validators.v1_3_3.jsd_069d9823451b892d \
import JSONSchemaValidator069D9823451B892D \
as JSONSchemaValidator069D9823451B892D_v1_3_3
from .validators.v1_3_3.jsd_07874a4c4c9aabd9 \
import JSONSchemaValidator07874A4C4C9AAbd9 \
as JSONSchemaValidator07874A4C4C9AAbd9_v1_3_3
from .validators.v1_3_3.jsd_098cab9141c9a3fe \
import JSONSchemaValidator098CAb9141C9A3Fe \
as JSONSchemaValidator098CAb9141C9A3Fe_v1_3_3
from .validators.v1_3_3.jsd_09b0f9ce4239ae10 \
import JSONSchemaValidator09B0F9Ce4239Ae10 \
as JSONSchemaValidator09B0F9Ce4239Ae10_v1_3_3
from .validators.v1_3_3.jsd_0a9c988445cb91c8 \
import JSONSchemaValidator0A9C988445Cb91C8 \
as JSONSchemaValidator0A9C988445Cb91C8_v1_3_3
from .validators.v1_3_3.jsd_0b836b7b4b6a9fd5 \
import JSONSchemaValidator0B836B7B4B6A9Fd5 \
as JSONSchemaValidator0B836B7B4B6A9Fd5_v1_3_3
from .validators.v1_3_3.jsd_0c8f7a0b49b9aedd \
import JSONSchemaValidator0C8F7A0B49B9Aedd \
as JSONSchemaValidator0C8F7A0B49B9Aedd_v1_3_3
from .validators.v1_3_3.jsd_0db7da744c0b83d8 \
import JSONSchemaValidator0Db7Da744C0B83D8 \
as JSONSchemaValidator0Db7Da744C0B83D8_v1_3_3
from .validators.v1_3_3.jsd_109d1b4f4289aecd \
import JSONSchemaValidator109D1B4F4289Aecd \
as JSONSchemaValidator109D1B4F4289Aecd_v1_3_3
from .validators.v1_3_3.jsd_10b06a6a4f7bb3cb \
import JSONSchemaValidator10B06A6A4F7BB3Cb \
as JSONSchemaValidator10B06A6A4F7BB3Cb_v1_3_3
from .validators.v1_3_3.jsd_138518e14069ab5f \
import JSONSchemaValidator138518E14069Ab5F \
as JSONSchemaValidator138518E14069Ab5F_v1_3_3
from .validators.v1_3_3.jsd_1399891c42a8be64 \
import JSONSchemaValidator1399891C42A8Be64 \
as JSONSchemaValidator1399891C42A8Be64_v1_3_3
from .validators.v1_3_3.jsd_149aa93b4ddb80dd \
import JSONSchemaValidator149AA93B4Ddb80Dd \
as JSONSchemaValidator149AA93B4Ddb80Dd_v1_3_3
from .validators.v1_3_3.jsd_149b7ba04e5890b2 \
import JSONSchemaValidator149B7Ba04E5890B2 \
as JSONSchemaValidator149B7Ba04E5890B2_v1_3_3
from .validators.v1_3_3.jsd_15b7aa0c4dda8e85 \
import JSONSchemaValidator15B7Aa0C4Dda8E85 \
as JSONSchemaValidator15B7Aa0C4Dda8E85_v1_3_3
from .validators.v1_3_3.jsd_16a1bb5d48cb873d \
import JSONSchemaValidator16A1Bb5D48Cb873D \
as JSONSchemaValidator16A1Bb5D48Cb873D_v1_3_3
from .validators.v1_3_3.jsd_17929bc7465bb564 \
import JSONSchemaValidator17929Bc7465BB564 \
as JSONSchemaValidator17929Bc7465BB564_v1_3_3
from .validators.v1_3_3.jsd_1c894b5848eab214 \
import JSONSchemaValidator1C894B5848EaB214 \
as JSONSchemaValidator1C894B5848EaB214_v1_3_3
from .validators.v1_3_3.jsd_1da5ebdd434aacfe \
import JSONSchemaValidator1Da5Ebdd434AAcfe \
as JSONSchemaValidator1Da5Ebdd434AAcfe_v1_3_3
from .validators.v1_3_3.jsd_1e962af345b8b59f \
import JSONSchemaValidator1E962Af345B8B59F \
as JSONSchemaValidator1E962Af345B8B59F_v1_3_3
from .validators.v1_3_3.jsd_1eaa8b2148ab81de \
import JSONSchemaValidator1Eaa8B2148Ab81De \
as JSONSchemaValidator1Eaa8B2148Ab81De_v1_3_3
from .validators.v1_3_3.jsd_1eb72ad34e098990 \
import JSONSchemaValidator1Eb72Ad34E098990 \
as JSONSchemaValidator1Eb72Ad34E098990_v1_3_3
from .validators.v1_3_3.jsd_1fb8f9f24c998133 \
import JSONSchemaValidator1Fb8F9F24C998133 \
as JSONSchemaValidator1Fb8F9F24C998133_v1_3_3
from .validators.v1_3_3.jsd_208579ea4ed98f4f \
import JSONSchemaValidator208579Ea4Ed98F4F \
as JSONSchemaValidator208579Ea4Ed98F4F_v1_3_3
from .validators.v1_3_3.jsd_20b19b52464b8972 \
import JSONSchemaValidator20B19B52464B8972 \
as JSONSchemaValidator20B19B52464B8972_v1_3_3
from .validators.v1_3_3.jsd_21a6db2540298f55 \
import JSONSchemaValidator21A6Db2540298F55 \
as JSONSchemaValidator21A6Db2540298F55_v1_3_3
from .validators.v1_3_3.jsd_2499e9ad42e8ae5b \
import JSONSchemaValidator2499E9Ad42E8Ae5B \
as JSONSchemaValidator2499E9Ad42E8Ae5B_v1_3_3
from .validators.v1_3_3.jsd_259eab3045988958 \
import JSONSchemaValidator259EAb3045988958 \
as JSONSchemaValidator259EAb3045988958_v1_3_3
from .validators.v1_3_3.jsd_26b44ab04649a183 \
import JSONSchemaValidator26B44Ab04649A183 \
as JSONSchemaValidator26B44Ab04649A183_v1_3_3
from .validators.v1_3_3.jsd_288df9494f2a9746 \
import JSONSchemaValidator288DF9494F2A9746 \
as JSONSchemaValidator288DF9494F2A9746_v1_3_3
from .validators.v1_3_3.jsd_28b24a744a9994be \
import JSONSchemaValidator28B24A744A9994Be \
as JSONSchemaValidator28B24A744A9994Be_v1_3_3
from .validators.v1_3_3.jsd_2e9db85840fbb1cf \
import JSONSchemaValidator2E9DB85840FbB1Cf \
as JSONSchemaValidator2E9DB85840FbB1Cf_v1_3_3
from .validators.v1_3_3.jsd_2eb1fa1e49caa2b4 \
import JSONSchemaValidator2Eb1Fa1E49CaA2B4 \
as JSONSchemaValidator2Eb1Fa1E49CaA2B4_v1_3_3
from .validators.v1_3_3.jsd_2f97e8fa45f8b2a3 \
import JSONSchemaValidator2F97E8Fa45F8B2A3 \
as JSONSchemaValidator2F97E8Fa45F8B2A3_v1_3_3
from .validators.v1_3_3.jsd_3086c9624f498b85 \
import JSONSchemaValidator3086C9624F498B85 \
as JSONSchemaValidator3086C9624F498B85_v1_3_3
from .validators.v1_3_3.jsd_33b799d04d0a8907 \
import JSONSchemaValidator33B799D04D0A8907 \
as JSONSchemaValidator33B799D04D0A8907_v1_3_3
from .validators.v1_3_3.jsd_33bb2b9d40199e14 \
import JSONSchemaValidator33Bb2B9D40199E14 \
as JSONSchemaValidator33Bb2B9D40199E14_v1_3_3
from .validators.v1_3_3.jsd_349c888443b89a58 \
import JSONSchemaValidator349C888443B89A58 \
as JSONSchemaValidator349C888443B89A58_v1_3_3
from .validators.v1_3_3.jsd_38b7eb13449b9471 \
import JSONSchemaValidator38B7Eb13449B9471 \
as JSONSchemaValidator38B7Eb13449B9471_v1_3_3
from .validators.v1_3_3.jsd_38bd0b884b89a785 \
import JSONSchemaValidator38Bd0B884B89A785 \
as JSONSchemaValidator38Bd0B884B89A785_v1_3_3
from .validators.v1_3_3.jsd_398668874439a41d \
import JSONSchemaValidator398668874439A41D \
as JSONSchemaValidator398668874439A41D_v1_3_3
from .validators.v1_3_3.jsd_3b9ef9674429be4c \
import JSONSchemaValidator3B9EF9674429Be4C \
as JSONSchemaValidator3B9EF9674429Be4C_v1_3_3
from .validators.v1_3_3.jsd_3cb24acb486b89d2 \
import JSONSchemaValidator3Cb24Acb486B89D2 \
as JSONSchemaValidator3Cb24Acb486B89D2_v1_3_3
from .validators.v1_3_3.jsd_3d923b184dc9a4ca \
import JSONSchemaValidator3D923B184Dc9A4Ca \
as JSONSchemaValidator3D923B184Dc9A4Ca_v1_3_3
from .validators.v1_3_3.jsd_3d9b99c343398a27 \
import JSONSchemaValidator3D9B99C343398A27 \
as JSONSchemaValidator3D9B99C343398A27_v1_3_3
from .validators.v1_3_3.jsd_3e94cb1b485b8b0e \
import JSONSchemaValidator3E94Cb1B485B8B0E \
as JSONSchemaValidator3E94Cb1B485B8B0E_v1_3_3
from .validators.v1_3_3.jsd_3ebcda3e4acbafb7 \
import JSONSchemaValidator3EbcDa3E4AcbAfb7 \
as JSONSchemaValidator3EbcDa3E4AcbAfb7_v1_3_3
from .validators.v1_3_3.jsd_3f89bbfc4f6b8b50 \
import JSONSchemaValidator3F89Bbfc4F6B8B50 \
as JSONSchemaValidator3F89Bbfc4F6B8B50_v1_3_3
from .validators.v1_3_3.jsd_429c28154bdaa13d \
import JSONSchemaValidator429C28154BdaA13D \
as JSONSchemaValidator429C28154BdaA13D_v1_3_3
from .validators.v1_3_3.jsd_42b6a86e44b8bdfc \
import JSONSchemaValidator42B6A86E44B8Bdfc \
as JSONSchemaValidator42B6A86E44B8Bdfc_v1_3_3
from .validators.v1_3_3.jsd_44974ba5435a801d \
import JSONSchemaValidator44974Ba5435A801D \
as JSONSchemaValidator44974Ba5435A801D_v1_3_3
from .validators.v1_3_3.jsd_44a39a074a6a82a2 \
import JSONSchemaValidator44A39A074A6A82A2 \
as JSONSchemaValidator44A39A074A6A82A2_v1_3_3
from .validators.v1_3_3.jsd_45bc7a8344a8bc1e \
import JSONSchemaValidator45Bc7A8344A8Bc1E \
as JSONSchemaValidator45Bc7A8344A8Bc1E_v1_3_3
from .validators.v1_3_3.jsd_4695090d403b8eaa \
import JSONSchemaValidator4695090D403B8Eaa \
as JSONSchemaValidator4695090D403B8Eaa_v1_3_3
from .validators.v1_3_3.jsd_47a1b84b4e1b8044 \
import JSONSchemaValidator47A1B84B4E1B8044 \
as JSONSchemaValidator47A1B84B4E1B8044_v1_3_3
from .validators.v1_3_3.jsd_4bb22af046fa8f08 \
import JSONSchemaValidator4Bb22Af046Fa8F08 \
as JSONSchemaValidator4Bb22Af046Fa8F08_v1_3_3
from .validators.v1_3_3.jsd_4c8cab5f435a80f4 \
import JSONSchemaValidator4C8CAb5F435A80F4 \
as JSONSchemaValidator4C8CAb5F435A80F4_v1_3_3
from .validators.v1_3_3.jsd_4ca2db1143ebb5d7 \
import JSONSchemaValidator4Ca2Db1143EbB5D7 \
as JSONSchemaValidator4Ca2Db1143EbB5D7_v1_3_3
from .validators.v1_3_3.jsd_4d86a993469a9da9 \
import JSONSchemaValidator4D86A993469A9Da9 \
as JSONSchemaValidator4D86A993469A9Da9_v1_3_3
from .validators.v1_3_3.jsd_4d9ca8e2431a8a24 \
import JSONSchemaValidator4D9CA8E2431A8A24 \
as JSONSchemaValidator4D9CA8E2431A8A24_v1_3_3
from .validators.v1_3_3.jsd_4da91a544e29842d \
import JSONSchemaValidator4Da91A544E29842D \
as JSONSchemaValidator4Da91A544E29842D_v1_3_3
from .validators.v1_3_3.jsd_4dbe3bc743a891bc \
import JSONSchemaValidator4Dbe3Bc743A891Bc \
as JSONSchemaValidator4Dbe3Bc743A891Bc_v1_3_3
from .validators.v1_3_3.jsd_4eb56a614cc9a2d2 \
import JSONSchemaValidator4Eb56A614Cc9A2D2 \
as JSONSchemaValidator4Eb56A614Cc9A2D2_v1_3_3
from .validators.v1_3_3.jsd_4f947a1c4fc884f6 \
import JSONSchemaValidator4F947A1C4Fc884F6 \
as JSONSchemaValidator4F947A1C4Fc884F6_v1_3_3
from .validators.v1_3_3.jsd_4f9f7a7b40f990de \
import JSONSchemaValidator4F9F7A7B40F990De \
as JSONSchemaValidator4F9F7A7B40F990De_v1_3_3
from .validators.v1_3_3.jsd_50864acf4ad8b54d \
import JSONSchemaValidator50864Acf4Ad8B54D \
as JSONSchemaValidator50864Acf4Ad8B54D_v1_3_3
from .validators.v1_3_3.jsd_5087daae4cc98566 \
import JSONSchemaValidator5087Daae4Cc98566 \
as JSONSchemaValidator5087Daae4Cc98566_v1_3_3
from .validators.v1_3_3.jsd_5097f8d445f98f51 \
import JSONSchemaValidator5097F8D445F98F51 \
as JSONSchemaValidator5097F8D445F98F51_v1_3_3
from .validators.v1_3_3.jsd_50b589fd4c7a930a \
import JSONSchemaValidator50B589Fd4C7A930A \
as JSONSchemaValidator50B589Fd4C7A930A_v1_3_3
from .validators.v1_3_3.jsd_518c59cd441aa9fc \
import JSONSchemaValidator518C59Cd441AA9Fc \
as JSONSchemaValidator518C59Cd441AA9Fc_v1_3_3
from .validators.v1_3_3.jsd_549e4aff42bbb52a \
import JSONSchemaValidator549E4Aff42BbB52A \
as JSONSchemaValidator549E4Aff42BbB52A_v1_3_3
from .validators.v1_3_3.jsd_55b439dc4239b140 \
import JSONSchemaValidator55B439Dc4239B140 \
as JSONSchemaValidator55B439Dc4239B140_v1_3_3
from .validators.v1_3_3.jsd_55bc3bf94e38b6ff \
import JSONSchemaValidator55Bc3Bf94E38B6Ff \
as JSONSchemaValidator55Bc3Bf94E38B6Ff_v1_3_3
from .validators.v1_3_3.jsd_579a6a7248cb94cf \
import JSONSchemaValidator579A6A7248Cb94Cf \
as JSONSchemaValidator579A6A7248Cb94Cf_v1_3_3
from .validators.v1_3_3.jsd_5889fb844939a13b \
import JSONSchemaValidator5889Fb844939A13B \
as JSONSchemaValidator5889Fb844939A13B_v1_3_3
from .validators.v1_3_3.jsd_58a3699e489b9529 \
import JSONSchemaValidator58A3699E489B9529 \
as JSONSchemaValidator58A3699E489B9529_v1_3_3
from .validators.v1_3_3.jsd_5b8639224cd88ea7 \
import JSONSchemaValidator5B8639224Cd88Ea7 \
as JSONSchemaValidator5B8639224Cd88Ea7_v1_3_3
from .validators.v1_3_3.jsd_5db21b8e43fab7d8 \
import JSONSchemaValidator5Db21B8E43FaB7D8 \
as JSONSchemaValidator5Db21B8E43FaB7D8_v1_3_3
from .validators.v1_3_3.jsd_6099da82477b858a \
import JSONSchemaValidator6099Da82477B858A \
as JSONSchemaValidator6099Da82477B858A_v1_3_3
from .validators.v1_3_3.jsd_6284db4649aa8d31 \
import JSONSchemaValidator6284Db4649Aa8D31 \
as JSONSchemaValidator6284Db4649Aa8D31_v1_3_3
from .validators.v1_3_3.jsd_62b05b2c40a9b216 \
import JSONSchemaValidator62B05B2C40A9B216 \
as JSONSchemaValidator62B05B2C40A9B216_v1_3_3
from .validators.v1_3_3.jsd_63bb88b74f59aa17 \
import JSONSchemaValidator63Bb88B74F59Aa17 \
as JSONSchemaValidator63Bb88B74F59Aa17_v1_3_3
from .validators.v1_3_3.jsd_698bfbb44dcb9fca \
import JSONSchemaValidator698BFbb44Dcb9Fca \
as JSONSchemaValidator698BFbb44Dcb9Fca_v1_3_3
from .validators.v1_3_3.jsd_6a9edac149ba86cf \
import JSONSchemaValidator6A9EDac149Ba86Cf \
as JSONSchemaValidator6A9EDac149Ba86Cf_v1_3_3
from .validators.v1_3_3.jsd_6bacb8d14639bdc7 \
import JSONSchemaValidator6BacB8D14639Bdc7 \
as JSONSchemaValidator6BacB8D14639Bdc7_v1_3_3
from .validators.v1_3_3.jsd_6db9292d4f28a26b \
import JSONSchemaValidator6Db9292D4F28A26B \
as JSONSchemaValidator6Db9292D4F28A26B_v1_3_3
from .validators.v1_3_3.jsd_6f9819e84178870c \
import JSONSchemaValidator6F9819E84178870C \
as JSONSchemaValidator6F9819E84178870C_v1_3_3
from .validators.v1_3_3.jsd_6f9cda9a465884b4 \
import JSONSchemaValidator6F9CDa9A465884B4 \
as JSONSchemaValidator6F9CDa9A465884B4_v1_3_3
from .validators.v1_3_3.jsd_6fb4ab3643faa80f \
import JSONSchemaValidator6Fb4Ab3643FaA80F \
as JSONSchemaValidator6Fb4Ab3643FaA80F_v1_3_3
from .validators.v1_3_3.jsd_70847bdc4d89a437 \
import JSONSchemaValidator70847Bdc4D89A437 \
as JSONSchemaValidator70847Bdc4D89A437_v1_3_3
from .validators.v1_3_3.jsd_709769624bf988d5 \
import JSONSchemaValidator709769624Bf988D5 \
as JSONSchemaValidator709769624Bf988D5_v1_3_3
from .validators.v1_3_3.jsd_709fda3c42b8877a \
import JSONSchemaValidator709FDa3C42B8877A \
as JSONSchemaValidator709FDa3C42B8877A_v1_3_3
from .validators.v1_3_3.jsd_70a479a6462a9496 \
import JSONSchemaValidator70A479A6462A9496 \
as JSONSchemaValidator70A479A6462A9496_v1_3_3
from .validators.v1_3_3.jsd_70ad397649e9b4d3 \
import JSONSchemaValidator70Ad397649E9B4D3 \
as JSONSchemaValidator70Ad397649E9B4D3_v1_3_3
from .validators.v1_3_3.jsd_70b6f8e140b8b784 \
import JSONSchemaValidator70B6F8E140B8B784 \
as JSONSchemaValidator70B6F8E140B8B784_v1_3_3
from .validators.v1_3_3.jsd_7683f90b4efab090 \
import JSONSchemaValidator7683F90B4EfaB090 \
as JSONSchemaValidator7683F90B4EfaB090_v1_3_3
from .validators.v1_3_3.jsd_7781fa0548a98342 \
import JSONSchemaValidator7781Fa0548A98342 \
as JSONSchemaValidator7781Fa0548A98342_v1_3_3
from .validators.v1_3_3.jsd_7989f86846faaf99 \
import JSONSchemaValidator7989F86846FaAf99 \
as JSONSchemaValidator7989F86846FaAf99_v1_3_3
from .validators.v1_3_3.jsd_7aa3da9d4e098ef2 \
import JSONSchemaValidator7Aa3Da9D4E098Ef2 \
as JSONSchemaValidator7Aa3Da9D4E098Ef2_v1_3_3
from .validators.v1_3_3.jsd_7ab9a8bd4f3b86a4 \
import JSONSchemaValidator7Ab9A8Bd4F3B86A4 \
as JSONSchemaValidator7Ab9A8Bd4F3B86A4_v1_3_3
from .validators.v1_3_3.jsd_7e92f9eb46db8320 \
import JSONSchemaValidator7E92F9Eb46Db8320 \
as JSONSchemaValidator7E92F9Eb46Db8320_v1_3_3
from .validators.v1_3_3.jsd_8091a9b84bfba53b \
import JSONSchemaValidator8091A9B84BfbA53B \
as JSONSchemaValidator8091A9B84BfbA53B_v1_3_3
from .validators.v1_3_3.jsd_80acb88e4ac9ac6d \
import JSONSchemaValidator80AcB88E4Ac9Ac6D \
as JSONSchemaValidator80AcB88E4Ac9Ac6D_v1_3_3
from .validators.v1_3_3.jsd_80b7f8e6406a8701 \
import JSONSchemaValidator80B7F8E6406A8701 \
as JSONSchemaValidator80B7F8E6406A8701_v1_3_3
from .validators.v1_3_3.jsd_819f9aa54feab7bf \
import JSONSchemaValidator819F9Aa54FeaB7Bf \
as JSONSchemaValidator819F9Aa54FeaB7Bf_v1_3_3
from .validators.v1_3_3.jsd_81bb4804405a8d2f \
import JSONSchemaValidator81Bb4804405A8D2F \
as JSONSchemaValidator81Bb4804405A8D2F_v1_3_3
from .validators.v1_3_3.jsd_82918a1b4d289c5c \
import JSONSchemaValidator82918A1B4D289C5C \
as JSONSchemaValidator82918A1B4D289C5C_v1_3_3
from .validators.v1_3_3.jsd_83a3b9404cb88787 \
import JSONSchemaValidator83A3B9404Cb88787 \
as JSONSchemaValidator83A3B9404Cb88787_v1_3_3
from .validators.v1_3_3.jsd_848b5a7b4f9b8c12 \
import JSONSchemaValidator848B5A7B4F9B8C12 \
as JSONSchemaValidator848B5A7B4F9B8C12_v1_3_3
from .validators.v1_3_3.jsd_84ad8b0e42cab48a \
import JSONSchemaValidator84Ad8B0E42CaB48A \
as JSONSchemaValidator84Ad8B0E42CaB48A_v1_3_3
from .validators.v1_3_3.jsd_84b33a9e480abcaf \
import JSONSchemaValidator84B33A9E480ABcaf \
as JSONSchemaValidator84B33A9E480ABcaf_v1_3_3
from .validators.v1_3_3.jsd_84b37ae54c59ab28 \
import JSONSchemaValidator84B37Ae54C59Ab28 \
as JSONSchemaValidator84B37Ae54C59Ab28_v1_3_3
from .validators.v1_3_3.jsd_868439bb4e89a6e4 \
import JSONSchemaValidator868439Bb4E89A6E4 \
as JSONSchemaValidator868439Bb4E89A6E4_v1_3_3
from .validators.v1_3_3.jsd_87a5ab044139862d \
import JSONSchemaValidator87A5Ab044139862D \
as JSONSchemaValidator87A5Ab044139862D_v1_3_3
from .validators.v1_3_3.jsd_87a8ba444ce9bc59 \
import JSONSchemaValidator87A8Ba444Ce9Bc59 \
as JSONSchemaValidator87A8Ba444Ce9Bc59_v1_3_3
from .validators.v1_3_3.jsd_888f585c49b88441 \
import JSONSchemaValidator888F585C49B88441 \
as JSONSchemaValidator888F585C49B88441_v1_3_3
from .validators.v1_3_3.jsd_8893b834445bb29c \
import JSONSchemaValidator8893B834445BB29C \
as JSONSchemaValidator8893B834445BB29C_v1_3_3
from .validators.v1_3_3.jsd_8984ea7744d98a54 \
import JSONSchemaValidator8984Ea7744D98A54 \
as JSONSchemaValidator8984Ea7744D98A54_v1_3_3
from .validators.v1_3_3.jsd_899f08e7401b82dd \
import JSONSchemaValidator899F08E7401B82Dd \
as JSONSchemaValidator899F08E7401B82Dd_v1_3_3
from .validators.v1_3_3.jsd_89b2fb144f5bb09b \
import JSONSchemaValidator89B2Fb144F5BB09B \
as JSONSchemaValidator89B2Fb144F5BB09B_v1_3_3
from .validators.v1_3_3.jsd_89b36b4649999d81 \
import JSONSchemaValidator89B36B4649999D81 \
as JSONSchemaValidator89B36B4649999D81_v1_3_3
from .validators.v1_3_3.jsd_8a96fb954d09a349 \
import JSONSchemaValidator8A96Fb954D09A349 \
as JSONSchemaValidator8A96Fb954D09A349_v1_3_3
from .validators.v1_3_3.jsd_8a9d2b76443b914e \
import JSONSchemaValidator8A9D2B76443B914E \
as JSONSchemaValidator8A9D2B76443B914E_v1_3_3
from .validators.v1_3_3.jsd_8b908a4e4c5a9a23 \
import JSONSchemaValidator8B908A4E4C5A9A23 \
as JSONSchemaValidator8B908A4E4C5A9A23_v1_3_3
from .validators.v1_3_3.jsd_8cb6783b4faba1f4 \
import JSONSchemaValidator8Cb6783B4FabA1F4 \
as JSONSchemaValidator8Cb6783B4FabA1F4_v1_3_3
from .validators.v1_3_3.jsd_8da0391947088a5a \
import JSONSchemaValidator8Da0391947088A5A \
as JSONSchemaValidator8Da0391947088A5A_v1_3_3
from .validators.v1_3_3.jsd_8db939744649a782 \
import JSONSchemaValidator8Db939744649A782 \
as JSONSchemaValidator8Db939744649A782_v1_3_3
from .validators.v1_3_3.jsd_8f93dbe54b2aa1fd \
import JSONSchemaValidator8F93Dbe54B2AA1Fd \
as JSONSchemaValidator8F93Dbe54B2AA1Fd_v1_3_3
from .validators.v1_3_3.jsd_8fa8eb404a4a8d96 \
import JSONSchemaValidator8Fa8Eb404A4A8D96 \
as JSONSchemaValidator8Fa8Eb404A4A8D96_v1_3_3
from .validators.v1_3_3.jsd_93981baa40799483 \
import JSONSchemaValidator93981Baa40799483 \
as JSONSchemaValidator93981Baa40799483_v1_3_3
from .validators.v1_3_3.jsd_9480fa1f47ca9254 \
import JSONSchemaValidator9480Fa1F47Ca9254 \
as JSONSchemaValidator9480Fa1F47Ca9254_v1_3_3
from .validators.v1_3_3.jsd_948ea8194348bc0b \
import JSONSchemaValidator948EA8194348Bc0B \
as JSONSchemaValidator948EA8194348Bc0B_v1_3_3
from .validators.v1_3_3.jsd_9582ab824ce8b29d \
import JSONSchemaValidator9582Ab824Ce8B29D \
as JSONSchemaValidator9582Ab824Ce8B29D_v1_3_3
from .validators.v1_3_3.jsd_9788b8fc4418831d \
import JSONSchemaValidator9788B8Fc4418831D \
as JSONSchemaValidator9788B8Fc4418831D_v1_3_3
from
def __call__(self, string):
return IntegerRangeType(1, 7450 if self.gib else 8000)(string)
def SolidFireMinIOPSType(string):
return IntegerRangeType(50, 15000)(string)
def SolidFireMaxIOPSType(string):
return IntegerRangeType(100, 100000)(string)
def SolidFireBurstIOPSType(string):
return IntegerRangeType(100, 100000)(string)
class CountType(object):
"""Type for validating a count of something"""
def __init__(self, allowZero=False):
if allowZero:
self.minval = 0
else:
self.minval = 1
def __call__(self, string):
return IntegerRangeType(self.minval)(string)
class IntegerRangeType(object):
"""Type for validating an integer within a range of values, inclusive"""
def __init__(self, minValue=None, maxValue=None):
self.minValue = None
self.maxValue = None
if minValue is not None:
self.minValue = int(minValue)
if maxValue is not None:
self.maxValue = int(maxValue)
def __call__(self, string):
try:
number = int(string)
except (TypeError, ValueError):
raise InvalidArgumentError("{} is not a valid integer".format(string))
if self.minValue is not None and number < self.minValue:
raise InvalidArgumentError("{} must be >= {}".format(number, self.minValue))
if self.maxValue is not None and number > self.maxValue:
raise InvalidArgumentError("{} must be <= {}".format(number, self.maxValue))
return number
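# Illustrative usage of the range validators (the argparse wiring shown here is an
# assumption for context and is not part of this module):
#   parser.add_argument("--burst-iops", type=SolidFireBurstIOPSType)
#   parser.add_argument("--count", type=CountType(allowZero=True))
#   IntegerRangeType(1, 10)("5")   # -> 5
#   IntegerRangeType(1, 10)("42")  # -> raises InvalidArgumentError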
def PositiveIntegerType(string):
"""Type for validating integers"""
errormsg = "{} is not a positive integer".format(string)
try:
number = int(string)
except (TypeError, ValueError):
raise InvalidArgumentError(errormsg)
if number < 0:
raise InvalidArgumentError(errormsg)
return number
def PositiveNonZeroIntegerType(string):
"""Type for validating integers"""
errormsg = "{} is not a positive non-zero integer".format(string)
try:
number = int(string)
except (TypeError, ValueError):
raise InvalidArgumentError(errormsg)
if number <= 0:
raise InvalidArgumentError(errormsg)
return number
def VLANTagType(string):
"""Type for validating VLAN tags"""
errormsg = "{} is not a valid VLAN tag".format(string)
try:
tag = int(string)
except (TypeError, ValueError):
raise InvalidArgumentError(errormsg)
if tag < 1 or tag > 4095:
raise InvalidArgumentError(errormsg)
return tag
def MACAddressType(string):
"""Type for validating MAC address"""
errormsg = "{} is not a valid MAC address".format(string)
if not _re.match("[0-9a-f]{2}([-:])[0-9a-f]{2}(\\1[0-9a-f]{2}){4}$", string):
raise InvalidArgumentError(errormsg)
return string.lower()
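# Illustrative examples (addresses made up):
#   MACAddressType("00:1b:21:3c:4d:5e")  # -> "00:1b:21:3c:4d:5e"
#   MACAddressType("00-1b-21-3c-4d-5e")  # -> accepted, dash separators also match
#   MACAddressType("001b213c4d5e")       # -> raises InvalidArgumentError (separators required)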
def RegexType(string):
"""Type for validating regexes"""
try:
_re.compile(string)
except _re.error:
raise InvalidArgumentError("Invalid regex")
return string
def GetPrettiestTypeName(typeToName):
"""Get the best human representation of a type"""
if typeToName is None:
return "Any"
typename = repr(typeToName)
# Hacky
if typename.startswith("<"):
typename = getattr(typeToName, "__name__", str(typeToName))
return typename
# ===================================================================================================
# Time manipulation
# ===================================================================================================
class LocalTimezone(_datetime.tzinfo):
"""Class representing the time zone of the machine we are currently running on"""
def __init__(self):
super(LocalTimezone, self).__init__()
self.STDOFFSET = _datetime.timedelta(seconds = -_time.timezone)
if _time.daylight:
self.DSTOFFSET = _datetime.timedelta(seconds = -_time.altzone)
else:
self.DSTOFFSET = self.STDOFFSET
self.DSTDIFF = self.DSTOFFSET - self.STDOFFSET
def utcoffset(self, dt):
if self._isdst(dt):
return self.DSTOFFSET
else:
return self.STDOFFSET
def dst(self, dt):
if self._isdst(dt):
return self.DSTDIFF
else:
return _datetime.timedelta(0)
def tzname(self, dt):
return _time.tzname[self._isdst(dt)]
def _isdst(self, dt):
"""check if this _timezone is in daylight savings _time"""
tt = (dt.year, dt.month, dt.day,
dt.hour, dt.minute, dt.second,
dt.weekday(), 0, 0)
stamp = _time.mktime(tt)
tt = _time.localtime(stamp)
return tt.tm_isdst > 0
#pylint: disable=unused-argument
class UTCTimezone(_datetime.tzinfo):
"""Class representing UTC time"""
def utcoffset(self, dt):
return _datetime.timedelta(0)
def tzname(self, dt):
return "UTC"
def dst(self, dt):
return _datetime.timedelta(0)
#pylint: enable=unused-argument
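# Illustrative usage (not from the original source): both tzinfo subclasses can be
# passed anywhere a timezone is expected, e.g.
#   now_utc = _datetime.datetime.now(UTCTimezone())
#   now_local = _datetime.datetime.now(LocalTimezone())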
def ParseDateTime(timeString):
"""
Parse a string into a _datetime
Args:
timeString: the string containing a parsable date/time
Returns:
A datetime object corresponding to the date/time in the string
"""
known_formats = [
"%Y-%m-%d %H:%M:%S.%f", # old sf format
"%Y-%m-%dT%H:%M:%S.%fZ", # ISO format with UTC timezone
"%Y-%m-%dT%H:%M:%SZ", # ISO format without ffractional seconds and UTC timezone
"%Y%m%dT%H:%M:%SZ",
"%b %d %H:%M:%S" # syslog/date format
]
parsed = None
for fmt in known_formats:
try:
parsed = _datetime.datetime.strptime(timeString, fmt)
break
except ValueError: pass
if parsed is not None and parsed.year == 1900:
parsed = parsed.replace(year=_datetime.datetime.now().year)
return parsed
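# Illustrative examples (values made up); the result is a naive datetime:
#   ParseDateTime("2020-03-01T12:30:45.123456Z")  # -> datetime(2020, 3, 1, 12, 30, 45, 123456)
#   ParseDateTime("Mar 1 12:30:45")               # -> syslog style; the year defaults to the current year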
def ParseTimestamp(timeString):
"""
Parse a string into a unix timestamp
Args:
timeString: the string containing a parsable date/time
Returns:
An integer timestamp corresponding to the date/time in the string
"""
date_obj = ParseDateTime(timeString)
if (date_obj != None):
timestamp = _calendar.timegm(date_obj.timetuple())
if timestamp <= 0:
timestamp = _calendar.timegm(date_obj.utctimetuple())
return timestamp
else:
return 0
def ParseTimestampHiRes(timeString):
"""
Parse a string into a unix timestamp with floating point microseconds
Args:
timeString: the string containing a parsable date/time
Returns:
A floating point timestamp corresponding to the date/time in the string
"""
date_obj = ParseDateTime(timeString)
if (date_obj != None):
return (_calendar.timegm(date_obj.timetuple()) + date_obj.microsecond / 1000000.0)
else:
return 0
def TimestampToStr(timestamp, formatString = "%Y-%m-%d %H:%M:%S", timeZone = LocalTimezone()):
"""
Convert a timestamp to a human readable string
Args:
timestamp: the timestamp to convert
formatString: the format to convert to
timeZone: the time zone to convert to
Returns:
A string containing the date/time in the requested format and time zone
"""
display_time = _datetime.datetime.fromtimestamp(timestamp, timeZone)
return display_time.strftime(formatString)
def SecondsToElapsedStr(seconds):
"""
Convert an integer number of seconds into elapsed time format (D-HH:MM:SS)
Args:
seconds: the total number of seconds (int)
Returns:
A formatted elapsed time (str)
"""
if isinstance(seconds, six.string_types):
return seconds
delta = _datetime.timedelta(seconds=seconds)
return TimeDeltaToStr(delta)
def TimeDeltaToStr(timeDelta):
"""
Convert a timedelta object to an elapsed time format (D-HH:MM:SS)
Args:
timeDelta: a timedelta object containing the total time (datetime.timedelta)
Returns:
Formatted elapsed time (str)
"""
days = timeDelta.days
hours = 0
minutes = 0
seconds = timeDelta.seconds
if seconds >= 60:
d,r = divmod(seconds, 60)
minutes = d
seconds = r
if minutes >= 60:
d,r = divmod(minutes, 60)
hours = d
minutes = r
time_str = "%02d:%02d" % (minutes, seconds)
if (hours > 0):
time_str = "%02d:%02d:%02d" % (hours, minutes, seconds)
if (days > 0):
time_str = "%d-%02d:%02d:%02d" % (days, hours, minutes, seconds)
return time_str
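# Illustrative examples (values made up):
#   SecondsToElapsedStr(93784)                                           # -> "1-02:03:04"
#   TimeDeltaToStr(_datetime.timedelta(hours=2, minutes=3, seconds=4))   # -> "02:03:04"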
# ===================================================================================================
# Pretty formatting
# ===================================================================================================
def HumanizeBytes(totalBytes, precision=1, suffix=None):
"""
Convert a number of bytes into the appropriate pretty kiB, MiB, etc.
Args:
totalBytes: the number to convert
precision: how many decimal numbers of precision to preserve
suffix: use this suffix (kiB, MiB, etc.) instead of automatically determining it
Returns:
The prettified string version of the input
"""
if (totalBytes == None):
return "0 B"
converted = float(totalBytes)
suffix_index = 0
suffix_list = ['B', 'kiB', 'MiB', 'GiB', 'TiB']
while (abs(converted) >= 1000):
converted /= 1024.0
suffix_index += 1
if suffix_list[suffix_index] == suffix:
break
return "{0:.{1}f} {2}".format(converted, precision, suffix_list[suffix_index])
def HumanizeDecimal(number, precision=1, suffix=None):
"""
Convert a number into the appropriate pretty k, M, G, etc.
Args:
number: the number to convert
precision: how many decimal numbers of precision to preserve
suffix: use this suffix (k, M, etc.) instead of automatically determining it
Returns:
The prettified string version of the input
"""
if (number == None):
return "0"
if (abs(number) < 1000):
return str(number)
converted = float(number)
suffix_index = 0
suffix_list = [' ', 'k', 'M', 'G', 'T']
while (abs(converted) >= 1000):
converted /= 1000.0
suffix_index += 1
if suffix_list[suffix_index] == suffix: break
return "{:.{}f {}}".format(converted, precision, suffix_list[suffix_index])
def HumanizeWWN(hexWWN):
"""Convert a hex WWN (0x10000090fa34ad72) to a pretty format (10:00:00:90:fa:34:ad:72)
Args:
hexWWN: the WWN in hex format
Returns:
The prettified string version of the input
"""
pretty = ''
if hexWWN.startswith('0x'):
start_index = 2
else:
start_index = 0
for i in range(start_index, 2*8+2, 2):
pretty += ':' + hexWWN[i:i+2]
return pretty.strip(":")
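# Illustrative example (WWN made up):
#   HumanizeWWN("0x10000090fa34ad72")  # -> "10:00:00:90:fa:34:ad:72"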
def PrettyJSON(obj):
"""
Get a pretty printed representation of an object
Args:
obj: a dictionary to pretty-fy (dict)
Returns:
A string of pretty JSON (str)
"""
return _json.dumps(obj, indent=2, sort_keys=True)
# ===================================================================================================
# Misc
# ===================================================================================================
class SolidFireVersion(object):
"""Easily compare SolidFire version strings"""
def __init__(self, versionString):
self.rawVersion = versionString
pieces = versionString.split(".")
if len(pieces) == 4:
self.major = int(pieces[0])
self.minor = int(pieces[1])
self.patch = int(pieces[2])
self.build = int(pieces[3])
elif len(pieces) == 2:
self.major = int(pieces[0])
self.minor = 0
self.patch = 0
self.build = int(pieces[1])
self.apiVersion = float(self.major) + float(self.minor)/10
@staticmethod
def FromParts(major, minor, patch=None, build=None):
return SolidFireVersion("{}.{}.{}.{}".format(major, minor, patch, build))
def parts(self):
return self.major, self.minor, self.patch, self.build
def __str__(self):
return self.rawVersion
def __repr__(self):
return str(self)
def __eq__(self, other):
return self.major == other.major and \
self.minor == other.minor and \
self.patch == other.patch and \
self.build == other.build
def __gt__(self, other):
return self.major > other.major or \
(self.major == other.major and self.minor > other.minor) or \
(self.major == other.major and self.minor == other.minor and self.patch > other.patch) or \
(self.major == other.major and self.minor == other.minor and self.patch == other.patch and self.build > other.build)
def __ne__(self, other):
return not self == other
def __ge__(self, other):
return self == other or self > other
def __lt__(self, other):
return self != other and not self > other
def __le__(self, other):
return self == other or self < other
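# Illustrative comparisons (version strings made up):
#   SolidFireVersion("11.3.0.14") > SolidFireVersion("11.1.0.72")  # -> True
#   SolidFireVersion("11.0") == SolidFireVersion("11.0")           # -> True (major.build form)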
def GetFilename(baseName):
"""
Get a
import warnings
from math import isnan
import numpy as np
from scipy import integrate
from ross.fluid_flow.fluid_flow_geometry import move_rotor_center
def calculate_oil_film_force(fluid_flow_object, force_type=None):
"""This function calculates the forces of the oil film in the N and T directions, ie in the
opposite direction to the eccentricity and in the tangential direction.
Parameters
----------
fluid_flow_object: A FluidFlow object.
force_type: str
If set, calculates the oil film force matrix analytically considering the chosen type: 'short' or 'long'.
If set to 'numerical', calculates the oil film force numerically.
Returns
-------
radial_force: float
Force of the oil film in the opposite direction to the eccentricity direction.
tangential_force: float
Force of the oil film in the tangential direction
f_x: float
Components of forces in the x direction
f_y: float
Components of forces in the y direction
Examples
--------
>>> from ross.fluid_flow.fluid_flow import fluid_flow_example
>>> my_fluid_flow = fluid_flow_example()
>>> calculate_oil_film_force(my_fluid_flow) # doctest: +ELLIPSIS
(...
"""
if force_type != "numerical" and (
force_type == "short" or fluid_flow_object.bearing_type == "short_bearing"
):
radial_force = (
0.5
* fluid_flow_object.viscosity
* (fluid_flow_object.radius_rotor / fluid_flow_object.radial_clearance) ** 2
* (fluid_flow_object.length ** 3 / fluid_flow_object.radius_rotor)
* (
(
2
* fluid_flow_object.eccentricity_ratio ** 2
* fluid_flow_object.omega
)
/ (1 - fluid_flow_object.eccentricity_ratio ** 2) ** 2
)
)
tangential_force = (
0.5
* fluid_flow_object.viscosity
* (fluid_flow_object.radius_rotor / fluid_flow_object.radial_clearance) ** 2
* (fluid_flow_object.length ** 3 / fluid_flow_object.radius_rotor)
* (
(np.pi * fluid_flow_object.eccentricity_ratio * fluid_flow_object.omega)
/ (2 * (1 - fluid_flow_object.eccentricity_ratio ** 2) ** (3.0 / 2))
)
)
elif force_type != "numerical" and (
force_type == "long" or fluid_flow_object.bearing_type == "long_bearing"
):
radial_force = (
6
* fluid_flow_object.viscosity
* (fluid_flow_object.radius_rotor / fluid_flow_object.radial_clearance) ** 2
* fluid_flow_object.radius_rotor
* fluid_flow_object.length
* (
(
2
* fluid_flow_object.eccentricity_ratio ** 2
* fluid_flow_object.omega
)
/ (
(2 + fluid_flow_object.eccentricity_ratio ** 2)
* (1 - fluid_flow_object.eccentricity_ratio ** 2)
)
)
)
tangential_force = (
6
* fluid_flow_object.viscosity
* (fluid_flow_object.radius_rotor / fluid_flow_object.radial_clearance) ** 2
* fluid_flow_object.radius_rotor
* fluid_flow_object.length
* (
(np.pi * fluid_flow_object.eccentricity_ratio * fluid_flow_object.omega)
/ (
(2 + fluid_flow_object.eccentricity_ratio ** 2)
* (1 - fluid_flow_object.eccentricity_ratio ** 2) ** 0.5
)
)
)
else:
p_mat = fluid_flow_object.p_mat_numerical
a = np.zeros([fluid_flow_object.nz, fluid_flow_object.ntheta])
b = np.zeros([fluid_flow_object.nz, fluid_flow_object.ntheta])
g1 = np.zeros(fluid_flow_object.nz)
g2 = np.zeros(fluid_flow_object.nz)
base_vector = np.array(
[
fluid_flow_object.xre[0][0] - fluid_flow_object.xi,
fluid_flow_object.yre[0][0] - fluid_flow_object.yi,
]
)
for i in range(fluid_flow_object.nz):
for j in range(int(fluid_flow_object.ntheta / 2)):
vector_from_rotor = np.array(
[
fluid_flow_object.xre[i][j] - fluid_flow_object.xi,
fluid_flow_object.yre[i][j] - fluid_flow_object.yi,
]
)
angle_between_vectors = np.arccos(
np.dot(base_vector, vector_from_rotor)
/ (np.linalg.norm(base_vector) * np.linalg.norm(vector_from_rotor))
)
if isnan(angle_between_vectors):
angle_between_vectors = 0
if angle_between_vectors != 0 and j * fluid_flow_object.dtheta > np.pi:
angle_between_vectors += np.pi
a[i][j] = p_mat[i][j] * np.cos(angle_between_vectors)
b[i][j] = p_mat[i][j] * np.sin(angle_between_vectors)
for i in range(fluid_flow_object.nz):
g1[i] = integrate.simps(a[i][:], fluid_flow_object.gama[0])
g2[i] = integrate.simps(b[i][:], fluid_flow_object.gama[0])
integral1 = integrate.simps(g1, fluid_flow_object.z_list)
integral2 = integrate.simps(g2, fluid_flow_object.z_list)
radial_force = -fluid_flow_object.radius_rotor * integral1
tangential_force = fluid_flow_object.radius_rotor * integral2
force_x = -radial_force * np.sin(
fluid_flow_object.attitude_angle
) + tangential_force * np.cos(fluid_flow_object.attitude_angle)
force_y = radial_force * np.cos(
fluid_flow_object.attitude_angle
) + tangential_force * np.sin(fluid_flow_object.attitude_angle)
return radial_force, tangential_force, force_x, force_y
def calculate_stiffness_and_damping_coefficients(fluid_flow_object):
"""This function calculates the bearing stiffness and damping matrices numerically.
Parameters
----------
fluid_flow_object: A FluidFlow object.
Returns
-------
Two lists of floats
A list of length four including stiffness floats in this order: kxx, kxy, kyx, kyy.
And another list of length four including damping floats in this order: cxx, cxy, cyx, cyy.
Examples
--------
>>> from ross.fluid_flow.fluid_flow import fluid_flow_example
>>> my_fluid_flow = fluid_flow_example()
>>> calculate_stiffness_and_damping_coefficients(my_fluid_flow) # doctest: +ELLIPSIS
([428...
"""
N = 6
t = np.linspace(0, 2 * np.pi / fluid_flow_object.omegap, N)
fluid_flow_object.xp = fluid_flow_object.radial_clearance * 0.0001
fluid_flow_object.yp = fluid_flow_object.radial_clearance * 0.0001
dx = np.zeros(N)
dy = np.zeros(N)
xdot = np.zeros(N)
ydot = np.zeros(N)
radial_force = np.zeros(N)
tangential_force = np.zeros(N)
force_xx = np.zeros(N)
force_yx = np.zeros(N)
force_xy = np.zeros(N)
force_yy = np.zeros(N)
X1 = np.zeros([N, 3])
X2 = np.zeros([N, 3])
F1 = np.zeros(N)
F2 = np.zeros(N)
F3 = np.zeros(N)
F4 = np.zeros(N)
for i in range(N):
fluid_flow_object.t = t[i]
delta_x = fluid_flow_object.xp * np.sin(
fluid_flow_object.omegap * fluid_flow_object.t
)
move_rotor_center(fluid_flow_object, delta_x, 0)
dx[i] = delta_x
xdot[i] = (
fluid_flow_object.omegap
* fluid_flow_object.xp
* np.cos(fluid_flow_object.omegap * fluid_flow_object.t)
)
fluid_flow_object.geometry_description()
fluid_flow_object.calculate_pressure_matrix_numerical(direction="x")
[
radial_force[i],
tangential_force[i],
force_xx[i],
force_yx[i],
] = calculate_oil_film_force(fluid_flow_object, force_type="numerical")
delta_y = fluid_flow_object.yp * np.sin(
fluid_flow_object.omegap * fluid_flow_object.t
)
move_rotor_center(fluid_flow_object, -delta_x, 0)
move_rotor_center(fluid_flow_object, 0, delta_y)
dy[i] = delta_y
ydot[i] = (
fluid_flow_object.omegap
* fluid_flow_object.yp
* np.cos(fluid_flow_object.omegap * fluid_flow_object.t)
)
fluid_flow_object.geometry_description()
fluid_flow_object.calculate_pressure_matrix_numerical(direction="y")
[
radial_force[i],
tangential_force[i],
force_xy[i],
force_yy[i],
] = calculate_oil_film_force(fluid_flow_object, force_type="numerical")
move_rotor_center(fluid_flow_object, 0, -delta_y)
fluid_flow_object.geometry_description()
fluid_flow_object.calculate_pressure_matrix_numerical()
X1[i] = [1, dx[i], xdot[i]]
X2[i] = [1, dy[i], ydot[i]]
F1[i] = -force_xx[i]
F2[i] = -force_xy[i]
F3[i] = -force_yx[i]
F4[i] = -force_yy[i]
P1 = np.dot(
np.dot(np.linalg.inv(np.dot(np.transpose(X1), X1)), np.transpose(X1)), F1
)
P2 = np.dot(
np.dot(np.linalg.inv(np.dot(np.transpose(X2), X2)), np.transpose(X2)), F2
)
P3 = np.dot(
np.dot(np.linalg.inv(np.dot(np.transpose(X1), X1)), np.transpose(X1)), F3
)
P4 = np.dot(
np.dot(np.linalg.inv(np.dot(np.transpose(X2), X2)), np.transpose(X2)), F4
)
K = [P1[1], P2[1], P3[1], P4[1]]
C = [P1[2], P2[2], P3[2], P4[2]]
return K, C
def calculate_short_stiffness_matrix(fluid_flow_object):
"""This function calculates the stiffness matrix for the short bearing.
Parameters
----------
fluid_flow_object: A FluidFlow object.
Returns
-------
list of floats
A list of length four including stiffness floats in this order: kxx, kxy, kyx, kyy
Examples
--------
>>> from ross.fluid_flow.fluid_flow import fluid_flow_example
>>> my_fluid_flow = fluid_flow_example()
>>> calculate_short_stiffness_matrix(my_fluid_flow) # doctest: +ELLIPSIS
[417...
"""
h0 = 1.0 / (
(
(np.pi ** 2) * (1 - fluid_flow_object.eccentricity_ratio ** 2)
+ 16 * fluid_flow_object.eccentricity_ratio ** 2
)
** 1.5
)
a = fluid_flow_object.load / fluid_flow_object.radial_clearance
kxx = (
a
* h0
* 4
* (
(np.pi ** 2) * (2 - fluid_flow_object.eccentricity_ratio ** 2)
+ 16 * fluid_flow_object.eccentricity_ratio ** 2
)
)
kxy = (
a
* h0
* np.pi
* (
(np.pi ** 2) * (1 - fluid_flow_object.eccentricity_ratio ** 2) ** 2
- 16 * fluid_flow_object.eccentricity_ratio ** 4
)
/ (
fluid_flow_object.eccentricity_ratio
* np.sqrt(1 - fluid_flow_object.eccentricity_ratio ** 2)
)
)
kyx = (
-a
* h0
* np.pi
* (
(np.pi ** 2)
* (1 - fluid_flow_object.eccentricity_ratio ** 2)
* (1 + 2 * fluid_flow_object.eccentricity_ratio ** 2)
+ (32 * fluid_flow_object.eccentricity_ratio ** 2)
* (1 + fluid_flow_object.eccentricity_ratio ** 2)
)
/ (
fluid_flow_object.eccentricity_ratio
* np.sqrt(1 - fluid_flow_object.eccentricity_ratio ** 2)
)
)
kyy = (
a
* h0
* 4
* (
(np.pi ** 2) * (1 + 2 * fluid_flow_object.eccentricity_ratio ** 2)
+ (
(32 * fluid_flow_object.eccentricity_ratio ** 2)
* (1 + fluid_flow_object.eccentricity_ratio ** 2)
)
/ (1 - fluid_flow_object.eccentricity_ratio ** 2)
)
)
return [kxx, kxy, kyx, kyy]
def calculate_short_damping_matrix(fluid_flow_object):
"""This function calculates the damping matrix for the short bearing.
Parameters
-------
fluid_flow_object: A FluidFlow object.
Returns
-------
list of floats
A list of length four including damping floats in this order: cxx, cxy, cyx, cyy
Examples
--------
>>> from ross.fluid_flow.fluid_flow import fluid_flow_example
>>> my_fluid_flow = fluid_flow_example()
>>> calculate_short_damping_matrix(my_fluid_flow) # doctest: +ELLIPSIS
[...
"""
# fmt: off
h0 = 1.0 / (((np.pi ** 2) * (1 - fluid_flow_object.eccentricity_ratio ** 2)
+ 16 * fluid_flow_object.eccentricity_ratio ** 2) ** 1.5)
a = fluid_flow_object.load / (fluid_flow_object.radial_clearance * fluid_flow_object.omega)
cxx = (a * h0 * 2 * np.pi * np.sqrt(1 - fluid_flow_object.eccentricity_ratio ** 2) *
((np.pi ** 2) * (1 + 2 * fluid_flow_object.eccentricity_ratio ** 2)
- 16 * fluid_flow_object.eccentricity_ratio ** 2) / fluid_flow_object.eccentricity_ratio)
cxy = (-a * h0 * 8 * ((np.pi ** 2) * (1 + 2 * fluid_flow_object.eccentricity_ratio ** 2)
- 16 * fluid_flow_object.eccentricity_ratio ** 2))
cyx = cxy
cyy = (a * h0 * (2 * np.pi * (
(np.pi ** 2) * (1 - fluid_flow_object.eccentricity_ratio ** 2) ** 2
+ 48 * fluid_flow_object.eccentricity_ratio ** 2)) /
(fluid_flow_object.eccentricity_ratio * np.sqrt(1 - fluid_flow_object.eccentricity_ratio ** 2)))
# fmt: on
return [cxx, cxy, cyx, cyy]
def find_equilibrium_position(
fluid_flow_object,
print_along=True,
tolerance=1e-05,
increment_factor=1e-03,
max_iterations=10,
increment_reduction_limit=1e-04,
return_iteration_map=False,
):
"""This function returns an eccentricity value with calculated forces matching the load applied,
meaning an equilibrium position of the rotor.
It first moves the rotor center on x-axis, aiming for the minimum error in the force on x (zero), then
moves on y-axis, aiming for the minimum
E501
local_var_params['sim_version'] is None): # noqa: E501
raise ApiValueError("Missing the required parameter `sim_version` when calling `sim_version_get_downloads`") # noqa: E501
collection_formats = {}
path_params = {}
if 'sim_version' in local_var_params:
path_params['simVersion'] = local_var_params['sim_version'] # noqa: E501
query_params = []
if 'tenant_id' in local_var_params and local_var_params['tenant_id'] is not None: # noqa: E501
query_params.append(('tenantId', local_var_params['tenant_id'])) # noqa: E501
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json', 'text/json']) # noqa: E501
# Authentication setting
auth_settings = ['oauth2'] # noqa: E501
return self.api_client.call_api(
'/sim-versions/{simVersion}/downloads', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='GetSimVersionDownloadsQueryResult', # noqa: E501
auth_settings=auth_settings,
async_req=local_var_params.get('async_req'),
_return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
_preload_content=local_var_params.get('_preload_content', True),
_request_timeout=local_var_params.get('_request_timeout'),
collection_formats=collection_formats)
def sim_version_get_sim_version(self, **kwargs): # noqa: E501
"""sim_version_get_sim_version # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.sim_version_get_sim_version(async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param str tenant_id:
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: str
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
return self.sim_version_get_sim_version_with_http_info(**kwargs) # noqa: E501
def sim_version_get_sim_version_with_http_info(self, **kwargs): # noqa: E501
"""sim_version_get_sim_version # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.sim_version_get_sim_version_with_http_info(async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param str tenant_id:
:param _return_http_data_only: response data without head status code
and headers
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: tuple(str, status_code(int), headers(HTTPHeaderDict))
If the method is called asynchronously,
returns the request thread.
"""
local_var_params = locals()
all_params = [
'tenant_id'
]
all_params.extend(
[
'async_req',
'_return_http_data_only',
'_preload_content',
'_request_timeout'
]
)
for key, val in six.iteritems(local_var_params['kwargs']):
if key not in all_params:
raise ApiTypeError(
"Got an unexpected keyword argument '%s'"
" to method sim_version_get_sim_version" % key
)
local_var_params[key] = val
del local_var_params['kwargs']
collection_formats = {}
path_params = {}
query_params = []
if 'tenant_id' in local_var_params and local_var_params['tenant_id'] is not None: # noqa: E501
query_params.append(('tenantId', local_var_params['tenant_id'])) # noqa: E501
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json', 'text/json']) # noqa: E501
# Authentication setting
auth_settings = ['oauth2'] # noqa: E501
return self.api_client.call_api(
'/sim-versions/current', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='str', # noqa: E501
auth_settings=auth_settings,
async_req=local_var_params.get('async_req'),
_return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
_preload_content=local_var_params.get('_preload_content', True),
_request_timeout=local_var_params.get('_request_timeout'),
collection_formats=collection_formats)
def sim_version_get_wiki_document(self, wiki_version, document_path, **kwargs): # noqa: E501
"""sim_version_get_wiki_document # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.sim_version_get_wiki_document(wiki_version, document_path, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param str wiki_version: (required)
:param str document_path: (required)
:param str tenant_id:
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: GetWikiDocumentQueryResult
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
return self.sim_version_get_wiki_document_with_http_info(wiki_version, document_path, **kwargs) # noqa: E501
def sim_version_get_wiki_document_with_http_info(self, wiki_version, document_path, **kwargs): # noqa: E501
"""sim_version_get_wiki_document # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.sim_version_get_wiki_document_with_http_info(wiki_version, document_path, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param str wiki_version: (required)
:param str document_path: (required)
:param str tenant_id:
:param _return_http_data_only: response data without head status code
and headers
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: tuple(GetWikiDocumentQueryResult, status_code(int), headers(HTTPHeaderDict))
If the method is called asynchronously,
returns the request thread.
"""
local_var_params = locals()
all_params = [
'wiki_version',
'document_path',
'tenant_id'
]
all_params.extend(
[
'async_req',
'_return_http_data_only',
'_preload_content',
'_request_timeout'
]
)
for key, val in six.iteritems(local_var_params['kwargs']):
if key not in all_params:
raise ApiTypeError(
"Got an unexpected keyword argument '%s'"
" to method sim_version_get_wiki_document" % key
)
local_var_params[key] = val
del local_var_params['kwargs']
# verify the required parameter 'wiki_version' is set
if self.api_client.client_side_validation and ('wiki_version' not in local_var_params or # noqa: E501
local_var_params['wiki_version'] is None): # noqa: E501
raise ApiValueError("Missing the required parameter `wiki_version` when calling `sim_version_get_wiki_document`") # noqa: E501
# verify the required parameter 'document_path' is set
if self.api_client.client_side_validation and ('document_path' not in local_var_params or # noqa: E501
local_var_params['document_path'] is None): # noqa: E501
raise ApiValueError("Missing the required parameter `document_path` when calling `sim_version_get_wiki_document`") # noqa: E501
collection_formats = {}
path_params = {}
if 'wiki_version' in local_var_params:
path_params['wikiVersion'] = local_var_params['wiki_version'] # noqa: E501
if 'document_path' in local_var_params:
path_params['documentPath'] = local_var_params['document_path'] # noqa: E501
query_params = []
if 'tenant_id' in local_var_params and local_var_params['tenant_id'] is not None: # noqa: E501
query_params.append(('tenantId', local_var_params['tenant_id'])) # noqa: E501
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json', 'text/json']) # noqa: E501
# Authentication setting
auth_settings = ['oauth2'] # noqa: E501
return self.api_client.call_api(
'/sim-versions/{wikiVersion}/wiki/{documentPath}', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='GetWikiDocumentQueryResult', # noqa: E501
auth_settings=auth_settings,
async_req=local_var_params.get('async_req'),
_return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
_preload_content=local_var_params.get('_preload_content', True),
_request_timeout=local_var_params.get('_request_timeout'),
collection_formats=collection_formats)
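# Illustrative usage sketch (the wrapper class and client construction below are
# assumptions based on typical OpenAPI-generated SDKs and are not defined in this file):
#   api = SimVersionApi(api_client)        # hypothetical API wrapper holding these methods
#   current = api.sim_version_get_sim_version()
#   thread = api.sim_version_get_wiki_document("1.2.3", "getting-started", async_req=True)
#   doc = thread.get()                     # resolve the asynchronous request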
def sim_version_post_sim_version(self, sim_version_data, **kwargs): # noqa: E501
"""sim_version_post_sim_version # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.sim_version_post_sim_version(sim_version_data, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param NewSimVersionData sim_version_data: (required)
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: None
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
return self.sim_version_post_sim_version_with_http_info(sim_version_data, **kwargs) # noqa: E501
def sim_version_post_sim_version_with_http_info(self, sim_version_data, **kwargs): # noqa: E501
"""sim_version_post_sim_version # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.sim_version_post_sim_version_with_http_info(sim_version_data, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param NewSimVersionData sim_version_data: (required)
:param _return_http_data_only: response data without head status code
and headers
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: None
If the method is called asynchronously,
returns the request thread.
"""
local_var_params = locals()
all_params = [
'sim_version_data'
]
all_params.extend(
[
'async_req',
'_return_http_data_only',
'_preload_content',
'_request_timeout'
]
)
for key, val in six.iteritems(local_var_params['kwargs']):
if key not in all_params:
raise ApiTypeError(
"Got an unexpected keyword argument '%s'"
" to method sim_version_post_sim_version" % key
)
local_var_params[key] = val
del local_var_params['kwargs']
# verify the required parameter 'sim_version_data' is set
if self.api_client.client_side_validation and ('sim_version_data' not in local_var_params or # noqa: E501
local_var_params['sim_version_data'] is None): # noqa: E501
raise ApiValueError("Missing the required parameter `sim_version_data` when calling `sim_version_post_sim_version`") # noqa: E501
collection_formats = {}
path_params = {}
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if
& (
signal_data_frame.values < 0)
# stop loss for buys
ind3 = (asset_data_frame.values < (asset_df_copy.values + stop_loss_df.values)) & (signal_data_frame.values > 0)
# stop loss for sells
ind4 = (asset_data_frame.values > (asset_df_copy.values - stop_loss_df.values)) & (signal_data_frame.values < 0)
# when has there been a stop loss or take profit? assign those as being flat points
ind = ind1 | ind2 | ind3 | ind4
ind = pandas.DataFrame(data=ind, columns=signal_data_frame.columns, index=signal_data_frame.index)
# for debugging
# sum_ind = (ind == True).sum(); print(sum_ind)
signal_data_frame[ind] = 0
# those places where we have been stopped out/taken profit are additional trade "reset points", which we need to define
# (already have ordinary buy/sell trades defined)
reset_points[ind] = 1
# where we don't have trade make these NaN and then fill down
signal_data_frame[reset_points == 0] = numpy.nan
signal_data_frame = signal_data_frame.ffill()
return signal_data_frame
# TODO
def calculate_risk_stop_defined_signals(self, signal_data_frame, stops_data_frame):
"""
Parameters
----------
signal_data_frame : DataFrame
Contains all the trade signals (typically a mix of 0, +1 and -1)
stops_data_frame : DataFrame
Contains 1/-1 to indicate where trades would be stopped out
Returns
-------
DataFrame containing amended signals that take into account stops and take profits
"""
signal_data_frame_pushed = signal_data_frame # signal_data_frame.shift(1)
reset_points = ((signal_data_frame_pushed - signal_data_frame_pushed.shift(1)).abs())
stops_data_frame = stops_data_frame.abs()
ind = stops_data_frame >= 1
ind.columns = signal_data_frame.columns
signal_data_frame[ind] = 0
reset_points[ind] = 1
signal_data_frame[reset_points == 0] = numpy.nan
signal_data_frame = signal_data_frame.ffill()
return signal_data_frame
def calculate_signal_returns_matrix(self, signal_data_frame, returns_data_frame, period_shift=1):
"""Calculates the trading strategy returns for given signal and asset
as a matrix multiplication
Parameters
----------
signal_data_frame : DataFrame
trading signals
returns_data_frame: DataFrame
returns of asset to be traded
period_shift : int
number of periods to shift signal
Returns
-------
DataFrame
"""
return pandas.DataFrame(
signal_data_frame.shift(period_shift).values * returns_data_frame.values, index=returns_data_frame.index)
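# Illustrative usage sketch (assumes `calc` is an instance of the enclosing class,
# whose name is not visible in this excerpt; sample data made up):
#   import pandas
#   signals = pandas.DataFrame({'EURUSD.signal': [1, 1, -1, -1]})
#   rets = pandas.DataFrame({'EURUSD.ret': [0.001, -0.002, 0.003, 0.001]})
#   strat_rets = calc.calculate_signal_returns_matrix(signals, rets)  # yesterday's signal times today's return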
def calculate_signal_returns_with_tc(self, signal_data_frame, returns_data_frame, tc, period_shift=1):
"""Calculates the trading startegy returns for given signal and asset including
transaction costs
Parameters
----------
signal_data_frame : DataFrame
trading signals
returns_data_frame: DataFrame
returns of asset to be traded
tc : float
transaction costs
period_shift : int
number of periods to shift signal
Returns
-------
DataFrame
"""
tc_costs = self.calculate_signal_tc(signal_data_frame, tc, period_shift)
return signal_data_frame.shift(period_shift) * returns_data_frame - tc_costs
def calculate_signal_returns_with_tc_matrix(self, signal_data_frame, returns_data_frame, tc, period_shift=1):
"""Calculates the trading startegy returns for given signal and asset with transaction costs with matrix multiplication
Parameters
----------
signal_data_frame : DataFrame
trading signals
returns_data_frame: DataFrame
returns of asset to be traded
tc : float
transaction costs
period_shift : int
number of periods to shift signal
Returns
-------
DataFrame
"""
# for transaction costs which vary by asset name
# TODO add transaction costs which vary by size (maybe using a market impact model?)
if isinstance(tc, dict):
tc_ind = []
for k in returns_data_frame.columns:
try:
tc_ind.append(tc[k.split('.')[0]])
except:
tc_ind.append(tc['default'])
tc_ind = numpy.array(tc_ind)
tc_costs = (numpy.abs(signal_data_frame.shift(period_shift).values - signal_data_frame.values) * tc_ind)
elif isinstance(tc, pandas.DataFrame):
tc_ind = []
# get indices related to the returns
for k in returns_data_frame.columns:
try:
tc_ind.append(k.split('.')[0] + ".spread")
except:
tc_ind.append('default.spread')
# don't include transaction costs at a portfolio level (TODO - weight it according to the assets)
tc['Portfolio.spread'] = 0
# get associated transaction costs time series
tc_costs = tc[tc_ind]
# make sure transaction costs are aligned to the signals
signal_data_frame, tc_costs = signal_data_frame.align(tc_costs, join='left', axis='index')
tc_costs = tc_costs.fillna(method='ffill')
# calculate the transaction costs by multiplying by trades
tc_costs = (numpy.abs(signal_data_frame.shift(period_shift).values - signal_data_frame.values) * tc_costs.values)
else:
tc_costs = (numpy.abs(signal_data_frame.shift(period_shift).values - signal_data_frame.values) * tc)
return pandas.DataFrame(
signal_data_frame.shift(period_shift).values * returns_data_frame.values - tc_costs,
index=returns_data_frame.index)
def calculate_returns(self, data_frame, period_shift=1):
"""Calculates the simple returns for an asset
Parameters
----------
data_frame : DataFrame
asset price
period_shift : int
number of periods to shift signal
Returns
-------
DataFrame
"""
return data_frame / data_frame.shift(period_shift) - 1
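# Illustrative example (using the same hypothetical `calc` instance as above; prices made up):
#   prices = pandas.DataFrame({'EURUSD.close': [1.00, 1.01, 0.99]})
#   calc.calculate_returns(prices)  # -> NaN, 0.01, -0.0198... (simple returns)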
def calculate_diff_returns(self, data_frame, period_shift=1):
"""Calculates the differences for an asset
Parameters
----------
data_frame : DataFrame
asset price
period_shift : int
number of periods to shift signal
Returns
-------
DataFrame
"""
return data_frame - data_frame.shift(period_shift)
def calculate_log_returns(self, data_frame, period_shift=1):
"""Calculates the log returns for an asset
Parameters
----------
data_frame : DataFrame
asset price
period_shift : int
number of periods to shift signal
Returns
-------
DataFrame
"""
return math.log(data_frame / data_frame.shift(period_shift))
def create_mult_index(self, df_rets):
""" Calculates a multiplicative index for a time series of returns
Parameters
----------
df_rets : DataFrame
asset price returns
Returns
-------
DataFrame
"""
df = 100.0 * (1.0 + df_rets).cumprod()
# get the first non-nan values for rets and then start index
# one before that (otherwise will ignore first rets point)
# first_date_indices = df_rets.apply(lambda series: series.first_valid_index())
# first_ord_indices = list()
#
# for i in first_date_indices:
# try:
# ind = df.index.searchsorted(i)
# except:
# ind = 0
#
# if ind > 0: ind = ind - 1
#
# first_ord_indices.append(ind)
#
# for i in range(0, len(df.columns)):
# df.iloc[first_ord_indices[i],i] = 100
# probably a quicker way to do this, maybe using group by?
for c in df.columns:
df.loc[df[c].first_valid_index(), c] = 100
return df
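# Illustrative usage (same hypothetical `calc` instance as above): feeding the simple
# returns into create_mult_index produces an index rebased to start at 100, e.g.
#   index = calc.create_mult_index(calc.calculate_returns(prices))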
def create_mult_index_from_prices(self, data_frame):
"""Calculates a multiplicative index for a time series of prices
Parameters
----------
df_rets : DataFrame
asset price
Returns
-------
DataFrame
"""
return self.create_mult_index(self.calculate_returns(data_frame))
def rolling_z_score(self, data_frame, periods):
"""Calculates the rolling z score for a time series
Parameters
----------
data_frame : DataFrame
asset prices
periods : int
rolling window for z score computation
Returns
-------
DataFrame
"""
return (data_frame - data_frame.rolling(center=False, window=periods).mean()) / data_frame.rolling(center=False,
window=periods).std()
def rolling_volatility(self, data_frame, periods, obs_in_year=252):
"""
rolling_volatility - Calculates the annualised rolling volatility
Parameters
----------
data_frame : DataFrame
contains returns time series
obs_in_year : int
number of observation in the year
Returns
-------
DataFrame
"""
# return pandas.rolling_std(data_frame, periods) * math.sqrt(obs_in_year)
return data_frame.rolling(window=periods, center=False).std() * math.sqrt(obs_in_year)
def rolling_mean(self, data_frame, periods):
return self.rolling_average(data_frame, periods)
def rolling_average(self, data_frame, periods):
"""Calculates the rolling moving average
Parameters
----------
data_frame : DataFrame
contains time series
periods : int
periods in the average
Returns
-------
DataFrame
"""
return data_frame.rolling(periods).mean()
def rolling_sparse_average(self, data_frame, periods):
"""Calculates the rolling moving average of a sparse time series
Parameters
----------
data_frame : DataFrame
contains time series
periods : int
number of periods in the rolling sparse average
Returns
-------
DataFrame
"""
# 1. calculate rolling sum (ignore NaNs)
# 2. count number of non-NaNs
# 3. average of non-NaNs
foo = lambda z: z[pandas.notnull(z)].sum()
# rolling_sum = pandas.rolling_apply(data_frame, periods, foo, min_periods=1)
# rolling_non_nans = pandas.stats.moments.rolling_count(data_frame, periods, freq=None, center=False, how=None) \
rolling_sum = data_frame.rolling(center=False, window=periods, min_periods=1).apply(func=foo)
rolling_non_nans = data_frame.rolling(window=periods, center=False).count()
# For pandas 0.18 onwards (TODO)
# rolling_non_nans = data_frame.rolling(span=periods, freq=None, center=False, how=None).count()
return rolling_sum / rolling_non_nans
def rolling_sparse_sum(self, data_frame, periods):
"""Calculates the rolling moving sum of a sparse time series
Parameters
----------
data_frame : DataFrame
contains time series
periods : int
period for sparse rolling sum
Returns
-------
DataFrame
"""
# 1. calculate rolling sum (ignore NaNs)
# 2. count number of non-NaNs
# 3. average of non-NaNs
foo = lambda z: z[pandas.notnull(z)].sum()
# pandas.rolling_apply was removed in later pandas; use the DataFrame.rolling API instead
rolling_sum = data_frame.rolling(window=periods, min_periods=1).apply(func=foo)
return rolling_sum
def rolling_median(self, data_frame, periods):
"""Calculates the rolling moving average
Parameters
----------
data_frame : DataFrame
contains time series
periods : int
number of periods in the median
Returns
-------
DataFrame
"""
return data_frame.rolling(periods).median()
def rolling_sum(self, data_frame, periods):
"""Calculates the rolling sum
Parameters
----------
data_frame : DataFrame
contains time series
periods : int
period for rolling sum
Returns
-------
DataFrame
"""
return data_frame.rolling(periods).sum()
def cum_sum(self, data_frame):
"""Calculates the cumulative sum
Parameters
----------
data_frame : DataFrame
contains time series
Returns
-------
DataFrame
"""
return data_frame.cumsum()
def rolling_ewma(self, data_frame, periods):
"""Calculates exponentially weighted moving average
Parameters
----------
data_frame : DataFrame
contains time series
periods : int
periods in the EWMA
Returns
-------
DataFrame
"""
# span = 2 / (1 + periods)
# pandas.ewma was removed in later pandas; DataFrame.ewm is the replacement
return data_frame.ewm(span=periods).mean()
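# Illustrative usage (hypothetical names): calc.rolling_ewma(rets, periods=20) weights
# recent observations more heavily; with span=20 the smoothing factor is
# alpha = 2 / (20 + 1) ~= 0.095.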
##### correlation methods
def rolling_corr(self, data_frame1, periods, data_frame2=None, pairwise=False, flatten_labels=True):
"""Calculates rolling correlation wrapping around pandas functions
Parameters
----------
data_frame1 : DataFrame
contains time series to run correlations on
periods : int
period of rolling correlations
data_frame2 : DataFrame (optional)
contains times series to run correlation against
pairwise : boolean
should we do pairwise correlations only?
flatten_labels : boolean
should we flatten the output correlation labels?
Returns
-------
DataFrame
"""
# ---- source/plugins/deployer.py ----
import sys
import os
import stat
import copy
parent_dir = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
sys.path.insert(1, parent_dir)
import config
from util import print_help_line, get_real_path, escape_arg, ensure_json_exists, load_json, save_json, dict_merge
from json_include import JSONInclude
def print_help():
print_help_line(0, "Daedalus \"deployer\" plugin help:")
print_help_line(1, "help", "prints this description")
print_help_line(1, "get-context-path", "prints the path to current context file")
print_help_line(1, "get-context", "prints the current context")
print_help_line(1, "clear-context", "clears the set of context files")
print_help_line(1, "set-context <path>", "sets the path to the current context file " +
"(and removes all other context paths)")
print_help_line(1, "add-context <path>", "adds the path to the current set of context files")
print_help_line(1, "remove-context <path>", "remove the path from the current set of context files")
print_help_line(1, "compile <file> [output]", "compile an json machine description file into a shell script")
def parse_command(args):
valid_command = False
if len(args) == 1:
valid_command = True
print_help()
elif len(args) == 2:
if args[1] == "help":
valid_command = True
print_help()
elif args[1] == "get-context-path":
valid_command = True
Deployer.load_settings()
print(Deployer.settings.get("pathToContext", None))
elif args[1] == "get-context":
valid_command = True
Deployer.load_context()
print(Deployer.context)
elif args[1] == "clear-context":
valid_command = True
Deployer.load_settings()
Deployer.clear_context()
Deployer.save_settings()
elif len(args) == 3:
if args[1] == "preprocess":
valid_command = True
Deployer.load_context()
print(Deployer.preprocess(args[2]))
elif args[1] == "compile":
valid_command = True
Deployer.load_context()
print(Deployer.compile(args[2]))
elif args[1] == "set-context":
valid_command = True
Deployer.load_settings()
Deployer.set_context(args[2])
Deployer.save_settings()
elif args[1] == "add-context":
valid_command = True
Deployer.load_settings()
Deployer.add_context(args[2])
Deployer.save_settings()
elif args[1] == "remove-context":
valid_command = True
Deployer.load_settings()
Deployer.remove_context(args[2])
Deployer.save_settings()
elif len(args) == 4:
if args[1] == "preprocess":
valid_command = True
Deployer.load_context()
Deployer.preprocess(args[2], args[3])
elif args[1] == "compile":
valid_command = True
Deployer.load_context()
Deployer.compile(args[2], args[3])
return valid_command
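# Example invocations routed through parse_command (illustrative; assumes the daedalus
# CLI forwards the plugin arguments, and the paths shown are hypothetical):
#   daedalus deployer set-context machines/context.json
#   daedalus deployer add-context machines/secrets.json
#   daedalus deployer compile machines/web.json deploy_web.sh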
class Deployer:
settings_default_path = None
settings = {}
context = {}
def __init__(self):
pass
@classmethod
def init_default_context(cls):
autossh_config_path = config.Manager.get_current_state_path() + "/deployer.json"
ensure_json_exists(autossh_config_path)
cls.set_default_path(autossh_config_path)
@classmethod
def set_default_path(cls, path):
cls.settings_default_path = path
@classmethod
def load_settings(cls, path=None):
if not path:
if cls.settings_default_path is None:
cls.init_default_context()
path = cls.settings_default_path
cls.settings = load_json(path)
@classmethod
def save_settings(cls, path=None):
if not path:
if cls.settings_default_path is None:
cls.init_default_context()
path = cls.settings_default_path
save_json(path, cls.settings)
@classmethod
def load_context(cls, path=None):
if not path:
cls.load_settings()
if "pathToContext" in cls.settings:
if isinstance(cls.settings["pathToContext"], list):
cls.context = {}
for entry in cls.settings["pathToContext"]:
dict_merge(cls.context, load_json(entry))
else:
cls.context = load_json(cls.settings["pathToContext"])
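# Sketch of the merge behaviour (hypothetical files, assuming dict_merge overlays the
# second argument onto the first): with pathToContext = ["base.json", "prod.json"],
# base.json {"user": "deploy", "port": 22} and prod.json {"port": 2222} yield a
# context of {"user": "deploy", "port": 2222}; later files override earlier ones.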
@classmethod
def set_context(cls, path):
cls.settings["pathToContext"] = [get_real_path(path)]
@classmethod
def clear_context(cls):
cls.settings["pathToContext"] = []
@classmethod
def remove_context(cls, path):
path = get_real_path(path)
if "pathToContext" not in cls.settings:
return
if isinstance(cls.settings["pathToContext"], list):
cls.settings["pathToContext"].remove(path)
elif cls.settings["pathToContext"] == path:
cls.settings["pathToContext"] = []
@classmethod
def add_context(cls, path):
path = get_real_path(path)
if "pathToContext" not in cls.settings:
cls.settings["pathToContext"] = [path]
elif isinstance(cls.settings["pathToContext"], list):
if path not in cls.settings["pathToContext"]:
cls.settings["pathToContext"].append(path)
else:
cls.settings["pathToContext"] = [cls.settings["pathToContext"], path]
@classmethod
def preprocess(cls, path, save_path=None):
json_include = copy.deepcopy(JSONInclude.get(get_real_path(path)))
if save_path is not None:
json_include.save(get_real_path(save_path))
return json_include.data
@classmethod
def lazy_new_line(cls, text):
if text != "":
if text.endswith("\n\n"):
return text
return text + "\n"
return text
@classmethod
def compile_header(cls, json_include, run_on=None):
return "#!/usr/bin/env bash\n\n"
@classmethod
def compile_init(cls, json_include, run_on=None, priority=None):
script = ""
if "init" in json_include.data:
script = cls.compile_command_block(json_include.data["init"], run_on=run_on, priority=priority)
return cls.lazy_new_line(script)
@classmethod
def compile_project_setup(cls, json_include, run_on=None):
script = ""
if "project" in json_include.data:
if "name" in json_include.data["project"]:
if "path" in json_include.data["project"]:
command = "daedalus project add " + json_include.data["project"]["name"] + " " + \
json_include.data["project"]["path"]
script += cls.compile_run_on(command, run_on) + "\n"
command = "daedalus project switch " + json_include.data["project"]["name"]
script += cls.compile_run_on(command, run_on) + "\n"
return cls.lazy_new_line(script)
@classmethod
def compile_https(cls, json_include, run_on=None):
script = ""
if "https" in json_include.data:
for entry in json_include.data["https"]:
if "domain" not in entry:
continue
if "email" not in entry:
continue
script += cls.compile_run_on("daedalus https new-ssl " + entry["domain"] + " " + entry["email"],
run_on) + "\n"
return cls.lazy_new_line(script)
@classmethod
def compile_configfs(cls, json_include, run_on=None):
script = ""
if "configfs" in json_include.data:
for key in json_include.data["configfs"]:
if type(json_include.data["configfs"][key]) is list:
for list_el in json_include.data["configfs"][key]:
script += cls.compile_run_on("daedalus configfs insert " + key + " " + escape_arg(list_el),
run_on) + "\n"
elif json_include.data["configfs"][key] is None:
script += cls.compile_run_on("daedalus configfs request-private " + key, run_on) + "\n"
else:
script += cls.compile_run_on("daedalus configfs set " + key + " " +
escape_arg(str(json_include.data["configfs"][key])), run_on) + "\n"
return cls.lazy_new_line(script)
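# Illustrative input/output (hypothetical context, escaping omitted for readability):
# a "configfs" block of {"db.hosts": ["db1", "db2"], "db.password": null, "db.name": "app"}
# emits, in order:
#   daedalus configfs insert db.hosts db1
#   daedalus configfs insert db.hosts db2
#   daedalus configfs request-private db.password
#   daedalus configfs set db.name app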
@classmethod
def compile_modules(cls, json_include, run_on=None):
script = ""
if "modules" in json_include.data:
for module in json_include.data["modules"]:
if json_include.data["modules"][module] == "latest":
command = "daedalus install " + module
script += cls.compile_run_on(command, run_on) + "\n"
return cls.lazy_new_line(script)
@classmethod
def compile_commands(cls, commands, run_on=None):
script = ""
for command in commands:
script += cls.compile_run_on("daedalus " + command, run_on) + "\n"
return cls.lazy_new_line(script)
@classmethod
def command_block_get_all_priorities(cls, command_block):
priorities = []
for entry in command_block:
if "priority" not in entry:
continue
if "commands" not in entry:
continue
priorities.append(entry["priority"])
return sorted(set(priorities))
@classmethod
def compile_command_block(cls, command_block, run_on=None, priority=None):
script = ""
if priority is None:
priorities = cls.command_block_get_all_priorities(command_block)
for target_priority in priorities:
script += cls.compile_command_block(command_block, priority=target_priority, run_on=run_on)
else:
for entry in command_block:
if "priority" not in entry:
continue
if "commands" not in entry:
continue
current_run_on = run_on
if "ignoreRunOn" in entry:
if entry["ignoreRunOn"]:
current_run_on = None
if priority == entry["priority"]:
script += cls.compile_commands(entry["commands"], run_on=current_run_on)
return script
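# Illustrative command block (hypothetical): given
#   [{"priority": 2, "commands": ["service restart nginx"]},
#    {"priority": 1, "commands": ["install nginx"], "ignoreRunOn": true}]
# the entries are emitted in ascending priority order (1 then 2), and the second
# entry is emitted without the run_on wrapper because ignoreRunOn is set.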
@classmethod
def compile_post_install(cls, json_include, run_on=None, priority=None):
script = ""
if "postInstall" in json_include.data:
script = cls.compile_command_block(json_include.data["postInstall"], run_on=run_on, priority=priority)
return cls.lazy_new_line(script)
@classmethod
def compile_hosts(cls, json_include, run_on=None):
script = ""
if "hosts" in json_include.data:
for host in json_include.data["hosts"]:
if "privateIP" in json_include.data["hosts"][host]:
command = "daedalus hosts add " + host + " " + \
str(json_include.data["hosts"][host]["privateIP"])
script += cls.compile_run_on(command, run_on) + "\n"
elif "publicIP" in json_include.data["hosts"][host]:
command = "daedalus hosts add " + host + " " + \
str(json_include.data["hosts"][host]["publicIP"])
script += cls.compile_run_on(command, run_on) + "\n"
return cls.lazy_new_line(script)
@classmethod
def compile_ssh_link(cls, json_include, run_on=None):
script = ""
if "sshLink" in json_include.data:
for host in json_include.data["sshLink"]:
command = "daedalus ssh link " + json_include.data["sshLink"][host]["remoteUser"] + " " + \
json_include.data["sshLink"][host]["remoteHost"] + " " + \
json_include.data["sshLink"][host]["sshKey"]
script += cls.compile_run_on(command, run_on) + "\n"
return cls.lazy_new_line(script)
@classmethod
def compile_autossh(cls, json_include, run_on=None):
script = ""
if "autossh" in json_include.data:
for entry in json_include.data["autossh"]:
command = "daedalus autossh add " + str(json_include.data["autossh"][entry]["localPort"]) + " " + \
json_include.data["autossh"][entry]["localHost"] + " " + \
str(json_include.data["autossh"][entry]["remotePort"]) + " " + \
json_include.data["autossh"][entry]["remoteUser"] + " " + \
json_include.data["autossh"][entry]["remoteHost"]
script += cls.compile_run_on(command, run_on) + "\n"
if script != "":
script += cls.compile_run_on("daedalus autossh apply --overwrite", run_on) + "\n"
return cls.lazy_new_line(script)
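# Illustrative "autossh" entry (hypothetical values):
#   {"dbTunnel": {"localPort": 5433, "localHost": "localhost", "remotePort": 5432,
#                 "remoteUser": "deploy", "remoteHost": "db.internal"}}
# compiles to:
#   daedalus autossh add 5433 localhost 5432 deploy db.internal
#   daedalus autossh apply --overwrite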
@classmethod
def compile_post_ssh_link(cls, json_include, run_on=None, priority=None):
script = ""
if "postSSHLink" in json_include.data:
script = cls.compile_command_block(json_include.data["postSSHLink"], run_on=run_on, priority=priority)
return cls.lazy_new_line(script)
@classmethod
def write_script(cls, save_path, script):
save_path = get_real_path(save_path)
with open(save_path, "w") as save_file:
save_file.write(script)
st = os.stat(save_path)
os.chmod(save_path, st.st_mode | stat.S_IEXEC)
@classmethod
def replace_bulk(cls, val, replace_array):
temp = val
for entry in replace_array:
if entry["to"] is None:
to = ""
else:
to = entry["to"]
temp = temp.replace(entry["from"], to)
return temp
@classmethod
def translate_context(cls, val):
replace_array = []
state = 0
payload = ""
for i, c in enumerate(val):
if c == "C" and state == 0:
state = 1
elif c == ">" and state == 1:
state = 2
elif c == "{" and state == 2:
state = 3
payload = ""
elif c == "}" and state == 3:
state = 0
content = None
if payload in cls.context:
content = cls.context[payload]
replace_array.append({"from": "C>{" + payload + "}", "to": content})
elif state == 3:
payload += c
val = cls.replace_bulk(val, replace_array)
return val
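# Illustrative placeholder expansion (hypothetical context): with
# cls.context = {"db_host": "10.0.0.5"}, the string "postgres://C>{db_host}:5432"
# becomes "postgres://10.0.0.5:5432"; unknown keys resolve to None, which
# replace_bulk substitutes with an empty string.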
@classmethod
def resolve_context(cls, data):
for k, v in data.items():
if isinstance(data[k], list):
temp = []
for val in data[k]:
if isinstance(val, dict):
cls.resolve_context(val)
temp.append(val)
else:
temp.append(cls.translate_context(val))
data[k] = temp
elif isinstance(data[k], dict):
cls.resolve_context(data[k])
elif isinstance(data[k], str):
data[k] = cls.translate_context(data[k])
@classmethod
def get_json_param(cls, param, path, work_dir=None, this=None):
if path == "this":
data = this
else:
data = load_json(get_real_path(path, work_dir=work_dir))
if "params" not in data:
return None
if param not in data["params"]:
return None
return data["params"][param]
@classmethod
def translate_params(cls, val, work_dir=None, this=None):
replace_array = []
state = 0
payload = ""
for i, c in enumerate(val):
if c == "P" and state == 0:
state = 1
elif c == ">" and state == 1:
state = 2
elif c == "{" and state == 2:
state = 3