options += 'rounded corners=%s' % polygon["curve"]["corner_radius"]
bend_options = 'bend left=%s' % polygon["curve"]["bend_angle"]
points = "%s" % ((f') to [{bend_options}] (').join(polygon["points"]))
return_string += "\\draw[%s] (%s) to[%s] cycle;\n" % (options, points, bend_options)
elif polygon["curve"]["strategy"] == 'segment_bend_right':
if polygon["curve"]["corner_radius"] != 0:
if options != '':
options += ', '
options += 'rounded corners=%s' % polygon["curve"]["corner_radius"]
bend_options = 'bend right=%s' % polygon["curve"]["bend_angle"]
points = "%s" % ((f') to [{bend_options}] (').join(polygon["points"]))
return_string += "\\draw[%s] (%s) to[%s] cycle;\n" % (options, points, bend_options)
elif polygon["type"] == 'linestring':
options = ''
if polygon["o_arrow"]["tip"] != DEFAULT_SEGMENT_O_ARROW_TIP or\
polygon["d_arrow"]["tip"] != DEFAULT_SEGMENT_D_ARROW_TIP:
if options != "":
options += ", "
if polygon["o_arrow"]["tip"] != DEFAULT_SEGMENT_O_ARROW_TIP:
o_arrow_name, o_arrow_options = arrow_tip_to_tkz_option(polygon["o_arrow"]["tip"])
if polygon["o_arrow"]["length"] != DEFAULT_SEGMENT_O_ARROW_LENGTH:
if o_arrow_options != "":
o_arrow_options += ", "
o_arrow_options += "scale length=%f" % polygon["o_arrow"]["length"]
if polygon["o_arrow"]["width"] != DEFAULT_SEGMENT_O_ARROW_WIDTH:
if o_arrow_options != "":
o_arrow_options += ", "
o_arrow_options += "scale width=%f" % polygon["o_arrow"]["width"]
if polygon["o_arrow"]["side"] != DEFAULT_SEGMENT_O_ARROW_SIDE:
if o_arrow_options != "":
o_arrow_options += ", "
o_arrow_options += polygon["o_arrow"]["side"]
if polygon["o_arrow"]["reversed"]:
if o_arrow_options != "":
o_arrow_options += ", "
o_arrow_options += "reversed"
if polygon["d_arrow"]["tip"] != DEFAULT_SEGMENT_D_ARROW_TIP:
d_arrow_name, d_arrow_options = arrow_tip_to_tkz_option(polygon["d_arrow"]["tip"])
if polygon["d_arrow"]["length"] != DEFAULT_SEGMENT_D_ARROW_LENGTH:
if d_arrow_options != "":
d_arrow_options += ", "
d_arrow_options += "scale length=%f" % polygon["d_arrow"]["length"]
if polygon["d_arrow"]["width"] != DEFAULT_SEGMENT_D_ARROW_WIDTH:
if d_arrow_options != "":
d_arrow_options += ", "
d_arrow_options += "scale width=%f" % polygon["d_arrow"]["width"]
if polygon["d_arrow"]["side"] != DEFAULT_SEGMENT_D_ARROW_SIDE:
if d_arrow_options != "":
d_arrow_options += ", "
d_arrow_options += polygon["d_arrow"]["side"]
if polygon["d_arrow"]["reversed"]:
if d_arrow_options != "":
d_arrow_options += ", "
d_arrow_options += "reversed"
if polygon["o_arrow"]["tip"] != DEFAULT_SEGMENT_O_ARROW_TIP and\
polygon["d_arrow"]["tip"] == DEFAULT_SEGMENT_D_ARROW_TIP:
options += "arrows={%s[%s]-}" % (o_arrow_name, o_arrow_options)
elif polygon["o_arrow"]["tip"] == DEFAULT_SEGMENT_O_ARROW_TIP and\
polygon["d_arrow"]["tip"] != DEFAULT_SEGMENT_D_ARROW_TIP:
options += "arrows={-%s[%s]}" % (d_arrow_name, d_arrow_options)
else:
options += "arrows={%s[%s]-%s[%s]}" % (o_arrow_name, o_arrow_options, d_arrow_name, d_arrow_options)
if options != '':
options += ', '
options += common_options
if polygon["curve"]["strategy"] == 'smooth':
if options != '':
options = 'use Hobby shortcut, ' + options
else:
options = 'use Hobby shortcut'
points = "(%s)" % ((')..(').join(polygon["points"]))
return_string += "\\draw[%s] %s;\n" % (options, points)
elif polygon["curve"]["strategy"] == 'nothing':
if polygon["curve"]["corner_radius"] != 0:
if options != '':
options += ', '
options += 'rounded corners=%s' % polygon["curve"]["corner_radius"]
points = "%s" % ((')--(').join(polygon["points"]))
return_string += "\\draw[%s] (%s);\n" % (options, points)
elif polygon["curve"]["strategy"] == 'segment_in_out':
if polygon["curve"]["corner_radius"] != 0:
if options != '':
options += ', '
options += 'rounded corners=%s' % polygon["curve"]["corner_radius"]
in_out_option = 'out=%s, in=%s' % (polygon["curve"]["out_angle"], polygon["curve"]["in_angle"])
if polygon["curve"]["loop"]:
in_out_option += ", loop, min distance=%s" % polygon["curve"]["loop_size"]
points = "%s" % ((')--(').join(polygon["points"][1:]))
return_string += "\\draw[%s] (%s) to[%s] (%s);\n" % (options,polygon["points"][0], in_out_option, points)
elif polygon["curve"]["strategy"] == 'segment_bend_left':
if polygon["curve"]["corner_radius"] != 0:
if options != '':
options += ', '
options += 'rounded corners=%s' % polygon["curve"]["corner_radius"]
bend_options = 'bend left=%s' % polygon["curve"]["bend_angle"]
points = "%s" % ((f') to [{bend_options}] (').join(polygon["points"]))
return_string += "\\draw[%s] (%s);\n" % (options, points)
elif polygon["curve"]["strategy"] == 'segment_bend_right':
if polygon["curve"]["corner_radius"] != 0:
if options != '':
options += ', '
options += 'rounded corners=%s' % polygon["curve"]["corner_radius"]
bend_options = 'bend right=%s' % polygon["curve"]["bend_angle"]
points = "%s" % ((f') to [{bend_options}] (').join(polygon["points"]))
return_string += "\\draw[%s] (%s);\n" % (options, points)
if return_string != '':
return_string = '%POLYGONS/LINESTRINGS\n' + return_string
return return_string
def tikzify_functions(eucl):
return_string = ''
for function in eucl["functions"]:
if function["show"] and function["id"] != 'fct_default':
options = 'domain=%s:%s, samples=%s' % (eval(function["domain_start"]), eval(function["domain_end"]), function["samples"])
if function["line_colour_name"] != DEFAULT_SEGMENT_LINE_COLOUR_NAME or\
function["line_strength"] != DEFAULT_SEGMENT_LINE_STRENGTH:
options += ", draw=%s" % function["line_colour_name"]
if function["line_strength"] != DEFAULT_SEGMENT_LINE_STRENGTH:
options += "!%s" % function["line_strength"]
if function["line_opacity"] != DEFAULT_SEGMENT_LINE_OPACITY:
if options != "":
options += ", "
options += "draw opacity=%s" % (function["line_opacity"])
if function["line_width"] != DEFAULT_SEGMENT_LINE_WIDTH:
if options != "":
options += ", "
options += "line width=%s pt" % function["line_width"]
if function["line_stroke"] != DEFAULT_SEGMENT_LINE_STROKE:
if options != "":
options += ", "
if function["line_stroke"] == "custom":
options += "dash pattern=%s" % line_stroke_custom_to_tkz(function["line_stroke_custom"])
else:
options += str(function["line_stroke"])
if function["o_arrow"]["tip"] != DEFAULT_SEGMENT_O_ARROW_TIP or\
function["d_arrow"]["tip"] != DEFAULT_SEGMENT_D_ARROW_TIP:
if options != "":
options += ", "
if function["o_arrow"]["tip"] != DEFAULT_SEGMENT_O_ARROW_TIP:
o_arrow_name, o_arrow_options = arrow_tip_to_tkz_option(function["o_arrow"]["tip"])
if function["o_arrow"]["length"] != DEFAULT_SEGMENT_O_ARROW_LENGTH:
if o_arrow_options != "":
o_arrow_options += ", "
o_arrow_options += "scale length=%f" % function["o_arrow"]["length"]
if function["o_arrow"]["width"] != DEFAULT_SEGMENT_O_ARROW_WIDTH:
if o_arrow_options != "":
o_arrow_options += ", "
o_arrow_options += "scale width=%f" % function["o_arrow"]["width"]
if function["o_arrow"]["side"] != DEFAULT_SEGMENT_O_ARROW_SIDE:
if o_arrow_options != "":
o_arrow_options += ", "
o_arrow_options += function["o_arrow"]["side"]
if function["o_arrow"]["reversed"]:
if o_arrow_options != "":
o_arrow_options += ", "
o_arrow_options += "reversed"
if function["d_arrow"]["tip"] != DEFAULT_SEGMENT_D_ARROW_TIP:
d_arrow_name, d_arrow_options = arrow_tip_to_tkz_option(function["d_arrow"]["tip"])
if function["d_arrow"]["length"] != DEFAULT_SEGMENT_D_ARROW_LENGTH:
if d_arrow_options != "":
d_arrow_options += ", "
d_arrow_options += "scale length=%f" % function["d_arrow"]["length"]
if function["d_arrow"]["width"] != DEFAULT_SEGMENT_D_ARROW_WIDTH:
if d_arrow_options != "":
d_arrow_options += ", "
d_arrow_options += "scale width=%f" % function["d_arrow"]["width"]
if function["d_arrow"]["side"] != DEFAULT_SEGMENT_D_ARROW_SIDE:
if d_arrow_options != "":
d_arrow_options += ", "
d_arrow_options += function["d_arrow"]["side"]
if function["d_arrow"]["reversed"]:
if d_arrow_options != "":
d_arrow_options += ", "
d_arrow_options += "reversed"
if function["o_arrow"]["tip"] != DEFAULT_SEGMENT_O_ARROW_TIP and\
function["d_arrow"]["tip"] == DEFAULT_SEGMENT_D_ARROW_TIP:
options += "arrows={%s[%s]-}" % (o_arrow_name, o_arrow_options)
elif function["o_arrow"]["tip"] == DEFAULT_SEGMENT_O_ARROW_TIP and\
function["d_arrow"]["tip"] != DEFAULT_SEGMENT_D_ARROW_TIP:
options += "arrows={-%s[%s]}" % (d_arrow_name, d_arrow_options)
else:
options += "arrows={%s[%s]-%s[%s]}" % (o_arrow_name, o_arrow_options, d_arrow_name, d_arrow_options)
return_string += '\\begin{scope}\n'
all_pattern_options = ''
if not function["pattern"]["type"] in ['none', 'solid']:
if function["fill_colour_name"] != DEFAULT_POINT_FILL_COLOUR_NAME or\
function["fill_strength"] != DEFAULT_POINT_FILL_STRENGTH:
all_pattern_options += "pattern color=%s" % function["fill_colour_name"]
if function["fill_strength"] != DEFAULT_POINT_FILL_STRENGTH:
all_pattern_options += "!%s" % function["fill_strength"]
if all_pattern_options != '':
all_pattern_options += ', '
if not function["pattern"]["type"] in\
['Lines', 'Hatch', 'Dots', 'Fivepointed stars', 'Sixpointed stars']:
all_pattern_options += 'pattern=%s' % function["pattern"]["type"]
else:
pattern_options = ''
if function["pattern"]["rotation"] != DEFAULT_PATTERN_ROTATION:
pattern_options += 'angle=%s' % function["pattern"]["rotation"]
if function["pattern"]["distance"] != DEFAULT_PATTERN_DISTANCE:
if pattern_options != '':
pattern_options += ', '
pattern_options += 'distance=%s' % function["pattern"]["distance"]
if function["pattern"]["type"] in ['Fivepointed stars', 'Sixpointed stars']:
pattern_options += ' mm'
if function["pattern"]["xshift"] != DEFAULT_PATTERN_XSHIFT:
if pattern_options != '':
pattern_options += ', '
pattern_options += 'xshift=%s' % function["pattern"]["xshift"]
if function["pattern"]["yshift"] != DEFAULT_PATTERN_YSHIFT:
if pattern_options != '':
pattern_options += ', '
pattern_options += 'yshift=%s' % function["pattern"]["yshift"]
if function["pattern"]["type"] in ['Dots', 'Fivepointed stars', 'Sixpointed stars']:
if function["pattern"]["size"] != DEFAULT_PATTERN_SIZE:
if pattern_options != '':
pattern_options += ', '
pattern_options += 'radius=%s mm' % function["pattern"]["size"]
else:
if function["pattern"]["size"] != DEFAULT_PATTERN_SIZE:
if pattern_options != '':
pattern_options += ', '
pattern_options += 'line width=%s' % function["pattern"]["size"]
if function["pattern"]["type"] == 'Fivepointed stars':
if pattern_options != '':
pattern_options += ', '
pattern_options += 'points=5'
if function["pattern"]["type"] == 'Sixpointed stars':
if pattern_options != '':
pattern_options += ', '
pattern_options += 'points=6'
if function["pattern"]["type"] in ['Sixpointed stars', 'Fivepointed stars']:
all_pattern_options += 'pattern={Stars[%s]}' % pattern_options
else:
all_pattern_options += 'pattern={%s[%s]}' % (function["pattern"]["type"], pattern_options)
if function["pattern"]["type"] != 'none':
if function["pattern"]["type"] == 'solid':
if all_pattern_options != "":
all_pattern_options += ", "
all_pattern_options += "fill=%s" % function["fill_colour_name"]
if function["fill_strength"] != DEFAULT_POINT_FILL_STRENGTH:
all_pattern_options += "!%s" % function["fill_strength"]
all_pattern_options += ", fill opacity=%s, draw opacity=%s" % (function["fill_opacity"], function["fill_opacity"])
if function["type"] == 'yfx':
if all_pattern_options != '':
if function["between"] != -1:
return_string += '\\begin{scope}\n'
return_string += '\\tkzFct[%s]{%s}\n' % (options,function["def"])
func = get_item_from_id(eucl, function["between"], 'f')
return_string += '\\tkzFct[domain=%s:%s, samples=%s]{%s}\n' % (eval(func["domain_start"]), eval(func["domain_end"]), func["samples"], func["def"])
return_string += '\\tkzDrawAreafg[between= a and b, %s, domain=%s:%s]\n' % (all_pattern_options, eval(function["area_start"]),eval(function["area_end"]))
return_string += '\\end{scope}\n'
return_string += '\\tkzFct[%s]{%s}\n' % (options,function["def"])
if all_pattern_options != '' and function["between"] == -1:
return_string += '\\tkzDrawArea[%s, domain=%s:%s]\n' % (all_pattern_options, eval(function["area_start"]),eval(function["area_end"]))
elif function["type"] == 'polar':
return_string += '\\tkzFctPolar[%s]{%s}\n' % (options+', '+all_pattern_options,function["def"])
elif function["type"] == 'parametric':
func = function["def"].split('||')
return_string += '\\tkzFctPar[%s]{%s}{%s}\n' % (options+', '+all_pattern_options, func[0], func[1])
if function["type"] == 'yfx':
if function["sum"]["type"] != DEFAULT_FUNCTION_TYPE:
options = 'interval=%s:%s, number=%s' % (eval(function["sum"]["start"]), eval(function["sum"]["end"]), function["sum"]["number"])
options += ", draw=%s" % function["sum"]["line_colour_name"]
if function["sum"]["line_strength"] != DEFAULT_SEGMENT_LINE_STRENGTH:
options += "!%s" % function["sum"]["line_strength"]
if function["sum"]["fill_colour_name"] != 'same':
if options != "":
options += ", "
options += "fill=%s" % function["sum"]["fill_colour_name"]
if function["sum"]["fill_strength"] != DEFAULT_POINT_FILL_STRENGTH:
options += "!%s" % function["sum"]["fill_strength"]
options += ", fill opacity=%s" % function["sum"]["fill_opacity"]
if function["sum"]["line_opacity"] != DEFAULT_POINT_LINE_OPACITY:
if options != "":
options += ", "
options += "draw opacity=%s" % function["sum"]["line_opacity"]
if function["sum"]["type"] == 'sup':
return_string += '\\tkzDrawRiemannSumSup[%s]' % options
if function["sum"]["type"] == 'inf':
return_string += '\\tkzDrawRiemannSumInf[%s]' % options
        if
import datetime
from datetime import timedelta
from decimal import Decimal
from bs4 import BeautifulSoup
from django.conf import settings
from django.test import TestCase
from django.utils.timezone import now
from pretix.base.models import (
CartPosition, Event, Item, ItemCategory, ItemVariation, Organizer,
Question, QuestionAnswer, Quota, Voucher,
)
class CartTestMixin:
def setUp(self):
super().setUp()
self.orga = Organizer.objects.create(name='CCC', slug='ccc')
self.event = Event.objects.create(
organizer=self.orga, name='30C3', slug='30c3',
date_from=datetime.datetime(2013, 12, 26, tzinfo=datetime.timezone.utc),
live=True
)
self.category = ItemCategory.objects.create(event=self.event, name="Everything", position=0)
self.quota_shirts = Quota.objects.create(event=self.event, name='Shirts', size=2)
self.shirt = Item.objects.create(event=self.event, name='T-Shirt', category=self.category, default_price=12)
self.quota_shirts.items.add(self.shirt)
self.shirt_red = ItemVariation.objects.create(item=self.shirt, default_price=14, value='Red')
self.shirt_blue = ItemVariation.objects.create(item=self.shirt, value='Blue')
self.quota_shirts.variations.add(self.shirt_red)
self.quota_shirts.variations.add(self.shirt_blue)
self.quota_tickets = Quota.objects.create(event=self.event, name='Tickets', size=5)
self.ticket = Item.objects.create(event=self.event, name='Early-bird ticket',
category=self.category, default_price=23)
self.quota_tickets.items.add(self.ticket)
self.quota_all = Quota.objects.create(event=self.event, name='All', size=None)
self.quota_all.items.add(self.ticket)
self.quota_all.items.add(self.shirt)
self.quota_all.variations.add(self.shirt_blue)
self.quota_all.variations.add(self.shirt_red)
self.client.get('/%s/%s/' % (self.orga.slug, self.event.slug))
self.session_key = self.client.cookies.get(settings.SESSION_COOKIE_NAME).value
class CartTest(CartTestMixin, TestCase):
def test_after_presale(self):
self.event.presale_end = now() - timedelta(days=1)
self.event.save()
response = self.client.post('/%s/%s/cart/add' % (self.orga.slug, self.event.slug), {
'item_%d' % self.ticket.id: '1'
}, follow=True)
self.assertRedirects(response, '/%s/%s/' % (self.orga.slug, self.event.slug),
target_status_code=200)
assert 'alert-danger' in response.rendered_content
assert not CartPosition.objects.filter(cart_id=self.session_key, event=self.event).exists()
def test_before_presale(self):
self.event.presale_start = now() + timedelta(days=1)
self.event.save()
response = self.client.post('/%s/%s/cart/add' % (self.orga.slug, self.event.slug), {
'item_%d' % self.ticket.id: '1'
}, follow=True)
self.assertRedirects(response, '/%s/%s/' % (self.orga.slug, self.event.slug),
target_status_code=200)
assert 'alert-danger' in response.rendered_content
assert not CartPosition.objects.filter(cart_id=self.session_key, event=self.event).exists()
def test_simple(self):
response = self.client.post('/%s/%s/cart/add' % (self.orga.slug, self.event.slug), {
'item_%d' % self.ticket.id: '1'
}, follow=True)
self.assertRedirects(response, '/%s/%s/' % (self.orga.slug, self.event.slug),
target_status_code=200)
doc = BeautifulSoup(response.rendered_content, "lxml")
self.assertIn('Early-bird', doc.select('.cart .cart-row')[0].select('strong')[0].text)
self.assertIn('1', doc.select('.cart .cart-row')[0].select('.count')[0].text)
self.assertIn('23', doc.select('.cart .cart-row')[0].select('.price')[0].text)
self.assertIn('23', doc.select('.cart .cart-row')[0].select('.price')[1].text)
objs = list(CartPosition.objects.filter(cart_id=self.session_key, event=self.event))
self.assertEqual(len(objs), 1)
self.assertEqual(objs[0].item, self.ticket)
self.assertIsNone(objs[0].variation)
self.assertEqual(objs[0].price, 23)
def test_free_price(self):
self.ticket.free_price = True
self.ticket.save()
response = self.client.post('/%s/%s/cart/add' % (self.orga.slug, self.event.slug), {
'item_%d' % self.ticket.id: '1',
'price_%d' % self.ticket.id: '24.00'
}, follow=True)
self.assertRedirects(response, '/%s/%s/' % (self.orga.slug, self.event.slug),
target_status_code=200)
doc = BeautifulSoup(response.rendered_content, "lxml")
self.assertIn('Early-bird', doc.select('.cart .cart-row')[0].select('strong')[0].text)
self.assertIn('1', doc.select('.cart .cart-row')[0].select('.count')[0].text)
self.assertIn('24', doc.select('.cart .cart-row')[0].select('.price')[0].text)
self.assertIn('24', doc.select('.cart .cart-row')[0].select('.price')[1].text)
objs = list(CartPosition.objects.filter(cart_id=self.session_key, event=self.event))
self.assertEqual(len(objs), 1)
self.assertEqual(objs[0].item, self.ticket)
self.assertIsNone(objs[0].variation)
self.assertEqual(objs[0].price, 24)
def test_free_price_only_if_allowed(self):
self.ticket.free_price = False
self.ticket.save()
response = self.client.post('/%s/%s/cart/add' % (self.orga.slug, self.event.slug), {
'item_%d' % self.ticket.id: '1',
'price_%d' % self.ticket.id: '24.00'
}, follow=True)
self.assertRedirects(response, '/%s/%s/' % (self.orga.slug, self.event.slug),
target_status_code=200)
doc = BeautifulSoup(response.rendered_content, "lxml")
self.assertIn('Early-bird', doc.select('.cart .cart-row')[0].select('strong')[0].text)
self.assertIn('1', doc.select('.cart .cart-row')[0].select('.count')[0].text)
self.assertIn('23', doc.select('.cart .cart-row')[0].select('.price')[0].text)
self.assertIn('23', doc.select('.cart .cart-row')[0].select('.price')[1].text)
objs = list(CartPosition.objects.filter(cart_id=self.session_key, event=self.event))
self.assertEqual(len(objs), 1)
self.assertEqual(objs[0].item, self.ticket)
self.assertIsNone(objs[0].variation)
self.assertEqual(objs[0].price, 23)
def test_free_price_lower_bound(self):
self.ticket.free_price = False
self.ticket.save()
response = self.client.post('/%s/%s/cart/add' % (self.orga.slug, self.event.slug), {
'item_%d' % self.ticket.id: '1',
'price_%d' % self.ticket.id: '12.00'
}, follow=True)
self.assertRedirects(response, '/%s/%s/' % (self.orga.slug, self.event.slug),
target_status_code=200)
doc = BeautifulSoup(response.rendered_content, "lxml")
self.assertIn('Early-bird', doc.select('.cart .cart-row')[0].select('strong')[0].text)
self.assertIn('1', doc.select('.cart .cart-row')[0].select('.count')[0].text)
self.assertIn('23', doc.select('.cart .cart-row')[0].select('.price')[0].text)
self.assertIn('23', doc.select('.cart .cart-row')[0].select('.price')[1].text)
objs = list(CartPosition.objects.filter(cart_id=self.session_key, event=self.event))
self.assertEqual(len(objs), 1)
self.assertEqual(objs[0].item, self.ticket)
self.assertIsNone(objs[0].variation)
self.assertEqual(objs[0].price, 23)
def test_variation_inactive(self):
self.shirt_red.active = False
self.shirt_red.save()
response = self.client.post('/%s/%s/cart/add' % (self.orga.slug, self.event.slug), {
'variation_%d_%d' % (self.shirt.id, self.shirt_red.id): '1'
}, follow=True)
self.assertRedirects(response, '/%s/%s/' % (self.orga.slug, self.event.slug),
target_status_code=200)
objs = list(CartPosition.objects.filter(cart_id=self.session_key, event=self.event))
self.assertEqual(len(objs), 0)
def test_variation(self):
response = self.client.post('/%s/%s/cart/add' % (self.orga.slug, self.event.slug), {
'variation_%d_%d' % (self.shirt.id, self.shirt_red.id): '1'
}, follow=True)
self.assertRedirects(response, '/%s/%s/' % (self.orga.slug, self.event.slug),
target_status_code=200)
doc = BeautifulSoup(response.rendered_content, "lxml")
self.assertIn('Shirt', doc.select('.cart .cart-row')[0].select('strong')[0].text)
self.assertIn('Red', doc.select('.cart .cart-row')[0].text)
self.assertIn('1', doc.select('.cart .cart-row')[0].select('.count')[0].text)
self.assertIn('14', doc.select('.cart .cart-row')[0].select('.price')[0].text)
self.assertIn('14', doc.select('.cart .cart-row')[0].select('.price')[1].text)
objs = list(CartPosition.objects.filter(cart_id=self.session_key, event=self.event))
self.assertEqual(len(objs), 1)
self.assertEqual(objs[0].item, self.shirt)
self.assertEqual(objs[0].variation, self.shirt_red)
self.assertEqual(objs[0].price, 14)
def test_variation_free_price(self):
self.shirt.free_price = True
self.shirt.save()
response = self.client.post('/%s/%s/cart/add' % (self.orga.slug, self.event.slug), {
'variation_%d_%d' % (self.shirt.id, self.shirt_red.id): '1',
'price_%d_%d' % (self.shirt.id, self.shirt_red.id): '16',
}, follow=True)
self.assertRedirects(response, '/%s/%s/' % (self.orga.slug, self.event.slug),
target_status_code=200)
doc = BeautifulSoup(response.rendered_content, "lxml")
self.assertIn('Shirt', doc.select('.cart .cart-row')[0].select('strong')[0].text)
self.assertIn('Red', doc.select('.cart .cart-row')[0].text)
self.assertIn('1', doc.select('.cart .cart-row')[0].select('.count')[0].text)
self.assertIn('16', doc.select('.cart .cart-row')[0].select('.price')[0].text)
self.assertIn('16', doc.select('.cart .cart-row')[0].select('.price')[1].text)
objs = list(CartPosition.objects.filter(cart_id=self.session_key, event=self.event))
self.assertEqual(len(objs), 1)
self.assertEqual(objs[0].item, self.shirt)
self.assertEqual(objs[0].variation, self.shirt_red)
self.assertEqual(objs[0].price, 16)
def test_count(self):
response = self.client.post('/%s/%s/cart/add' % (self.orga.slug, self.event.slug), {
'item_%d' % self.ticket.id: '2'
}, follow=True)
self.assertRedirects(response, '/%s/%s/' % (self.orga.slug, self.event.slug),
target_status_code=200)
doc = BeautifulSoup(response.rendered_content, "lxml")
self.assertIn('Early-bird', doc.select('.cart .cart-row')[0].select('strong')[0].text)
self.assertIn('2', doc.select('.cart .cart-row')[0].select('.count')[0].text)
self.assertIn('23', doc.select('.cart .cart-row')[0].select('.price')[0].text)
self.assertIn('46', doc.select('.cart .cart-row')[0].select('.price')[1].text)
objs = list(CartPosition.objects.filter(cart_id=self.session_key, event=self.event))
self.assertEqual(len(objs), 2)
for obj in objs:
self.assertEqual(obj.item, self.ticket)
self.assertIsNone(obj.variation)
self.assertEqual(obj.price, 23)
def test_multiple(self):
response = self.client.post('/%s/%s/cart/add' % (self.orga.slug, self.event.slug), {
'item_%d' % self.ticket.id: '2',
'variation_%d_%d' % (self.shirt.id, self.shirt_red.id): '1'
}, follow=True)
self.assertRedirects(response, '/%s/%s/' % (self.orga.slug, self.event.slug),
target_status_code=200)
doc = BeautifulSoup(response.rendered_content, "lxml")
self.assertIn('Early-bird', doc.select('.cart')[0].text)
self.assertIn('Shirt', doc.select('.cart')[0].text)
objs = list(CartPosition.objects.filter(cart_id=self.session_key, event=self.event))
self.assertEqual(len(objs), 3)
self.assertIn(self.shirt, [obj.item for obj in objs])
self.assertIn(self.shirt_red, [obj.variation for obj in objs])
self.assertIn(self.ticket, [obj.item for obj in objs])
def test_fuzzy_input(self):
response = self.client.post('/%s/%s/cart/add' % (self.orga.slug, self.event.slug), {
'item_%d' % self.ticket.id: 'a',
}, follow=True)
self.assertRedirects(response, '/%s/%s/' % (self.orga.slug, self.event.slug),
target_status_code=200)
doc = BeautifulSoup(response.rendered_content, "lxml")
self.assertIn('numbers only', doc.select('.alert-danger')[0].text)
self.assertFalse(CartPosition.objects.filter(cart_id=self.session_key, event=self.event).exists())
response = self.client.post('/%s/%s/cart/add' % (self.orga.slug, self.event.slug), {
'item_%d' % self.ticket.id: '-2',
}, follow=True)
self.assertRedirects(response, '/%s/%s/' % (self.orga.slug, self.event.slug),
target_status_code=200)
doc = BeautifulSoup(response.rendered_content, "lxml")
self.assertIn('numbers only', doc.select('.alert-danger')[0].text)
self.assertFalse(CartPosition.objects.filter(cart_id=self.session_key, event=self.event).exists())
response = self.client.post('/%s/%s/cart/add' % (self.orga.slug, self.event.slug), {
'variation_%d_%d' % (self.shirt.id, self.shirt_blue.id): 'a',
}, follow=True)
self.assertRedirects(response, '/%s/%s/' % (self.orga.slug, self.event.slug),
target_status_code=200)
doc = BeautifulSoup(response.rendered_content, "lxml")
self.assertIn('numbers only', doc.select('.alert-danger')[0].text)
self.assertFalse(CartPosition.objects.filter(cart_id=self.session_key, event=self.event).exists())
response = self.client.post('/%s/%s/cart/add' % (self.orga.slug, self.event.slug), {
'variation_a_%d' % (self.shirt_blue.id): '-2',
}, follow=True)
self.assertRedirects(response, '/%s/%s/' % (self.orga.slug, self.event.slug),
target_status_code=200)
doc = BeautifulSoup(response.rendered_content, "lxml")
self.assertIn('numbers only', doc.select('.alert-danger')[0].text)
self.assertFalse(CartPosition.objects.filter(cart_id=self.session_key, event=self.event).exists())
response = self.client.post('/%s/%s/cart/add' % (self.orga.slug, self.event.slug), {
}, follow=True)
self.assertRedirects(response, '/%s/%s/' % (self.orga.slug, self.event.slug),
target_status_code=200)
doc = BeautifulSoup(response.rendered_content, "lxml")
self.assertIn('did not select any products', doc.select('.alert-warning')[0].text)
self.assertFalse(CartPosition.objects.filter(cart_id=self.session_key, event=self.event).exists())
def test_wrong_event(self):
event2 = Event.objects.create(
organizer=self.orga, name='MRMCD', slug='mrmcd',
date_from=datetime.datetime(2014, 9, 6, tzinfo=datetime.timezone.utc)
)
shirt2 = Item.objects.create(event=event2, name='T-Shirt', default_price=12)
response = self.client.post('/%s/%s/cart/add' % (self.orga.slug, self.event.slug), {
'item_%d' % shirt2.id: '1',
}, follow=True)
self.assertRedirects(response, '/%s/%s/' % (self.orga.slug, self.event.slug),
target_status_code=200)
doc = BeautifulSoup(response.rendered_content, "lxml")
self.assertIn('not available', doc.select('.alert-danger')[0].text)
self.assertFalse(CartPosition.objects.filter(cart_id=self.session_key, event=self.event).exists())
def test_no_quota(self):
shirt2 = Item.objects.create(event=self.event, name='T-Shirt', default_price=12)
response = self.client.post('/%s/%s/cart/add' % (self.orga.slug, self.event.slug), {
'item_%d' % shirt2.id: '1',
}, follow=True)
self.assertRedirects(response, '/%s/%s/' % (self.orga.slug, self.event.slug),
target_status_code=200)
doc = BeautifulSoup(response.rendered_content, "lxml")
self.assertIn('no longer available', doc.select('.alert-danger')[0].text)
self.assertFalse(CartPosition.objects.filter(cart_id=self.session_key, event=self.event).exists())
def test_in_time_available(self):
self.ticket.available_until = now() + timedelta(days=2)
self.ticket.available_from = now() - timedelta(days=2)
self.ticket.save()
self.client.post('/%s/%s/cart/add' % (self.orga.slug, self.event.slug), {
'item_%d' % self.ticket.id: '1',
}, follow=True)
self.assertEqual(CartPosition.objects.filter(cart_id=self.session_key, event=self.event).count(), 1)
def test_no_longer_available(self):
self.ticket.available_until = now() - timedelta(days=2)
self.ticket.save()
self.client.post('/%s/%s/cart/add' % (self.orga.slug, self.event.slug), {
'item_%d' % self.ticket.id: '1',
}, follow=True)
self.assertEqual(CartPosition.objects.filter(cart_id=self.session_key, event=self.event).count(), 0)
def test_not_yet_available(self):
self.ticket.available_from = now() + timedelta(days=2)
self.ticket.save()
self.client.post('/%s/%s/cart/add' % (self.orga.slug, self.event.slug), {
'item_%d' % self.ticket.id: '1',
}, follow=True)
self.assertEqual(CartPosition.objects.filter(cart_id=self.session_key, event=self.event).count(), 0)
def test_max_items(self):
CartPosition.objects.create(
event=self.event, cart_id=self.session_key, item=self.ticket,
price=23, expires=now() + timedelta(minutes=10)
)
self.event.settings.max_items_per_order = 5
response = self.client.post('/%s/%s/cart/add' % (self.orga.slug, self.event.slug), {
'item_%d' % self.ticket.id: '5',
}, follow=True)
self.assertRedirects(response, '/%s/%s/' % (self.orga.slug, self.event.slug),
target_status_code=200)
doc = BeautifulSoup(response.rendered_content, "lxml")
self.assertIn('more than', doc.select('.alert-danger')[0].text)
self.assertEqual(CartPosition.objects.filter(cart_id=self.session_key, event=self.event).count(), 1)
def test_max_per_item_failed(self):
self.ticket.max_per_order = 2
self.ticket.save()
CartPosition.objects.create(
event=self.event, cart_id=self.session_key, item=self.ticket,
price=23, expires=now() + timedelta(minutes=10)
)
response = self.client.post('/%s/%s/cart/add' % (self.orga.slug, self.event.slug), {
'item_%d' % self.ticket.id: '2',
}, follow=True)
self.assertRedirects(response, '/%s/%s/' % (self.orga.slug, self.event.slug),
target_status_code=200)
doc = BeautifulSoup(response.rendered_content, "lxml")
self.assertIn('more than', doc.select('.alert-danger')[0].text)
self.assertEqual(CartPosition.objects.filter(cart_id=self.session_key, event=self.event).count(), 1)
def test_max_per_item_success(self):
self.ticket.max_per_order = 3
self.ticket.save()
CartPosition.objects.create(
event=self.event, cart_id=self.session_key, item=self.ticket,
price=23, expires=now() + timedelta(minutes=10)
)
response = self.client.post('/%s/%s/cart/add' % (self.orga.slug, self.event.slug), {
'item_%d' % self.ticket.id: '2',
}, follow=True)
self.assertRedirects(response, '/%s/%s/' % (self.orga.slug, self.event.slug),
target_status_code=200)
self.assertEqual(CartPosition.objects.filter(cart_id=self.session_key, event=self.event).count(), 3)
def test_quota_full(self):
self.quota_tickets.size = 0
self.quota_tickets.save()
response = self.client.post('/%s/%s/cart/add' % (self.orga.slug, self.event.slug), {
'item_%d' % self.ticket.id: '1',
}, follow=True)
self.assertRedirects(response, '/%s/%s/' % (self.orga.slug, self.event.slug),
target_status_code=200)
doc = BeautifulSoup(response.rendered_content, "lxml")
self.assertIn('no longer available', doc.select('.alert-danger')[0].text)
self.assertFalse(CartPosition.objects.filter(cart_id=self.session_key, event=self.event).exists())
def test_quota_partly(self):
self.quota_tickets.size = 1
self.quota_tickets.save()
response = self.client.post('/%s/%s/cart/add' % (self.orga.slug, self.event.slug), {
'item_%d' % self.ticket.id: '2'
}, follow=True)
self.assertRedirects(response, '/%s/%s/' % (self.orga.slug, self.event.slug),
target_status_code=200)
doc = BeautifulSoup(response.rendered_content, "lxml")
self.assertIn('no longer available', doc.select('.alert-danger')[0].text)
self.assertIn('Early-bird', doc.select('.cart .cart-row')[0].select('strong')[0].text)
self.assertIn('1', doc.select('.cart .cart-row')[0].select('.count')[0].text)
self.assertIn('23', doc.select('.cart .cart-row')[0].select('.price')[0].text)
self.assertIn('23', doc.select('.cart .cart-row')[0].select('.price')[1].text)
objs = list(CartPosition.objects.filter(cart_id=self.session_key, event=self.event))
self.assertEqual(len(objs), 1)
self.assertEqual(objs[0].item, self.ticket)
self.assertIsNone(objs[0].variation)
self.assertEqual(objs[0].price, 23)
def test_renew_in_time(self):
cp = CartPosition.objects.create(
event=self.event, cart_id=self.session_key, item=self.ticket,
price=23, expires=now() + timedelta(minutes=10)
)
self.client.post('/%s/%s/cart/add' % (self.orga.slug, self.event.slug), {
'variation_%d_%d' % (self.shirt.id, self.shirt_red.id): '1'
}, follow=True)
cp.refresh_from_db()
self.assertGreater(cp.expires, now() + timedelta(minutes=10))
def test_renew_expired_successfully(self):
cp1 = CartPosition.objects.create(
event=self.event, cart_id=self.session_key, item=self.ticket,
price=23, expires=now() - timedelta(minutes=10)
)
self.client.post('/%s/%s/cart/add' % (self.orga.slug, self.event.slug), {
'variation_%d_%d' % (self.shirt.id, self.shirt_red.id): '1'
}, follow=True)
obj = CartPosition.objects.get(id=cp1.id)
self.assertEqual(obj.item, self.ticket)
self.assertIsNone(obj.variation)
self.assertEqual(obj.price, 23)
self.assertGreater(obj.expires, now())
def test_renew_questions(self):
cr1 = CartPosition.objects.create(
event=self.event, cart_id=self.session_key, item=self.ticket,
price=23, expires=now() - timedelta(minutes=10)
)
q1 = Question.objects.create(
event=self.event, question='Age', type=Question.TYPE_NUMBER,
required=True
)
self.ticket.questions.add(q1)
cr1.answers.add(QuestionAnswer.objects.create(
cartposition=cr1, question=q1, answer='23'
))
self.client.post('/%s/%s/cart/add' % (self.orga.slug, self.event.slug), {
'item_%d' % self.ticket.id: '1',
}, follow=True)
obj = CartPosition.objects.get(id=cr1.id)
self.assertEqual(obj.answers.get(question=q1).answer, '23')
def test_renew_expired_failed(self):
self.quota_tickets.size = 0
self.quota_tickets.save()
        cp1
1), 2), 2)
sol = np.array([[5, 5], [5, 5]], dtype='i4')
out = xr.DataArray(sol, coords=[lincoords, logcoords],
dims=['y', 'log_x'])
assert_eq_xr(c_logx.points(ddf, 'log_x', 'y', ds.count('i32')), out)
out = xr.DataArray(sol, coords=[logcoords, lincoords],
dims=['log_y', 'x'])
assert_eq_xr(c_logy.points(ddf, 'x', 'log_y', ds.count('i32')), out)
out = xr.DataArray(sol, coords=[logcoords, logcoords],
dims=['log_y', 'log_x'])
assert_eq_xr(c_logxy.points(ddf, 'log_x', 'log_y', ds.count('i32')), out)
@pytest.mark.skipif(not sp, reason="spatialpandas not installed")
def test_points_geometry():
axis = ds.core.LinearAxis()
lincoords = axis.compute_index(axis.compute_scale_and_translate((0., 2.), 3), 3)
ddf = dd.from_pandas(sp.GeoDataFrame({
'geom': pd.array(
[[0, 0], [0, 1, 1, 1], [0, 2, 1, 2, 2, 2]], dtype='MultiPoint[float64]'),
'v': [1, 2, 3]
}), npartitions=3)
cvs = ds.Canvas(plot_width=3, plot_height=3)
agg = cvs.points(ddf, geometry='geom', agg=ds.sum('v'))
sol = np.array([[1, nan, nan],
[2, 2, nan],
[3, 3, 3]], dtype='float64')
out = xr.DataArray(sol, coords=[lincoords, lincoords],
dims=['y', 'x'])
assert_eq_xr(agg, out)
@pytest.mark.parametrize('DataFrame', DataFrames)
def test_line(DataFrame):
axis = ds.core.LinearAxis()
lincoords = axis.compute_index(axis.compute_scale_and_translate((-3., 3.), 7), 7)
ddf = DataFrame({'x': [4, 0, -4, -3, -2, -1.9, 0, 10, 10, 0, 4],
'y': [0, -4, 0, 1, 2, 2.1, 4, 20, 30, 4, 0]})
cvs = ds.Canvas(plot_width=7, plot_height=7,
x_range=(-3, 3), y_range=(-3, 3))
agg = cvs.line(ddf, 'x', 'y', ds.count())
sol = np.array([[0, 0, 1, 0, 1, 0, 0],
[0, 1, 0, 0, 0, 1, 0],
[1, 0, 0, 0, 0, 0, 1],
[0, 0, 0, 0, 0, 0, 0],
[1, 0, 0, 0, 0, 0, 1],
[0, 2, 0, 0, 0, 1, 0],
[0, 0, 1, 0, 1, 0, 0]], dtype='i4')
out = xr.DataArray(sol, coords=[lincoords, lincoords],
dims=['y', 'x'])
assert_eq_xr(agg, out)
# # Line tests
line_manual_range_params = [
# axis1 none constant
(dict(data={
'x0': [4, -4, 4],
'x1': [0, 0, 0],
'x2': [-4, 4, -4],
'y0': [0, 0, 0],
'y1': [-4, 4, 0],
'y2': [0, 0, 0]
}), dict(x=['x0', 'x1', 'x2'], y=['y0', 'y1', 'y2'], axis=1)),
# axis1 x constant
(dict(data={
'y0': [0, 0, 0],
'y1': [0, 4, -4],
'y2': [0, 0, 0]
}), dict(x=np.array([-4, 0, 4]), y=['y0', 'y1', 'y2'], axis=1)),
# axis0 single
(dict(data={
'x': [4, 0, -4, np.nan, -4, 0, 4, np.nan, 4, 0, -4],
'y': [0, -4, 0, np.nan, 0, 4, 0, np.nan, 0, 0, 0],
}), dict(x='x', y='y', axis=0)),
# axis0 multi
(dict(data={
'x0': [4, 0, -4],
'x1': [-4, 0, 4],
'x2': [4, 0, -4],
'y0': [0, -4, 0],
'y1': [0, 4, 0],
'y2': [0, 0, 0]
}), dict(x=['x0', 'x1', 'x2'], y=['y0', 'y1', 'y2'], axis=0)),
# axis0 multi with string
(dict(data={
'x0': [-4, 0, 4],
'y0': [0, -4, 0],
'y1': [0, 4, 0],
'y2': [0, 0, 0]
}), dict(x='x0', y=['y0', 'y1', 'y2'], axis=0)),
# axis1 RaggedArray
(dict(data={
'x': [[4, 0, -4], [-4, 0, 4, 4, 0, -4]],
'y': [[0, -4, 0], [0, 4, 0, 0, 0, 0]],
}, dtype='Ragged[int64]'), dict(x='x', y='y', axis=1)),
]
if sp:
line_manual_range_params.append(
# geometry
(dict(data={
'geom': [[4, 0, 0, -4, -4, 0],
[-4, 0, 0, 4, 4, 0, 4, 0, 0, 0, -4, 0]]
}, dtype='Line[int64]'), dict(geometry='geom'))
)
@pytest.mark.parametrize('DataFrame', DataFrames)
@pytest.mark.parametrize('df_kwargs,cvs_kwargs', line_manual_range_params)
def test_line_manual_range(DataFrame, df_kwargs, cvs_kwargs):
if DataFrame is dask_cudf_DataFrame:
dtype = df_kwargs.get('dtype', '')
if dtype.startswith('Ragged') or dtype.startswith('Line'):
pytest.skip("Ragged array not supported with cudf")
axis = ds.core.LinearAxis()
lincoords = axis.compute_index(axis.compute_scale_and_translate((-3., 3.), 7), 7)
ddf = DataFrame(geo='geometry' in cvs_kwargs, **df_kwargs)
cvs = ds.Canvas(plot_width=7, plot_height=7,
x_range=(-3, 3), y_range=(-3, 3))
agg = cvs.line(ddf, agg=ds.count(), **cvs_kwargs)
sol = np.array([[0, 0, 1, 0, 1, 0, 0],
[0, 1, 0, 0, 0, 1, 0],
[1, 0, 0, 0, 0, 0, 1],
[1, 1, 1, 1, 1, 1, 1],
[1, 0, 0, 0, 0, 0, 1],
[0, 1, 0, 0, 0, 1, 0],
[0, 0, 1, 0, 1, 0, 0]], dtype='i4')
out = xr.DataArray(sol, coords=[lincoords, lincoords],
dims=['y', 'x'])
assert_eq_xr(agg, out)
line_autorange_params = [
# axis1 none constant
(dict(data={
'x0': [0, 0, 0],
'x1': [-4, 0, 4],
'x2': [0, 0, 0],
'y0': [-4, 4, -4],
'y1': [0, 0, 0],
'y2': [4, -4, 4]
}), dict(x=['x0', 'x1', 'x2'], y=['y0', 'y1', 'y2'], axis=1)),
# axis1 y constant
(dict(data={
'x0': [0, 0, 0],
'x1': [-4, 0, 4],
'x2': [0, 0, 0],
}), dict(x=['x0', 'x1', 'x2'], y=np.array([-4, 0, 4]), axis=1)),
# axis0 single
(dict(data={
'x': [0, -4, 0, np.nan, 0, 0, 0, np.nan, 0, 4, 0],
'y': [-4, 0, 4, np.nan, 4, 0, -4, np.nan, -4, 0, 4],
}), dict(x='x', y='y', axis=0)),
# axis0 multi
(dict(data={
'x0': [0, -4, 0],
'x1': [0, 0, 0],
'x2': [0, 4, 0],
'y0': [-4, 0, 4],
'y1': [4, 0, -4],
'y2': [-4, 0, 4]
}), dict(x=['x0', 'x1', 'x2'], y=['y0', 'y1', 'y2'], axis=0)),
# axis0 multi with string
(dict(data={
'x0': [0, -4, 0],
'x1': [0, 0, 0],
'x2': [0, 4, 0],
'y0': [-4, 0, 4]
}), dict(x=['x0', 'x1', 'x2'], y='y0', axis=0)),
# axis1 RaggedArray
(dict(data={
'x': [[0, -4, 0], [0, 0, 0], [0, 4, 0]],
'y': [[-4, 0, 4], [4, 0, -4], [-4, 0, 4]],
}, dtype='Ragged[int64]'), dict(x='x', y='y', axis=1)),
]
if sp:
line_autorange_params.append(
# geometry
(dict(data={
'geom': [[0, -4, -4, 0, 0, 4],
[0, 4, 0, 0, 0, -4],
[0, -4, 4, 0, 0, 4]]
}, dtype='Line[int64]'), dict(geometry='geom'))
)
@pytest.mark.parametrize('DataFrame', DataFrames)
@pytest.mark.parametrize('df_kwargs,cvs_kwargs', line_autorange_params)
def test_line_autorange(DataFrame, df_kwargs, cvs_kwargs):
if DataFrame is dask_cudf_DataFrame:
dtype = df_kwargs.get('dtype', '')
if dtype.startswith('Ragged') or dtype.startswith('Line'):
pytest.skip("Ragged array not supported with cudf")
axis = ds.core.LinearAxis()
lincoords = axis.compute_index(
axis.compute_scale_and_translate((-4., 4.), 9), 9)
ddf = DataFrame(geo='geometry' in cvs_kwargs, **df_kwargs)
cvs = ds.Canvas(plot_width=9, plot_height=9)
agg = cvs.line(ddf, agg=ds.count(), **cvs_kwargs)
sol = np.array([[0, 0, 0, 0, 3, 0, 0, 0, 0],
[0, 0, 0, 1, 1, 1, 0, 0, 0],
[0, 0, 1, 0, 1, 0, 1, 0, 0],
[0, 1, 0, 0, 1, 0, 0, 1, 0],
[1, 0, 0, 0, 1, 0, 0, 0, 1],
[0, 1, 0, 0, 1, 0, 0, 1, 0],
[0, 0, 1, 0, 1, 0, 1, 0, 0],
[0, 0, 0, 1, 1, 1, 0, 0, 0],
[0, 0, 0, 0, 3, 0, 0, 0, 0]], dtype='i4')
out = xr.DataArray(sol, coords=[lincoords, lincoords],
dims=['y', 'x'])
assert_eq_xr(agg, out)
@pytest.mark.parametrize('DataFrame', DataFrames)
def test_line_x_constant_autorange(DataFrame):
# axis1 y constant
x = np.array([-4, 0, 4])
y = ['y0', 'y1', 'y2']
ax = 1
axis = ds.core.LinearAxis()
lincoords = axis.compute_index(
axis.compute_scale_and_translate((-4., 4.), 9), 9)
ddf = DataFrame({
'y0': [0, 0, 0],
'y1': [-4, 0, 4],
'y2': [0, 0, 0],
})
cvs = ds.Canvas(plot_width=9, plot_height=9)
agg = cvs.line(ddf, x, y, ds.count(), axis=ax)
sol = np.array([[0, 0, 0, 0, 1, 0, 0, 0, 0],
[0, 0, 0, 1, 0, 1, 0, 0, 0],
[0, 0, 1, 0, 0, 0, 1, 0, 0],
[0, 1, 0, 0, 0, 0, 0, 1, 0],
[3, 1, 1, 1, 1, 1, 1, 1, 3],
[0, 1, 0, 0, 0, 0, 0, 1, 0],
[0, 0, 1, 0, 0, 0, 1, 0, 0],
[0, 0, 0, 1, 0, 1, 0, 0, 0],
[0, 0, 0, 0, 1, 0, 0, 0, 0]], dtype='i4')
out = xr.DataArray(sol, coords=[lincoords, lincoords],
dims=['y', 'x'])
assert_eq_xr(agg, out)
@pytest.mark.parametrize('ddf', ddfs)
def test_log_axis_line(ddf):
axis = ds.core.LogAxis()
logcoords = axis.compute_index(axis.compute_scale_and_translate((1, 10), 2), 2)
axis = ds.core.LinearAxis()
lincoords = axis.compute_index(axis.compute_scale_and_translate((0, 1), 2), 2)
sol = np.array([[4, 5], [5, 5]], dtype='i4')
out = xr.DataArray(sol, coords=[lincoords, logcoords],
dims=['y', 'log_x'])
assert_eq_xr(c_logx.line(ddf, 'log_x', 'y', ds.count('i32')), out)
out = xr.DataArray(sol, coords=[logcoords, lincoords],
dims=['log_y', 'x'])
assert_eq_xr(c_logy.line(ddf, 'x', 'log_y', ds.count('i32')), out)
out = xr.DataArray(sol, coords=[logcoords, logcoords],
dims=['log_y', 'log_x'])
assert_eq_xr(c_logxy.line(ddf, 'log_x', 'log_y', ds.count('i32')), out)
@pytest.mark.parametrize('DataFrame', DataFrames)
def test_auto_range_line(DataFrame):
axis = ds.core.LinearAxis()
lincoords = axis.compute_index(axis.compute_scale_and_translate((-10., 10.), 5), 5)
ddf = DataFrame({'x': [-10, 0, 10, 0, -10],
'y': [ 0, 10, 0, -10, 0]})
cvs = ds.Canvas(plot_width=5, plot_height=5)
agg = cvs.line(ddf, 'x', 'y', ds.count())
sol = np.array([[0, 0, 1, 0, 0],
[0, 1, 0, 1, 0],
[2, 0, 0, 0, 1],
[0, 1, 0, 1, 0],
[0, 0, 1, 0, 0]], dtype='i4')
out = xr.DataArray(sol, coords=[lincoords, lincoords],
dims=['y', 'x'])
assert_eq_xr(agg, out)
@pytest.mark.parametrize('DataFrame', DataFrames)
@pytest.mark.parametrize('df_kwargs,cvs_kwargs', [
# axis1 none constant
(dict(data={
'x0': [-4, np.nan],
'x1': [-2, 2],
'x2': [0, 4],
'y0': [0, np.nan],
'y1': [-4, 4],
'y2': [0, 0]
}, dtype='float32'), dict(x=['x0', 'x1', 'x2'], y=['y0', 'y1', 'y2'], axis=1)),
# axis0 single
(dict(data={
'x': [-4, -2, 0, np.nan, 2, 4],
'y': [0, -4, 0, np.nan, 4, 0],
}), dict(x='x', y='y', axis=0)),
# axis0 multi
(dict(data={
'x0': [-4, -2, 0],
'x1': [np.nan, 2, 4],
'y0': [0, -4, 0],
'y1': [np.nan, 4, 0],
    }, dtype='float32'), dict(x=['x0',
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Dict, List, Mapping, Optional, Tuple, Union
from .. import _utilities, _tables
__all__ = [
'GetPolicyDocumentStatementArgs',
'GetPolicyDocumentStatementConditionArgs',
'GetPolicyDocumentStatementNotPrincipalArgs',
'GetPolicyDocumentStatementPrincipalArgs',
]
@pulumi.input_type
class GetPolicyDocumentStatementArgs:
def __init__(__self__, *,
actions: Optional[List[str]] = None,
conditions: Optional[List['GetPolicyDocumentStatementConditionArgs']] = None,
effect: Optional[str] = None,
not_actions: Optional[List[str]] = None,
not_principals: Optional[List['GetPolicyDocumentStatementNotPrincipalArgs']] = None,
not_resources: Optional[List[str]] = None,
principals: Optional[List['GetPolicyDocumentStatementPrincipalArgs']] = None,
resources: Optional[List[str]] = None,
sid: Optional[str] = None):
"""
:param List[str] actions: A list of actions that this statement either allows
or denies. For example, ``["ec2:RunInstances", "s3:*"]``.
:param List['GetPolicyDocumentStatementConditionArgs'] conditions: A nested configuration block (described below)
that defines a further, possibly-service-specific condition that constrains
whether this statement applies.
:param str effect: Either "Allow" or "Deny", to specify whether this
statement allows or denies the given actions. The default is "Allow".
:param List[str] not_actions: A list of actions that this statement does *not*
apply to. Used to apply a policy statement to all actions *except* those
listed.
:param List['GetPolicyDocumentStatementNotPrincipalArgs'] not_principals: Like `principals` except gives principals that
the statement does *not* apply to.
:param List[str] not_resources: A list of resource ARNs that this statement
does *not* apply to. Used to apply a policy statement to all resources
*except* those listed.
:param List['GetPolicyDocumentStatementPrincipalArgs'] principals: A nested configuration block (described below)
specifying a principal (or principal pattern) to which this statement applies.
:param List[str] resources: A list of resource ARNs that this statement applies
to. This is required by AWS if used for an IAM policy.
:param str sid: An ID for the policy statement.
"""
if actions is not None:
pulumi.set(__self__, "actions", actions)
if conditions is not None:
pulumi.set(__self__, "conditions", conditions)
if effect is not None:
pulumi.set(__self__, "effect", effect)
if not_actions is not None:
pulumi.set(__self__, "not_actions", not_actions)
if not_principals is not None:
pulumi.set(__self__, "not_principals", not_principals)
if not_resources is not None:
pulumi.set(__self__, "not_resources", not_resources)
if principals is not None:
pulumi.set(__self__, "principals", principals)
if resources is not None:
pulumi.set(__self__, "resources", resources)
if sid is not None:
pulumi.set(__self__, "sid", sid)
@property
@pulumi.getter
def actions(self) -> Optional[List[str]]:
"""
A list of actions that this statement either allows
or denies. For example, ``["ec2:RunInstances", "s3:*"]``.
"""
return pulumi.get(self, "actions")
@actions.setter
def actions(self, value: Optional[List[str]]):
pulumi.set(self, "actions", value)
@property
@pulumi.getter
def conditions(self) -> Optional[List['GetPolicyDocumentStatementConditionArgs']]:
"""
A nested configuration block (described below)
that defines a further, possibly-service-specific condition that constrains
whether this statement applies.
"""
return pulumi.get(self, "conditions")
@conditions.setter
def conditions(self, value: Optional[List['GetPolicyDocumentStatementConditionArgs']]):
pulumi.set(self, "conditions", value)
@property
@pulumi.getter
def effect(self) -> Optional[str]:
"""
Either "Allow" or "Deny", to specify whether this
statement allows or denies the given actions. The default is "Allow".
"""
return pulumi.get(self, "effect")
@effect.setter
def effect(self, value: Optional[str]):
pulumi.set(self, "effect", value)
@property
@pulumi.getter(name="notActions")
def not_actions(self) -> Optional[List[str]]:
"""
A list of actions that this statement does *not*
apply to. Used to apply a policy statement to all actions *except* those
listed.
"""
return pulumi.get(self, "not_actions")
@not_actions.setter
def not_actions(self, value: Optional[List[str]]):
pulumi.set(self, "not_actions", value)
@property
@pulumi.getter(name="notPrincipals")
def not_principals(self) -> Optional[List['GetPolicyDocumentStatementNotPrincipalArgs']]:
"""
Like `principals` except gives principals that
the statement does *not* apply to.
"""
return pulumi.get(self, "not_principals")
@not_principals.setter
def not_principals(self, value: Optional[List['GetPolicyDocumentStatementNotPrincipalArgs']]):
pulumi.set(self, "not_principals", value)
@property
@pulumi.getter(name="notResources")
def not_resources(self) -> Optional[List[str]]:
"""
A list of resource ARNs that this statement
does *not* apply to. Used to apply a policy statement to all resources
*except* those listed.
"""
return pulumi.get(self, "not_resources")
@not_resources.setter
def not_resources(self, value: Optional[List[str]]):
pulumi.set(self, "not_resources", value)
@property
@pulumi.getter
def principals(self) -> Optional[List['GetPolicyDocumentStatementPrincipalArgs']]:
"""
A nested configuration block (described below)
specifying a principal (or principal pattern) to which this statement applies.
"""
return pulumi.get(self, "principals")
@principals.setter
def principals(self, value: Optional[List['GetPolicyDocumentStatementPrincipalArgs']]):
pulumi.set(self, "principals", value)
@property
@pulumi.getter
def resources(self) -> Optional[List[str]]:
"""
A list of resource ARNs that this statement applies
to. This is required by AWS if used for an IAM policy.
"""
return pulumi.get(self, "resources")
@resources.setter
def resources(self, value: Optional[List[str]]):
pulumi.set(self, "resources", value)
@property
@pulumi.getter
def sid(self) -> Optional[str]:
"""
An ID for the policy statement.
"""
return pulumi.get(self, "sid")
@sid.setter
def sid(self, value: Optional[str]):
pulumi.set(self, "sid", value)
@pulumi.input_type
class GetPolicyDocumentStatementConditionArgs:
def __init__(__self__, *,
test: str,
values: List[str],
variable: str):
"""
:param str test: The name of the
[IAM condition operator](https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_policies_elements_condition_operators.html)
to evaluate.
:param List[str] values: The values to evaluate the condition against. If multiple
values are provided, the condition matches if at least one of them applies.
(That is, the tests are combined with the "OR" boolean operation.)
:param str variable: The name of a
[Context Variable](http://docs.aws.amazon.com/IAM/latest/UserGuide/reference_policies_elements.html#AvailableKeys)
to apply the condition to. Context variables may either be standard AWS
variables starting with `aws:`, or service-specific variables prefixed with
the service name.
"""
pulumi.set(__self__, "test", test)
pulumi.set(__self__, "values", values)
pulumi.set(__self__, "variable", variable)
@property
@pulumi.getter
def test(self) -> str:
"""
The name of the
[IAM condition operator](https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_policies_elements_condition_operators.html)
to evaluate.
"""
return pulumi.get(self, "test")
@test.setter
def test(self, value: str):
pulumi.set(self, "test", value)
@property
@pulumi.getter
def values(self) -> List[str]:
"""
The values to evaluate the condition against. If multiple
values are provided, the condition matches if at least one of them applies.
(That is, the tests are combined with the "OR" boolean operation.)
"""
return pulumi.get(self, "values")
@values.setter
def values(self, value: List[str]):
pulumi.set(self, "values", value)
@property
@pulumi.getter
def variable(self) -> str:
"""
The name of a
[Context Variable](http://docs.aws.amazon.com/IAM/latest/UserGuide/reference_policies_elements.html#AvailableKeys)
to apply the condition to. Context variables may either be standard AWS
variables starting with `aws:`, or service-specific variables prefixed with
the service name.
"""
return pulumi.get(self, "variable")
@variable.setter
def variable(self, value: str):
pulumi.set(self, "variable", value)
@pulumi.input_type
class GetPolicyDocumentStatementNotPrincipalArgs:
def __init__(__self__, *,
identifiers: List[str],
type: str):
"""
:param List[str] identifiers: List of identifiers for principals. When `type`
is "AWS", these are IAM user or role ARNs. When `type` is "Service", these are AWS Service roles e.g. `lambda.amazonaws.com`. When `type` is "Federated", these are web identity users or SAML provider ARNs.
:param str type: The type of principal. For AWS ARNs this is "AWS". For AWS services (e.g. Lambda), this is "Service". For Federated access the type is "Federated".
"""
pulumi.set(__self__, "identifiers", identifiers)
pulumi.set(__self__, "type", type)
@property
@pulumi.getter
def identifiers(self) -> List[str]:
"""
List of identifiers for principals. When `type`
is "AWS", these are IAM user or role ARNs. When `type` is "Service", these are AWS Service roles e.g. `lambda.amazonaws.com`. When `type` is "Federated", these are web identity users or SAML provider ARNs.
"""
return pulumi.get(self, "identifiers")
@identifiers.setter
def identifiers(self, value: List[str]):
pulumi.set(self, "identifiers", value)
@property
@pulumi.getter
def type(self) -> str:
"""
The type of principal. For AWS ARNs this is "AWS". For AWS services (e.g. Lambda), this is "Service". For Federated access the type is "Federated".
"""
return pulumi.get(self, "type")
@type.setter
def type(self, value: str):
pulumi.set(self, "type", value)
@pulumi.input_type
class GetPolicyDocumentStatementPrincipalArgs:
def __init__(__self__, *,
identifiers: List[str],
type: str):
"""
:param List[str] identifiers: List of identifiers for principals. When `type`
is "AWS", these are IAM user or role ARNs. When `type` is "Service", these are AWS Service roles e.g. `lambda.amazonaws.com`. When `type` is "Federated", these are web identity users or SAML provider ARNs.
:param str type: The type of principal. For AWS ARNs this is "AWS". For AWS services (e.g. Lambda), this is "Service". For Federated access the type is "Federated".
"""
pulumi.set(__self__, "identifiers", identifiers)
pulumi.set(__self__, "type", type)
@property
@pulumi.getter
def identifiers(self) -> List[str]:
"""
List of identifiers for principals. When `type`
is "AWS", these are IAM user or role ARNs. When `type` is "Service", these are AWS Service roles e.g. `lambda.amazonaws.com`. When `type` is "Federated", these are web identity users or SAML provider ARNs.
"""
return pulumi.get(self, "identifiers")
@identifiers.setter
def identifiers(self, value: List[str]):
pulumi.set(self, "identifiers", value)
@property
@pulumi.getter
def type(self) -> str:
"""
The type of principal. For AWS ARNs this is "AWS". For AWS services (e.g. Lambda), this is "Service". For Federated access the type is "Federated".
"""
return pulumi.get(self, "type")
@type.setter
while cacheStatus != Results.CacheStatus.VALID and time_now < timeout:
gevent.sleep()
time_now = monotonic()
if cacheStatus == Results.CacheStatus.VALID:
if win_condition == WinCondition.NONE:
leaderboard = random.sample(results['by_race_time'], len(results['by_race_time']))
else:
leaderboard = results[results['meta']['primary_leaderboard']]
generated_heats = []
unplaced_pilots = []
new_heat = {}
assigned_pilots = 0
available_nodes = []
profile_freqs = json.loads(getCurrentProfile().frequencies)
for node_index in range(RACE.num_nodes):
if profile_freqs["f"][node_index] != RHUtils.FREQUENCY_ID_NONE:
available_nodes.append(node_index)
pilots_per_heat = min(pilots_per_heat, RACE.num_nodes, len(available_nodes))
for i,row in enumerate(leaderboard, start=1):
logger.debug("Placing {0} into heat {1}".format(row['pilot_id'], len(generated_heats)))
if row['node'] in new_heat or row['node'] not in available_nodes:
unplaced_pilots.append(row['pilot_id'])
else:
new_heat[row['node']] = row['pilot_id']
assigned_pilots += 1
if assigned_pilots >= pilots_per_heat or i == len(leaderboard):
# find slots for unassigned pilots
if len(unplaced_pilots):
for pilot in unplaced_pilots:
for index in available_nodes:
if index in new_heat:
continue
else:
new_heat[index] = pilot
break
# heat is full, flush and start next heat
generated_heats.append(new_heat)
unplaced_pilots = []
new_heat = {}
assigned_pilots = 0
# commit generated heats to database, lower seeds first
letters = __('ABCDEFGHIJKLMNOPQRSTUVWXYZ')
for idx, heat in enumerate(reversed(generated_heats), start=1):
ladder = letters[len(generated_heats) - idx]
new_heat = Database.Heat(class_id=output_class, cacheStatus=Results.CacheStatus.INVALID, note=ladder + ' ' + suffix)
DB.session.add(new_heat)
DB.session.flush()
DB.session.refresh(new_heat)
for node in range(RACE.num_nodes): # Add pilots
if node in heat:
DB.session.add(Database.HeatNode(heat_id=new_heat.id, node_index=node, pilot_id=heat[node]))
else:
DB.session.add(Database.HeatNode(heat_id=new_heat.id, node_index=node, pilot_id=Database.PILOT_ID_NONE))
DB.session.commit()
logger.info("Generated {0} heats from class {1}".format(len(generated_heats), input_class))
SOCKET_IO.emit('heat_generate_done')
Events.trigger(Evt.HEAT_GENERATE)
emit_heat_data()
else:
logger.warning("Unable to generate heats from class {0}: can't get valid results".format(input_class))
SOCKET_IO.emit('heat_generate_done')
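# Illustrative sketch (assumed helper, not part of the original module): the commit
# loop above walks the generated heats in reverse, so the heat holding the top seeds
# is written to the database last and receives the letter 'A'.
def _example_ladder_names(num_heats, suffix='Main'):
    letters = 'ABCDEFGHIJKLMNOPQRSTUVWXYZ'
    # mirror the reversed commit order: lower-seeded heats are named first
    return [letters[num_heats - idx] + ' ' + suffix
            for idx in range(1, num_heats + 1)]
# _example_ladder_names(3, 'Main') -> ['C Main', 'B Main', 'A Main']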
@SOCKET_IO.on('delete_lap')
def on_delete_lap(data):
'''Delete a false lap.'''
node_index = data['node']
lap_index = data['lap_index']
RACE.node_laps[node_index][lap_index]['deleted'] = True
time = RACE.node_laps[node_index][lap_index]['lap_time_stamp']
lap_number = 0
for lap in RACE.node_laps[node_index]:
if not lap['deleted']:
lap['lap_number'] = lap_number
lap_number += 1
else:
lap['lap_number'] = None
db_last = False
db_next = False
for lap in RACE.node_laps[node_index]:
if not lap['deleted']:
if lap['lap_time_stamp'] < time:
db_last = lap
if lap['lap_time_stamp'] > time:
db_next = lap
break
if db_next and db_last:
db_next['lap_time'] = db_next['lap_time_stamp'] - db_last['lap_time_stamp']
db_next['lap_time_formatted'] = RHUtils.time_format(db_next['lap_time'])
elif db_next:
db_next['lap_time'] = db_next['lap_time_stamp']
db_next['lap_time_formatted'] = RHUtils.time_format(db_next['lap_time'])
Events.trigger(Evt.LAP_DELETE, {
'race': RACE,
'node_index': node_index,
})
logger.info('Lap deleted: Node {0} Lap {1}'.format(node_index+1, lap_index))
RACE.cacheStatus = Results.CacheStatus.INVALID # refresh leaderboard
emit_current_laps() # Race page, update web client
emit_current_leaderboard() # Race page, update web client
race_format = getCurrentRaceFormat()
if race_format.team_racing_mode:
# update team-racing status info
if race_format.win_condition != WinCondition.MOST_LAPS: # if not Most Laps Wins race
if race_format.number_laps_win > 0: # if number-laps-win race
t_laps_dict, team_name, pilot_team_dict = get_team_laps_info(-1, race_format.number_laps_win)
check_team_laps_win(t_laps_dict, race_format.number_laps_win, pilot_team_dict)
else:
t_laps_dict = get_team_laps_info()[0]
else: # if Most Laps Wins race enabled
t_laps_dict, t_name, pilot_team_dict = get_team_laps_info(-1, RACE.winning_lap_id)
if ms_from_race_start() > race_format.race_time_sec*1000: # if race done
check_most_laps_win(node_index, t_laps_dict, pilot_team_dict)
check_emit_team_racing_status(t_laps_dict)
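# Minimal sketch (assumed helper, not part of the original module) of the timestamp
# arithmetic used above: once a lap is deleted, the next remaining lap's time becomes
# the gap back to the previous remaining lap.
def _example_recompute_next_lap(prev_lap_time_stamp, next_lap_time_stamp):
    # e.g. _example_recompute_next_lap(30000, 95000) -> 65000 (milliseconds)
    return next_lap_time_stamp - prev_lap_time_stamp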
@SOCKET_IO.on('simulate_lap')
def on_simulate_lap(data):
'''Simulates a lap (for debug testing).'''
node_index = data['node']
logger.info('Simulated lap: Node {0}'.format(node_index+1))
Events.trigger(Evt.CROSSING_EXIT, {
'nodeIndex': node_index,
'color': hexToColor(Options.get('colorNode_' + str(node_index), '#ffffff'))
})
INTERFACE.intf_simulate_lap(node_index, 0)
@SOCKET_IO.on('LED_solid')
def on_LED_solid(data):
'''LED Solid Color'''
led_red = data['red']
led_green = data['green']
led_blue = data['blue']
on_use_led_effect({
'effect': "stripColor",
'args': {
'color': Color(led_red,led_green,led_blue),
'pattern': ColorPattern.SOLID,
'time': None
}
})
@SOCKET_IO.on('LED_chase')
def on_LED_chase(data):
'''LED Solid Color Chase'''
led_red = data['red']
led_green = data['green']
led_blue = data['blue']
on_use_led_effect({
'effect': "stripColor",
'args': {
'color': Color(led_red,led_green,led_blue),
# 'pattern': ColorPattern.CHASE, # TODO implement chase animation pattern
'pattern': ColorPattern.ALTERNATING,
'time': 5
}
})
@SOCKET_IO.on('LED_RB')
def on_LED_RB():
'''LED rainbow'''
on_use_led_effect({
'effect': "rainbow",
'args': {
'time': 5
}
})
@SOCKET_IO.on('LED_RBCYCLE')
def on_LED_RBCYCLE():
'''LED rainbow Cycle'''
on_use_led_effect({
'effect': "rainbowCycle",
'args': {
'time': 5
}
})
@SOCKET_IO.on('LED_RBCHASE')
def on_LED_RBCHASE():
'''LED Rainbow Cycle Chase'''
on_use_led_effect({
'effect': "rainbowCycleChase",
'args': {
'time': 5
}
})
@SOCKET_IO.on('LED_brightness')
def on_LED_brightness(data):
'''Change LED Brightness'''
brightness = data['brightness']
strip.setBrightness(brightness)
strip.show()
Options.set("ledBrightness", brightness)
Events.trigger(Evt.LED_BRIGHTNESS_SET, {
'level': brightness,
})
@SOCKET_IO.on('set_option')
def on_set_option(data):
Options.set(data['option'], data['value'])
Events.trigger(Evt.OPTION_SET, {
'option': data['option'],
'value': data['value'],
})
@SOCKET_IO.on('get_race_scheduled')
def get_race_elapsed():
# get current race status; never broadcasts to all
emit('race_scheduled', {
'scheduled': RACE.scheduled,
'scheduled_at': RACE.scheduled_time
})
@SOCKET_IO.on('save_callouts')
def save_callouts(data):
# save callouts to Options
callouts = json.dumps(data['callouts'])
Options.set('voiceCallouts', callouts)
logger.info('Set all voice callouts')
logger.debug('Voice callouts set to: {0}'.format(callouts))
@SOCKET_IO.on('imdtabler_update_freqs')
def imdtabler_update_freqs(data):
''' Update IMDTabler page with new frequencies list '''
emit_imdtabler_data(data['freq_list'].replace(',',' ').split())
@SOCKET_IO.on('clean_cache')
def clean_results_cache():
    ''' expose cache wiping for frontend debugging '''
Results.invalidate_all_caches(DB)
# Socket io emit functions
def emit_priority_message(message, interrupt=False, **params):
''' Emits message to all clients '''
emit_payload = {
'message': message,
'interrupt': interrupt
}
if ('nobroadcast' in params):
emit('priority_message', emit_payload)
else:
if interrupt:
Events.trigger(Evt.MESSAGE_INTERRUPT, {
'message': message,
'interrupt': interrupt
})
else:
Events.trigger(Evt.MESSAGE_STANDARD, {
'message': message,
'interrupt': interrupt
})
SOCKET_IO.emit('priority_message', emit_payload)
def emit_race_status(**params):
'''Emits race status.'''
race_format = getCurrentRaceFormat()
emit_payload = {
'race_status': RACE.race_status,
'race_mode': race_format.race_mode,
'race_time_sec': race_format.race_time_sec,
'race_staging_tones': race_format.staging_tones,
'hide_stage_timer': race_format.start_delay_min != race_format.start_delay_max,
'pi_starts_at_s': RACE.start_time_monotonic
}
if ('nobroadcast' in params):
emit('race_status', emit_payload)
else:
SOCKET_IO.emit('race_status', emit_payload)
def emit_frequency_data(**params):
    '''Emits frequency data.'''
profile_freqs = json.loads(getCurrentProfile().frequencies)
emit_payload = {
'frequency': profile_freqs["f"][:RACE.num_nodes]
}
if ('nobroadcast' in params):
emit('frequency_data', emit_payload)
else:
SOCKET_IO.emit('frequency_data', emit_payload)
# if IMDTabler.java available then trigger call to
# 'emit_imdtabler_rating' via heartbeat function:
if Use_imdtabler_jar_flag:
heartbeat_thread_function.imdtabler_flag = True
def emit_node_data(**params):
'''Emits node data.'''
emit_payload = {
'node_peak_rssi': [node.node_peak_rssi for node in INTERFACE.nodes],
'node_nadir_rssi': [node.node_nadir_rssi for node in INTERFACE.nodes],
'pass_peak_rssi': [node.pass_peak_rssi for node in INTERFACE.nodes],
'pass_nadir_rssi': [node.pass_nadir_rssi for node in INTERFACE.nodes],
'debug_pass_count': [node.debug_pass_count for node in INTERFACE.nodes]
}
if ('nobroadcast' in params):
emit('node_data', emit_payload)
else:
SOCKET_IO.emit('node_data', emit_payload)
def emit_environmental_data(**params):
'''Emits environmental data.'''
emit_payload = []
for sensor in INTERFACE.sensors:
emit_payload.append({sensor.name: sensor.getReadings()})
if ('nobroadcast' in params):
emit('environmental_data', emit_payload)
else:
SOCKET_IO.emit('environmental_data', emit_payload)
def emit_enter_and_exit_at_levels(**params):
'''Emits enter-at and exit-at levels for nodes.'''
profile = getCurrentProfile()
profile_enter_ats = json.loads(profile.enter_ats)
profile_exit_ats = json.loads(profile.exit_ats)
emit_payload = {
'enter_at_levels': profile_enter_ats["v"][:RACE.num_nodes],
'exit_at_levels': profile_exit_ats["v"][:RACE.num_nodes]
}
if ('nobroadcast' in params):
emit('enter_and_exit_at_levels', emit_payload)
else:
SOCKET_IO.emit('enter_and_exit_at_levels', emit_payload)
def emit_node_tuning(**params):
'''Emits node tuning values.'''
tune_val = getCurrentProfile()
emit_payload = {
'profile_ids': [profile.id for profile in Database.Profiles.query.all()],
'profile_names': [profile.name for profile in Database.Profiles.query.all()],
'current_profile': int(Options.get('currentProfile')),
'profile_name': tune_val.name,
'profile_description': tune_val.description
}
if ('nobroadcast' in params):
emit('node_tuning', emit_payload)
else:
SOCKET_IO.emit('node_tuning', emit_payload)
def emit_language(**params):
    '''Emits the current language and the list of available languages.'''
emit_payload = {
'language': Options.get("currentLanguage"),
'languages': Language.getLanguages()
}
if ('nobroadcast' in params):
emit('language', emit_payload)
else:
SOCKET_IO.emit('language', emit_payload)
def emit_all_languages(**params):
'''Emits full language dictionary.'''
emit_payload = {
'languages': Language.getAllLanguages()
}
if ('nobroadcast' in params):
emit('all_languages', emit_payload)
else:
SOCKET_IO.emit('all_languages', emit_payload)
def emit_min_lap(**params):
'''Emits current minimum lap.'''
emit_payload = {
'min_lap': Options.get('MinLapSec'),
'min_lap_behavior': int(Options.get("MinLapBehavior"))
}
if ('nobroadcast' in params):
emit('min_lap', emit_payload)
else:
SOCKET_IO.emit('min_lap', emit_payload)
def emit_race_format(**params):
'''Emits race format values.'''
race_format = getCurrentRaceFormat()
is_db_race_format = RHRaceFormat.isDbBased(race_format)
has_race = not is_db_race_format or Database.SavedRaceMeta.query.filter_by(format_id=race_format.id).first()
if has_race:
locked = True
else:
locked = False
emit_payload = {
'format_ids': [raceformat.id for raceformat in Database.RaceFormat.query.all()],
'format_names': [raceformat.name for raceformat in Database.RaceFormat.query.all()],
'current_format': race_format.id if is_db_race_format else None,
'format_name': race_format.name,
'race_mode': race_format.race_mode,
'race_time_sec': race_format.race_time_sec,
'start_delay_min': race_format.start_delay_min,
'start_delay_max': race_format.start_delay_max,
'staging_tones': race_format.staging_tones,
'number_laps_win': race_format.number_laps_win,
'win_condition': race_format.win_condition,
'team_racing_mode': 1 if race_format.team_racing_mode else 0,
'locked': locked
}
if ('nobroadcast' in params):
emit('race_format', emit_payload)
else:
SOCKET_IO.emit('race_format', emit_payload)
emit_team_racing_stat_if_enb()
emit_current_leaderboard()
def emit_race_formats(**params):
'''Emits all race formats.'''
formats = Database.RaceFormat.query.all()
emit_payload = {}
for race_format in formats:
format_copy = {
'format_name': race_format.name,
'race_mode': race_format.race_mode,
'race_time_sec': race_format.race_time_sec,
'start_delay_min': race_format.start_delay_min,
'start_delay_max': race_format.start_delay_max,
'staging_tones': race_format.staging_tones,
'number_laps_win': race_format.number_laps_win,
'win_condition': race_format.win_condition,
'team_racing_mode': 1 if race_format.team_racing_mode else 0,
}
has_race = Database.SavedRaceMeta.query.filter_by(format_id=race_format.id).first()
if has_race:
format_copy['locked'] = True
else:
format_copy['locked'] = False
emit_payload[race_format.id] = format_copy
if ('nobroadcast' in params):
emit('race_formats', emit_payload)
else:
SOCKET_IO.emit('race_formats', emit_payload)
def emit_current_laps(**params):
'''Emits current laps.'''
global RACE
if 'use_cache' in params and RACE.last_race_cacheStatus == Results.CacheStatus.VALID:
emit_payload = RACE.last_race_laps
else:
current_laps = []
for node in range(RACE.num_nodes):
node_laps = []
fastest_lap_time = float("inf")
fastest_lap_index = None
last_lap_id = -1
for idx, lap in enumerate(RACE.node_laps[node]):
if not lap['deleted']:
splits = get_splits(node, lap['lap_number'], True)
node_laps.append({
'lap_index': idx,
'lap_number': lap['lap_number'],
'lap_raw': lap['lap_time'],
'lap_time': lap['lap_time_formatted'],
'lap_time_stamp': lap['lap_time_stamp'],
'splits': splits
})
last_lap_id = lap['lap_number']
if lap['lap_time'] > 0 and idx > 0 and lap['lap_time'] < fastest_lap_time:
fastest_lap_time = lap['lap_time']
fastest_lap_index = idx
splits = get_splits(node, last_lap_id+1, False)
if splits:
node_laps.append({
'lap_number': last_lap_id+1,
'lap_time': '',
'lap_time_stamp': 0,
'splits': splits
})
current_laps.append({
'laps': node_laps,
'fastest_lap_index': fastest_lap_index,
})
current_laps = {
'node_index': current_laps
}
emit_payload = current_laps
RACE.last_race_laps = current_laps
if ('nobroadcast' in params):
emit('current_laps', emit_payload)
else:
SOCKET_IO.emit('current_laps', emit_payload)
def get_splits(node, lap_id, lapCompleted):
splits = []
for slave_index in range(len(CLUSTER.slaves)):
split = Database.LapSplit.query.filter_by(node_index=node,lap_id=lap_id,split_id=slave_index).one_or_none()
if split:
split_payload = {
'split_id': slave_index,
'split_raw': split.split_time,
'split_time': split.split_time_formatted,
                'split_speed': '{0:.2f}'.format(split.split_speed) if split.split_speed is not None else '-'
}
elif lapCompleted:
split_payload = {
'split_id': slave_index,
'split_time': '-'
}
else:
break
splits.append(split_payload)
return splits
def emit_race_list(**params):
'''Emits race listing'''
heats = {}
for heat in Database.SavedRaceMeta.query.with_entities(Database.SavedRaceMeta.heat_id).distinct().order_by(Database.SavedRaceMeta.heat_id):
import py_trees
import random
import carla
from srunner.scenariomanager.carla_data_provider import CarlaDataProvider
from srunner.scenariomanager.scenarioatomics.atomic_behaviors import \
ActorDestroy, ActorTransformSetter, ActorRotationSetter, KeepVelocity
from srunner.scenariomanager.scenarioatomics.atomic_criteria import \
CollisionTest, MaxSimTimeTest
from srunner.scenariomanager.scenarioatomics.atomic_trigger_conditions import \
DriveDistance, InTriggerDistanceToLocation, InTriggerDistanceToVehicle, \
InTriggerRegion, StandStill
from srunner.scenarios.basic_scenario import BasicScenario
LEFT_PEDESTRIAN_LOCATIONS = [
carla.Location(x=78.412392, y=323.170654, z=0.178421),
carla.Location(x=68.418488, y=322.162109, z=0.178421),
carla.Location(x=242.724152, y=317.176208, z=0.178421),
carla.Location(x=153.504883, y=323.505646, z=0.178421),
carla.Location(x=96.705582, y=314.969849, z=0.178421),
carla.Location(x=97.432373, y=322.244598, z=0.178421),
carla.Location(x=281.080444, y=319.154968, z=0.178421),
carla.Location(x=47.836460, y=321.374847, z=0.178421),
carla.Location(x=258.185059, y=318.115021, z=0.178421),
carla.Location(x=251.518860, y=323.782043, z=0.178421),
carla.Location(x=159.216705, y=323.816833, z=0.178421),
carla.Location(x=187.085022, y=319.443115, z=0.127962),
carla.Location(x=33.870499, y=322.020935, z=0.178421),
carla.Location(x=167.093018, y=323.526306, z=0.178421),
carla.Location(x=289.059448, y=323.221558, z=0.178421),
carla.Location(x=223.962784, y=322.296570, z=0.178421),
carla.Location(x=59.481533, y=322.036041, z=0.178421),
carla.Location(x=251.175629, y=314.260284, z=0.178421),
carla.Location(x=299.531342, y=316.869080, z=0.178421),
carla.Location(x=322.869507, y=318.211365, z=0.178421),
carla.Location(x=321.886169, y=317.148987, z=0.178421),
carla.Location(x=265.808594, y=319.808716, z=0.178421),
carla.Location(x=367.223083, y=322.735931, z=0.178421),
carla.Location(x=68.458603, y=323.804840, z=0.178421),
carla.Location(x=85.879257, y=318.511047, z=0.178421),
carla.Location(x=237.172943, y=321.682190, z=0.178421),
carla.Location(x=325.161774, y=313.713806, z=0.178421),
carla.Location(x=29.981731, y=322.035217, z=0.178421),
carla.Location(x=218.551392, y=323.960327, z=0.178421),
carla.Location(x=389.259979, y=315.975922, z=0.178421),
carla.Location(x=51.546616, y=324.047668, z=0.178421),
carla.Location(x=95.118416, y=314.533630, z=0.178421),
carla.Location(x=304.154114, y=318.742523, z=0.178421),
carla.Location(x=293.317535, y=316.227295, z=0.178421),
carla.Location(x=128.972229, y=322.892853, z=0.178421),
carla.Location(x=249.703064, y=313.867462, z=0.178421),
carla.Location(x=322.273193, y=322.278992, z=0.178421),
carla.Location(x=272.732605, y=320.105988, z=0.178421),
carla.Location(x=14.837142, y=321.624939, z=0.178421),
carla.Location(x=190.210724, y=321.813049, z=0.178421),
carla.Location(x=184.097626, y=323.372925, z=0.178421),
carla.Location(x=175.718842, y=321.901978, z=0.178421),
carla.Location(x=304.629486, y=317.572845, z=0.178421),
carla.Location(x=316.855194, y=319.423187, z=0.178421),
carla.Location(x=33.428959, y=322.850220, z=0.178421),
carla.Location(x=128.266373, y=323.771790, z=0.178421),
carla.Location(x=342.038818, y=318.311249, z=0.178421),
carla.Location(x=31.766727, y=322.702301, z=0.178421),
carla.Location(x=299.167053, y=323.124878, z=0.178421),
carla.Location(x=140.243896, y=322.242981, z=0.178421),
carla.Location(x=276.190491, y=321.081635, z=0.178421),
carla.Location(x=14.218060, y=321.422974, z=0.178421),
carla.Location(x=37.528347, y=323.550293, z=0.178421),
carla.Location(x=239.099762, y=322.389954, z=0.178421),
carla.Location(x=275.256287, y=316.526398, z=0.178421),
]
RIGHT_PEDESTRIAN_LOCATIONS = [
carla.Location(x=350.550201, y=335.187317, z=0.178421),
carla.Location(x=232.981644, y=334.388306, z=0.178421),
carla.Location(x=188.743835, y=333.404907, z=0.178421),
carla.Location(x=164.170914, y=335.196686, z=0.178421),
carla.Location(x=15.209345, y=335.201416, z=0.178421),
carla.Location(x=359.794922, y=334.626587, z=0.178421),
carla.Location(x=266.455139, y=334.497192, z=0.178421),
carla.Location(x=114.930313, y=334.799988, z=0.178421),
carla.Location(x=50.893738, y=333.211700, z=0.178421),
carla.Location(x=171.739258, y=334.297699, z=0.178421),
carla.Location(x=47.960590, y=335.595856, z=0.178421),
carla.Location(x=207.187683, y=333.949402, z=0.178421),
carla.Location(x=80.272377, y=333.790161, z=0.178421),
carla.Location(x=384.441345, y=335.221680, z=0.178421),
carla.Location(x=122.811996, y=333.855499, z=0.178421),
carla.Location(x=344.781494, y=333.570801, z=0.178421),
carla.Location(x=173.823975, y=334.220032, z=0.178421),
carla.Location(x=144.417786, y=335.413452, z=0.178421),
carla.Location(x=383.538208, y=335.320740, z=0.178421),
carla.Location(x=389.220886, y=334.166260, z=0.178421),
carla.Location(x=223.445312, y=334.052704, z=0.178421),
carla.Location(x=76.547646, y=335.064697, z=0.178421),
carla.Location(x=220.296692, y=334.691833, z=0.178421),
carla.Location(x=280.560150, y=335.657440, z=0.178421),
carla.Location(x=257.465332, y=333.976318, z=0.178421),
carla.Location(x=332.581879, y=333.940979, z=0.178421),
carla.Location(x=147.433990, y=334.056854, z=0.178421),
carla.Location(x=355.586273, y=334.151154, z=0.178421),
carla.Location(x=348.346313, y=334.907135, z=0.178421),
carla.Location(x=218.856461, y=334.872314, z=0.178421),
carla.Location(x=91.295258, y=334.089172, z=0.178421),
carla.Location(x=220.347900, y=335.830750, z=0.178421),
carla.Location(x=98.719185, y=335.502411, z=0.178421),
carla.Location(x=19.872103, y=333.940002, z=0.178421),
carla.Location(x=222.739853, y=335.307434, z=0.178421),
carla.Location(x=366.510742, y=335.127716, z=0.178421),
]
class ERDOSPedestrianBehindCar(BasicScenario):
"""
This class sets up the scenario where the ego vehicle needs to drive
on a road, and a pedestrian crosses unexpectedly from the other side of
the road.
This is a single ego vehicle scenario
"""
def __init__(self,
world,
ego_vehicles,
config,
randomize=False,
debug_mode=False,
criteria_enable=True,
timeout=600000000000,
coca_cola_van_distance=110,
coca_cola_van_translation=4,
pedestrian_distance=117,
pedestrian_translation=4.5,
pedestrian_velocity=2.75,
pedestrian_trigger_distance=26,
pedestrian_yaw_offset=90,
crossing_distance=4.5,
pedestrian_bp_name='walker.pedestrian.0008',
truck_bp_name='vehicle.carlamotors.carlacola',
rotate_pedestrian=True):
"""
Sets up the required class variables and calls BasicScenario to
finish setting up the scenario.
"""
self.debug_mode = debug_mode
self._map = CarlaDataProvider.get_map()
self._world = CarlaDataProvider.get_world()
self._reference_waypoint = self._map.get_waypoint(
config.trigger_points[0].location)
self.timeout = timeout
# Coca Cola Van Config
self._coca_cola_van_distance = coca_cola_van_distance
self._coca_cola_van_translation = coca_cola_van_translation
self._truck_bp_name = truck_bp_name
# Pedestrian Config
self._pedestrian_distance = pedestrian_distance
self._pedestrian_translation = pedestrian_translation
self._pedestrian_velocity = pedestrian_velocity
self._pedestrian_trigger_distance = pedestrian_trigger_distance
self._pedestrian_yaw_offset = pedestrian_yaw_offset
self._pedestrian_bp_name = pedestrian_bp_name
self._rotate_pedestrian = rotate_pedestrian
# Miscellaneous Config
self._crossing_distance = crossing_distance
self._driving_distance = 180
# Call the base class to set up the scenario.
super(ERDOSPedestrianBehindCar,
self).__init__("ERDOSPedestrianBehindCar",
ego_vehicles,
config,
world,
debug_mode,
criteria_enable=criteria_enable)
@staticmethod
def get_waypoint_in_distance(waypoint, distance):
"""
Obtain a waypoint in a given distance from the actor's location.
Do not stop the search on the first intersection.
"""
traveled_distance = 0
while traveled_distance < distance:
waypoint_new = waypoint.next(1.0)[0]
traveled_distance += waypoint_new.transform.location.distance(
waypoint.transform.location)
waypoint = waypoint_new
return waypoint, traveled_distance
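    # Usage sketch (illustrative): the spawn code below walks the lane ahead of the
    # trigger point to place the van, e.g.
    #   van_wp, travelled = ERDOSPedestrianBehindCar.get_waypoint_in_distance(
    #       self._reference_waypoint, self._coca_cola_van_distance)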
def _initialize_actors(self, config):
"""
Initializes the other vehicles in the scenario.
"""
# Initialize the coca cola truck.
coca_cola_van_wp, _ = ERDOSPedestrianBehindCar.get_waypoint_in_distance(
self._reference_waypoint, self._coca_cola_van_distance)
self._coca_cola_van_transform = carla.Transform(
carla.Location(
coca_cola_van_wp.transform.location.x,
coca_cola_van_wp.transform.location.y +
self._coca_cola_van_translation,
coca_cola_van_wp.transform.location.z + 1),
carla.Rotation(coca_cola_van_wp.transform.rotation.pitch,
coca_cola_van_wp.transform.rotation.yaw + 180,
coca_cola_van_wp.transform.rotation.roll))
coca_cola_van = CarlaDataProvider.request_new_actor(
self._truck_bp_name, self._coca_cola_van_transform, color='red')
self.other_actors.append(coca_cola_van)
# Initialize the pedestrian.
pedestrian_wp, _ = ERDOSPedestrianBehindCar.get_waypoint_in_distance(
self._reference_waypoint, self._pedestrian_distance)
self._pedestrian_transform = carla.Transform(
carla.Location(
pedestrian_wp.transform.location.x,
pedestrian_wp.transform.location.y +
self._pedestrian_translation,
pedestrian_wp.transform.location.z + 5),
carla.Rotation(
pedestrian_wp.transform.rotation.pitch,
pedestrian_wp.transform.rotation.yaw +
self._pedestrian_yaw_offset,
pedestrian_wp.transform.rotation.roll))
pedestrian = CarlaDataProvider.request_new_actor(
self._pedestrian_bp_name,
self._pedestrian_transform,
color='red',
rolename='pedestrian')
self.other_actors.append(pedestrian)
# Set all the traffic lights in the world to green.
for actor in self._world.get_actors():
if actor.type_id == "traffic.traffic_light":
actor.set_state(carla.TrafficLightState.Green)
actor.freeze(True)
def _create_behavior(self):
"""
The scenario involves setting up a set of vehicles, and having a
pedestrian run from in between the vehicles in front of the ego
vehicle.
"""
# First, fix the transform of the other actors in the scene.
coca_cola_transform = ActorTransformSetter(
self.other_actors[0], self._coca_cola_van_transform)
# The pedestrian needs to walk to the other side of the road.
pedestrian_crossing = py_trees.composites.Parallel(
"Obstacle clearing road",
policy=py_trees.common.ParallelPolicy.SUCCESS_ON_ONE)
pedestrian_crossing.add_child(
DriveDistance(self.other_actors[-1], self._crossing_distance))
pedestrian_crossing.add_child(
KeepVelocity(self.other_actors[-1],
self._pedestrian_velocity,
distance=self._crossing_distance))
# The pedestrian needs to face us to make detection easier.
pedestrian_rotation = ActorRotationSetter(self.other_actors[-1], 90)
# Define the endcondition.
endcondition = py_trees.composites.Parallel(
"Waiting for end position",
policy=py_trees.common.ParallelPolicy.SUCCESS_ON_ALL)
reached_goal = InTriggerDistanceToLocation(
self.ego_vehicles[0], self._reference_waypoint.transform.location -
carla.Location(x=self._driving_distance), 5)
endcondition.add_child(reached_goal)
# Define the behavior tree.
sequence = py_trees.composites.Sequence(
"PedestrianBehindCar Behavior Tree")
sequence.add_child(coca_cola_transform)
sequence.add_child(
InTriggerDistanceToVehicle(self.other_actors[-1],
self.ego_vehicles[0],
self._pedestrian_trigger_distance))
sequence.add_child(pedestrian_crossing)
if self._rotate_pedestrian:
sequence.add_child(pedestrian_rotation)
sequence.add_child(endcondition)
sequence.add_child(ActorDestroy(self.other_actors[0]))
sequence.add_child(ActorDestroy(self.other_actors[-1]))
return sequence
def _create_test_criteria(self):
"""
A list of all test criteria will be created that is later used
in parallel behavior tree.
"""
criteria = []
collision_criterion = CollisionTest(self.ego_vehicles[0],
terminate_on_failure=True,
num_updates_after_failure=10)
criteria.append(collision_criterion)
        # Do not add the simulation-time constraint when running in debug mode.
if not self.debug_mode:
            # The scenario must complete within 30 seconds of simulation time.
timely_arrival = MaxSimTimeTest(self.ego_vehicles[0],
30,
terminate_on_failure=True)
criteria.append(timely_arrival)
return criteria
def __del__(self):
"""
Remove all actors upon deletion
"""
self.remove_all_actors()
class ERDOSPedestrianBehindParkedCar(ERDOSPedestrianBehindCar):
def __init__(self,
world,
ego_vehicles,
config,
randomize=False,
debug_mode=False,
criteria_enable=True,
timeout=600000000000):
super(ERDOSPedestrianBehindParkedCar,
self).__init__(world,
ego_vehicles,
config,
randomize,
debug_mode,
criteria_enable,
timeout,
                           coca_cola_van_distance=62,
                           coca_cola_van_translation=-3.5,
                           pedestrian_distance=65,
                           pedestrian_translation=-3.25,
                           pedestrian_velocity=2.5,
                           pedestrian_trigger_distance=25,
                           pedestrian_yaw_offset=-90,
                           crossing_distance=2.5,
                           truck_bp_name='vehicle.volkswagen.t2')
class ERDOSCarBehindTruck(ERDOSPedestrianBehindCar):
def __init__(self,
world,
ego_vehicles,
config,
randomize=False,
debug_mode=False,
criteria_enable=True,
timeout=600000000000):
super(ERDOSCarBehindTruck,
self).__init__(world,
ego_vehicles,
config,
randomize,
debug_mode,
criteria_enable,
timeout,
                           coca_cola_van_distance=88,
                           coca_cola_van_translation=4,
                           pedestrian_distance=100,
                           pedestrian_translation=4.5,
                           pedestrian_velocity=7.5,
                           pedestrian_trigger_distance=45,
                           pedestrian_yaw_offset=90,
                           crossing_distance=3.0,
                           pedestrian_bp_name='vehicle.nissan.micra',
                           rotate_pedestrian=False)
class ERDOSManyPedestrians(BasicScenario):
"""
This class sets up the scenario with a large number of actors in the
field of vision so as to increase the runtime of the object detectors
and force a change in the optimum configuration.
This is a single ego-vehicle scenario.
"""
def __init__(self,
world,
ego_vehicles,
config,
randomize=False,
debug_mode=False,
criteria_enable=True,
timeout=600000000000):
"""
Sets up the required class variables and calls BasicScenario to
finish setting up the scenario.
"""
self.debug_mode = debug_mode
self._map = CarlaDataProvider.get_map()
self._world = CarlaDataProvider.get_world()
self._reference_waypoint = self._map.get_waypoint(
config.trigger_points[0].location)
self.timeout = timeout
# Number of pedestrians in the scene.
self._num_walkers = 91
# Pedestrian Config.
self._pedestrian_distance = 132
self._pedestrian_trigger_distance = 50
self._pedestrian_translation = 6
self._pedestrian_velocity = 6
# Miscellaneous Config
self._crossing_distance = 10
self._driving_distance = 370
# Call the base class to set up the scenario.
super(ERDOSManyPedestrians,
self).__init__("ERDOSManyPedestrians",
ego_vehicles,
config,
world,
debug_mode,
criteria_enable=criteria_enable)
def spawn_pedestrians(self, sampled_locations, goto_locations):
"""
Spawns the pedestrians at the sampled locations and makes them
complete a trajectory to the corresponding location in the goto
locations.
Returns the actors spawned.
"""
actors = []
for location, destination in zip(sampled_locations, goto_locations):
# Spawn the actor.
walker_bp = random.choice(
self._world.get_blueprint_library().filter("walker.*"))
try:
walker_actor = self._world.spawn_actor(
walker_bp, carla.Transform(location=location))
except RuntimeError:
print("Could not spawn the actor because of collision.")
continue
actors.append(walker_actor)
# Spawn the controller.
walker_controller_bp = self._world.get_blueprint_library().find(
'controller.ai.walker')
walker_controller_actor = self._world.spawn_actor(
walker_controller_bp, carla.Transform(), walker_actor)
self._world.wait_for_tick()
# Choose a location and make the pedestrian move there.
walker_controller_actor.start()
walker_controller_actor.go_to_location(destination)
walker_controller_actor.set_max_speed(1.4)
return actors
def _initialize_actors(self, config):
"""
Initializes the other vehicles in the scenario.
"""
# Initialize all the pedestrians in the scene.
if self._num_walkers > len(LEFT_PEDESTRIAN_LOCATIONS) + len(
RIGHT_PEDESTRIAN_LOCATIONS):
raise ValueError(
"The number of walkers requested ({}) is greater than the "
"number of unique pedestrian locations ({}).".format(
self._num_walkers,
len(LEFT_PEDESTRIAN_LOCATIONS) +
len(RIGHT_PEDESTRIAN_LOCATIONS)))
left_locations, right_locations = [], []
        # Seed the RNG so that the random.sample calls below are deterministic
        # across runs.
random.seed(0)
if self._num_walkers // 2 >= len(RIGHT_PEDESTRIAN_LOCATIONS):
right_locations = RIGHT_PEDESTRIAN_LOCATIONS
left_locations = random.sample(
LEFT_PEDESTRIAN_LOCATIONS,
self._num_walkers - len(RIGHT_PEDESTRIAN_LOCATIONS))
else:
right_locations = random.sample(RIGHT_PEDESTRIAN_LOCATIONS,
self._num_walkers // 2)
left_locations = random.sample(
LEFT_PEDESTRIAN_LOCATIONS,
self._num_walkers - (self._num_walkers // 2))
# Spawn the pedestrians on the left and right hand sides of the road.
self.other_actors.extend(
self.spawn_pedestrians(
right_locations,
random.sample(RIGHT_PEDESTRIAN_LOCATIONS,
len(right_locations))))
self.other_actors.extend(
self.spawn_pedestrians(
left_locations,
random.sample(LEFT_PEDESTRIAN_LOCATIONS, len(left_locations))))
# Set all the traffic lights in the world to green.
for actor in self._world.get_actors():
if actor.type_id == "traffic.traffic_light":
actor.set_state(carla.TrafficLightState.Green)
actor.freeze(True)
def _create_behavior(self):
"""
The scenario involves setting up a set of vehicles, and having a
        pedestrian run from in between the vehicles in front of the ego
        vehicle.
        """
242172 * uk_98
+ 25200 * uk_99,
uk_0
+ 47353 * uk_1
+ 2983239 * uk_10
+ 204120 * uk_100
+ 328104 * uk_101
+ 7560 * uk_102
+ 1148175 * uk_103
+ 1845585 * uk_104
+ 42525 * uk_105
+ 2966607 * uk_106
+ 68355 * uk_107
+ 1575 * uk_108
+ 1092727 * uk_109
+ 4877359 * uk_11
+ 53045 * uk_110
+ 254616 * uk_111
+ 1432215 * uk_112
+ 2302153 * uk_113
+ 53045 * uk_114
+ 2575 * uk_115
+ 12360 * uk_116
+ 69525 * uk_117
+ 111755 * uk_118
+ 2575 * uk_119
+ 236765 * uk_12
+ 59328 * uk_120
+ 333720 * uk_121
+ 536424 * uk_122
+ 12360 * uk_123
+ 1877175 * uk_124
+ 3017385 * uk_125
+ 69525 * uk_126
+ 4850167 * uk_127
+ 111755 * uk_128
+ 2575 * uk_129
+ 1136472 * uk_13
+ 125 * uk_130
+ 600 * uk_131
+ 3375 * uk_132
+ 5425 * uk_133
+ 125 * uk_134
+ 2880 * uk_135
+ 16200 * uk_136
+ 26040 * uk_137
+ 600 * uk_138
+ 91125 * uk_139
+ 6392655 * uk_14
+ 146475 * uk_140
+ 3375 * uk_141
+ 235445 * uk_142
+ 5425 * uk_143
+ 125 * uk_144
+ 13824 * uk_145
+ 77760 * uk_146
+ 124992 * uk_147
+ 2880 * uk_148
+ 437400 * uk_149
+ 10275601 * uk_15
+ 703080 * uk_150
+ 16200 * uk_151
+ 1130136 * uk_152
+ 26040 * uk_153
+ 600 * uk_154
+ 2460375 * uk_155
+ 3954825 * uk_156
+ 91125 * uk_157
+ 6357015 * uk_158
+ 146475 * uk_159
+ 236765 * uk_16
+ 3375 * uk_160
+ 10218313 * uk_161
+ 235445 * uk_162
+ 5425 * uk_163
+ 125 * uk_164
+ 3969 * uk_17
+ 6489 * uk_18
+ 315 * uk_19
+ 63 * uk_2
+ 1512 * uk_20
+ 8505 * uk_21
+ 13671 * uk_22
+ 315 * uk_23
+ 10609 * uk_24
+ 515 * uk_25
+ 2472 * uk_26
+ 13905 * uk_27
+ 22351 * uk_28
+ 515 * uk_29
+ 103 * uk_3
+ 25 * uk_30
+ 120 * uk_31
+ 675 * uk_32
+ 1085 * uk_33
+ 25 * uk_34
+ 576 * uk_35
+ 3240 * uk_36
+ 5208 * uk_37
+ 120 * uk_38
+ 18225 * uk_39
+ 5 * uk_4
+ 29295 * uk_40
+ 675 * uk_41
+ 47089 * uk_42
+ 1085 * uk_43
+ 25 * uk_44
+ 106179944855977 * uk_45
+ 141265316367 * uk_46
+ 230957580727 * uk_47
+ 11211533045 * uk_48
+ 53815358616 * uk_49
+ 24 * uk_5
+ 302711392215 * uk_50
+ 486580534153 * uk_51
+ 11211533045 * uk_52
+ 187944057 * uk_53
+ 307273617 * uk_54
+ 14916195 * uk_55
+ 71597736 * uk_56
+ 402737265 * uk_57
+ 647362863 * uk_58
+ 14916195 * uk_59
+ 135 * uk_6
+ 502367977 * uk_60
+ 24386795 * uk_61
+ 117056616 * uk_62
+ 658443465 * uk_63
+ 1058386903 * uk_64
+ 24386795 * uk_65
+ 1183825 * uk_66
+ 5682360 * uk_67
+ 31963275 * uk_68
+ 51378005 * uk_69
+ 217 * uk_7
+ 1183825 * uk_70
+ 27275328 * uk_71
+ 153423720 * uk_72
+ 246614424 * uk_73
+ 5682360 * uk_74
+ 863008425 * uk_75
+ 1387206135 * uk_76
+ 31963275 * uk_77
+ 2229805417 * uk_78
+ 51378005 * uk_79
+ 5 * uk_8
+ 1183825 * uk_80
+ 250047 * uk_81
+ 408807 * uk_82
+ 19845 * uk_83
+ 95256 * uk_84
+ 535815 * uk_85
+ 861273 * uk_86
+ 19845 * uk_87
+ 668367 * uk_88
+ 32445 * uk_89
+ 2242306609 * uk_9
+ 155736 * uk_90
+ 876015 * uk_91
+ 1408113 * uk_92
+ 32445 * uk_93
+ 1575 * uk_94
+ 7560 * uk_95
+ 42525 * uk_96
+ 68355 * uk_97
+ 1575 * uk_98
+ 36288 * uk_99,
uk_0
+ 47353 * uk_1
+ 2983239 * uk_10
+ 172620 * uk_100
+ 273420 * uk_101
+ 129780 * uk_102
+ 1182447 * uk_103
+ 1872927 * uk_104
+ 888993 * uk_105
+ 2966607 * uk_106
+ 1408113 * uk_107
+ 668367 * uk_108
+ 681472 * uk_109
+ 4167064 * uk_11
+ 797632 * uk_110
+ 154880 * uk_111
+ 1060928 * uk_112
+ 1680448 * uk_113
+ 797632 * uk_114
+ 933592 * uk_115
+ 181280 * uk_116
+ 1241768 * uk_117
+ 1966888 * uk_118
+ 933592 * uk_119
+ 4877359 * uk_12
+ 35200 * uk_120
+ 241120 * uk_121
+ 381920 * uk_122
+ 181280 * uk_123
+ 1651672 * uk_124
+ 2616152 * uk_125
+ 1241768 * uk_126
+ 4143832 * uk_127
+ 1966888 * uk_128
+ 933592 * uk_129
+ 947060 * uk_13
+ 1092727 * uk_130
+ 212180 * uk_131
+ 1453433 * uk_132
+ 2302153 * uk_133
+ 1092727 * uk_134
+ 41200 * uk_135
+ 282220 * uk_136
+ 447020 * uk_137
+ 212180 * uk_138
+ 1933207 * uk_139
+ 6487361 * uk_14
+ 3062087 * uk_140
+ 1453433 * uk_141
+ 4850167 * uk_142
+ 2302153 * uk_143
+ 1092727 * uk_144
+ 8000 * uk_145
+ 54800 * uk_146
+ 86800 * uk_147
+ 41200 * uk_148
+ 375380 * uk_149
+ 10275601 * uk_15
+ 594580 * uk_150
+ 282220 * uk_151
+ 941780 * uk_152
+ 447020 * uk_153
+ 212180 * uk_154
+ 2571353 * uk_155
+ 4072873 * uk_156
+ 1933207 * uk_157
+ 6451193 * uk_158
+ 3062087 * uk_159
+ 4877359 * uk_16
+ 1453433 * uk_160
+ 10218313 * uk_161
+ 4850167 * uk_162
+ 2302153 * uk_163
+ 1092727 * uk_164
+ 3969 * uk_17
+ 5544 * uk_18
+ 6489 * uk_19
+ 63 * uk_2
+ 1260 * uk_20
+ 8631 * uk_21
+ 13671 * uk_22
+ 6489 * uk_23
+ 7744 * uk_24
+ 9064 * uk_25
+ 1760 * uk_26
+ 12056 * uk_27
+ 19096 * uk_28
+ 9064 * uk_29
+ 88 * uk_3
+ 10609 * uk_30
+ 2060 * uk_31
+ 14111 * uk_32
+ 22351 * uk_33
+ 10609 * uk_34
+ 400 * uk_35
+ 2740 * uk_36
+ 4340 * uk_37
+ 2060 * uk_38
+ 18769 * uk_39
+ 103 * uk_4
+ 29729 * uk_40
+ 14111 * uk_41
+ 47089 * uk_42
+ 22351 * uk_43
+ 10609 * uk_44
+ 106179944855977 * uk_45
+ 141265316367 * uk_46
+ 197322981592 * uk_47
+ 230957580727 * uk_48
+ 44846132180 * uk_49
+ 20 * uk_5
+ 307196005433 * uk_50
+ 486580534153 * uk_51
+ 230957580727 * uk_52
+ 187944057 * uk_53
+ 262525032 * uk_54
+ 307273617 * uk_55
+ 59664780 * uk_56
+ 408703743 * uk_57
+ 647362863 * uk_58
+ 307273617 * uk_59
+ 137 * uk_6
+ 366701632 * uk_60
+ 429207592 * uk_61
+ 83341280 * uk_62
+ 570887768 * uk_63
+ 904252888 * uk_64
+ 429207592 * uk_65
+ 502367977 * uk_66
+ 97547180 * uk_67
+ 668198183 * uk_68
+ 1058386903 * uk_69
+ 217 * uk_7
+ 502367977 * uk_70
+ 18941200 * uk_71
+ 129747220 * uk_72
+ 205512020 * uk_73
+ 97547180 * uk_74
+ 888768457 * uk_75
+ 1407757337 * uk_76
+ 668198183 * uk_77
+ 2229805417 * uk_78
+ 1058386903 * uk_79
+ 103 * uk_8
+ 502367977 * uk_80
+ 250047 * uk_81
+ 349272 * uk_82
+ 408807 * uk_83
+ 79380 * uk_84
+ 543753 * uk_85
+ 861273 * uk_86
+ 408807 * uk_87
+ 487872 * uk_88
+ 571032 * uk_89
        + 2242306609 * uk_9
credentials are compromised, they can be used from outside of the AWS account they give access to. In contrast, in order to leverage role permissions an attacker would need to gain and maintain access to a specific instance to use the privileges associated with it.',
'vulnerability' : 'AWS access from within AWS instances can be done by either encoding AWS keys into AWS API calls or by assigning the instance to a role which has an appropriate permissions policy for the required access. "AWS Access" means accessing the APIs of AWS in order to access AWS resources or manage AWS account resources.',
'remediation' : 'Remove the access keys from any user account in use on an EC2 instance, and setup EC2 IAM Roles instead.',
'severity' : 'low',
'links' : [
'https://d0.awsstatic.com/whitepapers/compliance/AWS_CIS_Foundations_Benchmark.pdf#page=49',
'https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/iam-roles-for-amazon-ec2.html'
],
'references' : [
'AWS CIS v.1.4.0 - 1.18',
'AWS CIS v.1.2.0 - 1.19'
]
}
for region in regionList:
for e in self.cache['ec2']['describe_instances'].get(region,{}):
for R in e['Reservations']:
for ec2 in R['Instances']:
compliance = 0
evidence = {region : ec2['InstanceId']}
for II in self.cache['ec2']['describe_iam_instance_profile_associations'].get(region,{}):
for ia in II['IamInstanceProfileAssociations']:
if ia['InstanceId'] == ec2['InstanceId'] and ia['State'] == 'associated':
compliance = 1
self.finding(policy,compliance,evidence)
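        # Remediation sketch (illustrative CLI; instance id and profile name are
        # placeholders): attach an instance profile instead of embedding access keys:
        #   aws ec2 associate-iam-instance-profile \
        #       --instance-id i-0123456789abcdef0 \
        #       --iam-instance-profile Name=MyInstanceRole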
# ------------------------------------------------------
policy = {
'name' : 'Ensure a support role has been created to manage incidents with AWS Support',
'description' : 'The AWS Support Role allows a user to create and manage support cases with AWS.',
'vulnerability' : 'Without a support role, no one (with the exception of the root user) will be able to open a support case with AWS. Note that there are charges for using the support service from AWS. Refer to their <a href="https://aws.amazon.com/premiumsupport/pricing/">support pricing model</a> for more information.',
'remediation' : 'Assign the policy AWSSupportAccess to a user or a group.',
'severity' : 'info',
'links' : [
'https://d0.awsstatic.com/whitepapers/compliance/AWS_CIS_Foundations_Benchmark.pdf#page=52',
'https://aws.amazon.com/premiumsupport/pricing/',
'https://docs.aws.amazon.com/awssupport/latest/user/getting-started.html',
'https://docs.aws.amazon.com/awssupport/latest/user/accessing-support.html#iam'
],
'references' : [
'AWS CIS v.1.4.0 - 1.17',
'AWS CIS v.1.2.0 - 1.20'
]
}
# -- cycle through all the users
compliance = 0
evidence = []
if not 'get_credential_report' in p['iam']:
self.finding(policy,0,'credential report is not available')
else:
for u in p['iam']['get_credential_report'].get('us-east-1',{}):
if u['user'] != '<root_account>':
# -- check the user's attached policies
for A in self.cache['iam']['list_attached_user_policies'].get('us-east-1',{})[u['user']]:
for aup in A['AttachedPolicies']:
if aup['PolicyArn'] == 'arn:aws:iam::aws:policy/AWSSupportAccess':
evidence.append({'user' : u['user']})
compliance = 1
# -- check the user's groups
for B in self.cache['iam']['get_account_authorization_details'].get('us-east-1',{}):
for aad in B['UserDetailList']:
if aad['UserName'] == u['user']:
for g in aad['GroupList']:
for C in self.cache['iam']['list_attached_group_policies'].get('us-east-1',{})[g]:
for agp in C['AttachedPolicies']:
if agp['PolicyArn'] == 'arn:aws:iam::aws:policy/AWSSupportAccess':
compliance = 1
evidence.append({ 'user' : u['user'], 'group' : g})
# -- check the role
for D in self.cache['iam']['get_account_authorization_details'].get('us-east-1',{}):
for aad in D['RoleDetailList']:
for amp in aad['AttachedManagedPolicies']:
if amp['PolicyArn'] == 'arn:aws:iam::aws:policy/AWSSupportAccess':
evidence.append({'role' : aad['RoleName']})
compliance = 1
self.finding(policy,compliance,evidence)
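        # Remediation sketch (illustrative CLI; the user name is a placeholder):
        # attach the managed policy to a user or group, e.g.
        #   aws iam attach-user-policy --user-name ops-support \
        #       --policy-arn arn:aws:iam::aws:policy/AWSSupportAccess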
# ------------------------------------------------------
policy = {
'name' : 'Ensure IAM policies that allow full "*:*" administrative privileges are not created',
'description' : 'Policies define the list of actions that is allowed against a set of resources. They typically represent all the actions an entity can take as part of a required job function.',
            'vulnerability' : 'Creating an additional policy with administrative access to the entire AWS account has a risk of going undetected if it were to be added to a rogue account, leading to a compromise of the AWS account.',
'remediation' : 'Remove the offending policy, and add the user, group, or role to the AWS managed Administrator policy',
'severity' : 'medium',
'links' : [
'https://d0.awsstatic.com/whitepapers/compliance/AWS_CIS_Foundations_Benchmark.pdf#page=57'
],
'references' : [
'AWS CIS v.1.4.0 - 1.16',
'AWS CIS v.1.2.0 - 1.22'
]
}
evidence = {}
compliance = 1 # in this case we assume everything is fine, until we find something that is not
for gpv in self.cache['iam']['get_policy_version']:
if gpv != 'AdministratorAccess':
if 'Document' in self.cache['iam']['get_policy_version'][gpv]:
if type(self.cache['iam']['get_policy_version'][gpv]['Document']['Statement']) == dict:
s = self.cache['iam']['get_policy_version'][gpv]['Document']['Statement']
if self.comparer(s['Effect'],'Allow') and self.comparer(s['Action'],'*') and self.comparer(s['Resource'],'*'):
compliance = 0
evidence[gpv] = s
else:
for s in self.cache['iam']['get_policy_version'][gpv]['Document']['Statement']:
if self.comparer(s['Effect'],'Allow') and self.comparer(s.get('Action',''),'*') and self.comparer(s['Resource'],'*'):
compliance = 0
evidence[gpv] = s
else:
compliance = 0
evidence[gpv] = 'policy details not available'
self.finding(policy,compliance,evidence)
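        # For reference, the statement shape this check flags looks like:
        #   {"Effect": "Allow", "Action": "*", "Resource": "*"}
        # (attaching the AWS-managed AdministratorAccess policy is preferred over
        # creating such custom policies).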
# ------------------------------------------------------
policy = {
'name' : 'Ensure CloudTrail is enabled in all regions',
'description' : 'The AWS API call history produced by CloudTrail enables security analysis, resource change tracking, and compliance auditing.',
'remediation' : 'Follow <a href="https://docs.aws.amazon.com/awscloudtrail/latest/userguide/cloudtrail-create-a-trail-using-the-console-first-time.html">AWS Best Practices</a> to create a new trail.',
'vulnerability' : 'Without proper logging of AWS API activity, any activity, be it malicious, or legitimate will go undetected, resulting in breaches, or lack of regulatory compliance.',
'severity' : 'medium',
'links' : [
'https://d0.awsstatic.com/whitepapers/compliance/AWS_CIS_Foundations_Benchmark.pdf#page=61',
'https://docs.aws.amazon.com/awscloudtrail/latest/userguide/cloudtrail-create-a-trail-using-the-console-first-time.html',
'https://aws.amazon.com/premiumsupport/technology/trusted-advisor/best-practice-checklist/#Security'
],
'references' : [
'AWS CIS v.1.4.0 - 3.1',
'AWS CIS v.1.2.0 - 2.1',
'Trusted Advisor - AWS Cloudtrail logging'
]
}
IsMultiRegionTrail = False
IsLogging = False
IncludeManagementEvents = False
ReadWriteType = False
for region in regionList:
for ct in self.cache['cloudtrail']['describe_trails'].get(region,{}).get('trailList',{}):
# IsMultiRegionTrail
if ct['IsMultiRegionTrail']:
IsMultiRegionTrail = True
if self.cache['cloudtrail']['get_trail_status'].get(region,{})[ct['TrailARN']]:
IsLogging = True
for e in self.cache['cloudtrail']['get_event_selectors'].get(region,{})[ct['TrailARN']]['EventSelectors']:
if e['IncludeManagementEvents'] == True:
IncludeManagementEvents = True
if e['ReadWriteType'] == 'All':
ReadWriteType = True
evidence = {
'region' : region,
'IsMultiRegionTrail' : IsMultiRegionTrail,
'IsLogging' : IsLogging,
'IncludeManagementEvents' : IncludeManagementEvents,
'ReadWriteType' : ReadWriteType
}
if IsMultiRegionTrail == True and IsLogging == True and IncludeManagementEvents == True and ReadWriteType == True:
self.finding(policy,1,evidence)
else:
self.finding(policy,0,evidence)
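            # Remediation sketch (illustrative CLI; trail and bucket names are placeholders):
            #   aws cloudtrail create-trail --name org-trail \
            #       --s3-bucket-name my-cloudtrail-bucket --is-multi-region-trail
            #   aws cloudtrail start-logging --name org-trail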
# ------------------------------------------------------
policy = {
'name' : 'Ensure CloudTrail log file validation is enabled',
'description' : 'Enabling log file validation will provide additional integrity checking of CloudTrail logs.',
            'vulnerability' : 'Without log file validation, there is a higher likelihood of regulatory compliance findings related to audit logging.',
'severity' : 'low',
'remediation' : 'Follow <a href="https://docs.aws.amazon.com/awscloudtrail/latest/userguide/cloudtrail-log-file-validation-enabling.html">AWS Best Practices</a> to enable log file validation.',
'links' : [
'https://d0.awsstatic.com/whitepapers/compliance/AWS_CIS_Foundations_Benchmark.pdf#page=64',
'https://docs.aws.amazon.com/awscloudtrail/latest/userguide/cloudtrail-log-file-validation-enabling.html'
],
'references' : [
'AWS CIS v.1.4.0 - 3.2',
'AWS CIS v.1.2.0 - 2.2'
]
}
for region in regionList:
for ct in self.cache['cloudtrail']['describe_trails'].get(region,{}).get('trailList',{}):
evidence = {
region : ct['Name']
}
if ct['LogFileValidationEnabled']:
self.finding(policy,1,evidence)
else:
self.finding(policy,0,evidence)
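            # Remediation sketch (illustrative CLI; the trail name is a placeholder):
            #   aws cloudtrail update-trail --name org-trail --enable-log-file-validation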
# --------------------------------------------------------
policy = {
'name' : 'Ensure the S3 bucket used to store CloudTrail logs is not publicly accessible',
            'description' : 'CloudTrail logs a record of every API call made in your AWS account. These log files are stored in an S3 bucket. It is recommended that the bucket policy, or access control list (ACL), applied to the S3 bucket that CloudTrail logs to prevents public access to the CloudTrail logs.',
'vulnerability' : 'Allowing public access to CloudTrail log content may aid an adversary in identifying weaknesses in the affected account\'s use or configuration.',
'severity' : 'critical',
'remediation' : 'Follow <a href="https://docs.aws.amazon.com/awscloudtrail/latest/userguide/create-s3-bucket-policy-for-cloudtrail.html">AWS Best practices</a> to configure CloudTrail S3 buckets.',
'links' : [
'https://d0.awsstatic.com/whitepapers/compliance/AWS_CIS_Foundations_Benchmark.pdf#page=66',
'https://docs.aws.amazon.com/awscloudtrail/latest/userguide/create-s3-bucket-policy-for-cloudtrail.html'
],
'references' : [
'AWS CIS v.1.4.0 - 3.3',
'AWS CIS v 1.2.0 - 2.3'
]
}
for region in regionList:
            for ct in self.cache['cloudtrail']['describe_trails'].get(region,{}).get('trailList',{}):
                compliance = False
                evidence = { 'region' : region }
if not 'S3BucketName' in ct:
compliance = True
evidence['S3BucketName'] = '** No bucket defined ** '
else:
S3BucketName = ct.get('S3BucketName')
evidence['S3BucketName'] = S3BucketName
evidence['Is_Bucket_Public'] = self.cache['s3']['_public_s3_bucket'][S3BucketName]['list_objects']
if self.cache['s3']['_public_s3_bucket'][S3BucketName]['list_objects'] == False and self.cache['s3']['_public_s3_bucket'][S3BucketName]['list_objects_v2'] == False:
compliance = True
self.finding(policy,compliance,evidence)
# --------------------------------------------------------
policy = {
'name' : 'Ensure CloudTrail trails are integrated with CloudWatch Logs',
            'description' : 'Sending CloudTrail logs to CloudWatch Logs facilitates real-time and historic activity logging based on user, API, resource, and IP address, and provides an opportunity to establish alarms and notifications for anomalous or sensitive account activity.',
'vulnerability' : 'Without sending CloudTrail logs to CloudWatch, real-time alerts will not be visible, and may go undetected',
'severity' : 'low',
'remediation' : 'Follow <a href="https://docs.aws.amazon.com/awscloudtrail/latest/userguide/send-cloudtrail-events-to-cloudwatch-logs.html">AWS Best practices</a> to configure CloudTrail to CloudWatch integration.',
            'references' : [
'AWS CIS v.1.4.0 - 3.4',
'AWS CIS v.1.2.0 - 2.4'
],
'links' : [
'https://d0.awsstatic.com/whitepapers/compliance/AWS_CIS_Foundations_Benchmark.pdf#page=69',
'https://docs.aws.amazon.com/awscloudtrail/latest/userguide/send-cloudtrail-events-to-cloudwatch-logs.html'
]
}
for region in regionList:
evidence = {region: 'none detected'}
compliance = 0
for ct in self.cache['cloudtrail']['describe_trails'].get(region,{}).get('trailList',{}):
if 'LatestCloudWatchLogsDeliveryTime' in self.cache['cloudtrail']['get_trail_status'].get(region,{})[ct['TrailARN']]:
if isinstance(self.cache['cloudtrail']['get_trail_status'].get(region,{})[ct['TrailARN']]['LatestCloudWatchLogsDeliveryTime'], (dt.date,dt.datetime)):
x = self.cache['cloudtrail']['get_trail_status'].get(region,{})[ct['TrailARN']]['LatestCloudWatchLogsDeliveryTime'].timestamp()
else:
| |
you are interested in the samples 10, 80, and 140, and want to
know their class name.
>>> from sklearn.datasets import load_wine
>>> data = load_wine()
>>> data.target[[10, 80, 140]]
array([0, 1, 2])
>>> list(data.target_names)
['class_0', 'class_1', 'class_2']
"""
module_path = dirname(__file__)
data, target, target_names = load_data(module_path, 'wine_data.csv')
with open(join(module_path, 'descr', 'wine_data.rst')) as rst_file:
fdescr = rst_file.read()
feature_names = ['alcohol',
'malic_acid',
'ash',
'alcalinity_of_ash',
'magnesium',
'total_phenols',
'flavanoids',
'nonflavanoid_phenols',
'proanthocyanins',
'color_intensity',
'hue',
'od280/od315_of_diluted_wines',
'proline']
frame = None
target_columns = ['target', ]
if as_frame:
frame, data, target = _convert_data_dataframe("load_wine",
data,
target,
feature_names,
target_columns)
if return_X_y:
return data, target
return Bunch(data=data,
target=target,
frame=frame,
target_names=target_names,
DESCR=fdescr,
feature_names=feature_names)
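# Usage sketch (illustrative; ``as_frame`` requires scikit-learn >= 0.23 and pandas):
#
#     from sklearn.datasets import load_wine
#     X, y = load_wine(return_X_y=True, as_frame=True)
#     # X is a (178, 13) DataFrame of features, y a Series of class labels 0/1/2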
@_deprecate_positional_args
def load_iris(*, return_X_y=False, as_frame=False):
"""Load and return the iris dataset (classification).
The iris dataset is a classic and very easy multi-class classification
dataset.
================= ==============
Classes 3
Samples per class 50
Samples total 150
Dimensionality 4
Features real, positive
================= ==============
Read more in the :ref:`User Guide <iris_dataset>`.
Parameters
----------
return_X_y : bool, default=False.
If True, returns ``(data, target)`` instead of a Bunch object. See
below for more information about the `data` and `target` object.
.. versionadded:: 0.18
as_frame : bool, default=False
If True, the data is a pandas DataFrame including columns with
appropriate dtypes (numeric). The target is
a pandas DataFrame or Series depending on the number of target columns.
If `return_X_y` is True, then (`data`, `target`) will be pandas
DataFrames or Series as described below.
.. versionadded:: 0.23
Returns
-------
data : :class:`~sklearn.utils.Bunch`
Dictionary-like object, with the following attributes.
data : {ndarray, dataframe} of shape (150, 4)
The data matrix. If `as_frame=True`, `data` will be a pandas
DataFrame.
target: {ndarray, Series} of shape (150,)
The classification target. If `as_frame=True`, `target` will be
a pandas Series.
feature_names: list
The names of the dataset columns.
target_names: list
The names of target classes.
frame: DataFrame of shape (150, 5)
Only present when `as_frame=True`. DataFrame with `data` and
`target`.
.. versionadded:: 0.23
DESCR: str
The full description of the dataset.
filename: str
The path to the location of the data.
.. versionadded:: 0.20
(data, target) : tuple if ``return_X_y`` is True
.. versionadded:: 0.18
Notes
-----
.. versionchanged:: 0.20
Fixed two wrong data points according to Fisher's paper.
The new version is the same as in R, but not as in the UCI
Machine Learning Repository.
Examples
--------
Let's say you are interested in the samples 10, 25, and 50, and want to
know their class name.
>>> from sklearn.datasets import load_iris
>>> data = load_iris()
>>> data.target[[10, 25, 50]]
array([0, 0, 1])
>>> list(data.target_names)
['setosa', 'versicolor', 'virginica']
"""
module_path = dirname(__file__)
data, target, target_names = load_data(module_path, 'iris.csv')
iris_csv_filename = join(module_path, 'data', 'iris.csv')
with open(join(module_path, 'descr', 'iris.rst')) as rst_file:
fdescr = rst_file.read()
feature_names = ['sepal length (cm)', 'sepal width (cm)',
'petal length (cm)', 'petal width (cm)']
frame = None
target_columns = ['target', ]
if as_frame:
frame, data, target = _convert_data_dataframe("load_iris",
data,
target,
feature_names,
target_columns)
if return_X_y:
return data, target
return Bunch(data=data,
target=target,
frame=frame,
target_names=target_names,
DESCR=fdescr,
feature_names=feature_names,
filename=iris_csv_filename)
@_deprecate_positional_args
def load_breast_cancer(*, return_X_y=False, as_frame=False):
"""Load and return the breast cancer wisconsin dataset (classification).
The breast cancer dataset is a classic and very easy binary classification
dataset.
================= ==============
Classes 2
Samples per class 212(M),357(B)
Samples total 569
Dimensionality 30
Features real, positive
================= ==============
Read more in the :ref:`User Guide <breast_cancer_dataset>`.
Parameters
----------
return_X_y : bool, default=False
If True, returns ``(data, target)`` instead of a Bunch object.
See below for more information about the `data` and `target` object.
.. versionadded:: 0.18
as_frame : bool, default=False
If True, the data is a pandas DataFrame including columns with
appropriate dtypes (numeric). The target is
a pandas DataFrame or Series depending on the number of target columns.
If `return_X_y` is True, then (`data`, `target`) will be pandas
DataFrames or Series as described below.
.. versionadded:: 0.23
Returns
-------
data : :class:`~sklearn.utils.Bunch`
Dictionary-like object, with the following attributes.
data : {ndarray, dataframe} of shape (569, 30)
The data matrix. If `as_frame=True`, `data` will be a pandas
DataFrame.
target: {ndarray, Series} of shape (569,)
The classification target. If `as_frame=True`, `target` will be
a pandas Series.
feature_names: list
The names of the dataset columns.
target_names: list
The names of target classes.
frame: DataFrame of shape (569, 31)
Only present when `as_frame=True`. DataFrame with `data` and
`target`.
.. versionadded:: 0.23
DESCR: str
The full description of the dataset.
filename: str
The path to the location of the data.
.. versionadded:: 0.20
(data, target) : tuple if ``return_X_y`` is True
.. versionadded:: 0.18
The copy of UCI ML Breast Cancer Wisconsin (Diagnostic) dataset is
downloaded from:
https://goo.gl/U2Uwz2
Examples
--------
Let's say you are interested in the samples 10, 50, and 85, and want to
know their class name.
>>> from sklearn.datasets import load_breast_cancer
>>> data = load_breast_cancer()
>>> data.target[[10, 50, 85]]
array([0, 1, 0])
>>> list(data.target_names)
['malignant', 'benign']
"""
module_path = dirname(__file__)
data, target, target_names = load_data(module_path, 'breast_cancer.csv')
csv_filename = join(module_path, 'data', 'breast_cancer.csv')
with open(join(module_path, 'descr', 'breast_cancer.rst')) as rst_file:
fdescr = rst_file.read()
feature_names = np.array(['mean radius', 'mean texture',
'mean perimeter', 'mean area',
'mean smoothness', 'mean compactness',
'mean concavity', 'mean concave points',
'mean symmetry', 'mean fractal dimension',
'radius error', 'texture error',
'perimeter error', 'area error',
'smoothness error', 'compactness error',
'concavity error', 'concave points error',
'symmetry error', 'fractal dimension error',
'worst radius', 'worst texture',
'worst perimeter', 'worst area',
'worst smoothness', 'worst compactness',
'worst concavity', 'worst concave points',
'worst symmetry', 'worst fractal dimension'])
frame = None
target_columns = ['target', ]
if as_frame:
frame, data, target = _convert_data_dataframe("load_breast_cancer",
data,
target,
feature_names,
target_columns)
if return_X_y:
return data, target
return Bunch(data=data,
target=target,
frame=frame,
target_names=target_names,
DESCR=fdescr,
feature_names=feature_names,
filename=csv_filename)
@_deprecate_positional_args
def load_digits(*, n_class=10, return_X_y=False, as_frame=False):
"""Load and return the digits dataset (classification).
Each datapoint is a 8x8 image of a digit.
================= ==============
Classes 10
Samples per class ~180
Samples total 1797
Dimensionality 64
Features integers 0-16
================= ==============
Read more in the :ref:`User Guide <digits_dataset>`.
Parameters
----------
n_class : integer, between 0 and 10, optional (default=10)
The number of classes to return.
return_X_y : bool, default=False.
If True, returns ``(data, target)`` instead of a Bunch object.
See below for more information about the `data` and `target` object.
.. versionadded:: 0.18
as_frame : bool, default=False
If True, the data is a pandas DataFrame including columns with
appropriate dtypes (numeric). The target is
a pandas DataFrame or Series depending on the number of target columns.
If `return_X_y` is True, then (`data`, `target`) will be pandas
DataFrames or Series as described below.
.. versionadded:: 0.23
Returns
-------
data : :class:`~sklearn.utils.Bunch`
Dictionary-like object, with the following attributes.
data : {ndarray, dataframe} of shape (1797, 64)
The flattened data matrix. If `as_frame=True`, `data` will be
a pandas DataFrame.
target: {ndarray, Series} of shape (1797,)
The classification target. If `as_frame=True`, `target` will be
a pandas Series.
feature_names: list
The names of the dataset columns.
target_names: list
The names of target classes.
.. versionadded:: 0.20
frame: DataFrame of shape (1797, 65)
Only present when `as_frame=True`. DataFrame with `data` and
`target`.
.. versionadded:: 0.23
images: {ndarray} of shape (1797, 8, 8)
The raw image data.
DESCR: str
The full description of the dataset.
(data, target) : tuple if ``return_X_y`` is True
.. versionadded:: 0.18
This is a copy of the test set of the UCI ML hand-written digits datasets
https://archive.ics.uci.edu/ml/datasets/Optical+Recognition+of+Handwritten+Digits
Examples
--------
To load the data and visualize the images::
>>> from sklearn.datasets import load_digits
>>> digits = load_digits()
>>> print(digits.data.shape)
(1797, 64)
>>> import matplotlib.pyplot as plt #doctest: +SKIP
>>> plt.gray() #doctest: +SKIP
>>> plt.matshow(digits.images[0]) #doctest: +SKIP
>>> plt.show() #doctest: +SKIP
"""
module_path = dirname(__file__)
data = np.loadtxt(join(module_path, 'data', 'digits.csv.gz'),
delimiter=',')
with open(join(module_path, 'descr', 'digits.rst')) as f:
descr = f.read()
    target = data[:, -1].astype(int, copy=False)  # np.int alias removed in newer NumPy
flat_data = data[:, :-1]
images = flat_data.view()
images.shape = (-1, 8, 8)
if n_class < 10:
idx = target < n_class
flat_data, target = flat_data[idx], target[idx]
images = images[idx]
feature_names = ['pixel_{}_{}'.format(row_idx, col_idx)
for | |
None), 3: ('C', 1, None)},
{frozenset({1, 3}): (1, None)}),
2: ({0: ('C', 3, None), 2: ('C', 1, None), 4: ('C', 1, None)},
{frozenset({0, 2}): (1, None), frozenset({2, 4}): (1, None)}),
3: ({1: ('C', 3, None), 3: ('C', 1, None), 5: ('C', 1, None)},
{frozenset({1, 3}): (1, None), frozenset({3, 5}): (1, None)}),
4: ({2: ('C', 1, None), 4: ('C', 1, None), 6: ('C', 2, None)},
{frozenset({2, 4}): (1, None), frozenset({4, 6}): (1, None)}),
5: ({3: ('C', 1, None), 5: ('C', 1, None), 7: ('C', 1, None)},
{frozenset({3, 5}): (1, None), frozenset({5, 7}): (1, None)}),
6: ({4: ('C', 1, None), 6: ('C', 2, None), 7: ('C', 1, None)},
{frozenset({4, 6}): (1, None), frozenset({6, 7}): (1, None)}),
7: ({8: ('O', 0, None), 5: ('C', 1, None), 6: ('C', 2, None),
7: ('C', 1, None)},
{frozenset({5, 7}): (1, None), frozenset({6, 7}): (1, None),
frozenset({8, 7}): (1, None)}),
8: ({8: ('O', 0, None), 7: ('C', 1, None)},
{frozenset({8, 7}): (1, None)})})
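# Reading guide for the graph literals used in these tests (inferred from the
# accessors exercised below, e.g. set_atom_implicit_hydrogen_valences and
# set_bond_orders): a graph is a pair of dictionaries where
#   atoms: key -> (element symbol, implicit hydrogen count, atom stereo parity)
#   bonds: frozenset({key1, key2}) -> (bond order, bond stereo parity)
# and a parity of None means "unspecified".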
def test__atom_inchi_numbers():
""" test graph.atom_inchi_numbers
"""
cgr = C8H13O_CGR
natms = len(graph.atoms(cgr))
for _ in range(10):
pmt_dct = dict(enumerate(numpy.random.permutation(natms)))
cgr_pmt = graph.relabel(cgr, pmt_dct)
inv_pmt_dct = dict(map(reversed, pmt_dct.items()))
assert graph.atom_inchi_numbers(cgr_pmt) == inv_pmt_dct
ch_cgr = ({5: ('C', 1, None)}, {})
assert graph.atom_inchi_numbers(ch_cgr) == {5: 0}
ch_cgr = ({5: ('C', 0, None), 2: ('H', 0, None)},
{frozenset({5, 2}): (1, None)})
assert graph.atom_inchi_numbers(ch_cgr) == {5: 0, 2: -1}
cf_cgr = ({5: ('C', 0, None), 2: ('F', 0, None)},
{frozenset({5, 2}): (1, None)})
assert graph.atom_inchi_numbers(cf_cgr) == {5: 0, 2: 1}
    ccl_cgr = ({5: ('C', 0, None), 2: ('Cl', 0, None)},
{frozenset({5, 2}): (1, None)})
assert graph.atom_inchi_numbers(ccl_cgr) == {5: 0, 2: 1}
def test__inchi():
""" test graph.inchi
"""
co_cgr = ({0: ('C', 0, None), 1: ('O', 0, None)},
{frozenset({0, 1}): (1, None)})
assert graph.inchi(co_cgr) == 'InChI=1S/CO/c1-2'
assert graph.inchi(C8H13O_SGR) == (
'InChI=1S/C8H13O/c1-3-5-7-8(9)6-4-2/h3-6,8H,7H2,1-2H3')
c_cgr = ({5: ('C', 0, None)}, {})
assert graph.inchi(c_cgr) == 'InChI=1S/C'
n_cgr = ({5: ('N', 0, None)}, {})
assert graph.inchi(n_cgr) == 'InChI=1S/N'
ch_cgr = ({5: ('C', 1, None)}, {})
assert graph.inchi(ch_cgr) == 'InChI=1S/CH/h1H'
ch_cgr = ({5: ('C', 0, None), 2: ('H', 0, None)},
{frozenset({5, 2}): (1, None)})
assert graph.inchi(ch_cgr) == 'InChI=1S/CH/h1H'
cf_cgr = ({5: ('C', 0, None), 2: ('F', 0, None)},
{frozenset({5, 2}): (1, None)})
assert graph.inchi(cf_cgr) == 'InChI=1S/CF/c1-2'
ccl_cgr = ({5: ('C', 0, None), 2: ('Cl', 0, None)},
{frozenset({5, 2}): (1, None)})
assert graph.inchi(ccl_cgr) == 'InChI=1S/CCl/c1-2'
nh_cgr = ({5: ('N', 1, None)}, {})
assert graph.inchi(nh_cgr) == 'InChI=1S/HN/h1H'
ch2_cgr = ({5: ('C', 2, None)}, {})
assert graph.inchi(ch2_cgr) == 'InChI=1S/CH2/h1H2'
def test__stereo_inchi():
""" test graph.stereo_inchi
"""
assert graph.stereo_inchi(C8H13O_SGR) == C8H13O_ICH
assert graph.stereo_inchi(C2H2CL2F2_MM_SGR) == C2H2CL2F2_MM_ICH
assert graph.stereo_inchi(C2H2CL2F2_MP_SGR) == C2H2CL2F2_MP_ICH
assert graph.stereo_inchi(C2H2F2_P_SGR) == C2H2F2_P_ICH
assert graph.stereo_inchi(C4H8O_M_SGR) == C4H8O_M_ICH
# test transformations
def test__implicit():
""" test graph.implicit
"""
assert graph.implicit(CH2FH2H_CGR_EXP) == CH2FH2H_CGR_IMP
assert graph.implicit(CH2FH2H_CGR_EXP, (1, 3, 4, 6)) == CH2FH2H_CGR_IMP
def test__explicit():
""" test graph.explicit
"""
ch2fh2h_cgr_exp = graph.explicit(CH2FH2H_CGR_IMP)
assert graph.backbone_isomorphic(ch2fh2h_cgr_exp, CH2FH2H_CGR_EXP)
assert (graph.atom_explicit_hydrogen_keys(ch2fh2h_cgr_exp) ==
{1: (), 3: (7, 8), 4: (9,), 6: (), 7: (), 8: (), 9: ()})
def test__explicit_stereo_sites():
""" test graph.explicit_stereo_sites
"""
assert graph.explicit_stereo_sites(C8H13O_CGR) == C8H13O_CGR
assert (graph.explicit_stereo_sites(C8H13O_SGR)
== ({0: ('C', 3, None), 1: ('C', 3, None), 2: ('C', 0, None),
3: ('C', 0, None), 4: ('C', 0, None), 5: ('C', 0, None),
6: ('C', 2, None), 7: ('C', 0, False), 8: ('O', 0, None),
9: ('H', 0, None), 10: ('H', 0, None), 11: ('H', 0, None),
12: ('H', 0, None), 13: ('H', 0, None)},
{frozenset({0, 2}): (1, None), frozenset({1, 3}): (1, None),
frozenset({2, 4}): (1, False), frozenset({3, 5}): (1, False),
frozenset({4, 6}): (1, None), frozenset({5, 7}): (1, None),
frozenset({6, 7}): (1, None), frozenset({8, 7}): (1, None),
frozenset({9, 7}): (1, None), frozenset({2, 10}): (1, None),
frozenset({3, 11}): (1, None), frozenset({4, 12}): (1, None),
frozenset({5, 13}): (1, None)}))
def test__delete_atoms():
""" test graph.delete_atoms
"""
assert (graph.delete_atoms(CH2FH2H_CGR_EXP, (0, 2, 5)) ==
({1: ('F', 0, None), 3: ('C', 0, None), 4: ('H', 0, None),
6: ('H', 0, None)},
{frozenset({1, 3}): (1, None)}))
def test__add_explicit_hydrogens():
""" test graph.add_explicit_hydrogens
"""
assert graph.add_explicit_hydrogens(
CH2FH2H_CGR_IMP, {3: 2, 4: 1}
) == ({1: ('F', 0, None), 3: ('C', 2, None), 4: ('H', 1, None),
6: ('H', 0, None), 7: ('H', 0, None), 8: ('H', 0, None),
9: ('H', 0, None)},
{frozenset({1, 3}): (1, None), frozenset({3, 7}): (1, None),
frozenset({8, 3}): (1, None), frozenset({9, 4}): (1, None)})
def test__subgraph():
""" test graph.subgraph
"""
assert (graph.subgraph(CH2FH2H_CGR_EXP, (1, 3, 4, 6)) ==
({1: ('F', 0, None), 3: ('C', 0, None), 4: ('H', 0, None),
6: ('H', 0, None)},
{frozenset({1, 3}): (1, None)}))
def test__subgraph_by_bonds():
""" test graph.subgraph_by_bonds
"""
assert (graph.subgraph_by_bonds(C8H13O_CGR,
{frozenset({1, 3}), frozenset({3, 5}),
frozenset({5, 7}), frozenset({8, 7})}) ==
({1: ('C', 3, None), 3: ('C', 1, None), 5: ('C', 1, None),
7: ('C', 1, None), 8: ('O', 0, None)},
{frozenset({1, 3}): (1, None), frozenset({3, 5}): (1, None),
frozenset({5, 7}): (1, None), frozenset({8, 7}): (1, None)}))
def test__relabel():
""" test graph.relabel
"""
assert graph.relabel(
CH2FH2H_CGR_IMP, {1: 0, 3: 1, 4: 2, 6: 3}
) == ({0: ('F', 0, None), 1: ('C', 2, None), 2: ('H', 1, None),
3: ('H', 0, None)},
{frozenset({0, 1}): (1, None)})
def test__subresonances():
""" test graph.subresonances
"""
c2_cgr = ({0: ('C', 0, None), 1: ('C', 0, None)},
{frozenset({0, 1}): (1, None)})
assert graph.subresonances(c2_cgr) == (
({0: ('C', 0, None), 1: ('C', 0, None)},
{frozenset({0, 1}): (1, None)}),
({0: ('C', 0, None), 1: ('C', 0, None)},
{frozenset({0, 1}): (2, None)}),
({0: ('C', 0, None), 1: ('C', 0, None)},
{frozenset({0, 1}): (3, None)}),
({0: ('C', 0, None), 1: ('C', 0, None)},
{frozenset({0, 1}): (4, None)}),
)
c3h3_cgr = ({0: ('C', 1, None), 1: ('C', 1, None), 2: ('C', 1, None)},
{frozenset({0, 1}): (1, None), frozenset({1, 2}): (1, None),
frozenset({2, 0}): (1, None)})
assert graph.subresonances(c3h3_cgr) == (
({0: ('C', 1, None), 1: ('C', 1, None), 2: ('C', 1, None)},
{frozenset({0, 1}): (1, None), frozenset({1, 2}): (1, None),
frozenset({0, 2}): (1, None)}),
({0: ('C', 1, None), 1: ('C', 1, None), 2: ('C', 1, None)},
{frozenset({0, 1}): (1, None), frozenset({1, 2}): (2, None),
frozenset({0, 2}): (1, None)}),
({0: ('C', 1, None), 1: ('C', 1, None), 2: ('C', 1, None)},
{frozenset({0, 1}): (1, None), frozenset({1, 2}): (1, None),
frozenset({0, 2}): (2, None)}),
({0: ('C', 1, None), 1: ('C', 1, None), 2: ('C', 1, None)},
{frozenset({0, 1}): (2, None), frozenset({1, 2}): (1, None),
frozenset({0, 2}): (1, None)}),
)
def test__lowspin_resonance():
""" test graph.lowspin_resonance
"""
c6h6_cgr = ({0: ('C', 1, None), 1: ('C', 1, None), 2: ('C', 1, None),
3: ('C', 1, None), 4: ('C', 1, None), 5: ('C', 1, None)},
{frozenset({0, 1}): (1, None), frozenset({1, 2}): (1, None),
frozenset({2, 3}): (1, None), frozenset({3, 4}): (1, None),
frozenset({4, 5}): (1, None), frozenset({5, 0}): (1, None)})
assert graph.lowspin_resonance(c6h6_cgr) in [
({0: ('C', 1, None), 1: ('C', 1, None), 2: ('C', 1, None),
3: ('C', 1, None), 4: ('C', 1, None), 5: ('C', 1, None)},
{frozenset({0, 1}): (2, None), frozenset({1, 2}): (1, None),
frozenset({2, 3}): (2, None), frozenset({3, 4}): (1, None),
frozenset({4, 5}): (2, None), frozenset({5, 0}): (1, None)}),
({0: ('C', 1, None), 1: ('C', 1, None), 2: ('C', 1, None),
3: ('C', 1, None), 4: ('C', 1, None), 5: ('C', 1, None)},
{frozenset({0, 1}): (1, None), frozenset({1, 2}): (2, None),
frozenset({2, 3}): (1, None), frozenset({3, 4}): (2, None),
frozenset({4, 5}): (1, None), frozenset({5, 0}): (2, None)})
]
def test__reflection():
""" test graph.reflection
"""
assert (graph.reflection(C8H13O_SGR) ==
graph.set_atom_stereo_parities(C8H13O_SGR, {7: True}))
# test comparisons
def test__backbone_isomorphic():
""" test graph.backbone_isomorphic
"""
assert graph.backbone_isomorphic(CH2FH2H_CGR_EXP, CH2FH2H_CGR_IMP)
cgr = C8H13O_CGR
natms = len(graph.atoms(cgr))
for _ in range(10):
pmt_dct = dict(enumerate(numpy.random.permutation(natms)))
cgr_pmt = graph.relabel(cgr, pmt_dct)
assert graph.backbone_isomorphic(cgr, cgr_pmt)
def test__backbone_isomorphism():
""" test graph.backbone_isomorphism
"""
assert (graph.backbone_isomorphism(CH2FH2H_CGR_EXP, CH2FH2H_CGR_IMP) ==
{1: 1, 3: 3, 4: 4, 6: 6})
cgr = C8H13O_CGR
natms = len(graph.atoms(cgr))
for _ in range(10):
pmt_dct = dict(enumerate(numpy.random.permutation(natms)))
cgr_pmt = graph.relabel(cgr, pmt_dct)
assert graph.backbone_isomorphism(cgr, cgr_pmt) == pmt_dct
if __name__ == '__main__':
# test constructors and value getters
test__from_data()
test__atom_stereo_keys()
test__bond_stereo_keys()
# test value setters
test__set_atom_implicit_hydrogen_valences()
test__set_atom_stereo_parities()
test__set_bond_orders()
test__set_bond_stereo_parities()
test__increment_bond_orders()
# test derived values
test__is_chiral()
test__maximum_spin_multiplicity()
test__possible_spin_multiplicities()
test__ring_keys_list()
test__backbone_keys()
test__explicit_hydrogen_keys()
test__atom_nuclear_charges()
test__atom_total_valences()
test__atom_bond_valences()
test__atom_radical_valences()
test__atom_neighbor_keys()
test__atom_explicit_hydrogen_keys()
test__atom_bond_keys()
test__atom_neighborhoods()
test__atom_inchi_numbers()
test__inchi()
test__stereo_inchi()
# | |
"""Audio queue management."""
import asyncio
import atexit
import collections
import copy
import discord
import enum
import json
import os
import queue
import subprocess
import threading
import time
import uuid
from typing import cast, Any, Awaitable, Callable, Deque, List, Optional
import uita.exceptions
import uita.youtube_api
import logging
log = logging.getLogger(__name__)
class Track():
"""Container for audio resource metadata.
Args:
path: Path to audio resource for ffmpeg to load.
user: User that requested track.
title: Title of track.
duration: Track duration in seconds.
live: Determines if the track is a remote livestream.
local: Determines if the track is a local file or not.
url: The public URL of the track if it exists, ``None`` otherwise.
Attributes:
id (str): Unique 32 character long ID.
path (str): Path to audio resource for ffmpeg to load.
user (uita.types.DiscordUser): User that requested track.
title (str): Title of track.
duration (int): Track duration in seconds.
live (bool): Determines if the track is a remote livestream.
local (bool): Determines if the track is a local file or not.
url (typing.Optional[str]): The public URL of the track if it exists, ``None`` otherwise.
offset (float): Offset in seconds to start track from.
"""
def __init__(
self,
path: str,
user: "uita.types.DiscordUser",
title: str,
duration: float,
live: bool,
local: bool,
url: Optional[str] = None
):
self.id = uuid.uuid4().hex
self.path = path
self.user = user
self.title = title
self.duration = float(duration)
self.live = live
self.local = local
self.url = url
self.offset: float = 0.0
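# Illustrative sketch (not in the original module): constructing a Track for a
# locally cached file. The user argument is assumed to be a uita.types.DiscordUser;
# the path and metadata below are placeholders that only show the call shape.
def _example_track(user: "uita.types.DiscordUser") -> Track:
    return Track(
        path="/tmp/uita-cache/upload.opus",  # hypothetical cached file
        user=user,
        title="Example Artist - Example Title",
        duration=215.0,
        live=False,
        local=True,
    )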
# NOTE: These values must be synced with the enum used in utils/Message.js:PlayStatusSendMessage
class Status(enum.IntEnum):
"""Play status for audio."""
PLAYING = 1
PAUSED = 2
class Queue():
"""Queues audio resources to be played by a looping task.
Args:
maxlen: Maximum queue size. Default is ``None``, which is unlimited.
on_queue_change: Callback that is triggered everytime the state of the playback queue
changes. Function accepts a list of :class:`~uita.audio.Track` as its only argument.
on_status_change: Callback that is triggered everytime the playback status changes.
Function accepts a :class:`~uita.audio.Status` as its only argument.
loop: Event loop for audio tasks to run in.
Attributes:
loop (asyncio.AbstractEventLoop): Event loop for audio tasks to run in.
status (uita.audio.Status): Current playback status (playing, paused, etc).
"""
QueueCallbackType = Callable[
[List[Track], Optional["uita.types.DiscordUser"]], Awaitable[None]
]
StatusCallbackType = Callable[[Status], None]
def __init__(
self,
maxlen: Optional[int] = None,
on_queue_change: Optional[QueueCallbackType] = None,
on_status_change: Optional[StatusCallbackType] = None,
loop: Optional[asyncio.AbstractEventLoop] = None
) -> None:
# async lambdas don't exist
async def dummy_queue_change(q: Any, u: Any) -> None: pass
self._on_queue_change = on_queue_change or dummy_queue_change
        def dummy_status_change(s: Any) -> None: pass  # status callback is synchronous (see StatusCallbackType)
self._on_status_change = on_status_change or dummy_status_change
self.loop = loop or asyncio.get_event_loop()
self.status = Status.PAUSED
self._now_playing: Optional[Track] = None
self._queue: Deque[Track] = collections.deque()
self._queue_lock = asyncio.Lock(loop=self.loop)
self._queue_update_flag = asyncio.Event(loop=self.loop)
self._queue_maxlen = maxlen
self._play_task: Optional[asyncio.Task[Any]] = None
self._play_start_time: Optional[float] = None
self._stream: Optional[FfmpegStream] = None
self._voice: Optional[discord.VoiceClient] = None
def queue(self) -> List[Track]:
"""Retrieves a list of currently queued audio resources.
Returns:
Ordered list of audio resources queued for playback.
"""
if self._now_playing is not None:
            # To maintain timer precision we want to avoid modifying the current track's offset
# outside of pause/resumes
now_playing = copy.copy(self._now_playing)
if self._play_start_time is not None:
now_playing.offset += max(
time.perf_counter() - self._play_start_time,
0.0
)
return [now_playing] + list(self._queue)
return list(self._queue)
def queue_full(self) -> bool:
"""Tests if the queue is at capacity.
Returns:
True if the queue is full.
"""
return self._queue_maxlen is not None and len(self.queue()) >= self._queue_maxlen
async def play(self, voice: discord.VoiceClient) -> None:
"""Starts a new playlist task that awaits and plays new queue inputs.
First stops current playlist task if it exists.
Args:
voice: Voice connection to spawn audio players for.
"""
# Cancels currently running play task
await self.stop()
self._play_task = self.loop.create_task(self._play_loop(voice))
async def stop(self) -> None:
"""Stops and currently playing audio and cancels the running play task."""
if self._play_task is not None:
# If we stop during a song, add it to the front of the queue to be resumed later
if self._now_playing is not None:
if self._play_start_time is not None:
# Add the time spent playing this track to the starting offset, so it resumes
# where it left off
self._now_playing.offset += max(
time.perf_counter() - self._play_start_time,
0.0
)
self._play_start_time = None
self._queue.appendleft(self._now_playing)
self._now_playing = None
self._play_task.cancel()
await self._play_task
self._end_stream()
async def enqueue_file(self, path: str, user: "uita.types.DiscordUser") -> None:
"""Queues a file to be played by the running playlist task.
Args:
path: Path for audio resource to be played.
user: User that requested track.
Raises:
uita.exceptions.ClientError: If called with an unusable audio path.
"""
# Some quick sanitization to make sure bad input won't escape the cache directory
# However user input should never reach this function
filename = os.path.join(uita.utils.cache_dir(), os.path.basename(path))
if not os.path.isfile(filename):
raise uita.exceptions.ClientError(
uita.message.ErrorFileInvalidMessage("Invalid audio format")
)
completed_probe_process = await self.loop.run_in_executor(
None,
lambda: subprocess.run([
"ffprobe",
filename,
"-of", "json",
"-show_format",
"-show_streams",
"-select_streams", "a",
"-show_error",
"-loglevel", "quiet"
], stdout=subprocess.PIPE)
)
probe = json.loads(completed_probe_process.stdout.decode("utf-8"))
if "format" not in probe:
raise uita.exceptions.ClientError(
uita.message.ErrorFileInvalidMessage("Invalid audio format")
)
if "streams" not in probe or len(probe["streams"]) == 0:
raise uita.exceptions.ClientError(
uita.message.ErrorFileInvalidMessage("No audio track found")
)
title = "untagged file upload"
if "tags" in probe["format"]:
            # ffprobe sometimes returns tag keys in all caps, sometimes not
tags = {k.lower(): v for k, v in probe["format"]["tags"].items()}
title = "{} - {}".format(
tags.get("artist", "Unknown artist"),
tags.get("title", "Unknown title")
)
log.info(f"[{user.name}:{user.id}] Enqueue [Local]{title}, {probe['format']['duration']}s")
# This check cannot have any awaits between it and the following queue.append()s
if self.queue_full():
raise uita.exceptions.ClientError(uita.message.ErrorQueueFullMessage())
self._queue.append(Track(
filename,
user,
title,
float(probe["format"]["duration"]),
live=False,
local=True
))
await self._notify_queue_change(user)
async def enqueue_url(self, url: str, user: "uita.types.DiscordUser") -> None:
"""Queues a URL to be played by the running playlist task.
Args:
url: URL for audio resource to be played.
user: User that requested track.
Raises:
uita.exceptions.ClientError: If called with an unusable audio path.
"""
info = await uita.youtube_api.scrape(url, loop=self.loop)
# This check cannot have any awaits between it and the following queue.append()s
if self.queue_full():
raise uita.exceptions.ClientError(uita.message.ErrorQueueFullMessage())
if info["extractor"] == "Youtube":
log.info(f"[{user.name}:{user.id}] Enqueue [YouTube]{info['title']}({info['id']}) "
f"{info['acodec']}@{info['abr']}abr, {info['duration']}s")
self._queue.append(Track(
info["url"],
user,
info["title"],
float(info["duration"]),
info["is_live"] or False, # is_live is either True or None?? Thanks ytdl
local=False,
url=f"https://youtube.com/watch?v={info['id']}"
))
await self._notify_queue_change(user)
elif info["extractor"] == "YoutubePlaylist":
if info["_type"] != "playlist":
raise uita.exceptions.ServerError("Unknown playlist type")
for entry in info["entries"]:
await self.enqueue_url(f"https://youtube.com/watch?v={entry['id']}", user)
else:
raise uita.exceptions.ClientError(uita.message.ErrorUrlInvalidMessage())
async def move(self, track_id: str, position: int) -> None:
"""Moves a track to a new position in the playback queue.
Args:
track_id: Track ID of audio resource to be moved.
position: Index position for the track to be moved to.
"""
async with self._queue_lock:
if position >= len(self.queue()) or position < 0:
log.debug("Requested queue index out of bounds")
return
# Check if re-ordering the queue will change the currently playing song
if self._now_playing is not None and self._voice is not None:
# No need to swap with self while playing, would restart the track
if self._now_playing.id == track_id and position == 0:
return
if self._now_playing.id == track_id or position == 0:
self._now_playing.offset = 0
self._queue.appendleft(self._now_playing)
self._now_playing = None
self._voice.stop()
# Since now_playing will not be added to the queue, offset the index to compensate
else:
position -= 1
for index, track in enumerate(self._queue):
if track.id == track_id:
del self._queue[index]
self._queue.insert(position, track)
await self._notify_queue_change()
return
async def remove(self, track_id: str) -> None:
"""Removes a track from the playback queue.
Args:
track_id: Track ID of audio resource to be removed.
"""
async with self._queue_lock:
if self._now_playing is not None and self._now_playing.id == track_id:
if self._voice is not None:
self._voice.stop()
return
for track in self._queue:
if track.id == track_id:
self._queue.remove(track)
await self._notify_queue_change()
return
async def _after_song(self) -> None:
async with self._queue_lock:
self._now_playing = None
self._change_status(Status.PAUSED)
await self._notify_queue_change()
self._end_stream()
def _change_status(self, status: Status) -> None:
self.status = status
self._on_status_change(self.status)
async def _play_loop(self, voice: discord.VoiceClient) -> None:
try:
while voice.is_connected():
self._queue_update_flag.clear()
async with self._queue_lock:
if self._voice is None and len(self._queue) > 0:
self._now_playing = self._queue.popleft()
log.info(f"[{self._now_playing.user.name}:{self._now_playing.user.id}] "
f"Now playing {self._now_playing.title}")
# Launch ffmpeg process
self._stream = FfmpegStream(
self._now_playing,
discord.opus.Encoder()
)
self._voice = voice
# Waits until ffmpeg has buffered audio before | |
# @classmethod
# def _validate_step_and_value(cls, values) -> Numeric:
# value, min, max, step = values["value"], values["min"], values["max"], values["step"]
# if value is not None:
# if value != max and value + step > max:
# raise ValueError(
# f"invalid range: adding step to value is greater than max ({cls.human_readable(value)} + {cls.human_readable(step)} > {cls.human_readable(max)})"
# )
# elif value != min and value - step < min:
# raise ValueError(
# f"invalid range: subtracting step from value is less than min ({cls.human_readable(value)} - {cls.human_readable(step)} < {cls.human_readable(min)})"
# )
# else:
# if (min + step > max):
# raise ValueError(
# f"invalid step: adding step to min is greater than max ({cls.human_readable(min)} + {cls.human_readable(step)} > {cls.human_readable(max)})"
# )
# elif (max - step < min):
# raise ValueError(
# f"invalid step: subtracting step from max is less than min ({cls.human_readable(max)} + {cls.human_readable(step)} < {cls.human_readable(min)})"
# )
# return values
def __str__(self) -> str:
return f"{self.name} ({self.type} {self.human_readable(self.min)}-{self.human_readable(self.max)}, {self.human_readable(self.step)})"
def __opsani_repr__(self) -> dict:
return {
self.name: self.dict(
include={"type", "min", "max", "step", "pinned", "value"}
)
}
class CPU(RangeSetting):
"""CPU is a Setting that describes an adjustable range of values for CPU
resources on a container or virtual machine.
CPU is a default setting known to the Opsani optimization engine that is
used for calculating cost impacts and carries specialized semantics and
    heuristics. Always represent computing resources as a CPU object or a
    type derived from it.
"""
name = pydantic.Field(
"cpu", const=True, description="Identifies the setting as a CPU setting."
)
min: float = pydantic.Field(
..., gt=0, description="The inclusive minimum number of vCPUs or cores to run."
)
max: float = pydantic.Field(
..., description="The inclusive maximum number of vCPUs or cores to run."
)
step: float = pydantic.Field(
0.125,
description="The multiplier of vCPUs or cores to add or remove during adjustments.",
)
value: Optional[float] = pydantic.Field(
None,
description="The number of vCPUs or cores as measured by the servo or adjusted by the optimizer, if any.",
)
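# Hedged sketch (not part of the original module): building a CPU range setting
# and serializing it the way Component.__opsani_repr__ consumes it below. The
# numbers are illustrative; only min and max are required, step defaults to 0.125.
def _example_cpu_setting() -> dict:
    cpu = CPU(min=0.25, max=4.0, value=1.0)
    return cpu.__opsani_repr__()  # {"cpu": {"type": ..., "min": 0.25, "max": 4.0, ...}}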
class Memory(RangeSetting):
"""Memory is a Setting that describes an adjustable range of values for
memory resources on a container or virtual machine.
Memory is a default setting known to the Opsani optimization engine that is
used for calculating cost impacts and carries specialized semantics and
    heuristics. Always represent memory resources as a Memory object or a
    type derived from it.
"""
name = pydantic.Field(
"mem", const=True, description="Identifies the setting as a Memory setting."
)
@pydantic.validator("min")
@classmethod
def ensure_min_greater_than_zero(cls, value: Numeric) -> Numeric:
if value == 0:
raise ValueError("min must be greater than zero")
return value
class Replicas(RangeSetting):
"""Memory is a Setting that describes an adjustable range of values for
memory resources on a container or virtual machine.
Memory is a default setting known to the Opsani optimization engine that is
used for calculating cost impacts and carries specialized semantics and
heuristics. Always representing memory resources as a Memory object or
type derived thereof.
"""
name = pydantic.Field(
"replicas",
const=True,
description="Identifies the setting as a replicas setting.",
)
min: pydantic.StrictInt = pydantic.Field(
...,
description="The inclusive minimum number of replicas to of the application to run.",
)
max: pydantic.StrictInt = pydantic.Field(
...,
description="The inclusive maximum number of replicas to of the application to run.",
)
step: pydantic.StrictInt = pydantic.Field(
1,
description="The multiplier of instances to add or remove during adjustments.",
)
value: Optional[pydantic.StrictInt] = pydantic.Field(
None,
description="The optional number of replicas running as measured by the servo or to be adjusted to as commanded by the optimizer.",
)
class InstanceTypeUnits(str, enum.Enum):
"""InstanceTypeUnits is an enumeration that defines sources of compute instances."""
ec2 = "ec2"
class InstanceType(EnumSetting):
"""InstanceType is a Setting that describes an adjustable enumeration of
values for instance types of nodes or virtual machines.
    InstanceType is a default setting known to the Opsani optimization engine that is
    used for calculating cost impacts and carries specialized semantics and
    heuristics. Always represent instance types as an InstanceType object or a
    type derived from it.
"""
name = pydantic.Field(
"inst_type",
const=True,
description="Identifies the setting as an instance type enum setting.",
)
unit: InstanceTypeUnits = pydantic.Field(
InstanceTypeUnits.ec2,
description="The unit of instance types identifying the provider.",
)
class Component(BaseModel):
"""Component objects describe optimizable applications or services that
expose adjustable settings.
"""
name: str
"""The unique name of the component.
"""
settings: List[Setting]
"""The list of adjustable settings that are available for optimizing the
component.
"""
def __init__(self, name: str, settings: List[Setting], **kwargs) -> None: # noqa: D107
super().__init__(name=name, settings=settings, **kwargs)
def get_setting(self, name: str) -> Optional[Setting]:
"""Returns a setting by name or None if it could not be found.
Args:
name: The name of the setting to retrieve.
Returns:
The setting within the component with the given name or None if such
a setting could not be found.
"""
return next(filter(lambda m: m.name == name, self.settings), None)
def __opsani_repr__(self) -> dict:
settings_dict = {"settings": {}}
for setting in self.settings:
settings_dict["settings"].update(setting.__opsani_repr__())
return {self.name: settings_dict}
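# Hedged sketch (not part of the original module): assembling a Component from
# the default settings defined above and emitting the nested dict shape sent to
# the optimizer. Component name and ranges are illustrative.
def _example_component() -> dict:
    web = Component(
        "web",
        settings=[
            CPU(min=0.5, max=2.0),
            Memory(min=0.25, max=4.0, step=0.25),
            Replicas(min=1, max=10),
        ],
    )
    return web.__opsani_repr__()  # {"web": {"settings": {"cpu": {...}, "mem": {...}, "replicas": {...}}}}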
class Control(BaseModel):
"""Control objects model parameters returned by the optimizer that govern
aspects of the operation to be performed.
"""
duration: Duration = cast(Duration, 0)
"""How long the operation should execute.
"""
delay: Duration = cast(Duration, 0)
"""How long to wait beyond the duration in order to ensure that the metrics
for the target interval have been aggregated and are available for reading.
"""
warmup: Duration = cast(Duration, 0)
"""How long to wait before starting the operation in order to allow the
application to reach a steady state (e.g., filling read through caches, loading
    class files into memory, just-in-time compilation being applied to critical
code paths, etc.).
"""
settlement: Optional[Duration] = None
"""How long to wait after performing an operation in order to allow the
application to reach a steady state (e.g., filling read through caches, loading
    class files into memory, just-in-time compilation being applied to critical
code paths, etc.).
"""
load: Optional[Dict[str, Any]] = None
"""An optional dictionary describing the parameters of the desired load
profile.
"""
userdata: Optional[Dict[str, Any]] = None
"""An optional dictionary of supplemental metadata with no platform defined
semantics.
"""
@pydantic.root_validator(pre=True)
def validate_past_and_delay(cls, values):
if "past" in values:
# NOTE: past is an alias for delay in the API
if "delay" in values:
assert (
values["past"] == values["delay"]
), "past and delay attributes must be equal"
values["delay"] = values.pop("past")
return values
@pydantic.validator("duration", "warmup", "delay", always=True, pre=True)
@classmethod
def validate_durations(cls, value) -> Duration:
return value or Duration(0)
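# Hedged sketch (not part of the original module): the root validator above lets
# the optimizer API spell the delay as "past"; both keys parse to the same field.
# Coercion of plain numbers into Duration is assumed to behave like the defaults above.
def _example_control() -> Control:
    return Control.parse_obj({"duration": 60, "past": 30})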
class Description(BaseModel):
"""Description objects model the essential elements of a servo
configuration that the optimizer must be aware of in order to process
measurements and prescribe adjustments.
"""
components: List[Component] = []
"""The set of adjustable components and settings that are available for
optimization.
"""
metrics: List[Metric] = []
"""The set of measurable metrics that are available for optimization.
"""
def get_component(self, name: str) -> Optional[Component]:
"""Returns the component with the given name or `None` if the component
could not be found.
Args:
name: The name of the component to retrieve.
Returns:
The component with the given name or None if it could not be found.
"""
return next(filter(lambda m: m.name == name, self.components), None)
def get_setting(self, name: str) -> Optional[Setting]:
"""
Returns a setting from a fully qualified name of the form `component_name.setting_name`.
Returns:
The setting with the given name or None if it could not be found.
Raises:
ValueError: Raised if the name is not fully qualified.
"""
if not "." in name:
raise ValueError("name must include component name and setting name")
component_name, setting_name = name.split(".", 1)
if component := self.get_component(component_name):
return component.get_setting(setting_name)
return None
def get_metric(self, name: str) -> Optional[Metric]:
"""Returns the metric with the given name or `None` if the metric
could not be found.
Args:
name: The name of the metric to retrieve.
Returns:
The metric with the given name or None if it could not be found.
"""
return next(filter(lambda m: m.name == name, self.metrics), None)
def __opsani_repr__(self) -> dict:
dict = {"application": {"components": {}}, "measurement": {"metrics": {}}}
| |
# -*- coding: utf-8 -*-
## Copyright 2009-2021 NTESS. Under the terms
## of Contract DE-NA0003525 with NTESS, the U.S.
## Government retains certain rights in this software.
##
## Copyright (c) 2009-2021, NTESS
## All rights reserved.
##
## This file is part of the SST software package. For license
## information, see the LICENSE file in the top level directory of the
## distribution.
""" This module provides the a JUnit interface to generate xml data
in a format that Jenkins will consume.
This code comes from https://github.com/kyrus/python-junit-xml
Note: The code from the repo has been modified as allowed
per the MIT license defined on the repo (shown below)
----------------------------------------------------------------------------
The MIT License
Copyright (c) 2013 <NAME>, Inc., <NAME>
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
----------------------------------------------------------------------------
"""
from collections import defaultdict
import sys
import re
import xml.etree.ElementTree as ET
import xml.dom.minidom
#from six import u, iteritems, PY2
# The original code uses the 3rd-party module "six".
# We don't want any external modules, so
# here are equivalent functions.
PY2 = sys.version_info[0] == 2
PY3 = sys.version_info[0] == 3
if PY3:
unichr = chr
def _iteritems(_d, **kw):
""" Py3 iteritems() """
return iter(_d.items(**kw))
def _u(_s):
""" Py3 u() """
return _s
else:
unichr
def _iteritems(_d, **kw):
""" Py2 iteritems() """
return _d.iteritems(**kw)
def _u(_s):
""" Py2 u() """
return unicode(_s.replace(r'\\', r'\\\\'), "unicode_escape")
################################################################################
# Based on the understanding of what Jenkins can parse for JUnit XML files.
# <?xml version="1.0" encoding="utf-8"?>
# <testsuites errors="1" failures="1" tests="4" time="45">
# <testsuite errors="1" failures="1" hostname="localhost" id="0" name="test1"
# package="testdb" tests="4" timestamp="2012-11-15T01:02:29">
# <properties>
# <property name="assert-passed" value="1"/>
# </properties>
# <testcase classname="testdb.directory" name="1-passed-test" time="10"/>
# <testcase classname="testdb.directory" name="2-failed-test" time="20">
# <failure message="Assertion FAILED: failed assert" type="failure">
# the output of the testcase
# </failure>
# </testcase>
# <testcase classname="package.directory" name="3-errord-test" time="15">
# <error message="Assertion ERROR: error assert" type="error">
# the output of the testcase
# </error>
# </testcase>
# <testcase classname="package.directory" name="3-skipped-test" time="0">
# <skipped message="SKIPPED Test" type="skipped">
# the output of the testcase
# </skipped>
# </testcase>
# <testcase classname="testdb.directory" name="3-passed-test" time="10">
# <system-out>
# I am system output
# </system-out>
# <system-err>
# I am the error output
# </system-err>
# </testcase>
# </testsuite>
# </testsuites>
################################################################################
class JUnitTestSuite(object):
"""
Suite of test cases.
Can handle unicode strings or binary strings if their encoding is provided.
"""
def __init__(self, name, test_cases=None, hostname=None, id=None,
package=None, timestamp=None, properties=None, file=None,
log=None, url=None, stdout=None, stderr=None):
self.name = name
if not test_cases:
test_cases = []
try:
iter(test_cases)
except TypeError:
raise TypeError("test_cases must be a list of test cases")
self.test_cases = test_cases
self.timestamp = timestamp
self.hostname = hostname
self.id = id
self.package = package
self.file = file
self.log = log
self.url = url
self.stdout = stdout
self.stderr = stderr
self.properties = properties
####
def junit_build_xml_doc(self, encoding=None):
"""
Builds the XML document for the JUnit test suite.
Produces clean unicode strings and decodes non-unicode with the help of encoding.
Args:
encoding: Used to decode encoded strings.
Returns:
XML document with unicode string elements
"""
# build the test suite element
test_suite_attributes = dict()
if any(c.assertions for c in self.test_cases):
test_suite_attributes["assertions"] = str(sum([int(c.assertions) \
for c in self.test_cases if c.assertions]))
test_suite_attributes["disabled"] = \
str(len([c for c in self.test_cases if not c.is_enabled]))
test_suite_attributes["errors"] = \
str(len([c for c in self.test_cases if c.junit_is_error()]))
test_suite_attributes["failures"] = \
str(len([c for c in self.test_cases if c.junit_is_failure()]))
test_suite_attributes["name"] = _junit_decode(self.name, encoding)
test_suite_attributes["skipped"] = \
str(len([c for c in self.test_cases if c.junit_is_skipped()]))
test_suite_attributes["tests"] = \
str(len(self.test_cases))
test_suite_attributes["time"] = \
str(sum(c.elapsed_sec for c in self.test_cases if c.elapsed_sec))
if self.hostname:
test_suite_attributes["hostname"] = _junit_decode(self.hostname, encoding)
if self.id:
test_suite_attributes["id"] = _junit_decode(self.id, encoding)
if self.package:
test_suite_attributes["package"] = _junit_decode(self.package, encoding)
if self.timestamp:
test_suite_attributes["timestamp"] = _junit_decode(self.timestamp, encoding)
if self.file:
test_suite_attributes["file"] = _junit_decode(self.file, encoding)
if self.log:
test_suite_attributes["log"] = _junit_decode(self.log, encoding)
if self.url:
test_suite_attributes["url"] = _junit_decode(self.url, encoding)
xml_element = ET.Element("testsuite", test_suite_attributes)
# add any properties
if self.properties:
props_element = ET.SubElement(xml_element, "properties")
for k, v in self.properties.items():
attrs = {"name": _junit_decode(k, encoding), "value": _junit_decode(v, encoding)}
ET.SubElement(props_element, "property", attrs)
# add test suite stdout
if self.stdout:
stdout_element = ET.SubElement(xml_element, "system-out")
stdout_element.text = _junit_decode(self.stdout, encoding)
# add test suite stderr
if self.stderr:
stderr_element = ET.SubElement(xml_element, "system-err")
stderr_element.text = _junit_decode(self.stderr, encoding)
# test cases
for case in self.test_cases:
test_case_attributes = dict()
test_case_attributes["name"] = _junit_decode(case.name, encoding)
if case.assertions:
# Number of assertions in the test case
test_case_attributes["assertions"] = "%d" % case.assertions
if case.elapsed_sec:
test_case_attributes["time"] = "%f" % case.elapsed_sec
if case.timestamp:
test_case_attributes["timestamp"] = _junit_decode(case.timestamp, encoding)
if case.classname:
test_case_attributes["classname"] = _junit_decode(case.classname, encoding)
if case.status:
test_case_attributes["status"] = _junit_decode(case.status, encoding)
if case.category:
test_case_attributes["class"] = _junit_decode(case.category, encoding)
if case.file:
test_case_attributes["file"] = _junit_decode(case.file, encoding)
if case.line:
test_case_attributes["line"] = _junit_decode(case.line, encoding)
if case.log:
test_case_attributes["log"] = _junit_decode(case.log, encoding)
if case.url:
test_case_attributes["url"] = _junit_decode(case.url, encoding)
test_case_element = ET.SubElement(xml_element, "testcase", test_case_attributes)
# failures
for failure in case.failures:
if failure["output"] or failure["message"]:
attrs = {"type": "failure"}
if failure["message"]:
attrs["message"] = _junit_decode(failure["message"], encoding)
if failure["type"]:
attrs["type"] = _junit_decode(failure["type"], encoding)
failure_element = ET.Element("failure", attrs)
if failure["output"]:
failure_element.text = _junit_decode(failure["output"], encoding)
test_case_element.append(failure_element)
# errors
for error in case.errors:
if error["message"] or error["output"]:
attrs = {"type": "error"}
if error["message"]:
attrs["message"] = _junit_decode(error["message"], encoding)
if error["type"]:
attrs["type"] = _junit_decode(error["type"], encoding)
error_element = ET.Element("error", attrs)
if error["output"]:
error_element.text = _junit_decode(error["output"], encoding)
test_case_element.append(error_element)
# skippeds
for skipped in case.skipped:
attrs = {"type": "skipped"}
if skipped["message"]:
attrs["message"] = _junit_decode(skipped["message"], encoding)
skipped_element = ET.Element("skipped", attrs)
if skipped["output"]:
skipped_element.text = _junit_decode(skipped["output"], encoding)
test_case_element.append(skipped_element)
# test stdout
if case.stdout:
stdout_element = ET.Element("system-out")
stdout_element.text = _junit_decode(case.stdout, encoding)
test_case_element.append(stdout_element)
# test stderr
if case.stderr:
stderr_element = ET.Element("system-err")
stderr_element.text = _junit_decode(case.stderr, encoding)
test_case_element.append(stderr_element)
return xml_element
####
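# Hedged sketch (not part of the original module): assembling a minimal report
# from one failing test case. JUnitTestCase is defined just below; this helper is
# only defined here, never run at import time, and all names are illustrative.
def _example_junit_report():
    case = JUnitTestCase("widget-test", classname="tests.widgets", elapsed_sec=1.5)
    case.junit_add_failure_info(message="expected 4, got 5", output="assertion log")
    suite = JUnitTestSuite("nightly", test_cases=[case], hostname="localhost")
    # junit_build_xml_doc() returns the <testsuite> element; serialize it with ElementTree.
    return ET.tostring(suite.junit_build_xml_doc(encoding="utf-8"))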
class JUnitTestCase(object):
"""A JUnit test case with a result and possibly some stdout or stderr"""
def __init__(self, name, classname=None, elapsed_sec=None, stdout=None,
stderr=None, assertions=None, timestamp=None, status=None,
category=None, file=None, line=None, log=None, url=None,
allow_multiple_subelements=False):
self.name = name
self.assertions = assertions
self.elapsed_sec = elapsed_sec
self.timestamp = timestamp
self.classname = classname
self.status = status
self.category = category
self.file = file
self.line = line
self.log = log
self.url = url
self.stdout = stdout
self.stderr = stderr
self.is_enabled = True
self.errors = []
self.failures = []
self.skipped = []
        self.allow_multiple_subelements = allow_multiple_subelements
def junit_add_error_info(self, message=None, output=None, error_type=None):
"""Adds an error message, output, or both to the test case"""
error = {}
error["message"] = message
error["output"] = output
error["type"] = error_type
        if self.allow_multiple_subelements:
if message or output:
self.errors.append(error)
elif not len(self.errors):
self.errors.append(error)
else:
if message:
self.errors[0]["message"] = message
if output:
self.errors[0]["output"] = output
if error_type:
self.errors[0]["type"] = error_type
def junit_add_failure_info(self, message=None, output=None, failure_type=None):
"""Adds a failure message, output, or both to the test case"""
failure = {}
failure["message"] = message
failure["output"] = output
failure["type"] = failure_type
        if self.allow_multiple_subelements:
if message or output:
self.failures.append(failure)
elif not len(self.failures):
self.failures.append(failure)
else:
if message:
self.failures[0]["message"] = message
if output:
self.failures[0]["output"] = output
if failure_type:
self.failures[0]["type"] = failure_type
def junit_add_skipped_info(self, message=None, output=None):
"""Adds a skipped message, output, or both to the test case"""
skipped = {}
skipped["message"] = message
skipped["output"] = output
        if self.allow_multiple_subelements:
if message or output:
self.skipped.append(skipped)
elif not len(self.skipped):
self.skipped.append(skipped)
else:
if message:
self.skipped[0]["message"] = message
if output:
self.skipped[0]["output"] = output
def junit_add_elapsed_sec(self, elapsed_sec):
"""Add the elapsed time to the testcase"""
self.elapsed_sec = elapsed_sec
def junit_is_failure(self):
"""returns true if this test case is a failure"""
return sum(1 for f in self.failures if f["message"] or f["output"]) > 0
def junit_is_error(self):
"""returns true if this test case is | |
#encoding=utf-8
from nltk.corpus import stopwords
from sklearn.preprocessing import LabelEncoder
from sklearn.pipeline import FeatureUnion
from sklearn.feature_extraction.text import CountVectorizer, TfidfVectorizer
from sklearn.linear_model import Ridge
from scipy.sparse import hstack, csr_matrix
import pandas as pd
import numpy as np
import xgboost as xgb
#import matplotlib.pyplot as plt
import gc, re
from sklearn.utils import shuffle
from contextlib import contextmanager
from sklearn.externals import joblib
import time
print("Starting job at time:",time.time())
debug = True
print("loading data ...")
used_cols = ["item_id", "user_id"]
if debug == False:
train_df = pd.read_csv("../input/train.csv", parse_dates = ["activation_date"])
y = train_df["deal_probability"]
test_df = pd.read_csv("../input/test.csv", parse_dates = ["activation_date"])
train_active = pd.read_csv("../input/train_active.csv", usecols=used_cols)
test_active = pd.read_csv("../input/test_active.csv", usecols=used_cols)
train_periods = pd.read_csv("../input/periods_train.csv", parse_dates=["date_from", "date_to"])
test_periods = pd.read_csv("../input/periods_test.csv", parse_dates=["date_from", "date_to"])
else:
train_df = pd.read_csv("../input/train.csv", parse_dates = ["activation_date"])
train_df = shuffle(train_df, random_state=1234); train_df = train_df.iloc[:10000]
y = train_df["deal_probability"]
test_df = pd.read_csv("../input/test.csv", nrows=1000, parse_dates = ["activation_date"])
train_active = pd.read_csv("../input/train_active.csv", nrows=1000, usecols=used_cols)
test_active = pd.read_csv("../input/test_active.csv", nrows=1000, usecols=used_cols)
train_periods = pd.read_csv("../input/periods_train.csv", nrows=1000, parse_dates=["date_from", "date_to"])
test_periods = pd.read_csv("../input/periods_test.csv", nrows=1000, parse_dates=["date_from", "date_to"])
print("loading data done!")
# =============================================================================
# Add image quality: by steeve
# =============================================================================
import pickle
with open('../input/inception_v3_include_head_max_train.p','rb') as f:
x = pickle.load(f)
train_features = x['features']
train_ids = x['ids']
with open('../input/inception_v3_include_head_max_test.p','rb') as f:
x = pickle.load(f)
test_features = x['features']
test_ids = x['ids']
del x; gc.collect()
incep_train_image_df = pd.DataFrame(train_features, columns = ['image_quality'])
incep_test_image_df = pd.DataFrame(test_features, columns = ['image_quality'])
incep_train_image_df['image'] = (train_ids)
incep_test_image_df['image'] = (test_ids)
train_df = train_df.join(incep_train_image_df.set_index('image'), on='image')
test_df = test_df.join(incep_test_image_df.set_index('image'), on='image')
del incep_train_image_df, incep_test_image_df
gc.collect()
with open('../input/train_image_features.p','rb') as f:
x = pickle.load(f)
train_blurinesses = x['blurinesses']
train_ids = x['ids']
with open('../input/test_image_features.p','rb') as f:
x = pickle.load(f)
test_blurinesses = x['blurinesses']
test_ids = x['ids']
del x; gc.collect()
incep_train_image_df = pd.DataFrame(train_blurinesses, columns = ['blurinesses'])
incep_test_image_df = pd.DataFrame(test_blurinesses, columns = ['blurinesses'])
incep_train_image_df['image'] = (train_ids)
incep_test_image_df['image'] = (test_ids)
train_df = train_df.join(incep_train_image_df.set_index('image'), on='image')
test_df = test_df.join(incep_test_image_df.set_index('image'), on='image')
print('adding whitenesses ...')
with open('../input/train_image_features.p','rb') as f:
x = pickle.load(f)
train_whitenesses = x['whitenesses']
train_ids = x['ids']
with open('../input/test_image_features.p','rb') as f:
x = pickle.load(f)
test_whitenesses = x['whitenesses']
test_ids = x['ids']
del x; gc.collect()
incep_train_image_df = pd.DataFrame(train_whitenesses, columns = ['whitenesses'])
incep_test_image_df = pd.DataFrame(test_whitenesses, columns = ['whitenesses'])
incep_train_image_df['image'] = (train_ids)
incep_test_image_df['image'] = (test_ids)
train_df = train_df.join(incep_train_image_df.set_index('image'), on='image')
test_df = test_df.join(incep_test_image_df.set_index('image'), on='image')
print('adding dullnesses ...')
with open('../input/train_image_features.p','rb') as f:
x = pickle.load(f)
train_dullnesses = x['dullnesses']
train_ids = x['ids']
with open('../input/test_image_features.p','rb') as f:
x = pickle.load(f)
test_dullnesses = x['dullnesses']
test_ids = x['ids']
del x; gc.collect()
incep_train_image_df = pd.DataFrame(train_dullnesses, columns = ['dullnesses'])
incep_test_image_df = pd.DataFrame(test_dullnesses, columns = ['dullnesses'])
incep_train_image_df['image'] = (train_ids)
incep_test_image_df['image'] = (test_ids)
train_df = train_df.join(incep_train_image_df.set_index('image'), on='image')
test_df = test_df.join(incep_test_image_df.set_index('image'), on='image')
# =============================================================================
# new image data
# =============================================================================
print('adding average_pixel_width ...')
with open('../input/train_image_features_1.p','rb') as f:
x = pickle.load(f)
train_average_pixel_width = x['average_pixel_width']
train_ids = x['ids']
with open('../input/test_image_features_1.p','rb') as f:
x = pickle.load(f)
test_average_pixel_width = x['average_pixel_width']
test_ids = x['ids']
del x; gc.collect()
incep_train_image_df = pd.DataFrame(train_average_pixel_width, columns = ['average_pixel_width'])
incep_test_image_df = pd.DataFrame(test_average_pixel_width, columns = ['average_pixel_width'])
incep_train_image_df['image'] = (train_ids)
incep_test_image_df['image'] = (test_ids)
train_df = train_df.join(incep_train_image_df.set_index('image'), on='image')
test_df = test_df.join(incep_test_image_df.set_index('image'), on='image')
print('adding average_reds ...')
with open('../input/train_image_features_1.p','rb') as f:
x = pickle.load(f)
train_average_reds = x['average_reds']
train_ids = x['ids']
with open('../input/test_image_features_1.p','rb') as f:
x = pickle.load(f)
test_average_reds = x['average_reds']
test_ids = x['ids']
del x; gc.collect()
incep_train_image_df = pd.DataFrame(train_average_reds, columns = ['average_reds'])
incep_test_image_df = pd.DataFrame(test_average_reds, columns = ['average_reds'])
incep_train_image_df['image'] = (train_ids)
incep_test_image_df['image'] = (test_ids)
train_df = train_df.join(incep_train_image_df.set_index('image'), on='image')
test_df = test_df.join(incep_test_image_df.set_index('image'), on='image')
print('adding average_blues ...')
with open('../input/train_image_features_1.p','rb') as f:
x = pickle.load(f)
train_average_blues = x['average_blues']
train_ids = x['ids']
with open('../input/test_image_features_1.p','rb') as f:
x = pickle.load(f)
test_average_blues = x['average_blues']
test_ids = x['ids']
del x; gc.collect()
incep_train_image_df = pd.DataFrame(train_average_blues, columns = ['average_blues'])
incep_test_image_df = pd.DataFrame(test_average_blues, columns = ['average_blues'])
incep_train_image_df['image'] = (train_ids)
incep_test_image_df['image'] = (test_ids)
train_df = train_df.join(incep_train_image_df.set_index('image'), on='image')
test_df = test_df.join(incep_test_image_df.set_index('image'), on='image')
print('adding average_greens ...')
with open('../input/train_image_features_1.p','rb') as f:
x = pickle.load(f)
train_average_greens = x['average_greens']
train_ids = x['ids']
with open('../input/test_image_features_1.p','rb') as f:
x = pickle.load(f)
test_average_greens = x['average_greens']
test_ids = x['ids']
del x; gc.collect()
incep_train_image_df = pd.DataFrame(train_average_greens, columns = ['average_greens'])
incep_test_image_df = pd.DataFrame(test_average_greens, columns = ['average_greens'])
incep_train_image_df['image'] = (train_ids)
incep_test_image_df['image'] = (test_ids)
train_df = train_df.join(incep_train_image_df.set_index('image'), on='image')
test_df = test_df.join(incep_test_image_df.set_index('image'), on='image')
print('adding widths ...')
with open('../input/train_image_features_1.p','rb') as f:
x = pickle.load(f)
train_widths = x['widths']
train_ids = x['ids']
with open('../input/test_image_features_1.p','rb') as f:
x = pickle.load(f)
test_widths = x['widths']
test_ids = x['ids']
del x; gc.collect()
incep_train_image_df = pd.DataFrame(train_widths, columns = ['widths'])
incep_test_image_df = pd.DataFrame(test_widths, columns = ['widths'])
incep_train_image_df['image'] = (train_ids)
incep_test_image_df['image'] = (test_ids)
train_df = train_df.join(incep_train_image_df.set_index('image'), on='image')
test_df = test_df.join(incep_test_image_df.set_index('image'), on='image')
print('adding heights ...')
with open('../input/train_image_features_1.p','rb') as f:
x = pickle.load(f)
train_heights = x['heights']
train_ids = x['ids']
with open('../input/test_image_features_1.p','rb') as f:
x = pickle.load(f)
test_heights = x['heights']
test_ids = x['ids']
del x; gc.collect()
incep_train_image_df = pd.DataFrame(train_heights, columns = ['heights'])
incep_test_image_df = pd.DataFrame(test_heights, columns = ['heights'])
incep_train_image_df['image'] = (train_ids)
incep_test_image_df['image'] = (test_ids)
train_df = train_df.join(incep_train_image_df.set_index('image'), on='image')
test_df = test_df.join(incep_test_image_df.set_index('image'), on='image')
del test_average_blues, test_average_greens, test_average_reds, incep_test_image_df
del train_average_blues, train_average_greens, train_average_reds, incep_train_image_df
gc.collect()
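# Hedged refactor sketch (not used by the script above): each of the repeated
# "unpickle feature -> wrap in a DataFrame -> join on the image id" blocks could
# be expressed once with a helper like this. The pickle layout (a dict holding
# the feature array under its name plus an 'ids' array) mirrors the files above.
def _join_image_feature(train_df, test_df, path_train, path_test, feature_name):
    import pickle
    frames = []
    for path in (path_train, path_test):
        with open(path, "rb") as f:
            x = pickle.load(f)
        frames.append(pd.DataFrame({feature_name: x[feature_name], "image": x["ids"]}))
    train_df = train_df.join(frames[0].set_index("image"), on="image")
    test_df = test_df.join(frames[1].set_index("image"), on="image")
    return train_df, test_df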
#==============================================================================
# image features by Qifeng
#==============================================================================
print('adding image features @ qifeng ...')
with open('../input/train_image_features_cspace.p','rb') as f:
x = pickle.load(f)
x_train = pd.DataFrame(x, columns = ['average_HSV_Ss',\
'average_HSV_Vs',\
'average_LUV_Ls',\
'average_LUV_Us',\
'average_LUV_Vs',\
'average_HLS_Hs',\
'average_HLS_Ls',\
'average_HLS_Ss',\
'average_YUV_Ys',\
'average_YUV_Us',\
'average_YUV_Vs',\
'ids'
])
#x_train.rename(columns = {'$ids':'image'}, inplace = True)
with open('../input/test_image_features_cspace.p','rb') as f:
x = pickle.load(f)
x_test = pd.DataFrame(x, columns = ['average_HSV_Ss',\
'average_HSV_Vs',\
'average_LUV_Ls',\
'average_LUV_Us',\
'average_LUV_Vs',\
'average_HLS_Hs',\
'average_HLS_Ls',\
'average_HLS_Ss',\
'average_YUV_Ys',\
'average_YUV_Us',\
'average_YUV_Vs',\
'ids'
])
#x_test.rename(columns = {'$ids':'image'}, inplace = True)
train_df = train_df.join(x_train.set_index('ids'), on='image')
test_df = test_df.join(x_test.set_index('ids'), on='image')
del x, x_train, x_test; gc.collect()
# =============================================================================
# add geo info: https://www.kaggle.com/frankherfert/avito-russian-region-cities/data
# =============================================================================
#tmp = pd.read_csv("../input/avito_region_city_features.csv", usecols=["region", "city", "latitude","longitude"])
#train_df = train_df.merge(tmp, on=["city","region"], how="left")
#train_df["lat_long"] = train_df["latitude"]+train_df["longitude"]
#test_df = test_df.merge(tmp, on=["city","region"], how="left")
#test_df["lat_long"] = test_df["latitude"]+test_df["longitude"]
#del tmp; gc.collect()
# =============================================================================
# Add region-income
# =============================================================================
tmp = pd.read_csv("../input/region_income.csv", sep=";", names=["region", "income"])
train_df = train_df.merge(tmp, on="region", how="left")
test_df = test_df.merge(tmp, on="region", how="left")
del tmp; gc.collect()
# =============================================================================
# Add region-income
# =============================================================================
tmp = pd.read_csv("../input/city_population_wiki_v3.csv")
train_df = train_df.merge(tmp, on="city", how="left")
test_df = test_df.merge(tmp, on="city", how="left")
del tmp; gc.collect()
# =============================================================================
# Here Based on https://www.kaggle.com/bminixhofer/aggregated-features-lightgbm/code
# =============================================================================
all_samples = pd.concat([train_df,train_active,test_df,test_active]).reset_index(drop=True)
all_samples.drop_duplicates(["item_id"], inplace=True)
del train_active, test_active; gc.collect()
all_periods = pd.concat([train_periods,test_periods])
del train_periods, test_periods; gc.collect()
all_periods["days_up"] = (all_periods["date_to"] - all_periods["date_from"]).dt.days
gp = all_periods.groupby(["item_id"])[["days_up"]]
gp_df = pd.DataFrame()
gp_df["days_up_sum"] = gp.sum()["days_up"]
gp_df["times_put_up"] = gp.count()["days_up"]
gp_df.reset_index(inplace=True)
gp_df.rename(index=str, columns={"index": "item_id"})
all_periods.drop_duplicates(["item_id"], inplace=True)
all_periods = all_periods.merge(gp_df, on="item_id", how="left")
all_periods = all_periods.merge(all_samples, on="item_id", how="left")
gp = all_periods.groupby(["user_id"])[["days_up_sum", "times_put_up"]].mean().reset_index()\
.rename(index=str, columns={"days_up_sum": "avg_days_up_user",
"times_put_up": "avg_times_up_user"})
n_user_items = all_samples.groupby(["user_id"])[["item_id"]].count().reset_index() \
.rename(index=str, columns={"item_id": "n_user_items"})
gp = gp.merge(n_user_items, on="user_id", how="outer") #left
del all_samples, all_periods, n_user_items
gc.collect()
train_df = train_df.merge(gp, on="user_id", how="left")
test_df = test_df.merge(gp, on="user_id", how="left")
agg_cols = list(gp.columns)[1:]
del gp; gc.collect()
for col in agg_cols:
train_df[col].fillna(-1, inplace=True)
test_df[col].fillna(-1, inplace=True)
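# Illustrative walk-through of the aggregates computed above (comments only, toy numbers):
# if a user posted item A for 3 + 4 days (two listings) and item B for 5 days (one listing),
# then days_up_sum is 7 and 5 and times_put_up is 2 and 1, so after the user-level mean
# avg_days_up_user = 6.0, avg_times_up_user = 1.5 and n_user_items = 2;
# users missing from the supplementary data get -1 via the fillna loop above.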
print("merging supplimentary data done!")
# =============================================================================
# done! go to the normal steps
# =============================================================================
def rmse(predictions, targets):
print("calculating RMSE ...")
return np.sqrt(((predictions - targets) ** 2).mean())
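# Illustrative only (not part of the original kernel):
# >>> rmse(np.array([0.1, 0.4]), np.array([0.0, 0.5]))
# ~0.1  (square root of the mean of the squared errors 0.01 and 0.01)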
def text_preprocessing(text):
text = str(text)
text = text.lower()
text = re.sub(r"(\\u[0-9A-Fa-f]+)",r"", text)
text = re.sub(r"===",r" ", text)
# https://www.kaggle.com/demery/lightgbm-with-ridge-feature/code
text = " ".join(map(str.strip, re.split('(\d+)',text)))
regex = re.compile(u'[^[:alpha:]]')
text = regex.sub(" ", text)
text = " ".join(text.split())
return text
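# Illustrative only: rough effect of the pipeline above.
# >>> text_preprocessing("Продам iPhone7, 32GB !!!")
# lower-cases the text, separates digits from letters and collapses whitespace,
# giving something like 'продам iphone 7 , 32 gb !!!'
# (the compiled regex above is effectively inert, see the note in the function)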
def feature_engineering(df):
# All the feature engineering here
def Do_Text_Hash(df):
print("feature engineering -> hash text ...")
df["text_feature"] = df.apply(lambda row: " ".join([str(row["param_1"]),
str(row["param_2"]), str(row["param_3"])]),axis=1)
df["text_feature_2"] = df.apply(lambda row: " ".join([str(row["param_2"]), str(row["param_3"])]),axis=1)
df["title_description"] = df.apply(lambda row: " ".join([str(row["title"]), str(row["description"])]),axis=1)
print("feature engineering -> preprocess text ...")
df["text_feature"] = df["text_feature"].apply(lambda x: text_preprocessing(x))
df["text_feature_2"] = df["text_feature_2"].apply(lambda x: text_preprocessing(x))
df["description"] = df["description"].apply(lambda x: text_preprocessing(x))
df["title"] = df["title"].apply(lambda x: text_preprocessing(x))
df["title_description"] = df["title_description"].apply(lambda x: text_preprocessing(x))
def Do_Datetime(df):
print("feature engineering -> date time ...")
df["wday"] = df["activation_date"].dt.weekday
df["wday"] =df["wday"].astype(np.uint8)
def Do_Label_Enc(df):
print("feature engineering -> label encoding ...")
lbl = LabelEncoder()
cat_col = ["user_id", "region", "city", "parent_category_name",
"category_name", "user_type", "image_top_1",
"param_1", "param_2", "param_3","image",
]
for col in cat_col:
df[col] = lbl.fit_transform(df[col].astype(str))
gc.collect()
import string
# counts the characters of l1 that appear in l2; l2 is a plain string/set, so patterns
# such as "[A-Z]" below count the literal characters 'A', 'Z', '[', ']' and '-', not a regex class
count = lambda l1, l2: sum([1 for x in l1 if x in l2])
def Do_NA(df):
print("feature engineering -> fill na ...")
df["image_top_1"].fillna(-1,inplace=True)
df["image"].fillna("noinformation",inplace=True)
df["param_1"].fillna("nicapotato",inplace=True)
df["param_2"].fillna("nicapotato",inplace=True)
df["param_3"].fillna("nicapotato",inplace=True)
df["title"].fillna("nicapotato",inplace=True)
df["description"].fillna("nicapotato",inplace=True)
# price vs income
# df["price_vs_city_income"] = df["price"] / df["income"]
# df["price_vs_city_income"].fillna(-1, inplace=True)
def Do_Count(df):
print("feature engineering -> do count ...")
# some count
df["num_desc_punct"] = df["description"].apply(lambda x: count(x, set(string.punctuation))).astype(np.int16)
df["num_desc_capE"] = df["description"].apply(lambda x: count(x, "[A-Z]")).astype(np.int16)
df["num_desc_capP"] = df["description"].apply(lambda x: count(x, "[А-Я]")).astype(np.int16)
df["num_title_punct"] = df["title"].apply(lambda x: count(x, set(string.punctuation))).astype(np.int16)
df["num_title_capE"] = df["title"].apply(lambda x: count(x, "[A-Z]")).astype(np.int16)
df["num_title_capP"] = df["title"].apply(lambda x: count(x, "[А-Я]")) .astype(np.int16)
# good, used, bad ... count
df["is_in_desc_хорошо"] = df["description"].str.contains("хорошо").map({True:1, False:0}).astype(np.uint8)
df["is_in_desc_Плохо"] = df["description"].str.contains("Плохо").map({True:1, False:0}).astype(np.uint8)
df["is_in_desc_новый"] = df["description"].str.contains("новый").map({True:1, False:0}).astype(np.uint8)
df["is_in_desc_старый"] = df["description"].str.contains("старый").map({True:1, False:0}).astype(np.uint8)
df["is_in_desc_используемый"] = df["description"].str.contains("используемый").map({True:1, False:0}).astype(np.uint8)
df["is_in_desc_есплатная_доставка"] = df["description"].str.contains("есплатная доставка").map({True:1, False:0}).astype(np.uint8)
df["is_in_desc_есплатный_возврат"] = df["description"].str.contains("есплатный возврат").map({True:1, False:0}).astype(np.uint8)
df["is_in_desc_идеально"] = df["description"].str.contains("идеально").map({True:1, False:0}).astype(np.uint8)
df["is_in_desc_подержанный"] = df["description"].str.contains("подержанный").map({True:1, False:0}).astype(np.uint8)
df["is_in_desc_пСниженные_цены"] = df["description"].str.contains("Сниженные цены").map({True:1, False:0}).astype(np.uint8)
# new count 0604
df["num_title_Exclamation"] = df["title"].apply(lambda x: count(x, "!")).astype(np.int16)
df["num_title_Question"] = df["title"].apply(lambda x: count(x, "?")).astype(np.int16)
df["num_desc_Exclamation"] = df["description"].apply(lambda x: count(x, "!")).astype(np.int16)
df["num_desc_Question"] = df["description"].apply(lambda x: count(x, "?")).astype(np.int16)
def Do_Drop(df):
df.drop(["activation_date"], axis=1, inplace=True)
def Do_Stat_Text(df):
print("feature engineering -> statistics in text ...")
textfeats = ["text_feature","text_feature_2","description", "title"]
for col in textfeats:
df[col + "_num_chars"] = df[col].apply(len).astype(np.int16)
df[col + "_num_words"] = df[col].apply(lambda comment: len(comment.split())).astype(np.int16)
df[col + "_num_unique_words"] = df[col].apply(lambda comment: len(set(w for w in comment.split()))).astype(np.int16)
df[col + "_words_vs_unique"] = (df[col+"_num_unique_words"] / df[col+"_num_words"] * 100).astype(np.float32)
gc.collect()
# choose which functions to run
Do_NA(df)
Do_Text_Hash(df)
Do_Label_Enc(df)
Do_Count(df)
Do_Datetime(df)
Do_Stat_Text(df)
Do_Drop(df)
gc.collect()
return df
def data_vectorize(df):
russian_stop = set(stopwords.words("russian"))
tfidf_para = {
"stop_words": russian_stop,
"analyzer": | |
"""
A Vocabulary maps strings to integers, allowing for strings to be mapped to an
out-of-vocabulary token.
"""
import codecs
import logging
import os
from collections import defaultdict
from typing import Any, Callable, Dict, Iterable, List, Optional, Set, Union
from typing import TextIO # pylint: disable=unused-import
from allennlp.common.util import namespace_match
from allennlp.common import Params, Registrable
from allennlp.common.checks import ConfigurationError
from allennlp.common.tqdm import Tqdm
from allennlp.data import instance as adi # pylint: disable=unused-import
logger = logging.getLogger(__name__) # pylint: disable=invalid-name
DEFAULT_NON_PADDED_NAMESPACES = ("*tags", "*labels")
DEFAULT_PADDING_TOKEN = "@@PADDING@@"
DEFAULT_OOV_TOKEN = "@@UNKNOWN@@"
NAMESPACE_PADDING_FILE = 'non_padded_namespaces.txt'
class _NamespaceDependentDefaultDict(defaultdict):
"""
This is a `defaultdict
<https://docs.python.org/2/library/collections.html#collections.defaultdict>`_ where the
default value is dependent on the key that is passed.
We use "namespaces" in the :class:`Vocabulary` object to keep track of several different
mappings from strings to integers, so that we have a consistent API for mapping words, tags,
labels, characters, or whatever else you want, into integers. The issue is that some of those
namespaces (words and characters) should have integers reserved for padding and
out-of-vocabulary tokens, while others (labels and tags) shouldn't. This class allows you to
specify filters on the namespace (the key used in the ``defaultdict``), and use different
default values depending on whether the namespace passes the filter.
To do filtering, we take a set of ``non_padded_namespaces``. This is a set of strings
that are either matched exactly against the keys, or treated as suffixes, if the
string starts with ``*``. In other words, if ``*tags`` is in ``non_padded_namespaces`` then
``passage_tags``, ``question_tags``, etc. (anything that ends with ``tags``) will have the
``non_padded`` default value.
Parameters
----------
non_padded_namespaces : ``Iterable[str]``
A set / list / tuple of strings describing which namespaces are not padded. If a namespace
(key) is missing from this dictionary, we will use :func:`namespace_match` to see whether
the namespace should be padded. If the given namespace matches any of the strings in this
list, we will use ``non_padded_function`` to initialize the value for that namespace, and
we will use ``padded_function`` otherwise.
padded_function : ``Callable[[], Any]``
A zero-argument function to call to initialize a value for a namespace that `should` be
padded.
non_padded_function : ``Callable[[], Any]``
A zero-argument function to call to initialize a value for a namespace that should `not` be
padded.
"""
def __init__(self,
non_padded_namespaces: Iterable[str],
padded_function: Callable[[], Any],
non_padded_function: Callable[[], Any]) -> None:
self._non_padded_namespaces = set(non_padded_namespaces)
self._padded_function = padded_function
self._non_padded_function = non_padded_function
super(_NamespaceDependentDefaultDict, self).__init__()
def __missing__(self, key: str):
if any(namespace_match(pattern, key) for pattern in self._non_padded_namespaces):
value = self._non_padded_function()
else:
value = self._padded_function()
dict.__setitem__(self, key, value)
return value
def add_non_padded_namespaces(self, non_padded_namespaces: Set[str]):
# add non_padded_namespaces which weren't already present
self._non_padded_namespaces.update(non_padded_namespaces)
class _TokenToIndexDefaultDict(_NamespaceDependentDefaultDict):
def __init__(self, non_padded_namespaces: Set[str], padding_token: str, oov_token: str) -> None:
super(_TokenToIndexDefaultDict, self).__init__(non_padded_namespaces,
lambda: {padding_token: 0, oov_token: 1},
lambda: {})
class _IndexToTokenDefaultDict(_NamespaceDependentDefaultDict):
def __init__(self, non_padded_namespaces: Set[str], padding_token: str, oov_token: str) -> None:
super(_IndexToTokenDefaultDict, self).__init__(non_padded_namespaces,
lambda: {0: padding_token, 1: oov_token},
lambda: {})
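# Illustrative only (not part of the original module): how the namespace-dependent
# defaults behave. Namespaces matching a non-padded pattern start out empty, every
# other namespace starts with the padding and OOV entries.
# >>> d = _TokenToIndexDefaultDict(("*labels",), "@@PADDING@@", "@@UNKNOWN@@")
# >>> d["tokens"]
# {'@@PADDING@@': 0, '@@UNKNOWN@@': 1}
# >>> d["ner_labels"]   # suffix-matches "*labels", so no reserved indices
# {}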
def _read_pretrained_tokens(embeddings_file_uri: str) -> Set[str]:
# Moving this import to the top breaks everything (circular import, I guess)
from allennlp.modules.token_embedders.embedding import EmbeddingsTextFile
logger.info('Reading pretrained tokens from: %s', embeddings_file_uri)
tokens = set()
with EmbeddingsTextFile(embeddings_file_uri) as embeddings_file:
for line_number, line in enumerate(Tqdm.tqdm(embeddings_file), start=1):
token_end = line.find(' ')
if token_end >= 0:
token = line[:token_end]
tokens.add(token)
else:
line_begin = line[:20] + '...' if len(line) > 20 else line
logger.warning('Skipping line number %d: %s', line_number, line_begin)
return tokens
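# The parser above only needs the token before the first space on each line, i.e. a
# GloVe-style text file where a line looks like:
#   the 0.418 0.24968 -0.41242 ...
# Lines that contain no space are skipped with the warning above.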
def pop_max_vocab_size(params: Params) -> Union[int, Dict[str, int]]:
"""
max_vocab_size is allowed to be either an int or a Dict[str, int] (or nothing).
But it could also be a string representing an int (in the case of environment variable
substitution). So we need some complex logic to handle it.
"""
size = params.pop("max_vocab_size", None)
if isinstance(size, Params):
# This is the Dict[str, int] case.
return size.as_dict()
elif size is not None:
# This is the int / str case.
return int(size)
else:
return None
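# Illustrative only:
# >>> pop_max_vocab_size(Params({"max_vocab_size": "5000"}))
# 5000
# >>> pop_max_vocab_size(Params({"max_vocab_size": {"tokens": 10000, "labels": 100}}))
# {'tokens': 10000, 'labels': 100}
# >>> pop_max_vocab_size(Params({})) is None
# True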
class Vocabulary(Registrable):
"""
A Vocabulary maps strings to integers, allowing for strings to be mapped to an
out-of-vocabulary token.
Vocabularies are fit to a particular dataset, which we use to decide which tokens are
in-vocabulary.
Vocabularies also allow for several different namespaces, so you can have separate indices for
'a' as a word, and 'a' as a character, for instance, and so we can use this object to also map
tag and label strings to indices, for a unified :class:`~.fields.field.Field` API. Most of the
methods on this class allow you to pass in a namespace; by default we use the 'tokens'
namespace, and you can omit the namespace argument everywhere and just use the default.
Parameters
----------
counter : ``Dict[str, Dict[str, int]]``, optional (default=``None``)
A collection of counts from which to initialize this vocabulary. We will examine the
counts and, together with the other parameters to this class, use them to decide which
words are in-vocabulary. If this is ``None``, we just won't initialize the vocabulary with
anything.
min_count : ``Dict[str, int]``, optional (default=None)
When initializing the vocab from a counter, you can specify a minimum count, and every
token with a count less than this will not be added to the dictionary. These minimum
counts are `namespace-specific`, so you can specify different minimums for labels versus
words tokens, for example. If a namespace does not have a key in the given dictionary, we
will add all seen tokens to that namespace.
max_vocab_size : ``Union[int, Dict[str, int]]``, optional (default=``None``)
If you want to cap the number of tokens in your vocabulary, you can do so with this
parameter. If you specify a single integer, every namespace will have its vocabulary fixed
to be no larger than this. If you specify a dictionary, then each namespace in the
``counter`` can have a separate maximum vocabulary size. Any missing key will have a value
of ``None``, which means no cap on the vocabulary size.
non_padded_namespaces : ``Iterable[str]``, optional
By default, we assume you are mapping word / character tokens to integers, and so you want
to reserve word indices for padding and out-of-vocabulary tokens. However, if you are
mapping NER or SRL tags, or class labels, to integers, you probably do not want to reserve
indices for padding and out-of-vocabulary tokens. Use this field to specify which
namespaces should `not` have padding and OOV tokens added.
The format of each element of this is either a string, which must match field names
exactly, or ``*`` followed by a string, which we match as a suffix against field names.
We try to make the default here reasonable, so that you don't have to think about this.
The default is ``("*tags", "*labels")``, so as long as your namespace ends in "tags" or
"labels" (which is true by default for all tag and label fields in this code), you don't
have to specify anything here.
pretrained_files : ``Dict[str, str]``, optional
If provided, this map specifies the path to optional pretrained embedding files for each
namespace. This can be used to either restrict the vocabulary to only words which appear
in this file, or to ensure that any words in this file are included in the vocabulary
regardless of their count, depending on the value of ``only_include_pretrained_words``.
Words which appear in the pretrained embedding file but not in the data are NOT included
in the Vocabulary.
only_include_pretrained_words : ``bool``, optional (default=False)
This defines the strategy for using any pretrained embedding files which may have been
specified in ``pretrained_files``. If False, an inclusive strategy is used: any words
which are in the ``counter`` and in the pretrained file are added to the ``Vocabulary``,
regardless of whether their count exceeds ``min_count`` or not. If True, we use an
exclusive strategy: words are only included in the Vocabulary if they are in the pretrained
embedding file (their count must still be at least ``min_count``).
tokens_to_add : ``Dict[str, List[str]]``, optional (default=None)
If given, this is a list of tokens to add to the vocabulary, keyed by the namespace to add
the tokens to. This is a way | |
# pandas9/txt2dream
from pathlib import Path
import io
import sys
import os
import math
import numpy as np
import requests
import json
import kornia.augmentation as K
from base64 import b64encode
from omegaconf import OmegaConf
import imageio
import cv2  # required for the rotation/zoom/translation warps applied between frames
from PIL import ImageFile, Image
ImageFile.LOAD_TRUNCATED_IMAGES = True
from taming.models import cond_transformer, vqgan
import transformers
import torch
from torch import nn, optim
from torch.nn import functional as F
from torchvision import transforms
from torchvision.transforms import functional as TF
try:
import clip
except ImportError:
from CLIP import clip
from utils import *
from upscale_dream import ScaleImage
class ReplaceGrad(torch.autograd.Function):
@staticmethod
def forward(ctx, x_forward, x_backward):
ctx.shape = x_backward.shape
return x_forward
@staticmethod
def backward(ctx, grad_in):
return None, grad_in.sum_to_size(ctx.shape)
class ClampWithGrad(torch.autograd.Function):
@staticmethod
def forward(ctx, input, min, max):
ctx.min = min
ctx.max = max
ctx.save_for_backward(input)
return input.clamp(min, max)
@staticmethod
def backward(ctx, grad_in):
input, = ctx.saved_tensors
return grad_in * (grad_in * (input - input.clamp(ctx.min, ctx.max)) >= 0), None, None
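# Illustrative sketch (not part of the original file): both autograd functions above are
# straight-through tricks. ReplaceGrad outputs its first argument but routes the gradient
# to the second; ClampWithGrad clamps in the forward pass and only keeps gradient
# components whose descent step would pull the input back into [min, max].
def _straight_through_demo():
    a = torch.zeros(3, requires_grad=True)
    out = ReplaceGrad.apply(torch.ones(3), a)   # forward value is all ones
    out.sum().backward()                        # but the gradient lands in a.grad
    x = torch.tensor([2.0], requires_grad=True)
    y = ClampWithGrad.apply(x, 0.0, 1.0)        # forward value is clamped to 1.0
    y.backward()                                # grad kept: a descent step moves x back toward the range
    return a.grad, x.grad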
class Text2Image:
'''
Deep dream text with torch, vqgan, esrgan, clip and diffusion.
Adjust settings for more illusive dreams.
'''
def __init__(self, settings={}):
self.settings = {
# required
'seed': -1,
'prompt': '',
'width': 256,
'height': 256,
'clip_model': 'ViT-B/32', # available ['RN50', 'RN101', 'RN50x4', 'RN50x16', 'ViT-B/32','ViT-B/16']
'vqgan_model': 'vqgan_imagenet_f16_16384', # available ["vqgan_imagenet_f16_16384", "vqgan_imagenet_f16_1024", "wikiart_16384", "coco", "sflckr"]
'initial_image': '',
'target_images': '',
'input_images': '',
'output_folder': 'vqgan-steps',
'output_name': '',
'noise_prompt_seeds': [],
'noise_prompt_weights': [],
'key_frames': True,
'generate_video': False,
'upscale_dream': False,
'upscale_strength': 2,
'video_length': 60, # seconds
'target_fps': 30,
'iterations_per_frame': 3,
'angle': 0,
'zoom': 1,
'translation_x': 0,
'translation_y': 0,
'display_frequency': 10,
# additional
'vq_init_weight': 0.0,
'vq_step_size': 0.1,
'vq_cutn': 64,
'vq_cut_pow': 1.0,
# model links
'pretrained_models': {
'vqgan_imagenet_f16_1024_ckpt': 'https://heibox.uni-heidelberg.de/f/140747ba53464f49b476/?dl=1',
'vqgan_imagenet_f16_1024_yaml': 'https://heibox.uni-heidelberg.de/f/6ecf2af6c658432c8298/?dl=1',
'vqgan_imagenet_f16_16384_ckpt': 'https://heibox.uni-heidelberg.de/f/867b05fc8c4841768640/?dl=1',
'vqgan_imagenet_f16_16384_yaml': 'https://heibox.uni-heidelberg.de/f/274fb24ed38341bfa753/?dl=1',
'coco_ckpt': 'https://dl.nmkd.de/ai/clip/coco/coco.ckpt',
'coco_yaml': 'https://dl.nmkd.de/ai/clip/coco/coco.yaml',
'wikiart_16384_ckpt': 'http://eaidata.bmk.sh/data/Wikiart_16384/wikiart_f16_16384_8145600.ckpt',
'wikiart_16384_yaml': 'http://eaidata.bmk.sh/data/Wikiart_16384/wikiart_f16_16384_8145600.yaml',
'sflckr_ckpt': 'https://heibox.uni-heidelberg.de/d/73487ab6e5314cb5adba/files/?p=%2Fcheckpoints%2Flast.ckpt&dl=1',
'sflckr_yaml': 'https://heibox.uni-heidelberg.de/d/73487ab6e5314cb5adba/files/?p=%2Fconfigs%2F2020-11-09T13-31-51-project.yaml&dl=1',
}
}
for key, value in settings.items():
self.settings[key] = value
self.dir_path = os.path.dirname(os.path.abspath(__file__))
os.makedirs(f"{self.dir_path}/{self.settings['output_folder']}/", exist_ok=True)
self.down_pretrained_models()
self.replace_grad = ReplaceGrad.apply
self.clamp_with_grad = ClampWithGrad.apply
self.model_name = self.settings['vqgan_model']
self.total_iterations = self.settings['video_length'] * self.settings['target_fps']
self.clean_cache()
if self.settings['seed'] == -1:
self.seed = None
else:
self.seed = self.settings['seed']
if self.settings['key_frames']:
try:
self.prompts = self.settings['prompt']
self.prompts_series = split_key_frame_text_prompts(
parse_key_frames(self.settings['prompt']),
self.total_iterations
)
except RuntimeError:
self.display_message(
"WARNING: You have selected to use key frames, but you have not "
"formatted `prompt` correctly for key frames.\n"
)
self.prompts = f"0: ({self.settings['prompt']}:1)"
self.prompts_series = split_key_frame_text_prompts(
parse_key_frames(self.settings['prompt']),
self.total_iterations
)
try:
self.target_images = self.settings['target_images']
self.target_images_series = split_key_frame_text_prompts(
parse_key_frames(self.settings['target_images']),
self.total_iterations
)
except RuntimeError:
self.display_message(
"WARNING: You have selected to use key frames, but you have not "
"formatted `target_images` correctly for key frames.\n"
)
self.target_images = f"0: ({self.settings['target_images']}:1)"
self.target_images_series = split_key_frame_text_prompts(
parse_key_frames(self.settings['target_images']),
self.total_iterations
)
try:
self.angle_series = get_inbetweens(parse_key_frames(self.settings['angle']), self.total_iterations)
except RuntimeError:
self.display_message(
"WARNING: You have selected to use key frames, but you have not "
"formatted `angle` correctly for key frames.\n"
)
self.angle = f"0: ({self.settings['angle']})"
self.angle_series = get_inbetweens(parse_key_frames(self.settings['angle']), self.total_iterations)
try:
self.zoom_series = get_inbetweens(parse_key_frames(self.settings['zoom']), self.total_iterations)
except RuntimeError:
self.display_message(
"WARNING: You have selected to use key frames, but you have not "
"formatted `zoom` correctly for key frames.\n"
)
self.zoom = f"0: ({self.settings['zoom']})"
self.zoom_series = get_inbetweens(parse_key_frames(self.settings['zoom']), self.total_iterations)
try:
self.translation_x_series = get_inbetweens(parse_key_frames(self.settings['translation_x']), self.total_iterations)
except RuntimeError:
self.display_message(
"WARNING: You have selected to use key frames, but you have not "
"formatted `translation_x` correctly for key frames.\n"
)
self.translation_x = f"0: ({self.settings['translation_x']})"
self.translation_x_series = get_inbetweens(parse_key_frames(self.settings['translation_x']), self.total_iterations)
try:
self.translation_y_series = get_inbetweens(parse_key_frames(self.settings['translation_y']), self.total_iterations)
except RuntimeError:
self.display_message(
"WARNING: You have selected to use key frames, but you have not "
"formatted `translation_y` correctly for key frames.\n"
)
self.translation_y = f"0: ({self.settings['translation_y']})"
self.translation_y_series = get_inbetweens(parse_key_frames(self.settings['translation_y']), self.total_iterations)
try:
self.iterations_per_frame_series = get_inbetweens(
parse_key_frames(self.settings['iterations_per_frame']), self.total_iterations, integer=True
)
except RuntimeError:
self.display_message(
"WARNING: You have selected to use key frames, but you have not "
"formatted `iterations_per_frame` correctly for key frames.\n"
)
self.iterations_per_frame = f"0: ({self.settings['iterations_per_frame']})"
self.iterations_per_frame_series = get_inbetweens(
parse_key_frames(self.settings['iterations_per_frame']), self.total_iterations, integer=True
)
else:
self.prompts = [phrase.strip() for phrase in self.settings['prompt'].split("|")]
if self.prompts == ['']:
self.prompts = []
self.target_images = self.settings['target_images']
if self.target_images == "None" or not self.target_images:
self.target_images = []
else:
self.target_images = self.target_images.split("|")
self.target_images = [image.strip() for image in self.target_images]
self.angle = float(self.settings['angle'])
self.zoom = float(self.settings['zoom'])
self.translation_x = float(self.settings['translation_x'])
self.translation_y = float(self.settings['translation_y'])
self.iterations_per_frame = int(self.settings['iterations_per_frame'])
self.clean_cache()
for var in ['device', 'model', 'perceptor', 'z']:
try:
del globals()[var]
except:
pass
self.device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
self.display_message('Deep dream initiated')
self.display_message(f'Using {self.device}')
if self.prompts:
self.display_message(f'I am dreaming about {self.prompts}')
if self.target_images:
self.display_message(f'Using dream state {self.target_images}')
if self.seed == None:
self.seed = torch.seed()
torch.manual_seed(self.seed)
self.display_message(f'Dream seed {self.seed}')
# config
self.vqgan_config = f'{self.dir_path}/models/{self.model_name}.yaml'
self.vqgan_checkpoint = f'{self.dir_path}/models/{self.model_name}.ckpt'
self.model = self.load_vqgan_model(self.vqgan_config, self.vqgan_checkpoint)
if torch.cuda.device_count() > 1:
self.display_message(f"Let's use {torch.cuda.device_count()} GPUs!")
self.model = nn.DataParallel(self.model, device_ids=[_id for _id in range(torch.cuda.device_count())])
self.model.to(self.device)
self.model = self.model.module
else:
self.model.to(self.device)
self.perceptor = clip.load(self.settings['clip_model'], jit=False)[0].eval().requires_grad_(False).to(self.device)
self.cut_size = self.perceptor.visual.input_resolution
self.e_dim = self.model.quantize.e_dim
self.f = 2**(self.model.decoder.num_resolutions - 1)
self.make_cutouts = MakeCutouts(self.cut_size, self.settings['vq_cutn'], cut_pow=self.settings['vq_cut_pow'])
self.n_toks = self.model.quantize.n_e
self.toksX, self.toksY = self.settings['width'] // self.f, self.settings['height'] // self.f
self.sideX, self.sideY = self.toksX * self.f, self.toksY * self.f
self.z_min = self.model.quantize.embedding.weight.min(dim=0).values[None, :, None, None]
self.z_max = self.model.quantize.embedding.weight.max(dim=0).values[None, :, None, None]
self.next_loop_stop = False # ensure proper stop for GPU mem
for i in range(self.total_iterations):
if self.next_loop_stop:
break
if self.settings['key_frames']:
self.prompts = self.prompts_series[i]
self.prompts = [phrase.strip() for phrase in self.prompts.split("|")]
if self.prompts == ['']:
self.prompts = []
self.settings['prompt'] = self.prompts
self.target_images = self.target_images_series[i]
if self.target_images == "None" or not self.target_images:
self.target_images = []
else:
self.target_images = self.target_images.split("|")
self.target_images = [image.strip() for image in self.target_images]
self.settings['target_images'] = self.target_images
self.angle = self.angle_series[i]
self.zoom = self.zoom_series[i]
self.translation_x = self.translation_x_series[i]
self.translation_y = self.translation_y_series[i]
self.iterations_per_frame = self.iterations_per_frame_series[i]
if i == 0 and self.settings['initial_image'] != "":
self.img_0 = read_image_workaround(self.settings['initial_image'])
self.z, *_ = self.model.encode(TF.to_tensor(self.img_0).to(self.device).unsqueeze(0) * 2 - 1)
elif i == 0 and not os.path.isfile(f'{self.dir_path}/{self.settings["output_folder"]}/{i:04}.png'):
self.one_hot = F.one_hot(
torch.randint(self.n_toks, [self.toksY * self.toksX], device=self.device), self.n_toks
).float()
self.z = self.one_hot @ self.model.quantize.embedding.weight
self.z = self.z.view([-1, self.toksY, self.toksX, self.e_dim]).permute(0, 3, 1, 2)
else:
self.img_0 = read_image_workaround(f'{self.dir_path}/{self.settings["output_folder"]}/{i:04}.png')
self.center = (1 * self.img_0.shape[1]//2, 1 * self.img_0.shape[0]//2)
self.trans_mat = np.float32(
[[1, 0, self.translation_x],
[0, 1, self.translation_y]]
)
self.rot_mat = cv2.getRotationMatrix2D(self.center, self.angle, self.zoom)
self.trans_mat = np.vstack([self.trans_mat, [0,0,1]])
self.rot_mat = np.vstack([self.rot_mat, [0,0,1]])
self.transformation_matrix = np.matmul(self.rot_mat, self.trans_mat)
self.img_0 = cv2.warpPerspective(
self.img_0,
self.transformation_matrix,
(self.img_0.shape[1], self.img_0.shape[0]),
borderMode=cv2.BORDER_WRAP
)
self.z, *_ = self.model.encode(TF.to_tensor(self.img_0).to(self.device).unsqueeze(0) * 2 - 1)
i += 1
self.z_orig = self.z.clone()
self.z.requires_grad_(True)
self.opt = optim.Adam([self.z], lr=self.settings['vq_step_size'])
self.normalize = transforms.Normalize(mean=[0.48145466, 0.4578275, 0.40821073],
std=[0.26862954, 0.26130258, 0.27577711])
self.pMs = []
for prompt in self.prompts:
txt, weight, stop = parse_prompt(prompt)
self.embed = self.perceptor.encode_text(clip.tokenize(txt).to(self.device)).float()
self.pMs.append(Prompt(self.embed, self.replace_grad, weight, stop).to(self.device))
for prompt in self.target_images:
path, weight, stop = parse_prompt(prompt)
self.img = resize_image(Image.open(path).convert('RGB'), (self.sideX, self.sideY))
self.batch = self.make_cutouts(TF.to_tensor(self.img).unsqueeze(0).to(self.device))
self.embed = self.perceptor.encode_image(self.normalize(self.batch)).float()
self.pMs.append(Prompt(self.embed, self.replace_grad, weight, stop).to(self.device))
for seed, weight in zip(self.settings['noise_prompt_seeds'], self.settings['noise_prompt_weights']):
gen = torch.Generator().manual_seed(seed)
self.embed = torch.empty([1, self.perceptor.visual.output_dim]).normal_(generator=gen)
self.pMs.append(Prompt(self.embed, self.replace_grad, weight).to(self.device))
try:
self.dream(i)
except KeyboardInterrupt:
self.next_loop_stop = True
pass
# upscale/gen video
if self.settings['upscale_dream']:
scale_settings = {
'input': f'{self.dir_path}/{self.settings["output_folder"]}',
'output': f'{self.dir_path}/{self.settings["output_folder"]}-upscaled',
'suffix': '',
'netscale': int(self.settings['upscale_strength']),
'outscale': int(self.settings['upscale_strength'])
}
out_folder = f'{self.dir_path}/{self.settings["output_folder"]}-upscaled'
ScaleImage(scale_settings)
else:
out_folder = f'{self.dir_path}/{self.settings["output_folder"]}'
if self.settings['generate_video']:
frames_to_video(out_folder, f'{self.dir_path}/out.mp4', self.settings['target_fps'])
def dream(self, i):
x = 0
while True:
if x >= self.iterations_per_frame:
break
else:
self.train(i)
x += 1
def train(self, i):
self.opt.zero_grad()
lossAll = self.ascend_txt(i, True)
if i % self.settings['display_frequency'] == 0:
self.checkin(i, lossAll)
loss = sum(lossAll)
loss.backward()
self.opt.step()
with torch.no_grad():
self.z.copy_(self.z.maximum(self.z_min).minimum(self.z_max))
def save_output(self, i, img):
if self.settings['output_name'] == '':
filename = f"{self.dir_path}/{self.settings['output_folder']}/{i:04}.png"
else:
filename = f"{self.dir_path}/{self.settings['output_folder']}/{self.settings['output_name']}.png"
imageio.imwrite(filename, np.array(img))
def ascend_txt(self, i, save):
out = self.synth(self.z)
iii = self.perceptor.encode_image(self.normalize(self.make_cutouts(out))).float()
result = []
if self.settings['vq_init_weight']:
result.append(F.mse_loss(self.z, self.z_orig) * self.settings['vq_init_weight'] / 2)
for prompt in self.pMs:
result.append(prompt(iii))
img = np.array(out.mul(255).clamp(0, 255)[0].cpu().detach().numpy().astype(np.uint8))[:,:,:]
img = np.transpose(img, (1, 2, 0))
if save:
self.save_output(i, img)
return result
@torch.no_grad()
def checkin(self, i, losses):
losses_str = ', '.join(f'{loss.item():g}' for loss in losses)
self.display_message(f'i: {i}, loss: {sum(losses).item():g}, losses: {losses_str}')
out = self.synth(self.z)
TF.to_pil_image(out[0].cpu()).save('progress.png')
def vector_quantize(self, x, codebook):
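        # Squared Euclidean distance of every vector in x to every codebook entry, using the
        # expansion ||x - c||^2 = ||x||^2 + ||c||^2 - 2 x.c; the nearest code is selected and
        # replace_grad routes the gradient straight through to x (a straight-through estimator).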
d = x.pow(2).sum(dim=-1, keepdim=True) + codebook.pow(2).sum(dim=1) - 2 * x @ codebook.T
indices = d.argmin(-1)
x_q = F.one_hot(indices, codebook.shape[0]).to(d.dtype) @ codebook
return self.replace_grad(x_q, x)
def synth(self, z):
z_q = self.vector_quantize(z.movedim(1, 3), self.model.quantize.embedding.weight).movedim(3, 1)
return self.clamp_with_grad(self.model.decode(z_q).add(1).div(2), 0, 1)
def display_message(self, msg):
print(msg)
def clean_cache(self):
torch.cuda.empty_cache()
with torch.no_grad():
torch.cuda.empty_cache()
def load_vqgan_model(self, config_path, checkpoint_path):
config = OmegaConf.load(config_path)
if config.model.target == 'taming.models.vqgan.VQModel':
model = vqgan.VQModel(**config.model.params)
model.eval().requires_grad_(False)
model.init_from_ckpt(checkpoint_path)
elif config.model.target == 'taming.models.cond_transformer.Net2NetTransformer':
parent_model = cond_transformer.Net2NetTransformer(**config.model.params)
parent_model.eval().requires_grad_(False)
parent_model.init_from_ckpt(checkpoint_path)
model = parent_model.first_stage_model
else:
raise ValueError(f'unknown model type: {config.model.target}')
del | |
import os
from typing import Dict, Optional
import numpy as np
import pandas as pd
from scipy.signal import correlate
from . import ShakeExtractor, helpers
from .abstract_extractor import AbstractExtractor
from .helpers import normalize, get_equidistant_signals
from .log import logger
from .synchronization_errors import StartEqualsEndError
from .types import SourceDict, ResultTableSpec, SyncPairTimeshift, SyncPairs
class Synchronizer:
@property
def extractor(self) -> AbstractExtractor:
"""Get the current extractor"""
return self._extractor
@extractor.setter
def extractor(self, value: AbstractExtractor):
if not issubclass(type(value), AbstractExtractor):
raise TypeError("Extractor needs to be a subclass of AbstractExtractor.")
self._extractor = value
def __init__(
self,
sources: SourceDict,
reference_source_name: str,
extractor: Optional[AbstractExtractor] = None,
sampling_freq: Optional[float] = None,
):
"""
Create a new synchronizer. Synchronizer objects are used to remove constant offsets and clock drift
by stretching and shifting the signals, based on reference points detected by an extractor.
:param sources: A SourceDict to describe the input data
:param reference_source_name: name of the sensor to be used as reference.
Other sensors will be made synchronous to this sensor, and data from this sensor will not be modified.
:param extractor: This will be used to find synchronization points in the source data. If None, it defaults to
a ShakeExtractor instance
:param sampling_freq: Override the frequency used to resample input data. If None, it defaults to the maximum
input frequency
"""
self.sources = sources
self.ref_source_name = reference_source_name
self._check_sources()
self.extractor = extractor if extractor is not None else ShakeExtractor()
self.ref_signals = self._prepare_ref_signals()
self.sampling_freq = (
sampling_freq
if sampling_freq is not None
else helpers.get_max_ref_frequency(self.ref_signals)
)
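        # Illustrative only: the expected ``sources`` layout (names here are placeholders):
        #   {
        #       "wrist": {"data": wrist_df, "ref_column": "acc_mag"},
        #       "chest": {"data": chest_df, "ref_column": "acc_mag"},
        #   }
        # where each DataFrame carries a pd.DatetimeIndex, as enforced by _check_sources below.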
def _check_sources(self):
"""Verifies that the source dict adheres to the required format and that the reference source is available"""
for source_name, source in self.sources.items():
if "data" not in source or "ref_column" not in source:
raise ValueError(
"Each source needs to have a `data` and a `ref_column` property"
)
if not isinstance(source["data"], pd.DataFrame):
raise ValueError(
"The `data` property of each source must contain a DatFrame"
)
if not isinstance(source["data"].index, pd.DatetimeIndex):
raise ValueError(
"The `data` DataFrame must have a pd.DatetimeIndex for each source"
)
if source["data"].index.duplicated().any():
raise ValueError(
"The input dataframe must not have duplicate index values, "
"convert the data into a normalized wide format"
)
if (
not isinstance(source["ref_column"], str)
or source["ref_column"] not in source["data"].columns
):
raise ValueError(
"Each source must have a string specifying the reference column, and the reference"
"column must be available in the source's DataFrame"
)
if self.ref_source_name not in self.sources.keys():
raise ValueError(
"The reference source name must be available in the source dict"
)
def _prepare_ref_signals(self) -> pd.DataFrame:
"""
Collect the reference columns from all sources and join them into a single dataframe.
Each reference column is named equal to the name of the source it comes from.
:return: normalized reference signals
"""
reference_signals = pd.DataFrame()
for source_name, source in self.sources.items():
signal = source["data"][source["ref_column"]].dropna()
reference_signals = reference_signals.join(signal, how="outer")
reference_signals.rename(
columns={source["ref_column"]: source_name}, inplace=True
)
reference_signals = reference_signals.apply(normalize)
return reference_signals
@staticmethod
def _get_timeshift_pair(
dataframe: pd.DataFrame, ref_col: str, sig_col: str, segments: SyncPairs
) -> SyncPairTimeshift:
"""
Returns timeshifts to synchronize sig_col to ref_col.
Expects equidistantly sampled signals.
:param dataframe: reference signal dataframe
:param ref_col: name of the reference signal in segments
:param sig_col: name of the target signal in segments
:param segments: all detected synchronization pairs
:return: timeshift to align the first and second synchronization point
for the target signal to the reference signal
"""
timeshifts = {}
for index, segment in enumerate(["first", "second"]):
logger.debug(
f"Calculate timeshift of {segment} segment "
f"for {sig_col} to {ref_col}."
)
# reference signal segment data extraction
ref_start, ref_end, ref_data = helpers.get_segment_data(
dataframe, segments, ref_col, segment
)
sig_start, sig_end, sig_data = helpers.get_segment_data(
dataframe, segments, sig_col, segment
)
# calculate cross-correlation of segments
cross_corr = correlate(ref_data, sig_data)
shift_in_samples = np.argmax(cross_corr) - len(sig_data) + 1
# get timestamp at which sig_segment must start to sync signals
max_corr_ts = dataframe.index[
dataframe.index.get_loc(ref_start, method="nearest") + shift_in_samples
]
logger.debug(
f"Highest correlation with start at "
f"{max_corr_ts} with {np.max(cross_corr)}."
)
# calculate timeshift to move signal to maximize correlation
timeshifts[segment] = max_corr_ts - sig_start
logger.debug("Timeshift is {}.".format(str(timeshifts[segment])))
return timeshifts
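    # Worked illustration (comments only): if the reference segment starts at 10:00:00.000,
    # the signals are resampled at 100 Hz and the cross-correlation peaks 250 samples to the
    # right, the target segment should start at 10:00:02.500; if it currently starts at
    # 10:00:01.000, the stored timeshift is +1.5 s.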
def _calculate_stretch_factors(self) -> pd.DataFrame:
"""
Calculate the stretch factor that aligns each reference signal to the reference
signal of the reference source. It immediately applies these stretch factors
to a copy of ``self.ref_signals``.
:return: a copy of self.ref_signals with the stretch factors applied.
"""
ref_signals = self.ref_signals.copy()
start_time = ref_signals.index.min()
# Get equidistantly sampled reference signals for the cross correlation to work
df_equidistant = get_equidistant_signals(ref_signals, self.sampling_freq)
sync_pairs = self.extractor.get_segments(df_equidistant)
helpers.verify_segments(ref_signals.columns, sync_pairs)
for source in df_equidistant.columns:
if source == self.ref_source_name:
continue
timeshifts = Synchronizer._get_timeshift_pair(
df_equidistant, self.ref_source_name, source, sync_pairs
)
logger.debug(
f"Timedelta between shifts before stretching: "
f"{timeshifts['first'] - timeshifts['second']}"
)
try:
stretch_factor = helpers.get_stretch_factor(
sync_pairs[source], timeshifts
)
except ZeroDivisionError:
raise StartEqualsEndError(
"First and last segment have been identified as exactly the same. Bad window, maybe?"
)
logger.info(f"Stretch factor for {source}: {stretch_factor}")
# stretch signal and exchange it in dataframe
signal_stretched = helpers.stretch_signals(
pd.DataFrame(ref_signals[source]).dropna(),
stretch_factor,
start_time,
)
ref_signals = (
ref_signals.drop(source, axis="columns")
.join(signal_stretched, how="outer")
.astype(pd.SparseDtype("float"))
)
self.sources[source]["stretch_factor"] = stretch_factor
return ref_signals
def _calculate_timeshifts(self, stretched_ref_signals: pd.DataFrame):
"""
Calculate the shift necessary to align the stretched reference signals to the not-stretched reference sensor.
:param stretched_ref_signals: a copy of self.ref_signals that has been stretched to align the duration between
the synchronization points to the duration between them in the reference sensor
"""
# Resample again with stretched signal
df_equi = get_equidistant_signals(stretched_ref_signals, self.sampling_freq)
segments = self.extractor.get_segments(df_equi)
helpers.verify_segments(stretched_ref_signals.columns, segments)
for source in df_equi.columns:
if source == self.ref_source_name:
continue
timeshifts = Synchronizer._get_timeshift_pair(
df_equi, self.ref_source_name, source, segments
)
timedelta = timeshifts["first"] - timeshifts["second"]
if timedelta > pd.Timedelta(0):
logger.warning(
f"Timedelta between shifts after stretching: {timedelta}."
f"This should be very small: the timedelta to the reference signal"
f"should be equal for both start and end so a simple offset aligns the"
f"signals perfectly."
)
logger.info("Timeshift for {}: {}".format(source, timeshifts["first"]))
self.sources[source]["timeshift"] = timeshifts["first"]
def _calculate_sync_params(self):
"""
This function calculates the synchronization parameters to sync all signals to the reference signal.
It stores the result in ``self.sources``, in the keys ``timeshift`` and ``stretch_factor``.
"""
self.sources[self.ref_source_name]["timeshift"] = None
self.sources[self.ref_source_name]["stretch_factor"] = 1
# Firstly, determine stretch factor and get stretched reference signals
stretched_ref_signals = self._calculate_stretch_factors()
# Secondly, get timeshift for the stretched signals
self._calculate_timeshifts(stretched_ref_signals)
def get_sync_params(self, recalculate: bool = False):
"""
Get the synchronization params. If they have not been calculated yet, they will be.
:param recalculate: force calculation, even if it was already done before
:return: the synchronization params for each source, i.e., each timeshift and stretch factor
"""
selected_keys = ["timeshift", "stretch_factor"]
if recalculate or "timeshift" not in self.sources[self.ref_source_name]:
self._calculate_sync_params()
return {
source_name: {
key: value for key, value in source.items() if key in selected_keys
}
for source_name, source in self.sources.items()
}
def get_synced_data(self, recalculate: bool = False) -> Dict[str, pd.DataFrame]:
"""
Synchronize the input data.
:param recalculate: force recalculating the synchronization parameters
:return: a dictionary of the shifted and stretched source signals
"""
self.get_sync_params(recalculate)
synced_data = {}
start_time = self.ref_signals.index.min()
for source_name, source in self.sources.items():
data = source["data"].copy()
stretch_factor, timeshift = source["stretch_factor"], source["timeshift"]
if stretch_factor != 1:
data = helpers.stretch_signals(data, stretch_factor, start_time)
if timeshift is not None:
data = data.shift(1, freq=timeshift)
synced_data[source_name] = data
return synced_data
def save_pickles(self, target_dir: str) -> Dict[str, pd.DataFrame]:
"""
Save a pickled, synced, dataframe for each source file.
Does not save a total table.
Sync parameters are saved as ``SYNC.csv``.
:param target_dir: target directory for the export files
:return: the synced data, plus a sync parameter dataframe in the dictionary entry with the key "SYNC".
"""
sync_params = pd.DataFrame(self.get_sync_params())
synced_data = self.get_synced_data()
sync_params.to_csv(os.path.join(target_dir, "SYNC.csv"))
for source_name, synced_df in synced_data.items():
synced_df.to_pickle(
os.path.join(target_dir, f"{source_name.upper()}.PICKLE")
)
return {**synced_data, "SYNC": sync_params}
def save_data(
self,
target_dir: str,
tables: Optional[ResultTableSpec] = None,
save_total_table: bool = True,
):
"""
Export synchronized data.
Two formats are possible: if ``tables`` is given, a file for each root key is created containing the columns
from the sensors specified as the keys on the second level. This can be used to create a file for each sensor
type, see ``ResultTableSpec`` for an example.
A ``SYNC.csv`` | |
# tap/utils.py
from argparse import ArgumentParser, ArgumentTypeError
from base64 import b64encode, b64decode
from collections import OrderedDict
import copy
from functools import wraps
import inspect
from io import StringIO
from json import JSONEncoder
import os
import pickle
import re
import subprocess
import sys
import tokenize
from typing import Any, Callable, Dict, Generator, Iterator, List, Tuple, Union
from typing_extensions import Literal
from typing_inspect import get_args
NO_CHANGES_STATUS = """nothing to commit, working tree clean"""
PRIMITIVES = (str, int, float, bool)
def check_output(command: List[str], suppress_stderr: bool = True) -> str:
"""Runs subprocess.check_output and returns the result as a string.
:param command: A list of strings representing the command to run on the command line.
:param suppress_stderr: Whether to suppress anything written to standard error.
:return: The output of the command, converted from bytes to string and stripped.
"""
with open(os.devnull, 'w') as devnull:
devnull = devnull if suppress_stderr else None
output = subprocess.check_output(command, stderr=devnull).decode('utf-8').strip()
return output
def has_git() -> bool:
"""Returns whether git is installed.
:return: True if git is installed, False otherwise.
"""
try:
output = check_output(['git', 'rev-parse', '--is-inside-work-tree'])
return output == 'true'
except (FileNotFoundError, subprocess.CalledProcessError):
return False
def get_git_root() -> str:
"""Gets the root directory of the git repo where the command is run.
:return: The root directory of the current git repo.
"""
return check_output(['git', 'rev-parse', '--show-toplevel'])
def get_git_url(commit_hash: bool = True) -> str:
"""Gets the https url of the git repo where the command is run.
:param commit_hash: If True, the url links to the latest local git commit hash.
If False, the url links to the general git url.
:return: The https url of the current git repo.
"""
# Get git url (either https or ssh)
try:
url = check_output(['git', 'remote', 'get-url', 'origin'])
except subprocess.CalledProcessError:
# For git versions <2.0
url = check_output(['git', 'config', '--get', 'remote.origin.url'])
# Remove .git at end
url = url[:-len('.git')]
# Convert ssh url to https url
m = re.search('git@(.+):', url)
if m is not None:
domain = m.group(1)
path = url[m.span()[1]:]
url = f'https://{domain}/{path}'
if commit_hash:
# Add tree and hash of current commit
url = os.path.join(url, 'tree', get_git_hash())
return url
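# Illustrative only: 'git@github.com:user/repo.git' becomes 'https://github.com/user/repo',
# and with commit_hash=True, 'https://github.com/user/repo/tree/<hash of HEAD>'.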
def get_git_hash() -> str:
"""Gets the git hash of HEAD of the git repo where the command is run.
:return: The git hash of HEAD of the current git repo.
"""
return check_output(['git', 'rev-parse', 'HEAD'])
def has_uncommitted_changes() -> bool:
"""Returns whether there are uncommitted changes in the git repo where the command is run.
:return: True if there are uncommitted changes in the current git repo, False otherwise.
"""
status = check_output(['git', 'status'])
return not status.endswith(NO_CHANGES_STATUS)
def type_to_str(type_annotation: Union[type, Any]) -> str:
"""Gets a string representation of the provided type.
:param type_annotation: A type annotation, which is either a built-in type or a typing type.
:return: A string representation of the type annotation.
"""
# Built-in type
if type(type_annotation) == type:
return type_annotation.__name__
# Typing type
return str(type_annotation).replace('typing.', '')
def get_argument_name(*name_or_flags) -> str:
"""Gets the name of the argument.
:param name_or_flags: Either a name or a list of option strings, e.g. foo or -f, --foo.
:return: The name of the argument (extracted from name_or_flags).
"""
if '-h' in name_or_flags or '--help' in name_or_flags:
return 'help'
if len(name_or_flags) > 1:
name_or_flags = [n_or_f for n_or_f in name_or_flags if n_or_f.startswith('--')]
if len(name_or_flags) != 1:
raise ValueError(f'There should only be a single canonical name for argument {name_or_flags}!')
return name_or_flags[0].lstrip('-')
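# Illustrative only:
# >>> get_argument_name('-f', '--foo')
# 'foo'
# >>> get_argument_name('bar')
# 'bar'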
def get_dest(*name_or_flags, **kwargs) -> str:
"""Gets the name of the destination of the argument.
:param name_or_flags: Either a name or a list of option strings, e.g. foo or -f, --foo.
:param kwargs: Keyword arguments.
:return: The name of the argument (extracted from name_or_flags).
"""
if '-h' in name_or_flags or '--help' in name_or_flags:
return 'help'
return ArgumentParser().add_argument(*name_or_flags, **kwargs).dest
def is_option_arg(*name_or_flags) -> bool:
"""Returns whether the argument is an option arg (as opposed to a positional arg).
:param name_or_flags: Either a name or a list of option strings, e.g. foo or -f, --foo.
:return: True if the argument is an option arg, False otherwise.
"""
return any(name_or_flag.startswith('-') for name_or_flag in name_or_flags)
def tokenize_source(obj: object) -> Generator:
"""Returns a generator for the tokens of the object's source code."""
source = inspect.getsource(obj)
token_generator = tokenize.generate_tokens(StringIO(source).readline)
return token_generator
def get_class_column(obj: type) -> int:
"""Determines the column number for class variables in a class."""
for token_type, token, (start_line, start_column), (end_line, end_column), line in tokenize_source(obj):
if start_line == 1 or token.strip() == '':
continue
return start_column
def source_line_to_tokens(obj: object) -> Dict[int, List[Dict[str, Union[str, int]]]]:
"""Gets a dictionary mapping from line number to a dictionary of tokens on that line for an object's source code."""
line_to_tokens = {}
for token_type, token, (start_line, start_column), (end_line, end_column), line in tokenize_source(obj):
line_to_tokens.setdefault(start_line, []).append({
'token_type': token_type,
'token': token,
'start_line': start_line,
'start_column': start_column,
'end_line': end_line,
'end_column': end_column,
'line': line
})
return line_to_tokens
def get_class_variables(cls: type) -> OrderedDict:
"""Returns an OrderedDict mapping class variables to their additional information (currently just comments)."""
# Get mapping from line number to tokens
line_to_tokens = source_line_to_tokens(cls)
# Get class variable column number
class_variable_column = get_class_column(cls)
# Extract class variables
variable_to_comment = OrderedDict()
for tokens in line_to_tokens.values():
for i, token in enumerate(tokens):
# Skip whitespace
if token['token'].strip() == '':
continue
# Match class variable
if (token['token_type'] == tokenize.NAME and
token['start_column'] == class_variable_column and
len(tokens) > i + 1 and
tokens[i + 1]['token'] in ['=', ':']):
class_variable = token['token']
variable_to_comment[class_variable] = {'comment': ''}
# Find the comment (if it exists)
for j in range(i + 1, len(tokens)):
if tokens[j]['token_type'] == tokenize.COMMENT:
# Leave out "#" and whitespace from comment
variable_to_comment[class_variable]['comment'] = tokens[j]['token'][1:].strip()
break
break
return variable_to_comment
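# Illustrative only: for a class defined as
#     class Args(Tap):
#         lr: float = 1e-3  # learning rate
#         name: str  # experiment name
# get_class_variables(Args) returns an OrderedDict along the lines of
# {'lr': {'comment': 'learning rate'}, 'name': {'comment': 'experiment name'}}.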
def get_literals(literal: Literal, variable: str) -> Tuple[Callable[[str], Any], List[str]]:
"""Extracts the values from a Literal type and ensures that the values are all primitive types."""
literals = list(get_args(literal))
if not all(isinstance(literal, PRIMITIVES) for literal in literals):
raise ValueError(
f'The type for variable "{variable}" contains a literal '
f'of a non-primitive type e.g. (str, int, float, bool).\n'
f'Currently only primitive-typed literals are supported.'
)
str_to_literal = {str(literal): literal for literal in literals}
if len(literals) != len(str_to_literal):
raise ValueError('All literals must have unique string representations')
def var_type(arg: str) -> Any:
return str_to_literal[arg]
return var_type, literals
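# Illustrative only:
# >>> var_type, literals = get_literals(Literal['small', 'large', 8], 'size')
# >>> literals
# ['small', 'large', 8]
# >>> var_type('8')
# 8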
def boolean_type(flag_value: str) -> bool:
"""Convert a string to a boolean if it is a prefix of 'True' or 'False' (case insensitive) or is '1' or '0'."""
if 'true'.startswith(flag_value.lower()) or flag_value == '1':
return True
if 'false'.startswith(flag_value.lower()) or flag_value == '0':
return False
raise ArgumentTypeError(f'Value has to be a prefix of "True" or "False" (case insensitive) or "1" or "0".')
class TupleTypeEnforcer:
"""The type argument to argparse for checking and applying types to Tuples."""
def __init__(self, types: List[type], loop: bool = False):
self.types = [boolean_type if t == bool else t for t in types]
self.loop = loop
self.index = 0
def __call__(self, arg: str) -> Any:
arg = self.types[self.index](arg)
self.index += 1
if self.loop:
self.index %= len(self.types)
return arg
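# Illustrative only: argparse calls the enforcer once per tuple element, so the types are
# applied positionally (bool fields go through boolean_type above).
# >>> enforcer = TupleTypeEnforcer([int, float, bool])
# >>> [enforcer(v) for v in ['1', '2.5', 'true']]
# [1, 2.5, True]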
class MockTuple:
"""Mock of a tuple needed to prevent JSON encoding tuples as lists."""
def __init__(self, _tuple: tuple) -> None:
self.tuple = _tuple
def _nested_replace_type(obj: Any, find_type: type, replace_type: type) -> Any:
"""Replaces any instance (including instances within lists, tuple, dict) of find_type with an instance of replace_type.
Note: Tuples, lists, and dicts are NOT modified in place.
Note: Does NOT do a nested search through objects besides tuples, lists, and dicts (e.g. sets).
:param obj: The object to modify by replacing find_type instances with replace_type instances.
:param find_type: The type to find in obj.
:param replace_type: The type to used to replace find_type in obj.
:return: A version of obj with all instances of find_type replaced by replace_type
"""
if isinstance(obj, tuple):
obj = tuple(_nested_replace_type(item, find_type, replace_type) for item in obj)
elif isinstance(obj, list):
obj = [_nested_replace_type(item, find_type, replace_type) for item in obj]
elif isinstance(obj, dict):
obj = {
_nested_replace_type(key, find_type, replace_type): _nested_replace_type(value, find_type, replace_type)
for key, value in obj.items()
}
if isinstance(obj, find_type):
obj = replace_type(obj)
return obj
class PythonObjectEncoder(JSONEncoder):
"""Stores parameters that are not JSON serializable as pickle dumps.
See: https://stackoverflow.com/a/36252257
"""
def iterencode(self, o: Any, _one_shot: bool = False) -> Iterator[str]:
o = _nested_replace_type(o, tuple, MockTuple)
| |
from enum import Enum
from typing import (List, Mapping, Sequence, Optional, MutableSequence,
TypeVar, Any, FrozenSet, MutableSet, Set, MutableMapping,
Dict, Tuple, _Union)
from ._compat import lru_cache, unicode, bytes, is_py2
from .disambiguators import create_uniq_field_dis_func
from .multistrategy_dispatch import MultiStrategyDispatch
from attr import NOTHING
NoneType = type(None)
T = TypeVar('T')
V = TypeVar('V')
class UnstructureStrategy(Enum):
"""`attrs` classes unstructuring strategies."""
AS_DICT = "asdict"
AS_TUPLE = "astuple"
def _is_attrs_class(cls):
return getattr(cls, "__attrs_attrs__", None) is not None
def _is_union_type(obj):
""" returns true if the object is an instance of union. """
return isinstance(obj, _Union)
def _subclass(typ):
""" a shortcut """
return (lambda cls: issubclass(cls, typ))
class Converter(object):
"""Converts between structured and unstructured data."""
__slots__ = ('_dis_func_cache', 'unstructure_func', 'unstructure_attrs',
'structure_attrs', '_structure', '_dict_factory',
'_union_registry', 'structure_func')
def __init__(self, dict_factory=dict,
unstruct_strat=UnstructureStrategy.AS_DICT):
# Create a per-instance cache.
self.unstruct_strat = UnstructureStrategy(unstruct_strat)
if is_py2: # in py2, the unstruct_strat property setter is not invoked
self._unstruct_strat(unstruct_strat)
self._dis_func_cache = lru_cache()(self._get_dis_func)
self.unstructure_func = MultiStrategyDispatch(
self._unstructure_default
)
self.unstructure_func.register_cls_list([
(bytes, self._unstructure_identity),
(unicode, self._unstructure_identity),
])
self.unstructure_func.register_func_list([
(_subclass(Mapping), self._unstructure_mapping),
(_subclass(Sequence), self._unstructure_seq),
(_subclass(Enum), self._unstructure_enum),
(_is_attrs_class,
lambda *args, **kwargs: self.unstructure_attrs(*args, **kwargs)),
])
# Per-instance register of to-attrs converters.
# Singledispatch dispatches based on the first argument, so we
# store the function and switch the arguments in self.loads.
self.structure_func = MultiStrategyDispatch(self._structure_default)
self.structure_func.register_func_list([
(_subclass(List), self._structure_list),
(_subclass(Sequence), self._structure_list),
(_subclass(MutableSequence), self._structure_list),
(_subclass(MutableSet), self._structure_set),
(_subclass(Set), self._structure_set),
(_subclass(FrozenSet), self._structure_frozenset),
(_subclass(Dict), self._structure_dict),
(_subclass(Mapping), self._structure_dict),
(_subclass(MutableMapping), self._structure_dict),
(_subclass(Tuple), self._structure_tuple),
(_is_union_type, self._structure_union),
(_is_attrs_class,
lambda *args, **kwargs: self.structure_attrs(*args, **kwargs))
])
# Strings are sequences.
self.structure_func.register_cls_list([
(unicode, self._structure_unicode if is_py2
else self._structure_call),
(bytes, self._structure_call),
(int, self._structure_call),
(float, self._structure_call),
(Enum, self._structure_call),
])
self._structure = self.structure_func
self._dict_factory = dict_factory
# Unions are instances now, not classes. We use different registry.
self._union_registry = {}
def unstructure(self, obj):
return self.unstructure_func.dispatch(type(obj))(obj)
@property
def unstruct_strat(self):
# type: () -> UnstructureStrategy
"""The default way of unstructuring ``attrs`` classes."""
return (UnstructureStrategy.AS_DICT
if self.unstructure_attrs == self.unstructure_attrs_asdict
else UnstructureStrategy.AS_TUPLE)
@unstruct_strat.setter
def unstruct_strat(self, val):
# type: (UnstructureStrategy) -> None
self._unstruct_strat(val)
def _unstruct_strat(self, val):
# type: (UnstructureStrategy) -> None
if val is UnstructureStrategy.AS_DICT:
self.unstructure_attrs = self.unstructure_attrs_asdict
self.structure_attrs = self.structure_attrs_fromdict
else:
self.unstructure_attrs = self.unstructure_attrs_astuple
self.structure_attrs = self.structure_attrs_fromtuple
def register_unstructure_hook(self, cls, func):
# type: (Type[T], Callable[[T], Any]) -> None
"""Register a class-to-primitive converter function for a class.
The converter function should take an instance of the class and return
its Python equivalent.
"""
self.unstructure_func.register_cls_list([(cls, func)])
def register_unstructure_hook_func(self, check_func, func):
"""Register a class-to-primitive converter function for a class, using
a function to check if it's a match.
"""
# type: (Callable[Any], Callable[T], Any]) -> None
self.unstructure_func.register_func_list([(check_func, func)])
def register_structure_hook(self, cl, func):
"""Register a primitive-to-class converter function for a type.
The converter function should take two arguments:
* a Python object to be converted,
* the type to convert to
and return the instance of the class. The type may seem redundant, but
is sometimes needed (for example, when dealing with generic classes).
"""
# type: (Type[T], Callable[[Any, Type], T) -> None
if _is_union_type(cl):
self._union_registry[cl] = func
else:
self._structure.register_cls_list([(cl, func)])
def register_structure_hook_func(self, check_func, func):
# type: (Callable[Any], Callable[T], Any]) -> None
"""Register a class-to-primitive converter function for a class, using
a function to check if it's a match.
"""
self.structure_func.register_func_list([(check_func, func)])
def structure(self, obj, cl):
"""Convert unstructured Python data structures to structured data."""
# type: (Any, Type) -> Any
return self.structure_func.dispatch(cl)(obj, cl)
# Classes to Python primitives.
def _unstructure_default(self, obj):
return obj
def unstructure_attrs_asdict(self, obj):
"""Our version of `attrs.asdict`, so we can call back to us."""
attrs = obj.__class__.__attrs_attrs__
rv = self._dict_factory()
for a in attrs:
name = a.name
v = getattr(obj, name)
rv[name] = self.unstructure(v)
return rv
def unstructure_attrs_astuple(self, obj):
"""Our version of `attrs.astuple`, so we can call back to us."""
attrs = obj.__class__.__attrs_attrs__
return tuple(self.unstructure(getattr(obj, a.name)) for a in attrs)
def _unstructure_enum(self, obj):
"""Convert an enum to its value."""
return obj.value
def _unstructure_identity(self, obj):
"""Just pass it through."""
return obj
def _unstructure_seq(self, seq):
"""Convert a sequence to primitive equivalents."""
# We can reuse the sequence class, so tuples stay tuples.
return seq.__class__(self.unstructure(e) for e in seq)
def _unstructure_mapping(self, mapping):
# type: (Mapping) -> Any
"""Convert a mapping of attr classes to primitive equivalents."""
# We can reuse the mapping class, so dicts stay dicts and OrderedDicts
# stay OrderedDicts.
return mapping.__class__((self.unstructure(k), self.unstructure(v))
for k, v in mapping.items())
# Python primitives to classes.
def _structure_default(self, obj, cl):
"""This is the fallthrough case. Everything is a subclass of `Any`.
A special condition here handles ``attrs`` classes.
Bare optionals end here too (optionals with arguments are unions.) We
treat bare optionals as Any.
"""
if cl is Any or cl is Optional:
return obj
# We don't know what this is, so we complain loudly.
msg = "Unsupported type: {0}. Register a structure hook for " \
"it.".format(cl)
raise ValueError(msg)
def _structure_call(self, obj, cl):
"""Just call ``cl`` with the given ``obj``.
This is just an optimization on the ``_structure_default`` case, when
we know we can skip the ``if`` s. Use for ``str``, ``bytes``, ``enum``,
etc.
"""
return cl(obj)
def _structure_unicode(self, obj, cl):
"""Just call ``cl`` with the given ``obj``"""
if not isinstance(obj, (bytes, unicode)):
return cl(str(obj))
else:
return obj
# Attrs classes.
def structure_attrs_fromtuple(self, obj, cl):
# type: (Sequence[Any], Type) -> Any
"""Load an attrs class from a sequence (tuple)."""
conv_obj = [] # A list of converter parameters.
for a, value in zip(cl.__attrs_attrs__, obj):
# We detect the type by the metadata.
converted = self._structure_attr_from_tuple(a, a.name, value)
conv_obj.append(converted)
return cl(*conv_obj)
def _structure_attr_from_tuple(self, a, name, value):
"""Handle an individual attrs attribute."""
type_ = a.type
if type_ is None:
# No type metadata.
return value
return self._structure.dispatch(type_)(value, type_)
def structure_attrs_fromdict(self, obj, cl):
# type: (Mapping, Type) -> Any
"""Instantiate an attrs class from a mapping (dict)."""
# For public use.
conv_obj = obj.copy() # Dict of converted parameters.
for a in cl.__attrs_attrs__:
name = a.name
# We detect the type by metadata.
converted = self._structure_attr_from_dict(a, name, obj)
if converted is not NOTHING:
conv_obj[name] = converted
return cl(**conv_obj)
def _structure_attr_from_dict(self, a, name, mapping):
"""Handle an individual attrs attribute structuring."""
val = mapping.get(name, NOTHING)
if val is NOTHING:
return NOTHING
type_ = a.type
if type_ is None:
# No type.
return val
if _is_union_type(type_):
if NoneType in type_.__args__ and val is None:
return None
return self._structure_union(val, type_)
return self._structure.dispatch(type_)(val, type_)
def _structure_list(self, obj, cl):
# type: (Type[GenericMeta], Iterable[T]) -> List[T]
"""Convert an iterable to a potentially generic list."""
if not cl.__args__ or cl.__args__[0] is Any:
return [e for e in obj]
else:
elem_type = cl.__args__[0]
return [self._structure.dispatch(elem_type)(e, elem_type)
for e in obj]
def _structure_set(self, obj, cl):
# type: (Type[GenericMeta], Iterable[T]) -> MutableSet[T]
"""Convert an iterable into a potentially generic set."""
if not cl.__args__ or cl.__args__[0] is Any:
return set(obj)
else:
elem_type = cl.__args__[0]
return {self._structure.dispatch(elem_type)(e, elem_type)
for e in obj}
def _structure_frozenset(self, obj, cl):
# type: (Type[GenericMeta], Iterable[T]) -> FrozenSet[T]
"""Convert an iterable into a potentially generic frozenset."""
if not cl.__args__ or cl.__args__[0] is Any:
return frozenset(obj)
else:
elem_type = cl.__args__[0]
return frozenset((self._structure.dispatch(elem_type)(e, elem_type)
for e in obj))
def _structure_dict(self, obj, cl):
# type: (Type[GenericMeta], Mapping[T, V]) -> Dict[T, V]
"""Convert a mapping into a potentially generic dict."""
if not cl.__args__ or cl.__args__ == (Any, Any):
return dict(obj)
else:
key_type, val_type = cl.__args__
if key_type is Any:
val_conv = self._structure.dispatch(val_type)
return {k: val_conv(v, val_type) for k, v in obj.items()}
elif val_type is Any:
key_conv = self._structure.dispatch(key_type)
return {key_conv(k, key_type): v for k, v in obj.items()}
else:
key_conv = self._structure.dispatch(key_type)
val_conv = self._structure.dispatch(val_type)
return {key_conv(k, key_type): val_conv(v, val_type)
for k, v in obj.items()}
def _structure_union(self, obj, union):
# type: (_Union, Any): -> Any
"""Deal with converting a union."""
# Note that optionals are unions that contain NoneType. We check for
# NoneType early and handle the case of obj being None, so
# disambiguation functions don't need to handle NoneType.
# Check the union registry first.
handler = self._union_registry.get(union)
if handler is not None:
return handler(obj, union)
# Unions with NoneType in them are basically optionals.
union_params = union.__args__
if NoneType in union_params:
if obj is None:
| |
<gh_stars>1-10
"""camera_module.py: Cobblr module that uses PiTFT and RPi camera to take pictures."""
__author__ = '<NAME>'
__credit__ = ['<NAME>', '<name of persons>']
__license__ = "GPL"
__version__ = "1.0.1"
__email__ = "<EMAIL>"
from engine import Screen
from engine import Utilities
from engine import TextWriter
from engine import SystemState
from engine import Menu
from engine import Events
import RPi.GPIO
import io
import os
import signal
import picamera
import time
import threading
signal.signal(signal.SIGINT, Utilities.GracefulExit)
class CameraState(object):
pass
def Init():
"""Sets up the class and variables needed in order to run the camera app."""
RPi.GPIO.setup(7, RPi.GPIO.OUT) #Flash RPi.GPIO
RPi.GPIO.setup(8, RPi.GPIO.IN, pull_up_down=RPi.GPIO.PUD_UP) #Button RPi.GPIO
SystemState.CameraState = CameraState
RPi.GPIO.output(7, False)
SystemState.CameraState.current_photo = ""
SystemState.CameraState.photo_file_name = None
SystemState.CameraState.photo_path = 'media/photos/'
SystemState.CameraState.preview_path = SystemState.CameraState.photo_path + '.preview/'
preview_path = SystemState.CameraState.preview_path
SystemState.CameraState.image_effect = 0
SystemState.CameraState.photo_tally = None
SystemState.CameraState.flash_enabled = True
SystemState.CameraState.exit_camera = False
SystemState.CameraState.camera_stream = False
SystemState.CameraState.album = False
SystemState.CameraState.setting = 'none'
MakePhotoPath()
SystemState.CameraState.photo_archive = os.listdir(preview_path)
SystemState.CameraState.photo_archive = [os.path.join(preview_path, pic) for pic in SystemState.CameraState.photo_archive]
SystemState.CameraState.photo_archive = sorted(SystemState.CameraState.photo_archive)
SystemState.CameraState.photo_count = len(SystemState.CameraState.photo_archive)
SystemState.CameraState.image_effect = 0
SystemState.CameraState.iso = 0
SystemState.CameraState.rotation = 0
SystemState.CameraState.brightness = 5
SystemState.CameraState.saturation = 10
SystemState.CameraState.contrast = 10
SystemState.CameraState.sharpness = 10
SystemState.CameraState.zoom = 0
SystemState.CameraState.meter_mode = 0
SystemState.CameraState.awb_mode = 0
SystemState.CameraState.exposure_mode = 0
SystemState.CameraState.shutter_speed = 0
SystemState.CameraState.iso_values = [0, 100, 200, 320, 400, 500, 640, 800]
SystemState.CameraState.image_effect_values = [
'none', 'negative', 'solarize', 'sketch', 'denoise', 'emboss', 'oilpaint',
'hatch','gpen', 'pastel', 'watercolor', 'film', 'blur', 'saturation',
'colorswap', 'washedout', 'posterise', 'colorpoint', 'colorbalance',
'cartoon', 'deinterlace1', 'deinterlace2'
]
SystemState.CameraState.awb_mode_values = [
'auto', 'sunlight', 'cloudy', 'shade', 'tungsten', 'fluorescent',
'incandescent', 'flash', 'horizon', 'off'
]
SystemState.CameraState.rotation_values = [0, 90, 180, 270]
SystemState.CameraState.brightness_values = [0, 10, 20, 30, 40, 50, 60, 70, 80, 90, 100]
hundred_container = [-100, -90, -80, -70, -60, -50, -40, -30, -20, -10, 0, 10, 20, 30, 40, 50, 60, 70, 80, 90, 100]
SystemState.CameraState.saturation_values = hundred_container
SystemState.CameraState.contrast_values = hundred_container
SystemState.CameraState.sharpness_values = hundred_container
SystemState.CameraState.zoom_values = [
(0.0, 0.0, 1.0, 1.0),
(0.1, 0.1, 0.9, 0.9),
(0.225, 0.225, 0.8, 0.8),
(0.25, 0.25, 0.7, 0.7),
(0.275, 0.275, 0.6, 0.6),
(0.3, 0.3, 0.5, 0.5),
(0.325, 0.325, 0.4, 0.4),
(0.35, 0.25, 0.3, 0.3),
(0.375, 0.375, 0.2, 0.2),
(0.4, 0.4, 0.1, 0.1),
]
SystemState.CameraState.meter_mode_values = [
'average', 'spot', 'backlit', 'matrix'
]
SystemState.CameraState.exposure_mode_values = [
'auto', 'night', 'nightpreview', 'backlight', 'spotlight',
'sports', 'snow', 'beach', 'verylong', 'fixedfps', 'antishake',
'fireworks', 'off'
]
SystemState.CameraState.shutter_speed_values = [1000000, 100000, 10000, 1000, 100]
def MakePhotoPath():
"""Creates the folder that stores the highres and preview photo."""
if os.path.exists(SystemState.CameraState.preview_path) == False:
os.makedirs(SystemState.CameraState.preview_path)
os.chown(SystemState.CameraState.preview_path, SystemState.uid, SystemState.gid)
def Process():
"""Determines what buttons were pressed with each screen touch."""
button = str(SystemState.pressed_button)
pygame = SystemState.pygame
screen = SystemState.screen
screen_mode = SystemState.screen_mode
if button == 'flash_on':
Menu.JumpTo(screen_mode=2, toggle=True)
SystemState.CameraState.flash_enabled = False
elif button == 'flash_off':
Menu.JumpTo(screen_mode=1, toggle=True)
SystemState.CameraState.flash_enabled = True
elif button == 'go_back':
Menu.Back()
SystemState.CameraState.setting = 'none'
SystemState.CameraState.album = False
elif button == 'gallery':
Menu.JumpTo(screen_mode=3)
OpenAlbum()
elif button == 'right_arrow':
__ProcessRightArrow()
elif button == 'left_arrow':
__ProcessLeftArrow()
elif button == 'capture':
CallTakePhoto()
elif button == 'delete' and SystemState.CameraState.photo_count > 0:
Menu.JumpTo(screen_mode=4)
BlitImage(SystemState.CameraState.current_photo, SystemState.pygame, SystemState.screen)
TextWriter.Write(state=SystemState, text='Delete?', position=(125, 75), size=20)
elif button == 'iso':
Menu.JumpTo(screen_mode=6)
SystemState.CameraState.setting = 'iso'
elif button == 'image_effect':
Menu.JumpTo(screen_mode=6)
SystemState.CameraState.setting = 'image_effect'
elif button == 'rotation':
Menu.JumpTo(screen_mode=6)
SystemState.CameraState.setting = 'rotation'
elif button == 'brightness':
Menu.JumpTo(screen_mode=6)
SystemState.CameraState.setting = 'brightness'
elif button == 'saturation':
Menu.JumpTo(screen_mode=6)
SystemState.CameraState.setting = 'saturation'
elif button == 'contrast':
Menu.JumpTo(screen_mode=6)
SystemState.CameraState.setting = 'contrast'
elif button == 'sharpness':
Menu.JumpTo(screen_mode=6)
SystemState.CameraState.setting = 'sharpness'
elif button == 'zoom':
Menu.JumpTo(screen_mode=6)
SystemState.CameraState.setting = 'zoom'
elif button == 'meter_mode':
Menu.JumpTo(screen_mode=6)
SystemState.CameraState.setting = 'meter_mode'
elif button == 'awb':
Menu.JumpTo(screen_mode=6)
SystemState.CameraState.setting = 'awb_mode'
elif button == 'shutter_speed':
Menu.JumpTo(screen_mode=6)
SystemState.CameraState.setting = 'shutter_speed'
elif button == 'exposure_mode':
Menu.JumpTo(screen_mode=6)
SystemState.CameraState.setting = 'exposure_mode'
elif button == 'settings':
Menu.JumpTo(screen_mode=5)
elif button == 'accept':
DeletePhoto()
Menu.Back()
OpenAlbum()
elif button == 'decline':
Menu.Back()
OpenAlbum()
if SystemState.screen_mode == 5 and SystemState.next_screen_mode == 6:
setting = SystemState.CameraState.setting
setting_values = setting + '_values'
__CurrentSetting(setting_values, setting)
SystemState.CameraState.camera_stream = False
def __PreviousSetting(property_list, property_name):
properties = getattr(SystemState.CameraState, property_list)
index = getattr(SystemState.CameraState, property_name)
if index > 0:
index -= 1
else:
index = len(properties) - 1
__ProcessSettingsValues(property_name, properties, index)
def __NextSetting(property_list, property_name):
properties = getattr(SystemState.CameraState, property_list)
index = getattr(SystemState.CameraState, property_name)
if index < len(properties) - 1:
index += 1
else:
index = 0
__ProcessSettingsValues(property_name, properties, index)
def __CurrentSetting(property_list, property_name):
# Shortens code.
properties = getattr(SystemState.CameraState, property_list)
index = getattr(SystemState.CameraState, property_name)
__ProcessSettingsValues(property_name, properties, index)
def __ProcessSettingsValues(property_name, properties, index):
"""Corrects settings values to auto if they equal zero."""
property_value = properties[index]
setattr(SystemState.camera, property_name, property_value)
setattr(SystemState.CameraState, property_name, index)
property_type = type(property_value)
# Defaults all values of zero to the word auto.
if property_value == 0 and property_type is not bool:
property_value = 'Auto'
# Makes 'zoom' human readable.
if property_type is tuple:
if index == 0:
index = None
property_value = str(index)
property_name = ' '.join(property_name.split('_'))
__WriteSettingsTitle(property_name)
__WriteSettingsValue(property_value)
def __WriteSettingsValue(text):
"""Writes the current value of the setting being manipulated."""
TextWriter.Write(
state=SystemState,
text=str(text).title(),
position=(160, 110),
centered=True,
size=20,
permatext=True,
color=(57, 255, 20)
)
def __WriteSettingsTitle(text):
"""Writes the name of the setting being manipulated."""
TextWriter.Write(
state=SystemState,
text=str(text).title(),
position=(160, 10),
centered=True,
size=25,
permatext=True,
color=(57, 255, 20)
)
def __ProcessLeftArrow():
"""Processes the left arrow button while in a setting."""
if SystemState.CameraState.setting == 'image_effect':
__PreviousSetting('image_effect_values', 'image_effect')
elif SystemState.CameraState.setting == 'iso':
__PreviousSetting('iso_values', 'iso')
elif SystemState.CameraState.setting == 'rotation':
__PreviousSetting('rotation_values', 'rotation')
elif SystemState.CameraState.setting == 'brightness':
__PreviousSetting('brightness_values', 'brightness')
elif SystemState.CameraState.setting == 'saturation':
__PreviousSetting('saturation_values', 'saturation')
elif SystemState.CameraState.setting == 'contrast':
__PreviousSetting('contrast_values', 'contrast')
elif SystemState.CameraState.setting == 'sharpness':
__PreviousSetting('sharpness_values', 'sharpness')
elif SystemState.CameraState.setting == 'zoom':
__PreviousSetting('zoom_values', 'zoom')
elif SystemState.CameraState.setting == 'meter_mode':
__PreviousSetting('meter_mode_values', 'meter_mode')
elif SystemState.CameraState.setting == 'awb_mode':
__PreviousSetting('awb_mode_values', 'awb_mode')
elif SystemState.CameraState.setting == 'shutter_speed':
__PreviousSetting('shutter_speed_values', 'shutter_speed')
elif SystemState.CameraState.setting == 'exposure_mode':
__PreviousSetting('exposure_mode_values', 'exposure_mode')
elif SystemState.screen_mode == 3:
if SystemState.CameraState.photo_count > 0:
PreviousPhoto()
def __ProcessRightArrow():
"""Processes the right arrow button while in a setting."""
if SystemState.CameraState.setting == 'image_effect':
__NextSetting('image_effect_values', 'image_effect')
elif SystemState.CameraState.setting == 'iso':
__NextSetting('iso_values', 'iso')
elif SystemState.CameraState.setting == 'rotation':
__NextSetting('rotation_values', 'rotation')
elif SystemState.CameraState.setting == 'brightness':
__NextSetting('brightness_values', 'brightness')
elif SystemState.CameraState.setting == 'saturation':
__NextSetting('saturation_values', 'saturation')
elif SystemState.CameraState.setting == 'contrast':
__NextSetting('contrast_values', 'contrast')
elif SystemState.CameraState.setting == 'sharpness':
__NextSetting('sharpness_values', 'sharpness')
elif SystemState.CameraState.setting == 'zoom':
__NextSetting('zoom_values', 'zoom')
elif SystemState.CameraState.setting == 'meter_mode':
__NextSetting('meter_mode_values', 'meter_mode')
elif SystemState.CameraState.setting == 'awb_mode':
__NextSetting('awb_mode_values', 'awb_mode')
elif SystemState.CameraState.setting == 'shutter_speed':
__NextSetting('shutter_speed_values', 'shutter_speed')
elif SystemState.CameraState.setting == 'exposure_mode':
__NextSetting('exposure_mode_values', 'exposure_mode')
elif SystemState.screen_mode == 3:
if SystemState.CameraState.photo_count > 0:
NextPhoto()
def CallTakePhoto():
"""Takes a preview photo with the camera. """
# Only if the flash is enabled will the flash turn on.
if SystemState.CameraState.flash_enabled == True:
CallFlash()
# Grabs the timestamp of when the photo was taken.
SystemState.CameraState.photo_time = str(int(time.time()))
file_name = SystemState.CameraState.preview_path + 'PREVIEW-' + SystemState.CameraState.photo_time + '.jpeg'
SystemState.camera.capture(file_name, use_video_port=True, splitter_port=1, format='jpeg')
thread = threading.Thread(target=TakePhoto)
thread.start()
ShowPhoto(file_name)
thread.join()
def TakePhoto():
"""Takes a high res photo with the camera."""
file_name = SystemState.CameraState.photo_path + SystemState.CameraState.photo_time + '.jpeg'
SystemState.camera.resolution = (2592, 1944)
SystemState.camera.capture(file_name, use_video_port=False, format='jpeg')
SystemState.camera.resolution = (320, 240)
def Flash():
"""Turns on the flash light on the front of the camera."""
time.sleep(0)
RPi.GPIO.output(7, True)
time.sleep(0.150)
RPi.GPIO.output(7, False)
def CallFlash():
"""Calls the flash function in a thread."""
thread = threading.Thread(target=Flash)
thread.setDaemon(True)
thread.start()
def OpenAlbum():
"""Opens the photos folder."""
path = SystemState.CameraState.preview_path
SystemState.CameraState.photo_archive = os.listdir(path)
SystemState.CameraState.photo_archive = [os.path.join(path, pic) for pic in SystemState.CameraState.photo_archive]
SystemState.CameraState.photo_archive = sorted(SystemState.CameraState.photo_archive)
SystemState.CameraState.photo_count = len(SystemState.CameraState.photo_archive)
SystemState.CameraState.album = True
#If there is a picture in there.
if SystemState.CameraState.photo_count > 0:
#If that photo is in the list, go to that photo. If not, go to the last photo.
if SystemState.CameraState.current_photo in SystemState.CameraState.photo_archive:
SystemState.CameraState.photo_index = SystemState.CameraState.photo_archive.index(SystemState.CameraState.current_photo)
else:
SystemState.CameraState.photo_index = SystemState.CameraState.photo_count - 1
ShowPhoto(file_index=SystemState.CameraState.photo_index)
else:
TextWriter.Write(
state=SystemState,
text='No Pictures',
position=(95, 100),
permatext=True,
size=20
)
def NextPhoto():
"""Switches to the next photo in the photo archive."""
if SystemState.CameraState.photo_index < SystemState.CameraState.photo_count - 1:
SystemState.CameraState.photo_index += 1
else:
SystemState.CameraState.photo_index = 0
file_name = SystemState.CameraState.photo_archive[SystemState.CameraState.photo_index]
SystemState.CameraState.photo_tally = str(SystemState.CameraState.photo_index + 1) + '/' + str(SystemState.CameraState.photo_count)
ShowPhoto(file_name)
def PreviousPhoto():
"""Switches to the previous photo in the photo archive."""
if SystemState.CameraState.photo_index > 0:
SystemState.CameraState.photo_index -= 1
else:
SystemState.CameraState.photo_index = SystemState.CameraState.photo_count - 1
file_name = SystemState.CameraState.photo_archive[SystemState.CameraState.photo_index]
SystemState.CameraState.photo_tally = str(SystemState.CameraState.photo_index + 1) + '/' + str(SystemState.CameraState.photo_count)
ShowPhoto(file_name)
def BlitImage(file_name, pygame, screen):
"""Stamps the photo on the screen object."""
try:
raw_image = pygame.image.load(file_name)
scaled_image = pygame.transform.scale(raw_image, (320, 240))
scaled_x = (320 - scaled_image.get_width()) / 2
scaled_y = (240 - scaled_image.get_height()) / 2
screen.blit(scaled_image, (scaled_x, scaled_y))
except:
screen.fill(0)
TextWriter.Write(
state=SystemState,
text='Unsupported Format',
color=(255, 0, | |
pulumi.Input[str] ttl_as_iso8601: The period of time for which a message is available to consume before it is expired by the IoT hub. See: https://docs.microsoft.com/azure/iot-hub/iot-hub-devguide-messaging#cloud-to-device-messages.
"""
if lock_duration_as_iso8601 is not None:
pulumi.set(__self__, "lock_duration_as_iso8601", lock_duration_as_iso8601)
if max_delivery_count is not None:
pulumi.set(__self__, "max_delivery_count", max_delivery_count)
if ttl_as_iso8601 is not None:
pulumi.set(__self__, "ttl_as_iso8601", ttl_as_iso8601)
@property
@pulumi.getter(name="lockDurationAsIso8601")
def lock_duration_as_iso8601(self) -> Optional[pulumi.Input[str]]:
"""
The lock duration for the feedback queue. See: https://docs.microsoft.com/azure/iot-hub/iot-hub-devguide-messaging#cloud-to-device-messages.
"""
return pulumi.get(self, "lock_duration_as_iso8601")
@lock_duration_as_iso8601.setter
def lock_duration_as_iso8601(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "lock_duration_as_iso8601", value)
@property
@pulumi.getter(name="maxDeliveryCount")
def max_delivery_count(self) -> Optional[pulumi.Input[int]]:
"""
The number of times the IoT hub attempts to deliver a message on the feedback queue. See: https://docs.microsoft.com/azure/iot-hub/iot-hub-devguide-messaging#cloud-to-device-messages.
"""
return pulumi.get(self, "max_delivery_count")
@max_delivery_count.setter
def max_delivery_count(self, value: Optional[pulumi.Input[int]]):
pulumi.set(self, "max_delivery_count", value)
@property
@pulumi.getter(name="ttlAsIso8601")
def ttl_as_iso8601(self) -> Optional[pulumi.Input[str]]:
"""
The period of time for which a message is available to consume before it is expired by the IoT hub. See: https://docs.microsoft.com/azure/iot-hub/iot-hub-devguide-messaging#cloud-to-device-messages.
"""
return pulumi.get(self, "ttl_as_iso8601")
@ttl_as_iso8601.setter
def ttl_as_iso8601(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "ttl_as_iso8601", value)
@pulumi.input_type
class IotHubPropertiesArgs:
def __init__(__self__, *,
allowed_fqdn_list: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
authorization_policies: Optional[pulumi.Input[Sequence[pulumi.Input['SharedAccessSignatureAuthorizationRuleArgs']]]] = None,
cloud_to_device: Optional[pulumi.Input['CloudToDevicePropertiesArgs']] = None,
comments: Optional[pulumi.Input[str]] = None,
disable_device_sas: Optional[pulumi.Input[bool]] = None,
disable_local_auth: Optional[pulumi.Input[bool]] = None,
disable_module_sas: Optional[pulumi.Input[bool]] = None,
enable_file_upload_notifications: Optional[pulumi.Input[bool]] = None,
event_hub_endpoints: Optional[pulumi.Input[Mapping[str, pulumi.Input['EventHubPropertiesArgs']]]] = None,
features: Optional[pulumi.Input[Union[str, 'Capabilities']]] = None,
ip_filter_rules: Optional[pulumi.Input[Sequence[pulumi.Input['IpFilterRuleArgs']]]] = None,
messaging_endpoints: Optional[pulumi.Input[Mapping[str, pulumi.Input['MessagingEndpointPropertiesArgs']]]] = None,
min_tls_version: Optional[pulumi.Input[str]] = None,
network_rule_sets: Optional[pulumi.Input['NetworkRuleSetPropertiesArgs']] = None,
private_endpoint_connections: Optional[pulumi.Input[Sequence[pulumi.Input['PrivateEndpointConnectionArgs']]]] = None,
public_network_access: Optional[pulumi.Input[Union[str, 'PublicNetworkAccess']]] = None,
restrict_outbound_network_access: Optional[pulumi.Input[bool]] = None,
routing: Optional[pulumi.Input['RoutingPropertiesArgs']] = None,
storage_endpoints: Optional[pulumi.Input[Mapping[str, pulumi.Input['StorageEndpointPropertiesArgs']]]] = None):
"""
The properties of an IoT hub.
:param pulumi.Input[Sequence[pulumi.Input[str]]] allowed_fqdn_list: List of allowed FQDNs(Fully Qualified Domain Name) for egress from Iot Hub.
:param pulumi.Input[Sequence[pulumi.Input['SharedAccessSignatureAuthorizationRuleArgs']]] authorization_policies: The shared access policies you can use to secure a connection to the IoT hub.
:param pulumi.Input['CloudToDevicePropertiesArgs'] cloud_to_device: The IoT hub cloud-to-device messaging properties.
:param pulumi.Input[str] comments: IoT hub comments.
:param pulumi.Input[bool] disable_device_sas: If true, all device(including Edge devices but excluding modules) scoped SAS keys cannot be used for authentication.
:param pulumi.Input[bool] disable_local_auth: If true, SAS tokens with Iot hub scoped SAS keys cannot be used for authentication.
:param pulumi.Input[bool] disable_module_sas: If true, all module scoped SAS keys cannot be used for authentication.
:param pulumi.Input[bool] enable_file_upload_notifications: If True, file upload notifications are enabled.
:param pulumi.Input[Mapping[str, pulumi.Input['EventHubPropertiesArgs']]] event_hub_endpoints: The Event Hub-compatible endpoint properties. The only possible keys to this dictionary is events. This key has to be present in the dictionary while making create or update calls for the IoT hub.
:param pulumi.Input[Union[str, 'Capabilities']] features: The capabilities and features enabled for the IoT hub.
:param pulumi.Input[Sequence[pulumi.Input['IpFilterRuleArgs']]] ip_filter_rules: The IP filter rules.
:param pulumi.Input[Mapping[str, pulumi.Input['MessagingEndpointPropertiesArgs']]] messaging_endpoints: The messaging endpoint properties for the file upload notification queue.
:param pulumi.Input[str] min_tls_version: Specifies the minimum TLS version to support for this hub. Can be set to "1.2" to have clients that use a TLS version below 1.2 to be rejected.
:param pulumi.Input['NetworkRuleSetPropertiesArgs'] network_rule_sets: Network Rule Set Properties of IotHub
:param pulumi.Input[Sequence[pulumi.Input['PrivateEndpointConnectionArgs']]] private_endpoint_connections: Private endpoint connections created on this IotHub
:param pulumi.Input[Union[str, 'PublicNetworkAccess']] public_network_access: Whether requests from Public Network are allowed
:param pulumi.Input[bool] restrict_outbound_network_access: If true, egress from IotHub will be restricted to only the allowed FQDNs that are configured via allowedFqdnList.
:param pulumi.Input['RoutingPropertiesArgs'] routing: The routing related properties of the IoT hub. See: https://docs.microsoft.com/azure/iot-hub/iot-hub-devguide-messaging
:param pulumi.Input[Mapping[str, pulumi.Input['StorageEndpointPropertiesArgs']]] storage_endpoints: The list of Azure Storage endpoints where you can upload files. Currently you can configure only one Azure Storage account and that MUST have its key as $default. Specifying more than one storage account causes an error to be thrown. Not specifying a value for this property when the enableFileUploadNotifications property is set to True, causes an error to be thrown.
"""
if allowed_fqdn_list is not None:
pulumi.set(__self__, "allowed_fqdn_list", allowed_fqdn_list)
if authorization_policies is not None:
pulumi.set(__self__, "authorization_policies", authorization_policies)
if cloud_to_device is not None:
pulumi.set(__self__, "cloud_to_device", cloud_to_device)
if comments is not None:
pulumi.set(__self__, "comments", comments)
if disable_device_sas is not None:
pulumi.set(__self__, "disable_device_sas", disable_device_sas)
if disable_local_auth is not None:
pulumi.set(__self__, "disable_local_auth", disable_local_auth)
if disable_module_sas is not None:
pulumi.set(__self__, "disable_module_sas", disable_module_sas)
if enable_file_upload_notifications is not None:
pulumi.set(__self__, "enable_file_upload_notifications", enable_file_upload_notifications)
if event_hub_endpoints is not None:
pulumi.set(__self__, "event_hub_endpoints", event_hub_endpoints)
if features is not None:
pulumi.set(__self__, "features", features)
if ip_filter_rules is not None:
pulumi.set(__self__, "ip_filter_rules", ip_filter_rules)
if messaging_endpoints is not None:
pulumi.set(__self__, "messaging_endpoints", messaging_endpoints)
if min_tls_version is not None:
pulumi.set(__self__, "min_tls_version", min_tls_version)
if network_rule_sets is not None:
pulumi.set(__self__, "network_rule_sets", network_rule_sets)
if private_endpoint_connections is not None:
pulumi.set(__self__, "private_endpoint_connections", private_endpoint_connections)
if public_network_access is not None:
pulumi.set(__self__, "public_network_access", public_network_access)
if restrict_outbound_network_access is not None:
pulumi.set(__self__, "restrict_outbound_network_access", restrict_outbound_network_access)
if routing is not None:
pulumi.set(__self__, "routing", routing)
if storage_endpoints is not None:
pulumi.set(__self__, "storage_endpoints", storage_endpoints)
@property
@pulumi.getter(name="allowedFqdnList")
def allowed_fqdn_list(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
"""
List of allowed FQDNs(Fully Qualified Domain Name) for egress from Iot Hub.
"""
return pulumi.get(self, "allowed_fqdn_list")
@allowed_fqdn_list.setter
def allowed_fqdn_list(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
pulumi.set(self, "allowed_fqdn_list", value)
@property
@pulumi.getter(name="authorizationPolicies")
def authorization_policies(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['SharedAccessSignatureAuthorizationRuleArgs']]]]:
"""
The shared access policies you can use to secure a connection to the IoT hub.
"""
return pulumi.get(self, "authorization_policies")
@authorization_policies.setter
def authorization_policies(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['SharedAccessSignatureAuthorizationRuleArgs']]]]):
pulumi.set(self, "authorization_policies", value)
@property
@pulumi.getter(name="cloudToDevice")
def cloud_to_device(self) -> Optional[pulumi.Input['CloudToDevicePropertiesArgs']]:
"""
The IoT hub cloud-to-device messaging properties.
"""
return pulumi.get(self, "cloud_to_device")
@cloud_to_device.setter
def cloud_to_device(self, value: Optional[pulumi.Input['CloudToDevicePropertiesArgs']]):
pulumi.set(self, "cloud_to_device", value)
@property
@pulumi.getter
def comments(self) -> Optional[pulumi.Input[str]]:
"""
IoT hub comments.
"""
return pulumi.get(self, "comments")
@comments.setter
def comments(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "comments", value)
@property
@pulumi.getter(name="disableDeviceSAS")
def disable_device_sas(self) -> Optional[pulumi.Input[bool]]:
"""
If true, all device(including Edge devices but excluding modules) scoped SAS keys cannot be used for authentication.
"""
return pulumi.get(self, "disable_device_sas")
@disable_device_sas.setter
def disable_device_sas(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, "disable_device_sas", value)
@property
@pulumi.getter(name="disableLocalAuth")
def disable_local_auth(self) -> Optional[pulumi.Input[bool]]:
"""
If true, SAS tokens with Iot hub scoped SAS keys cannot be used for authentication.
"""
return pulumi.get(self, "disable_local_auth")
@disable_local_auth.setter
def disable_local_auth(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, "disable_local_auth", value)
@property
@pulumi.getter(name="disableModuleSAS")
def disable_module_sas(self) -> Optional[pulumi.Input[bool]]:
"""
If true, all module scoped SAS keys cannot be used for authentication.
"""
return pulumi.get(self, "disable_module_sas")
@disable_module_sas.setter
def disable_module_sas(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, "disable_module_sas", value)
@property
@pulumi.getter(name="enableFileUploadNotifications")
def enable_file_upload_notifications(self) -> Optional[pulumi.Input[bool]]:
"""
If True, file upload notifications are enabled.
"""
return pulumi.get(self, "enable_file_upload_notifications")
@enable_file_upload_notifications.setter
def enable_file_upload_notifications(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, "enable_file_upload_notifications", value)
@property
@pulumi.getter(name="eventHubEndpoints")
def event_hub_endpoints(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input['EventHubPropertiesArgs']]]]:
"""
The Event Hub-compatible endpoint properties. The only possible keys to this dictionary is events. This key has to be present in the dictionary while making create or update calls for the IoT hub.
"""
return pulumi.get(self, "event_hub_endpoints")
@event_hub_endpoints.setter
def event_hub_endpoints(self, value: Optional[pulumi.Input[Mapping[str, pulumi.Input['EventHubPropertiesArgs']]]]):
pulumi.set(self, "event_hub_endpoints", value)
@property
@pulumi.getter
def features(self) -> Optional[pulumi.Input[Union[str, 'Capabilities']]]:
"""
The capabilities and features enabled for the IoT hub.
"""
return pulumi.get(self, "features")
@features.setter
def features(self, value: Optional[pulumi.Input[Union[str, 'Capabilities']]]):
pulumi.set(self, "features", value)
@property
@pulumi.getter(name="ipFilterRules")
def ip_filter_rules(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['IpFilterRuleArgs']]]]:
"""
The IP filter rules.
"""
return pulumi.get(self, "ip_filter_rules")
@ip_filter_rules.setter
def ip_filter_rules(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['IpFilterRuleArgs']]]]):
pulumi.set(self, "ip_filter_rules", value)
@property
@pulumi.getter(name="messagingEndpoints")
def messaging_endpoints(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input['MessagingEndpointPropertiesArgs']]]]:
"""
The messaging endpoint properties for the file upload notification queue.
"""
return pulumi.get(self, "messaging_endpoints")
@messaging_endpoints.setter
def messaging_endpoints(self, value: Optional[pulumi.Input[Mapping[str, pulumi.Input['MessagingEndpointPropertiesArgs']]]]):
pulumi.set(self, "messaging_endpoints", value)
@property
@pulumi.getter(name="minTlsVersion")
def min_tls_version(self) -> Optional[pulumi.Input[str]]:
"""
Specifies the minimum TLS version to support for this hub. Can be set to "1.2" to have clients that use a TLS version below 1.2 to be rejected.
"""
return pulumi.get(self, "min_tls_version")
@min_tls_version.setter
def min_tls_version(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "min_tls_version", value)
@property
@pulumi.getter(name="networkRuleSets")
def network_rule_sets(self) -> Optional[pulumi.Input['NetworkRuleSetPropertiesArgs']]:
"""
Network Rule Set Properties of IotHub
"""
return pulumi.get(self, "network_rule_sets")
@network_rule_sets.setter
def network_rule_sets(self, value: Optional[pulumi.Input['NetworkRuleSetPropertiesArgs']]):
pulumi.set(self, "network_rule_sets", value)
@property
@pulumi.getter(name="privateEndpointConnections")
def private_endpoint_connections(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['PrivateEndpointConnectionArgs']]]]:
"""
Private endpoint connections created on this IotHub
"""
return pulumi.get(self, "private_endpoint_connections")
@private_endpoint_connections.setter
def private_endpoint_connections(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['PrivateEndpointConnectionArgs']]]]):
pulumi.set(self, "private_endpoint_connections", value)
@property
@pulumi.getter(name="publicNetworkAccess")
def public_network_access(self) -> Optional[pulumi.Input[Union[str, 'PublicNetworkAccess']]]:
"""
Whether requests from Public Network are allowed
"""
return pulumi.get(self, "public_network_access")
@public_network_access.setter
def public_network_access(self, value: Optional[pulumi.Input[Union[str, 'PublicNetworkAccess']]]):
pulumi.set(self, "public_network_access", value)
@property
@pulumi.getter(name="restrictOutboundNetworkAccess")
def restrict_outbound_network_access(self) -> Optional[pulumi.Input[bool]]:
"""
If true, egress from IotHub will be restricted to only the allowed FQDNs that are configured via allowedFqdnList.
"""
return pulumi.get(self, "restrict_outbound_network_access")
@restrict_outbound_network_access.setter
def restrict_outbound_network_access(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, | |
<reponame>evenmarbles/rlpy<gh_stars>1-10
from __future__ import division, print_function, absolute_import
# noinspection PyUnresolvedReferences
from six.moves import range
from abc import ABCMeta, abstractmethod
import numpy as np
from ...optimize.algorithms import EM
from ...auxiliary.array import normalize
from ...cluster.vq import kmeans
from ...stats import multivariate_normal, multivariate_student, conditional_normal, conditional_student
from ...stats import canonize_labels, normalize_logspace, shrink_cov, randpd, stacked_randpd
__all__ = ['MixtureModel', 'DiscreteMM', 'GMM', 'StudentMM']
def _process_parameters(ncomponents, mix_prior=None, mix_weight=None):
"""
"""
if mix_prior is None:
mix_prior = 2
mix_prior = np.asarray(mix_prior, dtype=float)
if mix_prior.ndim == 0:
m = mix_prior
mix_prior = m * np.ones(ncomponents)
if mix_prior.ndim > 1:
raise ValueError("Array 'mix_prior' must be at most one-dimensional,"
" but mix_prior.ndim = %d" % mix_prior.ndim)
if mix_prior.shape[0] != ncomponents:
raise ValueError("Array 'mix_prior' must have %d elements,"
" but mix_prior.shape[0] = %d" % (ncomponents, mix_prior.shape[0]))
if mix_weight is not None:
if mix_weight.ndim > 1:
raise ValueError("Array 'mix_weight' must be at most one-dimensional,"
" but mix_weight.ndim = %d" % mix_weight.ndim)
if mix_weight.shape[0] != ncomponents:
raise ValueError("Array 'mix_weight' must have %d elements,"
" but mix_weight.shape[0] = %d" % (ncomponents, mix_weight.shape[0]))
return ncomponents, mix_prior, mix_weight
# noinspection PyAbstractClass
class MixtureModel(EM):
"""Mixture model base class.
Representation of a mixture model probability distribution.
This class allows for easy evaluation of, sampling from, and
maximum-likelihood estimation of the parameters of a distribution.
Parameters
----------
ncomponents : int, optional
Number of mixture components. Default is 1.
prior : normal_invwishart, optional
A :data:`.normal_invwishart` distribution.
mix_prior : float or array_like, shape (`ncomponents`,), optional
Prior mixture probabilities.
mix_weight : array_like, shape (`ncomponents`,), optional
Mixture weights.
n_iter : int, optional
Number of EM iterations to perform. Default is 100.
thresh : float, optional
Convergence threshold. EM iterations will stop when average
gain in log-likelihood is below this threshold. Default is 1e-4.
verbose : bool, optional
Controls if debug information is printed to the console. Default
is False.
Attributes
----------
ncomponents : int
Number of mixture components.
dim : int
Dimensionality of the each component.
prior : normal_invwishart
A :data:`.normal_invwishart` distribution.
mix_prior : array_like, shape (`ncomponents`,)
Prior mixture probabilities.
mix_weight : array_like, shape (`ncomponents`,)
Mixture weights.
cond_proba : cond_rv_frozen
Conditional probability distribution.
n_iter : int
Number of EM iterations to perform.
thresh : float
Convergence threshold.
verbose : bool
Controls if debug information is printed to the console.
Examples
--------
>>> from mlpy.stats.models.mixture import GMM
>>> m = GMM()
.. note::
Adapted from Matlab:
| Project: `Probabilistic Modeling Toolkit for Matlab/Octave <https://github.com/probml/pmtk3>`_.
| Copyright (2010) <NAME> and <NAME>
| License: `MIT <https://github.com/probml/pmtk3/blob/5fefd068a2e84ae508684d3e4750bd72a4164ba0/license.txt>`_
"""
__metaclass__ = ABCMeta
def __init__(self, ncomponents=1, prior=None, mix_prior=None, mix_weight=None, n_iter=None,
thresh=None, verbose=None):
super(MixtureModel, self).__init__(n_iter, thresh, verbose)
self.dim = 1
self.cond_proba = None
self.prior = prior
self.ncomponents, self.mix_prior, self.mix_weight = _process_parameters(ncomponents, mix_prior, mix_weight)
@abstractmethod
def sample(self, size=1):
"""Generate random samples from the model.
Parameters
----------
size : int, optional
Number of samples to generate. Default is 1.
Returns
-------
x : array_like, shape (`size`, `dim`)
List of samples
Raises
------
NotImplementedError
If the child class does not implement this function.
"""
raise NotImplementedError
@abstractmethod
def score_samples(self, x):
"""Return the per-sample likelihood of the data under the model.
Compute the log probability of x under the model and
return the posterior distribution (responsibilities) of each
mixture component for each element of x.
Parameters
----------
x : array_like, shape (`size`, `dim`)
List of `dim`-dimensional data points. Each row
corresponds to a single data point.
Returns
-------
responsibilities : array_like, shape (`size`, `ncomponents`)
Posterior probabilities of each mixture component for each
observation.
loglik : array_like, shape (size,)
Log probabilities of each data point in `x`.
Raises
------
NotImplementedError
If the child class does not implement this function.
"""
raise NotImplementedError
# noinspection PyUnusedLocal
def score(self, x, y=None):
"""Compute the log probability under the model.
Parameters
----------
x : array_like, shape (size, dim)
List of dim-dimensional data points. Each row
corresponds to a single data point.
y : Not used.
Returns
-------
logp : array_like, shape (`size`,)
Log probabilities of each data point in `x`.
"""
_, logp = self.score_samples(x)
return logp
def predict(self, x):
"""Predict label for data.
Parameters
----------
x : array_like, shape (`size`, `dim`)
Returns
-------
C : array, shape = (`size`,)
"""
responsibilities, _ = self.score_samples(x)
return responsibilities.argmax(axis=1)
def predict_proba(self, x):
"""
Predict posterior probability of data under the model.
Parameters
----------
x : array_like, shape (`size`, `dim`)
Returns
-------
responsibilities : array_like, shape = (`nsamples`, `ncomponents`)
Returns the probability of the sample for each Gaussian
(state) in the model.
"""
responsibilities, logp = self.score_samples(x)
return responsibilities
def fit(self, x, n_init=1):
"""Fit the mixture model from the data `x`.
Estimate model parameters with the expectation-maximization
algorithm.
Parameters
----------
x : array_like, shape (`n`, `dim`)
List of dim-dimensional data points. Each row
corresponds to a single data point.
n_init : int, optional
Number of random restarts to avoid a local minimum.
Default is 1.
"""
self.dim = x.shape[1]
return self._em(x, n_init=n_init)
def _compute_mix_prior(self):
"""
Compute the weighted mixture prior probabilities.
Returns
-------
float :
The weighted mixture priors.
"""
if np.all(self.mix_prior == 1):
return 0
return np.dot(np.log(self.mix_weight).T, (self.mix_prior - 1))
def _estep(self, x):
mix_weight, ll = self.score_samples(x)
self.cond_proba.expected_sufficient_statistics(x, mix_weight)
loglik = np.sum(ll) + self.cond_proba.logprior() + self._compute_mix_prior()
return loglik
def _mstep(self):
self.cond_proba.fit()
self.mix_weight = normalize(self.cond_proba.wsum + self.mix_prior - 1)
class DiscreteMM(MixtureModel):
"""Discrete mixture model class.
Representation of a discrete mixture model probability distribution.
This class allows for easy evaluation of, sampling from, and
maximum-likelihood estimation of the parameters of a distribution.
Parameters
----------
ncomponents : int, optional
Number of mixture components. Default is 1.
prior : normal_invwishart, optional
A :data:`.normal_invwishart` distribution.
mix_prior : float or array_like, shape (`ncomponents`,), optional
Prior mixture probabilities.
mix_weight : array_like, shape (`ncomponents`,), optional
Mixture weights.
transmat : array_like, shape (`ncomponents`, `ncomponents`), optional
Matrix of transition probabilities between states.
alpha : float
Value of Dirichlet prior on observations. Default is 1.1 (1=MLE)
n_iter : int, optional
Number of EM iterations to perform. Default is 100.
thresh : float, optional
Convergence threshold. EM iterations will stop when average
gain in log-likelihood is below this threshold. Default is 1e-4.
verbose : bool, optional
Controls if debug information is printed to the console. Default
is False.
Attributes
----------
ncomponents : int
Number of mixture components.
dim : int
Dimensionality of the each component.
prior : normal_invwishart
A :data:`.normal_invwishart` distribution.
mix_prior : array_like, shape (`ncomponents`,)
Prior mixture probabilities.
mix_weight : array_like, shape (`ncomponents`,)
Mixture weights.
transmat : array_like, shape (`ncomponents`, `ncomponents`)
Matrix of transition probabilities between states.
alpha : float
Value of Dirichlet prior on observations.
cond_proba : cond_rv_frozen
Conditional probability distribution.
n_iter : int
Number of EM iterations to perform.
thresh : float
Convergence threshold.
verbose : bool
Controls if debug information is printed to the console.
Examples
--------
>>> from mlpy.stats.models.mixture import DiscreteMM
>>> m = DiscreteMM()
.. note::
Adapted from Matlab:
| Project: `Probabilistic Modeling Toolkit for Matlab/Octave <https://github.com/probml/pmtk3>`_.
| Copyright (2010) <NAME> and <NAME>
| License: `MIT <https://github.com/probml/pmtk3/blob/5fefd068a2e84ae508684d3e4750bd72a4164ba0/license.txt>`_
"""
def __init__(self, ncomponents=1, prior=None, mix_prior=None, mix_weight=None, transmat=None, alpha=None,
n_iter=None, thresh=None, verbose=None):
super(DiscreteMM, self).__init__(ncomponents, prior, mix_prior, mix_weight, n_iter, thresh, verbose)
self.transmat = transmat
self.alpha = alpha if alpha is not None else 1.1
def sample(self, size=1):
"""Generate random samples from the model.
Parameters
----------
size : int, optional
Number of samples to generate. Default is 1.
Returns
-------
x : array_like, shape (`size`, `dim`)
List of samples
Raises
------
NotImplementedError
If the child class does not implement this function.
"""
raise NotImplementedError
def score_samples(self, x):
"""Return the per-sample likelihood of the data under the model.
Compute the log probability of x under the model and
return the posterior distribution (responsibilities) of each
mixture component for each element of x.
Parameters
----------
x : array_like, shape (`size`, `dim`)
List of `dim`-dimensional data points. Each row
corresponds to a single data point.
Returns
-------
responsibilities : array_like, shape (`size`, `ncomponents`)
Posterior probabilities of each mixture component for each
observation.
loglik : array_like, shape (size,)
Log probabilities of each data point in `x`.
"""
n, dim = x.shape
logp = | |
from __future__ import annotations
import os
from tkinter import Variable
from typing import Union
import numpy as np
from survey_stats.functions import *
class Data_Types:
cross = 'cross'
time = 'time'
panel = 'panel'
class Data:
"""
type: 'cross', 'time', 'panel'
"""
def __init__(self, type:str='cross', values:dict[dict]={}) -> None:
if type == Data_Types.cross or type == Data_Types.time \
or type == Data_Types.panel:
self.type = type
else:
self.type = None
self.values = values
def __str__(self) -> str:
#region columns width
width = 0
vars = ['index']
if len(self.variables())>5:
vars.extend(self.variables()[:3])
vars.append('...')
vars.extend(self.variables()[-2:])
else:
vars.extend(self.variables())
if len(self.index()) > 15:
inds = self.index()[:5]
inds.append('⁝')
inds.extend(self.index()[-3:])
else:
inds = self.index()
for i in inds:
for var in vars:
if i != '⁝':
if var != 'index' and var != '...' :
val = str(self.values[var][i])
else:
val = var
else:
val = '⁝'
if is_numeric(val):
val = str(round(val,4))
if width<len(str(var)):
width = len(str(var))
if width<len(val):
width = len(val)
if width<len(str(i)):
width = len(str(i))
width = min(width, 20)
#endregion
#region title
title = 'type: ' + self.type + '\n'
title += ' ' + '-'*(len(vars)*(width+1)-1) + '\n'
title += ''
for var in vars:
title += '|' + str(var)[:20].center(width)
title += '|\n'
title += ' ' + '-'*(len(vars)*(width+1)-1) + '\n'
#endregion
#region rows
rows = title
for i in inds:
for var in vars:
if i != '⁝':
if var == 'index':
rows += '|' + str(i)[:20].center(width)
elif var == '...':
rows += '|' + '...'.center(width)
else:
x = self.values[var][i]
if is_numeric(x):
x = str(round(x,4))
rows += '|' + str(x)[:20].center(width)
else:
if var == '...':
rows += '|' +str('˙·.')[:20]
else:
rows += '|' + str(i)[:20].center(width)
rows += '|\n'
rows += ' ' + '-'*(len(vars)*(width+1)-1) + '\n'
rows += f'{len(self.index())} × {len(self.variables())}'
#endregion
return rows
def variables(self) -> None:
return [var for var in self.values]
def index(self, without_checking:bool=True) -> None:
if without_checking:
for v in self.values.keys():
return list(self.values[v].keys())
else:
start, ind = True, []
for v in self.values.keys():
if start:
ind = [i for i in self.values[v].keys()]
start = False
else:
ind2 = [i for i in self.values[v].keys()]
if ind != ind2:
raise ValueError("Error! index aren't same for all variables.")
return ind
def set_index(self, var:str, drop_var:bool=True) -> None:
new_dict = {}
if var in self.values.keys():
for v in self.values.keys():
if not (v == var and drop_var):
new_dict[v] = {self.values[var][i]:self.values[v][i] for i in self.values[var].keys()}
self.values = new_dict
else:
raise ValueError(f"Error! {var} is not in variables of data.")
def set_names(self, new_names:list[str]=[], old_names:list[str]=[]):
new_dict,i = {},0
for v in self.values.keys():
if (v in old_names) or old_names==[]:
new_dict[new_names[i]] = self.values[v]
i+=1
else:
new_dict[v] = self.values[v]
self.values = new_dict
def select_variables(self, vars:list[str]=[]) -> Data:
if type(vars) != list:
raise ValueError(f"Error! {vars} is not a list of variables.")
if vars!=[]:
new_dict = {}
for var in vars:
if var in self.values.keys():
new_dict[var] = self.values[var]
return Data(self.type,new_dict)
def select_index(self,index:list)->Data:
if type(index) != list:
raise ValueError(f"Error! {index} is not a list of index.")
vars = self.variables()
res_dict = {}
for var in vars:
values = {}
for i in index:
if i in self.index():
values[i] = self.values[var][i]
res_dict[var] = values
return Data(self.type, res_dict)
def drop(self, var_names:list[str]):
if type(var_names) != list:
raise ValueError(f"Error! {var_names} is not a list of variables.")
for var in var_names:
if var in self.values.keys():
self.values.pop(var)
def add_a_dummy(self, condition:list[list[tuple]], add_to_data:bool=False)->None:
# condition = [('sex','=','female'), ('age','<',20)]
# names = sex=female_and_age<20
dummy_values = {}
for i in self.index():
satisfied, is_nan = True, False
for var, sign,val in condition:
try:
nan = np.isnan(self.values[var][i])
except:
nan = False
if not nan:
satisfied = satisfied and check_condition(self.values[var][i], sign, val)
else:
is_nan = True
break
if satisfied:
dummy_values[i] = 1
elif not is_nan:
dummy_values[i] = 0
else:
dummy_values[i] = np.nan
start = True
for var, sign, val in condition:
if start:
dummy_name = var + sign + str(val)
start = False
else:
dummy_name += '_and_' + var + sign + str(val)
res = {}
res[dummy_name] = dummy_values
if add_to_data:
self.values.update(res)
return Data(self.type, res)
def add_dummies(self, conditions:list[dict], add_to_data:bool=False)->None:
# conditions = [[('sex','=','female'),('age','<',20)],[()],[()],...,[()]]
# |___________a condition_____________| |__| |__| |__|
# names = sex=female_age<20
values = {}
for cond in conditions:
values.update(self.add_a_dummy(cond, add_to_data).values)
return Data(self.type, values)
def dropna(self, vars:list[str]=[])->None:
for i in self.index():
is_nan = False
vars = vars if vars != [] else self.variables()
for var in vars:
if var in self.variables():
is_nan = False
try:
if is_numeric(self.values[var][i]):
is_nan = np.isnan(self.values[var][i])
if is_nan:
is_nan = True
break
except:
pass
if is_nan:
for var in self.values.keys():
try:
self.values[var].pop(i)
except:
pass
def to_numpy(self, vars:list[str]=[])->None:
self.dropna(vars)
lst = []
for i in self.index():
in_lst = []
for var in self.values.keys():
if (var in vars) or (vars==[]):
if is_numeric(self.values[var][i]):
in_lst.append(self.values[var][i])
lst.append(in_lst)
return np.array(lst)
def add_data(self, new_data:Data=None)->Data:
if self.index() == None:
new_index = new_data.index()
indexes = new_index
else:
old_index = self.index()
indexes = set(old_index)
if new_data.values != {}:
new_index = set(new_data.index())-indexes
indexes.update(new_index)
new_index = list(new_index)
else:
new_index = []
indexes = list(indexes)
indexes.sort()
old_vars = self.variables()
vars = set(old_vars)
new_vars = new_data.variables()
vars.update(set(new_vars))
vars = list(vars)
for var in vars:
if not var in old_vars:
self.values[var] = dict(zip(indexes,[np.nan]*len(indexes)))
if var in new_vars:
self.values[var].update(new_data.values[var])
elif new_index != []:
new_vals = dict(zip(new_index,[np.nan]*len(new_index)))
self.values[var].update(new_vals)
def transpose(self)->Data:
values_t = {}
for var, ival in self.values.items():
for i, val in ival.items():
if i in values_t.keys():
values_t[i][var] = val
else:
values_t[i] = {var:val}
return Data(self.type, values_t)
@classmethod
def read_csv(cls, path_file:str, data_type:str='cross', na:any='')->Data:
with open(path_file,'r') as f:
lines = f.readlines()
n = len(lines)
values, vars = {}, []
for j, var in enumerate(lines[0].split(',')):
var = var.replace('ï»؟','').replace('\n','')
vars.append(var)
values[var] = {}
for i in range(1,n):
for j, val in enumerate(lines[i].split(',')):
                val = val.replace('ï»¿','').replace('\n','')
if val == na:
values[vars[j]][i] = np.nan
elif is_numeric_str(val):
values[vars[j]][i] = float(val)
else:
# print(val)
values[vars[j]][i] = val
return cls(data_type, values)
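    # Hypothetical round-trip sketch ('survey.csv' is an illustrative path,
    # not part of this module):
    #   d = Data.read_csv('survey.csv', data_type='cross', na='')
    #   d.dropna()
    #   d.to_csv('survey_clean.csv')  # prompts before overwriting an existing file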
def to_csv(self, path_file:str, na:str=''):
if os.path.exists(path_file):
res = input(f"'{path_file}' exists, do you want the new file to replace it? (y/n) ")
if res == 'y':
os.remove(path_file)
else:
new_name = input('please, enter a new name without the path: ')
path_file = path_file.replace(path_file.split('\\')[-1],new_name)
with open(path_file, 'a') as f:
title = 'index'
for var in self.variables():
title += ',' + str(var)
f.write(title + '\n')
for i in self.index():
line = str(i)
for var in self.variables():
is_nan = False
if is_numeric(self.values[var][i]):
if np.isnan(self.values[var][i]):
is_nan = True
if is_nan:
line += ',' + na
else:
line += ',' + str(self.values[var][i])
f.write(line + '\n')
def __len__(self):
return len(self.index())
def add_trend(self):
j = 0
self.values['trend'] = {}
for i in self.index():
self.values['trend'][i] = j
j += 1
class Sample:
def __init__(self, data: Data, index:list=[], name:str=None, weights:str='1') -> None:
self.data = data
if index == []:
self.index = data.index()
else:
self.index = index
        if not set(self.index).issubset(set(data.index())):
            raise ValueError('sample index is not a subset of data index')
self.name = name
self.weights = weights
def get_data(self) -> Data:
res = {}
for var in self.data.variables():
res[var] = {}
for i in self.index:
if i in self.data.index():
res[var][i] = self.data.values[var][i]
else:
raise ValueError(f"index {i} isn't in data index")
return Data(self.data.type, res)
split_methods = ['random', 'start', 'end']
def split(self, ratio: float, names: list, method: str = 'random') -> list[Sample]:
if method == 'random':
if self.weights == '1':
S1 = np.random.choice(self.index, int(ratio*len(self.index)), replace=False)
else:
ws = sum([w for i, w in self.data.values[self.weights].items() if i in self.index])
weights = [w/ws for i, w in self.data.values[self.weights].items() if i in self.index]
S1 = np.random.choice(self.index, int(ratio*len(self.index)), p=weights, replace=False)
S2 = list(set(self.index)-set(S1))
elif method == 'start':
n = int(ratio * len(self.index))
S1, S2 = self.index[:n], self.index[n:]
elif method == 'end':
n = int((1-ratio) * len(self.index))
S1, S2 = self.index[:n], self.index[n:]
return Sample(self.data, S1, names[0], self.weights), Sample(self.data, S2, names[1], self.weights)
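    # Hypothetical usage sketch (the 'train'/'test' names are illustrative):
    #   s = Sample(d, name='all')
    #   train, test = s.split(0.8, ['train', 'test'], method='random')
    #   # method='start' puts the first int(0.8*len) indices into `train`;
    #   # method='end' puts the trailing int(0.8*len) indices into `test`.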
def get_weights(self, path_file_csv:str)-> None:
# vars_conditions:list[list], totals:list[Union[int,float]]
groups = Data.read_csv(path_file_csv)
groups_n = len(groups.index())
set_index, set_totals = False, False
for var in groups.variables():
strs, nums = 0, 0
for i in groups.index():
if is_numeric(groups.values[var][i]):
nums += 1
else:
strs += 1
if strs == groups_n:
groups.set_index(var)
set_index = True
elif nums == groups_n:
totals = list(groups.values[var].values())
                set_totals = True
# -*- coding: iso-8859-1 -*-
# Copyright (C) 2004-2005 <NAME> and <NAME>
# Copyright (C) 2012-2014 <NAME>
from re import compile, escape, IGNORECASE, sub
from os.path import splitext
from ..scraper import _BasicScraper
from ..helpers import indirectStarter, bounceStarter
from ..util import tagre, getPageContent
class SabrinaOnline(_BasicScraper):
description = u'Skunks, computers and porn'
url = 'http://sabrina-online.com/'
imageSearch = compile(tagre("a", "href", r'(strips/[^"]*)'))
prevSearch = compile(tagre("a", "href", r"(\d\d\d\d-\d\d.html)") +
tagre("img", "src", "b_back.gif"))
help = 'Index format: n (unpadded)'
adult = True
multipleImagesPerStrip = True
@classmethod
def starter(cls):
"""Pick last one in a list of archive pages."""
archive = cls.url + 'archive.html'
data = getPageContent(archive, cls.session)[0]
search = compile(tagre("a", "href", r"(\d\d\d\d-\d\d.html)"))
archivepages = search.findall(data)
return cls.url + archivepages[-1]
class SafelyEndangered(_BasicScraper):
description = u''
url = 'http://www.safelyendangered.com/'
stripUrl = url + 'comic/%s'
firstStripUrl = stripUrl % 'ignored'
imageSearch = compile(tagre("img", "src", r'(http://www\.safelyendangered\.com/wp-content/uploads/\d+/\d+/[^"]+\.[a-z]+).*'))
prevSearch = compile(tagre("a", "href", r'([^"]+)', after="navi navi-prev"))
textSearch = compile(tagre("img", "title", r'([^"]+)', before=r'http://www\.safelyendangered\.com/wp-content/uploads'))
help = 'Index format: yyyy/mm/stripname'
class SailorsunOrg(_BasicScraper):
url = 'http://sailorsun.org/'
rurl = escape(url)
stripUrl = url + '?p=%s'
firstStripUrl = stripUrl % '21'
imageSearch = compile(tagre("img", "src", r'(%scomics/[^"]+)' % rurl))
prevSearch = compile(tagre("a", "href", r'(%s\?p=\d+)' % rurl, after="prev"))
help = 'Index format: n (unpadded)'
class SamAndFuzzy(_BasicScraper):
description = u"Serial about a cab driver and his bear-like friend by <NAME>. Offers a reader's guide, forum, and frequently asked questions."
url = 'http://www.samandfuzzy.com/'
stripUrl = 'http://samandfuzzy.com/%s'
firstStripUrl = stripUrl % '1'
imageSearch = compile(r'(/comics/.+?)" alt')
prevSearch = compile(r'"><a href="(.+?)"><img src="imgint/nav_prev.gif"')
help = 'Index format: nnnn'
class SandraOnTheRocks(_BasicScraper):
url = 'http://www.sandraontherocks.com/'
stripUrl = url + 'strips-sotr/%s'
firstStripUrl = stripUrl % 'start_by_running'
imageSearch = compile(tagre("img", "src", r'([^"]*/comics/[^"]+)'))
prevSearch = compile(tagre("a", "href", r'([^"]*/strips-sotr/[^"]+)', before="cn[id]prev"))
help = 'Index format: name'
class ScandinaviaAndTheWorld(_BasicScraper):
description = u'Scandinavia and the World'
url = 'http://satwcomic.com/'
rurl = escape(url)
stripUrl = url + '%s'
firstStripUrl = stripUrl % 'sweden-denmark-and-norway'
imageSearch = compile(tagre("img", "src", r'(%sart/[^"/]+)' % rurl))
prevSearch = compile(tagre("a", "href", r'(%s[^"/]+)' % rurl)+"\s*"+tagre('span', 'class', 'spritePrevious'))
help = 'Index format: stripname'
class ScaryGoRound(_BasicScraper):
url = 'http://www.scarygoround.com/'
stripUrl = url + '?date=%s'
firstStripUrl = stripUrl % '20090918'
imageSearch = compile(tagre("img", "src", r'(strips/\d+\.png)'))
prevSearch = compile(tagre("a", "href", r'(\?date=\d+)') + "Previous")
help = 'Index format: n (unpadded)'
class ScenesFromAMultiverse(_BasicScraper):
description = u'SFAM Guest Month wraps up today with a contribution by <NAME> of Octopus Pie that is sure to tickle and delight even the grumpiest of codgers.'
url = 'http://amultiverse.com/'
rurl = escape(url)
stripUrl = url + '%s/'
firstStripUrl = stripUrl % '2010/06/14/parenthood'
imageSearch = (
compile(tagre("div", "id", "comic") + r"\s*" +
tagre("img", "src", r'(%swp-content/uploads/\d+/\d+/[^"]+)' % rurl)),
compile(tagre("div", "id", "comic") + r"\s*" + tagre("a", "href", r'[^"]*') +
tagre("img", "src", r'(%swp-content/uploads/\d+/\d+/[^"]+)' % rurl)),
)
prevSearch = compile(tagre("a", "href", r'(%scomic/\d+\d+/\d+/\d+/[^"]+)' % rurl, after="prev"))
help = 'Index format: yyyy/mm/dd/stripname'
class SchlockMercenary(_BasicScraper):
description = u'2 days ago ... Travel the galaxy. Meet new and fascinating life-forms.'
url = 'http://www.schlockmercenary.com/'
stripUrl = url + '%s'
firstStripUrl = stripUrl % '2000-06-12'
imageSearch = compile(tagre("img", "src", r'(http://static\.schlockmercenary\.com/comics/[^"]+)'))
multipleImagesPerStrip = True
prevSearch = compile(tagre("a", "href", r'(/\d+-\d+-\d+)', quote="'", after="nav-previous"))
help = 'Index format: yyyy-mm-dd'
class SchoolBites(_BasicScraper):
url = 'http://schoolbites.net/'
stripUrl = url + 'd/%s.html'
imageSearch = compile(tagre("img", "src", r'(http://cdn\.schoolbites\.net/comics/[^"]+)'))
prevSearch = compile(tagre("a", "href", r'(http://schoolbites\.net/d/\d+\.html)', after="prev"))
help = 'Index format: yyyymmdd'
class Schuelert(_BasicScraper):
url = 'http://www.schuelert.de/'
rurl = escape(url)
stripUrl = url + 'index.php?paged=%s'
firstStripUrl = stripUrl % '5'
imageSearch = compile(tagre("img", "src", r"(%swp-content/[^']+)" % rurl, quote="'"))
prevSearch = compile(tagre("a", "href", r'(%sindex\.php\?paged=\d+)' % rurl) + "«")
multipleImagesPerStrip = True
help = 'Index format: none'
lang = 'de'
class Science(_BasicScraper):
url = 'http://sci-ence.org/'
rurl = escape(url)
stripUrl = url + '%s/'
firstStripUrl = stripUrl % 'periodic-table-element-ass'
prevSearch = compile(tagre("a", "href", r'(%s[^"]+/)' % rurl, after="prev"))
imageSearch = compile(tagre("img", "src", r'(%scomics/\d+-\d+-\d+[^"]+)' % rurl))
help = 'Index format: stripname'
description = u'A comic about science, technology, skepticism, geekery, video games, atheism, and more.'
class SequentialArt(_BasicScraper):
url = 'http://www.collectedcurios.com/sequentialart.php'
stripUrl = url + '?s=%s'
firstStripUrl = stripUrl % '1'
imageSearch = compile(tagre("img", "src", r'([^"]+)', before="strip"))
prevSearch = compile(tagre("a", "href", r'(/sequentialart\.php\?s=\d+)')
+ tagre("img", "src", "Nav_BackOne\.gif"))
help = 'Index format: name'
class SexyLosers(_BasicScraper):
adult = True
url = 'http://www.sexylosers.com/'
stripUrl = url + '%s.html'
imageSearch = compile(r'<img src\s*=\s*"\s*(comics/[\w\.]+?)"', IGNORECASE)
prevSearch = compile(r'<a href="(/\d{3}\.\w+?)"><font color = FFAAAA><<', IGNORECASE)
help = 'Index format: nnn'
starter = indirectStarter(url,
compile(r'SEXY LOSERS <A HREF="(.+?)">Latest SL Comic \(#\d+\)</A>', IGNORECASE))
@classmethod
def namer(cls, imageUrl, pageUrl):
index = pageUrl.split('/')[-1].split('.')[0]
title = imageUrl.split('/')[-1].split('.')[0]
return index + '-' + title
# XXX site has been hacked
class _ShadowGirls(_BasicScraper):
description = u"It's like H.P. Lovecraft meets the Gilmore Girls!"
url = 'http://www.shadowgirlscomic.com/'
stripUrl = url + 'comics/%s'
firstStripUrl = stripUrl % 'book-1/chapter-1-broken-dreams/welcome'
imageSearch = compile(tagre("img", "src", r'([^"]*/comics/[^"]*)'))
prevSearch = compile(tagre("a", "href", r'([^"]*)', after='navi-prev'))
help = 'Index format: custom'
starter = indirectStarter(url, compile(tagre("a", "href", r'([^"]*/comics/[^"]+)')))
class Sheldon(_BasicScraper):
description = u'The story of a software company tycoon billionaire ten-year-old, his grampa, his duck, his pug and a lizard.'
url = 'http://www.sheldoncomics.com/'
rurl = escape(url)
stripUrl = url + 'archive/%s.html'
firstStripUrl = stripUrl % '011130'
imageSearch = compile(tagre("img", "src", r'(http://cdn\.sheldoncomics\.com/strips/[^"]+)'))
prevSearch = compile(tagre("a", "href", r'(%sarchive/\d+\.html)' % rurl, after="sidenav-prev"))
help = 'Index format: yymmdd'
class ShermansLagoon(_BasicScraper):
description = u"Sherman's Lagoon by <NAME>"
url = 'http://shermanslagoon.com/'
stripUrl = url + 'comics/%s'
firstStripUrl = stripUrl % '/december-29-2003/'
imageSearch = compile(tagre("img", "src", r'(http://safr\.kingfeatures\.com/idn/test/zone/xml/content\.php\?file=.+?)'))
prevSearch = compile(r'id="previouscomic" class="button white"><a href="(%scomics/[a-z0-9-]+/)"' % url)
help = 'Index format: monthname-day-year'
@classmethod
def namer(cls, imageUrl, pageUrl):
name = pageUrl.rsplit('/', 3)[2]
if name == "shermanslagoon.com":
import datetime
name = datetime.date.today().strftime("%B-%d-%Y").lower()
# name is monthname-day-year
month, day, year = name.split('-')
return "%s-%s-%s" % (year, month, day)
class Shivae(_BasicScraper):
url = 'http://shivae.net/'
rurl = escape(url)
stripUrl = url + 'blog/%s/'
firstStripUrl = stripUrl % '2007/09/21/09212007'
imageSearch = compile(tagre("img", "src", r'(%swp-content/blogs\.dir/\d+/files/\d+/\d+/[^"]+)' % rurl))
prevSearch = compile(tagre("a", "href", r'(%sblog/[^"]+)' % rurl, after="navi-prev"))
help = 'Index format: yyyy/mm/dd/stripname'
# XXX disallowed by robots.txt
class _Shortpacked(_BasicScraper):
url = 'http://www.shortpacked.com/'
rurl = escape(url)
stripUrl = url + '%s/'
imageSearch = compile(tagre("img", "src", r'(%scomics/[^"]+)' % rurl))
prevSearch = compile(tagre("a", "href", r'(%s\d+/comic/[^"]+)' % rurl, after="prev"))
help = 'Index format: yyyy/comic/book-nn/mm-name1/name2'
class ShotgunShuffle(_BasicScraper):
description = u'adventures of the Seven Buckingham sisters, a fat cat, an irritable roommate, a dirty hippy'
url = 'http://shotgunshuffle.com/'
stripUrl = url + 'comic/%s'
firstStripUrl = stripUrl % 'pilot/'
imageSearch = compile(tagre("img", "src", r'(http://shotgunshuffle.com/wp-content/uploads/\d+/\d+/\d+-[^"]+)'))
prevSearch = compile(tagre("a", "href", r'([^"]+)', after="navi navi-prev"))
help = 'Index format: stripname'
class SinFest(_BasicScraper):
description = u'Strip dealing with contemporary issues and religion. Created by <NAME>.'
name = 'SinFest'
url = 'http://www.sinfest.net/'
rurl = escape(url)
stripUrl = url + 'view.php?date=%s'
firstStripUrl = stripUrl % '2000-01-17'
imageSearch = compile(tagre("img", "src", r'(btphp/comics/[^"]+)'))
prevSearch = compile(tagre("a", "href", r"(view\.php\?date\=\d{4}-\d{2}-\d{2})") +
'\s*\n?\s*<img.*src="../images/prev.gif".*>')
help = 'Index format: yyyy-mm-dd'
# XXX disallowed by robots.txt
class _Sketchesnatched(_BasicScraper):
url = 'http://sketchesnatched.blogspot.com/'
stripUrl = url + 'search?updated-max=%s%%2B01:00&max-results=1'
firstStripUrl = stripUrl % '2011-01-27T08:32:00'
imageSearch = compile(tagre("meta", "content", r"(http://\d+\.bp\.blogspot\.com/[^']+)",
after=r'image_url', quote="'"))
prevSearch = compile(tagre("a", "href", r"(http://sketchesnatched\.blogspot\.[a-z]+/search[^']+)",
before=r"blog-pager-older-link", quote="'"))
help = 'Index format: yyyy-mm-ddThh:mm:ss'
description = u"Artwork by <NAME>"
class SkinDeep(_BasicScraper):
url = 'http://www.skindeepcomic.com/'
stripUrl = url + 'archive/%s/'
imageSearch = compile(r'<span class="webcomic-object[^>]*><img src="([^"]*)"')
prevSearch = compile(tagre("a", "href", r'([^"]+)', after="previous-webcomic-link"))
help = 'Index format: custom'
class SlightlyDamned(_BasicScraper):
url = 'http://www.sdamned.com/'
rurl = escape(url)
stripUrl = url + '%s/'
firstStripUrl = stripUrl % '2004/03/03142004'
imageSearch = compile(tagre("img", "src", r'(%scomics/[^"]+)' % rurl))
prevSearch = compile(tagre("a", "href", r'(%s[^"]+)' % rurl, after="prev"))
help = 'Index format: yyyy/mm/number'
class SluggyFreelance(_BasicScraper):
url = 'http://www.sluggy.com/'
stripUrl = url + 'comics/archives/daily/%s'
imageSearch = compile(r'<img src="(/images/comics/.+?)"')
prevSearch = compile(r'<a href="(.+?)"[^>]+?><span class="ui-icon ui-icon-seek-prev">')
multipleImagesPerStrip = True
help = 'Index format: yymmdd'
class SMBC(_BasicScraper):
description = u"Saturday Morning Breakfast Cereal"
url = 'http://www.smbc-comics.com/'
rurl = escape(url)
stripUrl = url + '?id=%s'
firstStripUrl = stripUrl % '1'
imageSearch = compile(tagre("img", "src", r"(%scomics/\d{8}(?:\w2?|-\d)?\.\w{3})\s*" % rurl, quote="'"))
prevSearch = compile(tagre("a", "href", r'([^"]+)#comic', after="backRollover"))
help = 'Index format: nnnn'
def shouldSkipUrl(self, url, data):
"""Skip promo or missing update pages."""
return url in (
self.stripUrl % '2865',
self.stripUrl % '2653',
self.stripUrl % '2424',
self.stripUrl % '2226',
self.stripUrl % '2069',
self.stripUrl % '1895',
self.stripUrl % '1896',
self.stripUrl % '1589',
)
class SnowFlakes(_BasicScraper):
description = u'Snowflakes - A comic by <NAME>, <NAME> and <NAME>.'
url = 'http://www.snowflakescomic.com/'
stripUrl = url + '?id=%s&sl=%s'
firstStripUrl = stripUrl % ('103', '1')
endOfLife = True
imageSearch = (
compile(tagre("img", "src", r'(comics/[^"]+)')),
compile(tagre("img", | |
from __future__ import print_function, division
import json
import numpy as np
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras import layers
from tensorflow.keras import initializers
from einops.layers.tensorflow import Rearrange
import os
class WGAN_GP(keras.Model):
def __init__(self, rows, columns, loadpath, latent_dim = 128, discriminator_extra_steps=5, gp_weight=10.0):
super(WGAN_GP, self).__init__()
self.rows = rows
self.columns = columns
self.channels = 1
self.img_shape = (self.rows, self.columns, self.channels)
self.latent_dim = latent_dim
self.d_steps = discriminator_extra_steps
self.gp_weight = gp_weight
        self.hidden_units = 800 # the width that `columns` is projected up to
        self.intermediate_units = 3072 # could be renamed to mlp_units
        self.attention_heads = 10 # hidden_units * 3 = 3 * attention_heads * head_dim, so hidden_units must be divisible by attention_heads
self.init_pos_embedding()
self._start_epoch_index = 0
json_file = os.path.join(loadpath, "wgan_gp.json")
if os.path.isfile(json_file):
print("########## Load json from %s ##########" % json_file)
with open(json_file, 'r') as f:
json_str = f.read()
json_data = json.loads(json_str)
self._start_epoch_index = json_data['finished_epochs']
print("########## Json str: %s ##########" % json_str)
# discriminator
self.discriminator = None
if loadpath is not None:
discriminator_path = os.path.join(loadpath, "discriminator")
if os.path.isdir(discriminator_path):
print("########## Load discriminator from %s ##########" % discriminator_path)
self.discriminator = keras.models.load_model(discriminator_path)
if self.discriminator is None:
print("########## Create discriminator ##########")
self.discriminator = self.get_discriminator_model()
self.discriminator.summary()
self.d_optimizer = keras.optimizers.Adam(
learning_rate=0.0002, beta_1=0.5, beta_2=0.9
)
self.d_loss_fn = WGAN_GP.discriminator_loss
# generator
self.generator = None
if loadpath is not None:
generator_path = os.path.join(loadpath, "generator")
if os.path.isdir(generator_path):
print("########## Load generator from %s ##########" % generator_path)
self.generator = keras.models.load_model(generator_path)
if self.generator is None:
print("########## Create generator ##########")
self.generator = self.get_generator_model()
self.generator.summary()
self.g_optimizer = keras.optimizers.Adam(
learning_rate=0.0002, beta_1=0.5, beta_2=0.9
)
self.g_loss_fn = WGAN_GP.generator_loss
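    # Hypothetical training sketch (the shapes, path and epoch count are
    # illustrative; `train_images` is assumed to be a float array of shape
    # [num_samples, rows, columns, 1]):
    #   model = WGAN_GP(rows=600, columns=75, loadpath='./checkpoints')
    #   model.compile()
    #   model.fit(train_images, batch_size=32, epochs=100,
    #             initial_epoch=model.start_epoch_index)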
@property
def start_epoch_index(self):
return self._start_epoch_index
@start_epoch_index.setter
def start_epoch_index(self, value):
        raise ValueError('start_epoch_index cannot be set directly')
def dense_block(self,
x,
units,
activation,
use_bias=True,
use_bn=False,
use_dropout=False,
drop_value=0.2
):
x = layers.Dense(units, activation= activation, use_bias=use_bias)(x)
if use_bn:
x = layers.BatchNormalization()(x)
if use_dropout:
x = layers.Dropout(drop_value)(x)
return x
# ---------------------
# Discriminator
# ---------------------
def conv_block(self,
x,
filters,
activation,
kernel_size=(3, 3),
strides=(1, 1),
padding="same",
use_bias=True,
use_bn=False,
use_dropout=False,
drop_value=0.5,
):
x = layers.Conv2D(
filters, kernel_size, strides=strides, padding=padding, use_bias=use_bias
)(x)
if use_bn:
x = layers.BatchNormalization()(x)
if activation:
x = activation(x)
if use_dropout:
x = layers.Dropout(drop_value)(x)
return x
def get_discriminator_model(self):
img_input = layers.Input(shape=self.img_shape)
# (None, 600, 75, 1)
x = img_input
x = layers.Reshape((self.rows, self.columns))(x)
x = self.dense_block(x, self.columns * 16, layers.LeakyReLU(0.2),
use_bias=True, use_bn=False, use_dropout=True, drop_value=0.2)
x = self.dense_block(x, self.columns * 8, layers.LeakyReLU(0.2),
use_bias=True, use_bn=False, use_dropout=True, drop_value=0.2)
x = self.dense_block(x, self.columns * 4, layers.LeakyReLU(0.2),
use_bias=True, use_bn=False, use_dropout=True, drop_value=0.2)
x = self.dense_block(x, self.columns * 2, layers.LeakyReLU(0.2),
use_bias=True, use_bn=False, use_dropout=True, drop_value=0.2)
x = layers.Flatten()(x)
x = layers.Dropout(0.2)(x)
x = layers.Dense(1)(x)
d_model = keras.models.Model(img_input, x, name="discriminator")
return d_model
@staticmethod
def discriminator_loss(real_img, fake_img):
real_loss = tf.reduce_mean(real_img)
fake_loss = tf.reduce_mean(fake_img)
return fake_loss - real_loss
# ---------------------
# Generator
# ---------------------
def upsample_block(self,
x,
filters,
activation,
kernel_size=(3, 3),
strides=(1, 1),
up_size=(2, 2),
padding="same",
use_bn=False,
use_bias=True,
use_dropout=False,
drop_value=0.3,
):
x = layers.UpSampling2D(up_size)(x)
x = layers.Conv2D(
filters, kernel_size, strides=strides, padding=padding, use_bias=use_bias
)(x)
if use_bn:
x = layers.BatchNormalization()(x)
if activation:
x = activation(x)
if use_dropout:
x = layers.Dropout(drop_value)(x)
return x
def get_generator_model(self):
noise = layers.Input(shape=(self.latent_dim,))
# (None, 128)
x = layers.Dense(self.rows * self.columns * 8, use_bias=False)(noise)
x = layers.Reshape((self.rows, self.columns * 8))(x)
x = self.dense_block(x, self.columns * 8, layers.LeakyReLU(0.2),
use_bias=True, use_bn=False, use_dropout=True, drop_value=0.2)
x = self.dense_block(x, self.columns * 4, layers.LeakyReLU(0.2),
use_bias=True, use_bn=False, use_dropout=True, drop_value=0.2)
x = self.dense_block(x, self.columns * 2, layers.LeakyReLU(0.2),
use_bias=True, use_bn=False, use_dropout=True, drop_value=0.2)
x = self.dense_block(x, self.columns, layers.LeakyReLU(0.2),
use_bias=True, use_bn=False, use_dropout=True, drop_value=0.2)
x = layers.Reshape((self.rows, self.columns, self.channels))(x)
g_model = keras.models.Model(noise, x, name="generator")
return g_model
@staticmethod
def generator_loss(fake_img):
return -tf.reduce_mean(fake_img)
# ---------------------
# Transform
# ---------------------
def transform_block(self, x, num_hidden_layers=12):
for _ in range(num_hidden_layers):
tmp = self.norm(x)
tmp = self.attention(tmp)
x = x + tmp
tmp = self.norm(x)
tmp = self.mlp(tmp)
x = x + tmp
return x
def linear_embedding(self, x):
x = layers.Dense(self.hidden_units)(x)
return x
def init_pos_embedding(self):
self.pos_embedding = self.add_weight(
"position_embedding",
shape=[self.rows, self.hidden_units],
initializer=initializers.TruncatedNormal(stddev=0.02),
dtype=tf.float32)
def position_embedding(self, x):
return x + self.pos_embedding
def norm(self, x):
x = layers.LayerNormalization(epsilon=1e-5)(x)
return x
def mlp(self, x):
out = layers.Dense(self.intermediate_units, activation=WGAN_GP.gelu)(x)
out = layers.Dense(self.hidden_units)(out)
return out
def attention(self, x):
        # assume x.shape = [batch_size, 120, 800]
qkv = layers.Dense(self.hidden_units * 3, use_bias=False)(x)
# qkv.shape = [batch_size, 120, 2400]
qkv = Rearrange("b n (qkv h d) -> qkv b h n d", qkv=3, h=self.attention_heads)(qkv)
        # qkv.shape = [3, batch_size, 10, 120, 80], {"b n (qkv h d) -> qkv b h n d", n=120, qkv=3, h=10, 2400 = qkv * h * d, so d = 80}
q = qkv[0]
# q.shape = [batch_size, 10, 120, 80]
k = qkv[1]
v = qkv[2]
scale = self.hidden_units ** -0.5
dots = tf.einsum("bhid,bhjd->bhij", q, k) * scale
        # dots.shape = [batch_size, 10, 120, 120], {id,jd -> ij: 120*80, 120*80 -> 120*120}, scale = 1/sqrt(800)
attn = tf.nn.softmax(dots, axis=-1)
# attn.shape = [batch_size, 10, 120, 120]
out = tf.einsum("bhij,bhjd->bhid", attn, v)
# out.shape = [batch_size, 10, 120, 80], {ij,jd -> id: 120*120, 120*80 -> 120*80}
out = Rearrange("b h n d -> b n (h d)")(out)
# out.shape = [batch_size, 120, 800], {"b h n d -> b n (h d)", h=10, d=80}
out = layers.Dense(self.hidden_units)(out)
        # out.shape = [batch_size, 120, 800], same shape as the input x
return out
@staticmethod
def gelu(x):
"""Gaussian Error Linear Unit.
This is a smoother version of the RELU.
Original paper: https://arxiv.org/abs/1606.08415
Args:
x: float Tensor to perform activation.
Returns:
`x` with the GELU activation applied.
"""
cdf = 0.5 * (1.0 + tf.tanh((np.sqrt(2 / np.pi) * (x + 0.044715 * tf.pow(x, 3)))))
return x * cdf
# ---------------------
# keras.Model
# ---------------------
def compile(self):
super(WGAN_GP, self).compile()
self.discriminator.compile()
self.generator.compile()
def gradient_penalty(self, batch_size, real_images, fake_images):
""" Calculates the gradient penalty.
This loss is calculated on an interpolated image
and added to the discriminator loss.
"""
# Get the interpolated image
alpha = tf.random.normal([batch_size, 1, 1, 1], 0.0, 1.0)
diff = fake_images - real_images
interpolated = real_images + alpha * diff
with tf.GradientTape() as gp_tape:
gp_tape.watch(interpolated)
# 1. Get the discriminator output for this interpolated image.
pred = self.discriminator(interpolated, training=True)
# 2. Calculate the gradients w.r.t to this interpolated image.
grads = gp_tape.gradient(pred, [interpolated])[0]
# 3. Calculate the norm of the gradients.
norm = tf.sqrt(tf.reduce_sum(tf.square(grads), axis=[1, 2, 3]))
gp = tf.reduce_mean((norm - 1.0) ** 2)
return gp
def train_step(self, real_images):
if isinstance(real_images, tuple):
real_images = real_images[0]
# Get the batch size
batch_size = tf.shape(real_images)[0]
# For each batch, we are going to perform the
# following steps as laid out in the original paper:
# 1. Train the generator and get the generator loss
# 2. Train the discriminator and get the discriminator loss
# 3. Calculate the gradient penalty
# 4. Multiply this gradient penalty with a constant weight factor
# 5. Add the gradient penalty to the discriminator loss
# 6. Return the generator and discriminator losses as a loss dictionary
# ---------------------
# Train discriminator
# ---------------------
        # The original paper recommends training the discriminator for several
        # extra steps (typically 5) per single generator step. Here the number
        # of extra discriminator steps is `self.d_steps` (5 by default).
for i in range(self.d_steps):
# Get the latent vector
random_latent_vectors = tf.random.normal(
shape=(batch_size, self.latent_dim)
)
with tf.GradientTape() as tape:
# Generate fake images from the latent vector
fake_images = self.generator(random_latent_vectors, training=True)
# Get the logits for the fake images
fake_logits = self.discriminator(fake_images, training=True)
# Get the logits for the real images
real_logits = self.discriminator(real_images, training=True)
# Calculate the discriminator loss using the fake and real image logits
d_cost = self.d_loss_fn(real_img=real_logits, fake_img=fake_logits)
# Calculate the gradient penalty
gp = self.gradient_penalty(batch_size, real_images, fake_images)
# Add the gradient penalty to the original discriminator loss
d_loss = d_cost + gp * self.gp_weight
# Get the gradients w.r.t the discriminator loss
d_gradient = tape.gradient(d_loss, self.discriminator.trainable_variables)
# Update the weights of the discriminator using the discriminator optimizer
self.d_optimizer.apply_gradients(
zip(d_gradient, self.discriminator.trainable_variables)
)
# ---------------------
# Train generator
# ---------------------
# Get the latent vector
random_latent_vectors = tf.random.normal(shape=(batch_size, self.latent_dim))
with tf.GradientTape() as tape:
# Generate fake images using the generator
generated_images = self.generator(random_latent_vectors, training=True)
# Get the discriminator logits for fake images
| |
or points.
SET3 entries are referenced by:
- NX
- ACMODL
- PANEL
- MSC
- PBMSECT
- PBRSECT
- RFORCE
- ELEM only (SOL 600)
- DEACTEL
- ELEM only (SOL 400)
- RBAR, RBAR1, RBE1, RBE2, RBE2GS, RBE3, RROD,
RSPLINE, RSSCON, RTRPLT and RTRPLT1
- RBEin / RBEex only
- ELSIDi / XELSIDi
- ELEM only
- NDSIDi
- GRID only
+------+-----+-------+-----+-----+-----+-----+-----+-----+
| 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 |
+======+=====+=======+=====+=====+=====+=====+=====+=====+
| SET3 | SID | DES | ID1 | ID2 | ID3 | ID4 | ID5 | ID6 |
+------+-----+-------+-----+-----+-----+-----+-----+-----+
| | ID7 | ID8 | etc | | | | | |
+------+-----+-------+-----+-----+-----+-----+-----+-----+
| SET3 | 1 | POINT | 11 | 12 | | | | |
+------+-----+-------+-----+-----+-----+-----+-----+-----+
"""
type = 'SET3'
valid_descs = ['GRID', 'POINT', 'ELEMENT', 'PROP', 'RBEIN', 'RBEEX']
def __init__(self, sid, desc, ids, comment=''):
Set.__init__(self)
if comment:
self.comment = comment
#: Unique identification number. (Integer > 0)
self.sid = sid
        #: Set description (Character). Valid options are 'GRID', 'POINT',
        #: 'ELEMENT' (or 'ELEM'), 'PROP', 'RBEIN' and 'RBEEX'.
if desc == 'ELEM':
desc = 'ELEMENT'
self.desc = desc
#: Identifiers of grids points, elements, points or properties.
#: (Integer > 0)
self.ids = expand_thru(ids)
self.xref_type = None
def validate(self):
if self.desc not in self.valid_descs:
msg = 'desc=%r; valid_descs=[%s]' % (self.desc, ', '.join(self.valid_descs))
raise ValueError(msg)
def cross_reference(self, xref_type):
self.xref_type = xref_type
def add_set(self, set3):
self.ids += set3.get_ids()
assert self.sid == set3.sid, 'SET3.sid=%r; existing sid=%r new=%r' % (self.sid, self.sid, set3.sid)
assert self.desc == set3.desc, 'SET3.sid=%r; existing desc=%r new=%r' % (self.sid, self.desc, set3.desc)
self.clean_ids()
@classmethod
def add_card(cls, card, comment=''):
"""
Adds a SET3 card from ``BDF.add_card(...)``
Parameters
----------
card : BDFCard()
a BDFCard object
comment : str; default=''
a comment for the card
"""
sid = integer(card, 1, 'sid')
desc = string(card, 2, 'desc')
ids = fields(integer_or_string, card, 'ID', i=3, j=len(card))
return SET3(sid, desc, ids, comment=comment)
def union(self, set3):
assert self.type == set3.type, 'type=%r set3.type=%r' % (self.type, set3.type)
assert self.desc == set3.desc, 'self.desc=%r set3.desc=%r' % (self.desc, set3.desc)
ids1 = set(self.ids)
ids2 = set(set3.ids)
self.ids = list(ids1.union(ids2))
def symmetric_difference(self, set3):
assert self.type == set3.type, 'type=%r set3.type=%r' % (self.type, set3.type)
ids1 = set(self.ids)
ids2 = set(set3.ids)
return ids1.symmetric_difference(ids2)
def is_grid(self):
if self.desc == 'GRID':
return True
return False
def is_point(self):
if self.desc == 'POINT':
return True
return False
def is_property(self):
if self.desc == 'PROP':
return True
return False
def is_element(self):
if self.desc == 'ELEMENT':
return True
return False
def SetIDs(self, collapse=True):
"""gets the IDs of the SETx"""
if collapse:
return collapse_thru(self.ids, nthru=1)
else:
return self.ids
def raw_fields(self):
"""Gets the "raw" card without any processing as a list for printing"""
list_fields = ['SET3', self.sid, self.desc] + self.SetIDs()
return list_fields
def __repr__(self):
#fields_blocks = [
#'SET3',
#[[self.sid, self.desc], False], # these are not all integers
#[self.SetIDs(), True], # these are all integers
#]
#print(fields_blocks)
#return self.comment + print_int_card_blocks(fields_blocks)
msg = self.comment
self.ids.sort()
ids = self.ids
packs = condense(ids)
if len(packs) == 1:
singles, doubles = build_thru_packs(packs, max_dv=1)
packs = collapse_thru(ids)
for pack in doubles:
msg += print_card_8(['SET3', self.sid, self.desc] + pack)
if singles:
msg += print_card_8(['SET3', self.sid, self.desc] + singles)
else:
msg += print_card_8(['SET3', self.sid, self.desc] + ids)
return msg
def write_card(self, size: int=8, is_double: bool=False) -> str:
return str(self)
class SESET(SetSuper):
"""
Defines interior grid points for a superelement.
"""
type = 'SESET'
def __init__(self, seid, ids, comment=''):
SetSuper.__init__(self)
if comment:
self.comment = comment
self.seid = seid
#: Grid or scalar point identification number.
#: (0 < Integer < 1000000; G1 < G2)
self.ids = expand_thru(ids)
self.clean_ids()
@classmethod
def add_card(cls, card, comment=''):
"""
Adds a SESET card from ``BDF.add_card(...)``
Parameters
----------
card : BDFCard()
a BDFCard object
comment : str; default=''
a comment for the card
"""
seid = integer_or_blank(card, 1, 'seid', 0)
ids = fields(integer_or_string, card, 'ID', i=2, j=len(card))
return SESET(seid, ids, comment=comment)
def add_seset(self, seset):
self.ids += seset.ids
self.clean_ids()
def raw_fields(self):
list_fields = ['SESET', self.seid] + collapse_thru(self.ids)
return list_fields
def __repr__(self):
thru_fields = collapse_thru(self.ids)
#list_fields = ['SESET', self.seid]
cards = []
while 'THRU' in thru_fields:
ithru = thru_fields.index('THRU')
card = print_card_8(['SESET', self.seid] +
thru_fields[ithru - 1:ithru + 2])
cards.append(card)
thru_fields = thru_fields[0:ithru - 1]+thru_fields[ithru + 2:]
if thru_fields:
card = print_card_8(['SESET', self.seid] + thru_fields)
cards.append(card)
return ''.join(cards)
def cross_reference(self, model: BDF) -> None:
pass
def uncross_reference(self) -> None:
pass
class SEBSET(SuperABCQSet):
"""
Defines boundary degrees-of-freedom to be fixed (b-set) during generalized
dynamic reduction or component mode calculations.
+--------+------+-----+------+-----+----+-----+----+
| 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 |
+========+======+=====+======+=====+====+=====+====+
| SEBSET | SEID | ID1 | C1 | ID2 | C2 | ID3 | C3 |
+--------+------+-----+------+-----+----+-----+----+
| SEBSET | C | ID1 | THRU | ID2 | | | |
+--------+------+-----+------+-----+----+-----+----+
"""
type = 'SEBSET'
def __init__(self, seid, ids, components, comment=''):
SuperABCQSet.__init__(self, seid, ids, components, comment)
class SEBSET1(SuperABQSet1):
type = 'SEBSET1'
def __init__(self, seid, components, ids, comment=''):
SuperABQSet1.__init__(self, seid, components, ids, comment)
class SECSET(SuperABCQSet):
type = 'SECSET'
def __init__(self, seid, components, ids, comment=''):
SuperABCQSet.__init__(self, seid, components, ids, comment)
class SECSET1(SuperABQSet1):
type = 'SECSET1'
def __init__(self, seid, components, ids, comment=''):
SuperABQSet1.__init__(self, seid, components, ids, comment)
class SEQSET(SuperABCQSet):
type = 'SEQSET'
def __init__(self, seid, ids, components, comment=''):
SuperABCQSet.__init__(self, seid, ids, components, comment)
class SEQSET1(SuperABQSet1):
type = 'SEQSET1'
def __init__(self, seid, components, ids, comment=''):
SuperABQSet1.__init__(self, seid, components, ids, comment)
class SEQSEP(SetSuper): # not integrated...is this an SESET ???
"""
Used with the CSUPER entry to define the correspondence of the
exterior grid points between an identical or mirror-image superelement
and its primary superelement.
"""
type = 'SEQSEP'
def __init__(self, ssid, psid, ids, comment=''):
SetSuper.__init__(self)
if comment:
self.comment = comment
#: Identification number for secondary superelement. (Integer >= 0).
self.ssid = ssid
#: Identification number for the primary superelement. (Integer >= 0).
self.psid = psid
#: Exterior grid point identification numbers for the primary
#: superelement. (Integer > 0)
self.ids = expand_thru(ids)
self.clean_ids()
@classmethod
def add_card(cls, card, comment=''):
"""
Adds a SEQSEP card from ``BDF.add_card(...)``
Parameters
----------
card : BDFCard()
a BDFCard object
comment : str; default=''
a comment for the card
"""
ssid = integer(card, 1, 'ssid')
psid = integer(card, 2, 'psid')
ids = fields(integer_or_string, card, 'ID', i=3, j=len(card))
return SEQSEP(ssid, psid, ids, comment=comment)
def raw_fields(self):
"""gets the "raw" card without any processing as a list for printing"""
list_fields = ['SEQSEP', self.ssid, self.psid] + self.ids
return list_fields
class RADSET(Set): # not integrated
"""
Specifies which radiation cavities are to be included for
radiation enclosure analysis.
+--------+----------+----------+----------+----------+----------+----------+----------+
| 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 |
+========+==========+==========+==========+==========+==========+==========+==========+
| RADSET | ICAVITY1 | ICAVITY2 | ICAVITY3 | ICAVITY4 | ICAVITY5 | ICAVITY6 | ICAVITY7 |
+--------+----------+----------+----------+----------+----------+----------+----------+
| | ICAVITY8 | ICAVITY9 | etc. | | | | |
+--------+----------+----------+----------+----------+----------+----------+----------+
"""
type = 'RADSET'
def __init__(self, seid, ids, comment=''):
Set.__init__(self)
if comment:
self.comment = comment
self.seid = seid
#: Grid or scalar point identification number.
#: (0 < Integer < 1000000; G1 < G2)
self.ids = expand_thru(ids)
self.clean_ids()
@classmethod
def add_card(cls, card, comment=''):
"""
Adds a RADSET card from ``BDF.add_card(...)``
Parameters
----------
card : BDFCard()
a BDFCard object
comment : str; default=''
a comment for the card
"""
ids = fields(integer_or_string, card, 'Cavity_', i=1, j=len(card))
return RADSET(ids, comment=comment)
def add_radset(self, radset):
self.ids += radset.ids
self.clean_ids()
def raw_fields(self):
"""gets the "raw" card without any processing as a list for printing"""
list_fields = ['RADSET', self.seid] + self.ids
return list_fields
class USET(Set):
"""
Defines a degrees-of-freedom set.
+------+-------+-----+------+-----+----+-----+----+
| USET | SNAME | ID1 | C1 | ID2 | C2 | ID3 | C3 |
+------+-------+-----+------+-----+----+-----+----+
| USET | JUNK | ID1 | THRU | ID2 | | | |
+------+-------+-----+------+-----+----+-----+----+
"""
type = 'USET'
def __init__(self, name, components, ids, comment=''):
Set.__init__(self)
if comment:
self.comment = comment
self.name = name
        self.components = components
        #: Identifiers of grid points. (Integer > 0)
        self.ids = ids
| |
# Copyright (c) 2014-2019, iocage
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted providing that the following conditions
# are met:
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
# IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
# OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
# HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
# STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
# IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
import collections
import datetime
import json
import operator
import os
import subprocess as su
import iocage_lib.ioc_clean as ioc_clean
import iocage_lib.ioc_common as ioc_common
import iocage_lib.ioc_create as ioc_create
import iocage_lib.ioc_destroy as ioc_destroy
import iocage_lib.ioc_exec as ioc_exec
import iocage_lib.ioc_fetch as ioc_fetch
import iocage_lib.ioc_fstab as ioc_fstab
import iocage_lib.ioc_image as ioc_image
import iocage_lib.ioc_json as ioc_json
import iocage_lib.ioc_list as ioc_list
import iocage_lib.ioc_plugin as ioc_plugin
import iocage_lib.ioc_start as ioc_start
import iocage_lib.ioc_stop as ioc_stop
import iocage_lib.ioc_upgrade as ioc_upgrade
import iocage_lib.ioc_debug as ioc_debug
import iocage_lib.ioc_exceptions as ioc_exceptions
import libzfs
from iocage_lib.release import Release
class PoolAndDataset(ioc_json.IOCZFS):
def __init__(self):
super().__init__()
self.pool = ioc_json.IOCJson().json_get_value("pool")
def get_pool(self):
"""
Helper to get the current pool.
Return:
string: with the pool name.
"""
return self.pool
def get_datasets(self, option_type):
"""
Helper to get datasets.
Return:
generator: from libzfs.ZFSDataset.
"""
__types = {
'all': '/iocage/jails',
'base': '/iocage/releases',
'template': '/iocage/templates',
'uuid': '/iocage/jails',
'root': '/iocage',
}
if option_type in __types.keys():
return self.zfs.get_dataset(
f"{self.pool}{__types[option_type]}").children
def get_iocroot(self):
"""
Helper to get the iocroot.
Return:
string: with the iocroot name.
"""
return ioc_json.IOCJson().json_get_value("iocroot")
class IOCage(ioc_json.IOCZFS):
def __init__(self,
jail=None,
rc=False,
callback=None,
silent=False,
activate=False,
skip_jails=False,
):
super().__init__(callback)
self.rc = rc
self.silent = silent
        # FreeNAS won't be entering through the CLI, so we set sane defaults
        os.environ.setdefault("IOCAGE_SKIP", "FALSE")
        os.environ.setdefault("IOCAGE_FORCE", "TRUE")
if not activate:
self.pool = PoolAndDataset().get_pool()
self.iocroot = PoolAndDataset().get_iocroot()
if not skip_jails:
# When they need to destroy a jail with a missing or bad
# configuration, this gets in our way otherwise.
try:
self.jails = self.list("uuid")
except libzfs.ZFSException as err:
if err.code == libzfs.Error.NOENT and rc:
# No jails exist for RC, that's OK
self.jails = []
return
else:
# Really going to raise this.
raise
self.skip_jails = skip_jails
self.jail = jail
self._all = True if self.jail and 'ALL' in self.jail else False
self.callback = callback
self.is_depend = False
def __all__(self, jail_order, action, ignore_exception=False):
# So we can properly start these.
self._all = False
for j in jail_order:
# We want this to be the real jail now.
self.jail = j
uuid, path = self.__check_jail_existence__()
status, jid = self.list("jid", uuid=uuid)
if action == 'stop':
self.stop(j, ignore_exception=ignore_exception)
elif action == 'start':
if not status:
err, msg = self.start(j, ignore_exception=ignore_exception)
if err:
ioc_common.logit(
{
'level': 'ERROR',
'message': msg
},
_callback=self.callback, silent=self.silent
)
else:
message = f"{uuid} ({j}) is already running!"
ioc_common.logit(
{
'level': 'WARNING',
'message': message
},
_callback=self.callback, silent=self.silent
)
def __jail_order__(self, action, ignore_exception=False):
"""Helper to gather lists of all the jails by order and boot order."""
jail_order = {}
boot_order = {}
_reverse = True if action == 'stop' else False
for jail in self.jails:
self.jail = jail
uuid, path = self.__check_jail_existence__()
conf = ioc_json.IOCJson(path).json_get_value('all')
boot = conf['boot']
priority = conf['priority']
jail_order[jail] = int(priority)
# This removes having to grab all the JSON again later.
if boot:
boot_order[jail] = int(priority)
jail_order = collections.OrderedDict(
sorted(
jail_order.items(),
key=operator.itemgetter(1),
reverse=_reverse))
boot_order = collections.OrderedDict(
sorted(
boot_order.items(),
key=operator.itemgetter(1),
reverse=_reverse))
if self.rc:
self.__rc__(boot_order, action, ignore_exception)
elif self._all:
self.__all__(jail_order, action, ignore_exception)
def __rc__(self, boot_order, action, ignore_exception=False):
"""Helper to start all jails with boot=on"""
# So we can properly start these.
self.rc = False
for j in boot_order.keys():
# We want this to be the real jail now.
self.jail = j
uuid, path = self.__check_jail_existence__()
status, _ = self.list("jid", uuid=uuid)
if action == 'stop':
if status:
message = f" Stopping {uuid}"
ioc_common.logit(
{
'level': 'INFO',
'message': message
},
_callback=self.callback, silent=self.silent
)
self.stop(j, ignore_exception=ignore_exception)
else:
message = f"{uuid} is not running!"
ioc_common.logit(
{
'level': 'INFO',
'message': message
},
_callback=self.callback, silent=self.silent
)
elif action == 'start':
if not status:
message = f" Starting {uuid}"
ioc_common.logit(
{
'level': 'INFO',
'message': message
},
_callback=self.callback, silent=self.silent
)
err, msg = self.start(j, ignore_exception=ignore_exception)
if err:
ioc_common.logit(
{
'level': 'ERROR',
'message': msg
},
_callback=self.callback, silent=self.silent
)
else:
message = f"{uuid} is already running!"
ioc_common.logit(
{
'level': 'WARNING',
'message': message
},
_callback=self.callback, silent=self.silent
)
def __check_jail_existence__(self):
"""
Helper to check if jail dataset exists
Return:
tuple: The jails uuid, path
"""
if os.path.isdir(f"{self.iocroot}/jails/{self.jail}"):
path = f"{self.iocroot}/jails/{self.jail}"
return self.jail, path
elif os.path.isdir(f"{self.iocroot}/templates/{self.jail}"):
path = f"{self.iocroot}/templates/{self.jail}"
return self.jail, path
else:
if self.skip_jails:
                # We skip jails for performance, but if they didn't match we
                # now need to gather the list and iterate.
self.jails = self.list("uuid")
# We got a partial, time to search.
_jail = {
uuid: path
for (uuid, path) in self.jails.items()
if uuid.startswith(self.jail)
}
if len(_jail) == 1:
uuid, path = next(iter(_jail.items()))
return uuid, path
elif len(_jail) > 1:
msg = f"Multiple jails found for {self.jail}:"
for u, p in sorted(_jail.items()):
msg += f"\n {u} ({p})"
ioc_common.logit(
{
"level": "EXCEPTION",
"message": msg
},
_callback=self.callback,
silent=self.silent)
else:
msg = f"jail '{self.jail}' not found!"
ioc_common.logit(
{
"level": "EXCEPTION",
"message": msg
},
_callback=self.callback,
silent=self.silent)
@staticmethod
def __check_jail_type__(_type, uuid):
"""
Return:
tuple: True if error with a message, or False/None
"""
if _type in ("jail", "plugin", "clonejail", "pluginv2"):
return False, None
elif _type == 'basejail':
return (True, "Please run \"iocage migrate\" before trying to"
f" start {uuid}")
elif _type == 'template':
return (True, "Please convert back to a jail before trying to"
f" start {uuid}")
else:
return True, f"{_type} is not a supported jail type."
@staticmethod
def __mount__(path, _type):
if _type == "devfs":
cmd = ["mount", "-t", "devfs", "devfs", path]
else:
cmd = ["mount", "-a", "-F", path]
_, stderr = su.Popen(cmd, stdout=su.PIPE, stderr=su.PIPE).communicate()
return stderr
@staticmethod
def __umount__(path, _type):
if _type == "devfs":
cmd = ["umount", path]
else:
cmd = ["umount", "-a", "-F", path]
_, stderr = su.Popen(cmd, stdout=su.PIPE, stderr=su.PIPE).communicate()
return stderr
def __remove_activate_comment(self, pool):
"""Removes old legacy comment for zpool activation"""
# Check and clean if necessary iocage_legacy way
# to mark a ZFS pool as usable (now replaced by ZFS property)
comment = self.zfs.get(pool.name).properties["comment"]
if comment.value == "iocage":
comment.value = "-"
def activate(self, zpool):
"""Activates the zpool for iocage usage"""
pools = list(self.zfs.pools)
prop = "org.freebsd.ioc:active"
match = False
for pool in pools:
if pool.name == zpool:
if pool.status not in ('UNAVAIL', 'FAULTED', 'SPLIT'):
match = True
else:
ioc_common.logit(
{
'level': 'EXCEPTION',
'message': f'ZFS pool "{zpool}" is '
f'{pool.status}!\nPlease check zpool status '
f'{zpool} for more information.'
},
_callback=self.callback,
silent=self.silent
)
if not match:
ioc_common.logit(
{
"level": "EXCEPTION",
"message": f"ZFS pool '{zpool}' not found!"
},
_callback=self.callback,
silent=self.silent)
for pool in pools:
if pool.status != "UNAVAIL":
ds = self.zfs.get_dataset(pool.name)
else:
continue
if pool.name == zpool:
ds.properties[prop] = libzfs.ZFSUserProperty("yes")
else:
ds.properties[prop] = libzfs.ZFSUserProperty("no")
self.__remove_activate_comment(pool)
def chroot(self, command):
"""Deprecated: Chroots into a jail and runs a command, or the shell."""
ioc_common.logit(
{
"level": "INFO",
"message":
(
"iocage chroot is deprecated. "
"If you need to execute a {} inside the jail use: {}"
).format(*[
["shell", "iocage console"],
["command", "iocage exec"]
][int(len(command) != 0)])
},
_callback=self.callback,
silent=self.silent)
def clean(self, d_type):
"""Destroys all of a specified dataset types."""
if d_type == 'jails':
ioc_clean.IOCClean(silent=self.silent).clean_jails()
ioc_common.logit(
{
'level': 'INFO',
'message': 'All iocage jail datasets have been destroyed.'
},
_callback=self.callback,
silent=self.silent)
elif d_type == 'all':
ioc_clean.IOCClean(silent=self.silent).clean_all()
ioc_common.logit(
{
'level': 'INFO',
'message': 'All iocage datasets have been destroyed.'
},
| |
<filename>strangefish/strangefish.py
import multiprocessing as mp
import os
from collections import defaultdict
from functools import partial
from time import time
from tqdm import tqdm
from typing import Optional, List, Tuple, Set, Callable
import chess.engine
from reconchess import Player, Color, GameHistory, WinReason, Square
from strangefish import defaults
from strangefish.strategies.auxiliary_strategies import contingency_strategy
from strangefish.utilities import board_matches_sense, move_would_happen_on_board, populate_next_board_set
from strangefish.utilities.player_logging import create_main_logger
from strangefish.utilities.timing import Timer
# Parameters for minor bot behaviors
RC_DISABLE_PBAR = os.getenv('RC_DISABLE_PBAR', 'false').lower() == 'true' # Flag to disable the tqdm progress bars
WAIT_LOOP_RATE_LIMIT = 1 # minimum seconds spent looping in self.while_we_wait()
# Parameters for switching to the emergency backup plan
BOARD_SET_LIMIT = 1_000_000 # number of boards in set at which we stop processing and revert to backup plan
TIMEOUT_RESIGN_LIMIT = 10 # number of seconds left at which we stop processing and revert to backup plan
AVG_BOARD_EXP = 33 # number of moves on each board: mean 33, std 10 according to ~320k boards in logs
class StrangeFish(Player):
"""
StrangeFish is the main skeleton of our reconchess-playing bot. Its primary role is to manage the set of all
possible board states based on the given information. Decision making for sense and move choices are handed off to
one of our strategy functions.
StrangeFish alone does not use the Stockfish chess engine, but most of our strategies do use it to make sensing and
moving decisions. In order to run StrangeFish with one of those strategies, you'll need to download Stockfish from
https://stockfishchess.org/download/ and create an environment variable called STOCKFISH_EXECUTABLE that is the path
to the downloaded Stockfish executable.
"""
def __init__(
self,
choose_sense: Callable[[Set[str], bool, List[Square], List[chess.Move], float], Square] = defaults.choose_sense,
choose_move: Callable[[Set[str], bool, List[chess.Move], float], chess.Move] = defaults.choose_move,
while_we_wait: Optional[Callable[[Set[str], bool], None]] = defaults.while_we_wait,
end_game: Optional[Callable[[Set[str]], None]] = defaults.end_game,
pool_size: Optional[int] = 2,
log_to_file=True,
save_debug_history=False,
rc_disable_pbar=RC_DISABLE_PBAR,
):
"""
Set up StrangeFish with decision-making capabilities inherited from another function.
:param choose_sense: A callable produced by the strategy function which chooses and returns the sense square
:param choose_move: A callable produced by the strategy function which chooses and returns the move
:param while_we_wait: An optional callable produced by the strategy function which uses time between our turns
:param end_game: An optional callable produced by the strategy function which (typically) shuts down StockFish
:param pool_size: Number of processes to use when multiprocessing board set expansion and filtering
:param log_to_file: A boolean flag to turn on/off logging to file gameLogs/StrangeFish.log
:param save_debug_history: A boolean flag to turn on/off the generation of a turn-by-turn internal history
:param rc_disable_pbar: A boolean flag to turn on/off the tqdm progress bars
"""
self._choose_sense = choose_sense
self._choose_move = choose_move
self._while_we_wait = while_we_wait
self._end_game = end_game
self.boards: Set[str] = set()
self.next_turn_boards: defaultdict[Set] = defaultdict(set)
self.next_turn_boards_unsorted: Set[str] = set()
self.color = None
self.turn_num = None
self.pool = mp.Pool(pool_size)
self.save_debug_history = save_debug_history
self.debug_memory = []
self.rc_disable_pbar = rc_disable_pbar
self.timeout_resign = None # flag used to skip later turn processes if we have run out of time
self.logger = create_main_logger(log_to_file=log_to_file)
self.logger.debug("A new StrangeFish player was initialized.")
def _game_state_log(self, step_name='-'): # Save game state for advanced replay
if self.save_debug_history:
info = {
'name': __name__,
'color': chess.COLOR_NAMES[self.color],
'turn': self.turn_num,
'step': step_name,
'boards': list(self.boards),
}
self.debug_memory.append(info)
def _emergency_plan(self): # Switch to emergency backup plan
self.boards = set()
self.next_turn_boards = {None: set()}
self._choose_sense, self._choose_move = contingency_strategy()
setattr(self, 'while_we_wait', None)
def get_debug_history(self): # Get possible board states from each turn
return self.debug_memory
def handle_game_start(self, color: Color, board: chess.Board, opponent_name: str):
color_name = chess.COLOR_NAMES[color]
self.logger.info('Starting a new game as %s against %s.', color_name, opponent_name)
self.boards = {board.epd(en_passant='xfen')}
self.color = color
self.turn_num = 0
self.timeout_resign = False
# Save game state for advanced replay
if self.color == chess.BLACK:
self._game_state_log()
self._game_state_log()
def handle_opponent_move_result(self, captured_my_piece: bool, capture_square: Optional[Square]):
self.turn_num += 1
self.logger.debug("Starting turn %d.", self.turn_num)
# Do not "handle_opponent_move_result" if no one has moved yet
if self.turn_num == 1 and self.color == chess.WHITE:
self._game_state_log()
return
if captured_my_piece:
self.logger.debug('Opponent captured my piece at %s.', chess.SQUARE_NAMES[capture_square])
else:
self.logger.debug("Opponent's move was not a capture.")
self.logger.debug('Already calculated scores for %d possible boards, '
'approximately %d x %d = %d boards left to analyse.',
len(self.next_turn_boards[None]), len(self.boards),
AVG_BOARD_EXP, (AVG_BOARD_EXP * len(self.boards)))
# Check for board set over-growth and switch to emergency plan if needed
if not captured_my_piece and \
(len(self.next_turn_boards[None]) + (AVG_BOARD_EXP * len(self.boards))) > BOARD_SET_LIMIT:
self.logger.warning("Board set grew too large, switching to contingency plan. "
"Set size expected to grow to %d; limit is %d",
len(self.next_turn_boards[None]) + (AVG_BOARD_EXP * len(self.boards)),
BOARD_SET_LIMIT)
self._emergency_plan()
# If creation of new board set didn't complete during op's turn (self.boards will not be empty)
if self.boards:
new_board_set = populate_next_board_set(self.boards, self.color, self.pool,
rc_disable_pbar=self.rc_disable_pbar)
for square in new_board_set.keys():
self.next_turn_boards[square] |= new_board_set[square]
# Get this turn's board set from a dictionary keyed by the possible capture squares
self.boards = self.next_turn_boards[capture_square]
self.logger.debug('Finished expanding and filtering the set of possible board states. '
'There are %d possible boards at the start of our turn %d.',
len(self.boards), self.turn_num)
# Save game state for advanced replay
self._game_state_log('post-op-move')
def choose_sense(self, sense_actions: List[Square], move_actions: List[chess.Move], seconds_left: float
) -> Optional[Square]:
# Check if time is up (or if we already changed to the emergency plan)
if not self.timeout_resign and seconds_left <= TIMEOUT_RESIGN_LIMIT:
            self.logger.warning('Time is nearly up, switching to the backup plan.')
self._emergency_plan()
self.timeout_resign = True
self.logger.debug('Choosing a sensing square for turn %d with %d boards and %.0f seconds remaining.',
self.turn_num, len(self.boards), seconds_left)
# The option to pass isn't included in the reconchess input
move_actions += [chess.Move.null()]
with Timer(self.logger.debug, 'choosing sense location'):
# Pass the needed information to the decision-making function to choose a sense square
sense_choice = self._choose_sense(self.boards, self.color, sense_actions, move_actions, seconds_left)
self.logger.debug('Chose to sense %s', chess.SQUARE_NAMES[sense_choice] if sense_choice else 'nowhere')
return sense_choice
def handle_sense_result(self, sense_result: List[Tuple[Square, Optional[chess.Piece]]]):
# Filter the possible board set to only boards which would have produced the observed sense result
num_before = len(self.boards)
i = tqdm(
self.boards,
disable=self.rc_disable_pbar,
desc=f'{chess.COLOR_NAMES[self.color]} Filtering {len(self.boards)} boards by sense results',
unit='boards',
)
self.boards = {board_epd for board_epd in
self.pool.imap_unordered(partial(board_matches_sense, sense_result=sense_result), i)
if board_epd is not None}
self.logger.debug('There were %d possible boards before sensing and %d after.', num_before, len(self.boards))
# Save game state for advanced replay
self._game_state_log('post-sense')
def choose_move(self, move_actions: List[chess.Move], seconds_left: float) -> Optional[chess.Move]:
# Currently, move_actions is passed by reference, so if we add the null move here it will be in the list twice
# since we added it in choose_sense also. Instead of removing this line altogether, I'm leaving a check so we
# are prepared in the case that reconchess is updated to pass a copy of the move_actions list instead.
if chess.Move.null() not in move_actions:
move_actions += [chess.Move.null()]
self.logger.debug('Choosing move for turn %d from %d moves over %d boards with %.2f seconds remaining.',
self.turn_num, len(move_actions), len(self.boards), seconds_left)
with Timer(self.logger.debug, 'choosing move'):
# Pass the needed information to the decision-making function to choose a move
move_choice = self._choose_move(self.boards, self.color, move_actions, seconds_left)
self.logger.debug('The chosen move was %s', move_choice)
# reconchess uses None for the null move, so correct the function output if that was our choice
return move_choice if move_choice != chess.Move.null() else None
def handle_move_result(self, requested_move: Optional[chess.Move], taken_move: Optional[chess.Move],
captured_opponent_piece: bool, capture_square: Optional[Square]):
self.logger.debug('The requested move was %s and the taken move was %s.', requested_move, taken_move)
if captured_opponent_piece:
self.logger.debug('Move %s was a capture!', taken_move)
num_boards_before_filtering = len(self.boards)
if requested_move is None:
requested_move = chess.Move.null()
if taken_move is None:
taken_move = chess.Move.null()
# Filter the possible board set to only boards on which the requested move would have resulted in the taken move
i = tqdm(
self.boards,
disable=self.rc_disable_pbar,
desc=f'{chess.COLOR_NAMES[self.color]} Filtering {len(self.boards)} boards by move results',
unit='boards',
)
self.boards = {
board_epd for board_epd in
self.pool.imap_unordered(partial(move_would_happen_on_board, requested_move, taken_move,
captured_opponent_piece, capture_square), i)
if board_epd is not None
}
self.logger.debug('There were %d possible boards before filtering and %d after.',
num_boards_before_filtering, len(self.boards))
# Save game state for advanced replay
self._game_state_log('post-move')
self._game_state_log()
# Re-initialize the set of boards for next turn (filled in while_we_wait and/or handle_opponent_move_result)
self.next_turn_boards = defaultdict(set)
self.next_turn_boards_unsorted = set()
def while_we_wait(self):
start_time = time()
self.logger.debug('Running the "while_we_wait" method. '
invoice.
Note that when a readonly action actually *does* modify the
object, Lino won't "notice" it.
Discussion
Maybe we should change the name `readonly` to `modifying` or
`writing` (and set the default value `False`). Because for the
application developer that looks more natural. Or --maybe better
but probably with even more consequences-- the default value
should be `False`. Because being readonly, for actions, is a kind
of "privilege": they don't get logged, they also exists for
readonly users... It would be more "secure" when the developer
must explicitly "say something" it when granting that privilege.
Another subtlety is the fact that this attribute is used by
:class:`lino.modlib.users.UserAuthored`. For example the
:class:`StartTicketSession
<lino_xl.lib.working.StartTicketSession>` action in :ref:`noi` is
declared "readonly" because we want Workers who are not Triagers
to see this action even if they are not the author (reporter) of a
ticket. In this use case the name should rather be
`requires_authorship`.
"""
opens_a_window = False
"""
Whether this action opens a window. If this is True, the user
interface is responsible for rendering that window.
"""
hide_top_toolbar = False
"""
Used internally if :attr:`opens_a_window` to say whether the
window has a top toolbar.
"""
hide_navigator = False
"""
Used internally if :attr:`opens_a_window` to say whether the
window has a navigator.
"""
show_in_plain = False
"""
Whether this action should be displayed as a button in the toolbar
of a plain html view.
"""
show_in_bbar = True
"""
Whether this action should be displayed as a button in the toolbar
and the context menu of a full grid.
For example the :class:`CheckinVisitor
<lino_xl.lib.reception.models.CheckinVisitor>`,
:class:`ReceiveVisitor
<lino_xl.lib.reception.models.ReceiveVisitor>` and
:class:`CheckoutVisitor
<lino_xl.lib.reception.models.CheckoutVisitor>` actions have this
attribute explicitly set to `False` because otherwise they would be
visible in the toolbar.
"""
show_in_workflow = False
"""
Whether this action should be displayed in the
:attr:`workflow_buttons <lino.core.model.Model.workflow_buttons>`
column. If this is True, then Lino will automatically set
:attr:`custom_handler` to True.
"""
custom_handler = False
"""
Whether this action is implemented as Javascript function call.
This is necessary if you want your action to be callable using an
"action link" (html button).
"""
select_rows = True
"""
True if this action needs an object to act on.
Set this to `False` if this action is a list action, not a row
action.
"""
http_method = 'GET'
"""
HTTP method to use when this action is called using an AJAX call.
"""
preprocessor = 'null' # None
"""
Name of a Javascript function to be invoked on the web client when
this action is called.
"""
window_type = None
"""
On actions with :attr:`opens_a_window`, this must be a unique one-letter
string expressing the window type.
Allowed values are:
- None : opens_a_window is False
- 't' : ShowTable
- 'd' : ShowDetail
- 'i' : ShowInsert
This can be used e.g. by a summary view to decide how to present the
summary data (usage example
:meth:`lino.modlib.uploads.AreaUploads.get_table_summary`).
"""
callable_from = "td"
"""
A string that specifies from which :attr:`window_type` this action
is callable. None means that it is only callable from code.
Default value is 'td' which means from both table and detail
(including ShowEmptyTable, which is a subclass of ShowDetail). But
not callable from ShowInsert.
"""
hide_virtual_fields = False
required_states = None
def __init__(self, label=None, **kw):
if label is not None:
self.label = label
# if self.parameters is not None and self.select_rows:
# self.show_in_bbar = False
# # see ticket #105
for k, v in list(kw.items()):
if not hasattr(self, k):
raise Exception("Invalid action keyword %s" % k)
setattr(self, k, v)
if self.show_in_workflow:
self.custom_handler = True
if self.icon_name:
if self.icon_name not in constants.ICON_NAMES:
raise Exception(
"Unkonwn icon_name '{0}'".format(self.icon_name))
register_params(self)
def __get__(self, instance, owner):
"""
When a model has an action "foo", then getting an attribute
"foo" of a model instance will return an :class:`InstanceAction`.
"""
if instance is None:
return self
return InstanceAction(
self, instance.get_default_table(), instance, owner)
@classmethod
def decorate(cls, *args, **kw):
"""
Return a decorator which turns an instance method on a model or a
class method on an actor into an action of this class.
The decorated method will be installed as the action's
:meth:`run_from_ui <Action.run_from_ui>` method.
All arguments are forwarded to :meth:`Action.__init__`.
"""
def decorator(fn):
assert not 'required' in kw
# print 20140422, fn.__name__
kw.setdefault('custom_handler', True)
a = cls(*args, **kw)
def wrapped(ar):
obj = ar.selected_rows[0]
return fn(obj, ar)
a.run_from_ui = wrapped
return a
return decorator
def get_required_roles(self, actor):
return actor.required_roles
def is_callable_from(self, caller):
"""
Return `True` if this action makes sense as a button from within
the specified `caller` (an action instance which must have a
:attr:`window_type`). Do not override this method on your
subclass ; rather specify :attr:`callable_from`.
"""
assert caller.window_type is not None
if self.callable_from is None:
return False
return caller.window_type in self.callable_from
# return isinstance(caller, self.callable_from)
def is_window_action(self):
"""Return `True` if this is a "window action" (i.e. which opens a GUI
window on the client before executing).
"""
return self.opens_a_window or (
self.parameters and not self.no_params_window)
def get_status(self, ar, **kw):
if self.parameters is not None:
if self.keep_user_values:
kw.update(field_values={})
else:
defaults = kw.get('field_values', {})
pv = self.params_layout.params_store.pv2dict(
ar, ar.action_param_values, **defaults)
kw.update(field_values=pv)
return kw
def get_chooser_for_field(self, fieldname):
d = getattr(self, '_choosers_dict', {})
return d.get(fieldname, None)
def get_choices_text(self, obj, request, field):
return obj.get_choices_text(request, self, field)
def make_params_layout_handle(self):
return make_params_layout_handle(self)
def get_data_elem(self, name):
# same as in Actor but here it is an instance method
return self.defining_actor.get_data_elem(name)
def get_param_elem(self, name):
# same as in Actor but here it is an instance method
if self.parameters:
return self.parameters.get(name, None)
return None
def get_widget_options(self, name, **options):
# same as in Actor but here it is an instance method
return options
def get_label(self):
"""
Return the `label` of this action, or the `action_name` if the
action has no explicit label.
"""
return self.label or self.action_name
def get_button_label(self, actor):
if actor is None or actor.default_action is None:
return self.label
if self is actor.default_action.action:
return actor.label
else:
return self.button_text or self.label
# since 20140923 return u"%s %s" % (self.label, actor.label)
def full_name(self, actor):
if self.action_name is None:
raise Exception("Tried to full_name() on %r" % self)
#~ return repr(self)
if self.parameters and not self.no_params_window:
return self.defining_actor.actor_id + '.' + self.action_name
return str(actor) + '.' + self.action_name
def get_action_title(self, ar):
return ar.get_title()
def __repr__(self):
if self.label is None:
name = self.action_name
else:
label_repr = repr(str(self.label))
name = "{} ({})".format(self.action_name, label_repr)
# if self.button_text:
# name = repr(str(self.button_text)) + " " + name
return "<{}.{} {}>".format(
self.__class__.__module__,
self.__class__.__name__,
name)
def __str__(self):
# return force_text(self.label)
# return str(self.get_label())
return str(self.get_label())
def unused__str__(self):
raise Exception("20121003 Must use full_name(actor)")
if self.defining_actor is None:
return repr(self)
if self.action_name is None:
return repr(self)
return str(self.defining_actor) + ':' + self.action_name
#~ def set_permissions(self,*args,**kw)
#~ self.permission = perms.factory(*args,**kw)
def attach_to_workflow(self, wf, name):
if self.action_name is not None:
assert self.action_name == name
self.action_name = name
self.defining_actor = wf
setup_params_choosers(self)
def attach_to_actor(self, owner, name):
"""
Called once per actor and per action on startup before a
:class:`BoundAction` instance is created. If this returns
False, then the action won't be attached to the given actor.
The owner is the actor which "defines" the action, i.e. uses
that instance for the first time. Subclasses of the owner may
re-use the same instance without becoming the owner.
"""
# if not actor.editable and not self.readonly:
# return False
if self.defining_actor is not None:
# already defined by another actor
return True
self.defining_actor = owner
# if self.label is None:
# self.label = name
if self.action_name is not None:
return True
# if name == self.action_name:
# return True
# raise Exception(
# "tried to attach named action %s.%s as %s" %
# (actor, self.action_name, name))
self.action_name = name
setup_params_choosers(self)
# setup_params_choosers(self.__class__)
return True
def get_action_permission(self, ar, obj, state):
"""Return (True or False) whether the given :class:`ActionRequest
<lino.core.requests.BaseRequest>` `ar` should get permission
to run on the given Model instance `obj` (which is
# Repo: AMANKANOJIYA/Numerical_Analysis -- File: Numerical_Analysis_Aman/Numerical_Analysis.py
"""
Author : <NAME>
--------------------------------------------------------------------------
|                NUMERICAL ANALYSIS (Numerical Methods)                   |
--------------------------------------------------------------------------
* Creator's message to all users --
==================================
This library is based on one of the most fundamental topics of numerical
methods. It contains a set of calculations and direct formula-based functions
that can be used in all kinds of projects as well as to solve complex problems.
I would be very glad to receive contributions on GitHub; anyone interested can
visit my profile or contact me through my email "<EMAIL>".
This project has no restrictions: everyone can use it as their own and create
their own version. If you like it, you can give me a small credit; it will
motivate me to create more such projects and to contribute more to open source.
- Thank You
"""
# IMPORTS
import numpy as np
import math
# ERROR HANDLING
def Error_Handler(func):
def inner (*args):
try:
return func(*args)
except Exception as e:
print(f"{type(e).__name__}\n==============\n {e} \n in {func.__name__} function")
return "error"
return inner
# MAIN CLASS START
class Numerical_Analysis:
"""Numerical analysis, area of mathematics and computer science that creates,
analyzes, and implements algorithms for obtaining numerical solutions to
problems involving continuous variables. Such problems arise throughout the
natural sciences, social sciences, engineering, medicine, and business.
This module provides three methods:
---> Euler method (`Eular`)
---> Modified Euler method (`EularModified`)
---> Runge-Kutta method (`RungaKutta`)
>=========================================================<
| Created By --- <NAME> |
>=========================================================<
"""
@Error_Handler
def __init__(self,x_0,y_0,x_given,gap,function):
self.x_0=x_0
self.y_0=y_0
self.x_given=x_given
self.gap=gap
self.function=str(function)
@Error_Handler
def functionToWork(self,x,y):
"""
Evaluates the function given in the problem statement.
You can change this value in the code;
a future version may also build the function
directly from user-supplied input.
functionToWork(x,y)
this function takes 2 inputs
"""
return eval(self.function)
@Error_Handler
def EularModified(self,itration=None):
"""
>============ Modified Euler Method =================<
The objective in numerical methods is, as always, to achieve the most accurate (and reliable!)
result with the least effort.
For integrating an initial value problem,
the effort is usually measured by the number of times the function
$f(t,y)$ must be evaluated in stepping from $a$ to $b$. As we will see,
a simple improvement doubles the number of function evaluations per step,
but yields a second order method - a winning strategy.
EularModified()
Call this function to get your values.
You can also pass the number of iterations you want to perform.
"""
itration=4 if itration==None else itration
create_x=[self.x_0+i*self.gap for i in range(2*itration) if (self.x_0+i*self.gap)<=self.x_given]
create_y=[self.y_0]
for i in range(0,len(create_x)-1):
y_get=create_y[-1]+(self.gap*self.functionToWork(create_x[i],create_y[-1]))
y_confirm=create_y[-1]+((self.gap/2)*(self.functionToWork(create_x[i],create_y[-1])+self.functionToWork(create_x[i]+self.gap,y_get)))
create_y.append(y_confirm)
return create_x,create_y
@Error_Handler
def Eular(self,itration=None):
"""
>============ Euler Method =================<
The Euler method is a first-order method,
which means that the local error (error per step) is
proportional to the square of the step size,
and the global error (error at a given time) is proportional
to the step size.
Eular()
Call this function to get your values.
You can also pass the number of iterations you want to perform.
"""
itration=4 if itration==None else itration
create_x=[self.x_0+i*self.gap for i in range(2*itration) if (self.x_0+i*self.gap)<=self.x_given]
create_y=[self.y_0]
for i in range(0,len(create_x)-1):
y_get=create_y[-1]+(self.gap*self.functionToWork(create_x[i],create_y[-1]))
create_y.append(y_get)
return create_x,create_y
@Error_Handler
def RungaKutta(self,itration=None):
"""
>============ Runge-Kutta Method =================<
The Runge-Kutta methods are a family of implicit and explicit
iterative methods, which include the well-known routine called the
Euler Method, used in temporal discretization for the approximate
solutions of ordinary differential equations.
RungaKutta()
Call this function to get your values.
You can also pass the number of iterations you want to perform.
"""
itration=4 if itration==None else itration
create_x=[self.x_0+i*self.gap for i in range(2*itration) if (self.x_0+i*self.gap)<=self.x_given]
create_y=[self.y_0]
for i in range(0,len(create_x)-1):
k_1 = self.gap * self.functionToWork(create_x[i],create_y[-1])
k_2 = self.gap * self.functionToWork(create_x[i]+(self.gap/2),create_y[-1]+(k_1/2))
k_3 = self.gap * self.functionToWork(create_x[i]+(self.gap/2),create_y[-1]+(k_2/2))
k_4 = self.gap * self.functionToWork(create_x[i]+self.gap,create_y[-1]+k_3)
k = (k_1+(2*k_2)+(2*k_3)+k_4)/6
yOut = create_y[-1] + k
create_y.append(yOut)
return create_x,create_y
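# ---------------------------------------------------------------------------
# A minimal usage sketch for the ODE solvers above (not part of the original
# module). It assumes the constructor signature shown in __init__:
# Numerical_Analysis(x_0, y_0, x_given, gap, function), where `function` is a
# string in x and y that is passed to eval(). The IVP below (dy/dx = x + y,
# y(0) = 1, step 0.1) is an illustrative choice, not from the original source.
if __name__ == "__main__":
    na = Numerical_Analysis(0, 1, 0.2, 0.1, "x+y")
    xs, ys = na.Eular()          # forward Euler estimates
    xm, ym = na.EularModified()  # modified Euler (Heun) estimates
    xr, yr = na.RungaKutta()     # classical RK4 estimates
    print("Euler:", list(zip(xs, ys)))
    print("Modified Euler:", list(zip(xm, ym)))
    print("RK4:", list(zip(xr, yr)))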
class Numerical_Integration:
"""
Numerical integration methods can generally be described as combining
evaluations of the integrand to get an approximation to the integral.
The integrand is evaluated at a finite set of points called integration
points and a weighted sum of these values is used to approximate
the integral.
This module provides three methods:
---> Trapezoid (`Trapazoid`)
---> Simpson 1/3 (`Simpson_13`)
---> Simpson 3/8 (`Simpson_38`)
>=========================================================<
| Created By --- <NAME> |
>=========================================================<
"""
@Error_Handler
def __init__(self,lower,upper,function):
self.x,self.y=eval(str(lower)),eval(str(upper))
self.function=str(function)
@Error_Handler
def Trapazoid(self,itration=2):
"""
>============ Trapezoid =================<
Trapezoidal Rule is a rule that evaluates the area under
the curves by dividing the total area into smaller trapezoids
rather than using rectangles. This integration works by approximating
the region under the graph of a function as a trapezoid,
and it calculates the area.
Trapazoid()
Call this function to get your values.
"""
def functionToWork(x):
"""
Evaluates the function given in the problem statement.
You can change this value in the code.
functionToWork(x)
this function takes 1 input
"""
return eval(self.function)
gap=(self.y-self.x)/(itration)
create_x=[self.x+(i*gap) for i in range(0,itration+1)]
create_y=[functionToWork(i) for i in create_x]
formula=(gap/2)*((create_y[0]+create_y[-1])+2*sum([create_y[i] for i in range(1,len(create_y)-1)]))
return formula
@Error_Handler
def Simpson_38(self,itration=2):
"""
>============ Simpson 3/8 =================<
Simpson's 3/8 rule, also called Simpson's second rule,
approximates the integrand by a third order polynomial and
is exact if f is a polynomial up to cubic degree. It requires
the number of subintervals to be a multiple of 3.
Simpson_38()
Call this function to get your values.
"""
def functionToWork(x):
"""
Evaluates the function given in the problem statement.
You can change this value in the code.
functionToWork(x)
this function takes 1 input
"""
return eval(self.function)
gap=(self.y-self.x)/(itration)
create_x=[self.x+(i*gap) for i in range(0,itration+1)]
create_y=[functionToWork(i) for i in create_x]
# composite 3/8 rule: coefficient 3 on interior points not divisible by 3, coefficient 2 on interior multiples of 3
formula=((3*gap)/8)*((create_y[0]+create_y[-1])+3*(sum([create_y[i] for i in range(1,len(create_y)-1) if (i)%3!=0]))+2*(sum([create_y[i] for i in range(3,len(create_y)-1,3)])))
return formula
@Error_Handler
def Simpson_13(self,itration=2):
"""
>============ Simpson 1/3 =================<
The trapezoidal rule approximates the integrand by a first order
polynomial. Simpson's 1/3 rule is an extension of the trapezoidal
rule in which the integrand is approximated by a second order
polynomial. It requires an even number of subintervals.
Simpson_13()
Call this function to get your values.
"""
def functionToWork(x):
"""
Evaluates the function given in the problem statement.
You can change this value in the code.
functionToWork(x)
this function takes 1 input
"""
return eval(self.function)
gap=(self.y-self.x)/(itration)
create_x=[self.x+(i*gap) for i in range(0,itration+1)]
create_y=[functionToWork(i) for i in create_x]
formula=(gap/3)*((create_y[0]+create_y[-1])+4*(sum([create_y[i] for i in range(1,len(create_y)-1,2)]))+2*(sum([create_y[i] for i in range(2,len(create_y)-1,2)])))
return formula
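# ---------------------------------------------------------------------------
# A small usage sketch for the quadrature rules above (not part of the
# original module). It assumes the constructor signature
# Numerical_Integration(lower, upper, function), with `function` a string in x
# passed to eval(). Integrating x**2 over [0, 1] (exact value 1/3) is an
# illustrative choice; the 1/3 rule needs an even number of subintervals and
# the 3/8 rule a multiple of 3.
if __name__ == "__main__":
    ni = Numerical_Integration(0, 1, "x**2")
    print("Trapezoid, n=4 :", ni.Trapazoid(4))
    print("Simpson 1/3, n=4:", ni.Simpson_13(4))
    print("Simpson 3/8, n=6:", ni.Simpson_38(6))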
class Numerical_Interpolation:
"""
In the mathematical field of numerical analysis,
interpolation is a type of estimation, a method of constructing
new data points within the range of a discrete set of known data points.
This module provides four methods:
---> Lagrangian (`Langrangian`)
---> Newton Divided Differences (`Newton_Divided`)
---> Newton Forward
---> Newton Backward
>=========================================================<
| Created By --- <NAME> |
>=========================================================<
"""
@Error_Handler
def __init__(self,x_list,y_list,find_value):
self.x_l=x_list
self.y_l=y_list
self.find_val=find_value
@Error_Handler
def Langrangian(self):
"""
In numerical analysis, Lagrange polynomials are used for polynomial interpolation.
For a given set of points (x_j,y_j) with no two x_j values equal,
the Lagrange polynomial is the polynomial of lowest degree that assumes at each
value x_j the corresponding value y_j, so that the functions coincide at each point.
Langrangian()
Call this function to get your values.
"""
function=0
for i in range(len(self.x_l)):
list_up=[self.find_val-j for j in self.x_l if j!=self.x_l[i]]
list_down=[self.x_l[i]-j for j in self.x_l if j!=self.x_l[i]]
function=function+self.y_l[i]*(np.prod(list_up)/np.prod(list_down))
return function
@Error_Handler
def Newton_Divided(self):
"""
In mathematics, divided differences is an algorithm,
historically used for computing tables of logarithms and trigonometric functions.
... Divided differences is a recursive division process.
The method can be used to calculate the coefficients in the
interpolation polynomial in the Newton form.
Newton_Divided()
Call this function to get your values.
"""
length=len(self.x_l)
overall=[self.y_l]
x_gap=1
def divisor(x_0,x_1,y_0,y_1):
y=(y_1-y_0)/(x_1-x_0)
return y
for i in range(length,1,-1):
local=[]
y_list_itrate=overall[-1]
for j in
import datetime
import os
import struct
from sys import version_info
import spats_shape_seq
from spats_shape_seq.mask import match_mask_optimized, Mask
# not currently used in spats, but potentially useful for tools
class FastqRecord(object):
def __init__(self):
self.recordNumber = 0
self.reset()
def reset(self):
self.identifier = None
self.tag = None
self.sequence = None
self.identifier2 = None
self.quality = None
def read(self, infile):
first = infile.readline()
if not first:
self.reset()
return False
self.parse([ first, infile.readline(), infile.readline(), infile.readline() ])
return True
def parse(self, lines):
self.identifier, self.tag = lines[0].lstrip('@').rstrip('\r\n').split(' ')
self.sequence = lines[1].rstrip('\r\n')
self.identifier2 = lines[2].rstrip('\r\n')
self.quality = lines[3].rstrip('\r\n')
def write(self, outfile, skiplen = 0):
outfile.write("@{} {}\n".format(self.identifier, self.tag))
for line in [ self.sequence[skiplen:], self.identifier2, self.quality[skiplen:] ]:
outfile.write(line)
outfile.write('\n')
def reverse_complement(self):
self.sequence = reverse_complement(self.sequence)
self.quality = self.quality[::-1]
class FastFastqParser(object):
def __init__(self, r1_path, r2_path, parse_quality = False):
self.r1_path = r1_path
self.r2_path = r2_path
self.parse_quality = parse_quality
def pair_length(self):
with open(self.r1_path, 'rt') as r1_in:
with open(self.r2_path, 'rt') as r2_in:
r1_in.readline()
r1_first = r1_in.readline().strip('\r\n')
r2_in.readline()
r2_first = r2_in.readline().strip('\r\n')
pair_length = len(r1_first)
if pair_length != len(r2_first):
print("Warning: pair length mismatch in R1 vs R2: {} / {}".format(pair_length, len(r2_first)))
return -1
return pair_length
def appx_number_of_pairs(self):
with open(self.r1_path, 'rt') as r1_in:
# the +1 is since first records tend to be short, and we'd rather underestimate than overestimate
frag_len = 1 + len(r1_in.readline()) + len(r1_in.readline()) + len(r1_in.readline()) + len(r1_in.readline())
return int(float(os.path.getsize(self.r1_path)) / float(frag_len))
def __enter__(self):
self.r1_in = open(self.r1_path, 'rt')
self.r2_in = open(self.r2_path, 'rt')
self.r1_iter = iter(self.r1_in)
self.r2_iter = iter(self.r2_in)
return self
def __exit__(self, type, value, traceback):
self.r1_in.close()
self.r2_in.close()
self.r1_in = None
self.r2_in = None
self.r1_iter = None
self.r2_iter = None
def iterator(self, batch_size):
while True:
batch = self.iterator_read(batch_size)
if batch:
yield batch
else:
return
# kept separate from other read fns for speed
def iterator_read(self, batch_size):
pairs = []
r1_iter = self.r1_iter
r2_iter = self.r2_iter
count = 0
include_quality = self.parse_quality
try:
while count < batch_size:
R1_id = next(r1_iter) #.split(' ')[0]
R1_seq = next(r1_iter).rstrip('\n\r')
next(r1_iter)
R1_q = next(r1_iter)
R2_id = next(r2_iter) #.split(' ')[0]
R2_seq = next(r2_iter).rstrip('\n\r')
next(r2_iter)
R2_q = next(r2_iter)
if 0 == count:
# good enough to just spot-check this, and improve parsing speed by skipping most of the time
R1_id = R1_id.split(' ')[0]
R2_id = R2_id.split(' ')[0]
if R1_id != R2_id:
raise Exception("Malformed input files, id mismatch: {} != {}".format(R1_id, R2_id))
if include_quality:
pairs.append((1, R1_seq, R2_seq, R1_id.split(' ')[0], R1_q.rstrip('\n\r'), R2_q.rstrip('\n\r')))
else:
pairs.append((1, R1_seq, R2_seq, str(count)))
count += 1
except StopIteration:
pass
return pairs
# returns a list of (id, r1, r2), of length <= max_num_pairs, len<max_num_pairs iff eof
def read(self, max_num_pairs):
pairs = []
count = 0
r1_iter = self.r1_iter
r2_iter = self.r2_iter
try:
while count < max_num_pairs:
R1_id = next(r1_iter).split(' ')[0]
R1_seq = next(r1_iter).rstrip('\n\r')
next(r1_iter)
next(r1_iter)
R2_id = next(r2_iter).split(' ')[0]
R2_seq = next(r2_iter).rstrip('\n\r')
next(r2_iter)
next(r2_iter)
if R1_id != R2_id:
raise Exception("Malformed input files, id mismatch: {} != {}".format(R1_id, R2_id))
pairs.append((R1_id.lstrip('@'), R1_seq, R2_seq))
count += 1
except StopIteration:
pass
return pairs, count
# returns a list of (numeric_id, r1, r2, original_id), of length <= max_num_pairs, len<max_num_pairs iff eof
# separate function in order to keep read() optimized for standard case
def read_nomask(self, max_num_pairs):
pairs = []
count = 0
r1_iter = self.r1_iter
r2_iter = self.r2_iter
try:
while count < max_num_pairs:
R1_numeric_id = int(next(r1_iter).strip('@\n\r'))
R1_seq = next(r1_iter).rstrip('\n\r')
R1_original_id = next(r1_iter).strip('+\n\r')
next(r1_iter)
R2_numeric_id = int(next(r2_iter).strip('@\n\r'))
R2_seq = next(r2_iter).rstrip('\n\r')
R2_original_id = next(r2_iter).strip('+\n\r')
next(r2_iter)
if R1_numeric_id != R2_numeric_id or R1_original_id != R2_original_id:
raise Exception("Malformed NOMASK files, id mismatch: ({},{}) != ({},{})".format(R1_numeric_id, R1_original_id, R2_numeric_id, R2_original_id))
pairs.append((R1_numeric_id, R1_seq, R2_seq, R1_original_id))
count += 1
except StopIteration:
pass
return pairs, count
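# ---------------------------------------------------------------------------
# A hedged usage sketch for FastFastqParser above (not in the original file).
# The R1/R2 file names are placeholders for any matched pair of FASTQ files.
# read() returns (pairs, count), where each pair is (id, r1_sequence,
# r2_sequence) and count < max_num_pairs signals end of file.
if __name__ == "__main__":
    with FastFastqParser("example_R1.fastq", "example_R2.fastq") as parser:
        pairs, count = parser.read(100)
        print("read {} pairs (appx. {} total in file)".format(
            count, parser.appx_number_of_pairs()))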
class FastqWriter(object):
def __init__(self, r1_path, r2_path):
self.r1_path = r1_path
self.r2_path = r2_path
self.r1_out = open(self.r1_path, 'w')
self.r2_out = open(self.r2_path, 'w')
def _write_seq(self, out, pair, seq, trim_handle):
out.write('{}{}\n{}\n+\n{}\n'.format('' if pair.identifier.startswith('@') else '@', pair.identifier,
seq.original_seq[pair.mask.length():] if trim_handle else seq.original_seq,
seq.quality[pair.mask.length():] if trim_handle else seq.quality))
def write(self, pair):
self._write_seq(self.r1_out, pair, pair.r1, trim_handle = True)
self._write_seq(self.r2_out, pair, pair.r2, trim_handle = False)
def close(self):
if self.r1_out:
self.r1_out.close()
self.r2_out.close()
self.r1_out = None
self.r2_out = None
class _MaskMatcher:
def __init__(self, masks):
if len(masks) == 2 and 'RRRY' in masks and 'YYYR' in masks:
self.match_mask = match_mask_optimized
else:
self.match_mask = self._match_mask
self.masks = []
for mask in masks:
self.masks.append(Mask(mask))
def _match_mask(self, seq):
for mask in self.masks:
if mask.matches(seq):
return mask.chars
return None
def fastq_handle_filter(r1_path, r2_path, masks = [ 'RRRY', 'YYYR' ], strip_mask = False, outpath = '.', counters = None):
# creates 4 files, one for each mask for r1_path and r2_path.
# output filenames are the originals with the mask name prefixed (nukes them if they exist already)
# returns the list of output files
result = None if len(masks) == 0 else []
r1of = {}
r2of = {}
def _channelFilename(combined_path, handle, outpath):
comboname = os.path.basename(combined_path)
if comboname.endswith(".tmp"):
comboname = comboname[:-4]
return os.path.abspath(os.path.join(outpath, handle + '-' + comboname))
for mask in masks:
if len(mask) == 0:
raise Exception("handle_filter cannot be used with a handle/mask length of 0.")
r1fpath = _channelFilename(r1_path, mask, outpath)
r1of[mask] = open(r1fpath, 'w+')
result.append(r1fpath)
r2fpath = _channelFilename(r2_path, mask, outpath)
r2of[mask] = open(r2fpath, 'w+')
result.append(r2fpath)
mm = _MaskMatcher(masks)
try:
fqr1 = FastqRecord()
fqr2 = FastqRecord()
with open(r1_path, 'r') as r1if, open(r2_path, 'r') as r2if:
while fqr1.read(r1if) and fqr2.read(r2if):
mask = mm.match_mask(fqr1.sequence)
if mask:
striplen = len(mask) if strip_mask else 0
fqr1.write(r1of[mask], striplen)
fqr2.write(r2of[mask])
if counters:
counters.increment_key(mask)
elif counters:
counters.increment_key('no_mask')
finally:
for mask in masks:
r1of[mask].close()
r2of[mask].close()
return result
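# ---------------------------------------------------------------------------
# A hedged usage sketch for fastq_handle_filter above (not in the original
# file). With the default masks it splits a pair of FASTQ files into one
# output pair per handle (e.g. RRRY-<name>, YYYR-<name>); the input file
# names here are placeholders.
if __name__ == "__main__":
    outputs = fastq_handle_filter("example_R1.fastq", "example_R2.fastq",
                                  masks=["RRRY", "YYYR"],
                                  strip_mask=True,
                                  outpath=".")
    print("wrote:", outputs)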
def fasta_parse(target_path):
pairs = []
with open(target_path, 'rt') as infile:
def nextline():
while True:
l = infile.readline()
if len(l) == 0:
return l
l = l.strip('>\r\n')
if 0 < len(l):
return l
while True:
name = nextline()
if not name:
break
seq = nextline()
if name and seq:
pairs.append((name.strip(), seq))
return pairs
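# ---------------------------------------------------------------------------
# A short usage sketch for fasta_parse above (not in the original file): it
# returns a list of (name, sequence) tuples, one per FASTA record. The file
# name below is a placeholder.
if __name__ == "__main__":
    for name, seq in fasta_parse("targets.fa"):
        print("{}: {} nt".format(name, len(seq)))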
class SamRecord(object):
def parse(self, line):
bits = line.split("\t")
if len(bits) < 6:
self.identifier = None
return
self.identifier = bits[0]
self.flags = int(bits[1])
self.target_name = bits[2]
self.left = int(bits[3]) - 1 # TODO: subtract 1 is required, explain
self.quality = int(bits[4])
if self.target_name == '*':
self.left = -1
self.target_name = None
self.right = -1
return
lengthPart = bits[5][:-1]
self.length = int(lengthPart if len(lengthPart) > 0 else 0)
if self.length > 0 and "M" != bits[5][-1]:
raise Exception("Expected M on SAM length field, got: {}".format(bits[5]))
# rest of bits are not used afaict
self.right = self.left + self.length
def dump(self):
return '\t'.join([self.identifier, str(self.flags), self.target_name or '*', str(self.left + 1), self.quality,
'{}M'.format(self.right - self.left) if self.target_name else '0', '=' if self.target_name else '*',
# TODO: last three vals
'?', '?', 'SEQ'])
class SamWriter(object):
def __init__(self, path, targets, write_failures = True):
self.write_failures = write_failures
self.path = path
self.sam_out = open(self.path, 'w')
self._write_header(targets)
def _write_header(self, targets):
self.sam_out.write('@HD VN:1.0 SO:unsorted\n')
for t in targets:
self.sam_out.write('@SQ SN:{} LN:{}\n'.format(t.name, t.n))
self.sam_out.write('@PG ID:spats_shape_seq VN:{} CL:"spats_tool run"\n'.format(spats_shape_seq._VERSION))
def write(self, pair):
qname = pair.identifier
r2_seq = pair.r2.subsequence
r1_seq = pair.r1.reverse_complement
r2_q = pair.r2.subquality
r1_q = pair.r1.reverse_quality
if pair.failure:
r2_flag = 141
r1_flag = 77
rname = r1_cigar = r2_cigar = rnext = '*'
r2_pos = r1_pos = mapq = r1_pnext = r2_pnext = r2_tlen = r1_tlen = 0
alignment = 'XM:i:0'
if self.write_failures:
alignment += ' f:Z:{}'.format(pair.failure)
r1_align = r2_align = alignment
else:
r2_flag = 163
r1_flag = 83
rname = pair.target.name
r2_pos = pair.r2.left + 1
r1_pos = pair.r1.left + 1
mapq = 255
r2_cigar = '{}M'.format(len(r2_seq))
r1_cigar = '{}M'.format(len(r1_seq))
rnext = '='
r2_pnext = pair.r1.left + 1
r1_pnext = pair.r2.left + 1
r2_tlen = pair.length
r1_tlen = 0 - pair.length
r2_align = 'XA:i:0 MD:Z:{} NM:i:0'.format(len(r2_seq))
r1_align = 'XA:i:0 MD:Z:{} NM:i:0'.format(len(r1_seq))
for row in ( [ qname, r2_flag, rname, r2_pos, mapq, r2_cigar, rnext, r2_pnext, r2_tlen, r2_seq, r2_q, r2_align ],
[ qname, r1_flag, rname, r1_pos, mapq, r1_cigar, rnext, r1_pnext, r1_tlen, r1_seq, r1_q, r1_align ] ):
self.sam_out.write('\t'.join([ str(x) for x in row ]))
self.sam_out.write('\n')
def close(self):
if self.sam_out:
self.sam_out.close()
self.sam_out = None
class SamParser(object):
def __init__(self, path, target_map):
self.sam_path = path
self.target_map = target_map
def __enter__(self):
self.sam_in = open(self.sam_path, 'rt')
self.sam_iter = iter(self.sam_in)
return self
def __exit__(self, type, value, traceback):
self.sam_in.close()
self.sam_in = None
self.sam_iter = None
# returns a list of (target, site, end, mask, numeric_id) -- the 'mask' is convenience to
is None or len(points)<1 :
self._cpoints = zeros((0,2))
else :
self._cpoints = points
self._update()
@property
def dpoints(self):
u"""
The discretized points; they are recomputed
- if _update() has been called (_dpoints was deleted), or
- if self.precision has changed.
If dpoints are wanted at abscissae T=(t1,...tn), call X=self(T)
directly rather than X=self.dpoints.
:return : ndarray((self.precision, 2)), the discretization points
"""
if not hasattr(self, '_dpoints') : # self._dpoints was deleted
T = linspace(0.0, 1.0, self.nbpd)
# if sx and sy do not exist, self(T) recomputes them
self._dpoints = self(T)
try : del self._dac
except AttributeError : pass
return self._dpoints
# @property
# def dac(self):
# u"""
# les abscisses curvilignes normalisees du POLYGONE dpoints
# """
# if not hasattr(self, '_dac') :
# self._dac = absCurv(self.dpoints, True)
# return self._dac
@property
def epoints(self):
u"""points échantillonnés, sous forme ndarray((n,2))"""
if not hasattr(self,'_epoints') :
self._epoints = self(self.tech)
return self._epoints
@property
def tech(self):
u"""Les parametres T de l'echantillonnage"""
if not hasattr(self, '_tech') :
self.echantillonner()
return self._tech
@property
def sx(self):
u"""La spline en x, recalculée si _sx n'existe pas"""
if not hasattr(self, '_sx') or self._sx is None:
if len(self._cpoints)>=2 :
_, self._sx, self._sy = computeSpline(self.cpoints, self.methode)
else : self._sx, self._sy = None, None
return self._sx
@property
def sy(self):
u"""La spline en y, recalculée si _sy n'existe pas"""
if not hasattr(self, '_sy') or self._sy is None:
if len(self._cpoints)>=2 :
_, self._sx, self._sy = computeSpline(self.cpoints, self.methode)
else : self._sx, self._sy = None, None
return self._sy
@property
def nbpe(self):return self._nbpe
@nbpe.setter
def nbpe(self,ne):
if ne==self._nbpe :
return
else :
self._nbpe = ne
try : del self._tech
except AttributeError : pass
try : del self._epoints
except AttributeError : pass
@property
def nbpd(self):
"""nb points discrétisation => self._dpoints"""
return self._nbpd
@nbpd.setter
def nbpd(self,nd):
if nd==self._nbpd :
return
else :
self._nbpd = nd
try : del self._dpoints
except AttributeError : pass
try : del self._dac
except AttributeError : pass
precision=nbpd
@property
def knots(self):
u"""Ce sont les 'knots' au sens des splines sx et sy,
i.e. les pseudo-abscisses curvilignes de self
i.e. les T tels que self.sx(T[i]),self.sy(T[i]) = _cpoints[k]
i.e. les abs. curv. des points du POLYGONE self._cpoints
Ils sont stockés dans sx.XXX et sy.XXX"""
try : return self.sx.x  # if methode == ('cubic','xx')
except AttributeError as msg1:
# debug(str(msg1))
try :return self.sx._data[0]
except AttributeError as msg2 :
# debug(str(msg2))
try : return self.sx.get_knots()
except AttributeError as msg3 :
# debug(str(msg3))
return zeros((0,))
def _update(self):
u"""
Deletes all the volatile self._xxx attributes; they are
recomputed on demand, i.e. when self.xxx is accessed.
"""
u'''
Called on every modification of
- (the geometry of) a control point of the spline
- or the underlying PolygonF
- or the spline method (cubic, IUS, US, ...), or the derivatives at the endpoints
- or the sampling mode.
Strictly private, do not call it from outside.
'''
# try : del self._qcpolygon
# except AttributeError : pass
# try : del self._qepolygon
# except AttributeError : pass
try : del self._epoints
except AttributeError : pass
try : del self._dpoints
except AttributeError : pass
try : del self._height
except AttributeError : pass
try : del self._width
except AttributeError : pass
try : del self._longueur
except AttributeError : pass
try : del self._dac
except AttributeError : pass
try : del self._sx
except AttributeError : pass
try : del self._sy
except AttributeError : pass
try : del self._width
except AttributeError : pass
try : del self._height
except AttributeError : pass
def isClosed(self, eps=0.0):
try : return dist2(self[0], self[-1]) <= eps*eps
except IndexError : return None
def close(self):
"""
Closes self._cpoints.
Called automatically if self.methode[0] = 'periodic'
:return : int,
# 0 if nothing was touched,
# 1 if self[-1] was slightly adjusted (<= self.eps)
# 2 if a point self[-1]=self[0] was appended
"""
if self.isClosed(0) :
msg = u" cpoints already closed"
elif dist2(self[0],self[-1]) <= self.eps:
# almost closed, no point is added
p1, p2 = self[0], self[-1]
self._cpoints[-1] = self._cpoints[0]
msg = u""" the points self[0]=%s and self[-1]=%s are almost identical.
They are at a distance d=%.3g
=> self[-1] is (slightly) modified.
"""%(p1,p2, dist(p1,p2))
else :
# a point is appended
p1, p2 = self[0], self[-1]
# NOTE: the original line was `self._cpoint.append[self[0]]`, which cannot work
# (missing 's', brackets instead of parentheses); assuming this module's usual
# numpy star-import, the intended row append can be written as:
self._cpoints = vstack((self._cpoints, [self._cpoints[0]]))
msg = u""" The points self[0]=%s and self[-1]=%s are distinct.
They are at a distance d=%.3g
=> the point self[0] has been appended at the end of the spline.
"""%(p1, p2, dist(p1,p2))
debug(msg)
# def echantillonner(self):
# u"""
# Méthode usuelle d'échantillonnage.
# Utiliser cette méthode pour échantillonner avec les paramètres de self
# et conserver le résultat dans self.epoints, self.tech.
# :utilisation :
# >>> S.mode='courbure'
# >>> S.nbpe=27
# >>> e = S.echantillonner()
# >>> #e et S.epoints contiennent les points echantillonnés
# >>> #les parametres des points d'echantillonnage sont dans S.tech
# >>> T = S.echantillonner(True)
# >>> # T contient les parametres des points d'echantillonnage
# >>> # S.epoints contient les points echantillonnés
# """
# return self._echantillonner(self.nbpe, self.mode, 0, 1)
def echantillonner(self, nbp=0, mode=None, ta=0, tb=1):
u"""
Distribution (sampling) of nbp points on the spline self,
between the abscissae ta and tb, according to the mode given by 'mode'.
*deletes self._epoints and modifies self._tech*
:return : the times T=ndarray(shape=(n,)) of the sampled points,
i.e. the T[i] such that self.epoints[i]=self(t[i])
:param mode : str or unicode, None by default
- if mode==None => mode = self.mode
- if mode is not None => self.mode = mode
- if mode=='linear', the sampling points are evenly
distributed along the whole spline
- if mode=='rayon' or 'courbure', the point density is
approximately proportional to the curvature.
- if mode=='cpoints' : returns the times of the control points.
- if mode=='telkel' : ta must be an ndarray((n,)) of times
of the sampled points; returns self(ta).
:param nbp : int, number of sampling points.
- If mode='telkel' or 'cpoints', this parameter is not used.
- In every other case it is required, nbp>0
:param ta : float in [0,1] or ndarray((n,1)) as returned by
self.absCurv()
- optional, ta=0 by default
- the curvilinear abscissa of the first sampling point
:param tb : float in [0,1] or ndarray((n,1)) as returned by
self.absCurv()
- optional, tb=1 by default
- the curvilinear abscissa of the last sampling point
"""
if mode is None : mode = self._mode
else : self._mode = mode
if nbp==0 : nbp = self._nbpe
else : self._nbpe = nbp
# delete _epoints, which will be recomputed from the _tech computed here
try : del self._epoints
except AttributeError : pass
if mode == 'cos' :
# Points packed at the start and the end, sparse in the middle
C = cos(linspace(pi, 2*pi, nbp))
T = 0.5*(1+C)
if (ta, tb) == (0,1) : self._tech = T
else : self._tech = ta + (tb-ta)*T
elif mode == 'x3' :
# Points packed in the middle, sparse at the start and the end
T = linspace(-1, 1, nbp)**3
T = 0.5*(1+T)
if (ta, tb) == (0,1) : self._tech = T
else : self._tech = ta + (tb-ta)*T
elif mode in ('linear', 'lineaire', 'lin') :
self._tech = linspace(ta, tb, nbp)  # including ta and tb
elif mode in ('telkel', ) :
self._tech = ta
return self._tech
# raise NotImplementedError, u"'%s' mode d'echantillonnage inutile, retourne ta"%mode
elif mode in ('cpoints') :
self._tech = self.knots.copy()
return self._tech
# raise NotImplementedError, u"'%s' mode d'echantillonnage obsolete, utiliser self.knots"%mode
elif mode in ('rayon', 'courbure') :
u"""On ne touche plus à RIEN !!!
Calcul des points d'échantillonnage, de sorte que la densité de points
soit localement proportionnelle à la courbure
- la "COURBURE ABSOLUE" en un point t est ca(t) = sqrt(abs(self.courbure(t)))
On la calcule en N points d'une discrétisation fine T, elle se trouve dans CA
- La SINUOSITÉ sur un micro-intervalle [t, t+dt] est l'intégrale de t à t+dt de ca(s)ds
On la calcule en chaque micro-intervalle de la discrétisation fine, elle est dans S
La SINUOSITÉ TOTALE est la sinuosité sur [a,b], c'est la somme des sinuosités des micro-intervalles
- On partage l'intervalle [ta,tb] en nbp-1 intervalles (de largeur variable)
ta=te[0]<te[1]<te[2]<...<te[nbp-1]=tb;
tels que sur chaque intervalle j, la sinuosité sj soit constante=s0=SINUOSITÉ | |
& 7, self.pmm[5] >> 3 & 7, self.pmm[5] >> 6
timeout = 302.1E-6 * ((b + 1) * len(block_list) + a + 1) * 4**e
data = bytearray([
len(service_list)]) \
+ b''.join([sc.pack() for sc in service_list]) \
+ bytearray([len(block_list)]) \
+ b''.join([bc.pack() for bc in block_list])
log.debug("read w/o encryption service/block list: {0} / {1}".format(
' '.join([hexlify(sc.pack()).decode() for sc in service_list]),
' '.join([hexlify(bc.pack()).decode() for bc in block_list])))
data = self.send_cmd_recv_rsp(0x06, data, timeout)
if len(data) != 1 + len(block_list) * 16:
log.debug("insufficient data received from tag")
raise Type3TagCommandError(DATA_SIZE_ERROR)
return data[1:]
def read_from_ndef_service(self, *blocks):
"""Read block data from an NDEF compatible tag.
This is a convenience method to read block data from a tag
that has system code 0x12FC (NDEF). For other tags this method
simply returns :const:`None`. All arguments are block numbers
to read. To actually pass a list of block numbers requires
unpacking. The following example calls would have the same
effect of reading 32 bytes of data from blocks 1 and 8.::
data = tag.read_from_ndef_service(1, 8)
data = tag.read_from_ndef_service(*[1, 8])
Command execution errors raise :exc:`~nfc.tag.TagCommandError`.
"""
if self.sys == 0x12FC:
sc_list = [ServiceCode(0, 0b001011)]
bc_list = [BlockCode(n) for n in blocks]
return self.read_without_encryption(sc_list, bc_list)
def write_without_encryption(self, service_list, block_list, data):
"""Write data blocks to unencrypted services.
This method sends a Write Without Encryption command to the
tag. The data blocks to overwrite are indicated by a sequence
of :class:`~nfc.tag.tt3.BlockCode` objects in the parameter
*block_list*. Each block code must reference one of the
:class:`~nfc.tag.tt3.ServiceCode` objects in the iterable
*service_list*. If any of the blocks or services do not exist,
the tag will stop processing at that point and return a two
byte error status. The status bytes become the
:attr:`~nfc.tag.TagCommandError.errno` value of the
:exc:`~nfc.tag.TagCommandError` exception. The *data* to write
must be a byte string or array of length ``16 *
len(block_list)``.
As an example, the following code writes ``16 * "\\xAA"`` to
block 5 of service 16, ``16 * "\\xBB"`` to block 0 of service
80 and ``16 * "\\xCC"`` to block 1 of service 80 (all services
are writeable without key)::
sc1 = nfc.tag.tt3.ServiceCode(16, 0x09)
sc2 = nfc.tag.tt3.ServiceCode(80, 0x09)
bc1 = nfc.tag.tt3.BlockCode(5, service=0)
bc2 = nfc.tag.tt3.BlockCode(0, service=1)
bc3 = nfc.tag.tt3.BlockCode(1, service=1)
sc_list = [sc1, sc2]
bc_list = [bc1, bc2, bc3]
data = 16 * "\\xAA" + 16 * "\\xBB" + 16 * "\\xCC"
try:
data = tag.write_without_encryption(sc_list, bc_list, data)
except nfc.tag.TagCommandError as e:
if e.errno > 0x00FF:
print("the tag returned an error status")
else:
print("command failed with some other error")
Command execution errors raise :exc:`~nfc.tag.TagCommandError`.
"""
a, b, e = self.pmm[6] & 7, self.pmm[6] >> 3 & 7, self.pmm[6] >> 6
timeout = 302.1E-6 * ((b + 1) * len(block_list) + a + 1) * 4**e
data = bytearray([
len(service_list)]) \
+ b"".join([sc.pack() for sc in service_list]) \
+ bytearray([len(block_list)]) \
+ b"".join([bc.pack() for bc in block_list]) \
+ bytearray(data)
log.debug("write w/o encryption service/block list: {0} / {1}".format(
' '.join([hexlify(sc.pack()).decode() for sc in service_list]),
' '.join([hexlify(bc.pack()).decode() for bc in block_list])))
self.send_cmd_recv_rsp(0x08, data, timeout)
def write_to_ndef_service(self, data, *blocks):
"""Write block data to an NDEF compatible tag.
This is a convenience method to write block data to a tag that
has system code 0x12FC (NDEF). For other tags this method
simply does nothing. The *data* to write must be a string or
bytearray with length equal ``16 * len(blocks)``. All
parameters following *data* are interpreted as block numbers
to write. To actually pass a list of block numbers requires
unpacking. The following example calls would have the same
effect of writing 32 zero bytes into blocks 1 and 8.::
tag.write_to_ndef_service(32 * "\\0", 1, 8)
tag.write_to_ndef_service(32 * "\\0", *list(1, 8))
Command execution errors raise :exc:`~nfc.tag.TagCommandError`.
"""
if self.sys == 0x12FC:
sc_list = [ServiceCode(0, 0b001001)]
bc_list = [BlockCode(n) for n in blocks]
self.write_without_encryption(sc_list, bc_list, data)
def send_cmd_recv_rsp(self, cmd_code, cmd_data, timeout,
send_idm=True, check_status=True):
"""Send a command and receive a response.
This low level method sends an arbitrary command with the
8-bit integer *cmd_code*, followed by the captured tag
identifier (IDm) if *send_idm* is :const:`True` and the byte
string or bytearray *cmd_data*. It then waits *timeout*
seconds for a response, verifies that the response is
correctly formatted and, if *check_status* is :const:`True`,
that the status flags do not indicate an error.
All errors raise a :exc:`~nfc.tag.TagCommandError`
exception. Errors from response status flags produce an
:attr:`~nfc.tag.TagCommandError.errno` that is greater than
255, all other errors are below 256.
"""
idm = self.idm if send_idm else bytearray()
cmd = bytearray([2+len(idm)+len(cmd_data), cmd_code]) + idm + cmd_data
log.debug(">> {0:02x} {1:02x} {2} {3} ({4}s)".format(
cmd[0], cmd[1], hexlify(cmd[2:10]).decode(),
hexlify(cmd[10:]).decode(), timeout))
started = time.time()
error = None
for retry in range(3):
try:
rsp = self.clf.exchange(cmd, timeout)
break
except nfc.clf.CommunicationError as e:
error = e
reason = error.__class__.__name__
log.debug("%s after %d retries" % (reason, retry))
else:
if type(error) is nfc.clf.TimeoutError:
raise Type3TagCommandError(nfc.tag.TIMEOUT_ERROR)
if type(error) is nfc.clf.TransmissionError:
raise Type3TagCommandError(nfc.tag.RECEIVE_ERROR)
if type(error) is nfc.clf.ProtocolError: # pragma: no branch
raise Type3TagCommandError(nfc.tag.PROTOCOL_ERROR)
if rsp[0] != len(rsp):
log.debug("incorrect response length {0:02x}".format(rsp[0]))
raise Type3TagCommandError(RSP_LENGTH_ERROR)
if rsp[1] != cmd_code + 1:
log.debug("incorrect response code {0:02x}".format(rsp[1]))
raise Type3TagCommandError(RSP_CODE_ERROR)
if send_idm and rsp[2:10] != self.idm:
log.debug("wrong tag or transaction id {}".format(
hexlify(rsp[2:10]).decode()))
raise Type3TagCommandError(TAG_IDM_ERROR)
if not send_idm:
log.debug("<< {0:02x} {1:02x} {2}".format(
rsp[0], rsp[1], hexlify(rsp[2:]).decode()))
return rsp[2:]
if check_status and rsp[10] != 0:
log.debug("tag returned error status {}".format(
hexlify(rsp[10:12]).decode()))
raise Type3TagCommandError(unpack(">H", rsp[10:12])[0])
if not check_status:
log.debug("<< {0:02x} {1:02x} {2} {3}".format(
rsp[0], rsp[1], hexlify(rsp[2:10]).decode(),
hexlify(rsp[10:]).decode()))
return rsp[10:]
log.debug("<< {0:02x} {1:02x} {2} {3} {4} ({elapsed:f}s)".format(
rsp[0], rsp[1], hexlify(rsp[2:10]).decode(),
hexlify(rsp[10:12]).decode(), hexlify(rsp[12:]).decode(),
elapsed=time.time()-started))
return rsp[12:]
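# ---------------------------------------------------------------------------
# A hedged usage sketch (not part of the original module) showing how the
# NDEF convenience wrappers above are typically driven from nfcpy. The
# 'on-connect' callback receives an activated tag; if it is a Type3Tag whose
# system code is 0x12FC, blocks 1 and 2 of the NDEF service are read. The
# reader path 'usb' and the block numbers are illustrative assumptions.
if __name__ == "__main__":
    import nfc
    def on_connect(tag):
        if isinstance(tag, Type3Tag) and tag.sys == 0x12FC:
            data = tag.read_from_ndef_service(1, 2)
            print(hexlify(data).decode())
        return True
    with nfc.ContactlessFrontend('usb') as clf:
        clf.connect(rdwr={'on-connect': on_connect})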
class Type3TagEmulation(nfc.tag.TagEmulation):
"""Framework for Type 3 Tag emulation.
"""
def __init__(self, clf, target):
self.services = dict()
self.target = target
self.cmd = bytearray([len(target.tt3_cmd)+1]) + target.tt3_cmd
self.idm = target.sensf_res[1:9]
self.pmm = target.sensf_res[9:17]
self.sys = target.sensf_res[17:19]
self.clf = clf
def __str__(self):
"""x.__str__() <==> str(x)"""
return "Type3TagEmulation IDm={id} PMm={pmm} SYS={sys}".format(
id=hexlify(self.idm).decode(),
pmm=hexlify(self.pmm).decode(),
sys=hexlify(self.sys).decode())
def add_service(self, service_code, block_read_func, block_write_func):
def default_block_read(block_number, rb, re):
return None
def default_block_write(block_number, block_data, wb, we):
return False
if block_read_func is None:
block_read_func = default_block_read
if block_write_func is None:
block_write_func = default_block_write
self.services[service_code] = (block_read_func, block_write_func)
def process_command(self, cmd):
log.debug("cmd: %s", hexlify(cmd).decode() if cmd else str(cmd))
if len(cmd) != cmd[0]:
log.error("tt3 command length error")
return None
if tuple(cmd[0:4]) in [(6, 0, 255, 255), (6, 0) + tuple(self.sys)]:
log.debug("process 'polling' command")
rsp = self.polling(cmd[2:])
return bytearray([2 + len(rsp), 0x01]) + rsp
if cmd[2:10] == self.idm:
if cmd[1] == 0x04:
log.debug("process 'request response' command")
rsp = self.request_response(cmd[10:])
return bytearray([10 + len(rsp), 0x05]) + self.idm + rsp
if cmd[1] == 0x06:
log.debug("process 'read without encryption' command")
rsp = self.read_without_encryption(cmd[10:])
return bytearray([10 + len(rsp), 0x07]) + self.idm + rsp
if cmd[1] == 0x08:
log.debug("process 'write without encryption' command")
rsp = self.write_without_encryption(cmd[10:])
return bytearray([10 + len(rsp), 0x09]) + self.idm + rsp
if cmd[1] == 0x0C:
log.debug("process 'request system code' command")
rsp = self.request_system_code(cmd[10:])
return bytearray([10 + len(rsp), 0x0D]) + self.idm + rsp
def send_response(self, rsp, timeout):
log.debug("rsp: {}".format(hexlify(rsp).decode()
if rsp is not None
else 'None'))
return self.clf.exchange(rsp, timeout)
def polling(self, cmd_data):
if cmd_data[2] == 1:
rsp = self.idm + self.pmm + self.sys
else:
rsp = self.idm + self.pmm
return rsp
def request_response(self, cmd_data):
return bytearray([0])
def read_without_encryption(self, cmd_data):
service_list = cmd_data.pop(0) * [[None, None]]
for i in range(len(service_list)):
service_code = cmd_data[1] << 8 | cmd_data[0]
if service_code not in self.services.keys():
return bytearray([0xFF, 0xA1])
service_list[i] = [service_code, 0]
del cmd_data[0:2]
service_block_list = cmd_data.pop(0) * [None]
if len(service_block_list) > 15:
return bytearray([0xFF, 0xA2])
for i in range(len(service_block_list)):
try:
service_list_item = service_list[cmd_data[0] & 0x0F]
service_code = service_list_item[0]
service_list_item[1] += 1
except IndexError:
return bytearray([1 << (i % 8), 0xA3])
if cmd_data[0] >= 128:
block_number = cmd_data[1]
del cmd_data[0:2]
else:
block_number = cmd_data[2] << 8 | cmd_data[1]
del cmd_data[0:3]
service_block_list[i] = [service_code, block_number, 0]
service_block_count = dict(service_list)
for service_block_list_item in service_block_list:
service_code = service_block_list_item[0]
service_block_list_item[2] = service_block_count[service_code]
block_data = bytearray()
for i, service_block_list_item in enumerate(service_block_list):
service_code, block_number, block_count = service_block_list_item
# rb (read begin) and re
{
"109132": ("Joint position method", []),
},
"JointVarianceOfGLCM": {
"128783": ("Joint Variance of GLCM", []),
},
"JuvenilePapillomatosis": {
"111277": ("Juvenile papillomatosis", [6030, 6031]),
},
"KVP": {
"113733": ("KVP", []),
},
"KeepVeinOpenEnded": {
"130162": ("Keep vein open ended", [71]),
},
"KeepVeinOpenStarted": {
"130161": ("Keep vein open started", [71]),
},
"Kep": {
"126313": ("kep", [218, 4107, 7180, 7469]),
},
"Keratometry": {
"KER": ("Keratometry", [29, 30, 33]),
},
"KeratometryMeasurementsSOPInstance": {
"111757": ("Keratometry Measurements SOP Instance", [4240]),
},
"KerleyALine": {
"112109": ("Kerley A line", [6102, 6103]),
},
"KerleyBLine": {
"112110": ("Kerley B line", [6102, 6103]),
},
"KerleyCLines": {
"112111": ("Kerley C lines", [6102, 6103]),
},
"KeyImages": {
"121180": ("Key Images", []),
},
"KeyObjectDescription": {
"113012": ("Key Object Description", []),
},
"KeyObjectSelection": {
"KO": ("Key Object Selection", [32, 33]),
},
"KidneyStent": {
"112175": ("<NAME>ent", [6102, 6138, 6202, 6203, 6404, 7151, 7193]),
},
"KnownBiopsyProvenMalignancy": {
"111125": ("Known biopsy proven malignancy", [6051]),
},
"KnownBiopsyProvenMalignancyTakeAppropriateAction": {
"111122": (
"Known biopsy proven malignancy - take appropriate action",
[6028, 6029],
),
},
"Ktrans": {
"126312": ("Ktrans", [218, 4107, 7180, 7469]),
},
"Kurtosis": {
"126052": ("Kurtosis", [7464]),
},
"KurtosisDiffusionCoefficient": {
"113294": ("Kurtosis Diffusion Coefficient", [218, 7180, 7272, 7469]),
},
"KurtosisDiffusionModel": {
"113252": ("Kurtosis diffusion model", [7273]),
},
"LADRegionInLAOProjection": {
"122470": ("LAD Region in LAO Projection", [3461]),
},
"LADRegionInRAOProjection": {
"122464": ("LAD Region in RAO Projection", [3460]),
},
"LBWOrIUGR": {
"111593": ("LBW or IUGR", []),
},
"LDLCholesterolScoreSheetForMen": {
"122233": ("LDL Cholesterol Score Sheet for Men", [3668]),
},
"LDLCholesterolScoreSheetForWomen": {
"122234": ("LDL Cholesterol Score Sheet for Women", [3668]),
},
"LSDI": {
"113225": ("LSDI", [7260]),
},
"LVWallMotionScoreIndex": {
"125202": ("LV Wall Motion Score Index", []),
},
"LWHMethodForVolumeOfEllipsoid": {
"126029": ("LWH method for volume of ellipsoid", [7474]),
},
"LabelType": {
"113606": ("Label Type", []),
},
"LactatingAdenoma": {
"111278": ("Lactating adenoma", []),
},
"LactationalChange": {
"111279": ("Lactational change", [6030, 6031]),
},
"LandmarkBasedWallMotionAnalysis": {
"122497": ("Landmark Based Wall Motion Analysis", []),
},
"Language": {
"121045": ("Language", []),
},
"LanguageOfContentItemAndDescendants": {
"121049": ("Language of Content Item and Descendants", []),
},
"LanguageOfNameAndValue": {
"121048": ("Language of Name and Value", []),
},
"LanguageOfValue": {
"121047": ("Language of Value", []),
},
"Laparoscopy": {
"LP": ("Laparoscopy", []),
},
"LargeDuctPapilloma": {
"111281": ("Large duct papilloma", [6030, 6031]),
},
"LargeGreaterThan38Point0cmLateralThickness": {
"113603": ("Large: > 38.0 cm lateral thickness", [7041, 7042]),
},
"LargeZoneEmphasis": {
"128822": ("Large Zone Emphasis", []),
},
"LargeZoneHighGrayLevelEmphasis": {
"128828": ("Large Zone High Gray Level Emphasis", []),
},
"LargeZoneLowGrayLevelEmphasis": {
"128827": ("Large Zone Low Gray Level Emphasis", []),
},
"LaserCrossHairs": {
"128151": ("Laser Cross-hairs", [1015, 9573, 9575]),
},
"LaserScan": {
"LS": ("Laser Scan", [29, 30, 33]),
},
"LaserScanning": {
"114203": ("Laser scanning", [8201]),
},
"LaserScanning3DCAMModel": {
"129021": ("Laser Scanning 3D CAM model", [7061]),
},
"LaserSurfaceScan": {
"LS": ("Laser Surface Scan", [9573, 9575]),
},
"LastChordOfAbnormalRegion": {
"122453": ("Last Chord of Abnormal Region", []),
},
"LateChildBearingAfter30": {
"111555": ("Late child bearing (after 30)", [6081, 6087]),
},
"LateContrastEnhancement": {
"122664": ("Late Contrast Enhancement", [6335, 6345, 6346]),
},
"LeadID": {
"122148": ("Lead ID", []),
},
"LeadRadiologicTechnologist": {
"128674": ("Lead Radiologic Technologist", [7450, 7452, 9536]),
},
"LeafPairs": {
"130331": ("Leaf Pairs", [9541]),
},
"LeastMeanSquareLMSDeconvolution": {
"126310": ("Least Mean Square (LMS) deconvolution", [4105, 4106]),
},
"LeastSquaresFitOfMultipleSamples": {
"113261": ("Least squares fit of multiple samples", [7274]),
},
"LeftAtrialEDVolume": {
"122407": ("Left Atrial ED Volume", [3468, 3835]),
},
"LeftAtrialESVolume": {
"122408": ("Left Atrial ES Volume", [3469, 3835]),
},
"LeftAtrialEjectionFractionByAngiography": {
"122406": ("Left Atrial Ejection Fraction by Angiography", [3467]),
},
"LeftContour": {
"122507": ("Left Contour", []),
},
"LeftEyeRx": {
"111689": ("Left Eye Rx", []),
},
"LeftFirst": {
"126830": ("left first", [21]),
},
"LeftHemisphereMostAnterior": {
"125035": ("Left Hemisphere Most Anterior", [7101, 7111]),
},
"LeftHemisphereMostInferior": {
"125038": ("Left Hemisphere Most Inferior", [7101, 7111]),
},
"LeftHemisphereMostPosterior": {
"125036": ("Left Hemisphere Most Posterior", [7101, 7111]),
},
"LeftHemisphereMostSuperior": {
"125037": ("Left Hemisphere Most Superior", [7101, 7111]),
},
"LeftRightAxis": {
"110860": ("Left-Right Axis", [7184]),
},
"LeftToRight": {
"110867": ("Left To Right", [7185]),
},
"LeftToRightFlow": {
"122227": ("Left to Right Flow", []),
},
"LeftVentricleApicalAnterolateralSegment": {
"130623": ("left ventricle apical anterolateral segment", [3781, 3785, 12305]),
},
"LeftVentricleApicalAnteroseptalSegment": {
"130620": ("left ventricle apical anteroseptal segment", [3781, 3785, 12305]),
},
"LeftVentricleApicalInferolateralSegment": {
"130622": ("left ventricle apical inferolateral segment", [3781, 3785, 12305]),
},
"LeftVentricleApicalInferoseptalSegment": {
"130621": ("left ventricle apical inferoseptal segment", [3781, 3785, 12305]),
},
"LeftVentricleMassByAreaLength": {
"125270": ("Left Ventricle Mass by Area Length", [12227, 12232]),
},
"LeftVentricleMassByAreaLengthAdjustedByHeight": {
"125273": (
"Left Ventricle Mass by Area Length - adjusted by Height",
[12227, 12232],
),
},
"LeftVentricleMassByMMode": {
"125221": ("Left Ventricle Mass by M-mode", [12227, 12232]),
},
"LeftVentricleMassByMModeAdjustedByHeight": {
"125271": (
"Left Ventricle Mass by M-mode - adjusted by Height",
[12227, 12232],
),
},
"LeftVentricleMassByTruncatedEllipse": {
"125222": ("Left Ventricle Mass by Truncated Ellipse", [12227, 12232]),
},
"LeftVentricleMassByTruncatedEllipseAdjustedByHeight": {
"125272": (
"Left Ventricle Mass by Truncated Ellipse - adjusted by Height",
[12227, 12232],
),
},
"LeftVentricularEjectionTime": {
"122211": ("Left Ventricular ejection time", [3613]),
},
"LeftVentricularFillingTime": {
"122212": ("Left Ventricular filling time", [3613]),
},
"LeftVentricularPosterobasalSegment": {
"122582": ("Left ventricular posterobasal segment", []),
},
"LengthLuminalSegment": {
"122510": ("Length Luminal Segment", []),
},
"LengthOfReconstructableVolume": {
"113893": ("Length of Reconstructable Volume", []),
},
"Lensometry": {
"LEN": ("Lensometry", [29, 30, 33]),
},
"LesionBoundary": {
"111357": ("Lesion boundary", []),
},
"LesionCapsularContactLength": {
"130558": ("Lesion capsular contact length", [6351]),
},
"LesionCentered": {
"111902": ("Lesion centered", [4207]),
},
"LesionCompletelyRemoved": {
"111447": ("Lesion completely removed", [6066]),
},
"LesionDensity": {
"111035": ("Lesion Density", []),
},
"LesionIdentifier": {
"121151": ("Lesion Identifier", []),
},
"LesionInterventionInformation": {
"122178": ("Lesion Intervention Information", []),
},
"LesionMorphology": {
"122133": ("Lesion Morphology", []),
},
"LesionPartiallyRemoved": {
"111448": ("Lesion partially removed", [6066]),
},
"LesionRisk": {
"121153": ("Lesion Risk", []),
},
"LesionSensitivity": {
"111089": ("Lesion Sensitivity", [6048]),
},
"LesionToBackgroundSUVRatio": {
"126039": ("Lesion to Background SUV Ratio", [7466]),
},
"LesserThan3MonthsAgo": {
"111396": ("< 3 months ago", [6164]),
},
"LeukemicInfiltration": {
"111318": ("Leukemic infiltration", [6030, 6033]),
},
"LevelOfDifficulty": {
"128724": ("Level of Difficulty", []),
},
"LevelOfSignificance": {
"121403": ("Level of Significance", []),
},
"LevenbergMarquardt": {
"113265": ("Levenberg-Marquardt", [7274]),
},
"Levovist": {
"125904": ("Levovist", [12030]),
},
"LightBrownColor": {
"111450": ("Light brown color", [6067]),
},
"LightsOnTimeOfDay": {
"127215": ("Lights on time of day", []),
},
"Line": {
"112083": ("Line", [6102, 6109, 7151, 7196]),
},
"LineBScanPattern": {
"128281": ("Line B-scan pattern", [4272]),
},
"LineScanPattern": {
"125240": ("Line scan pattern", [12032]),
},
"LineSegmentLength": {
"121227": ("Line segment length", [218, 6165, 7469, 7470]),
},
"Linear": {
"112150": ("Linear", [6123, 6335, 6339, 6340]),
},
"LinearCellKillFactor": {
"130088": ("Linear Cell Kill Factor", []),
},
"LinearCurve": {
"130253": ("Linear Curve", [73]),
},
"LinearDisplacement": {
"110856": ("Linear Displacement", [7182]),
},
"LinearQuadraticLinearModel": {
"130133": ("Linear-Quadratic-Linear Model", [9539]),
},
"LinearQuadraticModel": {
"130131": ("Linear-Quadratic Model", [9539]),
},
"LinearQuadraticModelForLowDoseRateBrachytherapy": {
"130134": ("Linear-Quadratic Model for Low-Dose Rate Brachytherapy", [9539]),
},
"LinearQuadraticModelWithTimeFactor": {
"130132": ("Linear-Quadratic Model with Time Factor", [9539]),
},
"LinearSpiculation": {
"113665": ("Linear spiculation", [6166]),
},
"LinearUltrasoundTransducerGeometry": {
"125252": ("Linear ultrasound transducer geometry", [12033]),
},
"LipidAndLactate": {
"113095": ("Lipid and Lactate", [218, 4032, 4033, 7180, 7186, 7469]),
},
"LiquidCrystalDisplay": {
"109992": ("Liquid Crystal Display", [8303]),
},
"LiquidIonChamber": {
"128705": ("Liquid Ion Chamber", [7026, 7027, 7151, 7193]),
},
"Lobar": {
"112158": ("Lobar", [6128, 6129, 8134]),
},
"Lobulated": {
"112135": ("Lobulated", [6119, 6335, 6339, 6340]),
},
"LocalEffectModel": {
"130128": ("Local Effect Model", [9538]),
},
"LocalServiceOperationStarted": {
"110141": ("Local Service Operation Started", [401, 403]),
},
"LocalServiceOperationStopped": {
"110142": ("Local Service Operation Stopped", [401, 403]),
},
"LocalizedProstateFinding": {
"130555": ("Localized Prostate Finding", []),
},
"Localizer": {
"121311": ("Localizer", [4264, 7201, 8120]),
},
"LocallyManufacturedProduct": {
"127390": ("Locally manufactured product", [608]),
},
"LocationInChest": {
"112013": ("Location in Chest", []),
},
"LocationOfSamplingSite": {
"111709": ("Location of sampling site", []),
},
"LocationOfSamplingSiteXOffset": {
"111710": ("Location of sampling site X offset", []),
},
"LocationOfSamplingSiteYOffset": {
"111711": ("Location of sampling site Y offset", []),
},
"LocationOfSamplingSiteZOffset": {
"111712": ("Location of sampling site Z offset", []),
},
"LocationOfSpecimen": {
"111718": ("Location of Specimen", []),
},
"LocationOfSpecimenXOffset": {
"111719": ("Location of Specimen X offset", []),
},
"LocationOfSpecimenYOffset": {
"111720": ("Location of Specimen Y offset", []),
},
"LocationOfSpecimenZOffset": {
"111721": ("Location | |
following meaning: -1 = Couldn't retrieve data in
PostArrayTraceMessage, -999 = Couldn't communicate with Zemax,
-998 = timeout reached
Examples
--------
>>> n = 9**2
    >>> nx = np.linspace(-1, 1, int(np.sqrt(n)))
>>> hx, hy = np.meshgrid(nx, nx)
>>> hx, hy = hx.flatten().tolist(), hy.flatten().tolist()
>>> rayData = at.zGetTraceArray(numRays=n, hx=hx, hy=hy, mode=0)
>>> err, vig = rayData[0], rayData[1]
>>> x, y, z = rayData[2], rayData[3], rayData[4]
Notes
-----
The opd can only be computed if the last surface is the image surface,
otherwise, the opd value will be zero.
"""
rd = getRayDataArray(numRays, tType=0, mode=mode, endSurf=surf)
hx = hx if hx else [0.0] * numRays
hy = hy if hy else [0.0] * numRays
px = px if px else [0.0] * numRays
py = py if py else [0.0] * numRays
if intensity:
intensity = intensity if isinstance(intensity, list) else [intensity]*numRays
else:
intensity = [1.0] * numRays
if waveNum:
waveNum = waveNum if isinstance(waveNum, list) else [waveNum]*numRays
else:
waveNum = [1] * numRays
want_opd = [want_opd] * numRays
# fill up the structure
for i in xrange(1, numRays+1):
rd[i].x = hx[i-1]
rd[i].y = hy[i-1]
rd[i].z = px[i-1]
rd[i].l = py[i-1]
rd[i].intensity = intensity[i-1]
rd[i].wave = waveNum[i-1]
rd[i].want_opd = want_opd[i-1]
# call ray tracing
ret = zArrayTrace(rd, timeout)
# free up some memory
#del hx, hy, px, py, intensity, waveNum, want_opd # seems to increase running time
#_gc.collect()
d = {}
if ret == 0:
reals = ['x', 'y', 'z', 'l', 'm', 'n', 'l2', 'm2', 'n2', 'opd',
'intensity']
ints = ['error', 'vigcode']
        for r in reals:
            d[r] = [0.0] * numRays
        for i in ints:
            d[i] = [0] * numRays
for i in xrange(1, numRays+1):
d["x"][i-1] = rd[i].x
d["y"][i-1] = rd[i].y
d["z"][i-1] = rd[i].z
d["l"][i-1] = rd[i].l
d["m"][i-1] = rd[i].m
d["n"][i-1] = rd[i].n
d["opd"][i-1] = rd[i].opd
d["intensity"][i-1] = rd[i].intensity
d["l2"][i-1] = rd[i].Exr
d["m2"][i-1] = rd[i].Eyr
d["n2"][i-1] = rd[i].Ezr
d["error"][i-1] = rd[i].error
d["vigcode"][i-1] = rd[i].vigcode
return (d["error"], d["vigcode"], d["x"], d["y"], d["z"],
d["l"], d["m"], d["n"], d["l2"], d["m2"], d["n2"],
d["opd"], d["intensity"])
else:
return ret
def zGetTraceDirectArray(numRays, x=None, y=None, z=None, l=None, m=None,
n=None, intensity=None, waveNum=None, mode=0,
startSurf=0, lastSurf=-1, timeout=5000):
"""Trace large number of rays defined by ``x``, ``y``, ``z``, ``l``,
``m`` and ``n`` coordinates on any starting surface as well as
wavelength number, mode and the surface to trace the ray to.
Ray tracing is performed on the lens file in the LDE of main Zemax
application (not in the DDE server)
Parameters
----------
numRays : integer
number of rays to trace. ``numRays`` should be equal to the length
        of the lists (if provided) ``x``, ``y``, ``z``, etc.
x : list, optional
list specifying the x coordinates of the ray at the start surface,
of length ``numRays``; if ``None``, a list of 0.0s for ``x`` is created.
y : list, optional
list specifying the y coordinates of the ray at the start surface,
of length ``numRays``; if ``None``, a list of 0.0s for ``y`` is created
z : list, optional
list specifying the z coordinates of the ray at the start surface,
of length ``numRays``; if ``None``, a list of 0.0s for ``z`` is created.
l : list, optional
list of x-direction cosines, of length ``numRays``; if ``None``, a
list of 0.0s for ``l`` is created
m : list, optional
list of y-direction cosines, of length ``numRays``; if ``None``, a
list of 0.0s for ``m`` is created
n : list, optional
list of z-direction cosines, of length ``numRays``; if ``None``, a
list of 0.0s for ``n`` is created
intensity : float or list, optional
initial intensities. If a list of length ``numRays`` is given it is
used. If a single float value is passed, all rays use the same value for
their initial intensities. If ``None``, all rays use a value of ``1.0``
as their initial intensities.
waveNum : integer or list (of integers), optional
wavelength number. If a list of integers of length ``numRays`` is given
it is used. If a single integer value is passed, all rays use the same
value for wavelength number. If ``None``, all rays use wavelength
number equal to 1.
mode : integer, optional
0 = real (Default), 1 = paraxial
startSurf : integer, optional
start surface number (default = 0)
lastSurf : integer, optional
surface to trace the ray to, which is any valid surface number
        (default is ``lastSurf = -1``)
timeout : integer, optional
command timeout specified in milli-seconds
Returns
-------
error : list of integers
0 = ray traced successfully;
+ve number = the ray missed the surface;
-ve number = the ray total internal reflected (TIR) at surface
given by the absolute value of the ``error``
vigcode : list of integers
the first surface where the ray was vignetted. Unless an error occurs
at that surface or subsequent to that surface, the ray will continue
to trace to the requested surface.
x, y, z : list of reals
        x, y, or z coordinates of the ray on the requested surface
l, m, n : list of reals
the x, y, and z direction cosines after refraction into the media
following the requested surface.
l2, m2, n2 : list of reals
list of x or y or z surface intercept direction normals at
requested surface
    opd : list of reals
        optical path difference values; these are always zero in this mode,
        since OPD computation is not permitted here (see Notes)
intensity : list of reals
the relative transmitted intensity of the ray, including any pupil
or surface apodization defined.
If ray tracing fails, a single integer error code is returned,
which has the following meaning: -1 = Couldn't retrieve data in
PostArrayTraceMessage, -999 = Couldn't communicate with Zemax,
-998 = timeout reached
Notes
-----
Computation of OPD is not permitted in this mode.
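    Examples
    --------
    A minimal sketch, assuming the module is imported as ``at`` (as in the
    ``zGetTraceArray`` example above) and that rays are launched parallel to
    the optical axis from the object surface; all values are illustrative only.
    >>> nr = 16
    >>> xs = np.linspace(-1.0, 1.0, nr).tolist()
    >>> rayData = at.zGetTraceDirectArray(numRays=nr, x=xs, n=[1.0]*nr,
    ...                                   startSurf=0, lastSurf=-1)
    >>> err, vig = rayData[0], rayData[1]
    >>> x, y, z = rayData[2], rayData[3], rayData[4]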
"""
rd = getRayDataArray(numRays, tType=1, mode=mode, startSurf=startSurf,
endSurf=lastSurf)
x = x if x else [0.0] * numRays
y = y if y else [0.0] * numRays
z = z if z else [0.0] * numRays
l = l if l else [0.0] * numRays
m = m if m else [0.0] * numRays
n = n if n else [0.0] * numRays
if intensity:
intensity = intensity if isinstance(intensity, list) else [intensity]*numRays
else:
intensity = [1.0] * numRays
if waveNum:
waveNum = waveNum if isinstance(waveNum, list) else [waveNum]*numRays
else:
waveNum = [1] * numRays
# fill up the structure
for i in xrange(1, numRays+1):
rd[i].x = x[i-1]
rd[i].y = y[i-1]
rd[i].z = z[i-1]
rd[i].l = l[i-1]
rd[i].m = m[i-1]
rd[i].n = n[i-1]
rd[i].intensity = intensity[i-1]
rd[i].wave = waveNum[i-1]
# call ray tracing
ret = zArrayTrace(rd, timeout)
d = {}
if ret == 0:
reals = ['x', 'y', 'z', 'l', 'm', 'n', 'l2', 'm2', 'n2', 'opd',
'intensity']
ints = ['error', 'vigcode']
        for r in reals:
            d[r] = [0.0] * numRays
        for i in ints:
            d[i] = [0] * numRays
for i in xrange(1, numRays+1):
d["x"][i-1] = rd[i].x
d["y"][i-1] = rd[i].y
d["z"][i-1] = rd[i].z
d["l"][i-1] = rd[i].l
d["m"][i-1] = rd[i].m
d["n"][i-1] = rd[i].n
d["opd"][i-1] = rd[i].opd
d["intensity"][i-1] = rd[i].intensity
d["l2"][i-1] = rd[i].Exr
d["m2"][i-1] = rd[i].Eyr
d["n2"][i-1] = rd[i].Ezr
d["error"][i-1] = rd[i].error
d["vigcode"][i-1] = rd[i].vigcode
return (d["error"], d["vigcode"], d["x"], d["y"], d["z"],
d["l"], d["m"], d["n"], d["l2"], d["m2"], d["n2"],
d["opd"], d["intensity"])
else:
return ret
def zGetPolTraceArray(numRays, hx=None, hy=None, px=None, py=None, Exr=None,
Exi=None, Eyr=None, Eyi=None, Ezr=None, Ezi=None, Ex=0,
Ey=0, Phax=0, Phay=0, intensity=None, waveNum=None, mode=0,
surf=-1, timeout=5000):
"""Trace large number of polarized rays defined by their normalized
field and pupil coordinates. Similar to ``GetPolTrace()``
Ray tracing is performed on the lens file in the LDE of main Zemax
application (not in the DDE server)
Parameters
----------
numRays : integer
number of rays to trace. ``numRays`` should be equal to the length
        of the lists (if provided) ``hx``, ``hy``, ``px``, etc.
import numpy as np
import warnings
from collections import defaultdict
from scipy.spatial.distance import cdist as sp_cdist
from typing import Callable
from .base_viewer import BaseViewer
from ..cooking_machine import BaseDataset
# If change, also modify docstring for view()
METRICS_NAMES = [
'jensenshannon', 'euclidean', 'cosine', 'correlation'
]
ERROR_DUPLICATE_DOCUMENTS_IDS = """\
Some documents' IDs in dataset are the same: \
number of unique IDs and total number of documents not equal: "{0}" vs. "{1}". \
Need unique IDs in order to identify documents.\
"""
ERROR_TYPE_METRIC = """\
Parameter "metric" should be "str" or "callable". \
The argument given is of type "{0}"\
"""
ERROR_TYPE_NUM_TOP_SIMILAR = """\
Parameter "num_top_similar" should be "int". \
The argument given is of type "{0}"\
"""
ERROR_TYPE_KEEP_SIMILAR_BY_WORDS = """\
Parameter "keep_similar_by_words" should be "bool". \
The argument given is of type "{0}"\
"""
WARNING_UNDEFINED_FREQUENCY_IN_VW = """\
Some words in Vowpal Wabbit text were skipped \
because they didn\'t have frequency after colon sign ":"\
"""
WARNING_FEWER_THAN_REQUESTED = """\
Only "{0}" documents available{1}. \
This is smaller than the requested number of top similar documents "{2}". \
So display is going to contain all "{0}" documents, but sorted by distance\
"""
WARNING_TOO_MANY_REQUESTED = """\
Requested number of top similar documents "{0}" \
is bigger than total number of documents in the dataset "{1}"\
"""
def prepare_doc_html_with_similarity(
document,
distance,
num_digits: int = 3,
num_sentences_in_snippet: int = 4,
num_words: int = 15,
):
"""
    Prepares the initial document and the search-result
    HTML strings
    Parameters
    ----------
    document : pandas.DataFrame row
        a row that contains the columns raw_text
        and index in string form
    distance : float between 0 and 1
        measure of how close the found document is to the
        initial query
    num_digits
        number of digits to display for the document similarity score
    num_sentences_in_snippet
        how many sentences to use for the document snippet
    num_words
        number of document words before a line break in
        the document snippet
    Returns
    -------
    doc_html : str
        an HTML string with the document data
        plus additional information clarifying the output
"""
if distance > 0:
sim = str(1 - distance)[:2 + num_digits]
doc_title = f'<h3>{document.index.values[0]}   similarity: {sim}</h3>'
else:
doc_title = f'<h3>Search document:   {document.index.values[0]}</h3>'
get_sentences = document['raw_text'].values[0].split('. ')[:num_sentences_in_snippet]
doc_snippet = '. '.join(get_sentences).split(' ')
doc_snippet[-1] += '.'
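    # Insert an HTML line break after every num_words-th word so the snippet wraps
    # instead of rendering as a single long line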
doc_snippet = ' '.join([
word + '<br />' if i % num_words + 1 == num_words
else word for i, word in enumerate(doc_snippet)
])
doc_html = f"<h3>{doc_title}</h3>{doc_snippet}<br><br />"
if distance == 0:
doc_html += '<h2>Search results:</h2>'
return doc_html
class TopSimilarDocumentsViewer(BaseViewer):
def __init__(self, model, dataset):
"""Viewer which uses topic model to find documents similar to given one
Parameters
----------
model : BaseModel
Topic model
dataset : BaseDataset
Dataset with information about documents
"""
super().__init__(model=model)
if not isinstance(dataset, BaseDataset):
raise TypeError('Parameter "dataset" should derive from BaseDataset')
self._dataset = dataset
self._theta = self.model.get_theta(dataset=self._dataset)
self._documents_ids = list(self._theta.columns)
if len(self._documents_ids) == 0:
warnings.warn('No documents in given dataset', UserWarning)
elif len(set(self._documents_ids)) != len(self._documents_ids):
raise ValueError(ERROR_DUPLICATE_DOCUMENTS_IDS.format(
len(set(self._documents_ids)), len(self._documents_ids)))
def view(self,
document_id,
metric='jensenshannon',
num_top_similar=5,
keep_similar_by_words=True):
"""Shows documents similar to given one by distribution of topics
Parameters
----------
document_id
ID of the document in `dataset`
metric : str or callable
Distance measure which is to be used to measure how documents differ from each other
If str -- should be one of 'jensenshannon', 'euclidean', 'cosine', 'correlation' --
as in scipy.spatial.distance.cdist
If callable -- should map two vectors to numeric value
num_top_similar : int
How many top similar documents' IDs to show
keep_similar_by_words : bool
Whether or not to keep in the output those documents
that are similar to the given one by their constituent words and words' frequencies
Returns
-------
tuple(list, list)
            Top similar documents' IDs, and corresponding distances to the given document
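        Examples
        --------
        A hedged sketch of typical usage; the ``model`` and ``dataset`` objects
        and the document ID are assumed to exist and are illustrative only.
        >>> viewer = TopSimilarDocumentsViewer(model=model, dataset=dataset)
        >>> doc_ids, dists = viewer.view(document_id='doc_42', num_top_similar=3)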
"""
self._check_view_parameters_valid(
document_id=document_id,
metric=metric,
num_top_similar=num_top_similar,
keep_similar_by_words=keep_similar_by_words)
num_top_similar = min(num_top_similar, len(self._documents_ids))
document_index = self._documents_ids.index(document_id)
similar_documents_indices, distances = self._view(
document_index=document_index,
metric=metric,
num_top_similar=num_top_similar,
keep_similar_by_words=keep_similar_by_words)
documents_ids = [self._documents_ids[doc_index] for doc_index in similar_documents_indices]
return documents_ids, distances
def view_from_jupyter(
self,
document_id: str,
metric: str or Callable = 'jensenshannon',
num_top_similar: int = 5,
num_digits: int = 3,
keep_similar_by_words: bool = True,
display_output: bool = True,
give_html: bool = False,):
"""
Method for viewing documents similar to requested one
from jupyter notebook. Provides document titles and snippets of
first few sentences.
Parameters
----------
document_id
ID of the document in `dataset`
metric
Distance measure which is to be used to measure how documents differ from each other
If str -- should be one of 'jensenshannon', 'euclidean', 'cosine', 'correlation' --
as in scipy.spatial.distance.cdist
If callable -- should map two vectors to numeric value
num_top_similar
How many top similar documents' IDs to show
keep_similar_by_words
Whether or not to keep in the output those documents
that are similar to the given one by their constituent words and words' frequencies
display_output
if provide output at the end of method run
give_html
return html string generated by the method
Returns
-------
topic_html
html string of the generated output
"""
from IPython.display import display_html
from topicnet.cooking_machine.pretty_output import make_notebook_pretty
make_notebook_pretty()
search_ids, search_distances = self.view(
document_id=document_id,
metric=metric,
num_top_similar=num_top_similar,
keep_similar_by_words=keep_similar_by_words,
)
topic_html = ''
search_ids = [document_id] + search_ids
search_distances = [0] + search_distances
for doc_id, distance in zip(search_ids, search_distances):
document = self._dataset.get_source_document(doc_id)
topic_html += prepare_doc_html_with_similarity(document, distance)
if display_output:
display_html(topic_html, raw=True)
if give_html:
return topic_html
def _view(self,
document_index,
metric,
num_top_similar,
keep_similar_by_words):
documents_indices = [i for i, _ in enumerate(self._documents_ids) if i != document_index]
distances = self._get_documents_distances(documents_indices, document_index, metric)
documents_indices, distances = \
TopSimilarDocumentsViewer._sort_elements_by_corresponding_values(
documents_indices, distances)
if keep_similar_by_words or len(documents_indices) == 0:
documents_indices_to_exclude = []
else:
documents_indices_to_exclude = \
self._get_documents_with_similar_words_frequencies_indices(
documents_indices, document_index, num_top_similar)
if len(documents_indices) == len(documents_indices_to_exclude):
return self._empty_view
elif len(documents_indices) - len(documents_indices_to_exclude) < num_top_similar:
warnings.warn(
WARNING_FEWER_THAN_REQUESTED.format(
                    len(documents_indices) - len(documents_indices_to_exclude),
(' after throwing out documents similar just by words'
if not keep_similar_by_words else ''),
num_top_similar),
RuntimeWarning
)
documents_indices, distances =\
TopSimilarDocumentsViewer._filter_elements_and_values(
documents_indices, distances, documents_indices_to_exclude)
similar_documents_indices = documents_indices[:num_top_similar]
similar_documents_distances = distances[:num_top_similar]
return similar_documents_indices, similar_documents_distances
@staticmethod
def _sort_elements_by_corresponding_values(elements, values, ascending=True):
def unzip(zipped):
# Transforms [(a, A), (b, B), ...] to [a, b, ...], [A, B, ...]
return list(zip(*zipped))
elements_values = sorted(zip(elements, values), key=lambda kv: kv[1])
if not ascending:
elements_values = elements_values[::-1]
return unzip(elements_values)
@staticmethod
def _filter_elements_and_values(elements, values, elements_to_exclude):
elements_to_exclude = set(elements_to_exclude)
indices_to_exclude = set([i for i, e in enumerate(elements) if e in elements_to_exclude])
result_elements = [e for i, e in enumerate(elements) if i not in indices_to_exclude]
result_values = [v for i, v in enumerate(values) if i not in indices_to_exclude]
assert len(result_elements) == len(result_values)
return result_elements, result_values
@staticmethod
def _are_words_frequencies_similar(words_frequencies_a, words_frequencies_b):
# TODO: method seems very ... heuristic
# maybe need some research to find the best way to compare words frequencies
word_frequency_pairs_a = sorted(words_frequencies_a.items(), key=lambda kv: kv[1])
word_frequency_pairs_b = sorted(words_frequencies_b.items(), key=lambda kv: kv[1])
num_top_words_to_consider = 100
jaccard_coefficient = TopSimilarDocumentsViewer._get_jaccard_coefficient(
word_frequency_pairs_a[:num_top_words_to_consider],
word_frequency_pairs_b[:num_top_words_to_consider])
jaccard_coefficient_threshold_to_be_similar = 0.6
return jaccard_coefficient >= jaccard_coefficient_threshold_to_be_similar
@staticmethod
def _get_jaccard_coefficient(word_frequency_pairs_a, word_frequency_pairs_b):
def get_values_sum(dictionary, default=0.0):
return sum(dictionary.values() or [default])
def get_normalized_values(key_value_pairs):
tiny = 1e-7
denominator = sum(kv[1] for kv in key_value_pairs) or tiny
return {k: v / denominator for k, v in key_value_pairs}
# May help in case documents differ in length significantly
frequencies_a = get_normalized_values(word_frequency_pairs_a)
frequencies_b = get_normalized_values(word_frequency_pairs_b)
words_a, words_b = set(frequencies_a), set(frequencies_b)
intersection = {
e: min(frequencies_a[e], frequencies_b[e])
for e in words_a & words_b
}
union = {
e: max(frequencies_a.get(e, 0), frequencies_b.get(e, 0))
for e in words_a | words_b
}
if len(union) == 0:
return 0.0
return get_values_sum(intersection) / get_values_sum(union)
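    # Illustrative (made-up) example of the weighted Jaccard computed above:
    # for normalized frequencies a = {'x': 0.6, 'y': 0.4} and b = {'x': 0.2, 'z': 0.8},
    # the intersection sums min values over shared words (0.2) and the union sums
    # max values over all words (0.6 + 0.4 + 0.8 = 1.8), giving 0.2 / 1.8 ~= 0.11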
@staticmethod
def _extract_words_frequencies(vw_text):
# Just gather frequencies of words of all modalities
# TODO: use Dataset for this?
def is_modality_name(vw_word):
return vw_word.startswith('|')
words_frequencies = defaultdict(int)
has_words_with_undefined_frequencies = False
for vw_word in vw_text.split():
if is_modality_name(vw_word):
continue
if ':' in vw_word:
word, frequency = vw_word.split(':')
if len(frequency) == 0:
has_words_with_undefined_frequencies = True
continue
# to allow frequencies as float's but assure that now all are int-s
frequency = int(round(float(frequency)))
else:
word = vw_word
frequency = 1
words_frequencies[word] += frequency
if has_words_with_undefined_frequencies:
warnings.warn(WARNING_UNDEFINED_FREQUENCY_IN_VW, UserWarning)
return words_frequencies
@property
def _empty_view(self):
empty_top_similar_documents_list = list()
empty_distances_list = list()
return empty_top_similar_documents_list, empty_distances_list
def _check_view_parameters_valid(
self, document_id, metric, num_top_similar, keep_similar_by_words):
if document_id not in self._documents_ids:
raise ValueError('No document with such id "{}" in dataset'.format(document_id))
if isinstance(metric, str):
TopSimilarDocumentsViewer._check_str_metric_valid(metric)
elif callable(metric):
TopSimilarDocumentsViewer._check_callable_metric_valid(metric)
else:
raise TypeError(ERROR_TYPE_METRIC.format(type(metric)))
if not isinstance(num_top_similar, int):
raise TypeError(ERROR_TYPE_NUM_TOP_SIMILAR.format(type(num_top_similar)))
elif num_top_similar < 0:
raise ValueError('Parameter "num_top_similar" should be greater than zero')
elif num_top_similar == 0:
return self._empty_view
        elif not isinstance(keep_similar_by_words, bool):
            raise TypeError(ERROR_TYPE_KEEP_SIMILAR_BY_WORDS.format(type(keep_similar_by_words)))
# studies/budget_components_coupling/plot_tendencies_MMC_speed.py
# -*- coding: utf-8 -*-
"""
Mesoscale Tendencies: Sensitivity to spatial and temporal averaging
Created on Tue Apr 19 04:53:49 2016
@author: cener
"""
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.dates as mdates
import datetime
import utm
import glob
import sys
import netCDF4
import pandas as pd
# Constants
g = 9.81 # [m s-2]
P0 = 100000 # Reference pressure [Pa]
T0 = 300 # Reference temperature for perturbation temperature [K]
K = 0.41 # von Karman constant
kappa = 0.2854 # Poisson constant (R/Cp)
R_air = 287.058 # Specific gas constant for dry air [J kg-1 K-1]
Cp_air = 1005 # Specific heat of air [J kg-1 K-1]
omega = 7.2921159e-5 # angular speed of the Earth [rad/s]
# Station coordinates
lat_s = 33.61054 # degrees N
lon_s = -102.05054 # degrees E
siteID = 'SWIFT'# identifier
fc = 2*omega*np.sin(lat_s*np.pi/180)
utmX_s, utmY_s, utm_zonenumber_s, utm_zoneletter_s = utm.from_latlon(lat_s,lon_s)
# Evaluation period
datefrom = datetime.datetime(2013,11,8,00,0,0)
dateto = datetime.datetime(2013,11,10,00,0,0)
# Vertical profiles
t0 = datetime.datetime(2013,11,8,22,0,0,7320)
#t0 = datetime.datetime(2013,11,9,6,0,0,0)
#t0 = datetime.datetime(2013,11,8,18,0,0,0)
# ----------------------------------------------------------------------
# Load simulation data
# ----------------------------------------------------------------------
dirdata = '/projects/mmc/SWIFTRegion/8Nov2013/WRF_Rerun_Caro'
filenc = 'L0/SWIFT_all_divbyfc_w0_L0_tend_L0'
L = 0.0 # Length [m] of microscale domain Lx = Ly for spatial avaraging
dxmeso = 3000.0 # Horizontal resolution [m] of the mesoscale simulation
dtmeso = 600.0 # temporal resolution [s]
Nav = (int(np.floor(L/dxmeso)) + 1 ) * 3
savenpz = 0 # Save tendencies to npz file
savenc = 0 # Save tendencies to netCDF file
# Resample using running time averages of different window sizes
#windowsize = np.array([1,31,61,121,181]) # number of (dtmeso) timesteps +1
#windowsize = np.array([19,7,1,13,4]) # take the output times into account
windowsize = np.array([4,19,1,7,13])
Nw = windowsize.shape[0]
#tav = dtmeso*(windowsize - 1)/60.0 # averaging time [min]
tav = (windowsize - 1)*10 # averaging time [min]
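# e.g. a window of 7 output steps spans (7-1)*10 = 60 min of the 10-minute output,
# so tav = [30, 180, 0, 60, 120] min for the windowsize array above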
wrfoutfiles = sorted(glob.glob(dirdata+'/'+filenc+'_*'))
Nfiles = len(wrfoutfiles)
ifile = 0
cnt = 0
firstgoodfile = 1
while ifile < Nfiles:
f1 = netCDF4.Dataset(wrfoutfiles[ifile])
print(wrfoutfiles[ifile])
sys.stdout.flush()
times1 = f1.variables['time'][:]
dates1 = mdates.num2date(times1)
dates1 = pd.DatetimeIndex(dates1).tz_localize(None)
# print(dates1)
heights1 = f1.variables['z'][:]
Nz = heights1.shape[0]
idates = np.where(np.logical_and(times1 >= mdates.date2num(datefrom),
times1 <= mdates.date2num(dateto)))[0]
if idates.shape[0] > 0:
times1 = times1[idates]
Nt = times1.shape[0]
U1 = pd.DataFrame(f1.variables['U'][:], index = dates1)
V1 = pd.DataFrame(f1.variables['V'][:], index = dates1)
W1 = pd.DataFrame(f1.variables['W'][:], index = dates1)
Th1 = pd.DataFrame(f1.variables['Th'][:], index = dates1)# + 10. # Th was calculated with T0 = 290 K instead of 300 K
Ug1 = pd.DataFrame(f1.variables['Ug'][:], index = dates1)
Vg1 = pd.DataFrame(f1.variables['Vg'][:], index = dates1)
Uadv1 = pd.DataFrame(f1.variables['Uadv'][:], index = dates1)
Vadv1 = pd.DataFrame(f1.variables['Vadv'][:], index = dates1)
Thadv1 = pd.DataFrame(f1.variables['Thadv'][:], index = dates1)
Ucor1 = pd.DataFrame(f1.variables['Ucor'][:], index = dates1)
Vcor1 = pd.DataFrame(f1.variables['Vcor'][:], index = dates1)
Uphys1 = pd.DataFrame(f1.variables['Uphys'][:], index = dates1)
Vphys1 = pd.DataFrame(f1.variables['Vphys'][:], index = dates1)
Utend1 = pd.DataFrame(f1.variables['Utend'][:], index = dates1)
Vtend1 = pd.DataFrame(f1.variables['Vtend'][:], index = dates1)
ust1 = pd.DataFrame(f1.variables['ust'][:], index = dates1)
T21 = pd.DataFrame(f1.variables['T2'][:], index = dates1)
TSK1 = pd.DataFrame(f1.variables['TSK'][:], index = dates1)
HFX1 = pd.DataFrame(f1.variables['HFX'][:], index = dates1)
LH1 = pd.DataFrame(f1.variables['LH'][:], index = dates1)
Psfc1 = pd.DataFrame(f1.variables['Psfc'][:], index = dates1)
rho1 = Psfc1/(R_air*T21)
wt1 = HFX1/(rho1*Cp_air)
beta01 = 1/T21
L01 = -ust1**3/(K*g*beta01*wt1) # Surface-layer Obukhov length [m]
U1 = U1.loc[datefrom:dateto]
V1 = V1.loc[datefrom:dateto]
W1 = W1.loc[datefrom:dateto]
Th1 = Th1.loc[datefrom:dateto]
Ug1 = Ug1.loc[datefrom:dateto]
Vg1 = Vg1.loc[datefrom:dateto]
Uadv1 = Uadv1.loc[datefrom:dateto]
Vadv1 = Vadv1.loc[datefrom:dateto]
Thadv1 = Thadv1.loc[datefrom:dateto]
Ucor1 = Ucor1.loc[datefrom:dateto]
Vcor1 = Vcor1.loc[datefrom:dateto]
Uphys1 = Uphys1.loc[datefrom:dateto]
Vphys1 = Vphys1.loc[datefrom:dateto]
Utend1 = Utend1.loc[datefrom:dateto]
Vtend1 = Vtend1.loc[datefrom:dateto]
ust1 = ust1.loc[datefrom:dateto]
T21 = T21.loc[datefrom:dateto]
TSK1 = TSK1.loc[datefrom:dateto]
wt1 = wt1.loc[datefrom:dateto]
LH1 = LH1.loc[datefrom:dateto]
Psfc1 = Psfc1.loc[datefrom:dateto]
L01 = L01.loc[datefrom:dateto]
if firstgoodfile == 1:
U = U1; V = V1; W = W1; Th = Th1;
Ug = Ug1; Vg = Vg1; Uadv = Uadv1; Vadv = Vadv1;
Utend = Utend1; Vtend = Vtend1; Thadv = Thadv1;
Ucor = Ucor1; Vcor = Vcor1; Uphys = Uphys1; Vphys = Vphys1
ust = ust1; T2 = T21; TSK = TSK1
wt = wt1; LH = LH1; Psfc = Psfc1; L0 = L01
times = times1; heights = heights1
firstgoodfile = 0
cnt = cnt + 1
else:
times = np.hstack((times,times1))
heights = heights + heights1
U = pd.concat([U,U1])
V = pd.concat([V,V1])
W = pd.concat([W,W1])
Th = pd.concat([Th,Th1])
Ug = pd.concat([Ug,Ug1])
Vg = pd.concat([Vg,Vg1])
Utend = pd.concat([Utend,Utend1])
Vtend = pd.concat([Vtend,Vtend1])
Uadv = pd.concat([Uadv,Uadv1])
Vadv = pd.concat([Vadv,Vadv1])
Ucor = pd.concat([Ucor,Ucor1])
Vcor = pd.concat([Vcor,Vcor1])
Uphys = pd.concat([Uphys,Uphys1])
Vphys = pd.concat([Vphys,Vphys1])
Thadv = pd.concat([Thadv,Thadv1])
ust = pd.concat([ust,ust1])
T2 = pd.concat([T2,T21])
TSK = pd.concat([TSK,TSK1])
wt = pd.concat([wt,wt1])
LH = pd.concat([LH,LH1])
Psfc = pd.concat([Psfc,Psfc1])
L0 = pd.concat([L0,L01])
cnt = cnt +1
ifile = ifile + 1
isort = np.argsort(times)
times = times[isort]
# Sort timeseries
U.sort_index(inplace=True); V.sort_index(inplace=True);
W.sort_index(inplace=True); Th.sort_index(inplace=True)
Utend.sort_index(inplace=True); Vtend.sort_index(inplace=True)
Ug.sort_index(inplace=True); Vg.sort_index(inplace=True)
Uadv.sort_index(inplace=True); Vadv.sort_index(inplace=True)
Ucor.sort_index(inplace=True); Vcor.sort_index(inplace=True);
Uphys.sort_index(inplace=True); Vphys.sort_index(inplace=True);
Thadv.sort_index(inplace=True); ust.sort_index(inplace=True);
T2.sort_index(inplace=True); TSK.sort_index(inplace=True);
wt.sort_index(inplace=True); LH.sort_index(inplace=True);
Psfc.sort_index(inplace=True); L0.sort_index(inplace=True)
heights = heights/cnt
# Fill initial NaN values in tendencies with next non-NaN values
Uadv = Uadv.bfill(); Vadv = Vadv.bfill()
Utend = Utend.bfill(); Vtend = Vtend.bfill()
Ucor = Ucor.bfill(); Vcor = Vcor.bfill()
Uphys = Uphys.bfill(); Vphys = Vphys.bfill()
Ug = Ug.bfill(); Vg = Vg.bfill()
Thadv = Thadv.bfill()
# Resample
Ugw = []; Vgw = []; Uadvw = []; Vadvw = []; Ucorw = []; Vcorw = []
Uphysw = []; Vphysw = []; Utendw = []; Vtendw = []; Thadvw = []
Uw = []; Vw = []; Ww = []; Thw = []
ustw = []; T2w = []; TSKw = []; wtw = []; LHw = []; Psfcw = [];
L0w = []
for w in range(0,Nw):
Uw.append(U.rolling(window = windowsize[w], center = True, min_periods = 1).mean().fillna(U))
Vw.append(V.rolling(window = windowsize[w], center = True, min_periods = 1).mean().fillna(V))
Ww.append(W.rolling(window = windowsize[w], center = True, min_periods = 1).mean().fillna(W))
Thw.append(Th.rolling(window = windowsize[w], center = True, min_periods = 1).mean().fillna(Th))
Ugw.append(Ug.rolling(window = windowsize[w], center = True, min_periods = 1).mean().fillna(Ug))
Vgw.append(Vg.rolling(window = windowsize[w], center = True, min_periods = 1).mean().fillna(Vg))
Uadvw.append(Uadv.rolling(window = windowsize[w], center = True, min_periods = 1).mean().fillna(Uadv))
Vadvw.append(Vadv.rolling(window = windowsize[w], center = True, min_periods = 1).mean().fillna(Vadv))
Thadvw.append(Thadv.rolling(window = windowsize[w], center = True, min_periods = 1).mean().fillna(Thadv))
Ucorw.append(Ucor.rolling(window = windowsize[w], center = True, min_periods = 1).mean().fillna(Ucor))
Vcorw.append(Vcor.rolling(window = windowsize[w], center = True, min_periods = 1).mean().fillna(Vcor))
Uphysw.append(Uphys.rolling(window = windowsize[w], center = True, min_periods = 1).mean().fillna(Uphys))
Vphysw.append(Vphys.rolling(window = windowsize[w], center = True, min_periods = 1).mean().fillna(Vphys))
Utendw.append(Utend.rolling(window = windowsize[w], center = True, min_periods = 1).mean().fillna(Utend))
Vtendw.append(Vtend.rolling(window = windowsize[w], center = True, min_periods = 1).mean().fillna(Vtend))
ustw.append(ust.rolling(window = windowsize[w], center = True, min_periods = 1).mean().fillna(ust))
T2w.append(T2.rolling(window = windowsize[w], center = True, min_periods = 1).mean().fillna(T2))
TSKw.append(TSK.rolling(window = windowsize[w], center = True, min_periods = 1).mean().fillna(TSK))
wtw.append(wt.rolling(window = windowsize[w], center = True, min_periods = 1).mean().fillna(wt))
LHw.append(LH.rolling(window = windowsize[w], center = True, min_periods = 1).mean().fillna(LH))
Psfcw.append(Psfc.rolling(window = windowsize[w], center = True, min_periods = 1).mean().fillna(Psfc))
L0w.append(L0.rolling(window = windowsize[w], center = True, min_periods = 1).mean().fillna(L0))
# Save to text files
if savenpz == 1:
for w in range(0,Nw):
fileout = siteID + '_' + dirdata + '_w' + str(int(tav[w])) +'_L' + str(int(L))
np.savez(fileout, U = Uw[w], V = Vw[w], W = Ww[w], Th = Thw[w],
Ug = Ugw[w], Vg = Vgw[w],
Uadv = Uadvw[w], Vadv = Vadvw[w], Thadv = Thadvw[w],
Ucor = Ucorw[w], Vcor = Vcorw[w],
Uphys = Uphysw[w], Vphys = Vphysw[w],
Utend = Utendw[w], Vtend = Vtendw[w],
ust = ustw[w], T2 = T2w[w], TSK = TSKw[w],
wt = wtw[w], LH = LHw[w], Psfc = Psfcw[w], L0 = L0w[w],
times = times, heights = heights)
print('Saving ' + fileout)
sys.stdout.flush()
if savenc == 1:
for w in range(0,Nw):
# fileout = siteID + '_' + dirdata + '_w' + str(int(tav[w])) +'_L' + str(int(L)) + '.nc'
fileout = siteID + '_w' + str(int(tav[w])) +'_L' + str(int(L)) + '.nc'
f = netCDF4.Dataset(fileout, 'w')
f.history = f1.history +', tav = ' + str(int(tav[w])) + ' min'
f.createDimension('time', np.shape(times)[0])
f.createDimension('z', Nz)
f.createDimension('site', 1)
lats = f.createVariable('lat', 'float', ('site',))
lats.long_name = 'Site latitude'
lats.units = 'degrees North'
lats[:] = lat_s
lons = f.createVariable('lon', 'float', ('site',))
lons.long_name = 'Site longitude'
lons.units = 'degrees East'
lons[:] = lon_s
fcs = f.createVariable('fc', 'float', ('site',))
fcs.long_name = 'Coriolis parameter'
fcs.units = 's-1'
fcs[:] = fc
timess = f.createVariable('time', 'float', ('time',))
timess.long_name = 'Time'
            timess.units = 'Days since 0001-01-01 00:00:00 UTC, plus one'
| |
may be
the ``Type`` returned in ``get_record_types()`` or any of its
parents in a ``Type`` hierarchy where
``has_record_type(gradebook_column_record_type)`` is ``true`` .
:param gradebook_column_record_type: the type of the record to retrieve
:type gradebook_column_record_type: ``osid.type.Type``
:return: the gradebook column record
:rtype: ``osid.grading.records.GradebookColumnRecord``
:raise: ``NullArgument`` -- ``gradebook_column_record_type`` is ``null``
:raise: ``OperationFailed`` -- unable to complete request
:raise: ``Unsupported`` -- ``has_record_type(gradebook_column_record_type)`` is ``false``
*compliance: mandatory -- This method must be implemented.*
"""
return # osid.grading.records.GradebookColumnRecord
class GradebookColumnForm:
"""This is the form for creating and updating ``GradebookColumns``.
Like all ``OsidForm`` objects, various data elements may be set here
for use in the create and update methods in the
``GradebookAdminSession``. For each data element that may be set,
metadata may be examined to provide display hints or data
constraints.
"""
__metaclass__ = abc.ABCMeta
@abc.abstractmethod
def get_grade_system_metadata(self):
"""Gets the metadata for a grade system.
:return: metadata for the grade system
:rtype: ``osid.Metadata``
*compliance: mandatory -- This method must be implemented.*
"""
return # osid.Metadata
grade_system_metadata = property(fget=get_grade_system_metadata)
@abc.abstractmethod
def set_grade_system(self, grade_system_id):
"""Sets the grade system.
:param grade_system_id: the new grade system
:type grade_system_id: ``osid.id.Id``
:raise: ``InvalidArgument`` -- ``grade_system_id`` is invalid
:raise: ``NoAccess`` -- ``grade_system_id`` cannot be modified
:raise: ``NullArgument`` -- ``grade_system_id`` is ``null``
*compliance: mandatory -- This method must be implemented.*
"""
pass
@abc.abstractmethod
def clear_grade_system(self):
"""Clears the grade system.
:raise: ``NoAccess`` -- ``Metadata.isRequired()`` or ``Metadata.isReadOnly()`` is ``true``
*compliance: mandatory -- This method must be implemented.*
"""
pass
grade_system = property(fset=set_grade_system, fdel=clear_grade_system)
@abc.abstractmethod
def get_gradebook_column_form_record(self, gradebook_column_record_type):
"""Gets the ``GradebookColumnFormRecord`` corresponding to the given gradebook column record ``Type``.
:param gradebook_column_record_type: a gradebook column record type
:type gradebook_column_record_type: ``osid.type.Type``
:return: the gradebook column form record
:rtype: ``osid.grading.records.GradebookColumnFormRecord``
:raise: ``NullArgument`` -- ``gradebook_column_record_type`` is ``null``
:raise: ``OperationFailed`` -- unable to complete request
:raise: ``Unsupported`` -- ``has_record_type(gradebook_column_record_type)`` is ``false``
*compliance: mandatory -- This method must be implemented.*
"""
return # osid.grading.records.GradebookColumnFormRecord
class GradebookColumnList:
"""Like all ``OsidLists,`` ``GradebookColumnList`` provides a means for accessing ``GradebookColumn`` elements sequentially either one at a time or many at a time.
Examples: while (gcl.hasNext()) { GradebookColumn column =
gcl.getNextGradebookColumn(); }
or
while (gcl.hasNext()) {
GradebookColumn[] columns = gcl.getNextGradebookColumns(gcl.available());
}
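    A rough Python equivalent of the same pattern (a sketch only, assuming a
    ``gcl`` object obtained from a lookup session):
    while gcl.has_next():
        column = gcl.get_next_gradebook_column()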
"""
__metaclass__ = abc.ABCMeta
@abc.abstractmethod
def get_next_gradebook_column(self):
"""Gets the next ``GradebookColumn`` in this list.
:return: the next ``GradebookColumn`` in this list. The ``has_next()`` method should be used to test that a next ``GradebookColumn`` is available before calling this method.
:rtype: ``osid.grading.GradebookColumn``
:raise: ``IllegalState`` -- no more elements available in this list
:raise: ``OperationFailed`` -- unable to complete request
*compliance: mandatory -- This method must be implemented.*
"""
return # osid.grading.GradebookColumn
next_gradebook_column = property(fget=get_next_gradebook_column)
@abc.abstractmethod
def get_next_gradebook_columns(self, n):
"""Gets the next set of ``GradebookColumn`` elements in this list which must be less than or equal to the return from ``available()``.
:param n: the number of ``GradebookColumn`` elements requested which must be less than or equal to ``available()``
:type n: ``cardinal``
:return: an array of ``GradebookColumn`` elements.The length of the array is less than or equal to the number specified.
:rtype: ``osid.grading.GradebookColumn``
:raise: ``IllegalState`` -- no more elements available in this list
:raise: ``OperationFailed`` -- unable to complete request
*compliance: mandatory -- This method must be implemented.*
"""
return # osid.grading.GradebookColumn
class GradebookColumnSummary:
"""A ``GradebookColumnSummary`` is a summary of all entries within a gradebook column."""
__metaclass__ = abc.ABCMeta
@abc.abstractmethod
def get_gradebook_column_id(self):
"""Gets the ``Id`` of the ``GradebookColumn``.
:return: the ``Id`` of the ``GradebookColumn``
:rtype: ``osid.id.Id``
*compliance: mandatory -- This method must be implemented.*
"""
return # osid.id.Id
gradebook_column_id = property(fget=get_gradebook_column_id)
@abc.abstractmethod
def get_gradebook_column(self):
"""Gets the ``GradebookColumn``.
:return: the ``GradebookColumn``
:rtype: ``osid.grading.GradebookColumn``
:raise: ``OperationFailed`` -- unable to complete request
*compliance: mandatory -- This method must be implemented.*
"""
return # osid.grading.GradebookColumn
gradebook_column = property(fget=get_gradebook_column)
@abc.abstractmethod
def get_mean(self):
"""Gets the mean score.
If this system is based on grades, the mean output score is
returned.
:return: the mean score
:rtype: ``decimal``
*compliance: mandatory -- This method must be implemented.*
"""
return # decimal
mean = property(fget=get_mean)
@abc.abstractmethod
def get_median(self):
"""Gets the median score.
        If this system is based on grades, the median output score is
        returned.
:return: the median score
:rtype: ``decimal``
*compliance: mandatory -- This method must be implemented.*
"""
return # decimal
median = property(fget=get_median)
@abc.abstractmethod
def get_mode(self):
"""Gets the mode of the score.
If this system is based on grades, the mode of the output score
is returned.
        :return: the mode of the scores
:rtype: ``decimal``
*compliance: mandatory -- This method must be implemented.*
"""
return # decimal
mode = property(fget=get_mode)
@abc.abstractmethod
def get_rms(self):
"""Gets the root mean square of the score.
If this system is based on grades, the RMS of the output score
is returned.
        :return: the root mean square of the scores
:rtype: ``decimal``
*compliance: mandatory -- This method must be implemented.*
"""
return # decimal
rms = property(fget=get_rms)
@abc.abstractmethod
def get_standard_deviation(self):
"""Gets the standard deviation.
If this system is based on grades, the spread of the output
scores is returned.
:return: the standard deviation
:rtype: ``decimal``
*compliance: mandatory -- This method must be implemented.*
"""
return # decimal
standard_deviation = property(fget=get_standard_deviation)
@abc.abstractmethod
def get_sum(self):
"""Gets the sum of the scores.
If this system is based on grades, the sum of the output scores
is returned.
        :return: the sum of the scores
:rtype: ``decimal``
*compliance: mandatory -- This method must be implemented.*
"""
return # decimal
sum = property(fget=get_sum)
@abc.abstractmethod
def get_gradebook_column_summary_record(self, gradebook_column_summary_record_type):
"""Gets the gradebook column summary record corresponding to the given ``GradebookColumnSummary`` record ``Type``.
This method is used to retrieve an object implementing the
requested record. The ``gradebook_column_summary_record_type``
may be the ``Type`` returned in ``get_record_types()`` or any of
its parents in a ``Type`` hierarchy where
``has_record_type(gradebook_column_summary_record_type)`` is
``true`` .
:param gradebook_column_summary_record_type: the type of the record to retrieve
:type gradebook_column_summary_record_type: ``osid.type.Type``
:return: the gradebook column summary record
:rtype: ``osid.grading.records.GradebookColumnSummaryRecord``
:raise: ``NullArgument`` -- ``gradebook_column_summary_record_type`` is ``null``
:raise: ``OperationFailed`` -- unable to complete request
:raise: ``Unsupported`` -- ``has_record_type(gradebook_column_summary_record_type)`` is ``false``
*compliance: mandatory -- This method must be implemented.*
"""
return # osid.grading.records.GradebookColumnSummaryRecord
class Gradebook:
"""A gradebook defines a collection of grade entries."""
__metaclass__ = abc.ABCMeta
@abc.abstractmethod
def get_gradebook_record(self, gradebook_record_type):
"""Gets the gradebook record corresponding to the given ``Gradebook`` record ``Type``.
This method is used to retrieve an object implementing the
requested record. The ``gradebook_record_type`` may be the
``Type`` returned in ``get_record_types()`` or any of its
parents in a ``Type`` hierarchy where
``has_record_type(gradebook_record_type)`` is ``true`` .
:param gradebook_record_type: a gradebook record type
:type gradebook_record_type: ``osid.type.Type``
:return: the gradebook record
:rtype: ``osid.grading.records.GradebookRecord``
:raise: ``NullArgument`` -- ``gradebook_record_type`` is ``null``
:raise: ``OperationFailed`` -- unable to complete request
:raise: ``Unsupported`` -- ``has_record_type(gradebook_record_type)`` is ``false``
*compliance: mandatory -- This method must be implemented.*
"""
return # osid.grading.records.GradebookRecord
class GradebookForm:
"""This is the form for creating and updating ``Gradebooks``.
Like all ``OsidForm`` objects, various data elements may be set here
for use in the create and update methods in the
``GradebookAdminSession``. For each data element that may be set,
metadata may be examined to provide display hints or data
constraints.
"""
__metaclass__ = abc.ABCMeta
@abc.abstractmethod
def get_gradebook_form_record(self, gradebook_record_type):
"""Gets the ``GradebookFormRecord`` corresponding to the given gradebook record ``Type``.
:param gradebook_record_type: a gradebook record type
:type gradebook_record_type: ``osid.type.Type``
:return: the gradebook form record
:rtype: ``osid.grading.records.GradebookFormRecord``
:raise: ``NullArgument`` -- ``gradebook_record_type`` is ``null``
:raise: ``OperationFailed`` -- unable to complete request
:raise: ``Unsupported`` -- ``has_record_type(gradebook_record_type)`` is ``false``
*compliance: mandatory -- This method must be implemented.*
"""
return # osid.grading.records.GradebookFormRecord
class GradebookList:
"""Like all ``OsidLists,`` ``GradebookList`` provides a means for accessing ``Gradebook`` elements sequentially either one at a time or many at a time.
Examples: while (gl.hasNext()) { Gradebook gradebook =
gl.getNextGradebook(); }
or
while (gl.hasNext()) {
Gradebook[] gradebooks = gl.getNextGradebooks(gl.available());
}
"""
__metaclass__ = abc.ABCMeta
@abc.abstractmethod
def get_next_gradebook(self):
"""Gets the next ``Gradebook`` in this list.
:return: the next ``Gradebook`` in this list. The ``has_next()`` method should be used to test that a next ``Gradebook`` is available before calling this method.
:rtype: ``osid.grading.Gradebook``
:raise: ``IllegalState`` -- no more elements available in this list
:raise: ``OperationFailed`` -- unable to complete request
*compliance: mandatory -- | |
'''
Created on Jan 15, 2020
@author: bsana
'''
from os.path import join
import sys,datetime
import pandas as pd
OUT_SEP = ' '
COUNTY_FIPS = [37,59]
if __name__ == '__main__':
if len(sys.argv)<2:
print('Please provide a control file which contains all the required input parameters as an argument!')
else:
print('Reformat survey program started: {}'.format(datetime.datetime.now()))
#Initiate log file
logfilename = 'reformat_survey.log'
logfile = open(logfilename,'w')
logfile.write('Reformat survey program started: ' + str(datetime.datetime.now()) + '\n')
inputctlfile = sys.argv[1]
ctlfile = open(inputctlfile)
for ctlfileline in ctlfile:
logfile.write(ctlfileline)
if len(str.split(ctlfileline))>1:
param = (str.split(ctlfileline)[0]).upper()
value = str.split(ctlfileline)[1]
if param == 'INDIR':
inputdir = value
elif param == 'INHHFILE':
inhhfilename = value
elif param == 'INPERFILE':
inperfilename = value
elif param == 'INTRIPFILE':
intripfilename = value
elif param == 'OUTDIR':
outputdir = value
elif param == 'OUTHHFILE':
outhhfilename = value
elif param == 'OUTPERFILE':
outperfilename = value
elif param == 'OUTTRIPFILE':
outtripfilename = value
inhhfilename = join(inputdir, inhhfilename)
inperfilename = join(inputdir, inperfilename)
intripfilename = join(inputdir, intripfilename)
outhhfilename = join(outputdir, outhhfilename)
outperfilename = join(outputdir, outperfilename)
outtripfilename = join(outputdir, outtripfilename)
###### Household file processing
print('Household file processing started: {}'.format(datetime.datetime.now()))
logfile.write('\n')
logfile.write('Household file processing started: ' + str(datetime.datetime.now()) + '\n')
hh = pd.read_csv(inhhfilename, sep='\t')
hh['hhno'] = hh['hh_id']
hh['hhsize'] = hh['num_people']
hh.loc[hh['hhsize']>900, 'hhsize'] = -1
hh['hhvehs'] = hh['num_vehicles']
hh.loc[hh['hhvehs']>900, 'hhvehs'] = -1
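        # Recode categorical income responses to approximate bracket midpoints in dollars
        # (999 = refused -> -1); the follow-up income question is used below only when
        # the detailed income is missing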
INC1_DICT = {999:-1, 1:7500, 2:20000, 3:30000, 4:42500, 5:62500, 6:87500, 7:125000, 8:175000, 9:225000, 10:350000}
hh['hhincome'] = hh['income_detailed'].map(INC1_DICT)
INC2_DICT = {999:-1, 1:12500, 2:37500, 3:62500, 4:87500, 5:175000, 6:350000}
hh['hhinc2'] = hh['income_followup'].map(INC2_DICT)
hh.loc[(hh['hhincome']<0) & (hh['hhinc2']>0), 'hhincome'] = hh.loc[(hh['hhincome']<0) & (hh['hhinc2']>0), 'hhinc2']
hh['hownrent'] = hh['rent_own']
hh.loc[hh['hownrent']==997, 'hownrent'] = 3 #Other
hh.loc[hh['hownrent']==999, 'hownrent'] = 9 #Prefer not to answer -> Missing
hh.loc[hh['hownrent']<0, 'hownrent'] = -1
RESTYPE_DICT = {1:1, 2:2, 3:3, 4:3, 5:3, 6:5, 7:4, 997:6}
hh['hrestype'] = hh['res_type'].map(RESTYPE_DICT)
hh.loc[pd.isnull(hh['hrestype']), 'hrestype'] = -1
hh['hxcord'] = hh['reported_home_lon']
hh['hycord'] = hh['reported_home_lat']
hh['hhtaz'] = hh['home_taz']
hh['hhparcel'] = hh['home_bg_geoid']
int_cols = ['hhparcel','hhtaz','hhincome','hrestype']
hh[int_cols] = hh[int_cols].astype('int64')
out_colnames = ['hhno','hhsize','hhvehs','hhincome','hownrent','hrestype','hhparcel','hhtaz','hxcord','hycord','wt_alladult_wkday','wt_alladult_7day']
hh = hh[out_colnames]
hh = hh.sort_values('hhno')
hh.to_csv(outhhfilename, sep=OUT_SEP, index=False)
print('Household file processing finished: {}'.format(datetime.datetime.now()))
logfile.write('Household file processing finished: ' + str(datetime.datetime.now()) + '\n')
###### Person file processing
print('Person file processing started: {}'.format(datetime.datetime.now()))
logfile.write('\n')
logfile.write('Person file processing started: ' + str(datetime.datetime.now()) + '\n')
per = pd.read_csv(inperfilename, sep='\t')
per['person_id'] = per['person_id'].round()
per['hhno'] = per['hh_id']
per['pno'] = per['person_num']
AGE_DICT = {1:3, 2:10, 3:16, 4:21, 5:30, 6:40, 7:50, 8:60, 9:70, 10:80}
per['pagey'] = per['age'].map(AGE_DICT)
GEND_DICT = {1:2, 2:1, 3:3, 4:3, 997:3, 995:9, 999:9}
per['pgend'] = per['gender'].map(GEND_DICT)
per.loc[per['pgend']<0, 'pgend'] = -1
per.loc[pd.isna(per['pgend']), 'pgend'] = -1
per['pptyp'] = 0
per['pwtyp'] = 0
per['pstyp'] = 0
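        # Person type is assigned in priority order below; the codes appear to follow the
        # DaySim convention (labels inferred from the logic, not stated in the source):
        # 1=full-time worker, 2=part-time worker, 3=retired, 4=other non-working adult,
        # 5=university student, 6=high-school-age student, 7=child 5-15, 8=child under 5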
per.loc[(per['pagey']>=0) & (per['pagey']<5), 'pptyp'] = 8
per.loc[(per['pagey']>=0) & (per['pagey']<16) & (per['pptyp']==0), 'pptyp'] = 7
per.loc[(per['employment']==1) & (per['hours_work'].isin([1,2,3])) & (per['pptyp']==0), 'pptyp'] = 1
per.loc[(per['pagey']>=16) & (per['pagey']<18) & (per['pptyp']==0), 'pptyp'] = 6
per.loc[(per['pagey']>=16) & (per['pagey']<25) & (per['school_type'].isin([4,7])) & (per['student']==1) & (per['pptyp']==0), 'pptyp'] = 6
per.loc[(per['student'].isin([1,2])) & (per['pptyp']==0), 'pptyp'] = 5
per.loc[(per['employment'].isin([1,2,3])) & (per['pptyp']==0), 'pptyp'] = 2 # Remaining workers are part-time
per.loc[(per['pagey']>65) & (per['pptyp']==0), 'pptyp'] = 3
per.loc[per['pptyp']==0, 'pptyp'] = 4
per.loc[per['pptyp']==1, 'pwtyp'] = 1
per.loc[per['pptyp']==2, 'pwtyp'] = 2
# student workers are also part-time workers
per.loc[(per['pptyp']==5) & (per['employment'].isin([1,2,3])), 'pwtyp'] = 2
per.loc[(per['pptyp']==6) & (per['employment'].isin([1,2,3])), 'pwtyp'] = 2
per.loc[per['student']==1, 'pstyp'] = 1
per.loc[per['student']==2, 'pstyp'] = 2
per['pwxcord'] = per['work_lon']
per['pwycord'] = per['work_lat']
per['psxcord'] = per['school_lon']
per['psycord'] = per['school_lat']
per['ppaidprk'] = 1
per.loc[per['work_park']==1, 'ppaidprk'] = 0
per = per.rename(columns={'work_taz':'pwtaz_tmp', 'school_taz':'pstaz_tmp',
'work_bg_geo_id':'pwpcl_tmp', 'school_bg_geo_id':'pspcl_tmp'})
per.loc[per['work_county_fips'].isin(COUNTY_FIPS), 'pwtaz'] = per.loc[per['work_county_fips'].isin(COUNTY_FIPS), 'pwtaz_tmp']
per.loc[per['work_county_fips'].isin(COUNTY_FIPS), 'pwpcl'] = per.loc[per['work_county_fips'].isin(COUNTY_FIPS), 'pwpcl_tmp']
per.loc[per['school_county_fips'].isin(COUNTY_FIPS), 'pstaz'] = per.loc[per['school_county_fips'].isin(COUNTY_FIPS), 'pstaz_tmp']
per.loc[per['school_county_fips'].isin(COUNTY_FIPS), 'pspcl'] = per.loc[per['school_county_fips'].isin(COUNTY_FIPS), 'pspcl_tmp']
per.loc[pd.isnull(per['pwtaz']), 'pwtaz'] = -1
per.loc[pd.isnull(per['pstaz']), 'pstaz'] = -1
per.loc[pd.isnull(per['pwpcl']), 'pwpcl'] = -1
per.loc[pd.isnull(per['pspcl']), 'pspcl'] = -1
per.loc[pd.isnull(per['pwxcord']), 'pwxcord'] = -1.0
per.loc[pd.isnull(per['pwycord']), 'pwycord'] = -1.0
per.loc[pd.isnull(per['psxcord']), 'psxcord'] = -1.0
per.loc[pd.isnull(per['psycord']), 'psycord'] = -1.0
# there appear to be some person records who are not students but have school loc Ex. hhid 181005890
# account for that by setting school loc to null/missing
per.loc[per['pstyp']==0, 'pstaz'] = -1
per.loc[per['pstyp']==0, 'pspcl'] = -1
per.loc[per['pstyp']==0, 'psxcord'] = -1.0
per.loc[per['pstyp']==0, 'psycord'] = -1.0
# there appear to be some person records who are not workers but have work loc Ex. hhid 181007697
# account for that by setting work loc to null/missing
per.loc[per['pwtyp']==0, 'pwtaz'] = -1
per.loc[per['pwtyp']==0, 'pwpcl'] = -1
per.loc[per['pwtyp']==0, 'pwxcord'] = -1.0
per.loc[per['pwtyp']==0, 'pwycord'] = -1.0
int_cols = ['pwtaz','pstaz','pwpcl','pspcl','pgend']
per[int_cols] = per[int_cols].astype('int64')
out_colnames = ['hhno','pno','pptyp','pagey','pgend','pwtyp','pwpcl','pwtaz','pstyp','pspcl','pstaz','ppaidprk','pwxcord','pwycord','psxcord','psycord']
out_colnames = out_colnames + ['wt_alladult_wkday','wt_alladult_7day']
out_colnames = out_colnames + ['mon_complete','tue_complete','wed_complete','thu_complete','fri_complete',
'sat_complete','sun_complete','nwkdaywts_complete','n7daywts_complete']
per = per[out_colnames]
per = per.sort_values(['hhno','pno'])
per.to_csv(outperfilename, sep=OUT_SEP, index=False)
print('Person file processing finished: {}'.format(datetime.datetime.now()))
logfile.write('Person file processing finished: ' + str(datetime.datetime.now()) + '\n')
###### Trip processing
print('Trip file processing started: {}'.format(datetime.datetime.now()))
logfile.write('\n')
logfile.write('Trip file processing started: ' + str(datetime.datetime.now()) + '\n')
trip = pd.read_csv(intripfilename, sep='\t')
trip['person_id'] = trip['person_id'].round()
trip['hhno'] = trip['hh_id']
trip['pno'] = trip['person_num']
trip['tripno'] = trip['trip_num']
trip['dow'] = trip['travel_date_dow']
if len(trip.loc[pd.isna(trip['dow']), ]) > 0 :
print('Number of trips with missing travel_date_dow: {}'.format(len(trip.loc[pd.isna(trip['dow']), ])))
trip.loc[pd.isna(trip['dow']), 'dow'] = pd.to_datetime(trip.loc[pd.isna(trip['dow']), 'travel_date']).dt.dayofweek + 1
# print(pd.to_datetime(trip.loc[pd.isna(trip['dow']), 'travel_date']).dt.dayofweek.iloc[0])
# retain only complete day trips
trip = trip.merge(per[['hhno','pno','mon_complete','tue_complete','wed_complete','thu_complete',
'fri_complete','sat_complete','sun_complete']],
how='left', on=['hhno','pno'])
trip['incomplete_day_flag'] = 0
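# dow convention used below: 1=Monday ... 7=Sunday (hence the dayofweek + 1 above)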
for i,d in zip(range(1,8), ['mon','tue','wed','thu','fri','sat','sun']):
trip.loc[(trip['dow']==i) & (trip[d+'_complete']==0), 'incomplete_day_flag'] = 1
trip = trip[trip['incomplete_day_flag']==0]
PURP_DICT = {
-1:-1, #missing -> missing
1:0, #home -> home
2:1, #work -> work
    3:4, #work-related -> pers.bus.
4:2, #school -> school
5:3, #escort -> escort
6:5, #shop -> shop
7:6, #meal -> meal
8:7, #socrec -> socrec
9:4, #errand/other -> pers.bus.
10:10, #change mode -> change mode
11:11, #night non-home -> other
12:11, #other\missing -> other
    14:4, #trip needs to be merged -> pers.bus.
}
trip.loc[pd.isna(trip['o_purpose_category_imputed']), 'o_purpose_category_imputed'] = -1
trip.loc[pd.isna(trip['d_purpose_category_imputed']), 'd_purpose_category_imputed'] = -1
trip['opurp'] = trip['o_purpose_category_imputed'].map(PURP_DICT)
trip['dpurp'] = trip['d_purpose_category_imputed'].map(PURP_DICT)
#0-other 1-walk 2-bike 3-DA 4-hov2 5-hov3 6-walktran 7-drivetran 8-schbus 9-tnc
trip['mode'] = 0
trip.loc[(trip['mode_type_imputed']==1) & (trip['mode']==0), 'mode'] = 1
trip.loc[(trip['mode_type_imputed']==2) & (trip['mode']==0), 'mode'] = 2
trip.loc[(trip['mode_type_imputed']==11) & (trip['mode']==0), 'mode'] = 2
trip.loc[(trip['mode_type_imputed']==12) & (trip['mode']==0), 'mode'] = 2
DRIVE_MODES = [3, 10]
trip.loc[(trip['mode_type_imputed'].isin(DRIVE_MODES)) & (trip['mode']==0), 'mode'] = 3
trip.loc[(trip['mode']==3) & (trip['num_travelers']==2), 'mode'] = 4
trip.loc[(trip['mode']==3) & (trip['num_travelers']>2), 'mode'] = 5
trip.loc[(trip['mode_type_imputed']==4) & (trip['mode']==0), 'mode'] = 9
trip.loc[(trip['mode_type_imputed']==9) & (trip['mode']==0), 'mode'] = 9
DRIVE_ACCESS_CODES = [5,6,7]
# TRANSIT_MODES = [5, 8]
TRANSIT_MODES = [5]
trip.loc[(trip['mode_type_imputed'].isin(TRANSIT_MODES)) & (trip['mode']==0), 'mode'] = 6
# trip.loc[(trip['mode_type_imputed']==8) & (trip['mode_1']==21), 'mode'] = 5 #make vanpool HOV3
trip.loc[(trip['mode_type_imputed']==8), 'mode'] = 5 #make both shuttle and vanpool HOV3
# trip.loc[(trip['mode_type_imputed']==13) & (trip['mode_1']==41) & (trip['mode']==0), 'mode'] = 6
trip.loc[(trip['mode']==6) &
((trip['bus_access'].isin(DRIVE_ACCESS_CODES)) | (trip['bus_egress'].isin(DRIVE_ACCESS_CODES)) |
         (trip['rail_access'].isin(DRIVE_ACCESS_CODES)) | (trip['rail_egress'].isin(DRIVE_ACCESS_CODES))), 'mode'] = 7
trip.loc[(trip['mode_type_imputed']==6) & (trip['mode']==0), 'mode'] = 8 #Schoolbus
trip['mode_4'] = trip['mode_3']
#0-none 1-fullnetwork 2-notoll 3-bus 4-lrt 5-prem 6-bart 7-ferry
trip['path'] = 0
trip.loc[(trip['mode_type_imputed']==3) & (trip['path']==0), 'path'] = 1
trip.loc[(trip['mode']==6) & (trip['path']==0), 'path'] = 3
trip.loc[(trip['mode']==6) &
((trip['mode_1'].isin([32])) | (trip['mode_2'].isin([32])) | (trip['mode_3'].isin([32])) | (trip['mode_4'].isin([32]))), 'path'] = 7
trip.loc[(trip['mode']==6) &
((trip['mode_1'].isin([30])) | (trip['mode_2'].isin([30])) | (trip['mode_3'].isin([30])) | (trip['mode_4'].isin([30]))), 'path'] = 6
trip.loc[(trip['mode']==6) &
((trip['mode_1'].isin([41,42,55])) | (trip['mode_2'].isin([41,42,55])) | (trip['mode_3'].isin([41,42,55])) | (trip['mode_4'].isin([41,42,55]))), 'path'] = 5
trip.loc[(trip['mode']==6) &
((trip['mode_1'].isin([39,68])) | (trip['mode_2'].isin([39,68])) | (trip['mode_3'].isin([39,68])) | (trip['mode_4'].isin([39,68]))), 'path'] = 4
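# dorp (driver/passenger role) codes assigned below: 1=driver, 2=passenger,
# 9=car trip with unknown role, 11/12/13=TNC trip with 1/2/3+ travelers, 3 otherwise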
trip['dorp'] = 3
trip.loc[trip['mode'].isin([3,4,5]), 'dorp'] = 9 #assign missing code for all car trips
trip.loc[(trip['mode'].isin([3,4,5])) & (trip['driver']==1), 'dorp'] = 1
trip.loc[(trip['mode'].isin([3,4,5])) & (trip['driver']==2), 'dorp'] = 2
trip.loc[(trip['mode']==9) & (trip['num_travelers']==1), 'dorp'] = 11
trip.loc[(trip['mode']==9) & (trip['num_travelers']==2), 'dorp'] = 12
trip.loc[(trip['mode']==9) & (trip['num_travelers']>2), 'dorp'] = 13
trip['depart_hour'] = trip['depart_time_imputed'].str.split(expand=True)[1].str.split(':',expand=True)[0].astype(int)
trip['depart_minute'] = trip['depart_time_imputed'].str.split(expand=True)[1].str.split(':',expand=True)[1].astype(int)
trip['deptm'] = trip['depart_hour']*100 + trip['depart_minute']
trip['arrtm'] = trip['arrive_hour']*100 + trip['arrive_minute']
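# deptm/arrtm are clock times packed as HHMM integers (e.g. 7:35 -> 735)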
trip['oxcord'] = trip['o_lon']
trip['oycord'] = trip['o_lat']
trip['dxcord'] = trip['d_lon']
trip['dycord'] = trip['d_lat']
trip = trip.rename(columns={'o_taz':'otaz_tmp', 'd_taz':'dtaz_tmp',
'o_bg_geo_id':'opcl_tmp', 'd_bg_geo_id':'dpcl_tmp'})
trip.loc[trip['o_county_fips'].isin(COUNTY_FIPS), 'otaz'] = trip.loc[trip['o_county_fips'].isin(COUNTY_FIPS), 'otaz_tmp']
trip.loc[trip['o_county_fips'].isin(COUNTY_FIPS), 'opcl'] = trip.loc[trip['o_county_fips'].isin(COUNTY_FIPS), 'opcl_tmp']
trip.loc[trip['d_county_fips'].isin(COUNTY_FIPS), 'dtaz'] = trip.loc[trip['d_county_fips'].isin(COUNTY_FIPS), 'dtaz_tmp']
trip.loc[trip['d_county_fips'].isin(COUNTY_FIPS), 'dpcl'] = trip.loc[trip['d_county_fips'].isin(COUNTY_FIPS), 'dpcl_tmp']
trip.loc[pd.isnull(trip['otaz']), 'otaz'] = -1
trip.loc[pd.isnull(trip['dtaz']), 'dtaz'] = -1
trip.loc[pd.isnull(trip['opcl']), 'opcl'] = -1
trip.loc[pd.isnull(trip['dpcl']), 'dpcl'] = -1
trip.loc[pd.isnull(trip['oxcord']), 'oxcord'] = -1.0
trip.loc[pd.isnull(trip['oycord']), 'oycord'] = -1.0
trip.loc[pd.isnull(trip['dxcord']), 'dxcord'] = -1.0
trip.loc[pd.isnull(trip['dycord']), 'dycord'] | |
size
:param str sort: Sorting RSQL
:param str q: RSQL Query
:return: ProductVersionPage
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('callback'):
return self.get_product_versions_with_http_info(id, **kwargs)
else:
(data) = self.get_product_versions_with_http_info(id, **kwargs)
return data
def get_product_versions_with_http_info(self, id, **kwargs):
"""
Get associated Product Versions of the specified Configuration
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.get_product_versions_with_http_info(id, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param int id: Build configuration id (required)
:param int page_index: Page Index
:param int page_size: Pagination size
:param str sort: Sorting RSQL
:param str q: RSQL Query
:return: ProductVersionPage
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['id', 'page_index', 'page_size', 'sort', 'q']
all_params.append('callback')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method get_product_versions" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'id' is set
if ('id' not in params) or (params['id'] is None):
raise ValueError("Missing the required parameter `id` when calling `get_product_versions`")
collection_formats = {}
path_params = {}
if 'id' in params:
path_params['id'] = params['id']
query_params = []
if 'page_index' in params:
query_params.append(('pageIndex', params['page_index']))
if 'page_size' in params:
query_params.append(('pageSize', params['page_size']))
if 'sort' in params:
query_params.append(('sort', params['sort']))
if 'q' in params:
query_params.append(('q', params['q']))
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json'])
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['application/json'])
# Authentication setting
auth_settings = []
return self.api_client.call_api('/build-configurations/{id}/product-versions', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='ProductVersionPage',
auth_settings=auth_settings,
callback=params.get('callback'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def get_revision(self, id, rev, **kwargs):
"""
Get specific audited revision of this build configuration
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.get_revision(id, rev, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param int id: Build configuration id (required)
:param int rev: Build configuration rev (required)
:return: BuildConfigurationAuditedSingleton
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('callback'):
return self.get_revision_with_http_info(id, rev, **kwargs)
else:
(data) = self.get_revision_with_http_info(id, rev, **kwargs)
return data
def get_revision_with_http_info(self, id, rev, **kwargs):
"""
Get specific audited revision of this build configuration
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.get_revision_with_http_info(id, rev, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param int id: Build configuration id (required)
:param int rev: Build configuration rev (required)
:return: BuildConfigurationAuditedSingleton
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['id', 'rev']
all_params.append('callback')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method get_revision" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'id' is set
if ('id' not in params) or (params['id'] is None):
raise ValueError("Missing the required parameter `id` when calling `get_revision`")
# verify the required parameter 'rev' is set
if ('rev' not in params) or (params['rev'] is None):
raise ValueError("Missing the required parameter `rev` when calling `get_revision`")
collection_formats = {}
path_params = {}
if 'id' in params:
path_params['id'] = params['id']
if 'rev' in params:
path_params['rev'] = params['rev']
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json'])
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['application/json'])
# Authentication setting
auth_settings = []
return self.api_client.call_api('/build-configurations/{id}/revisions/{rev}', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='BuildConfigurationAuditedSingleton',
auth_settings=auth_settings,
callback=params.get('callback'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def get_revisions(self, id, **kwargs):
"""
Gets audited revisions of this build configuration
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.get_revisions(id, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param int id: Build configuration id (required)
:param int page_index: Page Index
:param int page_size: Pagination size
:param str sort: Sorting RSQL
:return: BuildConfigurationAuditedPage
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('callback'):
return self.get_revisions_with_http_info(id, **kwargs)
else:
(data) = self.get_revisions_with_http_info(id, **kwargs)
return data
def get_revisions_with_http_info(self, id, **kwargs):
"""
Gets audited revisions of this build configuration
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.get_revisions_with_http_info(id, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param int id: Build configuration id (required)
:param int page_index: Page Index
:param int page_size: Pagination size
:param str sort: Sorting RSQL
:return: BuildConfigurationAuditedPage
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['id', 'page_index', 'page_size', 'sort']
all_params.append('callback')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method get_revisions" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'id' is set
if ('id' not in params) or (params['id'] is None):
raise ValueError("Missing the required parameter `id` when calling `get_revisions`")
collection_formats = {}
path_params = {}
if 'id' in params:
path_params['id'] = params['id']
query_params = []
if 'page_index' in params:
query_params.append(('pageIndex', params['page_index']))
if 'page_size' in params:
query_params.append(('pageSize', params['page_size']))
if 'sort' in params:
query_params.append(('sort', params['sort']))
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json'])
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['application/json'])
# Authentication setting
auth_settings = []
return self.api_client.call_api('/build-configurations/{id}/revisions', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='BuildConfigurationAuditedPage',
auth_settings=auth_settings,
callback=params.get('callback'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def get_specific(self, id, **kwargs):
"""
Gets a specific Build Configuration
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.get_specific(id, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param int id: Build Configuration id (required)
:return: BuildConfigurationSingleton
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('callback'):
return self.get_specific_with_http_info(id, **kwargs)
else:
(data) = self.get_specific_with_http_info(id, **kwargs)
return data
def get_specific_with_http_info(self, id, **kwargs):
"""
Gets a specific Build Configuration
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.get_specific_with_http_info(id, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param int id: Build Configuration id (required)
:return: BuildConfigurationSingleton
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['id']
all_params.append('callback')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method get_specific" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'id' is set
if ('id' not in params) or (params['id'] is None):
raise ValueError("Missing the required parameter `id` when calling `get_specific`")
collection_formats = {}
path_params = {}
if 'id' in params:
path_params['id'] = | |
#!/usr/bin/python
# -*- coding: utf-8 -*-
import argparse
import numpy as np
import tensorflow as tf
import time
import os
from sys import path
import tf_util as U
from maddpg import MADDPGAgentTrainer
# from maddpg import MADDPGEnsembleAgentTrainer
import tensorflow.contrib.layers as layers
# from tf_slim import layers
# from tf_slim import rnn
import scipy.io as sio
import rospy
import cbf
path.append("./home/caohuanhui/catkin_ws/src/two_loggers/loggers_control/scripts")
from double_ind import DoubleEscape
def parse_args():
parser = argparse.ArgumentParser("Reinforcement Learning experiments for multiagent environments")
# Environment
parser.add_argument("--scenario", type=str, default="simple_tag", help="name of the scenario script")
parser.add_argument("--max-episode-len", type=int, default=400, help="maximum episode length") #每个episode的步数为400步
    # number of episodes; previously defaulted to 60000, now set to 5000
parser.add_argument("--num-episodes", type=int, default=5000, help="number of episodes")
parser.add_argument("--num-adversaries", type=int, default=1, help="number of adversaries")
    # switch between ddpg and maddpg here
parser.add_argument("--good-policy", type=str, default="maddpg", help="policy for good agents")
parser.add_argument("--adv-policy", type=str, default="maddpg", help="policy of adversaries")
# Core training parameters
parser.add_argument("--lr", type=float, default=1e-2, help="learning rate for Adam optimizer")
parser.add_argument("--gamma", type=float, default=0.95, help="discount factor")
parser.add_argument("--batch-size", type=int, default=1024, help="number of episodes to optimize at the same time")
parser.add_argument("--num-units", type=int, default=64, help="number of units in the mlp")
# Checkpointing
parser.add_argument("--exp-name", type=str, default=None, help="name of the experiment")
parser.add_argument("--save-dir", type=str, default="./home/chh3213/ros_wc/src/two_loggers/loggers_control/scripts/maddpg-master/tmp/policy_new4/", help="directory in which training state and model should be saved")
parser.add_argument("--save-rate", type=int, default=200, help="save model once every time this many episodes are completed")
parser.add_argument("--load-dir", type=str, default="/home/chh3213/ros_wc/src/two_loggers/loggers_control/scripts/maddpg-master/tmp/policy_new4/", help="directory in which training state and model are loaded")
# Evaluation
parser.add_argument("--restore", action="store_true", default=False)
    # default=True brings up the rendering GUI
parser.add_argument("--display", action="store_true", default=False)
return parser.parse_args()
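# Example invocation (the script name here is hypothetical):
#   python train_maddpg.py --num-episodes 5000 --max-episode-len 400 --display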
def mlp_model(input, num_outputs, scope, reuse=False, num_units=64, rnn_cell=None):
# This model takes as input an observation and returns values of all actions
with tf.variable_scope(scope, reuse=reuse):
out = input
# out = tf.nn.dropout(out, 0.8) # dropout
out = layers.fully_connected(out, num_outputs=num_units, activation_fn=tf.nn.relu)
# out = tf.nn.dropout(out, 0.6) # dropout
out = layers.fully_connected(out, num_outputs=num_units, activation_fn=tf.nn.relu)
out = layers.fully_connected(out, num_outputs=num_outputs, activation_fn=tf.nn.tanh)
# out = layers.fully_connected(out, num_outputs=num_outputs, activation_fn=None)
return out
def mlp_model_q(input, num_outputs, scope, reuse=False, num_units=64, rnn_cell=None):
# This model takes as input an observation and returns values of all actions
with tf.variable_scope(scope, reuse=reuse):
out = input
# out = tf.nn.dropout(out, 0.8) # dropout
out = layers.fully_connected(out, num_outputs=num_units, activation_fn=tf.nn.relu)
# out = tf.nn.dropout(out, 0.8) # dropout
out = layers.fully_connected(out, num_outputs=num_units, activation_fn=tf.nn.relu)
# out = layers.fully_connected(out, num_outputs=num_outputs, activation_fn=tf.nn.relu)
out = layers.fully_connected(out, num_outputs=num_outputs, activation_fn=None)
return out
def lstm_mlp_model(input, num_outputs, scope, reuse=False, num_units=64, rnn_cell=None):
# This model takes as input an observation and returns values of all actions
import tensorflow.contrib.rnn as rnn
lstm_size = input.shape[1]
input = tf.expand_dims(input, 0) # [1,?,232]
with tf.variable_scope(scope, reuse=reuse):
        # fully_connected: fully connected layers
out = input
lstm = rnn.BasicLSTMCell(num_units, forget_bias=1.0, state_is_tuple=True)
# GRU = tf.nn.rnn_cell.GRUCell(num_units)
init_state = lstm.zero_state(1, dtype=tf.float32)
# outputs, _states = rnn.static_rnn(lstm, X_split, dtype=tf.float32)
outputs, _states = tf.nn.dynamic_rnn(lstm, out, time_major=False, initial_state=init_state)
# outputs = tf.convert_to_tensor(np.array(outputs))
out = layers.fully_connected(outputs[-1], num_outputs=num_units, activation_fn=tf.nn.relu)
out = layers.fully_connected(out, num_outputs=num_units, activation_fn=tf.nn.relu)
out = layers.fully_connected(out, num_outputs=num_outputs, activation_fn=tf.nn.tanh)
return out
def lstm_mlp_model_q(input, num_outputs, scope, reuse=False, num_units=64, rnn_cell=None):
# This model takes as input an observation and returns values of all actions
import tensorflow.contrib.rnn as rnn
lstm_size = input.shape[1]
input = tf.expand_dims(input, 0) # [1,?,232]
with tf.variable_scope(scope, reuse=reuse):
        # fully_connected: fully connected layers
out = input
lstm = rnn.BasicLSTMCell(num_units, forget_bias=1.0, state_is_tuple=True)
GRU = tf.nn.rnn_cell.GRUCell(num_units)
init_state = lstm.zero_state(1, dtype=tf.float32)
# outputs, _states = rnn.static_rnn(lstm, X_split, dtype=tf.float32)
outputs, _states = tf.nn.dynamic_rnn(lstm, out, time_major=False, initial_state=init_state)
# outputs = tf.convert_to_tensor(np.array(outputs))
out = layers.fully_connected(outputs[-1], num_outputs=num_units, activation_fn=tf.nn.relu)
# out = tf.nn.dropout(out, 0.8) # dropout
out = layers.fully_connected(out, num_outputs=num_units, activation_fn=tf.nn.relu)
out = layers.fully_connected(out, num_outputs=num_outputs, activation_fn=None)
return out
# modified
def get_trainers(env, num_adversaries, obs_shape_n, arglist):
'''
    Read the number of agents from env, then build agent trainer instances from
    env's action_space and obs_shape_n.
    make the List of trainers
    @Output: List of trainers (the created trainer instances)
'''
trainers = []
model1 = mlp_model
model2 = mlp_model_q
model_lstm1 = lstm_mlp_model
model_lstm2 = lstm_mlp_model_q
trainer = MADDPGAgentTrainer
# trainer = MADDPGEnsembleAgentTrainer
    # add the adversaries to the trainers list
for i in range(num_adversaries):
trainers.append(trainer(
"agent_%d" % i, model1, model2, obs_shape_n, env.action_space, i, arglist,
            local_q_func=(arglist.adv_policy == 'ddpg'))) # action_reservoir may need modification
    # add the (good) agents to the trainers list
for i in range(num_adversaries, 2):
trainers.append(trainer(
"agent_%d" % i, model1, model2, obs_shape_n, env.action_space, i, arglist,
local_q_func=(arglist.good_policy == 'ddpg')))
return trainers
# core training routine
def train(arglist):
with U.single_threaded_session():
# Create environment
env = DoubleEscape()
# Create agent trainers
obs_shape_n = [env.observation_space[i].shape for i in range(2)]
num_adversaries = 1
###
trainers = get_trainers(env, num_adversaries, obs_shape_n, arglist)
print('Using good policy {} and adv policy {}'.format(arglist.good_policy, arglist.adv_policy))
# Initialize
U.initialize()
# Load previous results, if necessary
if arglist.load_dir == "":
arglist.load_dir = arglist.save_dir
if arglist.display or arglist.restore:
print("===============================")
print('Loading previous state...')
print("===============================")
filename = 'gazeboSimulink'
arglist.load_dir= os.path.join( arglist.load_dir, filename)
U.load_state(arglist.load_dir)
episode_rewards = [0.0] # sum of rewards for all agents
agent_rewards = [[0.0] for _ in range(2)] # individual agent reward
final_ep_rewards = [] # sum of rewards for training curve
final_ep_ag_rewards = [] # agent rewards for training curve
agent_info = [[[]]] # placeholder for benchmarking info
saver = tf.train.Saver()
obs_n = env.reset()
episode_step = 0
train_step = 0
abound = 0.8
bbound = 1.6
t_start = time.time()
print('Starting iterations...')
# chh ===============================
        # data saving
episode_reward = []
step_episode = []
position_ = []
volocity = []
step = [i for i in range(arglist.max_episode_len+1)]
action_save = []
pos = [0, 0, 0]
file_folder_name = "/home/firefly/chh_ws/src/two_loggers/loggers_control/scripts/maddpg-master/save_data/训练数据/policy_new4" # policy_continueTest
if not os.path.exists(file_folder_name):
os.makedirs(file_folder_name)
while True:
action_n = [agent.action(obs) for agent, obs in zip(trainers, obs_n)]
# print('action_n',action_n)
# pos[0] = obs_n[0][0]
# pos[1] = obs_n[0][1]
# pos[2] = obs_n[0][4]
# ct = np.sqrt(np.square(pos[0]) + np.square((pos[1])))
# ct = pos[0] / ct
# theta = np.arccos(ct)
# if pos[1] > 0:
# theta = theta
# elif pos[1] <= 0:
# theta = -theta
# pos[2] = theta - pos[2]
# if pos[2] > 3.142:
# pos[2] -= 6.28
# elif pos[2] < -3.142:
# pos[2] += 6.28
# temp = pos[2]
# if np.abs(pos[2]) > np.abs(8*(1.8 - np.sqrt(obs_n[0][2]**2 + obs_n[0][3]**2))):
# temp = np.abs(8*(1.8 - np.sqrt(obs_n[0][2]**2 + obs_n[0][3]**2)))
# if pos[2] < 0:
# pos[2] = -2
# if pos[2] > 0:
# pos[2] = 2
# if pos[2] == 0:
# pos[2] = 0
# action_n[0] = [1.5, -pos[2]]
# action_n = cbf.cbf(obs_n, action_n)
# action_n[1] = [0, 0]
            # default (scripted) policies
from pursuer_default import default_p
from evader_default import default_e
action_n[0]=default_p(obs_n,action_n)
# action_n[1]=default_e(obs_n,action_n)
# action_n = cbf.cbf(obs_n, action_n)
# print(action_n[0])
            # step the environment with all n agents' actions; it returns each agent's new observation, reward, done flag and extra info
new_obs_n, rew_n, done_n, info_n = env.step(action_n)
# print('sd',done_n)
# print(
# "\nstate: {} \naction: {} \nreward: {} \ndone: {} \nn_state: {}".format(obs_n, action_n, rew_n, done_n, new_obs_n))
episode_step += 1
# done = all(done_n)
done = any(done_n)
terminal = (episode_step >= arglist.max_episode_len)
# collect experience
            # enumerate() pairs each element of an iterable (list, tuple, string, ...) with its index; it is typically used in for loops
            # store each agent's transition in its replay buffer
print(rew_n)
for i, agent in enumerate(trainers):
agent.experience(obs_n[i], action_n[i], rew_n[i], new_obs_n[i], done_n[i], terminal)
# agent.experience(obs_n, action_n, rew_n, new_obs_n, done_n, terminal) # ====emsable
            # update the observations
obs_n = new_obs_n
            # note (chh)
            for i, rew in enumerate(rew_n): # update the total reward and each agent's reward
episode_rewards[-1] += rew
agent_rewards[i][-1] += rew
            # ## added by chh
            # # data saving
            step_episode.append(rew_n[0]) # store the pursuer's reward for this step
volocity.append(obs_n[0][4:8])
action_save.append(action_n)
if done or terminal:
'''
network_vs_default
default_vs_default
default_vs_network
network_vs_network
# '''
# if done:
# print('*'*20)
# print('done:', episode_step)
                # # # save the data to a .mat file
# sio.savemat(file_folder_name + '/network_vs_network-a4_2.mat',
# {'step': step, 'position': position_, 'volocity': volocity, 'action_save': action_save})
# print('save !!!')
                # break # exit after saving
                episode_reward.append(step_episode) # append this episode's per-step rewards (up to 400 steps)
                # reset
                step_episode = [] # clear the per-episode reward list
obs_n = env.reset()
episode_step = 0
episode_rewards.append(0)
for a in agent_rewards:
a.append(0)
agent_info.append([[]])
print('**************************************************************************')
print(len(episode_rewards))
print('**************************************************************************')
# print("lenth of episode_rewards is :", len(episode_rewards))
# print(f"finished no.{num_terminal} episode!") # chh 2020/10/20
# increment global step counter
train_step += 1
            # update all trainers' parameters, if not in display or benchmark mode
loss = None
for agent in trainers:
agent.preupdate()
for agent in trainers:
loss = agent.update(trainers, train_step)
if done or terminal: #modified
name='gazeboSimulink'
if not os.path.exists(arglist.save_dir):
os.makedirs(arglist.save_dir)
                U.save_state(arglist.save_dir+name, saver=saver) # save the model!!
# print statement depends on whether or not there are adversaries
if num_adversaries == 0:
print("steps: {}, episodes: {}, mean episode reward: {}, time: {} \n".format(
train_step, len(episode_rewards), np.mean(episode_rewards[-arglist.save_rate:]), round(time.time()-t_start, 3)))
else:
print("steps: {}, episodes: {}, mean episode reward: {}, agent episode reward: {}, time: {} \n".format(
train_step, len(episode_rewards), np.mean(episode_rewards[-arglist.save_rate:]),
[np.mean(rew[-arglist.save_rate:]) for rew in agent_rewards], round(time.time()-t_start, 3)))
t_start = time.time()
# Keep track of final episode reward
final_ep_rewards.append(np.mean(episode_rewards[-arglist.save_rate:]))
for rew in agent_rewards:
final_ep_ag_rewards.append(np.mean(rew[-arglist.save_rate:]))
# edit by caohuanhui
# saves final episode reward for plotting training curve later
if len(episode_rewards) > arglist.num_episodes:
print('...Finished total of {} episodes.'.format(len(episode_rewards)))
print("总共耗时:".format(round(time.time()-t_start, 3))) # | |
<filename>5-Filtri/ukazi.py<gh_stars>1-10
# Autogenerated with SMOP 0.32-7-gcce8558
from smop.core import *
from matplotlib.pyplot import *
from numpy import *
#
' <NAME>'
'-------------------------------------------------------------------'
# filtering in the frequency domain
close_('all')
clc
Fsamp=1024
# /media/martin/2ACC5A8FCC5A54E3/ukazi5.m:5
LomnaFrek=50
# /media/martin/2ACC5A8FCC5A54E3/ukazi5.m:6
figure
x=randn(1,dot(1,Fsamp))
# /media/martin/2ACC5A8FCC5A54E3/ukazi5.m:9
X=fft.fft(x)
# /media/martin/2ACC5A8FCC5A54E3/ukazi5.m:10
F=matlabarray(cat(ones(1,LomnaFrek),zeros(1,length(x) - dot(2,LomnaFrek) + 1),ones(1,LomnaFrek - 1)))
# /media/martin/2ACC5A8FCC5A54E3/ukazi5.m:11
subplot(2,1,1)
hold('on')
plot(abs(X) / max(abs(ravel(X))))
plot(F,'r',lineWidth=2)
xlabel('f (Hz)')
ylabel('amplituda')
title(cat('Lomna frekvenca = ',num2str(LomnaFrek),' Hz'))
axis('tight')
R=multiply(X,F)
# /media/martin/2ACC5A8FCC5A54E3/ukazi5.m:19
r=fft.ifft(R)
# /media/martin/2ACC5A8FCC5A54E3/ukazi5.m:20
subplot(2,1,2)
hold('on')
plot(x)
plot(r,'r',lineWidth=2)
xlabel('t (vzorci)')
ylabel('amplituda')
axis('tight')
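# A hedged numpy-only sketch of the same idea (brick-wall low-pass by zeroing FFT
# bins); the function and variable names below are illustrative, not part of the
# original script.
import numpy as np

def fft_lowpass(x, cutoff_bins):
    # keep the first `cutoff_bins` bins plus their mirrored negative-frequency
    # counterparts, zero everything else, then transform back to the time domain
    X = np.fft.fft(x)
    mask = np.zeros(len(X))
    mask[:cutoff_bins] = 1.0
    mask[-(cutoff_bins - 1):] = 1.0
    return np.real(np.fft.ifft(X * mask))

# e.g. r_check = fft_lowpass(np.random.randn(1024), 50) mirrors the block above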
'-------------------------------------------------------------------'
########################################
# impulse response of the filter - anticausality, filter length, convolution in the time domain...
close_('all')
clc
Fsamp=1024
# /media/martin/2ACC5A8FCC5A54E3/ukazi5.m:31
LomnaFrek=50
# /media/martin/2ACC5A8FCC5A54E3/ukazi5.m:32
F=matlabarray(cat(ones(1,LomnaFrek),zeros(1,Fsamp - dot(2,LomnaFrek) + 1),ones(1,LomnaFrek - 1)))
# /media/martin/2ACC5A8FCC5A54E3/ukazi5.m:33
f=fft.ifftshift(fft.ifft(F))
# /media/martin/2ACC5A8FCC5A54E3/ukazi5.m:35
# shift the time axis by length(F)/2 samples to the left.
figure
subplot(3,1,1)
plot(cat(arange(- length(f) / 2,length(f) / 2 - 1)),f)
xlabel('t (vzorci)')
ylabel('amplituda')
axis('tight')
title(cat('impulzni odziv frekvennega filtra z Lomno frekvenco = ',num2str(LomnaFrek),' Hz'))
subplot(3,1,2)
plot(cat(arange(- length(f) / 2,length(f) / 2 - 1)),abs(f))
xlabel('t (vzorci)')
ylabel('amplituda')
title('absolutna vrednost impulznega odziva frekvennega filtra')
#set(gca,'YScale','log');
axis('tight')
subplot(3,1,3)
plot(cat(arange(- length(f) / 2,length(f) / 2 - 1)),dot(20,log10(abs(f))))
xlabel('t (vzorci)')
ylabel('amplituda (dB)')
title('absolutna vrednost impulznega odziva frekvennega filtra')
axis('tight')
#####################################
# try to make the filter finite and causal
# the impulse response is already stored in the variable computed above
LomnaFrek=200
# /media/martin/2ACC5A8FCC5A54E3/ukazi5.m:59
Fsamp=1024
# /media/martin/2ACC5A8FCC5A54E3/ukazi5.m:60
F=matlabarray(cat(ones(1,LomnaFrek),zeros(1,Fsamp - dot(2,LomnaFrek) + 1),ones(1,LomnaFrek - 1)))
# /media/martin/2ACC5A8FCC5A54E3/ukazi5.m:61
f=fft.ifftshift(fft.ifft(F))
# /media/martin/2ACC5A8FCC5A54E3/ukazi5.m:62
# shift the time axis by length(F)/2 samples to the left.
f2=copy(f)
# /media/martin/2ACC5A8FCC5A54E3/ukazi5.m:64
f2[512:1024]=f2[512 - 20:1024 - 20]
# /media/martin/2ACC5A8FCC5A54E3/ukazi5.m:65
f2[1:511]=0
# /media/martin/2ACC5A8FCC5A54E3/ukazi5.m:66
f2[512 + 100:1024]=0
# /media/martin/2ACC5A8FCC5A54E3/ukazi5.m:67
F2=fft.fft(f2)
# /media/martin/2ACC5A8FCC5A54E3/ukazi5.m:68
close_('all')
clc
figure
subplot(3,1,1)
hold('on')
plot(cat(arange(- length(f2) / 2,length(f2) / 2 - 1)),f2,'r')
plot(cat(arange(- length(f) / 2,length(f) / 2 - 1)),f,'b')
xlabel('t (vzorci)')
ylabel('amplituda')
axis('tight')
title(cat('impulzni odziv frekvennega filtra z Lomno frekvenco = ',num2str(LomnaFrek),' Hz'))
subplot(3,1,2)
hold('on')
plot(cat(arange(- length(f2) / 2,length(f2) / 2 - 1)),dot(20,log10(abs(f2))),'r')
plot(cat(arange(- length(f) / 2,length(f) / 2 - 1)),dot(20,log10(abs(f))))
xlabel('t (vzorci)')
ylabel('amplituda (dB)')
title('absolutna vrednost frekvenne karakteristike filtra')
axis('tight')
subplot(3,1,3)
hold('on')
plot(abs(F))
plot(abs(F2),'r')
xlabel('f (Hz)')
ylabel('amplituda')
title('absolutna vrednost frekvenne karakteristike filtra')
#set(gca,'YScale','log');
axis('tight')
#####################################
# animation: shifting the filter's impulse response to the right
LomnaFrek=200
# /media/martin/2ACC5A8FCC5A54E3/ukazi5.m:95
Fsamp=1024
# /media/martin/2ACC5A8FCC5A54E3/ukazi5.m:96
F=matlabarray(cat(ones(1,LomnaFrek),zeros(1,Fsamp - dot(2,LomnaFrek) + 1),ones(1,LomnaFrek - 1)))
# /media/martin/2ACC5A8FCC5A54E3/ukazi5.m:97
f=fft.ifftshift(fft.ifft(F))
# /media/martin/2ACC5A8FCC5A54E3/ukazi5.m:98
# shift the time axis by length(F)/2 samples to the left.
close_('all')
clc
figure
for dt in arange(0,200,5).reshape(-1):
dRepL=100
# /media/martin/2ACC5A8FCC5A54E3/ukazi5.m:104
dRepD=100
# /media/martin/2ACC5A8FCC5A54E3/ukazi5.m:105
f2=copy(f)
# /media/martin/2ACC5A8FCC5A54E3/ukazi5.m:106
f2[512 - dRepL:1024]=f2[512 - dt - dRepL:1024 - dt]
# /media/martin/2ACC5A8FCC5A54E3/ukazi5.m:107
f2[1:511 + dt - dRepL]=0
# /media/martin/2ACC5A8FCC5A54E3/ukazi5.m:108
f2[512 + dt + dRepD:1024]=0
# /media/martin/2ACC5A8FCC5A54E3/ukazi5.m:109
F2=fft.fft(f2)
# /media/martin/2ACC5A8FCC5A54E3/ukazi5.m:110
subplot(3,1,1)
clf()
hold('on')
plot(cat(arange(- length(f) / 2,length(f) / 2 - 1)),f,'b')
plot(cat(arange(- length(f2) / 2,length(f2) / 2 - 1)),f2,'r')
xlabel('t (vzorci)')
ylabel('amplituda')
axis('tight')
title(cat('impulzni odziv frekvennega filtra z Lomno frekvenco = ',num2str(LomnaFrek),' Hz'))
subplot(3,1,2)
clf()
hold('on')
plot(abs(F))
plot(abs(F2),'r')
xlabel('f (Hz)')
ylabel('amplituda')
title('absolutna vrednost FFT')
axis('tight')
subplot(3,1,3)
clf()
hold('on')
plot(unwrap(atan2(imag(F),real(F))),lineWidth=2)
plot(unwrap(atan2(imag(F2),real(F2))),'r')
xlabel('f (Hz)')
ylabel('faza')
title('faza FFT')
axis('tight')
#set(gca,'YLim',cat(0,2500))
autoscale(False)
ylim(0,2500)
pause(0.5)
#####################################
# animation: effect of the impulse-response length - Gibbs effect
LomnaFrek=200
# /media/martin/2ACC5A8FCC5A54E3/ukazi5.m:140
Fsamp=1024
# /media/martin/2ACC5A8FCC5A54E3/ukazi5.m:141
F=matlabarray(cat(ones(1,LomnaFrek),zeros(1,Fsamp - dot(2,LomnaFrek) + 1),ones(1,LomnaFrek - 1)))
# /media/martin/2ACC5A8FCC5A54E3/ukazi5.m:142
f=fft.ifftshift(fft.ifft(F))
# /media/martin/2ACC5A8FCC5A54E3/ukazi5.m:143
# shift the time axis by length(F)/2 samples to the left.
close_('all')
clc
figure
dt=100
# /media/martin/2ACC5A8FCC5A54E3/ukazi5.m:148
for dRepL in arange(100,10,- 5).reshape(-1):
dRepD=copy(dRepL)
# /media/martin/2ACC5A8FCC5A54E3/ukazi5.m:151
f2=copy(f)
# /media/martin/2ACC5A8FCC5A54E3/ukazi5.m:152
f2[512 - dRepL:1024]=f2[512 - dt - dRepL:1024 - dt]
# /media/martin/2ACC5A8FCC5A54E3/ukazi5.m:153
f2[1:511 + dt - dRepL]=0
# /media/martin/2ACC5A8FCC5A54E3/ukazi5.m:154
f2[512 + dt + dRepD:1024]=0
# /media/martin/2ACC5A8FCC5A54E3/ukazi5.m:155
F2=fft.fft(f2)
# /media/martin/2ACC5A8FCC5A54E3/ukazi5.m:156
subplot(3,1,1)
clf()
hold('on')
plot(cat(arange(- length(f) / 2,length(f) / 2 - 1)),f,'b')
plot(cat(arange(- length(f2) / 2,length(f2) / 2 - 1)),f2,'r')
xlabel('t (vzorci)')
ylabel('amplituda')
axis('tight')
title(cat('impulzni odziv frekvennega filtra z Lomno frekvenco = ',num2str(LomnaFrek),' Hz, dolina filtra = ',num2str(dRepD + dRepL),' vzorcev'))
subplot(3,1,2)
clf()
hold('on')
plot(abs(F))
plot(abs(F2),'r')
xlabel('f (Hz)')
ylabel('amplituda')
title('absolutna vrednost FFT')
axis('tight')
subplot(3,1,3)
clf()
hold('on')
plot(unwrap(atan2(imag(F),real(F))),lineWidth=2)
plot(unwrap(atan2(imag(F2),real(F2))),'r')
xlabel('f (Hz)')
ylabel('faza')
title('faza FFT')
axis('tight')
set(gca,'YLim',cat(0,2500))
pause(2)
'-------------------------------------------------------------------'
#####################################
# moving average (running mean)
close_('all')
clc
clear('y')
fsamp=1024
# /media/martin/2ACC5A8FCC5A54E3/ukazi5.m:189
x=randn(1,dot(1,fsamp))
# /media/martin/2ACC5A8FCC5A54E3/ukazi5.m:190
m=2
# /media/martin/2ACC5A8FCC5A54E3/ukazi5.m:191
for n in arange(m,length(x)).reshape(-1):
y[n]=sum(x[n - m + 1:n])
# /media/martin/2ACC5A8FCC5A54E3/ukazi5.m:193
X=fft.fft(x)
# /media/martin/2ACC5A8FCC5A54E3/ukazi5.m:195
Y=fft.fft(y)
# /media/martin/2ACC5A8FCC5A54E3/ukazi5.m:196
close_('all')
clc
figure
subplot(2,1,1)
hold('on')
plot(x)
plot(y,'r',lineWidth=2)
xlabel('t (vzorci)')
ylabel('amplituda')
axis('tight')
title(cat('Tekoe povpreje, m = ',num2str(m)))
subplot(2,1,2)
hold('on')
plot(abs(X))
plot(abs(Y),'r',lineWidth=2)
xlabel('f (Hz)')
ylabel('amplituda')
title(cat('absolutna vrednost FFT'))
axis('tight')
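# A hedged numpy shortcut for the same idea: a length-m running mean is a
# convolution with a box kernel (the names below are illustrative only).
import numpy as np

def moving_average(x, m):
    # mode='same' keeps the output aligned with the input, like the loop above
    return np.convolve(x, np.ones(m) / m, mode='same')

# e.g. y_check = moving_average(np.random.randn(1024), 5)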
########################################################
# animation for different filter orders m
close_('all')
clc
clear('y')
fsamp=1024
# /media/martin/2ACC5A8FCC5A54E3/ukazi5.m:219
x=randn(1,dot(1,fsamp))
# /media/martin/2ACC5A8FCC5A54E3/ukazi5.m:220
X=fft.fft(x)
# /media/martin/2ACC5A8FCC5A54E3/ukazi5.m:221
figure
for m in cat(arange(1,4),arange(5,100,5)).reshape(-1):
for n in arange(m,length(x)).reshape(-1):
y[n]=mean(x[n - m + 1:n])
# /media/martin/2ACC5A8FCC5A54E3/ukazi5.m:226
Y=fft.fft(y)
# /media/martin/2ACC5A8FCC5A54E3/ukazi5.m:228
subplot(2,1,1)
clf()
hold('on')
plot(x)
plot(y,'r',lineWidth=2)
xlabel('t (vzorci)')
ylabel('amplituda')
axis('tight')
title(cat('Tekoe povpreje, m = ',num2str(m)))
subplot(2,1,2)
clf()
hold('on')
plot(abs(X))
plot(abs(Y),'r',lineWidth=2)
xlabel('f (Hz)')
ylabel('amplituda')
title(cat('absolutna vrednost FFT'))
axis('tight')
pause(1)
#############################################
# frequency response of the moving average
close_('all')
clc
figure
for m in cat(arange(1,4),arange(5,20,5)).reshape(-1):
subplot(2,1,1)
clf()
hold('on')
f=matlabarray(cat(ones(1,m),zeros(1,length(x) - m)))
# /media/martin/2ACC5A8FCC5A54E3/ukazi5.m:253
F=fft.fft(f)
# /media/martin/2ACC5A8FCC5A54E3/ukazi5.m:254
plot(abs(F),'m',lineWidth=2)
xlabel('f (Hz)')
ylabel('amplituda')
title(cat('absolutna vrednost FFT, povpreje preko, m = ',num2str(m)))
axis('tight')
subplot(2,1,2)
clf()
hold('on')
plot(unwrap(atan2(imag(F),real(F))),lineWidth=2)
xlabel('f (Hz)')
ylabel('faza')
title('faza FFT')
axis('tight')
pause()
'------------------------------------------------------------------------------------------------------'
# FILTER DESIGN IN MATLAB (a hedged SciPy sketch follows the list below)
# * butter - Butterworth filter no gain ripple in pass band and stop band, slow cutoff
# * cheby1 - Chebyshev filter (Type I) no gain ripple in stop band, moderate cutoff
# * cheby2 - Chebyshev filter (Type II) no gain ripple in pass band, moderate cutoff
# * ellip - Elliptic filter gain ripple in pass and stop band, fast cutoff
# * besself - Bessel filter no group delay ripple, no gain ripple in both bands, slow gain cutoff
'-------------------------------------------------------------------------------------------------------'
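# A hedged sketch of the SciPy counterparts of the design functions listed above;
# they live in scipy.signal and use the same normalized cutoff convention
# (1.0 = Nyquist). Parameter values below are illustrative only.
from scipy import signal

b, a = signal.butter(5, 0.2, btype='low')             # Butterworth
b, a = signal.cheby1(5, 0.5, 0.2, btype='low')        # Chebyshev I, 0.5 dB pass-band ripple
b, a = signal.cheby2(5, 20.0, 0.2, btype='low')       # Chebyshev II, 20 dB stop-band attenuation
b, a = signal.ellip(5, 0.5, 20.0, 0.2, btype='low')   # Elliptic
w, h = signal.freqz(b, a, worN=128)                   # frequency response of the last design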
clear
clc
filter_order=5
# /media/martin/2ACC5A8FCC5A54E3/ukazi5.m:281
Rp=0.5
# /media/martin/2ACC5A8FCC5A54E3/ukazi5.m:282
Rp2=20
# /media/martin/2ACC5A8FCC5A54E3/ukazi5.m:283
Rs=20
# /media/martin/2ACC5A8FCC5A54E3/ukazi5.m:284
Wn=100 / 500
# /media/martin/2ACC5A8FCC5A54E3/ukazi5.m:285
f=matlabarray(cat(0,0.2,0.2,1))
# /media/martin/2ACC5A8FCC5A54E3/ukazi5.m:286
m=matlabarray(cat(1,1,0,0))
# /media/martin/2ACC5A8FCC5A54E3/ukazi5.m:287
subplot(2,2,1)
hold('on')
b,a=cheby1(filter_order,Rp,Wn,'low',nargout=2)
# /media/martin/2ACC5A8FCC5A54E3/ukazi5.m:290
h,w=freqz(b,a,128,nargout=2)
# /media/martin/2ACC5A8FCC5A54E3/ukazi5.m:291
plot(f,m,lineWidth=2)
plot(w / pi,abs(h),'r',lineWidth=2)
axis('tight')
legend('idealni','nartovan')
xlabel('2*f/f_{vzorevalna}','FontSize',12)
ylabel('amplituda','FontSize',12)
title(cat('Chebyshev filter (Tip 1): red =',num2str(filter_order)),'FontSize',12)
subplot(2,2,2)
hold('on')
b,a=cheby2(filter_order,Rp2,Wn,'low',nargout=2)
# /media/martin/2ACC5A8FCC5A54E3/ukazi5.m:301
h,w=freqz(b,a,128,nargout=2)
# /media/martin/2ACC5A8FCC5A54E3/ukazi5.m:302
plot(f,m,lineWidth=2)
plot(w / pi,abs(h),'r',lineWidth=2)
axis('tight')
legend('idealni','nartovan')
xlabel('2*f/f_{vzorevalna}','FontSize',12)
ylabel('amplituda','FontSize',12)
title(cat('Chebyshev filter (Tip 2): red =',num2str(filter_order)),'FontSize',12)
subplot(2,2,3)
hold('on')
b,a=butter(filter_order,Wn,'low',nargout=2)
# /media/martin/2ACC5A8FCC5A54E3/ukazi5.m:312
h,w=freqz(b,a,128,nargout=2)
# /media/martin/2ACC5A8FCC5A54E3/ukazi5.m:313
plot(f,m,lineWidth=2)
plot(w / pi,abs(h),'r',lineWidth=2)
axis('tight')
legend('idealni','nartovan')
xlabel('2*f/f_{vzorevalna}','FontSize',12)
ylabel('amplituda','FontSize',12)
title(cat('Butterworth filter: red =',num2str(filter_order)),'FontSize',12)
subplot(2,2,4)
hold('on')
b,a=ellip(filter_order,Rp,Rs,Wn,'low',nargout=2)
# /media/martin/2ACC5A8FCC5A54E3/ukazi5.m:323
h,w=freqz(b,a,128,nargout=2)
# /media/martin/2ACC5A8FCC5A54E3/ukazi5.m:324
plot(f,m,lineWidth=2)
plot(w / pi,abs(h),'r',lineWidth=2)
axis('tight')
legend('idealni','nartovan')
xlabel('2*f/f_{vzorevalna}','FontSize',12)
ylabel('amplituda','FontSize',12)
title(cat('Elliptic filter: red =',num2str(filter_order)),'FontSize',12)
#####################################
# NOTE! NOTE! NOTE! NOTE! NOTE!
# for band-pass and band-stop filters, the functions
# [b,a] = cheby1(filter_order,Rp,Wn)
# [b,a] = cheby2(filter_order,Rp,Wn)
# [b,a] = butter(filter_order,Wn)
# generate filters of order 2*filter_order !!!
# see help cheby1, help cheby2, help butter (a hedged check follows below)
#####################################
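# A hedged SciPy check of the warning above: a band-pass design of nominal order N
# comes out with a transfer function of order 2*N there as well.
from scipy import signal

b_bp, a_bp = signal.butter(3, [0.2, 0.4], btype='bandpass')
assert len(a_bp) - 1 == 6   # denominator order is 2 * 3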
#####################################
# CHEBYSHEV TYPE 1 FILTER - no gain ripple in stop band, moderate cutoff
filter_order=3
# /media/martin/2ACC5A8FCC5A54E3/ukazi5.m:347
Rp=0.5
# /media/martin/2ACC5A8FCC5A54E3/ukazi5.m:348
Wn=cat(100,200) / 500
# /media/martin/2ACC5A8FCC5A54E3/ukazi5.m:349
b,a=cheby1(filter_order,Rp,Wn,nargout=2)
# /media/martin/2ACC5A8FCC5A54E3/ukazi5.m:350
fvtool(b,a)
figure
h,w=freqz(b,a,128,nargout=2)
# /media/martin/2ACC5A8FCC5A54E3/ukazi5.m:354
plot(w / pi,abs(h),lineWidth=2)
axis('tight')
legend('konstruiran filter')
xlabel('2*f/f_{vzorevalna}','FontSize',14)
ylabel('amplituda','FontSize',14)
title(cat('absolutna vrednost frekvennega odziva, red filtra = ',num2str(filter_order)))
originalni_signal=randn(1000,1)
# /media/martin/2ACC5A8FCC5A54E3/ukazi5.m:362
filtriran_signal=filter(b,a,originalni_signal)
# /media/martin/2ACC5A8FCC5A54E3/ukazi5.m:363
figure
subplot(2,1,1)
plot(originalni_signal)
axis('tight')
xlabel('vzorci')
ylabel('amplituda')
title('originalni signal')
subplot(2,1,2)
plot(ravel(filtriran_signal),'r')
axis('tight')
xlabel('vzorci')
ylabel('amplituda')
title('filtriran signal')
# CHEBYSHEV TYPE 2 FILTER - no gain ripple in pass band, moderate cutoff
filter_order=10
# /media/martin/2ACC5A8FCC5A54E3/ukazi5.m:371
Rp2=100
# /media/martin/2ACC5A8FCC5A54E3/ukazi5.m:372
Wn=cat(100,200) / 500
# /media/martin/2ACC5A8FCC5A54E3/ukazi5.m:373
b,a=cheby2(filter_order,Rp2,Wn,nargout=2)
# /media/martin/2ACC5A8FCC5A54E3/ukazi5.m:374
fvtool(b,a)
figure
h,w=freqz(b,a,128,nargout=2)
# /media/martin/2ACC5A8FCC5A54E3/ukazi5.m:378
plot(w / pi,abs(h),lineWidth=2)
axis('tight')
legend('konstruiran filter')
xlabel('2*f/f_{vzorevalna}','FontSize',14)
ylabel('amplituda','FontSize',14)
title(cat('absolutna vrednost frekvennega odziva, red filtra = ',num2str(filter_order)))
originalni_signal=randn(1000,1)
# /media/martin/2ACC5A8FCC5A54E3/ukazi5.m:386
filtriran_signal=filter(b,a,originalni_signal)
# /media/martin/2ACC5A8FCC5A54E3/ukazi5.m:387
figure
subplot(2,1,1)
plot(originalni_signal)
axis('tight')
xlabel('vzorci')
ylabel('amplituda')
title('originalni signal')
subplot(2,1,2)
plot(filtriran_signal,'r')
axis('tight')
xlabel('vzorci')
ylabel('amplituda')
title('filtriran signal')
#####################################
# BUTTERWORTH FILTER - no gain ripple in pass band and stop band, slow cutoff
filter_order=5
# /media/martin/2ACC5A8FCC5A54E3/ukazi5.m:396
Wn=cat(100,200) / 500
# /media/martin/2ACC5A8FCC5A54E3/ukazi5.m:397
b,a=butter(filter_order,Wn,nargout=2)
# /media/martin/2ACC5A8FCC5A54E3/ukazi5.m:398
fvtool(b,a)
figure
h,w=freqz(b,a,128,nargout=2)
# /media/martin/2ACC5A8FCC5A54E3/ukazi5.m:402
plot(w / pi,abs(h),lineWidth=2)
axis('tight')
legend('konstruiran filter')
xlabel('2*f/f_{vzorevalna}','FontSize',14)
ylabel('amplituda','FontSize',14)
title(cat('absolutna vrednost frekvennega odziva, red filtra = ',num2str(filter_order)))
originalni_signal=randn(10000,1)
# /media/martin/2ACC5A8FCC5A54E3/ukazi5.m:410
filtriran_signal=filter(b,a,originalni_signal)
# /media/martin/2ACC5A8FCC5A54E3/ukazi5.m:411
figure
subplot(2,1,1)
plot(originalni_signal)
axis('tight')
xlabel('vzorci')
ylabel('amplituda')
title('originalni signal')
subplot(2,1,2)
plot(filtriran_signal,'r')
axis('tight')
xlabel('vzorci')
ylabel('amplituda')
title('filtriran signal')
'-------------------------------------------------------------------'
#####################################
# FIRLS FILTER - FIR filter design via least-squares error minimization.
close_('all')
clc
filter_order=25
# /media/martin/2ACC5A8FCC5A54E3/ukazi5.m:422
f=matlabarray(cat(0,0.3,0.4,0.6,0.7,1))
# /media/martin/2ACC5A8FCC5A54E3/ukazi5.m:423
m=matlabarray(cat(0,1,0,0,0.5,0.5))
# /media/martin/2ACC5A8FCC5A54E3/ukazi5.m:424
b=firls(filter_order,f,m)
# /media/martin/2ACC5A8FCC5A54E3/ukazi5.m:425
a=1
# /media/martin/2ACC5A8FCC5A54E3/ukazi5.m:426
fvtool(b,a)
figure
h,w=freqz(b,a,128,nargout=2)
# /media/martin/2ACC5A8FCC5A54E3/ukazi5.m:431
plot(f,m,w / pi,abs(h),lineWidth=2)
axis('tight')
legend('Idealni','nartovan z metodo firls')
xlabel('2*f/f_{vzorevalna}','FontSize',14)
ylabel('amplituda','FontSize',14)
title(cat('Primerjava absolutnih vrednosti frekvennega odziva, red filtra = ',num2str(filter_order)))
y,t=impz(b,a,101,nargout=2)
# /media/martin/2ACC5A8FCC5A54E3/ukazi5.m:439
figure
stem(t,y)
axis('tight')
title('Impulzni odziv filtra')
xlabel('as (vzorci)')
ylabel('amplituda')
fvtool(b,a)
originalni_signal=randn(1000,1)
# /media/martin/2ACC5A8FCC5A54E3/ukazi5.m:447
filtriran_signal=filter(b,a,originalni_signal)
# /media/martin/2ACC5A8FCC5A54E3/ukazi5.m:448
figure
subplot(2,1,1)
plot(originalni_signal)
axis('tight')
xlabel('vzorci')
ylabel('amplituda')
title('originalni signal')
subplot(2,1,2)
plot(filtriran_signal,'r')
axis('tight')
xlabel('vzorci')
ylabel('amplituda')
title('filtriran signal')
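# A hedged SciPy analogue of the firls design above; scipy.signal.firls requires an
# odd number of taps, so the 25th-order example is approximated with 31 taps here.
from scipy import signal

bands = [0.0, 0.3, 0.4, 0.6, 0.7, 1.0]
desired = [0.0, 1.0, 0.0, 0.0, 0.5, 0.5]
b_ls = signal.firls(31, bands, desired)     # FIR coefficients; a = 1 for an FIR filter
w_ls, h_ls = signal.freqz(b_ls, 1, worN=128)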
#####################################
# # Yule-Walker FILTER - recursive least-squares fit to a specified frequency response
# close all; clc;
# filter_order = 10;
# m = [0 0 1 1 0 0 1 1 0 0];
# f = [0 0.1 0.2 0.3 0.4 0.5 0.6 0.7 0.8 1];
# [b,a] = yulewalk(filter_order,f,m);
#
# figure;
# [h,w] = freqz(b,a,128); # izris frekvennih karakteristik filtra
# plot(f,m,w/pi,abs(h),'LineWidth',2);
# axis tight;
# legend('Idealni','nartovan z metodo yulewalk');
# xlabel('2*f/f_{vzorevalna}','FontSize',14);
# ylabel('amplituda','FontSize',14);
# title(['Primerjava absolutnih vrednosti frekvennega odziva, red filtra = ' num2str(filter_order)]);
#
# fvtool(b,a); # orodje za preuevanje karakteristik filtra
#
# originalni_signal = randn(1000,1);
# filtriran_signal = filter(b,a,originalni_signal); # filtriramo signal "originalni_signal"
# figure; subplot(2,1,1);
# plot(originalni_signal); axis tight; xlabel('vzorci'); ylabel('amplituda'); title('originalni signal');
# subplot(2,1,2);
# plot(filtriran_signal,'r'); axis tight; xlabel('vzorci');
# ylabel('amplituda'); title('filtriran signal');
'-------------------------------------------------------------------'
#####################################
# P R A C T I C A L   E X A M P L E S
##################################################################################################
'-------------------------------------------------------------------'
########################################################
# electric guitar (https://ccrma.stanford.edu/~jos/waveguide/Sound_Examples.html)
sig,Fs,nbits=wavread('ElectricGuitar.wav',nargout=3)
# /media/martin/2ACC5A8FCC5A54E3/ukazi5.m:491
#[sig, Fs, nbits] = wavread('gtr-dist-yes.wav');
sig=sig[:,1]
# /media/martin/2ACC5A8FCC5A54E3/ukazi5.m:493
disp(cat('Vzorevalna frekvenca: ',num2str(Fs),' Hz'))
disp(cat('Loljivost: ',num2str(nbits),' bitov'))
wavplay(sig,Fs,'sync')
close_('all')
clc
tT=1024
# /media/martin/2ACC5A8FCC5A54E3/ukazi5.m:499
tN=1024
# /media/martin/2ACC5A8FCC5A54E3/ukazi5.m:499
dT=tT / 2
# /media/martin/2ACC5A8FCC5A54E3/ukazi5.m:499
TFD,F,T=specgram(sig,tN,Fs,window(hamming,tT),dT,nargout=3)
# /media/martin/2ACC5A8FCC5A54E3/ukazi5.m:500
imagesc(T,F[1:128] / 1000,abs(TFD[1:128,:]))
axis('xy')
xlabel('as (s)')
ylabel('f (kHz)')
axis('tight')
title(cat('originalni signal (asovno-frekvenna ravnina)'))
# filter design
filter_order=10
# /media/martin/2ACC5A8FCC5A54E3/ukazi5.m:507
m=matlabarray(cat(1,1,0,0,0,0,0,0,0,0))
# /media/martin/2ACC5A8FCC5A54E3/ukazi5.m:508
f=matlabarray(cat(0,0.01,0.02,0.3,0.4,0.5,0.6,0.7,0.8,1))
# /media/martin/2ACC5A8FCC5A54E3/ukazi5.m:509
# m = [0 0 1 1 1 1 1 1 1 1]; # high-pass filter
# f = [0 0.1 0.2 0.3 0.4 0.5 0.6 0.7 0.8 1];
# Yule-Walker FILTER
b,a=yulewalk(filter_order,f,m,nargout=2)
# /media/martin/2ACC5A8FCC5A54E3/ukazi5.m:514
# FIRLS FILTER
# b = firls(filter_order,f,m); # compute the b coefficients
# a = 1; # firls does not return a coefficients, so a(1) is set to 1
# plot the filter's frequency response
figure
h,w=freqz(b,a,128,nargout=2)
# /media/martin/2ACC5A8FCC5A54E3/ukazi5.m:521
plot(dot(f,Fs) / 2000,m,dot(w / pi,Fs) / 2000,abs(h),lineWidth=2)
axis('tight')
legend('Idealni','Implementirani')
xlabel('f (kHz)','FontSize',14)
ylabel('amplituda','FontSize',14)
title(cat('Primerjava absolutnih vrednosti frekvennega odziva, red filtra = ',num2str(filter_order)))
fvtool(b,a)
# filter the signal
filt_sig=filter(b,a,sig)
# /media/martin/2ACC5A8FCC5A54E3/ukazi5.m:532
# plot the time-frequency (TFD) transform
tT=1024
# /media/martin/2ACC5A8FCC5A54E3/ukazi5.m:535
tN=1024
# /media/martin/2ACC5A8FCC5A54E3/ukazi5.m:535
dT=tT / 2
# /media/martin/2ACC5A8FCC5A54E3/ukazi5.m:535
figure
subplot(1,2,1)
TFD,F,T=specgram(sig,tN,Fs,window(hamming,tT),dT,nargout=3)
# /media/martin/2ACC5A8FCC5A54E3/ukazi5.m:537
imagesc(T,F[1:128] / 1000,abs(TFD[1:128,:]))
axis('xy')
xlabel('as (s)')
ylabel('f (kHz)')
axis('tight')
title(cat('Originalni signal'))
subplot(1,2,2)
TFD,F,T=specgram(filt_sig,tN,Fs,window(hamming,tT),dT,nargout=3)
# /media/martin/2ACC5A8FCC5A54E3/ukazi5.m:543
imagesc(T,F[1:128] / 1000,abs(TFD[1:128,:]))
axis('xy')
xlabel('as (s)')
ylabel('f (kHz)')
axis('tight')
title(cat('Filtrirani signal'))
# play the original and the filtered signal
wavplay(dot(4,filt_sig),Fs,'sync')
wavplay(dot(1,sig),Fs,'sync')
# plot the original and the filtered signal
figure
hold('on')
plot(sig)
plot(filt_sig,'r')
title(cat('Opazujte zamik med originalnim in filtriranim signalom pri filtru reda ',num2str(filter_order)))
axis('tight')
# use the filtfilt function instead of filter
filtfilt_sig=filtfilt(b,a,sig)
# /media/martin/2ACC5A8FCC5A54E3/ukazi5.m:561
figure
hold('on')
plot(sig)
plot(filtfilt_sig,'g',lineWidth=2)
title(cat('filtfilt: je e vedno zamik med originalnim in filtriranim signalom? Red filtra = ',num2str(filter_order)))
axis('tight')
wavplay(dot(1,filtfilt_sig),Fs,'sync')
wavplay(dot(1,filt_sig),Fs,'sync')
'-----------------------------------------------------------------------------------------------'
########################################################
# Phase linearity
clear
close_('all')
clc
# FIRLS FILTER
filter_order=30
# /media/martin/2ACC5A8FCC5A54E3/ukazi5.m:578
m=matlabarray(cat(0,0,1,1,0,0))
# /media/martin/2ACC5A8FCC5A54E3/ukazi5.m:579
f=matlabarray(cat(0,0.2,0.2,0.4,0.4,1))
# /media/martin/2ACC5A8FCC5A54E3/ukazi5.m:580
b1,a1=firls(filter_order,f,m,nargout=2)
# /media/martin/2ACC5A8FCC5A54E3/ukazi5.m:581
fvtool(b1,a1)
# plot the filter's frequency response
figure
h,w=freqz(b1,a1,128,nargout=2)
# /media/martin/2ACC5A8FCC5A54E3/ukazi5.m:586
plot(f,m,w / pi,abs(h),lineWidth=2)
axis('tight')
legend('Idealni','Implementirani')
xlabel('f (kHz)','FontSize',14)
ylabel('amplituda','FontSize',14)
title(cat('Primerjava absolutnih vrednosti frekvennega odziva, red filtra = ',num2str(filter_order)))
# end of FIRLS
# CHEBYSHEV TYPE 1 FILTER - no gain ripple in stop band, moderate cutoff
filter_order=10
# /media/martin/2ACC5A8FCC5A54E3/ukazi5.m:596
Rp1=0.5
# /media/martin/2ACC5A8FCC5A54E3/ukazi5.m:597
Wn=cat(100,200) / 500
# /media/martin/2ACC5A8FCC5A54E3/ukazi5.m:598
b2,a2=cheby1(filter_order,Rp1,Wn,nargout=2)
# /media/martin/2ACC5A8FCC5A54E3/ukazi5.m:599
fvtool(b2,a2)
# plot the filter's frequency response
figure
h,w=freqz(b2,a2,128,nargout=2)
# /media/martin/2ACC5A8FCC5A54E3/ukazi5.m:604
plot(f,m,w / pi,abs(h),lineWidth=2)
axis('tight')
legend('Idealni','Implementirani')
xlabel('f (kHz)','FontSize',14)
ylabel('amplituda','FontSize',14)
title(cat('Primerjava absolutnih vrednosti frekvennega odziva, red filtra = ',num2str(filter_order)))
# end of CHEBYSHEV FILTER TIPA 1
# BUTTERWORTH FILTER - no gain ripple in pass band and stop band, slow cutoff
filter_order=10
# /media/martin/2ACC5A8FCC5A54E3/ukazi5.m:614
Wn=cat(100,200) / 500
# /media/martin/2ACC5A8FCC5A54E3/ukazi5.m:615
b3,a3=butter(filter_order,Wn,nargout=2)
# /media/martin/2ACC5A8FCC5A54E3/ukazi5.m:616
fvtool(b3,a3)
# plot the filter's frequency response
figure
h,w=freqz(b3,a3,128,nargout=2)
# /media/martin/2ACC5A8FCC5A54E3/ukazi5.m:621
plot(f,m,w / pi,abs(h),lineWidth=2)
axis('tight')
legend('Idealni','Implementirani')
xlabel('f (kHz)','FontSize',14)
ylabel('amplituda','FontSize',14)
title(cat('Primerjava absolutnih vrednosti frekvennega odziva, red filtra = ',num2str(filter_order)))
# end of BUTTERWORTH FILTER
# mexican hat
t=arange(- 10,10,0.5)
# /media/martin/2ACC5A8FCC5A54E3/ukazi5.m:631
mh=multiply(exp((- t ** 2) / 2),(1 - t ** 2))
# /media/martin/2ACC5A8FCC5A54E3/ukazi5.m:632
sig=zeros(1,1024)
# source repository: gony0/buffalo
# -*- coding: utf-8 -*-
from buffalo.misc.aux import InputOptions, Option
class AlgoOption(InputOptions):
def __init__(self, *args, **kwargs):
super(AlgoOption, self).__init__(*args, **kwargs)
def get_default_option(self):
"""Default options for Algo classes.
:ivar bool evaluation_on_learning: Set True to run evaluation during the training phase. (default: True)
:ivar bool compute_loss_on_training: Set True to calculate the loss during the training phase. (default: True)
:ivar int early_stopping_rounds: The number of additional epochs to run after the minimum training loss is reached. If set to 0, early stopping is disabled. (default: 0)
:ivar bool save_best: Whenever the loss improves, save the model.
:ivar int evaluation_period: How often to run evaluation, in epochs. (default: 1)
:ivar int save_period: How often to run the save_best routine, in epochs. (default: 10)
:ivar int random_seed: Random Seed
:ivar dict validation: The validation options.
"""
opt = {
'evaluation_on_learning': True,
'compute_loss_on_training': True,
'early_stopping_rounds': 0,
'save_best': False,
'evaluation_period': 1,
'save_period': 10,
'random_seed': 0,
'validation': {}
}
return opt
def get_default_optimize_option(self):
"""Default options for optimize feature.
:ivar str loss: Name of loss to monitor. (default: train_loss)
"""
opt = {
'loss': 'train_loss',
}
return opt
def get_default_tensorboard_option(self):
"""Default options for tensorboard feature.
:ivar str name: Name of graph name. (default: default)
:ivar str root: Path where to make graph directory. (default: ./tb)
:ivar str name_template: Name template for directory name. (default: {name}.{dtm})
"""
opt = {
'name': 'default',
'root': './tb',
'name_template': '{name}.{dtm}'
}
return opt
def is_valid_option(self, opt):
b = super().is_valid_option(opt)
for f in ['num_workers']:
if f not in opt:
raise RuntimeError(f'{f} not defined')
return b
class ALSOption(AlgoOption):
def __init__(self, *args, **kwargs):
super(ALSOption, self).__init__(*args, **kwargs)
def get_default_option(self):
"""Options for Alternating Least Squares.
:ivar bool adaptive_reg: Set True, for adaptive regularization. (default: False)
:ivar bool save_factors: Set True, to save models. (default: False)
:ivar bool accelerator: Set True, to accelerate training using GPU. (default: False)
:ivar int d: The number of latent feature dimension. (default: 20)
:ivar int num_iters: The number of iterations for training. (default: 10)
:ivar int num_workers: The number of threads. (default: 1)
:ivar int hyper_threads: The number of hyper threads when using cuda cores. (default: 256)
:ivar float reg_u: The L2 regularization coefficient for user embedding matrix. (default: 0.1)
:ivar float reg_i: The L2 regularization coefficient for item embedding matrix. (default: 0.1)
:ivar float alpha: The coefficient of giving more weights to losses on positive samples. (default: 8)
:ivar float eps: epsilon for numerical stability (default: 1e-10)
:ivar float cg_tolerance: tolerance of conjugate gradient for early stopping iterations (default: 1e-10)
:ivar str optimizer: The name of optimizer, should be in [llt, ldlt, manual_cg, eigen_cg, eigen_bicg, eigen_gmres, eigen_dgmres, eigen_minres]. (default: manual_cg)
:ivar int num_cg_max_iters: The maximum number of iterations for the conjugate gradient optimizer. (default: 3)
:ivar str model_path: Where to save model.
:ivar dict data_opt: This options will be used to load data if given.
"""
opt = super().get_default_option()
opt.update({
'adaptive_reg': False,
'save_factors': False,
'accelerator': False,
'd': 20,
'num_iters': 10,
'num_workers': 1,
'hyper_threads': 256,
'num_cg_max_iters': 3,
'reg_u': 0.1,
'reg_i': 0.1,
'alpha': 8,
'optimizer': 'manual_cg',
'cg_tolerance': 1e-10,
'eps': 1e-10,
'model_path': '',
'data_opt': {}
})
return Option(opt)
def get_default_optimize_option(self):
"""Optimization Options for ALS.
:ivar str loss: Target loss to optimize.
:ivar int max_trials: The maximum number of experiments for optimization. If not given, run forever.
:ivar int min_trials: The minimum number of experiments before deploying the model. (Since the best parameter may not be found within `min_trials`, the first best parameter is always deployed.)
:ivar bool deployment: Set True to train the model with the best parameter. During the optimization, it tries to dump the model that beat the previous best loss.
:ivar bool start_with_default_parameters: If set to True, the loss value of the default parameter is used as the starting loss to beat.
:ivar dict space: The parameter space definition. For more information, please refer to hyperopt's documentation. Note: because hyperopt's `randint` does not support a lower bound, the implementation is a bit tricky; please see optimize.py to check how we deal with `randint`.
"""
opt = super().get_default_optimize_option()
opt.update({
'loss': 'train_loss',
'max_trials': 100,
'min_trials': 0,
'deployment': True,
'start_with_default_parameters': True,
'space': {
'adaptive_reg': ['choice', ['adaptive_reg', [0, 1]]],
'd': ['randint', ['d', 10, 30]],
'reg_u': ['uniform', ['reg_u', 0.1, 1]],
'reg_i': ['uniform', ['reg_i', 0.1, 1]],
'alpha': ['randint', ['alpha', 1, 32]]
}
})
return Option(opt)
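# Illustrative sketch (an assumption, not part of the library file above): build the
# default ALS options defined in ALSOption.get_default_option() and override a few
# fields before handing them to a model. All keys used here appear in the defaults.
def _example_als_options():
    opt = ALSOption().get_default_option()
    opt['d'] = 40                       # larger latent dimension than the default 20
    opt['num_iters'] = 20               # train for more iterations
    opt['reg_u'] = opt['reg_i'] = 0.05  # lighter L2 regularization
    opt['num_workers'] = 4              # is_valid_option() requires this key to exist
    return opt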
class CFROption(AlgoOption):
def __init__(self, *args, **kwargs):
super(CFROption, self).__init__(*args, **kwargs)
def get_default_option(self):
""" Basic Options for CoFactor.
:ivar int d: The number of latent feature dimension. (default: 20)
:ivar int num_iters: The number of iterations for training. (default: 10)
:ivar int num_workers: The number of threads. (default: 1)
:ivar float reg_u: The L2 regularization coefficient for user embedding matrix. (default: 0.1)
:ivar float reg_i: The L2 regularization coefficient for item embedding matrix. (default: 0.1)
:ivar float reg_c: The L2 regularization coefficient for context embedding matrix. (default: 0.1)
:ivar float eps: epsilon for numerical stability (default: 1e-10)
:ivar float cg_tolerance: The tolerance for early stopping conjugate gradient optimizer. (default: 1e-10)
:ivar float alpha: The coefficient of giving more weights to losses on positive samples. (default: 8.0)
:ivar float l: The relative weight of loss on user-item relation over item-context relation. (default: 1.0)
:ivar str optimizer: The name of optimizer, should be in [llt, ldlt, manual_cg, eigen_cg, eigen_bicg, eigen_gmres, eigen_dgmres, eigen_minres]. (default: manual_cg)
:ivar int num_cg_max_iters: The maximum number of iterations for the conjugate gradient optimizer. (default: 3)
:ivar str model_path: Where to save model. (default: '')
:ivar dict data_opt: This options will be used to load data if given. (default: {})
"""
opt = super().get_default_option()
opt.update({
'save_factors': False,
'd': 20,
'num_iters': 10,
'num_workers': 1,
'num_cg_max_iters': 3,
'cg_tolerance': 1e-10,
'eps': 1e-10,
'reg_u': 0.1,
'reg_i': 0.1,
'reg_c': 0.1,
'alpha': 8.0,
'l': 1.0,
'optimizer': 'manual_cg',
'model_path': '',
'data_opt': {}
})
return Option(opt)
def get_default_optimize_option(self):
"""Optimization options for CoFactor.
:ivar str loss: Target loss to optimize.
:ivar int max_trials: The maximum number of experiments for optimization. If not given, run forever.
:ivar int min_trials: The minimum number of experiments before deploying the model. (Since the best parameter may not be found within `min_trials`, the first best parameter is always deployed.)
:ivar bool deployment: Set True to train the model with the best parameter. During the optimization, it tries to dump the model that beat the previous best loss.
:ivar bool start_with_default_parameters: If set to True, the loss value of the default parameter is used as the starting loss to beat.
:ivar dict space: The parameter space definition. For more information, please refer to hyperopt's documentation. Note: because hyperopt's `randint` does not support a lower bound, the implementation is a bit tricky; please see optimize.py to check how we deal with `randint`.
"""
opt = super().get_default_optimize_option()
opt.update({
'loss': 'train_loss',
'max_trials': 100,
'min_trials': 0,
'deployment': True,
'start_with_default_parameters': True,
'space': {
'd': ['randint', ['d', 10, 30]],
'reg_u': ['uniform', ['reg_u', 0.1, 1]],
'reg_i': ['uniform', ['reg_i', 0.1, 1]],
'reg_c': ['uniform', ['reg_i', 0.1, 1]],
'alpha': ['randint', ['alpha', 1, 32]],
'l': ['randint', ['alpha', 1, 32]]
}
})
return Option(opt)
def is_valid_option(self, opt):
b = super().is_valid_option(opt)
possible_optimizers = ["llt", "ldlt", "manual_cg", "eigen_cg", "eigen_bicg",
"eigen_gmres", "eigen_dgmres", "eigen_minres"]
if opt.optimizer not in possible_optimizers:
msg = f"optimizer ({opt.optimizer}) should be in {possible_optimizers}"
raise RuntimeError(msg)
return b
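# Note (descriptive, an assumption about typical use): CFROption.is_valid_option()
# above rejects any optimizer outside the listed solvers, so a typo such as
# opt.optimizer = 'cg' raises a RuntimeError before training starts.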
class BPRMFOption(AlgoOption):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
def get_default_option(self):
"""Options for Bayesian Personalized Ranking Matrix Factorization.
:ivar bool accelerator: Set True, to accelerate training using GPU. (default: False)
:ivar bool use_bias: Set True, to use bias term for the model.
:ivar int evaluation_period: (default: 100)
:ivar int num_workers: The number of threads. (default: 1)
:ivar int hyper_threads: The number of hyper threads when using cuda cores. (default: 256)
:ivar int num_iters: The number of iterations for training. (default: 100)
:ivar int d: The number of latent feature dimension. (default: 20)
:ivar bool update_i: Set True, to update positive item feature. (default: True)
:ivar bool update_j: Set True, to update negative item feature. (default: True)
:ivar float reg_u: The L2 regularization coefficient for user embedding matrix. (default: 0.025)
:ivar float reg_i: The L2 regularization coefficient for positive item embedding matrix. (default: 0.025)
:ivar | |
# This component contains an object-oriented adaptation of the RC model referred to as the 'Simple Hourly Method' in ISO 13790 (superseded by EN ISO 52016-1).
#
# Hive: An educational plugin developed by the A/S chair at ETH Zurich
# This component is based on building_physics.py in the RC_BuildingSimulator github repository
# https://github.com/architecture-building-systems/RC_BuildingSimulator
# Extensive documentation is available on the project wiki.
#
# Authors: <NAME> <<EMAIL>>, <NAME> <<EMAIL>>
# Credits: <NAME>, <NAME>, <NAME>
# Adapted for Grasshopper by <NAME>
#
# This file is part of Hive
#
# Licensing/Copyright and liability comments go here.
# Copyright 2018, Architecture and Building Systems - ETH Zurich
# Licence: MIT
"""
Place this component in the grasshopper workspace so that zones can be defined and simulations run.
-
Provided by Hive 0.0.1
"""
ghenv.Component.Name = "Hive_BuildingPhysics"
ghenv.Component.NickName = 'BuildingPhysics'
ghenv.Component.Message = 'VER 0.0.1\nFEB_26_2018'
ghenv.Component.IconDisplayMode = ghenv.Component.IconDisplayMode.application
ghenv.Component.Category = "Hive"
ghenv.Component.SubCategory = "0 | Core"
# ComponentExposure=1
import scriptcontext as sc
import Grasshopper.Kernel as ghKernel
class Building(object):
'''Sets the parameters of the building. '''
def __init__(self,
window_area=13.5,
external_envelope_area=15.19,
room_depth=7,
room_width=4.9,
room_height=3.1,
lighting_load=11.7,
lighting_control=300,
lighting_utilisation_factor=0.45,
lighting_maintenance_factor=0.9,
u_walls=0.2,
u_windows=1.1,
g_windows=0.6,
ach_vent=1.5,
ach_infl=0.5,
ventilation_efficiency=0,
thermal_capacitance_per_floor_area=165000,
t_set_heating=20,
t_set_cooling=26,
max_cooling_energy_per_floor_area=-12,
max_heating_energy_per_floor_area=12,
heating_supply_system=sc.sticky["DirectHeater"],
cooling_supply_system=sc.sticky["DirectCooler"],
heating_emission_system=sc.sticky["AirConditioning"],
cooling_emission_system=sc.sticky["AirConditioning"],
):
# Building Dimensions
self.window_area = window_area # [m2] Window Area
self.room_depth = room_depth # [m] Room Depth
self.room_width = room_width # [m] Room Width
self.room_height = room_height # [m] Room Height
# Fenestration and Lighting Properties
self.g_windows = g_windows
self.lighting_load = lighting_load # [kW/m2] lighting load
self.lighting_control = lighting_control # [lux] Lighting set point
# How the light entering the window is transmitted to the working plane
self.lighting_utilisation_factor = lighting_utilisation_factor
# How dirty the window is. Section 2.2.3.1 Environmental Science
# Handbook
self.lighting_maintenance_factor = lighting_maintenance_factor
# Calculated Properties
self.floor_area = room_depth * room_width # [m2] Floor Area
# [m2] Effective Mass Area assuming a medium weight building (ISO 13790, 12.3.1.2)
self.mass_area = self.floor_area * 2.5
self.room_vol = room_width * room_depth * \
room_height # [m3] Room Volume
self.total_internal_area = self.floor_area * 2 + \
room_width * room_height * 2 + room_depth * room_height * 2
# TODO: Standard doesn't explain what A_t is. Needs to be checked
self.A_t = self.total_internal_area
# Single Capacitance 5 conductance Model Parameters
# [kWh/K] Room Capacitance. Default based on ISO standard 12.3.1.2 for medium heavy buildings
self.c_m = thermal_capacitance_per_floor_area * self.floor_area
# Conductance of opaque surfaces to exterior [W/K]
self.h_tr_em = u_walls * (external_envelope_area - window_area)
# Conductance to exterior through glazed surfaces [W/K], based on
# U-wert of 1W/m2K
self.h_tr_w = u_windows * window_area
# Determine the ventilation conductance
ach_tot = ach_infl + ach_vent # Total Air Changes Per Hour
# temperature adjustment factor taking ventilation and infiltration
# [ISO: E -27]
b_ek = (1 - (ach_vent / (ach_tot)) * ventilation_efficiency)
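# 1200 J/(m3.K) is roughly the volumetric heat capacity of air (about 1.2 kg/m3
# times ~1000 J/(kg.K)); dividing the total air changes per hour by 3600 converts
# the exchanged air volume to m3 per second.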
self.h_ve_adj = 1200 * b_ek * self.room_vol * \
(ach_tot / 3600) # Conductance through ventilation [W/K]
# transmittance from the internal air to the thermal mass of the
# building
self.h_tr_ms = 9.1 * self.mass_area
# Conductance from the conditioned air to interior building surface
self.h_tr_is = self.total_internal_area * 3.45
# Thermal set points
self.t_set_heating = t_set_heating
self.t_set_cooling = t_set_cooling
# Thermal Properties
self.has_heating_demand = False # Boolean for if heating is required
self.has_cooling_demand = False # Boolean for if cooling is required
self.max_cooling_energy = max_cooling_energy_per_floor_area * \
self.floor_area # max cooling load (W/m2)
self.max_heating_energy = max_heating_energy_per_floor_area * \
self.floor_area # max heating load (W/m2)
# Building System Properties
self.heating_supply_system = heating_supply_system
self.cooling_supply_system = cooling_supply_system
self.heating_emission_system = heating_emission_system
self.cooling_emission_system = cooling_emission_system
def solve_building_lighting(self, illuminance, occupancy):
"""
Calculates the lighting demand for a set timestep
:param illuminance: Illuminance transmitted through the window [Lumens]
:type illuminance: float
:param occupancy: Probability of full occupancy
:type occupancy: float
:return: self.lighting_demand, Lighting Energy Required for the timestep
:rtype: float
"""
# Cite: Environmental Science Handbook, SV Szokolay, Section 2.2.1.3
# also, this might be sped up by pre-calculating the constants, but idk. first check with profiler...
lux = (illuminance * self.lighting_utilisation_factor *
self.lighting_maintenance_factor) / self.floor_area # [Lux]
if lux < self.lighting_control and occupancy > 0:
# Lighting demand for the hour
self.lighting_demand = self.lighting_load * self.floor_area
else:
self.lighting_demand = 0
def solve_building_energy(self, internal_gains, solar_gains, t_out, t_m_prev):
"""
Calculates the heating and cooling consumption of a building for a set timestep
:param internal_gains: internal heat gains from people and appliances [W]
:type internal_gains: float
:param solar_gains: solar heat gains [W]
:type solar_gains: float
:param t_out: Outdoor air temperature [C]
:type t_out: float
:param t_m_prev: Previous air temperature [C]
:type t_m_prev: float
:return: self.heating_demand, space heating demand of the building
:return: self.heating_sys_electricity, heating electricity consumption
:return: self.heating_sys_fossils, heating fossil fuel consumption
:return: self.cooling_demand, space cooling demand of the building
:return: self.cooling_sys_electricity, electricity consumption from cooling
:return: self.cooling_sys_fossils, fossil fuel consumption from cooling
:return: self.electricity_out, electricity produced from combined heat pump systems
:return: self.sys_total_energy, total exergy consumed (electricity + fossils) for heating and cooling
:return: self.heating_energy, total exergy consumed (electricity + fossils) for heating
:return: self.cooling_energy, total exergy consumed (electricity + fossils) for cooling
:return: self.cop, Coefficient of Performance of the heating or cooling system
:rtype: float
"""
# Main File
# Calculate the heat transfer definitions for formula simplification
self.calc_h_tr_1()
self.calc_h_tr_2()
self.calc_h_tr_3()
# check demand, and change state of self.has_heating_demand, and self._has_cooling_demand
self.has_demand(internal_gains, solar_gains, t_out, t_m_prev)
if not self.has_heating_demand and not self.has_cooling_demand:
# no heating or cooling demand
# calculate temperatures of building R-C-model and exit
# --> rc_model_function_1(...)
self.energy_demand = 0
# y u no pep8 bra?
self.heating_demand = 0 # Energy required by the zone
self.cooling_demand = 0 # Energy surplus of the zone
# Energy (in electricity) required by the supply system to provide
# HeatingDemand
self.heating_sys_electricity = 0
# Energy (in fossil fuel) required by the supply system to provide
# HeatingDemand
self.heating_sys_fossils = 0
# Energy (in electricity) required by the supply system to get rid
# of CoolingDemand
self.cooling_sys_electricity = 0
# Energy (in fossil fuel) required by the supply system to get rid
# of CoolingDemand
self.cooling_sys_fossils = 0
# Electricity produced by the supply system (e.g. CHP)
self.electricity_out = 0
else:
# has heating/cooling demand
# Calculates energy_demand used below
self.calc_energy_demand(
internal_gains, solar_gains, t_out, t_m_prev)
self.calc_temperatures_crank_nicolson(
self.energy_demand, internal_gains, solar_gains, t_out, t_m_prev)
# calculates the actual t_m resulting from the actual heating
# demand (energy_demand)
# Calculate the Heating/Cooling Input Energy Required
supply_director = sc.sticky["SupplyDirector"]() # Initialise Heating System Manager
if self.has_heating_demand:
supply_director.set_builder(self.heating_supply_system(load=self.energy_demand,
t_out=t_out,
heating_supply_temperature=self.heating_supply_temperature,
cooling_supply_temperature=self.cooling_supply_temperature,
has_heating_demand=self.has_heating_demand,
has_cooling_demand=self.has_cooling_demand))
supplyOut = supply_director.calc_system()
# All Variables explained underneath line 467
self.heating_demand = self.energy_demand
self.heating_sys_electricity = supplyOut.electricity_in
self.heating_sys_fossils = supplyOut.fossils_in
self.cooling_demand = 0
self.cooling_sys_electricity = 0
self.cooling_sys_fossils = 0
self.electricity_out = supplyOut.electricity_out
elif self.has_cooling_demand:
supply_director.set_builder(self.cooling_supply_system(load=self.energy_demand * (-1),
t_out=t_out,
heating_supply_temperature=self.heating_supply_temperature,
cooling_supply_temperature=self.cooling_supply_temperature,
has_heating_demand=self.has_heating_demand,
has_cooling_demand=self.has_cooling_demand))
supplyOut = supply_director.calc_system()
self.heating_demand = 0
self.heating_sys_electricity = 0
self.heating_sys_fossils = 0
self.cooling_demand = self.energy_demand
self.cooling_sys_electricity = supplyOut.electricity_in
self.cooling_sys_fossils = supplyOut.fossils_in
self.electricity_out = supplyOut.electricity_out
self.cop = supplyOut.cop
self.sys_total_energy = self.heating_sys_electricity + self.heating_sys_fossils + \
self.cooling_sys_electricity + self.cooling_sys_fossils
self.heating_energy = self.heating_sys_electricity + self.heating_sys_fossils
self.cooling_energy = self.cooling_sys_electricity + self.cooling_sys_fossils
# TODO: rename. this is expected to return a boolean. instead, it changes state??? you don't want to change state...
# why not just return has_heating_demand and has_cooling_demand?? then call the function "check_demand"
# has_heating_demand, has_cooling_demand = self.check_demand(...)
def has_demand(self, internal_gains, solar_gains, t_out, t_m_prev):
"""
Determines whether the building requires heating or cooling
Used in: solve_building_energy()
# step 1 in section C.4.2 in [C.3 ISO 13790]
"""
# set energy demand to 0 and see if temperatures are within the comfort
# range
energy_demand = 0
# Solve for the internal temperature t_Air
self.calc_temperatures_crank_nicolson(
energy_demand, internal_gains, solar_gains, t_out, t_m_prev)
# If the air temperature is less or greater than the set temperature,
# there is a heating/cooling load
if self.t_air < self.t_set_heating:
self.has_heating_demand = True
self.has_cooling_demand = False
elif self.t_air > self.t_set_cooling:
self.has_cooling_demand = True
self.has_heating_demand = False
else:
self.has_heating_demand = False
self.has_cooling_demand = False
def calc_temperatures_crank_nicolson(self, energy_demand, internal_gains, solar_gains, t_out, t_m_prev):
"""
Determines node temperatures and computes | |
@ x_
assert np.allclose(rom.f_(x_), y_)
assert np.allclose(rom.f_(x_, -1), y_)
kron2c, kron3c = opinf.utils.kron2c, opinf.utils.kron3c
rom = opinf._core._base._DiscreteROM("HGB")
rom.r, rom.m = r, m
rom.H_, rom.G_, rom.B_ = H_, G_, B_
u = np.random.random(m)
x_ = np.random.random(r)
y_ = H_ @ kron2c(x_) + G_ @ kron3c(x_) + B_ @ u
assert np.allclose(rom.f_(x_, u), y_)
def test_predict(self):
"""Test _core._base._DiscreteROM.predict()."""
rom = opinf._core._base._DiscreteROM('')
# Get test data.
n, k, m, r = 60, 50, 20, 10
X = _get_data(n, k, m)[0]
Vr = la.svd(X)[0][:,:r]
niters = 5
x0 = X[:,0]
U = np.ones((m, niters-1))
# Try to predict with invalid initial condition.
x0_ = Vr.T @ x0
rom = _trainedmodel(False, "cAHB", Vr, m)
with pytest.raises(ValueError) as ex:
rom.predict(x0_[:-1], niters, U)
assert ex.value.args[0] == "x0 not aligned with Vr, dimension 0"
# Try to predict with bad niters argument.
with pytest.raises(ValueError) as ex:
rom.predict(x0, -18, U)
assert ex.value.args[0] == \
"argument 'niters' must be a nonnegative integer"
# Try to predict with badly-shaped discrete inputs.
rom = _trainedmodel(False, "cAHB", Vr, m)
with pytest.raises(ValueError) as ex:
rom.predict(x0, niters, np.random.random((m-1, niters-1)))
assert ex.value.args[0] == \
f"invalid input shape ({(m-1,niters-1)} != {(m,niters-1)}"
rom = _trainedmodel(False, "cAHB", Vr, m=1)
with pytest.raises(ValueError) as ex:
rom.predict(x0, niters, np.random.random((2, niters-1)))
assert ex.value.args[0] == \
f"invalid input shape ({(2,niters-1)} != {(1,niters-1)}"
# Try to predict with continuous inputs.
rom = _trainedmodel(False, "cAHB", Vr, m)
with pytest.raises(TypeError) as ex:
rom.predict(x0, niters, lambda t: np.ones(m-1))
assert ex.value.args[0] == "input U must be an array, not a callable"
for form in MODEL_FORMS:
if "B" not in form: # No control inputs.
rom = _trainedmodel(False, form, Vr, None)
out = rom.predict(x0, niters)
assert isinstance(out, np.ndarray)
assert out.shape == (n,niters)
else: # Has Control inputs.
# Predict with 2D inputs.
rom = _trainedmodel(False, form, Vr, m)
out = rom.predict(x0, niters, U)
assert isinstance(out, np.ndarray)
assert out.shape == (n,niters)
# Predict with 1D inputs.
rom = _trainedmodel(False, form, Vr, 1)
out = rom.predict(x0, niters, np.ones(niters))
assert isinstance(out, np.ndarray)
assert out.shape == (n,niters)
# Predict with no basis gives result in low-dimensional space.
rom = _trainedmodel(False, "cA", Vr, None)
rom.Vr = None
out = rom.predict(Vr.T @ x0, niters)
assert isinstance(out, np.ndarray)
assert out.shape == (r,niters)
class TestContinuousROM:
"""Test _core._base._ContinuousROM."""
def test_f_(self, r=5, m=2):
"""Test _core._base.ContinuousROM.f_()."""
c_, A_, H_, G_, B_ = _get_operators(r, m)
# Check that the constructed f takes the right number of arguments.
rom = opinf._core._base._ContinuousROM("cA")
rom.r = r
rom.c_, rom.A_ = c_, A_
x_ = np.random.random(r)
y_ = c_ + A_ @ x_
assert np.allclose(rom.f_(0, x_), y_)
assert np.allclose(rom.f_(1, x_), y_)
assert np.allclose(rom.f_(1, x_, -1), y_)
kron2c, kron3c = opinf.utils.kron2c, opinf.utils.kron3c
rom = opinf._core._base._ContinuousROM("HGB")
rom.r, rom.m = r, m
rom.H_, rom.G_, rom.B_ = H_, G_, B_
uu = np.random.random(m)
def u(t):
return uu + t
y_ = H_ @ kron2c(x_) + G_ @ kron3c(x_) + B_ @ uu
assert np.allclose(rom.f_(0, x_, u), y_)
y_ = H_ @ kron2c(x_) + G_ @ kron3c(x_) + B_ @ (uu+1)
assert np.allclose(rom.f_(1, x_, u), y_)
def test_predict(self):
"""Test _core._base._ContinuousROM.predict()."""
# Get test data.
n, k, m, r = 60, 50, 20, 10
X = _get_data(n, k, m)[0]
Vr = la.svd(X)[0][:,:r]
nt = 5
x0 = X[:,0]
t = np.linspace(0, .01*nt, nt)
def u(t):
return np.ones(m)
Upred = np.ones((m, nt))
# Try to predict with invalid initial condition.
x0_ = Vr.T @ x0
rom = _trainedmodel(True, "cAHB", Vr, m)
with pytest.raises(ValueError) as ex:
rom.predict(x0_[1:], t, u)
assert ex.value.args[0] == "x0 not aligned with Vr, dimension 0"
# Try to predict with bad time array.
with pytest.raises(ValueError) as ex:
rom.predict(x0, np.vstack((t,t)), u)
assert ex.value.args[0] == "time 't' must be one-dimensional"
# Predict without inputs.
for form in MODEL_FORMS:
if "B" not in form:
rom = _trainedmodel(True, form, Vr, None)
out = rom.predict(x0, t)
assert isinstance(out, np.ndarray)
assert out.shape == (n,t.size)
# Predict with no basis gives result in low-dimensional space.
rom = _trainedmodel(True, "cA", Vr, None)
rom.Vr = None
out = rom.predict(Vr.T @ x0, t)
assert isinstance(out, np.ndarray)
assert out.shape == (r,t.size)
# Try to predict with badly-shaped discrete inputs.
rom = _trainedmodel(True, "cAHB", Vr, m)
with pytest.raises(ValueError) as ex:
rom.predict(x0, t, np.random.random((m-1, nt)))
assert ex.value.args[0] == \
f"invalid input shape ({(m-1,nt)} != {(m,nt)}"
rom = _trainedmodel(True, "cAHB", Vr, m=1)
with pytest.raises(ValueError) as ex:
rom.predict(x0, t, np.random.random((2, nt)))
assert ex.value.args[0] == \
f"invalid input shape ({(2,nt)} != {(1,nt)}"
# Try to predict with badly-shaped continuous inputs.
rom = _trainedmodel(True, "cAHB", Vr, m)
with pytest.raises(ValueError) as ex:
rom.predict(x0, t, lambda t: np.ones(m-1))
assert ex.value.args[0] == \
f"input function u() must return ndarray of shape (m,)={(m,)}"
with pytest.raises(ValueError) as ex:
rom.predict(x0, t, lambda t: 1)
assert ex.value.args[0] == \
f"input function u() must return ndarray of shape (m,)={(m,)}"
rom = _trainedmodel(True, "cAHB", Vr, m=1)
with pytest.raises(ValueError) as ex:
rom.predict(x0, t, u)
assert ex.value.args[0] == \
f"input function u() must return ndarray of shape (m,)={(1,)}" \
" or scalar"
# Try to predict with continuous inputs with bad return type
rom = _trainedmodel(True, "cAHB", Vr, m)
with pytest.raises(ValueError) as ex:
rom.predict(x0, t, lambda t: set([5]))
assert ex.value.args[0] == \
f"input function u() must return ndarray of shape (m,)={(m,)}"
for form in MODEL_FORMS:
if "B" in form:
# Predict with 2D inputs.
rom = _trainedmodel(True, form, Vr, m)
# continuous input.
out = rom.predict(x0, t, u)
assert isinstance(out, np.ndarray)
assert out.shape == (n,nt)
# discrete input.
out = rom.predict(x0, t, Upred)
assert isinstance(out, np.ndarray)
assert out.shape == (n,nt)
# Predict with 1D inputs.
rom = _trainedmodel(True, form, Vr, 1)
# continuous input.
out = rom.predict(x0, t, lambda t: 1)
assert isinstance(out, np.ndarray)
assert out.shape == (n,nt)
out = rom.predict(x0, t, lambda t: np.array([1]))
assert isinstance(out, np.ndarray)
assert out.shape == (n,nt)
# discrete input.
out = rom.predict(x0, t, np.ones_like(t))
assert isinstance(out, np.ndarray)
assert out.shape == (n,nt)
class TestNonparametricMixin:
"""Test _core._base._NonparametricMixin."""
class Dummy(opinf._core._base._NonparametricMixin,
opinf._core._base._BaseROM):
def __init__(self, modelform):
self.modelform = modelform
def test_O_(self, r=9, m=4):
"""Test _core._base._NonparametricMixin.O_."""
c, A, H, G, B = _get_operators(r, m)
for form in MODEL_FORMS:
rom = self.Dummy(form)
rom.set_operators(None,
c_=c if 'c' in form else None,
A_=A if 'A' in form else None,
H_=H if 'H' in form else None,
G_=G if 'G' in form else None,
B_=B if 'B' in form else None)
O_ = rom.O_
d = opinf.lstsq.lstsq_size(form, r, m if 'B' in form else 0)
assert O_.shape == (r,d)
# Spot check.
if form == "cB":
assert np.all(O_ == np.hstack((c[:,np.newaxis], B)))
elif form == "AB":
assert np.all(O_ == np.hstack((A, B)))
elif form == "HG":
assert np.all(O_ == np.hstack((H, G)))
def test_str(self):
"""Test _core._base._NonparametricMixin.__str__()
(string representation).
"""
# Continuous ROMs
rom = opinf.InferredContinuousROM("A")
assert str(rom) == \
"Reduced-order model structure: dx / dt = Ax(t)"
rom = opinf.InferredContinuousROM("cA")
assert str(rom) == \
"Reduced-order model structure: dx / dt = c + Ax(t)"
rom = opinf.InferredContinuousROM("HB")
assert str(rom) == \
"Reduced-order model structure: dx / dt = H(x(t) ⊗ x(t)) + Bu(t)"
rom = opinf.InferredContinuousROM("G")
assert str(rom) == \
"Reduced-order model structure: dx / dt = G(x(t) ⊗ x(t) ⊗ x(t))"
rom = opinf.InferredContinuousROM("cH")
assert str(rom) == \
"Reduced-order model structure: dx / dt = c + H(x(t) ⊗ x(t))"
# Discrete ROMs
rom = opinf.IntrusiveDiscreteROM("A")
assert str(rom) == \
"Reduced-order model structure: x_{j+1} = Ax_{j}"
rom = opinf.IntrusiveDiscreteROM("cB")
assert str(rom) == \
"Reduced-order model structure: x_{j+1} = c + Bu_{j}"
rom = opinf.IntrusiveDiscreteROM("H")
assert str(rom) == \
"Reduced-order model structure: x_{j+1} = H(x_{j} ⊗ x_{j})"
def test_save_model(self):
"""Test _core._base._NonparametricMixin.save_model()."""
# Clean up after old tests.
target = "savemodeltest.h5"
if os.path.isfile(target): # pragma: no cover
os.remove(target)
# Get a test model.
n, m, r = 15, 2, 5
Vr = np.random.random((n,r))
rom = _trainedmodel("inferred", "cAHGB", Vr, m)
def _checkfile(filename, mdl, hasbasis):
assert os.path.isfile(filename)
with h5py.File(filename, 'r') as data:
# Check metadata.
assert "meta" in data
assert len(data["meta"]) == 0
assert data["meta"].attrs["modelclass"] == \
mdl.__class__.__name__
assert data["meta"].attrs["modelform"] == mdl.modelform
# Check basis
if hasbasis:
assert "Vr" in data
| |
# bicycleparameters/period.py (source repository: sandertyu/Simple-Geometry-Plot)
#!/usr/bin/env python
import os
from math import pi
import numpy as np
from numpy import ma
from scipy.optimize import leastsq
import matplotlib.pyplot as plt
from uncertainties import ufloat
# local modules
from .io import load_pendulum_mat_file
def average_rectified_sections(data):
'''Returns a slice of an oscillating data vector based on the max and min
of the means of the sections created by rectifying the data.
Parameters
----------
data : ndarray, shape(n,)
Returns
-------
data : ndarray, shape(m,)
A slice where m is typically less than n.
Notes
-----
This function tries to handle the fact that some of the data from the
torsional pendulum had a beating-like phenomenon, and we only want to select
a section of the data that doesn't seem to exhibit it.
'''
# subtract the mean so that there are zero crossings
meanSubData = data - np.mean(data)
# find the zero crossings
zeroCrossings = np.where(np.diff(np.sign(meanSubData)))[0]
# add a zero to the beginning
crossings = np.concatenate((np.array([0]), zeroCrossings))
# find the mean value of each rectified section and the local index of its maximum
secMean = []
localMeanInd = []
for sec in np.split(np.abs(meanSubData), zeroCrossings):
localMeanInd.append(np.argmax(sec))
secMean.append(np.mean(sec))
meanInd = []
# make the global indices
for i, val in enumerate(crossings):
meanInd.append(val + localMeanInd[i])
# only keep sections whose mean is above the overall mean, because the zero
# crossings can cluster at one point, mainly due to the resolution of the DAQ box
threshold = np.mean(secMean)
secMeanOverThresh = []
indice = []
for i, val in enumerate(secMean):
if val > threshold:
secMeanOverThresh.append(val)
indice.append(meanInd[i])
# now return the data based on the max value and the min value
maxInd = indice[np.argmax(secMeanOverThresh)]
minInd = indice[np.argmin(secMeanOverThresh)]
return data[maxInd:minInd]
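# Illustrative sketch (an assumption, not from the original module): trim a synthetic
# beating oscillation with average_rectified_sections(); the returned slice spans the
# region between the largest and smallest rectified-section means.
def _example_average_rectified_sections():
    t = np.linspace(0.0, 10.0, 2000)
    beating = 1.5 + np.sin(2 * np.pi * 2.0 * t) * np.cos(2 * np.pi * 0.2 * t)
    trimmed = average_rectified_sections(beating)
    return trimmed.size  # typically smaller than t.size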
def calc_periods_for_files(directory, filenames, forkIsSplit):
'''Calculates the period for all filenames in directory.
Parameters
----------
directory : string
This is the path to the RawData directory.
filenames : list
List of all the mat file names in the RawData directory.
forkIsSplit : boolean
True if the fork is broken into a handlebar and fork and false if the
fork and handlebar was measured together.
Returns
-------
periods : dictionary
Contains all the periods for the mat files in the RawData directory.
'''
periods = {}
def pathParts(path):
'''Splits a path into a list of its parts.'''
components = []
while True:
(path,tail) = os.path.split(path)
if tail == "":
components.reverse()
return components
components.append(tail)
pathToRawDataParts = pathParts(directory)
pathToRawDataParts.pop()
pathToBicycleDir = os.path.join(pathToRawDataParts[0],
pathToRawDataParts[1],
pathToRawDataParts[2])
pathToPlotDir = os.path.join(pathToBicycleDir, 'Plots', 'PendulumFit')
# make sure there is a place to save the plots
if not os.path.exists(pathToPlotDir):
os.makedirs(pathToPlotDir)
for f in filenames:
print("Calculating the period for:", f)
# load the pendulum data
pathToMatFile = os.path.join(directory, f)
matData = load_pendulum_mat_file(pathToMatFile)
# generate a variable name for this period
periodKey = get_period_key(matData, forkIsSplit)
# calculate the period
sampleRate = get_sample_rate(matData)
pathToPlotFile = os.path.join(pathToPlotDir,
os.path.splitext(f)[0] + '.png')
period = get_period_from_truncated(matData['data'],
sampleRate,
pathToPlotFile)
print("The period is:", period, "\n")
# either append the the period or if it isn't there yet, then
# make a new list
try:
periods[periodKey].append(period)
except KeyError:
periods[periodKey] = [period]
# now average all the periods
for k, v in periods.items():
if k.startswith('T'):
periods[k] = np.mean(v)
return periods
def check_for_period(mp, forkIsSplit):
'''Returns whether the period calculations need to happen again, given the
measured parameters and whether the fork is split into two pieces.
Parameters
----------
mp : dictionary
Dictionary the measured parameters.
forkIsSplit : boolean
True if the fork is broken into a handlebar and fork and false if the
fork and handlebar was measured together.
Returns
-------
forcePeriodCalc : boolean
True if there wasn't enough period data in mp, false if there was.
'''
forcePeriodCalc = False
# Check to see if mp contains enough periods to not need
# recalculation
ncTSum = 0
ntTSum = 0
for key in mp.keys():
# check for any periods in the keys
if key[:2] == 'Tc':
ncTSum += 1
elif key[:2] == 'Tt':
ntTSum += 1
# if there isn't enough data then force the period calculations again
if forkIsSplit:
if ncTSum < 5 or ntTSum < 11:
forcePeriodCalc = True
else:
if ncTSum < 4 or ntTSum < 8:
forcePeriodCalc = True
return forcePeriodCalc
def fit_goodness(ym, yp):
'''
Calculate the goodness of fit.
Parameters
----------
ym : ndarray, shape(n,)
The vector of measured values.
yp : ndarray, shape(n,)
The vector of predicted values.
Returns
-------
rsq : float
The r squared value of the fit.
SSE : float
The error sum of squares.
SST : float
The total sum of squares.
SSR : float
The regression sum of squares.
'''
SSR = np.sum((yp - np.mean(ym))**2)
SST = np.sum((ym - np.mean(ym))**2)
SSE = SST - SSR
rsq = SSR / SST
return rsq, SSE, SST, SSR
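# Worked sketch (an assumption, not part of the original module): for these four
# measured/predicted points SST = 5.0 and SSR = 4.5, so fit_goodness returns
# rsq = 0.9 and SSE = 0.5.
def _example_fit_goodness():
    ym = np.array([1.0, 2.0, 3.0, 4.0])
    yp = np.array([1.2, 1.8, 3.1, 3.9])
    rsq, SSE, SST, SSR = fit_goodness(ym, yp)
    return rsq, SSE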
def get_period(data, sampleRate, pathToPlotFile):
'''Returns the period and uncertainty for data resembling a decaying
oscillation.
Parameters
----------
data : ndarray, shape(n,)
A time series that resembles a decaying oscillation.
sampleRate : int
The frequency that data was sampled at.
pathToPlotFile : string
A path to the file to print the plots.
Returns
-------
T : ufloat
The period of oscillation and its uncertainty.
'''
y = data
x = np.linspace(0., (len(y) - 1) / float(sampleRate), num=len(y))
def fitfunc(p, t):
'''Decaying oscillation function.'''
a = p[0]
b = np.exp(-p[3] * p[4] * t)
c = p[1] * np.sin(p[4] * np.sqrt(1 - p[3]**2) * t)
d = p[2] * np.cos(p[4] * np.sqrt(1 - p[3]**2) * t)
return a + b * (c + d)
# initial guesses
#p0 = np.array([1.35, -.5, -.75, 0.01, 3.93]) # guess from delft
#p0 = np.array([2.5, -.75, -.75, 0.001, 4.3]) # guess from ucd
p0 = make_guess(data, sampleRate) # tries to make a good guess
# create the error function
errfunc = lambda p, t, y: fitfunc(p, t) - y
# minimize the error function
p1, success = leastsq(errfunc, p0[:], args=(x, y))
lscurve = fitfunc(p1, x)
# find the uncertainty in the fit parameters
rsq, SSE, SST, SSR = fit_goodness(y, lscurve)
sigma = np.sqrt(SSE / (len(y) - len(p0)))
# calculate the jacobian
L = jac_fitfunc(p1, x)
# the Hessian
H = np.dot(L.T, L)
# the covariance matrix
U = sigma**2. * np.linalg.inv(H)
# the standard deviations
sigp = np.sqrt(U.diagonal())
# natural frequency
wo = ufloat(p1[4], sigp[4])
# damping ratio
zeta = ufloat(p1[3], sigp[3])
# damped natural frequency
wd = (1. - zeta**2.)**(1. / 2.) * wo
# damped natural frequency (hz)
fd = wd / 2. / pi
# period
T = 1. / fd
# plot the data and save it to file
fig = plt.figure()
plot_osfit(x, y, lscurve, p1, rsq, T, m=np.max(x), fig=fig)
plt.savefig(pathToPlotFile)
plt.close()
# return the period
return T
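# Numeric sketch (an assumption): the period computed above follows
# T = 2*pi / (wo * sqrt(1 - zeta**2)); for example wo = 4.0 rad/s with zeta = 0.05
# gives a damped period of about 1.573 s.
def _example_damped_period(wo=4.0, zeta=0.05):
    wd = (1.0 - zeta ** 2) ** 0.5 * wo
    return 2.0 * pi / wd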
def get_period_from_truncated(data, sampleRate, pathToPlotFile):
#dataRec = average_rectified_sections(data)
dataRec = data
dataGood = select_good_data(dataRec, 0.1)
return get_period(dataGood, sampleRate, pathToPlotFile)
def get_period_key(matData, forkIsSplit):
'''Returns a dictionary key for the period entries.
Parameters
----------
matData : dictionary
The data imported from a pendulum mat file.
forkIsSplit : boolean
True if the fork is broken into a handlebar and fork and false if the
fork and handlebar was measured together.
Returns
-------
key : string
A key of the form 'T[pendulum][part][orientation]'. For example, if it
is the frame that was hung as a torsional pendulum at the second
orientation angle then the key would be 'TtB2'.
'''
# set up the subscripting for the period key
subscripts = {'Fwheel': 'F',
'Rwheel': 'R',
'Frame': 'B',
'Flywheel': 'D'}
# the Flywheel is for the gyro bike and it actually represents the front
# wheel and the flywheel as one rigid body. It was easier to measure the
# the inertia this way. So...the to get the actual flywheel inertia, one
# must subtract the inertia of the Fwheel, F, from the Flywheel, D.
if forkIsSplit:
subscripts['Fork'] = 'S'
subscripts['Handlebar'] = 'G'
else:
subscripts['Fork'] = | |
'data/_components.tsv')
del os
new = cls.load_from_file(path, index_col=None,
use_default_data=use_default_data, store_data=store_data)
H2O = Component.from_chemical('H2O', Chemical('H2O'),
i_charge=0, f_BOD5_COD=0, f_uBOD_COD=0,
f_Vmass_Totmass=0, description="Water",
particle_size='Soluble',
degradability='Undegradable', organic=False)
new.append(H2O)
if default_compile:
new.default_compile(lock_state_at='', particulate_ref='NaCl')
new.compile()
# Add aliases
new.set_alias('H2O', 'Water')
# Pre-define groups
new.define_group('substrates',
('S_CH3OH', 'S_Ac', 'S_Prop', 'S_F', 'C_B_Subst', 'X_B_Subst'))
new.define_group('biomass',
('X_OHO', 'X_AOO', 'X_NOO', 'X_AMO', 'X_PAO',
'X_MEOLO', 'X_FO', 'X_ACO', 'X_HMO', 'X_PRO'))
new.define_group('S_VFA', ('S_Ac', 'S_Prop'))
new.define_group('X_Stor', ('X_OHO_PHA', 'X_GAO_PHA', 'X_PAO_PHA',
'X_GAO_Gly', 'X_PAO_Gly'),)
new.define_group('X_ANO', ('X_AOO', 'X_NOO'))
new.define_group('X_Bio', ('X_OHO', 'X_AOO', 'X_NOO', 'X_AMO', 'X_PAO',
'X_MEOLO', 'X_ACO', 'X_HMO', 'X_PRO', 'X_FO'))
new.define_group('S_NOx', ('S_NO2', 'S_NO3'))
new.define_group('X_PAO_PP', ('X_PAO_PP_Lo', 'X_PAO_PP_Hi'))
new.define_group('TKN', [i.ID for i in new if i.ID not in ('S_N2','S_NO2','S_NO3')])
return new
@staticmethod
def append_combustion_components(components, alt_IDs={},
try_default_compile=True,
**default_compile_kwargs):
'''
Return a new :class:`~.Components` object with the given components
and those needed for combustion reactions (complete oxidation with O2),
namely O2, CO2 (for C), H2O (for H), N2 (for N), P4O10 (for P), and SO2 (for S).
If the combustion components are already in the given collection,
they will NOT be overwritten.
Parameters
----------
components : Iterable(obj)
The original components to be appended.
alt_IDs : dict
Alternative IDs for the combustion components to be added as aliases,
e.g., if "S_O2" is used instead of "O2", then pass {'O2': 'S_O2'}.
try_default_compile : bool
Whether to try default compile when some components
are missing key properties for compiling.
default_compile_kwargs : dict
Keyword arguments to pass to `default_compile` if needed.
See Also
--------
:func:`default_compile`
Examples
--------
>>> from qsdsan import Components
>>> cmps = Components.load_default()
>>> cmps
CompiledComponents([S_H2, S_CH4, S_CH3OH, S_Ac, S_Prop, S_F, S_U_Inf, S_U_E, C_B_Subst, C_B_BAP, C_B_UAP, C_U_Inf, X_B_Subst, X_OHO_PHA, X_GAO_PHA, X_PAO_PHA, X_GAO_Gly, X_PAO_Gly, X_OHO, X_AOO, X_NOO, X_AMO, X_PAO, X_MEOLO, X_FO, X_ACO, X_HMO, X_PRO, X_U_Inf, X_U_OHO_E, X_U_PAO_E, X_Ig_ISS, X_MgCO3, X_CaCO3, X_MAP, X_HAP, X_HDP, X_FePO4, X_AlPO4, X_AlOH, X_FeOH, X_PAO_PP_Lo, X_PAO_PP_Hi, S_NH4, S_NO2, S_NO3, S_PO4, S_K, S_Ca, S_Mg, S_CO3, S_N2, S_O2, S_CAT, S_AN, H2O])
>>> CH4 = cmps.S_CH4.copy('CH4', phase='g')
>>> cmps = Components.append_combustion_components([*cmps, CH4], alt_IDs=dict(O2='S_O2'))
>>> cmps
CompiledComponents([S_H2, S_CH4, S_CH3OH, S_Ac, S_Prop, S_F, S_U_Inf, S_U_E, C_B_Subst, C_B_BAP, C_B_UAP, C_U_Inf, X_B_Subst, X_OHO_PHA, X_GAO_PHA, X_PAO_PHA, X_GAO_Gly, X_PAO_Gly, X_OHO, X_AOO, X_NOO, X_AMO, X_PAO, X_MEOLO, X_FO, X_ACO, X_HMO, X_PRO, X_U_Inf, X_U_OHO_E, X_U_PAO_E, X_Ig_ISS, X_MgCO3, X_CaCO3, X_MAP, X_HAP, X_HDP, X_FePO4, X_AlPO4, X_AlOH, X_FeOH, X_PAO_PP_Lo, X_PAO_PP_Hi, S_NH4, S_NO2, S_NO3, S_PO4, S_K, S_Ca, S_Mg, S_CO3, S_N2, S_O2, S_CAT, S_AN, H2O, CH4, CO2, N2, P4O10, SO2])
>>> cmps.O2 is cmps.S_O2
True
'''
cmps = components if isinstance(components, (Components, CompiledComponents)) \
else Components(components)
comb_cmps = ['O2', 'CO2', 'H2O', 'N2', 'P4O10', 'SO2']
aliases = dict(H2O='Water')
aliases.update(alt_IDs)
get = getattr
for k, v in alt_IDs.items():
try:
get(cmps, v)
aliases[k] = comb_cmps.pop(comb_cmps.index(k))
except AttributeError:
pass
for ID in comb_cmps:
try: get(cmps, ID)
except AttributeError:
phase = 'g' if ID in ('O2', 'CO2', 'N2', 'SO2') else 's' if ID=='P4O10' else ''
ps = 'Dissolved gas' if phase == 'g' else 'Particulate' if phase=='s' else 'Soluble'
cmp = Component(ID, phase=phase, organic=False, particle_size=ps,
degradability='Undegradable')
cmps.append(cmp)
add_V_from_rho(cmps.P4O10, rho=2.39, rho_unit='g/mL') # http://www.chemspider.com/Chemical-Structure.14128.html
try:
cmps.compile()
except RuntimeError: # cannot compile due to missing properties
cmps.default_compile(**default_compile_kwargs)
for k, v in aliases.items():
cmps.set_alias(k, v)
return cmps
@classmethod
def from_chemicals(cls, chemicals, **data):
'''
Return a new :class:`Components` from a :class:`thermosteam.Chemicals`
or :class:`thermosteam.CompiledChemicals` object.
Parameters
----------
chemicals: thermosteam.Chemicals
The :class:`thermosteam.Chemicals` object as the basis
for the new :class:`~.Components` object.
:class:`Component` objects will have the same ID as the corresponding
:class:`thermosteam.Chemical` object in the :class:`thermosteam.Chemicals`
object.
data : dict
A nested dict with keys being the new components and values being the inner dict,
keys and values of the inner dict are the attribute names and values, respectively.
Examples
--------
>>> import qsdsan as qs
>>> chems = qs.Chemicals((qs.Chemical('Water'), qs.Chemical('Ethanol')))
>>> data = {'Water': {'particle_size': 'Soluble',
... 'degradability': 'Undegradable',
... 'organic': False},
... 'Ethanol': {'particle_size': 'Soluble',
... 'degradability': 'Readily',
... 'organic': False}}
>>> cmps = qs.Components.from_chemicals(chems, **data)
>>> cmps
Components([Water, Ethanol])
'''
cmps = cls.__new__(cls, ())
for i in chemicals:
val_dct = data.get(i.ID)
cmp = Component.from_chemical(i.ID, i)
if val_dct:
for k, v in val_dct.items():
setattr(cmp, k, v)
cmps.append(cmp)
return cmps
# %%
# =============================================================================
# Define the CompiledComponents class
# =============================================================================
chemical_data_array = tmo._chemicals.chemical_data_array
def component_data_array(components, attr):
data = chemical_data_array(components, attr)
return data
class CompiledComponents(CompiledChemicals):
'''
A subclass of :class:`thermosteam.CompiledChemicals`, contains `Component` objects as attributes.
Examples
--------
`Component <https://qsdsan.readthedocs.io/en/latest/tutorials/2_Component.html>`_
See Also
--------
`thermosteam.CompiledChemicals <https://thermosteam.readthedocs.io/en/latest/Chemicals.html>`_
'''
_cache = {}
def __new__(cls, components, cache=None):
isa = isinstance
components = tuple([cmp if isa(cmp, Component) else Component(cmp, cache)
for cmp in components])
cache = cls._cache
if components in cache:
self = cache[components]
else:
self = object.__new__(cls)
setfield = setattr
for cmp in components:
setfield(self, cmp.ID, cmp)
self._compile(components)
cache[components] = self
return self
def __reduce__(self):
return CompiledComponents, (self.tuple, )
def __contains__(self, component):
if isinstance(component, str):
return component in self.__dict__
elif isinstance(component, Component):
return component in self.tuple
else: # pragma: no cover
return False
def __repr__(self):
return f"CompiledComponents([{', '.join(self.IDs)}])"
def refresh_constants(self):
'''
Refresh constant arrays of :class:`Components` objects,
including all chemical and component-specific properties.
'''
super().refresh_constants()
dct = self.__dict__
components = self.tuple
for i in _num_component_properties:
dct[i] = component_data_array(components, i)
def compile(self, skip_checks=False):
'''Do nothing, :class:`CompiledComponents` have already been compiled.'''
pass
def _compile(self, components, skip_checks=False):
dct = self.__dict__
tuple_ = tuple # this speeds up the code
components = tuple_(dct.values())
CompiledChemicals._compile(self, components, skip_checks)
for component in components:
missing_properties = component.get_missing_properties(_key_component_properties)
if not missing_properties: continue
missing = utils.repr_listed_values(missing_properties)
raise RuntimeError(f'{component} is missing key component-related properties ({missing}).')
for i in _num_component_properties:
dct[i] = component_data_array(components, i)
dct['g'] = np.asarray([1 if cmp.particle_size == 'Dissolved gas' else 0 for cmp in components])
s = dct['s'] = np.asarray([1 if cmp.particle_size == 'Soluble' else 0 for cmp in components])
c = dct['c'] = np.asarray([1 if cmp.particle_size == 'Colloidal' else 0 for cmp in components])
dct['x'] = np.asarray([1 if cmp.particle_size == 'Particulate' else 0 for cmp in components])
b = dct['b'] = np.asarray([1 if cmp.degradability != 'Undegradable' else 0 for cmp in components])
dct['rb'] = np.asarray([1 if cmp.degradability == 'Readily' else 0 for cmp in components])
org = dct['org'] = np.asarray([int(cmp.organic) for cmp in components])
inorg = dct['inorg'] = np.ones_like(org) - org
ID_arr = dct['_ID_arr'] = np.asarray([i.ID for i in components])
dct['chem_MW'] = np.asarray([i.chem_MW for i in components])
# Inorganic degradable non-gas, incorrect
inorg_b = inorg * b * (s+c)
if inorg_b.sum() > 0:
bad_IDs = ID_arr[np.where(inorg_b==1)[0]]
raise ValueError(f'Components {bad_IDs} are inorganic, degradable, and not gas, '
'which is not correct.')
def subgroup(self, IDs):
'''Create a new subgroup of :class:`Component` objects.'''
components = self[IDs]
new = Components(components)
new.compile()
for i in new.IDs:
for j in self.get_aliases(i):
try: new.set_alias(i, j)
except: pass
return new
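# Illustrative sketch (an assumption, not part of the class): pull only the oxidized
# nitrogen species out of the default component set as a compiled subgroup, e.g.
# nox = Components.load_default().subgroup(('S_NO2', 'S_NO3'))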
def index(self, ID):
'''Return index of specified component.'''
try: return self._index[ID]
except KeyError:
raise UndefinedComponent(ID)
def indices(self, IDs):
'''Return indices of multiple components.'''
try:
dct = self._index
return [dct[i] for i in IDs]
except KeyError as key_error:
raise UndefinedComponent(key_error.args[0])
def copy(self):
'''Return a copy.'''
copy = Components(self)
copy.compile()
return copy
def get_IDs_from_array(self, array):
'''
Get the IDs of a group of components based on the 1/0 or True/False array.
Parameters
----------
array : Iterable(1/0)
1D collection of 1/0 or True/False with the same length
as the IDs.
Examples
--------
>>> from qsdsan import Components
>>> cmps = Components.load_default()
>>> cmps.get_IDs_from_array(cmps.g)
('S_H2', 'S_CH4', 'S_N2', 'S_O2')
'''
return tuple(self._ID_arr[np.asarray(array).astype(bool)])
def get_array_from_IDs(self, IDs):
'''
Generate a ``numpy`` array in the same shape as ``CompiledComponents.IDs``,
where the values would be 1 for components whose IDs are in the given ID iterable
and 0 for components not in the given ID iterable.
Parameters
----------
IDs : Iterable(str)
IDs of select components within this ``~.CompiledComponents``.
Examples
--------
>>> from qsdsan import Components
>>> cmps = Components.load_default()
>>> IDs = ('S_H2', 'S_CH4', 'S_N2', 'S_O2')
>>> cmps.get_array_from_IDs(IDs)
array([1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0])
'''
arr = np.zeros_like(self._ID_arr, dtype='int')
arr[self.indices(IDs)] = 1
return "success"
else:
return "redirect"
def browse_books(request):
def get_context():
context = {
'cartCount': getCartCount(request),
'books': Book.objects.all()
}
return context
context = get_context()
if request.method == "POST":
if request.POST.get("search_button"):
save_search(request, query=request.POST['search'])
return redirect('search')
if request.POST.get("advanced_search_button"):
save_search(request, query="")
return redirect('search')
if request.POST.get("add_to_cart"):
err = add_to_cart(request, request.POST['add_to_cart'], 1)
if err == "redirect":
return redirect('login')
elif err == "out_of_stock":
context['out_of_stock_flag'] = True
return render(request, 'bookstore/browse-books.html', context)
context = get_context()
return render(request, 'bookstore/browse-books.html', context)
else:
return render(request, 'bookstore/browse-books.html', context)
def cart(request):
def get_context():
books = CartItem.objects.filter(user=request.user)
prices = []
for book in books:
price = int(book.quantity)*float(book.book.cost)
price = f"{price:.2f}"
prices.append(price)
total_cost = 0
for book in books:
total_cost += int(book.quantity)*float(book.book.cost)
total_cost = f"{total_cost:.2f}"
context = {
'cartCount': getCartCount(request),
'books_in_cart': zip(CartItem.objects.filter(user=request.user), prices),
'total_cost': total_cost,
}
return context
context = get_context()
if request.method == "POST":
if request.POST.get("search_button"):
save_search(request, query=request.POST['search'])
return redirect('search')
if request.POST.get("advanced_search_button"):
save_search(request, query="")
return redirect('search')
if request.POST.get("cross_button"):
cart_item = CartItem.objects.filter(user=request.user, book=Book.objects.get(isbn=request.POST.get("cross_button")))[0]
book = Book.objects.get(isbn=request.POST.get("cross_button"))
book.stock += cart_item.quantity
book.save()
cart_item.delete()
context = get_context()
return render(request, 'bookstore/cart.html', context)
if request.POST.get("minus_button"):
cart_item = CartItem.objects.filter(user=request.user, book=Book.objects.get(isbn=request.POST.get("minus_button")))[0]
if cart_item.quantity == 0:
return render(request, 'bookstore/cart.html', context)
else:
book = Book.objects.get(isbn=request.POST.get("minus_button"))
# if book.stock == 0:
# context["minus_flag"] = True
# return render(request, 'bookstore/cart.html', context)
book.stock += 1
book.save()
cart_item.quantity -= 1
if cart_item.quantity == 0:
cart_item.delete()
else:
cart_item.save()
context = get_context()
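# The plus/minus buttons keep Book.stock and CartItem.quantity in sync:
# stock is reserved as soon as a copy sits in a cart and released when it is removed.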
if request.POST.get("plus_button"):
book = Book.objects.get(isbn=request.POST.get("plus_button"))
cart_item = CartItem.objects.filter(user=request.user, book=Book.objects.get(isbn=request.POST.get("plus_button")))[0]
if book.stock == 0:
context["minus_flag"] = True
return render(request, 'bookstore/cart.html', context)
book.stock -= 1
book.save()
cart_item.quantity += 1
cart_item.save()
context = get_context()
if request.POST.get("continue_checkout"):
cartCount = getCartCount(request)
if cartCount == 0:
context['checkout_flag'] = True
return render(request, 'bookstore/cart.html', context)
else:
return redirect('shipping')
return render(request, 'bookstore/cart.html', context)
else:
return render(request, 'bookstore/cart.html', context)
def shipping(request):
order = Order.objects.filter(user=request.user, status="Incomplete")
if len(order) == 0:
books = CartItem.objects.filter(user=request.user)
total_cost = 0
for book in books:
total_cost += int(book.quantity)*float(book.book.cost)
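# A placeholder "SYSTEM" promotion (0% discount) marks an order that has no promo
# code applied yet; promo_remove_button below resets the order back to this state.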
promotion = Promotion(code="SYSTEM",
percentage=0,
start_date=datetime.date.today(),
end_date=datetime.date.today(),)
promotion.save()
order = Order.objects.create_order(
user=request.user,
total=total_cost,
orig_total=total_cost,
promotion=promotion,
date=datetime.date.today(),
time=datetime.datetime.now().strftime("%H:%M:%S"),
first_name="",
last_name="",
phone="",
street="",
city="",
state="",
zip_code="",
county="",
country="",
card_name="",
card_num="",
card_exp="",
card_cvv="",
card_four="",
)
order.save()
else:
order = order[0]
def get_context():
order = Order.objects.filter(user=request.user, status="Incomplete")[0]
books = CartItem.objects.filter(user=request.user)
prices = []
for book in books:
price = int(book.quantity)*float(book.book.cost)
price = f"{price:.2f}"
prices.append(price)
total_cost = order.total
discount = Decimal(int(order.promotion.percentage))*order.orig_total / Decimal(100)
discount = f"{discount:.2f}"
context = {
'cartCount': getCartCount(request),
'books_in_cart': zip(books, prices),
'total_cost': total_cost,
'promo_code_name': order.promotion.code,
'promo_code_discount': discount,
}
return context
context = get_context()
if request.method == "POST":
if request.POST.get("search_button"):
save_search(request, query=request.POST['search'])
return redirect('search')
if request.POST.get("advanced_search_button"):
save_search(request, query="")
return redirect('search')
if request.POST.get("promo_remove_button"):
order.promotion = Promotion(code="SYSTEM", start_date=datetime.date.today(), end_date=datetime.date.today(), percentage=0)
order.total = order.orig_total
order.save()
context = get_context()
return render(request, 'bookstore/shipping.html', context)
if request.POST.get("promo_button"):
if request.POST["promo_code"] == "":
context['promo_none_flag'] = True
return render(request, 'bookstore/shipping.html', context)
promo = Promotion.objects.filter(code=request.POST['promo_code'])
if len(promo) == 0:
context['promo_invalid_flag'] = True
return render(request, 'bookstore/shipping.html', context)
completed_orders = Order.objects.filter(user=request.user, status="Confirmed")
orderItems = []
for c_orders in completed_orders:
orderItems.append(OrderItem.objects.filter(order=c_orders)[0])
for orderItem in orderItems:
used_promo = orderItem.order.promotion.code
if used_promo == request.POST['promo_code']:
context['promo_flag'] = True
return render(request, 'bookstore/shipping.html', context)
promo = promo[0]
if promo.start_date > datetime.date.today():
context['promo_start_flag'] = True
return render(request, 'bookstore/shipping.html', context)
if promo.end_date < datetime.date.today():
context['promo_end_flag'] = True
return render(request, 'bookstore/shipping.html', context)
if order.promotion.code != "SYSTEM":
order.total = order.orig_total
order.promotion = promo
order.total = order.total * Decimal(100-int(promo.percentage)) / Decimal(100)
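# Illustrative arithmetic with hypothetical numbers: for orig_total = 50.00 and a 20%
# promotion, total becomes 50.00 * (100 - 20) / 100 = 40.00, and get_context() will
# report a discount of 20 * 50.00 / 100 = 10.00.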
order.total = round(order.total, 2)
order.save()
context = get_context()
return render(request, 'bookstore/shipping.html', context)
first_name = request.POST['userFirst_name']
last_name = request.POST['userLast_name']
phone = request.POST['userPhone']
street = request.POST['userStreet']
city = request.POST['userCity']
state = request.POST['userState']
zip_code = request.POST['userZip']
county = request.POST['userCounty']
country = request.POST['userCountry']
if "save_address" in request.POST.getlist("checks[]"):
user = request.user
mail_message = ""
if user.first_name != first_name:
mail_message += "* Your first name has been updated\n"
if user.last_name != last_name:
mail_message += "* Your last name has been updated\n"
if user.phone != phone:
mail_message += "* Your phone has been updated\n"
if user.street != street:
mail_message += "* Your street has been updated\n"
if user.city != city:
mail_message += "* Your city has been updated\n"
if user.state != state:
mail_message += "* Your state has been updated\n"
if user.zip_code != zip_code:
mail_message += "* Your zip code has been updated\n"
if user.county != county:
mail_message += "* Your county has been updated\n"
if user.country != country:
mail_message += "* Your country has been updated\n"
user.first_name = first_name
user.last_name = last_name
user.phone = phone
user.street = street
user.city = city
user.state = state
user.zip_code = zip_code
user.county = county
user.country = country
user.save()
mail_subject = "Changes made to your genlib account"
if mail_message != "":
EMAIL = EmailMessage(mail_subject, mail_message, to=[request.user.email])
EMAIL.send()
order.first_name = first_name
order.last_name = last_name
order.phone = phone
order.street = street
order.city = city
order.state = state
order.zip_code = zip_code
order.county = county
order.country = country
order.save()
return redirect('payment')
else:
return render(request, 'bookstore/shipping.html', context)
def payment(request):
order = Order.objects.filter(user=request.user, status="Incomplete")[0]
def get_context():
order = Order.objects.filter(user=request.user, status="Incomplete")[0]
books = CartItem.objects.filter(user=request.user)
prices = []
for book in books:
price = int(book.quantity)*float(book.book.cost)
price = f"{price:.2f}"
prices.append(price)
total_cost = order.total
discount = Decimal(int(order.promotion.percentage))*order.orig_total / Decimal(100)
discount = f"{discount:.2f}"
payment_cards = []
if request.user.card_four1 != "":
payment_cards.append(request.user.card_four1)
if request.user.card_four2 != "":
payment_cards.append(request.user.card_four2)
if request.user.card_four3 != "":
payment_cards.append(request.user.card_four3)
context = {
'cartCount': getCartCount(request),
'books_in_cart': zip(books, prices),
'total_cost': total_cost,
'promo_code_name': order.promotion.code,
'promo_code_discount': discount,
'payment_cards': payment_cards,
}
return context
context = get_context()
if request.method == "POST":
if request.POST.get("search_button"):
save_search(request, query=request.POST['search'])
return redirect('search')
if request.POST.get("advanced_search_button"):
save_search(request, query="")
return redirect('search')
if request.POST.get("promo_remove_button"):
order.promotion = Promotion(code="SYSTEM", start_date=datetime.date.today(), end_date=datetime.date.today(), percentage=0)
order.total = order.orig_total
order.save()
context = get_context()
return render(request, 'bookstore/payment.html', context)
if request.POST.get("promo_button"):
if request.POST["promo_code"] == "":
context['promo_none_flag'] = True
return render(request, 'bookstore/payment.html', context)
promo = Promotion.objects.filter(code=request.POST['promo_code'])
if len(promo) == 0:
context['promo_invalid_flag'] = True
return render(request, 'bookstore/payment.html', context)
completed_orders = Order.objects.filter(user=request.user, status="Confirmed")
orderItems = []
for c_orders in completed_orders:
orderItems.append(OrderItem.objects.filter(order=c_orders)[0])
for orderItem in orderItems:
used_promo = orderItem.order.promotion.code
if used_promo == request.POST['promo_code']:
context['promo_flag'] = True
return render(request, 'bookstore/payment.html', context)
promo = promo[0]
if promo.start_date > datetime.date.today():
context['promo_start_flag'] = True
return render(request, 'bookstore/payment.html', context)
if promo.end_date < datetime.date.today():
context['promo_end_flag'] = True
return render(request, 'bookstore/payment.html', context)
if order.promotion.code != "SYSTEM":
order.total = order.orig_total
order.promotion = promo
order.total = order.total * Decimal(100-int(promo.percentage)) / Decimal(100)
order.total = round(order.total, 2)
order.save()
context = get_context()
return render(request, 'bookstore/payment.html', context)
card_option = request.POST['card_option']
if card_option == "Select":
card_name = request.POST['card_name']
card_num = request.POST['card_num']
card_exp = f"{request.POST.get('card_month')}/{request.POST.get('card_year')}"
card_cvv = request.POST['card_cvv']
card_four = request.POST['card_num'][-4:]
if card_exp == "None/None":
context['card_missing_field_flag'] = True
return render(request, 'bookstore/payment.html', context)
user = request.user
if "save_card" in request.POST.getlist("checks[]"):
if user.card_count == 3:
context['card_overflow_flag'] = True
return render(request, 'bookstore/payment.html', context)
user.card_count += 1
if user.card_four1 == "":
user.card_name1 = card_name
user.card_num1 = card_num
user.card_exp1 = card_exp
user.card_cvv1 = card_cvv
user.card_four1 = card_four
elif user.card_four2 == "":
user.card_name2 = card_name
user.card_num2 = card_num
user.card_exp2 = card_exp
user.card_cvv2 = card_cvv
user.card_four2 = card_four
elif user.card_four3 == "":
user.card_name3 = card_name
user.card_num3 = card_num
user.card_exp3 = card_exp
user.card_cvv3 = card_cvv
user.card_four3 = card_four
user.save()
mail_subject = "Changes made to your genlib account"
mail_message = f"Card ending in {card_four} has been added to your account"
EMAIL = EmailMessage(mail_subject, mail_message, to=[request.user.email])
EMAIL.send()
else:
if request.user.card_four1 == card_option:
card_name = request.user.card_name1
card_num = request.user.card_num1
card_exp = request.user.card_exp1
card_cvv = request.user.card_cvv1
card_four = request.user.card_four1
elif request.user.card_four2 == card_option:
card_name = request.user.card_name2
card_num = request.user.card_num2
card_exp = request.user.card_exp2
card_cvv = request.user.card_cvv2
card_four = request.user.card_four2
elif request.user.card_four3 == card_option:
card_name = request.user.card_name3
card_num = request.user.card_num3
card_exp = request.user.card_exp3
card_cvv = request.user.card_cvv3
card_four = request.user.card_four3
order.card_name = card_name
order.card_num = card_num
order.card_exp = card_exp
order.card_cvv = card_cvv
order.card_four = card_four
order.save()
return redirect('finalplaceorder')
else:
return render(request, 'bookstore/payment.html', context)
def finalplaceorder(request):
order = Order.objects.filter(user=request.user, status="Incomplete")[0]
def get_context():
order = Order.objects.filter(user=request.user, status="Incomplete")[0]
books = CartItem.objects.filter(user=request.user)
prices = []
for book in books:
price = int(book.quantity)*float(book.book.cost)
price = f"{price:.2f}"
prices.append(price)
total_cost = order.total
discount = Decimal(int(order.promotion.percentage))*order.orig_total / Decimal(100)
discount = f"{discount:.2f}"
tax = order.total * Decimal(0.1)
total = order.total + Decimal(5) + tax
tax = round(tax, 2)
total = round(total, 2)
context = {
'cartCount': getCartCount(request),
'books_in_cart': zip(books, prices),
'total_cost': total_cost,
'promo_code_name': order.promotion.code,
ActionType protobuf.
path_prefix: Prefix to add to the path when reporting errors.
check_spec_class: Whether this method should check the spec proto class.
Raises:
TypingError: If the data doesn't have the expected type.
"""
if check_spec_class:
ProtobufValidator._check_spec_proto_class(
data, spec, action_pb2.ActionType, path_prefix)
ProtobufValidator._check_matching_oneof(
data, 'action', 'action', spec, 'action_types', path_prefix)
@staticmethod
def _check_position(data, spec, path_prefix, check_spec_class):
"""Check that a Position proto conforms to the provided spec proto.
Args:
data: Position protobuf.
spec: PositionType protobuf.
path_prefix: Prefix to add to the path when reporting errors.
check_spec_class: Whether this method should check the spec proto class.
Raises:
TypingError: If the data doesn't have the expected type.
"""
if check_spec_class:
ProtobufValidator._check_spec_proto_class(
data, spec, primitives_pb2.PositionType, path_prefix)
@staticmethod
def _check_rotation(data, spec, path_prefix, check_spec_class):
"""Check that a Rotation proto conforms to the provided spec proto.
Args:
data: Rotation protobuf.
spec: RotationType protobuf.
path_prefix: Prefix to add to the path when reporting errors.
check_spec_class: Whether this method should check the spec proto class.
Raises:
TypingError: If the data doesn't have the expected type.
"""
if check_spec_class:
ProtobufValidator._check_spec_proto_class(
data, spec, primitives_pb2.RotationType, path_prefix)
@staticmethod
def _check_action_data(data, spec, path_prefix, check_spec_class):
"""Check that an ActionData proto conforms to the provided spec proto.
Args:
data: ActionData protobuf.
spec: ActionSpec protobuf.
path_prefix: Prefix to add to the path when reporting errors.
check_spec_class: Whether this method should check the spec proto class.
Raises:
TypingError: If the data has an unexpected number of actions or the action
source is unknown.
"""
if check_spec_class:
ProtobufValidator._check_spec_proto_class(
data, spec, action_pb2.ActionSpec, path_prefix)
if data.source == action_pb2.ActionData.SOURCE_UNKNOWN:
raise TypingError(f'{path_prefix} action data\'s source is unknown.')
ProtobufValidator._check_repeated_count_matches(
data.actions, 'actions', 'actions', spec.actions, 'actions',
path_prefix)
@staticmethod
def _check_joystick(data, spec, path_prefix, check_spec_class):
"""Check that a Joystick proto conforms to the provided spec proto.
Args:
data: Joystick protobuf.
spec: JoystickType protobuf.
path_prefix: Prefix to add to the path when reporting errors.
check_spec_class: Whether this method should check the spec proto class.
Raises:
TypingError: If an axis value is out of range.
"""
if check_spec_class:
ProtobufValidator._check_spec_proto_class(
data, spec, action_pb2.JoystickType, path_prefix)
def check_axis(value, axis):
"""Check that a joystick axis value is within range.
Args:
value: Value to check.
axis: Name of the axis.
Raises:
TypingError: If an axis value is out of range.
"""
if value < -1.0 or value > 1.0:
raise TypingError(f'{path_prefix} joystick {axis} value {value} is out '
'of range [-1.0, 1.0].')
check_axis(data.x_axis, 'x_axis')
check_axis(data.y_axis, 'y_axis')
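# Example with hypothetical values: x_axis = 1.5 raises
# TypingError("<path> joystick x_axis value 1.5 is out of range [-1.0, 1.0]."),
# while the boundary values -1.0 and 1.0 are accepted.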
@staticmethod
def _check_observation(data, spec, path_prefix, check_spec_class):
"""Check that an Observation proto conforms to the provided spec proto.
Args:
data: Observation protobuf.
spec: ObservationSpec protobuf.
path_prefix: Prefix to add to the path when reporting errors.
check_spec_class: Whether this method should check the spec proto class.
Raises:
TypingError: If the data contains an unexpected number of entities.
"""
if check_spec_class:
ProtobufValidator._check_spec_proto_class(
data, spec, observation_pb2.ObservationSpec, path_prefix)
ProtobufValidator._check_optional_fields(
data, OBSERVATION_OPTIONAL_ENTITIES, spec, path_prefix)
ProtobufValidator._check_repeated_count_matches(
data.global_entities, 'global_entities', 'observations',
spec.global_entities, 'global_entities', path_prefix)
@staticmethod
def check_data(data, spec, path_prefix, check_spec_class):
"""Check that a data proto conforms to the provided spec proto.
To keep traversal of the data and spec protos separate from validation,
this method does not recursively validate the provided data proto. To
traverse and validate a data proto use ProtobufNode.data_to_proto_nest().
Args:
data: Data protobuf.
spec: Spec protobuf.
path_prefix: String path to the protobuf.
check_spec_class: Whether this method should check the spec proto class.
Raises:
TypingError: If the data protobuf does not conform to the spec protobuf or
the data proto is not supported.
"""
if not ProtobufValidator._DATA_PROTO_CLASS_TO_VALIDATOR:
ProtobufValidator._DATA_PROTO_CLASS_TO_VALIDATOR = {
action_pb2.ActionData: ProtobufValidator._check_action_data,
action_pb2.Action: ProtobufValidator._check_action,
action_pb2.Joystick: ProtobufValidator._check_joystick,
observation_pb2.EntityField: ProtobufValidator._check_entity_field,
observation_pb2.Entity: ProtobufValidator._check_entity,
observation_pb2.Feeler: ProtobufValidator._check_feeler,
observation_pb2.ObservationData: ProtobufValidator._check_observation,
primitives_pb2.Category: ProtobufValidator._check_category,
primitives_pb2.Number: ProtobufValidator._check_number,
primitives_pb2.Position: ProtobufValidator._check_position,
primitives_pb2.Rotation: ProtobufValidator._check_rotation,
}
validator = (
ProtobufValidator._DATA_PROTO_CLASS_TO_VALIDATOR.get(type(data)))
if not validator:
raise TypingError(f'Validator not found for {type(data).__qualname__} at '
f'"{path_prefix}".')
validator(data, spec, path_prefix, check_spec_class)
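# Illustrative call with hypothetical protos: ProtobufValidator.check_data(
#     joystick_data, joystick_spec, 'player/controls', check_spec_class=True)
# looks up type(joystick_data) in the lazily built table above and dispatches to
# _check_joystick; a data proto class that is not registered raises TypingError.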
class ProtobufDataValidationOptions:
"""Options that control validation of data protos against spec protos."""
def __init__(self, check_feeler_data_with_spec=True):
"""Initialize options.
Args:
check_feeler_data_with_spec: Whether to check each feeler proto against
the spec. This operation is very expensive with large numbers of
feelers. If the data has already been validated it is preferable to
disable feeler validation.
"""
self.check_feeler_data_with_spec = check_feeler_data_with_spec
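# Minimal usage sketch (the call site consuming these options is assumed, not shown
# here): ProtobufDataValidationOptions(check_feeler_data_with_spec=False) skips the
# expensive per-feeler checks when the data has already been validated.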
class ProtobufNode:
"""Class that references a node in a protobuf.
Attributes:
proto: Referenced protobuf instance.
"""
# Lazily initialized by _from_spec().
_SPEC_PROTOCLASS_TO_PARSER = None
def __init__(self, name, proto, proto_field_name):
"""Initialize this instance.
Args:
name: Human readable name of the node.
proto: Protobuf instance.
proto_field_name: Name of this field in the parent proto.
"""
self._proto = proto
self._name = name
self._proto_field_name = proto_field_name
self._children_by_proto_field_name = collections.OrderedDict()
self._parent = None
self._update_path()
@property
def children(self):
"""Get the sequence of children of this node."""
return tuple(self._children_by_proto_field_name.values())
@property
def proto(self):
"""Get the protobuf owned by this node."""
return self._proto
@property
def proto_field_name(self):
"""Get the name of this field in the parent proto."""
return self._proto_field_name
def child_by_proto_field_name(self, proto_field_name):
"""Get a child node by proto_field_name.
Args:
proto_field_name: Name of the child field.
Returns:
ProtobufNode instance if found, None otherwise.
"""
return self._children_by_proto_field_name.get(proto_field_name)
@property
def parent(self):
"""Get the parent node of this node."""
return self._parent
@property
def name(self):
"""Name of this node."""
return self._name
@property
def path(self):
"""Human readable path of this node relative to its ancestors."""
return self._path
def _update_path(self):
"""Update the human readable path of this node relative to ancestors."""
parent = self.parent
if parent:
parent_path = parent._path # pylint: disable=protected-access
self._path = '/'.join([parent_path, self.name])
else:
self._path = self.name
for child in self.children:
child._update_path() # pylint: disable=protected-access
def add_children(self, children):
"""Add children node to this instance.
Args:
children: Sequence of ProtobufNode instances to add as children of this
node.
Returns:
Reference to this node.
"""
for child in children:
assert child.proto_field_name not in self._children_by_proto_field_name
# pylint: disable=protected-access
child._remove_from_parent(update_path=False)
child._parent = self # pylint: disable=protected-access
self._children_by_proto_field_name[child.proto_field_name] = child
child._update_path() # pylint: disable=protected-access
return self
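# Note: add_children() re-parents nodes; each child is first detached from any
# previous parent (without recomputing its path) and its cached path, plus the
# paths of its descendants, is rebuilt under this node via _update_path().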
def remove_from_parent(self):
"""Remove this node from its parent."""
self._remove_from_parent()
def _remove_from_parent(self, update_path=True):
"""Remove this node from its parent.
Args:
update_path: Whether to update this node's cached path.
"""
if self._parent:
# pylint: disable=protected-access
del self._parent._children_by_proto_field_name[self.proto_field_name]
self._parent = None
if update_path:
self._update_path()
def __eq__(self, other):
"""Compare for equality with another ProtobufNode instance.
Args:
other: ProtobufNode instance to compare with.
Returns:
True if they're equivalent, False otherwise.
"""
if not (other and issubclass(type(other), type(self))):
return False
if self.name != other.name:
return False
if self.proto_field_name != other.proto_field_name:
return False
if self.proto != other.proto:
return False
if len(self.children) != len(other.children):
return False
for this_child, other_child in zip(self.children, other.children):
if this_child == other_child:
continue
return False
return True
def __ne__(self, other):
"""Compare for inequality with another ProtobufNode instance.
Args:
other: ProtobufNode instance to compare with.
Returns:
True if they're not equivalent, False otherwise.
"""
return not self.__eq__(other)
def __str__(self):
"""Construct a string representation.
Returns:
String representation of this instance.
"""
children_string = ', '.join([str(child) for child in self.children])
return (f'{self.name}: '
f'(proto=({type(self.proto).__qualname__}, '
f'{self.proto_field_name}: {self.proto}), '
f'children=[{children_string}])')
def as_nest(self, include_self=True):
"""Generate a nested dictionary from this node.
Args:
include_self: Whether to include this node in the returned nest.
Returns:
Nested dictionary with leaf values referencing ProtobufNode instances that
correspond to leaves in this tree.
"""
children = self.children
if children:
child_nest = {}
nest = {self.name: child_nest}
for child in self.children:
child_nest[child.name] = child.as_nest(include_self=False)
else:
nest = {self.name: self}
return nest if include_self else nest[self.name]
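# Sketch of the returned shape for a hypothetical node 'root' with leaf children
# 'a' and 'b':
#   root.as_nest()                   -> {'root': {'a': <node a>, 'b': <node b>}}
#   root.as_nest(include_self=False) -> {'a': <node a>, 'b': <node b>}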
@staticmethod
def _infer_path_components_from_spec(spec, name, parent_path):
"""Infer path components from a spec proto.
Args:
spec: Spec proto to query.
name: Override for the proto name.
parent_path: String path to the proto.
Returns:
(name, parent_path, path) where:
* name is the overridden name or the name derived from the spec proto.
* parent_path is the supplied parent_path or an empty string if it was
None.
* path is the constructed path to the proto
"""
parent_path = parent_path if parent_path else ''
name = name if name else _get_proto_name(spec)
path = _concat_path(parent_path, name)
return (name, parent_path, path)
@staticmethod
def _from_leaf_spec(spec, name, unused_parent_path, proto_field_name):
"""Parse a leaf spec protobuf into a ProtobufNode instance.
Args:
spec: Protobuf to wrap in a ProtobufNode instance.
name: Name of the node.
unused_parent_path: Ignored.
| |
import gzip
import importlib
import json
import logging
import sys
import time
import unittest
import zlib
import six
if six.PY3:
from unittest import mock
else:
import mock
from engineio import exceptions
from engineio import packet
from engineio import payload
from engineio import server
import pytest
original_import_module = importlib.import_module
def _mock_import(module, *args, **kwargs):
if module.startswith('engineio.'):
return original_import_module(module, *args, **kwargs)
return module
class TestServer(unittest.TestCase):
_mock_async = mock.MagicMock()
_mock_async._async = {
'thread': 't',
'queue': 'q',
'queue_empty': RuntimeError,
'websocket': 'w',
}
def _get_mock_socket(self):
mock_socket = mock.MagicMock()
mock_socket.closed = False
mock_socket.closing = False
mock_socket.upgraded = False
mock_socket.session = {}
return mock_socket
@classmethod
def setUpClass(cls):
server.Server._default_monitor_clients = False
@classmethod
def tearDownClass(cls):
server.Server._default_monitor_clients = True
def setUp(self):
logging.getLogger('engineio').setLevel(logging.NOTSET)
def tearDown(self):
# restore JSON encoder, in case a test changed it
packet.Packet.json = json
def test_is_asyncio_based(self):
s = server.Server()
assert not s.is_asyncio_based()
def test_async_modes(self):
s = server.Server()
assert s.async_modes() == [
'eventlet',
'gevent_uwsgi',
'gevent',
'threading',
]
def test_create(self):
kwargs = {
'ping_timeout': 1,
'ping_interval': 2,
'max_http_buffer_size': 3,
'allow_upgrades': False,
'http_compression': False,
'compression_threshold': 4,
'cookie': 'foo',
'cors_allowed_origins': ['foo', 'bar', 'baz'],
'cors_credentials': False,
'async_handlers': False,
}
s = server.Server(**kwargs)
for arg in six.iterkeys(kwargs):
assert getattr(s, arg) == kwargs[arg]
assert s.ping_interval_grace_period == 5
def test_create_with_grace_period(self):
s = server.Server(ping_interval=(1, 2))
assert s.ping_interval == 1
assert s.ping_interval_grace_period == 2
def test_create_ignores_kwargs(self):
server.Server(foo='bar') # this should not raise
def test_async_mode_threading(self):
s = server.Server(async_mode='threading')
assert s.async_mode == 'threading'
import threading
try:
import queue
except ImportError:
import Queue as queue
assert s._async['thread'] == threading.Thread
assert s._async['queue'] == queue.Queue
assert s._async['websocket'] is None
def test_async_mode_eventlet(self):
s = server.Server(async_mode='eventlet')
assert s.async_mode == 'eventlet'
from eventlet.green import threading
from eventlet import queue
from engineio.async_drivers import eventlet as async_eventlet
assert s._async['thread'] == threading.Thread
assert s._async['queue'] == queue.Queue
assert s._async['websocket'] == async_eventlet.WebSocketWSGI
@mock.patch('importlib.import_module', side_effect=_mock_import)
def test_async_mode_gevent_uwsgi(self, import_module):
sys.modules['gevent'] = mock.MagicMock()
sys.modules['gevent'].queue = mock.MagicMock()
sys.modules['gevent.queue'] = sys.modules['gevent'].queue
sys.modules['gevent.queue'].JoinableQueue = 'foo'
sys.modules['gevent.queue'].Empty = RuntimeError
sys.modules['gevent.event'] = mock.MagicMock()
sys.modules['gevent.event'].Event = 'bar'
sys.modules['uwsgi'] = mock.MagicMock()
s = server.Server(async_mode='gevent_uwsgi')
assert s.async_mode == 'gevent_uwsgi'
from engineio.async_drivers import gevent_uwsgi as async_gevent_uwsgi
assert s._async['thread'] == async_gevent_uwsgi.Thread
assert s._async['queue'] == 'foo'
assert s._async['queue_empty'] == RuntimeError
assert s._async['event'] == 'bar'
assert s._async['websocket'] == async_gevent_uwsgi.uWSGIWebSocket
del sys.modules['gevent']
del sys.modules['gevent.queue']
del sys.modules['gevent.event']
del sys.modules['uwsgi']
del sys.modules['engineio.async_drivers.gevent_uwsgi']
@mock.patch('importlib.import_module', side_effect=_mock_import)
def test_async_mode_gevent_uwsgi_without_uwsgi(self, import_module):
sys.modules['gevent'] = mock.MagicMock()
sys.modules['gevent'].queue = mock.MagicMock()
sys.modules['gevent.queue'] = sys.modules['gevent'].queue
sys.modules['gevent.queue'].JoinableQueue = 'foo'
sys.modules['gevent.queue'].Empty = RuntimeError
sys.modules['gevent.event'] = mock.MagicMock()
sys.modules['gevent.event'].Event = 'bar'
sys.modules['uwsgi'] = None
with pytest.raises(ValueError):
server.Server(async_mode='gevent_uwsgi')
del sys.modules['gevent']
del sys.modules['gevent.queue']
del sys.modules['gevent.event']
del sys.modules['uwsgi']
@mock.patch('importlib.import_module', side_effect=_mock_import)
def test_async_mode_gevent_uwsgi_without_websocket(self, import_module):
sys.modules['gevent'] = mock.MagicMock()
sys.modules['gevent'].queue = mock.MagicMock()
sys.modules['gevent.queue'] = sys.modules['gevent'].queue
sys.modules['gevent.queue'].JoinableQueue = 'foo'
sys.modules['gevent.queue'].Empty = RuntimeError
sys.modules['gevent.event'] = mock.MagicMock()
sys.modules['gevent.event'].Event = 'bar'
sys.modules['uwsgi'] = mock.MagicMock()
del sys.modules['uwsgi'].websocket_handshake
s = server.Server(async_mode='gevent_uwsgi')
assert s.async_mode == 'gevent_uwsgi'
from engineio.async_drivers import gevent_uwsgi as async_gevent_uwsgi
assert s._async['thread'] == async_gevent_uwsgi.Thread
assert s._async['queue'] == 'foo'
assert s._async['queue_empty'] == RuntimeError
assert s._async['event'] == 'bar'
assert s._async['websocket'] is None
del sys.modules['gevent']
del sys.modules['gevent.queue']
del sys.modules['gevent.event']
del sys.modules['uwsgi']
del sys.modules['engineio.async_drivers.gevent_uwsgi']
@mock.patch('importlib.import_module', side_effect=_mock_import)
def test_async_mode_gevent(self, import_module):
sys.modules['gevent'] = mock.MagicMock()
sys.modules['gevent'].queue = mock.MagicMock()
sys.modules['gevent.queue'] = sys.modules['gevent'].queue
sys.modules['gevent.queue'].JoinableQueue = 'foo'
sys.modules['gevent.queue'].Empty = RuntimeError
sys.modules['gevent.event'] = mock.MagicMock()
sys.modules['gevent.event'].Event = 'bar'
sys.modules['geventwebsocket'] = 'geventwebsocket'
s = server.Server(async_mode='gevent')
assert s.async_mode == 'gevent'
from engineio.async_drivers import gevent as async_gevent
assert s._async['thread'] == async_gevent.Thread
assert s._async['queue'] == 'foo'
assert s._async['queue_empty'] == RuntimeError
assert s._async['event'] == 'bar'
assert s._async['websocket'] == async_gevent.WebSocketWSGI
del sys.modules['gevent']
del sys.modules['gevent.queue']
del sys.modules['gevent.event']
del sys.modules['geventwebsocket']
del sys.modules['engineio.async_drivers.gevent']
@mock.patch('importlib.import_module', side_effect=_mock_import)
def test_async_mode_gevent_without_websocket(self, import_module):
sys.modules['gevent'] = mock.MagicMock()
sys.modules['gevent'].queue = mock.MagicMock()
sys.modules['gevent.queue'] = sys.modules['gevent'].queue
sys.modules['gevent.queue'].JoinableQueue = 'foo'
sys.modules['gevent.queue'].Empty = RuntimeError
sys.modules['gevent.event'] = mock.MagicMock()
sys.modules['gevent.event'].Event = 'bar'
sys.modules['geventwebsocket'] = None
s = server.Server(async_mode='gevent')
assert s.async_mode == 'gevent'
from engineio.async_drivers import gevent as async_gevent
assert s._async['thread'] == async_gevent.Thread
assert s._async['queue'] == 'foo'
assert s._async['queue_empty'] == RuntimeError
assert s._async['event'] == 'bar'
assert s._async['websocket'] is None
del sys.modules['gevent']
del sys.modules['gevent.queue']
del sys.modules['gevent.event']
del sys.modules['geventwebsocket']
del sys.modules['engineio.async_drivers.gevent']
@unittest.skipIf(sys.version_info < (3, 5), 'only for Python 3.5+')
@mock.patch('importlib.import_module', side_effect=_mock_import)
def test_async_mode_aiohttp(self, import_module):
sys.modules['aiohttp'] = mock.MagicMock()
with pytest.raises(ValueError):
server.Server(async_mode='aiohttp')
@mock.patch('importlib.import_module', side_effect=[ImportError])
def test_async_mode_invalid(self, import_module):
with pytest.raises(ValueError):
server.Server(async_mode='foo')
@mock.patch('importlib.import_module', side_effect=[_mock_async])
def test_async_mode_auto_eventlet(self, import_module):
s = server.Server()
assert s.async_mode == 'eventlet'
@mock.patch(
'importlib.import_module', side_effect=[ImportError, _mock_async]
)
def test_async_mode_auto_gevent_uwsgi(self, import_module):
s = server.Server()
assert s.async_mode == 'gevent_uwsgi'
@mock.patch(
'importlib.import_module',
side_effect=[ImportError, ImportError, _mock_async],
)
def test_async_mode_auto_gevent(self, import_module):
s = server.Server()
assert s.async_mode == 'gevent'
@mock.patch(
'importlib.import_module',
side_effect=[ImportError, ImportError, ImportError, _mock_async],
)
def test_async_mode_auto_threading(self, import_module):
s = server.Server()
assert s.async_mode == 'threading'
def test_generate_id(self):
s = server.Server()
assert s._generate_id() != s._generate_id()
def test_on_event(self):
s = server.Server()
@s.on('connect')
def foo():
pass
s.on('disconnect', foo)
assert s.handlers['connect'] == foo
assert s.handlers['disconnect'] == foo
def test_on_event_invalid(self):
s = server.Server()
with pytest.raises(ValueError):
s.on('invalid')
def test_trigger_event(self):
s = server.Server()
f = {}
@s.on('connect')
def foo(sid, environ):
return sid + environ
@s.on('message')
def bar(sid, data):
f['bar'] = sid + data
return 'bar'
r = s._trigger_event('connect', 1, 2, run_async=False)
assert r == 3
r = s._trigger_event('message', 3, 4, run_async=True)
r.join()
assert f['bar'] == 7
r = s._trigger_event('message', 5, 6)
assert r == 'bar'
def test_trigger_event_error(self):
s = server.Server()
@s.on('connect')
def foo(sid, environ):
return 1 / 0
@s.on('message')
def bar(sid, data):
return 1 / 0
r = s._trigger_event('connect', 1, 2, run_async=False)
assert not r
r = s._trigger_event('message', 3, 4, run_async=False)
assert r is None
def test_session(self):
s = server.Server()
mock_socket = self._get_mock_socket()
s.sockets['foo'] = mock_socket
with s.session('foo') as session:
assert session == {}
session['username'] = 'bar'
assert s.get_session('foo') == {'username': 'bar'}
def test_close_one_socket(self):
s = server.Server()
mock_socket = self._get_mock_socket()
s.sockets['foo'] = mock_socket
s.disconnect('foo')
assert mock_socket.close.call_count == 1
assert 'foo' not in s.sockets
def test_close_all_sockets(self):
s = server.Server()
mock_sockets = {}
for sid in ['foo', 'bar', 'baz']:
mock_sockets[sid] = self._get_mock_socket()
s.sockets[sid] = mock_sockets[sid]
s.disconnect()
for socket in six.itervalues(mock_sockets):
assert socket.close.call_count == 1
assert s.sockets == {}
def test_upgrades(self):
s = server.Server()
s.sockets['foo'] = self._get_mock_socket()
assert s._upgrades('foo', 'polling') == ['websocket']
assert s._upgrades('foo', 'websocket') == []
s.sockets['foo'].upgraded = True
assert s._upgrades('foo', 'polling') == []
assert s._upgrades('foo', 'websocket') == []
s.allow_upgrades = False
s.sockets['foo'].upgraded = True
assert s._upgrades('foo', 'polling') == []
assert s._upgrades('foo', 'websocket') == []
def test_transport(self):
s = server.Server()
s.sockets['foo'] = self._get_mock_socket()
s.sockets['foo'].upgraded = False
s.sockets['bar'] = self._get_mock_socket()
s.sockets['bar'].upgraded = True
assert s.transport('foo') == 'polling'
assert s.transport('bar') == 'websocket'
def test_bad_session(self):
s = server.Server()
s.sockets['foo'] = 'client'
with pytest.raises(KeyError):
s._get_socket('bar')
def test_closed_socket(self):
s = server.Server()
s.sockets['foo'] = self._get_mock_socket()
s.sockets['foo'].closed = True
with pytest.raises(KeyError):
s._get_socket('foo')
def test_jsonp_with_bad_index(self):
s = server.Server()
environ = {'REQUEST_METHOD': 'GET', 'QUERY_STRING': 'EIO=3&j=abc'}
start_response = mock.MagicMock()
s.handle_request(environ, start_response)
assert start_response.call_args[0][0] == '400 BAD REQUEST'
def test_jsonp_index(self):
s = server.Server()
environ = {'REQUEST_METHOD': 'GET', 'QUERY_STRING': 'EIO=3&j=233'}
start_response = mock.MagicMock()
r = s.handle_request(environ, start_response)
assert start_response.call_args[0][0] == '200 OK'
assert r[0].startswith(b'___eio[233]("')
assert r[0].endswith(b'");')
def test_connect(self):
s = server.Server()
environ = {'REQUEST_METHOD': 'GET', 'QUERY_STRING': 'EIO=3'}
start_response = mock.MagicMock()
r = s.handle_request(environ, start_response)
assert len(s.sockets) == 1
assert start_response.call_count == 1
assert start_response.call_args[0][0] == '200 OK'
assert (
'Content-Type',
'application/octet-stream',
) in start_response.call_args[0][1]
assert len(r) == 1
packets = payload.Payload(encoded_payload=r[0]).packets
assert len(packets) == 1
assert packets[0].packet_type == packet.OPEN
assert 'upgrades' in packets[0].data
assert packets[0].data['upgrades'] == ['websocket']
assert 'sid' in packets[0].data
def test_connect_no_upgrades(self):
s = server.Server(allow_upgrades=False)
environ = {'REQUEST_METHOD': 'GET', 'QUERY_STRING': 'EIO=3'}
start_response = mock.MagicMock()
r = s.handle_request(environ, start_response)
packets = payload.Payload(encoded_payload=r[0]).packets
assert packets[0].data['upgrades'] == []
def test_connect_bad_eio_version(self):
s = server.Server()
environ = {'REQUEST_METHOD': 'GET', 'QUERY_STRING': 'EIO=5'}
start_response = mock.MagicMock()
r = s.handle_request(environ, start_response)
assert start_response.call_args[0][0] == '400 BAD REQUEST'
assert b'unsupported version' in r[0]
def test_connect_b64_with_1(self):
s = server.Server(allow_upgrades=False)
s._generate_id = mock.MagicMock(return_value='1')
environ = {'REQUEST_METHOD': 'GET', 'QUERY_STRING': 'EIO=3&b64=1'}
start_response = mock.MagicMock()
s.handle_request(environ, start_response)
assert start_response.call_args[0][0] == '200 OK'
assert (
'Content-Type',
'text/plain; charset=UTF-8',
) in start_response.call_args[0][1]
s.send('1', b'\x00\x01\x02', binary=True)
environ = {'REQUEST_METHOD': 'GET',
'QUERY_STRING': 'EIO=3&sid=1&b64=1'}
r = s.handle_request(environ, start_response)
assert r[0] == b'6:b4AAEC'
def test_connect_b64_with_true(self):
s = server.Server(allow_upgrades=False)
s._generate_id = mock.MagicMock(return_value='1')
environ = {'REQUEST_METHOD': 'GET', 'QUERY_STRING': 'EIO=3&b64=true'}
start_response = mock.MagicMock()
s.handle_request(environ, start_response)
assert start_response.call_args[0][0] == '200 OK'
assert (
'Content-Type',
'text/plain; charset=UTF-8',
) in start_response.call_args[0][1]
s.send('1', b'\x00\x01\x02', binary=True)
environ = {'REQUEST_METHOD': 'GET',
'QUERY_STRING': 'EIO=3&sid=1&b64=true'}
r = s.handle_request(environ, start_response)
assert r[0] == b'6:b4AAEC'
def test_connect_b64_with_0(self):
s = server.Server(allow_upgrades=False)
s._generate_id = mock.MagicMock(return_value='1')
environ = {'REQUEST_METHOD': 'GET', 'QUERY_STRING': 'EIO=3&b64=0'}
start_response = mock.MagicMock()
s.handle_request(environ, start_response)
assert start_response.call_args[0][0] == '200 OK'
assert (
'Content-Type',
| |
ttimer('calculate new solution',timeit) as t2:
for row in self.stackrows:
self.pronew2d(values, outvalues, row , alfa )
self.solvenew2d(values, outvalues, row , alfa )
self.epinew2d(values, outvalues, row , alfa )
ittotal += 1
with ttimer('extract new solution',timeit) as t2:
now = outvalues[self.stackrowindex,self.stackcolindex]
distance = now-before
newton_conv =np.abs(distance).sum()
if not silent:print(f'Iteration {iteration} Sum of distances {newton_conv:>{15},.{6}f}')
if newton_conv <= 0.001 :
convergence = True
break
if iteration != 0 and nonlin and not (iteration % nonlin) or iteration <= nonlinfirst :
with ttimer('Updating solver',timeit) as t3:
if not silent :print(f'Updating solver, iteration {iteration}')
df_now = pd.DataFrame(values,index=databank.index,columns=databank.columns)
self.stacksolver = self.getsolver(df=df_now)
diffcount += 1
with ttimer('Update solution',timeit):
# update = self.solveinv(distance)
update = self.stacksolver(distance)
damp = newtonalfa if iteration <= newtonnodamp else 1.0
values[self.stackrowindex,self.stackcolindex] = before - damp * update
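# Damped Newton step on the stacked system: with the residual held in `distance` and
# the factorized Jacobian applied by `stacksolver`, the update is
#   x_new = x_old - damp * J^(-1) F(x_old)
# where damp = newtonalfa for the first `newtonnodamp` iterations and 1.0 afterwards.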
if ldumpvar:
self.dumplist.append([fairiteration,self.periode, int(iteration+1)]+[values[row,p]
for p in dumpplac])
# if iteration > first_test:
# itafter=[values[row,c] for c in convplace]
# convergence = True
# for after,before in zip(itafter,itbefore):
## print(before,after)
# if before > absconv and abs(after-before)/abs(before) > relconv:
# convergence = False
# break
# if convergence:
# break
# else:
# itbefore=itafter
# self.epistack2d(values, values, row , alfa )
if not silent:
if not convergence :
print(f'Not converged in {iteration} iterations')
else:
print(f'Solved in {iteration} iterations')
if ldumpvar:
self.dumpdf= pd.DataFrame(self.dumplist)
del self.dumplist
self.dumpdf.columns= ['fair','per','iteration']+self.dump
if fairantal<=2 : self.dumpdf.drop('fair',axis=1,inplace=True)
outdf = pd.DataFrame(values,index=databank.index,columns=databank.columns)
if stats:
numberfloats = self.calculate_freq[-1][1]*ittotal
endtime = time.time()
self.simtime = endtime-starttime
self.setuptime = endtimesetup - starttimesetup
print(f'Setup time (seconds) :{self.setuptime:>15,.4f}')
print(f'Total model evaluations :{ittotal:>15,}')
print(f'Number of solver update :{diffcount:>15,}')
print(f'Simulation time (seconds) :{self.simtime:>15,.4f}')
if self.simtime > 0.0:
print(f'Floating point operations per second : {numberfloats/self.simtime:>15,.1f}')
if not silent : print (self.name + ' solved ')
return outdf
def newton1per_un_normalized(self, databank, start='', slut='', silent=1,samedata=0,alfa=1.0,stats=False,first_test=1,
antal=20,conv=[],absconv=0.01,relconv=0.00001, nonlin=False ,timeit = False,reset=1,
dumpvar=[],ldumpvar=False,dumpwith=15,dumpdecimal=5,chunk=None,ljit=False,
fairopt={'fairantal':1},
newtonalfa = 1.0 , newtonnodamp=0,**kwargs):
'''Evaluates this model on a databank from start to slut (means end in Danish).
First it finds the values in the Dataframe, then creates the evaluator function through the *outeval* function
(:func:`modelclass.model.fouteval`),
then evaluates that function and returns the values to the Dataframe in the databank.
The text for the evaluator function is placed in the model property **make_los_text**,
where it can be inspected
in case of problems.
'''
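# Note: the make_los() output used below consists of three callables (prologue,
# core solve, epilogue); each is invoked as f(values, outvalues, row, alfa) on the
# numpy view of the databank.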
# print('new nwwton')
starttimesetup=time.time()
fairantal = {**fairopt,**kwargs}.get('fairantal',1)
sol_periode = self.smpl(start,slut,databank)
if self.maxlag and not (self.current_per[0]+self.maxlag) in databank.index :
print('***** Warning: You are solving the model before all lags are available')
print('Maxlag:',self.maxlag,'First solveperiod:',self.current_per[0],'First dataframe index',databank.index[0])
sys.exit()
if not silent : print ('Will start calculating: ' + self.name)
# if not samedata or not hasattr(self,'new2d') :
# if (not hasattr(self,'solvenew2d')) or (not self.eqcolumns(self.genrcolumns,databank.columns)):
# databank=insertModelVar(databank,self) # fill all Missing value with 0.0
# for i in [j for j in self.allvar.keys() if self.allvar[j]['matrix']]:
# databank.loc[:,i]=databank.loc[:,i].astype('O') # Make sure columns with matrixes are of this type
#
# self.make_new_text2d = self.outsolve2dcunk(databank,chunk=chunk,
# ljit=ljit, debug=kwargs.get('debug',1),type='res')
# exec(self.make_new_text2d,globals()) # creates the los function
# self.pronew2d,self.solvenew2d,self.epinew2d = make_los(self.funks,self.errfunk)
if not self.eqcolumns(self.genrcolumns,databank.columns):
databank=insertModelVar(databank,self) # fill all Missing value with 0.0
for i in [j for j in self.allvar.keys() if self.allvar[j]['matrix']]:
databank.loc[:,i]=databank.loc[:,i].astype('O') # Make sure columns with matrixes are of this type
newdata = True
else:
newdata = False
if ljit:
if newdata or not hasattr(self,'pronew2d_jit'):
if not silent: print(f'Create compiled solving function for {self.name}')
self.make_newlos_text2d_jit = self.outsolve2dcunk(databank,chunk=chunk,ljit=ljit, debug=kwargs.get('debug',1),type='res')
exec(self.make_newlos_text2d_jit,globals()) # creates the los function
self.pronew2d_jit,self.solvenew2d_jit,self.epinew2d_jit = make_los(self.funks,self.errfunk)
self.pronew2d,self.solvenew2d,self.epinew2d = self.pronew2d_jit,self.solvenew2d_jit,self.epinew2d_jit
else:
if newdata or not hasattr(self,'pronew2d_nojit'):
if not silent: print(f'Create solving function for {self.name}')
self.make_newlos_text2d_nojit = self.outsolve2dcunk(databank,chunk=chunk,ljit=ljit, debug=kwargs.get('debug',1),type='res')
exec(self.make_newlos_text2d_nojit,globals()) # creates the los function
self.pronew2d_nojit,self.solvenew2d_nojit,self.epinew2d_nojit = make_los(self.funks,self.errfunk)
self.pronew2d,self.solvenew2d,self.epinew2d = self.pronew2d_nojit,self.solvenew2d_nojit,self.epinew2d_nojit
values = databank.values.copy()
outvalues = np.empty_like(values)#
if not hasattr(self,'newton_diff'):
endovar = self.coreorder if self.use_preorder else self.solveorder
self.newton_diff = newton_diff(self,forcenum=1,df=databank,
endovar = endovar, ljit=ljit,nchunk=chunk,onlyendocur=True )
if not hasattr(self,'solver') or reset:
# breakpoint()
self.solver = self.newton_diff.get_solve1per(df=databank,periode=[self.current_per[0]])[self.current_per[0]]
newton_col = [databank.columns.get_loc(c) for c in self.newton_diff.endovar]
newton_col_endo = [databank.columns.get_loc(c) for c in self.newton_diff.declared_endo_list]
self.genrcolumns = databank.columns.copy()
self.genrindex = databank.index.copy()
convvar = [conv.upper()] if isinstance(conv,str) else [c.upper() for c in conv] if conv != [] else list(self.endogene)
convplace=[databank.columns.get_loc(c) for c in convvar] # this is how convergence is measured
convergence = True
if ldumpvar:
self.dumplist = []
self.dump = convvar if dumpvar == [] else [v for v in self.vlist(dumpvar) if v in self.endogene]
dumpplac = [databank.columns.get_loc(v) for v in self.dump]
ittotal = 0
endtimesetup=time.time()
starttime=time.time()
for fairiteration in range(fairantal):
if fairantal >=2:
print(f'Fair-Taylor iteration: {fairiteration}')
for self.periode in sol_periode:
row=databank.index.get_loc(self.periode)
if ldumpvar:
self.dumplist.append([fairiteration,self.periode,int(0)]+[values[row,p]
for p in dumpplac])
itbefore = [values[row,c] for c in convplace]
self.pronew2d(values, values, row , alfa )
for iteration in range(antal):
with ttimer(f'sim per:{self.periode} it:{iteration}',0) as xxtt:
before = values[row,newton_col_endo]
self.solvenew2d(values, outvalues, row , alfa )
now = outvalues[row,newton_col]
distance = now-0.0
newton_conv =np.abs(distance).sum()
if newton_conv <= 0.000000001 :
# print(f'Iteration {iteration} sum of distances {newton_conv}')
break
if iteration != 0 and nonlin and not (iteration % nonlin):
with ttimer('Updating solver',timeit) as t3:
if not silent :print(f'Updating solver, iteration {iteration}')
df_now = pd.DataFrame(values,index=databank.index,columns=databank.columns)
self.solver = self.newton_diff.get_solve1per(df=df_now,periode=[self.periode])[self.periode]
#breakpoint()
with ttimer('Update solution',0):
# update = self.solveinv(distance)
update = self.solver(distance)
# breakpoint()
damp = newtonalfa if iteration <= newtonnodamp else 1.0
values[row,newton_col_endo] = before - damp*update
ittotal += 1
if ldumpvar:
self.dumplist.append([fairiteration,self.periode, int(iteration+1)]+[values[row,p]
for p in dumpplac])
# if iteration > first_test:
# itafter=[values[row,c] for c in convplace]
# convergence = True
# for after,before in zip(itafter,itbefore):
## print(before,after)
# if before > absconv and abs(after-before)/abs(before) > relconv:
# convergence = False
# break
# if convergence:
# break
# else:
# itbefore=itafter
self.epinew2d(values, values, row , alfa )
if not silent:
if not convergence :
print(f'{self.periode} not converged in {iteration} iterations')
else:
print(f'{self.periode} Solved in {iteration} iterations')
if ldumpvar:
self.dumpdf= pd.DataFrame(self.dumplist)
del self.dumplist
self.dumpdf.columns= ['fair','per','iteration']+self.dump
if fairantal<=2 : self.dumpdf.drop('fair',axis=1,inplace=True)
outdf = pd.DataFrame(values,index=databank.index,columns=databank.columns)
if stats:
numberfloats = self.calculate_freq[-1][1]*ittotal
endtime = time.time()
self.simtime = endtime-starttime
self.setuptime = endtimesetup - starttimesetup
print(f'Setup time (seconds) :{self.setuptime:>15,.2f}')
print(f'Floating point operations :{self.calculate_freq[-1][1]:>15,}')
print(f'Total iterations :{ittotal:>15,}')
print(f'Total floating point operations :{numberfloats:>15,}')
print(f'Simulation time (seconds) :{self.simtime:>15,.2f}')
if self.simtime > 0.0:
print(f'Floating point operations per second : {numberfloats/self.simtime:>15,.1f}')
if not silent : print (self.name + ' solved ')
return outdf
def newtonstack_un_normalized(self, databank, start='', slut='', silent=1,samedata=0,alfa=1.0,stats=False,first_test=1,
antal=20,conv=[],absconv=0.01,relconv=0.00001,
dumpvar=[],ldumpvar=False,dumpwith=15,dumpdecimal=5,chunk=None,nchunk=None,ljit=False,nljit=0,
fairopt={'fairantal':1},debug=False,timeit=False,nonlin=False,
newtonalfa = 1.0 , newtonnodamp=0,forcenum=True,reset = False, **kwargs):
'''Evaluates this model on a databank from start to slut (means end in Danish).
First it finds the values in the Dataframe, then creates the evaluator function through the *outeval* function
(:func:`modelclass.model.fouteval`),
then evaluates that function and returns the values to the Dataframe in the databank.
The text for the evaluator function is placed in the model property **make_los_text**,
where it can be inspected
in case of problems.
'''
# print('new nwwton')
ittotal = 0
diffcount = 0
starttimesetup=time.time()
fairantal = {**fairopt,**kwargs}.get('fairantal',1)
sol_periode = self.smpl(start,slut,databank)
if self.maxlag and not (self.current_per[0]+self.maxlag) in databank.index :
print('***** Warning: You are solving the model before all lags are available')
print('Maxlag:',self.maxlag,'First solveperiod:',self.current_per[0],'First dataframe index',databank.index[0])
sys.exit()
if not silent : print ('Will start calculating: ' + self.name)
# if not samedata or not hasattr(self,'solve2d') :
# if (not hasattr(self,'solvestack2d')) or (not self.eqcolumns(self.genrcolumns,databank.columns)):
# databank=insertModelVar(databank,self) # fill all Missing value with 0.0
# for i in [j for j in self.allvar.keys() if self.allvar[j]['matrix']]:
# databank.loc[:,i]=databank.loc[:,i].astype('O') # Make sure columns with matrixes are of this type
#
# self.make_losstack_text2d = self.outsolve2dcunk(databank,chunk=chunk,
# ljit=ljit, debug=debug,type='res')
# exec(self.make_losstack_text2d,globals()) # creates the los function
# self.prostack2d,self.solvestack2d,self.epistack2d = make_los(self.funks,self.errfunk)
if not self.eqcolumns(self.genrcolumns,databank.columns):
databank=insertModelVar(databank,self) # fill all Missing value with 0.0
for i in [j for j in self.allvar.keys() if self.allvar[j]['matrix']]:
events notifications.
:param pulumi.Input[bool] confidential_issues_events: Enable notifications for confidential issues events.
:param pulumi.Input[bool] confidential_note_events: Enable notifications for confidential note events.
:param pulumi.Input[str] issue_channel: The name of the channel to receive issue events notifications.
:param pulumi.Input[bool] issues_events: Enable notifications for issues events.
:param pulumi.Input[str] merge_request_channel: The name of the channel to receive merge request events notifications.
:param pulumi.Input[bool] merge_requests_events: Enable notifications for merge requests events.
:param pulumi.Input[str] note_channel: The name of the channel to receive note events notifications.
:param pulumi.Input[bool] note_events: Enable notifications for note events.
:param pulumi.Input[bool] notify_only_broken_pipelines: Send notifications for broken pipelines.
:param pulumi.Input[bool] notify_only_default_branch: DEPRECATED: This parameter has been replaced with `branches_to_be_notified`.
:param pulumi.Input[str] pipeline_channel: The name of the channel to receive pipeline events notifications.
:param pulumi.Input[bool] pipeline_events: Enable notifications for pipeline events.
:param pulumi.Input[str] project: ID of the project you want to activate integration on.
:param pulumi.Input[str] push_channel: The name of the channel to receive push events notifications.
:param pulumi.Input[bool] push_events: Enable notifications for push events.
:param pulumi.Input[str] tag_push_channel: The name of the channel to receive tag push events notifications.
:param pulumi.Input[bool] tag_push_events: Enable notifications for tag push events.
:param pulumi.Input[str] username: Username to use.
:param pulumi.Input[str] webhook: Webhook URL (ex.: https://hooks.slack.com/services/...)
:param pulumi.Input[str] wiki_page_channel: The name of the channel to receive wiki page events notifications.
:param pulumi.Input[bool] wiki_page_events: Enable notifications for wiki page events.
"""
...
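# A minimal usage sketch; the project reference and webhook URL below are
# placeholders, and the usual package name pulumi_gitlab is assumed:
#
#   import pulumi_gitlab as gitlab
#
#   slack = gitlab.ServiceSlack("slack",
#       project=awesome_project.id,
#       webhook="https://hooks.slack.com/services/...",
#       username="ci-bot",
#       push_events=True,
#       push_channel="#builds")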
@overload
def __init__(__self__,
resource_name: str,
args: ServiceSlackArgs,
opts: Optional[pulumi.ResourceOptions] = None):
"""
Create a ServiceSlack resource with the given unique name, props, and options.
:param str resource_name: The name of the resource.
:param ServiceSlackArgs args: The arguments to use to populate this resource's properties.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
...
def __init__(__self__, resource_name: str, *args, **kwargs):
resource_args, opts = _utilities.get_resource_args_opts(ServiceSlackArgs, pulumi.ResourceOptions, *args, **kwargs)
if resource_args is not None:
__self__._internal_init(resource_name, opts, **resource_args.__dict__)
else:
__self__._internal_init(resource_name, *args, **kwargs)
def _internal_init(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
branches_to_be_notified: Optional[pulumi.Input[str]] = None,
confidential_issue_channel: Optional[pulumi.Input[str]] = None,
confidential_issues_events: Optional[pulumi.Input[bool]] = None,
confidential_note_events: Optional[pulumi.Input[bool]] = None,
issue_channel: Optional[pulumi.Input[str]] = None,
issues_events: Optional[pulumi.Input[bool]] = None,
merge_request_channel: Optional[pulumi.Input[str]] = None,
merge_requests_events: Optional[pulumi.Input[bool]] = None,
note_channel: Optional[pulumi.Input[str]] = None,
note_events: Optional[pulumi.Input[bool]] = None,
notify_only_broken_pipelines: Optional[pulumi.Input[bool]] = None,
notify_only_default_branch: Optional[pulumi.Input[bool]] = None,
pipeline_channel: Optional[pulumi.Input[str]] = None,
pipeline_events: Optional[pulumi.Input[bool]] = None,
project: Optional[pulumi.Input[str]] = None,
push_channel: Optional[pulumi.Input[str]] = None,
push_events: Optional[pulumi.Input[bool]] = None,
tag_push_channel: Optional[pulumi.Input[str]] = None,
tag_push_events: Optional[pulumi.Input[bool]] = None,
username: Optional[pulumi.Input[str]] = None,
webhook: Optional[pulumi.Input[str]] = None,
wiki_page_channel: Optional[pulumi.Input[str]] = None,
wiki_page_events: Optional[pulumi.Input[bool]] = None,
__props__=None):
if opts is None:
opts = pulumi.ResourceOptions()
if not isinstance(opts, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
if opts.version is None:
opts.version = _utilities.get_version()
if opts.id is None:
if __props__ is not None:
raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
__props__ = ServiceSlackArgs.__new__(ServiceSlackArgs)
__props__.__dict__["branches_to_be_notified"] = branches_to_be_notified
__props__.__dict__["confidential_issue_channel"] = confidential_issue_channel
__props__.__dict__["confidential_issues_events"] = confidential_issues_events
__props__.__dict__["confidential_note_events"] = confidential_note_events
__props__.__dict__["issue_channel"] = issue_channel
__props__.__dict__["issues_events"] = issues_events
__props__.__dict__["merge_request_channel"] = merge_request_channel
__props__.__dict__["merge_requests_events"] = merge_requests_events
__props__.__dict__["note_channel"] = note_channel
__props__.__dict__["note_events"] = note_events
__props__.__dict__["notify_only_broken_pipelines"] = notify_only_broken_pipelines
if notify_only_default_branch is not None and not opts.urn:
warnings.warn("""use 'branches_to_be_notified' argument instead""", DeprecationWarning)
pulumi.log.warn("""notify_only_default_branch is deprecated: use 'branches_to_be_notified' argument instead""")
__props__.__dict__["notify_only_default_branch"] = notify_only_default_branch
__props__.__dict__["pipeline_channel"] = pipeline_channel
__props__.__dict__["pipeline_events"] = pipeline_events
if project is None and not opts.urn:
raise TypeError("Missing required property 'project'")
__props__.__dict__["project"] = project
__props__.__dict__["push_channel"] = push_channel
__props__.__dict__["push_events"] = push_events
__props__.__dict__["tag_push_channel"] = tag_push_channel
__props__.__dict__["tag_push_events"] = tag_push_events
__props__.__dict__["username"] = username
if webhook is None and not opts.urn:
raise TypeError("Missing required property 'webhook'")
__props__.__dict__["webhook"] = webhook
__props__.__dict__["wiki_page_channel"] = wiki_page_channel
__props__.__dict__["wiki_page_events"] = wiki_page_events
__props__.__dict__["job_events"] = None
super(ServiceSlack, __self__).__init__(
'gitlab:index/serviceSlack:ServiceSlack',
resource_name,
__props__,
opts)
@staticmethod
def get(resource_name: str,
id: pulumi.Input[str],
opts: Optional[pulumi.ResourceOptions] = None,
branches_to_be_notified: Optional[pulumi.Input[str]] = None,
confidential_issue_channel: Optional[pulumi.Input[str]] = None,
confidential_issues_events: Optional[pulumi.Input[bool]] = None,
confidential_note_events: Optional[pulumi.Input[bool]] = None,
issue_channel: Optional[pulumi.Input[str]] = None,
issues_events: Optional[pulumi.Input[bool]] = None,
job_events: Optional[pulumi.Input[bool]] = None,
merge_request_channel: Optional[pulumi.Input[str]] = None,
merge_requests_events: Optional[pulumi.Input[bool]] = None,
note_channel: Optional[pulumi.Input[str]] = None,
note_events: Optional[pulumi.Input[bool]] = None,
notify_only_broken_pipelines: Optional[pulumi.Input[bool]] = None,
notify_only_default_branch: Optional[pulumi.Input[bool]] = None,
pipeline_channel: Optional[pulumi.Input[str]] = None,
pipeline_events: Optional[pulumi.Input[bool]] = None,
project: Optional[pulumi.Input[str]] = None,
push_channel: Optional[pulumi.Input[str]] = None,
push_events: Optional[pulumi.Input[bool]] = None,
tag_push_channel: Optional[pulumi.Input[str]] = None,
tag_push_events: Optional[pulumi.Input[bool]] = None,
username: Optional[pulumi.Input[str]] = None,
webhook: Optional[pulumi.Input[str]] = None,
wiki_page_channel: Optional[pulumi.Input[str]] = None,
wiki_page_events: Optional[pulumi.Input[bool]] = None) -> 'ServiceSlack':
"""
Get an existing ServiceSlack resource's state with the given name, id, and optional extra
properties used to qualify the lookup.
:param str resource_name: The unique name of the resulting resource.
:param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[str] branches_to_be_notified: Branches to send notifications for. Valid options are "all", "default", "protected", and "default_and_protected".
:param pulumi.Input[str] confidential_issue_channel: The name of the channel to receive confidential issue events notifications.
:param pulumi.Input[bool] confidential_issues_events: Enable notifications for confidential issues events.
:param pulumi.Input[bool] confidential_note_events: Enable notifications for confidential note events.
:param pulumi.Input[str] issue_channel: The name of the channel to receive issue events notifications.
:param pulumi.Input[bool] issues_events: Enable notifications for issues events.
:param pulumi.Input[str] merge_request_channel: The name of the channel to receive merge request events notifications.
:param pulumi.Input[bool] merge_requests_events: Enable notifications for merge requests events.
:param pulumi.Input[str] note_channel: The name of the channel to receive note events notifications.
:param pulumi.Input[bool] note_events: Enable notifications for note events.
:param pulumi.Input[bool] notify_only_broken_pipelines: Send notifications for broken pipelines.
:param pulumi.Input[bool] notify_only_default_branch: DEPRECATED: This parameter has been replaced with `branches_to_be_notified`.
:param pulumi.Input[str] pipeline_channel: The name of the channel to receive pipeline events notifications.
:param pulumi.Input[bool] pipeline_events: Enable notifications for pipeline events.
:param pulumi.Input[str] project: ID of the project you want to activate integration on.
:param pulumi.Input[str] push_channel: The name of the channel to receive push events notifications.
:param pulumi.Input[bool] push_events: Enable notifications for push events.
:param pulumi.Input[str] tag_push_channel: The name of the channel to receive tag push events notifications.
:param pulumi.Input[bool] tag_push_events: Enable notifications for tag push events.
:param pulumi.Input[str] username: Username to use.
:param pulumi.Input[str] webhook: Webhook URL (ex.: https://hooks.slack.com/services/...)
:param pulumi.Input[str] wiki_page_channel: The name of the channel to receive wiki page events notifications.
:param pulumi.Input[bool] wiki_page_events: Enable notifications for wiki page events.
"""
opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
__props__ = _ServiceSlackState.__new__(_ServiceSlackState)
__props__.__dict__["branches_to_be_notified"] = branches_to_be_notified
__props__.__dict__["confidential_issue_channel"] = confidential_issue_channel
__props__.__dict__["confidential_issues_events"] = confidential_issues_events
__props__.__dict__["confidential_note_events"] = confidential_note_events
__props__.__dict__["issue_channel"] = issue_channel
__props__.__dict__["issues_events"] = issues_events
__props__.__dict__["job_events"] = job_events
__props__.__dict__["merge_request_channel"] = merge_request_channel
__props__.__dict__["merge_requests_events"] = merge_requests_events
__props__.__dict__["note_channel"] = note_channel
__props__.__dict__["note_events"] = note_events
__props__.__dict__["notify_only_broken_pipelines"] = notify_only_broken_pipelines
__props__.__dict__["notify_only_default_branch"] = notify_only_default_branch
__props__.__dict__["pipeline_channel"] = pipeline_channel
__props__.__dict__["pipeline_events"] = pipeline_events
__props__.__dict__["project"] = project
__props__.__dict__["push_channel"] = push_channel
__props__.__dict__["push_events"] = push_events
__props__.__dict__["tag_push_channel"] = tag_push_channel
__props__.__dict__["tag_push_events"] = tag_push_events
__props__.__dict__["username"] = username
__props__.__dict__["webhook"] = webhook
__props__.__dict__["wiki_page_channel"] = wiki_page_channel
__props__.__dict__["wiki_page_events"] = wiki_page_events
return ServiceSlack(resource_name, opts=opts, __props__=__props__)
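    # Hedged usage sketch (the resource name, id, and project below are illustrative,
    # not taken from this module): an already-provisioned integration can be adopted
    # into a program with something like
    #   slack = ServiceSlack.get("imported-slack", id="1234",
    #                            project="example/project")
    # which rebuilds a _ServiceSlackState from the supplied properties and re-wraps it.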
@property
@pulumi.getter(name="branchesToBeNotified")
def branches_to_be_notified(self) -> pulumi.Output[str]:
"""
Branches to send notifications for. Valid options are "all", "default", "protected", and "default_and_protected".
"""
return pulumi.get(self, "branches_to_be_notified")
@property
@pulumi.getter(name="confidentialIssueChannel")
def confidential_issue_channel(self) -> pulumi.Output[Optional[str]]:
"""
The name of the channel to receive confidential issue events notifications.
"""
return pulumi.get(self, "confidential_issue_channel")
@property
@pulumi.getter(name="confidentialIssuesEvents")
def confidential_issues_events(self) -> pulumi.Output[bool]:
"""
Enable notifications for confidential issues events.
"""
return pulumi.get(self, "confidential_issues_events")
@property
@pulumi.getter(name="confidentialNoteEvents")
def confidential_note_events(self) -> pulumi.Output[bool]:
"""
Enable notifications for confidential note events.
"""
return pulumi.get(self, "confidential_note_events")
@property
@pulumi.getter(name="issueChannel")
def issue_channel(self) -> pulumi.Output[Optional[str]]:
"""
The name of the channel to receive issue events notifications.
"""
return pulumi.get(self, "issue_channel")
@property
@pulumi.getter(name="issuesEvents")
def issues_events(self) -> pulumi.Output[bool]:
"""
Enable notifications for issues events.
"""
return pulumi.get(self, "issues_events")
@property
@pulumi.getter(name="jobEvents")
def job_events(self) -> pulumi.Output[bool]:
return pulumi.get(self, "job_events")
@property
@pulumi.getter(name="mergeRequestChannel")
def merge_request_channel(self) -> pulumi.Output[Optional[str]]:
"""
The name of the channel to receive merge request events notifications.
"""
return pulumi.get(self, "merge_request_channel")
@property
@pulumi.getter(name="mergeRequestsEvents")
def merge_requests_events(self) -> pulumi.Output[bool]:
"""
Enable notifications for merge requests events.
"""
return pulumi.get(self, "merge_requests_events")
@property
@pulumi.getter(name="noteChannel")
def note_channel(self) -> pulumi.Output[Optional[str]]:
"""
The name of the channel to receive note events notifications.
"""
return pulumi.get(self, "note_channel")
@property
@pulumi.getter(name="noteEvents")
def note_events(self) -> pulumi.Output[bool]:
"""
Enable notifications for note events.
"""
return pulumi.get(self, "note_events")
@property
@pulumi.getter(name="notifyOnlyBrokenPipelines")
def notify_only_broken_pipelines(self) -> pulumi.Output[bool]:
"""
Send notifications for broken pipelines.
| |
assert data[0]['file_type'] == 'operator'
assert data[0]['compressed_size_bytes'] == 46445454332
assert data[0]['is_valid_zip']
assert data[0]['is_valid_format']
assert data[0]['md5'] == 'a0eebc99-9c0b-4ef8-bb6d-6bb9bd380a11'
else: # api version 2.0
rv = flask_app.get(url_for('{0}.catalog_get_api'.format(api_version), modified_since='20170101'))
assert rv.status_code == 200
data = json.loads(rv.data.decode('utf-8'))['files']
assert len(data) == 1
assert data[0]['filename'] == 'operator_file.zip'
assert data[0]['file_type'] == 'operator'
assert data[0]['compressed_size_bytes'] == 46445454332
assert data[0]['is_valid_zip']
assert data[0]['is_valid_format']
assert data[0]['md5'] == 'a0eebc99-9c0b-4ef8-bb6d-6bb9bd380a11'
def test_valid_cataloged_since(flask_app, db_conn, api_version):
"""Verify the API returns 200 and valid JSON containing appropriate files.
    The files returned should have been cataloged at or after the specified time.
"""
_dummy_data_generator(db_conn)
if api_version == 'v1':
rv = flask_app.get(url_for('{0}.catalog_api'.format(api_version), cataloged_since='20171101'))
assert rv.status_code == 200
data = json.loads(rv.data.decode('utf-8'))
assert len(data) == 1
assert data[0]['filename'] == 'pairing_file.zip'
assert data[0]['file_type'] == 'pairing_list'
assert data[0]['compressed_size_bytes'] == 1564624
assert data[0]['is_valid_zip']
assert not data[0]['is_valid_format']
assert data[0]['md5'] == 'd0481db2-bdc8-43da-a69e-ea7006bd7a7c'
else: # api version 2.0
rv = flask_app.get(url_for('{0}.catalog_get_api'.format(api_version), cataloged_since='20171101'))
assert rv.status_code == 200
data = json.loads(rv.data.decode('utf-8'))['files']
assert len(data) == 1
assert data[0]['filename'] == 'pairing_file.zip'
assert data[0]['file_type'] == 'pairing_list'
assert data[0]['compressed_size_bytes'] == 1564624
assert data[0]['is_valid_zip']
assert not data[0]['is_valid_format']
assert data[0]['md5'] == 'd0481db2-bdc8-43da-a69e-ea7006bd7a7c'
def test_api_with_no_arguments(flask_app, db_conn, api_version):
"""Verify the API returns 200 and all the specified files."""
_dummy_data_generator(db_conn)
if api_version == 'v1':
rv = flask_app.get(url_for('{0}.catalog_api'.format(api_version)))
assert rv.status_code == 200
data = json.loads(rv.data.decode('utf-8'))
assert len(data) == 4
assert b'a0eebc99-9c0b-4ef8-bb6d-6bb9bd380a11' in rv.data
assert b'40a37f83-21cb-4ab4-bba5-4032b1347273' in rv.data
assert b'014a3782-9826-4665-8830-534013b59cc5' in rv.data
assert b'd0481db2-bdc8-43da-a69e-ea7006bd7a7c' in rv.data
else: # api version 2.0
rv = flask_app.get(url_for('{0}.catalog_get_api'.format(api_version)))
assert rv.status_code == 200
data = json.loads(rv.data.decode('utf-8'))['files']
assert len(data) == 4
assert b'a0eebc99-9c0b-4ef8-bb6d-6bb9bd380a11' in rv.data
assert b'40a37f83-21cb-4ab4-bba5-4032b1347273' in rv.data
assert b'014a3782-9826-4665-8830-534013b59cc5' in rv.data
assert b'd0481db2-bdc8-43da-a69e-ea7006bd7a7c' in rv.data
def test_api_with_multiple_arguments(flask_app, db_conn, api_version):
"""Verify the API returns 200 and valid JSON containing the appropriate files.
The files returned should satisfy all the specified arguments.
"""
_dummy_data_generator(db_conn)
if api_version == 'v1':
rv = flask_app.get(url_for('{0}.catalog_api'.format(api_version), modified_since='20160901',
is_valid_zip=True))
assert rv.status_code == 200
data = json.loads(rv.data.decode('utf-8'))
assert len(data) == 2
assert data[0]['filename'] == 'stolen_file.zip'
assert data[0]['file_type'] == 'stolen_list'
assert data[0]['compressed_size_bytes'] == 54543
assert data[0]['is_valid_zip']
assert data[0]['is_valid_format']
assert data[0]['md5'] == '014a3782-9826-4665-8830-534013b59cc5'
assert data[1]['filename'] == 'operator_file.zip'
assert data[1]['file_type'] == 'operator'
assert data[1]['compressed_size_bytes'] == 46445454332
assert data[1]['is_valid_zip']
assert data[1]['is_valid_format']
assert data[1]['md5'] == 'a0eebc99-9c0b-4ef8-bb6d-6bb9bd380a11'
else: # api version 2.0
rv = flask_app.get(url_for('{0}.catalog_get_api'.format(api_version), modified_since='20160901',
is_valid_zip=True))
assert rv.status_code == 200
data = json.loads(rv.data.decode('utf-8'))['files']
assert len(data) == 2
assert data[0]['filename'] == 'stolen_file.zip'
assert data[0]['file_type'] == 'stolen_list'
assert data[0]['compressed_size_bytes'] == 54543
assert data[0]['is_valid_zip']
assert data[0]['is_valid_format']
assert data[0]['md5'] == '014a3782-9826-4665-8830-534013b59cc5'
assert data[1]['filename'] == 'operator_file.zip'
assert data[1]['file_type'] == 'operator'
assert data[1]['compressed_size_bytes'] == 46445454332
assert data[1]['is_valid_zip']
assert data[1]['is_valid_format']
assert data[1]['md5'] == 'a0eebc99-9c0b-4ef8-bb6d-6bb9bd380a11'
def test_put_not_allowed(flask_app, db_conn, tmpdir, logger, api_version):
"""Verify the Catalog API does not support HTTP PUT and returns HTTP 405 METHOD NOT ALLOWED."""
if api_version == 'v1':
rv = flask_app.put(url_for('{0}.catalog_api'.format(api_version), file_type='operator'))
assert rv.status_code == 405
assert b'The method is not allowed for the requested URL' in rv.data
else: # api version 2.0
rv = flask_app.put(url_for('{0}.catalog_get_api'.format(api_version), file_type='operator'))
assert rv.status_code == 405
assert b'The method is not allowed for the requested URL' in rv.data
def test_post_not_allowed(flask_app, db_conn, tmpdir, logger, api_version):
"""Verify the Catalog API does not support HTTP POST and returns HTTP 405 METHOD NOT ALLOWED."""
if api_version == 'v1':
rv = flask_app.post(url_for('{0}.catalog_api'.format(api_version), file_type='operator'))
assert rv.status_code == 405
assert b'The method is not allowed for the requested URL' in rv.data
else: # api version 2.0
rv = flask_app.post(url_for('{0}.catalog_get_api'.format(api_version), file_type='operator'))
assert rv.status_code == 405
assert b'The method is not allowed for the requested URL' in rv.data
def test_delete_not_allowed(flask_app, db_conn, tmpdir, logger, api_version):
"""Verify the Catalog API does not support HTTP DELETE and returns HTTP 405 METHOD NOT ALLOWED."""
if api_version == 'v1':
rv = flask_app.delete(url_for('{0}.catalog_api'.format(api_version), file_type='operator'))
assert rv.status_code == 405
assert b'The method is not allowed for the requested URL' in rv.data
else: # api version 2.0
rv = flask_app.delete(url_for('{0}.catalog_get_api'.format(api_version), file_type='operator'))
assert rv.status_code == 405
assert b'The method is not allowed for the requested URL' in rv.data
def test_import_status(db_conn, mocked_config, tmpdir, monkeypatch, flask_app, api_version,
logger, mocked_statsd, metadata_db_conn):
"""Test import status info in catalog api.
- import_status:
        - ever_imported_successfully: true or false
        - most_recent_import: status
    Generate an MD5 hash of the file during import and store it in job_metadata.
    Then, when cataloging, look at the most recent import job in the job_metadata table where
    the file had the same MD5 hash and look up the status.
    ever_imported_successfully will be true if there is any successful import - joining on the file's md5
    most_recent_import returns the status of the most recent import - joining on the file's md5
"""
# Step 1
# try to import something successfully to get most_recent_import = success
# and test the md5 created in the abstract importer using dirbs-import cli command
here = path.abspath(path.dirname(__file__))
data_dir = path.join(here, 'unittest_data/operator')
valid_csv_operator_data_file_name = 'operator1_20160701_20160731.csv'
valid_csv_operator_data_file = path.join(data_dir, valid_csv_operator_data_file_name)
# create a zip file inside a temp dir
valid_zip_operator_data_file_path = \
str(tmpdir.join('operator1_20160701_20160731.zip'))
with zipfile.ZipFile(valid_zip_operator_data_file_path, 'w') as valid_csv_operator_data_file_zfile:
# zipfile write() method supports an extra argument (arcname) which is the
# archive name to be stored in the zip file.
valid_csv_operator_data_file_zfile.write(valid_csv_operator_data_file, valid_csv_operator_data_file_name)
runner = CliRunner()
result = runner.invoke(dirbs_import_cli, ['operator', 'Operator1', '--disable-rat-import',
'--disable-region-check', '--disable-home-check',
valid_zip_operator_data_file_path],
obj={'APP_CONFIG': mocked_config})
assert result.exit_code == 0
catalog_config_dict = {
'prospectors': [
{
'file_type': 'operator',
'paths': [valid_zip_operator_data_file_path],
'schema_filename': 'OperatorImportSchema_v2.csvs'
}
],
'perform_prevalidation': False
}
catalog_config = CatalogConfig(ignore_env=True, **catalog_config_dict)
monkeypatch.setattr(mocked_config, 'catalog_config', catalog_config)
# Run dirbs-catalog using db args from the temp postgres instance
runner = CliRunner()
result = runner.invoke(dirbs_catalog_cli, obj={'APP_CONFIG': mocked_config})
assert result.exit_code == 0
# call apis
if api_version == 'v1':
rv = flask_app.get(url_for('{0}.catalog_api'.format(api_version)))
assert rv.status_code == 200
data = json.loads(rv.data.decode('utf-8'))
assert data[0]['import_status']['most_recent_import'] == 'success'
assert data[0]['import_status']['ever_imported_successfully'] is True
with db_conn.cursor() as cursor:
cursor.execute('SELECT md5 FROM data_catalog')
md5 = cursor.fetchone().md5
# Step 2
with db_conn.cursor() as cursor:
cursor.execute('TRUNCATE TABLE job_metadata')
# status error
job_metadata_importer(db_conn=db_conn, command='dirbs-import',
run_id=10, subcommand='operator',
status='error',
start_time='2017-08-15 01:15:39.54785+00',
extra_metadata={'input_file_md5': md5})
# status in progress, most recent
job_metadata_importer(db_conn=db_conn, command='dirbs-import', run_id=11, subcommand='operator',
status='running',
start_time='2017-08-15 01:15:40.54785+00',
extra_metadata={'input_file_md5': md5})
# call API
rv = flask_app.get(url_for('{0}.catalog_api'.format(api_version)))
assert rv.status_code == 200
data = json.loads(rv.data.decode('utf-8'))
assert data[0]['import_status']['most_recent_import'] == 'running'
assert data[0]['import_status']['ever_imported_successfully'] is False
assert len(data) == 1
# Step 3 try a different order
with db_conn.cursor() as cursor:
cursor.execute('TRUNCATE TABLE job_metadata')
job_metadata_importer(db_conn=db_conn, command='dirbs-import', run_id=13, subcommand='gsma', status='success',
start_time='2017-08-15 01:15:39.54785+00',
extra_metadata={'input_file_md5': md5})
# status in progress, most recent
job_metadata_importer(db_conn=db_conn, command='dirbs-import', run_id=14, subcommand='gsma', status='error',
start_time='2017-08-15 01:15:40.54785+00',
extra_metadata={'input_file_md5': md5})
# call API
rv = flask_app.get(url_for('{0}.catalog_api'.format(api_version)))
assert rv.status_code == 200
data = json.loads(rv.data.decode('utf-8'))
assert data[0]['import_status']['most_recent_import'] == 'error'
assert data[0]['import_status']['ever_imported_successfully'] is True
assert len(data) == 1
else: # api version 2.0
rv = flask_app.get(url_for('{0}.catalog_get_api'.format(api_version)))
assert rv.status_code == 200
data = json.loads(rv.data.decode('utf-8'))['files']
assert data[0]['import_status']['most_recent_import'] == 'success'
assert data[0]['import_status']['ever_imported_successfully'] is True
with db_conn.cursor() as cursor:
cursor.execute('SELECT md5 FROM data_catalog')
md5 = cursor.fetchone().md5
# Step 2
with db_conn.cursor() as cursor:
cursor.execute('TRUNCATE TABLE job_metadata')
# status error
job_metadata_importer(db_conn=db_conn, command='dirbs-import',
run_id=10, subcommand='operator',
status='error',
start_time='2017-08-15 01:15:39.54785+00',
extra_metadata={'input_file_md5': md5})
# status in progress, most recent
job_metadata_importer(db_conn=db_conn, command='dirbs-import', run_id=11, subcommand='operator',
status='running',
start_time='2017-08-15 01:15:40.54785+00',
extra_metadata={'input_file_md5': md5})
# call API
rv = flask_app.get(url_for('{0}.catalog_get_api'.format(api_version)))
assert rv.status_code == 200
data = json.loads(rv.data.decode('utf-8'))['files']
assert data[0]['import_status']['most_recent_import'] == 'running'
assert data[0]['import_status']['ever_imported_successfully'] is False
assert len(data) == 1
# Step 3 try a different order
with db_conn.cursor() as cursor:
cursor.execute('TRUNCATE TABLE job_metadata')
job_metadata_importer(db_conn=db_conn, command='dirbs-import', run_id=13, subcommand='gsma', status='success',
start_time='2017-08-15 01:15:39.54785+00',
extra_metadata={'input_file_md5': md5})
# status in progress, most recent
job_metadata_importer(db_conn=db_conn, command='dirbs-import', run_id=14, subcommand='gsma', status='error',
start_time='2017-08-15 01:15:40.54785+00',
extra_metadata={'input_file_md5': md5})
# call API
rv = flask_app.get(url_for('{0}.catalog_get_api'.format(api_version)))
assert rv.status_code == 200
data = json.loads(rv.data.decode('utf-8'))['files']
assert data[0]['import_status']['most_recent_import'] == 'error'
assert data[0]['import_status']['ever_imported_successfully'] is True
assert len(data) == 1
def test_num_records_uncompressed_size(mocked_config, tmpdir, monkeypatch,
flask_app, api_version):
"""Test import status info in catalog api.
- num_records: the number of lines in the file minus the header.
- uncompressed_size_bytes.
"""
# import operator status success
here = path.abspath(path.dirname(__file__))
data_dir = path.join(here, 'unittest_data/operator')
valid_csv_operator_data_file_name = 'operator1_20160701_20160731.csv'
valid_csv_operator_data_file = path.join(data_dir, valid_csv_operator_data_file_name)
# create a zip file inside a temp dir
valid_zip_operator_data_file_path = \
str(tmpdir.join('operator1_20160701_20160731.zip'))
with zipfile.ZipFile(valid_zip_operator_data_file_path, 'w',
compression=zipfile.ZIP_DEFLATED) as valid_csv_operator_data_file_zfile:
# zipfile write() method supports an extra argument (arcname) which is the
# archive name to be stored in the zip file.
valid_csv_operator_data_file_zfile.write(valid_csv_operator_data_file, valid_csv_operator_data_file_name)
catalog_config_dict = {
'prospectors': [
{
'file_type': 'operator',
'paths': [valid_zip_operator_data_file_path],
'schema_filename': 'OperatorImportSchema_v2.csvs'
}
],
'perform_prevalidation': False
}
catalog_config = CatalogConfig(ignore_env=True, **catalog_config_dict)
    monkeypatch.setattr(mocked_config, 'catalog_config', catalog_config)
Optional[str], # pylint: disable=unused-argument
data: Optional[Mapping[str, Any]], # pylint: disable=unused-argument
**_kwargs: Any,
) -> Path:
path = Path(value)
if not path.exists():
raise ValidationError(f'Given path {value} does not exist')
if not path.is_dir():
raise ValidationError(f'Given path {value} is not a directory')
return path
class FileField(fields.Field):
def _deserialize(
self,
value: str,
attr: Optional[str], # pylint: disable=unused-argument
data: Optional[Mapping[str, Any]], # pylint: disable=unused-argument
**_kwargs: Any,
) -> Path:
if not isinstance(value, str):
raise ValidationError('Provided non string type for filepath')
path = Path(value)
if not path.exists():
raise ValidationError(f'Given path {value} does not exist')
if not path.is_file():
raise ValidationError(f'Given path {value} is not a file')
return path
class AsyncTasksQuerySchema(Schema):
task_id = fields.Integer(strict=True, missing=None)
class TradesQuerySchema(Schema):
from_timestamp = TimestampField(missing=Timestamp(0))
to_timestamp = TimestampField(missing=ts_now)
location = LocationField(missing=None)
class TradeSchema(Schema):
timestamp = TimestampField(required=True)
location = LocationField(required=True)
pair = TradePairField(required=True)
trade_type = TradeTypeField(required=True)
amount = PositiveAmountField(required=True)
rate = PriceField(required=True)
fee = FeeField(required=True)
fee_currency = AssetField(required=True)
link = fields.String(missing='')
notes = fields.String(missing='')
class FiatBalancesSchema(Schema):
balances = fields.Dict(
keys=FiatAssetField(),
values=PositiveOrZeroAmountField(),
required=True,
)
class ManuallyTrackedBalanceSchema(Schema):
asset = AssetField(required=True)
label = fields.String(required=True)
amount = PositiveAmountField(required=True)
location = LocationField(required=True)
tags = fields.List(fields.String(), missing=None)
@post_load # type: ignore
def make_manually_tracked_balances( # pylint: disable=no-self-use
self,
data: Dict[str, Any],
**_kwargs: Any,
) -> ManuallyTrackedBalance:
return ManuallyTrackedBalance(**data)
class ManuallyTrackedBalancesSchema(Schema):
balances = fields.List(fields.Nested(ManuallyTrackedBalanceSchema), required=True)
class ManuallyTrackedBalancesDeleteSchema(Schema):
labels = fields.List(fields.String(required=True), required=True)
class TradePatchSchema(TradeSchema):
trade_id = fields.String(required=True)
class TradeDeleteSchema(Schema):
trade_id = fields.String(required=True)
class TagSchema(Schema):
name = fields.String(required=True)
description = fields.String(missing=None)
background_color = ColorField(required=True)
foreground_color = ColorField(required=True)
class TagEditSchema(Schema):
name = fields.String(required=True)
description = fields.String(missing=None)
background_color = ColorField(missing=None)
foreground_color = ColorField(missing=None)
class TagDeleteSchema(Schema):
name = fields.String(required=True)
class ModifiableSettingsSchema(Schema):
"""This is the Schema for the settings that can be modified via the API"""
premium_should_sync = fields.Bool(missing=None)
include_crypto2crypto = fields.Bool(missing=None)
anonymized_logs = fields.Bool(missing=None)
submit_usage_analytics = fields.Bool(missing=None)
ui_floating_precision = fields.Integer(
strict=True,
validate=webargs.validate.Range(
min=0,
max=8,
error='Floating numbers precision in the UI must be between 0 and 8',
),
missing=None,
)
taxfree_after_period = TaxFreeAfterPeriodField(missing=None)
balance_save_frequency = fields.Integer(
strict=True,
validate=webargs.validate.Range(
min=1,
error='The number of hours after which balances should be saved should be >= 1',
),
missing=None,
)
include_gas_costs = fields.Bool(missing=None)
# TODO: Add some validation to this field
historical_data_start = fields.String(missing=None)
# TODO: Add some validation to this field
# even though it gets validated since we try to connect to it
eth_rpc_endpoint = fields.String(missing=None)
main_currency = FiatAssetField(missing=None)
# TODO: Add some validation to this field
date_display_format = fields.String(missing=None)
kraken_account_type = KrakenAccountTypeField(missing=None)
active_modules = fields.List(fields.String(), missing=None)
@validates_schema # type: ignore
def validate_settings_schema( # pylint: disable=no-self-use
self,
data: Dict[str, Any],
**_kwargs: Any,
) -> None:
if data['active_modules'] is not None:
for module in data['active_modules']:
if module not in AVAILABLE_MODULES:
raise ValidationError(
message=f'{module} is not a valid module',
field_name='active_modules',
)
@post_load # type: ignore
def transform_data( # pylint: disable=no-self-use
self,
data: Dict[str, Any],
**_kwargs: Any,
) -> Any:
return ModifiableDBSettings(
premium_should_sync=data['premium_should_sync'],
include_crypto2crypto=data['include_crypto2crypto'],
anonymized_logs=data['anonymized_logs'],
ui_floating_precision=data['ui_floating_precision'],
taxfree_after_period=data['taxfree_after_period'],
balance_save_frequency=data['balance_save_frequency'],
include_gas_costs=data['include_gas_costs'],
historical_data_start=data['historical_data_start'],
eth_rpc_endpoint=data['eth_rpc_endpoint'],
main_currency=data['main_currency'],
date_display_format=data['date_display_format'],
submit_usage_analytics=data['submit_usage_analytics'],
kraken_account_type=data['kraken_account_type'],
active_modules=data['active_modules'],
)
class EditSettingsSchema(Schema):
settings = fields.Nested(ModifiableSettingsSchema, required=True)
class BaseUserSchema(Schema):
name = fields.String(required=True)
password = fields.String(required=True)
class UserActionSchema(Schema):
name = fields.String(required=True)
# All the fields below are not needed for logout/modification so are not required=True
password = fields.String(missing=None)
sync_approval = fields.String(
missing='unknown',
validate=webargs.validate.OneOf(choices=('unknown', 'yes', 'no')),
)
action = fields.String(
validate=webargs.validate.OneOf(choices=('login', 'logout')),
missing=None,
)
premium_api_key = fields.String(missing='')
premium_api_secret = fields.String(missing='')
@validates_schema # type: ignore
def validate_user_action_schema( # pylint: disable=no-self-use
self,
data: Dict[str, Any],
**_kwargs: Any,
) -> None:
if data['action'] == 'login':
if data['password'] is None:
raise ValidationError('Missing password field for login')
elif data['action'] is None:
if data['premium_api_key'] == '' or data['premium_api_secret'] == '':
raise ValidationError(
'Without an action premium api key and secret must be provided',
)
class UserPasswordChangeSchema(Schema):
name = fields.String(required=True)
current_password = fields.String(required=True)
new_password = fields.String(required=True)
class UserPremiumKeyRemoveSchema(Schema):
name = fields.String(required=True)
class NewUserSchema(BaseUserSchema):
premium_api_key = fields.String(missing='')
premium_api_secret = fields.String(missing='')
initial_settings = fields.Nested(ModifiableSettingsSchema, missing=None)
class AllBalancesQuerySchema(Schema):
async_query = fields.Boolean(missing=False)
save_data = fields.Boolean(missing=False)
ignore_cache = fields.Boolean(missing=False)
class ExternalServiceSchema(Schema):
name = ExternalServiceNameField(required=True)
api_key = fields.String(required=True)
@post_load # type: ignore
def make_external_service( # pylint: disable=no-self-use
self,
data: Dict[str, Any],
**_kwargs: Any,
) -> ExternalServiceApiCredentials:
"""Used when encoding an external resource given in via the API"""
return ExternalServiceApiCredentials(service=data['name'], api_key=data['api_key'])
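# Hedged usage sketch (the service name and key below are illustrative): loading
# request data through this schema yields the credentials tuple directly, e.g.
#   creds = ExternalServiceSchema().load({'name': 'etherscan', 'api_key': 'XYZ'})
# returns an ExternalServiceApiCredentials because of the @post_load hook above.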
class ExternalServicesResourceAddSchema(Schema):
services = fields.List(fields.Nested(ExternalServiceSchema), required=True)
class ExternalServicesResourceDeleteSchema(Schema):
services = fields.List(ExternalServiceNameField(), required=True)
class ExchangesResourceAddSchema(Schema):
name = ExchangeNameField(required=True)
api_key = ApiKeyField(required=True)
api_secret = ApiSecretField(required=True)
passphrase = fields.String(missing=None)
class ExchangesResourceRemoveSchema(Schema):
name = ExchangeNameField(required=True)
class ExchangeBalanceQuerySchema(Schema):
name = ExchangeNameField(missing=None)
async_query = fields.Boolean(missing=False)
ignore_cache = fields.Boolean(missing=False)
class ExchangeTradesQuerySchema(Schema):
name = ExchangeNameField(missing=None)
from_timestamp = TimestampField(missing=Timestamp(0))
to_timestamp = TimestampField(missing=ts_now)
async_query = fields.Boolean(missing=False)
class BlockchainBalanceQuerySchema(Schema):
blockchain = BlockchainField(missing=None)
async_query = fields.Boolean(missing=False)
ignore_cache = fields.Boolean(missing=False)
class StatisticsAssetBalanceSchema(Schema):
asset = AssetField(required=True)
from_timestamp = TimestampField(missing=Timestamp(0))
to_timestamp = TimestampField(missing=ts_now)
class StatisticsValueDistributionSchema(Schema):
distribution_by = fields.String(
required=True,
validate=webargs.validate.OneOf(choices=('location', 'asset')),
)
class HistoryProcessingSchema(Schema):
from_timestamp = TimestampField(missing=Timestamp(0))
to_timestamp = TimestampField(missing=ts_now)
async_query = fields.Boolean(missing=False)
class HistoryExportingSchema(Schema):
directory_path = DirectoryField(required=True)
class EthTokensSchema(Schema):
eth_tokens = fields.List(EthereumTokenAssetField(), required=True)
async_query = fields.Boolean(missing=False)
class BlockchainAccountDataSchema(Schema):
address = fields.String(required=True)
label = fields.String(missing=None)
tags = fields.List(fields.String(), missing=None)
class BlockchainAccountsGetSchema(Schema):
blockchain = BlockchainField(required=True)
def _validate_blockchain_account_schemas(
data: Dict[str, Any],
address_getter: Callable,
) -> None:
"""Validates schema input for the PUT/PATCH/DELETE on blockchain account data"""
# Make sure no duplicates addresses are given
given_addresses = set()
# Make sure ethereum addresses are checksummed
if data['blockchain'] == SupportedBlockchain.ETHEREUM:
for account_data in data['accounts']:
address_string = address_getter(account_data)
if not address_string.endswith('.eth'):
# Make sure that given value is an ethereum address
try:
address = to_checksum_address(address_string)
except (ValueError, TypeError):
raise ValidationError(
f'Given value {address_string} is not an ethereum address',
field_name='address',
)
else:
                # else it's an ENS name and will be checked in the transformation step, not here
address = address_string
if address in given_addresses:
raise ValidationError(
f'Address {address} appears multiple times in the request data',
field_name='address',
)
given_addresses.add(address)
# Make sure bitcoin addresses are valid
elif data['blockchain'] == SupportedBlockchain.BITCOIN:
for account_data in data['accounts']:
address = address_getter(account_data)
if not is_valid_btc_address(address):
raise ValidationError(
f'Given value {address} is not a valid bitcoin address',
field_name='address',
)
if address in given_addresses:
raise ValidationError(
f'Address {address} appears multiple times in the request data',
field_name='address',
)
given_addresses.add(address)
def _transform_eth_address(
ethereum: EthereumManager, given_address: str) -> ChecksumEthAddress:
try:
address = to_checksum_address(given_address)
except ValueError:
# Validation will only let .eth names come here.
# So let's see if it resolves to anything
resolved_address = ethereum.ens_lookup(given_address)
if resolved_address is None:
raise ValidationError(
f'Given ENS address {given_address} could not be resolved',
field_name='address',
)
else:
address = to_checksum_address(resolved_address)
log.info(f'Resolved ENS {given_address} to {address}')
return address
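# Hedged behaviour sketch (addresses and names below are illustrative): a plain hex
# address such as '0xde0b295669a9fd93d5f28d9ec85e40f4cb697bae' is checksummed
# directly by _transform_eth_address, while a name like 'example.eth' fails
# to_checksum_address, is resolved through ethereum.ens_lookup(), and only the
# resolved address is checksummed and returned.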
class BlockchainAccountsPatchSchema(Schema):
blockchain = BlockchainField(required=True)
accounts = fields.List(fields.Nested(BlockchainAccountDataSchema), required=True)
def __init__(self, ethereum_manager: EthereumManager):
super().__init__()
self.ethereum_manager = ethereum_manager
@validates_schema # type: ignore
def validate_schema( # pylint: disable=no-self-use
self,
data: Dict[str, Any],
**_kwargs: Any,
) -> None:
_validate_blockchain_account_schemas(data, lambda x: x['address'])
@post_load # type: ignore
def transform_data( # pylint: disable=no-self-use
self,
data: Dict[str, Any],
**_kwargs: Any,
) -> Any:
if data['blockchain'] == SupportedBlockchain.ETHEREUM:
for idx, account in enumerate(data['accounts']):
data['accounts'][idx]['address'] = _transform_eth_address(
ethereum=self.ethereum_manager,
given_address=account['address'],
)
return data
class BlockchainAccountsPutSchema(BlockchainAccountsPatchSchema):
async_query = fields.Boolean(missing=False)
class BlockchainAccountsDeleteSchema(Schema):
blockchain = BlockchainField(required=True)
accounts = fields.List(fields.String(), required=True)
async_query = fields.Boolean(missing=False)
def __init__(self, ethereum_manager: EthereumManager):
super().__init__()
self.ethereum_manager = ethereum_manager
@validates_schema # type: ignore
def validate_blockchain_accounts_delete_schema( # pylint: disable=no-self-use
self,
data: Dict[str, Any],
**_kwargs: Any,
) -> None:
_validate_blockchain_account_schemas(data, lambda x: x)
@post_load # type: ignore
def transform_data( # pylint: disable=no-self-use
self,
data: Dict[str, Any],
**_kwargs: Any,
) -> Any:
if data['blockchain'] == SupportedBlockchain.ETHEREUM:
data['accounts'] = [
_transform_eth_address(self.ethereum_manager, x) for x in data['accounts']
]
return data
class IgnoredAssetsSchema(Schema):
assets = fields.List(AssetField(), required=True)
class QueriedAddressesSchema(Schema):
module = fields.String(
required=True,
validate=webargs.validate.OneOf(choices=AVAILABLE_MODULES),
)
address = EthereumAddressField(required=True)
class DataImportSchema(Schema):
source = fields.String(
required=True,
validate=webargs.validate.OneOf(choices=('cointracking.info',)),
)
filepath = FileField(required=True)
class FiatExchangeRatesSchema(Schema):
currencies = DelimitedOrNormalList(FiatAssetField(), missing=None)
class AsyncQueryArgumentSchema(Schema):
"""A schema for getters that only have one argument enabling async query"""
async_query = fields.Boolean(missing=False)
class AsyncQueryResetDBSchema(AsyncQueryArgumentSchema):
"""A schema for getters that have 2 arguments.
    One to enable async querying and another to force reset DB data by querying everything again"""
reset_db_data = fields.Boolean(missing=False)
class WatcherSchema(Schema):
type = fields.String(required=True)
args = fields.Dict(required=True)
class WatchersAddSchema(Schema):
"""The schema for adding a watcher.
No validation here since it happens server side and no need to duplicate code
TODO: When we have common libraries perhaps do validation here too to
avoid potential server roundtrip for nothing
"""
watchers = fields.List(fields.Nested(WatcherSchema), required=True)
class WatcherForEditingSchema(WatcherSchema):
identifier = fields.String(required=True)
class WatchersEditSchema(WatchersAddSchema):
"""The schema for editing a watcher.
    No validation here since it happens server side and no need to duplicate code"""
        # Objects are equal when all are requested
self.assertEqual(n_random_seqs(aln1, 4), aln1)
# Objects are not equal when subset are requested
self.assertNotEqual(n_random_seqs(aln1, 3), aln1)
# In 1000 iterations, we get at least one different alignment --
# this tests the random selection
different = False
new_aln = n_random_seqs(aln1, 2)
for i in range(1000):
new_aln2 = n_random_seqs(aln1, 2)
if new_aln != new_aln2:
different = True
break
self.assertTrue(different)
class AncestorCoevolve(TestCase):
"""Tests of the ancestral state method for detecting coevolution"""
def setUp(self):
""" """
# t1, ancestral_states1, and aln1_* are used to test that when
# alternate seqs are used with the same tree and ancestral_states,
# the results vary when appropriate
self.t1 = make_tree(
treestring="((A:0.5,B:0.5):0.5,(C:0.5,(D:0.5,E:0.5):0.5):0.5);"
)
self.ancestral_states1 = ArrayAlignment(
data={"root": "AAA", "edge.0": "AAA", "edge.1": "AAA", "edge.2": "AAA"},
moltype=PROTEIN,
)
self.ancestral_states1_w_gaps = ArrayAlignment(
data={"root": "AAA", "edge.0": "AAA", "edge.1": "A-A", "edge.2": "AA-"},
moltype=PROTEIN,
)
# no correlated changes count
self.aln1_1 = ArrayAlignment(
data={"A": "AAC", "B": "AAD", "C": "AAA", "D": "AAE", "E": "AFA"},
moltype=PROTEIN,
)
# 1 correlated change count
self.aln1_2 = ArrayAlignment(
data={"A": "AAC", "B": "AAD", "C": "AAA", "D": "AEE", "E": "AFF"},
moltype=PROTEIN,
)
# 1 different correlated change count
self.aln1_3 = ArrayAlignment(
data={"A": "AAC", "B": "AAD", "C": "AAA", "D": "AGE", "E": "AFH"},
moltype=PROTEIN,
)
# 3 correlated change counts
self.aln1_4 = ArrayAlignment(
data={"A": "AAC", "B": "AGD", "C": "AAA", "D": "AGE", "E": "AFH"},
moltype=PROTEIN,
)
# 8 correlated change counts
self.aln1_5 = ArrayAlignment(
data={"A": "YYC", "B": "HGD", "C": "AAA", "D": "AGE", "E": "AFH"},
moltype=PROTEIN,
)
self.aln1_w_gaps = ArrayAlignment(
data={"A": "AAC", "B": "AAD", "C": "AAA", "D": "AG-", "E": "A-H"},
moltype=PROTEIN,
)
# t2, ancestral_states2_*, and aln2 are used to test that when
# alternate ancestral states are used with the same aln and tree,
# the results vary when appropriate
self.t2 = make_tree(treestring="(A:0.5,B:0.5,C:0.5);")
self.ancestral_states2_1 = ArrayAlignment(data={"root": "AA"}, moltype=PROTEIN)
self.ancestral_states2_2 = ArrayAlignment(data={"root": "CC"}, moltype=PROTEIN)
self.ancestral_states2_3 = ArrayAlignment(data={"root": "EF"}, moltype=PROTEIN)
self.aln2 = ArrayAlignment(
data={"A": "AA", "B": "CC", "C": "CA"}, moltype=PROTEIN
)
# t3_*, ancestral_states3, and aln3 are used to test that when
# alternate trees are used with the same aln and ancestral_states,
# the results vary when appropriate
self.t3_1 = make_tree(treestring="(A:0.5,(B:0.5,C:0.5):0.5);")
self.t3_2 = make_tree(treestring="((A:0.5,B:0.5):0.5,C:0.5);")
self.ancestral_states3 = ArrayAlignment(
data={"root": "CC", "edge.0": "AD"}, moltype=PROTEIN
)
self.aln3 = ArrayAlignment(
data={"A": "AC", "B": "CA", "C": "CC"}, moltype=PROTEIN
)
def test_validate_ancestral_seqs_invalid(self):
"""validate_ancestral_seqs: ValueError on incompatible anc. seqs & tree"""
# edge missing
aln = ArrayAlignment(data={"A": "AC", "B": "CA", "C": "CC"}, moltype=PROTEIN)
self.assertRaises(
ValueError,
validate_ancestral_seqs,
aln,
tree=make_tree(treestring="((A:0.5,B:0.5):0.5,C:0.5);"),
ancestral_seqs=ArrayAlignment(data={"root": "AA"}, moltype=PROTEIN),
)
# root missing
self.assertRaises(
ValueError,
validate_ancestral_seqs,
aln,
tree=make_tree(treestring="((A:0.5,B:0.5):0.5,C:0.5);"),
ancestral_seqs=ArrayAlignment(data={"edge.0": "AA"}, moltype=PROTEIN),
)
# correct numSeqs but wrong names
self.assertRaises(
ValueError,
validate_ancestral_seqs,
aln,
tree=make_tree(treestring="((A:0.5,B:0.5):0.5,C:0.5);"),
ancestral_seqs=ArrayAlignment(
data={"root": "AA", "edge.1": "AA"}, moltype=PROTEIN
),
)
self.assertRaises(
ValueError,
validate_ancestral_seqs,
aln,
tree=make_tree(treestring="((A:0.5,B:0.5):0.5,C:0.5);"),
ancestral_seqs=ArrayAlignment(
data={"r": "AA", "edge.0": "AA"}, moltype=PROTEIN
),
)
self.assertRaises(
ValueError,
validate_ancestral_seqs,
aln,
tree=make_tree(treestring="((A:0.5,B:0.5):0.5,C:0.5);"),
ancestral_seqs=ArrayAlignment(data={"r": "AA", "e": "AA"}, moltype=PROTEIN),
)
# different tree: invalid
aln = ArrayAlignment(
data={"A": "AC", "B": "CA", "C": "CC", "D": "DD"}, moltype=PROTEIN
)
self.assertRaises(
ValueError,
validate_ancestral_seqs,
aln,
tree=make_tree(treestring="((A:0.5,B:0.5):0.5,(C:0.5,D:0.5):0.5);"),
ancestral_seqs=ArrayAlignment(
data={"root": "AA", "e": "AA", "edge.1": "AA"}, moltype=PROTEIN
),
)
def test_validate_ancestral_seqs_valid(self):
"""validate_ancestral_seqs: does nothing on compatible anc. seqs & tree"""
aln = ArrayAlignment(data={"A": "AC", "B": "CA", "C": "CC"}, moltype=PROTEIN)
# valid data -> no error
validate_ancestral_seqs(
aln,
tree=make_tree(treestring="((A:0.5,B:0.5):0.5,C:0.5);"),
ancestral_seqs=ArrayAlignment(
data={"root": "AA", "edge.0": "AA"}, moltype=PROTEIN
),
)
# different tree: valid
aln = ArrayAlignment(
data={"A": "AC", "B": "CA", "C": "CC", "D": "DD"}, moltype=PROTEIN
)
validate_ancestral_seqs(
aln,
tree=make_tree(treestring="((A:0.5,B:0.5):0.5,(C:0.5,D:0.5):0.5);"),
ancestral_seqs=ArrayAlignment(
data={"root": "AA", "edge.0": "AA", "edge.1": "AA"}, moltype=PROTEIN
),
)
def test_ancestral_states_input_validation(self):
"""ancestral_states_input_validation: all validation steps performed"""
aln = ArrayAlignment(
data={"A": "AC", "B": "CA", "C": "CC", "D": "DD"}, moltype=PROTEIN
)
# incompatible tree and ancestral states (more thorough testing in
# test_validate_ancestral_seqs)
self.assertRaises(
ValueError,
ancestral_states_input_validation,
aln,
tree=make_tree(treestring="((A:0.5,B:0.5):0.5,(C:0.5,D:0.5):0.5);"),
ancestral_seqs=ArrayAlignment(
data={"root": "AA", "e": "AA", "edge.1": "AA"}, moltype=PROTEIN
),
)
# no tree provided
self.assertRaises(
ValueError,
ancestral_states_input_validation,
aln,
ancestral_seqs=ArrayAlignment(
data={"root": "AA", "e": "AA", "edge.1": "AA"}, moltype=PROTEIN
),
)
# incompatible tree and alignment (more tests in test_validate_tree)
aln = ArrayAlignment(data={"A": "AC", "B": "CA", "C": "CC"}, moltype=PROTEIN)
self.assertRaises(
ValueError,
ancestral_states_input_validation,
aln,
tree=make_tree(treestring="((A:0.5,B:0.5):0.5,(C:0.5,D:0.5):0.5);"),
)
def test_validate_tree_valid(self):
"""validate_tree: does nothing on compatible tree and aln"""
t = make_tree(treestring="((A:0.5,B:0.5):0.5,(C:0.5,D:0.5):0.5);")
aln = ArrayAlignment(
data={"A": "AC", "B": "CA", "C": "CC", "D": "DD"}, moltype=PROTEIN
)
validate_tree(aln, t)
t = make_tree(treestring="((A:0.5,B:0.5):0.5,C:0.5);")
aln = ArrayAlignment(data={"A": "AC", "B": "CA", "C": "CC"}, moltype=PROTEIN)
validate_tree(aln, t)
def test_validate_tree_invalid(self):
"""validate_tree: raises ValueError on incompatible tree and aln"""
# different scale tree and aln
t = make_tree(treestring="((A:0.5,B:0.5):0.5,C:0.5);")
aln = ArrayAlignment(
data={"A": "AC", "B": "CA", "C": "CC", "D": "DD"}, moltype=PROTEIN
)
self.assertRaises(ValueError, validate_tree, aln, t)
t = make_tree(treestring="((A:0.5,B:0.5):0.5,(C:0.5,D:0.5):0.5);")
aln = ArrayAlignment(data={"A": "AC", "B": "CA", "C": "CC"}, moltype=PROTEIN)
self.assertRaises(ValueError, validate_tree, aln, t)
# same scale tree and aln, but different names
t = make_tree(treestring="((A:0.5,B:0.5):0.5,(C:0.5,Dee:0.5):0.5);")
aln = ArrayAlignment(
data={"A": "AC", "B": "CA", "C": "CC", "D": "DD"}, moltype=PROTEIN
)
self.assertRaises(ValueError, validate_tree, aln, t)
def test_get_ancestral_seqs(self):
"""get_ancestral_seqs: returns valid collection of ancestral seqs"""
t = make_tree(treestring="((A:0.5,B:0.5):0.5,C:0.5);")
aln = ArrayAlignment(data={"A": "AA", "B": "AA", "C": "AC"}, moltype=PROTEIN)
expected = ArrayAlignment(data={"root": "AA", "edge.0": "AA"}, moltype=PROTEIN)
self.assertEqual(get_ancestral_seqs(aln, t, optimise=False), expected)
t = make_tree(treestring="(A:0.5,B:0.5,C:0.5);")
aln = ArrayAlignment(data={"A": "AA", "B": "AA", "C": "AC"}, moltype=PROTEIN)
expected = ArrayAlignment(data={"root": "AA"}, moltype=PROTEIN)
self.assertEqual(get_ancestral_seqs(aln, t, optimise=False), expected)
t = make_tree(
treestring="(((A1:0.5,A2:0.5):0.5,B:0.5):0.5,\
(C:0.5,D:0.5):0.5);"
)
aln = ArrayAlignment(
data={"A1": "AD", "A2": "AD", "B": "AC", "C": "AC", "D": "AC"},
moltype=PROTEIN,
)
expected = ArrayAlignment(
data={"root": "AC", "edge.0": "AD", "edge.1": "AC", "edge.2": "AC"},
moltype=PROTEIN,
)
self.assertEqual(get_ancestral_seqs(aln, t, optimise=False), expected)
def test_get_ancestral_seqs_handles_gaps(self):
"""get_ancestral_seqs: handles gaps"""
# gaps handled OK
t = make_tree(treestring="(A:0.5,B:0.5,C:0.5);")
aln = ArrayAlignment(data={"A": "A-", "B": "AA", "C": "AA"}, moltype=PROTEIN)
expected = ArrayAlignment(data={"root": "AA"}, moltype=PROTEIN)
self.assertEqual(get_ancestral_seqs(aln, t, optimise=False), expected)
def test_get_ancestral_seqs_handles_ambiguous_residues(self):
"""get_ancestral_seqs: handles ambiguous residues"""
# Non-canonical residues handled OK
t = make_tree(treestring="(A:0.5,B:0.5,C:0.5);")
aln = ArrayAlignment(data={"A": "AX", "B": "Z-", "C": "BC"}, moltype=PROTEIN)
actual = get_ancestral_seqs(aln, t, optimise=False)
self.assertEqual(len(actual), 2)
self.assertEqual(actual.num_seqs, 1)
def test_ancestral_state_alignment_handles_ancestral_state_calc(self):
"""ancestral_state_alignment: functions when calc'ing ancestral states"""
t = make_tree(treestring="((A:0.5,B:0.5):0.5,C:0.5);")
aln = ArrayAlignment(data={"A": "AA", "B": "AA", "C": "AC"}, moltype=PROTEIN)
assert_equal(ancestral_state_alignment(aln, t), [[0, 0], [0, 2]])
# non-bifurcating tree
t = make_tree(treestring="(A:0.5,B:0.5,C:0.5);")
aln = ArrayAlignment(data={"A": "AA", "B": "AA", "C": "AC"}, moltype=PROTEIN)
assert_equal(ancestral_state_alignment(aln, t), [[0, 0], [0, 2]])
def test_ancestral_state_position_handles_ancestral_state_calc(self):
"""ancestral_state_position: functions when calc'ing ancestral states"""
t = make_tree(treestring="((A:0.5,B:0.5):0.5,C:0.5);")
aln = ArrayAlignment(data={"A": "AA", "B": "AA", "C": "AC"}, moltype=PROTEIN)
assert_equal(ancestral_state_position(aln, t, 0), [0, 0])
assert_equal(ancestral_state_position(aln, t, 1), [0, 2])
def test_ancestral_state_pair_handles_ancestral_state_calc(self):
"""ancestral_state_position: functions when calc'ing ancestral states"""
t = make_tree(treestring="((A:0.5,B:0.5):0.5,C:0.5);")
aln = ArrayAlignment(data={"A": "AA", "B": "AA", "C": "AC"}, moltype=PROTEIN)
self.assertEqual(ancestral_state_pair(aln, t, 0, 0), 0)
self.assertEqual(ancestral_state_pair(aln, t, 0, 1), 0)
self.assertEqual(ancestral_state_pair(aln, t, 1, 1), 2)
self.assertEqual(ancestral_state_pair(aln, t, 1, 0), 0)
def test_ancestral_state_alignment_no_error_on_gap(self):
"""ancestral_state_alignment: return w/o error with gapped seqs"""
ancestral_state_alignment(self.aln1_w_gaps, self.t1, self.ancestral_states1)
ancestral_state_alignment(self.aln1_1, self.t1, self.ancestral_states1_w_gaps)
def test_ancestral_state_methods_handle_bad_ancestor_aln(self):
"""ancestral state methods raise error on bad ancestor alignment"""
# bad length and seq names
self.assertRaises(
ValueError,
coevolve_alignment,
ancestral_state_alignment,
self.aln1_2,
tree=self.t1,
ancestral_seqs=self.ancestral_states2_1,
)
self.assertRaises(
ValueError,
coevolve_position,
ancestral_state_position,
self.aln1_2,
0,
tree=self.t1,
ancestral_seqs=self.ancestral_states2_1,
)
self.assertRaises(
ValueError,
coevolve_pair,
ancestral_state_pair,
self.aln1_2,
0,
1,
tree=self.t1,
ancestral_seqs=self.ancestral_states2_1,
)
# bad seq names
self.assertRaises(
ValueError,
coevolve_alignment,
ancestral_state_alignment,
self.aln1_2,
tree=self.t1,
ancestral_seqs=self.aln1_2,
)
self.assertRaises(
ValueError,
coevolve_position,
ancestral_state_position,
self.aln1_2,
0,
tree=self.t1,
ancestral_seqs=self.aln1_2,
)
self.assertRaises(
ValueError,
coevolve_pair,
ancestral_state_pair,
self.aln1_2,
0,
1,
tree=self.t1,
ancestral_seqs=self.aln1_2,
)
# bad length
a = ArrayAlignment(
data={"root": "AC", "edge.0": "AD", "edge.1": "AA", "edge.2": "EE"}
)
self.assertRaises(
ValueError,
coevolve_alignment,
ancestral_state_alignment,
self.aln1_2,
tree=self.t1,
ancestral_seqs=a,
)
self.assertRaises(
ValueError,
coevolve_position,
ancestral_state_position,
self.aln1_2,
0,
tree=self.t1,
ancestral_seqs=a,
)
self.assertRaises(
ValueError,
coevolve_pair,
ancestral_state_pair,
self.aln1_2,
0,
1,
tree=self.t1,
ancestral_seqs=a,
)
def test_ancestral_states_methods_handle_bad_position_numbers(self):
"""coevolve_* w/ ancestral_states raise ValueError on bad position"""
self.assertRaises(
ValueError,
coevolve_position,
ancestral_state_position,
self.aln1_2,
42,
tree=self.t1,
ancestral_states=self.ancestral_states2_1,
)
self.assertRaises(
ValueError,
coevolve_pair,
ancestral_state_pair,
self.aln1_2,
0,
42,
tree=self.t1,
ancestral_states=self.ancestral_states2_1,
)
self.assertRaises(
ValueError,
coevolve_pair,
ancestral_state_pair,
self.aln1_2,
42,
0,
tree=self.t1,
ancestral_states=self.ancestral_states2_1,
)
def test_ancestral_state_alignment_non_bifurcating_tree(self):
"""ancestral_state_alignment: handles non-bifurcating tree correctly"""
assert_equal(
ancestral_state_alignment(self.aln2, self.t2, self.ancestral_states2_3),
[[9, 9], [9, 9]],
)
def test_ancestral_state_alignment_bifurcating_tree(self):
"""ancestral_state_alignment: handles bifurcating tree correctly"""
assert_allclose(
ancestral_state_alignment(self.aln1_5, self.t1, self.ancestral_states1),
[[5, 5, 5], [5, 11.6, 11.6], [5, 11.6, 11.6]],
)
def test_ancestral_state_alignment_ancestor_difference(self):
"""ancestral_state_alignment: different ancestor -> different result"""
# ancestral_states2_1
assert_equal(
ancestral_state_alignment(self.aln2, self.t2, self.ancestral_states2_1),
[[5, 2], [2, 2]],
)
# ancestral_states2_2
assert_equal(
ancestral_state_alignment(self.aln2, self.t2, self.ancestral_states2_2),
[[2, 2], [2, 5]],
)
# ancestral_states2_3
assert_equal(
ancestral_state_alignment(self.aln2, self.t2, self.ancestral_states2_3),
[[9, 9], [9, 9]],
        )
#! /usr/bin/env python
import os
import string
import remi.gui as gui
from remi_plus import TabView, append_with_label, OKDialog, OKCancelDialog,AdaptableDialog,FileSelectionDialog
from pprint import pprint
from pp_utils import calculate_relative_path
# !!! do not use self.container in dialog sub-classes
# *************************************
# EDIT SHOW AND TRACK CONTENT
# ************************************
class WebEditItem(AdaptableDialog):
def __init__(self, title, field_content, record_specs,field_specs,show_refs,initial_media_dir,
pp_home_dir,pp_profile_dir,initial_tab,callback):
self.callback=callback
self.frame_width=800 # frame for 2 columns
self.col_width=400 # width of a column
self.field_width= 200 # width of data field, label must fit in col_width-field_width- 15
self.fields_height=380
self.rows_in_col=18
self.tab_height=100
self.okcancel_height=100
super(WebEditItem, self).__init__(title,'',width=self.frame_width+700,height=self.fields_height+self.tab_height+self.okcancel_height,
confirm_name='OK',cancel_name='Cancel')
self.field_content = field_content # dictionary - the track parameters to be edited and returned
# key is field name e.g. omx-window
self.record_specs= record_specs # list of field names and seps/tabs in the order that they appear
self.field_specs=field_specs # dictionary of specs referenced by field name
        self.show_refs=show_refs    # used for dropdown of list of shows
self.show_refs.append('')
self.initial_media_dir=initial_media_dir
self.pp_home_dir=pp_home_dir
self.pp_profile_dir=pp_profile_dir
self.initial_tab=initial_tab
# create a Tabbed Editor
self.tabview= TabView(self.frame_width,self.fields_height,30)
# tabs are created dynamically from pp_definitions.py
# get fields for this record using the record type in the loaded record
record_fields=self.record_specs[self.field_content['type']]
# init results of building the form
self.tab_row=1 # row on form
self.field_objs=[] # list of field objects in record fields order, not for sep or tab
self.field_index=0 # index to self.field_objs incremented after each field except tab and sep
# can be used as an index to self.field_objs and self.button_objs
        self.button_objs=[]   # list of button objects in record field order, not for sep or tab; None for non-buttons
self.col_row=0
self.current_col=None
self.col_1=None
# populate the dialog box using the record fields to determine the order
for field in record_fields:
# get list of values where required
values=[]
if self.field_specs[field]['shape']in("option-menu",'spinbox'):
# print 'should be field name', field
# print 'should be shape',self.field_specs[field]['shape']
if field in ('sub-show','start-show'):
values=self.show_refs
else:
values=self.field_specs[field]['values']
else:
values=[]
# make the entry
obj,button=self.make_entry(field,self.field_specs[field],values)
if obj is not None:
self.field_objs.append(obj)
self.button_objs.append(button)
self.field_index +=1
#construct the tabview
self.tabview.construct_tabview()
# frame for tabbed editor
self.root_frame = gui.HBox(width=self.tabview.get_width() + 100, height=self.fields_height+self.tab_height) #1
self.root_frame.append(self.tabview,key='switch')
self.append_field(self.root_frame,'cont')
#adjust width of dialog box
self.style['width']=gui.to_pix(self.tabview.get_width() + 100)
return None
def show_tab(self,key):
self.tabview.show(key)
# create an entry in an editor panel
def make_entry(self,field,field_spec,values):
# print 'make entry',self.field_index,field,field_spec
if self.col_row >= self.rows_in_col:
self.current_col=self.col_1
# print 'make entry',self.col_row, self.current_col
if field_spec['shape']=='tab':
width=len(field_spec['text'])*8+4
self.current_tab = self.tabview.add_tab(width,field_spec['name'],field_spec['text'])
# print 'make tab', field_spec['name']
self.current_tab.set_layout_orientation(gui.Container.LAYOUT_HORIZONTAL)
self.col_0=gui.Container(width=self.frame_width/2) #0
self.col_0.set_layout_orientation(gui.Container.LAYOUT_VERTICAL)
self.col_1=gui.Container(width=self.frame_width/2) #0
self.col_1.set_layout_orientation(gui.Container.LAYOUT_VERTICAL)
self.current_tab.append(self.col_0,key='col_0')
self.current_tab.append(self.col_1,key='col_1')
self.current_col=self.col_0
self.col_row=1
# print '\nNEW TAB',self.col_row
self.tab_row=1
return None,None
elif field_spec['shape']=='sep':
self.current_col.append(gui.Label('',width=self.field_width,height=10))
self.tab_row+=1
self.col_row+=1
return None,None
else:
# print 'replace param in make entry',field
# print 'content', field, self.field_content[field]
# is it in the field content dictionary
if not field in self.field_content:
print("Value for field not found in opened file: " + field)
return None,None
else:
# bodge - assumes only type is read only
if field_spec['read-only']=='yes':
obj=(gui.Label(self.field_content[field],width=self.field_width,height=20))
obj.style['font-weight']='bold'
elif field_spec['shape']in ('entry','browse','font','colour'):
obj=gui.TextInput(single_line=True,width=self.field_width,height=20)
obj.set_value(self.field_content[field])
elif field_spec['shape']=='text':
obj=gui.TextInput(width=self.field_width,height=110,single_line=False)
obj.set_value(self.field_content[field])
# extra lines
self.col_row+=5
elif field_spec['shape']=='spinbox':
print('spinbox not implemented')
return None,None
elif field_spec['shape']=='option-menu':
obj=gui.DropDown(width=self.field_width,height=25)
for key, value in enumerate(values):
item=gui.DropDownItem(value,width=self.field_width,height=25)
obj.append(item, key=key)
content=self.field_content[field]
if self.field_content[field] not in values:
obj.style['color'] = 'red'
content=values[0]
obj.set_value(content)
# print self.field_content[field],obj.get_value(),values
else:
print("Uknown shape for: " + field)
return None,None
# create buttons where required
if field_spec['shape']=='browse':
button=self.browse_button(20,20,'','browse_button',self.field_index,field_spec['text'])
elif field_spec['shape']=='colour':
# print ('colour',self.field_content[field])
if ColourMap().exists(self.field_content[field]) is True:
colour=ColourMap().lookup(self.field_content[field])
else:
colour=self.field_content[field]
button= gui.ColorPicker(colour,width=20, height=20)
button.set_on_change_listener(self.color_picker_changed)
else:
button=None
append_with_label(self.current_col,field_spec['text'],obj,button,width=self.col_width)
self.tab_row+=1
self.col_row+=1
return obj,button
@gui.decorate_event
def confirm_dialog(self,emitter):
# OK button is pressed so update the field values in the profile
# self.field_content - dictionary - the track parameters to be edited and returned
# key is field name e.g. omx-window
# get list of fields in the record in the same order as the form was generated
record_fields=self.record_specs[self.field_content['type']]
field_index=0 # index to self.field_objs - not incremented for tab and sep
for field in record_fields:
# get the details of this field
field_spec=self.field_specs[field]
#print 'reading row',field_index,field_spec['shape']
# and get the value
if field_spec['shape']not in ('sep','tab'):
#bodge for type which is a label
if field_spec['read-only']=='yes':
self.field_content[field]=self.field_objs[field_index].get_text().strip()
# get result of button from button and also put it in field
                elif field_spec['shape'] == 'colour':
# button is actually a color picker
self.field_content[field]=self.button_objs[field_index].get_value().strip()
self.field_objs[field_index].set_value(self.field_content[field])
else:
self.field_content[field]=str(self.field_objs[field_index].get_value()).strip()
# print field_spec['shape'],field_spec['text']+': ',self.field_content[field]
# print field_spec['shape'],field_spec['text']+': ',self.field_content[field]
field_index +=1
# print 'edit item hide'
self.hide()
self.callback()
# browse button
def browse_button(self,w,h,label,base_name,field_index,title):
# create a button that returns the key to on_click_listener
bname=base_name+str(field_index)
but=gui.Button(label,width=w,height=h)
# f = lambda _bname=field_index: self.open_fileselection_dialog( _bname)
# fname='self.'+base_name+'_' + str(field_index)
# setattr(self, fname, f)
but.set_on_click_listener(self.open_fileselection_dialog,field_index,title)
return but
def open_fileselection_dialog(self,widget,browse_field_index,title):
self.browse_field_index=browse_field_index
self.file_dialog=FileSelectionDialog(title,'Select File',False, self.initial_media_dir,callback=self.on_fileselection_dialog_callback)
self.file_dialog.show(self._base_app_instance)
def on_fileselection_dialog_callback(self,flist):
# a list() of filenames and folders is returned
if len(flist)==0:
OKDialog('Select File','nothing selected').show(self._base_app_instance)
return
file_path=os.path.normpath(flist[0])
# print "file path ", file_path
result=calculate_relative_path(file_path,self.pp_home_dir,self.pp_profile_dir)
self.field_objs[self.browse_field_index].set_value(result)
# colour picker
def color_picker_changed(self,widget,result):
self.update_colour_fields()
# print ('colour picked',result)
def update_colour_fields(self):
# copy result of colour picker into the text field
record_fields=self.record_specs[self.field_content['type']]
field_index=0 # index to self.field_objs - not incremented for tab and sep
for field in record_fields:
field_spec=self.field_specs[field]
if field_spec['shape']not in ('sep','tab'):
if field_spec['shape'] == 'colour':
self.field_objs[field_index].set_value(self.button_objs[field_index].get_value())
field_index +=1
# ****************************************************
# convert colour string to hex rgb string
# ****************************************************
class ColourMap(object):
def init(self):
ColourMap.colour_map=dict(snow='#fffafa',ghostwhite='#f8f8ff',whitesmoke='#f5f5f5',gainsboro='#dcdcdc',
floralwhite='#fffaf0',oldlace='#fdf5e6',linen='#faf0e6',antiquewhite='#faebd7',
papayawhip='#ffefd5',blanchedalmond='#ffebcd',bisque='#ffe4c4',peachpuff='#ffdab9',
navajowhite='#ffdead',moccasin='#ffe4b5',cornsilk='#fff8dc',ivory='#fffff0',
lemonchiffon='#fffacd',seashell='#fff5ee',honeydew='#f0fff0',mintcream='#f5fffa',
azure='#f0ffff',aliceblue='#f0f8ff',lavender='#e6e6fa',lavenderblush='#fff0f5',
mistyrose='#ffe4e1',white='#ffffff',black='#000000',darkslategray='#2f4f4f',
dimgray='#696969',slategray='#708090',lightslategray='#778899',gray='#bebebe',
lightgray='#d3d3d3',midnightblue='#191970',navy='#000080',navyblue='#000080',
cornflowerblue='#6495ed',darkslateblue='#483d8b',slateblue='#6a5acd',mediumslateblue='#7b68ee',
lightslateblue='#8470ff',mediumblue='#0000cd',royalblue='#4169e1',blue='#0000ff',
dodgerblue='#1e90ff',deepskyblue='#00bfff',skyblue='#87ceeb',lightskyblue='#87cefa',
steelblue='#4682b4',lightsteelblue='#b0c4de',lightblue='#add8e6',powderblue='#b0e0e6',
paleturquoise='#afeeee',darkturquoise='#00ced1',mediumturquoise='#48d1cc',turquoise='#40e0d0',
cyan='#00ffff',lightcyan='#e0ffff',cadetblue='#5f9ea0',mediumaquamarine='#66cdaa')
dict1=dict(aquamarine='#7fffd4',darkgreen='#006400',darkolivegreen='#556b2f',darkseagreen='#8fbc8f',
seagreen='#2e8b57',mediumseagreen='#3cb371',lightseagreen='#20b2aa',palegreen='#98fb98',
springgreen='#00ff7f',lawngreen='#7cfc00',green='#00ff00',chartreuse='#7fff00',
mediumspringgreen='#00fa9a',greenyellow='#adff2f',limegreen='#32cd32',yellowgreen='#9acd32',
forestgreen='#228b22',olivedrab='#6b8e23',darkkhaki='#bdb76b',khaki='#f0e68c',
palegoldenrod='#eee8aa',lightgoldenrodyellow='#fafad2',lightyellow='#ffffe0',yellow='#ffff00',
gold='#ffd700',lightgoldenrod='#eedd82',goldenrod='#daa520',darkgoldenrod='#b8860b',
rosybrown='#bc8f8f',indianred='#cd5c5c',saddlebrown='#8b4513',sienna='#a0522d',
peru='#cd853f',burlywood='#deb887',beige='#f5f5dc',wheat='#f5deb3',
sandybrown='#f4a460',tan='#d2b48c',chocolate='#d2691e',firebrick='#b22222',
brown='#a52a2a',darksalmon='#e9967a',salmon='#fa8072',lightsalmon='#ffa07a',
orange='#ffa500',darkorange='#ff8c00',coral='#ff7f50',lightcoral='#f08080',
tomato='#ff6347',orangered='#ff4500',red='#ff0000',hotpink='#ff69b4',
deeppink='#ff1493',pink='#ffc0cb',lightpink='#ffb6c1',palevioletred='#db7093',
maroon='#b03060',mediumvioletred='#c71585',violetred='#d02090',magenta='#ff00ff',
violet='#ee82ee',plum='#dda0dd',orchid='#da70d6',mediumorchid='#ba55d3',
darkorchid='#9932cc',darkviolet='#9400d3',blueviolet='#8a2be2',purple='#a020f0',
mediumpurple='#9370db',thistle='#d8bfd8',snow1='#fffafa',snow2='#eee9e9')
ColourMap.colour_map.update(dict1)
dict2=dict(snow3='#cdc9c9',snow4='#8b8989',seashell1='#fff5ee',seashell2='#eee5de',
seashell3='#cdc5bf',seashell4='#8b8682',antiquewhite1='#ffefdb',antiquewhite2='#eedfcc',
antiquewhite3='#cdc0b0',antiquewhite4='#8b8378',bisque1='#ffe4c4',bisque2='#eed5b7',
bisque3='#cdb79e',bisque4='#8b7d6b',peachpuff1='#ffdab9',peachpuff2='#eecbad',
peachpuff3='#cdaf95',peachpuff4='#8b7765',navajowhite1='#ffdead',navajowhite2='#eecfa1',
navajowhite3='#cdb38b',navajowhite4='#8b795e',lemonchiffon1='#fffacd',lemonchiffon2='#eee9bf',
lemonchiffon3='#cdc9a5',lemonchiffon4='#8b8970',cornsilk1='#fff8dc',cornsilk2='#eee8cd',
cornsilk3='#cdc8b1',cornsilk4='#8b8878',ivory1='#fffff0',ivory2='#eeeee0',
ivory3='#cdcdc1',ivory4='#8b8b83',honeydew1='#f0fff0',honeydew2='#e0eee0',
honeydew3='#c1cdc1',honeydew4='#838b83',lavenderblush1='#fff0f5',lavenderblush2='#eee0e5',
lavenderblush3='#cdc1c5',lavenderblush4='#8b8386',mistyrose1='#ffe4e1',mistyrose2='#eed5d2',
mistyrose3='#cdb7b5',mistyrose4='#8b7d7b',azure1='#f0ffff',azure2='#e0eeee',
azure3='#c1cdcd',azure4='#838b8b',slateblue1='#836fff',slateblue2='#7a67ee',
slateblue3='#6959cd',slateblue4='#473c8b',royalblue1='#4876ff',royalblue2='#436eee',
royalblue3='#3a5fcd',royalblue4='#27408b',blue1='#0000ff',blue2='#0000ee',
blue3='#0000cd',blue4='#00008b',dodgerblue1='#1e90ff',dodgerblue2='#1c86ee',
dodgerblue3='#1874cd',dodgerblue4='#104e8b',steelblue1='#63b8ff',steelblue2='#5cacee',
steelblue3='#4f94cd',steelblue4='#36648b',deepskyblue1='#00bfff',deepskyblue2='#00b2ee')
ColourMap.colour_map.update(dict2)
dict3 = dict (deepskyblue3='#009acd',deepskyblue4='#00688b',skyblue1='#87ceff',skyblue2='#7ec0ee',
skyblue3='#6ca6cd',skyblue4='#4a708b',lightskyblue1='#b0e2ff',lightskyblue2='#a4d3ee',
lightskyblue3='#8db6cd',lightskyblue4='#607b8b',slategray1='#c6e2ff',slategray2='#b9d3ee',
slategray3='#9fb6cd',slategray4='#6c7b8b',lightsteelblue1='#cae1ff',lightsteelblue2='#bcd2ee',
lightsteelblue3='#a2b5cd',lightsteelblue4='#6e7b8b',lightblue1='#bfefff',lightblue2='#b2dfee',
lightblue3='#9ac0cd',lightblue4='#68838b',lightcyan1='#e0ffff',lightcyan2='#d1eeee',
lightcyan3='#b4cdcd',lightcyan4='#7a8b8b',paleturquoise1='#bbffff',paleturquoise2='#aeeeee',
paleturquoise3='#96cdcd',paleturquoise4='#668b8b',cadetblue1='#98f5ff',cadetblue2='#8ee5ee',
cadetblue3='#7ac5cd',cadetblue4='#53868b',turquoise1='#00f5ff',turquoise2='#00e5ee',
turquoise3='#00c5cd',turquoise4='#00868b',cyan1='#00ffff',cyan2='#00eeee',
cyan3='#00cdcd',cyan4='#008b8b',darkslategray1='#97ffff',darkslategray2='#8deeee',
darkslategray3='#79cdcd',darkslategray4='#528b8b',aquamarine1='#7fffd4',aquamarine2='#76eec6',
aquamarine3='#66cdaa',aquamarine4='#458b74',darkseagreen1='#c1ffc1',darkseagreen2='#b4eeb4',
darkseagreen3='#9bcd9b',darkseagreen4='#698b69',seagreen1='#54ff9f',seagreen2='#4eee94',
seagreen3='#43cd80',seagreen4='#2e8b57',palegreen1='#9aff9a',palegreen2='#90ee90',
palegreen3='#7ccd7c',palegreen4='#548b54',springgreen1='#00ff7f',springgreen2='#00ee76',
springgreen3='#00cd66',springgreen4='#008b45',green1='#00ff00',green2='#00ee00')
ColourMap.colour_map.update(dict3)
dict3a=dict(green3='#00cd00',green4='#008b00',chartreuse1='#7fff00',chartreuse2='#76ee00',
chartreuse3='#66cd00',chartreuse4='#458b00',olivedrab1='#c0ff3e',olivedrab2='#b3ee3a',
olivedrab3='#9acd32',olivedrab4='#698b22',darkolivegreen1='#caff70',darkolivegreen2='#bcee68',
darkolivegreen3='#a2cd5a',darkolivegreen4='#6e8b3d',khaki1='#fff68f',khaki2='#eee685',
khaki3='#cdc673',khaki4='#8b864e',lightgoldenrod1='#ffec8b',lightgoldenrod2='#eedc82',
lightgoldenrod3='#cdbe70',lightgoldenrod4='#8b814c',lightyellow1='#ffffe0',lightyellow2='#eeeed1',
lightyellow3='#cdcdb4',lightyellow4='#8b8b7a',yellow1='#ffff00',yellow2='#eeee00',
yellow3='#cdcd00',yellow4='#8b8b00',gold1='#ffd700',gold2='#eec900',
gold3='#cdad00',gold4='#8b7500',goldenrod1='#ffc125',goldenrod2='#eeb422',
goldenrod3='#cd9b1d',goldenrod4='#8b6914',darkgoldenrod1='#ffb90f',darkgoldenrod2='#eead0e',
darkgoldenrod3='#cd950c',darkgoldenrod4='#8b6508',rosybrown1='#ffc1c1',rosybrown2='#eeb4b4',
rosybrown3='#cd9b9b',rosybrown4='#8b6969',indianred1='#ff6a6a',indianred2='#ee6363',
indianred3='#cd5555',indianred4='#8b3a3a',sienna1='#ff8247',sienna2='#ee7942',
sienna3='#cd6839',sienna4='#8b4726',burlywood1='#ffd39b',burlywood2='#eec591',
burlywood3='#cdaa7d',burlywood4='#8b7355',wheat1='#ffe7ba',wheat2='#eed8ae')
ColourMap.colour_map.update(dict3a)
dict4=dict(wheat3='#cdba96',wheat4='#8b7e66',tan1='#ffa54f',tan2='#ee9a49',
tan3='#cd853f',tan4='#8b5a2b',chocolate1='#ff7f24',chocolate2='#ee7621',
chocolate3='#cd661d',chocolate4='#8b4513',firebrick1='#ff3030',firebrick2='#ee2c2c',
firebrick3='#cd2626',firebrick4='#8b1a1a',brown1='#ff4040',brown2='#ee3b3b',
brown3='#cd3333',brown4='#8b2323',salmon1='#ff8c69',salmon2='#ee8262',
salmon3='#cd7054',salmon4='#8b4c39',lightsalmon1='#ffa07a',lightsalmon2='#ee9572',
lightsalmon3='#cd8162',lightsalmon4='#8b5742',orange1='#ffa500',orange2='#ee9a00',
orange3='#cd8500',orange4='#8b5a00',darkorange1='#ff7f00',darkorange2='#ee7600',
darkorange3='#cd6600',darkorange4='#8b4500',coral1='#ff7256',coral2='#ee6a50',
coral3='#cd5b45',coral4='#8b3e2f',tomato1='#ff6347',tomato2='#ee5c42',
tomato3='#cd4f39',tomato4='#8b3626',orangered1='#ff4500',orangered2='#ee4000',
orangered3='#cd3700',orangered4='#8b2500',red1='#ff0000',red2='#ee0000',
red3='#cd0000',red4='#8b0000',deeppink1='#ff1493',deeppink2='#ee1289',
deeppink3='#cd1076',deeppink4='#8b0a50',hotpink1='#ff6eb4',hotpink2='#ee6aa7',
hotpink3='#cd6090',hotpink4='#8b3a62',pink1='#ffb5c5',pink2='#eea9b8')
ColourMap.colour_map.update(dict4)
dict5=dict(pink3='#cd919e',pink4='#8b636c',lightpink1='#ffaeb9',lightpink2='#eea2ad',
lightpink3='#cd8c95',lightpink4='#8b5f65',palevioletred1='#ff82ab',palevioletred2='#ee799f',
palevioletred3='#cd6889',palevioletred4='#8b475d',maroon1='#ff34b3',maroon2='#ee30a7',
maroon3='#cd2990',maroon4='#8b1c62',violetred1='#ff3e96',violetred2='#ee3a8c',
violetred3='#cd3278',violetred4='#8b2252',magenta1='#ff00ff',magenta2='#ee00ee',
magenta3='#cd00cd',magenta4='#8b008b',orchid1='#ff83fa',orchid2='#ee7ae9',
orchid3='#cd69c9',orchid4='#8b4789',plum1='#ffbbff',plum2='#eeaeee',
plum3='#cd96cd',plum4='#8b668b',mediumorchid1='#e066ff',mediumorchid2='#d15fee',
mediumorchid3='#b452cd',mediumorchid4='#7a378b',darkorchid1='#bf3eff',darkorchid2='#b23aee',
darkorchid3='#9a32cd',darkorchid4='#68228b',purple1='#9b30ff',purple2='#912cee',
purple3='#7d26cd',purple4='#551a8b',mediumpurple1='#ab82ff',mediumpurple2='#9f79ee',
mediumpurple3='#8968cd',mediumpurple4='#5d478b',thistle1='#ffe1ff',thistle2='#eed2ee',
thistle3='#cdb5cd',thistle4='#8b7b8b',gray0='#000000',gray1='#030303')
ColourMap.colour_map.update(dict5)
dict6=dict(gray2='#050505',gray3='#080808',gray4='#0a0a0a',gray5='#0d0d0d',
gray6='#0f0f0f',gray7='#121212',gray8='#141414',gray9='#171717',
gray10='#1a1a1a',gray11='#1c1c1c',gray12='#1f1f1f',gray13='#212121',
gray14='#242424',gray15='#262626',gray16='#292929',gray17='#2b2b2b',
gray18='#2e2e2e',gray19='#303030',gray20='#333333',gray21='#363636',
gray22='#383838',gray23='#3b3b3b',gray24='#3d3d3d',gray25='#404040',
gray26='#424242',gray27='#454545',gray28='#474747',gray29='#4a4a4a',
gray30='#4d4d4d',gray31='#4f4f4f',gray32='#525252',gray33='#545454',
gray34='#575757',gray35='#595959',gray36='#5c5c5c',gray37='#5e5e5e',
gray38='#616161',gray39='#636363',gray40='#666666',gray41='#696969',
gray42='#6b6b6b',gray43='#6e6e6e',gray44='#707070',gray45='#737373',
gray46='#757575',gray47='#787878',gray48='#7a7a7a',gray49='#7d7d7d',
gray50='#7f7f7f',gray51='#828282',gray52='#858585',gray53='#878787',
gray54='#8a8a8a',gray55='#8c8c8c',gray56='#8f8f8f',gray57='#919191',
gray58='#949494',gray59='#969696',gray60='#999999',gray61='#9c9c9c',
gray62='#9e9e9e',gray63='#a1a1a1',gray64='#a3a3a3',gray65='#a6a6a6',
gray66='#a8a8a8',gray67='#ababab',gray68='#adadad',gray69='#b0b0b0',
gray70='#b3b3b3',gray71='#b5b5b5',gray72='#b8b8b8',gray73='#bababa',
gray74='#bdbdbd',gray75='#bfbfbf',gray76='#c2c2c2',gray77='#c4c4c4',
gray78='#c7c7c7',gray79='#c9c9c9',gray80='#cccccc',gray81='#cfcfcf',
gray82='#d1d1d1',gray83='#d4d4d4',gray84='#d6d6d6',gray85='#d9d9d9',
gray86='#dbdbdb',gray87='#dedede',gray88='#e0e0e0',gray89='#e3e3e3',
gray90='#e5e5e5',gray91='#e8e8e8',gray92='#ebebeb',gray93='#ededed',
gray94='#f0f0f0',gray95='#f2f2f2',gray96='#f5f5f5',gray97='#f7f7f7',
gray98='#fafafa',gray99='#fcfcfc',gray100='#ffffff',darkgray='#a9a9a9',
darkblue='#00008b',darkcyan='#008b8b',darkmagenta='#8b008b',darkred='#8b0000',
lightgreen='#90ee90' )
ColourMap.colour_map.update(dict6)
def lookup ( self, colour_name ):
return ColourMap.colour_map[colour_name.lower()]
def exists(self,colour_name):
return colour_name in ColourMap.colour_map
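# Hedged usage sketch (not part of the original editor code): the colour table is stored
# as a class attribute, so init() must be called once before lookup()/exists() will work.
def _colourmap_usage_example():
    cm = ColourMap()
    cm.init()
    assert cm.exists('red') is True
    assert cm.lookup('Red') == '#ff0000'      # lookup() lower-cases the name
    assert cm.exists('notacolour') is False
    return cm.lookup('darkblue')              # '#00008b'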
# ****************************************************
# one-off conversion of the COLOURS list below to dictionary source - used offline
# the resulting dictionary needs splitting into chunks before use.
# ****************************************************
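# Hedged sketch (not in the original) of the offline conversion described above: each
# COLOURS entry is assumed to be a string whose first three characters carry the R, G, B
# byte values, followed by the colour name.
def colours_to_dict(colour_entries):
    colour_dict = {}
    for entry in colour_entries:
        r, g, b = (ord(c) for c in entry[:3])
        colour_dict[entry[3:].lower()] = '#%02x%02x%02x' % (r, g, b)
    return colour_dict
# e.g. colours_to_dict(['\xFF\xFA\xFAsnow']) -> {'snow': '#fffafa'}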
COLOURS = [
'\xFF\xFA\xFAsnow', '\xF8\xF8\xFFGhostWhite',
'\xF5\xF5\xF5WhiteSmoke', '\xDC\xDC\xDCgainsboro',
'\xFF\xFA\xF0FloralWhite', '\xFD\xF5\xE6OldLace',
'\xFA\xF0\xE6linen', '\xFA\xEB\xD7AntiqueWhite',
'\xFF\xEF\xD5PapayaWhip', '\xFF\xEB\xCDBlanchedAlmond',
'\xFF\xE4\xC4bisque', '\xFF\xDA\xB9PeachPuff',
'\xFF\xDE\xADNavajoWhite', '\xFF\xE4\xB5moccasin',
'\xFF\xF8\xDCcornsilk', '\xFF\xFF\xF0ivory',
'\xFF\xFA\xCDLemonChiffon', '\xFF\xF5\xEEseashell',
'\xF0\xFF\xF0honeydew', '\xF5\xFF\xFAMintCream',
'\xF0\xFF\xFFazure', '\xF0\xF8\xFFAliceBlue',
'\xE6\xE6\xFAlavender', '\xFF\xF0\xF5LavenderBlush',
'\xFF\xE4\xE1MistyRose', '\xFF\xFF\xFFwhite',
'\x00\x00\x00black', '\x2F\x4F\x4FDarkSlateGray',
'\x69\x69\x69DimGray', '\x70\x80\x90SlateGray',
'\x77\x88\x99LightSlateGray', '\xBE\xBE\xBEgray',
'\xD3\xD3\xD3LightGray', '\x19\x19\x70MidnightBlue',
'\x00\x00\x80navy', '\x00\x00\x80NavyBlue',
'\x64\x95\xEDCornflowerBlue', '\x48\x3D\x8BDarkSlateBlue',
'\x6A\x5A\xCDSlateBlue', '\x7B\x68\xEEMediumSlateBlue',
'\x84\x70\xFFLightSlateBlue', '\x00\x00\xCDMediumBlue',
'\x41\x69\xE1RoyalBlue', '\x00\x00\xFFblue',
'\x1E\x90\xFFDodgerBlue', '\x00\xBF\xFFDeepSkyBlue',
'\x87\xCE\xEBSkyBlue', '\x87\xCE\xFALightSkyBlue',
'\x46\x82\xB4SteelBlue', '\xB0\xC4\xDELightSteelBlue',
'\xAD\xD8\xE6LightBlue', '\xB0\xE0\xE6PowderBlue',
'\xAF\xEE\xEEPaleTurquoise', '\x00\xCE\xD1DarkTurquoise',
'\x48\xD1\xCCMediumTurquoise', '\x40\xE0\xD0turquoise',
'\x00\xFF\xFFcyan', '\xE0\xFF\xFFLightCyan',
'\x5F\x9E\xA0CadetBlue', '\x66\xCD\xAAMediumAquamarine',
'\x7F\xFF\xD4aquamarine', '\x00\x64\x00DarkGreen',
'\x55\x6B\x2FDarkOliveGreen', '\x8F\xBC\x8FDarkSeaGreen',
'\x2E\x8B\x57SeaGreen', '\x3C\xB3\x71MediumSeaGreen',
'\x20\xB2\xAALightSeaGreen', '\x98\xFB\x98PaleGreen',
'\x00\xFF\x7FSpringGreen', '\x7C\xFC\x00LawnGreen',
'\x00\xFF\x00green', '\x7F\xFF\x00chartreuse',
'\x00\xFA\x9AMediumSpringGreen', '\xAD\xFF\x2FGreenYellow',
'\x32\xCD\x32LimeGreen', '\x9A\xCD\x32YellowGreen',
'\x22\x8B\x22ForestGreen', '\x6B\x8E\x23OliveDrab',
'\xBD\xB7\x6BDarkKhaki', '\xF0\xE6\x8Ckhaki',
'\xEE\xE8\xAAPaleGoldenrod',
'\xFA\xFA\xD2LightGoldenrodYellow',
'\xFF\xFF\xE0LightYellow', '\xFF\xFF\x00yellow',
'\xFF\xD7\x00gold', '\xEE\xDD\x82LightGoldenrod',
'\xDA\xA5\x20goldenrod', '\xB8\x86\x0BDarkGoldenrod',
'\xBC\x8F\x8FRosyBrown', '\xCD\x5C\x5CIndianRed',
'\x8B\x45\x13SaddleBrown', '\xA0\x52\x2Dsienna',
'\xCD\x85\x3Fperu', '\xDE\xB8\x87burlywood',
'\xF5\xF5\xDCbeige', '\xF5\xDE\xB3wheat',
'\xF4\xA4\x60SandyBrown', '\xD2\xB4\x8Ctan',
'\xD2\x69\x1Echocolate', '\xB2\x22\x22firebrick',
'\xA5\x2A\x2Abrown', '\xE9\x96\x7ADarkSalmon',
'\xFA\x80\x72salmon', '\xFF\xA0\x7ALightSalmon',
'\xFF\xA5\x00orange', '\xFF\x8C\x00DarkOrange',
'\xFF\x7F\x50coral', '\xF0\x80\x80LightCoral',
'\xFF\x63\x47tomato', '\xFF\x45\x00OrangeRed',
'\xFF\x00\x00red', '\xFF\x69\xB4HotPink',
'\xFF\x14\x93DeepPink', '\xFF\xC0\xCBpink',
'\xFF\xB6\xC1LightPink', '\xDB\x70\x93PaleVioletRed',
'\xB0\x30\x60maroon', '\xC7\x15\x85MediumVioletRed',
'\xD0\x20\x90VioletRed', '\xFF\x00\xFFmagenta',
'\xEE\x82\xEEviolet', '\xDD\xA0\xDDplum',
'\xDA\x70\xD6orchid', '\xBA\x55\xD3MediumOrchid',
'\x99\x32\xCCDarkOrchid', '\x94\x00\xD3DarkViolet',
'\x8A\x2B\xE2BlueViolet', '\xA0\x20\xF0purple',
'\x93\x70\xDBMediumPurple', '\xD8\xBF\xD8thistle',
'\xFF\xFA\xFAsnow1', '\xEE\xE9\xE9snow2',
'\xCD\xC9\xC9snow3', '\x8B\x89\x89snow4',
'\xFF\xF5\xEEseashell1', '\xEE\xE5\xDEseashell2',
'\xCD\xC5\xBFseashell3', '\x8B\x86\x82seashell4',
'\xFF\xEF\xDBAntiqueWhite1', '\xEE\xDF\xCCAntiqueWhite2',
'\xCD\xC0\xB0AntiqueWhite3', '\x8B\x83\x78AntiqueWhite4',
'\xFF\xE4\xC4bisque1', '\xEE\xD5\xB7bisque2',
'\xCD\xB7\x9Ebisque3', '\x8B\x7D\x6Bbisque4',
'\xFF\xDA\xB9PeachPuff1', '\xEE\xCB\xADPeachPuff2',
'\xCD\xAF\x95PeachPuff3', '\x8B\x77\x65PeachPuff4',
'\xFF\xDE\xADNavajoWhite1', '\xEE\xCF\xA1NavajoWhite2',
'\xCD\xB3\x8BNavajoWhite3', '\x8B\x79\x5ENavajoWhite4',
'\xFF\xFA\xCDLemonChiffon1', '\xEE\xE9\xBFLemonChiffon2',
'\xCD\xC9\xA5LemonChiffon3', '\x8B\x89\x70LemonChiffon4',
'\xFF\xF8\xDCcornsilk1', '\xEE\xE8\xCDcornsilk2',
'\xCD\xC8\xB1cornsilk3', '\x8B\x88\x78cornsilk4',
'\xFF\xFF\xF0ivory1', '\xEE\xEE\xE0ivory2',
'\xCD\xCD\xC1ivory3', '\x8B\x8B\x83ivory4',
'\xF0\xFF\xF0honeydew1', '\xE0\xEE\xE0honeydew2',
'\xC1\xCD\xC1honeydew3', '\x83\x8B\x83honeydew4',
'\xFF\xF0\xF5LavenderBlush1', '\xEE\xE0\xE5LavenderBlush2',
'\xCD\xC1\xC5LavenderBlush3', '\x8B\x83\x86LavenderBlush4',
'\xFF\xE4\xE1MistyRose1', '\xEE\xD5\xD2MistyRose2',
'\xCD\xB7\xB5MistyRose3', '\x8B\x7D\x7BMistyRose4',
'\xF0\xFF\xFFazure1', '\xE0\xEE\xEEazure2',
'\xC1\xCD\xCDazure3', '\x83\x8B\x8Bazure4',
'\x83\x6F\xFFSlateBlue1', '\x7A\x67\xEESlateBlue2',
'\x69\x59\xCDSlateBlue3', '\x47\x3C\x8BSlateBlue4',
'\x48\x76\xFFRoyalBlue1', '\x43\x6E\xEERoyalBlue2',
'\x3A\x5F\xCDRoyalBlue3', '\x27\x40\x8BRoyalBlue4',
'\x00\x00\xFFblue1', '\x00\x00\xEEblue2',
'\x00\x00\xCDblue3', '\x00\x00\x8Bblue4',
'\x1E\x90\xFFDodgerBlue1', '\x1C\x86\xEEDodgerBlue2',
'\x18\x74\xCDDodgerBlue3', '\x10\x4E\x8BDodgerBlue4',
'\x63\xB8\xFFSteelBlue1', '\x5C\xAC\xEESteelBlue2',
'\x4F\x94\xCDSteelBlue3', '\x36\x64\x8BSteelBlue4',
'\x00\xBF\xFFDeepSkyBlue1', '\x00\xB2\xEEDeepSkyBlue2',
'\x00\x9A\xCDDeepSkyBlue3', '\x00\x68\x8BDeepSkyBlue4',
'\x87\xCE\xFFSkyBlue1', '\x7E\xC0\xEESkyBlue2',
'\x6C\xA6\xCDSkyBlue3', '\x4A\x70\x8BSkyBlue4',
'\xB0\xE2\xFFLightSkyBlue1', '\xA4\xD3\xEELightSkyBlue2',
'\x8D\xB6\xCDLightSkyBlue3', '\x60\x7B\x8BLightSkyBlue4',
'\xC6\xE2\xFFSlateGray1', '\xB9\xD3\xEESlateGray2',
'\x9F\xB6\xCDSlateGray3', '\x6C\x7B\x8BSlateGray4',
'\xCA\xE1\xFFLightSteelBlue1', '\xBC\xD2\xEELightSteelBlue2',
'\xA2\xB5\xCDLightSteelBlue3', '\x6E\x7B\x8BLightSteelBlue4',
'\xBF\xEF\xFFLightBlue1', '\xB2\xDF\xEELightBlue2',
'\x9A\xC0\xCDLightBlue3', '\x68\x83\x8BLightBlue4',
'\xE0\xFF\xFFLightCyan1', '\xD1\xEE\xEELightCyan2',
'\xB4\xCD\xCDLightCyan3', '\x7A\x8B\x8BLightCyan4',
'\xBB\xFF\xFFPaleTurquoise1', '\xAE\xEE\xEEPaleTurquoise2',
'\x96\xCD\xCDPaleTurquoise3', '\x66\x8B\x8BPaleTurquoise4',
'\x98\xF5\xFFCadetBlue1', '\x8E\xE5\xEECadetBlue2',
'\x7A\xC5\xCDCadetBlue3', '\x53\x86\x8BCadetBlue4',
'\x00\xF5\xFFturquoise1', '\x00\xE5\xEEturquoise2',
'\x00\xC5\xCDturquoise3', '\x00\x86\x8Bturquoise4',
'\x00\xFF\xFFcyan1', '\x00\xEE\xEEcyan2',
'\x00\xCD\xCDcyan3', '\x00\x8B\x8Bcyan4',
'\x97\xFF\xFFDarkSlateGray1', '\x8D\xEE\xEEDarkSlateGray2',
'\x79\xCD\xCDDarkSlateGray3', '\x52\x8B\x8BDarkSlateGray4',
'\x7F\xFF\xD4aquamarine1', '\x76\xEE\xC6aquamarine2',
'\x66\xCD\xAAaquamarine3', '\x45\x8B\x74aquamarine4',
| |
# Generated from Documents\THESE\pycropml_pheno\src\pycropml\antlr_grammarV4\java\java8\Java8Parser.g4 by ANTLR 4.8
from antlr4 import *
if __name__ is not None and "." in __name__:
from .Java8Parser import Java8Parser
else:
from Java8Parser import Java8Parser
# This class defines a complete generic visitor for a parse tree produced by Java8Parser.
class Java8ParserVisitor(ParseTreeVisitor):
# Visit a parse tree produced by Java8Parser#literal.
def visitLiteral(self, ctx:Java8Parser.LiteralContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by Java8Parser#primitiveType.
def visitPrimitiveType(self, ctx:Java8Parser.PrimitiveTypeContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by Java8Parser#numericType.
def visitNumericType(self, ctx:Java8Parser.NumericTypeContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by Java8Parser#integralType.
def visitIntegralType(self, ctx:Java8Parser.IntegralTypeContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by Java8Parser#floatingPointType.
def visitFloatingPointType(self, ctx:Java8Parser.FloatingPointTypeContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by Java8Parser#referenceType.
def visitReferenceType(self, ctx:Java8Parser.ReferenceTypeContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by Java8Parser#classOrInterfaceType.
def visitClassOrInterfaceType(self, ctx:Java8Parser.ClassOrInterfaceTypeContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by Java8Parser#classType.
def visitClassType(self, ctx:Java8Parser.ClassTypeContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by Java8Parser#classType_lf_classOrInterfaceType.
def visitClassType_lf_classOrInterfaceType(self, ctx:Java8Parser.ClassType_lf_classOrInterfaceTypeContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by Java8Parser#classType_lfno_classOrInterfaceType.
def visitClassType_lfno_classOrInterfaceType(self, ctx:Java8Parser.ClassType_lfno_classOrInterfaceTypeContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by Java8Parser#interfaceType.
def visitInterfaceType(self, ctx:Java8Parser.InterfaceTypeContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by Java8Parser#interfaceType_lf_classOrInterfaceType.
def visitInterfaceType_lf_classOrInterfaceType(self, ctx:Java8Parser.InterfaceType_lf_classOrInterfaceTypeContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by Java8Parser#interfaceType_lfno_classOrInterfaceType.
def visitInterfaceType_lfno_classOrInterfaceType(self, ctx:Java8Parser.InterfaceType_lfno_classOrInterfaceTypeContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by Java8Parser#typeVariable.
def visitTypeVariable(self, ctx:Java8Parser.TypeVariableContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by Java8Parser#arrayType.
def visitArrayType(self, ctx:Java8Parser.ArrayTypeContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by Java8Parser#dims.
def visitDims(self, ctx:Java8Parser.DimsContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by Java8Parser#typeParameter.
def visitTypeParameter(self, ctx:Java8Parser.TypeParameterContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by Java8Parser#typeParameterModifier.
def visitTypeParameterModifier(self, ctx:Java8Parser.TypeParameterModifierContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by Java8Parser#typeBound.
def visitTypeBound(self, ctx:Java8Parser.TypeBoundContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by Java8Parser#additionalBound.
def visitAdditionalBound(self, ctx:Java8Parser.AdditionalBoundContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by Java8Parser#typeArguments.
def visitTypeArguments(self, ctx:Java8Parser.TypeArgumentsContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by Java8Parser#typeArgumentList.
def visitTypeArgumentList(self, ctx:Java8Parser.TypeArgumentListContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by Java8Parser#typeArgument.
def visitTypeArgument(self, ctx:Java8Parser.TypeArgumentContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by Java8Parser#wildcard.
def visitWildcard(self, ctx:Java8Parser.WildcardContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by Java8Parser#wildcardBounds.
def visitWildcardBounds(self, ctx:Java8Parser.WildcardBoundsContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by Java8Parser#packageName.
def visitPackageName(self, ctx:Java8Parser.PackageNameContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by Java8Parser#typeName.
def visitTypeName(self, ctx:Java8Parser.TypeNameContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by Java8Parser#packageOrTypeName.
def visitPackageOrTypeName(self, ctx:Java8Parser.PackageOrTypeNameContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by Java8Parser#expressionName.
def visitExpressionName(self, ctx:Java8Parser.ExpressionNameContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by Java8Parser#methodName.
def visitMethodName(self, ctx:Java8Parser.MethodNameContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by Java8Parser#ambiguousName.
def visitAmbiguousName(self, ctx:Java8Parser.AmbiguousNameContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by Java8Parser#compilationUnit.
def visitCompilationUnit(self, ctx:Java8Parser.CompilationUnitContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by Java8Parser#packageDeclaration.
def visitPackageDeclaration(self, ctx:Java8Parser.PackageDeclarationContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by Java8Parser#packageModifier.
def visitPackageModifier(self, ctx:Java8Parser.PackageModifierContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by Java8Parser#importDeclaration.
def visitImportDeclaration(self, ctx:Java8Parser.ImportDeclarationContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by Java8Parser#singleTypeImportDeclaration.
def visitSingleTypeImportDeclaration(self, ctx:Java8Parser.SingleTypeImportDeclarationContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by Java8Parser#typeImportOnDemandDeclaration.
def visitTypeImportOnDemandDeclaration(self, ctx:Java8Parser.TypeImportOnDemandDeclarationContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by Java8Parser#singleStaticImportDeclaration.
def visitSingleStaticImportDeclaration(self, ctx:Java8Parser.SingleStaticImportDeclarationContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by Java8Parser#staticImportOnDemandDeclaration.
def visitStaticImportOnDemandDeclaration(self, ctx:Java8Parser.StaticImportOnDemandDeclarationContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by Java8Parser#typeDeclaration.
def visitTypeDeclaration(self, ctx:Java8Parser.TypeDeclarationContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by Java8Parser#classDeclaration.
def visitClassDeclaration(self, ctx:Java8Parser.ClassDeclarationContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by Java8Parser#normalClassDeclaration.
def visitNormalClassDeclaration(self, ctx:Java8Parser.NormalClassDeclarationContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by Java8Parser#classModifier.
def visitClassModifier(self, ctx:Java8Parser.ClassModifierContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by Java8Parser#typeParameters.
def visitTypeParameters(self, ctx:Java8Parser.TypeParametersContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by Java8Parser#typeParameterList.
def visitTypeParameterList(self, ctx:Java8Parser.TypeParameterListContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by Java8Parser#superclass.
def visitSuperclass(self, ctx:Java8Parser.SuperclassContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by Java8Parser#superinterfaces.
def visitSuperinterfaces(self, ctx:Java8Parser.SuperinterfacesContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by Java8Parser#interfaceTypeList.
def visitInterfaceTypeList(self, ctx:Java8Parser.InterfaceTypeListContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by Java8Parser#classBody.
def visitClassBody(self, ctx:Java8Parser.ClassBodyContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by Java8Parser#classBodyDeclaration.
def visitClassBodyDeclaration(self, ctx:Java8Parser.ClassBodyDeclarationContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by Java8Parser#classMemberDeclaration.
def visitClassMemberDeclaration(self, ctx:Java8Parser.ClassMemberDeclarationContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by Java8Parser#fieldDeclaration.
def visitFieldDeclaration(self, ctx:Java8Parser.FieldDeclarationContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by Java8Parser#fieldModifier.
def visitFieldModifier(self, ctx:Java8Parser.FieldModifierContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by Java8Parser#variableDeclaratorList.
def visitVariableDeclaratorList(self, ctx:Java8Parser.VariableDeclaratorListContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by Java8Parser#variableDeclarator.
def visitVariableDeclarator(self, ctx:Java8Parser.VariableDeclaratorContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by Java8Parser#variableDeclaratorId.
def visitVariableDeclaratorId(self, ctx:Java8Parser.VariableDeclaratorIdContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by Java8Parser#variableInitializer.
def visitVariableInitializer(self, ctx:Java8Parser.VariableInitializerContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by Java8Parser#unannType.
def visitUnannType(self, ctx:Java8Parser.UnannTypeContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by Java8Parser#unannPrimitiveType.
def visitUnannPrimitiveType(self, ctx:Java8Parser.UnannPrimitiveTypeContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by Java8Parser#unannReferenceType.
def visitUnannReferenceType(self, ctx:Java8Parser.UnannReferenceTypeContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by Java8Parser#unannClassOrInterfaceType.
def visitUnannClassOrInterfaceType(self, ctx:Java8Parser.UnannClassOrInterfaceTypeContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by Java8Parser#unannClassType.
def visitUnannClassType(self, ctx:Java8Parser.UnannClassTypeContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by Java8Parser#unannClassType_lf_unannClassOrInterfaceType.
def visitUnannClassType_lf_unannClassOrInterfaceType(self, ctx:Java8Parser.UnannClassType_lf_unannClassOrInterfaceTypeContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by Java8Parser#unannClassType_lfno_unannClassOrInterfaceType.
def visitUnannClassType_lfno_unannClassOrInterfaceType(self, ctx:Java8Parser.UnannClassType_lfno_unannClassOrInterfaceTypeContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by Java8Parser#unannInterfaceType.
def visitUnannInterfaceType(self, ctx:Java8Parser.UnannInterfaceTypeContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by Java8Parser#unannInterfaceType_lf_unannClassOrInterfaceType.
def visitUnannInterfaceType_lf_unannClassOrInterfaceType(self, ctx:Java8Parser.UnannInterfaceType_lf_unannClassOrInterfaceTypeContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by Java8Parser#unannInterfaceType_lfno_unannClassOrInterfaceType.
def visitUnannInterfaceType_lfno_unannClassOrInterfaceType(self, ctx:Java8Parser.UnannInterfaceType_lfno_unannClassOrInterfaceTypeContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by Java8Parser#unannTypeVariable.
def visitUnannTypeVariable(self, ctx:Java8Parser.UnannTypeVariableContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by Java8Parser#unannArrayType.
def visitUnannArrayType(self, ctx:Java8Parser.UnannArrayTypeContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by Java8Parser#methodDeclaration.
def visitMethodDeclaration(self, ctx:Java8Parser.MethodDeclarationContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by Java8Parser#methodModifier.
def visitMethodModifier(self, ctx:Java8Parser.MethodModifierContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by Java8Parser#methodHeader.
def visitMethodHeader(self, ctx:Java8Parser.MethodHeaderContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by Java8Parser#result.
def visitResult(self, ctx:Java8Parser.ResultContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by Java8Parser#methodDeclarator.
def visitMethodDeclarator(self, ctx:Java8Parser.MethodDeclaratorContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by Java8Parser#formalParameterList.
def visitFormalParameterList(self, ctx:Java8Parser.FormalParameterListContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by Java8Parser#formalParameters.
def visitFormalParameters(self, ctx:Java8Parser.FormalParametersContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by Java8Parser#formalParameter.
def visitFormalParameter(self, ctx:Java8Parser.FormalParameterContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by Java8Parser#variableModifier.
def visitVariableModifier(self, ctx:Java8Parser.VariableModifierContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by Java8Parser#lastFormalParameter.
def visitLastFormalParameter(self, ctx:Java8Parser.LastFormalParameterContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by Java8Parser#receiverParameter.
def visitReceiverParameter(self, ctx:Java8Parser.ReceiverParameterContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by Java8Parser#throws_.
def visitThrows_(self, ctx:Java8Parser.Throws_Context):
return self.visitChildren(ctx)
# Visit a parse tree produced by Java8Parser#exceptionTypeList.
def visitExceptionTypeList(self, ctx:Java8Parser.ExceptionTypeListContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by Java8Parser#exceptionType.
def visitExceptionType(self, ctx:Java8Parser.ExceptionTypeContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by Java8Parser#methodBody.
def visitMethodBody(self, ctx:Java8Parser.MethodBodyContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by Java8Parser#instanceInitializer.
def visitInstanceInitializer(self, ctx:Java8Parser.InstanceInitializerContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by Java8Parser#staticInitializer.
def visitStaticInitializer(self, ctx:Java8Parser.StaticInitializerContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by Java8Parser#constructorDeclaration.
def visitConstructorDeclaration(self, ctx:Java8Parser.ConstructorDeclarationContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by Java8Parser#constructorModifier.
def visitConstructorModifier(self, ctx:Java8Parser.ConstructorModifierContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by Java8Parser#constructorDeclarator.
def visitConstructorDeclarator(self, ctx:Java8Parser.ConstructorDeclaratorContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by Java8Parser#simpleTypeName.
def visitSimpleTypeName(self, ctx:Java8Parser.SimpleTypeNameContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by Java8Parser#constructorBody.
def visitConstructorBody(self, ctx:Java8Parser.ConstructorBodyContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by Java8Parser#explicitConstructorInvocation.
def visitExplicitConstructorInvocation(self, ctx:Java8Parser.ExplicitConstructorInvocationContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by Java8Parser#enumDeclaration.
def visitEnumDeclaration(self, ctx:Java8Parser.EnumDeclarationContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by Java8Parser#enumBody.
def visitEnumBody(self, ctx:Java8Parser.EnumBodyContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by Java8Parser#enumConstantList.
def visitEnumConstantList(self, ctx:Java8Parser.EnumConstantListContext):
return self.visitChildren(ctx)
# Visit a parse | |
# Source repository: zhuyongyong/crosswalk-test-suite
#!/usr/bin/env python
#
# Copyright (c) 2015 Intel Corporation.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of works must retain the original copyright notice, this
# list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the original copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of Intel Corporation nor the names of its contributors
# may be used to endorse or promote products derived from this work without
# specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY INTEL CORPORATION "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL INTEL CORPORATION BE LIABLE FOR ANY DIRECT,
# INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
# OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
# EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Authors:
# <NAME><<EMAIL>>
import unittest
import os
import comm
import zipfile
import shutil
from xml.etree import ElementTree
import json
class TestCrosswalkApptoolsFunctions(unittest.TestCase):
def test_create_package_target_bit_crosswalkzip(self):
comm.setUp()
os.chdir(comm.XwalkPath)
comm.clear("org.xwalk.test")
os.mkdir("org.xwalk.test")
os.chdir('org.xwalk.test')
cmd = comm.HOST_PREFIX + comm.PackTools + \
"crosswalk-pkg --platforms=android --android=" + comm.ANDROID_MODE + " --crosswalk=" + comm.crosswalkzip + ' --targets="32 64" ' + comm.ConstPath + "/../testapp/create_package_basic/"
return_code = os.system(cmd)
apks = os.listdir(os.getcwd())
x86Length = 0
x86_64Length = 0
armLength = 0
arm_64Length = 0
apkLength = 0
if comm.MODE != " --android-shared":
for i in range(len(apks)):
if apks[i].endswith(".apk") and "x86" in apks[i]:
if "64" in apks[i]:
x86_64Length = x86_64Length + 1
else:
x86Length = x86Length + 1
if apks[i].endswith(".apk") and "arm" in apks[i]:
if "64" in apks[i]:
arm_64Length = arm_64Length + 1
else:
armLength = armLength + 1
if comm.BIT == "64":
self.assertEquals(x86_64Length, 1)
self.assertEquals(arm_64Length, 1)
self.assertEquals(x86Length, 0)
self.assertEquals(armLength, 0)
else:
self.assertEquals(x86_64Length, 0)
self.assertEquals(arm_64Length, 0)
self.assertEquals(x86Length, 1)
self.assertEquals(armLength, 1)
else:
for i in range(len(apks)):
if apks[i].endswith(".apk") and "shared" in apks[i]:
apkLength = apkLength + 1
appVersion = apks[i].split('-')[1]
self.assertEquals(apkLength, 1)
comm.run(self)
comm.clear("org.xwalk.test")
self.assertEquals(return_code, 0)
def test_create_package_target_bit_crosswalkdir(self):
comm.setUp()
os.chdir(comm.XwalkPath)
comm.clear("org.xwalk.test")
os.mkdir("org.xwalk.test")
crosswalkdir = zipfile.ZipFile(comm.crosswalkzip,'r')
for file in crosswalkdir.namelist():
crosswalkdir.extract(file, r'.')
crosswalkdir.close()
os.chdir('org.xwalk.test')
cmd = comm.HOST_PREFIX + comm.PackTools + \
"crosswalk-pkg --platforms=android --android=" + comm.ANDROID_MODE + " --crosswalk=" + comm.crosswalkzip[:comm.crosswalkzip.index(".zip")] + ' --targets="32 64" ' + comm.ConstPath + "/../testapp/create_package_basic/"
return_code = os.system(cmd)
apks = os.listdir(os.getcwd())
x86Length = 0
x86_64Length = 0
armLength = 0
arm_64Length = 0
apkLength = 0
if comm.MODE != " --android-shared":
for i in range(len(apks)):
if apks[i].endswith(".apk") and "x86" in apks[i]:
if "64" in apks[i]:
x86_64Length = x86_64Length + 1
else:
x86Length = x86Length + 1
if apks[i].endswith(".apk") and "arm" in apks[i]:
if "64" in apks[i]:
arm_64Length = arm_64Length + 1
else:
armLength = armLength + 1
if comm.BIT == "64":
self.assertEquals(x86_64Length, 1)
self.assertEquals(arm_64Length, 1)
self.assertEquals(x86Length, 0)
self.assertEquals(armLength, 0)
else:
self.assertEquals(x86_64Length, 0)
self.assertEquals(arm_64Length, 0)
self.assertEquals(x86Length, 1)
self.assertEquals(armLength, 1)
else:
for i in range(len(apks)):
if apks[i].endswith(".apk") and "shared" in apks[i]:
apkLength = apkLength + 1
appVersion = apks[i].split('-')[1]
self.assertEquals(apkLength, 1)
comm.run(self)
comm.clear("org.xwalk.test")
shutil.rmtree(comm.crosswalkzip[:comm.crosswalkzip.index(".zip")])
self.assertEquals(return_code, 0)
def test_create_package_target_bit(self):
comm.setUp()
os.chdir(comm.XwalkPath)
comm.clear("org.xwalk.test")
os.mkdir("org.xwalk.test")
os.chdir('org.xwalk.test')
cmd = comm.HOST_PREFIX + comm.PackTools + \
"crosswalk-pkg --platforms=android --android=" + comm.ANDROID_MODE + ' --targets="' + comm.BIT + '" -c canary ' + comm.ConstPath + "/../testapp/create_package_basic/"
return_code = os.system(cmd)
apks = os.listdir(os.getcwd())
x86Length = 0
x86_64Length = 0
armLength = 0
arm_64Length = 0
apkLength = 0
if comm.MODE != " --android-shared":
for i in range(len(apks)):
if apks[i].endswith(".apk") and "x86" in apks[i]:
if "64" in apks[i]:
x86_64Length = x86_64Length + 1
else:
x86Length = x86Length + 1
if apks[i].endswith(".apk") and "arm" in apks[i]:
if "64" in apks[i]:
arm_64Length = arm_64Length + 1
else:
armLength = armLength + 1
if comm.BIT == "64":
self.assertEquals(x86_64Length, 1)
self.assertEquals(arm_64Length, 1)
self.assertEquals(x86Length, 0)
self.assertEquals(armLength, 0)
else:
self.assertEquals(x86_64Length, 0)
self.assertEquals(arm_64Length, 0)
self.assertEquals(x86Length, 1)
self.assertEquals(armLength, 1)
else:
for i in range(len(apks)):
if apks[i].endswith(".apk") and "shared" in apks[i]:
apkLength = apkLength + 1
appVersion = apks[i].split('-')[1]
self.assertEquals(apkLength, 1)
comm.run(self)
comm.clear("org.xwalk.test")
self.assertEquals(return_code, 0)
def test_create_package_target_ar_x8(self):
comm.setUp()
os.chdir(comm.XwalkPath)
comm.clear("org.xwalk.test")
os.mkdir("org.xwalk.test")
os.chdir('org.xwalk.test')
cmd = comm.HOST_PREFIX + comm.PackTools + \
"crosswalk-pkg --platforms=android --android=" + comm.ANDROID_MODE + ' --targets="ar x8" -c canary ' + comm.ConstPath + "/../testapp/create_package_basic/"
return_code = os.system(cmd)
apks = os.listdir(os.getcwd())
x86Length = 0
x86_64Length = 0
armLength = 0
arm_64Length = 0
apkLength = 0
if comm.MODE != " --android-shared":
for i in range(len(apks)):
if apks[i].endswith(".apk") and "x86" in apks[i]:
if "64" in apks[i]:
x86_64Length = x86_64Length + 1
else:
x86Length = x86Length + 1
if apks[i].endswith(".apk") and "arm" in apks[i]:
if "64" in apks[i]:
arm_64Length = arm_64Length + 1
else:
armLength = armLength + 1
self.assertEquals(x86_64Length, 1)
self.assertEquals(arm_64Length, 1)
self.assertEquals(x86Length, 1)
self.assertEquals(armLength, 1)
else:
for i in range(len(apks)):
if apks[i].endswith(".apk") and "shared" in apks[i]:
apkLength = apkLength + 1
appVersion = apks[i].split('-')[1]
self.assertEquals(apkLength, 1)
comm.clear("org.xwalk.test")
self.assertEquals(return_code, 0)
def test_create_package_target_arm_x86(self):
comm.setUp()
os.chdir(comm.XwalkPath)
comm.clear("org.xwalk.test")
os.mkdir("org.xwalk.test")
os.chdir('org.xwalk.test')
cmd = comm.HOST_PREFIX + comm.PackTools + \
"crosswalk-pkg --platforms=android --android=" + comm.ANDROID_MODE + ' --targets="arm x86" -c canary ' + comm.ConstPath + "/../testapp/create_package_basic/"
return_code = os.system(cmd)
apks = os.listdir(os.getcwd())
x86Length = 0
x86_64Length = 0
armLength = 0
arm_64Length = 0
apkLength = 0
if comm.MODE != " --android-shared":
for i in range(len(apks)):
if apks[i].endswith(".apk") and "x86" in apks[i]:
if "64" in apks[i]:
x86_64Length = x86_64Length + 1
else:
x86Length = x86Length + 1
if apks[i].endswith(".apk") and "arm" in apks[i]:
if "64" in apks[i]:
arm_64Length = arm_64Length + 1
else:
armLength = armLength + 1
self.assertEquals(x86_64Length, 0)
self.assertEquals(arm_64Length, 1)
self.assertEquals(x86Length, 1)
self.assertEquals(armLength, 1)
else:
for i in range(len(apks)):
if apks[i].endswith(".apk") and "shared" in apks[i]:
apkLength = apkLength + 1
appVersion = apks[i].split('-')[1]
self.assertEquals(apkLength, 1)
comm.clear("org.xwalk.test")
self.assertEquals(return_code, 0)
def test_create_package_target_arm(self):
comm.setUp()
os.chdir(comm.XwalkPath)
comm.clear("org.xwalk.test")
os.mkdir("org.xwalk.test")
os.chdir('org.xwalk.test')
cmd = comm.HOST_PREFIX + comm.PackTools + \
"crosswalk-pkg --platforms=android --android=" + comm.ANDROID_MODE + ' --targets="arm" -c canary ' + comm.ConstPath + "/../testapp/create_package_basic/"
return_code = os.system(cmd)
apks = os.listdir(os.getcwd())
x86Length = 0
x86_64Length = 0
armLength = 0
arm_64Length = 0
apkLength = 0
if comm.MODE != " --android-shared":
for i in range(len(apks)):
if apks[i].endswith(".apk") and "x86" in apks[i]:
if "64" in apks[i]:
x86_64Length = x86_64Length + 1
else:
x86Length = x86Length + 1
if apks[i].endswith(".apk") and "arm" in apks[i]:
if "64" in apks[i]:
arm_64Length = arm_64Length + 1
else:
armLength = armLength + 1
self.assertEquals(x86_64Length, 0)
self.assertEquals(arm_64Length, 1)
self.assertEquals(x86Length, 0)
self.assertEquals(armLength, 1)
else:
for i in range(len(apks)):
if apks[i].endswith(".apk") and "shared" in apks[i]:
apkLength = apkLength + 1
appVersion = apks[i].split('-')[1]
self.assertEquals(apkLength, 1)
comm.clear("org.xwalk.test")
self.assertEquals(return_code, 0)
def test_create_package_target_invalid(self):
comm.setUp()
os.chdir(comm.XwalkPath)
comm.clear("org.xwalk.test")
os.mkdir("org.xwalk.test")
os.chdir('org.xwalk.test')
cmd = comm.HOST_PREFIX + comm.PackTools + \
"crosswalk-pkg --platforms=android --android=" + comm.ANDROID_MODE + ' --targets="invalid" -c canary ' + comm.ConstPath + "/../testapp/create_package_basic/"
return_code = os.system(cmd)
apks = os.listdir(os.getcwd())
x86Length = 0
x86_64Length = 0
armLength = 0
arm_64Length = 0
apkLength = 0
if comm.MODE != " --android-shared":
for i in range(len(apks)):
if apks[i].endswith(".apk") and "x86" in apks[i]:
if "64" in apks[i]:
x86_64Length = x86_64Length + 1
else:
x86Length = x86Length + 1
if apks[i].endswith(".apk") and "arm" in apks[i]:
if "64" in apks[i]:
arm_64Length = arm_64Length + 1
else:
armLength = armLength + 1
self.assertEquals(x86_64Length, 0)
self.assertEquals(arm_64Length, 0)
self.assertEquals(x86Length, 1)
self.assertEquals(armLength, 1)
else:
for i in range(len(apks)):
if apks[i].endswith(".apk") and "shared" in apks[i]:
apkLength = apkLength + 1
appVersion = apks[i].split('-')[1]
self.assertEquals(apkLength, 1)
comm.clear("org.xwalk.test")
self.assertEquals(return_code, 0)
def test_create_package_target_armeabi_x8(self):
comm.setUp()
os.chdir(comm.XwalkPath)
comm.clear("org.xwalk.test")
os.mkdir("org.xwalk.test")
os.chdir('org.xwalk.test')
cmd = comm.HOST_PREFIX + comm.PackTools + \
"crosswalk-pkg --platforms=android --android=" + comm.ANDROID_MODE + ' -t "armeabi-v7a x8" -c canary ' + comm.ConstPath + "/../testapp/create_package_basic/"
return_code = os.system(cmd)
apks = os.listdir(os.getcwd())
x86Length = 0
x86_64Length = 0
armLength = 0
arm_64Length = 0
apkLength = 0
if comm.MODE != | |
sigma_x = np.sqrt(self.random_walk * (x_range - 1) + 1) * sigma
sigma_x2 = sigma_x ** 2
a_x = 1 / np.sqrt(2 * np.pi * sigma_x2)
for i in range(len(mu_x)):
conf_ = np.ceil(3 * sigma_x[i]) # a 3-sigma window covers ~99.7% of the Gaussian pdf.
x = np.arange(int(max(1, mu_x[i] - conf_)), int(mu_x[i] + conf_ + 1), 1, dtype=int)
x = x[np.where(x < self.loss_length)]
gmu[:, x] += (a_x[i] * np.exp((-0.5 * (x - mu_x[i]) ** 2) / sigma_x2[i])) * b_x[i]
return gmu
def get_loss(self, dmu, gmu=[], mu_list=[], sigma_list=[]):
"""
Estimate the loss for a list of mu/sigma.
:param dmu: np array of dim=(1, loss_length), observed interval frequencies
:param gmu: np array of dim=(len(mu_list), dmu.shape[1]); computed from mu_list/sigma_list if empty
:param mu_list: list of int
:param sigma_list: list of int
:return: normalised L1 error, sum|dmu - sum_i gmu_i| / sum|dmu|
"""
if len(gmu) == 0:
gmu = self.get_gmu(mu_list, sigma_list)
diff_dmu_pmu = dmu - gmu.sum(axis=0)
loss = np.sum(np.abs(diff_dmu_pmu), axis=1) / np.sum(np.abs(dmu), axis=1)
return loss
def get_gmu(self, mu_list, sigma_list):
"""
Estimate gmu for a list of mu/sigma.
:param mu_list: list of int
:param sigma_list: list of int
:return: np array of dim=(len(mu_list), loss_length), one Gaussian mixture row per mu
"""
gmu = np.zeros((len(mu_list), self.loss_length))
for i in range(len(mu_list)):
gmu[i, :] = self.gauss_pdf_explain(mu_list[i], sigma_list[i])
return gmu
def get_sigma_init(self, dmu, mu_list, sigma_list):
"""
Optimize sigma for every single mu.
:param dmu:
:param mu_list:
:return: list
"""
sigma_new = []
for mu, sigma in zip(mu_list, sigma_list):
sigma = self.get_sigma(dmu, [mu], [sigma])
sigma_new.extend(sigma)
return sigma_new
def get_sigma(self, dmu, mu_list, sigma_list):
"""
Use curvefit to improve the guess of sigma.
:param dmu: np array of dim (1, self.loss_length)
:param mu_list: sorted list
:param sigma_list: list
:return: list
"""
mu_max = mu_list[-1]
mu_min = mu_list[0]
# 3 * sigma covers ~99.7% of the pdf; +1 makes loss_length_ inclusive
loss_length_ = int(min(mu_max + np.ceil(3 * sigma_list[-1] + 1), dmu.shape[1]))
st = int(max((mu_min - 0.75 * mu_min), 0))
end = int(min((mu_max + 0.75 * mu_max + 1), dmu.shape[1]))
y = dmu[:, 0:loss_length_][0]
y = dmu[:, st:end][0]
x = np.arange(st, end)
init_params = []
mu_fc = []
b_up = []
for mu, sigma in zip(mu_list, sigma_list):
off_set = np.floor(mu_max / mu).astype("int")
for i in range(off_set):
mu_fc.append(float(mu * (i + 1)))
init_params.append(max(min(float(sigma), np.ceil(np.log(mu_max)) - 1), 1))
init_params.append(1.0)
b_up.append(0.25 * mu * (i + 1))
b_up.append(np.inf)
# Lower bounds
b_low = np.ones((1, len(init_params)))
b_low[0, 1::2] = 0.0 # for a, set lower bound to zero, no upper bound as dmu is not normalized.
# Fit dmu curve
fc = FitClass()
fc.mu = mu_fc
params, _ = curve_fit(fc.multi_modal, x, y, init_params, bounds=([b_low.tolist()[0], b_up]))
results = np.zeros((int(len(params) / 2), 2))
for i in range(int(len(params) / 2)):
row = i * 2
results[i, :] = [params[row], params[row + 1]]
off_set = 0
sigma_new = []
for mu in mu_list:
sigma_new.append(np.round(results[off_set, 0]))
off_set += np.floor(mu_max / mu).astype("int")
return sigma_new
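# Hedged note (assumption; FitClass is defined elsewhere in this project): given how
# init_params and mu_fc are built above, multi_modal(x, *params) is presumably a sum of
# Gaussian bumps, one per entry of fc.mu, with params alternating (sigma_k, a_k), i.e.
# roughly sum_k a_k * exp(-(x - mu_k)**2 / (2 * sigma_k**2)); curve_fit then refines the
# sigma of each base period while its multiples absorb the higher-order peaks of dmu.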
@staticmethod
def get_mod_mu(mu_list, pmu):
"""
Remove all multiples of a periodicity mu, except multiples whose pmu is at least as high as that of the base period.
:param mu_list: list of candidate periodicities
:param pmu: list of explanatory power values, one per mu
:return: mu_list: periodicities with redundant multiples removed (not guaranteed to be sorted; the caller sorts afterwards)
"""
mu_list, pmu = (list(t) for t in zip(*sorted(zip(mu_list, pmu), reverse=False)))
mu_pmu = {k: l for k, l in zip(mu_list, pmu)}
mu_pmu_red = mu_pmu.copy()
mu_list = []
for k in mu_pmu.keys():
mu_mod = {i: v for i, v in mu_pmu_red.items() if not i % k}
if mu_mod:
for key in mu_mod:
mu_pmu_red.pop(key)
# keep multiples whose pmu is >= the pmu of the smallest multiple (the base period)
values = list(mu_mod.values())
mu_keep = {k: v for k, v in mu_mod.items() if v >= values[0]}
mu_list.extend(list(mu_keep.keys()))
if not mu_pmu_red:
break
return mu_list
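# Hedged worked example (comments only, not from the original source): for
# mu_list=[2, 4, 8, 3] with pmu=[0.5, 0.7, 0.3, 0.6] the method keeps [2, 4, 3] and
# drops 8, because 8 is a multiple of 2 whose pmu (0.3) is below that of the base
# period 2 (0.5), while 4 survives since its pmu (0.7) is at least as high.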
def initialize_periods(self, dmu):
"""
Use step function support instead of Gaussian to pre-compute the set of periodicities.
:param dmu:
:return: sorted tuple of numpy ndarray: mu_list - contains all periodicities, sorted
"""
mu_list = []
pmu_set = []
for iteration in range(self.max_depth):
# Integrate across dmu
tau_mu = self.integral_convolution(dmu.copy())
if iteration == 0:
tau_mu_init = tau_mu
# Sort and get the indices of max_candidates values from tau_mu
tau_mu = self.round_down(tau_mu, 6)
top_mu = np.argsort(-1 * tau_mu[0, : self.mu_range_max + 1], kind="mergesort") # sort
top_mu = top_mu[np.array(top_mu) >= self.mu_range_min][: self.max_candidates] # truncate
top_mu = np.intersect1d(top_mu, np.where(tau_mu[0, :] > 0)[0]) # exclude mu with tau zero
mu_list.extend(top_mu)
# Estimate how much top_mu can explain, top_mu[p]/loss_length is used to get absolute Pmu for all mu
len_top_mu = len(top_mu)
pmu = np.zeros((1, len_top_mu))
if len_top_mu > 0:
for p in range(len_top_mu):
pmu[:, p] = self.explain_data(tau_mu.copy(), top_mu[p]) * (
top_mu[p] / self.loss_length
) # ATTENTION, keep () otherwise rounding issues.
pmu_set.extend(pmu[:, p])
# Remove which can be explained by the best
idx_max = np.argmax(pmu[0, :])
dmu = self.remove_explained(dmu, top_mu[idx_max])
else:
print("Reduce max_depth or sigma")
mu_list = self.get_mod_mu(mu_list, pmu_set)
mu_list.sort()
return mu_list, tau_mu_init
def get_best_combination(self, dmu, gmu, mu_list, sigma_list, loss_best):
"""
Get the optimal set of combinations of all periodicities.
:param dmu: numpy ndarray, (1,loss_length), frequencies of intervals
:param gmu: numpy ndarray, (1,loss_length), frequencies of intervals wrt generative model
:param mu_list: list of top mu
:param loss_best: float
:return: list, float
"""
if self.max_periods > len(mu_list):
self.max_periods = len(mu_list)
mu_tmp = si_tmp = gmu_tmp = []
mu_best = si_best = gmu_best = []
for num_of_periods in range(1, self.max_periods + 1):
loss_origin = loss_best
for idx in itertools.combinations(range(len(mu_list)), num_of_periods):
# test for overlapping mu/sigmas
mu_idx = [mu_list[i] for i in idx]
si_idx = [sigma_list[i] for i in idx]
sigma_plus = [s1 + s2 for s1, s2 in zip(si_idx[:-1], si_idx[1:])]
if (np.diff(mu_idx) > sigma_plus).all():
loss = self.get_loss(dmu, gmu=gmu[idx, :])
if loss < loss_best:
loss_best = loss
mu_tmp, si_tmp, gmu_tmp = mu_idx, si_idx, gmu[idx, :]
if loss_best < loss_origin - self.loss_change_tol:
loss_origin = loss_best
mu_best, si_best, gmu_best = mu_tmp, si_tmp, gmu_tmp
return gmu_best, mu_best, loss_origin, si_best
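# Hedged note (not from the original source): the overlap test above rejects a candidate
# combination unless consecutive periods are separated by more than the sum of their
# sigmas, e.g. mu_idx=[7, 12] with si_idx=[2, 3] gives diff=[5] and sigma_plus=[5],
# so 5 > 5 fails and that pair is skipped.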
def extract_periods(self, ts=None, verbose=True):
"""
GMPDA Algo for periodicity extraction.
:param self:
:param ts, numpy ndarray, series of events
:param verbose, boolean
:return:
mu_list: list of top extracted periodicities
dmu: numpy ndarray, (1,loss_length), frequencies of intervals
tau_mu: numpy ndarray, (1,loss_length), smoothed frequencies of intervals
gmu_best: numpy ndarray, Gaussian mixture of the best combination
loss_best: float, loss obtained wrt mu_list
sigma_list: list, sigmas for mu_list
"""
def printv(*txt):
if verbose:
print(txt[0])
printv("==" * 25)
printv("GMPDA STARTS")
###################################################################
# 0. Calculate reference loss if requested.
###################################################################
ref_loss = np.NaN
if self.ref_loss_n > 0:
ref_loss = self.get_ref_loss(self.ref_loss_n)
printv(
f"Reference Loss: min={min(ref_loss)}, 0.01 quantile={np.quantile(ref_loss,0.01)}, "
f"0.05 quantile={np.quantile(ref_loss,0.05)}"
)
###################################################################
# 1. If ts is not none update initialization
###################################################################
if ts is not None:
self.reinit_ts(ts_=ts)
###################################################################
# 2. Compute Intervals
###################################################################
dmu, dmu_init = self.get_intervals()
loss_best = np.array(np.finfo("f").max)
gmu_best = []
###################################################################
# 3. Initialize periods
###################################################################
mu_list, tau_mu = self.initialize_periods(dmu.copy())
###################################################################
# 4. Initialize sigma
###################################################################
if self.sigma_log_init:
sigma_list = np.ceil(np.log(mu_list))
else:
sigma_list = [self.sigma] * len(mu_list)
idx = np.where(sigma_list > mu_list)[0]
for i in idx:
sigma_list[i] = int(np.ceil(np.log(mu_list[i])))
# Check if sigma==0, replace by 1
sigma_list = [1 if x == 0 else x for x in sigma_list]
###################################################################
# 5. Check if the data has variation, else GMPDA is not appropriate
###################################################################
if len(mu_list) == 0:
printv("No periods could be found")
return mu_list, dmu, tau_mu, gmu_best, loss_best, [sigma_list]
elif len(mu_list) == 1 and (sum(dmu - tau_mu) == 0).all():
printv("There is one period, with sigma=0")
printv("GMPDA FINISHED")
printv("Best obtained mu {}, loss {}, sigma {}".format(mu_list, 0, 0))
printv("==" * 25)
return mu_list, dmu, tau_mu, gmu_best, loss_best, [sigma_list]
elif (sum(dmu - tau_mu) == 0).all():
printv("Warning: It seems there is no randomness in the process, i.e, sigma=0")
printv("Top selected mu {}".format(mu_list[0 : self.max_candidates]))
###################################################################
# 6. Optimize sigma for all candidate mu
###################################################################
if self.sigma_curvefit:
try:
sigma_list = self.get_sigma_init(dmu, mu_list, sigma_list)
printv("Sigma optimized via Trust Region Reflective Algo.")
except Exception as e:
printv(f"Could not find optimal sigma using TRF, Error message: {str(e)}")
###################################################################
# 7. Compute gaussian mixture gmu for each candidate periodicity
###################################################################
gmu = self.get_gmu(mu_list, sigma_list)
###################################################################
# 8. Find combination of periodicities which minimize loss
###################################################################
gmu_best, mu_list, loss_best, sigma_list = self.get_best_combination(dmu, gmu, mu_list, sigma_list, loss_best)
###################################################################
# 9. Update loss and sigma
def test_guess_nonlinear_feature(self):
import openmdao.api as om
class Discipline(om.Group):
def setup(self):
self.add_subsystem('comp0', om.ExecComp('y=x**2'))
self.add_subsystem('comp1', om.ExecComp('z=2*external_input'),
promotes_inputs=['external_input'])
self.add_subsystem('balance', om.BalanceComp('x', lhs_name='y', rhs_name='z'),
promotes_outputs=['x'])
self.connect('comp0.y', 'balance.y')
self.connect('comp1.z', 'balance.z')
self.connect('x', 'comp0.x')
self.nonlinear_solver = om.NewtonSolver(iprint=2, solve_subsystems=True)
self.linear_solver = om.DirectSolver()
def guess_nonlinear(self, inputs, outputs, residuals):
# inputs are addressed using full path name, regardless of promotion
external_input = inputs['comp1.external_input']
# balance drives x**2 = 2*external_input
x_guess = (2*external_input)**.5
# outputs are addressed by their promoted names
outputs['x'] = x_guess # perfect guess should converge in 0 iterations
p = om.Problem()
p.model.add_subsystem('parameters', om.IndepVarComp('input_value', 1.))
p.model.add_subsystem('discipline', Discipline())
p.model.connect('parameters.input_value', 'discipline.external_input')
p.setup()
p.run_model()
self.assertEqual(p.model.nonlinear_solver._iter_count, 0)
assert_rel_error(self, p['discipline.x'], 1.41421356, 1e-6)
def test_guess_nonlinear_complex_step(self):
class Discipline(om.Group):
def setup(self):
self.add_subsystem('comp0', om.ExecComp('y=x**2'))
self.add_subsystem('comp1', om.ExecComp('z=2*external_input'),
promotes_inputs=['external_input'])
self.add_subsystem('balance', om.BalanceComp('x', lhs_name='y', rhs_name='z'),
promotes_outputs=['x'])
self.connect('comp0.y', 'balance.y')
self.connect('comp1.z', 'balance.z')
self.connect('x', 'comp0.x')
self.nonlinear_solver = om.NewtonSolver(iprint=2, solve_subsystems=True)
self.linear_solver = om.DirectSolver()
def guess_nonlinear(self, inputs, outputs, residuals):
if outputs._data.dtype == np.complex:
raise RuntimeError('Vector should not be complex when guess_nonlinear is called.')
# inputs are addressed using full path name, regardless of promotion
external_input = inputs['comp1.external_input']
# balance drives x**2 = 2*external_input
x_guess = (2*external_input)**.5
# outputs are addressed by their promoted names
outputs['x'] = x_guess # perfect guess should converge in 0 iterations
p = om.Problem()
p.model.add_subsystem('parameters', om.IndepVarComp('input_value', 1.))
p.model.add_subsystem('discipline', Discipline())
p.model.connect('parameters.input_value', 'discipline.external_input')
p.setup(force_alloc_complex=True)
p.run_model()
self.assertEqual(p.model.nonlinear_solver._iter_count, 0)
assert_rel_error(self, p['discipline.x'], 1.41421356, 1e-6)
totals = p.check_totals(of=['discipline.comp1.z'], wrt=['parameters.input_value'], method='cs', out_stream=None)
for key, val in iteritems(totals):
assert_rel_error(self, val['rel error'][0], 0.0, 1e-15)
class MyComp(om.ExplicitComponent):
def __init__(self, input_shape, src_indices=None, flat_src_indices=False):
super(MyComp, self).__init__()
self._input_shape = input_shape
self._src_indices = src_indices
self._flat_src_indices = flat_src_indices
def setup(self):
self.add_input('x', val=np.zeros(self._input_shape),
src_indices=self._src_indices, flat_src_indices=self._flat_src_indices)
self.add_output('y', val=np.zeros(self._input_shape))
def compute(self, inputs, outputs):
outputs['y'] = 2.0 * inputs['x']
def src_indices_model(src_shape, tgt_shape, src_indices=None, flat_src_indices=False,
promotes=None):
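# Helper: build a two-component model that connects 'indeps.x' (shape src_shape)
# to 'C1.x' (shape tgt_shape), either through promotion (src_indices passed to
# add_input) or through an explicit connect call, so that src_indices handling
# can be exercised both ways.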
prob = om.Problem()
prob.model.add_subsystem('indeps', om.IndepVarComp('x', shape=src_shape),
promotes=promotes)
prob.model.add_subsystem('C1', MyComp(tgt_shape,
src_indices=src_indices if promotes else None,
flat_src_indices=flat_src_indices),
promotes=promotes)
if promotes is None:
prob.model.connect('indeps.x', 'C1.x', src_indices=src_indices,
flat_src_indices=flat_src_indices)
prob.setup()
return prob
class TestConnect(unittest.TestCase):
def setUp(self):
prob = om.Problem(om.Group())
sub = prob.model.add_subsystem('sub', om.Group())
idv = sub.add_subsystem('src', om.IndepVarComp())
idv.add_output('x', np.arange(15).reshape((5, 3))) # array
idv.add_output('s', 3.) # scalar
sub.add_subsystem('tgt', om.ExecComp('y = x'))
sub.add_subsystem('cmp', om.ExecComp('z = x'))
sub.add_subsystem('arr', om.ExecComp('a = x', x=np.zeros(2)))
self.sub = sub
self.prob = prob
def test_src_indices_as_int_list(self):
self.sub.connect('src.x', 'tgt.x', src_indices=[1])
def test_src_indices_as_int_array(self):
self.sub.connect('src.x', 'tgt.x', src_indices=np.zeros(1, dtype=int))
def test_src_indices_as_float_list(self):
msg = "src_indices must contain integers, but src_indices for " + \
"connection from 'src.x' to 'tgt.x' is <.* 'numpy.float64'>."
with assertRaisesRegex(self, TypeError, msg):
self.sub.connect('src.x', 'tgt.x', src_indices=[1.0])
def test_src_indices_as_float_array(self):
msg = "src_indices must contain integers, but src_indices for " + \
"connection from 'src.x' to 'tgt.x' is <.* 'numpy.float64'>."
with assertRaisesRegex(self, TypeError, msg):
self.sub.connect('src.x', 'tgt.x', src_indices=np.zeros(1))
def test_src_indices_as_str(self):
msg = "src_indices must be an index array, " + \
"did you mean connect('src.x', [tgt.x, cmp.x])?"
with assertRaisesRegex(self, TypeError, msg):
self.sub.connect('src.x', 'tgt.x', 'cmp.x')
def test_already_connected(self):
msg = "Input 'tgt.x' is already connected to 'src.x'."
self.sub.connect('src.x', 'tgt.x', src_indices=[1])
with assertRaisesRegex(self, RuntimeError, msg):
self.sub.connect('cmp.x', 'tgt.x', src_indices=[1])
def test_invalid_source(self):
msg = "Output 'src.z' does not exist for connection " + \
"in 'sub' from 'src.z' to 'tgt.x'."
# source and target names can't be validated at connect time;
# they are only checked when setup() is called
self.sub.connect('src.z', 'tgt.x', src_indices=[1])
with assertRaisesRegex(self, NameError, msg):
self.prob.setup()
def test_invalid_target(self):
msg = "Group (sub): Input 'tgt.z' does not exist for connection from 'src.x' to 'tgt.z'."
# source and target names can't be validated at connect time;
# they are only checked when setup() is called
self.sub.connect('src.x', 'tgt.z', src_indices=[1])
with self.assertRaises(NameError) as ctx:
self.prob.setup()
self.assertEqual(str(ctx.exception), msg)
def test_connect_within_system(self):
msg = "Output and input are in the same System for connection " + \
"from 'tgt.y' to 'tgt.x'."
with assertRaisesRegex(self, RuntimeError, msg):
self.sub.connect('tgt.y', 'tgt.x', src_indices=[1])
def test_connect_within_system_with_promotes(self):
prob = om.Problem()
sub = prob.model.add_subsystem('sub', om.Group())
sub.add_subsystem('tgt', om.ExecComp('y = x'), promotes_outputs=['y'])
sub.connect('y', 'tgt.x', src_indices=[1])
msg = "Group (sub): Output and input are in the same System for connection from 'y' to 'tgt.x'."
with self.assertRaises(RuntimeError) as ctx:
prob.setup()
self.assertEqual(str(ctx.exception), msg)
def test_connect_units_with_unitless(self):
prob = om.Problem()
prob.model.add_subsystem('px1', om.IndepVarComp('x1', 100.0))
prob.model.add_subsystem('src', om.ExecComp('x2 = 2 * x1', x2={'units': 'degC'}))
prob.model.add_subsystem('tgt', om.ExecComp('y = 3 * x', x={'units': 'unitless'}))
prob.model.connect('px1.x1', 'src.x1')
prob.model.connect('src.x2', 'tgt.x')
msg = "Group (<model>): Output 'src.x2' with units of 'degC' is connected " \
"to input 'tgt.x' which has no units."
with assert_warning(UserWarning, msg):
prob.setup()
def test_connect_incompatible_units(self):
msg = "Output units of 'degC' for 'src.x2' are incompatible " \
"with input units of 'm' for 'tgt.x'."
prob = om.Problem()
prob.model.add_subsystem('px1', om.IndepVarComp('x1', 100.0))
prob.model.add_subsystem('src', om.ExecComp('x2 = 2 * x1', x2={'units': 'degC'}))
prob.model.add_subsystem('tgt', om.ExecComp('y = 3 * x', x={'units': 'm'}))
prob.model.connect('px1.x1', 'src.x1')
prob.model.connect('src.x2', 'tgt.x')
with assertRaisesRegex(self, RuntimeError, msg):
prob.setup()
def test_connect_units_with_nounits(self):
prob = om.Problem()
prob.model.add_subsystem('px1', om.IndepVarComp('x1', 100.0))
prob.model.add_subsystem('src', om.ExecComp('x2 = 2 * x1'))
prob.model.add_subsystem('tgt', om.ExecComp('y = 3 * x', x={'units': 'degC'}))
prob.model.connect('px1.x1', 'src.x1')
prob.model.connect('src.x2', 'tgt.x')
prob.set_solver_print(level=0)
msg = "Group (<model>): Input 'tgt.x' with units of 'degC' is " \
"connected to output 'src.x2' which has no units."
with assert_warning(UserWarning, msg):
prob.setup()
prob.run_model()
assert_rel_error(self, prob['tgt.y'], 600.)
def test_connect_units_with_nounits_prom(self):
prob = om.Problem()
prob.model.add_subsystem('px1', om.IndepVarComp('x', 100.0), promotes_outputs=['x'])
prob.model.add_subsystem('src', om.ExecComp('y = 2 * x'), promotes=['x', 'y'])
prob.model.add_subsystem('tgt', om.ExecComp('z = 3 * y', y={'units': 'degC'}), promotes=['y'])
prob.set_solver_print(level=0)
msg = "Group (<model>): Input 'tgt.y' with units of 'degC' is " \
"connected to output 'src.y' which has no units."
with assert_warning(UserWarning, msg):
prob.setup()
prob.run_model()
assert_rel_error(self, prob['tgt.z'], 600.)
def test_mix_promotes_types(self):
prob = om.Problem()
prob.model.add_subsystem('src', om.ExecComp(['y = 2 * x', 'y2 = 3 * x']),
promotes=['x', 'y'], promotes_outputs=['y2'])
with self.assertRaises(RuntimeError) as context:
prob.setup()
self.assertEqual(str(context.exception),
"ExecComp (src): 'promotes' cannot be used at the same time as "
"'promotes_inputs' or 'promotes_outputs'.")
def test_mix_promotes_types2(self):
prob = om.Problem()
prob.model.add_subsystem('src', om.ExecComp(['y = 2 * x', 'y2 = 3 * x2']),
promotes=['x', 'y'], promotes_inputs=['x2'])
with self.assertRaises(RuntimeError) as context:
prob.setup()
self.assertEqual(str(context.exception),
"ExecComp (src): 'promotes' cannot be used at the same time as "
"'promotes_inputs' or 'promotes_outputs'.")
def test_nested_nested_conn(self):
prob = om.Problem()
root = prob.model
root.add_subsystem('p', om.IndepVarComp('x', 1.0))
G1 = root.add_subsystem('G1', om.Group())
par1 = G1.add_subsystem('par1', om.Group())
par1.add_subsystem('c2', om.ExecComp('y = x * 2.0'))
par1.add_subsystem('c4', om.ExecComp('y = x * 4.0'))
prob.model.add_design_var('p.x')
prob.model.add_constraint('G1.par1.c4.y', upper=0.0)
root.connect('p.x', 'G1.par1.c2.x')
root.connect('G1.par1.c2.y', 'G1.par1.c4.x')
prob.setup()
prob.run_driver()
assert_rel_error(self, prob['G1.par1.c4.y'], 8.0)
def test_bad_shapes(self):
self.sub.connect('src.s', 'arr.x')
msg = ("The source and target shapes do not match or are ambiguous for the connection "
"'sub.src.s' to 'sub.arr.x'.")
with assertRaisesRegex(self, ValueError, msg):
self.prob.setup()
def test_bad_indices_shape(self):
p = om.Problem()
p.model.add_subsystem('IV', om.IndepVarComp('x', np.arange(12).reshape((4, 3))))
p.model.add_subsystem('C1', om.ExecComp('y=sum(x)*2.0', x=np.zeros((2, 2))))
p.model.connect('IV.x', 'C1.x', src_indices=[(1, 1)])
msg = (r"The source indices \[\[1 1\]\] do not specify a valid shape for "
r"the connection 'IV.x' to 'C1.x'. The target "
r"shape is \(2.*, 2.*\) but indices are \(1.*, 2.*\).")
with assertRaisesRegex(self, ValueError, msg):
p.setup()
def test_bad_indices_dimensions(self):
self.sub.connect('src.x', 'arr.x', src_indices=[(2, -1, 2), (2, 2, 2)],
flat_src_indices=False)
msg = ("Group (sub): The source indices [[ 2 -1 2] [ 2 2 2]] do not specify a "
"valid shape for the connection 'sub.src.x' to 'sub.arr.x'. "
"The source has 2 dimensions but the indices expect 3.")
try:
self.prob.setup()
except ValueError as err:
self.assertEqual(str(err), msg)
else:
self.fail('Exception expected.')
def test_bad_indices_index(self):
# the index value within src_indices is outside the valid range for the source
self.sub.connect('src.x', 'arr.x', src_indices=[(2, -1), (4, 4)],
flat_src_indices=False)
msg = ("Group (sub): The source indices do not specify a valid index for the "
"connection 'sub.src.x' to 'sub.arr.x'. Index '4' "
"is out of range for source dimension of size 3.")
try:
self.prob.setup()
except ValueError as err:
self.assertEqual(str(err), msg)
else:
self.fail('Exception expected.')
def test_src_indices_shape(self):
src_indices_model(src_shape=(3, 3), tgt_shape=(2, 2),
src_indices=[[4, 5], [7, 8]],
flat_src_indices=True)
def test_src_indices_shape_bad_idx_flat(self):
try:
src_indices_model(src_shape=(3, 3), tgt_shape=(2, 2),
src_indices=[[4, 5], [7, 9]],
flat_src_indices=True)
except Exception as err:
self.assertEqual(str(err), "Group (<model>): The source indices do not specify a valid index "
"for the connection 'indeps.x' to 'C1.x'. "
"Index '9' is out of range for a flat source of size 9.")
else:
self.fail("Exception expected.")
def test_src_indices_shape_bad_idx_flat_promotes(self):
try:
src_indices_model(src_shape=(3, 3), tgt_shape=(2, 2),
src_indices=[[4, 5], [7, 9]],
flat_src_indices=True, promotes=['x'])
except Exception as err:
self.assertEqual(str(err), "Group (<model>): The source indices do not specify a valid index "
"for the connection 'indeps.x' to 'C1.x'. "
"Index '9' is out of range for a flat source of size 9.")
else:
self.fail("Exception expected.")
def test_src_indices_shape_bad_idx_flat_neg(self):
try:
| |
self.execute_instructions(pre, instructions, t_cycles, post, name)
def test_sra(self):
tests = []
for (X,f) in [(0x00, 0x00),
(0x01, 0x01),
(0x80, 0x00),
(0xF0, 0x28),
(0xFF, 0x29),
(0x7F, 0x29) ]:
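# SRA is an arithmetic right shift: bit 7 (the sign bit) is preserved and
# shifted back in, which is what ((X >> 1) | (X & 0x80)) below computes; the
# f values above are the expected flag bytes for each input X.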
for (r,i) in [ ('B', 0x28),
('C', 0x29),
('D', 0x2A),
('E', 0x2B),
('H', 0x2C),
('L', 0x2D),
('A', 0x2F) ]:
tests += [
[ [ set_register_to(r,X) ], [ 0xCB, i ], 8, [ expect_register_equal(r, ((X >> 1) | (X&0x80))&0xFF), (F == f) ], "SRA {} (of 0x{:X})".format(r,X) ],
]
tests += [
[ [ M(0x1BBC, X), HL(0x1BBC) ], [ 0xCB, 0x2E ], 15, [ (M[0x1BBC] == ((X >> 1) | (X&0x80))&0xFF), (F == f) ], "SRA (HL) (of 0x{:X})".format(X) ],
[ [ M(0x1BBC, X), IX(0x1BB0) ], [ 0xDD, 0xCB, 0x0C, 0x2E ], 23, [ (M[0x1BBC] == ((X >> 1) | (X&0x80))&0xFF), (F == f) ], "SRA (IX+0CH) (of 0x{:X})".format(X) ],
[ [ M(0x1BBC, X), IY(0x1BB0) ], [ 0xFD, 0xCB, 0x0C, 0x2E ], 23, [ (M[0x1BBC] == ((X >> 1) | (X&0x80))&0xFF), (F == f) ], "SRA (IY+0CH) (of 0x{:X})".format(X) ],
]
for (pre, instructions, t_cycles, post, name) in tests:
self.execute_instructions(pre, instructions, t_cycles, post, name)
def test_sl1(self):
tests = []
for (X,f) in [(0x00, 0x00),
(0x01, 0x00),
(0x80, 0x01),
(0xF0, 0x21),
(0xFF, 0x29),
(0x7F, 0x28), ]:
for (r,i) in [ ('B', 0x30),
('C', 0x31),
('D', 0x32),
('E', 0x33),
('H', 0x34),
('L', 0x35),
('A', 0x37) ]:
tests += [
[ [ set_register_to(r,X) ], [ 0xCB, i ], 8, [ expect_register_equal(r, ((X << 1) + 1)&0xFF), (F == f) ], "SL1 {} (of 0x{:X})".format(r,X) ],
]
tests += [
[ [ M(0x1BBC, X), HL(0x1BBC) ], [ 0xCB, 0x36 ], 15, [ (M[0x1BBC] == ((X << 1) + 1)&0xFF), (F == f) ], "SL1 (HL) (of 0x{:X})".format(X) ],
[ [ M(0x1BBC, X), IX(0x1BB0) ], [ 0xDD, 0xCB, 0x0C, 0x36 ], 23, [ (M[0x1BBC] == ((X << 1) + 1)&0xFF), (F == f) ], "SL1 (IX+0CH) (of 0x{:X})".format(X) ],
[ [ M(0x1BBC, X), IY(0x1BB0) ], [ 0xFD, 0xCB, 0x0C, 0x36 ], 23, [ (M[0x1BBC] == ((X << 1) + 1)&0xFF), (F == f) ], "SL1 (IY+0CH) (of 0x{:X})".format(X) ],
]
for (pre, instructions, t_cycles, post, name) in tests:
self.execute_instructions(pre, instructions, t_cycles, post, name)
def test_srl(self):
tests = []
for (X,f) in [(0x00, 0x00),
(0x01, 0x01),
(0x80, 0x00),
(0xF0, 0x28),
(0xFF, 0x29),
(0x7F, 0x29) ]:
for (r,i) in [ ('B', 0x38),
('C', 0x39),
('D', 0x3A),
('E', 0x3B),
('H', 0x3C),
('L', 0x3D),
('A', 0x3F) ]:
tests += [
[ [ set_register_to(r,X) ], [ 0xCB, i ], 8, [ expect_register_equal(r, (X >> 1)), (F == f) ], "SRL {} (of 0x{:X})".format(r,X) ],
]
tests += [
[ [ M(0x1BBC, X), HL(0x1BBC) ], [ 0xCB, 0x3E ], 15, [ (M[0x1BBC] == (X >> 1)&0xFF), (F == f) ], "SRL (HL) (of 0x{:X})".format(X) ],
[ [ M(0x1BBC, X), IX(0x1BB0) ], [ 0xDD, 0xCB, 0x0C, 0x3E ], 23, [ (M[0x1BBC] == (X >> 1)&0xFF), (F == f) ], "SRL (IX+0CH) (of 0x{:X})".format(X) ],
[ [ M(0x1BBC, X), IY(0x1BB0) ], [ 0xFD, 0xCB, 0x0C, 0x3E ], 23, [ (M[0x1BBC] == (X >> 1)&0xFF), (F == f) ], "SRL (IY+0CH) (of 0x{:X})".format(X) ],
]
for (pre, instructions, t_cycles, post, name) in tests:
self.execute_instructions(pre, instructions, t_cycles, post, name)
def test_rld(self):
tests = [
[ [ A(0xF1), M(0x1BBC,0x23), HL(0x1BBC) ], [ 0xED, 0x6F ], 18, [ (A == 0x02), (M[0x1BBC] == 0x31), (F == 0x00) ], "RLD (of 0xF1 and 0x23)".format() ],
]
for (pre, instructions, t_cycles, post, name) in tests:
self.execute_instructions(pre, instructions, t_cycles, post, name)
def test_rrd(self):
tests = [
[ [ A(0xF1), M(0x1BBC,0x23), HL(0x1BBC) ], [ 0xED, 0x67 ], 18, [ (A == 0x03), (M[0x1BBC] == 0x12), (F == 0x04) ], "RRD (of 0xF1 and 0x23)".format() ],
]
for (pre, instructions, t_cycles, post, name) in tests:
self.execute_instructions(pre, instructions, t_cycles, post, name)
def test_bit(self):
tests = []
for X in range(0,256):
for b in range(0,8):
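# Expected flag byte for BIT b,r: Z and P/V (0x44) are set when the tested bit
# is 0, H (0x10) is always set, and the S/5/3 bits (mask 0xA8) are copied from
# the tested bit of the operand.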
f = ((1 - ((X >> b)&0x1))*0x44) + 0x10 + ((X&(1 << b))&0xA8)
for (reg, r) in [ ('B', 0x0), ('C', 0x1), ('D',0x2), ('E',0x3), ('H',0x4), ('L',0x5), ('A',0x7) ]:
i = 0x40 + (b << 3) + r
tests += [
[ [ set_register_to(reg,X) ], [ 0xCB, i ], 8, [ expect_register_equal(reg, X), (F == f) ], "BIT {},{} (of 0x{:X})".format(b,reg,X) ],
]
tests += [
[ [ HL(0x1BBC), M(0x1BBC, X) ], [ 0xCB, (0x46 + (b << 3)) ], 12, [ (M[0x1BBC] == X), (F == f) ], "BIT {},(HL) (of 0x{:X})".format(b,X) ],
[ [ IX(0x1BB0), M(0x1BBC, X) ], [ 0xDD, 0xCB, 0xC, (0x46 + (b << 3)) ], 20, [ (M[0x1BBC] == X), (F == f) ], "BIT {},(IX+0C) (of 0x{:X})".format(b,X) ],
[ [ IY(0x1BB0), M(0x1BBC, X) ], [ 0xFD, 0xCB, 0xC, (0x46 + (b << 3)) ], 20, [ (M[0x1BBC] == X), (F == f) ], "BIT {},(IY+0C) (of 0x{:X})".format(b,X) ],
]
for (pre, instructions, t_cycles, post, name) in tests:
self.execute_instructions(pre, instructions, t_cycles, post, name)
def test_res(self):
tests = []
for b in range(0,8):
for (reg, r) in [ ('B', 0x0), ('C', 0x1), ('D',0x2), ('E',0x3), ('H',0x4), ('L',0x5), ('A',0x7) ]:
i = 0x80 + (b << 3) + r
tests += [
[ [ set_register_to(reg,0xFF) ], [ 0xCB, i ], 8, [ expect_register_equal(reg, 0xFF - (1 << b)) ], "RES {},{}".format(b,reg) ],
]
tests += [
[ [ HL(0x1BBC), M(0x1BBC, 0xFF) ], [ 0xCB, (0x86 + (b << 3)) ], 15, [ (M[0x1BBC] == (0xFF - (1 << b))) ], "RES {},(HL)".format(b) ],
[ [ IX(0x1BB0), M(0x1BBC, 0xFF) ], [ 0xDD, 0xCB, 0xC, (0x86 + (b << 3)) ], 23, [ (M[0x1BBC] == (0xFF - (1 << b))) ], "RES {},(IX+0C)".format(b) ],
[ [ IY(0x1BB0), M(0x1BBC, 0xFF) ], [ 0xFD, 0xCB, 0xC, (0x86 + (b << 3)) ], 23, [ (M[0x1BBC] == (0xFF - (1 << b))) ], "RES {},(IY+0C)".format(b) ],
]
for (pre, instructions, t_cycles, post, name) in tests:
self.execute_instructions(pre, instructions, t_cycles, post, name)
def test_set(self):
tests = []
for b in range(0,8):
for (reg, r) in [ ('B', 0x0), ('C', 0x1), ('D',0x2), ('E',0x3), ('H',0x4), ('L',0x5), ('A',0x7) ]:
i = 0xC0 + (b << 3) + r
tests += [
[ [ set_register_to(reg,0x00) ], [ 0xCB, i ], 8, [ expect_register_equal(reg, (1 << b)) ], "SET {},{}".format(b,reg) ],
]
tests += [
[ [ HL(0x1BBC), M(0x1BBC, 0x00) ], [ 0xCB, (0xC6 + (b << 3)) ], 15, [ (M[0x1BBC] == (1 << b)) ], "SET {},(HL)".format(b) ],
[ [ IX(0x1BB0), M(0x1BBC, 0x00) ], [ 0xDD, 0xCB, 0xC, (0xC6 + (b << 3)) ], 23, [ (M[0x1BBC] == (1 << b)) ], "SET {},(IX+0C)".format(b) ],
[ [ IY(0x1BB0), M(0x1BBC, 0x00) ], [ 0xFD, 0xCB, 0xC, (0xC6 + (b << 3)) ], 23, [ (M[0x1BBC] == (1 << b)) ], "SET {},(IY+0C)".format(b) ],
]
for (pre, instructions, t_cycles, post, name) in tests:
self.execute_instructions(pre, instructions, t_cycles, post, name)
def test_jp(self):
# actions taken first, instructions to execute, t-cycles to run for, expected conditions post, name
tests = [
[ [], [ 0xC3, 0xBC, 0x1B ], 10, [ (PC == 0x1BBC) ], "JP 01BBCH" ],
[ [ F(0x00) ], [ 0xDA, 0xBC, 0x1B ], 10, [ (PC == 0x0003) ], "JP C,01BBCH (no jump)" ],
[ [ F(0x01) ], [ 0xDA, 0xBC, 0x1B ], 10, [ (PC == 0x1BBC) ], "JP C,01BBCH (jump)" ],
[ [ F(0x01) ], [ 0xD2, 0xBC, 0x1B ], 10, [ (PC == 0x0003) ], "JP NC,01BBCH (no jump)" ],
[ [ F(0x00) ], [ 0xD2, 0xBC, 0x1B ], 10, [ (PC == 0x1BBC) ], "JP NC,01BBCH (jump)" ],
[ [ F(0x00) ], [ 0xCA, 0xBC, 0x1B ], 10, [ (PC == 0x0003) ], "JP Z,01BBCH (no jump)" ],
[ [ F(0x40) ], [ 0xCA, 0xBC, 0x1B ], 10, [ (PC == 0x1BBC) ], "JP Z,01BBCH (jump)" ],
[ [ F(0x40) ], [ 0xC2, 0xBC, 0x1B ], 10, [ (PC == 0x0003) ], "JP NZ,01BBCH (no jump)" ],
[ [ F(0x00) ], [ 0xC2, 0xBC, 0x1B ], 10, [ (PC == 0x1BBC) ], "JP NZ,01BBCH (jump)" ],
INPUT:
- ``v`` -- a label of the standard part of the tableau
OUTPUT:
- an integer value representing the spin of the ribbon with label ``v``.
EXAMPLES::
sage: T = StrongTableau([[-1,-2,5,6],[-3,-4,-7,8],[-5,-6],[7,-8]], 3)
sage: [T.spin_of_ribbon(v) for v in range(1,9)]
[0, 0, 0, 0, 0, 0, 1, 0]
sage: T = StrongTableau([[None,None,-1,-3],[-2,3,-3,4],[2,3],[-3,-4]], 3)
sage: [T.spin_of_ribbon(v) for v in range(1,7)]
[0, 1, 0, 0, 1, 0]
TESTS::
sage: StrongTableau([[None, None], [None]], 4).spin_of_ribbon(1)
0
sage: StrongTableau([],4).spin_of_ribbon(1)
0
"""
return (self.height_of_ribbon(v)-1)*self.number_of_connected_components(v)+self.ribbons_above_marked(v)
def spin( self ):
r"""
Return the spin statistic of the tableau ``self``.
The spin is an integer statistic on a strong marked tableau. It is
the sum of `(h-1) r` plus the number of connected components above the
marked one where `h` is the height of the marked ribbon and `r` is
the number of connected components.
.. SEEALSO:: :meth:`height_of_ribbon`, :meth:`number_of_connected_components`,
:meth:`ribbons_above_marked`
The `k`-Schur functions with a parameter `t` can be defined as
.. MATH::
s^{(k)}_\lambda[X; t] = \sum_T t^{spin(T)} m_{weight(T)}[X]
where the sum is over all column strict marked strong `k`-tableaux
of shape `\lambda` and partition content.
OUTPUT:
- an integer value representing the spin.
EXAMPLES::
sage: StrongTableau([[-1,-2,5,6],[-3,-4,-7,8],[-5,-6],[7,-8]], 3, [2,2,3,1]).spin()
1
sage: StrongTableau([[-1,-2,-4,-7],[-3,6,-6,8],[4,7],[-5,-8]], 3, [2,2,3,1]).spin()
2
sage: StrongTableau([[None,None,-1,-3],[-2,3,-3,4],[2,3],[-3,-4]], 3).spin()
2
sage: ks3 = SymmetricFunctions(QQ['t'].fraction_field()).kschur(3)
sage: t = ks3.realization_of().t
sage: m = ks3.ambient().realization_of().m()
sage: myks221 = sum(sum(t**T.spin() for T in StrongTableaux(3,[3,2,1],weight=mu))*m(mu) for mu in Partitions(5, max_part=3))
sage: myks221 == m(ks3[2,2,1])
True
sage: h = ks3.ambient().realization_of().h()
sage: Core([4,4,2,2],4).to_bounded_partition()
[2, 2, 2, 2]
sage: ks3[2,2,2,2].lift().scalar(h[3,3,2]) == sum( t**T.spin() for T in StrongTableaux(3, [4,4,2,2], weight=[3,3,2]) )
True
TESTS::
sage: StrongTableau([[None, None], [None]], 4).spin()
0
sage: StrongTableau([],4).spin()
0
"""
return sum(self.spin_of_ribbon(v) for v in range(1,self.size()+1))
def to_transposition_sequence( self ):
"""
Return a list of transpositions corresponding to ``self``.
Given a strong column strict tableau ``self`` returns the list of transpositions
which when applied to the left of an empty tableau gives the corresponding strong
standard tableau.
OUTPUT:
- a list of pairs of values ``[i,j]`` representing the transpositions `t_{ij}`
EXAMPLES::
sage: T = StrongTableau([[-1, -1, -1], [1]],2)
sage: T.to_transposition_sequence()
[[2, 3], [1, 2], [0, 1]]
sage: T = StrongTableau([[-1, -1, 2], [-2]],2)
sage: T.to_transposition_sequence()
[[-1, 0], [1, 2], [0, 1]]
sage: T = StrongTableau([[None, -1, 2, -3], [-2, 3]],2)
sage: T.to_transposition_sequence()
[[3, 4], [-1, 0], [1, 2]]
TESTS::
sage: StrongTableau([[None, None], [None]], 4).to_transposition_sequence()
[]
sage: StrongTableau([],4).to_transposition_sequence()
[]
"""
return StrongTableaux.marked_CST_to_transposition_sequence( self.to_standard_list(), self.k )
class StrongTableaux(UniqueRepresentation, Parent):
def __init__( self, k, shape, weight ):
r"""
TESTS::
sage: strongT = StrongTableaux(2, [3,1], weight=[2,1])
sage: TestSuite(strongT).run()
sage: strongT = StrongTableaux(0, [2,2], weight=[2,2])
Traceback (most recent call last):
...
ValueError: The input k has to be a positive integer
"""
self._outer_shape = shape[0]
self._inner_shape = shape[1]
self.k = k
if weight is None:
self._weight = (1,)*(self._outer_shape.length()-self._inner_shape.length())
else:
self._weight = weight
Parent.__init__(self, category = FiniteEnumeratedSets())
@staticmethod
def __classcall_private__(cls, k, shape, weight=None):
r"""
Straighten arguments before unique representation.
TESTS::
sage: ST3 = StrongTableaux(3, [2,2], weight=[1,1,1,1])
sage: TestSuite(ST3).run()
"""
if k<=0:
raise ValueError("The input k has to be a positive integer")
if shape==[] or shape[0] in ZZ:
outer_shape = Core(shape,k+1)
inner_shape = Core([],k+1)
else:
outer_shape = Core(shape[0],k+1)
inner_shape = Core(shape[1],k+1)
if weight is not None:
weight = tuple(weight)
return super(StrongTableaux, cls).__classcall__(cls, k, (outer_shape, inner_shape), weight)
def _repr_( self ):
r"""
Return the representation of ``self``.
EXAMPLES::
sage: StrongTableaux(3, [2,2], weight=[1,1,1,1])
Set of strong 3-tableaux of shape [2, 2] and of weight (1, 1, 1, 1)
sage: StrongTableaux(3, [2,2])
Set of strong 3-tableaux of shape [2, 2] and of weight (1, 1, 1, 1)
sage: StrongTableaux(3, [[2,2],[1]], weight=[0,0,2,1])
Set of strong 3-tableaux of shape [[2, 2], [1]] and of weight (0, 0, 2, 1)
sage: StrongTableaux(3, [[],[]], weight=[])
Set of strong 3-tableaux of shape [] and of weight ()
"""
if self._inner_shape==Core([],self.k+1):
s = "Set of strong %s-tableaux"%self.k
s +=" of shape %s"%self._outer_shape
else:
s = "Set of strong %s-tableaux"%self.k
s +=" of shape [%s, %s]"%(self._outer_shape, self._inner_shape)
s +="%sand of weight %s"%(" ",self._weight)
return s
options = Tableaux.options
def an_element(self):
r"""
Return the first generated element of the class of ``StrongTableaux``.
EXAMPLES::
sage: ST = StrongTableaux(3, [3], weight=[3])
sage: ST.an_element()
[[-1, -1, -1]]
"""
return next(iter(self))
def outer_shape(self):
r"""
Return the outer shape of the class of strong tableaux.
OUTPUT:
- a `k+1`-core
EXAMPLES::
sage: StrongTableaux( 2, [3,1] ).outer_shape()
[3, 1]
sage: type(StrongTableaux( 2, [3,1] ).outer_shape())
<class 'sage.combinat.core.Cores_length_with_category.element_class'>
sage: StrongTableaux( 4, [[2,1], [1]] ).outer_shape()
[2, 1]
"""
return self._outer_shape
def inner_shape(self):
r"""
Return the inner shape of the class of strong tableaux.
OUTPUT:
- a `k+1`-core
EXAMPLES::
sage: StrongTableaux( 2, [3,1] ).inner_shape()
[]
sage: type(StrongTableaux( 2, [3,1] ).inner_shape())
<class 'sage.combinat.core.Cores_length_with_category.element_class'>
sage: StrongTableaux( 4, [[2,1], [1]] ).inner_shape()
[1]
"""
return self._inner_shape
def shape(self):
r"""
Return the shape of ``self``.
If the ``self`` has an inner shape return a pair consisting of an inner and
an outer shape. If the inner shape is empty then return only the outer shape.
OUTPUT:
- a `k+1`-core or a pair of `k+1`-cores
EXAMPLES::
sage: StrongTableaux( 2, [3,1] ).shape()
[3, 1]
sage: type(StrongTableaux( 2, [3,1] ).shape())
<class 'sage.combinat.core.Cores_length_with_category.element_class'>
sage: StrongTableaux( 4, [[2,1], [1]] ).shape()
([2, 1], [1])
"""
if self._inner_shape:
return (self._outer_shape, self._inner_shape)
return self._outer_shape
def __iter__(self):
r"""
TESTS::
sage: ST = StrongTableaux(3, [4,1], weight=[2,2])
sage: ST.list()
[[[-1, -1, -2, -2], [2]], [[-1, -1, 2, -2], [-2]]]
sage: ST = StrongTableaux(3, [5,2,2], weight=[2,2,2,1])
sage: ST.cardinality()
14
sage: StrongTableaux(3, [5,2,2], weight=[3,3,1]).list()
[[[-1, -1, -1, -2, -2], [-2, 2], [2, -3]], [[-1, -1, -1, 2, -2], [-2, -2], [2, -3]], [[-1, -1, -1, -2, -3], [-2, -2], [2, 2]]]
sage: StrongTableaux(3, [4,1,1]).cardinality()
10
sage: StrongTableaux(3, [5,2,2], weight=[6,1]).list() # there are no strong column strict tableaux of shape [5,2,2] and weight (6,1)
[]
sage: StrongTableaux(3, [[5,2,2], [3,1,1]], weight=[2,1]).list()
[[[None, None, None, -1, -1], [None, 1], [None, -2]],
[[None, None, None, 1, -1], [None, -1], [None, -2]],
[[None, None, None, -1, -2], [None, -1], [None, 1]]]
sage: StrongTableaux(2, [[4,3,3,2,2,1,1], [2,1,1]], weight=[1,1,1,1]).cardinality()
150
sage: StrongTableaux(2, [[7,5,3,1], [2,1,1]], weight=[2,2]).cardinality()
18
sage: StrongTableaux(2, [[3,1],[3,1]]).list()
[[[None, None, None], [None]]]
sage: StrongTableaux(4, []).list()
[[]]
"""
size = sum(self._weight)
if size==0:
yield self([[None]*(row) for row in self._inner_shape])
else:
for unT in StrongTableaux.standard_unmarked_iterator( self.k, size, self._outer_shape, self._inner_shape ):
for T in StrongTableaux.marked_given_unmarked_and_weight_iterator( unT, self.k, self._weight ):
yield T
@classmethod
def standard_unmarked_iterator( cls, k, size, outer_shape=None, inner_shape=[] ):
r"""
An iterator for standard unmarked strong tableaux.
An iterator which generates all unmarked tableaux of a given ``size`` which are
contained in ``outer_shape`` and which contain the ``inner_shape``.
These are built recursively by building all standard unmarked strong tableaux of
size ``size`` - 1 and adding all possible covers.
If ``outer_shape`` is ``None`` then there is no restriction on the shape of the
tableaux which are created.
INPUT:
- ``k``, ``size`` - positive integers
- ``outer_shape`` - a list representing a `k+1`-core (default: ``None``)
- ``inner_shape`` - a list representing a `k+1`-core (default: [])
OUTPUT:
- an iterator which lists all standard strong unmarked tableaux with ``size``
cells and which are contained in ``outer_shape`` and contain ``inner_shape``
EXAMPLES::
sage: list(StrongTableaux.standard_unmarked_iterator(2, 3))
[[[1, 2, 3], [3]], [[1, 2], [3], [3]], [[1, 3, 3], [2]], [[1, 3], [2], [3]]]
sage: list(StrongTableaux.standard_unmarked_iterator(2, 1, inner_shape=[1,1]))
[[[None, 1, 1], [None]], [[None, 1], [None], [1]]]
sage: len(list(StrongTableaux.standard_unmarked_iterator(4,4)))
10
sage: len(list(StrongTableaux.standard_unmarked_iterator(4,6)))
98
sage: len(list(StrongTableaux.standard_unmarked_iterator(4,4, inner_shape=[2,2])))
92
sage: len(list(StrongTableaux.standard_unmarked_iterator(4,4, outer_shape=[5,2,2,1], inner_shape=[2,2])))
10
TESTS::
sage: list(StrongTableaux.standard_unmarked_iterator(2,0, outer_shape=[3,1], inner_shape=[3,1]))
[[[None, None, None], [None]]]
sage: list(StrongTableaux.standard_unmarked_iterator(4,0, outer_shape=[]))
[[]]
"""
if size==0:
if outer_shape is None or Core(outer_shape,k+1).contains(inner_shape):
yield [[None]*(inner_shape[i]) for i in range(len(inner_shape))]
else:
for T in cls.standard_unmarked_iterator(k, size-1, outer_shape, inner_shape):
for TT in cls.follows_tableau_unsigned_standard(T, k):
if outer_shape is None or Core(outer_shape, k+1).contains([len(r) for r in TT]):
yield TT
@classmethod
def marked_given_unmarked_and_weight_iterator(cls, unmarkedT, k, weight):
r"""
An iterator generating strong marked tableaux from an unmarked strong tableau.
Iterator which lists all marked tableaux of weight ``weight`` such that the
standard unmarked part of the tableau is equal to ``unmarkedT``.
INPUT:
- ``unmarkedT`` - a list | |
# -*- coding: utf-8 -*-
# pylint: disable=line-too-long
import logger
import testutil
import test_engine
log = logger.Logger(__name__, logger.INFO)
class TestTypes(test_engine.EngineTestCase):
sample = testutil.parse_test_sample({
"SCHEMA": [
[1, "Types", [
[21, "text", "Text", False, "", "", ""],
[22, "numeric", "Numeric", False, "", "", ""],
[23, "int", "Int", False, "", "", ""],
[24, "bool", "Bool", False, "", "", ""],
[25, "date", "Date", False, "", "", ""]
]],
[2, "Formulas", [
[30, "division", "Any", True, "Types.lookupOne(id=18).numeric / 2", "", ""]
]]
],
"DATA": {
"Types": [
["id", "text", "numeric", "int", "bool", "date"],
[11, "New York", "New York", "New York", "New York", "New York"],
[12, "Chîcágö", "Chîcágö", "Chîcágö", "Chîcágö", "Chîcágö"],
[13, False, False, False, False, False],
[14, True, True, True, True, True],
[15, 1509556595, 1509556595, 1509556595, 1509556595, 1509556595],
[16, 8.153, 8.153, 8.153, 8.153, 8.153],
[17, 0, 0, 0, 0, 0],
[18, 1, 1, 1, 1, 1],
[19, "", "", "", "", ""],
[20, None, None, None, None, None]],
"Formulas": [
["id"],
[1]]
},
})
all_row_ids = [11, 12, 13, 14, 15, 16, 17, 18, 19, 20]
def test_update_typed_cells(self):
"""
Tests that updated typed values are set as expected in the sandbox. Types should follow
the rules:
- After updating a cell with a value of a type compatible to the column type,
the cell value should have the column's standard type
- Otherwise, the cell value should have the type AltText
"""
self.load_sample(self.sample)
out_actions = self.apply_user_action(["BulkUpdateRecord", "Types", self.all_row_ids, {
"text": [None, "", 1, 0, 8.153, 1509556595, True, False, u"Chîcágö", "New York"],
"numeric": [None, "", 1, 0, 8.153, 1509556595, True, False, u"Chîcágö", "New York"],
"int": [None, "", 1, 0, 8.153, 1509556595, True, False, u"Chîcágö", "New York"],
"bool": [None, "", 1, 0, 8.153, 1509556595, True, False, u"Chîcágö", "New York"],
"date": [None, "", 1, 0, 8.153, 1509556595, True, False, u"2019-01-22 00:47:39", "New York"]
}])
self.assertPartialOutActions(out_actions, {
"stored": [["BulkUpdateRecord", "Types", self.all_row_ids, {
"text": [None,"","1","0","8.153","1509556595","True","False","Chîcágö","New York"],
"numeric": [None, None, 1.0, 0.0, 8.153, 1509556595.0, 1.0, 0.0, "Chîcágö", "New York"],
"int": [None, None, 1, 0, 8, 1509556595, 1, 0, "Chîcágö", "New York"],
"bool": [False, False, True, False, True, True, True, False, "Chîcágö", "New York"],
"date": [None, None, 1.0, 0.0, 8.153, 1509556595.0, 1.0, 0.0, 1548115200.0, "New York"]
}],
["UpdateRecord", "Formulas", 1, {"division": 0.0}],
],
"undo": [["BulkUpdateRecord", "Types", self.all_row_ids, {
"text": ["New York", "Chîcágö", False, True, 1509556595, 8.153, 0, 1, "", None],
"numeric": ["New York", "Chîcágö", False, True, 1509556595, 8.153, 0, 1, "", None],
"int": ["New York", "Chîcágö", False, True, 1509556595, 8.153, 0, 1, "", None],
"bool": ["New York", "Chîcágö", False, True, 1509556595, 8.153, False, True, "", None],
"date": ["New York", "Chîcágö", False, True, 1509556595, 8.153, 0, 1, "", None]
}],
["UpdateRecord", "Formulas", 1, {"division": 0.5}],
]
})
self.assertTableData("Types", data=[
["id", "text", "numeric", "int", "bool", "date"],
[11, None, None, None, False, None],
[12, "", None, None, False, None],
[13, "1", 1.0, 1, True, 1.0],
[14, "0", 0.0, 0, False, 0.0],
[15, "8.153", 8.153, 8, True, 8.153],
[16, "1509556595", 1509556595, 1509556595, True, 1509556595.0],
[17, "True", 1.0, 1, True, 1.0],
[18, "False", 0.0, 0, False, 0.0],
[19, "Chîcágö", "Chîcágö", "Chîcágö", "Chîcágö", 1548115200.0],
[20, "New York", "New York", "New York", "New York", "New York"]
])
def test_text_conversions(self):
"""
Tests that column type changes occur as expected in the sandbox:
- Resulting cell values should all be Text
- Only non-compatible values should appear in the resulting BulkUpdateRecord
"""
self.load_sample(self.sample)
# Test Text -> Text conversion
out_actions = self.apply_user_action(["ModifyColumn", "Types", "text", { "type" : "Text" }])
self.assertPartialOutActions(out_actions, {
"stored": [],
"undo": []
})
# Test Numeric -> Text conversion
out_actions = self.apply_user_action(["ModifyColumn", "Types", "numeric", { "type" : "Text" }])
self.assertPartialOutActions(out_actions, {
"stored": [
["ModifyColumn", "Types", "numeric", {"type": "Text"}],
["BulkUpdateRecord", "Types", [13, 14, 15, 16, 17, 18],
{"numeric": ["False", "True", "1509556595.0", "8.153", "0.0", "1.0"]}],
["UpdateRecord", "_grist_Tables_column", 22, {"type": "Text"}],
["UpdateRecord", "Formulas", 1, {"division": ["E", "TypeError"]}],
],
"undo": [
["BulkUpdateRecord", "Types", [13, 14, 15, 16, 17, 18],
{"numeric": [False, True, 1509556595, 8.153, 0, 1]}],
["ModifyColumn", "Types", "numeric", {"type": "Numeric"}],
["UpdateRecord", "_grist_Tables_column", 22, {"type": "Numeric"}],
["UpdateRecord", "Formulas", 1, {"division": 0.5}],
]
})
# Test Int -> Text conversion
out_actions = self.apply_user_action(["ModifyColumn", "Types", "int", { "type" : "Text" }])
self.assertPartialOutActions(out_actions, {
"stored": [
["ModifyColumn", "Types", "int", {"type": "Text"}],
["BulkUpdateRecord", "Types", [13, 14, 15, 16, 17, 18],
{"int": ["False", "True", "1509556595", "8.153", "0", "1"]}],
["UpdateRecord", "_grist_Tables_column", 23, {"type": "Text"}],
],
"undo": [
["BulkUpdateRecord", "Types", [13, 14, 15, 16, 17, 18],
{"int": [False, True, 1509556595, 8.153, 0, 1]}],
["ModifyColumn", "Types", "int", {"type": "Int"}],
["UpdateRecord", "_grist_Tables_column", 23, {"type": "Int"}],
]
})
# Test Bool -> Text
out_actions = self.apply_user_action(["ModifyColumn", "Types", "bool", { "type" : "Text" }])
self.assertPartialOutActions(out_actions, {
"stored": [
["ModifyColumn", "Types", "bool", {"type": "Text"}],
["BulkUpdateRecord", "Types", [13, 14, 15, 16, 17, 18],
{"bool": ["False", "True", "1509556595", "8.153", "False", "True"]}],
["UpdateRecord", "_grist_Tables_column", 24, {"type": "Text"}],
],
"undo": [
["BulkUpdateRecord", "Types", [13, 14, 15, 16, 17, 18],
{"bool": [False, True, 1509556595, 8.153, False, True]}],
["ModifyColumn", "Types", "bool", {"type": "Bool"}],
["UpdateRecord", "_grist_Tables_column", 24, {"type": "Bool"}],
]
})
# Test Date -> Text
out_actions = self.apply_user_action(["ModifyColumn", "Types", "date", { "type" : "Text" }])
self.assertPartialOutActions(out_actions, {
"stored": [
["ModifyColumn", "Types", "date", {"type": "Text"}],
["BulkUpdateRecord", "Types", [13, 14, 15, 16, 17, 18],
{"date": ["False", "True", "1509556595.0", "8.153", "0.0", "1.0"]}],
["UpdateRecord", "_grist_Tables_column", 25, {"type": "Text"}]
],
"undo": [
["BulkUpdateRecord", "Types", [13, 14, 15, 16, 17, 18],
{"date": [False, True, 1509556595.0, 8.153, 0.0, 1.0]}],
["ModifyColumn", "Types", "date", {"type": "Date"}],
["UpdateRecord", "_grist_Tables_column", 25, {"type": "Date"}]
]
})
# Assert that the final table is as expected
self.assertTableData("Types", data=[
["id", "text", "numeric", "int", "bool", "date"],
[11, "New York", "New York", "New York", "New York", "New York"],
[12, "Chîcágö", "Chîcágö", "Chîcágö", "Chîcágö", "Chîcágö"],
[13, False, "False", "False", "False", "False"],
[14, True, "True", "True", "True", "True"],
[15, 1509556595, "1509556595.0","1509556595","1509556595","1509556595.0"],
[16, 8.153, "8.153", "8.153", "8.153", "8.153"],
[17, 0, "0.0", "0", "False", "0.0"],
[18, 1, "1.0", "1", "True", "1.0"],
[19, "", "", "", "", ""],
[20, None, None, None, None, None]
])
def test_numeric_conversions(self):
"""
Tests that column type changes occur as expected in the sandbox:
- Resulting cell values should all be of type Numeric or AltText
- Only non-compatible values should appear in the resulting BulkUpdateRecord
"""
self.load_sample(self.sample)
# Test Text -> Numeric conversion
out_actions = self.apply_user_action(["ModifyColumn", "Types", "text", { "type" : "Numeric" }])
self.assertPartialOutActions(out_actions, {
"stored": [
["ModifyColumn", "Types", "text", {"type": "Numeric"}],
["BulkUpdateRecord", "Types", [13, 14, 19],
{"text": [0.0, 1.0, None]}],
["UpdateRecord", "_grist_Tables_column", 21, {"type": "Numeric"}],
],
"undo": [
["BulkUpdateRecord", "Types", [13, 14, 19],
{"text": [False, True, ""]}],
["ModifyColumn", "Types", "text", {"type": "Text"}],
["UpdateRecord", "_grist_Tables_column", 21, {"type": "Text"}],
]
})
# Test Numeric -> Numeric conversion
out_actions = self.apply_user_action(["ModifyColumn", "Types", "numeric", {"type": "Numeric"}])
self.assertPartialOutActions(out_actions, {
"stored": [],
"undo": []
})
# Test Int -> Numeric conversion
out_actions = self.apply_user_action(["ModifyColumn", "Types", "int", { "type" : "Numeric" }])
self.assertPartialOutActions(out_actions, {
"stored": [
["ModifyColumn", "Types", "int", {"type": "Numeric"}],
["BulkUpdateRecord", "Types", [13, 14, 19],
{"int": [0.0, 1.0, None]}],
["UpdateRecord", "_grist_Tables_column", 23, {"type": "Numeric"}],
],
"undo": [
["BulkUpdateRecord", "Types", [13, 14, 19],
{"int": [False, True, ""]}],
["ModifyColumn", "Types", "int", {"type": "Int"}],
["UpdateRecord", "_grist_Tables_column", 23, {"type": "Int"}],
]
})
# Test Bool -> Numeric conversion
out_actions = self.apply_user_action(["ModifyColumn", "Types", "bool", { "type" : "Numeric" }])
self.assertPartialOutActions(out_actions, {
"stored": [
["ModifyColumn", "Types", "bool", {"type": "Numeric"}],
["BulkUpdateRecord", "Types", [13, 14, 17, 18, 19],
{"bool": [0.0, 1.0, 0.0, 1.0, None]}],
["UpdateRecord", "_grist_Tables_column", 24, {"type": "Numeric"}],
],
"undo": [
["BulkUpdateRecord", "Types", [13, 14, 17, 18, 19],
{"bool": [False, True, False, True, ""]}],
["ModifyColumn", "Types", "bool", {"type": "Bool"}],
["UpdateRecord", "_grist_Tables_column", 24, {"type": "Bool"}],
]
})
# Test Date -> Numeric conversion
out_actions = self.apply_user_action(["ModifyColumn", "Types", "date", { "type" : "Numeric" }])
self.assertPartialOutActions(out_actions, {
"stored": [
["ModifyColumn", "Types", "date", {"type": "Numeric"}],
["BulkUpdateRecord", "Types", [13, 14, 19],
{"date": [0.0, 1.0, None]}],
["UpdateRecord", "_grist_Tables_column", 25, {"type": "Numeric"}]
],
"undo": [
["BulkUpdateRecord", "Types", [13, 14, 19],
{"date": [False, True, ""]}],
["ModifyColumn", "Types", "date", {"type": "Date"}],
["UpdateRecord", "_grist_Tables_column", 25, {"type": "Date"}]
]
})
# Assert that the final table is as expected
self.assertTableData("Types", data=[
["id", "text", "numeric", | |
# Lint as: python2, python3
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for lite.py."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import tempfile
from absl.testing import parameterized
import numpy as np
import six
from six.moves import range
from tensorflow.lite.python import lite
from tensorflow.lite.python import lite_constants
from tensorflow.lite.python.convert import ConverterError
from tensorflow.lite.python.interpreter import Interpreter
from tensorflow.python import keras
from tensorflow.python.client import session
from tensorflow.python.eager import context
from tensorflow.python.eager import def_function
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import gen_array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn_ops
from tensorflow.python.ops import variable_scope
from tensorflow.python.ops import variables
from tensorflow.python.ops.variables import global_variables_initializer as _global_variables_initializer
from tensorflow.python.platform import gfile
from tensorflow.python.platform import resource_loader
from tensorflow.python.platform import test
from tensorflow.python.saved_model import saved_model
from tensorflow.python.training.training_util import write_graph
class TestModels(test_util.TensorFlowTestCase):
def assertValidDebugInfo(self, debug_info):
"""Verify the DebugInfo is valid."""
file_names = set()
for file_path in debug_info.files:
file_names.add(os.path.basename(file_path))
# To make the test independent of how the nodes are created, we only assert
# the name of this test file.
self.assertIn('lite_test.py', file_names)
self.assertNotIn('lite_v2_test.py', file_names)
class FromConstructor(TestModels):
# Tests invalid constructors using a dummy value for the GraphDef.
def testInvalidConstructor(self):
message = ('If input_tensors and output_tensors are None, both '
'input_arrays_with_shape and output_arrays must be defined.')
# `output_arrays` is not defined.
with self.assertRaises(ValueError) as error:
lite.TFLiteConverter(
None, None, [], input_arrays_with_shape=[('input', [3, 9])])
self.assertEqual(message, str(error.exception))
# `input_arrays_with_shape` is not defined.
with self.assertRaises(ValueError) as error:
lite.TFLiteConverter(None, [], None, output_arrays=['output'])
self.assertEqual(message, str(error.exception))
# Tests valid constructors using a dummy value for the GraphDef.
def testValidConstructor(self):
converter = lite.TFLiteConverter(
None,
None,
None,
input_arrays_with_shape=[('input', [3, 9])],
output_arrays=['output'])
self.assertFalse(converter._has_valid_tensors())
self.assertEqual(converter.get_input_arrays(), ['input'])
with self.assertRaises(ValueError) as error:
converter._set_batch_size(1)
self.assertEqual(
'The batch size cannot be set for this model. Please use '
'input_shapes parameter.', str(error.exception))
converter = lite.TFLiteConverter(None, ['input_tensor'], ['output_tensor'])
self.assertTrue(converter._has_valid_tensors())
class FromSessionTest(TestModels, parameterized.TestCase):
def testFloat(self):
with ops.Graph().as_default():
in_tensor = array_ops.placeholder(
shape=[1, 16, 16, 3], dtype=dtypes.float32)
out_tensor = in_tensor + in_tensor
sess = session.Session()
# Convert model and ensure model is not None.
converter = lite.TFLiteConverter.from_session(sess, [in_tensor],
[out_tensor])
tflite_model = converter.convert()
self.assertTrue(tflite_model)
# Check values from converted model.
interpreter = Interpreter(model_content=tflite_model)
interpreter.allocate_tensors()
input_details = interpreter.get_input_details()
self.assertEqual(1, len(input_details))
self.assertEqual('Placeholder', input_details[0]['name'])
self.assertEqual(np.float32, input_details[0]['dtype'])
self.assertTrue(([1, 16, 16, 3] == input_details[0]['shape']).all())
self.assertEqual((0., 0.), input_details[0]['quantization'])
output_details = interpreter.get_output_details()
self.assertEqual(1, len(output_details))
self.assertEqual('add', output_details[0]['name'])
self.assertEqual(np.float32, output_details[0]['dtype'])
self.assertTrue(([1, 16, 16, 3] == output_details[0]['shape']).all())
self.assertEqual((0., 0.), output_details[0]['quantization'])
@parameterized.named_parameters(
('EnableMlirConverter', True), # enable mlir
('DisableMlirConverter', False)) # disable mlir
def testString(self, enable_mlir):
with ops.Graph().as_default():
in_tensor = array_ops.placeholder(shape=[4], dtype=dtypes.string)
out_tensor = array_ops.reshape(in_tensor, shape=[2, 2])
sess = session.Session()
# Convert model and ensure model is not None.
converter = lite.TFLiteConverter.from_session(sess, [in_tensor],
[out_tensor])
converter.experimental_new_converter = enable_mlir
tflite_model = converter.convert()
self.assertTrue(tflite_model)
# Check values from converted model.
interpreter = Interpreter(model_content=tflite_model)
interpreter.allocate_tensors()
input_details = interpreter.get_input_details()
self.assertEqual(1, len(input_details))
self.assertEqual('Placeholder', input_details[0]['name'])
self.assertEqual(np.string_, input_details[0]['dtype'])
self.assertTrue(([4] == input_details[0]['shape']).all())
output_details = interpreter.get_output_details()
self.assertEqual(1, len(output_details))
self.assertEqual('Reshape', output_details[0]['name'])
self.assertEqual(np.string_, output_details[0]['dtype'])
self.assertTrue(([2, 2] == output_details[0]['shape']).all())
# TODO(b/122659643): Test setting/getting string data via the python
# interpreter API after support has been added.
@parameterized.named_parameters(
('EnableMlirConverter', True), # enable mlir
('DisableMlirConverter', False)) # disable mlir
def testQuantization(self, enable_mlir):
with ops.Graph().as_default():
in_tensor_1 = array_ops.placeholder(
shape=[1, 16, 16, 3], dtype=dtypes.float32, name='inputA')
in_tensor_2 = array_ops.placeholder(
shape=[1, 16, 16, 3], dtype=dtypes.float32, name='inputB')
out_tensor = array_ops.fake_quant_with_min_max_args(
in_tensor_1 + in_tensor_2, min=0., max=1., name='output')
sess = session.Session()
# Convert model and ensure model is not None.
converter = lite.TFLiteConverter.from_session(sess,
[in_tensor_1, in_tensor_2],
[out_tensor])
converter.inference_type = lite_constants.QUANTIZED_UINT8
converter.quantized_input_stats = {
'inputA': (0., 1.),
'inputB': (0., 1.)
} # mean, std_dev
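# With mean=0 and std_dev=1 the expected quantization parameters are
# scale = 1/std_dev = 1.0 and zero_point = mean = 0, which is what the
# (1., 0.) assertions below check.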
converter.experimental_new_converter = enable_mlir
tflite_model = converter.convert()
self.assertTrue(tflite_model)
# Check values from converted model.
interpreter = Interpreter(model_content=tflite_model)
interpreter.allocate_tensors()
input_details = interpreter.get_input_details()
self.assertEqual(2, len(input_details))
self.assertEqual('inputA', input_details[0]['name'])
self.assertEqual(np.uint8, input_details[0]['dtype'])
self.assertTrue(([1, 16, 16, 3] == input_details[0]['shape']).all())
self.assertEqual((1., 0.),
input_details[0]['quantization']) # scale, zero_point
self.assertEqual('inputB', input_details[1]['name'])
self.assertEqual(np.uint8, input_details[1]['dtype'])
self.assertTrue(([1, 16, 16, 3] == input_details[1]['shape']).all())
self.assertEqual((1., 0.),
input_details[1]['quantization']) # scale, zero_point
output_details = interpreter.get_output_details()
self.assertEqual(1, len(output_details))
self.assertEqual(np.uint8, output_details[0]['dtype'])
self.assertTrue(([1, 16, 16, 3] == output_details[0]['shape']).all())
self.assertTrue(output_details[0]['quantization'][0] > 0) # scale
def testQuantizationInvalid(self):
with ops.Graph().as_default():
in_tensor_1 = array_ops.placeholder(
shape=[1, 16, 16, 3], dtype=dtypes.float32, name='inputA')
in_tensor_2 = array_ops.placeholder(
shape=[1, 16, 16, 3], dtype=dtypes.float32, name='inputB')
out_tensor = array_ops.fake_quant_with_min_max_args(
in_tensor_1 + in_tensor_2, min=0., max=1., name='output')
sess = session.Session()
# Convert model and ensure model is not None.
converter = lite.TFLiteConverter.from_session(sess,
[in_tensor_1, in_tensor_2],
[out_tensor])
converter.inference_type = lite_constants.QUANTIZED_UINT8
converter.quantized_input_stats = {'inputA': (0., 1.)} # mean, std_dev
with self.assertRaises(ValueError) as error:
converter.convert()
self.assertEqual(
'Quantization input stats are not available for input tensors '
'\'inputB\'.', str(error.exception))
def testIntermediateInputArray(self):
"""Convert a model from an intermediate input array."""
with ops.Graph().as_default():
in_tensor_init = array_ops.placeholder(
shape=[1, 16, 16, 3], dtype=dtypes.float32)
in_tensor_final = in_tensor_init + in_tensor_init
out_tensor = in_tensor_final + in_tensor_final
sess = session.Session()
# Convert model and ensure model is not None.
converter = lite.TFLiteConverter.from_session(sess, [in_tensor_final],
[out_tensor])
tflite_model = converter.convert()
self.assertTrue(tflite_model)
# Check values from converted model.
interpreter = Interpreter(model_content=tflite_model)
interpreter.allocate_tensors()
input_details = interpreter.get_input_details()
self.assertEqual(1, len(input_details))
self.assertEqual('add', input_details[0]['name'])
self.assertEqual(np.float32, input_details[0]['dtype'])
self.assertTrue(([1, 16, 16, 3] == input_details[0]['shape']).all())
self.assertEqual((0., 0.), input_details[0]['quantization'])
output_details = interpreter.get_output_details()
self.assertEqual(1, len(output_details))
self.assertEqual('add_1', output_details[0]['name'])
self.assertEqual(np.float32, output_details[0]['dtype'])
self.assertTrue(([1, 16, 16, 3] == output_details[0]['shape']).all())
self.assertEqual((0., 0.), output_details[0]['quantization'])
def testSizeNoneInvalid(self):
with ops.Graph().as_default():
in_tensor = array_ops.placeholder(dtype=dtypes.float32)
out_tensor = in_tensor + in_tensor
sess = session.Session()
# Test None as shape.
converter = lite.TFLiteConverter.from_session(sess, [in_tensor],
[out_tensor])
with self.assertRaises(ValueError) as error:
converter.convert()
self.assertEqual('Provide an input shape for input array \'Placeholder\'.',
str(error.exception))
@parameterized.named_parameters(
('EnableMlirConverter', True), # enable mlir
('DisableMlirConverter', False)) # disable mlir
def testScalarValid(self, enable_mlir):
# Construct a graph using a scalar (empty shape) input.
with ops.Graph().as_default():
in_tensor = array_ops.placeholder(dtype=dtypes.float32, shape=[])
out_tensor = in_tensor + in_tensor
sess = session.Session()
# Test conversion with the scalar input shape.
converter = lite.TFLiteConverter.from_session(sess, [in_tensor],
[out_tensor])
converter.experimental_new_converter = enable_mlir
tflite_model = converter.convert()
self.assertTrue(tflite_model)
# Check values from converted model.
interpreter = Interpreter(model_content=tflite_model)
interpreter.allocate_tensors()
input_details = interpreter.get_input_details()
self.assertEqual(1, len(input_details))
self.assertEqual('Placeholder', input_details[0]['name'])
self.assertEqual(np.float32, input_details[0]['dtype'])
self.assertTrue(([] == input_details[0]['shape']).all())
output_details = interpreter.get_output_details()
self.assertEqual(1, len(output_details))
self.assertEqual('add', output_details[0]['name'])
self.assertEqual(np.float32, output_details[0]['dtype'])
self.assertTrue(([] == output_details[0]['shape']).all())
# Validate inference using the scalar inputs/outputs.
test_input = np.array(4.0, dtype=np.float32)
expected_output = np.array(8.0, dtype=np.float32)
interpreter.set_tensor(input_details[0]['index'], test_input)
interpreter.invoke()
output_data = interpreter.get_tensor(output_details[0]['index'])
self.assertTrue((expected_output == output_data).all())
def testSizeInvalid(self):
with ops.Graph().as_default():
in_tensor = array_ops.placeholder(
shape=[1, None, 16, 3], dtype=dtypes.float32)
out_tensor = in_tensor + in_tensor
sess = session.Session()
# Test invalid shape. None after 1st dimension.
converter = lite.TFLiteConverter.from_session(sess, [in_tensor],
[out_tensor])
with self.assertRaises(ValueError) as error:
converter.convert()
self.assertEqual(
'None is only supported in the 1st dimension. Tensor '
'\'Placeholder\' has invalid shape \'[1, None, 16, 3]\'.',
str(error.exception))
def testBatchSizeValid(self):
with ops.Graph().as_default():
in_tensor = array_ops.placeholder(
shape=[None, 16, 16, 3], dtype=dtypes.float32)
out_tensor = in_tensor + in_tensor
sess = session.Session()
# Convert model and ensure model is not None.
converter = lite.TFLiteConverter.from_session(sess, [in_tensor],
[out_tensor])
tflite_model = converter.convert()
self.assertTrue(tflite_model)
# Check values from converted model.
interpreter = Interpreter(model_content=tflite_model)
interpreter.allocate_tensors()
input_details = interpreter.get_input_details()
self.assertEqual(1, len(input_details))
self.assertEqual('Placeholder', input_details[0]['name'])
self.assertEqual(np.float32, input_details[0]['dtype'])
self.assertTrue(([1, 16, 16, 3] == input_details[0]['shape']).all())
self.assertEqual((0., 0.), input_details[0]['quantization'])
output_details = interpreter.get_output_details()
self.assertEqual(1, len(output_details))
self.assertEqual('add', output_details[0]['name'])
self.assertEqual(np.float32, output_details[0]['dtype'])
self.assertTrue(([1, 16, 16, 3] == output_details[0]['shape']).all())
self.assertEqual((0., 0.), output_details[0]['quantization'])
def testBatchSizeNonZero(self):
with ops.Graph().as_default():
in_tensor_1 = array_ops.placeholder(
shape=[None, 4], dtype=dtypes.float32, name='input1')
in_tensor_2 = array_ops.placeholder(
shape=[4, 10], dtype=dtypes.float32, name='input2')
out_tensor = math_ops.matmul(in_tensor_1, in_tensor_2)
sess = session.Session()
# Convert model and ensure model is not None.
converter = lite.TFLiteConverter.from_session(sess,
[in_tensor_1, in_tensor_2],
[out_tensor])
tflite_model = converter.convert()
self.assertTrue(tflite_model)
# Check values from converted model.
interpreter = Interpreter(model_content=tflite_model)
interpreter.allocate_tensors()
input_details = interpreter.get_input_details()
self.assertLen(input_details, 2)
self.assertEqual('input1', input_details[0]['name'])
self.assertTrue(([1, 4] == input_details[0]['shape']).all())
self.assertEqual('input2', input_details[1]['name'])
self.assertTrue(([4, 10] == input_details[1]['shape']).all())
def testFreezeGraph(self):
with ops.Graph().as_default():
in_tensor = array_ops.placeholder(
shape=[1, 16, 16, 3], dtype=dtypes.float32)
var = variable_scope.get_variable(
'weights', shape=[1, 16, 16, 3], dtype=dtypes.float32)
out_tensor = in_tensor + var
sess = session.Session()
sess.run(_global_variables_initializer())
# Convert model and ensure model is not None.
converter = lite.TFLiteConverter.from_session(sess, [in_tensor],
[out_tensor])
tflite_model = converter.convert()
self.assertTrue(tflite_model)
# Check values from converted | |
'/'
self._get_file()
self._read_file()
self.distance = distance
self.accr = accr
if not accr:
self.mmdot = 0
elif mmdot is not None:
self.mmdot = mmdot
elif mdot is not None:
self.mmdot = self.mass * mdot # MJup^2/yr
else:
mdot = self.mass / (1e6 * self.age) # Assumed MJup/yr
self.mmdot = self.mass * mdot # MJup^2/yr
self.rin = accr_rin
self.truncated = truncated
def _get_file(self):
"""Find the file closest to the input parameters"""
files = []; masses = []; ages = []
for file in os.listdir(self.sub_dir):
files.append(file)
fsplit = re.split(r'[_\.]', file)
ind_mass = fsplit.index('mass') + 1
ind_age = fsplit.index('age') + 1
masses.append(int(fsplit[ind_mass]))
ages.append(int(fsplit[ind_age]))
files = np.array(files)
ages = np.array(ages)
masses = np.array(masses)
# Find those indices closest in mass
mdiff = np.abs(masses - self.mass)
ind_mass = mdiff == np.min(mdiff)
# Of those masses, find the closest age
adiff = np.abs(ages - self.age)
ind_age = adiff[ind_mass] == np.min(adiff[ind_mass])
# Get the final file name
self.file = ((files[ind_mass])[ind_age])[0]
def _read_file(self):
"""Read in the file data"""
# Read in the file's content row-by-row (saved as a string)
with open(self.sub_dir + self.file) as f:
content = f.readlines()
content = [x.strip('\n') for x in content]
# Parse the strings into an array
# Row #, Value
# 1 col 1: age (Myr);
# cols 2-601: wavelength (in microns, in range 0.8-15.0)
# 2-end col 1: initial S;
# cols 2-601: F_nu (in mJy for a source at 10 pc)
ncol = len(content[0].split())
nrow = len(content)
arr = np.zeros([nrow,ncol])
for i,row in enumerate(content):
arr[i,:] = np.array(content[i].split(), dtype='float64')
# Find the closest entropy and save
entropy = arr[1:,0]
diff = np.abs(self.entropy - entropy)
ind = diff == np.min(diff)
self._flux = arr[1:,1:][ind,:].flatten()
self._fluxunits = 'mJy'
# Save the wavelength information
self._wave = arr[0,1:]
self._waveunits = 'um'
# Distance (10 pc)
self._distance = 10
@property
def mdot(self):
"""Accretion rate in MJup/yr"""
return self.mmdot / self.mass
@property
def wave(self):
"""Wavelength of spectrum"""
return self._wave
@property
def waveunits(self):
"""Wavelength units"""
return self._waveunits
@property
def flux(self):
"""Spectral flux"""
return self._flux
@property
def fluxunits(self):
"""Flux units"""
return self._fluxunits
@property
def distance(self):
"""Assumed distance to source (pc)"""
return self._distance
@distance.setter
def distance(self, value):
self._flux *= (self._distance/value)**2
self._distance = value
@property
def atmo(self):
"""Atmosphere type
"""
return self._atmo
@property
def mass(self):
"""Mass of planet (MJup)"""
return self._mass
@property
def age(self):
"""Age in millions of years"""
return self._age
@property
def entropy(self):
"""Initial entropy (8.0-13.0)"""
return self._entropy
def export_pysynphot(self, waveout='angstrom', fluxout='flam'):
"""Output to :mod:`pysynphot.spectrum` object
Export object settings to a :mod:`pysynphot.spectrum`.
Parameters
----------
waveout : str
Wavelength units for output
fluxout : str
Flux units for output
"""
w = self.wave; f = self.flux
name = (re.split(r'[\.]', self.file))[0]  # [5:]
sp = S.ArraySpectrum(w, f, name=name, waveunits=self.waveunits, fluxunits=self.fluxunits)
sp.convert(waveout)
sp.convert(fluxout)
if self.accr and (self.mmdot>0):
sp_mdot = sp_accr(self.mmdot, rin=self.rin,
dist=self.distance, truncated=self.truncated,
waveout=waveout, fluxout=fluxout)
# Interpolate accretion spectrum at each wavelength
# and create new composite spectrum
fnew = np.interp(sp.wave, sp_mdot.wave, sp_mdot.flux)
sp_new = S.ArraySpectrum(sp.wave, sp.flux+fnew,
waveunits=waveout, fluxunits=fluxout)
return sp_new
else:
return sp
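# Illustrative usage sketch (not part of the class): assuming an instance `pl` of this
# class has already been constructed elsewhere with the usual mass/age/entropy inputs,
# the exported spectrum behaves like any other pysynphot spectrum:
#
#   sp = pl.export_pysynphot()
#   print(sp.name, sp.waveunits, sp.fluxunits)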
#class planets_sb11(planets_sb12):
# """Deprecated class. Use :class:`planets_sb12` instead."""
# # Turns out the paper is <NAME> (2012), not 2011
# def __init__(self, *args, **kwargs):
#
# _log.warning('planets_sb11 is deprecated. Use planets_sb12 instead.')
# planets_sb12.__init__(self, *args, **kwargs)
def sp_accr(mmdot, rin=2, dist=10, truncated=False,
waveout='angstrom', fluxout='flam', base_dir=None):
"""Exoplanet accretion flux values (Zhu et al., 2015).
Calculates the wavelength-dependent flux of an exoplanet accretion disk/shock
from Zhu et al. (2015).
Note
----
This function only uses the table of photometric values to calculate the
photometric brightness of a source, so it is not very useful for simulating
spectral observations.
Parameters
----------
mmdot : float
Product of the exoplanet mass and mass accretion rate (MJup^2/yr).
Values range from 1e-7 to 1e-2.
rin : float
Inner radius of accretion disk (units of RJup; default: 2).
dist : float
Distance to object (pc).
truncated: bool
If True, then the values are for a disk with Rout=50 RJup,
otherwise, values were calculated for a full disk (Rout=1000 RJup).
Accretion from a "tuncated disk" is due mainly to MRI.
Luminosities for full and truncated disks are very similar.
waveout : str
Wavelength units for output
fluxout : str
Flux units for output
base_dir: str, None
Location of accretion model sub-directories.
"""
base_dir = conf.PYNRC_PATH + 'spiegel/' if base_dir is None else base_dir
fname = base_dir + 'zhu15_accr.txt'
names = ('MMdot', 'Rin', 'Tmax', 'J', 'H', 'K', 'L', 'M', 'N', 'J2', 'H2', 'K2', 'L2', 'M2', 'N2')
tbl = ascii.read(fname, guess=True, names=names)
# Inner radius values and Mdot values
rin_vals = np.unique(tbl['Rin'])
mdot_vals = np.unique(tbl['MMdot'])
nmdot = len(mdot_vals)
assert (rin >=rin_vals.min()) & (rin <=rin_vals.max()), "rin is out of range"
assert (mmdot>=mdot_vals.min()) & (mmdot<=mdot_vals.max()), "mmdot is out of range"
if truncated:
mag_names = ('J2', 'H2', 'K2', 'L2', 'M2', 'N2')
else:
mag_names = ('J', 'H', 'K', 'L', 'M', 'N')
wcen = np.array([ 1.2, 1.6, 2.2, 3.8, 4.8, 10.0])
zpt = np.array([1600, 1020, 657, 252, 163, 39.8])
mag_arr = np.zeros([6,nmdot])
for i, mv in enumerate(mdot_vals):
for j, mag in enumerate(mag_names):
tbl_sub = tbl[tbl['MMdot']==mv]
rinvals = tbl_sub['Rin']
magvals = tbl_sub[mag]
mag_arr[j,i] = np.interp(rin, rinvals, magvals)
mag_vals = np.zeros(6)
for j in range(6):
xi = 10**(mmdot)
xp = 10**(mdot_vals)
yp = 10**(mag_arr[j])
mag_vals[j] = np.log10(np.interp(xi, xp, yp))
mag_vals += 5*np.log10(dist/10)
flux_Jy = 10**(-mag_vals/2.5) * zpt
sp = S.ArraySpectrum(wcen*1e4, flux_Jy, fluxunits='Jy')
sp.convert(waveout)
sp.convert(fluxout)
return sp
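# Illustrative usage sketch (values are placeholders, not recommendations): generate the
# accretion spectrum for M*Mdot = 1e-5 MJup^2/yr at 20 pc and inspect its units.
#
#   sp = sp_accr(1e-5, rin=2, dist=20)
#   print(sp.waveunits, sp.fluxunits)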
def jupiter_spec(dist=10, waveout='angstrom', fluxout='flam', base_dir=None):
"""Jupiter as an Exoplanet
Read in theoretical Jupiter spectrum from Irwin et al. 2014 and output
as a :mod:`pysynphot.spectrum`.
Parameters
===========
dist : float
Distance to Jupiter (pc).
waveout : str
Wavelength units for output.
fluxout : str
Flux units for output.
base_dir: str, None
Location of tabulated file irwin_2014_ref_spectra.txt.
"""
base_dir = conf.PYNRC_PATH + 'solar_system/' if base_dir is None else base_dir
fname = base_dir + 'irwin_2014_ref_spectra.txt'
# Column 1: Wavelength (in microns)
# Column 2: 100*Ap/Astar (Earth-Sun Primary Transit)
# Column 3: 100*Ap/Astar (Earth-Mdwarf Primary Transit)
# Column 4: 100*Ap/Astar (Jupiter-Sun Primary Transit)
# Column 5: Fp/Astar (Earth-Sun Secondary Eclipse)
# Column 6: Disc-averaged radiance of Earth (W cm-2 sr-1 micron-1)
# Column 7: Fp/Astar (Jupiter-Sun Secondary Eclipse)
# Column 8: Disc-averaged radiance of Jupiter (W cm-2 sr-1 micron-1)
# Column 9: Solar spectral irradiance spectrum (W micron-1)
# (Solar Radius = 695500.0 km)
# Column 10: Mdwarf spectral irradiance spectrum (W micron-1)
# (Mdwarf Radius = 97995.0 km)
data = ascii.read(fname, data_start=14)
wspec = data['col1'] * 1e4 # Angstrom
fspec = data['col8'] * 1e3 # erg s-1 cm^-2 A^-1 sr^-1
# Steradians to square arcsec
sr_to_asec2 = (3600*180/np.pi)**2
fspec /= sr_to_asec2 # *** / arcsec^2
# Angular size of Jupiter at some distance
RJup_km = 71492.0
au_to_km = 149597870.7
# Angular size (arcsec) of Jupiter radius
RJup_asec = RJup_km / au_to_km / dist
area = np.pi * RJup_asec**2
# flux in f_lambda
fspec *= area # erg s-1 cm^-2 A^-1
sp = S.ArraySpectrum(wspec, fspec, fluxunits='flam')
sp.convert(waveout)
sp.convert(fluxout)
return sp
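# Illustrative usage sketch: place the Irwin et al. (2014) Jupiter spectrum at 10 pc and
# confirm the requested output units.
#
#   sp_jup = jupiter_spec(dist=10, waveout='angstrom', fluxout='flam')
#   print(sp_jup.waveunits, sp_jup.fluxunits)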
def linder_table(file=None, **kwargs):
"""Load Linder Model Table
Function to read in isochrone models from Linder et al. 2019.
Returns an astropy Table.
Parameters
----------
age : float
Age in Myr. If set to None, then an array of ages from the file
is used to generate dictionary. If set, chooses the closest age
supplied in table.
file : string
Location and name of Linder et al file.
Default is 'BEX_evol_mags_-3_MH_0.00.dat'
"""
# Default file to read and load
if file is None:
indir = os.path.join(conf.PYNRC_PATH, 'linder/isochrones/')
file = indir + 'BEX_evol_mags_-3_MH_0.00.dat'
with open(file) as f:
content = f.readlines()
content = [x.strip('\n') for x in content]
cnames = content[2].split(',')
cnames = [name.split(':')[1] for name in cnames]
ncol = len(cnames)
content_arr = []
for line in content[4:]:
arr = np.array(line.split()).astype(float)  # np.float is removed in newer NumPy releases
if len(arr)>0:
content_arr.append(arr)
content_arr = np.array(content_arr)
# Convert to Astropy Table
tbl = Table(rows=content_arr, names=cnames)
return tbl
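# Illustrative usage sketch: load the default BEX isochrone table and list its columns
# (the exact column names depend on the data file installed with the package).
#
#   tbl = linder_table()
#   print(tbl.colnames)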
def linder_filter(table, filt, age, dist=10, cond_interp=True, cond_file=None, **kwargs):
"""Linder Mags vs Mass Arrays
Given a Linder table, NIRCam filter, and age, return arrays of MJup
| |
rename_replace option, job creation will be
determined as follows. If the job name is already used, a new job name with
the suffix ".DataStage job" will be used. If the new job name is not
currently used, the job will be created with this name. In case the new job
name is already used, the job creation will not happen and an error will be
raised.
:param str attachment_type: (optional) Type of attachment. The default
attachment type is "isx".
:param str file_name: (optional) Name of the input file, if it exists.
:param dict headers: A `dict` containing the request headers
:return: A `DetailedResponse` containing the result, headers and HTTP status code.
:rtype: DetailedResponse with `dict` result representing a `ImportResponse` object
"""
if body is None:
raise ValueError('body must be provided')
headers = {}
sdk_headers = get_sdk_headers(service_name=self.DEFAULT_SERVICE_NAME,
service_version='V3',
operation_id='create_migration')
headers.update(sdk_headers)
params = {
'catalog_id': catalog_id,
'project_id': project_id,
'on_failure': on_failure,
'conflict_resolution': conflict_resolution,
'attachment_type': attachment_type,
'file_name': file_name
}
data = body
headers['content-type'] = 'application/octet-stream'
if 'headers' in kwargs:
headers.update(kwargs.get('headers'))
headers['Accept'] = 'application/json;charset=utf-8'
url = '/v3/migration/isx_imports'
request = self.prepare_request(method='POST',
url=url,
headers=headers,
params=params,
data=data)
response = self.send(request)
return response
def delete_migration(self,
import_id: str,
*,
catalog_id: str = None,
project_id: str = None,
**kwargs
) -> DetailedResponse:
"""
Cancel a previous import request.
Cancel a previous import request. Use GET /v3/migration/imports/{import_id} to
obtain the current status of the import, including whether it has been cancelled.
:param str import_id: Unique ID of the import request.
:param str catalog_id: (optional) The ID of the catalog to use. catalog_id
or project_id is required.
:param str project_id: (optional) The ID of the project to use. catalog_id
or project_id is required.
:param dict headers: A `dict` containing the request headers
:return: A `DetailedResponse` containing the result, headers and HTTP status code.
:rtype: DetailedResponse
"""
if import_id is None:
raise ValueError('import_id must be provided')
headers = {}
sdk_headers = get_sdk_headers(service_name=self.DEFAULT_SERVICE_NAME,
service_version='V3',
operation_id='delete_migration')
headers.update(sdk_headers)
params = {
'catalog_id': catalog_id,
'project_id': project_id
}
if 'headers' in kwargs:
headers.update(kwargs.get('headers'))
path_param_keys = ['import_id']
path_param_values = self.encode_path_vars(import_id)
path_param_dict = dict(zip(path_param_keys, path_param_values))
url = '/v3/migration/isx_imports/{import_id}'.format(**path_param_dict)
request = self.prepare_request(method='DELETE',
url=url,
headers=headers,
params=params)
response = self.send(request)
return response
def get_migration(self,
import_id: str,
*,
catalog_id: str = None,
project_id: str = None,
**kwargs
) -> DetailedResponse:
"""
Get the status of a previous import request.
Gets the status of an import request. The status field in the response object
indicates if the given import is completed, in progress, or failed. Detailed
status information about each imported data flow is also contained in the response
object.
:param str import_id: Unique ID of the import request.
:param str catalog_id: (optional) The ID of the catalog to use. catalog_id
or project_id is required.
:param str project_id: (optional) The ID of the project to use. catalog_id
or project_id is required.
:param dict headers: A `dict` containing the request headers
:return: A `DetailedResponse` containing the result, headers and HTTP status code.
:rtype: DetailedResponse with `dict` result representing a `ImportResponse` object
"""
if import_id is None:
raise ValueError('import_id must be provided')
headers = {}
sdk_headers = get_sdk_headers(service_name=self.DEFAULT_SERVICE_NAME,
service_version='V3',
operation_id='get_migration')
headers.update(sdk_headers)
params = {
'catalog_id': catalog_id,
'project_id': project_id
}
if 'headers' in kwargs:
headers.update(kwargs.get('headers'))
headers['Accept'] = 'application/json;charset=utf-8'
path_param_keys = ['import_id']
path_param_values = self.encode_path_vars(import_id)
path_param_dict = dict(zip(path_param_keys, path_param_values))
url = '/v3/migration/isx_imports/{import_id}'.format(**path_param_dict)
request = self.prepare_request(method='GET',
url=url,
headers=headers,
params=params)
response = self.send(request)
return response
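# Illustrative usage sketch (IDs and the client variable name are placeholders): given an
# authenticated instance `service` of this client, the status of a previous import can be
# polled with get_migration and read from the DetailedResponse:
#
#   response = service.get_migration('import-id', project_id='project-id')
#   print(response.get_status_code())
#   print(response.get_result())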
class CreateDatastageFlowsEnums:
"""
Enums for create_datastage_flows parameters.
"""
class AssetCategory(str, Enum):
"""
The category of the asset. Must be either SYSTEM or USER. Only a registered
service can use this parameter.
"""
SYSTEM = 'system'
USER = 'user'
class CreateDatastageSubflowsEnums:
"""
Enums for create_datastage_subflows parameters.
"""
class AssetCategory(str, Enum):
"""
The category of the asset. Must be either SYSTEM or USER. Only a registered
service can use this parameter.
"""
SYSTEM = 'system'
USER = 'user'
class CreateMigrationEnums:
"""
Enums for create_migration parameters.
"""
class OnFailure(str, Enum):
"""
Action when the first import failure occurs. The default action is "continue"
which will continue importing the remaining data flows. The "stop" action will
stop the import operation upon the first error.
"""
CONTINUE = 'continue'
STOP = 'stop'
class ConflictResolution(str, Enum):
"""
Resolution when data flow to be imported has a name conflict with an existing data
flow in the project or catalog. The default conflict resolution is "skip" will
skip the data flow so that it will not be imported. The "rename" resolution will
append "_Import_NNNN" suffix to the original name and use the new name for the
imported data flow, while the "replace" resolution will first remove the existing
data flow with the same name and import the new data flow. For the
"rename_replace" option, when the flow name is already used, a new flow name with
the suffix
"_DATASTAGE_ISX_IMPORT" will be used. If the name is not currently used, the
imported flow will be created with this name. In case the new name is already
used, the existing flow will be removed first before the imported flow is
created. With the rename_replace option, job creation will be determined as
follows. If the job name is already used, a new job name with the suffix
".DataStage job" will be used. If the new job name is not currently used, the job
will be created with this name. In case the new job name is already used, the job
creation will not happen and an error will be raised.
"""
SKIP = 'skip'
RENAME = 'rename'
REPLACE = 'replace'
RENAME_REPLACE = 'rename_replace'
class AttachmentType(str, Enum):
"""
Type of attachment. The default attachment type is "isx".
"""
ISX = 'isx'
##############################################################################
# Models
##############################################################################
class AssetEntityROV():
"""
The rules of visibility for an asset.
:attr List[str] members: (optional) An array of members belonging to
AssetEntityROV.
:attr int mode: (optional) The values for mode are 0 (public, searchable and
viewable by all), 8 (private, searchable by all, but not viewable unless view
permission given) or 16 (hidden, only searchable by users with view
permissions).
"""
def __init__(self,
*,
members: List[str] = None,
mode: int = None) -> None:
"""
Initialize a AssetEntityROV object.
:param List[str] members: (optional) An array of members belonging to
AssetEntityROV.
:param int mode: (optional) The values for mode are 0 (public, searchable
and viewable by all), 8 (private, searchable by all, but not viewable
unless view permission given) or 16 (hidden, only searchable by users with
view permissions).
"""
self.members = members
self.mode = mode
@classmethod
def from_dict(cls, _dict: Dict) -> 'AssetEntityROV':
"""Initialize a AssetEntityROV object from a json dictionary."""
args = {}
if 'members' in _dict:
args['members'] = _dict.get('members')
if 'mode' in _dict:
args['mode'] = _dict.get('mode')
return cls(**args)
@classmethod
def _from_dict(cls, _dict):
"""Initialize a AssetEntityROV object from a json dictionary."""
return cls.from_dict(_dict)
def to_dict(self) -> Dict:
"""Return a json dictionary representing this model."""
_dict = {}
if hasattr(self, 'members') and self.members is not None:
_dict['members'] = self.members
if hasattr(self, 'mode') and self.mode is not None:
_dict['mode'] = self.mode
return _dict
def _to_dict(self):
"""Return a json dictionary representing this model."""
return self.to_dict()
def __str__(self) -> str:
"""Return a `str` version of this AssetEntityROV object."""
return json.dumps(self.to_dict(), indent=2)
def __eq__(self, other: 'AssetEntityROV') -> bool:
"""Return `true` when self and other are equal, false otherwise."""
if not isinstance(other, self.__class__):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other: 'AssetEntityROV') -> bool:
"""Return `true` when self and other are not equal, false otherwise."""
return not self == other
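# Illustrative round-trip sketch: AssetEntityROV converts to and from plain dictionaries,
# which is how these models cross the JSON/REST boundary.
#
#   rov = AssetEntityROV.from_dict({'members': ['user1'], 'mode': 8})
#   assert rov.to_dict() == {'members': ['user1'], 'mode': 8}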
class AssetSystemMetadata():
"""
System metadata about an asset.
:attr str asset_id: (optional) The ID of the asset.
:attr str asset_type: (optional) The type of the asset.
:attr str catalog_id: (optional) The ID of the catalog which contains the asset.
`catalog_id` or `project_id` is required.
:attr datetime create_time: (optional) The timestamp when the asset was created
(in format YYYY-MM-DDTHH:mm:ssZ or YYYY-MM-DDTHH:mm:ss.sssZ, matching the
date-time format as specified by RFC 3339).
:attr str creator_id: (optional) The IAM ID of the | |
# Copyright 2020-2021 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""math operations, the function docs are adapted from Numpy API."""
from ..ops import operations as P
from ..ops import functional as F
from ..ops import composite as C
from ..ops.primitive import constexpr
from ..common import dtype as mstype
from .array_ops import ravel
from .array_ops import where as where_
from .array_creations import asarray, full
from .utils import _is_scalar, _expand, _broadcast_to, _is_empty
from .utils_const import _infer_out_shape, _check_axis_valid, _get_device_compile, \
_check_shape_aligned, _empty, _check_is_tensor, _raise_type_error, _check_same_type, \
_check_is_float, _check_input_tensor
from .dtypes import nan
_mean_default = P.ReduceMean()
_mean_keepdims = P.ReduceMean(True)
_matmul = P.MatMul(False, False)
_matmul_T = P.MatMul(False, True)
def absolute(x, out=None, where=True, dtype=None):
"""
Calculates the absolute value element-wise.
Note:
Numpy arguments casting, order, dtype, subok, signature, and extobj are
not supported.
When argument where is provided, argument out must have a tensor value.
Argument out is not supported for storing the result, however it can be
used in combination with argument where to set the value at indices for
which where is set to False.
Currently the backend kernel only supports float calculation; if the input
is not a float, it will be cast to float32 and cast back.
Args:
x (Tensor): Tensor to be used for calculation.
out (Tensor or None): optional, defaults to None.
where (Tensor or None): optional. For any non-default value of type other
than Tensor or None, the output retains its original value.
This condition is broadcasted over the input. At locations where the
condition is True, the out array will be set to the ufunc result.
Elsewhere, the out array will retain its original value. Note that
if an uninitialized out array is created via the default out=None,
locations within it where the condition is False will remain
uninitialized.
dtype (data type): optional, defaults to None. Overrides the dtype of the
output Tensor.
Returns:
Tensor.
Raises:
TypeError: If input arguments have types not specified above.
Supported Platforms:
``Ascend`` ``GPU`` ``CPU``
Examples:
>>> import mindspore.numpy as np
>>> x = np.asarray([1, 2, 3, -4, -5], np.float32)
>>> output = np.absolute(x)
>>> print(output)
[1. 2. 3. 4. 5.]
"""
if not _check_is_tensor(F.typeof(x)):
_raise_type_error("Input is expected to be a tensor, but got ", x)
original_dtype = x.dtype
if not _check_is_float(original_dtype) and dtype is None:
x = x.astype(mstype.float32)
return _apply_tensor_op(F.absolute, x, out=out, where=where, dtype=dtype).astype(original_dtype)
return _apply_tensor_op(F.absolute, x, out=out, where=where, dtype=dtype)
def add(x1, x2, out=None, where=True, dtype=None):
"""
Adds arguments element-wise.
Note:
Numpy arguments casting, order, dtype, subok, signature, and extobj are
not supported.
When argument where is provided, argument out must have a tensor value.
Argument out is not supported for storing the result, however it can be
used in combination with argument where to set the value at indices for
which where is set to False.
Args:
x1 (Tensor): input to be added.
x2 (Tensor): input to be added.
out (Tensor or None): optional, defaults to None.
where (Tensor or None): optional. For any non-default value of type other
than Tensor or None, the output retains its original value.
This condition is broadcast over the input. At locations where the
condition is True, the out array will be set to the ufunc result.
Elsewhere, the out array will retain its original value. Note that
if an uninitialized out array is created via the default out=None,
locations within it where the condition is False will remain
uninitialized.
dtype (data type): optional, defaults to None. Overrides the dtype of the
output Tensor.
Returns:
Tensor or scalar, the sum of x1 and x2, element-wise. This is a scalar
if both x1 and x2 are scalars.
Raises:
TypeError: if the input is not a tensor.
Supported Platforms:
``Ascend`` ``GPU`` ``CPU``
Examples:
>>> x1 = np.full((3, 2), [1, 2])
>>> x2 = np.full((3, 2), [3, 4])
>>> output = np.add(x1, x2)
>>> print(output)
[[4, 6],
[4, 6],
[4, 6]]
"""
# broadcast is not fully supported in tensor_add on CPU,
# so we use tensor_sub as a substitute solution
if _get_device_compile() == 'CPU':
return subtract(x1, F.neg_tensor(x2), out=out, where=where, dtype=dtype)
return _apply_tensor_op(F.tensor_add, x1, x2, out=out, where=where, dtype=dtype)
def subtract(x1, x2, out=None, where=True, dtype=None):
"""
Subtracts arguments, element-wise.
Note:
Numpy arguments casting, order, dtype, subok, signature, and extobj are
not supported.
When argument where is provided, argument out must have a tensor value.
Argument out is not supported for storing the result, however it can be
used in combination with argument where to set the value at indices for
which where is set to False.
Args:
x1 (Tensor): the input to be subtracted from.
x2 (Tensor): the input to be subtracted by.
out (Tensor or None): optional, defaults to None.
where (Tensor or None): optional. For any non-default value of type other
than Tensor or None, the output retains its original value.
This condition is broadcast over the input. At locations where the
condition is True, the out array will be set to the ufunc result.
Elsewhere, the out array will retain its original value. Note that
if an uninitialized out array is created via the default out=None,
locations within it where the condition is False will remain
uninitialized.
dtype (data type): optional, defaults to None. Overrides the dtype of the
output Tensor.
Returns:
Tensor or scalar, the difference of x1 and x2, element-wise. This is a
scalar if both x1 and x2 are scalars.
Raises:
TypeError: if the input is not a tensor.
Supported Platforms:
``Ascend`` ``GPU`` ``CPU``
Examples:
>>> x1 = np.full((3, 2), [1, 2])
>>> x2 = np.full((3, 2), [3, 4])
>>> output = np.subtract(x1, x2)
>>> print(output)
[[-2, -2],
[-2, -2],
[-2, -2]]
"""
return _apply_tensor_op(F.tensor_sub, x1, x2, out=out, where=where, dtype=dtype)
def multiply(x1, x2, out=None, where=True, dtype=None):
"""
Multiplies arguments element-wise.
Note:
Numpy arguments casting, order, dtype, subok, signature, and extobj are
not supported.
When argument where is provided, argument out must have a tensor value.
Argument out is not supported for storing the result, however it can be
used in combination with argument where to set the value at indices for
which where is set to False.
Args:
x1 (Tensor): input tensor to be multiplied.
x2 (Tensor): input tensor to be multiplied.
out (Tensor or None): optional, defaults to None.
where (Tensor or None): optional. For any non-default value of type other
than Tensor or None, the output retains its original value.
This condition is broadcast over the input. At locations where the
condition is True, the out array will be set to the ufunc result.
Elsewhere, the out array will retain its original value. Note that
if an uninitialized out array is created via the default out=None,
locations within it where the condition is False will remain
uninitialized.
dtype (data type): optional, defaults to None. Overrides the dtype of the
output Tensor.
Returns:
Tensor or scalar, the product of x1 and x2, element-wise. This is a scalar
if both x1 and x2 are scalars.
Raises:
TypeError: if the input is not a tensor.
Supported Platforms:
``Ascend`` ``GPU`` ``CPU``
Examples:
>>> x1 = np.full((3, 2), [1, 2])
>>> x2 = np.full((3, 2), [3, 4])
>>> output = np.multiply(x1, x2)
>>> print(output)
[[3, 8],
[3, 8],
[3, 8]]
"""
if _get_device_compile() == 'CPU':
# broadcast is not fully supported on CPU backend,
# and explicit broadcasting is performed
shape_out = _infer_out_shape(F.shape(x1), F.shape(x2))
ndim_out = F.tuple_len(shape_out)
x1 = _expand(x1, ndim_out)
x2 = _expand(x2, ndim_out)
x1 = _broadcast_to(x1, F.shape(x1), shape_out, ndim_out)
x2 = _broadcast_to(x2, F.shape(x2), shape_out, ndim_out)
return _apply_tensor_op(F.tensor_mul, | |
see this path.",
parent=ui_)
return
if S_ISDIR(inf.permissions) != 0:
self.cwd = normpath(path_).replace("\\", "/")
donefunc(refresh=True, path=self.cwd, selected=last)
else:
if S_ISLNK(conn.lstat(path_).permissions) != 0:
messagebox.showerror("Not supported",
"Can't download links yet.",
parent=ui_)
return
if item_nfo[0]:
destination = filedialog.askdirectory(
title="Choose download destination",
parent=ui_,
initialdir=Path.home()
)
if not destination:
return
self.download(
ui_,
pjoin(self.cwd, item_nfo[0]),
item_nfo[0],
(inf.atime, inf.mtime),
updatefunc,
donefunc,
True,
destination,
inf.filesize
)
elif self._mode == "FTP" and conn:
fd = False
if not item_nfo or item_nfo[0] == ".." or item_nfo[1][0] != "-":
try: # Try to change into path. If we can't, then it's either a file or insufficient permissions
conn.cwd(path_)
self.cwd = normpath(path_).replace("\\", "/")
donefunc(refresh=True, message="", path=self.cwd, selected=last)
fd = True
except error_perm:
if item_nfo[1][0] == "l":
messagebox.showerror("Not supported",
"Can't download links yet.",
parent=ui_)
return
elif not item_nfo or item_nfo[1][0] not in ["-", ]:
messagebox.showerror("Path Error",
"No such path or no permission to see this path.",
parent=ui_)
return
except Exception as e:
print(type(e), str(e))
if not item_nfo or item_nfo[1][0] not in ["-", ]:
messagebox.showerror("Path Error",
"No such path or no permission to see this path.",
parent=ui_)
return
if not fd:
ts = datetime.strptime(
item_nfo[2],
"%Y-%m-%d %H:%M:%S").timestamp()
destination = filedialog.askdirectory(
title="Choose download destination",
parent=ui_,
initialdir=Path.home()
)
if not destination:
return
self.download(ui_, self.cwd, item_nfo[0], (ts, ts), updatefunc, donefunc, True, destination, item_nfo[3])
def _search_worker(self, conn, path_, recursive_, depth_, filename_, sensitive_, regex_, resultfunc, donefunc):
"""
search for a file in the current path or in its subfolders - executed in separate thread
:param conn: Connection object for sftp/ftp tasks
:type conn: object
:param path_: file or directory
:type path_: str
:param recursive_: flag to search in subfolders
:type recursive_: bool
:param depth_: maximum depth of path
:type depth_: int
:param filename_: name/pattern to search for
:type filename_: str
:param sensitive_: flag for case sensitivity
:type sensitive_: bool
:param regex_: flag to use regular expressions to search with
:type regex_: bool
:param resultfunc: callback function to return results to UI
:type resultfunc: function
:param donefunc: callback to inform user about the end of search
:type donefunc: function
"""
if self._mode == "SFTP":
def recurse(pth):
current_depth = len(pth[len(path_):].split("/"))
if self.stop_search or (not recursive_ and current_depth > 1) or 0 < depth_ < current_depth:
return
try:
with conn.opendir(pth) as dirh:
for size, buf, attrs in sorted(
dirh.readdir(),
key=(lambda f: (S_ISREG(f[2].permissions) != 0, f[1]))):
obj = buf.decode(self._enc)
if obj in [".", ".."]:
continue
if S_ISDIR(attrs.permissions) != 0:
recurse(pjoin(pth, obj))
elif S_ISREG(attrs.permissions) != 0:
if not regex_ and ((sensitive_ and filename_ in obj) or (
not sensitive_ and filename_.lower() in obj.lower())):
resultfunc(pjoin(pth, obj))
elif regex_ and search(filename_, obj, 0 if sensitive_ else IGNORECASE):
resultfunc(pjoin(pth, obj))
elif S_ISLNK(attrs.permissions) != 0:
recurse(pjoin(pth, obj))
except SocketRecvError as e:
messagebox.showinfo("Lost connection", "The connection was lost.")
except (PermissionError, SFTPProtocolError, SFTPHandleError) as e:
print("error", type(e), str(e))
recurse(path_)
else: # FTP
def recurse(pth):
current_depth = len(pth[len(path_):].split("/"))
if self.stop_search or (not recursive_ and current_depth > 1) or 0 < depth_ < current_depth:
return
try:
data = ftp_file_list(conn, pth)
for p, i in data.items():
d = i.split()
if d[0][0] == "d":
recurse(pjoin(pth, p))
elif d[0][0] == "-":
if not regex_ and ((sensitive_ and filename_ in p) or (
not sensitive_ and filename_.lower() in p.lower())):
resultfunc(pjoin(pth, p))
elif regex_ and search(filename_, p, 0 if sensitive_ else IGNORECASE):
resultfunc(pjoin(pth, p))
elif d[0][0] == "l":
recurse(pjoin(pth, p))
except Exception as e:
print(type(e), str(e))
recurse(path_)
donefunc()
def _download_worker(self, conn, ui_, src_, file_, ts_, updatefunc, donefunc, isFile_=True, destination_="",
size_sum_=None):
"""
download files or folders - executed in separate thread
:param conn: Connection object for sftp/ftp tasks
:type conn: object
:param ui_: root Tk object
:type ui_: Tk
:param src_: original path
:type src_: str
:param file_: name of object (file or folder)
:type file_: str
:param ts_: timestamp tuple (atime, mtime)
:type ts_: tuple
:param updatefunc: callback function to update the UI
:type updatefunc: function
:param donefunc: callback function to inform the user about the end of a download
:type donefunc: function
:param isFile_: flag: true if it's a file else false
:type isFile_: bool
:param destination_: destination to download to
:type destination_: str
:param size_sum_: overall bytes to download
:type size_sum_: int
"""
if self._worker.quitting:
return
if isFile_:
if updatefunc:
updatefunc(maximum=size_sum_ if size_sum_ else 0)
if destination_:
overwrite = True
if exists(ojoin(destination_, file_)):
overwrite = messagebox.askokcancel(
"Overwrite existing file?",
"A file with the same name already exists. Do you want to override it?",
parent=ui_)
if overwrite:
if self._mode == "SFTP":
try:
with conn.open(src_, LIBSSH2_FXF_READ, LIBSSH2_SFTP_S_IRUSR) as inpt:
fstat = inpt.fstat()
self._worker.fileDescriptor = open(ojoin(destination_, file_), "wb", buffering=ui_.buffer_size)
while True:
res, buf = inpt.read(ui_.buffer_size)
if res == LIBSSH2_ERROR_SOCKET_RECV:
raise SocketRecvError
if not buf:
break
else:
self._worker.fileDescriptor.write(buf)
if updatefunc:
updatefunc(step=len(buf))
self._worker.fileDescriptor.close()
self._worker.fileDescriptor = None
utime(ojoin(destination_, file_), (fstat.atime, fstat.mtime))
except SCPProtocolError as e:
if self._worker.fileDescriptor:
self._worker.fileDescriptor.close()
self._worker.fileDescriptor = None
raise Exception("Insufficient Permissions")
# messagebox.showerror("Insufficient Permissions",
# "Could not receive file because of insufficient permissions.",
# parent=ui_)
else:
try:
conn.cwd(self.cwd)
csize = {"s": 0}
def handleDownload(block, fi, size_):
size_["s"] += len(block)
fi.write(block)
if updatefunc:
updatefunc(value=size_["s"])
self._worker.fileDescriptor = open(ojoin(destination_, file_), "wb+", buffering=ui_.buffer_size)
conn.retrbinary("RETR %s" % pjoin(src_, file_),
lambda blk: handleDownload(blk, self._worker.fileDescriptor, csize),
blocksize=ui_.buffer_size)
self._worker.fileDescriptor.close()
self._worker.fileDescriptor = None
utime(ojoin(destination_, file_), ts_)
except error_perm:
if self._worker.fileDescriptor:
self._worker.fileDescriptor.close()
self._worker.fileDescriptor = None
messagebox.showerror("Insufficient Permissions",
"Could not receive file because of insufficient permissions.",
parent=ui_)
remove(ojoin(destination_, file_))
else:
if destination_:
overwrite = True
if exists(ojoin(destination_, file_)):
overwrite = messagebox.askokcancel(
"Overwrite existing files?",
"A folder with the same name already exists. Do you want to override all contained files?",
parent=ui_)
else:
makedirs(ojoin(destination_, file_), exist_ok=True)
if overwrite:
if self._mode == "SFTP":
def recurse(orig, path, fi):
if self._worker.quitting:
return
if S_ISDIR(fi[1].permissions) != 0:
makedirs(path, exist_ok=True)
with conn.opendir(orig) as dirh_:
for size_, buf_, attrs_ in dirh_.readdir():
o_ = buf_.decode(self._enc)
if o_ not in [".", ".."]:
recurse(pjoin(orig, o_), pjoin(path, o_), (o_, attrs_))
elif S_ISREG(fi[1].permissions) != 0:
try:
with conn.open(src_, LIBSSH2_FXF_READ, LIBSSH2_SFTP_S_IRUSR) as inpt:
fstat = inpt.fstat()
self._worker.fileDescriptor = open(ojoin(destination_, file_), "wb", buffering=ui_.buffer_size)
while True:
res, buf = inpt.read(ui_.buffer_size)
if res == LIBSSH2_ERROR_SOCKET_RECV:
raise SocketRecvError
if not buf:
break
else:
self._worker.fileDescriptor.write(buf)
if updatefunc:
updatefunc(step=len(buf))
self._worker.fileDescriptor.close()
self._worker.fileDescriptor = None
utime(ojoin(destination_, file_), (fstat.atime, fstat.mtime))
except SCPProtocolError:
if self._worker.fileDescriptor:
self._worker.fileDescriptor.close()
self._worker.fileDescriptor = None
raise Exception("Insufficient Permissions")
# messagebox.showerror("Insufficient Permissions",
# "Could not receive file because of insufficient permissions.",
# parent=ui_)
size_all = {"size": size_sum_ if size_sum_ is not None else 0}
if size_sum_ is None:
if updatefunc:
updatefunc(mode="indeterminate", start=True, maximum=100)
get_size(conn, self._mode, self._enc, src_, size_all, isFile=False)
updatefunc(mode="determinate", stop=True, maximum=size_all["size"], value=0)
with conn.opendir(src_) as dirh:
for size, buf, attrs in dirh.readdir():
o = buf.decode(self._enc)
if o not in [".", ".."]:
recurse(pjoin(src_, o), pjoin(destination_, file_, o), (o, attrs))
else: # FTP
conn.cwd(self.cwd)
def recurse(path, fi):
if self._worker.quitting:
return
# print(path, fi)
if fi[0] == "d":
makedirs(ojoin(destination_, path[len(src_) + 1:]), exist_ok=True)
data = ftp_file_list(conn, path)
for x in data.items():
recurse(pjoin(path, x[0]), x[1])
elif fi[0] == "-":
# print("local", pjoin(destination, file, basename(path)))
# print("remote", path)
csize = {"": 0}
def handleDownload(block, fi, size_):
fi.write(block)
size_[""] += len(block)
if updatefunc:
updatefunc(value=size_[""])
self._worker.fileDescriptor = open(ojoin(destination_, path[len(src_) + 1:]), "wb+", buffering=ui_.buffer_size)
conn.retrbinary("RETR %s" % path,
lambda blk: handleDownload(blk, self._worker.fileDescriptor, csize),
blocksize=ui_.buffer_size)
try:
dt = None
if ":" in fi[-5:]:
dt = datetime.strptime(
datetime.now().strftime("%Y") + " ".join(fi.split()[-3:]), "%Y%b %d %H:%M")
else:
dt = datetime.strptime(" ".join(fi.split()[-3:]), "%b %d %Y")
self._worker.fileDescriptor.close()
self._worker.fileDescriptor = None
utime(ojoin(destination_, path[len(src_) + 1:]), (dt.timestamp(), dt.timestamp()))
except Exception as e:
if self._worker.fileDescriptor:
self._worker.fileDescriptor.close()
self._worker.fileDescriptor = None
print(type(e), e, path)
size_all = {"size": size_sum_ if size_sum_ is not None else 0}
if size_sum_ is None:
if updatefunc:
updatefunc(mode="indeterminate", start=True, maximum=100)
get_size(conn, self._mode, self._enc, pjoin(src_, file_), size_all, isFile=False)
updatefunc(mode="determinate", stop=True, maximum=size_all["size"], value=0)
dat = ftp_file_list(conn, pjoin(src_, file_))
for inf in dat.items():
recurse(pjoin(src_, file_, inf[0]), inf[1])
if not self._worker.quitting and donefunc:
donefunc(message="Download done!")
def _download_multi_worker(self, conn, ui_, sel, updatefunc, donefunc):
"""
download multiple objects from selected items - executed in separate thread
:param conn: Connection object for sftp/ftp tasks
:type conn: object
:param ui_: root Tk object
:type ui_: Tk
:param sel: list of selected item | |
"""
.. module:: Katna.image
:platform: OS X
:synopsis: This module has functions related to smart cropping
"""
import os
import cv2
import numpy as np
from Katna.decorators import FileDecorators
from Katna.feature_list import FeatureList
from Katna.filter_list import FilterList
from Katna.crop_extractor import CropExtractor
from Katna.crop_selector import CropSelector
import Katna.config as config
from Katna.decorators import DebugDecorators
class UserFiltersEnum:
"""Enum class for filters"""
text = "TextDetector"
class Image(object):
"""Class for all image cropping operations
:param object: base class inheritance
:type object: class:`Object`
"""
def __init__(self, disable_text=True):
"""Constructor for image files"""
featureList = FeatureList()
filterList = FilterList()
self.user_filters_enum = UserFiltersEnum()
self.crop_extractor = CropExtractor()
self.crop_selector = CropSelector()
self.features = featureList.get_features()
self.definedFilters = filterList.get_filters()
def _get_crop_specs(
self, image_height, image_width, ratio_height, ratio_width, is_height_small=True
):
"""Internal function to create the crop specs for a given aspect ratio
:param image_height: height of image
:type image_height: int, required
:param image_width: width of image
:type image_width: int, required
:param ratio_height: aspect ratio height (eg. 3 from 4:3)
:type ratio_height: int, required
:param ratio_width: aspect ratio width (eg. 4 from 4:3)
:type ratio_width: int, required
:param is_height_small: parameter to check if crop dimension should be reduced wrt height[default=True]
:type is_height_small: boolean, required
:return: list of crop height and crop width
:rtype:list of tuples
"""
# multiplication factor by which height/width of crop should be decreased to get crop specs
multiply_by = 1
crop_list_tuple = []
# Calculating the height and width ratio wrt aspect ratio
hr, wr = image_height / ratio_height, image_width / ratio_width
# print("hr, wr",hr, wr)
# Check if height is smaller than the width. If yes, interchange height and width.
if not is_height_small:
image_height, image_width = image_width, image_height
hr, wr = wr, hr
crop_height, crop_width = image_height, hr * ratio_width
# Keep decreasing the crop height and width, stopping once they fall below 1/min_image_to_crop_factor of the image height/width
while True:
if not (
(crop_height >= (image_height // config.Image.min_image_to_crop_factor))
and (
crop_width >= (image_width // config.Image.min_image_to_crop_factor)
)
):
break
crop_height, crop_width = (
int(crop_height),
int((ratio_width / ratio_height) * crop_height),
)
crop_list_tuple.append((crop_height, crop_width))
crop_height /= multiply_by
crop_height, crop_width = (
int(crop_height),
int((ratio_width / ratio_height) * crop_height),
)
multiply_by += config.Image.crop_height_reduction_factor_in_each_iteration
return crop_list_tuple
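# Illustrative worked example (numbers assume the default config factors): for a 900x1600
# image and a 4:3 target (ratio_height=3, ratio_width=4) with is_height_small=True, the
# first candidate spec is (900, 1200), i.e. the full image height paired with the width
# that preserves the 4:3 ratio; subsequent entries shrink until the minimum-size check
# against config.Image.min_image_to_crop_factor stops the loop.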
# Apply optional Debug mode decorator , If config=DEBUG is true this decorator
# will populate internal variables of Image module.debug_images with debug images
# Which you can see by opencv Imshow to check if every feature is working as expected
@DebugDecorators.add_optional_debug_images_for_image_module
def crop_image_from_cvimage(
self,
input_image,
crop_width,
crop_height,
num_of_crops,
filters=[],
down_sample_factor=config.Image.down_sample_factor,
):
"""smartly crops the imaged based on the specification - width and height
:param input_image: Input image
:type input_image: numpy array, required
:param crop_width: output crop width
:type crop_width: int
:param crop_height: output crop height
:type crop_height: int
:param num_of_crops: number of crops required
:type num_of_crops: int
:param filters: filters to be applied for cropping (only returns crops containing English text where the crop rectangle doesn't cut the text)
:type filters: list (eg. ['text'])
:param down_sample_factor: number by which you want to reduce image height & width (use it if image is large or to fasten the process)
:type down_sample_factor: int [default=8]
:return: crop list
:rtype: list of structure crop_rect
"""
self.crop_extractor.down_sample_factor = down_sample_factor
if (
input_image.shape[0] + 5 <= crop_height
or input_image.shape[1] + 5 <= crop_width
):
# print(
# "Error: crop width or crop height larger than Image",
# "input_image.shape",
# input_image.shape,
# "crop_width",
# crop_width,
# "crop_height",
# crop_height,
# )
return []
extracted_candidate_crops = self.crop_extractor.extract_candidate_crops(
input_image, crop_width, crop_height, self.features
)
# print(extracted_candidate_crops)
# text: TextDetector
# dummy: DummyDetector
self.filters = []
for x in filters:
try:
self.filters.append(eval("self.user_filters_enum." + x))
except AttributeError as e:
print(str(e))
# self.filters = [eval("user_filters_enum."+x) for x in filters]
crops_list = self.crop_selector.select_candidate_crops(
input_image,
num_of_crops,
extracted_candidate_crops,
self.definedFilters,
self.filters,
)
return crops_list
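# Illustrative usage sketch (file name is a placeholder): crop an already-loaded OpenCV
# image directly, without going through the writer-based helpers.
#
#   img = cv2.imread("example.jpg")
#   crops = Image().crop_image_from_cvimage(img, crop_width=300, crop_height=300, num_of_crops=3)
#   for crop in crops:
#       print(crop)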
def _extract_crop_for_files_iterator(
self,
list_of_files,
crop_width,
crop_height,
num_of_crops,
filters,
down_sample_factor,
):
"""Generator which yields crop data / error for filepaths in a list
:param list_of_files: list of files to process for crop
:type list_of_files: list, required
:param crop_width: output crop width
:type crop_width: int
:param crop_height: output crop height
:type crop_height: int
:param num_of_crops: number of crops required
:type num_of_crops: int
:param filters: filters to be applied for cropping (checks if the image contains English text and the crop rectangle doesn't cut the text)
:type filters: list (eg. ['text'])
:param down_sample_factor: number by which you want to reduce image height & width (use it if image is large or to fasten the process)
:type down_sample_factor: int [default=8]
:yield: dict containing the crop data, the error (if any), and the filepath of the image processed
:rtype: dict
"""
for filepath in list_of_files:
print("Running for : ", filepath)
try:
crop_list = self._crop_image(
filepath,
crop_width,
crop_height,
num_of_crops,
filters,
down_sample_factor,
)
yield {"crops": crop_list, "error": None,"filepath": filepath}
except Exception as e:
yield {"crops": crop_list, "error": e,"filepath": filepath}
@FileDecorators.validate_dir_path
def crop_image_from_dir(
self,
dir_path,
crop_width,
crop_height,
num_of_crops,
writer,
filters=[],
down_sample_factor=config.Image.down_sample_factor,
):
"""smartly crops all the images (inside a directory) based on the specification - width and height
:param dir_path: Input Directory path
:type dir_path: str, required
:param crop_width: output crop width
:type crop_width: int
:param crop_height: output crop height
:type crop_height: int
:param num_of_crops: number of crops required
:type num_of_crops: int
:param writer: writer object used to save/process the generated crops
:type writer: Writer, required
:param filters: filters to be applied for cropping (checks if the image contains English text and the crop rectangle doesn't cut the text)
:type filters: list (eg. ['text'])
:param down_sample_factor: number by which you want to reduce image height & width (use it if image is large or to fasten the process)
:type down_sample_factor: int [default=8]
:return: crop dict with key as filepath and crop list for the file
:rtype: dict
"""
valid_files = []
all_crops = {}
for path, subdirs, files in os.walk(dir_path):
for filename in files:
filepath = os.path.join(path, filename)
if self._check_if_valid_image(filepath):
valid_files.append(filepath)
if len(valid_files) > 0:
generator = self._extract_crop_for_files_iterator(
valid_files,
crop_width,
crop_height,
num_of_crops,
filters,
down_sample_factor
)
for data in generator:
file_path = data["filepath"]
file_crops = data["crops"]
error = data["error"]
if error is None:
writer.write(file_path, file_crops)
print("Completed processing for : ", file_path)
else:
print("Error processing file : ", file_path)
print(error)
else:
print("All the files in directory %s are invalid video files" % dir_path)
def _crop_image(
self,
file_path,
crop_width,
crop_height,
num_of_crops,
filters=[],
down_sample_factor=config.Image.down_sample_factor,
):
"""smartly crops the imaged based on the specification - width and height
:param file_path: Input file path
:type file_path: str, required
:param crop_width: output crop width
:type crop_width: int
:param crop_height: output crop height
:type crop_height: int
:param num_of_crops: number of crops required
:type num_of_crops: int
:param filters: filters to be applied for cropping (checks if the image contains English text and the crop rectangle doesn't cut the text)
:type filters: list (eg. ['text'])
:param down_sample_factor: number by which you want to reduce image height & width (use it if image is large or to fasten the process)
:type down_sample_factor: int [default=8]
:return: crop list
:rtype: list of structure crop_rect
"""
imgFile = cv2.imread(file_path)
crop_list = self.crop_image_from_cvimage(
input_image=imgFile,
crop_width=crop_width,
crop_height=crop_height,
num_of_crops=num_of_crops,
filters=filters,
down_sample_factor=down_sample_factor,
)
return crop_list
@FileDecorators.validate_file_path
def crop_image(
self,
file_path,
crop_width,
crop_height,
num_of_crops,
writer,
filters=[],
down_sample_factor=config.Image.down_sample_factor,
):
"""smartly crops the imaged based on the specification - width and height
:param file_path: Input file path
:type file_path: str, required
:param crop_width: output crop width
:type crop_width: int
:param crop_height: output crop height
:type crop_height: int
:param num_of_crops: number of crops required
:type num_of_crops: int
:param writer: writer object to process data
:type writer: Writer, required
:param filters: filters to be applied for cropping (checks if the image contains English text and the crop rectangle doesn't cut the text)
:type filters: list (eg. ['text'])
:param down_sample_factor: number by which you want to reduce image height & width (use it if image is large or to fasten the process)
:type down_sample_factor: int [default=8]
:return: crop list
:rtype: list of structure crop_rect
"""
crop_list = self._crop_image(
file_path,
crop_width,
crop_height,
num_of_crops,
filters=filters,
down_sample_factor=down_sample_factor
)
writer.write(file_path, crop_list)
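# Illustrative usage sketch (file path is a placeholder): the single-file variant takes the
# same writer interface, so the stand-in writer sketched above would work here too.
#
#   Image().crop_image("/path/to/photo.jpg", 300, 300, num_of_crops=2, writer=_PrintWriter())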
@FileDecorators.validate_file_path
def crop_image_with_aspect(
self,
file_path,
crop_aspect_ratio,
num_of_crops,
writer,
filters=[],
down_sample_factor=8
):
"""smartly crops the imaged based | |
# -*- coding: utf-8 -*-
'''
<NAME>, Ph.D.
<EMAIL>
www.reubotics.com
Apache 2 License
Software Revision E, 09/03/2021
Verified working on: Python 2.7 and 3.7 for Windows 8.1 64-bit and Raspberry Pi Buster (no Mac testing yet).
'''
__author__ = 'reuben.brewer'
import os, sys, platform
import time, datetime
import traceback
import threading
###############
if sys.version_info[0] < 3:
from Tkinter import * #Python 2
import tkFont
else:
from tkinter import * #Python 3
import tkinter.font as tkFont #Python 3
###############
###############
if sys.version_info[0] < 3:
import Queue # Python 2
else:
import queue as Queue # Python 3
###############
###############
if sys.version_info[0] < 3:
from builtins import raw_input as input
else:
from future.builtins import input as input #"sudo pip3 install future" (Python 3) AND "sudo pip install future" (Python 2)
###############
##########################################################################################################
##########################################################################################################
class MyPrint_ReubenPython2and3Class(Frame): #Subclass the Tkinter Frame
def __init__(self, setup_dict):
print("#################### MyPrint_ReubenPython2and3Class __init__ starting. ####################")
self.OBJECT_CREATED_SUCCESSFULLY_FLAG = -1
self.EXIT_PROGRAM_FLAG = 0
##########################################
##########################################
if platform.system() == "Linux":
if "raspberrypi" in platform.uname(): #os.uname() doesn't work in windows
self.my_platform = "pi"
else:
self.my_platform = "linux"
elif platform.system() == "Windows":
self.my_platform = "windows"
elif platform.system() == "Darwin":
self.my_platform = "mac"
else:
self.my_platform = "other"
print("The OS platform is: " + self.my_platform)
##########################################
##########################################
##########################################
##########################################
if "GUIparametersDict" in setup_dict:
self.GUIparametersDict = setup_dict["GUIparametersDict"]
##########################################
if "USE_GUI_FLAG" in self.GUIparametersDict:
self.USE_GUI_FLAG = self.PassThrough0and1values_ExitProgramOtherwise("USE_GUI_FLAG", self.GUIparametersDict["USE_GUI_FLAG"])
else:
self.USE_GUI_FLAG = 0
print("USE_GUI_FLAG = " + str(self.USE_GUI_FLAG))
##########################################
##########################################
if "root" in self.GUIparametersDict:
self.root = self.GUIparametersDict["root"]
self.RootIsOwnedExternallyFlag = 1
else:
self.root = None
self.RootIsOwnedExternallyFlag = 0
print("RootIsOwnedExternallyFlag = " + str(self.RootIsOwnedExternallyFlag))
##########################################
##########################################
if "GUI_RootAfterCallbackInterval_Milliseconds" in self.GUIparametersDict:
self.GUI_RootAfterCallbackInterval_Milliseconds = int(self.PassThroughFloatValuesInRange_ExitProgramOtherwise("GUI_RootAfterCallbackInterval_Milliseconds", self.GUIparametersDict["GUI_RootAfterCallbackInterval_Milliseconds"], 0.0, 1000.0))
else:
self.GUI_RootAfterCallbackInterval_Milliseconds = 30
print("GUI_RootAfterCallbackInterval_Milliseconds = " + str(self.GUI_RootAfterCallbackInterval_Milliseconds))
##########################################
##########################################
if "UseBorderAroundThisGuiObjectFlag" in self.GUIparametersDict:
self.UseBorderAroundThisGuiObjectFlag = self.PassThrough0and1values_ExitProgramOtherwise("UseBorderAroundThisGuiObjectFlag", self.GUIparametersDict["UseBorderAroundThisGuiObjectFlag"])
else:
self.UseBorderAroundThisGuiObjectFlag = 0
print("UseBorderAroundThisGuiObjectFlag: " + str(self.UseBorderAroundThisGuiObjectFlag))
##########################################
##########################################
if "GUI_ROW" in self.GUIparametersDict:
self.GUI_ROW = int(self.PassThroughFloatValuesInRange_ExitProgramOtherwise("GUI_ROW", self.GUIparametersDict["GUI_ROW"], 0.0, 1000.0))
else:
self.GUI_ROW = 0
print("GUI_ROW = " + str(self.GUI_ROW))
##########################################
##########################################
if "GUI_COLUMN" in self.GUIparametersDict:
self.GUI_COLUMN = int(self.PassThroughFloatValuesInRange_ExitProgramOtherwise("GUI_COLUMN", self.GUIparametersDict["GUI_COLUMN"], 0.0, 1000.0))
else:
self.GUI_COLUMN = 0
print("GUI_COLUMN = " + str(self.GUI_COLUMN))
##########################################
##########################################
if "GUI_PADX" in self.GUIparametersDict:
self.GUI_PADX = int(self.PassThroughFloatValuesInRange_ExitProgramOtherwise("GUI_PADX", self.GUIparametersDict["GUI_PADX"], 0.0, 1000.0))
else:
self.GUI_PADX = 0
print("GUI_PADX = " + str(self.GUI_PADX))
##########################################
##########################################
if "GUI_PADY" in self.GUIparametersDict:
self.GUI_PADY = int(self.PassThroughFloatValuesInRange_ExitProgramOtherwise("GUI_PADY", self.GUIparametersDict["GUI_PADY"], 0.0, 1000.0))
else:
self.GUI_PADY = 0
print("GUI_PADY = " + str(self.GUI_PADY))
##########################################
##########################################
if "GUI_ROWSPAN" in self.GUIparametersDict:
self.GUI_ROWSPAN = int(self.PassThroughFloatValuesInRange_ExitProgramOtherwise("GUI_ROWSPAN", self.GUIparametersDict["GUI_ROWSPAN"], 0.0, 1000.0))
else:
self.GUI_ROWSPAN = 0
print("GUI_ROWSPAN = " + str(self.GUI_ROWSPAN))
##########################################
##########################################
if "GUI_COLUMNSPAN" in self.GUIparametersDict:
self.GUI_COLUMNSPAN = int(self.PassThroughFloatValuesInRange_ExitProgramOtherwise("GUI_COLUMNSPAN", self.GUIparametersDict["GUI_COLUMNSPAN"], 0.0, 1000.0))
else:
self.GUI_COLUMNSPAN = 0
print("GUI_COLUMNSPAN = " + str(self.GUI_COLUMNSPAN))
##########################################
else:
self.GUIparametersDict = dict()
self.USE_GUI_FLAG = 0
print("No GUIparametersDict present, setting USE_GUI_FLAG = " + str(self.USE_GUI_FLAG))
print("GUIparametersDict = " + str(self.GUIparametersDict))
##########################################
##########################################
##########################################
if "NumberOfPrintLines" in setup_dict:
self.NumberOfPrintLines = int(self.PassThroughFloatValuesInRange_ExitProgramOtherwise("NumberOfPrintLines", setup_dict["NumberOfPrintLines"], 0.0, 1000.0))
else:
self.NumberOfPrintLines = 15
print("NumberOfPrintLines = " + str(self.NumberOfPrintLines))
##########################################
##########################################
if "WidthOfPrintingLabel" in setup_dict:
self.WidthOfPrintingLabel = int(self.PassThroughFloatValuesInRange_ExitProgramOtherwise("WidthOfPrintingLabel", setup_dict["WidthOfPrintingLabel"], 10.0, 1920.0))
else:
self.WidthOfPrintingLabel = 100
print("WidthOfPrintingLabel = " + str(self.WidthOfPrintingLabel))
##########################################
##########################################
if "PrintToConsoleFlag" in setup_dict:
self.PrintToConsoleFlag = self.PassThrough0and1values_ExitProgramOtherwise("PrintToConsoleFlag", setup_dict["PrintToConsoleFlag"])
else:
self.PrintToConsoleFlag = 0
print("PrintToConsoleFlag = " + str(self.PrintToConsoleFlag))
##########################################
##########################################
if "LogFileNameFullPath" in setup_dict:
self.LogFileNameFullPath = str(setup_dict["LogFileNameFullPath"])
if self.LogFileNameFullPath.find("/") == -1 and self.LogFileNameFullPath.find("\\") == -1:
print("MyPrint_ReubenPython2and3Class __init__ error: 'LogFileNameFullPath' must be FULL path (should include slashes).")
return
else:
self.LogFileNameFullPath = os.getcwd()
print("LogFileNameFullPath = " + str(self.LogFileNameFullPath))
##########################################
self.PrintToGui_Label_TextInputHistory_List = [" "]*self.NumberOfPrintLines
self.PrintToGui_Label_TextInput_Str = ""
self.LinesToBeWrittenToFileQueue = Queue.Queue()
self.GUI_ready_to_be_updated_flag = 0
print("#################### MyPrint_ReubenPython2and3Class __init__ ended input-parameter parsing and variable initialization. ####################")
##########################################
self.MainDataWritingThread_ThreadingObject = threading.Thread(target=self.MainDataWritingThread, args=())
self.MainDataWritingThread_ThreadingObject.start()
##########################################
##########################################
if self.USE_GUI_FLAG == 1:
self.StartGUI(self.root)
##########################################
self.OBJECT_CREATED_SUCCESSFULLY_FLAG = 1
##########################################################################################################
##########################################################################################################
##########################################################################################################
##########################################################################################################
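    # Illustrative construction sketch (hypothetical values; the keys mirror the setup_dict parsing above):
    #   MyPrint_Object = MyPrint_ReubenPython2and3Class({"NumberOfPrintLines": 10,
    #                                                    "PrintToConsoleFlag": 1,
    #                                                    "LogFileNameFullPath": os.getcwd() + "//TestLog.txt",
    #                                                    "GUIparametersDict": {"USE_GUI_FLAG": 1, "root": root}})
    #   MyPrint_Object.my_print("hello")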
def __del__(self):
pass
##########################################################################################################
##########################################################################################################
##########################################################################################################
##########################################################################################################
def IsNumber0or1(self, InputNumber):
if float(InputNumber) == 0.0 or float(InputNumber) == 1:
return 1
else:
return 0
##########################################################################################################
##########################################################################################################
##########################################################################################################
##########################################################################################################
def PassThrough0and1values_ExitProgramOtherwise(self, InputNameString, InputNumber):
try:
InputNumber_ConvertedToFloat = float(InputNumber)
except:
exceptions = sys.exc_info()[0]
print("PassThrough0and1values_ExitProgramOtherwise Error. InputNumber must be a float value, Exceptions: %s" % exceptions)
input("Press any key to continue")
sys.exit()
try:
if InputNumber_ConvertedToFloat == 0.0 or InputNumber_ConvertedToFloat == 1:
return InputNumber_ConvertedToFloat
else:
input("PassThrough0and1values_ExitProgramOtherwise Error. '" +
InputNameString +
"' must be 0 or 1 (value was " +
str(InputNumber_ConvertedToFloat) +
"). Press any key (and enter) to exit.")
sys.exit()
except:
exceptions = sys.exc_info()[0]
print("PassThrough0and1values_ExitProgramOtherwise Error, Exceptions: %s" % exceptions)
input("Press any key to continue")
sys.exit()
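    # For example, PassThrough0and1values_ExitProgramOtherwise("USE_GUI_FLAG", 1) returns 1.0,
    # while any other value prompts the user and exits the program.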
##########################################################################################################
##########################################################################################################
##########################################################################################################
##########################################################################################################
def PassThroughFloatValuesInRange_ExitProgramOtherwise(self, InputNameString, InputNumber, RangeMinValue, RangeMaxValue):
try:
InputNumber_ConvertedToFloat = float(InputNumber)
except:
exceptions = sys.exc_info()[0]
print("PassThroughFloatValuesInRange_ExitProgramOtherwise Error. InputNumber must be a float value, Exceptions: %s" % exceptions)
input("Press any key to continue")
sys.exit()
try:
if InputNumber_ConvertedToFloat >= RangeMinValue and InputNumber_ConvertedToFloat <= RangeMaxValue:
return InputNumber_ConvertedToFloat
else:
input("PassThroughFloatValuesInRange_ExitProgramOtherwise Error. '" +
InputNameString +
"' must be in the range [" +
str(RangeMinValue) +
", " +
str(RangeMaxValue) +
"] (value was " +
str(InputNumber_ConvertedToFloat) + "). Press any key (and enter) to exit.")
sys.exit()
except:
exceptions = sys.exc_info()[0]
print("PassThroughFloatValuesInRange_ExitProgramOtherwise Error, Exceptions: %s" % exceptions)
input("Press any key to continue")
sys.exit()
##########################################################################################################
##########################################################################################################
##########################################################################################################
##########################################################################################################
def getPreciseSecondsTimeStampString(self):
ts = time.time()
        return ts #Note: despite the name, this returns the raw float timestamp in seconds.
##########################################################################################################
##########################################################################################################
##########################################################################################################
##########################################################################################################
def getTimeStampStringReubenPreferredFormat(self):
ts = time.time()
st = datetime.datetime.fromtimestamp(ts).strftime('date-%m-%d-%Y---time-%H-%M-%S')
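        # e.g. st == 'date-01-31-2024---time-13-05-09' for Jan 31, 2024 at 13:05:09 (illustrative)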
return st
##########################################################################################################
##########################################################################################################
#######################################################################################################################
#######################################################################################################################
def CreateNewDirectoryIfItDoesntExist(self, DirectoryFullPathToCheck):
try:
#print("CreateNewDirectoryIfItDoesntExist, DirectoryFullPathToCheck = " + DirectoryFullPathToCheck)
            if not os.path.isdir(DirectoryFullPathToCheck):
os.makedirs(DirectoryFullPathToCheck)
#print("CreateNewDirectoryIfItDoesntExist, created new directory!")
except:
exceptions = sys.exc_info()[0]
print("CreateNewDirectoryIfItDoesntExist ERROR, Exceptions: %s" % exceptions)
traceback.print_exc()
#######################################################################################################################
#######################################################################################################################
##########################################################################################################
##########################################################################################################
def SetPrintToConsoleFlag(self, value):
if value == 0 or value == 1:
self.PrintToConsoleFlag = value
else:
self.my_print("SetPrintToConsoleFlag ERROR: This function accepts only 0 or 1.")
##########################################################################################################
##########################################################################################################
##########################################################################################################
##########################################################################################################
def MainDataWritingThread(self):
self.my_print("Started the MainDataWritingThread for MyPrint_ReubenPython2and3Class object.")
######################################################
try:
if self.LogFileNameFullPath != "":
print(os.path.dirname(self.LogFileNameFullPath))
self.CreateNewDirectoryIfItDoesntExist(os.path.dirname(self.LogFileNameFullPath)) #os.path.dirname grabs just the directory path
with open(self.LogFileNameFullPath, "a+") as self.LogFileObject: #File is created if it doesn't exist, automatically closes cleanly due to "with" if program crashes
self.LogFileObject.write("######################################################" + "\n")
while True:
if self.LinesToBeWrittenToFileQueue.qsize() > 0:
NextLineToBeWrittenToFile = self.LinesToBeWrittenToFileQueue.get()
self.LogFileObject.write(self.getTimeStampStringReubenPreferredFormat() + ": " + NextLineToBeWrittenToFile + "\n")
self.LogFileObject.flush() #IF WE WON'T FLUSH, THEN WE'LL LOSE ALL CHANGES IF PYTHON CRASHES
if self.EXIT_PROGRAM_FLAG == 1:
self.LogFileObject.write("######################################################" + "\n")
break
else:
time.sleep(0.001)
except:
exceptions = sys.exc_info()[0]
print("MyPrintClass ERROR: failed opening/writing LogFileNameFullPath = '" + str(self.LogFileNameFullPath) + "', Exceptions: %s" % exceptions)
######################################################
print("Exited the main thread for MyPrint_ReubenPython2and3Class object.")
##########################################################################################################
##########################################################################################################
##########################################################################################################
##########################################################################################################
def ExitProgram_Callback(self):
print("Exiting all threads for MyPrint_ReubenPython2and3Class.")
self.EXIT_PROGRAM_FLAG = 1
##########################################################################################################
##########################################################################################################
##########################################################################################################
##########################################################################################################
def StartGUI(self, GuiParent=None):
GUI_Thread_ThreadingObject = threading.Thread(target=self.GUI_Thread, args=(GuiParent,))
        GUI_Thread_ThreadingObject.daemon = True #Should mean that the GUI thread is destroyed automatically when the main thread is destroyed.
GUI_Thread_ThreadingObject.start()
##########################################################################################################
##########################################################################################################
##########################################################################################################
##########################################################################################################
def GUI_Thread(self, parent=None):
print("Starting the GUI_Thread for MyPrint_ReubenPython2and3Class object.")
########################
        if parent is None: #This class object owns root and must handle it properly
self.root = Tk()
self.parent = self.root
            ################################################### SET THE DEFAULT FONT FOR ALL WIDGETS CREATED AFTER/BELOW THIS CALL
default_font = tkFont.nametofont("TkDefaultFont")
default_font.configure(size=8)
self.root.option_add("*Font", default_font)
###################################################
else:
self.root = parent
self.parent = parent
########################
########################
self.myFrame = Frame(self.root)
if self.UseBorderAroundThisGuiObjectFlag == 1:
self.myFrame["borderwidth"] = 2
self.myFrame["relief"] = "ridge"
self.myFrame.grid(row = self.GUI_ROW,
column = self.GUI_COLUMN,
padx = self.GUI_PADX,
pady = self.GUI_PADY,
rowspan = self.GUI_ROWSPAN,
columnspan= self.GUI_COLUMNSPAN)
########################
########################
self.PrintToGui_Label = Label(self.myFrame, text="PrintToGui_Label", width=self.WidthOfPrintingLabel)
self.PrintToGui_Label.grid(row=0, column=0, padx=1, pady=1, columnspan=1, rowspan=1)
########################
########################
if self.RootIsOwnedExternallyFlag == 0: #This class object owns root and must handle it properly
self.root.protocol("WM_DELETE_WINDOW", self.ExitProgram_Callback)
self.root.after(self.GUI_RootAfterCallbackInterval_Milliseconds, self.GUI_update_clock)
self.GUI_ready_to_be_updated_flag = 1
self.root.mainloop()
else:
self.GUI_ready_to_be_updated_flag = 1
########################
########################
if self.RootIsOwnedExternallyFlag == 0: #This class object owns root and must handle it properly
self.root.quit() # Stop the GUI thread, MUST BE CALLED FROM GUI_Thread
self.root.destroy() # Close down the GUI thread, MUST BE CALLED FROM GUI_Thread
########################
##########################################################################################################
##########################################################################################################
##########################################################################################################
##########################################################################################################
def GUI_update_clock(self):
#######################################################
#######################################################
#######################################################
if self.USE_GUI_FLAG == 1 and self.EXIT_PROGRAM_FLAG == 0:
#######################################################
#######################################################
if self.GUI_ready_to_be_updated_flag == 1:
#######################################################
self.PrintToGui_Label.config(text = self.PrintToGui_Label_TextInput_Str)
#######################################################
#######################################################
#######################################################
#######################################################
#######################################################
if self.RootIsOwnedExternallyFlag == 0: #This class object owns root and must handle it properly
self.root.after(self.GUI_RootAfterCallbackInterval_Milliseconds, self.GUI_update_clock)
#######################################################
#######################################################
#######################################################
#######################################################
#######################################################
##########################################################################################################
##########################################################################################################
##########################################################################################################
##########################################################################################################
def my_print(self, input_string):
input_string = str(input_string)
if input_string != "":
################################ Write to file WITHOUT MODIFICATION
if self.LogFileNameFullPath != "":
self.LinesToBeWrittenToFileQueue.put(input_string)
################################
#input_string = input_string.replace("\n", "").replace("\r", "")
            ################################ Write
import demistomock as demisto
from CommonServerPython import *
from CommonServerUserPython import *
''' IMPORTS '''
from requests import HTTPError
from typing import Dict, Any
from json.decoder import JSONDecodeError
import json
import traceback
import requests
import math
# Disable insecure warnings
requests.packages.urllib3.disable_warnings()
''' GLOBALS/PARAMS '''
TOKEN = demisto.params().get('token')
# Remove trailing slash to prevent wrong URL path to service
SERVER = demisto.params().get('url')[:-1] \
if ('url' in demisto.params() and demisto.params()['url'].endswith('/')) else demisto.params().get('url', '')
# Should we use SSL
USE_SSL = not demisto.params().get('insecure', False)
# Headers to be sent in requests
HEADERS = {
'Authorization': f'Token {TOKEN}',
'Content-Type': 'application/json',
'Accept': 'application/json'
}
# Error messages
INVALID_ID_ERR_MSG = 'Error in API call. This may happen if you provided an invalid id.'
API_ERR_MSG = 'Error in API call to AttackIQ. '
DEFAULT_PAGE_SIZE = 10
# Transformation dicts
ASSESSMENTS_TRANS = {
'id': 'Id',
'name': 'Name',
'user': 'User',
'users': 'Users',
'owner': 'Owner',
'groups': 'Groups',
'creator': 'Creator',
'created': 'Created',
'end_date': 'EndDate',
'modified': 'Modified',
'start_date': 'StartDate',
'description': 'Description',
'project_state': 'AssessmentState',
'master_job_count': 'MasterJobCount',
'default_schedule': 'DefaultSchedule',
'default_asset_count': 'DefaultAssetCount',
'project_template.id': 'AssessmentTemplateId',
'default_asset_group_count': 'DefaultAssetGroupCount',
'project_template.company': 'AssessmentTemplateCompany',
'project_template.created': 'AssessmentTemplateCreated',
'project_template.modified': 'AssessmentTemplateModified',
'project_template.template_name': 'AssessmentTemplateName',
'project_template.default_schedule': 'AssessmentTemplateDefaultSchedule',
'project_template.template_description': 'AssessmentTemplateDescription'
}
TESTS_TRANS = {
'id': 'Id',
'name': 'Name',
'description': 'Description',
'project': 'Assessment',
'total_asset_count': 'TotalAssetCount',
'cron_expression': 'CronExpression',
'runnable': 'Runnable',
'last_result': 'LastResult',
'user': 'User',
'created': 'Created',
'modified': 'Modified',
'using_default_schedule': 'UsingDefaultSchedule',
'using_default_assets': 'UsingDefaultAssets',
'latest_instance_id': 'LatestInstanceId',
'scenarios': {
'name': 'Name',
'id': 'Id'
},
'assets': {
'id': 'Id',
'ipv4_address': 'Ipv4Address',
'hostname': 'Hostname',
'product_name': 'ProductName',
'modified': 'Modified',
'status': 'Status'
}
}
TEST_STATUS_TRANS = {
'detected': 'Detected',
'failed': 'Failed',
'finished': 'Finished',
'passed': 'Passed',
'errored': 'Errored',
'total': 'Total'
}
TEST_RESULT_TRANS = {
'id': 'Id',
'modified': 'Modified',
'project_id': 'Assessment.Id',
'project_name': 'Assessment.Name',
'scenario.id': 'Scenario.Id',
'scenario.name': 'Scenario.Name',
'scenario.description': 'Scenario.Description',
'asset.id': 'Asset.Id',
'asset.ipv4_address': 'Asset.Ipv4Address',
'asset.hostname': 'Asset.Hostname',
'asset.product_name': 'Asset.ProductName',
'asset.modified': 'Asset.Modified',
'asset_group': 'Asset.AssetGroup',
'job_state_name': 'JobState',
'outcome_name': 'Outcome'
}
''' HELPER FUNCTIONS '''
def http_request(method, url_suffix, params=None, data=None):
url = urljoin(SERVER, url_suffix)
    LOG(f'AttackIQ is attempting {method} request sent to {url} with params:\n{json.dumps(params, indent=4)} \n '
        f'data:\n{json.dumps(data)}')
try:
res = requests.request(
method,
url,
verify=USE_SSL,
params=params,
data=data,
headers=HEADERS
)
if res.status_code == 204:
return ''
# Handle error responses gracefully
if res.status_code not in {200, 201}:
error_reason = get_http_error_reason(res)
raise HTTPError(f'[{res.status_code}] - {error_reason}')
try:
return res.json()
except JSONDecodeError:
return_error('Response contained no valid body. See logs for more information.',
error=f'AttackIQ response body:\n{res.content!r}')
except requests.exceptions.ConnectionError as e:
LOG(str(e))
return_error('Encountered issue reaching the endpoint, please check that you entered the URL correctly.')
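# For example, http_request('GET', '/v1/assessments', params={'page': 1}) returns the parsed JSON body
# on success, '' on HTTP 204, raises HTTPError for other status codes, and reports connection
# problems through return_error().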
def get_http_error_reason(res):
"""
Get error reason from an AttackIQ http error
Args:
res: AttackIQ response
Returns: Reason for http error
"""
err_reason = res.reason
try:
res_json = res.json()
if 'detail' in res_json:
err_reason = f'{err_reason}. {res_json["detail"]}'
except JSONDecodeError:
pass
return err_reason
def build_transformed_dict(src, trans_dict):
"""Builds a dictionary according to a conversion map
Args:
src (dict): original dictionary to build from
trans_dict (dict): dict in the format { 'OldKey': 'NewKey', ...}
Returns: src copy with changed keys
"""
if isinstance(src, list):
return [build_transformed_dict(x, trans_dict) for x in src]
res: Dict[str, Any] = {}
for key, val in trans_dict.items():
if isinstance(val, dict):
# handle nested list
sub_res = res
item_val = [build_transformed_dict(item, val) for item in (demisto.get(src, key) or [])]
key = underscoreToCamelCase(key)
for sub_key in key.split('.')[:-1]:
if sub_key not in sub_res:
sub_res[sub_key] = {}
sub_res = sub_res[sub_key]
sub_res[key.split('.')[-1]] = item_val
elif '.' in val:
# handle nested vals
update_nested_value(res, val, to_val=demisto.get(src, key))
else:
res[val] = demisto.get(src, key)
return res
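# Illustrative example, traced from the code above (not taken from AttackIQ documentation):
#   build_transformed_dict({'id': 7, 'scenario': {'name': 'S1'}},
#                          {'id': 'Id', 'scenario.name': 'Scenario.Name'})
#   returns {'Id': 7, 'Scenario': {'Name': 'S1'}}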
def create_invalid_id_err_msg(orig_err, error_codes):
"""
Creates an 'invalid id' error message
Args:
orig_err (str): The original error message
error_codes (list): List of error codes to look for
Returns (str): Error message for invalid id
"""
err_msg = API_ERR_MSG
if any(err_code in orig_err for err_code in error_codes):
        err_msg += 'This may happen if you provided an invalid id.\n'
err_msg += orig_err
return err_msg
def update_nested_value(src_dict, to_key, to_val):
"""
Updates nested value according to transformation dict structure where 'a.b' key will create {'a': {'b': val}}
Args:
src_dict (dict): The original dict
to_key (str): Key to transform to (expected to contain '.' to mark nested)
to_val: The value that'll be put under the nested key
"""
sub_res = src_dict
to_key_lst = to_key.split('.')
for sub_to_key in to_key_lst[:-1]:
if sub_to_key not in sub_res:
sub_res[sub_to_key] = {}
sub_res = sub_res[sub_to_key]
sub_res[to_key_lst[-1]] = to_val
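# For example:
#   d = {}
#   update_nested_value(d, 'Asset.Hostname', 'host-1')   # d is now {'Asset': {'Hostname': 'host-1'}}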
def get_page_number_and_page_size(args):
"""
Get arguments page_number and page_size from args
Args:
args (dict): Argument dictionary, with possible page_number and page_size keys
Returns (int, int): Return a tuple of (page_number, page_size)
"""
page = args.get('page_number', 1)
page_size = args.get('page_size', DEFAULT_PAGE_SIZE)
    err_msg_format = 'Error: Invalid {arg} value. "{val}" is not a valid value. Please enter a positive integer.'
try:
page = int(page)
if page <= 0:
raise ValueError()
except (ValueError, TypeError):
return_error(err_msg_format.format(arg='page_number', val=page))
try:
page_size = int(page_size)
if page_size <= 0:
raise ValueError()
except (ValueError, TypeError):
return_error(err_msg_format.format(arg='page_size', val=page_size))
return page, page_size
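# For example, get_page_number_and_page_size({'page_number': '2', 'page_size': '25'}) returns (2, 25);
# missing keys fall back to page 1 and DEFAULT_PAGE_SIZE.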
''' COMMANDS + REQUESTS FUNCTIONS '''
def test_module():
"""
Performs basic get request to get item samples
"""
http_request('GET', '/v1/assessments')
demisto.results('ok')
''' COMMANDS MANAGER / SWITCH PANEL '''
def activate_assessment_command():
""" Implements attackiq-activate-assessment command
"""
ass_id = demisto.getArg('assessment_id')
try:
raw_res = http_request('POST', f'/v1/assessments/{ass_id}/activate')
hr = raw_res['message'] if 'message' in raw_res else f'Assessment {ass_id} activation was sent successfully.'
demisto.results(hr)
except HTTPError as e:
return_error(create_invalid_id_err_msg(str(e), ['403']))
def get_assessment_execution_status_command():
""" Implements attackiq-get-assessment-execution-status command
"""
ass_id = demisto.getArg('assessment_id')
try:
raw_res = http_request('GET', f'/v1/assessments/{ass_id}/is_on_demand_running')
ex_status = raw_res.get('message')
hr = f'Assessment {ass_id} execution is {"" if ex_status else "not "}running.'
ec = {
'AttackIQ.Assessment(val.Id === obj.Id)': {
'Running': ex_status,
'Id': ass_id
}
}
return_outputs(hr, ec, raw_res)
except HTTPError as e:
return_error(create_invalid_id_err_msg(str(e), ['403']))
def get_test_execution_status_command():
""" Implements attackiq-get-test-execution-status command
"""
test_id = demisto.getArg('test_id')
try:
raw_test_status = http_request('GET', f'/v1/tests/{test_id}/get_status')
test_status = build_transformed_dict(raw_test_status, TEST_STATUS_TRANS)
test_status['Id'] = test_id
hr = tableToMarkdown(f'Test {test_id} status', test_status)
return_outputs(hr, {'AttackIQTest(val.Id === obj.Id)': test_status}, raw_test_status)
except HTTPError as e:
return_error(create_invalid_id_err_msg(str(e), ['500']))
def build_test_results_hr(test_results, test_id, page, tot_pages):
"""
Creates test results human readable
Args:
page (int): Current page
tot_pages (int): Total pages
test_results (list): Results of the test (after being transformed)
test_id (str): ID of the test
Returns: Human readable of test results
"""
keys = ['Assessment Name', 'Scenario Name', 'Hostname', 'Asset IP', 'Job State', 'Modified', 'Outcome']
test_results_mod = []
for t_res in test_results:
assessment = t_res.get('Assessment')
asset = t_res.get('Asset')
scenario = t_res.get('Scenario')
hr_items = {
keys[0]: assessment.get('Name'),
keys[1]: scenario.get('Name'),
keys[2]: asset.get('Hostname'),
keys[3]: asset.get('Ipv4Address'),
keys[4]: demisto.get(t_res, 'JobState'),
keys[5]: t_res.get('Modified'),
keys[6]: demisto.get(t_res, 'Outcome.Name')
}
test_results_mod.append(hr_items)
return tableToMarkdown(f'Test Results for {test_id}\n ### Page {page}/{tot_pages}', test_results_mod, keys)
def get_test_results(page, page_size, test_id, show_last_res):
"""
Get test results response
Args:
page (int): Page number
page_size (int): Page size
test_id (int): ID of test
show_last_res (bool): Flag for showing only last result
Returns: Test results
"""
params = {
'page': page,
'page_size': page_size,
'test_id': test_id,
'show_last_result': show_last_res
}
return http_request('GET', '/v1/results', params=params)
def get_test_results_command(args=demisto.args()):
""" Implements attackiq-get-test-results command
"""
test_id = args.get('test_id')
outcome_filter = args.get('outcome_filter')
    page, page_size = get_page_number_and_page_size(args)
try:
raw_test_res = get_test_results(page, page_size, test_id, args.get('show_last_result') == 'True')
test_cnt = raw_test_res.get('count')
if test_cnt == 0:
return_outputs('No results were found', {})
else:
total_pages = math.ceil(test_cnt / page_size)
remaining_pages = total_pages - page
if remaining_pages < 0:
remaining_pages = 0
test_res = build_transformed_dict(raw_test_res['results'], TEST_RESULT_TRANS)
if outcome_filter:
test_res = list(filter(lambda x: x.get('Outcome') == outcome_filter, test_res))
context = {
'AttackIQTestResult(val.Id === obj.Id)': test_res,
'AttackIQTestResult(val.Count).Count': test_cnt,
'AttackIQTestResult(val.RemainingPages).RemainingPages': remaining_pages
}
hr = build_test_results_hr(test_res, test_id, page, total_pages)
return_outputs(hr, context, raw_test_res)
except HTTPError as e:
return_error(create_invalid_id_err_msg(str(e), ['500']))
def get_assessments(page='1', assessment_id=None, page_size=DEFAULT_PAGE_SIZE):
"""
Fetches assessments from attackIQ
Args:
page (str or int): Page number to fetch
assessment_id (str): (Optional) If provided will fetch only the assessment with matching ID
Returns: Assessments from attackIQ
"""
params = {
'page_size': page_size,
'page': page
}
if assessment_id:
return http_request('GET', f'/v1/assessments/{assessment_id}')
return http_request('GET', '/v1/assessments', params=params)
def list_assessments_command():
""" Implements attackiq-list-assessments command
"""
page, page_size = get_page_number_and_page_size(demisto.args())
raw_assessments = get_assessments(page=page, page_size=page_size)
assessments_res = build_transformed_dict(raw_assessments.get('results'), ASSESSMENTS_TRANS)
ass_cnt = raw_assessments.get('count')
total_pages = math.ceil(ass_cnt / page_size)
remaining_pages = total_pages - page
if remaining_pages < 0:
remaining_pages = 0
context = {
'AttackIQ.Assessment(val.Id === obj.Id)': assessments_res,
'AttackIQ.Assessment(val.Count).Count': ass_cnt,
'AttackIQ.Assessment(val.RemainingPages).RemainingPages': remaining_pages
}
hr = tableToMarkdown(f'AttackIQ Assessments Page {page}/{total_pages}', assessments_res,
headers=['Id', 'Name', 'Description', 'User', 'Created', 'Modified'])
return_outputs(hr, context, raw_assessments)
def get_assessment_by_id_command():
""" Implements attackiq-get-assessment-by-id command
"""
assessment_id = demisto.getArg('assessment_id')
try:
raw_assessments = get_assessments(assessment_id=assessment_id)
assessments_res = build_transformed_dict(raw_assessments, ASSESSMENTS_TRANS)
hr = tableToMarkdown(f'AttackIQ Assessment {assessment_id}', assessments_res,
headers=['Id', 'Name', 'Description', 'User', 'Created', 'Modified'])
        return_outputs(hr, {'AttackIQ.Assessment(val.Id === obj.Id)': assessments_res}, raw_assessments)
u'datetime': attribDatetime_512717149655375401,
u'onkeypress': attribOnkeypress_532917457362969849,
u'onkeydown': attribOnkeydown_1257884844152169025,
u'class': attribClass_1166814720137472289,
u'xml:lang': attribXml_lang_1645670971257252241,
u'onmousemove': attribOnmousemove_1463303904047580100,
u'onmouseover': attribOnmouseover_741809317326693841,
u'onclick': attribOnclick_1389815037327772224,
u'onkeyup': attribOnkeyup_4105996191008517796,
u'ondblclick': attribOndblclick_923980074842425329,
u'cite': attribCite_319597539676272225,
u'id': attribId_4002951160133423716,
u'dir': attribDir_4297072167429554704,
}
CONTENTMODEL = pycopia.XML.POM.ContentModel((True,))
KWATTRIBUTES = {
'lang': attribLang_267608473188383376,
'style': attribStyle_733285237156411536,
'onmouseup': attribOnmouseup_162556595998286400,
'onmouseout': attribOnmouseout_55467262469652544,
'title': attribTitle_1178737426446382009,
'datetime': attribDatetime_512717149655375401,
'onkeypress': attribOnkeypress_532917457362969849,
'xml_lang': attribXml_lang_1645670971257252241,
'onmousedown': attribOnmousedown_312304592206311721,
'class_': attribClass_1166814720137472289,
'onkeydown': attribOnkeydown_1257884844152169025,
'onmousemove': attribOnmousemove_1463303904047580100,
'onmouseover': attribOnmouseover_741809317326693841,
'onclick': attribOnclick_1389815037327772224,
'onkeyup': attribOnkeyup_4105996191008517796,
'ondblclick': attribOndblclick_923980074842425329,
'cite': attribCite_319597539676272225,
'id': attribId_4002951160133423716,
'dir': attribDir_4297072167429554704,
}
_name = u'del'
# ================== The Anchor Element ================================
# content is %Inline; except that anchors shouldn't be nested
class A(pycopia.XML.POM.ElementNode):
ATTRIBUTES = {
u'accesskey': attribAccesskey_1489765571156059024,
u'onmousedown': attribOnmousedown_312304592206311721,
u'shape': attribShape_2366611433715347289,
u'href': attribHref_2430637454403731329,
u'id': attribId_4002951160133423716,
u'style': attribStyle_733285237156411536,
u'title': attribTitle_1178737426446382009,
u'charset': attribCharset_620446450916798289,
u'rev': attribRev_169855681900044196,
u'rel': attribRel_1808232010361190976,
u'onmousemove': attribOnmousemove_1463303904047580100,
u'onmouseup': attribOnmouseup_162556595998286400,
u'onfocus': attribOnfocus_60779381971423504,
u'type': attribType_2839642281990897124,
u'onblur': attribOnblur_280018615590293904,
u'onclick': attribOnclick_1389815037327772224,
u'onmouseout': attribOnmouseout_55467262469652544,
u'onkeypress': attribOnkeypress_532917457362969849,
u'onkeydown': attribOnkeydown_1257884844152169025,
u'onmouseover': attribOnmouseover_741809317326693841,
u'hreflang': attribHreflang_475574228616447801,
u'class': attribClass_1166814720137472289,
u'target': attribTarget_2606114967532412449,
u'lang': attribLang_267608473188383376,
u'name': attribName_1939937075622105121,
u'xml:lang': attribXml_lang_1645670971257252241,
u'coords': attribCoords_942228262644424900,
u'onkeyup': attribOnkeyup_4105996191008517796,
u'ondblclick': attribOndblclick_923980074842425329,
u'dir': attribDir_4297072167429554704,
u'tabindex': attribTabindex_1133897031401996169,
}
CONTENTMODEL = pycopia.XML.POM.ContentModel((True,))
KWATTRIBUTES = {
'accesskey': attribAccesskey_1489765571156059024,
'xml_lang': attribXml_lang_1645670971257252241,
'onmousedown': attribOnmousedown_312304592206311721,
'shape': attribShape_2366611433715347289,
'href': attribHref_2430637454403731329,
'id': attribId_4002951160133423716,
'style': attribStyle_733285237156411536,
'title': attribTitle_1178737426446382009,
'charset': attribCharset_620446450916798289,
'rev': attribRev_169855681900044196,
'rel': attribRel_1808232010361190976,
'onmousemove': attribOnmousemove_1463303904047580100,
'onmouseup': attribOnmouseup_162556595998286400,
'onfocus': attribOnfocus_60779381971423504,
'type': attribType_2839642281990897124,
'onblur': attribOnblur_280018615590293904,
'onclick': attribOnclick_1389815037327772224,
'onmouseout': attribOnmouseout_55467262469652544,
'onkeypress': attribOnkeypress_532917457362969849,
'onkeydown': attribOnkeydown_1257884844152169025,
'class_': attribClass_1166814720137472289,
'onmouseover': attribOnmouseover_741809317326693841,
'hreflang': attribHreflang_475574228616447801,
'target': attribTarget_2606114967532412449,
'lang': attribLang_267608473188383376,
'name': attribName_1939937075622105121,
'coords': attribCoords_942228262644424900,
'onkeyup': attribOnkeyup_4105996191008517796,
'ondblclick': attribOndblclick_923980074842425329,
'dir': attribDir_4297072167429554704,
'tabindex': attribTabindex_1133897031401996169,
}
_name = u'a'
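# Illustrative usage sketch (assumed pycopia.XML.POM API, not verified against this generated module):
#   link = A(href=u'http://example.com/', class_=u'external')
#   link.append(pycopia.XML.POM.Text(u'Example'))
#   markup = str(link)   # expected to emit <a href="http://example.com/" class="external">Example</a>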
# ===================== Inline Elements ================================
class Span(pycopia.XML.POM.ElementNode):
ATTRIBUTES = {
u'lang': attribLang_267608473188383376,
u'style': attribStyle_733285237156411536,
u'onmousedown': attribOnmousedown_312304592206311721,
u'onmouseup': attribOnmouseup_162556595998286400,
u'onmouseout': attribOnmouseout_55467262469652544,
u'title': attribTitle_1178737426446382009,
u'onkeypress': attribOnkeypress_532917457362969849,
u'onkeydown': attribOnkeydown_1257884844152169025,
u'class': attribClass_1166814720137472289,
u'xml:lang': attribXml_lang_1645670971257252241,
u'onmousemove': attribOnmousemove_1463303904047580100,
u'onmouseover': attribOnmouseover_741809317326693841,
u'onclick': attribOnclick_1389815037327772224,
u'onkeyup': attribOnkeyup_4105996191008517796,
u'ondblclick': attribOndblclick_923980074842425329,
u'id': attribId_4002951160133423716,
u'dir': attribDir_4297072167429554704,
}
CONTENTMODEL = pycopia.XML.POM.ContentModel((True,))
KWATTRIBUTES = {
'lang': attribLang_267608473188383376,
'style': attribStyle_733285237156411536,
'onmouseup': attribOnmouseup_162556595998286400,
'onmouseout': attribOnmouseout_55467262469652544,
'title': attribTitle_1178737426446382009,
'onkeypress': attribOnkeypress_532917457362969849,
'xml_lang': attribXml_lang_1645670971257252241,
'onmousedown': attribOnmousedown_312304592206311721,
'class_': attribClass_1166814720137472289,
'onkeydown': attribOnkeydown_1257884844152169025,
'onmousemove': attribOnmousemove_1463303904047580100,
'onmouseover': attribOnmouseover_741809317326693841,
'onclick': attribOnclick_1389815037327772224,
'onkeyup': attribOnkeyup_4105996191008517796,
'ondblclick': attribOndblclick_923980074842425329,
'id': attribId_4002951160133423716,
'dir': attribDir_4297072167429554704,
}
_name = u'span'
# generic language/style container
class Bdo(pycopia.XML.POM.ElementNode):
ATTRIBUTES = {
u'lang': attribLang_267608473188383376,
u'style': attribStyle_733285237156411536,
u'onmousedown': attribOnmousedown_312304592206311721,
u'onmouseup': attribOnmouseup_162556595998286400,
u'onmouseout': attribOnmouseout_55467262469652544,
u'title': attribTitle_1178737426446382009,
u'onkeypress': attribOnkeypress_532917457362969849,
u'onkeydown': attribOnkeydown_1257884844152169025,
u'class': attribClass_1166814720137472289,
u'xml:lang': attribXml_lang_1645670971257252241,
u'onmousemove': attribOnmousemove_1463303904047580100,
u'onmouseover': attribOnmouseover_741809317326693841,
u'onclick': attribOnclick_1389815037327772224,
u'onkeyup': attribOnkeyup_4105996191008517796,
u'ondblclick': attribOndblclick_923980074842425329,
u'id': attribId_4002951160133423716,
u'dir': attribDir_353249812560650625,
}
CONTENTMODEL = pycopia.XML.POM.ContentModel((True,))
KWATTRIBUTES = {
'lang': attribLang_267608473188383376,
'style': attribStyle_733285237156411536,
'onmouseup': attribOnmouseup_162556595998286400,
'onmouseout': attribOnmouseout_55467262469652544,
'title': attribTitle_1178737426446382009,
'onkeypress': attribOnkeypress_532917457362969849,
'xml_lang': attribXml_lang_1645670971257252241,
'onkeydown': attribOnkeydown_1257884844152169025,
'onmousedown': attribOnmousedown_312304592206311721,
'class_': attribClass_1166814720137472289,
'onmousemove': attribOnmousemove_1463303904047580100,
'onmouseover': attribOnmouseover_741809317326693841,
'onclick': attribOnclick_1389815037327772224,
'onkeyup': attribOnkeyup_4105996191008517796,
'ondblclick': attribOndblclick_923980074842425329,
'id': attribId_4002951160133423716,
'dir': attribDir_353249812560650625,
}
_name = u'bdo'
# I18N BiDi over-ride
class Br(pycopia.XML.POM.ElementNode):
ATTRIBUTES = {
u'clear': attribClear_1130432899291921,
u'style': attribStyle_733285237156411536,
u'title': attribTitle_1178737426446382009,
u'id': attribId_4002951160133423716,
u'class': attribClass_1166814720137472289,
}
CONTENTMODEL = pycopia.XML.POM.ContentModel(None)
KWATTRIBUTES = {
'class_': attribClass_1166814720137472289,
'style': attribStyle_733285237156411536,
'id': attribId_4002951160133423716,
'clear': attribClear_1130432899291921,
'title': attribTitle_1178737426446382009,
}
_name = u'br'
# forced line break
class Em(pycopia.XML.POM.ElementNode):
ATTRIBUTES = {
u'lang': attribLang_267608473188383376,
u'style': attribStyle_733285237156411536,
u'onmousedown': attribOnmousedown_312304592206311721,
u'onmouseup': attribOnmouseup_162556595998286400,
u'onmouseout': attribOnmouseout_55467262469652544,
u'title': attribTitle_1178737426446382009,
u'onkeypress': attribOnkeypress_532917457362969849,
u'onkeydown': attribOnkeydown_1257884844152169025,
u'class': attribClass_1166814720137472289,
u'xml:lang': attribXml_lang_1645670971257252241,
u'onmousemove': attribOnmousemove_1463303904047580100,
u'onmouseover': attribOnmouseover_741809317326693841,
u'onclick': attribOnclick_1389815037327772224,
u'onkeyup': attribOnkeyup_4105996191008517796,
u'ondblclick': attribOndblclick_923980074842425329,
u'id': attribId_4002951160133423716,
u'dir': attribDir_4297072167429554704,
}
CONTENTMODEL = pycopia.XML.POM.ContentModel((True,))
KWATTRIBUTES = {
'lang': attribLang_267608473188383376,
'style': attribStyle_733285237156411536,
'onmouseup': attribOnmouseup_162556595998286400,
'onmouseout': attribOnmouseout_55467262469652544,
'title': attribTitle_1178737426446382009,
'onkeypress': attribOnkeypress_532917457362969849,
'xml_lang': attribXml_lang_1645670971257252241,
'onmousedown': attribOnmousedown_312304592206311721,
'class_': attribClass_1166814720137472289,
'onkeydown': attribOnkeydown_1257884844152169025,
'onmousemove': attribOnmousemove_1463303904047580100,
'onmouseover': attribOnmouseover_741809317326693841,
'onclick': attribOnclick_1389815037327772224,
'onkeyup': attribOnkeyup_4105996191008517796,
'ondblclick': attribOndblclick_923980074842425329,
'id': attribId_4002951160133423716,
'dir': attribDir_4297072167429554704,
}
_name = u'em'
# emphasis
class Strong(pycopia.XML.POM.ElementNode):
ATTRIBUTES = {
u'lang': attribLang_267608473188383376,
u'style': attribStyle_733285237156411536,
u'onmousedown': attribOnmousedown_312304592206311721,
u'onmouseup': attribOnmouseup_162556595998286400,
u'onmouseout': attribOnmouseout_55467262469652544,
u'title': attribTitle_1178737426446382009,
u'onkeypress': attribOnkeypress_532917457362969849,
u'onkeydown': attribOnkeydown_1257884844152169025,
u'class': attribClass_1166814720137472289,
u'xml:lang': attribXml_lang_1645670971257252241,
u'onmousemove': attribOnmousemove_1463303904047580100,
u'onmouseover': attribOnmouseover_741809317326693841,
u'onclick': attribOnclick_1389815037327772224,
u'onkeyup': attribOnkeyup_4105996191008517796,
u'ondblclick': attribOndblclick_923980074842425329,
u'id': attribId_4002951160133423716,
u'dir': attribDir_4297072167429554704,
}
CONTENTMODEL = pycopia.XML.POM.ContentModel((True,))
KWATTRIBUTES = {
'lang': attribLang_267608473188383376,
'style': attribStyle_733285237156411536,
'onmouseup': attribOnmouseup_162556595998286400,
'onmouseout': attribOnmouseout_55467262469652544,
'title': attribTitle_1178737426446382009,
'onkeypress': attribOnkeypress_532917457362969849,
'xml_lang': attribXml_lang_1645670971257252241,
'onmousedown': attribOnmousedown_312304592206311721,
'class_': attribClass_1166814720137472289,
'onkeydown': attribOnkeydown_1257884844152169025,
'onmousemove': attribOnmousemove_1463303904047580100,
'onmouseover': attribOnmouseover_741809317326693841,
'onclick': attribOnclick_1389815037327772224,
'onkeyup': attribOnkeyup_4105996191008517796,
'ondblclick': attribOndblclick_923980074842425329,
'id': attribId_4002951160133423716,
'dir': attribDir_4297072167429554704,
}
_name = u'strong'
# strong emphasis
class Dfn(pycopia.XML.POM.ElementNode):
ATTRIBUTES = {
u'lang': attribLang_267608473188383376,
u'style': attribStyle_733285237156411536,
u'onmousedown': attribOnmousedown_312304592206311721,
u'onmouseup': attribOnmouseup_162556595998286400,
u'onmouseout': attribOnmouseout_55467262469652544,
u'title': attribTitle_1178737426446382009,
u'onkeypress': attribOnkeypress_532917457362969849,
u'onkeydown': attribOnkeydown_1257884844152169025,
u'class': attribClass_1166814720137472289,
u'xml:lang': attribXml_lang_1645670971257252241,
u'onmousemove': attribOnmousemove_1463303904047580100,
u'onmouseover': attribOnmouseover_741809317326693841,
u'onclick': attribOnclick_1389815037327772224,
u'onkeyup': attribOnkeyup_4105996191008517796,
u'ondblclick': attribOndblclick_923980074842425329,
u'id': attribId_4002951160133423716,
u'dir': attribDir_4297072167429554704,
}
CONTENTMODEL = pycopia.XML.POM.ContentModel((True,))
KWATTRIBUTES = {
'lang': attribLang_267608473188383376,
'style': attribStyle_733285237156411536,
'onmouseup': attribOnmouseup_162556595998286400,
'onmouseout': attribOnmouseout_55467262469652544,
'title': attribTitle_1178737426446382009,
'onkeypress': attribOnkeypress_532917457362969849,
'xml_lang': attribXml_lang_1645670971257252241,
'onmousedown': attribOnmousedown_312304592206311721,
'class_': attribClass_1166814720137472289,
'onkeydown': attribOnkeydown_1257884844152169025,
'onmousemove': attribOnmousemove_1463303904047580100,
'onmouseover': attribOnmouseover_741809317326693841,
'onclick': attribOnclick_1389815037327772224,
'onkeyup': attribOnkeyup_4105996191008517796,
'ondblclick': attribOndblclick_923980074842425329,
'id': attribId_4002951160133423716,
'dir': attribDir_4297072167429554704,
}
_name = u'dfn'
# definitional
class Code(pycopia.XML.POM.ElementNode):
ATTRIBUTES = {
u'lang': attribLang_267608473188383376,
u'style': attribStyle_733285237156411536,
u'onmousedown': attribOnmousedown_312304592206311721,
u'onmouseup': attribOnmouseup_162556595998286400,
u'onmouseout': attribOnmouseout_55467262469652544,
u'title': attribTitle_1178737426446382009,
u'onkeypress': attribOnkeypress_532917457362969849,
u'onkeydown': attribOnkeydown_1257884844152169025,
u'class': attribClass_1166814720137472289,
u'xml:lang': attribXml_lang_1645670971257252241,
u'onmousemove': attribOnmousemove_1463303904047580100,
u'onmouseover': attribOnmouseover_741809317326693841,
u'onclick': attribOnclick_1389815037327772224,
u'onkeyup': attribOnkeyup_4105996191008517796,
u'ondblclick': attribOndblclick_923980074842425329,
u'id': attribId_4002951160133423716,
u'dir': attribDir_4297072167429554704,
}
CONTENTMODEL = pycopia.XML.POM.ContentModel((True,))
KWATTRIBUTES = {
'lang': attribLang_267608473188383376,
'style': attribStyle_733285237156411536,
'onmouseup': attribOnmouseup_162556595998286400,
'onmouseout': attribOnmouseout_55467262469652544,
'title': attribTitle_1178737426446382009,
'onkeypress': attribOnkeypress_532917457362969849,
'xml_lang': attribXml_lang_1645670971257252241,
'onmousedown': attribOnmousedown_312304592206311721,
'class_': attribClass_1166814720137472289,
'onkeydown': attribOnkeydown_1257884844152169025,
'onmousemove': attribOnmousemove_1463303904047580100,
'onmouseover': attribOnmouseover_741809317326693841,
'onclick': attribOnclick_1389815037327772224,
'onkeyup': attribOnkeyup_4105996191008517796,
'ondblclick': attribOndblclick_923980074842425329,
'id': attribId_4002951160133423716,
'dir': attribDir_4297072167429554704,
}
_name = u'code'
# program code
class Samp(pycopia.XML.POM.ElementNode):
ATTRIBUTES = {
u'lang': attribLang_267608473188383376,
u'style': attribStyle_733285237156411536,
u'onmousedown': attribOnmousedown_312304592206311721,
u'onmouseup': attribOnmouseup_162556595998286400,
u'onmouseout': attribOnmouseout_55467262469652544,
u'title': attribTitle_1178737426446382009,
u'onkeypress': attribOnkeypress_532917457362969849,
u'onkeydown': attribOnkeydown_1257884844152169025,
u'class': attribClass_1166814720137472289,
u'xml:lang': attribXml_lang_1645670971257252241,
u'onmousemove': attribOnmousemove_1463303904047580100,
u'onmouseover': attribOnmouseover_741809317326693841,
u'onclick': attribOnclick_1389815037327772224,
u'onkeyup': attribOnkeyup_4105996191008517796,
u'ondblclick': attribOndblclick_923980074842425329,
u'id': attribId_4002951160133423716,
u'dir': attribDir_4297072167429554704,
}
CONTENTMODEL = pycopia.XML.POM.ContentModel((True,))
KWATTRIBUTES = {
'lang': attribLang_267608473188383376,
'style': attribStyle_733285237156411536,
'onmouseup': attribOnmouseup_162556595998286400,
'onmouseout': attribOnmouseout_55467262469652544,
'title': attribTitle_1178737426446382009,
'onkeypress': attribOnkeypress_532917457362969849,
'xml_lang': attribXml_lang_1645670971257252241,
'onmousedown': attribOnmousedown_312304592206311721,
'class_': attribClass_1166814720137472289,
'onkeydown': attribOnkeydown_1257884844152169025,
'onmousemove': attribOnmousemove_1463303904047580100,
'onmouseover': attribOnmouseover_741809317326693841,
'onclick': attribOnclick_1389815037327772224,
'onkeyup': attribOnkeyup_4105996191008517796,
'ondblclick': attribOndblclick_923980074842425329,
'id': attribId_4002951160133423716,
'dir': attribDir_4297072167429554704,
}
_name = u'samp'
# sample
class Kbd(pycopia.XML.POM.ElementNode):
ATTRIBUTES = {
u'lang': attribLang_267608473188383376,
u'style': attribStyle_733285237156411536,
u'onmousedown': attribOnmousedown_312304592206311721,
u'onmouseup': attribOnmouseup_162556595998286400,
u'onmouseout': attribOnmouseout_55467262469652544,
u'title': attribTitle_1178737426446382009,
u'onkeypress': attribOnkeypress_532917457362969849,
u'onkeydown': attribOnkeydown_1257884844152169025,
u'class': attribClass_1166814720137472289,
u'xml:lang': attribXml_lang_1645670971257252241,
u'onmousemove': attribOnmousemove_1463303904047580100,
u'onmouseover': attribOnmouseover_741809317326693841,
u'onclick': attribOnclick_1389815037327772224,
u'onkeyup': attribOnkeyup_4105996191008517796,
u'ondblclick': attribOndblclick_923980074842425329,
u'id': attribId_4002951160133423716,
u'dir': attribDir_4297072167429554704,
}
CONTENTMODEL = pycopia.XML.POM.ContentModel((True,))
KWATTRIBUTES = {
'lang': attribLang_267608473188383376,
'style': attribStyle_733285237156411536,
'onmouseup': attribOnmouseup_162556595998286400,
'onmouseout': attribOnmouseout_55467262469652544,
'title': attribTitle_1178737426446382009,
'onkeypress': attribOnkeypress_532917457362969849,
'xml_lang': attribXml_lang_1645670971257252241,
'onmousedown': attribOnmousedown_312304592206311721,
'class_': attribClass_1166814720137472289,
'onkeydown': attribOnkeydown_1257884844152169025,
'onmousemove': attribOnmousemove_1463303904047580100,
'onmouseover': attribOnmouseover_741809317326693841,
'onclick': attribOnclick_1389815037327772224,
'onkeyup': attribOnkeyup_4105996191008517796,
'ondblclick': attribOndblclick_923980074842425329,
| |
from __future__ import print_function
import gc, os, sys
import numpy as np
import scipy as sp
import numpy.linalg as la
import scipy.linalg as sla
from numpy.linalg import norm
from time import time
from copy import deepcopy
from warnings import warn
from Kuru.FiniteElements.Assembly import Assemble #, AssembleExplicit
from Kuru.FiniteElements.LocalAssembly.KinematicMeasures import *
from Kuru.FiniteElements.LocalAssembly._KinematicMeasures_ import _KinematicMeasures_
from Kuru import Mesh
from Kuru import FunctionSpace, QuadratureRule
__all__ = ["GrowthRemodelingIntegrator"]
class GrowthRemodelingIntegrator(object):
"""Base class for structural time integerators
"""
def __init__(self,
gain,
turnover,
density_turnover="self",
not_degradation=True,
degradation_at_line=False,
degradation_at_point=False,
degradation_point=None,
aging_only=False,
monitoring_node=0,
damage_spread_space=None,
damage_spread_time=None,
damage_axis=0,
has_dissection=False,
dissection_time_threshold = None,
maximum_dissection_spread = None,
dissection_pressure = None
):
self.HomeostaticStress = None
self.gain = gain
self.turnover = turnover
self.density_turnover = density_turnover
self.degradation_at_line = degradation_at_line
self.degradation_at_point = degradation_at_point
self.degradation_point = degradation_point
self.aging_only = aging_only
self.not_degradation = not_degradation
self.monitoring_node = monitoring_node
if degradation_at_point or aging_only or degradation_at_line:
self.not_degradation = False
if degradation_point is None and degradation_at_point:
self.degradation_point = [0.,0.,0.]
self.damage_spread_space = damage_spread_space
self.damage_spread_time = damage_spread_time
self.damage_axis = damage_axis
self.has_dissection = has_dissection
self.dissection_time_threshold = dissection_time_threshold
self.maximum_dissection_spread = maximum_dissection_spread
self.dissection_pressure = dissection_pressure
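    # Illustrative construction sketch (hypothetical parameter values, not taken from any particular model):
    #   gr_integrator = GrowthRemodelingIntegrator(gain=0.05, turnover=101.0,
    #                                              density_turnover="self", monitoring_node=0)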
def HomeostaticDistortion(self, fem_solver, formulation, TotalDisp, Increment):
""" check the distortion of homeostasis"""
dmesh = Mesh()
dmesh.points = TotalDisp[:,:formulation.ndim,Increment]
dmesh_bounds = dmesh.Bounds
distortion = 100.0*np.sqrt(dmesh_bounds[1,0]**2+dmesh_bounds[1,1]**2+\
dmesh_bounds[1,2]**2)/0.010
if distortion<5.0:
print("The Distortion in Homeostasis is: {}".format(distortion))
else:
print("The Distortion in Homeostasis is: {}".format(distortion))
sys.exit("Growth and Remodeling solver stop, distortion in Homeostasis is to big")
def LogSave(self, fem_solver, formulation, TotalDisp, Increment, materials, FibreStress, gr_materials):
if fem_solver.print_incremental_log:
# find the set of the node under surveillance
imat = -1
for i in range(gr_materials.shape[0]):
if self.monitoring_node in materials[gr_materials[i]].node_set:
imat = gr_materials[i]
imat0 = i
inode = np.where(materials[imat].node_set==self.monitoring_node)[0][0]
break
            if imat == -1:
print("Set of the node is not recognized. I will use material 0 and its node 0.")
imat = gr_materials[0]
imat0 = 0
inode = 0
dmesh = Mesh()
dmesh.points = TotalDisp[:,:formulation.ndim,Increment]
dmesh_bounds = dmesh.Bounds
print("\nMinimum and maximum incremental solution values at increment {} are \n".\
format(Increment),dmesh_bounds)
print("\nGrowth and Remodeling properties at node, {}".format(self.monitoring_node))
print("Densities: {:8.3f}, {:8.3f}, {:8.3f}, {:8.3f}, {:8.3f}, {:8.3f}".\
format(materials[imat].state_variables[inode,14],materials[imat].state_variables[inode,15],\
materials[imat].state_variables[inode,16],materials[imat].state_variables[inode,17],\
materials[imat].state_variables[inode,18],materials[imat].state_variables[inode,19]))
print("Remodeling: {:6.3f}, {:6.3f}, {:6.3f}, {:6.3f}, {:6.3f}".\
format(materials[imat].state_variables[inode,9],materials[imat].state_variables[inode,10],\
materials[imat].state_variables[inode,11],materials[imat].state_variables[inode,12],\
materials[imat].state_variables[inode,13]))
print("Growth: {:6.3f}".format(materials[imat].state_variables[inode,20]))
print("FibreStress: {:8.3f}, {:8.3f}, {:8.3f}, {:8.3f}, {:8.3f}".\
format(FibreStress[imat0][inode,0],FibreStress[imat0][inode,1],FibreStress[imat0][inode,2],\
FibreStress[imat0][inode,3],FibreStress[imat0][inode,4]))
# SAVE INCREMENTAL SOLUTION IF ASKED FOR
if fem_solver.save_incremental_solution:
# FOR BIG MESHES
if Increment % fem_solver.incremental_solution_save_frequency !=0:
return
from scipy.io import savemat
filename = fem_solver.incremental_solution_filename
if filename is not None:
if ".mat" in filename:
filename = filename.split(".")[0]
savemat(filename+"_"+str(Increment),
{'solution':TotalDisp[:,:,Increment]},do_compression=True)
else:
raise ValueError("No file name provided to save incremental solution")
def NewtonRaphson(self, function_spaces, formulation, solver, Increment, K, NodalForces,
Residual, mesh, Eulerx, materials, boundary_condition, AppliedDirichletInc, fem_solver):
Tolerance = fem_solver.newton_raphson_tolerance
LoadIncrement = fem_solver.number_of_load_increments
Iter = 0
fem_solver.iterative_norm_history = []
# APPLY INCREMENTAL DIRICHLET PER LOAD STEP (THIS IS INCREMENTAL NOT ACCUMULATIVE)
IncDirichlet = boundary_condition.UpdateFixDoFs(AppliedDirichletInc,
K.shape[0],formulation.nvar)
# UPDATE EULERIAN COORDINATE
Eulerx += IncDirichlet[:,:formulation.ndim]
while fem_solver.norm_residual > Tolerance or Iter==0:
# GET THE REDUCED SYSTEM OF EQUATIONS
K_b, F_b = boundary_condition.GetReducedMatrices(K,Residual)[:2]
# SOLVE THE SYSTEM
sol = solver.Solve(K_b,-F_b)
# GET ITERATIVE SOLUTION
dU = boundary_condition.UpdateFreeDoFs(sol,K.shape[0],formulation.nvar)
# UPDATE THE EULERIAN COMPONENTS
# UPDATE THE GEOMETRY
Eulerx += dU[:,:formulation.ndim]
# RE-ASSEMBLE - COMPUTE STIFFNESS AND INTERNAL TRACTION FORCES
K, TractionForces = Assemble(fem_solver, function_spaces, formulation, mesh, materials,
boundary_condition, Eulerx)[:2]
# COMPUTE ROBIN STIFFNESS AND FORCES (EXTERNAL)
K, TractionForces = boundary_condition.ComputeRobinForces(mesh, materials, function_spaces,
fem_solver, Eulerx, K, TractionForces)
# FIND THE RESIDUAL
Residual[boundary_condition.columns_in] = TractionForces[boundary_condition.columns_in] - \
NodalForces[boundary_condition.columns_in]
# SAVE THE NORM
fem_solver.abs_norm_residual = la.norm(Residual[boundary_condition.columns_in])
if Iter==0:
fem_solver.NormForces = la.norm(Residual[boundary_condition.columns_in])
fem_solver.norm_residual = np.abs(la.norm(Residual[boundary_condition.columns_in])/fem_solver.NormForces)
# SAVE THE NORM
fem_solver.NRConvergence['Increment_'+str(Increment)] = np.append(fem_solver.NRConvergence[\
'Increment_'+str(Increment)],fem_solver.norm_residual)
print("Iteration {} for increment {}.".format(Iter, Increment) +\
" Residual (abs) {0:>16.7g}".format(fem_solver.abs_norm_residual),
"\t Residual (rel) {0:>16.7g}".format(fem_solver.norm_residual))
# BREAK BASED ON ABSOLUTE NORM OF THE RESIDUAL
if np.abs(fem_solver.abs_norm_residual) < Tolerance:
break
# BREAK BASED ON INCREMENTAL SOLUTION - KEEP IT AFTER UPDATE
if norm(dU) <= fem_solver.newton_raphson_solution_tolerance and Iter!=0:
print("Incremental solution within tolerance i.e. norm(dU): {}".format(norm(dU)))
break
# UPDATE ITERATION NUMBER
Iter +=1
if Iter==fem_solver.maximum_iteration_for_newton_raphson:
fem_solver.newton_raphson_failed_to_converge = True
break
if np.isnan(fem_solver.norm_residual) or fem_solver.norm_residual>1e06:
fem_solver.newton_raphson_failed_to_converge = True
break
# IF BREAK WHEN NEWTON RAPHSON STAGNATES IS ACTIVATED
if fem_solver.break_at_stagnation:
fem_solver.iterative_norm_history.append(fem_solver.norm_residual)
if Iter >= 5:
if np.mean(fem_solver.iterative_norm_history) < 1.:
break
# USER DEFINED CRITERIA TO BREAK OUT OF NEWTON-RAPHSON
if fem_solver.user_defined_break_func is not None:
if fem_solver.user_defined_break_func(Increment,Iter,fem_solver.norm_residual,fem_solver.abs_norm_residual, Tolerance):
break
# USER DEFINED CRITERIA TO STOP NEWTON-RAPHSON AND THE WHOLE ANALYSIS
if fem_solver.user_defined_stop_func is not None:
if fem_solver.user_defined_stop_func(Increment,Iter,fem_solver.norm_residual,fem_solver.abs_norm_residual, Tolerance):
fem_solver.newton_raphson_failed_to_converge = True
break
return Eulerx, K, Residual
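# A minimal sketch (hypothetical standalone helper, not part of this class) of
# the convergence bookkeeping used above: the relative norm is the current
# residual norm divided by the residual norm of the first Newton iteration, and
# the loop stops on either the absolute or the relative criterion.
#
#   def has_converged(residual_norms, tol):
#       abs_norm = residual_norms[-1]
#       rel_norm = abs_norm / residual_norms[0]
#       return abs_norm < tol or rel_norm < tol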
def GetFibreStressAndSoftness(self, mesh, formulation, material, fem_solver, Eulerx, average_derived_quantities=True):
"""
steps: [list,np.1darray] for which time steps/increments the data should
be recovered
"""
det = np.linalg.det
inv = np.linalg.inv
# GET THE UNDERLYING LINEAR MESH
# lmesh = mesh.GetLinearMesh()
C = mesh.InferPolynomialDegree() - 1
ndim = mesh.InferSpatialDimension()
nelem = mesh.elements.shape[0]; npoint = mesh.points.shape[0]
nodeperelem = mesh.elements.shape[1]
# GET QUADRATURE
norder = 2*C
if norder == 0:
norder=1
# quadrature = QuadratureRule(qtype="gauss", norder=norder, mesh_type=mesh.element_type, optimal=3)
# Domain = FunctionSpace(mesh, quadrature, p=C+1)
Domain = FunctionSpace(mesh, p=C+1, evaluate_at_nodes=True)
Jm = Domain.Jm
AllGauss = Domain.AllGauss
Bases = Domain.Bases
# requires_geometry_update = fem_solver.requires_geometry_update
requires_geometry_update = True # ALWAYS TRUE FOR THIS ROUTINE
F = np.zeros((material.element_set.shape[0],nodeperelem,ndim,ndim))
# DEFINE CONSTITUENT STRESSES FOR GROWTH-REMODELING PROBLEM
ElemFibreStress = np.zeros((material.element_set.shape[0],nodeperelem,5)) # 5-fibres
ElemSoftness = np.zeros((material.element_set.shape[0],nodeperelem,5)) # 5-fibres
FibreStress = np.zeros((material.node_set.shape[0],5))
Softness = np.zeros((material.node_set.shape[0],5))
# LOOP OVER ELEMENTS
for ielem in range(material.element_set.shape[0]):
elem = material.element_set[ielem]
# GET THE FIELDS AT THE ELEMENT LEVEL
LagrangeElemCoords = mesh.points[mesh.elements[elem,:],:]
EulerElemCoords = Eulerx[mesh.elements[elem,:],:]
# GROWTH-REMODELING VALUES FOR THIS ELEMENT
material.MappingStateVariables(mesh,Domain,elem)
if material.has_low_level_dispatcher:
# GET LOCAL KINEMATICS
SpatialGradient, F[ielem,:,:,:], detJ, dV = _KinematicMeasures_(Jm, AllGauss[:,0],
LagrangeElemCoords, EulerElemCoords, requires_geometry_update)
# PARAMETERS FOR INCOMPRESSIBILITY (MEAN DILATATION METHOD HU-WASHIZU)
if material.is_incompressible:
MaterialVolume = np.sum(dV)
if fem_solver.has_growth_remodeling:
dve = np.true_divide(detJ,material.StateVariables[:,20])
CurrentVolume = np.sum(dve)
else:
CurrentVolume = np.sum(detJ)
material.pressure = material.kappa*(CurrentVolume-MaterialVolume)/MaterialVolume
# COMPUTE FIBRE STRESS AND SOFTNESS
ElemFibreStress[ielem,:,:],ElemSoftness[ielem,:,:] = material._ConstituentMeasures_(F[ielem,:,:,:],elem)
else:
# GAUSS LOOP IN VECTORISED FORM
ParentGradientX = np.einsum('ijk,jl->kil', Jm, LagrangeElemCoords)
# MATERIAL GRADIENT TENSOR IN PHYSICAL ELEMENT [\nabla_0 (N)]
MaterialGradient = np.einsum('ijk,kli->ijl', inv(ParentGradientX), Jm)
# DEFORMATION GRADIENT TENSOR [\vec{x} \otimes \nabla_0 (N)]
F[ielem,:,:,:] = np.einsum('ij,kli->kjl', EulerElemCoords, MaterialGradient)
# COMPUTE REMAINING KINEMATIC MEASURES
StrainTensors = KinematicMeasures(F[ielem,:,:,:], fem_solver.analysis_nature)
# GEOMETRY UPDATE IS A MUST
# MAPPING TENSOR [\partial\vec{X}/ \partial\vec{\varepsilon} (ndim x ndim)]
ParentGradientx = np.einsum('ijk,jl->kil',Jm,EulerElemCoords)
# SPATIAL GRADIENT TENSOR IN PHYSICAL ELEMENT [\nabla (N)]
SpatialGradient = np.einsum('ijk,kli->ilj',inv(ParentGradientx),Jm)
# COMPUTE ONCE detJ (GOOD SPEEDUP COMPARED TO COMPUTING TWICE)
detJ = np.einsum('i,i,i->i',AllGauss[:,0],np.abs(det(ParentGradientX)),
np.abs(StrainTensors['J']))
# COMPUTE PARAMETERS FOR MEAN DILATATION METHOD, IT NEEDS TO BE BEFORE COMPUTE HESSIAN AND STRESS
if material.is_incompressible:
dV = np.einsum('i,i->i',AllGauss[:,0],np.abs(det(ParentGradientX)))
MaterialVolume = np.sum(dV)
if fem_solver.has_growth_remodeling:
dve = np.true_divide(detJ,material.StateVariables[:,20])
CurrentVolume = np.sum(dve)
else:
CurrentVolume = np.sum(detJ)
material.pressure = material.kappa*(CurrentVolume-MaterialVolume)/MaterialVolume
# LOOP OVER GAUSS POINTS
for counter in range(AllGauss.shape[0]):
# COMPUTE FIBRE STRESS AND SOFTNESS
ElemFibreStress[ielem,counter,:],ElemSoftness[ielem,counter,:] = material.ConstituentMeasures(
StrainTensors,elem,counter)
# COMPUTE THE COMMON/NEIGHBOUR NODES ONCE
Elss, Poss = material.GetNodeCommonality()[:2]
for inode in range(material.node_set.shape[0]):
Els, Pos = Elss[inode], Poss[inode]
ncommon_nodes = Els.shape[0]
for uelem in range(ncommon_nodes):
FibreStress[inode,:] += ElemFibreStress[Els[uelem],Pos[uelem],:]
Softness[inode,:] += ElemSoftness[Els[uelem],Pos[uelem],:]
# AVERAGE OUT
FibreStress[inode,:] /= ncommon_nodes
Softness[inode,:] /= ncommon_nodes
return FibreStress,Softness
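# A minimal sketch of the nodal averaging performed above (hypothetical
# standalone helper; elem_values is assumed to have shape
# (nelem, nodeperelem, ncomp) and Elss/Poss to come from GetNodeCommonality):
# the value at a node is the mean of the contributions of all elements sharing
# that node.
#
#   def average_to_nodes(elem_values, Elss, Poss, nnode, ncomp):
#       nodal = np.zeros((nnode, ncomp))
#       for inode in range(nnode):
#           Els, Pos = Elss[inode], Poss[inode]
#           for e, p in zip(Els, Pos):
#               nodal[inode, :] += elem_values[e, p, :]
#           nodal[inode, :] /= Els.shape[0]
#       return nodal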
def RatesGrowthRemodeling(self, mesh, material, FibreStress, Softness, imat):
""" This are the rates of Growth and Rmodeling
"""
if self.HomeostaticStress is None:
raise ValueError("Homeostatic Stress is not fixed")
#k_s = np.zeros((),dytpe=)
k_s = self.gain/self.turnover
Rates = np.zeros((material.node_set.shape[0],10),dtype=np.float64)
# choose a mode of collagen addition, either self-driven or muscle-driven
# each fibre density is driven by its own stress
if self.density_turnover == "self":
for node in range(material.node_set.shape[0]):
for fibre in range(5):
if self.HomeostaticStress[imat][node,fibre] == 0.:
continue
DeltaStress = FibreStress[imat][node,fibre] - self.HomeostaticStress[imat][node,fibre]
# Fibre density rate
Rates[node,fibre+5] = k_s*material.state_variables[node,fibre+15]*\
DeltaStress/self.HomeostaticStress[imat][node,fibre]
# Fibre remodeling rate
Rates[node,fibre] = (Rates[node,fibre+5]/material.state_variables[node,fibre+15] + \
1./self.turnover)*DeltaStress*Softness[imat][node,fibre]
# each fibre density is driven by muscle stress
elif self.density_turnover == "muscle":
for node in range(material.node_set.shape[0]):
for fibre in range(5):
if self.HomeostaticStress[imat][node,fibre] == 0.:
continue
# assuming the material 0 is the media
DeltaStress_m = FibreStress[0][node,0] - self.HomeostaticStress[0][node,0]
DeltaStress = FibreStress[imat][node,fibre] - self.HomeostaticStress[imat][node,fibre]
# Fibre density rate
Rates[node,fibre+5] = k_s*material.state_variables[node,fibre+15]*\
DeltaStress_m/self.HomeostaticStress[0][node,0]
# Fibre remodeling rate
Rates[node,fibre] = (Rates[node,fibre+5]/material.state_variables[node,fibre+15] + \
1./self.turnover)*DeltaStress*Softness[imat][node,fibre]
# each fibre density is driven by its own stress and mass is added only when the delta is positive
elif self.density_turnover == "self_sgn":
for node in range(material.node_set.shape[0]):
for fibre in | |
self.submodel
if self.init_state_model:
self.specs['init_state_obs_mean'] = \
self.init_state_model.scaler_X._mean
self.specs['init_state_obs_var'] = \
self.init_state_model.scaler_X._var
if self.difftraj:
if self.validation:
self.specs['nb_difftraj_train'] = len(train_split)
self.specs['nb_difftraj_val'] = len(val_split)
else:
self.specs['nb_difftraj_train'] = self.nb_difftraj
self.specs['nb_difftraj_val'] = 0
# Metrics to evaluate learned model
if self.config.grid_inf is None:
logging.warning('No grid was predefined by the user for one step '
'ahead model evaluation and rollouts, so using '
'min and max of state data.')
self.grid_inf = torch.min(self.X_train, dim=0).values
self.grid_sup = torch.max(self.X_train, dim=0).values
self.grid_inf = torch.as_tensor(self.grid_inf)
self.grid_sup = torch.as_tensor(self.grid_sup)
self.step = 0
self.sample_idx = 0
if ground_truth_approx:
# Data rollouts cannot be longer than data
self.rollout_length = int(
np.min([self.rollout_length, self.nb_samples - 1]))
self.prior_kwargs = self.config.prior_kwargs
if self.ground_truth_approx:
logging.warning('True dynamics are approximated from data or '
'from a simplified model: there is actually no '
'ground truth, the true dynamics are only used as '
'a comparison to the GP model! Hence, model '
'evaluation tools such as GP_plot, rollouts or '
'model_evaluation are only indicative; true '
'evaluation of the model can only be obtained by '
'predicting on a test set and comparing to the '
'true data.')
self.init_time = time.time()
self.time = torch.zeros((1, 1))
self.grid_RMSE = torch.zeros((0, 2))
self.grid_SRMSE = torch.zeros((0, 2))
self.rollout_RMSE = torch.zeros((0, 2))
self.rollout_SRMSE = torch.zeros((0, 2))
self.rollout_RMSE_init = torch.zeros((0, 2))
self.rollout_SRMSE_init = torch.zeros((0, 2))
self.rollout_RMSE_output = torch.zeros((0, 2))
self.rollout_SRMSE_output = torch.zeros((0, 2))
self.train_loss = torch.zeros((0,))
self.val_loss = torch.zeros((0,))
# Create rollouts for evaluation
self.rollout_list = self.create_rollout_list()
# Create grid of (x_t, u_t) to evaluate model quality (true dynamics
# needed to compare true x_t+1 to predicted)
self.grid, self.grid_controls = \
self.create_grid(self.constrain_u, self.grid_inf, self.grid_sup)
self.true_predicted_grid = \
self.create_true_predicted_grid(self.grid, self.grid_controls)
if not self.ground_truth_approx:
# Reject outliers from grid
true_predicted_grid_df = pd.DataFrame(
self.true_predicted_grid.cpu().numpy())
grid_df = pd.DataFrame(self.grid.cpu().numpy())
grid_controls_df = pd.DataFrame(self.grid_controls.cpu().numpy())
mask = remove_outlier(true_predicted_grid_df)
true_predicted_grid_df = true_predicted_grid_df[mask]
grid_df = grid_df[mask]
grid_controls_df = grid_controls_df[mask]
self.true_predicted_grid = \
true_predicted_grid_df.values
self.grid = grid_df.values
self.grid_controls = \
grid_controls_df.values
# Update variables
self.variables = {'X_train': self.X_train, 'U_train': self.U_train,
'Computation_time': self.time}
self.variables['grid_RMSE'] = self.grid_RMSE
self.variables['grid_SRMSE'] = self.grid_SRMSE
self.variables['rollout_RMSE'] = self.rollout_RMSE
self.variables['rollout_SRMSE'] = self.rollout_SRMSE
self.variables['rollout_RMSE_init'] = self.rollout_RMSE_init
self.variables['rollout_SRMSE_init'] = self.rollout_SRMSE_init
self.variables['rollout_RMSE_output'] = self.rollout_RMSE_output
self.variables['rollout_SRMSE_output'] = self.rollout_SRMSE_output
if self.validation:
self.variables['X_val'] = self.X_val
self.variables['U_val'] = self.U_val
self.variables['train_idx'] = self.train_idx
self.variables['val_idx'] = self.val_idx
# Create unique results folder and save test data
params = str(np.random.uniform()) + '_' + sensitivity + '_' + str(
self.nb_samples) + 'samples_noise' + str(
self.true_meas_noise_var) + '_' + str(
# self.model.__class__.__name__)
self.NODE_model.__class__.__name__)
if 'difftraj' in self.__class__.__name__:
params = params + str(self.nb_difftraj)
params = params + '_' + str(
self.optim_method.__name__) + str(self.optim_lr)
if self.init_state_model:
params = params + '_' + str(self.init_state_obs_method) + str(
self.init_state_obs_T)
params = str(self.init_state_obs_method) + '/' + params
else:
params = 'x0/' + params
if self.ground_truth_approx:
params = self.data_folder.split('/')[-2] + '/' + params
elif self.nb_rollouts > 0:
params = str(self.nb_rollouts) + '_rollouts/' + params
self.results_folder = os.path.join(
str(LOCAL_PATH_TO_SRC), 'Figures', str(self.system), params)
os.makedirs(self.results_folder, exist_ok=False)
self.save_grid_variables(self.grid, self.grid_controls,
self.true_predicted_grid,
self.results_folder)
self.true_predicted_grid = torch.as_tensor(
self.true_predicted_grid, device=self.device)
self.grid = torch.as_tensor(self.grid, device=self.device)
self.grid_controls = torch.as_tensor(self.grid_controls,
device=self.device)
self.grid_variables = {
'Evaluation_grid': self.grid,
'Grid_controls': self.grid_controls,
'True_predicted_grid': self.true_predicted_grid}
save_rollout_variables(self, self.results_folder, self.nb_rollouts,
self.rollout_list, step=self.step,
ground_truth_approx=self.ground_truth_approx,
plots=self.monitor_experiment, NODE=True)
# Save log in results folder
os.rename(str(LOCAL_PATH_TO_SRC) + '/Figures/Logs/' + 'log' +
str(sys.argv[1]) + '.log',
os.path.join(self.results_folder,
'log' + str(sys.argv[1]) + '.log'))
save_log(self.results_folder)
if self.verbose:
logging.info(self.results_folder)
# self.save_hyperparameters() # Good practice but deepcopy hard...
def forward(self, x):
# Make predictions after training
return self.model(x)
def configure_optimizers(self):
# https://pytorch-lightning.readthedocs.io/en/stable/common/optimizers.html
# https://github.com/PyTorchLightning/pytorch-lightning/issues/2976
if self.config.optim_options:
optim_options = self.optim_options
else:
optim_options = {}
parameters = self.model.parameters()
optimizer = self.optim_method(parameters, self.optim_lr,
**optim_options)
if self.config.optim_scheduler:
if self.config.optim_scheduler_options:
optim_scheduler_options = self.optim_scheduler_options
else:
optim_scheduler_options = {}
scheduler = {
'scheduler': self.optim_scheduler(optimizer,
**optim_scheduler_options),
'monitor': 'train_loss'}
return [optimizer], [scheduler]
else:
return optimizer
def train_dataloader(self):
# Data = (idx, observations). If partial observations, use X_train =
# observations and observation function in self.model.forward_traj_obs
# Minibatches (over length of X_train) only appear after forward
# simulation of x(t) so no impact
train_dataset = TensorDataset(
torch.arange(len(self.X_train)), self.X_train)
train_loader = DataLoader(
train_dataset, batch_size=self.optim_minibatch_size,
shuffle=self.optim_shuffle)
return train_loader
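# A minimal sketch (hypothetical shapes) of the (index, observation)
# minibatching pattern used above: the whole trajectory is simulated once per
# step, and only the rows matching the minibatch indices enter the loss.
#
#   dataset = TensorDataset(torch.arange(len(X_train)), X_train)
#   loader = DataLoader(dataset, batch_size=32, shuffle=True)
#   for idx_batch, y_batch in loader:
#       x_traj = simulate_full_trajectory(x0)          # shape (len(X_train), n)
#       loss = ((observe(x_traj[idx_batch]) - y_batch) ** 2).mean()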
def training_step(self, batch, batch_idx):
# Forward pass NODE x0 -> xN, get samples matching batch, compute loss
idx_batch, y_batch = batch
if self.init_state_model:
init_state_estim = self.init_state_obs
else:
init_state_estim = self.init_state_estim
if (self.config.KKL_l2_reg is not None) and (
'optimD' in self.init_state_obs_method):
# Compute KKL traj to keep it for loss, then use same simulation
# to compute z(T) -> x0 -> x(t)
KKL_traj = self.init_state_model.simulate_ztraj(
init_state_estim[...,
:self.init_state_model.KKL_ODE_model.n],
self.init_state_model.z_t_eval)
z = torch.squeeze(self.init_state_model.simulate_zu(
init_state_estim, ztraj=KKL_traj))
init_state_estim = reshape_pt1(
self.init_state_model.init_state_model(z))
x_estim = self.NODE_model(init_state_estim)[idx_batch]
KKL_traj = KKL_traj[idx_batch]
else:
x_estim = self.model(init_state_estim)[idx_batch]
if self.config.KKL_l2_reg is not None:
KKL_traj = self.config.KKL_traj[idx_batch]
else:
KKL_traj = None
y_estim = self.observe_data_x(x_estim)
if self.no_control:
xu_estim = x_estim
else:
u_estim = self.config.controller(
self.t_eval, self.config, self.t0, self.init_control)[idx_batch]
xu_estim = torch.cat((x_estim, u_estim), dim=-1)
losses = self.NODE_model.loss(
y_estim=y_estim, y_true=y_batch, xu_estim=xu_estim,
KKL_traj=KKL_traj, scaler_Y=self.scaler_Y, scaler_X=self.scaler_X)
loss = sum(losses.values())
self.log('train_loss', loss, on_step=True, prog_bar=True, logger=True)
self.train_loss = torch.cat((self.train_loss, torch.tensor([loss])))
self.time = torch.cat((
self.time, torch.tensor([[time.time() - self.init_time]])), dim=0)
logs = {'train_loss': loss.detach()}
for key, val in losses.items():
logs.update({key: val.detach()})
self.log(key, val, prog_bar=False, logger=True)
return {'loss': loss, 'log': logs}
def val_dataloader(self):
# For now validation same as training, just for early stopping
if self.validation:
val_dataset = TensorDataset(
torch.arange(len(self.X_val)), self.X_val)
val_loader = DataLoader(
val_dataset, batch_size=self.optim_minibatch_size,
shuffle=self.optim_shuffle)
else:
val_dataset = TensorDataset(
torch.arange(len(self.X_train)), self.X_train)
val_loader = DataLoader(
val_dataset, batch_size=self.optim_minibatch_size,
shuffle=self.optim_shuffle)
return val_loader
def validation_step(self, batch, batch_idx):
# Validation is same as training, but on validation data if exists
# (otherwise just training loss again, used for early stopping)
with torch.no_grad():
if self.validation:
idx_batch, y_batch = batch
if self.init_state_model:
init_state_estim = self.init_state_obs_val
else:
init_state_estim = self.init_state_estim_val
if (self.config.KKL_l2_reg is not None) and (
'optimD' in self.init_state_obs_method):
# Compute KKL traj to keep it for loss, then use same simulation
# to compute z(T) -> x0 -> x(t)
KKL_traj = self.init_state_model.simulate_ztraj(
init_state_estim[...,
:self.init_state_model.KKL_ODE_model.n],
self.init_state_model.z_t_eval)
z = torch.squeeze(self.init_state_model.simulate_zu(
init_state_estim, ztraj=KKL_traj))
init_state_estim = reshape_pt1(
self.init_state_model.init_state_model(z))
x_estim = self.NODE_model(init_state_estim)[idx_batch]
KKL_traj = KKL_traj[idx_batch]
else:
x_estim = self.model(init_state_estim)[idx_batch]
if self.config.KKL_l2_reg is not None:
KKL_traj = self.config.KKL_traj[idx_batch]
else:
KKL_traj = None
y_estim = self.observe_data_x(x_estim)
if self.no_control:
xu_estim = x_estim
else:
u_estim = self.config.controller(
self.t_eval, self.config, self.t0,
self.init_control)[idx_batch]
xu_estim = torch.cat((x_estim, u_estim), dim=-1)
losses = self.NODE_model.loss(
y_estim=y_estim, y_true=y_batch, xu_estim=xu_estim,
KKL_traj=KKL_traj, scaler_Y=self.scaler_Y,
scaler_X=self.scaler_X)
loss = sum(losses.values())
self.log('val_loss', loss, on_step=True, prog_bar=True,
logger=True)
self.val_loss = torch.cat((self.val_loss, torch.tensor([loss])))
logs = {'val_loss': loss.detach()}
for key, val in losses.items():
logs.update({key: val.detach()})
self.log(key, val, prog_bar=False, logger=True)
else:
if len(self.train_loss) == 0:
loss = torch.tensor(np.nan)
else:
loss = self.train_loss[-1]
self.log('val_loss', loss, on_step=True, prog_bar=True,
logger=True)
logs = {'val_loss': loss.detach()}
return {'loss': loss, 'log': logs}
def train_forward_sensitivity(self):
# Train model with forward sensitivity method
# Only on full state data for autonomous systems!
self.losses = []
if self.config.optim_scheduler:
optimizer, scheduler = \
self.configure_optimizers()[0][0], \
self.configure_optimizers()[1][0]['scheduler']
else:
optimizer = self.configure_optimizers()[0]
# Prepare dataset, dataloader and iterators through data
xtraj_true = self.X_train
epochs_iter = tqdm.tqdm(range(self.trainer_options['max_epochs']),
desc="Epoch", leave=True)
# Forward pass: in each epoch and each minibatch, solve training data
# (x_estim, lambda_estim) for current weights, then optimize loss to get
# new weights. Minibatches go over fixed data = (idx_yobs, yobs)
for k in epochs_iter:
# Simulate x, lambda
traj_estim = dynamics_traj(x0=self.init_ext,
u=self.controller,
t0=self.t0, dt=self.dt,
init_control=self.init_control,
discrete=self.discrete,
version=self.dynext_forward_sensitivity,
meas_noise_var=0,
process_noise_var=0,
method=self.simu_solver,
t_eval=self.t_eval,
kwargs=self.config)
# Organize simulation results in (x, lambda) and keep only minibatch
xtraj_estim = traj_estim[:, :self.n]
lambdatraj_estim = traj_estim[:, self.n:]
lambdatraj_estim = lambdatraj_estim.reshape(
-1, self.n_param, self.n).permute(0, 2, 1)
# Compute loss, its gradient, step of optimizer and optimize param
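# With the forward sensitivity lambda(t) = dx(t)/dtheta simulated alongside
# x(t), the loss L = 1/2 * sum_t ||x_est(t) - x_true(t)||^2 has the analytic
# gradient dL/dtheta = sum_t (x_est(t) - x_true(t))^T lambda(t), which is
# exactly what 'dloss' computes below.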
loss = 1. / 2 * torch.sum(
torch.square(xtraj_estim - xtraj_true))
self.losses.append(loss.item())
dloss = torch.sum(
torch.matmul((xtraj_estim - xtraj_true).unsqueeze(1),
lambdatraj_estim).squeeze(), dim=0)
# Take a step of gradient descent, this time grad is needed
# Manually set grad for each param
param_beg = 0
for name, parameter in self.model.named_parameters():
param_end = param_beg | |
position
break
if restricted_position is None:
restricted_position = agent_path[1]
amt_positions -= 1
return restricted_position, amt_positions
def get_restricted_area_constraints(graph,
fst_handle,
fst_agent_path,
snd_handle,
snd_agent_path,
time_step):
"""Computes the constraints for two agents, given they are colliding.
:param graph: The graph representation of the environment.
:param fst_handle: The handle of the first agent.
:param fst_agent_path: The path (previous solution) of the first agent.
:param snd_handle: The handle of the second agent.
:param snd_agent_path: The path (previous solution) of the second agent.
:param time_step: The time step when the agents collide.
:return: A list of length two. The first entry contains the constraints for the first agent.
The second entry contains the constraints for the second agent.
"""
restricted_positions = []
backtracking_steps = []
for path in [fst_agent_path, snd_agent_path]:
r_pos, b_steps = get_restricted_position(graph, path, time_step)
restricted_positions.append(r_pos)
backtracking_steps.append(b_steps)
restricted_area, start_times, end_times = get_restricted_area(
fst_agent_path,
snd_agent_path,
restricted_positions[0],
restricted_positions[1],
time_step
)
# Add constraints for the first agent
is_on_spawn = 2 * int(fst_agent_path[0] != fst_agent_path[1] and restricted_positions[0] == fst_agent_path[1])
new_constraints = [[
(fst_handle, restricted_positions[0], ts) for ts in
range(start_times[0], end_times[1] + is_on_spawn)
]]
# Add constraints for the second agent
is_on_spawn = 2 * int(snd_agent_path[0] != snd_agent_path[1] and restricted_positions[1] == snd_agent_path[1])
new_constraints.append([
(snd_handle, restricted_positions[1], ts) for ts in
range(start_times[1], end_times[0] + is_on_spawn)
])
return new_constraints
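# A minimal sketch of the returned structure (values hypothetical): one list of
# (handle, position, time_step) constraints per colliding agent, e.g.
#
#   [[(0, (3, 5), 7), (0, (3, 5), 8)],    # constraints for the first agent
#    [(1, (3, 6), 7), (1, (3, 6), 8)]]    # constraints for the second agent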
def agent_is_active(agent_path, time_step):
"""Checks whether or not an agent changes his position at any time in the future.
:param agent_path: The agent's solution.
:param time_step: The time step, from which one the path should be checked.
:return: Whether or not the agent will move at any time in the future.
"""
first_pos = agent_path[0]
for next_pos in agent_path[1:time_step + 1]:
if next_pos != first_pos:
return True
return False
def validate(env, all_solutions):
"""The validation part of the CBS algorithm.
Checks whether a given solution (containing solutions for all agents) causes a deadlock
between any pair of agents. Breaks immediately if a collision is found between two agents.
:param env: The environment.
:param all_solutions: A solution for all agents given by the low level part of the CBS
algorithm.
:return: Returns a list of length two if a collision is found
(see 'get_restricted_area_constraints'). Returns an empty list if no problems could
be identified.
"""
new_constraints = []
for fst_handle, (fst_cost, fst_agent_path) in enumerate(all_solutions[:-1]):
for snd_handle, (snd_cost, snd_agent_path) in enumerate(all_solutions[fst_handle + 1:],
fst_handle + 1):
min_length = min(fst_cost, snd_cost)
for time_step in range(min_length):
fst_current_pos = fst_agent_path[time_step]
snd_current_pos = snd_agent_path[time_step]
fst_agent_active = agent_is_active(fst_agent_path, time_step)
snd_agent_active = agent_is_active(snd_agent_path, time_step)
if time_step < min_length - 1:
fst_next_pos = fst_agent_path[time_step + 1]
snd_next_pos = snd_agent_path[time_step + 1]
fst_agent_active = agent_is_active(fst_agent_path, time_step + 1)
snd_agent_active = agent_is_active(snd_agent_path, time_step + 1)
if all([fst_agent_active, snd_agent_active]):
# Two agents cannot pass each other on the same railway
if fst_current_pos == snd_next_pos and fst_next_pos == snd_current_pos:
return get_restricted_area_constraints(
env.graph,
fst_handle,
fst_agent_path,
snd_handle,
snd_agent_path,
time_step
)
# Driving agent tries to pass a stopped agent on the same railway
if fst_next_pos == snd_current_pos and fst_next_pos == snd_next_pos:
return get_restricted_area_constraints(
env.graph,
fst_handle,
fst_agent_path,
snd_handle,
snd_agent_path,
time_step
)
# Symmetric case: the second agent tries to pass the stopped first agent on the same railway
if snd_next_pos == fst_current_pos and snd_next_pos == fst_next_pos:
return get_restricted_area_constraints(
env.graph,
fst_handle,
fst_agent_path,
snd_handle,
snd_agent_path,
time_step
)
# Two agents cannot occupy one position
if fst_current_pos == snd_current_pos and all([fst_agent_active, snd_agent_active]):
new_constraints.append([(fst_handle, snd_current_pos, time_step)])
new_constraints.append([(snd_handle, fst_current_pos, time_step)])
return new_constraints
return new_constraints
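# A minimal, self-contained sketch (hypothetical helper, independent of the
# Flatland environment) of the swap-conflict test used in 'validate': two agents
# exchanging cells between consecutive time steps can never both be valid.
def _has_swap_conflict_demo(path_a, path_b):
    # paths are lists of (row, col) positions indexed by time step
    for t in range(min(len(path_a), len(path_b)) - 1):
        if path_a[t] == path_b[t + 1] and path_a[t + 1] == path_b[t]:
            return True
    return False
# Example: _has_swap_conflict_demo([(0, 0), (0, 1)], [(0, 1), (0, 0)]) -> True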
def visualize_ct_node(env, node):
"""Debugging tool: Visualizes nodes from the constraint tree as colored graphs.
:param env: The environment.
:param node: A node from the constraint tree to be visualized.
"""
solution = node[1]
constraints = node[2]
agent_positions = [get_agent_position(env, handle) for handle in env.get_agent_handles()]
agent_targets = [a.target for a in env.agents]
color_map = []
label_dict = {}
for node in env.graph:
label_dict[node] = ""
color = "grey"
for handle, path in enumerate(solution):
if node in path[1]:
label_dict[node] += f"{handle},"
if node in agent_positions:
label_dict[node] = agent_positions.index(node)
color = "green"
elif node in agent_targets:
label_dict[node] = agent_targets.index(node)
color = "yellow"
for handle, pos, time_step in constraints:
if node == pos:
label_dict[node] = f"{handle, time_step}"
if color == "grey":
color = "red"
elif color == "yellow":
color = "black"
elif color == "green":
color = "purple"
break
color_map.append(color)
fig, ax = plt.subplots(figsize=(8, 6))
nx.draw(
env.graph,
pos={node_key: node_key[::-1] for node_key in list(env.graph.nodes)},
node_size=50,
node_color=color_map,
labels=label_dict,
with_labels=True,
font_size=10
)
plt.gca().invert_yaxis()
plt.axis("on")
ax.tick_params(left=True, bottom=True, labelleft=True, labelbottom=True)
plt.grid(True)
plt.show()
def cbs(env, verbose=True):
"""Conflict based search for the Flatland MAPF-problem.
Conflict based search as described in:
@article{SHARON201540,
title = {Conflict-based search for optimal multi-agent pathfinding},
journal = {Artificial Intelligence},
volume = {219},
pages = {40-66},
year = {2015},
issn = {0004-3702},
doi = {https://doi.org/10.1016/j.artint.2014.11.006},
url = {https://www.sciencedirect.com/science/article/pii/S0004370214001386},
author = {<NAME> and <NAME> and <NAME> and <NAME>},
keywords = {Heuristic search, Multi-agent, Pathfinding},
}
:return: Returns a list of paths with their costs as a solution for a given environment.
Returns an empty list if no solution could be found in time (Execution terminates
after a given amount of tries).
"""
open_ = []
root_constraints = []
root_solution = low_level(env=env, constraints=[], verbose=verbose)
if not bool(root_solution):
return []
root_cost = max(root_solution)[0]
# A node is a tuple described by the following entries: (cost, solution, constraints)
IDX_SOLUTION = 1
IDX_CONSTRAINTS = 2
root_node = (root_cost, root_solution, root_constraints)
open_.append(root_node)
create_new_nodes = True
while bool(open_):
if verbose:
sys.stdout.write(f"\rCBS: Amount of unvalidated constraint nodes: {len(open_)}")
# if len(open_) >= 500:
# return []
if len(open_) == 50:
create_new_nodes = False
# Choose node with lowest cost from open_
node = heapq.heappop(open_)
# visualize_ct_node(env, node)
# If validation finds no conflicts, return solution
node_constraints = validate(env, node[IDX_SOLUTION])
if not bool(node_constraints):
return node[IDX_SOLUTION]
# If validation finds conflicts, add new constraints to constraint tree
if create_new_nodes:
for new_constraints in node_constraints:
if len(new_constraints) > 0:
new_node_constraints = node[IDX_CONSTRAINTS].copy()
new_node_constraints.extend(new_constraints)
new_node_solution = low_level(
env=env,
constraints=new_node_constraints,
verbose=verbose
)
if not bool(new_node_solution):
continue
new_node_cost = max(new_node_solution)[0]
heapq.heappush(open_, (new_node_cost, new_node_solution, new_node_constraints))
return []
def get_agent_position(env, handle):
"""Returns the agent position of agent referenced by 'handle'.
:param env: The environment.
:param handle: The handle of the agent for which the position shall be found.
:return: The agent's position.
"""
agent = env.agents[handle]
if agent.status == RailAgentStatus.READY_TO_DEPART:
agent_position = agent.initial_position
elif agent.status == RailAgentStatus.ACTIVE:
agent_position = agent.position
elif agent.status == RailAgentStatus.DONE:
agent_position = agent.target
else:
agent_position = agent.target
return agent_position
def translate_path_into_actions(rail_env, pathways, time_step):
"""Translates the solution paths of the CBS algorithm into an action for a given time step.
:param rail_env: The Flatland environment.
:param pathways: The solution paths of the CBS algorithm.
:param time_step: The time step for which an action is needed.
:return: An array of actions.
"""
acts = np.full(len(rail_env.get_agent_handles()), -1, dtype=np.int32)
if time_step >= len(pathways[0]) - 1:
return np.full(len(rail_env.get_agent_handles()), 4, dtype=np.int32)
for agent_handle, pathway in enumerate(pathways):
if np.all(pathway[time_step:] == pathway[time_step]):
rail_env.agents[agent_handle].status = RailAgentStatus.DONE_REMOVED
acts[agent_handle] = 4
current_positions = pathways[:, time_step]
next_positions = pathways[:, time_step + 1]
differences = next_positions - current_positions
for agent_handle, diff in enumerate(differences):
if acts[agent_handle] != -1:
continue
# Do not activate agents if they are located in their 'spawn' and shall not move
if rail_env.agents[agent_handle].status == RailAgentStatus.READY_TO_DEPART \
and np.array_equal(diff, [0, 0]):
acts[agent_handle] = 0
continue
# Activate agents otherwise
elif rail_env.agents[agent_handle].status == RailAgentStatus.READY_TO_DEPART:
rail_env.agents[agent_handle].position = rail_env.agents[agent_handle].initial_position
rail_env.agents[agent_handle].status = RailAgentStatus.ACTIVE
if np.array_equal(diff, [-1, 0]):
cardinal_dir_next_pos = 0
elif np.array_equal(diff, [0, 1]):
cardinal_dir_next_pos = 1
elif np.array_equal(diff, [1, 0]):
cardinal_dir_next_pos = 2
elif np.array_equal(diff, [0, -1]):
cardinal_dir_next_pos = 3
elif np.array_equal(diff, [0, 0]):
acts[agent_handle] = 4
else:
raise RuntimeError("Something went wrong!")
if acts[agent_handle] == -1:
agent_orientation = rail_env.agents[agent_handle].direction
action = (cardinal_dir_next_pos - agent_orientation + 2) % 4
acts[agent_handle] = action
return acts
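# A minimal sketch of the mapping used above (assuming Flatland's conventions:
# directions 0=N, 1=E, 2=S, 3=W and actions 1=left, 2=forward, 3=right): the
# action is the rotation between the agent's heading and the heading towards the
# next cell, offset so that "no rotation" maps to MOVE_FORWARD (2).
def _direction_to_action_demo(cardinal_dir_next_pos, agent_orientation):
    return (cardinal_dir_next_pos - agent_orientation + 2) % 4
# Example: facing east (1) with the next cell to the east (1) -> action 2 (forward).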
# Check the cbs algorithm
# (currently buggy because of changes at a different location in the code)
if __name__ == "__main__":
width = 24
height = 24
max_episode_no = 10
amount_agents = 6
amount_cities = 2
max_steps = int(4 * 2 * (width + height + (amount_agents / amount_cities)))
seed = 420
random.seed(seed)
np.random.seed(seed)
r_env = | |
""", 'x', [True, 3, 4, 6]
def test_type_of_constants(self):
yield self.simple_test, "x=[0, 0L]", 'type(x[1])', long
yield self.simple_test, "x=[(1,0), (1,0L)]", 'type(x[1][1])', long
yield self.simple_test, "x=['2?-', '2?-']", 'id(x[0])==id(x[1])', True
def test_pprint(self):
# a larger example that showed a bug with jumps
# over more than 256 bytes
decl = py.code.Source("""
def _safe_repr(object, context, maxlevels, level):
typ = type(object)
if typ is str:
if 'locale' not in _sys.modules:
return repr(object), True, False
if "'" in object and '"' not in object:
closure = '"'
quotes = {'"': '\\"'}
else:
closure = "'"
quotes = {"'": "\\'"}
qget = quotes.get
sio = _StringIO()
write = sio.write
for char in object:
if char.isalpha():
write(char)
else:
write(qget(char, repr(char)[1:-1]))
return ("%s%s%s" % (closure, sio.getvalue(), closure)), True, False
r = getattr(typ, "__repr__", None)
if issubclass(typ, dict) and r is dict.__repr__:
if not object:
return "{}", True, False
objid = id(object)
if maxlevels and level > maxlevels:
return "{...}", False, objid in context
if objid in context:
return _recursion(object), False, True
context[objid] = 1
readable = True
recursive = False
components = []
append = components.append
level += 1
saferepr = _safe_repr
for k, v in object.iteritems():
krepr, kreadable, krecur = saferepr(k, context, maxlevels, level)
vrepr, vreadable, vrecur = saferepr(v, context, maxlevels, level)
append("%s: %s" % (krepr, vrepr))
readable = readable and kreadable and vreadable
if krecur or vrecur:
recursive = True
del context[objid]
return "{%s}" % ', '.join(components), readable, recursive
if (issubclass(typ, list) and r is list.__repr__) or \
(issubclass(typ, tuple) and r is tuple.__repr__):
if issubclass(typ, list):
if not object:
return "[]", True, False
format = "[%s]"
elif _len(object) == 1:
format = "(%s,)"
else:
if not object:
return "()", True, False
format = "(%s)"
objid = id(object)
if maxlevels and level > maxlevels:
return format % "...", False, objid in context
if objid in context:
return _recursion(object), False, True
context[objid] = 1
readable = True
recursive = False
components = []
append = components.append
level += 1
for o in object:
orepr, oreadable, orecur = _safe_repr(o, context, maxlevels, level)
append(orepr)
if not oreadable:
readable = False
if orecur:
recursive = True
del context[objid]
return format % ', '.join(components), readable, recursive
rep = repr(object)
return rep, (rep and not rep.startswith('<')), False
""")
decl = str(decl) + '\n'
g = {}
exec decl in g
expected = g['_safe_repr']([5], {}, 3, 0)
yield self.st, decl + 'x=_safe_repr([5], {}, 3, 0)', 'x', expected
def test_mapping_test(self):
decl = py.code.Source("""
class X(object):
reference = {1:2, "key1":"value1", "key2":(1,2,3)}
key, value = reference.popitem()
other = {key:value}
key, value = reference.popitem()
inmapping = {key:value}
reference[key] = value
def _empty_mapping(self):
return {}
_full_mapping = dict
def assertEqual(self, x, y):
assert x == y
failUnlessRaises = staticmethod(raises)
def assert_(self, x):
assert x
def failIf(self, x):
assert not x
def test_read(self):
# Test for read only operations on mapping
p = self._empty_mapping()
p1 = dict(p) #workaround for singleton objects
d = self._full_mapping(self.reference)
if d is p:
p = p1
#Indexing
for key, value in self.reference.items():
self.assertEqual(d[key], value)
knownkey = self.other.keys()[0]
self.failUnlessRaises(KeyError, lambda:d[knownkey])
#len
self.assertEqual(len(p), 0)
self.assertEqual(len(d), len(self.reference))
#has_key
for k in self.reference:
self.assert_(d.has_key(k))
self.assert_(k in d)
for k in self.other:
self.failIf(d.has_key(k))
self.failIf(k in d)
#cmp
self.assertEqual(cmp(p,p), 0)
self.assertEqual(cmp(d,d), 0)
self.assertEqual(cmp(p,d), -1)
self.assertEqual(cmp(d,p), 1)
#__non__zero__
if p: self.fail("Empty mapping must compare to False")
if not d: self.fail("Full mapping must compare to True")
# keys(), items(), iterkeys() ...
def check_iterandlist(iter, lst, ref):
self.assert_(hasattr(iter, 'next'))
self.assert_(hasattr(iter, '__iter__'))
x = list(iter)
self.assert_(set(x)==set(lst)==set(ref))
check_iterandlist(d.iterkeys(), d.keys(), self.reference.keys())
check_iterandlist(iter(d), d.keys(), self.reference.keys())
check_iterandlist(d.itervalues(), d.values(), self.reference.values())
check_iterandlist(d.iteritems(), d.items(), self.reference.items())
#get
key, value = d.iteritems().next()
knownkey, knownvalue = self.other.iteritems().next()
self.assertEqual(d.get(key, knownvalue), value)
self.assertEqual(d.get(knownkey, knownvalue), knownvalue)
self.failIf(knownkey in d)
return 42
""")
decl = str(decl) + '\n'
yield self.simple_test, decl + 'r = test_read(X())', 'r', 42
def test_stack_depth_bug(self):
decl = py.code.Source("""
class A:
def initialize(self):
# install all the MultiMethods into the space instance
if isinstance(mm, object):
def make_boundmethod(func=func):
def boundmethod(*args):
return func(self, *args)
r = None
""")
decl = str(decl) + '\n'
yield self.simple_test, decl, 'r', None
def test_indentation_error(self):
source = py.code.Source("""
x
y
""")
try:
self.simple_test(source, None, None)
except IndentationError as e:
assert e.msg == 'unexpected indent'
else:
raise Exception("DID NOT RAISE")
def test_no_indent(self):
source = py.code.Source("""
def f():
xxx
""")
try:
self.simple_test(source, None, None)
except IndentationError as e:
assert e.msg == 'expected an indented block'
else:
raise Exception("DID NOT RAISE")
def test_indent_error_filename(self):
source = py.code.Source("""
def f():
x
y
""")
try:
self.simple_test(source, None, None)
except IndentationError as e:
assert e.filename == '<test>'
else:
raise Exception("DID NOT RAISE")
def test_kwargs_last(self):
py.test.raises(SyntaxError, self.simple_test, "int(base=10, '2')",
None, None)
def test_crap_after_starargs(self):
source = "call(*args, *args)"
py.test.raises(SyntaxError, self.simple_test, source, None, None)
def test_not_a_name(self):
source = "call(a, b, c, 3=3)"
py.test.raises(SyntaxError, self.simple_test, source, None, None)
def test_assignment_to_call_func(self):
source = "call(a, b, c) = 3"
py.test.raises(SyntaxError, self.simple_test, source, None, None)
def test_augassig_to_sequence(self):
source = "a, b += 3"
py.test.raises(SyntaxError, self.simple_test, source, None, None)
def test_broken_setups(self):
source = """if 1:
try:
break
finally:
pass
"""
py.test.raises(SyntaxError, self.simple_test, source, None, None)
def test_unpack_singletuple(self):
source = """if 1:
l = []
for x, in [(1,), (2,)]:
l.append(x)
"""
self.simple_test(source, 'l', [1, 2])
def test_unpack_wrong_stackeffect(self):
source = """if 1:
l = [1, 2]
a, b = l
a, b = l
a, b = l
a, b = l
a, b = l
a, b = l
"""
code = compile_with_astcompiler(source, 'exec', self.space)
assert code.co_stacksize == 2
def test_stackeffect_bug3(self):
source = """if 1:
try: pass
finally: pass
try: pass
finally: pass
try: pass
finally: pass
try: pass
finally: pass
try: pass
finally: pass
try: pass
finally: pass
"""
code = compile_with_astcompiler(source, 'exec', self.space)
assert code.co_stacksize == 3
def test_stackeffect_bug4(self):
source = """if 1:
with a: pass
with a: pass
with a: pass
with a: pass
with a: pass
with a: pass
"""
code = compile_with_astcompiler(source, 'exec', self.space)
assert code.co_stacksize == 4
def test_stackeffect_bug5(self):
source = """if 1:
a[:]; a[:]; a[:]; a[:]; a[:]; a[:]
a[1:]; a[1:]; a[1:]; a[1:]; a[1:]; a[1:]
a[:2]; a[:2]; a[:2]; a[:2]; a[:2]; a[:2]
a[1:2]; a[1:2]; a[1:2]; a[1:2]; a[1:2]; a[1:2]
"""
code = compile_with_astcompiler(source, 'exec', self.space)
assert code.co_stacksize == 3
def test_stackeffect_bug6(self):
source = """if 1:
{1}; {1}; {1}; {1}; {1}; {1}; {1}
"""
code = compile_with_astcompiler(source, 'exec', self.space)
assert code.co_stacksize == 1
def test_stackeffect_bug7(self):
source = '''def f():
for i in a:
return
'''
code = compile_with_astcompiler(source, 'exec', self.space)
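# A minimal sketch (hypothetical, using CPython's builtin compile instead of
# compile_with_astcompiler) of what the stack-effect tests above assert: the
# compiler must not report a stack depth larger than the code actually needs.
#
#   code = compile("a[1:2]; a[1:2]\n", "<test>", "exec")
#   assert code.co_stacksize <= 3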
def test_lambda(self):
yield self.st, "y = lambda x: x", "y(4)", 4
def test_backquote_repr(self):
yield self.st, "x = None; y = `x`", "y", "None"
def test_deleting_attributes(self):
test = """if 1:
class X():
x = 3
del X.x
try:
X.x
except AttributeError:
pass
else:
raise AssertionError("attribute not removed")"""
yield self.st, test, "X.__name__", "X"
def test_lots_of_loops(self):
source = "for x in y: pass\n" * 1000
compile_with_astcompiler(source, 'exec', self.space)
def test_assign_to_empty_list_1(self):
source = """if 1:
for i in range(5):
del []
[] = ()
[] = []
[] = [] = []
ok = 1
"""
self.simple_test(source, 'ok', 1)
def test_assign_to_empty_list_2(self):
source = """if 1:
for i in range(5):
try: [] = 1, 2, 3
except ValueError: pass
else: raise AssertionError
try: [] = a = 1
except TypeError: pass
else: raise AssertionError
try: [] = _ = iter(['foo'])
except ValueError: pass
else: raise AssertionError
try: [], _ = iter(['foo']), 1
except ValueError: pass
else: raise AssertionError
ok = 1
"""
self.simple_test(source, 'ok', 1)
@py.test.mark.parametrize('expr, result', [
("f1.__doc__", None),
("f2.__doc__", 'docstring'),
("f2()", 'docstring'),
("f3.__doc__", None),
("f3()", 'bar'),
("C1.__doc__", None),
("C2.__doc__", 'docstring'),
("C3.field", 'not docstring'),
("C4.field", 'docstring'),
("C4.__doc__", 'docstring'),
("C4.__doc__", 'docstring'),
("__doc__", None),])
def test_remove_docstring(self, expr, result):
source = '"module_docstring"\n' + """if 1:
def f1():
'docstring'
def f2():
'docstring'
return 'docstring'
def f3():
'foo'
return 'bar'
class C1():
'docstring'
class C2():
__doc__ = 'docstring'
class C3():
field = 'not docstring'
class C4():
'docstring'
field = 'docstring'
"""
code_w = compile_with_astcompiler(source, 'exec', self.space)
code_w.remove_docstrings(self.space)
dict_w = self.space.newdict();
code_w.exec_code(self.space, dict_w, dict_w)
self.check(dict_w, expr, result)
def test_assert_skipping(self):
space = self.space
mod = space.getbuiltinmodule('__pypy__')
w_set_debug = | |
import numpy as np
import torch
import copy, os
from collections import OrderedDict
from util.util import util
from util.image_pool import ImagePool
from .base_model import BaseModel
from . import networks
import glob
import torch.nn.functional as F
import cv2
from skimage import io
def norm_image(image):
"""
:param image: image with [H,W,C]
:return: image in np uint8
"""
image = image.copy()
image -= max(np.min(image), 0)  # clamp the subtracted minimum at zero (np.max's 2nd positional arg would be the axis)
image /= np.max(image)
image *= 255.
return np.uint8(image)
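# A minimal usage sketch (hypothetical array): scales a float image so that its
# values span [0, 255] before casting to uint8, e.g. for heatmap overlays.
#
#   demo = norm_image(np.random.rand(64, 64, 3).astype(np.float32))
#   assert demo.dtype == np.uint8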
class DISAM_Model(BaseModel):
def name(self):
return 'DISAM_Model'
def __init__(self, opt):
super(DISAM_Model, self).__init__(opt)
self.n_domains = opt.n_domains
self.DA, self.DB = None, None
self.real_A = self.Tensor(opt.batchSize, opt.input_nc, opt.fineSize, opt.fineSize)
self.real_B = self.Tensor(opt.batchSize, opt.output_nc, opt.fineSize, opt.fineSize)
# used metrics
self.cos = torch.nn.CosineSimilarity(dim=0, eps=1e-8)
self.mean_cos = torch.nn.CosineSimilarity(dim=1, eps=1e-8)
self.L2loss = torch.nn.MSELoss()
# load/define networks
self.netG = networks.define_G(opt.input_nc, opt.output_nc, opt.ngf,
opt.netG_n_blocks, opt.netG_n_shared,
self.n_domains, opt.norm, opt.use_dropout, self.gpu_ids)
if not self.isTrain:
self.use_two_stage = opt.use_two_stage
if self.use_two_stage:
self.netG_finer = networks.define_G(opt.input_nc, opt.output_nc, opt.ngf,
opt.netG_n_blocks, opt.netG_n_shared,
self.n_domains, opt.norm, opt.use_dropout, self.gpu_ids)
self.top_n = opt.top_n
self.last_retrieval_index_c0 = 0
self.last_retrieval_index_c1 = 0
self.last_domain = 0
if self.isTrain:
blur_fn = lambda x: torch.nn.functional.conv2d(x, self.Tensor(util().gkern_2d()), groups=3, padding=2)
self.netD = networks.define_D(opt.output_nc, opt.ndf, opt.netD_n_layers,
self.n_domains, blur_fn, opt.norm, self.gpu_ids)
if not self.isTrain or opt.continue_train:
which_epoch = opt.which_epoch
self.load_network(self.netG, 'G', which_epoch)
if not self.isTrain:
if opt.use_two_stage:
self.load_network(self.netG_finer, 'G', opt.which_epoch_finer, self.use_two_stage)
if self.isTrain:
self.load_network(self.netD, 'D', which_epoch)
if not self.isTrain:
self.test_using_cos = opt.test_using_cos
# used for retrieval
self.database_feature_c0 = []
self.database_path_c0 = []
self.database_feature_c1 = []
self.database_path_c1 = []
self.database_dist_list_c0 = [] # only for visualization
self.query_feature_list = []
self.dist_mat_torch = None
self.robotcar_database = []
if self.isTrain:
self.neg_B = self.Tensor(opt.num_hard_neg, opt.input_nc, opt.fineSize, opt.fineSize)
self.train_using_cos = opt.train_using_cos
self.fake_pools = [ImagePool(opt.pool_size) for _ in range(self.n_domains)]
# used in the adaptive triplet loss
self.margin = opt.margin
self.adapt = opt.adapt
self.margin_sam_triplet = opt.margin_sam_triplet
self.adapt_sam_triplet = opt.adapt_sam_triplet
self.use_realAB_as_negative = opt.use_realAB_as_negative
self.hard_negative = opt.hard_negative
# define loss functions
self.criterionCycle = torch.nn.SmoothL1Loss()
self.criterionGAN = networks.GANLoss(use_lsgan=not opt.no_lsgan, tensor=self.Tensor)
# initialize optimizers
self.netG.init_optimizers(torch.optim.Adam, opt.lr, (opt.beta1, 0.999))
self.netD.init_optimizers(torch.optim.Adam, opt.lr, (opt.beta1, 0.999))
# initialize loss storage
self.loss_D, self.loss_G = [0] * self.n_domains, [0] * self.n_domains
self.loss_cycle = [0] * self.n_domains
self.loss_triplet = [0] * self.n_domains
self.loss_sam = [0] * self.n_domains
self.loss_sam_triplet = [0] * self.n_domains
self.feature_distance = [0] * self.n_domains
self.feature_cos = [0] * self.n_domains
self.use_cos_latent_with_L2 = opt.use_cos_latent_with_L2
# initialize loss multipliers
self.lambda_triplet, self.lambda_cyc, self.lambda_latent = opt.lambda_triplet, opt.lambda_cycle, opt.lambda_latent
self.lambda_sam, self.lambda_sam_triplet = opt.lambda_sam, opt.lambda_sam_triplet
def set_input(self, input):
input_A = input['A']
self.real_A.resize_(input_A.size()).copy_(input_A)
self.DA = input['DA'][0]
if self.isTrain:
input_B = input['B']
self.real_B.resize_(input_B.size()).copy_(input_B)
self.DB = input['DB'][0]
if self.hard_negative:
self.neg_B = input['neg_B_tensor'][0].cuda()
self.neg_DB_list = input['neg_DB_list'][0]
self.image_paths = input['path']
def image_retrieval(self, query_encoded, query_path, query_encoded_finer=None, test_index=-1):
"""
Used to retrieve the target image in the database given the query encoded feature
:param query_encoded: the query code
:param query_path: the path of query image
:param query_encoded_finer: the query code in the finer retrieval model
:param test_index: the index of input query images when testing
:return: the retrieved image path and the encoded feature in the database
"""
min_dix = 100000
if self.use_two_stage:
top_n_tensor = torch.ones(self.top_n) * 100000
top_n_tensor = top_n_tensor.cuda()
top_n_index = torch.ones(self.top_n)
path = None
final_index = 0
if query_path.split('/')[-1][11] == '0':
# for c0, camera 0 in the CMU-Seasons dataset
self.database_dist_list_c0 = []
for i, db_path in enumerate(self.database_path_c0):
if self.test_using_cos:
# use the cosine retrieval metric
if self.opt.mean_cos:
dist = -self.mean_cos(query_encoded.view(256, -1),
self.database_feature_c0[i][0].view(256, -1)).mean(0)
else:
dist = -self.cos(query_encoded.view(-1),
self.database_feature_c0[i][0].view(-1))
else:
# use L2 metric
dist = self.L2loss(query_encoded.view(-1), self.database_feature_c0[i][0].view(-1))
self.database_dist_list_c0.append(dist.item())
if not self.use_two_stage:
if dist < min_dix:
min_dix = dist
final_index = i
path = db_path
else:
# find top N for finer retrieval
if dist < top_n_tensor[self.top_n - 1]:
top_n_tensor[self.top_n - 1] = dist
top_n_index[self.top_n - 1] = i
tmp = top_n_tensor.sort()
top_n_tensor = tmp[0]
top_n_index = top_n_index[tmp[1]]
if self.use_two_stage:
# from coarse to fine strategy
for i in list(range(self.top_n)):
if self.test_using_cos:
if self.opt.meancos_finer:
dist = -self.mean_cos(query_encoded_finer.view(256, -1),
self.database_feature_c0[top_n_index[i].int()][1].view(256, -1)).mean(0)
else:
dist = -self.cos(query_encoded_finer.view(-1),
self.database_feature_c0[top_n_index[i].int()][1].view(-1))
else:
dist = self.L2loss(query_encoded_finer.view(-1),
self.database_feature_c0[top_n_index[i].int()][1].view(-1))
if dist < min_dix:
min_dix = dist
final_index = top_n_index[i].int()
path = self.database_path_c0[final_index]
if self.opt.save_sam_visualization and test_index % 10 == 0:
# save the visualized SAM maps
self.find_grad_sam(query_encoded_finer, query_path, self.database_feature_c0[
self.database_dist_list_c0.index(sorted(self.database_dist_list_c0)[100])][1], test_index, 100)
self.find_grad_sam(self.database_feature_c0[
self.database_dist_list_c0.index(sorted(self.database_dist_list_c0)[100])][
1], self.database_path_c0[
self.database_dist_list_c0.index(sorted(self.database_dist_list_c0)[100])],
query_encoded_finer, test_index, 100)
self.find_grad_sam(query_encoded_finer, self.image_paths[0],
self.database_feature_c0[final_index][1], test_index)
self.find_grad_sam(self.database_feature_c0[final_index][1], path, query_encoded_finer, test_index)
print("Minimun distance is :", min_dix.item(), " least index: ", final_index)
print("Retrieved path: ", path.split('/')[-1], " query path: ", query_path.split('/')[-1])
else:
for i, db_path in enumerate(self.database_path_c1):
# for camera 1
if self.test_using_cos:
if self.opt.mean_cos:
dist = -self.mean_cos(query_encoded.view(256, -1),
self.database_feature_c1[i][0].view(256, -1)).mean(0)
else:
dist = -self.cos(query_encoded.view(-1),
self.database_feature_c1[i][0].view(-1)) # + L2loss(query_encoded,item[1])*0
else:
dist = self.L2loss(query_encoded.view(-1), self.database_feature_c1[i][0].view(-1))
if not self.use_two_stage:
if dist < min_dix:
min_dix = dist
final_index = i
path = db_path
else:
if dist < top_n_tensor[self.top_n - 1]:
top_n_tensor[self.top_n - 1] = dist
top_n_index[self.top_n - 1] = i
tmp = top_n_tensor.sort()
top_n_tensor = tmp[0]
top_n_index = top_n_index[tmp[1]]
if self.use_two_stage:
for i in list(range(self.top_n)):
if self.test_using_cos:
if self.opt.meancos_finer:
dist = -self.mean_cos(query_encoded_finer.view(256, -1),
self.database_feature_c1[top_n_index[i].int()][1].view(256, -1)).mean(0)
else:
dist = -self.cos(query_encoded_finer.view(-1),
self.database_feature_c1[top_n_index[i].int()][1].view(-1))
else:
dist = self.L2loss(query_encoded_finer.view(-1),
self.database_feature_c1[top_n_index[i].int()][1].view(-1))
if dist < min_dix:
min_dix = dist
final_index = top_n_index[i].int()
path = self.database_path_c1[final_index]
print("Minimun distance is :", min_dix.item(), " least index: ", final_index)
print("Retrieved path: ", path.split('/')[-1], " query path: ", query_path.split('/')[-1])
if query_path.split('/')[-1][11] == '0':
if self.use_two_stage:
return path, self.database_feature_c0[final_index][1]
else:
return path, self.database_feature_c0[final_index][0]
else:
if self.use_two_stage:
return path, self.database_feature_c1[final_index][1]
else:
return path, self.database_feature_c1[final_index][0]
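# A minimal sketch of the channel-wise mean cosine metric used above
# (hypothetical shapes; 256 feature channels flattened over the spatial grid):
# the distance is the negative cosine similarity averaged over channels, so a
# smaller value means a better match.
#
#   def mean_cos_distance(query, db, n_channels=256):
#       cos = torch.nn.CosineSimilarity(dim=1, eps=1e-8)
#       return -cos(query.view(n_channels, -1), db.view(n_channels, -1)).mean(0)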
def test(self, index=0):
with torch.no_grad():
self.visuals = [self.real_A]
self.labels = ['query_image_%d' % self.DA]
raw_encoded = self.netG.encode(self.real_A, self.DA)
raw_encoded_finer = None
if self.use_two_stage: raw_encoded_finer = self.netG_finer.encode(self.real_A, self.DA)
if self.DA == 0:
# building the feature database
db_path = copy.deepcopy(self.image_paths[0])
if db_path.split('/')[-1][11] == '0':
self.database_feature_c0.append((raw_encoded, raw_encoded_finer))
self.database_path_c0.append(db_path)
else:
self.database_feature_c1.append((raw_encoded, raw_encoded_finer))
self.database_path_c1.append(db_path)
return "database"
else:
path, retrieved_image = self.image_retrieval(raw_encoded, self.image_paths[0], raw_encoded_finer, index)
return path
def find_grad_sam(self, raw_encoded, query_path, retrieved_image, index, rank=-1):
with torch.set_grad_enabled(True):
new_raw_encoded = copy.deepcopy(raw_encoded.view(256, 64, 64)).cuda()
new_raw_encoded.requires_grad_(True)
new_retrieved_image = copy.deepcopy(retrieved_image.view(256, 64, 64)).cuda()
mean_cos = torch.nn.CosineSimilarity(dim=1, eps=1e-8)
mean_cos_similarity = mean_cos(new_raw_encoded.view(256, -1), new_retrieved_image.view(256, -1)).mean(0)
mean_cos_similarity.backward()
mask = F.relu(torch.mul(new_raw_encoded,
new_raw_encoded.grad.sum(1).sum(1).view(256, 1, 1).expand([256, 64, 64])).sum(
dim=0))
# normalization
mask -= mask.min()
mask /= mask.max()
mask = cv2.resize(mask.cpu().detach().numpy(), (256, 256))
heatmap = cv2.applyColorMap(np.uint8(255 * mask), cv2.COLORMAP_JET)
heatmap = np.float32(heatmap) / 255
heatmap = heatmap[..., ::-1] # gbr to rgb
img = io.imread(query_path)
img = np.float32(cv2.resize(img, (256, 256))) / 255
sam = heatmap + np.float32(img)
sam = norm_image(sam)
heatmap = norm_image(heatmap)
img = norm_image(img)
if not os.path.exists(self.opt.sam_matched_dir):
os.makedirs(self.opt.sam_matched_dir)
if not os.path.exists(self.opt.sam_mismatched_dir):
os.makedirs(self.opt.sam_mismatched_dir)
if rank == -1:
io.imsave(self.opt.sam_matched_dir + self.opt.name + "_" + self.opt.name_finer + '_s' + str(
self.opt.which_slice) + "_top" + str(self.opt.top_n) + "_" + str(index) + '_sam' + '_' +
query_path.split('/')[-1], sam)
io.imsave(self.opt.sam_matched_dir + self.opt.name + "_" + self.opt.name_finer + '_s' + str(
self.opt.which_slice) + "_top" + str(self.opt.top_n) + "_" + str(index) + '_heat' + '_' +
query_path.split('/')[-1], heatmap)
io.imsave(self.opt.sam_matched_dir + self.opt.name + "_" + self.opt.name_finer + '_s' + str(
self.opt.which_slice) + "_top" + str(self.opt.top_n) + "_" + str(index) + '_img' + '_' +
query_path.split('/')[-1], img)
else:
io.imsave(self.opt.sam_mismatched_dir + self.opt.name + "_" + self.opt.name_finer + '_s' + str(
self.opt.which_slice) + "_top" + str(self.opt.top_n) + "_" + str(index) + '_sam_' + str(
rank) + '_' + query_path.split('/')[
-1], sam)
io.imsave(self.opt.sam_mismatched_dir + self.opt.name + "_" + self.opt.name_finer + '_s' + str(
self.opt.which_slice) + "_top" + str(self.opt.top_n) + "_" + str(index) + '_heat_' + str(
rank) + '_' + query_path.split('/')[
-1], heatmap)
io.imsave(self.opt.sam_mismatched_dir + self.opt.name + "_" + self.opt.name_finer + '_s' + str(
self.opt.which_slice) + "_top" + str(self.opt.top_n) + "_" + str(index) + '_img_' + str(
rank) + '_' + query_path.split('/')[
-1], img)
def find_sam_weight(self, query, db):
mean_cos = torch.nn.CosineSimilarity(dim=1, eps=1e-8)
mean_cos_similarity = mean_cos(query.view(256, -1), db.view(256, -1)).mean(0)
grad_map = torch.autograd.grad(mean_cos_similarity, query, create_graph=True)[0]
weight = grad_map.sum(1).sum(1).view(256, 1, 1).expand([256, 64, 64])
return weight
def get_image_paths(self):
return self.image_paths
def save_features(self):
with torch.no_grad():
self.labels = ['query_image_%d' % self.DA]
raw_encoded = self.netG.encode(self.real_A, self.DA)
encoded = raw_encoded.view(-1) # encoded_new1
encoded_np = encoded.cpu().numpy()
db_path = copy.deepcopy(self.image_paths[0])
if not os.path.exists("./features/" + db_path.split('/')[-3]):
os.makedirs("./features/" + db_path.split('/')[-3])
print("./features/" + db_path.split('/')[-3] + '/' + db_path.split('/')[-1][:-4])
np.savez("./features/" + db_path.split('/')[-3] + '/' + db_path.split('/')[-1][:-4], encoded_np, db_path)
if self.use_two_stage:
if not os.path.exists("./features_finer/" + db_path.split('/')[-3]):
os.makedirs("./features_finer/" + db_path.split('/')[-3])
raw_encoded_finer = self.netG_finer.encode(self.real_A, self.DA)
np.savez("./features_finer/" + db_path.split('/')[-3] + '/' + db_path.split('/')[-1][:-4],
raw_encoded_finer.view(-1).cpu().numpy(),
db_path)
# client_server_test INHERIT/LocalModel.py
# Import all the useful libraries
import numpy as np
import pandas as pd
import fancyimpute
from sklearn import model_selection
from sklearn.model_selection import StratifiedKFold
from sklearn.ensemble import AdaBoostClassifier # PROBABILITY
from sklearn.tree import DecisionTreeClassifier # PROBABILITY
from sklearn.neighbors import RadiusNeighborsClassifier
from sklearn.linear_model import RidgeClassifier
from sklearn.ensemble import GradientBoostingClassifier
from sklearn.ensemble import RandomForestClassifier # PROBABILITY
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis
from sklearn.neighbors import KNeighborsClassifier # PROBABILITY
from sklearn.linear_model import LogisticRegression # PROBABILITY
from sklearn.naive_bayes import GaussianNB # PROBABILITY
from sklearn.ensemble import ExtraTreesClassifier # PROBABILITY
from sklearn.neighbors import KNeighborsClassifier # PROBABILITY
from sklearn.ensemble import BaggingClassifier # PROBABILITY
from imblearn.over_sampling import SMOTE
from imblearn.over_sampling import ADASYN
from imblearn.under_sampling import TomekLinks
from sklearn.externals import joblib
# MISSING PARTs
# 1) send the distribution (mean and std) of the data if requested (for example, how the two classes are distributed over the age of the population (or any other feature))
# 2) send other useful data ? ((if available) feature importance, decision_path)
# ...
# training data -> expected to be with all the listed features (IN ORDER -> like in the data we have). It is ok, if there are missing values
class LocalModel:
# local model functions
# train
# predict
# initialize the local model with the training data
def __init__(self, data = "none", target_name = "AFclass" , model_name = "ada4",random_state = 12345678, imputation_strategy = 'mice',balance_strategy = 'SMOTE'):
# we train the model with all the available data
self.target_name = target_name ## it is the name of the target column
self.target = None ## it is the target vector
self.data_lm = data ## it is the complete dataset -> will be modified
self.original_data = data ## store a copy of the original data -> never modified
self.X = None ## it is the data except the target
self.features_lm = None ## available features
self.imputation_strategy = imputation_strategy
self.balance_strategy = balance_strategy
# for cross-validation
self.model_accuracy = ""
self.cv_x = None # data -> in principle equal to self.X
self.cv_y = None # target -> in principle equal to self.target
self.random_state = random_state # random state -> fixed for testing
self.selected_model_name = model_name # name of the model -> default fixed
self.selected_model = AdaBoostClassifier(DecisionTreeClassifier(max_depth=10, random_state = self.random_state),algorithm="SAMME", n_estimators=300)#DecisionTreeClassifier(criterion='gini', splitter='best', max_depth=15, min_samples_split=2, min_samples_leaf=1, min_weight_fraction_leaf=0.0, max_features=None, random_state=self.random_state, max_leaf_nodes=None, min_impurity_decrease=0.0, min_impurity_split=None, class_weight=None, presort=False) ## default model
self.models = [] ## list of all the available models
self.important_features = pd.DataFrame([],columns = {"important features"})
#if not isinstance(self, LocalModel):
# self.chosen_model(model_name) # select the chosen model -> otherwise use the default one
#self.check1, self.check2, self.check3 = self.fixDataset(imputation_strategy = imputation_strategy, balance_strategy = balance_strategy) ## fix data set before training -> clean data (remove unused columns, convert categotical attributes into numerical), recover missing values (use a strategy to impute the missing values), balance the data set
#if isinstance(self, LocalModel):
# self.chooseModel_with_crossValidation()
self.localModelType = "app" ## gui or app -> gui can only respond to predictions , app can only send prediction requests or send data to central model
if not str(self.data_lm) == "none":
self.localModelType = "gui"
self.perfromLocalOperations()
def perfromLocalOperations(self):
self.fixDataset(imputation_strategy = self.imputation_strategy, balance_strategy = self.balance_strategy) ## fix data set before training -> clean data (remove unused columns, convert categorical attributes into numerical), recover missing values (use a strategy to impute the missing values), balance the data set
#self.train()
# initiate the models_definition
def chooseModel_with_crossValidation_and_train(self):
r = []
if not str(self.data_lm) == "none":
try :
print ("TRY load model, " + self.selected_model_name)
self.selected_model = joblib.load(self.selected_model_name + '.pkl')
print ("model loaded")
r = self.crossValidation(all_models = 0) ## just to get the accuracy and the std deviation
print ("skip trainign -Z model loaded")
except :
self.models_definition(self.random_state)
r = self.crossValidation(all_models = 1, k_fold = 10)
found = 0
for (n,i) in self.models: # n = name , i = model
if n == r.iloc[0][0] and found == 0:
found = 1
self.selected_model = i
self.selected_model_name = n
if found == 0:
self.selected_model = DecisionTreeClassifier(criterion='gini', splitter='best', max_depth=15, min_samples_split=2, min_samples_leaf=1, min_weight_fraction_leaf=0.0, max_features=None, random_state=self.random_state, max_leaf_nodes=None, min_impurity_decrease=0.0, min_impurity_split=None, class_weight=None, presort=False)
self.selected_model_name = "dt4"
self.train()
joblib.dump(self.selected_model, self.selected_model_name + '.pkl')
else:
print ("no data")
self.getImportantFetures()
print ("DONE Cross validation -> choosen model")
print (self.important_features.shape)
print (self.important_features)
print ("shape")
print (self.selected_model_name, self.selected_model)
return r
def getImportantFetures(self):
a = []
print (self.selected_model.feature_importances_)
try:
indices = np.argsort(self.selected_model.feature_importances_)[-10:]
for i in indices :
a.append(self.data_lm.columns[i])
print ("important features here")
print (self.important_features)
print (" finish printing important features ")
except :
print("no features importance")
a = pd.DataFrame(a,columns = {"important features"})
self.important_features = a
def updateFeatures(self,features):
f = features["important features"].tolist()
pos = 0
res = self.important_features["important features"].tolist()
try:
for i in f:
if i not in res:
res.insert(pos , i)
pos += 1
else :
oldPos = res.index(i)
res.remove(i)
res.insert(int((pos + oldPos) / 2) , i)
pos += 1
res = pd.DataFrame(res,columns = {"important features"})
self.important_features = res
except:
print("error in update feature")
return res
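# Illustrative example of the merge above (hypothetical feature names): with
# self.important_features = ['a', 'b', 'c'] and incoming features = ['b', 'd'],
# 'b' is already known (old position 1) and is moved to int((0 + 1) / 2) = 0,
# while 'd' is new and is inserted at position 1, giving ['b', 'd', 'a', 'c'].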
def addData(self, data, target_name = "AFclass", localModelType = "gui"):
# add data only if there is none yet (in future -> possibility to concat to self.original_data)
if str(self.data_lm) == "none":
self.localModelType = "gui"
self.data_lm = data
self.target_name = target_name
self.perfromLocalOperations()
print ("data Added and fixed")
return True
print ("abort -> there is already data")
return False
def models_definition(self,random_state):
## here we can tune the paramenters of the models
#self.models.append(("ada1",AdaBoostClassifier(DecisionTreeClassifier(max_depth=1, random_state = self.random_state),algorithm="SAMME", n_estimators=200)))
#self.models.append(("ada2",AdaBoostClassifier(DecisionTreeClassifier(max_depth=3, random_state = self.random_state),algorithm="SAMME", n_estimators=200)))
#self.models.append(("ada3",AdaBoostClassifier(DecisionTreeClassifier(max_depth=5, random_state = self.random_state),algorithm="SAMME", n_estimators=100)))
self.models.append(("ada4",AdaBoostClassifier(DecisionTreeClassifier(max_depth=10, random_state = self.random_state),algorithm="SAMME", n_estimators=300)))
#self.models.append(("ada5",AdaBoostClassifier(DecisionTreeClassifier(max_depth=20, random_state = self.random_state),algorithm="SAMME", n_estimators=100)))
#self.models.append(("ada6",AdaBoostClassifier(RandomForestClassifier(bootstrap=True, class_weight=None, criterion='gini',max_depth=2, max_features='auto', max_leaf_nodes=None,min_impurity_decrease=0.0, min_impurity_split=None,min_samples_leaf=1, min_samples_split=2, min_weight_fraction_leaf=0.0, n_estimators=10, n_jobs=1,oob_score=False, random_state=self.random_state, verbose=0, warm_start=False))))
#self.models.append(("ada7",AdaBoostClassifier(RandomForestClassifier(bootstrap=True, class_weight=None, criterion='gini',max_depth=5, max_features='auto', max_leaf_nodes=None,min_impurity_decrease=0.0, min_impurity_split=None,min_samples_leaf=1, min_samples_split=2, min_weight_fraction_leaf=0.0, n_estimators=10, n_jobs=1,oob_score=False, random_state=self.random_state, verbose=0, warm_start=False))))
#self.models.append(("ada8",AdaBoostClassifier(RandomForestClassifier(bootstrap=True, class_weight=None, criterion='gini',max_depth=10, max_features='auto', max_leaf_nodes=None,min_impurity_decrease=0.0, min_impurity_split=None,min_samples_leaf=1, min_samples_split=2, min_weight_fraction_leaf=0.0, n_estimators=10, n_jobs=1,oob_score=False, random_state=self.random_state, verbose=0, warm_start=False))))
"""
#self.model.append(RadiusNeighborsClassifier(radius=10.0, weights='uniform', algorithm='auto', leaf_size=30, p=2, metric='minkowski'))
#self.models.append(("ridge1", RidgeClassifier(alpha=1.0, fit_intercept=True, normalize=False, copy_X=True, max_iter=None, tol=0.001, class_weight=None, solver='auto', random_state=self.random_state)))
#paramsGB1 = {'n_estimators': 120, 'max_depth': 3, 'subsample': 0.5,'learning_rate': 0.01, 'min_samples_leaf': 1, 'random_state': self.random_state}
#paramsGB2 = {'n_estimators': 120, 'max_depth': 6, 'subsample': 0.5,'learning_rate': 0.05, 'min_samples_leaf': 1, 'random_state': self.random_state}
#paramsGB3 = {'n_estimators': 60, 'max_depth': 15, 'subsample': 0.5,'learning_rate': 0.01, 'min_samples_leaf': 1, 'random_state': self.random_state}
paramsGB4 = {'n_estimators': 320, 'max_depth': 10, 'subsample': 0.5,'learning_rate': 0.005, 'min_samples_leaf': 1, 'random_state': self.random_state}
#self.models.append(("gb1",GradientBoostingClassifier(**paramsGB1)))
#self.models.append(("gb2",GradientBoostingClassifier(**paramsGB2)))
#self.models.append(("gb3",GradientBoostingClassifier(**paramsGB3)))
self.models.append(("gb4",GradientBoostingClassifier(**paramsGB4)))
"""
#self.models.append(("dt1",DecisionTreeClassifier(random_state=self.random_state)))
#self.models.append(("dt2",DecisionTreeClassifier(criterion='gini', splitter='best', max_depth=3, min_samples_split=2, min_samples_leaf=1, min_weight_fraction_leaf=0.0, max_features=None, random_state=self.random_state, max_leaf_nodes=None, min_impurity_decrease=0.0, min_impurity_split=None, class_weight=None, presort=False)))
#self.models.append(("dt3",DecisionTreeClassifier(criterion='gini', splitter='best', max_depth=7, min_samples_split=2, min_samples_leaf=1, min_weight_fraction_leaf=0.0, max_features=None, random_state=self.random_state, max_leaf_nodes=None, min_impurity_decrease=0.0, min_impurity_split=None, class_weight=None, presort=False)))
self.models.append(("dt4",DecisionTreeClassifier(criterion='gini', splitter='best', max_depth=15, min_samples_split=2, min_samples_leaf=1, min_weight_fraction_leaf=0.0, max_features=None, random_state=self.random_state, max_leaf_nodes=None, min_impurity_decrease=0.0, min_impurity_split=None, class_weight=None, presort=False)))
#self.models.append(("dt5",DecisionTreeClassifier(criterion='entropy', splitter='best', max_depth=None, min_samples_split=2, min_samples_leaf=1, min_weight_fraction_leaf=0.0, max_features=None, random_state=self.random_state, max_leaf_nodes=None, min_impurity_decrease=0.0, min_impurity_split=None, class_weight=None, presort=False)))
"""
#self.models.append(("rf1",RandomForestClassifier(bootstrap=True, class_weight=None, criterion='gini',max_depth=2, max_features='auto', max_leaf_nodes=None,min_impurity_decrease=0.0, min_impurity_split=None,min_samples_leaf=1, min_samples_split=2, min_weight_fraction_leaf=0.0, n_estimators=10, n_jobs=1,oob_score=False, random_state=self.random_state, verbose=0, warm_start=False)))
self.models.append(("rf2",RandomForestClassifier(bootstrap=True, class_weight=None, criterion='gini',max_depth=5, max_features='auto', max_leaf_nodes=None,min_impurity_decrease=0.0, min_impurity_split=None,min_samples_leaf=1, min_samples_split=2, min_weight_fraction_leaf=0.0, n_estimators=20, n_jobs=1,oob_score=False, random_state=self.random_state, verbose=0, warm_start=False)))
#self.models.append(("rf3",RandomForestClassifier(bootstrap=True, class_weight=None, criterion='gini',max_depth=10, max_features='auto', max_leaf_nodes=None,min_impurity_decrease=0.0, min_impurity_split=None,min_samples_leaf=1, min_samples_split=2, min_weight_fraction_leaf=0.0, n_estimators=50, n_jobs=1,oob_score=False, random_state=self.random_state, verbose=0, warm_start=False)))
#self.models.append(("ld1",LinearDiscriminantAnalysis(n_components=None, priors=None, shrinkage=None,solver='svd', store_covariance=False, tol=0.0001)))
#self.models.append(("lr1",LogisticRegression(penalty='l2', dual=False, tol=0.0001, C=1.0, fit_intercept=True, intercept_scaling=1, class_weight=None, random_state=self.random_state, solver='liblinear', max_iter=100, multi_class='ovr', verbose=0, warm_start=False, n_jobs=1)))
#self.models.append(("knn1",KNeighborsClassifier(n_neighbors=5, weights='uniform', algorithm='auto', leaf_size=30, p=2, metric='minkowski', metric_params=None, n_jobs=1)))
self.models.append(("knn2",KNeighborsClassifier(n_neighbors=10, weights='uniform', algorithm='auto', leaf_size=30, p=2, metric='minkowski', metric_params=None, n_jobs=1)))
#self.models.append(("knn3",KNeighborsClassifier(n_neighbors=15, weights='uniform', algorithm='auto', leaf_size=30, p=2, metric='minkowski', metric_params=None, n_jobs=1)))
#self.models.append(("knn4",KNeighborsClassifier(n_neighbors=20, weights='distance', algorithm='auto', leaf_size=30, p=2, metric='minkowski', metric_params=None, n_jobs=1)))
#self.models.append(("knn5",KNeighborsClassifier(n_neighbors=50, weights='distance', algorithm='auto', leaf_size=30, p=2, metric='minkowski', metric_params=None, n_jobs=1)))
#self.models.append(("nb1",GaussianNB()))
#self.models.append(("et1",ExtraTreesClassifier(n_estimators=50, random_state=self.random_state)))
#self.models.append(("et2",ExtraTreesClassifier(n_estimators=100, random_state=self.random_state)))
self.models.append(("et3",ExtraTreesClassifier(n_estimators=200, random_state=self.random_state)))
#self.models.append(("bag1",BaggingClassifier(base_estimator=None, n_estimators=5, max_samples=1.0, max_features=1.0, bootstrap=True, bootstrap_features=False, oob_score=False, warm_start=False, n_jobs=1, random_state=self.random_state, verbose=0)))
#self.models.append(("bag2",BaggingClassifier(base_estimator=None, n_estimators=10, max_samples=1.0, max_features=1.0, bootstrap=True, bootstrap_features=False, oob_score=False, warm_start=False, n_jobs=1, random_state=self.random_state, verbose=0)))
#self.models.append(("bag3",BaggingClassifier(base_estimator=None, n_estimators=20, max_samples=1.0, max_features=1.0, bootstrap=True, bootstrap_features=False, oob_score=False, warm_start=False, n_jobs=1, random_state=self.random_state, verbose=0)))
#self.models.append(("bag4",BaggingClassifier(base_estimator=None, n_estimators=50, max_samples=1.0, max_features=1.0, bootstrap=True, bootstrap_features=False, oob_score=False, warm_start=False, n_jobs=1, random_state=self.random_state, verbose=0)))
#self.models.append(("bag5",BaggingClassifier(base_estimator=None, n_estimators=100, max_samples=1.0, max_features=1.0, bootstrap=True, bootstrap_features=False, oob_score=False, warm_start=False, n_jobs=1, random_state=self.random_state, verbose=0)))
#self.models.append(("bag6",BaggingClassifier(base_estimator=None, n_estimators=150, max_samples=1.0, max_features=1.0, bootstrap=True, bootstrap_features=False, oob_score=False, warm_start=False, n_jobs=1, random_state=self.random_state, verbose=0)))
#self.models.append(("bag7",BaggingClassifier(base_estimator=None, n_estimators=200, max_samples=1.0, max_features=1.0, bootstrap=True, bootstrap_features=False, oob_score=False, warm_start=False, n_jobs=1, random_state=self.random_state, verbose=0)))
self.models.append(("bag8",BaggingClassifier(base_estimator=RandomForestClassifier(bootstrap=True, class_weight=None, criterion='gini',max_depth=2, max_features='auto', max_leaf_nodes=None,min_impurity_decrease=0.0, min_impurity_split=None,min_samples_leaf=1, min_samples_split=2, min_weight_fraction_leaf=0.0, n_estimators=10, n_jobs=1,oob_score=False, random_state=self.random_state, verbose=0, warm_start=False), n_estimators=200, max_samples=1.0, max_features=1.0, bootstrap=True, bootstrap_features=False, oob_score=False, warm_start=False, n_jobs=1, random_state=self.random_state, verbose=0)))
#self.models.append(("bag9",BaggingClassifier(base_estimator=RandomForestClassifier(bootstrap=True, class_weight=None, criterion='gini',max_depth=5, max_features='auto', max_leaf_nodes=None,min_impurity_decrease=0.0, min_impurity_split=None,min_samples_leaf=1, min_samples_split=2, min_weight_fraction_leaf=0.0, n_estimators=10, n_jobs=1,oob_score=False, random_state=self.random_state, verbose=0, warm_start=False), n_estimators=200, max_samples=1.0, max_features=1.0, bootstrap=True, bootstrap_features=False, oob_score=False, warm_start=False, n_jobs=1, random_state=self.random_state, verbose=0)))
#self.models.append(("bag10",BaggingClassifier(base_estimator=RandomForestClassifier(bootstrap=True, class_weight=None, criterion='gini',max_depth=10, max_features='auto', max_leaf_nodes=None,min_impurity_decrease=0.0, min_impurity_split=None,min_samples_leaf=1, min_samples_split=2, min_weight_fraction_leaf=0.0, n_estimators=10, n_jobs=1,oob_score=False, random_state=self.random_state, verbose=0, warm_start=False), n_estimators=200, max_samples=1.0, max_features=1.0, bootstrap=True, bootstrap_features=False, oob_score=False, warm_start=False, n_jobs=1, random_state=self.random_state, verbose=0)))
#self.models.append(("bag11",BaggingClassifier(base_estimator=RandomForestClassifier(bootstrap=True, class_weight=None, criterion='gini',max_depth=20, max_features='auto', max_leaf_nodes=None,min_impurity_decrease=0.0, min_impurity_split=None,min_samples_leaf=1, min_samples_split=2, min_weight_fraction_leaf=0.0, n_estimators=10, n_jobs=1,oob_score=False, random_state=self.random_state, verbose=0, warm_start=False), n_estimators=200, max_samples=1.0, max_features=1.0, bootstrap=True, bootstrap_features=False, oob_score=False, warm_start=False, n_jobs=1, random_state=self.random_state, verbose=0)))
"""
## add other models ...
def chosen_model(self, name):
# initialize the available models
self.models_definition(self.random_state)
found = 0
for (n,i) in self.models: # n = name , i = model
if n == name and found == 0:
found = 1
self.selected_model = i
self.selected_model_name = name
if found == 0 :
# feel free to modify the model.. if another is better
self.selected_model = DecisionTreeClassifier(criterion='gini', splitter='best', max_depth=15, min_samples_split=2, min_samples_leaf=1, min_weight_fraction_leaf=0.0, max_features=None, random_state=self.random_state, max_leaf_nodes=None, min_impurity_decrease=0.0, min_impurity_split=None, class_weight=None, presort=False)
self.selected_model_name = "dt4"
return
## to choose the best model using cross validation
## normally crossvalidate just the chosen model, if all_models = 1 -> crossvalidate all the models
def crossValidation(self, all_models = 0, k_fold = 10, random_state = 12345678):
# cross validation
if all_models == 1:
print ("begin cross validation for all models")
evaluation = []
counter = 1
numberOfModels = len(self.models)
#best = ("BEST", 0, 0)
for (name,i) in self.models:
print (round(counter / numberOfModels,3), " is complete \t" )
e = model_selection.cross_val_score(i, self.cv_x, self.cv_y, cv=StratifiedKFold(n_splits=k_fold,random_state=random_state,shuffle=True))
avg = round(np.average(e),4) *
import numpy as np
from numpy.linalg import slogdet, solve
from numpy import log, pi
import pandas as pd
from scipy.special import expit
from .constants import mass_pion
from .kinematics import momentum_transfer_cm, cos0_cm_from_lab, omega_cm_from_lab
from .constants import omega_lab_cusp, dsg_label, DesignLabels
from sklearn.gaussian_process.kernels import RBF
import gsum as gm
def order_transition_old(n, n_inf, omega):
return n + (n_inf - n) * expit((omega-190)/20)
def order_transition_lower_orders(n, omega):
omega1 = 180.
omega2 = 240.
omegam = (omega1 + omega2) / 2.
f = 1. / (1 + np.exp(4 * np.log(3) * (omega - omegam) / (omega1 - omega2)))
if n % 2 == 0:
return (1 - f / 2.) * n
elif n % 2 == 1:
return (1 - f / 2.) * n - 5 * f / 2.
raise ValueError('n must be an integer')
def order_transition_truncation(n, omega, n_inf):
omega1 = 180.
omega2 = 240.
omegam = (omega1 + omega2) / 2.
f = 1. / (1 + np.exp(4 * np.log(3) * (omega - omegam) / (omega1 - omega2)))
return n - (n - n_inf) * f
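# A quick check of the logistic blending above: at the midpoint omega = 210 the
# factor f equals 0.5, so the returned order sits halfway between n and n_inf;
# at omega = 180 f is about 0.1 (order stays close to n) and at omega = 240 f is
# about 0.9 (order is pulled almost all the way to n_inf).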
def expansion_parameter(X, breakdown):
X = np.atleast_2d(X)
return np.squeeze((X[:, 0] + mass_pion) / breakdown)
def expansion_parameter_phillips(breakdown, factor=1):
return np.sqrt(mass_pion * factor / breakdown)
def expansion_parameter_cm(X, breakdown, mass, factor=1.):
X = np.atleast_2d(X)
omega_lab, _ = X.T
omega_cm = omega_cm_from_lab(omega_lab, mass=mass)
num = omega_cm + mass_pion
num = num * factor
# num = (omega_cm + mass_pion) / 2
return np.squeeze(np.sqrt(num / breakdown))
def expansion_parameter_momentum_transfer_cm(X, breakdown, mass, include_correction=False):
X = np.atleast_2d(X)
omega_lab, cos0_lab = X.T
cos0_lab = np.cos(np.deg2rad(cos0_lab))
omega_cm = omega_cm_from_lab(omega_lab, mass=mass)
cos0_cm = cos0_cm_from_lab(omega_lab, mass, cos0_lab)
# q = momentum_transfer_cm(omega_cm, cos0_cm)
num = omega_cm + mass_pion
# num = (omega_cm + mass_pion) / 2
if include_correction:
# height = 200
# omega_width = 50
height = 150
omega_width = 150
cos0_width = 1
lorentz = height / (
((omega_lab - omega_lab_cusp) / omega_width) ** 2 + ((cos0_lab - 1) / cos0_width) ** 2 + 1
)
num += lorentz
from scipy.special import softmax, logsumexp
# num = softmax([q, omega_cm], axis=0)
# num = logsumexp([q, omega_cm], axis=0)
# num = (q + omega_cm) / 2.
# return np.squeeze(num / breakdown)
return np.squeeze(np.sqrt(num / breakdown))
def compute_expansion_summation_matrix(Q, first_omitted_order):
Q_mat = Q[:, None] * Q
Q_to_omitted = Q ** first_omitted_order
return Q_to_omitted[:, None] * Q_to_omitted / (1 - Q_mat)
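# Entry (i, j) of the matrix above is Q_i**k0 * Q_j**k0 / (1 - Q_i * Q_j) with
# k0 = first_omitted_order, i.e. the closed form of the geometric tail
# sum_{n >= k0} (Q_i * Q_j)**n that accumulates the contributions of all omitted orders.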
def coefficients(y, ratio, ref=1, orders=None):
"""Returns the coefficients of a power series
Parameters
----------
y : array, shape = (n_samples, n_curves)
ratio : scalar or array, shape = (n_samples,)
ref : scalar or array, shape = (n_samples,)
orders : 1d array, optional
The orders at which y was computed. Defaults to 0, 1, ..., n_curves-1
Returns
-------
An (n_samples, n_curves) array of the extracted coefficients
"""
if y.ndim != 2:
raise ValueError('y must be 2d')
if orders is None:
orders = np.arange(y.shape[-1])
if orders.shape[-1] != y.shape[-1]:
raise ValueError('partials and orders must have the same length')
ref, ratio, orders = np.atleast_1d(ref, ratio, orders)
ref = ref[:, None]
ratio = ratio[:, None]
# Make coefficients
coeffs = np.diff(y, axis=-1) # Find differences
coeffs = np.insert(coeffs, 0, y[..., 0], axis=-1) # But keep leading term
coeffs = coeffs / (ref * ratio**orders) # Scale each order appropriately
return coeffs
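# Worked example (hypothetical numbers): for a single curve evaluated at orders
# 0, 1, 2 with y = [[1.0, 1.5, 1.7]], ref = 1 and ratio = 0.5, the order-by-order
# differences are [1.0, 0.5, 0.2]; dividing by ref * ratio**[0, 1, 2] =
# [1.0, 0.5, 0.25] gives the coefficients [[1.0, 1.0, 0.8]].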
def compute_idx_mat(n):
idx = np.arange(n)
idx_rows, idx_cols = np.broadcast_arrays(idx[:, None], idx)
idx_mat = np.dstack([idx_rows, idx_cols])
return idx_mat
def p_sq_grad_coeff_mat(n):
n_rows = int(n * (n + 1) / 2)
idx_mat = compute_idx_mat(n)
idx_vec = idx_mat[np.triu_indices(n)]
p_sq_grad = np.zeros((n_rows, n))
for i in range(n):
p_sq_grad[:, i] = np.sum(idx_vec == i, axis=1)
return p_sq_grad
def p_sq_grad_idx_mat(n):
idx_mat = compute_idx_mat(n)
idx1, idx2 = np.triu_indices(idx_mat.shape[0])
idx_mat_tri = idx_mat[idx1, idx2, :]
n_rows = int(n * (n + 1) / 2)
idx_mat = np.zeros((n_rows, n), dtype=int)
for i in range(n):
mask = np.any(idx_mat_tri == i, axis=1)
idx_mat[mask, i] = np.arange(np.sum(mask), dtype=int)
return idx_mat
def quadratic(x, A, b, c, flat=True):
R"""Computes a multivariate quadratic function.
Parameters
----------
x : array, shape = (p,)
The input variables
A : array, shape = (N, p(p+1)/2,)
The flattened quadratic coefficients
b : array, shape = (N, p)
The linear coefficients
c : array, shape = (N,)
The constant term
flat
Returns
-------
array, shape = (N,)
"""
if flat:
x = np.atleast_1d(x)
x_sq = x[:, None] * x
x_quad = x_sq[np.triu_indices_from(x_sq)]
quad = A @ x_quad
else:
quad = np.einsum('...ij,i,j', A, x, x)
return quad + b @ x + c
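# Example of the flattened representation used above (p = 2):
# x_quad = [x0*x0, x0*x1, x1*x1], so each row of A has p*(p+1)/2 = 3 entries and
# the middle entry must carry the full x0*x1 cross coefficient, since the
# lower-triangular duplicate is dropped by np.triu_indices_from.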
def grad_quadratic(x, A, b, c, flat=True):
R"""Computes the gradient of a multivariate quadratic function.
Parameters
----------
x : array, shape = (p,)
The input variables
A : array, shape = (N, p(p+1)/2)
The flattened quadratic coefficients
b : array, shape = (N, p)
The linear coefficients
c : array, shape = (N,)
The constant term
flat
Returns
-------
array, shape = (p, N)
"""
if flat:
x = np.atleast_1d(x)
n = len(x)
coeff_mat = p_sq_grad_coeff_mat(n)
idx_mat = p_sq_grad_idx_mat(n)
x_sq_grad = coeff_mat * x[idx_mat]
quad = A @ x_sq_grad
else:
A_trans = np.swapaxes(A, -1, -2)
quad = (A + A_trans) @ x
return (quad + b).T
def quad_ratio(x, An, bn, cn, Ad, bd, cd, flat=True):
R"""Computes the ratio of multivariate quadratic functions.
Parameters
----------
x : array, shape = (p,)
The input variables
An : array, shape = (N, p, p)
The quadratic coefficients of the numerator
bn : array, shape = (N, p)
The linear coefficients of the numerator
cn : array, shape = (N,)
The constant term of the numerator
Ad : array, shape = (N, p, p)
The quadratic coefficients of the denominator
bd : array, shape = (N, p)
The linear coefficients of the denominator
cd : array, shape = (N,)
The constant term of the denominator
flat
Returns
-------
array, shape = (N,)
"""
return quadratic(x, An, bn, cn, flat=flat) / quadratic(x, Ad, bd, cd, flat=flat)
def grad_quad_ratio(x, An, bn, cn, Ad, bd, cd, flat=True):
R"""Computes the gradient of the ratio of multivariate quadratic functions.
Parameters
----------
x : array, shape = (p,)
The input variables
An : array, shape = (N, p, p)
The quadratic coefficients of the numerator
bn : array, shape = (N, p)
The linear coefficients of the numerator
cn : array, shape = (N,)
The constant term of the numerator
Ad : array, shape = (N, p, p)
The quadratic coefficients of the denominator
bd : array, shape = (N, p)
The linear coefficients of the denominator
cd : array, shape = (N,)
The constant term of the denominator
flat
Returns
-------
array, shape = (p, N)
"""
fn = quadratic(x, An, bn, cn, flat=flat)
grad_fn = grad_quadratic(x, An, bn, cn, flat=flat)
fd = quadratic(x, Ad, bd, cd, flat=flat)
grad_fd = grad_quadratic(x, Ad, bd, cd, flat=flat)
return grad_fn / fd - fn / fd ** 2 * grad_fd
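# The helper below linearizes the quadratic ratio around x0: writing
# f(x) ~ f(x0) + grad_f(x0) . (x - x0), it returns the constant part
# f(x0) - x0 . grad_f(x0) and the coefficient matrix grad_f(x0).T that multiplies x.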
def create_linearized_matrices(x0, An, bn, cn, Ad, bd, cd, flat=True):
f0 = quad_ratio(x0, An, bn, cn, Ad, bd, cd, flat=flat)
grad_f0 = grad_quad_ratio(x0, An, bn, cn, Ad, bd, cd, flat=flat)
return f0 - x0 @ grad_f0, grad_f0.T
def posterior_precision_linear(X, cov_data, prec_p):
R"""Computes the posterior precision for parameters under a linear Gaussian model
X : np.ndarray, shape = (n_data, n_features)
The feature matrix
cov_data : np.ndarray, shape = (n_data, n_data)
The covariance matrix for the data
prec_p : np.ndarray, shape = (n_features, n_features)
The prior precision on the parameters
"""
return prec_p + X.T @ solve(cov_data, X)
def shannon_expected_utility(X, cov_data, prec_p):
R"""Computes the expected utility using the Shannon information, or the KL divergence
X : np.ndarray, shape = (n_data, n_features)
The feature matrix
cov_data : np.ndarray, shape = (n_data, n_data)
The covariance matrix for the data
prec_p : np.ndarray, shape = (n_features, n_features)
The prior precision on the parameters
"""
_, log_det = slogdet(prec_p + X.T @ solve(cov_data, X)) # The negative of log |V|
_, log_det_prec = slogdet(prec_p) # The negative of log |V_0|
return 0.5 * (log_det - log_det_prec)
# p = prec_p.shape[0]
# return 0.5 * (- p * log(2 * pi) - p + log_det)
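# Minimal numeric sketch (hypothetical numbers): with one feature and one datum,
# X = [[2.0]], cov_data = [[1.0]] and prec_p = [[1.0]], the posterior precision is
# 1 + 2 * 1 * 2 = 5, so shannon_expected_utility returns 0.5 * (log 5 - log 1),
# roughly 0.80 nats of expected information gain over the prior.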
def create_observable_set(df, cov_exp=0., p0_proton=None, cov_p_proton=None, p0_neutron=None,
cov_p_neutron=None, scale_dsg=True, p_transform=None, expts_info=None):
from compton
# Repository: INM-6/swan
"""
Created on Feb 23, 2018
@author: <NAME>
In this module you can find the :class:`pgWidget2d` which inherits
from :class:`src.mypgwidget.PyQtWidget2d`.
It is extended by a 2d plot and the plotting methods.
"""
# system imports
import numpy as np
import quantities as pq
from neo import SpikeTrain
from elephant.statistics import instantaneous_rate
from elephant.kernels import GaussianKernel
from pyqtgraph.Qt import QtWidgets, QtCore
# swan-specific imports
from swan.widgets.mypgwidget import PyQtWidget2d
from swan.gui.rate_profile_options_ui import RpOptionsUi
class PgWidgetRateProfile(PyQtWidget2d):
"""
A class with only one plot that shows simple 2d data.
"""
def __init__(self, *args, **kwargs):
"""
**Properties**
*_axes* (:class:`matplotlib.axes.Axes`):
The 2d plot for this widget.
"""
PyQtWidget2d.__init__(self, *args, **kwargs)
layers = ["individual", "pooled"]
self.toolbar.setup_radio_buttons(layers)
self.toolbar.doLayer.connect(self.trigger_refresh)
self.plot_item = self.pg_canvas.getPlotItem()
self.plot_item.enableAutoRange()
self.rate_profiles = []
self.datas = {}
self.time_pre = -1000
self.time_post = 1500
self.sampling_period = 5
self.kernel_width = 60.0
self.trigger_event = ""
self.border_correction_multiplier = 1
self.time_pre_min, self.time_pre_max = -5000.0, -1.0
self.time_post_min, self.time_post_max = 1.0, 5000.0
self.sampling_period_min, self.sampling_period_max = 1.0, 5000.0
self.kernel_width_min, self.kernel_width_max = 1.0, 500.0
self.events = {}
self.rate_profile_settings = RpOptionsUi(self)
self.show_grid()
def on_enter(self):
"""
This method is called if you press ENTER on one of the line
edit widgets.
Redraws everything.
"""
time_pre = self.rate_profile_settings.timePre.text()
time_post = self.rate_profile_settings.timePost.text()
sampling_period = self.rate_profile_settings.samplingPeriod.text()
kernel_width = self.rate_profile_settings.kernelWidth.text()
try:
time_pre = float(time_pre)
time_post = float(time_post)
sampling_period = float(sampling_period)
kernel_width = float(kernel_width)
if self.time_pre_max > time_pre > self.time_pre_min \
and self.time_post_min < time_post < self.time_post_max \
and self.sampling_period_min < sampling_period < self.sampling_period_max \
and self.kernel_width_min < kernel_width < self.kernel_width_max:
self.time_pre = time_pre
self.time_post = time_post
self.sampling_period = sampling_period
self.kernel_width = kernel_width
self.update_plot()
self.rate_profile_settings.errorLabel.setText("")
else:
self.rate_profile_settings.errorLabel.setText("Values outside acceptable limits!")
except Exception as e:
print(e)
self.rate_profile_settings.errorLabel.setText("Invalid inputs!")
def on_time_pre_changed(self, value):
"""
This method is called if you edit the T-Pre option.
Checks if the T-Pre is correct.
"""
time_pre = value
try:
time_pre = float(time_pre)
if self.time_pre_max > time_pre > self.time_pre_min:
self.time_pre = time_pre
self.rate_profile_settings.errorLabel.setText("")
else:
self.rate_profile_settings.errorLabel.setText("Value outside acceptable limits!")
except Exception as e:
print(e)
self.rate_profile_settings.errorLabel.setText("Invalid input!")
def on_time_post_changed(self, value):
"""
This method is called if you edit the T-Post option.
Checks if the T-Post is correct.
"""
t_post = value
try:
t_post = float(t_post)
if self.time_post_min < t_post < self.time_post_max:
self.time_post = t_post
self.rate_profile_settings.errorLabel.setText("")
else:
self.rate_profile_settings.errorLabel.setText("Value outside acceptable limits!")
except Exception as e:
print(e)
self.rate_profile_settings.errorLabel.setText("Invalid input!")
def on_sampling_period_changed(self, value):
"""
This method is called if you edit the sampling period option.
Checks if the sampling period is correct.
"""
sampling_period = value
try:
sampling_period = float(sampling_period)
if self.sampling_period_min < sampling_period < self.sampling_period_max:
self.sampling_period = sampling_period
self.rate_profile_settings.errorLabel.setText("")
else:
self.rate_profile_settings.errorLabel.setText("Value outside acceptable limits!")
except Exception as e:
print(e)
self.rate_profile_settings.errorLabel.setText("Invalid input!")
def on_kernel_width_changed(self, value):
"""
This method is called if you edit the kernel width option.
Checks if the sampling period is correct.
"""
kernel_width = value
try:
kernel_width = float(kernel_width)
if self.kernel_width_min < kernel_width < self.kernel_width_max:
self.kernel_width = kernel_width
self.rate_profile_settings.errorLabel.setText("")
else:
self.rate_profile_settings.errorLabel.setText("Value outside acceptable limits!")
except Exception as e:
print(e)
self.rate_profile_settings.errorLabel.setText("Invalid input!")
@staticmethod
def create_raster_psth(spiketrain, trigger, timerange, border_correction=0.0):
"""
It calculates a list of concatenated spikes around stimulus onset and offset, for a later PSTH analysis.
:param spiketrain: list with spike times
:param trigger: list with timings of the trigger
:param timerange: time range for the PSTHs
:param border_correction: time window around edges to use for border correction
:return: raster_trig
"""
if len(spiketrain) < 2:
# raise(ValueError, "The spiketrain contains fewer than 2 spikes.")
print("The spiketrain contains fewer than 2 spikes. Returning empty list.")
return []
border_correction = border_correction * pq.ms
spiketrain = spiketrain * pq.ms
# find period of activity (poa)
poa_start = spiketrain[0]
poa_stop = spiketrain[-1]
trig_unit = trigger[(trigger >= poa_start) & (trigger <= poa_stop)]
if len(trig_unit) < 1:
print('-- No neural activity during this block for this neuronID')
return []
# extract spike times around saccade and fixation
raster_trig = [] # spikes around trig
for i_trig, t_trig in enumerate(trig_unit):
mask_trig = (spiketrain >= (t_trig + timerange[0] * pq.ms - border_correction)) & \
(spiketrain <= (t_trig + timerange[1] * pq.ms + border_correction))
spikes = spiketrain[mask_trig] - t_trig
raster_trig.append(spikes.magnitude)
return raster_trig
def compute_psth_from_raster(self, array_raster_trig, timerange, minimum_spikes=10, border_correction=0.0):
"""
:param array_raster_trig:
:param timerange:
:param minimum_spikes:
:param border_correction:
:return:
"""
out = dict()
if len(array_raster_trig) < 1:
print('PSTH not computed due to lack of spikes.')
out["values"] = np.array([])
out["times"] = []
return out
raster_trig = np.sort(np.hstack(array_raster_trig))
if len(raster_trig) <= minimum_spikes:
print('PSTH not computed due to lack of spikes.')
out["values"] = np.array([])
out["times"] = []
return out
rate_estimate, psth_times = self.calc_rate_estimate(raster_trig, timerange,
sampling_period=self.sampling_period,
kernel_width=self.kernel_width,
border_correction=border_correction)
psth_trig = rate_estimate / float(len(array_raster_trig))
psth_trig = np.array(psth_trig)[:, 0]
return psth_times, psth_trig
@staticmethod
def calc_rate_estimate(spike_times, timerange, sampling_period,
kernel_width, border_correction):
"""
:param spike_times: array of spike times
:param timerange: tuple or list of times between which to calculate PSTH
:param sampling_period: sampling period for calculating PSTH
:param kernel_width: width of kernel (in ms) used for PSTH
:param border_correction: time window around edges to use to negate border effects in PSTH
:return rate_estimate: array containing the values of the rate estimate (in Hz)
:return times: array containing the time scale of the rate estimate (in s)
"""
t_start = timerange[0] - border_correction
t_stop = timerange[1] + border_correction
mask = (spike_times >= t_start) & (spike_times <= t_stop)
spiketrain = spike_times[mask].copy()
if len(spiketrain) < 2:
times = np.linspace(t_start, t_stop, int((t_stop - t_start) * 1000 / sampling_period),
endpoint=False)
rate = np.zeros_like(times)
return rate, times
spike_train = SpikeTrain(spiketrain * pq.ms, t_start=t_start * pq.ms, t_stop=t_stop * pq.ms)
rate_estimate = instantaneous_rate(spike_train, sampling_period=sampling_period * pq.ms,
kernel=GaussianKernel(kernel_width * pq.ms))
rate_estimate = rate_estimate.time_slice(t_start=timerange[0] * pq.ms, t_stop=timerange[1] * pq.ms)
return rate_estimate.rescale('Hz').magnitude, rate_estimate.times.rescale('s').magnitude
def compute_psth(self, spiketrain, trigger, timerange, minimum_spikes, border_correction):
"""
Calculates the peri-stimulus time histogram for the given spiketrain around the given event time stamps.
:param spiketrain: Array of spike times in units of milliseconds
:param trigger: Array of event timestamps in units of milliseconds
:param timerange: List or tuple containing tPre and tPost values with which to calculate the PSTH
:param minimum_spikes: Minimum spikes required in spiketrain (and around trigger) to calculate PSTH
:param border_correction: Time (in milliseconds) to be used to correct for edge effects in PSTH
:return psth_times: Array of times (in seconds)
:return psth_trig: Array of values corresponding to psth_times (in Herz)
"""
if len(spiketrain) < minimum_spikes:
# raise(ValueError, "The spiketrain contains fewer than 2 spikes.")
# print("The spiketrain contains fewer than 2 spikes. Returning empty list.")
return [], []
# choose saccades within the period of activity
trig_unit = trigger[(trigger >= spiketrain[0]) & (trigger <= spiketrain[-1])]
if len(trig_unit) < 1:
# print('-- No neural activity during this block for this neuronID')
return [], []
# extract spike times around saccade and fixation
raster_trig_list = [] # spikes around trig
for i_trig, t_trig in enumerate(trig_unit):
mask_trig = (spiketrain >= (t_trig + timerange[0] - border_correction)) & \
(spiketrain <= (t_trig + timerange[1] + border_correction))
spikes = spiketrain[mask_trig] - t_trig
raster_trig_list.append(spikes)
if len(raster_trig_list) < 1:
return [], []
raster_trig = np.sort(np.hstack(raster_trig_list))
if len(raster_trig) <= minimum_spikes:
return [], []
rate_estimate, psth_times = self.calc_rate_estimate(raster_trig, timerange,
sampling_period=self.sampling_period,
kernel_width=self.kernel_width,
border_correction=border_correction)
if rate_estimate.size == psth_times.size == 0:
return [], []
else:
psth_trig = rate_estimate / float(len(raster_trig_list))
psth_trig = np.array(psth_trig)[:, 0]
return psth_times, psth_trig
def plot_profile(self, x, y, color, unit_id, session, clickable=False):
"""
Plot the rate profile onto the plot widget and also append it to self._profiles.
:param x: x values of data to be plotted
:param y: y values of data to be plotted
:param color: color of the plotted line
:param unit_id: global unit id (row number)
:param session: session id (column number)
:param clickable: toggle response to left mouse-clicks.
:return:
"""
self.rate_profiles.append(self.make_plot(x=x, y=y, color=color,
unit_id=unit_id, session=session,
clickable=clickable))
def do_plot(self, vum, data):
"""
Plots data for every layer and every visible unit.
**Arguments**
*vum* (:class:`src.virtualunitmap.VirtualUnitMap`):
Is needed to get the unit indexes.
*data* (:class:`src.neodata.NeoData`):
Is needed to get the units.
*layers* (list of string):
The layers that are visible.
"""
self.datas = {}
if self.toolbar.activate_button.current_state:
layer = self.toolbar.get_checked_layers()[0]
active = vum.get_active()
self.events = data.get_events_dict()
self.populate_event_list()
for session in range(len(active)):
import torch
from torch import nn
from torch.nn import functional as F
from torch.utils.data import DataLoader
from typing import Optional, Tuple, Dict, Any
import numpy as np
from collections import defaultdict
from mdlearn.utils import PathLike
from mdlearn.nn.utils import Trainer
class LSTM(nn.Module):
"""LSTM model to predict the dynamics for a
time series of feature vectors."""
def __init__(
self,
input_size: int,
hidden_size: Optional[int] = None,
num_layers: int = 1,
bias: bool = True,
dropout: float = 0.0,
bidirectional: bool = False,
):
"""
Parameters
----------
input_size: int
The number of expected features in the input :obj:`x`.
hidden_size: Optional[int], default=None
The number of features in the hidden state h. By default, the
:obj:`hidden_size` will be equal to the :obj:`input_size` in
order to propagate the dynamics.
num_layers: int, default=1
Number of recurrent layers. E.g., setting num_layers=2 would mean
stacking two LSTMs together to form a stacked LSTM, with the second
LSTM taking in outputs of the first LSTM and computing the final
results.
bias: bool, default=True
If False, then the layer does not use bias weights b_ih and b_hh.
Default: True
dropout: float, default=0.0
If non-zero, introduces a Dropout layer on the outputs of each
LSTM layer except the last layer, with dropout probability equal
to dropout.
bidirectional: bool, default=False
If True, becomes a bidirectional LSTM.
"""
super().__init__()
self.num_layers = num_layers
if hidden_size is None:
hidden_size = input_size
self.lstm = nn.LSTM(
input_size,
hidden_size,
num_layers,
bias,
batch_first=True,
dropout=dropout,
bidirectional=bidirectional,
)
# Linear prediction head to map LSTM activation
# function outputs to the correct output range
self.head = nn.Linear(hidden_size, input_size)
def forward(self, x: torch.Tensor) -> torch.Tensor:
"""
Parameters
----------
x : torch.Tensor
Tensor of shape BxNxD for B batches of N examples
by D feature dimensions.
Returns
-------
torch.Tensor
The predicted tensor of shape (B, input_size).
"""
_, (h_n, _) = self.lstm(x) # output, (h_n, c_n)
# Handle bidirectional and num_layers
pred = h_n[self.num_layers - 1, ...]
pred = self.head(pred)
return pred
def mse_loss(
self, y_true: torch.Tensor, y_pred: torch.Tensor, reduction: str = "mean"
) -> torch.Tensor:
"""Compute the MSE loss between :obj:`y_true` and :obj:`y_pred`.
Parameters
----------
y_true : torch.Tensor
The true data.
y_pred : torch.Tensor
The prediction.
reduction : str, default="mean"
The reduction strategy for the F.mse_loss function.
Returns
-------
torch.Tensor
The MSE loss between :obj:`y_true` and :obj:`y_pred`.
"""
return F.mse_loss(y_true, y_pred, reduction=reduction)
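# Minimal usage sketch (illustrative values only; y_true is a hypothetical target tensor):
# model = LSTM(input_size=40)
# x = torch.randn(8, 10, 40) # B=8 windows, N=10 timesteps, D=40 features
# y_pred = model(x) # shape (8, 40): one predicted feature vector per window
# loss = model.mse_loss(y_true, y_pred) # y_true would hold the next-step targets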
class LSTMTrainer(Trainer):
"""Trainer class to fit an LSTM model to a time series of feature vectors."""
# TODO: Add example usage in documentation.
def __init__(
self,
input_size: int,
hidden_size: Optional[int] = None,
num_layers: int = 1,
bias: bool = True,
dropout: float = 0.0,
bidirectional: bool = False,
window_size: int = 10,
horizon: int = 1,
seed: int = 42,
in_gpu_memory: bool = False,
num_data_workers: int = 0,
prefetch_factor: int = 2,
split_pct: float = 0.8,
split_method: str = "partition",
batch_size: int = 128,
shuffle: bool = True,
device: str = "cpu",
optimizer_name: str = "RMSprop",
optimizer_hparams: Dict[str, Any] = {"lr": 0.001, "weight_decay": 0.00001},
scheduler_name: Optional[str] = None,
scheduler_hparams: Dict[str, Any] = {},
epochs: int = 100,
verbose: bool = False,
clip_grad_max_norm: float = 10.0,
checkpoint_log_every: int = 10,
plot_log_every: int = 10,
plot_n_samples: int = 10000,
plot_method: Optional[str] = "TSNE",
train_subsample_pct: float = 1.0,
valid_subsample_pct: float = 1.0,
use_wandb: bool = False,
):
"""
Parameters
----------
input_size: int
The number of expected features in the input x.
hidden_size: Optional[int], default=None
The number of features in the hidden state h. By default, the
:obj:`hidden_size` will be equal to the :obj:`input_size` in
order to propagate the dynamics.
num_layers: int, default=1
Number of recurrent layers. E.g., setting num_layers=2 would mean
stacking two LSTMs together to form a stacked LSTM, with the second
LSTM taking in outputs of the first LSTM and computing the final
results.
bias: bool, default=True
If False, then the layer does not use bias weights b_ih and b_hh.
Default: True
dropout: float, default=0.0
If non-zero, introduces a Dropout layer on the outputs of each
LSTM layer except the last layer, with dropout probability equal
to dropout.
bidirectional: bool, default=False
If True, becomes a bidirectional LSTM.
window_size : int, default=10
Number of timesteps considered for prediction.
horizon : int, default=1
How many time steps to predict ahead.
seed : int, default=42
Random seed for torch, numpy, and random module.
in_gpu_memory : bool, default=False
If True, will pre-load the entire :obj:`data` array to GPU memory.
num_data_workers : int, default=0
How many subprocesses to use for data loading. 0 means that
the data will be loaded in the main process.
prefetch_factor : int, default=2
Number of samples loaded in advance by each worker. 2 means there will be a
total of 2 * num_workers samples prefetched across all workers.
split_pct : float, default=0.8
Proportion of data set to use for training. The rest goes to validation.
split_method : str, default="partition"
Method to split the data. For random split use "random", for a simple
partition, use "partition".
batch_size : int, default=128
Mini-batch size for training.
shuffle : bool, default=True
Whether to shuffle training data or not.
device : str, default="cpu"
Specify training hardware either :obj:`cpu` or :obj:`cuda` for GPU devices.
optimizer_name : str, default="RMSprop"
Name of the PyTorch optimizer to use. Matches PyTorch optimizer class name.
optimizer_hparams : Dict[str, Any], default={"lr": 0.001, "weight_decay": 0.00001}
Dictionary of hyperparameters to pass to the chosen PyTorch optimizer.
scheduler_name : Optional[str], default=None
Name of the PyTorch learning rate scheduler to use.
Matches PyTorch optimizer class name.
scheduler_hparams : Dict[str, Any], default={}
Dictionary of hyperparameters to pass to the chosen PyTorch learning rate scheduler.
epochs : int, default=100
Number of epochs to train for.
verbose : bool, default=False
If True, will print training and validation loss at each epoch.
clip_grad_max_norm : float, default=10.0
Max norm of the gradients for gradient clipping for more information
see: :obj:`torch.nn.utils.clip_grad_norm_` documentation.
checkpoint_log_every : int, default=10
Epoch interval to log a checkpoint file containing the model
weights, optimizer, and scheduler parameters.
plot_log_every : int, default=10
Epoch interval to log a visualization plot of the latent space.
plot_n_samples : int, default=10000
Number of validation samples to use for plotting.
plot_method : Optional[str], default="TSNE"
The method for visualizing the latent space or if visualization
should not be run, set :obj:`plot_method=None`. If using :obj:`"TSNE"`,
it will attempt to use the RAPIDS.ai GPU implementation and
will fallback to the sklearn CPU implementation if RAPIDS.ai
is unavailable.
train_subsample_pct : float, default=1.0
Percentage of training data to use during hyperparameter sweeps.
valid_subsample_pct : float, default=1.0
Percentage of validation data to use during hyperparameter sweeps.
use_wandb : bool, default=False
If True, will log results to wandb.
Raises
------
ValueError
:obj:`split_pct` should be between 0 and 1.
ValueError
:obj:`train_subsample_pct` should be between 0 and 1.
ValueError
:obj:`valid_subsample_pct` should be between 0 and 1.
ValueError
Specified :obj:`device` as :obj:`cuda`, but it is unavailable.
"""
super().__init__(
seed,
in_gpu_memory,
num_data_workers,
prefetch_factor,
split_pct,
split_method,
batch_size,
shuffle,
device,
epochs,
verbose,
clip_grad_max_norm,
checkpoint_log_every,
plot_log_every,
plot_n_samples,
plot_method,
train_subsample_pct,
valid_subsample_pct,
use_wandb,
)
self.window_size = window_size
self.horizon = horizon
self.optimizer_name = optimizer_name
self.optimizer_hparams = optimizer_hparams
self.scheduler_name = scheduler_name
self.scheduler_hparams = scheduler_hparams
from mdlearn.utils import get_torch_optimizer, get_torch_scheduler
# Set random seeds
self._set_seed()
self.model = LSTM(
input_size, hidden_size, num_layers, bias, dropout, bidirectional
).to(self.device)
if self.use_wandb:
import wandb
wandb.watch(self.model)
# Setup optimizer
self.optimizer = get_torch_optimizer(
self.optimizer_name, self.optimizer_hparams, self.model.parameters()
)
# Setup learning rate scheduler
self.scheduler = get_torch_scheduler(
self.scheduler_name, self.scheduler_hparams, self.optimizer
)
# Log the train and validation loss each epoch
self.loss_curve_ = {"train": [], "validation": []}
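# Minimal usage sketch (hypothetical shapes and paths):
# trainer = LSTMTrainer(input_size=40, window_size=10, horizon=1, epochs=10)
# X = np.random.rand(5000, 40).astype(np.float32) # 5000 frames of 40-D features
# trainer.fit(X, output_path="./runs")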
def fit(
self,
X: np.ndarray,
scalars: Dict[str, np.ndarray] = {},
output_path: PathLike = "./",
checkpoint: Optional[PathLike] = None,
):
"""Trains the LSTM on the input data :obj:`X`.
Parameters
----------
X : np.ndarray
Input features vectors of shape (N, D) where N is the number
of data examples, and D is the dimension of the feature vector.
scalars : Dict[str, np.ndarray], default={}
Dictionary of scalar arrays. For instance, the root mean squared
deviation (RMSD) for each feature vector
"""
DC2 Object Catalog Reader
"""
import os
import re
import warnings
import itertools
import shutil
import numpy as np
import pandas as pd
import yaml
from GCR import BaseGenericCatalog
from .dc2_dm_catalog import DC2DMTractCatalog
from .dc2_dm_catalog import convert_flux_to_mag, convert_flux_to_nanoJansky, convert_nanoJansky_to_mag, convert_flux_err_to_mag_err
__all__ = ['DC2ObjectCatalog', 'DC2ObjectParquetCatalog']
FILE_DIR = os.path.dirname(os.path.abspath(__file__))
FILE_PATTERN = r'(?:merged|object)_tract_\d+\.hdf5$'
GROUP_PATTERN = r'(?:coadd|object)_\d+_\d\d$'
SCHEMA_FILENAME = 'schema.yaml'
META_PATH = os.path.join(FILE_DIR, 'catalog_configs/_dc2_object_meta.yaml')
def convert_dm_ref_zp_flux_to_mag(flux, dm_ref_zp=27):
"""Convert the listed DM coadd-reported flux values to AB mag
Eventually this function should be a no-op. But presently
The processing of Run 1.1, 1.2 to date (2019-02-17) have
calibrated flux values with respect to a reference ZP=27 mag
The reference catalog is on an AB system.
Re-check dm_ref_zp if calibration is updated.
Eventually we will get nJy from the final calibrated DRP processing.
"""
flux_nJ = convert_dm_ref_zp_flux_to_nanoJansky(flux, dm_ref_zp=dm_ref_zp)
mag_AB = convert_nanoJansky_to_mag(flux_nJ)
return mag_AB
def convert_dm_ref_zp_flux_to_nanoJansky(flux, dm_ref_zp=27):
"""Convert the listed DM coadd-reported flux values to nanoJansky.
Eventually this function should be a no-op. But presently
The processing of Run 1.1, 1.2 to date (2019-02-17) have
calibrated flux values with respect to a reference ZP=27 mag
The reference catalog is on an AB system.
Re-check dm_ref_zp if calibration is updated.
Eventually we will get nJy from the final calibrated DRP processing.
"""
AB_mag_zp_wrt_Jansky = 8.90 # Definition of AB
AB_mag_zp_wrt_nanoJansky = 2.5 * 9 + AB_mag_zp_wrt_Jansky # 9 is from nano=10**(-9)
calibrated_flux_to_nanoJansky = 10**((AB_mag_zp_wrt_nanoJansky - dm_ref_zp)/2.5)
return calibrated_flux_to_nanoJansky * flux
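# Worked example (hypothetical flux value): a coadd flux of 1000 counts at the
# reference ZP of 27 corresponds to 27 - 2.5*log10(1000) = 19.5 AB mag, or
# 10**((31.4 - 27)/2.5) * 1000, roughly 5.75e4 nJy, so convert_dm_ref_zp_flux_to_mag(1000.)
# should give about 19.5.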
def create_basic_flag_mask(*flags):
"""Generate a mask for a set of flags
For each item the mask will be true if and only if all flags are false
Args:
*flags (ndarray): Variable number of arrays with booleans or equivalent
Returns:
The combined mask array
"""
out = np.ones(len(flags[0]), dtype=bool)
for flag in flags:
out &= (~flag)
return out
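# Example: create_basic_flag_mask(np.array([True, False]), np.array([False, False]))
# returns array([False, True]) -- an object passes only if every flag is False.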
class TableWrapper():
"""Wrapper class for pandas HDF5 storer
Provides a unified API to access both fixed and table formats.
Takes a file_handle to the HDF5 file
An HDF group key
And a schema to specify dtypes and default values for missing columns.
"""
_default_values = {'i': -1, 'b': False, 'U': ''}
def __init__(self, file_handle, key, schema=None):
if not file_handle.is_open:
raise ValueError('file handle has been closed!')
self.storer = file_handle.get_storer(key)
self.is_table = self.storer.is_table
if not self.is_table and not self.storer.format_type == 'fixed':
raise ValueError('storer format type not supported!')
self._schema = {} if schema is None else dict(schema)
self._native_schema = None
self._len = None
self._cache = None
self._constant_arrays = dict()
@property
def native_schema(self):
"""Get the native schema from either 'fixed' or 'table' formatted HDF5 files."""
if self._native_schema is None:
self._native_schema = {}
if self.is_table:
for i in itertools.count():
try:
dtype = getattr(self.storer.table.attrs, 'values_block_{}_dtype'.format(i))
except AttributeError:
break
for col in getattr(self.storer.table.attrs, 'values_block_{}_kind'.format(i)):
self._native_schema[col] = {'dtype': dtype}
else:
for i in range(self.storer.nblocks):
dtype = getattr(self.storer.group, 'block{}_values'.format(i)).dtype.name
for col in getattr(self.storer.group, 'block{}_items'.format(i)):
self._native_schema[col.decode()] = {'dtype': dtype}
return self._native_schema
@property
def columns(self):
"""Get columns from either 'fixed' or 'table' formatted HDF5 files."""
return set(self.native_schema)
def __len__(self):
if self._len is None:
if self.is_table:
self._len = self.storer.nrows
else:
self._len = self.storer.group.axis1.nrows
return self._len
def __contains__(self, item):
return item in self.native_schema
def __getitem__(self, key):
"""Return the values of the column specified by 'key'
Uses cached values, if available.
"""
if self._cache is None:
self._cache = self.storer.read()
try:
return self._cache[key].values
except KeyError:
return self._get_constant_array(key)
get = __getitem__
@classmethod
def _get_default_value(cls, dtype, key=None): # pylint: disable=W0613
return cls._default_values.get(np.dtype(dtype).kind, np.nan)
def _get_constant_array(self, key):
"""
Get a constant array for a column; `key` should be the column name.
Find dtype and default value in `self._schema`.
If not found, default to np.float64 and np.nan.
"""
schema_this = self._schema.get(key, {})
dtype = schema_this.get('dtype', np.float64)
default = schema_this.get(
'default',
self._get_default_value(dtype, key)
)
return self._generate_constant_array(dtype=dtype, value=default)
def _generate_constant_array(self, dtype, value):
"""
Actually generate a constant array according to `dtype` and `value`
"""
dtype = np.dtype(dtype)
# here `key` is used to cache the constant array
# has nothing to do with column name
key = (dtype.str, value)
if key not in self._constant_arrays:
self._constant_arrays[key] = np.asarray(np.repeat(value, len(self)), dtype=dtype)
self._constant_arrays[key].setflags(write=False)
return self._constant_arrays[key]
def clear_cache(self):
"""
clear cached data
"""
self._native_schema = self._len = self._cache = None
self._constant_arrays.clear()
class ObjectTableWrapper(TableWrapper):
"""Same as TableWrapper but add tract and patch info"""
def __init__(self, file_handle, key, schema=None):
key_items = key.split('_')
self.tract = int(key_items[1])
self.patch = ','.join(key_items[2])
super(ObjectTableWrapper, self).__init__(file_handle, key, schema)
# Add the schema info for tract, path
# These values will be read by `get_constant_array`
self._schema['tract'] = {'dtype': 'int64', 'default': self.tract}
self._schema['patch'] = {'dtype': '<U', 'default': self.patch}
@classmethod
def _get_default_value(cls, dtype, key=None):
if np.dtype(dtype).kind == 'b' and key and (
key.endswith('_flag_bad') or key.endswith('_flag_noGoodPixels')):
return True
return super()._get_default_value(dtype, key)
@property
def tract_and_patch(self):
"""Return a dict of the tract and patch info."""
return {'tract': self.tract, 'patch': self.patch}
class DC2ObjectCatalog(BaseGenericCatalog):
r"""DC2 Object Catalog reader
Parameters
----------
base_dir (str): Directory of data files being served, required
filename_pattern (str): The optional regex pattern of served data files
groupname_pattern (str): The optional regex pattern of groups in data files
schema_filename (str): The optional location of the schema file
Relative to base_dir, unless specified as absolute path.
pixel_scale (float): scale to convert pixel to arcsec (default: 0.2)
use_cache (bool): Whether or not to cache read data in memory
is_dpdd (bool): Whether or not the files are already in DPDD format
Attributes
----------
base_dir (str): The directory of data files being served
available_tracts (list): Sorted list of available tracts
available_tracts_and_patches (list): Available tracts and patches as dict objects
Notes
-----
The initialization determines the version of the catalog based on the existence
of certain columns and sets the version accordingly.
This version setting should be improved and standardized as we work toward
providing the version in the catalog files produced by the scripts in `DC2-production`.
"""
# pylint: disable=too-many-instance-attributes
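# A minimal usage sketch (not taken from the source; the path and the GCR-style
# query below are illustrative assumptions):
# >>> cat = DC2ObjectCatalog(base_dir='/path/to/object_catalogs')
# >>> data = cat.get_quantities(['ra', 'dec'], native_filters=['tract == 4850'])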
_native_filter_quantities = {'tract', 'patch'}
def _subclass_init(self, **kwargs):
self.base_dir = kwargs['base_dir']
self._filename_re = re.compile(kwargs.get('filename_pattern', FILE_PATTERN))
self._groupname_re = re.compile(kwargs.get('groupname_pattern', GROUP_PATTERN))
_schema_filename = kwargs.get('schema_filename', SCHEMA_FILENAME)
# If _schema_filename is an absolute path, os.path.join will just return _schema_filename
self._schema_path = os.path.join(self.base_dir, _schema_filename)
self.pixel_scale = float(kwargs.get('pixel_scale', 0.2))
self.use_cache = bool(kwargs.get('use_cache', True))
if not os.path.isdir(self.base_dir):
raise ValueError('`base_dir` {} is not a valid directory'.format(self.base_dir))
self._schema = None
if self._schema_path and os.path.isfile(self._schema_path):
self._schema = self._generate_schema_from_yaml(self._schema_path)
self._file_handles = dict()
self._datasets = self._generate_datasets() # uses self._schema when available
if not self._datasets:
err_msg = 'No catalogs were found in `base_dir` {}'
raise RuntimeError(err_msg.format(self.base_dir))
if not self._schema:
warnings.warn('Falling back to reading all datafiles for column names')
self._schema = self._generate_schema_from_datafiles(self._datasets)
if kwargs.get('is_dpdd'):
self._quantity_modifiers = {col: None for col in self._schema}
bands = [col[-1] for col in self._schema if len(col) == 5 and col.startswith('mag_')]
else:
# A slightly crude way of checking for version of schema to have modelfit mag
# A future improvement will be to explicitly store version information in the datasets
# and just rely on that versioning.
has_modelfit_mag = any(col.endswith('_modelfit_mag') for col in self._schema)
if any(col.endswith('_fluxSigma') for col in self._schema):
dm_schema_version = 1
elif any(col.endswith('_fluxErr') for col in self._schema):
dm_schema_version = 2
elif any(col == 'base_Blendedness_abs_instFlux' for col in self._schema):
dm_schema_version = 3
else:
dm_schema_version = 4
bands = [col[0] for col in self._schema if len(col) == 5 and col.endswith('_mag')]
self._quantity_modifiers = self._generate_modifiers(
self.pixel_scale, bands, has_modelfit_mag, dm_schema_version)
self._quantity_info_dict = self._generate_info_dict(META_PATH, bands)
self._len = None
def __del__(self):
self.close_all_file_handles()
@staticmethod
def _generate_modifiers(pixel_scale=0.2, bands='ugrizy',
has_modelfit_mag=True, dm_schema_version=4):
"""Creates a dictionary relating native and homogenized column names
Args:
pixel_scale (float): Scale of pixels in coadd images
bands (list): List of photometric bands as strings
has_modelfit_mag (bool): Whether or not pre-calculated model fit magnitudes are present
dm_schema_version (int): DM schema version (1, 2, 3, 4)
Returns:
A dictionary of the form {<homogenized name>: <native name>, ...}
"""
if dm_schema_version not in (1, 2, 3, 4):
raise ValueError('Only supports dm_schema_version == 1, 2, 3, 4')
FLUX = 'flux' if dm_schema_version <= 2 else 'instFlux'
ERR = 'Sigma' if dm_schema_version <= 1 else 'Err'
BLENDEDNESS_SUFFIX = '_%s' % FLUX if dm_schema_version <= 3 else ''
modifiers = {
'objectId': 'id',
'parentObjectId': 'parent',
'ra': (np.rad2deg, 'coord_ra'),
'dec': (np.rad2deg, 'coord_dec'),
'x': 'base_SdssCentroid_x',
'y': 'base_SdssCentroid_y',
'xErr': 'base_SdssCentroid_x{}'.format(ERR),
'yErr': 'base_SdssCentroid_y{}'.format(ERR),
'xy_flag': 'base_SdssCentroid_flag',
'psNdata': 'base_PsfFlux_area',
'extendedness': 'base_ClassificationExtendedness_value',
'blendedness': 'base_Blendedness_abs{}'.format(BLENDEDNESS_SUFFIX),
}
not_good_flags = (
locations along the network. Default is ``False``.
routes : dict
See ``paths`` from ``spaghetti.Network.shortest_paths``.
Default is ``None``.
id_col : str
``geopandas.GeoDataFrame`` column name for IDs. Default is ``"id"``.
When extracting routes this creates an (origin, destination) tuple.
geom_col : str
``geopandas.GeoDataFrame`` column name for geometry. Default is
``"geometry"``.
Raises
------
KeyError
In order to extract a ``network.PointPattern`` it must already
be a part of the network object. This exception is raised
when a ``network.PointPattern`` is being extracted that does
not exist within the network object.
Returns
-------
points : geopandas.GeoDataFrame
Network point elements (either vertices or ``network.PointPattern``
points) as a ``geopandas.GeoDataFrame`` of ``shapely.geometry.Point``
objects with an ``"id"`` column and a ``"geometry"`` column.
If the network object has a ``network_component_vertices`` attribute,
then component labels are also added in a column.
lines : geopandas.GeoDataFrame
Network arc elements as a ``geopandas.GeoDataFrame`` of
``shapely.geometry.LineString`` objects with an ``"id"``
column and ``"geometry"`` column. If the network object has
a ``network_component_labels`` attribute, then component labels
are also added in a column.
paths : geopandas.GeoDataFrame
Shortest path routes along network arc elements as a
``geopandas.GeoDataFrame`` of ``shapely.geometry.LineString``
objects with an ``"id"`` (see ``spaghetti.Network.shortest_paths()``)
column and ``"geometry"`` column.
Notes
-----
When both network vertices and arcs are desired, the variable
declaration must be in the order: <vertices>, <arcs>.
This function requires ``geopandas``.
See also
--------
geopandas.GeoDataFrame
Examples
--------
Instantiate a network object.
>>> import spaghetti
>>> from libpysal import examples
>>> ntw = spaghetti.Network(examples.get_path("streets.shp"))
Extract the network elements (vertices and arcs) as
``geopandas.GeoDataFrame`` objects.
>>> vertices_df, arcs_df = spaghetti.element_as_gdf(
... ntw, vertices=True, arcs=True
... )
Examine the first vertex. It is a member of the component labeled ``0``.
>>> vertices_df.loc[0]
id 0
geometry POINT (728368.04762 877125.89535)
comp_label 0
Name: 0, dtype: object
Calculate the total length of the network.
>>> arcs_df.geometry.length.sum()
104414.09200823458
"""
# shortest path routes between observations
if routes:
paths = util._routes_as_gdf(routes, id_col, geom_col)
return paths
# a vertices placeholder is needed to create network segment LineStrings
# even if only network arcs are desired
vertices_for_arcs = False
if arcs and not vertices:
vertices_for_arcs = True
# vertices/nodes/points
if vertices or vertices_for_arcs or pp_name:
points = util._points_as_gdf(
net,
vertices,
vertices_for_arcs,
pp_name,
snapped,
id_col=id_col,
geom_col=geom_col,
)
# return points geodataframe if arcs not specified or
# if extracting `PointPattern` points
if not arcs or pp_name:
return points
# arcs
arcs = util._arcs_as_gdf(net, points, id_col=id_col, geom_col=geom_col)
if vertices_for_arcs:
return arcs
else:
return points, arcs
def regular_lattice(bounds, nh, nv=None, exterior=False):
"""Generate a regular lattice of line segments
(`libpysal.cg.Chain objects <https://pysal.org/libpysal/generated/libpysal.cg.Chain.html#libpysal.cg.Chain>`_).
Parameters
----------
bounds : {tuple, list}
Area bounds in the form - <minx,miny,maxx,maxy>.
nh : int
The number of internal horizontal lines of the lattice.
nv : int
The number of internal vertical lines of the lattice. Defaults to
``nh`` if left as None.
exterior : bool
Flag for including the outer bounding box segments. Default is False.
Returns
-------
lattice : list
The ``libpysal.cg.Chain`` objects forming a regular lattice.
Notes
-----
The ``nh`` and ``nv`` parameters do not include the external
line segments. For example, setting ``nh=3, nv=2, exterior=True``
will result in 5 horizontal line sets and 4 vertical line sets.
Examples
--------
Create a 5x5 regular lattice with an exterior
>>> import spaghetti
>>> lattice = spaghetti.regular_lattice((0,0,4,4), 3, exterior=True)
>>> lattice[0].vertices
[(0.0, 0.0), (1.0, 0.0)]
Create a 5x5 regular lattice without an exterior
>>> lattice = spaghetti.regular_lattice((0,0,5,5), 3, exterior=False)
>>> lattice[-1].vertices
[(3.75, 3.75), (3.75, 5.0)]
Create a 7x9 regular lattice with an exterior from the
bounds of ``streets.shp``.
>>> import libpysal
>>> path = libpysal.examples.get_path("streets.shp")
>>> shp = libpysal.io.open(path)
>>> lattice = spaghetti.regular_lattice(shp.bbox, 5, nv=7, exterior=True)
>>> lattice[0].vertices
[(723414.3683108028, 875929.0396895551), (724286.1381211297, 875929.0396895551)]
"""
# check for bounds validity
if len(bounds) != 4:
bounds_len = len(bounds)
msg = "The 'bounds' parameter is %s elements " % bounds_len
msg += "but should be exactly 4 - <minx,miny,maxx,maxy>."
raise RuntimeError(msg)
# default the number of vertical lines to the number of horizontal lines
if not nv:
nv = nh
try:
nh, nv = int(nh), int(nv)
except TypeError:
nlines_types = type(nh), type(nv)
msg = "The 'nh' and 'nv' parameters (%s, %s) " % nlines_types
msg += "could not be converted to integers."
raise TypeError(msg)
# bounding box line lengths
len_h, len_v = bounds[2] - bounds[0], bounds[3] - bounds[1]
# horizontal and vertical increments
incr_h, incr_v = len_h / float(nh + 1), len_v / float(nv + 1)
# define the horizontal and vertical space
space_h = [incr_h * slot for slot in range(nv + 2)]
space_v = [incr_v * slot for slot in range(nh + 2)]
# create vertical and horizontal lines
lines_h = util.build_chains(space_h, space_v, exterior, bounds)
lines_v = util.build_chains(space_h, space_v, exterior, bounds, h=False)
# combine into one list
lattice = lines_h + lines_v
return lattice
class PointPattern:
"""A stub point pattern class used to store a point pattern.
Note from the original author of ``pysal.network``:
This class is monkey patched with network specific attributes when the
points are snapped to a network. In the future this class may be
replaced with a generic point pattern class.
Parameters
----------
in_data : {str, list, tuple, libpysal.cg.Point, geopandas.GeoDataFrame}
The input geographic data. Either (1) a path to a shapefile
(str); (2) an iterable containing ``libpysal.cg.Point``
objects; (3) a single ``libpysal.cg.Point``; or
(4) a ``geopandas.GeoDataFrame``.
idvariable : str
Field in the shapefile to use as an ID variable.
attribute : bool
A flag to indicate whether all attributes are tagged to this
class (``True``) or excluded (``False``). Default is ``False``.
Attributes
----------
points : dict
Keys are the point IDs (int). Values are the :math:`(x,y)`
coordinates (tuple).
npoints : int
The number of points.
obs_to_arc : dict
Keys are arc IDs (tuple). Values are snapped point information
(``dict``). Within the snapped point information (``dict``)
keys are observation IDs (``int``), and values are snapped
coordinates.
obs_to_vertex : list
List of incident network vertices to snapped observation points
converted from a ``default_dict``. Originally in the form of
paired left/right nearest network vertices {netvtx1: obs_id1,
netvtx2: obs_id1, netvtx1: obs_id2... netvtx1: obs_idn}, then
simplified to a list in the form
[netvtx1, netvtx2, netvtx1, netvtx2, ...].
dist_to_vertex : dict
Keys are observations IDs (``int``). Values are distance lookup
(``dict``). Within distance lookup (``dict``) keys are the two
incident vertices of the arc and values are the distances to each of
those vertices.
snapped_coordinates : dict
Keys are the point IDs (int). Values are the snapped :math:`(x,y)`
coordinates (tuple).
snap_dist : bool
Flag as ``True`` to include the distance from the original
location to the snapped location along the network. Default
is ``False``.
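Examples
--------
A minimal instantiation sketch (the coordinates below are made up for
illustration).
>>> from libpysal import cg
>>> pp = PointPattern([cg.Point((0.0, 0.0)), cg.Point((1.0, 1.0))])
>>> pp.npoints
2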
"""
def __init__(self, in_data=None, idvariable=None, attribute=False):
# initialize points dictionary and counter
self.points = {}
self.npoints = 0
# determine input point data type
in_dtype = str(type(in_data)).split("'")[1]
# flag for points from a shapefile
from_shp = False
# flag for points as libpysal.cg.Point objects
is_libpysal_points = False
supported_iterables = ["list", "tuple"]
# type error message
msg = "'%s' not supported for point pattern instantiation."
# set appropriate geometries
if in_dtype == "str":
from_shp = True
elif in_dtype in supported_iterables:
dtype = str(type(in_data[0])).split("'")[1]
if dtype == "libpysal.cg.shapes.Point":
is_libpysal_points = True
else:
raise TypeError(msg % dtype)
elif in_dtype == "libpysal.cg.shapes.Point":
in_data = [in_data]
is_libpysal_points = True
elif in_dtype == "geopandas.geodataframe.GeoDataFrame":
from_shp = False
else:
raise TypeError(msg % in_dtype)
# either set native point ID from dataset or create new IDs
if idvariable and not is_libpysal_points:
ids = weights.util.get_ids(in_data, idvariable)
else:
ids = None
# extract the point geometries
if not is_libpysal_points:
if from_shp:
pts = open(in_data)
else:
pts_objs = list(in_data.geometry)
pts = [cg.shapes.Point((p.x, p.y)) for p in pts_objs]
else:
pts = in_data
# fetch attributes if requested
if attribute and not is_libpysal_points:
# open the database file if data is from shapefile
if from_shp:
dbname = os.path.splitext(in_data)[0] + ".dbf"
db = open(dbname)
# if data is from a GeoDataFrame, drop the geometry column
# and declare attribute values as a list of lists
else:
db
import yara
from cli import check_paths, validate_parsers, Parser
parser_entries = get_parser_entries()
parser_objs = {}
for parser_name, parser_details in parser_entries.items():
rule_source_paths = []
# if tags are present then get tag rule paths
if tags and 'tag' in parser_details['selector']:
rule_source_paths = parser_details['selector']['tag']
elif not tags and 'yara_rule' in parser_details['selector']:
rule_source_paths = parser_details['selector']['yara_rule']
if not check_paths(rule_source_paths):
continue
validated_parsers = validate_parsers(parser_details['parser'])
compiled_rules = []
for rule_source_path in rule_source_paths:
abs_path = os.path.join(ROOT_DIR, rule_source_path)
if tags:
rule = yara.compile(filepath=abs_path, externals=tags)
else:
rule = yara.compile(filepath=abs_path)
compiled_rules.append(rule)
parser_objs[parser_name] = Parser(
name=parser_name,
parser_list=validated_parsers,
compiled_rules=compiled_rules,
classification=parser_details['classification'],
malware=parser_details['malware'],
malware_types=parser_details['malware_type'],
mitre_group=parser_details['mitre_group'],
mitre_att=parser_details['mitre_att'],
category=parser_details['category'],
run_on=parser_details['run_on']
)
return parser_objs
def get_tags():
from assemblyline.odm.models.tagging import Tagging
return {f'al_{x.replace(".", "_")}': "" for x in Tagging.flat_fields().keys()}
def get_new_tags():
request_task_tags = {"a": "b"}
tags = {f"al_{k.replace('.', '_')}": i for k, i in request_task_tags.items()}
newtags = {}
# yara externals must be dicts whose key/value pairs are strings
for k, v in tags.items():
key = f"al_{k.replace('.', '_')}"
for i in range(len(v)):
if not isinstance(v[i], str):
v[i] = str(v[i])
value = " | ".join(v)
newtags[key] = value
return newtags
class TestCLI:
@staticmethod
@pytest.mark.parametrize("parser_list",
get_validate_parser_inputs()
)
def test_validate_parsers(parser_list):
from cli import validate_parsers
mwcp_key = "MWCP"
incorrect_key = "incorrect"
correct_parser_set = set()
incorrect_parser_set = set()
for parser in parser_list:
if mwcp_key in parser:
correct_parser_set.update(parser[mwcp_key])
else:
incorrect_parser_set.update(parser[incorrect_key])
correct_parser_list = list(correct_parser_set)
incorrect_parser_list = list(incorrect_parser_set)
if correct_parser_list:
test_parser_list = validate_parsers(parser_list)
assert test_parser_list == correct_parser_list
if incorrect_parser_list:
with pytest.raises(NameError):
validate_parsers(parser_list)
@staticmethod
@pytest.mark.parametrize("paths",
[
[],
[""],
["fake_path"],
['./tag_rules/emotet.rule']
]
)
def test_check_paths(paths):
from cli import check_paths
if not paths:
assert not check_paths(paths)
for path in paths:
abs_file_path = os.path.join(ROOT_DIR, path)
if not path:
with pytest.raises(Exception):
check_paths(paths)
if not os.path.isfile(abs_file_path):
with pytest.raises(Exception):
check_paths(paths)
@staticmethod
@pytest.mark.parametrize("tags",
[
{},
get_tags()
]
)
def test_initialize_parser_objs(tags):
from cli import initialize_parser_objs
correct_parser_objs = create_correct_parser_objs(tags)
test_parser_objs = initialize_parser_objs(tags)
assert test_parser_objs.keys() == correct_parser_objs.keys()
for key in correct_parser_objs.keys():
assert test_parser_objs[key] == correct_parser_objs[key]
@staticmethod
def test_validate_parser_config():
from cli import validate_parser_config, MWCP_PARSER_PATHS, MWCP_PARSER_CONFIG_PATH, MWCP_PARSERS_DIR_PATH
import yaml
import filecmp
# correct_parser_config_validation()
yaml_parsers = {}
# find name of parser class
for parser in MWCP_PARSER_PATHS:
file = open(parser, "r")
for line in file:
if line.partition("class ")[2].partition("(Parser):")[0]:
parser_class = line.partition("class ")[2].partition("(Parser):")[0]
entry = {
"description": f"{parser.stem} Parser",
"author": "Not Found",
"parsers": [f".{parser_class}"]
}
yaml_parsers[parser.stem] = entry
file.close()
parsers_in_config = []
# check that all parsers in dir are present in mwcp config
test_mwcp_parser_config = os.path.join(MWCP_PARSERS_DIR_PATH, "test_parser_config.yaml")
with open(test_mwcp_parser_config, "w+", encoding='utf-8') as f:
for entry, value in yaml_parsers.items():
parsers_in_config.append(entry)
p = {entry: value}
yaml.dump(p, f)
f.close()
if not os.path.exists(test_mwcp_parser_config):
assert False
validate_parser_config()
assert filecmp.cmp(test_mwcp_parser_config, MWCP_PARSER_CONFIG_PATH, shallow=False)
os.remove(test_mwcp_parser_config)
with pytest.raises(Exception):
parsers_in_config.append('apythonfileinmwcp_parsers')
assert filecmp.cmp(test_mwcp_parser_config, MWCP_PARSER_CONFIG_PATH, shallow=False)
@staticmethod
@pytest.mark.parametrize("f_path",
yield_sample_file_paths()
)
def test_run(f_path, parsers):
# TODO: need way to simulate actual malware so that parsers get matched
from cli import run
import mwcp
correct_outputs = {}
correct_reports = []
correct_file_parsers = parsers[0]
for parser in correct_file_parsers:
correct_report = mwcp.run(parser, file_path=f_path)
if correct_report.metadata:
correct_outputs[parser] = correct_report.metadata
correct_reports.append(correct_report)
test_outputs, test_reports = run(correct_file_parsers, f_path)
assert test_outputs == correct_outputs
@staticmethod
@pytest.mark.parametrize("parsers",
[
set(),
{"item"}
]
)
def test_check_names(parsers):
from cli import MWCP_PARSER_PATHS, check_names
mwcp_parsers = set()
for file in MWCP_PARSER_PATHS:
mwcp_parsers.add(file.stem)
diff = parsers - mwcp_parsers
if diff:
with pytest.raises(Exception):
check_names(parsers)
@staticmethod
@pytest.mark.parametrize("file_path",
yield_sample_file_paths()
)
def test_deduplicate(file_path, parsers):
# TODO: this method needs a lot of work, specifically we need file paths for samples that would hit
from cli import deduplicate, validate_parsers, check_names
correct_parser_entries = get_parser_entries()
correct_file_parsers, correct_tag_parsers = parsers
super_parser_list = []
and_malware = {}
for correct_parser_key, correct_parser_value in correct_parser_entries.items():
correct_parser_selector = correct_parser_value['selector']
if 'wildcard' in correct_parser_selector:
wildcard_parsers = validate_parsers(correct_parser_value['parser'])
super_parser_list.extend(wildcard_parsers)
if 'AND' in correct_parser_value['run_on']: # everything else is OR by default
if 'tag' in correct_parser_selector and 'yara_rule' in correct_parser_selector:
# then match must exist for some parser for both tag and file
malware_name = correct_parser_value['malware']
and_malware[malware_name] = correct_parser_key
else:
raise Exception("AND cannot be specified without both tag and file yara rules")
# for malware, top in and_malware.items():
# file_rules = correct_file_parsers[top].compiled_rules
# tag_rules = correct_tag_parsers[top].compiled_rules
# TODO: figure out how to simulate all_rules_match since we can't access it here
# file_bool = all_rules_match(file_rules)
# tag_bool = all_rules_match(tag_rules)
# if file_bool and tag_bool:
# print("both file and tag rules have match")
# else:
# print('tag or file rule did not match, excluding...')
# malware_to_parsers = correct_file_parsers[top].parser_list
# super_parser_list = [x for x in super_parser_list if x not in malware_to_parsers]
super_parser_list = [i[0].upper() + i[1:] for i in super_parser_list]
super_parser_list_set = set(super_parser_list)
check_names(super_parser_list_set)
correct_super_parser_set_list = list(super_parser_list_set)
newtags = get_new_tags()
test_super_parser_set_list = deduplicate(correct_file_parsers, correct_tag_parsers, file_path, newtags)
assert test_super_parser_set_list == correct_super_parser_set_list
@staticmethod
@pytest.mark.parametrize("tags",
[get_tags(), None]
)
def test_compile(tags):
from cli import compile
correct_parser_objs = create_correct_parser_objs()
correct_parser_objs_tags = None
if tags:
correct_parser_objs_tags = create_correct_parser_objs(tags)
test_parser_objs, test_parser_objs_tags = compile(tags)
assert test_parser_objs == correct_parser_objs
assert test_parser_objs_tags == correct_parser_objs_tags
@staticmethod
def test_register():
from cli import register
correct_report = get_report()
test_report = register()
assert test_report.as_dict() == correct_report.as_dict()
@staticmethod
@pytest.mark.parametrize("data",
[
{"val": "no_backslashes"},
{"val": "\\backslashes"},
{"val": ".period"},
{"val": "localhost"},
{"val": "localhost*"},
]
)
def test_check_for_backslashes(data):
from cli import check_for_backslashes
ta_key = "val"
mwcp_key = "address"
val = data[ta_key]
correct_report = get_report()
IGNORE_FIELD_LIST = ['localhost', 'localhost*']
if '\\' in val:
correct_report.add_metadata(mwcp_key, val)
elif '.' not in val and val not in IGNORE_FIELD_LIST:
correct_report.add_metadata(mwcp_key, val)
test_report = get_report()
check_for_backslashes(ta_key, mwcp_key, data, test_report)
assert test_report.as_dict() == correct_report.as_dict()
@staticmethod
@pytest.mark.parametrize("output,scriptname,mwcp_key",
[
({}, 'unrecom', None),
({}, 'notunrecom', None),
({
"Process Injection": "a",
"Injection": "b",
"Inject Exe": "c"
}, "notunrecom", "injectionprocess"),
({
"Screen Rec Link": "a",
"WebPanel": "b",
"Plugins": "c"
}, "notunrecom", "url"),
({
"Install Dir": "a",
"InstallDir": "b",
"InstallPath": "c",
"Install Folder": "d",
"Install Folder1": "e",
"Install Folder2": "f",
"Install Folder3": "g",
"Folder Name": "h",
"FolderName": "i",
"pluginfoldername": "j",
"nombreCarpeta": "k",
}, "notunrecom", "directory"),
({
"InstallName": "a",
"Install Name": "b",
"Exe Name": "c",
"Jar Name": "d",
"JarName": "e",
"StartUp Name": "f",
"File Name": "g",
"USB Name": "h",
"Log File": "i",
"Install File Name": "j",
}, "notunrecom", "filename"),
({
"Campaign ID": "a",
"CampaignID": "b",
"Campaign Name": "c",
"Campaign": "d",
"ID": "e",
"prefijo": "f",
}, "notunrecom", "missionid"),
({
"Version": "a",
"version": "b",
}, "notunrecom", "version"),
({
"FTP Interval": "a",
"Remote Delay": "b",
"RetryInterval": "c"
}, "unrecom", "interval"),
({
"EncryptionKey": "a",
}, "unrecom", "key"),
({
"Mutex": "a",
"mutex": "b",
"Mutex Main": "c",
"Mutex 4": "d",
"MUTEX": "e",
"Mutex Grabber": "f",
"Mutex Per": "g"
}, "unrecom", "mutex"),
({
'Reg Key': 'a',
'StartupName': 'a',
'Active X Key': 'a',
'ActiveX Key': 'a',
'Active X Startup': 'a',
'Registry Key': 'a',
'Startup Key': 'a',
'REG Key HKLM': 'a',
'REG Key HKCU': 'a',
'HKLM Value': 'a',
'RegistryKey': 'a',
'HKCUKey': 'a',
'HKCU Key': 'a',
'Registry Value': 'a',
'keyClase': 'a',
'regname': 'a',
'registryname': 'a',
'Custom Reg Key': 'a',
'Custom Reg Name': 'a',
'Custom Reg Value': 'a',
'HKCU': 'a',
'HKLM': 'a',
'RegKey1': 'a',
'RegKey2': 'a',
'Reg Value': 'a'
}, "unrecom", "registrypath"),
]
)
def test_ta_mapping(output, scriptname, mwcp_key):
from cli import ta_mapping, register
correct_report = add_metadata(output, mwcp_key)
test_report = register()
ta_mapping(output, scriptname)
assert check_reporter_equality(test_report, correct_report)
@staticmethod
@pytest.mark.parametrize("output,keys_of_interest",
[
({}, []),
({"a": "b"}, ["a"]),
({"a": "b"}, ["b"]),
]
)
def test_refine_data(output, keys_of_interest):
from cli import refine_data
correct_data = {val: output[val] for val in keys_of_interest if val in output}
test_data = refine_data(output, keys_of_interest)
assert correct_data == test_data
@staticmethod
@pytest.mark.parametrize("data, mwcp_key",
[
({}, None),
({"address": "b"}, "address")
]
)
def test_map_fields(data, mwcp_key):
from cli import map_fields, register
correct_report = add_metadata(data, mwcp_key)
test_report = register()
map_fields(data, mwcp_key)
assert test_report.as_dict() == correct_report.as_dict()
@staticmethod
@pytest.mark.parametrize("data",
[
{},
{"FTP UserName": "a", "FTP Password": "b"},
{"FTPUserName": "a", "FTPPassword": "b"},
{"FTPUSER": "a", "FTPPASS": "b"},
{"FTPPASS": "a"},
{"FTPUSER": "a"},
{"Password": "a"},
{"password": "a"}
]
)
def test_map_username_password_fields(data):
from cli import map_username_password_fields, USERNAME_LIST, PASSWORD_LIST, PASSWORD_ONLY_LIST, register
correct_report = get_report()
for username, password in zip(USERNAME_LIST, PASSWORD_LIST):
if username in data and password in data:
correct_report.add(metadata.Credential([data[username], data[password]]))
elif password in data:
correct_report.add(metadata.Password(data[password]))
elif username in data:
correct_report.add(metadata.Username(data[username]))
only_password_data = {val: data[val] for val in PASSWORD_ONLY_LIST if val in data}
correct_report = add_metadata(only_password_data, "password", correct_report)
test_report = register()
map_username_password_fields(data)
assert test_report.as_dict() == correct_report.as_dict()
@staticmethod
@pytest.mark.parametrize("scriptname,data",
[
("NotIgnored", {"Install Path": "a", "Install Name": "b"}),
("NotIgnored", {"Install Path": "a"}),
("NotIgnored", {"Install Name": "a"}),
import numpy
import pytest
import cupy
from cupy import testing
from cupy import cuda
class TestJoin:
@testing.for_all_dtypes(name='dtype1')
@testing.for_all_dtypes(name='dtype2')
@testing.numpy_cupy_array_equal()
def test_column_stack(self, xp, dtype1, dtype2):
a = testing.shaped_arange((4, 3), xp, dtype1)
b = testing.shaped_arange((4,), xp, dtype2)
c = testing.shaped_arange((4, 2), xp, dtype1)
return xp.column_stack((a, b, c))
def test_column_stack_wrong_ndim1(self):
a = cupy.zeros(())
b = cupy.zeros((3,))
with pytest.raises(ValueError):
cupy.column_stack((a, b))
def test_column_stack_wrong_ndim2(self):
a = cupy.zeros((3, 2, 3))
b = cupy.zeros((3, 2))
with pytest.raises(ValueError):
cupy.column_stack((a, b))
def test_column_stack_wrong_shape(self):
a = cupy.zeros((3, 2))
b = cupy.zeros((4, 3))
with pytest.raises(ValueError):
cupy.column_stack((a, b))
@testing.for_all_dtypes(name='dtype')
@testing.numpy_cupy_array_equal()
def test_concatenate1(self, xp, dtype):
a = testing.shaped_arange((2, 3, 4), xp, dtype)
b = testing.shaped_reverse_arange((2, 3, 2), xp, dtype)
c = testing.shaped_arange((2, 3, 3), xp, dtype)
return xp.concatenate((a, b, c), axis=2)
@testing.for_all_dtypes(name='dtype')
@testing.numpy_cupy_array_equal()
def test_concatenate2(self, xp, dtype):
a = testing.shaped_arange((2, 3, 4), xp, dtype)
b = testing.shaped_reverse_arange((2, 3, 2), xp, dtype)
c = testing.shaped_arange((2, 3, 3), xp, dtype)
return xp.concatenate((a, b, c), axis=-1)
@testing.for_all_dtypes(name='dtype')
@testing.numpy_cupy_array_equal()
def test_concatenate_axis_none(self, xp, dtype):
a = testing.shaped_arange((2, 3), xp, dtype)
b = testing.shaped_reverse_arange((3, 5, 2), xp, dtype)
c = testing.shaped_arange((7, ), xp, dtype)
return xp.concatenate((a, b, c), axis=None)
@testing.for_all_dtypes(name='dtype')
@testing.numpy_cupy_array_equal()
def test_concatenate_large_2(self, xp, dtype):
a = testing.shaped_arange((2, 3, 4), xp, dtype)
b = testing.shaped_reverse_arange((2, 3, 2), xp, dtype)
c = testing.shaped_arange((2, 3, 3), xp, dtype)
d = testing.shaped_arange((2, 3, 5), xp, dtype)
e = testing.shaped_arange((2, 3, 2), xp, dtype)
return xp.concatenate((a, b, c, d, e) * 2, axis=-1)
@testing.for_all_dtypes(name='dtype')
@testing.numpy_cupy_array_equal()
def test_concatenate_large_3(self, xp, dtype):
a = testing.shaped_arange((2, 3, 1), xp, dtype)
b = testing.shaped_reverse_arange((2, 3, 1), xp, dtype)
return xp.concatenate((a, b) * 10, axis=-1)
@testing.for_all_dtypes(name='dtype')
@testing.numpy_cupy_array_equal()
def test_concatenate_large_4(self, xp, dtype):
a = testing.shaped_arange((2, 3, 4), xp, dtype)
b = testing.shaped_reverse_arange((2, 3, 4), xp, dtype)
return xp.concatenate((a, b) * 10, axis=-1)
@testing.for_all_dtypes(name='dtype')
@testing.numpy_cupy_array_equal()
def test_concatenate_large_5(self, xp, dtype):
a = testing.shaped_arange((2, 3, 4), xp, dtype)
b = testing.shaped_reverse_arange((2, 3, 4), xp, 'i')
return xp.concatenate((a, b) * 10, axis=-1)
@testing.multi_gpu(2)
def test_concatenate_large_different_devices(self):
arrs = []
for i in range(10):
with cuda.Device(i % 2):
arrs.append(cupy.empty((2, 3, 4)))
with pytest.raises(ValueError):
cupy.concatenate(arrs)
@testing.for_all_dtypes(name='dtype')
@testing.numpy_cupy_array_equal()
def test_concatenate_f_contiguous(self, xp, dtype):
a = testing.shaped_arange((2, 3, 4), xp, dtype)
b = testing.shaped_arange((2, 3, 2), xp, dtype).T
c = testing.shaped_arange((2, 3, 3), xp, dtype)
return xp.concatenate((a, b, c), axis=-1)
@testing.for_all_dtypes(name='dtype')
@testing.numpy_cupy_array_equal()
def test_concatenate_large_f_contiguous(self, xp, dtype):
a = testing.shaped_arange((2, 3, 4), xp, dtype)
b = testing.shaped_arange((2, 3, 2), xp, dtype).T
c = testing.shaped_arange((2, 3, 3), xp, dtype)
d = testing.shaped_arange((2, 3, 2), xp, dtype).T
e = testing.shaped_arange((2, 3, 2), xp, dtype)
return xp.concatenate((a, b, c, d, e) * 2, axis=-1)
@testing.numpy_cupy_array_equal()
def test_concatenate_many_multi_dtype(self, xp):
a = testing.shaped_arange((2, 1), xp, 'i')
b = testing.shaped_arange((2, 1), xp, 'f')
return xp.concatenate((a, b) * 1024, axis=1)
@testing.slow
def test_concatenate_32bit_boundary(self):
a = cupy.zeros((2 ** 30,), dtype=cupy.int8)
b = cupy.zeros((2 ** 30,), dtype=cupy.int8)
ret = cupy.concatenate([a, b])
del a
del b
del ret
# Free huge memory for slow test
cupy.get_default_memory_pool().free_all_blocks()
def test_concatenate_wrong_ndim(self):
a = cupy.empty((2, 3))
b = cupy.empty((2,))
with pytest.raises(ValueError):
cupy.concatenate((a, b))
def test_concatenate_wrong_shape(self):
a = cupy.empty((2, 3, 4))
b = cupy.empty((3, 3, 4))
c = cupy.empty((4, 4, 4))
with pytest.raises(ValueError):
cupy.concatenate((a, b, c))
@testing.for_all_dtypes(name='dtype')
@testing.numpy_cupy_array_equal()
def test_concatenate_out(self, xp, dtype):
a = testing.shaped_arange((3, 4), xp, dtype)
b = testing.shaped_reverse_arange((3, 4), xp, dtype)
c = testing.shaped_arange((3, 4), xp, dtype)
out = xp.zeros((3, 12), dtype=dtype)
xp.concatenate((a, b, c), axis=1, out=out)
return out
@testing.numpy_cupy_array_equal()
def test_concatenate_out_same_kind(self, xp):
a = testing.shaped_arange((3, 4), xp, xp.float64)
b = testing.shaped_reverse_arange((3, 4), xp, xp.float64)
c = testing.shaped_arange((3, 4), xp, xp.float64)
out = xp.zeros((3, 12), dtype=xp.float32)
xp.concatenate((a, b, c), axis=1, out=out)
return out
def test_concatenate_out_invalid_shape(self):
for xp in (numpy, cupy):
a = testing.shaped_arange((3, 4), xp, xp.float64)
b = testing.shaped_reverse_arange((3, 4), xp, xp.float64)
c = testing.shaped_arange((3, 4), xp, xp.float64)
out = xp.zeros((4, 10), dtype=xp.float64)
with pytest.raises(ValueError):
xp.concatenate((a, b, c), axis=1, out=out)
def test_concatenate_out_invalid_shape_2(self):
for xp in (numpy, cupy):
a = testing.shaped_arange((3, 4), xp, xp.float64)
b = testing.shaped_reverse_arange((3, 4), xp, xp.float64)
c = testing.shaped_arange((3, 4), xp, xp.float64)
out = xp.zeros((2, 2, 10), dtype=xp.float64)
with pytest.raises(ValueError):
xp.concatenate((a, b, c), axis=1, out=out)
def test_concatenate_out_invalid_dtype(self):
for xp in (numpy, cupy):
a = testing.shaped_arange((3, 4), xp, xp.float64)
b = testing.shaped_reverse_arange((3, 4), xp, xp.float64)
c = testing.shaped_arange((3, 4), xp, xp.float64)
out = xp.zeros((3, 12), dtype=xp.int64)
with pytest.raises(TypeError):
xp.concatenate((a, b, c), axis=1, out=out)
@testing.for_all_dtypes_combination(names=['dtype1', 'dtype2'])
@testing.numpy_cupy_array_equal()
def test_concatenate_different_dtype(self, xp, dtype1, dtype2):
a = testing.shaped_arange((3, 4), xp, dtype1)
b = testing.shaped_arange((3, 4), xp, dtype2)
return xp.concatenate((a, b))
@testing.for_all_dtypes_combination(names=['dtype1', 'dtype2'])
@testing.numpy_cupy_array_equal(accept_error=TypeError)
def test_concatenate_out_different_dtype(self, xp, dtype1, dtype2):
a = testing.shaped_arange((3, 4), xp, dtype1)
b = testing.shaped_arange((3, 4), xp, dtype1)
out = xp.zeros((6, 4), dtype=dtype2)
return xp.concatenate((a, b), out=out)
@testing.with_requires('numpy>=1.20.0')
@testing.for_all_dtypes_combination(names=['dtype1', 'dtype2'])
@testing.numpy_cupy_array_equal(accept_error=TypeError)
def test_concatenate_dtype(self, xp, dtype1, dtype2):
a = testing.shaped_arange((3, 4), xp, dtype1)
b = testing.shaped_arange((3, 4), xp, dtype1)
return xp.concatenate((a, b), dtype=dtype2)
@testing.with_requires('numpy>=1.20.0')
def test_concatenate_dtype_invalid_out(self):
for xp in (numpy, cupy):
a = testing.shaped_arange((3, 4), xp, xp.float64)
b = testing.shaped_arange((3, 4), xp, xp.float64)
out = xp.zeros((6, 4), dtype=xp.int64)
with pytest.raises(TypeError):
xp.concatenate((a, b), out=out, dtype=xp.int64)
@testing.with_requires('numpy>=1.20.0')
@pytest.mark.parametrize('casting', [
'no',
'equiv',
'safe',
'same_kind',
'unsafe',
])
@testing.for_all_dtypes_combination(names=['dtype1', 'dtype2'])
@testing.numpy_cupy_array_equal(accept_error=TypeError)
def test_concatenate_casting(self, xp, dtype1, dtype2, casting):
a = testing.shaped_arange((3, 4), xp, dtype1)
b = testing.shaped_arange((3, 4), xp, dtype1)
# may raise TypeError
return xp.concatenate((a, b), dtype=dtype2, casting=casting)
@testing.numpy_cupy_array_equal()
def test_dstack(self, xp):
a = testing.shaped_arange((1, 3, 2), xp)
b = testing.shaped_arange((3,), xp)
c = testing.shaped_arange((1, 3), xp)
return xp.dstack((a, b, c))
@testing.numpy_cupy_array_equal()
def test_dstack_single_element(self, xp):
a = testing.shaped_arange((1, 2, 3), xp)
return xp.dstack((a,))
@testing.numpy_cupy_array_equal()
def test_dstack_single_element_2(self, xp):
a = testing.shaped_arange((1, 2), xp)
return xp.dstack((a,))
@testing.numpy_cupy_array_equal()
def test_dstack_single_element_3(self, xp):
a = testing.shaped_arange((1,), xp)
return xp.dstack((a,))
@testing.numpy_cupy_array_equal()
def test_hstack_vectors(self, xp):
a = xp.arange(3)
b = xp.arange(2, -1, -1)
return xp.hstack((a, b))
@testing.numpy_cupy_array_equal()
def test_hstack_scalars(self, xp):
a = testing.shaped_arange((), xp)
b = testing.shaped_arange((), xp)
c = testing.shaped_arange((), xp)
return xp.hstack((a, b, c))
@testing.numpy_cupy_array_equal()
def test_hstack(self, xp):
a = testing.shaped_arange((2, 1), xp)
b = testing.shaped_arange((2, 2), xp)
c = testing.shaped_arange((2, 3), xp)
return xp.hstack((a, b, c))
@testing.numpy_cupy_array_equal()
def test_vstack_vectors(self, xp):
a = xp.arange(3)
b = xp.arange(2, -1, -1)
return xp.vstack((a, b))
@testing.numpy_cupy_array_equal()
def test_vstack_single_element(self, xp):
a = xp.arange(3)
return xp.vstack((a,))
def test_vstack_wrong_ndim(self):
a = cupy.empty((3,))
b = cupy.empty((3, 1))
with pytest.raises(ValueError):
cupy.vstack((a, b))
@testing.numpy_cupy_array_equal()
def test_stack(self, xp):
a = testing.shaped_arange((2, 3), xp)
b = testing.shaped_arange((2, 3), xp)
c = testing.shaped_arange((2, 3), xp)
return xp.stack((a, b, c))
def test_stack_value(self):
a = testing.shaped_arange((2, 3), cupy)
b = testing.shaped_arange((2, 3), cupy)
c = testing.shaped_arange((2, 3), cupy)
s = cupy.stack((a, b, c))
assert s.shape == (3, 2, 3)
cupy.testing.assert_array_equal(s[0], a)
cupy.testing.assert_array_equal(s[1], b)
cupy.testing.assert_array_equal(s[2], c)
@testing.numpy_cupy_array_equal()
def test_stack_with_axis1(self, xp):
a = testing.shaped_arange((2, 3), xp)
return xp.stack((a, a), axis=1)
@testing.numpy_cupy_array_equal()
def test_stack_with_axis2(self, xp):
a = testing.shaped_arange((2, 3), xp)
return xp.stack((a, a), axis=2)
def test_stack_with_axis_over(self):
for xp in (numpy, cupy):
a = testing.shaped_arange((2, 3), xp)
with pytest.raises(ValueError):
xp.stack((a, a), axis=3)
def test_stack_with_axis_value(self):
a = testing.shaped_arange((2, 3), cupy)
s = cupy.stack((a, a), axis=1)
assert s.shape == (2, 2, 3)
cupy.testing.assert_array_equal(s[:, 0, :], a)
cupy.testing.assert_array_equal(s[:, 1, :], a)
@testing.numpy_cupy_array_equal()
def test_stack_with_negative_axis(self, xp):
a = testing.shaped_arange((2, 3), xp)
return xp.stack((a, a), axis=-1)
def test_stack_with_negative_axis_value(self):
a = testing.shaped_arange((2, 3), cupy)
s = cupy.stack((a, a), axis=-1)
assert s.shape == (2, 3, 2)
cupy.testing.assert_array_equal(s[:, :, 0], a)
cupy.testing.assert_array_equal(s[:, :, 1], a)
def test_stack_different_shape(self):
for xp in (numpy, cupy):
a = testing.shaped_arange((2, 3), xp)
b = testing.shaped_arange((2, 4), xp)
with pytest.raises(ValueError):
xp.stack([a, b])
def test_stack_out_of_bounds1(self):
for xp in (numpy, cupy):
a = testing.shaped_arange((2, 3), xp)
with pytest.raises(ValueError):
xp.stack([a, a], axis=3)
def test_stack_out_of_bounds2(self):
a = testing.shaped_arange((2, 3), cupy)
with pytest.raises(numpy.AxisError):
return cupy.stack([a, a], axis=3)
@testing.for_all_dtypes(name='dtype')
@testing.numpy_cupy_array_equal()
def test_stack_out(self, xp, dtype):
a = testing.shaped_arange((3, 4), xp, dtype)
b = testing.shaped_reverse_arange((3, 4), xp, dtype)
c = testing.shaped_arange((3, 4), xp, dtype)
out = xp.zeros((3, 3, 4), dtype=dtype)
xp.stack((a, b, c), axis=1, out=out)
return out
@testing.numpy_cupy_array_equal()
def test_stack_out_same_kind(self, xp):
a = testing.shaped_arange((3, 4), xp, xp.float64)
b = testing.shaped_reverse_arange((3, 4), xp, xp.float64)
c = testing.shaped_arange((3, 4), xp, xp.float64)
out = xp.zeros((3, 3, 4), dtype=xp.float32)
xp.stack((a, b, c), axis=1, out=out)
return out
def test_stack_out_invalid_shape(self):
for xp in (numpy, cupy):
a = testing.shaped_arange((3, 4), xp, xp.float64)
b = testing.shaped_reverse_arange((3, 4), xp,
#!/usr/bin/env python
# coding=utf-8
"""
Author = <NAME>
License = MIT
Version = 1.0.1
Email = <EMAIL>
Status = Development
"""
import os
import sys
#import logging
import unittest
sys.path.insert(0, os.path.abspath(".."))
from pyredemet.src.pyredemet import pyredemet
class test_pyredemet(unittest.TestCase):
def test_init(self):
api_key = "banana"
with self.assertRaises(TypeError):
result = pyredemet(api_key=api_key)
server_url = 123
with self.assertRaises(TypeError):
result = pyredemet(api_key='<KEY>', server_url=server_url)
def setUp(self):
self.redemet = pyredemet(api_key = '<KEY>')
def test_get_aerodromos(self):
# API intended to return information about the aerodromes of countries available in the REDEMET database.
# https://api-redemet.decea.gov.br/aerodromos
# Parameters
# Name  Required  Description    Default  Example
# pais  No        Country name.  Brasil   Argentina
# Sample request
# https://api-redemet.decea.gov.br/aerodromos?api_key=SUA_CHAVE_AQUI&pais=Argentina
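# For reference, a well-formed call looks like this (sketch; it assumes the API
# key passed to pyredemet in setUp is valid):
# self.redemet.get_aerodromos(pais='Argentina')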
pais = 123
with self.assertRaises(TypeError):
result = self.redemet.get_aerodromos(pais=pais)
def test_get_aerodromos_status(self):
# GET aerodromos/status
# API intended to return the status of locations as colours.
# The colours are obtained by evaluating parameters based on the location's visibility and ceiling, as per the table below.
# Value  Visibility(m)  Condition  Ceiling(ft)
# g      >= 5000        and        >= 1500
# y      < 5000         and        >= 1500, and/or < 1500 and > 500
# r      < 1500         and/or     < 600
# Access address
# https://api-redemet.decea.gov.br/aerodromos/status
# Parameters
# Name  Required  Description  Default  Example
# pais  Yes       Country for which status information is wanted.
#                 For more than one country, list them separated by commas.  BRASIL  BRASIL,ARGENTINA
# Sample request
# https://api-redemet.decea.gov.br/aerodromos/status?api_key=SUA_CHAVE_AQUI
pais = 123
with self.assertRaises(TypeError):
result = self.redemet.get_aerodromos_status(pais=pais)
def test_get_aerodromos_info(self):
# API intended to return the meteorological conditions of a location available in the REDEMET database.
# Access address
# https://api-redemet.decea.gov.br/aerodromos/info
# Parameters
# Name        Required  Description                      Default            Example
# localidade  Yes       ICAO location indicator.         None               SBBR
# metar       No        Coded METAR for the location.    sim                sim
# taf         No        Coded TAF for the location.      nao                sim
# datahora    No        Date in the format YYYYMMDDHH.   Current date/time  2019010100
# Sample request
# https://api-redemet.decea.gov.br/aerodromos/info?api_key=SUA_CHAVE_AQUI&localidade=SBBR&datahora=2019010100
localidade = 123
with self.assertRaises(TypeError):
result = self.redemet.get_aerodromos_info(localidade=localidade)
localidade = 'SBB'
with self.assertRaises(TypeError):
result = self.redemet.get_aerodromos_info(localidade=localidade)
localidade = 'SBBR'
metar = 123
with self.assertRaises(TypeError):
result = self.redemet.get_aerodromos_info(localidade=localidade,metar=metar)
taf = 123
with self.assertRaises(TypeError):
result = self.redemet.get_aerodromos_info(localidade=localidade,taf=taf)
datahora = 123
with self.assertRaises(TypeError):
result = self.redemet.get_aerodromos_info(localidade=localidade,datahora=datahora)
datahora = '123'
with self.assertRaises(TypeError):
result = self.redemet.get_aerodromos_info(localidade=localidade,datahora=datahora)
def test_get_produtos_amdar(self):
# GET produtos/amdar
# API intended to return AMDAR information.
# Access address
# https://api-redemet.decea.gov.br/produtos/amdar
# Parameters
# Name  Required  Description                     Default       Example
# data  No        Date in the format YYYYMMDDHH   Current date  2020051200
# Sample request
# https://api-redemet.decea.gov.br/produtos/amdar?api_key=SUA_CHAVE_AQUI&data_ini=2020030313&data=2020032415
data = 123
with self.assertRaises(TypeError):
result = self.redemet.get_produtos_amdar(data=data)
data = 'SBB'
with self.assertRaises(TypeError):
result = self.redemet.get_produtos_amdar(data=data)
def test_get_produtos_modelo(self):
# GET produtos/modelo
# API intended to return numerical-model imagery available on REDEMET.
# Access address
# https://api-redemet.decea.gov.br/produtos/modelo
# Parameters
# Name     Required  Description  Default  Example
# modelo   Yes       wifs         None     wifs
# area     Yes       x            None     b1
# produto  Yes       x            None     cb_top
# nivel    Yes       x            None     600
# anima    No        x            None     5
# Sample request
# https://api-redemet.decea.gov.br/produtos/modelo?api_key=SUA_CHAVE_AQUI&modelo=wifs&area=b1&produto=vento-altitude-barb&nivel=600&anima=2
modelo = 123
with self.assertRaises(TypeError):
result = self.redemet.get_produtos_modelo(modelo=modelo)
modelo = 'wifs'
produto = 'incldturb'
nivel = '850'
area = 123
with self.assertRaises(TypeError):
result = self.redemet.get_produtos_modelo(modelo=modelo,area=area,produto=produto,nivel=nivel)
area = 'ab'
with self.assertRaises(TypeError):
result = self.redemet.get_produtos_modelo(modelo=modelo,area=area,produto=produto,nivel=nivel)
modelo = 'wifs'
nivel = '850'
area = 'as'
produto = 123
with self.assertRaises(TypeError):
result = self.redemet.get_produtos_modelo(modelo=modelo,area=area,produto=produto,nivel=nivel)
produto = 'abc'
with self.assertRaises(TypeError):
result = self.redemet.get_produtos_modelo(modelo=modelo,area=area,produto=produto,nivel=nivel)
modelo = 'wifs'
produto = 'incldturb'
area = 'as'
nivel = 123
with self.assertRaises(TypeError):
result = self.redemet.get_produtos_modelo(modelo=modelo,area=area,produto=produto,nivel=nivel)
nivel = 'abc'
with self.assertRaises(TypeError):
result = self.redemet.get_produtos_modelo(modelo=modelo,area=area,produto=produto,nivel=nivel)
modelo = 'wifs'
produto = 'incldturb'
nivel = '850'
area = 'as'
anima = '123'
with self.assertRaises(TypeError):
result = self.redemet.get_produtos_modelo(modelo=modelo,area=area,produto=produto,nivel=nivel,anima=anima)
anima = 123
with self.assertRaises(TypeError):
result = self.redemet.get_produtos_modelo(modelo=modelo,area=area,produto=produto,nivel=nivel,anima=anima)
def test_get_produto_radar(self):
# GET produtos/radar
# API intended to return weather-radar echo images.
# METAR is available from 01/01/2006 up to the present date.
# Access address
# https://api-redemet.decea.gov.br/produtos/radar/{tipo}
# Parameters
# Name   Required  Description                                      Default       Example
# tipo   Yes       Available echo type (e.g. maxcappi 07km)         None
# data   No        Date in the format YYYYMMDDHH                    Current date  2020031211
# area   Yes       Available radars                                 None          pv
# anima  No        Number of radar echoes to animate; the animation
#                  uses the `data` option as the last image.
#                  The maximum allowed value is 15.                 1             10
# Sample request
# https://api-redemet.decea.gov.br/produtos/radar/maxcappi?api_key=SUA_CHAVE_AQUI&data=2020032410
area='al'
tipo = 123
with self.assertRaises(TypeError):
result = self.redemet.get_produto_radar(tipo=tipo,area=area)
tipo = 'ab'
with self.assertRaises(TypeError):
result = self.redemet.get_produto_radar(tipo=tipo,area=area)
tipo='maxcappi'
area = 123
with self.assertRaises(TypeError):
result = self.redemet.get_produto_radar(tipo=tipo,area=area)
area = 'abc'
with self.assertRaises(TypeError):
result = self.redemet.get_produto_radar(tipo=tipo,area=area)
area='al'
tipo='maxcappi'
data = 123
with self.assertRaises(TypeError):
result = self.redemet.get_produto_radar(tipo=tipo,area=area,data=data)
data = 'abc'
with self.assertRaises(TypeError):
result = self.redemet.get_produto_radar(tipo=tipo,area=area,data=data)
area='al'
tipo='maxcappi'
anima = '123'
with self.assertRaises(TypeError):
result = self.redemet.get_produto_radar(tipo=tipo,area=area,anima=anima)
anima = 123
with self.assertRaises(TypeError):
result = self.redemet.get_produto_radar(tipo=tipo,area=area,anima=anima)
def test_get_produto_satelite(self):
# GET produtos/satelite
# API intended to return satellite imagery available on REDEMET.
# Access address
# https://api-redemet.decea.gov.br/produtos/satelite/{tipo}
# Parameters
# Name   Required  Description                                      Default       Example
# tipo   Yes       Available types                                  None          realcada
# data   No        Date in the format YYYYMMDDHH                    Current date  2020051200
# anima  No        Number of images to animate; the animation uses
#                  the `data` option as the last image.
#                  The maximum allowed value is 15.                 1             10
# Sample request
# https://api-redemet.decea.gov.br/produtos/satelite/realcada?api_key=SUA_CHAVE_AQUI&data=2020032114
tipo = 123
with self.assertRaises(TypeError):
result = self.redemet.get_produto_satelite(tipo=tipo)
tipo = 'ab'
with self.assertRaises(TypeError):
result = self.redemet.get_produto_satelite(tipo=tipo)
tipo='realcada'
data = 123
with self.assertRaises(TypeError):
result = self.redemet.get_produto_satelite(tipo=tipo,data=data)
data = 'abc'
with self.assertRaises(TypeError):
result = self.redemet.get_produto_satelite(tipo=tipo,data=data)
tipo='realcada'
anima = '123'
with self.assertRaises(TypeError):
result = self.redemet.get_produto_satelite(tipo=tipo,anima=anima)
anima = 123
with self.assertRaises(TypeError):
result = self.redemet.get_produto_satelite(tipo=tipo,anima=anima)
def test_get_produto_stsc(self):
# GET produtos/stsc
# API intended to return thunderstorm occurrence information.
# Access address
# https://api-redemet.decea.gov.br/produtos/stsc
# Parameters
# Name   Required  Description                                      Default       Example
# data   No        Date in the format YYYYMMDDHH                    Current date  2020051200
# anima  No        Number of occurrences to animate; the animation
#                  uses the `data` option as the last image.
#                  The maximum allowed value is 60.                 1             10
# Sample request
# https://api-redemet.decea.gov.br/produtos/satelite/stsc?api_key=SUA_CHAVE_AQUI&data=2020032114
data = 123
with self.assertRaises(TypeError):
result = self.redemet.get_produto_stsc(data=data)
data = 'abc'
with self.assertRaises(TypeError):
result = self.redemet.get_produto_stsc(data=data)
tipo='realcada'
anima = '123'
with self.assertRaises(TypeError):
result = self.redemet.get_produto_stsc(anima=anima)
anima = 123
with self.assertRaises(TypeError):
result = self.redemet.get_produto_stsc(anima=anima)
def test_get_mensagens_aviso(self):
# GET mensagens/aviso
# API intended to return Aerodrome Warning (Aviso de Aeródromo) messages for the locations available in the REDEMET database.
# Messages are available from 01/01/2003 up to the present date.
# Access address
# https://api-redemet.decea.gov.br/mensagens/aviso/{localidades}
# Parameters
# Name         Required  Description                                            Default       Example
# localidades  Yes       ICAO location indicator. For more than one location,
#                        list them separated by commas with no spaces.          None          SBBR
# data_ini     No        Date in the format YYYYMMDDHH                          Current date  2020051200
# data_fim     No        Date in the format YYYYMMDDHH                          Current date  2020051206
# page_tam     No        Number of records per page                             150           100
# Sample request
# https://api-redemet.decea.gov.br/mensagens/aviso/SBBG?api_key=SUA_CHAVE_AQUI&data_ini=2020030313&data_fim=2020030313
localidades = 123
with self.assertRaises(TypeError):
result = self.redemet.get_mensagens_aviso(localidades=localidades)
localidades = 'SBBR, SBCF'
with self.assertRaises(TypeError):
result = self.redemet.get_mensagens_aviso(localidades=localidades)
localidades = 'SBBR,SBCF'
data_ini = 123
with self.assertRaises(TypeError):
result = self.redemet.get_mensagens_aviso(localidades=localidades,data_ini=data_ini)
data_ini = 'abc'
with self.assertRaises(TypeError):
result = self.redemet.get_mensagens_aviso(localidades=localidades,data_ini=data_ini)
data_fim = 123
with self.assertRaises(TypeError):
result = self.redemet.get_mensagens_aviso(localidades=localidades,data_fim=data_fim)
data_fim = 'abc'
with self.assertRaises(TypeError):
result = self.redemet.get_mensagens_aviso(localidades=localidades,data_fim=data_fim)
def test_get_mensagens_gamet(self):
# GET mensagens/gamet
# API intended to return GAMET messages for the countries available in the REDEMET database.
# Access address
# https://api-redemet.decea.gov.br/mensagens/gamet
# Parameters
# Name      Required  Description                      Default       Example
# pais      Yes       Country name                     Brasil        Argentina
# data_ini  No        Date in the format YYYYMMDDHHII  Current date  202005120000
# data_fim  No        Date in the format YYYYMMDDHHII  Current date  202005120600
# page_tam  No        Number of records per page       150           100
# Sample request
# https://api-redemet.decea.gov.br/mensagens/gamet/?api_key=SUA_CHAVE_AQUI&data_ini=202006120300&data_fim=202006120300
pais = 123
with self.assertRaises(TypeError):
result = self.redemet.get_mensagens_gamet(pais=pais)
pais = 'Brasil'
data_ini = 123
with self.assertRaises(TypeError):
result = self.redemet.get_mensagens_gamet(pais=pais,data_ini=data_ini)
data_ini = 'abc'
with self.assertRaises(TypeError):
result = self.redemet.get_mensagens_gamet(pais=pais,data_ini=data_ini)
data_fim = 123
with self.assertRaises(TypeError):
result = self.redemet.get_mensagens_gamet(pais=pais,data_fim=data_fim)
data_fim = 'abc'
with self.assertRaises(TypeError):
result = self.redemet.get_mensagens_gamet(pais=pais,data_fim=data_fim)
def test_get_mensagens_metar(self):
# GET mensagens/metar
# API intended to return METAR messages for the locations available in the REDEMET database.
# Messages are available from 01/01/2003 up to the present date.
# Access address
# https://api-redemet.decea.gov.br/mensagens/metar/{localidades}
# Parameters
# Name         Required  Description                                            Default       Example
# localidades  Yes       ICAO location indicator. For more than one location,
#                        list them separated by commas with no spaces.          None          SBBR
# data_ini     No        Date in the format YYYYMMDDHH                          Current date  2020051200
# data_fim     No        Date in the format YYYYMMDDHH                          Current date  2020051206
# page_tam     No        Number of records per page                             150           100
# Sample request
# https://api-redemet.decea.gov.br/mensagens/metar/SBGL,SBBR?api_key=SUA_CHAVE_AQUI&data_ini=2019010100&data_fim=2019010101
localidades = 123
with self.assertRaises(TypeError):
result = self.redemet.get_mensagens_metar(localidades=localidades)
# idfx/dao/mysql.py
# -*- coding: UTF-8 -*-
__author__ = "d01"
__copyright__ = "Copyright (C) 2015-21, <NAME>"
__license__ = "All rights reserved"
__version__ = "0.3.0"
__date__ = "2021-05-06"
# Created: 2015-03-13 12:34
import datetime
import uuid
from typing import Optional, Dict, Tuple, Any
from flotils import Loadable, StartStopable, StartException
import pymysql
from ..errors import DAOException, AlreadyExistsException, ValueException
from ..model import Manga, User
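# A minimal usage sketch of the connector below (settings keys match
# SqlConnector.__init__; the values are placeholders):
# >>> dao = SqlConnector({'server': 'localhost', 'database': 'idfx',
# ...                     'user': 'idfx', 'password': 'secret'})
# >>> dao.setup()            # creates the helper functions and tables
# >>> dao.execute("SELECT 1")
# >>> dao.fetchall()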
class SqlConnector(Loadable, StartStopable):
""" Connect to mysql database """
def __init__(self, settings: Optional[Dict[str, Any]] = None):
if settings is None:
settings = {}
super().__init__(settings)
self._server = settings['server']
self._default_db = settings['database']
self._user = settings['user']
self._pw = settings['password']
self.connection: Optional[pymysql.Connection] = None
""" Current connection """
self.cursor: Optional[pymysql.cursors.Cursor] = None
""" Current cursor """
self._dbs: Dict[str, Tuple[pymysql.Connection, pymysql.cursors.Cursor]] = {}
""" """
self.database: Optional[str] = None
""" Current database """
def _connect(self, db=None, encoding="utf8"):
"""
Connect to database
:param db: Database to connect to
:type db: str | unicode
:param encoding: Database encoding
:type encoding: str | unicode
:rtype: None
:raises DAOException: Failed to connect
"""
if not db:
db = self._default_db
try:
con = pymysql.connect(
host=self._server,
user=self._user,
passwd=self._pw,
db=db,
charset=encoding
)
cur = con.cursor()
cur.execute("SET time_zone= '+00:00'")
except Exception as e:
self.exception("Failed to connect")
raise DAOException(e)
self._dbs[db] = (con, cur)
self.database = db
self.connection = con
self.cursor = cur
def _close(self, db=None):
"""
Commit and close database connection
:param db: Database to connect to (default: None)
None means close all
:type db: None | str | unicode
:rtype: None
"""
if db:
dbs = [db]
else:
dbs = list(self._dbs.keys())
for db in dbs:
con, cur = self._dbs[db]
try:
if cur:
cur.close()
if con:
con.commit()
con.close()
except Exception:
self.exception("Failed to close")
finally:
del self._dbs[db]
if db == self.database:
self.connection = None
self.cursor = None
self.database = None
def switch_database(self, db):
"""
Switch to database
:param db: Database to switch to
:type db: str | unicode
:rtype: None
:raises DAOException: Failed to connect
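Note: at most one connection is kept per database; switching re-points
self.connection and self.cursor at the cached connection for `db`.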
"""
if db is None:
db = self._default_db
if db == self.database:
return
if db in self._dbs:
# Update old values
self._dbs[self.database] = (self.connection, self.cursor)
# Get current
self.connection, self.cursor = self._dbs[db]
self.database = db
return
else:
self._connect(db)
def execute(self, cmd, args=None, db=None):
"""
Execute a sql command
:param cmd: Sql command to execute
:type cmd: str | unicode
:param args: Arguments to add (default: None)
:type args: None | tuple[str | unicode]
:param db: Database to execute on (default: None)
None means on current
:type db: str | unicode | None
:return: result of .execute()
:raises DAOException: Failed to connect
"""
if db is not None and db != self.database:
self.switch_database(db)
try:
if args:
return self.cursor.execute(cmd, args)
else:
return self.cursor.execute(cmd)
except Exception as e:
raise DAOException(e)
def fetchall(self, db=None):
"""
Get all rows for database
:param db: Database to get from (default: None)
None means current
:type db: None | str | unicode
:return: Result of .fetchall()
:rtype: collections.iterable
:raises DAOException: Failed to connect
"""
if db is not None and db != self.database:
self.switch_database(db)
try:
return self.cursor.fetchall()
except Exception as e:
raise DAOException(e)
def commit(self, db=None):
if db is not None and db != self.database:
self.switch_database(db)
try:
return self.connection.commit()
except Exception as e:
raise DAOException(e)
def rollback(self, db=None):
if db is not None and db != self.database:
self.switch_database(db)
try:
return self.connection.rollback()
except Exception as e:
raise DAOException(e)
def change_encoding(self, charset, db=None):
self.execute(
"""ALTER DATABASE python CHARACTER SET '%s'""",
args=charset,
db=db
)
return self.fetchall()
def get_encodings(self, db=None):
self.execute("""SHOW variables LIKE '%character_set%'""", db=db)
return self.fetchall()
def setup(self):
if not self.connection:
self._connect()
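# The helper functions created below store 36-character UUID strings as
# 16-byte binary values. UuidToBin also swaps the time-low/time-mid/time-high
# groups (the usual "ordered UUID" layout) so time-based UUIDs cluster better
# in the primary key index; UuidFromBin reverses the swap on the way out.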
query = "CREATE FUNCTION IF NOT EXISTS UuidToBin(_uuid BINARY(36))" \
" RETURNS BINARY(16)" \
" LANGUAGE SQL DETERMINISTIC CONTAINS SQL " \
"SQL SECURITY INVOKER" \
" RETURN" \
" UNHEX(CONCAT(" \
" SUBSTR(_uuid, 15, 4)," \
" SUBSTR(_uuid, 10, 4)," \
" SUBSTR(_uuid, 1, 8)," \
" SUBSTR(_uuid, 20, 4)," \
" SUBSTR(_uuid, 25)" \
" ));" \
"\n\n" \
"CREATE FUNCTION IF NOT EXISTS UuidFromBin(_bin BINARY(16))" \
" RETURNS BINARY(36)" \
" LANGUAGE SQL DETERMINISTIC CONTAINS SQL " \
"SQL SECURITY INVOKER" \
" RETURN" \
" LCASE(CONCAT_WS('-'," \
" HEX(SUBSTR(_bin, 5, 4))," \
" HEX(SUBSTR(_bin, 3, 2))," \
" HEX(SUBSTR(_bin, 1, 2))," \
" HEX(SUBSTR(_bin, 9, 2))," \
" HEX(SUBSTR(_bin, 11))" \
" ));"
try:
res = self.execute(query)
if res:
self.info("Created functions")
except Exception:
self.exception("Failed to create functions")
raise DAOException('Functions failed')
query = "CREATE TABLE IF NOT EXISTS mangas (" \
" uuid BINARY(16) NOT NULL UNIQUE PRIMARY KEY," \
" created DATETIME NOT NULL," \
" updated DATETIME NOT NULL," \
" name VARCHAR(255) NOT NULL UNIQUE," \
" latest_chapter DECIMAL(8,2) " \
") DEFAULT CHARACTER SET utf8 COLLATE utf8_general_ci;"
try:
res = self.execute(query)
if res:
self.info("Created table mangas")
except Exception:
self.exception("Failed to create table mangas")
raise DAOException('Table create failed')
query = "CREATE TABLE IF NOT EXISTS users (" \
" uuid BINARY(16) NOT NULL UNIQUE PRIMARY KEY," \
" created DATETIME NOT NULL," \
" updated DATETIME NOT NULL," \
" firstname VARCHAR(255) NOT NULL," \
" lastname VARCHAR(255) NOT NULL," \
" role INTEGER NOT NULL," \
" CONSTRAINT users_name_uq UNIQUE (lastname, firstname)" \
") DEFAULT CHARACTER SET utf8 COLLATE utf8_general_ci;"
try:
res = self.execute(query)
if res:
self.info("Created table users")
except Exception:
self.exception("Failed to create table users")
raise DAOException('Table create failed')
query = "CREATE TABLE IF NOT EXISTS mangas_read (" \
" user_uuid BINARY(16) NOT NULL," \
" manga_uuid BINARY(16) NOT NULL," \
" created DATETIME NOT NULL," \
" updated DATETIME NOT NULL," \
" chapter DECIMAL(8,2)," \
" CONSTRAINT mangas_read_pk PRIMARY KEY (user_uuid, manga_uuid)," \
" CONSTRAINT mangas_read_user_fk FOREIGN KEY (user_uuid)" \
" REFERENCES users (uuid) ON DELETE CASCADE," \
" CONSTRAINT mangas_read_manga_fk FOREIGN KEY (manga_uuid)" \
" REFERENCES mangas (uuid) ON DELETE CASCADE," \
" CONSTRAINT mangas_read_pk_uq UNIQUE (user_uuid, manga_uuid)" \
") DEFAULT CHARACTER SET utf8 COLLATE utf8_general_ci;"
try:
res = self.execute(query)
if res:
self.info("Created table mangas_read")
except Exception:
self.exception("Failed to create table mangas_read")
raise DAOException('Table create failed')
def manga_create(self, manga):
"""
New manga
:param manga: Manga to create
:type manga: idefix.model.Manga
:return: Affected entries
:rtype: int
:raises DAOException: Failure
"""
if not manga:
raise ValueException("Invalid manga")
if not manga.created:
manga.created = datetime.datetime.utcnow()
if not manga.updated:
manga.updated = manga.created
if not manga.uuid:
manga.uuid = "{}".format(uuid.uuid4())
try:
affected = self.execute(
"INSERT INTO mangas ("
" uuid,created,updated,name,latest_chapter"
") VALUES "
"(UuidToBin(%s),%s,%s,%s,%s);",
(
manga.uuid, manga.created, manga.updated,
manga.name, manga.latest_chapter
)
)
# self.debug(affected)
return affected
except DAOException as e:
if e.args[0] and e.args[0].args and e.args[0].args[0] == 1062:
# Double entry
raise AlreadyExistsException(e.args[0].args[1])
raise
except Exception as e:
raise DAOException(e)
def manga_update(self, manga):
"""
Update manga
:param manga: Manga to update
:type manga: idefix.model.Manga
:return: Affected entries
:rtype: int
:raises DAOException: Failure
"""
if not manga or not manga.uuid:
raise ValueException("Invalid manga")
if not manga.updated:
manga.updated = datetime.datetime.utcnow()
query = "UPDATE mangas SET updated=%s"
args = (manga.updated,)
if manga.name is not None:
query += ",name=%s"
args += (manga.name,)
if manga.latest_chapter is not None:
query += ",latest_chapter=%s"
args += (manga.latest_chapter,)
try:
affected = self.execute(
query + " WHERE UuidFromBin(uuid)=%s",
args + (manga.uuid,)
)
# self.debug(affected)
return affected
except DAOException as e:
if e.args[0] and e.args[0].args and e.args[0].args[0] == 1062:
# Double entry
raise AlreadyExistsException(e.args[0].args[1])
raise
except Exception as e:
raise DAOException(e)
def manga_delete(self, manga):
"""
Delete manga
:param manga: Manga to delete
:type manga: idefix.model.Manga
:return: Affected entries
:rtype: int
:raises DAOException: Failure
"""
if not manga or not manga.uuid:
raise ValueException("Invalid manga")
try:
affected = self.execute(
"DELETE FROM mangas WHERE UuidFromBin(uuid)=%s",
(manga.uuid,)
)
# self.debug(affected)
return affected
except DAOException:
raise
except Exception as e:
raise DAOException(e)
def manga_get(self, manga=None, use_like=False):
"""
Get manga
:param manga: Manga to get or None for all (default: None)
:type manga: None | idefix.model.Manga
:param use_like: Use LIKE for comparison (default: False)
:type use_like: bool
:return: Found mangas
:rtype: list[idefix.model.Manga]
:raises DAOException: Failure
"""
args = ()
where_query = ""
where_query_parts = []
if manga:
if manga.name:
op = "LIKE" if use_like else "="
where_query_parts.append(("name",
26.5463136066661*m.x4615 + 41.2793113138707*m.x4616
+ 25.717262459619*m.x4617 + 25.4358707320878*m.x4618 + 34.0780323212046*m.x4619
+ 12.3258497900304*m.x4620 + 14.3091636014199*m.x4621 + 20.880064760763*m.x4622
+ 18.0234150979267*m.x4623 + 32.9894366470756*m.x4624 + 2.7817886900528*m.x4625
+ 28.0933612998658*m.x4626 + 21.7319079975375*m.x4627 + 28.1654558046908*m.x4628
+ 11.2376581167056*m.x4629 + 30.2016764192836*m.x4630 + 12.4587140791002*m.x4631
+ 16.4094947750435*m.x4632 + 36.4209171166347*m.x4633 + 30.2736128380662*m.x4634
+ 16.5954591592161*m.x4635 + 33.7886419278777*m.x4636 + 24.0266614980133*m.x4637
+ 1.54213941272831*m.x4638 + 37.2998004412711*m.x4639 + 26.5626574834034*m.x4640
+ 16.5516020659071*m.x4641 + 31.4539924535308*m.x4642 + 39.01721539587*m.x4643
+ 38.4709033860099*m.x4644 + 18.5014064809371*m.x4645 + 14.6257802674547*m.x4646
+ 34.7704565117559*m.x4647 + 13.9677647936879*m.x4648 + 12.0749862044544*m.x4649
+ 25.0800704184152*m.x4650 + 37.9396883960614*m.x4651 + 45.9645592801533*m.x4652
+ 14.5451293315056*m.x4653 + 22.0776425697342*m.x4654 + 33.2466275563415*m.x4655
+ 23.9612233282699*m.x4656 + 20.6312577664075*m.x4657 + 34.1571045220167*m.x4658
+ 41.0757011526234*m.x4659 + 9.85526313157658*m.x4660 + 34.2734675571881*m.x4661
+ 22.0682869159954*m.x4662 + 29.7719256822201*m.x4663 + 13.3422422692037*m.x4664
+ 6.37000528194919*m.x4665 + 27.1476721150532*m.x4666 + 29.6971443533569*m.x4667
+ 17.556599359965*m.x4668 + 11.4130858217178*m.x4669 + 12.0505475950928*m.x4670
+ 35.948747124281*m.x4671 + 47.2097105215568*m.x4672 + 46.4156134599402*m.x4673
+ 41.258619744484*m.x4674 + 54.7621825790403*m.x4675 + 4.22253813482922*m.x4676
+ 3.23657883381769*m.x4677 + 5.31019410774014*m.x4678 + 47.6063013185773*m.x4679
+ 19.192778172347*m.x4680 + 28.3307497157227*m.x4681 + 1.7002230319074*m.x4682
+ 38.347571659741*m.x4683 + 11.5109906531625*m.x4684 + 15.7698559492513*m.x4685
+ 20.754008833036*m.x4686 + 5.00364633581964*m.x4687 + 35.6135729673755*m.x4688
+ 46.8538380197498*m.x4689 + 28.4205511866999*m.x4690 + 26.7116300335783*m.x4691
+ 28.7873911788054*m.x4692 + 22.1596419375449*m.x4693 + 31.5864467092428*m.x4694
+ 41.9068366272822*m.x4695 + 12.3220185764819*m.x4696 + 34.3058271756574*m.x4697
+ 14.4220539903594*m.x4698 + 34.8228687201576*m.x4699 + 31.0581173573214*m.x4700
+ 41.8086559865787*m.x4701 + 49.9988816553996*m.x4702 + 47.8254722779987*m.x4703
+ 51.028092524752*m.x4704 + 37.2116728476779*m.x4705 + 31.4522327193091*m.x4706
+ 39.7580097903609*m.x4707 + 51.9493590367053*m.x4708 + 28.282977036587*m.x4709
+ 40.7599901211817*m.x4710 + 29.6145734574308*m.x4711 + 32.1793677346754*m.x4712
+ 41.7900717347275*m.x4713 + 51.2348268257942*m.x4714 + 54.4110336493849*m.x4715
+ 38.4175542604373*m.x4716 + 20.3788899970294*m.x4717 + 41.3813553537576*m.x4718
+ 19.9179709749297*m.x4719 + 31.0653661508735*m.x4720 + 19.4995921295542*m.x4721
+ 13.3818662828954*m.x4722 + 6.46550356975077*m.x4723 + 49.2257808964819*m.x4724
+ 34.3222997434917*m.x4725 + 42.5787488007407*m.x4726 + 2.30759272944607*m.x4727
+ 46.8579835937448*m.x4728 + 21.6842406183472*m.x4729 + 35.7863233086083*m.x4730
+ 37.4192685715794*m.x4731 + 10.2952873208244*m.x4732 + 27.0767460450223*m.x4733
+ 26.5132008197989*m.x4734 + 14.5464388970523*m.x4735 + 17.5539404280978*m.x4736
+ 58.5211739371186*m.x4737 + 27.3234607880677*m.x4738 + 36.9972425587017*m.x4739
+ 49.1762064338565*m.x4740 + 15.4750956107866*m.x4741 + 41.7334946399905*m.x4742
+ 28.8151150651027*m.x4743 + 43.2169450790961*m.x4744 + 19.9672311177921*m.x4745
+ 9.07357048361994*m.x4746 + 46.7111213126678*m.x4747 + 43.7739123059673*m.x4748
+ 32.034850001388*m.x4749 + 48.9521326671355*m.x4750 + 27.2595970986574*m.x4751
+ 51.4450378783604*m.x4752 + 37.5748112846566*m.x4753 + 31.902137355375*m.x4754
+ 17.2580950488718*m.x4755 + 36.8981171265907*m.x4756 + 3.6145737254708*m.x4757
+ 15.6540982251812*m.x4758 + 41.3765473426115*m.x4759 + 15.3698492665252*m.x4760
+ 6.26255243618015*m.x4761 + 34.1830229274675*m.x4762 + 35.2095102362142*m.x4763
+ 27.0120703777031*m.x4764 + 38.3429708966496*m.x4765 + 10.5231703732088*m.x4766
+ 36.4514049944238*m.x4767 + 32.6614713774635*m.x4768 + 12.2080010046786*m.x4769
+ 41.4482253182999*m.x4770 + 50.7452521617939*m.x4771 + 21.9312912234777*m.x4772
+ 31.4131019287463*m.x4773 + 24.9878384687079*m.x4774 + 45.435683932642*m.x4775
+ 39.9738468002726*m.x4776 + 31.021208562287*m.x4777 + 20.7218488889535*m.x4778
+ 40.5513565556417*m.x4779 + 17.6413329751689*m.x4780 + 54.9050513138121*m.x4781
+ 50.3612481223204*m.x4782 + 7.18097288206995*m.x4783 + 18.1831296081458*m.x4784
+ 29.0502061154199*m.x4785 + 35.7549088149121*m.x4786 + 18.9812909968721*m.x4787
+ 41.3861125245245*m.x4788 + 12.2349091693044*m.x4789 + 40.4203600063935*m.x4790
+ 30.0981205751723*m.x4791 + 38.716452118376*m.x4792 + 10.0415209871643*m.x4793
+ 5.30634551257583*m.x4794 + 44.822287814907*m.x4795 + 41.9747022389462*m.x4796
+ 20.0505988139596*m.x4797 + 50.8380073989403*m.x4798 + 38.3174997883066*m.x4799
+ 35.9799662186014*m.x4800 + 8.908949729247*m.x4801 + 4.3402883894748*m.x4802
+ 56.0483720427896*m.x4803 + 26.8090287101174*m.x4804 + 33.1803435848759*m.x4805
+ 25.7177689473939*m.x4806 + 23.4949578166498*m.x4807 + 35.8154550270418*m.x4808
+ 7.11153590456939*m.x4809 + 34.7721358889314*m.x4810 + 38.6238132706037*m.x4811
+ 46.134976326902*m.x4812 + 13.611611669533*m.x4813 + 33.5038229856432*m.x4814
+ 47.5265293457476*m.x4815 + 30.8333888103106*m.x4816 + 18.8748233458227*m.x4817
+ 25.521624471926*m.x4818 + 40.4081075845189*m.x4819 + 40.6561324980051*m.x4820
+ 8.44607852540147*m.x4821 + 21.1187982922687*m.x4822 + 21.845146899149*m.x4823
+ 18.7546339196384*m.x4824 + 27.62672884364*m.x4825 + 28.49097923527*m.x4826
+ 27.2464385537277*m.x4827 + 23.5768055512128*m.x4828 + 29.3635746926482*m.x4829
+ 24.2657142824802*m.x4830 + 5.87126789182998*m.x4831 + 26.3940245079767*m.x4832
+ 16.6484461275902*m.x4833 + 22.8370877446623*m.x4834 + 19.2948963267065*m.x4835
+ 21.320454940904*m.x4836 + 23.0557473382511*m.x4837 + 27.7514670482744*m.x4838
+ 20.7404771418561*m.x4839 + 1.54229943880671*m.x4840 + 25.4956213377654*m.x4841
+ 17.3890626712649*m.x4842 + 11.4133635808086*m.x4843 + 18.2045252493701*m.x4844
+ 15.9540300477223*m.x4845 + 23.8263750419438*m.x4846 + 17.5269003211255*m.x4847
+ 21.4237269550958*m.x4848 + 27.2063133034801*m.x4849 + 7.59255289852368*m.x4850
+ 16.3718445986335*m.x4851 + 22.6609937150957*m.x4852 + 25.4631851218269*m.x4853
+ 24.5624127124554*m.x4854 + 12.7319122791191*m.x4855 + 17.3568349663967*m.x4856
+ 19.6700514715035*m.x4857 + 25.1450147106273*m.x4858 + 23.795154708368*m.x4859
+ 17.7086612845014*m.x4860 + 6.28273556975114*m.x4861 + 21.0973556717109*m.x4862
+ 26.4283806047981*m.x4863 + 23.7329390399287*m.x4864 + 27.2676998855898*m.x4865
+ 12.7979900352503*m.x4866 + 21.312785028677*m.x4867 + 13.9512144717094*m.x4868
+ 17.6550663209752*m.x4869 + 12.6409267408616*m.x4870 + 8.0207337053437*m.x4871
+ 21.5337161207106*m.x4872 + 22.8769123588816*m.x4873 + 22.0294240550037*m.x4874
+ 27.9213853504814*m.x4875 + 15.6939981190572*m.x4876 + 28.4620094350391*m.x4877
+ 19.4063575477203*m.x4878 + 13.4299093392025*m.x4879 + 9.23298146789282*m.x4880
+ 9.98435296792508*m.x4881 + 17.4209754300637*m.x4882 + 21.6973816528803*m.x4883
+ 26.6366840703152*m.x4884 + 14.8686815850541*m.x4885 + 19.9640549413374*m.x4886
+ 31.0402427365973*m.x4887 + 4.99988892939056*m.x4888 + 9.64721959705795*m.x4889
+ 23.3235363829419*m.x4890 + 12.4581884996459*m.x4891 + 23.8627844011496*m.x4892
+ 12.4250512558901*m.x4893 + 16.0327981784701*m.x4894 + 21.487923747419*m.x4895
+ 19.4614913676544*m.x4896 + 29.0716476657219*m.x4897 + 29.6875898158785*m.x4898
+ 28.2311952952789*m.x4899 + 21.9431300435405*m.x4900 + 14.1102388332813*m.x4901
+ 23.9415028875795*m.x4902 + 23.1150011047536*m.x4903 + 8.82725048272939*m.x4904
+ 19.4104432075545*m.x4905 + 16.182395364099*m.x4906 + 29.8298016284301*m.x4907
+ 24.6325333049305*m.x4908 + 25.8093596552595*m.x4909 + 22.6861272161952*m.x4910
+ 22.3117215551708*m.x4911 + 15.6988215288095*m.x4912 + 14.4247997367844*m.x4913
+ 5.58363663265803*m.x4914 + 21.6596504227441*m.x4915 + 26.0839456065175*m.x4916
+ 19.9324666546441*m.x4917 + 21.4959881491483*m.x4918 + 20.4366351782354*m.x4919
+ 15.7766582326852*m.x4920 + 26.2511492604241*m.x4921 + 6.77134264305734*m.x4922
+ 9.97407328901669*m.x4923 + 24.2346310285045*m.x4924 + 18.106922125379*m.x4925
+ 23.6870582366393*m.x4926 + 17.1771410066881*m.x4927 + 14.2179615599634*m.x4928
+ 17.2601453283509*m.x4929 + 15.5590841063983*m.x4930 + 27.8003976840659*m.x4931
+ 24.5343219737684*m.x4932 + 21.7054577551677*m.x4933 + 15.7826545502361*m.x4934
+ 5.76966328063583*m.x4935 + 29.8017893284724*m.x4936 + 8.67225519089364*m.x4937
+ 14.2441155481451*m.x4938 + 23.9957386284301*m.x4939 + 26.8772217616388*m.x4940
+ 11.0581121517261*m.x4941 + 25.4245813373256*m.x4942 + 25.2463252260188*m.x4943
+ 23.1433334089649*m.x4944 + 21.1912360630005*m.x4945 + 20.268626598319*m.x4946
+ 23.9347103249511*m.x4947 + 26.1579823967298*m.x4948 + 15.7225475572425*m.x4949
+ 19.1678149131917*m.x4950 + 23.8561245021562*m.x4951 + 30.6573670863799*m.x4952
+ 29.4997014259691*m.x4953 + 14.3018017357749*m.x4954 + 28.1678717088136*m.x4955
+ 15.5982007493491*m.x4956 + 5.86088359300007*m.x4957 + 26.1191120605653*m.x4958
+ 25.7275886145216*m.x4959 + 10.1042259033047*m.x4960 + 31.5202327802288*m.x4961
+ 23.881548508765*m.x4962 + 14.4113780489492*m.x4963 + 8.1866142831364*m.x4964
+ 20.8905990611974*m.x4965 + 21.886786509008*m.x4966 + 15.3346634153293*m.x4967
+ 2.27595818311663*m.x4968 + 14.4680415871071*m.x4969 + 14.9437560415609*m.x4970
+ 19.9129768859468*m.x4971 + 28.4753294429312*m.x4972 + 34.415896384348*m.x4973
+ 31.0442005942545*m.x4974 + 36.6764115840814*m.x4975 + 22.0521077362381*m.x4976
+ 20.5827516089933*m.x4977 + 17.6370937083256*m.x4978 + 40.9488041304815*m.x4979
+ 11.7517436015092*m.x4980 + 10.1756159612203*m.x4981 + 18.9892364926723*m.x4982
+ 28.6762549145836*m.x4983 + 11.1668319971642*m.x4984 + 6.86769424550464*m.x4985
+ 9.13046567520529*m.x4986 + 14.4429813000978*m.x4987 + 20.0631104297129*m.x4988
+ 33.160481219084*m.x4989 + 14.0627708089668*m.x4990 + 14.5122683270075*m.x4991
+ 25.4307361279071*m.x4992 + 2.71984709155871*m.x4993 + 27.2991183043171*m.x4994
+ 28.4692340355188*m.x4995 + 11.8766755077374*m.x4996 + 27.9576516672442*m.x4997
+ 9.13536037772246*m.x4998 + 19.3285856192493*m.x4999 + 19.5308573621274*m.x5000
+ 28.9361827902822*m.x5001 + 34.1483884085374*m.x5002 + 37.8562921071667*m.x5003
+ 36.8030562940602*m.x5004 + 25.2232386475204*m.x5005 + 26.641967553278*m.x5006
+ 20.3579600785657*m.x5007 + 33.5657963837901*m.x5008 + 13.8817743345973*m.x5009
+ 30.0769506744567*m.x5010 + 11.3726148668507*m.x5011 + 14.5905424769864*m.x5012
+ 23.4288714623453*m.x5013 + 34.5037100578148*m.x5014 + 36.3439959420175*m.x5015
+ 25.3680629418045*m.x5016 + 24.0583147981274*m.x5017 + 25.5037421043717*m.x5018
+ 5.40969830495644*m.x5019 + 23.1359656293315*m.x5020 + 8.54862904007093*m.x5021
+ 20.3883616966915*m.x5022 + 13.3192869656586*m.x5023 + 31.3751686381238*m.x5024
+ 19.4880063216675*m.x5025 + 24.6082280533348*m.x5026 + 19.5138064630668*m.x5027
+ 30.6725390711264*m.x5028 + 18.3911623231031*m.x5029 + 21.7397951532865*m.x5030
+ 21.7318581839293*m.x5031 + 9.85342152536218*m.x5032 + 27.697158415967*m.x5033
+ 15.3670985348967*m.x5034 + 4.98222614176874*m.x5035 + 7.39326993973359*m.x5036
+ 41.211101072947*m.x5037 + 9.58978860323988*m.x5038 + 20.0907302046091*m.x5039
+ 30.2765193873919*m.x5040 + 6.03847507775923*m.x5041 + 35.1299147228597*m.x5042
+ 9.38361527309287*m.x5043 + 25.5854729540192*m.x5044 + 23.9795105751475*m.x5045
+ 14.8119709040467*m.x5046 + 40.4831360437446*m.x5047 + 40.0294690616972*m.x5048
+ 18.6195350601824*m.x5049 + 30.859166802492*m.x5050 + 8.15035945738291*m.x5051
+ 34.5810613854052*m.x5052 + 33.1734413468066*m.x5053 + 20.7532564832936*m.x5054
+ 6.84093065055125*m.x5055 + 17.452250401046*m.x5056 + 22.6835496606248*m.x5057
+ 12.1488945756185*m.x5058 + 36.4389991106891*m.x5059 + 22.5172564583202*m.x5060
+ 13.2353247641852*m.x5061 + 14.7871077705047*m.x5062 + 15.7779696396616*m.x5063
+ 9.03556493734827*m.x5064 + 32.4089188793957*m.x5065 + 22.9429007617306*m.x5066
+ 30.4904583328745*m.x5067 + 15.0988641220851*m.x5068 + 8.89634080591297*m.x5069
+ 28.3282194337563*m.x5070 + 31.4326992714189*m.x5071 + 5.96900824146607*m.x5072
+ 21.3926725320712*m.x5073 + 12.9038835536348*m.x5074 + 27.9671753219331*m.x5075
+ 34.4333255506067*m.x5076 + 12.2504790436417*m.x5077 + 18.4046732035097*m.x5078
+ 21.2254915183783*m.x5079 + 17.4244239139893*m.x5080 + 36.7747972301511*m.x5081
+ 36.9813719008935*m.x5082 + 12.3593556406527*m.x5083 + 18.0394893565605*m.x5084
+ 17.3425985946924*m.x5085 + 21.3385645392544*m.x5086 + 9.34294251283749*m.x5087
+ 23.7911447640582*m.x5088 + 12.0593286988116*m.x5089 + 22.608482686343*m.x5090
+ 10.7062984858142*m.x5091 + 35.2290678199175*m.x5092 + 13.8144666185411*m.x5093
+ 17.0155587986034*m.x5094 + 33.6991176796681*m.x5095 + 22.5285531442581*m.x5096
+ 11.5112953569472*m.x5097 + 31.5579014391059*m.x5098 + 18.9595798151741*m.x5099
+ 29.7685660188228*m.x5100 + 12.9129018805166*m.x5101 + 23.5186661700832*m.x5102
+ 37.3809892977948*m.x5103 + 7.79373850706352*m.x5104 + 19.0901819908661*m.x5105
+ 7.34145272990253*m.x5106 + 13.0473730919562*m.x5107 + 34.598718256357*m.x5108
+ 20.9730058176037*m.x5109 + 16.1235002838331*m.x5110 + 23.7771900114159*m.x5111
+ 36.2331766698471*m.x5112 + 11.4980399782103*m.x5113 + 20.693636784263*m.x5114
+ 29.1280553665762*m.x5115 + 14.0926849966712*m.x5116 + 18.1120555206178*m.x5117
+ 11.8838237705884*m.x5118 + 27.0017247629378*m.x5119 + 27.4977561833491*m.x5120
+ 16.3216476962248*m.x5121 + 6.73541459416904*m.x5122 + 20.7545289187039*m.x5123
+ 24.5291849843626*m.x5124 + 3.2441326919642*m.x5125 + 52.8326793989204*m.x5126
+ 51.5537643031585*m.x5127 + 47.9159161990256*m.x5128 + 34.5395045241788*m.x5129
+ 42.9597203339103*m.x5130 + 23.5472836464506*m.x5131 + 50.5742777683294*m.x5132
+ 25.596292160327*m.x5133 + 44.7497768049363*m.x5134 + 40.1065017885328*m.x5135
+ 39.3475366675883*m.x5136 + 46.8408366953458*m.x5137 + 35.8781648534877*m.x5138
+ 16.1250386785257*m.x5139 + 23.8451523122499*m.x5140 + 39.9949441769503*m.x5141
+ 35.7943435510963*m.x5142 + 31.1326936642379*m.x5143 + 34.6860834021669*m.x5144
+ 17.2640531140744*m.x5145 + 45.3633474119924*m.x5146 + 31.5476361737544*m.x5147
+ 42.4454049193487*m.x5148 + 35.8683477289806*m.x5149 + 24.4963875690394*m.x5150
+ 18.5704493377075*m.x5151 + 9.2882411598841*m.x5152 + 26.4205502349443*m.x5153
+ 15.1186443344253*m.x5154 + 21.4119040780537*m.x5155 + 33.841606939431*m.x5156
+ 20.150004857337*m.x5157 + 2.57516150714399*m.x5158 + 37.0484627489479*m.x5159
+ 23.4962276005425*m.x5160 + 22.2912769508219*m.x5161 + 30.8811646876185*m.x5162
+ 27.508612224126*m.x5163 + 5.90205571494402*m.x5164 + 2.88402659695933*m.x5165
+ 19.0989353532805*m.x5166 + 43.6799531245872*m.x5167 + 12.2844241253096*m.x5168
+ 36.6889414138528*m.x5169 + 29.16856752834*m.x5170 + 32.3145880983254*m.x5171
+ 45.5020370886015*m.x5172 + 46.2841905713857*m.x5173 + 2.35586055800411*m.x5174
+ 37.2165027983014*m.x5175 + 9.02866508110952*m.x5176 + 52.2471905417501*m.x5177
+ 8.58677515340579*m.x5178 + 35.9503476036515*m.x5179 + 18.7541921099132*m.x5180
+ 15.4705465898845*m.x5181 + 41.2783902133063*m.x5182 + 41.3425146548019*m.x5183
+ 41.391762907276*m.x5184 + 37.6823230977916*m.x5185 + 39.8119841319626*m.x5186
+ 8.15275508472407*m.x5187 + 24.3971312477528*m.x5188 + 14.7401318303982*m.x5189
+ 6.87138686554757*m.x5190 + 36.1076796097147*m.x5191 + 32.4594025177495*m.x5192
+ 26.1330356632692*m.x5193 + 8.36508127269799*m.x5194 + 43.9813712373155*m.x5195
+ 43.8345830782588*m.x5196 + 35.1597253464445*m.x5197 + 39.4588718702393*m.x5198
+ 39.4356656615205*m.x5199 + 2.78193204826756*m.x5200 + 28.776260324069*m.x5201
+ 5.35532840266266*m.x5202 + 35.7373818259396*m.x5203 + 24.5753775196543*m.x5204
+ 39.4979768668568*m.x5205 + 19.8989272314122*m.x5206 + 54.0995751227369*m.x5207
+ 44.8810392190235*m.x5208 + 35.8941062044463*m.x5209 + 46.2869427721925*m.x5210
+ 45.906012655994*m.x5211 + 22.9936010871305*m.x5212 + 20.40315999883*m.x5213
+ 24.8111594838321*m.x5214 + 32.9458666107213*m.x5215 + 50.2978058810319*m.x5216
+ 32.5119863446674*m.x5217 + 30.8784297371821*m.x5218 + 42.4910228540693*m.x5219
+ 18.0812517099641*m.x5220 + 11.3287052307499*m.x5221 + 29.6708972064899*m.x5222
+ 26.2203136006364*m.x5223 + 39.7375138869712*m.x5224 + 6.35847739721636*m.x5225
+ 34.1717007740803*m.x5226 + 27.9063153940093*m.x5227 + 36.9988025297683*m.x5228
+ 15.3046927644*m.x5229 + 39.1441197258058*m.x5230 + 3.43318399467199*m.x5231
+ 17.627459116926*m.x5232 + 45.1700399447604*m.x5233 + 39.1888192448709*m.x5234
+ 25.3686613395041*m.x5235 + 38.4682588505133*m.x5236 + 33.0394605681078*m.x5237
+ 10.1840159095025*m.x5238 + 45.548970678209*m.x5239 + 29.8224249989329*m.x5240
+ 23.9167576848466*m.x5241 + 37.9044161289253*m.x5242 + 47.4088862015378*m.x5243
+ 47.4591778735183*m.x5244 + 22.7667623590183*m.x5245 + 17.4178439901005*m.x5246
+ 42.2104021434014*m.x5247 + 10.7903030086922*m.x5248 + 17.1686027024882*m.x5249
+ 31.9753879651698*m.x5250 + 46.4543822009226*m.x5251 + 54.9363199390352*m.x5252
+ 6.33678630491226*m.x5253 + 29.3064923924538*m.x5254 +
<gh_stars>1-10
# -*- coding: utf-8 -*-
# -----------------------------------------------------------------------------
# Copyright (c) 2014 <NAME>
#
# Licensed under the terms of the BSD2 License
# See LICENSE.txt for details
# -----------------------------------------------------------------------------
"""Links module for the pythonic interface to SWMM5."""
# Local imports
from pyswmm.swmm5 import PYSWMMException
from pyswmm.toolkitapi import LinkParams, LinkResults, LinkPollut, LinkType, ObjectType
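# Hedged usage sketch (not part of the original module): the helper below mirrors
# the docstring examples further down and shows the classes in a simulation loop.
# The input file path and the `flow` attribute read per step are taken from those
# examples; treat the function as illustrative only.
def _example_report_flows(inp_path='tests/data/TestModel1_weirSetting.inp'):
    """Print the ID and current flow of every link at each routing step."""
    from pyswmm import Simulation  # imported lazily so the sketch has no import-time cost
    with Simulation(inp_path) as sim:
        for _ in sim:
            # Links(sim) is rebuilt each step because its iterator is single-pass
            for link in Links(sim):
                print(link.linkid, link.flow)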
class Links(object):
"""
Link Iterator Methods.
:param object model: Open Model Instance
Examples:
>>> from pyswmm import Simulation, Links
>>>
>>> with Simulation('tests/data/TestModel1_weirSetting.inp') as sim:
... for link in Links(sim):
... print link
... print link.linkid
...
>>> <swmm5.Link object at 0x031B0350>
>>> C1
>>> <swmm5.Link object at 0x030693D0>
>>> C2
>>> <swmm5.Link object at 0x031B0350>
>>> C3
>>> <swmm5.Link object at 0x030693D0>
>>> C0
Iterating over a Links Object
>>> links = Links(sim)
>>> for link in links:
... print link.linkid
>>> C1:C2
>>> C2
>>> C3
Testing Existence
>>> links = Links(sim)
>>> "C1:C2" in links
>>> True
Initializing a link Object
>>> links = Links(sim)
>>> c1c2 = links['C1:C2']
>>> c1c2.flow_limit = 12
>>> c1c2.flow_limit
>>> 12
"""
def __init__(self, model):
if not model._model.fileLoaded:
raise PYSWMMException("SWMM Model Not Open")
self._model = model._model
self._cuindex = 0
self._nLinks = self._model.getProjectSize(ObjectType.LINK.value)
def __len__(self):
"""
Return number of links. Use the expression 'len(Links)'.
:return: Number of Links
:rtype: int
"""
return self._model.getProjectSize(ObjectType.LINK.value)
def __contains__(self, linkid):
"""
Checks if Link ID exists.
:return: ID Exists
:rtype: bool
"""
return self._model.ObjectIDexist(ObjectType.LINK.value, linkid)
def __getitem__(self, linkid):
if self.__contains__(linkid):
ln = Link(self._model, linkid)
_ln = ln
if ln.is_conduit():
_ln.__class__ = Conduit
elif ln.is_pump():
_ln.__class__ = Pump
return _ln
else:
raise PYSWMMException("Link ID: {} Does not Exist".format(linkid))
def __iter__(self):
return self
def __next__(self):
if self._cuindex < self._nLinks:
linkobject = self.__getitem__(self._linkid)
self._cuindex += 1 # Next Iteration
return linkobject
else:
raise StopIteration()
next = __next__ # Python 2
@property
def _linkid(self):
"""Link ID."""
return self._model.getObjectId(ObjectType.LINK.value, self._cuindex)
class Link(object):
"""
Link Methods.
:param object model: Open Model Instance
:param str linkid: Link ID
Examples:
>>> from pyswmm import Simulation, Links
>>>
>>> with Simulation('tests/data/TestModel1_weirSetting.inp') as sim:
... c1c2 = Links(sim)["C1:C2"]
... print c1c2.flow
... for step in sim:
... print c1c2.flow
... 0.0
"""
def __init__(self, model, linkid):
if not model.fileLoaded:
raise PYSWMMException("SWMM Model Not Open")
if linkid not in model.getObjectIDList(ObjectType.LINK.value):
raise PYSWMMException("ID Not valid")
self._model = model
self._linkid = linkid
# --- Get Parameters
# -------------------------------------------------------------------------
@property
def linkid(self):
"""
Get Link ID.
:return: Link ID
:rtype: str
Examples:
>>> from pyswmm import Simulation, Links
>>>
>>> with Simulation('tests/data/TestModel1_weirSetting.inp') as sim:
... c1c2 = Links(sim)["C1:C2"]
... print c1c2.linkid
>>> "C1"
"""
return self._linkid
def is_conduit(self):
"""
Check if link is a Conduit Type.
:return: is conduit
:rtype: bool
Examples:
>>> from pyswmm import Simulation, Links
>>>
>>> with Simulation('tests/data/TestModel1_weirSetting.inp') as sim:
... c1c2 = Links(sim)["C1:C2"]
... print c1c2.is_conduit()
>>> True
"""
return self._model.getLinkType(self._linkid) is LinkType.conduit.value
def is_pump(self):
"""
Check if link is a Pump Type.
:return: is pump
:rtype: bool
Examples:
>>> from pyswmm import Simulation, Links
>>>
>>> with Simulation('tests/data/TestModel1_weirSetting.inp') as sim:
... c1c2 = Links(sim)["C1:C2"]
... print c1c2.is_pump()
>>> False
"""
return self._model.getLinkType(self._linkid) is LinkType.pump.value
def is_orifice(self):
"""
Check if link is an Orifice Type.
:return: is orifice
:rtype: bool
Examples:
>>> from pyswmm import Simulation, Links
>>>
>>> with Simulation('tests/data/TestModel1_weirSetting.inp') as sim:
... c1c2 = Links(sim)["C1:C2"]
... print c1c2.is_orifice()
>>> False
"""
return self._model.getLinkType(self._linkid) is LinkType.orifice.value
def is_weir(self):
"""
Check if link is a Weir Type.
:return: is weir
:rtype: bool
Examples:
>>> from pyswmm import Simulation, Links
>>>
>>> with Simulation('tests/data/TestModel1_weirSetting.inp') as sim:
... c1c2 = Links(sim)["C1:C2"]
... print c1c2.is_weir()
>>> False
"""
return self._model.getLinkType(self._linkid) is LinkType.weir.value
def is_outlet(self):
"""
Check if link is an Outlet Type.
:return: is outlet
:rtype: bool
Examples:
>>> from pyswmm import Simulation, Links
>>>
>>> with Simulation('tests/data/TestModel1_weirSetting.inp') as sim:
... c1c2 = Links(sim)["C1:C2"]
... print c1c2.is_outlet()
>>> False
"""
return self._model.getLinkType(self._linkid) is LinkType.outlet.value
@property
def connections(self):
"""
Get link upstream and downstream node IDs.
:return: ("UpstreamNodeID","DownstreamNodeID")
:rtype: tuple
Examples:
>>> from pyswmm import Simulation, Links
>>>
>>> with Simulation('tests/data/TestModel1_weirSetting.inp') as sim:
... c1c2 = Links(sim)["C1:C2"]
... print c1c2.connections
>>> ("C1","C2")
"""
return self._model.getLinkConnections(self._linkid)
@property
def inlet_node(self):
"""
Get link inlet node ID.
:return: Inlet node ID
:rtype: str
Examples:
>>> from pyswmm import Simulation, Links
>>>
>>> with Simulation('tests/data/TestModel1_weirSetting.inp') as sim:
... c1c2 = Links(sim)["C1:C2"]
... print c1c2.inlet_node
>>> C1
"""
return self._model.getLinkConnections(self._linkid)[0]
@property
def outlet_node(self):
"""
Get link outlet node ID.
:return: Outlet node ID
:rtype: str
Examples:
>>> from pyswmm import Simulation, Links
>>>
>>> with Simulation('tests/data/TestModel1_weirSetting.inp') as sim:
... c1c2 = Links(sim)["C1:C2"]
... print c1c2.outlet_node
>>> C2
"""
return self._model.getLinkConnections(self._linkid)[1]
@property
def inlet_offset(self):
"""
Get/set Upstream Offset Depth.
:return: Parameter Value
:rtype: float
Examples:
>>> from pyswmm import Simulation, Links
>>>
>>> with Simulation('tests/data/TestModel1_weirSetting.inp') as sim:
... c1c2 = Links(sim)["C1:C2"]
... print c1c2.inlet_offset
>>> 0.1
Setting the value
>>> from pyswmm import Simulation, Links
>>>
>>> with Simulation('tests/data/TestModel1_weirSetting.inp') as sim:
... c1c2 = Links(sim)["C1:C2"]
... print c1c2.inlet_offset
... c1c2.inlet_offset = 0.2
... print c1c2.inlet_offset
>>> 0.1
>>> 0.2
"""
return self._model.getLinkParam(self._linkid, LinkParams.offset1.value)
@inlet_offset.setter
def inlet_offset(self, param):
"""Set Link Upstream Link Offset."""
self._model.setLinkParam(self._linkid, LinkParams.offset1.value, param)
@property
def outlet_offset(self):
"""
Get/set Downstream Offset Depth.
:return: Parameter Value
:rtype: float
Examples:
>>> from pyswmm import Simulation, Links
>>>
>>> with Simulation('tests/data/TestModel1_weirSetting.inp') as sim:
... c1c2 = Links(sim)["C1:C2"]
... print c1c2.outlet_offset
>>> 0.1
Setting the value
>>> from pyswmm import Simulation, Links
>>>
>>> with Simulation('tests/data/TestModel1_weirSetting.inp') as sim:
... c1c2 = Links(sim)["C1:C2"]
... print c1c2.outlet_offset
... c1c2.outlet_offset = 0.2
... print c1c2.outlet_offset
>>> 0.1
>>> 0.2
"""
return self._model.getLinkParam(self._linkid, LinkParams.offset2.value)
@outlet_offset.setter
def outlet_offset(self, param):
"""Set Link Downstream Link Offset."""
self._model.setLinkParam(self._linkid, LinkParams.offset2.value, param)
@property
def initial_flow(self):
"""
Get/set Link Initial Flow.
:return: Parameter Value
:rtype: float
Examples:
>>> from pyswmm import Simulation, Links
>>>
>>> with Simulation('tests/data/TestModel1_weirSetting.inp') as sim:
... c1c2 = Links(sim)["C1:C2"]
... print c1c2.initial_flow
>>> 0
Setting the Value
>>> from pyswmm import Simulation, Links
>>>
>>> with Simulation('tests/data/TestModel1_weirSetting.inp') as sim:
... c1c2 = Links(sim)["C1:C2"]
... print c1c2.initial_flow
... c1c2.initial_flow = 0.2
... print c1c2.initial_flow
>>> 0
>>> 0.2
"""
return self._model.getLinkParam(self._linkid, LinkParams.q0.value)
@initial_flow.setter
def initial_flow(self, param):
"""Set Link Initial Flow Rate."""
self._model.setLinkParam(self._linkid, LinkParams.q0.value, param)
@property
def flow_limit(self):
"""
Get/set link flow limit.
:return: Parameter Value
:rtype: float
Examples:
>>> from pyswmm import Simulation, Links
>>>
>>> with Simulation('tests/data/TestModel1_weirSetting.inp') as sim:
... c1c2 = Links(sim)["C1:C2"]
... print c1c2.flow_limit
>>> 0
Setting the Value
>>> from pyswmm import Simulation, Links
>>>
>>> with Simulation('tests/data/TestModel1_weirSetting.inp') as sim:
... c1c2 = Links(sim)["C1:C2"]
... print c1c2.flow_limit
... c1c2.flow_limit = 0.2
... print c1c2.flow_limit
>>> 0
>>> 0.2
"""
return self._model.getLinkParam(self._linkid, LinkParams.qLimit.value)
@flow_limit.setter
def flow_limit(self, param):
"""Set Link Flow Limit."""
self._model.setLinkParam(self._linkid, LinkParams.qLimit.value, param)
@property
def inlet_head_loss(self):
"""
Get/set Inlet Head Loss.
:return: Parameter Value
:rtype: float
Examples:
>>> from pyswmm import Simulation, Links
>>>
>>> with Simulation('tests/data/TestModel1_weirSetting.inp') as sim:
... c1c2 = Links(sim)["C1:C2"]
... print c1c2.inlet_head_loss
>>> 0
Setting the Value
>>> from pyswmm import Simulation, Links
>>>
>>> with Simulation('tests/data/TestModel1_weirSetting.inp') as sim:
... c1c2 = Links(sim)["C1:C2"]
... print c1c2.inlet_head_loss
... c1c2.inlet_head_loss = 0.2
... print c1c2.inlet_head_loss
>>> 0
>>> 0.2
"""
return self._model.getLinkParam(self._linkid,
LinkParams.cLossInlet.value)
@inlet_head_loss.setter
def inlet_head_loss(self, param):
"""Set Link Inlet Head Loss."""
self._model.setLinkParam(self._linkid, LinkParams.cLossInlet.value,
param)
@property
def outlet_head_loss(self):
"""
Get/set Outlet Head Loss.
:return: Parameter Value
:rtype: float
Examples:
>>> from pyswmm import Simulation, Links
>>>
>>> with Simulation('tests/data/TestModel1_weirSetting.inp') as sim:
... c1c2 = Links(sim)["C1:C2"]
... print c1c2.outlet_head_loss
>>> 0
Setting the Value
>>> from pyswmm import Simulation, Links
>>>
>>> with Simulation('tests/data/TestModel1_weirSetting.inp') as sim:
... c1c2 = Links(sim)["C1:C2"]
... print c1c2.outlet_head_loss
... c1c2.outlet_head_loss = 0.2
... print c1c2.outlet_head_loss
>>> 0
>>> 0.2
"""
return self._model.getLinkParam(self._linkid,
LinkParams.cLossOutlet.value)
@outlet_head_loss.setter
def outlet_head_loss(self, param):
"""Set Link Outlet Head Loss."""
self._model.setLinkParam(self._linkid, LinkParams.cLossOutlet.value,
param)
@property
def average_head_loss(self):
"""
Get/set Average Conduit Loss.
:return: Parameter Value
:rtype: float
Examples:
>>> from pyswmm import Simulation, Links
>>>
>>> with Simulation('tests/data/TestModel1_weirSetting.inp') as sim:
... c1c2 = Links(sim)["C1:C2"]
... print c1c2.average_head_loss
>>> 0
Setting the value
>>> from pyswmm import Simulation, Links
str) else str(v) for v in self.todos]
if not isinstance(self.notes, list):
self.notes = [self.notes] if self.notes is not None else []
self.notes = [v if isinstance(v, str) else str(v) for v in self.notes]
if not isinstance(self.comments, list):
self.comments = [self.comments] if self.comments is not None else []
self.comments = [v if isinstance(v, str) else str(v) for v in self.comments]
if not isinstance(self.examples, list):
self.examples = [self.examples] if self.examples is not None else []
self.examples = [v if isinstance(v, Example) else Example(**as_dict(v)) for v in self.examples]
if not isinstance(self.in_subset, list):
self.in_subset = [self.in_subset] if self.in_subset is not None else []
self.in_subset = [v if isinstance(v, SubsetDefinitionName) else SubsetDefinitionName(v) for v in self.in_subset]
if self.from_schema is not None and not isinstance(self.from_schema, URI):
self.from_schema = URI(self.from_schema)
if self.imported_from is not None and not isinstance(self.imported_from, str):
self.imported_from = str(self.imported_from)
if self.source is not None and not isinstance(self.source, URIorCURIE):
self.source = URIorCURIE(self.source)
if self.in_language is not None and not isinstance(self.in_language, str):
self.in_language = str(self.in_language)
if not isinstance(self.see_also, list):
self.see_also = [self.see_also] if self.see_also is not None else []
self.see_also = [v if isinstance(v, URIorCURIE) else URIorCURIE(v) for v in self.see_also]
if self.deprecated_element_has_exact_replacement is not None and not isinstance(self.deprecated_element_has_exact_replacement, URIorCURIE):
self.deprecated_element_has_exact_replacement = URIorCURIE(self.deprecated_element_has_exact_replacement)
if self.deprecated_element_has_possible_replacement is not None and not isinstance(self.deprecated_element_has_possible_replacement, URIorCURIE):
self.deprecated_element_has_possible_replacement = URIorCURIE(self.deprecated_element_has_possible_replacement)
if not isinstance(self.aliases, list):
self.aliases = [self.aliases] if self.aliases is not None else []
self.aliases = [v if isinstance(v, str) else str(v) for v in self.aliases]
self._normalize_inlined_as_dict(slot_name="structured_aliases", slot_type=StructuredAlias, key_name="literal_form", keyed=False)
if not isinstance(self.mappings, list):
self.mappings = [self.mappings] if self.mappings is not None else []
self.mappings = [v if isinstance(v, URIorCURIE) else URIorCURIE(v) for v in self.mappings]
if not isinstance(self.exact_mappings, list):
self.exact_mappings = [self.exact_mappings] if self.exact_mappings is not None else []
self.exact_mappings = [v if isinstance(v, URIorCURIE) else URIorCURIE(v) for v in self.exact_mappings]
if not isinstance(self.close_mappings, list):
self.close_mappings = [self.close_mappings] if self.close_mappings is not None else []
self.close_mappings = [v if isinstance(v, URIorCURIE) else URIorCURIE(v) for v in self.close_mappings]
if not isinstance(self.related_mappings, list):
self.related_mappings = [self.related_mappings] if self.related_mappings is not None else []
self.related_mappings = [v if isinstance(v, URIorCURIE) else URIorCURIE(v) for v in self.related_mappings]
if not isinstance(self.narrow_mappings, list):
self.narrow_mappings = [self.narrow_mappings] if self.narrow_mappings is not None else []
self.narrow_mappings = [v if isinstance(v, URIorCURIE) else URIorCURIE(v) for v in self.narrow_mappings]
if not isinstance(self.broad_mappings, list):
self.broad_mappings = [self.broad_mappings] if self.broad_mappings is not None else []
self.broad_mappings = [v if isinstance(v, URIorCURIE) else URIorCURIE(v) for v in self.broad_mappings]
if self.rank is not None and not isinstance(self.rank, int):
self.rank = int(self.rank)
super().__post_init__(**kwargs)
@dataclass
class SchemaDefinition(Element):
"""
a collection of subset, type, slot and class definitions
"""
_inherited_slots: ClassVar[List[str]] = []
class_class_uri: ClassVar[URIRef] = LINKML.SchemaDefinition
class_class_curie: ClassVar[str] = "linkml:SchemaDefinition"
class_name: ClassVar[str] = "schema_definition"
class_model_uri: ClassVar[URIRef] = LINKML.SchemaDefinition
name: Union[str, SchemaDefinitionName] = None
id: Union[str, URI] = None
version: Optional[str] = None
imports: Optional[Union[Union[str, URIorCURIE], List[Union[str, URIorCURIE]]]] = empty_list()
license: Optional[str] = None
prefixes: Optional[Union[Dict[Union[str, PrefixPrefixPrefix], Union[dict, "Prefix"]], List[Union[dict, "Prefix"]]]] = empty_dict()
emit_prefixes: Optional[Union[Union[str, NCName], List[Union[str, NCName]]]] = empty_list()
default_curi_maps: Optional[Union[str, List[str]]] = empty_list()
default_prefix: Optional[str] = None
default_range: Optional[Union[str, TypeDefinitionName]] = None
subsets: Optional[Union[Dict[Union[str, SubsetDefinitionName], Union[dict, "SubsetDefinition"]], List[Union[dict, "SubsetDefinition"]]]] = empty_dict()
types: Optional[Union[Dict[Union[str, TypeDefinitionName], Union[dict, "TypeDefinition"]], List[Union[dict, "TypeDefinition"]]]] = empty_dict()
enums: Optional[Union[Dict[Union[str, EnumDefinitionName], Union[dict, "EnumDefinition"]], List[Union[dict, "EnumDefinition"]]]] = empty_dict()
slots: Optional[Union[Dict[Union[str, SlotDefinitionName], Union[dict, "SlotDefinition"]], List[Union[dict, "SlotDefinition"]]]] = empty_dict()
classes: Optional[Union[Dict[Union[str, ClassDefinitionName], Union[dict, "ClassDefinition"]], List[Union[dict, "ClassDefinition"]]]] = empty_dict()
metamodel_version: Optional[str] = None
source_file: Optional[str] = None
source_file_date: Optional[Union[str, XSDDateTime]] = None
source_file_size: Optional[int] = None
generation_date: Optional[Union[str, XSDDateTime]] = None
slot_names_unique: Optional[Union[bool, Bool]] = None
settings: Optional[Union[Dict[Union[str, SettingSettingKey], Union[dict, "Setting"]], List[Union[dict, "Setting"]]]] = empty_dict()
categories: Optional[Union[Union[str, URIorCURIE], List[Union[str, URIorCURIE]]]] = empty_list()
keywords: Optional[Union[str, List[str]]] = empty_list()
def __post_init__(self, *_: List[str], **kwargs: Dict[str, Any]):
if self.default_prefix is None:
self.default_prefix = sfx(str(self.id))
if self._is_empty(self.name):
self.MissingRequiredField("name")
if not isinstance(self.name, SchemaDefinitionName):
self.name = SchemaDefinitionName(self.name)
if self._is_empty(self.id):
self.MissingRequiredField("id")
if not isinstance(self.id, URI):
self.id = URI(self.id)
if self.version is not None and not isinstance(self.version, str):
self.version = str(self.version)
if not isinstance(self.imports, list):
self.imports = [self.imports] if self.imports is not None else []
self.imports = [v if isinstance(v, URIorCURIE) else URIorCURIE(v) for v in self.imports]
if self.license is not None and not isinstance(self.license, str):
self.license = str(self.license)
self._normalize_inlined_as_dict(slot_name="prefixes", slot_type=Prefix, key_name="prefix_prefix", keyed=True)
if not isinstance(self.emit_prefixes, list):
self.emit_prefixes = [self.emit_prefixes] if self.emit_prefixes is not None else []
self.emit_prefixes = [v if isinstance(v, NCName) else NCName(v) for v in self.emit_prefixes]
if not isinstance(self.default_curi_maps, list):
self.default_curi_maps = [self.default_curi_maps] if self.default_curi_maps is not None else []
self.default_curi_maps = [v if isinstance(v, str) else str(v) for v in self.default_curi_maps]
if self.default_prefix is not None and not isinstance(self.default_prefix, str):
self.default_prefix = str(self.default_prefix)
if self.default_range is not None and not isinstance(self.default_range, TypeDefinitionName):
self.default_range = TypeDefinitionName(self.default_range)
self._normalize_inlined_as_dict(slot_name="subsets", slot_type=SubsetDefinition, key_name="name", keyed=True)
self._normalize_inlined_as_dict(slot_name="types", slot_type=TypeDefinition, key_name="name", keyed=True)
self._normalize_inlined_as_dict(slot_name="enums", slot_type=EnumDefinition, key_name="name", keyed=True)
self._normalize_inlined_as_dict(slot_name="slots", slot_type=SlotDefinition, key_name="name", keyed=True)
self._normalize_inlined_as_dict(slot_name="classes", slot_type=ClassDefinition, key_name="name", keyed=True)
if self.metamodel_version is not None and not isinstance(self.metamodel_version, str):
self.metamodel_version = str(self.metamodel_version)
if self.source_file is not None and not isinstance(self.source_file, str):
self.source_file = str(self.source_file)
if self.source_file_date is not None and not isinstance(self.source_file_date, XSDDateTime):
self.source_file_date = XSDDateTime(self.source_file_date)
if self.source_file_size is not None and not isinstance(self.source_file_size, int):
self.source_file_size = int(self.source_file_size)
if self.generation_date is not None and not isinstance(self.generation_date, XSDDateTime):
self.generation_date = XSDDateTime(self.generation_date)
if self.slot_names_unique is not None and not isinstance(self.slot_names_unique, Bool):
self.slot_names_unique = Bool(self.slot_names_unique)
self._normalize_inlined_as_dict(slot_name="settings", slot_type=Setting, key_name="setting_key", keyed=True)
if not isinstance(self.categories, list):
self.categories = [self.categories] if self.categories is not None else []
self.categories = [v if isinstance(v, URIorCURIE) else URIorCURIE(v) for v in self.categories]
if not isinstance(self.keywords, list):
self.keywords = [self.keywords] if self.keywords is not None else []
self.keywords = [v if isinstance(v, str) else str(v) for v in self.keywords]
super().__post_init__(**kwargs)
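# Hedged sketch (not part of the original module): SchemaDefinition requires only
# `name` and `id`; __post_init__ above then derives default_prefix from the id via
# sfx(). The URI used here is an illustrative placeholder.
def _example_schema_definition():  # hypothetical helper, illustrative only
    schema = SchemaDefinition(name="example_schema", id="https://example.org/example_schema")
    # schema.default_prefix == sfx(str(schema.id)), i.e. the id with a trailing
    # separator appended when one is missing.
    return schema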
@dataclass
class AnonymousTypeExpression(YAMLRoot):
_inherited_slots: ClassVar[List[str]] = ["pattern", "structured_pattern", "equals_string", "equals_string_in", "equals_number", "minimum_value", "maximum_value"]
class_class_uri: ClassVar[URIRef] = LINKML.AnonymousTypeExpression
class_class_curie: ClassVar[str] = "linkml:AnonymousTypeExpression"
class_name: ClassVar[str] = "anonymous_type_expression"
class_model_uri: ClassVar[URIRef] = LINKML.AnonymousTypeExpression
pattern: Optional[str] = None
structured_pattern: Optional[Union[dict, "PatternExpression"]] = None
equals_string: Optional[str] = None
equals_string_in: Optional[Union[str, List[str]]] = empty_list()
equals_number: Optional[int] = None
minimum_value: Optional[int] = None
maximum_value: Optional[int] = None
none_of: Optional[Union[Union[dict, "AnonymousTypeExpression"], List[Union[dict, "AnonymousTypeExpression"]]]] = empty_list()
exactly_one_of: Optional[Union[Union[dict, "AnonymousTypeExpression"], List[Union[dict, "AnonymousTypeExpression"]]]] = empty_list()
any_of: Optional[Union[Union[dict, "AnonymousTypeExpression"], List[Union[dict, "AnonymousTypeExpression"]]]] = empty_list()
all_of: Optional[Union[Union[dict, "AnonymousTypeExpression"], List[Union[dict, "AnonymousTypeExpression"]]]] = empty_list()
def __post_init__(self, *_: List[str], **kwargs: Dict[str, Any]):
if self.pattern is not None and not isinstance(self.pattern, str):
self.pattern = str(self.pattern)
if self.structured_pattern is not None and not isinstance(self.structured_pattern, PatternExpression):
self.structured_pattern = PatternExpression(**as_dict(self.structured_pattern))
if self.equals_string is not None and not isinstance(self.equals_string, str):
self.equals_string = str(self.equals_string)
if not isinstance(self.equals_string_in, list):
self.equals_string_in = [self.equals_string_in] if self.equals_string_in is not None else []
self.equals_string_in = [v if isinstance(v, str) else str(v) for v in self.equals_string_in]
if self.equals_number is not None and not isinstance(self.equals_number, int):
self.equals_number = int(self.equals_number)
if self.minimum_value is not None and not isinstance(self.minimum_value, int):
self.minimum_value = int(self.minimum_value)
if self.maximum_value is not None and not isinstance(self.maximum_value, int):
self.maximum_value = int(self.maximum_value)
if not isinstance(self.none_of, list):
self.none_of = [self.none_of] if self.none_of is not None else []
self.none_of = [v if isinstance(v, AnonymousTypeExpression) else AnonymousTypeExpression(**as_dict(v)) for v in self.none_of]
if not isinstance(self.exactly_one_of, list):
self.exactly_one_of = [self.exactly_one_of] if self.exactly_one_of is not None else []
self.exactly_one_of = [v if isinstance(v, AnonymousTypeExpression) else AnonymousTypeExpression(**as_dict(v)) for v in self.exactly_one_of]
if not isinstance(self.any_of, list):
self.any_of = [self.any_of] if self.any_of is not None else []
self.any_of = [v if isinstance(v, AnonymousTypeExpression) else AnonymousTypeExpression(**as_dict(v)) for v in self.any_of]
if not isinstance(self.all_of, list):
self.all_of = [self.all_of] if self.all_of is not None else []
self.all_of = [v if isinstance(v, AnonymousTypeExpression) else AnonymousTypeExpression(**as_dict(v)) for v in self.all_of]
super().__post_init__(**kwargs)
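# Hedged sketch (not part of the original module): __post_init__ above normalizes
# terse input, wrapping scalars into lists and coercing plain values to their
# declared types, so a construction like the one below is cleaned up automatically.
def _example_anonymous_type_expression():  # hypothetical helper, illustrative only
    expr = AnonymousTypeExpression(pattern=r"^\d+ mm$", equals_string_in="10 mm", minimum_value=0)
    # After __post_init__: expr.equals_string_in == ["10 mm"] and minimum_value stays an int.
    return expr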
@dataclass
class TypeDefinition(Element):
"""
A data type definition.
"""
_inherited_slots: ClassVar[List[str]] = ["base", "uri", "repr", "pattern", "structured_pattern", "equals_string", "equals_string_in", "equals_number", "minimum_value", "maximum_value"]
class_class_uri: ClassVar[URIRef] = LINKML.TypeDefinition
class_class_curie: ClassVar[str] = "linkml:TypeDefinition"
class_name: ClassVar[str] = "type_definition"
class_model_uri: ClassVar[URIRef] = LINKML.TypeDefinition
name: Union[str, TypeDefinitionName] = None
typeof: Optional[Union[str, TypeDefinitionName]] = None
base: Optional[str] = None
uri: Optional[Union[str, URIorCURIE]] = None
repr: Optional[str] = None
pattern: Optional[str] = None
structured_pattern: Optional[Union[dict, "PatternExpression"]] = None
equals_string: Optional[str] = None
equals_string_in: Optional[Union[str, List[str]]] = empty_list()
equals_number: Optional[int] = None
minimum_value: Optional[int] = None
of the cluster's default database.
"""
return pulumi.get(self, "database")
@database.setter
def database(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "database", value)
@property
@pulumi.getter
def engine(self) -> Optional[pulumi.Input[str]]:
"""
Database engine used by the cluster (ex. `pg` for PostgreSQL, `mysql` for MySQL, `redis` for Redis, or `mongodb` for MongoDB).
"""
return pulumi.get(self, "engine")
@engine.setter
def engine(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "engine", value)
@property
@pulumi.getter(name="evictionPolicy")
def eviction_policy(self) -> Optional[pulumi.Input[str]]:
"""
A string specifying the eviction policy for a Redis cluster. Valid values are: `noeviction`, `allkeys_lru`, `allkeys_random`, `volatile_lru`, `volatile_random`, or `volatile_ttl`.
"""
return pulumi.get(self, "eviction_policy")
@eviction_policy.setter
def eviction_policy(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "eviction_policy", value)
@property
@pulumi.getter
def host(self) -> Optional[pulumi.Input[str]]:
"""
Database cluster's hostname.
"""
return pulumi.get(self, "host")
@host.setter
def host(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "host", value)
@property
@pulumi.getter(name="maintenanceWindows")
def maintenance_windows(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['DatabaseClusterMaintenanceWindowArgs']]]]:
"""
Defines when the automatic maintenance should be performed for the database cluster.
"""
return pulumi.get(self, "maintenance_windows")
@maintenance_windows.setter
def maintenance_windows(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['DatabaseClusterMaintenanceWindowArgs']]]]):
pulumi.set(self, "maintenance_windows", value)
@property
@pulumi.getter
def name(self) -> Optional[pulumi.Input[str]]:
"""
The name of the database cluster.
"""
return pulumi.get(self, "name")
@name.setter
def name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "name", value)
@property
@pulumi.getter(name="nodeCount")
def node_count(self) -> Optional[pulumi.Input[int]]:
"""
Number of nodes that will be included in the cluster.
"""
return pulumi.get(self, "node_count")
@node_count.setter
def node_count(self, value: Optional[pulumi.Input[int]]):
pulumi.set(self, "node_count", value)
@property
@pulumi.getter
def password(self) -> Optional[pulumi.Input[str]]:
"""
Password for the cluster's default user.
"""
return pulumi.get(self, "password")
@password.setter
def password(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "password", value)
@property
@pulumi.getter
def port(self) -> Optional[pulumi.Input[int]]:
"""
Network port that the database cluster is listening on.
"""
return pulumi.get(self, "port")
@port.setter
def port(self, value: Optional[pulumi.Input[int]]):
pulumi.set(self, "port", value)
@property
@pulumi.getter(name="privateHost")
def private_host(self) -> Optional[pulumi.Input[str]]:
"""
Same as `host`, but only accessible from resources within the account and in the same region.
"""
return pulumi.get(self, "private_host")
@private_host.setter
def private_host(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "private_host", value)
@property
@pulumi.getter(name="privateNetworkUuid")
def private_network_uuid(self) -> Optional[pulumi.Input[str]]:
"""
The ID of the VPC where the database cluster will be located.
"""
return pulumi.get(self, "private_network_uuid")
@private_network_uuid.setter
def private_network_uuid(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "private_network_uuid", value)
@property
@pulumi.getter(name="privateUri")
def private_uri(self) -> Optional[pulumi.Input[str]]:
"""
Same as `uri`, but only accessible from resources within the account and in the same region.
"""
return pulumi.get(self, "private_uri")
@private_uri.setter
def private_uri(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "private_uri", value)
@property
@pulumi.getter
def region(self) -> Optional[pulumi.Input[Union[str, 'Region']]]:
"""
DigitalOcean region where the cluster will reside.
"""
return pulumi.get(self, "region")
@region.setter
def region(self, value: Optional[pulumi.Input[Union[str, 'Region']]]):
pulumi.set(self, "region", value)
@property
@pulumi.getter
def size(self) -> Optional[pulumi.Input[Union[str, 'DatabaseSlug']]]:
"""
Database Droplet size associated with the cluster (ex. `db-s-1vcpu-1gb`). See here for a [list of valid size slugs](https://docs.digitalocean.com/reference/api/api-reference/#tag/Databases).
"""
return pulumi.get(self, "size")
@size.setter
def size(self, value: Optional[pulumi.Input[Union[str, 'DatabaseSlug']]]):
pulumi.set(self, "size", value)
@property
@pulumi.getter(name="sqlMode")
def sql_mode(self) -> Optional[pulumi.Input[str]]:
"""
A comma separated string specifying the SQL modes for a MySQL cluster.
"""
return pulumi.get(self, "sql_mode")
@sql_mode.setter
def sql_mode(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "sql_mode", value)
@property
@pulumi.getter
def tags(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
"""
A list of tag names to be applied to the database cluster.
"""
return pulumi.get(self, "tags")
@tags.setter
def tags(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
pulumi.set(self, "tags", value)
@property
@pulumi.getter
def uri(self) -> Optional[pulumi.Input[str]]:
"""
The full URI for connecting to the database cluster.
"""
return pulumi.get(self, "uri")
@uri.setter
def uri(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "uri", value)
@property
@pulumi.getter
def user(self) -> Optional[pulumi.Input[str]]:
"""
Username for the cluster's default user.
"""
return pulumi.get(self, "user")
@user.setter
def user(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "user", value)
@property
@pulumi.getter
def version(self) -> Optional[pulumi.Input[str]]:
"""
Engine version used by the cluster (ex. `11` for PostgreSQL 11).
"""
return pulumi.get(self, "version")
@version.setter
def version(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "version", value)
class DatabaseCluster(pulumi.CustomResource):
@overload
def __init__(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
engine: Optional[pulumi.Input[str]] = None,
eviction_policy: Optional[pulumi.Input[str]] = None,
maintenance_windows: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['DatabaseClusterMaintenanceWindowArgs']]]]] = None,
name: Optional[pulumi.Input[str]] = None,
node_count: Optional[pulumi.Input[int]] = None,
private_network_uuid: Optional[pulumi.Input[str]] = None,
region: Optional[pulumi.Input[Union[str, 'Region']]] = None,
size: Optional[pulumi.Input[Union[str, 'DatabaseSlug']]] = None,
sql_mode: Optional[pulumi.Input[str]] = None,
tags: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
version: Optional[pulumi.Input[str]] = None,
__props__=None):
"""
Provides a DigitalOcean database cluster resource.
## Example Usage
### Create a new PostgreSQL database cluster
```python
import pulumi
import pulumi_digitalocean as digitalocean
postgres_example = digitalocean.DatabaseCluster("postgres-example",
engine="pg",
node_count=1,
region="nyc1",
size="db-s-1vcpu-1gb",
version="11")
```
### Create a new MySQL database cluster
```python
import pulumi
import pulumi_digitalocean as digitalocean
mysql_example = digitalocean.DatabaseCluster("mysql-example",
engine="mysql",
node_count=1,
region="nyc1",
size="db-s-1vcpu-1gb",
version="8")
```
### Create a new Redis database cluster
```python
import pulumi
import pulumi_digitalocean as digitalocean
redis_example = digitalocean.DatabaseCluster("redis-example",
engine="redis",
node_count=1,
region="nyc1",
size="db-s-1vcpu-1gb",
version="6")
```
### Create a new MongoDB database cluster
```python
import pulumi
import pulumi_digitalocean as digitalocean
mongodb_example = digitalocean.DatabaseCluster("mongodb-example",
engine="mongodb",
node_count=1,
region="nyc3",
size="db-s-1vcpu-1gb",
version="4")
```
## Import
Database clusters can be imported using the `id` returned from DigitalOcean, e.g.
```sh
$ pulumi import digitalocean:index/databaseCluster:DatabaseCluster mycluster 245bcfd0-7f31-4ce6-a2bc-475a116cca97
```
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[str] engine: Database engine used by the cluster (ex. `pg` for PostreSQL, `mysql` for MySQL, `redis` for Redis, or `mongodb` for MongoDB).
:param pulumi.Input[str] eviction_policy: A string specifying the eviction policy for a Redis cluster. Valid values are: `noeviction`, `allkeys_lru`, `allkeys_random`, `volatile_lru`, `volatile_random`, or `volatile_ttl`.
:param pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['DatabaseClusterMaintenanceWindowArgs']]]] maintenance_windows: Defines when the automatic maintenance should be performed for the database cluster.
:param pulumi.Input[str] name: The name of the database cluster.
:param pulumi.Input[int] node_count: Number of nodes that will be included in the cluster.
:param pulumi.Input[str] private_network_uuid: The ID of the VPC where the database cluster will be located.
:param pulumi.Input[Union[str, 'Region']] region: DigitalOcean region where the cluster will reside.
:param pulumi.Input[Union[str, 'DatabaseSlug']] size: Database Droplet size associated with the cluster (ex. `db-s-1vcpu-1gb`). See here for a [list of valid size slugs](https://docs.digitalocean.com/reference/api/api-reference/#tag/Databases).
:param pulumi.Input[str] sql_mode: A comma separated string specifying the SQL modes for a MySQL cluster.
:param pulumi.Input[Sequence[pulumi.Input[str]]] tags: A list of tag names to be applied to the database cluster.
:param pulumi.Input[str] version: Engine version used by the cluster (ex. `11` for PostgreSQL 11).
"""
...
@overload
def __init__(__self__,
resource_name: str,
args: DatabaseClusterArgs,
opts: Optional[pulumi.ResourceOptions] = None):
"""
Provides a DigitalOcean database cluster resource.
## Example Usage
### Create a new PostgreSQL database cluster
```python
import pulumi
import pulumi_digitalocean as digitalocean
postgres_example = digitalocean.DatabaseCluster("postgres-example",
engine="pg",
node_count=1,
region="nyc1",
size="db-s-1vcpu-1gb",
version="11")
```
### Create a new MySQL database cluster
```python
import pulumi
import pulumi_digitalocean as digitalocean
mysql_example = digitalocean.DatabaseCluster("mysql-example",
engine="mysql",
node_count=1,
region="nyc1",
size="db-s-1vcpu-1gb",
version="8")
```
### Create a new Redis database cluster
```python
import pulumi
import pulumi_digitalocean as digitalocean
redis_example = digitalocean.DatabaseCluster("redis-example",
engine="redis",
node_count=1,
region="nyc1",
size="db-s-1vcpu-1gb",
version="6")
```
### Create a new MongoDB database cluster
```python
import pulumi
import pulumi_digitalocean as digitalocean
mongodb_example = digitalocean.DatabaseCluster("mongodb-example",
engine="mongodb",
node_count=1,
region="nyc3",
size="db-s-1vcpu-1gb",
version="4")
```
## Import
Database clusters can be imported using the `id` returned from DigitalOcean, e.g.
```sh
$ pulumi import digitalocean:index/databaseCluster:DatabaseCluster mycluster 245bcfd0-7f31-4ce6-a2bc-475a116cca97
```
:param str resource_name: The name of the resource.
:param DatabaseClusterArgs args: The arguments to use to populate this resource's properties.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
...
def __init__(__self__, resource_name: str, *args, **kwargs):
resource_args, opts = _utilities.get_resource_args_opts(DatabaseClusterArgs, pulumi.ResourceOptions, *args, **kwargs)
if resource_args is not None:
__self__._internal_init(resource_name, opts, **resource_args.__dict__)
else:
__self__._internal_init(resource_name, *args, **kwargs)
def _internal_init(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
engine: Optional[pulumi.Input[str]] = None,
eviction_policy: Optional[pulumi.Input[str]] = None,
maintenance_windows: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['DatabaseClusterMaintenanceWindowArgs']]]]] = None,
name: Optional[pulumi.Input[str]] = None,
node_count: Optional[pulumi.Input[int]] = None,
private_network_uuid: Optional[pulumi.Input[str]] = None,
region: Optional[pulumi.Input[Union[str, 'Region']]] = None,
size: Optional[pulumi.Input[Union[str, 'DatabaseSlug']]] = None,
sql_mode: Optional[pulumi.Input[str]] = None,
tags: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
version: Optional[pulumi.Input[str]] = None,
__props__=None):
if opts is None:
opts = pulumi.ResourceOptions()
if not isinstance(opts, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
if opts.version is None:
opts.version = _utilities.get_version()
if opts.id is None:
if __props__ is not None:
raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
__props__ = DatabaseClusterArgs.__new__(DatabaseClusterArgs)
if engine is None and not opts.urn:
raise TypeError("Missing required property 'engine'")
__props__.__dict__["engine"] = engine
__props__.__dict__["eviction_policy"] = eviction_policy
__props__.__dict__["maintenance_windows"] = maintenance_windows
__props__.__dict__["name"] = name
if node_count is None and not opts.urn:
raise TypeError("Missing required property 'node_count'")
__props__.__dict__["node_count"] = | |
nhg_index = gen_nhg_index(self.nhg_count)
self.nhg_ps.set(nhg_index, fvs)
self.nhg_count += 1
# A temporary next hop should be elected to represent the group and
# thus a new labeled next hop should be created
self.asic_db.wait_for_n_keys(self.ASIC_NHS_STR, self.asic_nhs_count + 1)
# Delete a next hop group
delete_nhg()
# The group should be promoted and the other labeled NH should also get
# created
self.asic_db.wait_for_n_keys(self.ASIC_NHG_STR, self.MAX_ECMP_COUNT)
self.asic_db.wait_for_n_keys(self.ASIC_NHS_STR, self.asic_nhs_count + 2)
# Save the promoted NHG index/ID
self.asic_nhgs[nhg_index] = self.get_nhg_id(nhg_index)
# Test scenario:
# - update route to own its NHG and assert no new NHG is added
# - remove a NHG and assert the temporary NHG is promoted and added to ASIC DB
def back_compatibility_test():
# Update the route with a RouteOrch's owned NHG
binary = self.gen_valid_binary()
nhg_fvs = self.gen_nhg_fvs(binary)
self.rt_ps.set('2.2.2.0/24', nhg_fvs)
# Assert no new group has been added
time.sleep(1)
self.asic_db.wait_for_n_keys(self.ASIC_NHG_STR, self.MAX_ECMP_COUNT)
# Delete a next hop group
del_nhg_id = delete_nhg()
self.asic_db.wait_for_deleted_entry(self.ASIC_NHG_STR, del_nhg_id)
# The temporary group should be promoted
self.asic_db.wait_for_n_keys(self.ASIC_NHG_STR, self.MAX_ECMP_COUNT)
# Test scenario:
# - create a NHG with all NHs not existing and assert the NHG is not created
# - update the NHG to have valid NHs and assert a temporary NHG is created
# - update the NHG to all invalid NHs again and assert the update is not performed and thus it has the same SAI
# ID
# - delete the temporary NHG
def invalid_temporary_test():
# Create a temporary NHG that contains only NHs that do not exist
nhg_fvs = swsscommon.FieldValuePairs([('nexthop', '10.0.0.21,10.0.0.23'),
('ifname', 'Ethernet40,Ethernet44')])
nhg_index = gen_nhg_index(self.nhg_count)
self.nhg_count += 1
self.nhg_ps.set(nhg_index, nhg_fvs)
# Assert the group is not created
time.sleep(1)
assert not self.nhg_exists(nhg_index)
# Update the temporary NHG to a valid one
binary = self.gen_valid_binary()
nhg_fvs = self.gen_nhg_fvs(binary)
self.nhg_ps.set(nhg_index, nhg_fvs)
# Assert the temporary group was updated and the group got created
nhg_id = self.get_nhg_id(nhg_index)
assert nhg_id is not None
# Update the temporary NHG to an invalid one again
nhg_fvs = swsscommon.FieldValuePairs([('nexthop', '10.0.0.21,10.0.0.23'),
('ifname', 'Ethernet40,Ethernet44')])
self.nhg_ps.set(nhg_index, nhg_fvs)
# The update should fail and the temporary NHG should still be pointing
# to the old valid NH
assert self.get_nhg_id(nhg_index) == nhg_id
# Delete the temporary group
self.nhg_ps._del(nhg_index)
self.init_test(dvs)
self.nhg_count = self.asic_nhgs_count
self.first_valid_nhg = self.nhg_count
self.asic_nhgs = {}
# Add first batch of next hop groups to reach the NHG limit
while self.nhg_count < self.MAX_ECMP_COUNT:
binary = self.gen_valid_binary()
nhg_fvs = self.gen_nhg_fvs(binary)
nhg_index = gen_nhg_index(self.nhg_count)
self.nhg_ps.set(nhg_index, nhg_fvs)
# Save the NHG index/ID pair
self.asic_nhgs[nhg_index] = self.get_nhg_id(nhg_index)
# Increase the number of NHGs in ASIC DB
self.nhg_count += 1
self.asic_db.wait_for_n_keys(self.ASIC_NHG_STR, self.MAX_ECMP_COUNT)
temporary_group_promotion_test()
group_update_test()
create_delete_temporary_test()
update_temporary_group_test()
route_nhg_update_test()
labeled_nhg_temporary_promotion_test()
back_compatibility_test()
invalid_temporary_test()
# Cleanup
# Delete the route
self.rt_ps._del('2.2.2.0/24')
self.asic_db.wait_for_n_keys(self.ASIC_RT_STR, self.asic_rts_count)
# Delete the next hop groups
for k in self.asic_nhgs:
self.nhg_ps._del(k)
self.asic_db.wait_for_n_keys(self.ASIC_NHG_STR, self.asic_nhgs_count)
def test_route_nhg_exhaust(self, dvs, testlog):
"""
Test the situation of exhausting ECMP group, assume SAI_SWITCH_ATTR_NUMBER_OF_ECMP_GROUPS is 512
In order to achieve that, we will config
1. 9 ports
2. 512 routes with different nexthop group
See Also
--------
SwitchStateBase::set_number_of_ecmp_groups()
https://github.com/Azure/sonic-sairedis/blob/master/vslib/src/SwitchStateBase.cpp
"""
# TODO: check ECMP 512
def gen_ipprefix(r):
""" Construct route like 2.X.X.0/24 """
ip = ipaddress.IPv4Address(IP_INTEGER_BASE + r * 256)
ip = str(ip)
ipprefix = ip + "/24"
return ipprefix
def asic_route_nhg_fvs(k):
fvs = self.asic_db.get_entry(self.ASIC_RT_STR, k)
if not fvs:
return None
nhgid = fvs.get("SAI_ROUTE_ENTRY_ATTR_NEXT_HOP_ID")
if nhgid is None:
return None
fvs = self.asic_db.get_entry(self.ASIC_NHG_STR, nhgid)
return fvs
if sys.version_info < (3, 0):
IP_INTEGER_BASE = int(ipaddress.IPv4Address(unicode("2.2.2.0")))
else:
IP_INTEGER_BASE = int(ipaddress.IPv4Address(str("2.2.2.0")))
self.init_test(dvs)
# Add first batch of routes with unique nexthop groups in AppDB
route_count = 0
while route_count < self.MAX_ECMP_COUNT:
binary = self.gen_valid_binary()
fvs = self.gen_nhg_fvs(binary)
route_ipprefix = gen_ipprefix(route_count)
self.rt_ps.set(route_ipprefix, fvs)
route_count += 1
# Wait and check ASIC DB the count of nexthop groups used
self.asic_db.wait_for_n_keys(self.ASIC_NHG_STR, self.MAX_ECMP_COUNT)
# Wait and check ASIC DB the count of routes
self.asic_db.wait_for_n_keys(self.ASIC_RT_STR, self.asic_rts_count + self.MAX_ECMP_COUNT)
self.asic_rts_count += self.MAX_ECMP_COUNT
# Add a route with labeled NHs
self.asic_nhs_count = len(self.asic_db.get_keys(self.ASIC_NHS_STR))
route_ipprefix = gen_ipprefix(route_count)
fvs = swsscommon.FieldValuePairs([('nexthop', '10.0.0.1,10.0.0.3'),
('mpls_nh', 'push1,push3'),
('ifname', 'Ethernet0,Ethernet4')])
self.rt_ps.set(route_ipprefix, fvs)
route_count += 1
# A temporary route should be created
self.asic_db.wait_for_n_keys(self.ASIC_RT_STR, self.asic_rts_count + 1)
# A NH should be elected as the temporary NHG and it should be created
# as it doesn't exist.
self.asic_db.wait_for_n_keys(self.ASIC_NHS_STR, self.asic_nhs_count + 1)
# Delete the route. The route and the added labeled NH should be
# removed.
self.rt_ps._del(route_ipprefix)
route_count -= 1
self.asic_db.wait_for_n_keys(self.ASIC_RT_STR, self.asic_rts_count)
self.asic_db.wait_for_n_keys(self.ASIC_NHS_STR, self.asic_nhs_count)
# Add second batch of routes with unique nexthop groups in AppDB
# Add more routes with new nexthop group in AppDBdd
route_ipprefix = gen_ipprefix(route_count)
base_ipprefix = route_ipprefix
base = route_count
route_count = 0
while route_count < 10:
binary = self.gen_valid_binary()
fvs = self.gen_nhg_fvs(binary)
route_ipprefix = gen_ipprefix(base + route_count)
self.rt_ps.set(route_ipprefix, fvs)
route_count += 1
last_ipprefix = route_ipprefix
# Wait until we get expected routes and check ASIC DB on the count of nexthop groups used, and it should not increase
self.asic_db.wait_for_n_keys(self.ASIC_RT_STR, self.asic_rts_count + 10)
self.asic_db.wait_for_n_keys(self.ASIC_NHG_STR, self.MAX_ECMP_COUNT)
# Check the route points to next hop group
# Note: no need to wait here
k = self.get_route_id("2.2.2.0/24")
assert k is not None
fvs = asic_route_nhg_fvs(k)
assert fvs is not None
# Check the second batch does not point to next hop group
k = self.get_route_id(base_ipprefix)
assert k is not None
fvs = asic_route_nhg_fvs(k)
assert not(fvs)
# Remove first batch of routes with unique nexthop groups in AppDB
route_count = 0
self.r = 0
while route_count < self.MAX_ECMP_COUNT:
route_ipprefix = gen_ipprefix(route_count)
self.rt_ps._del(route_ipprefix)
route_count += 1
self.asic_rts_count -= self.MAX_ECMP_COUNT
# Wait and check the second batch points to next hop group
# Check ASIC DB on the count of nexthop groups used, and it should not increase or decrease
self.asic_db.wait_for_n_keys(self.ASIC_NHG_STR, 10)
k = self.get_route_id(base_ipprefix)
assert k is not None
fvs = asic_route_nhg_fvs(k)
assert fvs is not None
k = self.get_route_id(last_ipprefix)
assert k is not None
fvs = asic_route_nhg_fvs(k)
assert fvs is not None
# Cleanup
# Remove second batch of routes
for i in range(10):
route_ipprefix = gen_ipprefix(self.MAX_ECMP_COUNT + i)
self.rt_ps._del(route_ipprefix)
self.asic_db.wait_for_n_keys(self.ASIC_NHG_STR, 0)
self.asic_db.wait_for_n_keys(self.ASIC_RT_STR, self.asic_rts_count)
class TestNextHopGroup(TestNextHopGroupBase):
def test_route_nhg(self, dvs, dvs_route, testlog):
self.init_test(dvs, 3)
rtprefix = "2.2.2.0/24"
dvs_route.check_asicdb_deleted_route_entries([rtprefix])
# nexthop group without weight
fvs = swsscommon.FieldValuePairs([("nexthop","10.0.0.1,10.0.0.3,10.0.0.5"),
("ifname", "Ethernet0,Ethernet4,Ethernet8")])
self.rt_ps.set(rtprefix, fvs)
# check if route was propagated to ASIC DB
rtkeys = dvs_route.check_asicdb_route_entries([rtprefix])
# assert the route points to next hop group
fvs = self.asic_db.get_entry(self.ASIC_RT_STR, rtkeys[0])
nhgid = fvs["SAI_ROUTE_ENTRY_ATTR_NEXT_HOP_ID"]
fvs = self.asic_db.get_entry(self.ASIC_NHG_STR, nhgid)
assert bool(fvs)
keys = self.asic_db.get_keys(self.ASIC_NHGM_STR)
assert len(keys) == 3
for k in keys:
fvs = self.asic_db.get_entry(self.ASIC_NHGM_STR, k)
assert fvs["SAI_NEXT_HOP_GROUP_MEMBER_ATTR_NEXT_HOP_GROUP_ID"] == nhgid
# verify weight attributes not in asic db
assert fvs.get("SAI_NEXT_HOP_GROUP_MEMBER_ATTR_WEIGHT") is None
# Remove route 2.2.2.0/24
self.rt_ps._del(rtprefix)
# Wait for route 2.2.2.0/24 to be removed
dvs_route.check_asicdb_deleted_route_entries([rtprefix])
# Negative test with nexthops with incomplete weight info
fvs = swsscommon.FieldValuePairs([("nexthop","10.0.0.1,10.0.0.3,10.0.0.5"),
("ifname", "Ethernet0,Ethernet4,Ethernet8"),
("weight", "10,30")])
self.rt_ps.set(rtprefix, fvs)
# check if route was propagated to ASIC DB
rtkeys = dvs_route.check_asicdb_route_entries([rtprefix])
# assert the route points to next hop group
fvs = self.asic_db.get_entry("ASIC_STATE:SAI_OBJECT_TYPE_ROUTE_ENTRY", rtkeys[0])
nhgid = fvs["SAI_ROUTE_ENTRY_ATTR_NEXT_HOP_ID"]
fvs = self.asic_db.get_entry("ASIC_STATE:SAI_OBJECT_TYPE_NEXT_HOP_GROUP", nhgid)
assert bool(fvs)
keys = self.asic_db.get_keys("ASIC_STATE:SAI_OBJECT_TYPE_NEXT_HOP_GROUP_MEMBER")
assert len(keys) == 3
for k in keys:
fvs = self.asic_db.get_entry("ASIC_STATE:SAI_OBJECT_TYPE_NEXT_HOP_GROUP_MEMBER", k)
assert fvs["SAI_NEXT_HOP_GROUP_MEMBER_ATTR_NEXT_HOP_GROUP_ID"] == nhgid
# verify weight attributes not in asic db
assert fvs.get("SAI_NEXT_HOP_GROUP_MEMBER_ATTR_WEIGHT") is None
# Remove route 2.2.2.0/24
self.rt_ps._del(rtprefix)
# Wait for route 2.2.2.0/24 to be removed
dvs_route.check_asicdb_deleted_route_entries([rtprefix])
fvs = swsscommon.FieldValuePairs([("nexthop","10.0.0.1,10.0.0.3,10.0.0.5"),
("ifname", "Ethernet0,Ethernet4,Ethernet8"),
("weight", "10,30,50")])
self.rt_ps.set(rtprefix, fvs)
# check if route was propagated to ASIC DB
rtkeys = dvs_route.check_asicdb_route_entries([rtprefix])
# assert the route points to next hop group
fvs = self.asic_db.get_entry("ASIC_STATE:SAI_OBJECT_TYPE_ROUTE_ENTRY", rtkeys[0])
nhgid = fvs["SAI_ROUTE_ENTRY_ATTR_NEXT_HOP_ID"]
fvs = self.asic_db.get_entry("ASIC_STATE:SAI_OBJECT_TYPE_NEXT_HOP_GROUP", nhgid)
assert bool(fvs)
keys = self.asic_db.get_keys("ASIC_STATE:SAI_OBJECT_TYPE_NEXT_HOP_GROUP_MEMBER")
assert len(keys) == 3
for k in keys:
fvs = self.asic_db.get_entry("ASIC_STATE:SAI_OBJECT_TYPE_NEXT_HOP_GROUP_MEMBER", k)
assert fvs["SAI_NEXT_HOP_GROUP_MEMBER_ATTR_NEXT_HOP_GROUP_ID"] == nhgid
# verify weight attributes in asic db
nhid = fvs["SAI_NEXT_HOP_GROUP_MEMBER_ATTR_NEXT_HOP_ID"]
weight = fvs["SAI_NEXT_HOP_GROUP_MEMBER_ATTR_WEIGHT"]
fvs = self.asic_db.get_entry("ASIC_STATE:SAI_OBJECT_TYPE_NEXT_HOP", nhid)
nhip = fvs["SAI_NEXT_HOP_ATTR_IP"].split('.')
expected_weight = int(nhip[3]) * 10
assert int(weight) == expected_weight
rtprefix2 = "3.3.3.0/24"
fvs = swsscommon.FieldValuePairs([("nexthop","10.0.0.1,10.0.0.3,10.0.0.5"),
("ifname", "Ethernet0,Ethernet4,Ethernet8"),
("weight", "20,30,40")])
self.rt_ps.set(rtprefix2, fvs)
# wait for route to be programmed
time.sleep(1)
keys = self.asic_db.get_keys("ASIC_STATE:SAI_OBJECT_TYPE_NEXT_HOP_GROUP")
assert len(keys) == 2
keys = self.asic_db.get_keys("ASIC_STATE:SAI_OBJECT_TYPE_NEXT_HOP_GROUP_MEMBER")
| |
action(self):
return self.getTypedRuleContext(ANTLRv4Parser.ActionContext,0)
def getRuleIndex(self):
return ANTLRv4Parser.RULE_prequelConstruct
def enterRule(self, listener:ParseTreeListener):
if hasattr( listener, "enterPrequelConstruct" ):
listener.enterPrequelConstruct(self)
def exitRule(self, listener:ParseTreeListener):
if hasattr( listener, "exitPrequelConstruct" ):
listener.exitPrequelConstruct(self)
def accept(self, visitor:ParseTreeVisitor):
if hasattr( visitor, "visitPrequelConstruct" ):
return visitor.visitPrequelConstruct(self)
else:
return visitor.visitChildren(self)
def prequelConstruct(self):
localctx = ANTLRv4Parser.PrequelConstructContext(self, self._ctx, self.state)
self.enterRule(localctx, 4, self.RULE_prequelConstruct)
try:
self.state = 158
self._errHandler.sync(self)
token = self._input.LA(1)
if token in [ANTLRv4Parser.OPTIONS]:
self.enterOuterAlt(localctx, 1)
self.state = 153
self.optionsSpec()
pass
elif token in [ANTLRv4Parser.IMPORT]:
self.enterOuterAlt(localctx, 2)
self.state = 154
self.delegateGrammars()
pass
elif token in [ANTLRv4Parser.TOKENS]:
self.enterOuterAlt(localctx, 3)
self.state = 155
self.tokensSpec()
pass
elif token in [ANTLRv4Parser.CHANNELS]:
self.enterOuterAlt(localctx, 4)
self.state = 156
self.channelsSpec()
pass
elif token in [ANTLRv4Parser.AT]:
self.enterOuterAlt(localctx, 5)
self.state = 157
self.action()
pass
else:
raise NoViableAltException(self)
except RecognitionException as re:
localctx.exception = re
self._errHandler.reportError(self, re)
self._errHandler.recover(self, re)
finally:
self.exitRule()
return localctx
class OptionsSpecContext(ParserRuleContext):
def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
super().__init__(parent, invokingState)
self.parser = parser
def OPTIONS(self):
return self.getToken(ANTLRv4Parser.OPTIONS, 0)
def RBRACE(self):
return self.getToken(ANTLRv4Parser.RBRACE, 0)
def option(self, i:int=None):
if i is None:
return self.getTypedRuleContexts(ANTLRv4Parser.OptionContext)
else:
return self.getTypedRuleContext(ANTLRv4Parser.OptionContext,i)
def SEMI(self, i:int=None):
if i is None:
return self.getTokens(ANTLRv4Parser.SEMI)
else:
return self.getToken(ANTLRv4Parser.SEMI, i)
def getRuleIndex(self):
return ANTLRv4Parser.RULE_optionsSpec
def enterRule(self, listener:ParseTreeListener):
if hasattr( listener, "enterOptionsSpec" ):
listener.enterOptionsSpec(self)
def exitRule(self, listener:ParseTreeListener):
if hasattr( listener, "exitOptionsSpec" ):
listener.exitOptionsSpec(self)
def accept(self, visitor:ParseTreeVisitor):
if hasattr( visitor, "visitOptionsSpec" ):
return visitor.visitOptionsSpec(self)
else:
return visitor.visitChildren(self)
def optionsSpec(self):
localctx = ANTLRv4Parser.OptionsSpecContext(self, self._ctx, self.state)
self.enterRule(localctx, 6, self.RULE_optionsSpec)
self._la = 0 # Token type
try:
self.enterOuterAlt(localctx, 1)
self.state = 160
self.match(ANTLRv4Parser.OPTIONS)
self.state = 166
self._errHandler.sync(self)
_la = self._input.LA(1)
while _la==ANTLRv4Parser.TOKEN_REF or _la==ANTLRv4Parser.RULE_REF:
self.state = 161
self.option()
self.state = 162
self.match(ANTLRv4Parser.SEMI)
self.state = 168
self._errHandler.sync(self)
_la = self._input.LA(1)
self.state = 169
self.match(ANTLRv4Parser.RBRACE)
except RecognitionException as re:
localctx.exception = re
self._errHandler.reportError(self, re)
self._errHandler.recover(self, re)
finally:
self.exitRule()
return localctx
class OptionContext(ParserRuleContext):
def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
super().__init__(parent, invokingState)
self.parser = parser
self.name = None # IdentifierContext
self.value = None # OptionValueContext
def ASSIGN(self):
return self.getToken(ANTLRv4Parser.ASSIGN, 0)
def identifier(self):
return self.getTypedRuleContext(ANTLRv4Parser.IdentifierContext,0)
def optionValue(self):
return self.getTypedRuleContext(ANTLRv4Parser.OptionValueContext,0)
def getRuleIndex(self):
return ANTLRv4Parser.RULE_option
def enterRule(self, listener:ParseTreeListener):
if hasattr( listener, "enterOption" ):
listener.enterOption(self)
def exitRule(self, listener:ParseTreeListener):
if hasattr( listener, "exitOption" ):
listener.exitOption(self)
def accept(self, visitor:ParseTreeVisitor):
if hasattr( visitor, "visitOption" ):
return visitor.visitOption(self)
else:
return visitor.visitChildren(self)
def option(self):
localctx = ANTLRv4Parser.OptionContext(self, self._ctx, self.state)
self.enterRule(localctx, 8, self.RULE_option)
try:
self.enterOuterAlt(localctx, 1)
self.state = 171
localctx.name = self.identifier()
self.state = 172
self.match(ANTLRv4Parser.ASSIGN)
self.state = 173
localctx.value = self.optionValue()
except RecognitionException as re:
localctx.exception = re
self._errHandler.reportError(self, re)
self._errHandler.recover(self, re)
finally:
self.exitRule()
return localctx
class OptionValueContext(ParserRuleContext):
def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
super().__init__(parent, invokingState)
self.parser = parser
def getRuleIndex(self):
return ANTLRv4Parser.RULE_optionValue
def copyFrom(self, ctx:ParserRuleContext):
super().copyFrom(ctx)
class StringOptionContext(OptionValueContext):
def __init__(self, parser, ctx:ParserRuleContext): # actually a ANTLRv4Parser.OptionValueContext
super().__init__(parser)
self.value = None # Token
self.copyFrom(ctx)
def STRING_LITERAL(self):
return self.getToken(ANTLRv4Parser.STRING_LITERAL, 0)
def enterRule(self, listener:ParseTreeListener):
if hasattr( listener, "enterStringOption" ):
listener.enterStringOption(self)
def exitRule(self, listener:ParseTreeListener):
if hasattr( listener, "exitStringOption" ):
listener.exitStringOption(self)
def accept(self, visitor:ParseTreeVisitor):
if hasattr( visitor, "visitStringOption" ):
return visitor.visitStringOption(self)
else:
return visitor.visitChildren(self)
class IntOptionContext(OptionValueContext):
def __init__(self, parser, ctx:ParserRuleContext): # actually a ANTLRv4Parser.OptionValueContext
super().__init__(parser)
self.value = None # Token
self.copyFrom(ctx)
def INT(self):
return self.getToken(ANTLRv4Parser.INT, 0)
def enterRule(self, listener:ParseTreeListener):
if hasattr( listener, "enterIntOption" ):
listener.enterIntOption(self)
def exitRule(self, listener:ParseTreeListener):
if hasattr( listener, "exitIntOption" ):
listener.exitIntOption(self)
def accept(self, visitor:ParseTreeVisitor):
if hasattr( visitor, "visitIntOption" ):
return visitor.visitIntOption(self)
else:
return visitor.visitChildren(self)
class ActionOptionContext(OptionValueContext):
def __init__(self, parser, ctx:ParserRuleContext): # actually a ANTLRv4Parser.OptionValueContext
super().__init__(parser)
self.value = None # ActionBlockContext
self.copyFrom(ctx)
def actionBlock(self):
return self.getTypedRuleContext(ANTLRv4Parser.ActionBlockContext,0)
def enterRule(self, listener:ParseTreeListener):
if hasattr( listener, "enterActionOption" ):
listener.enterActionOption(self)
def exitRule(self, listener:ParseTreeListener):
if hasattr( listener, "exitActionOption" ):
listener.exitActionOption(self)
def accept(self, visitor:ParseTreeVisitor):
if hasattr( visitor, "visitActionOption" ):
return visitor.visitActionOption(self)
else:
return visitor.visitChildren(self)
class PathOptionContext(OptionValueContext):
def __init__(self, parser, ctx:ParserRuleContext): # actually a ANTLRv4Parser.OptionValueContext
super().__init__(parser)
self._identifier = None # IdentifierContext
self.value = list() # of IdentifierContexts
self.copyFrom(ctx)
def identifier(self, i:int=None):
if i is None:
return self.getTypedRuleContexts(ANTLRv4Parser.IdentifierContext)
else:
return self.getTypedRuleContext(ANTLRv4Parser.IdentifierContext,i)
def DOT(self, i:int=None):
if i is None:
return self.getTokens(ANTLRv4Parser.DOT)
else:
return self.getToken(ANTLRv4Parser.DOT, i)
def enterRule(self, listener:ParseTreeListener):
if hasattr( listener, "enterPathOption" ):
listener.enterPathOption(self)
def exitRule(self, listener:ParseTreeListener):
if hasattr( listener, "exitPathOption" ):
listener.exitPathOption(self)
def accept(self, visitor:ParseTreeVisitor):
if hasattr( visitor, "visitPathOption" ):
return visitor.visitPathOption(self)
else:
return visitor.visitChildren(self)
def optionValue(self):
localctx = ANTLRv4Parser.OptionValueContext(self, self._ctx, self.state)
self.enterRule(localctx, 10, self.RULE_optionValue)
self._la = 0 # Token type
try:
self.state = 186
self._errHandler.sync(self)
token = self._input.LA(1)
if token in [ANTLRv4Parser.TOKEN_REF, ANTLRv4Parser.RULE_REF]:
localctx = ANTLRv4Parser.PathOptionContext(self, localctx)
self.enterOuterAlt(localctx, 1)
self.state = 175
localctx._identifier = self.identifier()
localctx.value.append(localctx._identifier)
self.state = 180
self._errHandler.sync(self)
_la = self._input.LA(1)
while _la==ANTLRv4Parser.DOT:
self.state = 176
self.match(ANTLRv4Parser.DOT)
self.state = 177
localctx._identifier = self.identifier()
localctx.value.append(localctx._identifier)
self.state = 182
self._errHandler.sync(self)
_la = self._input.LA(1)
pass
elif token in [ANTLRv4Parser.STRING_LITERAL]:
localctx = ANTLRv4Parser.StringOptionContext(self, localctx)
self.enterOuterAlt(localctx, 2)
self.state = 183
localctx.value = self.match(ANTLRv4Parser.STRING_LITERAL)
pass
elif token in [ANTLRv4Parser.BEGIN_ACTION]:
localctx = ANTLRv4Parser.ActionOptionContext(self, localctx)
self.enterOuterAlt(localctx, 3)
self.state = 184
localctx.value = self.actionBlock()
pass
elif token in [ANTLRv4Parser.INT]:
localctx = ANTLRv4Parser.IntOptionContext(self, localctx)
self.enterOuterAlt(localctx, 4)
self.state = 185
localctx.value = self.match(ANTLRv4Parser.INT)
pass
else:
raise NoViableAltException(self)
except RecognitionException as re:
localctx.exception = re
self._errHandler.reportError(self, re)
self._errHandler.recover(self, re)
finally:
self.exitRule()
return localctx
class DelegateGrammarsContext(ParserRuleContext):
def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
super().__init__(parent, invokingState)
self.parser = parser
def IMPORT(self):
return self.getToken(ANTLRv4Parser.IMPORT, 0)
def delegateGrammar(self, i:int=None):
if i is None:
return self.getTypedRuleContexts(ANTLRv4Parser.DelegateGrammarContext)
else:
return self.getTypedRuleContext(ANTLRv4Parser.DelegateGrammarContext,i)
def SEMI(self):
return self.getToken(ANTLRv4Parser.SEMI, 0)
def COMMA(self, i:int=None):
if i is None:
return self.getTokens(ANTLRv4Parser.COMMA)
else:
return self.getToken(ANTLRv4Parser.COMMA, i)
def getRuleIndex(self):
return ANTLRv4Parser.RULE_delegateGrammars
def enterRule(self, listener:ParseTreeListener):
if hasattr( listener, "enterDelegateGrammars" ):
listener.enterDelegateGrammars(self)
def exitRule(self, listener:ParseTreeListener):
if hasattr( listener, "exitDelegateGrammars" ):
listener.exitDelegateGrammars(self)
def accept(self, visitor:ParseTreeVisitor):
if hasattr( visitor, "visitDelegateGrammars" ):
return visitor.visitDelegateGrammars(self)
else:
return visitor.visitChildren(self)
def delegateGrammars(self):
localctx = ANTLRv4Parser.DelegateGrammarsContext(self, self._ctx, self.state)
self.enterRule(localctx, 12, self.RULE_delegateGrammars)
self._la = 0 # Token type
try:
self.enterOuterAlt(localctx, 1)
self.state = 188
self.match(ANTLRv4Parser.IMPORT)
self.state = 189
self.delegateGrammar()
self.state = 194
self._errHandler.sync(self)
_la = self._input.LA(1)
while _la==ANTLRv4Parser.COMMA:
self.state = 190
self.match(ANTLRv4Parser.COMMA)
self.state = 191
self.delegateGrammar()
self.state = 196
self._errHandler.sync(self)
_la = self._input.LA(1)
self.state = 197
self.match(ANTLRv4Parser.SEMI)
except RecognitionException as re:
localctx.exception = re
self._errHandler.reportError(self, re)
self._errHandler.recover(self, re)
finally:
self.exitRule()
return localctx
class DelegateGrammarContext(ParserRuleContext):
def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
super().__init__(parent, invokingState)
self.parser = parser
self.value = None # IdentifierContext
def identifier(self):
return self.getTypedRuleContext(ANTLRv4Parser.IdentifierContext,0)
def getRuleIndex(self):
return ANTLRv4Parser.RULE_delegateGrammar
def enterRule(self, listener:ParseTreeListener):
if hasattr( listener, "enterDelegateGrammar" ):
listener.enterDelegateGrammar(self)
def exitRule(self, listener:ParseTreeListener):
if hasattr( listener, "exitDelegateGrammar" ):
listener.exitDelegateGrammar(self)
def accept(self, visitor:ParseTreeVisitor):
if hasattr( visitor, "visitDelegateGrammar" ):
return visitor.visitDelegateGrammar(self)
else:
return visitor.visitChildren(self)
def delegateGrammar(self):
localctx = ANTLRv4Parser.DelegateGrammarContext(self, self._ctx, self.state)
self.enterRule(localctx, 14, self.RULE_delegateGrammar)
try:
self.enterOuterAlt(localctx, 1)
self.state = 199
localctx.value = self.identifier()
except RecognitionException as re:
localctx.exception = re
self._errHandler.reportError(self, re)
self._errHandler.recover(self, re)
finally:
self.exitRule()
return localctx
class TokensSpecContext(ParserRuleContext):
def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
super().__init__(parent, invokingState)
self.parser = parser
self.defs = None # IdListContext
def TOKENS(self):
return self.getToken(ANTLRv4Parser.TOKENS, 0)
def RBRACE(self):
return self.getToken(ANTLRv4Parser.RBRACE, 0)
def idList(self):
return self.getTypedRuleContext(ANTLRv4Parser.IdListContext,0)
def getRuleIndex(self):
return ANTLRv4Parser.RULE_tokensSpec
def enterRule(self, listener:ParseTreeListener):
if hasattr( listener, "enterTokensSpec" ):
listener.enterTokensSpec(self)
def exitRule(self, listener:ParseTreeListener):
if hasattr( listener, "exitTokensSpec" ):
listener.exitTokensSpec(self)
def accept(self, visitor:ParseTreeVisitor):
if hasattr( visitor, "visitTokensSpec" ):
return visitor.visitTokensSpec(self)
else:
return visitor.visitChildren(self)
def tokensSpec(self):
localctx = ANTLRv4Parser.TokensSpecContext(self, self._ctx, self.state)
self.enterRule(localctx, 16, self.RULE_tokensSpec)
self._la = 0 # Token type
try:
self.enterOuterAlt(localctx, 1)
self.state = 201
self.match(ANTLRv4Parser.TOKENS)
self.state = 203
self._errHandler.sync(self)
_la = self._input.LA(1)
if _la==ANTLRv4Parser.TOKEN_REF or _la==ANTLRv4Parser.RULE_REF:
self.state = 202
localctx.defs = self.idList()
self.state = 205
self.match(ANTLRv4Parser.RBRACE)
except RecognitionException as re:
localctx.exception = re
self._errHandler.reportError(self, re)
self._errHandler.recover(self, re)
finally:
self.exitRule()
return localctx
class ChannelsSpecContext(ParserRuleContext):
def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
super().__init__(parent, invokingState)
self.parser = parser
def CHANNELS(self):
return self.getToken(ANTLRv4Parser.CHANNELS, 0)
def RBRACE(self):
return self.getToken(ANTLRv4Parser.RBRACE, 0)
def idList(self):
return self.getTypedRuleContext(ANTLRv4Parser.IdListContext,0)
def getRuleIndex(self):
return ANTLRv4Parser.RULE_channelsSpec
def enterRule(self, listener:ParseTreeListener):
if hasattr( listener, "enterChannelsSpec" ):
listener.enterChannelsSpec(self)
def exitRule(self, listener:ParseTreeListener):
if hasattr( listener, "exitChannelsSpec" ):
listener.exitChannelsSpec(self)
def accept(self, visitor:ParseTreeVisitor):
if hasattr( visitor, "visitChannelsSpec" ):
return visitor.visitChannelsSpec(self)
else:
return visitor.visitChildren(self)
def channelsSpec(self):
localctx = ANTLRv4Parser.ChannelsSpecContext(self, self._ctx, self.state)
self.enterRule(localctx, 18, self.RULE_channelsSpec)
self._la = 0 # Token type
try:
self.enterOuterAlt(localctx, 1)
self.state = 207
self.match(ANTLRv4Parser.CHANNELS)
self.state = 209
self._errHandler.sync(self)
_la = self._input.LA(1)
if _la==ANTLRv4Parser.TOKEN_REF or _la==ANTLRv4Parser.RULE_REF:
self.state = 208
self.idList()
self.state = 211
self.match(ANTLRv4Parser.RBRACE)
except RecognitionException as re:
localctx.exception = re
self._errHandler.reportError(self, re)
self._errHandler.recover(self, re)
finally:
self.exitRule()
return localctx
class IdListContext(ParserRuleContext):
def __init__(self, parser, | |
is_filled else -1
)
new_lidx += 1
# Create new state
new_state = ProcessOrderState(
cash=exec_state.cash,
position=exec_state.position,
debt=exec_state.debt,
free_cash=exec_state.free_cash,
val_price=new_val_price,
value=new_value,
oidx=new_oidx,
lidx=new_lidx
)
return order_result, new_state
@njit(cache=True)
def order_nb(size: float = np.nan,
price: float = np.inf,
size_type: int = SizeType.Amount,
direction: int = Direction.Both,
fees: float = 0.,
fixed_fees: float = 0.,
slippage: float = 0.,
min_size: float = 0.,
max_size: float = np.inf,
size_granularity: float = np.nan,
reject_prob: float = 0.,
lock_cash: bool = False,
allow_partial: bool = True,
raise_reject: bool = False,
log: bool = False) -> Order:
"""Create an order.
See `vectorbt.portfolio.enums.Order` for details on arguments."""
return Order(
size=float(size),
price=float(price),
size_type=int(size_type),
direction=int(direction),
fees=float(fees),
fixed_fees=float(fixed_fees),
slippage=float(slippage),
min_size=float(min_size),
max_size=float(max_size),
size_granularity=float(size_granularity),
reject_prob=float(reject_prob),
lock_cash=bool(lock_cash),
allow_partial=bool(allow_partial),
raise_reject=bool(raise_reject),
log=bool(log)
)
@njit(cache=True)
def close_position_nb(price: float = np.inf,
fees: float = 0.,
fixed_fees: float = 0.,
slippage: float = 0.,
min_size: float = 0.,
max_size: float = np.inf,
size_granularity: float = np.nan,
reject_prob: float = 0.,
lock_cash: bool = False,
allow_partial: bool = True,
raise_reject: bool = False,
log: bool = False) -> Order:
"""Close the current position."""
return order_nb(
size=0.,
price=price,
size_type=SizeType.TargetAmount,
direction=Direction.Both,
fees=fees,
fixed_fees=fixed_fees,
slippage=slippage,
min_size=min_size,
max_size=max_size,
size_granularity=size_granularity,
reject_prob=reject_prob,
lock_cash=lock_cash,
allow_partial=allow_partial,
raise_reject=raise_reject,
log=log
)
@njit(cache=True)
def order_nothing_nb() -> Order:
"""Convenience function to order nothing."""
return NoOrder
# ############# Checks ############# #
@njit(cache=True)
def check_group_lens_nb(group_lens: tp.Array1d, n_cols: int) -> None:
"""Check `group_lens`."""
if np.sum(group_lens) != n_cols:
raise ValueError("group_lens has incorrect total number of columns")
@njit(cache=True)
def check_group_init_cash_nb(group_lens: tp.Array1d, n_cols: int, init_cash: tp.Array1d, cash_sharing: bool) -> None:
"""Check `init_cash`."""
if cash_sharing:
if len(init_cash) != len(group_lens):
raise ValueError("If cash sharing is enabled, init_cash must match the number of groups")
else:
if len(init_cash) != n_cols:
raise ValueError("If cash sharing is disabled, init_cash must match the number of columns")
@njit(cache=True)
def is_grouped_nb(group_lens: tp.Array1d) -> bool:
"""Check if columm,ns are grouped, that is, more than one column per group."""
return np.any(group_lens > 1)
# ############# Call sequence ############# #
@njit(cache=True)
def shuffle_call_seq_nb(call_seq: tp.Array2d, group_lens: tp.Array1d) -> None:
"""Shuffle the call sequence array."""
from_col = 0
for group in range(len(group_lens)):
to_col = from_col + group_lens[group]
for i in range(call_seq.shape[0]):
np.random.shuffle(call_seq[i, from_col:to_col])
from_col = to_col
@njit(cache=True)
def build_call_seq_nb(target_shape: tp.Shape,
group_lens: tp.Array1d,
call_seq_type: int = CallSeqType.Default) -> tp.Array2d:
"""Build a new call sequence array."""
if call_seq_type == CallSeqType.Reversed:
out = np.full(target_shape[1], 1, dtype=np.int_)
out[np.cumsum(group_lens)[1:] - group_lens[1:] - 1] -= group_lens[1:]
out = np.cumsum(out[::-1])[::-1] - 1
out = out * np.ones((target_shape[0], 1), dtype=np.int_)
return out
out = np.full(target_shape[1], 1, dtype=np.int_)
out[np.cumsum(group_lens)[:-1]] -= group_lens[:-1]
out = np.cumsum(out) - 1
out = out * np.ones((target_shape[0], 1), dtype=np.int_)
if call_seq_type == CallSeqType.Random:
shuffle_call_seq_nb(out, group_lens)
return out
def require_call_seq(call_seq: tp.Array2d) -> tp.Array2d:
"""Force the call sequence array to pass our requirements."""
return np.require(call_seq, dtype=np.int_, requirements=['A', 'O', 'W', 'F'])
def build_call_seq(target_shape: tp.Shape,
group_lens: tp.Array1d,
call_seq_type: int = CallSeqType.Default) -> tp.Array2d:
"""Not compiled but faster version of `build_call_seq_nb`."""
call_seq = np.full(target_shape[1], 1, dtype=np.int_)
if call_seq_type == CallSeqType.Reversed:
call_seq[np.cumsum(group_lens)[1:] - group_lens[1:] - 1] -= group_lens[1:]
call_seq = np.cumsum(call_seq[::-1])[::-1] - 1
else:
call_seq[np.cumsum(group_lens[:-1])] -= group_lens[:-1]
call_seq = np.cumsum(call_seq) - 1
call_seq = np.broadcast_to(call_seq, target_shape)
if call_seq_type == CallSeqType.Random:
call_seq = require_call_seq(call_seq)
shuffle_call_seq_nb(call_seq, group_lens)
return require_call_seq(call_seq)
# ############# Helper functions ############# #
@njit(cache=True)
def get_col_elem_nb(ctx: tp.Union[RowContext, SegmentContext, FlexOrderContext], col: int,
a: tp.ArrayLike) -> tp.Scalar:
"""Get the current element using flexible indexing given the context and the column."""
return flex_select_auto_nb(a, ctx.i, col, ctx.flex_2d)
@njit(cache=True)
def get_elem_nb(ctx: tp.Union[OrderContext, PostOrderContext, SignalContext],
a: tp.ArrayLike) -> tp.Scalar:
"""Get the current element using flexible indexing given just the context."""
return flex_select_auto_nb(a, ctx.i, ctx.col, ctx.flex_2d)
@njit(cache=True)
def get_group_value_nb(from_col: int,
to_col: int,
cash_now: float,
last_position: tp.Array1d,
last_val_price: tp.Array1d) -> float:
"""Get group value."""
group_value = cash_now
group_len = to_col - from_col
for k in range(group_len):
col = from_col + k
if last_position[col] != 0:
group_value += last_position[col] * last_val_price[col]
return group_value
@njit(cache=True)
def get_group_value_ctx_nb(seg_ctx: SegmentContext) -> float:
"""Get group value from context.
Accepts `vectorbt.portfolio.enums.SegmentContext`.
Best called once from `pre_segment_func_nb`.
To set the valuation price, change `last_val_price` of the context in-place.
!!! note
Cash sharing must be enabled."""
if not seg_ctx.cash_sharing:
raise ValueError("Cash sharing must be enabled")
return get_group_value_nb(
seg_ctx.from_col,
seg_ctx.to_col,
seg_ctx.last_cash[seg_ctx.group],
seg_ctx.last_position,
seg_ctx.last_val_price
)
@njit(cache=True)
def approx_order_value_nb(size: float,
size_type: int,
direction: int,
cash_now: float,
position_now: float,
free_cash_now: float,
val_price_now: float,
value_now: float) -> float:
"""Approximate value of an order."""
if direction == Direction.ShortOnly:
size *= -1
asset_value_now = position_now * val_price_now
if size_type == SizeType.Amount:
return size * val_price_now
if size_type == SizeType.Value:
return size
if size_type == SizeType.Percent:
if size >= 0:
return size * cash_now
else:
if direction == Direction.LongOnly:
return size * asset_value_now
return size * (2 * max(asset_value_now, 0) + max(free_cash_now, 0))
if size_type == SizeType.TargetAmount:
return size * val_price_now - asset_value_now
if size_type == SizeType.TargetValue:
return size - asset_value_now
if size_type == SizeType.TargetPercent:
return size * value_now - asset_value_now
return np.nan
@njit(cache=True)
def sort_call_seq_out_nb(ctx: SegmentContext,
size: tp.ArrayLike,
size_type: tp.ArrayLike,
direction: tp.ArrayLike,
order_value_out: tp.Array1d,
call_seq_out: tp.Array1d,
ctx_select: bool = True) -> None:
"""Sort call sequence `call_seq_out` based on the value of each potential order.
Accepts `vectorbt.portfolio.enums.SegmentContext` and other arguments, sorts `call_seq_out` in place,
and returns nothing.
Arrays `size`, `size_type`, and `direction` utilize flexible indexing.
If `ctx_select` is True, selects the elements of each `size`, `size_type`, and `direction`
using `get_col_elem_nb` assuming that each array can broadcast to `target_shape`.
Otherwise, selects using `vectorbt.base.reshape_fns.flex_select_auto_nb` assuming that each array
can broadcast to `group_len`.
The lengths of `order_value_out` and `call_seq_out` should match the number of columns in the group.
Array `order_value_out` should be empty and will contain sorted order values after execution.
Array `call_seq_out` should be filled with integers ranging from 0 to the number of columns in the group
(in this exact order).
Best called once from `pre_segment_func_nb`.
!!! note
Cash sharing must be enabled and `call_seq_out` should follow `CallSeqType.Default`.
Should be used in flexible simulation functions."""
if not ctx.cash_sharing:
raise ValueError("Cash sharing must be enabled")
size_arr = np.asarray(size)
size_type_arr = np.asarray(size_type)
direction_arr = np.asarray(direction)
group_value_now = get_group_value_ctx_nb(ctx)
group_len = ctx.to_col - ctx.from_col
for k in range(group_len):
if call_seq_out[k] != k:
raise ValueError("call_seq_out should follow CallSeqType.Default")
col = ctx.from_col + k
if ctx_select:
_size = get_col_elem_nb(ctx, col, size_arr)
_size_type = get_col_elem_nb(ctx, col, size_type_arr)
_direction = get_col_elem_nb(ctx, col, direction_arr)
else:
_size = flex_select_auto_nb(size_arr, k, 0, False)
_size_type = flex_select_auto_nb(size_type_arr, k, 0, False)
_direction = flex_select_auto_nb(direction_arr, k, 0, False)
if ctx.cash_sharing:
cash_now = ctx.last_cash[ctx.group]
free_cash_now = ctx.last_free_cash[ctx.group]
else:
cash_now = ctx.last_cash[col]
free_cash_now = ctx.last_free_cash[col]
order_value_out[k] = approx_order_value_nb(
_size,
_size_type,
_direction,
cash_now,
ctx.last_position[col],
free_cash_now,
ctx.last_val_price[col],
group_value_now
)
# Sort by order value
insert_argsort_nb(order_value_out, call_seq_out)
@njit(cache=True)
def sort_call_seq_nb(ctx: SegmentContext,
size: tp.ArrayLike,
size_type: tp.ArrayLike,
direction: tp.ArrayLike,
order_value_out: tp.Array1d,
ctx_select: bool = True) -> None:
"""Sort call sequence attached to `vectorbt.portfolio.enums.SegmentContext`.
See `sort_call_seq_out_nb`.
!!! note
Can only be used in non-flexible simulation functions."""
if ctx.call_seq_now is None:
raise ValueError("Call sequence array is None. Use sort_call_seq_out_nb to sort a custom array.")
sort_call_seq_out_nb(
ctx,
size,
size_type,
direction,
order_value_out,
ctx.call_seq_now,
ctx_select=ctx_select
)
@njit(cache=True)
def replace_inf_price_nb(prev_close: float, close: float, order: Order) -> Order:
"""Replace infinity price in an order."""
order_price = order.price
if order_price > 0:
order_price = close # upper bound is close
else:
order_price = prev_close # lower bound is prev close
return order_nb(
size=order.size,
price=order_price,
size_type=order.size_type,
direction=order.direction,
fees=order.fees,
fixed_fees=order.fixed_fees,
slippage=order.slippage,
min_size=order.min_size,
max_size=order.max_size,
size_granularity=order.size_granularity,
reject_prob=order.reject_prob,
lock_cash=order.lock_cash,
allow_partial=order.allow_partial,
raise_reject=order.raise_reject,
log=order.log
)
@njit(cache=True)
def try_order_nb(ctx: OrderContext, order: Order) -> tp.Tuple[ExecuteOrderState, OrderResult]:
"""Execute an order without persistence."""
state = ProcessOrderState(
cash=ctx.cash_now,
position=ctx.position_now,
debt=ctx.debt_now,
free_cash=ctx.free_cash_now,
val_price=ctx.val_price_now,
value=ctx.value_now,
oidx=-1,
lidx=-1
)
if np.isinf(order.price):
if ctx.i > 0:
prev_close = flex_select_auto_nb(ctx.close, ctx.i - 1, ctx.col, ctx.flex_2d)
else:
prev_close = np.nan
close = flex_select_auto_nb(ctx.close, ctx.i, ctx.col, ctx.flex_2d)
order = replace_inf_price_nb(prev_close, close, order)
return execute_order_nb(state, order)
@njit(cache=True)
def init_records_nb(target_shape: tp.Shape,
max_orders: tp.Optional[int] = None,
max_logs: int = 0) -> tp.Tuple[tp.RecordArray, tp.RecordArray]:
"""Initialize order and log records."""
if max_orders is None:
max_orders = target_shape[0] * target_shape[1]
order_records = np.empty(max_orders, dtype=order_dt)
if max_logs == 0:
max_logs = 1
log_records = np.empty(max_logs, dtype=log_dt)
return order_records, log_records
@njit(cache=True)
def update_open_pos_stats_nb(record: tp.Record, position_now: float, price: float) -> None:
"""Update statistics of an open position record using custom price."""
if record['id'] >= 0 and record['status'] == TradeStatus.Open:
if np.isnan(record['exit_price']):
exit_price = price
else:
exit_size_sum = record['size'] - abs(position_now)
exit_gross_sum = exit_size_sum * record['exit_price']
exit_gross_sum += | |
times are 'relvant', and will be added
to the environment's 'times' attribute.
exclude:
A list of asset types to exclude in the determination of the
relevant times. Asset types included in this list are only
used to determine what to exlude if the 'include' parameter
has not been specified.
manual:
Automatically overrides include and exlcude arguments. Takes
a list of timelike arguments (either as strings or actual python
time objets) and uses those as the environment's relevant times.
useful for when you want evaluate an algorithm at a different
resolution than the data. For examples, passing manual =
["9:30 AM", "4:00 PM"] will ensure that the algorithms using
this environment will only valuate at 9:30:00 and 16:00:00 each
day, even if the data has a 15m resolution.
"""
if manual:
self._times = [utils.to_time(t) for t in manual]
else:
self._times = [t for data in self.data() for t in data.get_times()]
self._times = sorted(set(self._times))
self._reset_time_index()
def sync(self, date: Optional[DatetimeLike] = None) -> None:
"""Syncs all alphagradient objects in this environment to the given datetime
Valuates all assets to the given date, and sets the date of all
portfolios to the given date. This only occurs for objects
within this environment, rather than globally. Date defaults to
environment's current date if none is provided
Args:
date (datetime): The date to sync to
Returns:
None (NoneType): Modifies this environment in place
"""
if date is None:
self._date = self.start
else:
self._date = to_datetime(date)
def sync_asset(asset):
if getattr(asset, "reset", False):
asset.reset()
asset._valuate()
def sync_portfolio(portfolio):
portfolio.reset()
if self._portfolios:
deque(map(sync_portfolio, self._portfolios), maxlen=0)
if self.assets:
deque(map(sync_asset, self.assets), maxlen=0)
self._synced = False
def optimal_start(
self, end: Optional[DatetimeLike] = None, t: Optional[TimeLike] = "9:30 AM"
) -> datetime:
"""
Returns the optimal starting time for this environment based on
currently instantiated and tracked assets
Returns a backtest starting datetime that:
* Is guaranteed to be within the date range of all intantiated assets
* | Is guaranteed to have ample time for calculations of historical
| volatility, beta, percent change etc. BEFORE the start date
* Automatically adjusts to accomodate shorter ending periods
Parameters:
end (DatetimeLike):
The end point which provides context for the optimal start. The
optimal starting point for an algorithm will be different
depending on when the backtest is ending.
t (Optional[TimeLike]):
The time time of day of the returned optimal start date.
Returns:
An optimal start date for a backtest using this environment.
"""
data = list(self.data())
if not data:
return self.start
max_start = max([dataset.first for dataset in data])
min_end = min([dataset.last for dataset in data])
return utils.optimal_start(
start=self.start, max_start=max_start, min_end=min_end, end=end, t=t
)
def autosync(self) -> None:
"""Automatically syncs this environment to the optimal start time"""
self.sync(self.optimal_start())
def step(self, delta: Union[DateOrTime, timedelta, float] = None) -> None:
"""
Takes a single time step in this environment, moving all
alphagradient objects forward by the given delta
The function that should be called in algorithms to iterate
forward in time after everything has been accomplished and
evaluated in the current period. Automatically moves all ag
objects in this environment forward in time by the given delta,
which defaults to the environment.resolution if none is provided.
Parameters:
delta:
The magnitude of the time step taken
"""
# Want to use the default resolution for this environment as a the step size if None
self._date += (
self.resolution if delta is None else utils.to_step(self.date, delta)
)
# Valuating assets at the new date, calling step hook
for asset in self._assets:
asset._valuate()
asset._step(self.date)
# Updating portfolio value histories at new time step
for portfolio in self._portfolios:
portfolio.update_positions()
portfolio.update_history()
for algo in types.algorithm.instances.values():
if algo.env is self:
algo.stats._update()
# Cleaning out expired assets
self._assets = [asset for asset in self.assets if not asset.expired]
def next(self, make_step: bool = True) -> datetime:
"""
Automatically updates this environment and all of its tracked assets
to the next point of valuation
env.next() automatically determines the next optimal point of valuation
by dynamically calculating the next available datapoint for each
instantiated asset dataset. If self.times has been defined
(self.finalize() has been called), instead uses self.times to determine
the next optimal valuation period.
If make_step is passed as True (default), automatically jumps to the
next time period before returning it.
Parameters:
make_step:
Whether or not to automatically make the time step (iterate to
the next datetime) when called. Defaults to True
Returns:
The next optimal starting date
"""
nextt = self.date
# Operating procedures for when finalize() has been called on an environment
if self.times:
# Reset the time index after syncing or initialization
if not self._synced:
self._reset_time_index()
self._synced = True
# The new index is the next in order
new_index = self._time_index + 1
# Reset the index when we get to the end of a day, add one day to the valuation date
if new_index >= len(self._times):
nextt += timedelta(days=1)
new_index = 0
# Setting the time of to the time at the new (next) index
nextt = utils.set_time(nextt, self._times[new_index])
# Updating the time index
self._time_index = new_index
# Dynamically determining the next best valuation time at every time step; very costly
else:
nextt = min([asset.next for asset in self.assets])
# Perform the step function if requested
if make_step:
self.step(nextt)
return nextt
def _reset_time_index(self) -> None:
"""Resets the to what it should be after syncing or initialization"""
# We only want to compare the time
current = self.date.time()
for i, t in enumerate(self._times):
# Conditions for stopping point
if current < t:
# In this case, the proper index is the last index of times (the previous day)
if i == 0:
self._time_index = len(self.times) - 1
else:
self._time_index = i - 1
break
# If the current time is greater than all times, we must progress to the next day
else:
self._time_index = 0
def buy(self, asset: Asset, quantity: float, name: str = None) -> None:
"""Buys an asset using this environment's main portfolio, unless
specified otherwise.
TODO: ALL OF THESE FUNCTIONS (BUY, SELL, SHORT, COVER) ARE LIKELY
SUFFICIENTLY COVERED BY _redirect. SHOULD PROBABLY BE DELETED.
ALSO, CONSIDER MAKING TRANSACTIONS RETURN THE POSITION THAT THEY
CREATE OR ALTER. EG, return new_position
Creates a long position in the given asset with a purchase
volume given by 'quantity' within the respective portfolio
Parameters:
asset:
The asset in which to create a long position
quantity:
The purchase quantity
name:
The name of the Portfolio where the transaction will take place
Raises:
ValueError:
If environment has no active portfolios, or if name
is not specified when there are multiple portfolios none
of which are named "MAIN"
"""
# Transactions require a portfolio
if self.NONE:
raise ValueError(
"This environment has no active portfolios. "
"Please instantiate one in order to make "
"transactions"
)
# Transaction can only refer to one portfolio
elif self.SINGLE:
portfolio = self._portfolios[0]
portfolio.buy(asset, quantity)
# Portfolio of transaction must be specified
elif self.MULTIPLE:
# Default to main if none is provided
if name is None:
try:
self.portfolios["MAIN"].buy(asset, quantity)
except KeyError:
raise ValueError(
"This environment has multiple "
"portfolios. The portfolio name "
"for this transaction must be "
"specified"
)
# Try to access portfolio by name
else:
try:
portfolio = self.portfolios[name]
portfolio.buy(asset, quantity)
except KeyError:
raise ValueError(
f"Environment has no portfolio "
"instance named "
f"{name.__repr__()}"
)
def sell(self, asset: Asset, quantity: float, name: str = None) -> None:
"""
Sells an asset using this environment's main portfolio, unless
specified otherwise.
Decrements a long position in the given asset by 'quantity'.
Maximum sale quantity is the amount owned by the portfolio.
Parameters:
asset:
The asset of the corresponding decremented position
quantity:
The sale quantity
name:
The name of the Portfolio where the transaction will take place
Raises:
ValueError:
If environment has no active portfolios, or if name
is not specified when there | |
import os
import sys
import subprocess
import pickle
from .utils import format_return, insert_list, docker_error, get_mdi_standard, get_compose_path, get_package_path, get_mdimechanic_yaml
# Paths to enter each identified node
node_paths = { "@DEFAULT": "" }
# Paths associated with the edges for the node graph
node_edge_paths = [ ("@DEFAULT", "") ]
# Use MPI for the tests
use_mpi = False
def docker_up( base_path, docker_path ):
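    """Start the engine test containers with docker-compose.

    Sets the environment variables expected by the compose file (the working
    directory, the MDI Mechanic package directory, and the engine image name
    from mdimechanic.yml) and runs "docker-compose up -d" in docker_path.
    Returns the return code of the docker-compose process.
    """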
mdimechanic_yaml = get_mdimechanic_yaml( base_path )
# Create the docker environment
docker_env = os.environ
docker_env['MDIMECH_WORKDIR'] = base_path
docker_env['MDIMECH_PACKAGEDIR'] = get_package_path()
docker_env['MDIMECH_ENGINE_NAME'] = mdimechanic_yaml['docker']['image_name']
# Run "docker-compose up -d"
up_proc = subprocess.Popen( ["docker-compose", "up", "-d"],
stdout=subprocess.PIPE, stderr=subprocess.PIPE,
cwd=docker_path, env=docker_env )
up_tup = up_proc.communicate()
up_out = format_return(up_tup[0])
up_err = format_return(up_tup[1])
return up_proc.returncode
def docker_down( base_path, docker_path ):
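    """Shut down the containers started by docker_up.

    Runs "docker-compose down" in docker_path and returns the return code
    of the docker-compose process.
    """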
# Create the docker environment
docker_env = os.environ
docker_env['MDIMECH_WORKDIR'] = base_path
docker_env['MDIMECH_PACKAGEDIR'] = get_package_path()
# Run "docker-compose down"
down_proc = subprocess.Popen( ["docker-compose", "down"],
stdout=subprocess.PIPE, stderr=subprocess.PIPE,
cwd=docker_path, env=docker_env )
down_tup = down_proc.communicate()
down_out = format_return(down_tup[0])
down_err = format_return(down_tup[1])
return down_proc.returncode
def test_command( base_path, command, nrecv, recv_type, nsend, send_type ):
global use_mpi
# Remove any leftover files from previous runs of min_driver.py
#base_path = get_base_path()
dat_file = os.path.join( base_path, ".mdimechanic", ".temp", "min_driver.dat" )
err_file = os.path.join( base_path, ".mdimechanic", ".temp", "min_driver.err" )
if os.path.exists( dat_file ):
os.remove( dat_file )
if os.path.exists( err_file ):
os.remove( err_file )
if use_mpi:
mdi_driver_options = "-role DRIVER -name driver -method MPI"
mdi_engine_options = "-role ENGINE -name TESTCODE -method MPI"
docker_path = get_compose_path( "mpi" )
else:
mdi_driver_options = "-role DRIVER -name driver -method TCP -port 8021"
mdi_engine_options = "-role ENGINE -name TESTCODE -method TCP -hostname mdi_mechanic -port 8021"
docker_path = get_compose_path( "tcp" )
# Create the script for MDI Mechanic
docker_file = os.path.join( base_path, ".mdimechanic", ".temp", "docker_mdi_mechanic.sh" )
docker_lines = [ "#!/bin/bash\n",
"\n",
"# Exit if any command fails\n",
"\n",
"cd /MDI_Mechanic/mdimechanic/drivers\n",
"python min_driver.py \\\n"
]
if command is not None:
docker_lines.append( " -command \'" + str(command) + "\' \\\n" )
if nrecv is not None:
docker_lines.append( " -nreceive \'" + str(nrecv) + "\' \\\n" )
if recv_type is not None:
docker_lines.append( " -rtype \'" + str(recv_type) + "\' \\\n" )
if nsend is not None:
docker_lines.append( " -nsend \'" + str(nsend) + "\' \\\n" )
if send_type is not None:
docker_lines.append( " -stype \'" + str(send_type) + "\' \\\n" )
docker_lines.append( " -mdi \"" + str(mdi_driver_options) + "\"\n" )
os.makedirs(os.path.dirname(docker_file), exist_ok=True)
with open(docker_file, 'w') as file:
file.writelines( docker_lines )
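    # Illustrative (assumed) contents of the generated docker_mdi_mechanic.sh for a
    # TCP run; the command "<NATOMS" and its receive count/type are example values,
    # not taken from this file:
    #
    #   #!/bin/bash
    #
    #   # Exit if any command fails
    #
    #   cd /MDI_Mechanic/mdimechanic/drivers
    #   python min_driver.py \
    #     -command '<NATOMS' \
    #     -nreceive '1' \
    #     -rtype 'MDI_INT' \
    #     -mdi "-role DRIVER -name driver -method TCP -port 8021"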
mdimechanic_yaml = get_mdimechanic_yaml( base_path )
script_lines = mdimechanic_yaml['engine_tests'][0]['script']
script = "#!/bin/bash\nset -e\ncd /repo\n"
script += "export MDI_OPTIONS=\'" + str(mdi_engine_options) + "\'\n"
for line in script_lines:
script += line + '\n'
# Write the script to run the test
script_path = os.path.join( base_path, ".mdimechanic", ".temp", "docker_mdi_engine.sh" )
os.makedirs(os.path.dirname(script_path), exist_ok=True)
with open(script_path, "w") as script_file:
script_file.write( script )
if use_mpi:
# Start the docker container
if docker_up( base_path, docker_path ) != 0:
raise Exception("Unable to start docker-compose")
# Create the docker environment
docker_env = os.environ
docker_env['MDIMECH_WORKDIR'] = base_path
docker_env['MDIMECH_PACKAGEDIR'] = get_package_path()
docker_env['MDIMECH_ENGINE_NAME'] = mdimechanic_yaml['docker']['image_name']
# Run "docker-compose exec"
exec_proc = subprocess.Popen( ["docker-compose", "exec", "-T", "--user", "mpiuser", "mdi_mechanic", "mpiexec", "-app", "/MDI_Mechanic/mdimechanic/docker/mpi/mdi_appfile"],
stdout=subprocess.PIPE, stderr=subprocess.PIPE,
cwd=docker_path, env=docker_env )
exec_tup = exec_proc.communicate()
exec_out = format_return(exec_tup[0])
exec_err = format_return(exec_tup[1])
if exec_proc.returncode != 0:
print("FAILED", flush=True)
docker_down( base_path, docker_path )
return False
else:
# Create the docker environment
docker_env = os.environ
docker_env['MDIMECH_WORKDIR'] = base_path
docker_env['MDIMECH_PACKAGEDIR'] = get_package_path()
docker_env['MDIMECH_ENGINE_NAME'] = mdimechanic_yaml['docker']['image_name']
# Run the docker container
up_proc = subprocess.Popen( ["docker-compose", "up", "--exit-code-from", "mdi_mechanic", "--abort-on-container-exit"],
stdout=subprocess.PIPE, stderr=subprocess.PIPE,
cwd=docker_path, env=docker_env )
up_tup = up_proc.communicate()
up_out = format_return(up_tup[0])
up_err = format_return(up_tup[1])
if up_proc.returncode != 0:
print("FAILED", flush=True)
docker_down( base_path, docker_path )
return False
if docker_down( base_path, docker_path ) != 0:
print("FAILED", flush=True)
return False
print("WORKED", flush=True)
return True
def find_nodes( base_path ):
global node_paths
global node_edge_paths
# List of all node commands in the MDI Standard
command_list = []
commands = None
#base_path = get_base_path()
standard = get_mdi_standard( base_path )
commands = standard['commands']
for command in commands:
if command[0] == '@' and command != '@':
command_list.append( command )
ordered_commands = sorted( command_list )
####################
#return
####################
# Check which of the MDI Standard commands work from the @DEFAULT node
for command in ordered_commands:
command_works = test_command( base_path, command, None, None, None, None )
if command_works:
node_paths[command] = command
node_edge_paths.append( (command, command) )
# From the nodes that have currently been identified, attempt to use the "@" command to identify more nodes
print("Searching for supported nodes", flush=True)
original_nodes = []
for node in node_paths.keys():
original_nodes.append(node)
ordered_nodes = sorted( original_nodes )
for node in ordered_nodes:
for ii in range(20):
new_path = node_paths[node]
for jj in range(ii+1):
new_path += " @"
command = new_path + " <@"
print("Checking for node at: " + str(new_path), end=" ")
command_works = test_command( base_path, command, "MDI_COMMAND_LENGTH", "MDI_CHAR", None, None )
# Read the name of the node
node_name = None
dat_file = os.path.join( base_path, ".mdimechanic", ".temp", "min_driver.dat" )
err_file = os.path.join( base_path, ".mdimechanic", ".temp", "min_driver.err" )
if os.path.isfile( dat_file ):
with open( dat_file, "r") as f:
node_name = f.read()
err_value = None
if os.path.isfile( err_file ):
with open( err_file, "r") as f:
err_value = f.read()
if node_name is not None and not node_name in node_paths.keys():
node_paths[node_name] = new_path
# Check whether this should be added to the node graph
if node_name is not None:
split_path = new_path.split()
include = True
for node_edge in node_edge_paths:
if node_edge[0] == node_name:
path = node_edge[1].split()
if path[0] == split_path[0]:
include = False
if include:
node_edge_paths.append( (node_name, new_path) )
print("Completed search for nodes.", flush=True)
print("Found the following nodes: " + str(node_paths.keys()) )
def write_supported_commands( base_path ):
global node_paths
# List of all commands in the MDI Standard
command_list = []
commands = None
#base_path = get_base_path()
standard = get_mdi_standard( base_path )
commands = standard['commands']
for command in commands.keys():
values = commands[command]
command_list.append( command )
ordered_commands = sorted( command_list )
# Identify all supported nodes, and find a path to them
find_nodes( base_path )
ordered_nodes = sorted( node_paths.keys() )
# Write the README.md section that lists all supported commands
command_sec = []
# Write the section header
command_sec.append( "## Supported Commands\n" )
command_sec.append( "\n" )
header_line = "| "
for node in ordered_nodes:
header_line += "| " + str(node) + " "
header_line += "|\n"
command_sec.append( header_line )
header_line = "| ------------- "
for node in ordered_nodes:
header_line += "| ------------- "
header_line += "|\n"
command_sec.append( header_line )
# Write the list of supported commands
for command in ordered_commands:
nrecv = None
recv_type = None
nsend = None
send_type = None
print("---------------------------------------")
print("Testing command: " + str(command))
if commands[command] is not None and 'recv' in commands[command].keys():
nrecv = commands[command]['recv']['count']
recv_type = commands[command]['recv']['datatype']
if commands[command] is not None and 'send' in commands[command].keys():
nsend = commands[command]['send']['count']
send_type = commands[command]['send']['datatype']
line = "| " + str(command) + " "
for node in ordered_nodes:
command_with_path = node_paths[node] + " " + command
padded_string = str(node).ljust(20, '.')
print(padded_string, end=" ")
command_works = test_command( base_path, command_with_path, nrecv, recv_type, nsend, send_type )
if command_works:
# Display a bright green box
command_status = ""
else:
# Display a light gray box
command_status = ""
line += "| " + str(command_status) + " "
line += "|\n"
command_sec.append( line )
# Replace all ">" or "<" symbols with Markdown escape sequences
for iline in range(len(command_sec)):
line = command_sec[iline]
line = line.replace(">", ">")
line = line.replace("<", "<")
command_sec[iline] = line
return command_sec
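# Illustrative (assumed) fragment of the Markdown section returned by
# write_supported_commands(); each row is an MDI command, each column a discovered
# node, and each cell holds the command_status box character chosen above:
#
#   ## Supported Commands
#
#   |  | @DEFAULT | @INIT_MD |
#   | ------------- | ------------- | ------------- |
#   | &lt;NATOMS | [supported box] | [unsupported box] |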
def node_graph( base_path ):
global node_edge_paths
print("*********************************************")
print("* Creating node graph *")
print("*********************************************")
print("node_edge_paths: " + str(node_edge_paths))
package_path = get_package_path()
nodes = {}
edges = []
for edge_path in node_edge_paths:
name = edge_path[0]
path = edge_path[1].split()
if '@' in path:
parent_cluster = str(path[0]) + '_'
if not parent_cluster in nodes.keys():
nodes[ parent_cluster ] = str(name)
edges.append( ( path[0], parent_cluster ) )
else:
nodes[ parent_cluster ] += '\n' + str(name)
else:
nodes[ name ] = name
if name != '@DEFAULT':
edges.append( ( '@DEFAULT', name ) )
print("nodes: " + str(nodes))
if 'x_path_expression' in params:
header_params['XPathExpression'] = params['x_path_expression'] # noqa: E501
if 'xml_value' in params:
header_params['XmlValue'] = params['xml_value'] # noqa: E501
form_params = []
local_var_files = {}
if 'input_file' in params:
local_var_files['inputFile'] = params['input_file'] # noqa: E501
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json', 'text/json', 'application/xml', 'text/xml']) # noqa: E501
# Authentication setting
auth_settings = ['Apikey'] # noqa: E501
return self.api_client.call_api(
'/convert/xml/edit/xpath/set-value', 'POST',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='XmlSetValueWithXPathResult', # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def convert_data_xml_filter_with_x_path(self, x_path_expression, input_file, **kwargs): # noqa: E501
"""Filter, select XML nodes using XPath expression, get results # noqa: E501
Return the results of filtering, selecting an XML document with an XPath expression # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.convert_data_xml_filter_with_x_path(x_path_expression, input_file, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str x_path_expression: Valid XML XPath query expression (required)
:param file input_file: Input file to perform the operation on. (required)
:return: XmlFilterWithXPathResult
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.convert_data_xml_filter_with_x_path_with_http_info(x_path_expression, input_file, **kwargs) # noqa: E501
else:
(data) = self.convert_data_xml_filter_with_x_path_with_http_info(x_path_expression, input_file, **kwargs) # noqa: E501
return data
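    # Hypothetical usage sketch (the `Configuration`, `ApiClient` and `ConvertDataApi`
    # names are assumptions based on the usual swagger-codegen layout, not taken from
    # this file):
    #
    #   configuration = Configuration()
    #   configuration.api_key['Apikey'] = 'YOUR-API-KEY'
    #   api = ConvertDataApi(ApiClient(configuration))
    #   result = api.convert_data_xml_filter_with_x_path('/bookstore/book[1]',
    #                                                    '/path/to/input.xml')
    #   print(result)   # an XmlFilterWithXPathResult model instance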
def convert_data_xml_filter_with_x_path_with_http_info(self, x_path_expression, input_file, **kwargs): # noqa: E501
"""Filter, select XML nodes using XPath expression, get results # noqa: E501
Return the results of filtering, selecting an XML document with an XPath expression # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.convert_data_xml_filter_with_x_path_with_http_info(x_path_expression, input_file, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str x_path_expression: Valid XML XPath query expression (required)
:param file input_file: Input file to perform the operation on. (required)
:return: XmlFilterWithXPathResult
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['x_path_expression', 'input_file'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method convert_data_xml_filter_with_x_path" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'x_path_expression' is set
if ('x_path_expression' not in params or
params['x_path_expression'] is None):
raise ValueError("Missing the required parameter `x_path_expression` when calling `convert_data_xml_filter_with_x_path`") # noqa: E501
# verify the required parameter 'input_file' is set
if ('input_file' not in params or
params['input_file'] is None):
raise ValueError("Missing the required parameter `input_file` when calling `convert_data_xml_filter_with_x_path`") # noqa: E501
collection_formats = {}
path_params = {}
query_params = []
header_params = {}
if 'x_path_expression' in params:
header_params['XPathExpression'] = params['x_path_expression'] # noqa: E501
form_params = []
local_var_files = {}
if 'input_file' in params:
local_var_files['inputFile'] = params['input_file'] # noqa: E501
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json', 'text/json', 'application/xml', 'text/xml']) # noqa: E501
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
['multipart/form-data']) # noqa: E501
# Authentication setting
auth_settings = ['Apikey'] # noqa: E501
return self.api_client.call_api(
'/convert/xml/select/xpath', 'POST',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='XmlFilterWithXPathResult', # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def convert_data_xml_query_with_x_query(self, input_file, x_query, **kwargs): # noqa: E501
"""Query an XML file using XQuery query, get results # noqa: E501
Return the results of querying a single XML document with an XQuery expression. Supports XQuery 3.1 and earlier. This API is optimized for a single XML document as input. Provided XML document is automatically loaded as the default context; to access elements in the document, simply refer to them without a document reference, such as bookstore/book # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.convert_data_xml_query_with_x_query(input_file, x_query, async_req=True)
>>> result = thread.get()
:param async_req bool
:param file input_file: Input XML file to perform the operation on. (required)
:param str x_query: Valid XML XQuery 3.1 or earlier query expression; multi-line expressions are supported (required)
:return: XmlQueryWithXQueryResult
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.convert_data_xml_query_with_x_query_with_http_info(input_file, x_query, **kwargs) # noqa: E501
else:
(data) = self.convert_data_xml_query_with_x_query_with_http_info(input_file, x_query, **kwargs) # noqa: E501
return data
def convert_data_xml_query_with_x_query_with_http_info(self, input_file, x_query, **kwargs): # noqa: E501
"""Query an XML file using XQuery query, get results # noqa: E501
Return the results of querying a single XML document with an XQuery expression. Supports XQuery 3.1 and earlier. This API is optimized for a single XML document as input. Provided XML document is automatically loaded as the default context; to access elements in the document, simply refer to them without a document reference, such as bookstore/book # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.convert_data_xml_query_with_x_query_with_http_info(input_file, x_query, async_req=True)
>>> result = thread.get()
:param async_req bool
:param file input_file: Input XML file to perform the operation on. (required)
:param str x_query: Valid XML XQuery 3.1 or earlier query expression; multi-line expressions are supported (required)
:return: XmlQueryWithXQueryResult
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['input_file', 'x_query'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method convert_data_xml_query_with_x_query" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'input_file' is set
if ('input_file' not in params or
params['input_file'] is None):
raise ValueError("Missing the required parameter `input_file` when calling `convert_data_xml_query_with_x_query`") # noqa: E501
# verify the required parameter 'x_query' is set
if ('x_query' not in params or
params['x_query'] is None):
raise ValueError("Missing the required parameter `x_query` when calling `convert_data_xml_query_with_x_query`") # noqa: E501
collection_formats = {}
path_params = {}
query_params = []
header_params = {}
if 'x_query' in params:
header_params['XQuery'] = params['x_query'] # noqa: E501
form_params = []
local_var_files = {}
if 'input_file' in params:
local_var_files['inputFile'] = params['input_file'] # noqa: E501
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json', 'text/json', 'application/xml', 'text/xml']) # noqa: E501
# Authentication setting
auth_settings = ['Apikey'] # noqa: E501
return self.api_client.call_api(
'/convert/xml/query/xquery', 'POST',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='XmlQueryWithXQueryResult', # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def convert_data_xml_query_with_x_query_multi(self, input_file1, x_query, **kwargs): # noqa: E501
"""Query multiple XML files using XQuery query, get results # noqa: E501
Return the results of querying an XML document with an XQuery expression. Supports XQuery 3.1 and earlier. This API is optimized for multiple XML documents as input. You can refer to the contents of a given document by name, for example doc(\"books.xml\") or doc(\"restaurants.xml\") if you included two input files named books.xml and restaurants.xml. If input files contain no file name, they will default to file names input1.xml, input2.xml and so on. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.convert_data_xml_query_with_x_query_multi(input_file1, x_query, async_req=True)
>>> result = thread.get()
:param async_req bool
:param file input_file1: First input XML file to perform the operation on. (required)
:param str x_query: Valid XML XQuery 3.1 or earlier query expression; multi-line expressions are supported (required)
:param file input_file2: Second input XML file to perform the operation on.
:param file input_file3: Third input XML file to perform the operation on.
:param file input_file4: Fourth input XML file to perform the operation on.
:param file input_file5: Fifth input XML file to perform the operation on.
:param file input_file6: Sixth input XML file to perform the operation on.
:param file input_file7: Seventh input XML file to perform the operation on.
:param file input_file8: Eighth input XML file to perform the operation on.
:param file input_file9: Ninth input XML file to perform the operation on.
:param file input_file10: Tenth input XML file to perform the operation on.
is the number of samples and n_features
is the number of features.
y : None, default=None
Not used but kept for compatibility.
Returns
-------
log_likelihood : array, shape (n_samples,)
Log likelihood of each data point in X.
"""
return self._get_destructor().score_samples(X, y)
def get_support(self):
"""Get the support of this density (i.e. the positive density region).
Returns
-------
support : array-like, shape (2,) or shape (n_features, 2)
If shape is (2, ), then ``support[0]`` is the minimum and
``support[1]`` is the maximum for all features. If shape is
(`n_features`, 2), then each feature's support (which could
be different for each feature) is given similar to the first
case.
"""
return get_domain_or_default(self.destructor)
def _check_global_random_state(f):
"""Decorate function to save, set and reset the global random state.
This is useful for composite or deep destructors where one does not
want to set the random_state for each individual destructor but
wants exact reproducibility.
"""
@wraps(f)
def decorated(self, *args, **kwargs):
"""[Placeholder].
Parameters
----------
self :
args :
kwargs :
Returns
-------
obj : object
"""
# If random_state is None then Just call function directly
if self.random_state is None:
return f(self, *args, **kwargs)
# Save original global random state
# and seed global random state
saved_random_state = np.random.get_state()
rng = check_random_state(self.random_state)
np.random.set_state(rng.get_state())
# Call function and then reset global random state
ret_val = f(self, *args, **kwargs)
np.random.set_state(saved_random_state)
return ret_val
return decorated
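# A minimal usage sketch of the class defined below, assuming only the methods it
# exposes (fit_transform, inverse_transform, score_samples, sample) and the
# IdentityDestructor referenced later in this module; real sub-destructors would be
# substituted for the identities:
#
#   import numpy as np
#   cd = CompositeDestructor(destructors=[IdentityDestructor(), IdentityDestructor()],
#                            random_state=0)
#   X = np.random.rand(100, 2)          # data inside the destructor's domain
#   U = cd.fit_transform(X)             # "destroyed" (approximately uniform) data
#   X_back = cd.inverse_transform(U)    # invert the transformation
#   ll = cd.score_samples(X)            # per-sample log-likelihood
#   samples = cd.sample(10, random_state=0)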
class CompositeDestructor(BaseEstimator, DestructorMixin):
"""Meta destructor composed of multiple destructors.
This meta destructor composes multiple destructors or other
transformations (e.g. relative destructors like LinearProjector)
into a single composite destructor. This is a fundamental building
block for creating more complex destructors from simple atomic
destructors.
Parameters
----------
destructors : list
List of destructor estimators to use as subdestructors.
random_state : int, RandomState instance or None, optional (default=None)
Global random state used if any of the subdestructors are
random-based. By seeding the global :mod:`numpy.random` via
`random_state` and then resetting to its previous state,
we can avoid having to carefully pass around random states for
random-based sub destructors.
If int, `random_state` is the seed used by the random number
generator; If :class:`~numpy.random.RandomState` instance,
`random_state` is the random number generator; If None, the random
number generator is the :class:`~numpy.random.RandomState` instance
used by :mod:`numpy.random`.
Attributes
----------
fitted_destructors_ : list
List of fitted (sub)destructors. (Note that these objects are cloned
via ``sklearn.base.clone`` from the ``destructors`` parameter so as
to avoid mutating the ``destructors`` parameter.)
density_ : estimator
*Implicit* density of composite destructor.
"""
def __init__(self, destructors=None, random_state=None):
self.destructors = destructors
self.random_state = random_state
def fit(self, X, y=None, **fit_params):
"""Fit estimator to X.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Training data, where `n_samples` is the number of samples and
`n_features` is the number of features.
y : None, default=None
Not used in the fitting process but kept for compatibility.
fit_params : dict, optional
Optional extra fit parameters.
Returns
-------
self : estimator
Returns the instance itself.
"""
self.fit_transform(X, y, **fit_params)
return self
@_check_global_random_state
def fit_transform(self, X, y=None, **fit_params):
"""Fit estimator to X and then transform X.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Training data, where `n_samples` is the number of samples and
`n_features` is the number of features.
y : None, default=None
Not used in the fitting process but kept for compatibility.
fit_params : dict, optional
Parameters to pass to the fit method.
Returns
-------
X_new : array-like, shape (n_samples, n_features)
Transformed data.
"""
Z = check_array(X, copy=True)
# Fit and transform all destructors
destructors = []
for d in self._get_destructor_iterable():
Z = self._single_fit_transform(d, Z, y)
destructors.append(d)
if np.any(np.isnan(Z)):
raise RuntimeError('Need to check')
self.fitted_destructors_ = np.array(destructors)
self.density_ = create_implicit_density(self)
return Z
@classmethod
def create_fitted(cls, fitted_destructors, **kwargs):
"""Create fitted destructor.
Parameters
----------
fitted_destructors : array-like of Destructor
Fitted destructors.
**kwargs
Other parameters to pass to constructor.
Returns
-------
fitted_transformer : Transformer
Fitted transformer.
"""
destructor = cls(**kwargs)
destructor.fitted_destructors_ = np.array(fitted_destructors)
destructor.density_ = create_implicit_density(destructor)
return destructor
def _single_fit_transform(self, d, Z, y):
if y is not None:
pass
# warnings.warn('y is not None but this is not an adversarial composite/deep'
# 'destructor. '
# 'Did you mean to use an adversarial version of this destructor?')
return d.fit(Z, y).transform(Z, y)
def transform(self, X, y=None, partial_idx=None):
"""Apply destructive transformation to X.
Parameters
----------
X : array-like, shape (n_samples, n_features)
New data, where `n_samples` is the number of samples and
`n_features` is the number of features.
y : None, default=None
Not used in the transformation but kept for compatibility.
partial_idx : list or None, default=None
List of indices of the fitted destructor to use in
the transformation. The default of None uses all
the fitted destructors. Mainly used for visualization
or debugging.
Returns
-------
X_new : array-like, shape (n_samples, n_features)
Transformed data (possibly only partial transformation).
"""
self._check_is_fitted()
Z = check_array(X, copy=True)
fitted_destructors = self._get_partial_destructors(partial_idx)
for d in fitted_destructors:
Z = d.transform(Z, y)
return Z
def inverse_transform(self, X, y=None, partial_idx=None):
"""Apply inverse destructive transformation to X.
Parameters
----------
X : array-like, shape (n_samples, n_features)
New data, where `n_samples` is the number of samples and
`n_features` is the number of features.
y : None, default=None
Not used in the transformation but kept for compatibility.
partial_idx : list or None, default=None
List of indices of the fitted destructor to use in
the transformation. The default of None uses all
the fitted destructors. Mainly used for visualization
or debugging.
Returns
-------
X_new : array-like, shape (n_samples, n_features)
Transformed data (possibly only partial transformation).
"""
self._check_is_fitted()
Z = check_array(X, copy=True)
fitted_destructors = self._get_partial_destructors(partial_idx)
for d in reversed(fitted_destructors):
Z = d.inverse_transform(Z, y)
return Z
def sample(self, n_samples=1, y=None, random_state=None):
"""Sample from composite destructor.
Nearly the same as ``DestructorMixin.sample`` but the number of
features is found from the last fitted destructor to avoid recursion.
"""
self._check_is_fitted()
rng = check_random_state(random_state)
n_features = get_n_features(self.fitted_destructors_[-1])
U = rng.rand(n_samples, n_features)
X = self.inverse_transform(U, y)
return X
def score_samples(self, X, y=None, partial_idx=None):
"""Compute log-likelihood (or log(det(Jacobian))) for each sample.
Parameters
----------
X : array-like, shape (n_samples, n_features)
New data, where n_samples is the number of samples and n_features
is the number of features.
y : None, default=None
Not used but kept for compatibility.
partial_idx : list or None, default=None
List of indices of the fitted destructor to use in
the computing the log likelihood. The default of None uses all
the fitted destructors. Mainly used for visualization
or debugging.
Returns
-------
log_likelihood : array, shape (n_samples,)
Log likelihood of each data point in X.
"""
return np.sum(self.score_samples_layers(X, y, partial_idx=partial_idx), axis=1)
def score_samples_layers(self, X, y=None, partial_idx=None):
"""[Placeholder].
Parameters
----------
X :
y :
partial_idx :
Returns
-------
obj : object
"""
self._check_is_fitted()
X = check_array(X, copy=True)
fitted_destructors = self._get_partial_destructors(partial_idx)
log_likelihood_layers = np.zeros((X.shape[0], len(fitted_destructors)))
for i, d in enumerate(fitted_destructors):
log_likelihood_layers[:, i] = d.score_samples(X, y)
# Don't transform for the last destructor
if i < len(fitted_destructors) - 1:
X = d.transform(X, y)
return log_likelihood_layers
def score(self, X, y=None, partial_idx=None):
"""Override super class to allow for partial_idx."""
return np.mean(self.score_samples(X, y, partial_idx=partial_idx))
def score_layers(self, X, y=None, partial_idx=None):
"""Override super class to allow for partial_idx."""
return np.mean(self.score_samples_layers(X, y, partial_idx=partial_idx), axis=0)
def get_domain(self):
"""Get the domain of this destructor.
Returns
-------
domain : array-like, shape (2,) or shape (n_features, 2)
If shape is (2, ), then ``domain[0]`` is the minimum and
``domain[1]`` is the maximum for all features. If shape is
(`n_features`, 2), then each feature's domain (which could
be different for each feature) is given similar to the first
case.
"""
# Get the domain of the first destructor (or relative destructor like LinearProjector)
return next(iter(self._get_destructor_iterable())).get_domain()
def _get_partial_destructors(self, partial_idx):
if partial_idx is not None:
return np.array(self.fitted_destructors_)[partial_idx]
else:
return self.fitted_destructors_
def _get_destructor_iterable(self):
if self.destructors is None:
return [IdentityDestructor()]
elif isinstance(self.destructors, (list, tuple, np.ndarray)):
return [clone(d) for d in self.destructors]
else:
raise ValueError('`destructors` must be a list, tuple or numpy array. Sets are not '
'allowed because order is important and general iterators/generators '
'are not allowed because they can only be iterated over once.')
from tkinter import ttk, filedialog, Label, Frame, W, Entry, E, Canvas, NW
from PIL import Image as PILImage,ImageTk
import tkinter as tk
import yaml
import copy
class GUI(tk.Tk):
def __init__(self, *args, **kwargs):
tk.Tk.__init__(self, *args, **kwargs)
#variable for file path
self.yamlPath = ""
self.mapPath = ""
self.tmapPath = ""
#initial frame all elements are stored in
container = tk.Frame(self)
container.grid(row=0,column=0, sticky="nsew")
#array of frames
self.frames = {}
#initialises start page
self.frames["Start"] = StartPage(container, self)
#initialises edit page
self.frames["Edit"] = EditingPage(container, self)
#builds start page
self.frames["Start"].grid(row=0,column=0, sticky="nsew")
#builds edit page
self.frames["Edit"].grid(row=0,column=0, sticky="nsew")
#calls show frame method to bring start page to front
self.show_frame("Start")
#brings page to front
def show_frame(self, page):
#shows page
frame = self.frames[page]
#bring frame to top
frame.tkraise()
#sets variables
frame.onShow()
#start page class
class StartPage(tk.Frame):
def __init__(self, parent, controller):
tk.Frame.__init__(self, parent)
self.mainGUI = controller
#inserted text label for each file selection
YAML =Label(self, width= 20, text = "Insert YAML File Path")
TMAP=Label(self, width= 20, text ="Insert TMAP File Path")
Map =Label(self, width= 20, text = "Insert Map File Path")
#positioning of labels
YAML.grid(row=3, column=0)
TMAP.grid(row=3, column=2)
Map.grid(row=3, column=4)
#declaring buttons for opening files
YAMLButton = ttk.Button(self, text= "Open YAML File", command =self.YAML)
TMAPButton = ttk.Button(self, text= "Open TMAP File", command =self.TMAP)
MapButton = ttk.Button(self, text= "Open Map File", command =self.Map)
nextbutton = ttk.Button(self, text= "Generate", command= lambda: controller.show_frame("Edit"))
#positioning of buttons
YAMLButton.grid(row=3, column=1)
TMAPButton.grid(row=3, column=3)
MapButton.grid(row=3, column=5)
nextbutton.grid(row= 3, column= 6)
#selecting YAML file
def YAML(self):
#sets file type
self.yamlfilename = filedialog.askopenfilename(title = "Select File",filetypes =[('YAML Files', '*.yaml')])
#if user clicks on open file and then does not select path this will display
if(self.yamlfilename == ""):
self.yamlfilename = "/Insert Map File Path"
#sets variable for yaml path for use in file
self.mainGUI.yamlPath = self.yamlfilename
#displays chosen file path
YAMLP=Label(self, width= 20, text =self.mainGUI.yamlPath.split("/")[-1])
YAMLP.grid(row=3, column=0)
#selecting map file
def Map(self):
#sets file type
self.mapfilename = filedialog.askopenfilename(title = "Select File", filetypes =[('PGM Files', '*.pgm')])
#if user clicks on open file and then does not select path this will display
if(self.mapfilename == ""):
self.mapfilename = "/Insert Map File Path"
#sets variable for map path for use in file
self.mainGUI.mapPath = self.mapfilename
#displays chosen file path
MapP=Label(self, width= 20, text =self.mainGUI.mapPath.split("/")[-1])
MapP.grid(row=3, column=4)
#selecting tmap file
def TMAP(self):
#sets file type
self.tmapfilename = filedialog.askopenfilename(title = "Select A File", filetypes =[('TMAP Files', '*.tmap'),('YAML Files', '*.yaml')])
#if user clicks on open file and then does not select path this will display
if(self.tmapfilename == ""):
self.tmapfilename = "/Insert TMAP File Path"
#sets variable for map path for use in file
self.mainGUI.tmapPath = self.tmapfilename
#displays chosen file path
TMAPp=Label(self, width= 20, text = self.mainGUI.tmapPath.split("/")[-1])
TMAPp.grid(row=3, column=2)
def onShow(self):
return
#editing page class
class EditingPage(tk.Frame):
def __init__(self, parent, controller):
tk.Frame.__init__(self, parent)
#for page showing
self.mainGUI = controller
#default of image scale
self.SCALE = 3
#header above image container
self.header = Frame(self)
self.header.grid(row=0, column = 1)
#change file button
FilePagebutton = ttk.Button(self.header, width= 20, text ="select different files", command= lambda:controller.show_frame("Start"))
FilePagebutton.grid()
#footer below image container
self.footer = Frame(self)
self.footer.grid(row=3, column = 1)
#button container left of image
self.buttonContainer = Frame(self)
self.buttonContainer.grid(row = 1, column=0)
#container for node buttons
self.nodeButtons = Frame(self.buttonContainer)
self.nodeButtons.grid(row = 1, column=0)
#label above the node parameter entry boxes
Blank1=Label(self.buttonContainer, width= 20,text ="Insert Parameters:")
Blank1.grid(row=2, column=0)
#container for entry boxes of node editing
self.nodeButtonsEntry = Frame(self.buttonContainer)
self.nodeButtonsEntry.grid(row = 3, column=0)
#blank line
Blank2=Label(self.buttonContainer, width= 20,text ="")
Blank2.grid(row=5, column=0)
#container for paths buttons
self.pathButtons = Frame(self.buttonContainer)
self.pathButtons.grid(row = 6, column=0)
#container for path entry
self.pathButtonsEntry = Frame(self.buttonContainer)
self.pathButtonsEntry.grid(row = 8, column=0)
#container for scale entry
self.scaleContainer = Frame(self.buttonContainer)
self.scaleContainer.grid(row = 12, column=0)
#blank line
Blank4=Label(self.buttonContainer, width= 20,text ="")
Blank4.grid(row=9, column=0)
#Save button
Save = ttk.Button(self.buttonContainer, width= 20, text= "Save", command = self.mapSave)
Save.grid(row=10, column=0)
#blank line
Blank5=Label(self.buttonContainer, width= 20,text ="")
Blank5.grid(row=11, column=0)
#scale label, entry and button
scaleLabel=Label(self.scaleContainer, width= 20,text ="Scale")
self.scaleEntry=Entry(self.scaleContainer, width= 30, text ="Scale")
scaleLabel.grid(row=0, column=0)
self.scaleEntry.grid(row=0, column=1)
Scale=ttk.Button(self.buttonContainer, width= 20,text ="Scale", command = self.scale)
Scale.grid(row=13, column=0)
#add node button
AddNode = ttk.Button(self.nodeButtons, width= 20, text= "Add New Node", command = self.addNode)
AddNode.grid(row=1, column=1)
#delete node button
DeleteNode = ttk.Button(self.nodeButtons, width= 20, text= "Delete A Node", command = self.nodeDelete)
DeleteNode.grid(row=1, column=2)
#edit node button
EditNode = ttk.Button(self.nodeButtons, width= 20, text= "Edit A Node" , command = self.nodeEdit)
EditNode.grid(row=1, column=3)
#node name entry
nodeName=Label(self.nodeButtonsEntry, width= 30,text ="Insert Node Name")
self.nodeNameEntry=Entry(self.nodeButtonsEntry, width= 30, text ="Insert Node Name ")
nodeName.grid(row=2, column=1)
self.nodeNameEntry.grid(row=2, column=2)
#x entry
X=Label(self.nodeButtonsEntry, width= 30,text ="Insert X Coordinate")
self.XEntry=Entry(self.nodeButtonsEntry, width= 30, text ="Insert X Coordinate")
X.grid(row=3, column=1,)
self.XEntry.grid(row=3, column=2)
#y entry
Y=Label(self.nodeButtonsEntry, width= 30,text ="Insert Y Coordinate")
self.YEntry=Entry(self.nodeButtonsEntry, width= 30, text ="Insert Y Coordinate")
Y.grid(row=4, column=1)
self.YEntry.grid(row=4, column=2)
#add path button
AddPath = ttk.Button(self.pathButtons, width= 20, text= "Add Path" , command = self.pathAdd)
AddPath.grid(row=1, column=1)
#delete path button
DeletePath = ttk.Button(self.pathButtons, width= 20, text= "Delete Path" , command = self.pathDelete)
DeletePath.grid(row=1, column=2)
#label above the path parameter entry boxes
Blank3=Label(self.buttonContainer, width= 20,text ="Insert Parameters:")
Blank3.grid(row=7, column=0)
#path start entry
pathStart=Label(self.pathButtonsEntry, width= 30,text ="Insert Starting Node Name")
self.pathStartEntry=Entry(self.pathButtonsEntry, width= 30, text ="Insert Starting Node Name ")
pathStart.grid(row=2, column=1)
self.pathStartEntry.grid(row=2, column=2)
#path end entry
pathEnd=Label(self.pathButtonsEntry, width= 30,text ="Insert Destination Node Name")
self.pathEndEntry=Entry(self.pathButtonsEntry, width= 30, text ="Insert Destination Node Name ")
pathEnd.grid(row=3, column=1)
self.pathEndEntry.grid(row=3, column=2)
#add node click button
addNodeClick = ttk.Button(self.buttonContainer, width= 20, text= "Add Node", command = self.addNodeClick)
addNodeClick.grid(row=14, column=0)
#delete node click button
deleteNodeClick = ttk.Button(self.buttonContainer, width= 20, text= "Delete Node", command = self.deleteNodeClick)
deleteNodeClick.grid(row=15, column=0)
#add path click button
addPathClick = ttk.Button(self.buttonContainer, width= 20, text= "Add Path", command = self.addPathClick)
addPathClick.grid(row=17, column=0)
#delete path click button
deletePathClick = ttk.Button(self.buttonContainer, width= 20, text= "Delete Path", command = self.deletePathClick)
deletePathClick.grid(row=18, column=0)
#Opening the Yaml File to Print out the Contents
def readYaml(self, filename):
#opens file
with open(filename) as fileReader:
#reads file into array
node_coords = yaml.load(fileReader, Loader=yaml.FullLoader)
#returns file
return (node_coords)
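    # Assumed shape of a single topological-map node, inferred from the keys accessed
    # in addNodeClickPlace() below (the real schema may contain additional fields):
    #
    #   {"meta": {"node": "WayPoint1"},
    #    "node": {"name": "WayPoint1",
    #             "pose": {"position": {"x": 0.0, "y": 0.0}}}}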
def addNodeClick(self):
#sets command type for getxy function
self.type=1
#binds next mouse click to event variable and sends to getxy function
self.canvas.bind('<Button-1>', self.getxy)
def deleteNodeClick(self):
#sets command type for getxy
self.type=2
#binds next mouse click to event variable and sends to getxy function
self.canvas.bind('<Button-1>', self.getxy)
def addPathClick(self):
#sets command type for getxy
self.type=3
#binds next mouse click to event variable and sends to getxy function
self.canvas.bind('<Button-1>', self.getxy)
def deletePathClick(self):
#sets command type for getxy
self.type=4
#binds next mouse click to event variable and sends to getxy function
self.canvas.bind('<Button-1>', self.getxy)
def getxy(self, event):
#scales x,y into map scale
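        # (canvas pixel -> map metres, assuming a 0.05 m/pixel map resolution: multiply
        #  the on-screen pixel by SCALE to recover the original image pixel, convert to
        #  metres, then shift by the map ORIGIN and flip y using the image height)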
x, y = ((event.x*self.SCALE)*0.05) + self.ORIGIN[0], (self.height*0.05) - (((event.y*self.SCALE)*0.05) - self.ORIGIN[1])
#based on the type variable, executes the corresponding command
if self.type == 1:
self.addNodeClickPlace(x, y)
if self.type == 2:
self.deleteNodeClickRemove(x, y)
if self.type == 3:
#sets first x and y variable
self.pathx1, self.pathy1 = x,y
#executes a second method to get a second x and y for paths
self.canvas.bind('<Button-1>', self.getxy2)
if self.type == 4:
#sets first x and y variable
self.pathx1, self.pathy1 = x,y
#executes a second method to get a second x and y for paths
self.canvas.bind('<Button-1>', self.getxy2)
def getxy2(self, event):
#scales x,y into map scale
x, y = ((event.x*self.SCALE)*0.05) + self.ORIGIN[0], (self.height*0.05) - (((event.y*self.SCALE)*0.05) - self.ORIGIN[1])
if self.type == 3:
#sets second x and y variable
self.pathx2, self.pathy2 = x,y
#sends both xy variables to method
self.addPathClickPlace(self.pathx1,self.pathy1,self.pathx2,self.pathy2)
if self.type == 4:
#sets second x and y variable
self.pathx2, self.pathy2 = x,y
#sends both xy variables to method
self.deletePathClickRemove(self.pathx1,self.pathy1,self.pathx2,self.pathy2)
def addNodeClickPlace(self, x, y):
#Get User Input
newX = x
newY = y
#Gets File, Takes a Node to be Used as a Template for our New Node
newNode = copy.deepcopy(self.nodeList[0])
#Calls next number in sequence of nodes to define this new one
newNumber = self.nodeNumber(self.mainGUI.tmapPath)
#More attributes can be changed but I've started with node definition, name, x and y
newNode["meta"]["node"] = "WayPoint"+str(newNumber)
newNode["node"]["name"] = "WayPoint"+str(newNumber)
newNode["node"]["pose"]["position"]["x"] = newX
newNode["node"]["pose"]["position"]["y"] = newY
#Appends new node to rest of the file then dumps into file
self.nodeList.append(newNode)
self.canvas.destroy()
self.loadImage()
def deleteNodeClickRemove(self, x, y):
_id in closure]
def test_combine_all_parents_w_no_parents():
term = {'term_id': 'id1'}
term = go._combine_all_parents(term)
assert not term['all_parents'] # both should be empty lists
assert not term['development']
def test_combine_all_parents_w_empty_parents():
term = {'term_id': 'id1', 'parents': [], 'relationships': [],
'develops_from': [], 'has_part_inverse': []}
term = go._combine_all_parents(term)
assert not term['all_parents'] # both should be empty lists
assert not term['development']
def test_combine_all_parents_w_one_parent():
term = {'term_id': 'id1', 'parents': ['id2'], 'relationships': [],
'develops_from': [], 'has_part_inverse': []}
term = go._combine_all_parents(term)
assert len(term['all_parents']) == 1
assert term['all_parents'][0] == 'id2'
assert term['development'] == term['all_parents']
def test_combine_all_parents_w_two_parents():
term = {'term_id': 'id1', 'parents': ['id2', 'id3'], 'relationships': [],
'develops_from': [], 'has_part_inverse': []}
term = go._combine_all_parents(term)
assert len(term['all_parents']) == 2
assert 'id2' in term['all_parents']
assert 'id3' in term['all_parents']
assert sorted(term['development']) == sorted(term['all_parents'])
def test_combine_all_parents_w_two_same_parents():
term = {'term_id': 'id1', 'parents': ['id2', 'id2'], 'relationships': [],
'develops_from': [], 'has_part_inverse': []}
term = go._combine_all_parents(term)
assert len(term['all_parents']) == 1
assert term['all_parents'][0] == 'id2'
assert term['development'] == term['all_parents']
def test_combine_all_parents_w_parent_and_relationship_diff():
term = {'term_id': 'id1', 'parents': ['id2'], 'relationships': ['id3'],
'develops_from': [], 'has_part_inverse': []}
term = go._combine_all_parents(term)
assert len(term['all_parents']) == 2
assert 'id2' in term['all_parents']
assert 'id3' in term['all_parents']
assert sorted(term['development']) == sorted(term['all_parents'])
def test_combine_all_parents_w_parent_and_relationship_same():
term = {'term_id': 'id1', 'parents': ['id2'], 'relationships': ['id2'],
'develops_from': [], 'has_part_inverse': []}
term = go._combine_all_parents(term)
assert len(term['all_parents']) == 1
assert term['all_parents'][0] == 'id2'
assert term['development'] == term['all_parents']
def test_combine_all_parents_w_parent_and_develops_from_diff():
term = {'term_id': 'id1', 'parents': ['id2'], 'relationships': [],
'develops_from': ['id3'], 'has_part_inverse': []}
term = go._combine_all_parents(term)
assert len(term['all_parents']) == 1
assert len(term['development']) == 2
assert term['all_parents'][0] == 'id2'
assert 'id2' in term['development']
assert 'id3' in term['development']
def test_combine_all_parents_w_parent_and_develops_from_same():
term = {'term_id': 'id1', 'parents': ['id2'], 'relationships': [],
'develops_from': ['id2'], 'has_part_inverse': []}
term = go._combine_all_parents(term)
assert len(term['all_parents']) == 1
assert term['all_parents'][0] == 'id2'
assert term['development'] == term['all_parents']
def test_combine_all_parents_w_only_develops_from():
term = {'term_id': 'id1', 'parents': [], 'relationships': [],
'develops_from': ['id2'], 'has_part_inverse': []}
term = go._combine_all_parents(term)
assert not term['all_parents']
assert len(term['development']) == 1
assert term['development'][0] == 'id2'
def test_combine_all_parents_w_has_part_inverse_only():
term = {'term_id': 'id1', 'parents': [], 'relationships': [],
'develops_from': [], 'has_part_inverse': ['id2']}
term = go._combine_all_parents(term)
assert not term['all_parents'] # both should be empty lists
assert not term['development']
def test_combine_all_parents_w_has_part_inverse_to_exclude():
term = {'term_id': 'id1', 'parents': [], 'relationships': [],
'develops_from': ['id2'], 'has_part_inverse': ['id2']}
term = go._combine_all_parents(term)
assert not term['all_parents'] # both should be empty lists
assert not term['development']
def test_combine_all_parents_w_has_part_inverse_to_exclude_plus_others():
term = {'term_id': 'id1', 'parents': ['id2'], 'relationships': [],
'develops_from': ['id3', 'id4', 'id5'], 'has_part_inverse': ['id4', 'id5', 'id6']}
term = go._combine_all_parents(term)
assert len(term['all_parents']) == 1
assert len(term['development']) == 2
assert term['all_parents'][0] == 'id2'
assert 'id2' in term['development']
assert 'id3' in term['development']
def test_has_human_empty():
ll = []
assert not go._has_human(ll)
def test_has_human_no_human():
ll = ['http://purl.obolibrary.org/obo/BFO_0000051']
assert not go._has_human(ll)
def test_has_human_human():
ll = ['http://purl.obolibrary.org/obo/BFO_0000051', 'http://purl.obolibrary.org/obo/NCBITaxon_9606']
assert go._has_human(ll)
def test_has_human_uriref_human():
uri = 'http://purl.obolibrary.org/obo/NCBITaxon_9606'
uri = go.convert2URIRef(uri)
ll = [uri]
assert go._has_human(ll)
def test_get_termid_from_uri_no_uri():
uri = ''
assert not go.get_termid_from_uri(uri)
def test_get_termid_from_uri_valid_uri():
uri = 'http://www.ebi.ac.uk/efo/EFO_0002784'
tid = go.get_termid_from_uri(uri)
assert tid == 'EFO:0002784'
def test_get_termid_from_uri_funky_uri1():
uri = 'http://www.ebi.ac.uk/efo/EFO_UFO_0002784'
tid = go.get_termid_from_uri(uri)
assert tid == 'EFO:UFO:0002784'
def test_get_termid_from_uri_funky_uri2():
uri = 'http://www.ebi.ac.uk/efo/EFO0002784'
tid = go.get_termid_from_uri(uri)
assert tid == 'EFO0002784'
@pytest.fixture
def uberon_owler():
return Owler('src/encoded/tests/data/documents/test_uberon.owl')
@pytest.fixture
def uberon_owler2():
return Owler('src/encoded/tests/data/documents/test_uberon2.owl')
@pytest.fixture
def uberon_owler3():
return Owler('src/encoded/tests/data/documents/test_uberon3.owl')
@pytest.fixture
def uberon_owler4():
return Owler('src/encoded/tests/data/documents/test_uberon4.owl')
@pytest.fixture
def ll_class():
return go.convert2URIRef('http://purl.obolibrary.org/obo/UBERON_0000101')
def test_get_term_name_from_rdf_no_name(uberon_owler):
name = go.get_term_name_from_rdf('pickle', uberon_owler)
assert not name
def test_get_term_name_from_rdf_has_name(uberon_owler, ll_class):
name = go.get_term_name_from_rdf(ll_class, uberon_owler)
assert name == 'lobe of lung'
def test_get_term_name_from_rdf_no_term(uberon_owler):
class_ = go.convert2URIRef('http://purl.obolibrary.org/obo/UBERON_0000001')
name = go.get_term_name_from_rdf(class_, uberon_owler)
assert not name
def test_create_term_dict(ll_class, uberon_owler):
with mock.patch('encoded.commands.generate_ontology.get_term_name_from_rdf',
return_value='lung lobe'):
term = go.create_term_dict(ll_class, 'termid', uberon_owler, 'ontid')
assert term['term_name'] == 'lung lobe'
assert term['term_id'] == 'termid'
assert 'ontid' in term['source_ontologies']
assert term['namespace'] == 'http://purl.obolibrary.org/obo'
assert term['term_url'] == 'http://purl.obolibrary.org/obo/UBERON_0000101'
def test_add_term_and_info(uberon_owler2):
testid = 'UBERON:0001772'
relid = 'UBERON:0010304'
for c in uberon_owler2.allclasses:
if go.isBlankNode(c):
test_class = c
parent = go.convert2URIRef('http://purl.obolibrary.org/obo/UBERON_0001772')
terms = go._add_term_and_info(test_class, parent, 'test_rel', uberon_owler2, {})
assert testid in terms
term = terms[testid]
assert term['term_id'] == testid
assert relid in term['test_rel']
def test_process_intersection_of(uberon_owler3):
terms = {}
for c in uberon_owler3.allclasses:
for i in uberon_owler3.rdfGraph.objects(c, go.IntersectionOf):
terms = go.process_intersection_of(c, i, uberon_owler3, terms)
assert len(terms) == 1
term = list(terms.values())[0]
assert len(term['relationships']) == 1
assert term['relationships'][0] == 'UBERON:1'
assert len(term['develops_from']) == 1
assert term['develops_from'][0] == 'UBERON:2'
def test_process_blank_node(uberon_owler3):
terms = {}
for c in uberon_owler3.allclasses:
terms = go.process_blank_node(c, uberon_owler3, terms)
assert len(terms) == 1
assert 'UBERON:0001772' in terms
def test_find_and_add_parent_of(uberon_owler4):
tid = 'CL:0002553'
terms = {tid: {'term_id': tid}}
relids = ['UBERON:0002048', 'OBI:0000456', 'CL:0000058', 'CL:0000133']
relation = None
seen = False
for c in uberon_owler4.allclasses:
for _, p in enumerate(uberon_owler4.get_classDirectSupers(c, excludeBnodes=False)):
if go.isBlankNode(p):
has_part = False
if not seen:
has_part = True
seen = True
terms = go._find_and_add_parent_of(p, c, uberon_owler4, terms, has_part, relation)
assert len(terms) == 2
print(terms)
for termid, term in terms.items():
if termid == tid:
assert len(term['relationships']) == 3
for t in term['relationships']:
assert t in relids
else:
assert termid in relids
assert len(term['has_part_inverse']) == 1
assert term['has_part_inverse'][0] == tid
def test_process_parents(uberon_owler4):
tids = ['CL:0002553', 'CL:0000058']
relids = ['OBI:0000456', 'UBERON:0002048']
terms = {tids[0]: {'term_id': tids[0]}}
for c in uberon_owler4.allclasses:
terms = go.process_parents(c, uberon_owler4, terms)
print(terms)
assert len(terms) == 2
term1 = terms[tids[0]]
term2 = terms[tids[1]]
assert term1['develops_from'][0] == 'CL:0000133'
assert term1['parents'][0] == 'UBERON:0010313'
assert len(term1['relationships']) == 2
for r in relids:
assert r in term1['relationships']
assert term2['has_part_inverse'][0] == tids[0]
@pytest.fixture
def terms_w_stuff():
return {
'term1': {
'term_id': 't1',
'term_name': 'term1',
'relationships': ['rel1', 'rel2'],
'all_parents': ['p'],
'development': 'd',
'has_part_inverse': [],
'develops_from': '',
'part_of': ['p1'],
'closure': [],
'closure_with_develops_from': None
},
'term2': {
'term_id': 't1',
'term_name': 'term1'
},
'term3': {},
'term4': None
}
def test_cleanup_non_fields(terms_w_stuff):
to_delete = ['relationships', 'all_parents', 'development',
'has_part_inverse', 'develops_from', 'part_of',
'closure', 'closure_with_develops_from']
to_keep = ['term_id', 'term_name']
for d in to_delete + to_keep:
assert d in terms_w_stuff['term1']
terms = go._cleanup_non_fields(terms_w_stuff)
assert len(terms) == 2
assert terms['term1'] == terms['term2']
for d in to_delete:
assert d not in terms['term1']
for k in to_keep:
assert k in terms['term1']
@pytest.yield_fixture
def mock_get_synonyms():
syn_lists = [[], ['syn1'], ['syn1', 'syn2']]
with mock.patch('encoded.commands.generate_ontology.get_synonyms', side_effect=syn_lists) as mocked:
yield mocked
@pytest.yield_fixture
def mock_get_definitions():
def_lists = [[], ['def1'], ['def1', 'def2']]
with mock.patch('encoded.commands.generate_ontology.get_definitions', side_effect=def_lists) as mocked:
yield mocked
@pytest.fixture
def simple_terms():
terms = {'t1': {'term_id': 't1', 'term_url': 'term1'},
't2': {'term_id': 't2', 'term_url': 'term2'},
't3': {'term_id': 't3', 'term_url': 'term3'}}
return OrderedDict(sorted(terms.items(), key=lambda t: t[0]))
def test_add_additional_term_info(simple_terms):
syn_lists = [[], ['syn1'], ['syn1', 'syn2']]
def_lists = [[], ['def1'], ['def1', 'def2']]
# terms = {'t1': {'term_id': 't1', 'term_url': 'term1'},
# 't2': {'term_id': 't2', 'term_url': 'term2'},
# 't3': {'term_id': 't3', 'term_url': 'term3'}}
# terms = OrderedDict(sorted(terms.items(), key=lambda t: t[0]))
with mock.patch('encoded.commands.generate_ontology.convert2URIRef', return_value='blah'):
with mock.patch('encoded.commands.generate_ontology.get_synonyms', side_effect=syn_lists):
with mock.patch('encoded.commands.generate_ontology.get_definitions', side_effect=def_lists):
result = go.add_additional_term_info(simple_terms, 'data', 'synterms', 'defterms')
for tid, term in result.items():
if tid == 't3':
assert 'UNK' in term['definitions']
assert 'def1' in term['definitions']['UNK']
assert 'def2' in term['definitions']['UNK']
assert len(term['synonyms']) == 2
assert 'syn1' in term['synonyms']
assert 'syn2' in term['synonyms']
elif tid == 't2':
assert 'UNK' in term['definitions']
assert 'def1' in term['definitions']['UNK']
assert len(term['synonyms']) == 1
assert term['synonyms'][0] == 'syn1'
else:
assert 'synonyms' not in term
assert 'definition' not in term
def test_write_outfile_pretty(simple_terms):
filename = 'tmp_test_file'
go.write_outfile(list(simple_terms.values()), filename, pretty=True)
infile = open(filename, 'r')
result = json.load(infile)
print(result)
for r in result:
assert r in simple_terms.values()
os.remove(filename)
def test_write_outfile_notpretty(simple_terms):
print(simple_terms)
filename = 'tmp_test_file'
go.write_outfile(list(simple_terms.values()), filename)
with open(filename, 'r') as infile:
for l in infile:
result = json.loads(l)
for v in simple_terms.values():
assert v in result
os.remove(filename)
@pytest.fixture
def ontology_list():
return [
{'uuid': '1', 'ontology_name': 'ont1', 'ontology_prefix': 'TO'},
{'uuid': '2', 'ontology_name': 'ont2', 'ontology_prefix': 'NN'}
]
@pytest.fixture
def matches(ontology_list):
return [
{'term_id': 'TO:t1', 'a': 1, 'b': 2, 'c': 3, 'source_ontologies': [ontology_list[0].get('uuid')]},
{'term_id': 'TO:t1', 'a': 1, 'b': 2, 'c': 3, 'source_ontologies': [ontology_list[0].get('uuid')]}
]
def test_terms_match_identical(matches):
assert go._terms_match(matches[0], matches[1])
def test_terms_match_w_parents(matches):
t1 = matches[0]
t2 = matches[1]
p1 = ['OBI:01', 'EFO:01']
p2 = [{'@id': '/ontology-terms/OBI:01/', 'display_title': 'blah'},
{'@id': '/ontology-terms/EFO:01/', 'display_title': 'hah'}]
t1['parents'] = p1
t2['parents'] = p2
assert go._terms_match(t1, t2)
def test_terms_match_unmatched_parents_1(matches):
t1 = matches[0]
t2 = matches[1]
p1 = ['OBI:01', 'EFO:01']
p2 = [{'@id': '/ontology-terms/OBI:01/', 'display_title': 'blah'}]
t1['parents'] = p1
t2['parents'] = p2
assert not go._terms_match(t1, t2)
def test_terms_match_unmatched_parents_2(matches):
t1 = matches[0]
t2 = matches[1]
p1 = ['OBI:01', 'EFO:01']
p2 = [{'@id': '/ontology-terms/OBI:01/', 'display_title': 'blah'},
{'@id': '/ontology-terms/EFO:02/', 'display_title': 'hah'}]
t1['parents'] = p1
t2['parents'] = p2
assert not go._terms_match(t1, t2)
def test_terms_match_w_ontology(matches):
t1 = matches[0]
t2 = matches[1]
o1 = '530016bc-8535-4448-903e-854af460b254'
o2 = {'@id': '/ontologys/530016bc-8535-4448-903e-854af460b254/', 'display_title': 'blah'}
t1['source_ontologies'] = [o1]
t2['source_ontologies'] = [o2]
assert go._terms_match(t1, t2)
@pytest.fixture
def ont_terms(matches, ontology_list):
t2 = matches[1]
t2['term_id'] = 'TO:t2'
t2['parents'] = ['OBI:01', 'EFO:01']
return {
'TO:t1':