from Danim.Contry import *
from Danim.Distribution import LogscaleNumberLine
hair_svg_dir = "Danim\\video1\\file\\T_hair.svg"
# Axis settings
population_per_circle_area = 1/1.4*1.2 # default: 1.4 billion people corresponds to a circle of area 1.2 units
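# Illustrative check (not in the original script): with this constant, a population of
# 1.4 billion maps to circle area 1.2, i.e. radius sqrt(1.2/pi) ≈ 0.62 units, and
# 0.35 billion maps to area 0.3, i.e. radius ≈ 0.31 units
# (see part_radius in high_light_a_part below).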
xmin = 0
xmax = 10
ymin = 0
ymax = 100
proportion_to_left = 0.9 # default: shift the x axis so 10% empty space remains on the left; note this is the proportion relative to the origin
proportion_to_bottom = 0.8
x_scale_factor = (proportion_to_left+1)*FRAME_X_RADIUS/(xmax-xmin)*0.96 # 0.96 is arrow buff
y_scale_factor = (proportion_to_bottom+1)*FRAME_Y_RADIUS/(ymax-ymin)*0.96
NEWORIGIN = [-FRAME_X_RADIUS*proportion_to_left,-FRAME_Y_RADIUS*proportion_to_bottom,0]
mode = "chinese"
#TODO: make it a mobject class
def create_eyes_for_a_circle(
circle,
right_eye_angle = PI/4,
height_width_ratio = 1.3,
width_radius_ratio = 0.2,
eye_color = WHITE,
pupil_color = BLACK,
pupil_to_eye_ratio = 0.4,
pupil_black_to_white = 0.2,
eye_to_origin = 1.5/3.,
pupil_black_direction = UR,
pupil_stroke = 4
):
assert(isinstance(circle,Circle))
assert(right_eye_angle < PI/2)
origin = circle.get_center()
radius = get_norm(origin - circle.points[0])
width = radius* width_radius_ratio
eyes = VGroup()
#create eyes:
right_eye_unit_vec = np.array([np.cos(right_eye_angle),np.sin(right_eye_angle),0])
left_eye_unit_vec = np.array([-np.cos(right_eye_angle),np.sin(right_eye_angle),0])
eyes.left_eye = Circle(radius = width,color = eye_color,num_components = 60)
eyes.right_eye = Circle(radius = width,color = eye_color,num_components = 60)
eyes.left_eye.set_fill(eye_color,1)
eyes.right_eye.set_fill(eye_color,1)
eyes.left_eye.move_to(origin)
eyes.right_eye.move_to(origin)
eyes.left_eye.shift(left_eye_unit_vec*radius*eye_to_origin)
eyes.right_eye.shift(right_eye_unit_vec*radius*eye_to_origin)
eyes.left_eye.stretch(height_width_ratio, dim = 1)
eyes.right_eye.stretch(height_width_ratio, dim = 1)
eyes.add(eyes.right_eye)
eyes.add(eyes.left_eye)
#create right pupils:
eyes.right_pupil = VGroup()
eyes.right_pupil.pupil_white = Circle(
radius = width*pupil_to_eye_ratio,
color = pupil_color,
stroke_width = pupil_stroke
).move_to(
eyes.right_eye.get_center()
)
eyes.right_pupil.pupil_white.set_fill(pupil_color,1)
#eyes.right_pupil.add(eyes.right_pupil.pupil_white)
eyes.right_pupil.pupil_black = Circle(
radius = width*pupil_to_eye_ratio*pupil_black_to_white,
color = eye_color,
num_components = 60,
stroke_width = pupil_stroke
).move_to(eyes.right_pupil.pupil_white.get_center())
eyes.right_pupil.pupil_black.shift(
eyes.right_pupil.pupil_white.get_boundary_point(pupil_black_direction) -
eyes.right_pupil.pupil_black.get_boundary_point(pupil_black_direction))
eyes.right_pupil.pupil_black.set_fill(eye_color,1)
#eyes.right_pupil.add(eyes.right_pupil.pupil_black)
#create left pupil:
eyes.left_pupil = VGroup()
eyes.left_pupil.pupil_white = Circle(
radius = width*pupil_to_eye_ratio,
color = pupil_color,
stroke_width = pupil_stroke
).move_to(
eyes.left_eye.get_center()
)
eyes.left_pupil.pupil_white.set_fill(pupil_color,1)
#eyes.left_pupil.add(eyes.left_pupil.pupil_white)
eyes.left_pupil.pupil_black = Circle(
radius = width*pupil_to_eye_ratio*pupil_black_to_white,
color = eye_color,
num_components = 60,
stroke_width = pupil_stroke
).move_to(eyes.left_pupil.pupil_white.get_center())
eyes.left_pupil.pupil_black.shift(
eyes.left_pupil.pupil_white.get_boundary_point(pupil_black_direction) -
eyes.left_pupil.pupil_black.get_boundary_point(pupil_black_direction))
eyes.left_pupil.pupil_black.set_fill(eye_color,1)
#eyes.left_pupil.add(eyes.left_pupil.pupil_black)
#note: the add order matters (it determines the draw order)
eyes.add(
eyes.right_pupil.pupil_white,
eyes.left_pupil.pupil_white,
eyes.right_pupil.pupil_black,
eyes.left_pupil.pupil_black
)
return eyes
def eyes_blink_animation(eyes,**kwargs):
assert(isinstance(eyes,VGroup) and hasattr(eyes,"right_eye"))
eye_bottom_y = eyes.left_eye.get_bottom()[1]
return ApplyMethod(
eyes.apply_function,
lambda p: [p[0], eye_bottom_y, p[2]],
rate_func = squish_rate_func(there_and_back),
**kwargs
)
def eyes_look_at(eyes,thing_to_look,return_animation):
#TODO: thing_to_look could be a mobject or a coordinate
#for now thing_to_look must be a mobject
assert(isinstance(eyes,VGroup) and hasattr(eyes,"right_eye"))
assert(isinstance(thing_to_look,Mobject))
#calculate the shift vector
mcenter = thing_to_look.get_center()
rcenter = eyes.right_eye.get_center()
lcenter = eyes.left_eye.get_center()
rstart = eyes.right_pupil.pupil_white.get_boundary_point(mcenter - rcenter)
rend = eyes.right_eye.get_boundary_point(mcenter - rcenter)
lstart = eyes.left_pupil.pupil_white.get_boundary_point(mcenter - lcenter)
lend = eyes.left_eye.get_boundary_point(mcenter - lcenter)
right_eye_shift_vec = - rstart + rend
left_eye_shift_vec = - lstart + lend
if return_animation:
return AnimationGroup(
ApplyMethod(
VGroup(
eyes.left_pupil.pupil_white,
eyes.left_pupil.pupil_black
).shift,
left_eye_shift_vec
),
ApplyMethod(
VGroup(
eyes.right_pupil.pupil_white,
eyes.right_pupil.pupil_black
).shift,
right_eye_shift_vec
)
)
else:
return right_eye_shift_vec, left_eye_shift_vec
def eyes_back_to_center(eyes):
    #shift both pupils back to the center of their respective eyes
    rcenter = eyes.right_eye.get_center()
    lcenter = eyes.left_eye.get_center()
    VGroup(eyes.left_pupil.pupil_white,eyes.left_pupil.pupil_black).shift(lcenter - eyes.left_pupil.pupil_white.get_center())
    VGroup(eyes.right_pupil.pupil_white,eyes.right_pupil.pupil_black).shift(rcenter - eyes.right_pupil.pupil_white.get_center())
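# Minimal usage sketch (hedged: not part of the original script; assumes a Scene with a
# Circle `face` and some other mobject `target` already on screen):
#
#     face.eyes = create_eyes_for_a_circle(face)
#     self.play(FadeIn(face.eyes))
#     self.play(eyes_blink_animation(face.eyes, run_time = 0.6))
#     self.play(eyes_look_at(face.eyes, target, True))
#     eyes_back_to_center(face.eyes)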
class TheEnd(Scene):
def construct(self, mode = mode):
#logotag
logotag = TextMobject("引力子G @B站").scale(0.4)
logotag.to_edge(DR)
logotag.shift(0.4*DOWN+0.4*RIGHT)
logotag.asdfi = 1 # unused marker attribute, keeps the logo from being faded out at the end
self.add(logotag)
#-------------------------------- create the coordinate system
y_axis = NumberLine(
x_min = ymin,
x_max = ymax,
label_direction = LEFT,
include_tip = True,
tick_frequency= 10,
).rotate(90 * DEGREES, about_point=ORIGIN)
x_axis = LogscaleNumberLine(x_max = xmax, log_factor = 250,log_base = 2,include_tip = True)
x_axis.move_to(ORIGIN,aligned_edge=LEFT)
y_axis.move_to(ORIGIN,aligned_edge=DOWN)
x_axis.stretch_about_point(x_scale_factor, 0, ORIGIN)
y_axis.stretch_about_point(y_scale_factor, 1, ORIGIN)
axes = VGroup()
axes.x_axis = x_axis
axes.y_axis = y_axis
axes.add(x_axis)
axes.add(y_axis)
axes.shift(NEWORIGIN)
axes.x_axis.add_numbers(*[1,2,3,4,5,6,7,8,9])
axes.new_num = VGroup()
for index in [5,6,7,8]:
location = axes.x_axis.numbers[index].get_center()
axes.x_axis.numbers[index].move_to([-15,-15,0])
num = TextMobject(str(int(axes.x_axis.numbers[index].get_value()/1000))+"k").scale(0.7)
num.move_to(location + 0.05*UP)
axes.new_num.add(num)
axes.add(axes.new_num)
axes.y_axis.add_numbers(*np.arange(10,100,10))
y_axis_label_text = TextMobject("人均寿命").scale(0.6)
y_axis_label_text.next_to(axes.y_axis.number_to_point(95), RIGHT+UP)
x_axis_label_text = TextMobject("人均GDP").scale(0.6)
x_axis_label_text.next_to(axes.x_axis.number_to_point(9.5), UP)
#create year label:
year_lable = Integer(2018,group_with_commas = False,color = TEAL_E).scale(1.5)
year_lable.to_edge(UR)
#CREATE THE CIRCLES AND AREA RECT
area_rect = []
area_rect_lable = []
areas = []
rect = VGroup()
bubbles = VGroup()
for i,area in enumerate(THE_WHOLE_WORLD):
areas.append(Area(area,show_CN_name = False))
area_rect.append(Rectangle(height = 0.2, width = 0.5,color = AREA_COLOR_MAP[i],fill_opacity = 1))
if i == 0:
area_rect[i].next_to(year_lable,6*DOWN)
else:
area_rect[i].next_to(area_rect[i-1],DOWN)
area_rect_lable.append(TextMobject(CH_THE_WHOLE_WORLD[i]).scale(0.4))
area_rect_lable[i].next_to(area_rect[i],LEFT)
rect.add(area_rect[i],area_rect_lable[i])
#notice: before running this function,
#change the get_both_fertandlife_point()
#function in Contry.py to Version 2
areas[i].generate_all_contry_circles(
years_date=2018,
COLORMAT = [AREA_COLOR_MAP[i]],
if_switch_on =False,
Axes = axes
)
for contry in areas[i].contry_list:
if contry.name == "China":
china = contry
elif contry.name == "United States":
usa = contry
bubbles.add(contry.shape)
lable_CN = TextMobject("中国", color = RED).scale(0.8)
lable_CN.next_to(china.shape,UP)
self.add(VGroup(axes,y_axis_label_text,x_axis_label_text,year_lable,bubbles,rect,lable_CN))
self.wait()
#stage 1:Highlight china
#check whether another circle's center lies inside the China circle
origin1 = china.shape.get_center()
point1 = china.shape.points[0]
radius1 = get_norm(point1 - origin1)
def is_inside_china(circle2,origin1 = origin1,radius1 = radius1):
assert(isinstance(circle2,Circle))
origin2 = circle2.get_center()
dis = get_norm(origin2 - origin1)
if dis>radius1:
return False
else:
return True
circles_to_fadeout = VGroup()
circles_to_dim_animation = []
for area in areas:
for i,contry in enumerate(area.contry_list):
if (not contry.name == "China") and (is_inside_china(circle2 = contry.shape)):
circles_to_fadeout.add(contry.shape)
elif (not contry.name == "China") and (not is_inside_china(circle2 = contry.shape)):
circles_to_dim_animation.append(ApplyMethod(contry.shape.fade,0.8))
china.shape.eyes = create_eyes_for_a_circle(china.shape)
self.play(
AnimationGroup(
ApplyMethod(
china.shape.set_fill,
RED,
1
),
FadeOut(circles_to_fadeout),
FadeIn(china.shape.eyes),
*circles_to_dim_animation,
run_time = 2
)
)
self.wait()
self.play(eyes_blink_animation(china.shape.eyes,run_time = 0.6))
self.play(eyes_blink_animation(china.shape.eyes,run_time = 0.6))
self.play(eyes_blink_animation(china.shape.eyes,run_time = 0.6))
self.wait()
#stage 2: show Shanghai
def high_light_a_part(
pop,
life_expect,
gdp_per_cap,
contry,
direction,
part_name,
convert_from_local_currency_to_PPP_dollar,
contry_eyes = None,
stroke_width = 1,
color = RED,
num_components = 60,
lable_next_to_direction = DOWN,
creation_time = 3,
transfer_time = 3
):
part_radius = math.sqrt(pop/1000000000*population_per_circle_area/np.pi)
origin_x_GDPperCap = math.log(np.divide(gdp_per_cap/convert_from_local_currency_to_PPP_dollar,250),2)#log_factor = 250,log_base = 2
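# Worked example (hedged, illustrative only, not in the original): with gdp_per_cap = 135000
# and convert_from_local_currency_to_PPP_dollar = 4, the PPP value is 33750, so
# origin_x_GDPperCap = log2(33750/250) = log2(135) ≈ 7.08 axis units.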
part_center = np.array(coordinate_origin_to_new(np.array([origin_x_GDPperCap,life_expect,0])))
part = Circle(radius = part_radius,color = BLACK,stroke_width = stroke_width,num_components =num_components)
part.set_fill(color,1)
shift_vec = contry.shape.get_boundary_point(direction) - part.get_boundary_point(direction)
part.shift(shift_vec)
part_backgroud = Circle(radius = part_radius,color = BLACK)
part_backgroud.set_fill(BLACK,1)
part_backgroud.shift(shift_vec)
part_lable = TextMobject(part_name, color = color).scale(0.3)
part_lable.next_to(part,lable_next_to_direction)
animations = []
animations.append(Write(part_lable))
animations.append(FadeIn(part_backgroud))
animations.append(FadeIn(part))
if contry_eyes is not None:
animations.append(eyes_look_at(contry_eyes,part,True))
self.play(
*animations,
run_time = creation_time
)
self.wait()
animations = []
animations.append(ApplyMethod(VGroup(part,part_lable).shift,part_center-part.get_center()))
if contry_eyes is not None:
dot = Dot().move_to(part_center)
dot.set_opacity(0)
animations.append(eyes_look_at(contry_eyes,dot,True))
self.play(
*animations,
run_time = transfer_time
)
#highlight Shanghai
high_light_a_part(
24200000,
83.63,
135000,
china,
DOWN+RIGHT,
"上海",
4,
contry_eyes = china.shape.eyes,
stroke_width = 0.5,
color = RED,
num_components = 60,
lable_next_to_direction = 0.2*DOWN+0.5*RIGHT,
creation_time = 3,
transfer_time = 3
)
self.wait()
self.play(
eyes_blink_animation(china.shape.eyes),
)
self.play(
eyes_blink_animation(china.shape.eyes),
)
USD_converate_ratio = 59531.66/54898 # from US dollar to PPP dollar, fixed in 2011
#highlight Macau
high_light_a_part(
630000,
89.68,
8.64*10000,
china,
DOWN+LEFT,
"澳门",
USD_converate_ratio,
contry_eyes = china.shape.eyes,
stroke_width = 0.5,
color = RED,
num_components = 60,
lable_next_to_direction = 0.1*DOWN+0.1*LEFT,
creation_time = 3,
transfer_time = 3
)
# highlight USA:---------------------------------------------------
origin1 = usa.shape.get_center()
point1 = usa.shape.points[0]
radius1 = get_norm(point1 - origin1)
def is_inside_usa(circle2,origin1 = origin1,radius1 = radius1):
assert(isinstance(circle2,Circle))
origin2 = circle2.get_center()
dis = get_norm(origin2 - origin1)
if dis>radius1:
return False
else:
return True
#------------------
circles_to_fadeout = VGroup()
for area in areas:
for i,contry in enumerate(area.contry_list):
if (not contry.name == "United States") and (is_inside_usa(circle2 = contry.shape)):
circles_to_fadeout.add(contry.shape)
usa.shape.eyes = create_eyes_for_a_circle(
usa.shape,
eye_to_origin = 1/3,
right_eye_angle = PI/8,
height_width_ratio=1.5,
pupil_to_eye_ratio = 0.4,
pupil_black_to_white = 0.15,
pupil_stroke = 1,
width_radius_ratio = 0.2)
# I'm sorry, I just can't resist...
# it's not meant to be offensive,
# just trying to make it more fun.
'''
T_hair = SVGMobject(hair_svg_dir,color = ORANGE,stroke_width = 2).scale(0.25)
T_hair.set_fill(YELLOW,1)
T_hair.next_to(usa.shape,UP)
T_hair.shift(0.7*DOWN+0.1*RIGHT)
'''
usa_lable = TextMobject("美国",color = BLUE).scale(0.8)
usa_lable.next_to(usa.shape,DOWN)
self.play(
AnimationGroup(
ApplyMethod(
usa.shape.set_fill,
BLUE,
1
),
FadeOut(circles_to_fadeout),
FadeIn(usa.shape.eyes),
#ShowCreation(T_hair),
Write(usa_lable),
run_time = 2
)
)
self.wait()
self.play(
AnimationGroup(
eyes_blink_animation(china.shape.eyes),
eyes_blink_animation(usa.shape.eyes)
)
)
self.play(
AnimationGroup(
eyes_look_at(usa.shape.eyes,china.shape,True),
eyes_look_at(china.shape.eyes,usa.shape,True)
)
)
self.play(
AnimationGroup(
eyes_blink_animation(china.shape.eyes),
eyes_blink_animation(usa.shape.eyes)
)
)
self.play(
AnimationGroup(
eyes_blink_animation(china.shape.eyes),
eyes_blink_animation(usa.shape.eyes)
)
)
self.play()
# highlight DC, Massachusetts, Idaho, Mississippi:---------------------------------------------------
# data source :https://en.wikipedia.org/wiki/List_of_U.S._states_by_GDP_per_capita
# data source :https://en.wikipedia.org/wiki/List_of_U.S._states_and_territories_by_life_expectancy
# DC:
high_light_a_part(
200277, #in 2017
77.1,# in 2017
82989,
usa,
RIGHT+2*DOWN,
"华盛顿DC",
USD_converate_ratio,
contry_eyes = usa.shape.eyes,
stroke_width = 0.5,
color = BLUE,
num_components = 60,
lable_next_to_direction = 0.3*DOWN,
creation_time = 1,
transfer_time = 1
)
self.wait(0.5)
# Massachusetts:
high_light_a_part(
6.902*100*10000,
80.66,# in 2016
82480,# in 2017
usa,
DOWN,
"马萨诸塞州",
USD_converate_ratio,
contry_eyes = usa.shape.eyes,
stroke_width = 0.5,
color = BLUE,
num_components = 60,
lable_next_to_direction = 0.1*DOWN,
creation_time = 1,
transfer_time = 1
)
self.wait(0.5)
#Idaho
high_light_a_part(
1.754*100*10000,
79,# in 2017
43430,# in 2018
usa,
LEFT+DOWN,
"爱达荷州",
USD_converate_ratio,
contry_eyes = usa.shape.eyes,
stroke_width = 0.5,
color = BLUE,
num_components = 60,
lable_next_to_direction = 0.1*DOWN+0.2*LEFT,
creation_time = 1,
transfer_time = 1
)
self.wait()
#Mississippi
high_light_a_part(
2.987*100*10000,
74.5,# in 2017
37948,# in 2018
usa,
LEFT,
"密西西比",
USD_converate_ratio,
contry_eyes = usa.shape.eyes,
stroke_width = 0.5,
color = BLUE,
num_components = 60,
lable_next_to_direction = 0.1*LEFT,
creation_time = 1,
transfer_time = 1
)
self.wait(0.5)
# FADEOUT EVERYTHING
animations = []
# (supplier already entered) to avoid duplicates
if FinancialProvider.objects.filter(iban_ou_ccp=data['iban_ou_ccp'], deleted=False).exists():
fournisseur = FinancialProvider.objects.filter(iban_ou_ccp=data['iban_ou_ccp'], deleted=False).first()
if 'pk' not in data or fournisseur.pk != data['pk']:
raise forms.ValidationError(_(u'Le fournisseur {} ({}) est déja dans la base de donnée !'.format(fournisseur.name, fournisseur.iban_ou_ccp)))
if data['iban_ou_ccp'][0:2] != 'CH':
if not('bic' in data and data['bic']):
raise forms.ValidationError(_(u'BIC/SWIFT obligatoire pour un fournisseur étranger !'))
class _ProviderInvoice(GenericModel, GenericTaggableObject, GenericAccountingStateModel, GenericStateModel, GenericModelWithFiles, GenericModelWithLines, AccountingYearLinked, CostCenterLinked, UnitEditableModel, GenericGroupsModel, GenericContactableModel, LinkedInfoModel, AccountingGroupModels, SearchableModel):
"""Modèle pour les factures fournisseur"""
class MetaRightsUnit(UnitEditableModel.MetaRightsUnit):
access = ['TRESORERIE', 'SECRETARIAT']
class MetaRights(UnitEditableModel.MetaRights):
linked_unit_property = 'costcenter.unit'
name = models.CharField(_(u'Titre de la facture fournisseur'), max_length=255)
comment = models.TextField(_(u'Commentaire'), null=True, blank=True)
user = models.ForeignKey(settings.AUTH_USER_MODEL)
reference_number = models.CharField(_(u'Numéro de Référence'), null=True, blank=True, max_length=255)
raw_pay_code = models.TextField(_(u'Raw Swiss Payment Code'), null=True, blank=True)
currency = models.CharField(_(u'Devise'), max_length=3, choices=map(lambda i: (i.value, i.value), Currency), default=Currency.chf.code)
provider = FalseFK('accounting_tools.models.FinancialProvider', verbose_name=_(u'Fournisseur'), blank=False, null=False)
class MetaData:
list_display = [
('name', _('Titre')),
('costcenter', _(u'Centre de coûts')),
('provider', _(u'Fournisseur')),
('get_total_ht', _(u'Total (HT)')),
('get_total', _(u'Total (TTC)')),
('status', _('Statut')),
]
details_display = [
('name', _('Titre')),
('costcenter', _(u'Centre de coûts')),
('provider', _(u'Fournisseur')),
('reference_number', _(u'Numéro de référence')),
('get_total_ht', _(u'Total (HT)')),
('get_total', _(u'Total (TTC)')),
('currency', _(u'Devise')),
('status', _('Statut')),
('accounting_year', _(u'Année comptable')),
('comment', _(u'Commentaire')),
('raw_pay_code', _(u'SPC'))
]
filter_fields = ('name', 'costcenter__name', 'costcenter__account_number', 'reference_number', 'currency', 'provider__name', 'provider__tva_number', 'provider__iban_ou_ccp')
default_sort = "[0, 'desc']" # Creation date (pk) descending
trans_sort = {'get_fullname': 'user__first_name'}
not_sortable_columns = ['get_total', 'get_total_ht']
base_title = _(u'Factures (fournisseur)')
list_title = _(u'Liste des factures (fournisseur)')
files_title = _(u'Justificatifs')
base_icon = 'fa fa-list'
elem_icon = 'fa fa-shopping-cart'
@staticmethod
def extra_filter_for_list(request, current_unit, current_year, filtering):
if current_unit.is_user_in_groupe(request.user, access=['TRESORERIE', 'SECRETARIAT']) or request.user.is_superuser:
return lambda x: filtering(x)
else:
return lambda x: filtering(x).filter(user=request.user)
has_unit = True
menu_id = 'menu-compta-provider-invoices'
forced_widths = {
'1': '350px',
}
help_list = _(u"""Les factures fournisseurs permettent à une unité de payer des factures.
Il est nécéssaire de fournir la facture""")
class Meta:
abstract = True
class MetaEdit:
files_title = _(u'Scan')
files_help = _(u'scan de la facture')
all_users = True
class MetaLines:
lines_objects = [
{
'title': _(u'Lignes'),
'class': 'accounting_tools.models.ProviderInvoiceLine',
'form': 'accounting_tools.forms2.ProviderInvoiceLineForm',
'related_name': 'lines',
'field': 'providerInvoice',
'sortable': True,
'tva_fields': ['tva'],
'show_list': [
('label', _(u'Titre')),
('account', _(u'Compte')),
('value', _(u'Montant (HT)')),
('get_tva', _(u'TVA')),
('value_ttc', _(u'Montant (TTC)')),
]},
]
class MetaGroups(GenericGroupsModel.MetaGroups):
pass
class MetaState(GenericAccountingStateModel.MetaState):
pass
class MetaSearch(SearchableModel.MetaSearch):
extra_text = u"Factures Fournisseur"
index_files = True
fields = [
'name',
'provider',
'comment',
'get_total',
]
linked_lines = {
'lines': ['label']
}
def __unicode__(self):
return u"{} - {}".format(self.name, self.costcenter)
def rights_can_EDIT(self, user):
if not self.pk or (self.get_creator() == user and self.status[0] == '0'):
return True
return super(_ProviderInvoice, self).rights_can_EDIT(user)
def rights_can_LIST(self, user):
return True # Anyone can list the invoices of any unit (note that a sub-filter restricts the list to the invoices the user is allowed to see)
def genericFormExtraInit(self, form, current_user, *args, **kwargs):
del form.fields['user']
form.fields['user'] = forms.CharField(widget=forms.HiddenInput(), initial=current_user, required=False)
def get_lines(self):
return self.lines.order_by('order')
def get_total(self):
return sum([line.value_ttc for line in self.get_lines()])
def get_total_ht(self):
return sum([line.value for line in self.get_lines()])
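# Hedged illustration (not in the original): for an invoice with two lines
# (value=100.00, value_ttc=107.70) and (value=50.00, value_ttc=53.85),
# get_total_ht() returns 150.00 and get_total() returns 161.55.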
def is_unit_validator(self, user):
"""Check if user is a validator for the step '1_unit_validable'."""
return self.rights_in_linked_unit(user, self.MetaRightsUnit.access)
def genericFormExtraClean(self, data, form):
if self.get_creator():
data['user'] = self.get_creator()
else:
data['user'] = form.fields['user'].initial
class ProviderInvoiceLine(ModelUsedAsLine):
providerInvoice = models.ForeignKey('ProviderInvoice', related_name="lines")
label = models.CharField(_(u'Concerne'), max_length=255)
account = models.ForeignKey('accounting_core.Account', verbose_name=_('Compte'))
value = models.DecimalField(_(u'Montant (HT)'), max_digits=20, decimal_places=2)
tva = models.DecimalField(_(u'TVA'), max_digits=20, decimal_places=2)
value_ttc = models.DecimalField(_(u'Montant (TTC)'), max_digits=20, decimal_places=2)
def __unicode__(self):
return u'{}: {} + {}% == {}'.format(self.label, self.value, self.tva, self.value_ttc)
def get_tva(self):
from accounting_core.models import TVA
return TVA.tva_format(self.tva)
def display_amount(self):
return u'{} + {}% == {}'.format(self.value, self.tva, self.value_ttc)
class _CashBook(GenericModel, GenericTaggableObject, GenericAccountingStateModel, GenericStateModel, GenericModelWithFiles, GenericModelWithLines, AccountingYearLinked, CostCenterLinked, UnitEditableModel, GenericGroupsModel, GenericContactableModel, LinkedInfoModel, AccountingGroupModels, SearchableModel):
"""Modèle pour les journaux de caisse (JdC)"""
class MetaRightsUnit(UnitEditableModel.MetaRightsUnit):
access = ['TRESORERIE', 'SECRETARIAT']
class MetaRights(UnitEditableModel.MetaRights):
linked_unit_property = 'costcenter.unit'
name = models.CharField(_(u'Titre du journal de caisse'), max_length=255)
user = models.ForeignKey(settings.AUTH_USER_MODEL)
nb_proofs = models.PositiveIntegerField(_(u'Nombre de justificatifs'), default=0)
comment = models.TextField(_(u'Commentaire'), null=True, blank=True)
content_type = models.ForeignKey(ContentType, blank=True, null=True)
object_id = models.PositiveIntegerField(blank=True, null=True)
proving_object = generic.GenericForeignKey('content_type', 'object_id')
class MetaData:
list_display = [
('name', _('Titre')),
('costcenter', _(u'Centre de coûts')),
('get_fullname', _(u'Personne')),
('get_total_ht', _(u'Total (HT)')),
('get_total', _(u'Total (TTC)')),
('status', _('Statut')),
]
details_display = list_display + [('nb_proofs', _(u'Nombre de justificatifs')), ('accounting_year', _(u'Année comptable')), ('comment', _(u'Commentaire'))]
filter_fields = ('name', 'costcenter__name', 'costcenter__account_number', 'user__first_name', 'user__last_name', 'user__username')
default_sort = "[0, 'desc']" # Creation date (pk) descending
trans_sort = {'get_fullname': 'user__first_name'}
not_sortable_columns = ['get_total', 'get_total_ht']
base_title = _(u'Journaux de caisse')
list_title = _(u'Liste des journaux de caisse')
files_title = _(u'Justificatifs')
base_icon = 'fa fa-list'
elem_icon = 'fa fa-pencil-square-o'
@staticmethod
def extra_args_for_edit(request, current_unit, current_year):
return {'CS_account_number': settings.CS_ACCOUNT_NUMBER}
has_unit = True
menu_id = 'menu-compta-jdc'
help_list = _(u"""Les journaux de caisse servent à justifier des dépenses et des recettes liées à des versements à la banque ou des retraits cash.
Il est nécessaire de fournir les preuves d'achat et que celles-ci contiennent uniquement des choses qui doivent être remboursées.
Attention! Il faut faire une ligne par taux TVA par ticket. Par exemple, si certains achats à la Migros sont à 8% et d'autres à 0%, il faut les séparer en 2 lignes.""")
class Meta:
abstract = True
class MetaEdit:
files_title = _(u'Justificatifs')
files_help = _(u'Justificatifs liés aux lignes du journal de caisse.')
all_users = True
class MetaLines:
lines_objects = [
{
'title': _(u'Lignes'),
'class': 'accounting_tools.models.CashBookLine',
'form': 'accounting_tools.forms2.CashBookLineForm',
'related_name': 'lines',
'field': 'cashbook',
'sortable': True,
'tva_fields': ['tva'],
'date_fields': ['date'],
'show_list': [
('date', _(u'Date')),
('get_helper_display', _(u'Type')),
('label', _(u'Titre')),
('proof', _(u'Justificatif')),
('account', _(u'Compte')),
('value', _(u'Montant (HT)')),
('get_tva', _(u'TVA')),
('value_ttc', _(u'Montant (TTC)')),
]},
]
class MetaGroups(GenericGroupsModel.MetaGroups):
pass
class MetaState(GenericAccountingStateModel.MetaState):
def build_form_archive(request, obj):
class FormArchive(forms.Form):
archive_proving_obj = forms.BooleanField(label=_(u'Archiver le retrait cash lié?'), initial=True, required=False)
return FormArchive if obj.proving_object else None
states_bonus_form = {
'4_archived': build_form_archive
}
class MetaSearch(SearchableModel.MetaSearch):
extra_text = u"JDC"
index_files = True
fields = [
'name',
'user',
'comment',
'get_total',
]
linked_lines = {
'lines': ['label', 'proof', 'amount']
}
def __unicode__(self):
return u"{} - {}".format(self.name, self.costcenter)
def genericFormExtraClean(self, data, form):
if 'withdrawal' in data.keys() and data['withdrawal']:
if 'user' not in data or 'costcenter' not in data:
client.captureMessage('Withdrawal linked to Cashbook is missing mandatory data (user / costcenter)!\n{}'.format(data))
if data['withdrawal'].user != data.get('user', '') or data['withdrawal'].costcenter != data.get('costcenter', ''):
raise forms.ValidationError(_(u'L\'utilisateur responsable et/ou le centre de coûts ne correspondent pas au retrait cash lié.'))
data['object_id'] = data['withdrawal'].pk
data['content_type'] = ContentType.objects.get(app_label=data['withdrawal']._meta.app_label, model=data['withdrawal']._meta.model_name)
del data['withdrawal']
else:
data['object_id'] = None
data['content_type'] = None
if 'user' in data and not data['user'].is_profile_ok():
form._errors["user"] = form.error_class([_(u"Le profil de cet utilisateur doit d'abord être completé.")]) # Until Django 1.6
# form.add_error("user", _(u"Le profil de cet utilisateur doit d'abord être completé.")) # From Django 1.7
if 'user' in data and data['user'] != form.truffe_request.user and not self.rights_in_linked_unit(form.truffe_request.user, self.MetaRightsUnit.access) and not form.truffe_request.user.is_superuser:
form._errors["user"] = form.error_class([_(u"Il faut plus de droits pour pouvoir faire une note de frais pour quelqu'un d'autre.")]) # Until Django 1.6
# form.add_error("user", _(u"Il faut plus de droits pour pouvoir faire une note de frais pour quelqu'un d'autre.")) # From Django 1.7
def genericFormExtraInit(self, form, current_user, *args, **kwargs):
"""Set related object correctly."""
from accounting_tools.models import Withdrawal
form.fields['withdrawal'] = forms.ModelChoiceField(queryset=Withdrawal.objects.order_by('-pk'), initial=self.proving_object, required=False, label=_(u'Retrait cash lié'))
for field in ['content_type', 'object_id']:
del form.fields[field]
def get_lines(self):
return self.lines.order_by('order')
def get_total(self):
return sum([line.get_line_delta() for line in self.get_lines()])
def get_total_ht(self):
return sum([line.get_line_delta_ht() for line in self.get_lines()])
def total_incomes(self):
return sum([line.input_amount() for line in self.get_lines()])
def total_outcomes(self):
return sum([line.output_amount() for line in self.get_lines()])
def is_unit_validator(self, user):
"""Check if user is a validator for the step '1_unit_validable'."""
return self.rights_in_linked_unit(user, self.MetaRightsUnit.access)
class CashBookLine(ModelUsedAsLine):
HELPER_TYPE = (
('0_withdraw', _(u'J\'ai fait un retrait cash : ')),
('1_deposit', _(u'J\'ai fait un versement à la banque : ')),
('2_sell', _(u'J\'ai vendu quelque chose : ')),
('3_invoice', _(u'J\'ai payé une facture avec la caisse : ')),
('4_buy', _(u'J\'ai acheté quelque chose avec la caisse : ')),
('5_reimburse', _(u'J\'ai remboursé quelqu\'un avec la caisse : ')),
('6_input', _(u'Je fais un Crédit manuel : ')),
('7_output', _(u'Je fais un Débit manuel : ')),
)
cashbook = models.ForeignKey('CashBook', related_name="lines")
date = models.DateField(_(u'Date'))
helper = models.CharField(max_length=15, choices=HELPER_TYPE)
label = models.CharField(_(u'Concerne'), max_length=255)
proof = models.CharField(_(u'Justificatif'), max_length=255, blank=True)
account = models.ForeignKey('accounting_core.Account', verbose_name=_('Compte'))
value = models.DecimalField(_(u'Montant (HT)'), max_digits=20, decimal_places=2)
tva = models.DecimalField(_(u'TVA'), max_digits=20, decimal_places=2)
import math
import pytest
from generator import base_objects, materials
from ideal_learning_env import (
InteractableObjectConfig,
VectorFloatConfig,
find_bounds,
)
from ideal_learning_env.defs import ILEException
from ideal_learning_env.interactable_object_config import (
KeywordLocationConfig,
ObjectRepository,
)
from ideal_learning_env.object_services import KeywordLocation
@pytest.fixture(autouse=True)
def run_before_test():
ObjectRepository.get_instance().clear()
def test_find_bounds():
assert find_bounds([]) == []
assert find_bounds([
{'shows': [{'boundingBox': ['a', 'b']}]}
]) == [['a', 'b']]
assert find_bounds([
{'shows': [{'boundingBox': ['a', 'b']}]},
{'shows': [{'boundingBox': ['c', 'd']}]}
]) == [['a', 'b'], ['c', 'd']]
assert find_bounds([
{'shows': [{'boundingBox': ['a', 'b']}]},
{'shows': [{'boundingBox': ['c', 'd']}]},
{'shows': [{'boundingBox': ['a', 'd']}]},
]) == [['a', 'b'], ['c', 'd'], ['a', 'd']]
def test_interactable_object_config_create_instance_random_material():
config = InteractableObjectConfig(
position=VectorFloatConfig(1, 0, 2),
rotation=VectorFloatConfig(0, 90, 0),
scale=3,
shape='ball'
)
instance = config.create_instance(
{'x': 10, 'y': 3, 'z': 10},
{'position': {'x': 0, 'y': 0, 'z': 0},
'rotation': {'x': 0, 'y': 0, 'z': 0}},
[]
)
assert instance['type'] == 'ball'
assert instance['shows'][0]['position'] == {'x': 1, 'y': 0, 'z': 2}
assert instance['shows'][0]['rotation'] == {'x': 0, 'y': 90, 'z': 0}
assert instance['shows'][0]['scale'] == {'x': 3, 'y': 3, 'z': 3}
assert len(instance['materials']) > 0
assert instance['materials'][0] in materials.ALL_MATERIAL_STRINGS
def test_interactable_object_config_create_instance_random_position():
config = InteractableObjectConfig(
material='AI2-THOR/Materials/Plastics/BlackPlastic',
rotation=VectorFloatConfig(0, 90, 0),
scale=3,
shape='ball'
)
instance = config.create_instance(
{'x': 10, 'y': 3, 'z': 10},
{'position': {'x': 0, 'y': 0, 'z': 0},
'rotation': {'x': 0, 'y': 0, 'z': 0}},
[]
)
assert instance['type'] == 'ball'
assert instance['materials'] == [
'AI2-THOR/Materials/Plastics/BlackPlastic']
assert instance['shows'][0]['rotation'] == {'x': 0, 'y': 90, 'z': 0}
assert instance['shows'][0]['scale'] == {'x': 3, 'y': 3, 'z': 3}
assert -3.5 <= instance['shows'][0]['position']['x'] <= 3.5
assert instance['shows'][0]['position']['y'] == 0
assert -3.5 <= instance['shows'][0]['position']['z'] <= 3.5
def test_interactable_object_config_create_instance_random_rotation():
config = InteractableObjectConfig(
material='AI2-THOR/Materials/Plastics/BlackPlastic',
position=VectorFloatConfig(1, 0, 2),
scale=3,
shape='ball'
)
instance = config.create_instance(
{'x': 10, 'y': 3, 'z': 10},
{'position': {'x': 0, 'y': 0, 'z': 0},
'rotation': {'x': 0, 'y': 0, 'z': 0}},
[]
)
assert instance['type'] == 'ball'
assert instance['materials'] == [
'AI2-THOR/Materials/Plastics/BlackPlastic']
assert instance['shows'][0]['position'] == {'x': 1, 'y': 0, 'z': 2}
assert instance['shows'][0]['scale'] == {'x': 3, 'y': 3, 'z': 3}
assert instance['shows'][0]['rotation']['x'] == 0
assert instance['shows'][0]['rotation']['y'] in [
0, 45, 90, 135, 180, 225, 270, 315
]
assert instance['shows'][0]['rotation']['z'] == 0
def test_interactable_object_config_create_instance_random_scale():
config = InteractableObjectConfig(
material='AI2-THOR/Materials/Wood/DarkWoodSmooth2',
position=VectorFloatConfig(1, 0, 2),
rotation=VectorFloatConfig(0, 90, 0),
shape='ball'
)
instance = config.create_instance(
{'x': 10, 'y': 3, 'z': 10},
{'position': {'x': 0, 'y': 0, 'z': 0},
'rotation': {'x': 0, 'y': 0, 'z': 0}},
[]
)
assert instance['type'] == 'ball'
assert instance['materials'] == ['AI2-THOR/Materials/Wood/DarkWoodSmooth2']
assert instance['shows'][0]['position'] == {'x': 1, 'y': 0, 'z': 2}
assert instance['shows'][0]['rotation'] == {'x': 0, 'y': 90, 'z': 0}
assert instance['shows'][0]['scale'] == {'x': 1, 'y': 1, 'z': 1}
def test_interactable_object_config_create_instance_random_shape():
config = InteractableObjectConfig(
material='AI2-THOR/Materials/Metals/BrushedAluminum_Blue',
position=VectorFloatConfig(1, 0, 2),
rotation=VectorFloatConfig(0, 90, 0),
scale=3,
shape='ball'
)
instance = config.create_instance(
{'x': 10, 'y': 3, 'z': 10},
{'position': {'x': 0, 'y': 0, 'z': 0},
'rotation': {'x': 0, 'y': 0, 'z': 0}},
[]
)
assert instance['materials'] == [
'AI2-THOR/Materials/Metals/BrushedAluminum_Blue']
assert instance['shows'][0]['position'] == {'x': 1, 'y': 0, 'z': 2}
assert instance['shows'][0]['rotation'] == {'x': 0, 'y': 90, 'z': 0}
assert instance['shows'][0]['scale'] == {'x': 3, 'y': 3, 'z': 3}
assert instance['type'] in base_objects.FULL_TYPE_LIST
def test_interactable_object_config_create_instance_specific():
config = InteractableObjectConfig(
material='UnityAssetStore/Wooden_Toys_Bundle/ToyBlocks/meshes/Materials/red_1x1', # noqa
position=VectorFloatConfig(1, 0, 2),
rotation=VectorFloatConfig(0, 90, 0),
scale=3,
shape='ball'
)
instance = config.create_instance(
{'x': 10, 'y': 3, 'z': 10},
{'position': {'x': 0, 'y': 0, 'z': 0},
'rotation': {'x': 0, 'y': 0, 'z': 0}},
[]
)
assert instance['type'] == 'ball'
assert instance['materials'] == [
'UnityAssetStore/Wooden_Toys_Bundle/ToyBlocks/meshes/Materials/red_1x1'] # noqa
assert instance['shows'][0]['position'] == {'x': 1, 'y': 0, 'z': 2}
assert instance['shows'][0]['rotation'] == {'x': 0, 'y': 90, 'z': 0}
assert instance['shows'][0]['scale'] == {'x': 3, 'y': 3, 'z': 3}
def test_interactable_object_config_create_instance_specific_invalid_shape_material(): # noqa
config = InteractableObjectConfig(
material='AI2-THOR/Materials/Fabrics/Carpet2',
position=VectorFloatConfig(1, 0, 2),
rotation=VectorFloatConfig(0, 90, 0),
scale=3,
shape='ball'
)
with pytest.raises(ILEException):
config.create_instance(
{'x': 10, 'y': 3, 'z': 10},
{'position': {'x': 0, 'y': 0, 'z': 0},
'rotation': {'x': 0, 'y': 0, 'z': 0}},
[]
)
def test_interactable_object_config_create_instance_specific_list():
config = InteractableObjectConfig(
material=[
'UnityAssetStore/Wooden_Toys_Bundle/ToyBlocks/meshes/Materials/blue_1x1', # noqa
'UnityAssetStore/Wooden_Toys_Bundle/ToyBlocks/meshes/Materials/wood_1x1'], # noqa
position=[VectorFloatConfig(1, 0, 2), VectorFloatConfig(-1, 0, -2)],
rotation=[VectorFloatConfig(0, 90, 0), VectorFloatConfig(0, 180, 0)],
scale=[3, VectorFloatConfig(3.25, 3.5, 3.75)],
shape=['ball', 'block_blank_wood_cube']
)
instance = config.create_instance(
{'x': 10, 'y': 3, 'z': 10},
{'position': {'x': 0, 'y': 0, 'z': 0},
'rotation': {'x': 0, 'y': 0, 'z': 0}},
[]
)
assert instance['type'] in ['ball', 'block_blank_wood_cube']
assert instance['materials'] in [
['UnityAssetStore/Wooden_Toys_Bundle/ToyBlocks/meshes/Materials/wood_1x1'], # noqa
['UnityAssetStore/Wooden_Toys_Bundle/ToyBlocks/meshes/Materials/blue_1x1'] # noqa
]
assert (
instance['shows'][0]['position'] == {'x': 1, 'y': 0, 'z': 2} or
instance['shows'][0]['position'] == {'x': -1, 'y': 0, 'z': -2}
)
assert (
instance['shows'][0]['rotation'] == {'x': 0, 'y': 90, 'z': 0} or
instance['shows'][0]['rotation'] == {'x': 0, 'y': 180, 'z': 0}
)
assert (
instance['shows'][0]['scale'] == {'x': 3, 'y': 3, 'z': 3} or
instance['shows'][0]['scale'] == {'x': 3.25, 'y': 3.5, 'z': 3.75}
)
def test_interactable_object_config_create_keyword_location_front():
klc = KeywordLocationConfig(
KeywordLocation.FRONT_OF_PERFORMER)
config = InteractableObjectConfig(
material='AI2-THOR/Materials/Metals/WhiteMetal',
keyword_location=klc,
scale=1,
shape='ball'
)
instance = config.create_instance(
{'x': 10, 'y': 3, 'z': 10},
{'position': {'x': 0, 'y': 0, 'z': 0},
'rotation': {'x': 0, 'y': 0, 'z': 0}},
[]
)
assert instance['type'] == 'ball'
assert instance['materials'] == ['AI2-THOR/Materials/Metals/WhiteMetal']
assert instance['shows'][0]['position']['x'] == 0
assert instance['shows'][0]['position']['y'] == 0.5
assert instance['shows'][0]['position']['z'] > 0
assert instance['shows'][0]['scale'] == {'x': 1, 'y': 1, 'z': 1}
def test_interactable_object_config_create_keyword_location_back():
klc = KeywordLocationConfig(
KeywordLocation.BACK_OF_PERFORMER)
config = InteractableObjectConfig(
material='AI2-THOR/Materials/Metals/GenericStainlessSteel',
keyword_location=klc,
scale=1,
shape='ball'
)
instance = config.create_instance(
{'x': 10, 'y': 3, 'z': 10},
{'position': {'x': 0, 'y': 0, 'z': 0},
'rotation': {'x': 0, 'y': 0, 'z': 0}},
[]
)
assert instance['type'] == 'ball'
assert instance['materials'] == [
'AI2-THOR/Materials/Metals/GenericStainlessSteel']
assert -5 < instance['shows'][0]['position']['x'] < 5
assert instance['shows'][0]['position']['y'] == 0.5
assert instance['shows'][0]['position']['z'] < 0
assert instance['shows'][0]['scale'] == {'x': 1, 'y': 1, 'z': 1}
def test_interactable_object_config_create_keyword_location_between():
rel_config = InteractableObjectConfig(
material='AI2-THOR/Materials/Plastics/OrangePlastic',
position=VectorFloatConfig(4, 0, 2),
scale=1,
shape='ball',
labels="rel_label"
)
rel_config.create_instance(
{'x': 10, 'y': 3, 'z': 10},
{'position': {'x': 0, 'y': 0, 'z': 0},
'rotation': {'x': 0, 'y': 0, 'z': 0}},
[]
)
klc = KeywordLocationConfig(
KeywordLocation.BETWEEN_PERFORMER_OBJECT,
relative_object_label="rel_label")
config = InteractableObjectConfig(
material='AI2-THOR/Materials/Plastics/OrangePlastic',
keyword_location=klc,
scale=1,
shape='ball'
)
instance = config.create_instance(
{'x': 10, 'y': 3, 'z': 10},
{'position': {'x': 0, 'y': 0, 'z': 0},
'rotation': {'x': 0, 'y': 0, 'z': 0}},
[]
)
assert instance['type'] == 'ball'
assert instance['materials'] == [
'AI2-THOR/Materials/Plastics/OrangePlastic']
assert 0 < instance['shows'][0]['position']['x'] < 4
assert instance['shows'][0]['position']['y'] == 0.5
assert 0 < instance['shows'][0]['position']['z'] < 2
assert instance['shows'][0]['scale'] == {'x': 1, 'y': 1, 'z': 1}
def test_interactable_object_config_create_keyword_location_behind():
rel_config = InteractableObjectConfig(
material='AI2-THOR/Materials/Plastics/OrangePlastic',
position=VectorFloatConfig(0, 0, 2),
scale=1,
shape='ball',
labels="rel_label"
)
rel_config.create_instance(
{'x': 10, 'y': 3, 'z': 10},
{'position': {'x': 0, 'y': 0, 'z': 0},
'rotation': {'x': 0, 'y': 0, 'z': 0}},
[]
)
klc = KeywordLocationConfig(
KeywordLocation.BEHIND_OBJECT_FROM_PERFORMER,
relative_object_label="rel_label")
config = InteractableObjectConfig(
material='AI2-THOR/Materials/Plastics/OrangePlastic',
keyword_location=klc,
scale=1,
shape='ball'
)
instance = config.create_instance(
{'x': 10, 'y': 3, 'z': 10},
{'position': {'x': 0, 'y': 0, 'z': 0},
'rotation': {'x': 0, 'y': 0, 'z': 0}},
[]
)
assert instance['type'] == 'ball'
assert instance['materials'] == [
'AI2-THOR/Materials/Plastics/OrangePlastic']
assert instance['shows'][0]['position']['x'] == 0
assert instance['shows'][0]['position']['y'] == 0.5
assert instance['shows'][0]['position']['z'] > 2
assert instance['shows'][0]['scale'] == {'x': 1, 'y': 1, 'z': 1}
def test_interactable_object_config_create_keyword_location_adjacent():
rel_config = InteractableObjectConfig(
material='AI2-THOR/Materials/Plastics/OrangePlastic',
position=VectorFloatConfig(3, 0, 3),
scale=1,
shape='ball',
labels="rel_label"
)
rel_config.create_instance(
{'x': 10, 'y': 3, 'z': 10},
{'position': {'x': 0, 'y': 0, 'z': 0},
'rotation': {'x': 0, 'y': 0, 'z': 0}},
[]
)
klc = KeywordLocationConfig(
KeywordLocation.ADJACENT_TO_OBJECT,
relative_object_label="rel_label")
config = InteractableObjectConfig(
material='AI2-THOR/Materials/Plastics/OrangePlastic',
keyword_location=klc,
scale=1,
shape='ball'
)
instance = config.create_instance(
{'x': 10, 'y': 3, 'z': 10},
{'position': {'x': 0, 'y': 0, 'z': 0},
'rotation': {'x': 0, 'y': 0, 'z': 0}},
[]
)
assert instance['type'] == 'ball'
assert instance['materials'] == [
'AI2-THOR/Materials/Plastics/OrangePlastic']
assert 1.5 < instance['shows'][0]['position']['x'] < 4.5
assert instance['shows'][0]['position']['y'] == 0.5
assert 1.5 < instance['shows'][0]['position']['z'] < 4.5
assert instance['shows'][0]['scale'] == {'x': 1, 'y': 1, 'z': 1}
def test_interactable_object_config_create_keyword_location_in():
chest_config = InteractableObjectConfig(
material='AI2-THOR/Materials/Plastics/WhitePlastic',
position=VectorFloatConfig(-2, 0, -2),
scale=1,
shape='chest_3',
labels="chest_label"
)
chest = chest_config.create_instance(
{'x': 10, 'y': 3, 'z': 10},
{'position': {'x': 0, 'y': 0, 'z': 0},
'rotation': {'x': 0, 'y': 0, 'z': 0}},
[]
)
klc = KeywordLocationConfig(
KeywordLocation.IN_CONTAINER,
container_label="chest_label")
config = InteractableObjectConfig(
material='Custom/Materials/Black',
keyword_location=klc,
scale=1,
shape='crayon_blue'
)
instance = config.create_instance(
{'x': 10, 'y': 3, 'z': 10},
{'position': {'x': 0, 'y': 0, 'z': 0},
'rotation': {'x': 0, 'y': 0, 'z': 0}},
[]
)
assert instance['type'] == 'crayon_blue'
assert instance['shows'][0]['position']['x'] == 0
assert instance['shows'][0]['position']['z'] == 0
assert instance['locationParent'] == chest['id']
assert instance['shows'][0]['scale'] == {'x': 1, 'y': 1, 'z': 1}
def test_interactable_object_config_create_keyword_location_in_with():
rel_config = InteractableObjectConfig(
scale=1,
shape='crayon_blue',
labels="rel_label"
)
Bar {
CREATE PROPERTY foo := (INSERT Foo).id;
};
""")
async def test_edgeql_ddl_bad_10(self):
with self.assertRaisesRegex(
edgedb.SchemaDefinitionError,
r"mutations are invalid in alias definition"):
async with self.con.transaction():
await self.con.execute(r"""
CREATE TYPE Foo;
CREATE TYPE Bar;
CREATE ALIAS Baz := Bar {
foo := (INSERT Foo)
};
""")
async def test_edgeql_ddl_bad_11(self):
with self.assertRaisesRegex(
edgedb.SchemaDefinitionError,
r"mutations are invalid in alias definition"):
async with self.con.transaction():
await self.con.execute(r"""
CREATE TYPE Foo;
CREATE TYPE Bar;
CREATE ALIAS Baz := Bar {
foo := (INSERT Foo).id
};
""")
async def test_edgeql_ddl_bad_12(self):
with self.assertRaisesRegex(
edgedb.SchemaDefinitionError,
r"mutations are invalid in alias definition"):
async with self.con.transaction():
await self.con.execute(r"""
CREATE TYPE Foo;
CREATE TYPE Bar {
CREATE LINK foo -> Foo;
};
CREATE ALIAS Baz := Bar {
foo: {
fuz := (INSERT Foo)
}
};
""")
async def test_edgeql_ddl_bad_13(self):
with self.assertRaisesRegex(
edgedb.SchemaDefinitionError,
r"mutations are invalid in alias definition"):
async with self.con.transaction():
await self.con.execute(r"""
CREATE TYPE Foo;
CREATE TYPE Bar {
CREATE LINK foo -> Foo;
};
CREATE ALIAS Baz := (
WITH x := (INSERT Foo)
SELECT Bar {
foo: {
fuz := x
}
}
);
""")
async def test_edgeql_ddl_link_long_01(self):
link_name = (
'f123456789_123456789_123456789_123456789'
'_123456789_123456789_123456789_123456789'
)
await self.con.execute(f"""
CREATE ABSTRACT LINK {link_name};
""")
await self.con.execute(f"""
CREATE TYPE Foo {{
CREATE LINK {link_name} -> Foo;
}};
""")
await self.con.query(f"SELECT Foo.{link_name}")
async def test_edgeql_ddl_link_bad_02(self):
with self.assertRaisesRegex(
edgedb.EdgeQLSyntaxError,
f'unexpected fully-qualified name'):
async with self.con.transaction():
await self.con.execute("""
CREATE TYPE Foo {
CREATE LINK foo::bar -> Foo;
};
""")
async def test_edgeql_ddl_link_bad_03(self):
with self.assertRaisesRegex(
edgedb.SchemaDefinitionError,
f"'default' is not a valid field for an abstract link"):
async with self.con.transaction():
await self.con.execute("""
CREATE ABSTRACT LINK bar {
SET default := Object;
};
""")
async def test_edgeql_ddl_property_long_01(self):
prop_name = (
'f123456789_123456789_123456789_123456789'
'_123456789_123456789_123456789_123456789'
)
await self.con.execute(f"""
CREATE ABSTRACT PROPERTY {prop_name}
""")
await self.con.execute(f"""
CREATE TYPE Foo {{
CREATE PROPERTY {prop_name} -> std::str;
}};
""")
await self.con.query(f"SELECT Foo.{prop_name}")
async def test_edgeql_ddl_property_bad_02(self):
with self.assertRaisesRegex(
edgedb.EdgeQLSyntaxError,
f'unexpected fully-qualified name'):
async with self.con.transaction():
await self.con.execute("""
CREATE TYPE Foo {
CREATE PROPERTY foo::bar -> Foo;
};
""")
async def test_edgeql_ddl_property_bad_03(self):
with self.assertRaisesRegex(
edgedb.SchemaDefinitionError,
f"'default' is not a valid field for an abstract property"):
async with self.con.transaction():
await self.con.execute("""
CREATE ABSTRACT PROPERTY bar {
SET default := 'bad';
};
""")
async def test_edgeql_ddl_function_01(self):
await self.con.execute("""
CREATE FUNCTION my_lower(s: std::str) -> std::str
USING SQL FUNCTION 'lower';
""")
with self.assertRaisesRegex(edgedb.DuplicateFunctionDefinitionError,
r'cannot create.*my_lower.*func'):
async with self.con.transaction():
await self.con.execute("""
CREATE FUNCTION my_lower(s: SET OF std::str)
-> std::str {
SET initial_value := '';
USING SQL FUNCTION 'count';
};
""")
await self.con.execute("""
DROP FUNCTION my_lower(s: std::str);
""")
await self.con.execute("""
CREATE FUNCTION my_lower(s: SET OF anytype)
-> std::str {
USING SQL FUNCTION 'count';
SET initial_value := '';
};
""")
with self.assertRaisesRegex(edgedb.DuplicateFunctionDefinitionError,
r'cannot create.*my_lower.*func'):
async with self.con.transaction():
await self.con.execute("""
CREATE FUNCTION my_lower(s: anytype) -> std::str
USING SQL FUNCTION 'lower';
""")
await self.con.execute("""
DROP FUNCTION my_lower(s: anytype);
""")
async def test_edgeql_ddl_function_02(self):
long_func_name = 'my_sql_func5_' + 'abc' * 50
await self.con.execute(f"""
CREATE FUNCTION my_sql_func1()
-> std::str
USING SQL $$
SELECT 'spam'::text
$$;
CREATE FUNCTION my_sql_func2(foo: std::str)
-> std::str
USING SQL $$
SELECT "foo"::text
$$;
CREATE FUNCTION my_sql_func4(VARIADIC s: std::str)
-> std::str
USING SQL $$
SELECT array_to_string(s, '-')
$$;
CREATE FUNCTION {long_func_name}()
-> std::str
USING SQL $$
SELECT '{long_func_name}'::text
$$;
CREATE FUNCTION my_sql_func6(a: std::str='a' ++ 'b')
-> std::str
USING SQL $$
SELECT $1 || 'c'
$$;
CREATE FUNCTION my_sql_func7(s: array<std::int64>)
-> std::int64
USING SQL $$
SELECT sum(s)::bigint FROM UNNEST($1) AS s
$$;
""")
await self.assert_query_result(
r"""
SELECT my_sql_func1();
""",
['spam'],
)
await self.assert_query_result(
r"""
SELECT my_sql_func2('foo');
""",
['foo'],
)
await self.assert_query_result(
r"""
SELECT my_sql_func4('fizz', 'buzz');
""",
['fizz-buzz'],
)
await self.assert_query_result(
fr"""
SELECT {long_func_name}();
""",
[long_func_name],
)
await self.assert_query_result(
r"""
SELECT my_sql_func6();
""",
['abc'],
)
await self.assert_query_result(
r"""
SELECT my_sql_func6('xy');
""",
['xyc'],
)
await self.assert_query_result(
r"""
SELECT my_sql_func7([1, 2, 3, 10]);
""",
[16],
)
await self.con.execute(f"""
DROP FUNCTION my_sql_func1();
DROP FUNCTION my_sql_func2(foo: std::str);
DROP FUNCTION my_sql_func4(VARIADIC s: std::str);
DROP FUNCTION {long_func_name}();
DROP FUNCTION my_sql_func6(a: std::str='a' ++ 'b');
DROP FUNCTION my_sql_func7(s: array<std::int64>);
""")
async def test_edgeql_ddl_function_03(self):
with self.assertRaisesRegex(edgedb.InvalidFunctionDefinitionError,
r'invalid default value'):
await self.con.execute(f"""
CREATE FUNCTION broken_sql_func1(
a: std::int64=(SELECT schema::ObjectType))
-> std::str
USING SQL $$
SELECT 'spam'::text
$$;
""")
async def test_edgeql_ddl_function_04(self):
await self.con.execute(f"""
CREATE FUNCTION my_edgeql_func1()
-> std::str
USING EdgeQL $$
SELECT 'sp' ++ 'am'
$$;
CREATE FUNCTION my_edgeql_func2(s: std::str)
-> schema::ObjectType
USING EdgeQL $$
SELECT
schema::ObjectType
FILTER schema::ObjectType.name = s
LIMIT 1
$$;
CREATE FUNCTION my_edgeql_func3(s: std::int64)
-> std::int64
USING EdgeQL $$
SELECT s + 10
$$;
CREATE FUNCTION my_edgeql_func4(i: std::int64)
-> array<std::int64>
USING EdgeQL $$
SELECT [i, 1, 2, 3]
$$;
""")
await self.assert_query_result(
r"""
SELECT my_edgeql_func1();
""",
['spam'],
)
await self.assert_query_result(
r"""
SELECT my_edgeql_func2('schema::Object').name;
""",
['schema::Object'],
)
await self.assert_query_result(
r"""
SELECT (SELECT my_edgeql_func2('schema::Object')).name;
""",
['schema::Object'],
)
await self.assert_query_result(
r"""
SELECT my_edgeql_func3(1);
""",
[11],
)
await self.assert_query_result(
r"""
SELECT my_edgeql_func4(42);
""",
[[42, 1, 2, 3]]
)
await self.con.execute(f"""
DROP FUNCTION my_edgeql_func1();
DROP FUNCTION my_edgeql_func2(s: std::str);
DROP FUNCTION my_edgeql_func3(s: std::int64);
DROP FUNCTION my_edgeql_func4(i: std::int64);
""")
async def test_edgeql_ddl_function_05(self):
await self.con.execute("""
CREATE FUNCTION attr_func_1() -> std::str {
CREATE ANNOTATION description := 'hello';
USING EdgeQL "SELECT '1'";
};
""")
await self.assert_query_result(
r"""
SELECT schema::Function {
annotations: {
@value
} FILTER .name = 'std::description'
} FILTER .name = 'default::attr_func_1';
""",
[{
'annotations': [{
'@value': 'hello'
}]
}],
)
await self.con.execute("""
DROP FUNCTION attr_func_1();
""")
async def test_edgeql_ddl_function_06(self):
await self.con.execute("""
CREATE FUNCTION int_func_1() -> std::int64 {
USING EdgeQL "SELECT 1";
};
""")
await self.assert_query_result(
r"""
SELECT int_func_1();
""",
[{}],
)
async def test_edgeql_ddl_function_07(self):
with self.assertRaisesRegex(
edgedb.InvalidFunctionDefinitionError,
r'cannot create.*my_agg.*function:.+anytype.+cannot '
r'have a non-empty default'):
await self.con.execute(r"""
CREATE FUNCTION my_agg(
s: anytype = [1]) -> array<anytype>
USING SQL FUNCTION "my_agg";
""")
async def test_edgeql_ddl_function_08(self):
with self.assertRaisesRegex(
edgedb.InvalidFunctionDefinitionError,
r'invalid declaration.*unexpected type of the default'):
await self.con.execute("""
CREATE FUNCTION ddlf_08(s: std::str = 1) -> std::str
USING EdgeQL $$ SELECT "1" $$;
""")
async def test_edgeql_ddl_function_09(self):
await self.con.execute("""
CREATE FUNCTION ddlf_09(
NAMED ONLY a: int64,
NAMED ONLY b: int64
) -> std::str
USING EdgeQL $$ SELECT "1" $$;
""")
with self.assertRaisesRegex(
edgedb.DuplicateFunctionDefinitionError,
r'already defined'):
async with self.con.transaction():
await self.con.execute("""
CREATE FUNCTION ddlf_09(
NAMED ONLY b: int64,
NAMED ONLY a: int64 = 1
) -> std::str
USING EdgeQL $$ SELECT "1" $$;
""")
await self.con.execute("""
CREATE FUNCTION ddlf_09(
NAMED ONLY b: str,
NAMED ONLY a: int64
) -> std::str
USING EdgeQL $$ SELECT "2" $$;
""")
await self.assert_query_result(
r'''
SELECT ddlf_09(a:=1, b:=1);
''',
['1'],
)
await self.assert_query_result(
r'''
SELECT ddlf_09(a:=1, b:='a');
''',
['2'],
)
async def test_edgeql_ddl_function_10(self):
with self.assertRaisesRegex(
edgedb.QueryError,
r'parameter `sum` is not callable',
_line=6, _col=39):
await self.con.execute('''
CREATE FUNCTION ddlf_10(
sum: int64
) -> int64
USING (
SELECT <int64>sum(sum)
);
''')
async def test_edgeql_ddl_function_11(self):
await self.con.execute(r'''
CREATE FUNCTION ddlf_11_1() -> str
USING EdgeQL $$
SELECT '\u0062'
$$;
CREATE FUNCTION ddlf_11_2() -> str
USING EdgeQL $$
SELECT r'\u0062'
$$;
CREATE FUNCTION ddlf_11_3() -> str
USING EdgeQL $$
SELECT $a$\u0062$a$
$$;
''')
try:
await self.assert_query_result(
r'''
SELECT ddlf_11_1();
''',
['b'],
)
await self.assert_query_result(
r'''
SELECT ddlf_11_2();
''',
[r'\u0062'],
)
await self.assert_query_result(
r'''
SELECT ddlf_11_3();
''',
[r'\u0062'],
)
finally:
await self.con.execute("""
DROP FUNCTION ddlf_11_1();
DROP FUNCTION ddlf_11_2();
DROP FUNCTION ddlf_11_3();
""")
async def test_edgeql_ddl_function_12(self):
with self.assertRaisesRegex(
edgedb.DuplicateFunctionDefinitionError,
r'cannot create.*ddlf_12\(a: std::int64\).*'
r'function with the same signature is already defined'):
await self.con.execute(r'''
CREATE FUNCTION ddlf_12(a: int64) -> int64
USING EdgeQL $$ SELECT 11 $$;
CREATE FUNCTION ddlf_12(a: int64) -> float64
USING EdgeQL $$ SELECT 11 $$;
''')
async def test_edgeql_ddl_function_13(self):
with self.assertRaisesRegex(
edgedb.UnsupportedFeatureError,
r'cannot create.*ddlf_13\(a: SET OF std::int64\).*'
r'SET OF parameters in user-defined EdgeQL functions are '
r'not supported'):
async with self.con.transaction():
await self.con.execute(r'''
CREATE FUNCTION ddlf_13(a: SET OF int64) -> int64
USING EdgeQL $$ SELECT 11 $$;
''')
with self.assertRaises(edgedb.InvalidReferenceError):
await self.con.execute("""
DROP FUNCTION ddlf_13(a: SET OF int64);
""")
async def test_edgeql_ddl_function_14(self):
await self.con.execute(r'''
CREATE FUNCTION ddlf_14(
a: int64, NAMED ONLY f: int64) -> int64
USING EdgeQL $$ SELECT 11 $$;
CREATE FUNCTION ddlf_14(
a: int32, NAMED ONLY f: str) -> int64
USING EdgeQL $$ SELECT 12 $$;
''')
try:
await self.assert_query_result(
r'''
SELECT ddlf_14(<int64>10, f := 11);
''',
import logging
from dataclasses import dataclass, field
from enum import Enum
import click
from .click_common import command
from .miot_device import MiotDevice
_LOGGER = logging.getLogger(__name__)
class ChargeStatus(Enum):
Charging = 1
Not_charging = 2
Charging2 = 4
Go_charging = 5
class Error(Enum):
NoError = 0
Drop = 1
Cliff = 2
Bumper = 3
Gesture = 4
Bumper_repeat = 5
Drop_repeat = 6
Optical_flow = 7
No_box = 8
No_tankbox = 9
Waterbox_empty = 10
Box_full = 11
Brush = 12
Side_brush = 13
Fan = 14
Left_wheel_motor = 15
Right_wheel_motor = 16
Turn_suffocate = 17
Forward_suffocate = 18
Charger_get = 19
Battery_low = 20
Charge_fault = 21
Battery_percentage = 22
Heart = 23
Camera_occlusion = 24
Camera_fault = 25
Event_battery = 26
Forward_looking = 27
Gyroscope = 28
class VacuumStatus(Enum):
Sweeping = 1
Idle = 2
Paused = 3
Error = 4
Go_charging = 5
Charging = 6
class VacuumSpeed(Enum):
Silent = 0
Standard = 1
Medium = 2
Turbo = 3
class WaterLevel(Enum):
Low = 1
Medium = 2
High = 3
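# Hedged sketch (not part of the original file): the field metadata below maps each
# dataclass attribute to a MIoT (siid, piid) pair plus its access modes, and a device
# class would typically collect the readable ones and query them in batches, e.g.
# (assuming python-miio's MiotDevice/Device.get_properties API and a `device` instance):
#
#     import dataclasses
#     props = [
#         {"did": f.name, "siid": f.metadata["siid"], "piid": f.metadata["piid"]}
#         for f in dataclasses.fields(DreameStatus)
#         if "read" in f.metadata["access"]
#     ]
#     values = device.get_properties(
#         props, max_properties=DreameStatus._max_properties)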
@dataclass
class DreameStatus:
_max_properties = 14
###############################################
# siid 2: (Robot Cleaner): 2 props, 2 actions #
###############################################
# piid: 1 (Status): (int8, unit: None) (acc: ['read', 'notify'], value-list: [{'value': 1, 'description': 'Sweeping'}, {'value': 2, 'description': 'Idle'}, {'value': 3, 'description': 'Paused'}, {'value': 4, 'description': 'Error'}, {'value': 5, 'description': 'Go Charging'}, {'value': 6, 'description': 'Charging'}], value-range: None)
status: int = field(
metadata={
"siid": 2,
"piid": 1,
"access": ["read", "notify"],
"enum": VacuumStatus,
},
default=None
)
# piid: 2 (Device Fault): (uint8, unit: None) (acc: ['read', 'notify'], value-list: [{'value': 0, 'description': 'No faults'}], value-range: None)
error: int = field(
metadata={
"siid": 2,
"piid": 2,
"access": ["read", "notify"],
"enum": Error
},
default=None
)
#########################################
# siid 3: (Battery): 2 props, 1 actions #
#########################################
# piid: 1 (Battery Level): (uint8, unit: percentage) (acc: ['read', 'notify'], value-list: [], value-range: [0, 100, 1])
battery: int = field(
metadata={
"siid": 3,
"piid": 1,
"access": ["read", "notify"]
},
default=None
)
# piid: 2 (Charging State): (uint8, unit: None) (acc: ['read', 'notify'], value-list: [{'value': 1, 'description': 'Charging'}, {'value': 2, 'description': 'Not Charging'}, {'value': 5, 'description': 'Go Charging'}], value-range: None)
state: int = field(
metadata={
"siid": 3,
"piid": 2,
"access": ["read", "notify"],
"enum": ChargeStatus,
},
default=None
)
########################################
# siid 4: (clean): 15 props, 2 actions #
########################################
# piid: 1 (operation mode): (int32, unit: none) (acc: ['read', 'notify'], value-list: [], value-range: [0, 50, 1])
operating_mode: int = field(
metadata={
"siid": 4,
"piid": 1,
"access": ["read", "notify"]
},
default=None
)
# piid: 2 (timer): (string, unit: minute) (acc: ['read', 'notify'], value-list: [], value-range: [0, 32767, 1])
timer: str = field(
metadata={
"siid": 4,
"piid": 2,
"access": ["read", "notify"]
},
default=None
)
# piid: 3 (area): (string, unit: None) (acc: ['read', 'notify'], value-list: [], value-range: [0, 32767, 1])
area: str = field(
metadata={
"siid": 4,
"piid": 3,
"access": ["read", "notify"]
},
default=None
)
# piid: 4 (fan speed): (int32, unit: none) (acc: ['read', 'notify', 'write'], value-list: [{'value': 0, 'description': 'Silent'}, {'value': 1, 'description': 'Standard'}, {'value': 2, 'description': 'Medium'}, {'value': 3, 'description': 'Turbo'}], value-range: None)
fan_speed: int = field(
metadata={
"siid": 4,
"piid": 4,
"access": ["read", "notify", "write"],
"enum": VacuumSpeed,
},
default=None
)
    # piid: 5 (water level): maps to the WaterLevel enum (acc: ['read', 'write', 'notify'])
water_level: int = field(
metadata={
"siid": 4,
"piid": 5,
"access": ["read", "write", "notify"],
"enum": WaterLevel,
},
default=None
)
################################################
# siid 5: (do-not-disturb): 3 props, 0 actions #
################################################
# piid: 1 (enable): (bool, unit: None) (acc: ['read', 'notify', 'write'], value-list: [], value-range: None)
dnd_enabled: bool = field(
metadata={
"siid": 5,
"piid": 1,
"access": ["read", "notify", "write"]
},
default=None
)
# piid: 2 (start-time): (string, unit: None) (acc: ['read', 'notify', 'write'], value-list: [], value-range: None)
dnd_start_time: str = field(
metadata={
"siid": 5,
"piid": 2,
"access": ["read", "notify", "write"]
},
default=None
)
# piid: 3 (stop-time): (string, unit: None) (acc: ['read', 'notify', 'write'], value-list: [], value-range: None)
dnd_stop_time: str = field(
metadata={
"siid": 5,
"piid": 3,
"access": ["read", "notify", "write"]
},
default=None
)
#####################################
# siid 6: (map): 6 props, 2 actions #
#####################################
# piid: 1 (map-view): (string, unit: None) (acc: ['notify'], value-list: [], value-range: None)
map_view: str = field(
metadata={
"siid": 6,
"piid": 1,
"access": ["notify"]
},
default=None
)
# piid: 2 (frame-info): (string, unit: None) (acc: ['write'], value-list: [], value-range: None)
frame_info: str = field(
metadata={
"siid": 6,
"piid": 2,
"access": ["write"]
},
default=None
)
#######################################
# siid 7: (audio): 4 props, 2 actions #
#######################################
# piid: 1 (volume): (int32, unit: None) (acc: ['read', 'notify', 'write'], value-list: [], value-range: [0, 100, 1])
audio_volume: int = field(
metadata={
"siid": 7,
"piid": 1,
"access": ["read", "notify", "write"]
},
default=None
)
    # piid: 2 (voice pack ID): (string, unit: none) (acc: ['read', 'notify', 'write'], value-list: [], value-range: None)
audio_language: str = field(
metadata={
"siid": 7,
"piid": 2,
"access": ["read", "notify", "write"]
},
default=None
)
##################################
# siid 8: (): 3 props, 1 actions #
##################################
# piid: 1 (): (string, unit: None) (acc: ['read', 'notify'], value-list: [], value-range: None)
timezone: str = field(
metadata={
"siid": 8,
"piid": 1,
"access": ["read", "notify"]
},
default=None
)
#####################################################
# siid 9: (Main Cleaning Brush): 2 props, 1 actions #
#####################################################
# piid: 1 (Brush Left Time): (uint16, unit: hour) (acc: ['read', 'notify'], value-list: [], value-range: [0, 300, 1])
brush_left_time: int = field(
metadata={
"siid": 9,
"piid": 1,
"access": ["read", "notify"]
},
default=None
)
# piid: 2 (Brush Life Level): (uint8, unit: percentage) (acc: ['read', 'notify'], value-list: [], value-range: [0, 100, 1])
brush_life_level: int = field(
metadata={
"siid": 9,
"piid": 2,
"access": ["read", "notify"]
},
default=None
)
######################################################
# siid 10: (Side Cleaning Brush): 2 props, 1 actions #
######################################################
# piid: 1 (Brush Left Time): (uint16, unit: hour) (acc: ['read', 'notify'], value-list: [], value-range: [0, 200, 1])
brush_left_time2: int = field(
metadata={
"siid": 10,
"piid": 1,
"access": ["read", "notify"]
},
default=None
)
# piid: 2 (Brush Life Level): (uint8, unit: percentage) (acc: ['read', 'notify'], value-list: [], value-range: [0, 100, 1])
brush_life_level2: int = field(
metadata={
"siid": 10,
"piid": 2,
"access": ["read", "notify"]
},
default=None
)
#########################################
# siid 11: (Filter): 2 props, 1 actions #
#########################################
# piid: 1 (Filter Life Level): (uint8, unit: percentage) (acc: ['read', 'notify'], value-list: [], value-range: [0, 100, 1])
filter_life_level: int = field(
metadata={
"siid": 11,
"piid": 1,
"access": ["read", "notify"]
},
default=None
)
# piid: 2 (Filter Left Time): (uint16, unit: hour) (acc: ['read', 'notify'], value-list: [], value-range: [0, 150, 1])
filter_left_time: int = field(
metadata={
"siid": 11,
"piid": 2,
"access": ["read", "notify"]
},
default=None
)
#############################################
# siid 12: (clean-logs): 4 props, 0 actions #
#############################################
# piid: 1 (): (uint32, unit: None) (acc: ['read', 'notify'], value-list: [], value-range: [0, 4294967295, 1])
total_log_start: int = field(
metadata={
"siid": 12,
"piid": 1,
"access": ["read", "notify"]
},
default=None
)
# piid: 3 (): (uint32, unit: None) (acc: ['read', 'notify'], value-list: [], value-range: [0, 4294967295, 1])
total_clean_count: int = field(
metadata={
"siid": 12,
"piid": 3,
"access": ["read", "notify"]
},
default=None
)
# piid: 4 (): (uint32, unit: None) (acc: ['read', 'notify'], value-list: [], value-range: [0, 4294967295, 1])
total_area: int = field(
metadata={
"siid": 12,
"piid": 4,
"access": ["read", "notify"]
},
default=None
)
# # piid: 17 (): (uint16, unit: None) (acc: ['read', 'notify'], value-list: [], value-range: [0, 100, 1])
# button_led: int = field(
# metadata={"siid": 18, "piid": 17, "access": ["read", "notify"]},
# default=None
# )
# # piid: 18 (): (uint8, unit: None) (acc: ['read', 'notify'], value-list: [{'value': 0, 'description': ''}, {'value': 1, 'description': ''}], value-range: None)
# clean_success: int = field(
# metadata={"siid": 18, "piid": 18, "access": ["read", "notify"]},
# default=None
# )
###############################################
# # siid 19: (consumable): 3 props, 0 actions #
###############################################
# # piid: 1 (life-sieve): (string, unit: None) (acc: ['read', 'write'], value-list: [], value-range: None)
# life_sieve: str = field(
# metadata={"siid": 19, "piid": 1, "access": ["read", "write"]},
# default=None
# )
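# A hedged sketch of how the field metadata above can drive a MIoT status query.
# The helper below and the shape of each request entry are assumptions for
# illustration; the exact payload depends on the MiotDevice implementation.
def _example_build_property_request():
    from dataclasses import fields  # local import keeps the sketch self-contained
    return [
        {"did": f.name, "siid": f.metadata["siid"], "piid": f.metadata["piid"]}
        for f in fields(DreameStatus)
        if "read" in f.metadata.get("access", [])
    ]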
- 2*
m.b112*m.b164 + 10*m.b113*m.b114 + 12*m.b113*m.b115 + 2*m.b113*m.b117 + 10*m.b113*m.b118 + 10*
m.b113*m.b119 + 10*m.b113*m.b120 + 4*m.b113*m.b121 + 6*m.b113*m.b122 + 10*m.b113*m.b123 + 4*
m.b113*m.b125 + 20*m.b113*m.b126 + 20*m.b113*m.b127 + 2*m.b113*m.b128 + 10*m.b113*m.b129 + 20*
m.b113*m.b131 + 20*m.b113*m.b148 - 20*m.b113*m.b165 - 12*m.b113*m.b166 - 20*m.b113*m.b169 - 4*
m.b113*m.b170 - 20*m.b113*m.b171 - 2*m.b113*m.b172 - 10*m.b113*m.b173 - 10*m.b113*m.b174 - 4*
m.b113*m.b175 - 10*m.b113*m.b176 - 4*m.b113*m.b178 - 2*m.b113*m.b180 + 2*m.b114*m.b116 + 4*
m.b114*m.b117 + 2*m.b114*m.b118 + 12*m.b114*m.b123 + 12*m.b114*m.b124 + 8*m.b114*m.b125 + 10*
m.b114*m.b126 + 6*m.b114*m.b127 + 4*m.b114*m.b128 + 4*m.b114*m.b129 + 20*m.b114*m.b132 + 20*
m.b114*m.b149 + 10*m.b114*m.b165 - 12*m.b114*m.b181 - 20*m.b114*m.b184 - 4*m.b114*m.b185 - 20*
m.b114*m.b186 - 2*m.b114*m.b187 - 10*m.b114*m.b188 - 10*m.b114*m.b189 - 4*m.b114*m.b190 - 10*
m.b114*m.b191 - 4*m.b114*m.b193 - 2*m.b114*m.b195 + 4*m.b115*m.b116 + 8*m.b115*m.b118 + 4*
m.b115*m.b119 + 2*m.b115*m.b120 + 12*m.b115*m.b122 + 4*m.b115*m.b123 + 2*m.b115*m.b124 + 10*
m.b115*m.b125 + 2*m.b115*m.b128 + 10*m.b115*m.b129 + 20*m.b115*m.b133 + 20*m.b115*m.b150 + 10*
m.b115*m.b166 + 20*m.b115*m.b181 - 20*m.b115*m.b198 - 4*m.b115*m.b199 - 20*m.b115*m.b200 - 2*
m.b115*m.b201 - 10*m.b115*m.b202 - 10*m.b115*m.b203 - 4*m.b115*m.b204 - 10*m.b115*m.b205 - 4*
m.b115*m.b207 - 2*m.b115*m.b209 + 4*m.b116*m.b117 + 2*m.b116*m.b118 + 6*m.b116*m.b120 + 20*
m.b116*m.b121 + 8*m.b116*m.b124 + 8*m.b116*m.b127 + 4*m.b116*m.b128 + 10*m.b116*m.b129 + 20*
m.b116*m.b134 + 20*m.b116*m.b151 + 10*m.b116*m.b167 + 20*m.b116*m.b182 + 12*m.b116*m.b196 - 20
*m.b116*m.b211 - 4*m.b116*m.b212 - 20*m.b116*m.b213 - 2*m.b116*m.b214 - 10*m.b116*m.b215 - 10*
m.b116*m.b216 - 4*m.b116*m.b217 - 10*m.b116*m.b218 - 4*m.b116*m.b220 - 2*m.b116*m.b222 + 8*
m.b117*m.b118 + 10*m.b117*m.b119 + 2*m.b117*m.b121 + 10*m.b117*m.b123 + 10*m.b117*m.b127 + 2*
m.b117*m.b128 + 2*m.b117*m.b129 + 20*m.b117*m.b135 + 20*m.b117*m.b152 + 10*m.b117*m.b168 + 20*
m.b117*m.b183 + 12*m.b117*m.b197 - 20*m.b117*m.b223 - 4*m.b117*m.b224 - 20*m.b117*m.b225 - 2*
m.b117*m.b226 - 10*m.b117*m.b227 - 10*m.b117*m.b228 - 4*m.b117*m.b229 - 10*m.b117*m.b230 - 4*
m.b117*m.b232 - 2*m.b117*m.b234 + 4*m.b118*m.b121 + 4*m.b118*m.b122 + 4*m.b118*m.b124 + 10*
m.b118*m.b125 + 10*m.b118*m.b127 + 4*m.b118*m.b128 + 10*m.b118*m.b129 + 20*m.b118*m.b136 + 20*
m.b118*m.b153 + 10*m.b118*m.b169 + 20*m.b118*m.b184 + 12*m.b118*m.b198 - 4*m.b118*m.b235 - 20*
m.b118*m.b236 - 2*m.b118*m.b237 - 10*m.b118*m.b238 - 10*m.b118*m.b239 - 4*m.b118*m.b240 - 10*
m.b118*m.b241 - 4*m.b118*m.b243 - 2*m.b118*m.b245 + 4*m.b119*m.b120 + 12*m.b119*m.b124 + 6*
m.b119*m.b125 + 10*m.b119*m.b126 + 10*m.b119*m.b129 + 20*m.b119*m.b137 + 20*m.b119*m.b154 + 10
*m.b119*m.b170 + 20*m.b119*m.b185 + 12*m.b119*m.b199 + 20*m.b119*m.b235 - 20*m.b119*m.b246 - 2
*m.b119*m.b247 - 10*m.b119*m.b248 - 10*m.b119*m.b249 - 4*m.b119*m.b250 - 10*m.b119*m.b251 - 4*
m.b119*m.b253 - 2*m.b119*m.b255 + 10*m.b120*m.b122 + 10*m.b120*m.b123 + 2*m.b120*m.b124 + 10*
m.b120*m.b125 + 4*m.b120*m.b126 + 2*m.b120*m.b127 + 4*m.b120*m.b128 + 20*m.b120*m.b129 + 20*
m.b120*m.b138 + 20*m.b120*m.b155 + 10*m.b120*m.b171 + 20*m.b120*m.b186 + 12*m.b120*m.b200 + 20
*m.b120*m.b236 + 4*m.b120*m.b246 - 2*m.b120*m.b256 - 10*m.b120*m.b257 - 10*m.b120*m.b258 - 4*
m.b120*m.b259 - 10*m.b120*m.b260 - 4*m.b120*m.b262 - 2*m.b120*m.b264 + 10*m.b121*m.b122 + 4*
m.b121*m.b123 + 2*m.b121*m.b124 + 2*m.b121*m.b125 + 10*m.b121*m.b126 + 12*m.b121*m.b127 + 10*
m.b121*m.b128 + 10*m.b121*m.b129 + 20*m.b121*m.b139 + 20*m.b121*m.b156 + 10*m.b121*m.b172 + 20
*m.b121*m.b187 + 12*m.b121*m.b201 + 20*m.b121*m.b237 + 4*m.b121*m.b247 + 20*m.b121*m.b256 - 10
*m.b121*m.b265 - 10*m.b121*m.b266 - 4*m.b121*m.b267 - 10*m.b121*m.b268 - 4*m.b121*m.b270 - 2*
m.b121*m.b272 + 8*m.b122*m.b123 + 10*m.b122*m.b128 + 20*m.b122*m.b140 + 20*m.b122*m.b157 + 10*
m.b122*m.b173 + 20*m.b122*m.b188 + 12*m.b122*m.b202 + 20*m.b122*m.b238 + 4*m.b122*m.b248 + 20*
m.b122*m.b257 + 2*m.b122*m.b265 - 10*m.b122*m.b273 - 4*m.b122*m.b274 - 10*m.b122*m.b275 - 4*
m.b122*m.b277 - 2*m.b122*m.b279 + 10*m.b123*m.b124 + 8*m.b123*m.b125 + 8*m.b123*m.b126 + 10*
m.b123*m.b127 + 4*m.b123*m.b129 + 20*m.b123*m.b141 + 20*m.b123*m.b158 + 10*m.b123*m.b174 + 20*
m.b123*m.b189 + 12*m.b123*m.b203 + 20*m.b123*m.b239 + 4*m.b123*m.b249 + 20*m.b123*m.b258 + 2*
m.b123*m.b266 + 10*m.b123*m.b273 - 4*m.b123*m.b280 - 10*m.b123*m.b281 - 4*m.b123*m.b283 - 2*
m.b123*m.b285 + 8*m.b124*m.b125 + 8*m.b124*m.b126 + 2*m.b124*m.b127 + 4*m.b124*m.b129 + 20*
m.b124*m.b142 + 20*m.b124*m.b159 + 10*m.b124*m.b175 + 20*m.b124*m.b190 + 12*m.b124*m.b204 + 20
*m.b124*m.b240 + 4*m.b124*m.b250 + 20*m.b124*m.b259 + 2*m.b124*m.b267 + 10*m.b124*m.b274 + 10*
m.b124*m.b280 - 10*m.b124*m.b286 - 4*m.b124*m.b288 - 2*m.b124*m.b290 + 2*m.b125*m.b126 + 20*
m.b125*m.b128 + 2*m.b125*m.b129 + 20*m.b125*m.b143 + 20*m.b125*m.b160 + 10*m.b125*m.b176 + 20*
m.b125*m.b191 + 12*m.b125*m.b205 + 20*m.b125*m.b241 + 4*m.b125*m.b251 + 20*m.b125*m.b260 + 2*
m.b125*m.b268 + 10*m.b125*m.b275 + 10*m.b125*m.b281 + 4*m.b125*m.b286 - 4*m.b125*m.b292 - 2*
m.b125*m.b294 + 20*m.b126*m.b144 + 20*m.b126*m.b161 + 10*m.b126*m.b177 + 20*m.b126*m.b192 + 12
*m.b126*m.b206 + 20*m.b126*m.b242 + 4*m.b126*m.b252 + 20*m.b126*m.b261 + 2*m.b126*m.b269 + 10*
m.b126*m.b276 + 10*m.b126*m.b282 + 4*m.b126*m.b287 + 10*m.b126*m.b291 - 4*m.b126*m.b295 - 2*
m.b126*m.b297 + 20*m.b127*m.b145 + 20*m.b127*m.b162 + 10*m.b127*m.b178 + 20*m.b127*m.b193 + 12
*m.b127*m.b207 + 20*m.b127*m.b243 + 4*m.b127*m.b253 + 20*m.b127*m.b262 + 2*m.b127*m.b270 + 10*
m.b127*m.b277 + 10*m.b127*m.b283 + 4*m.b127*m.b288 + 10*m.b127*m.b292 - 2*m.b127*m.b299 + 4*
m.b128*m.b129 + 20*m.b128*m.b146 + 20*m.b128*m.b163 + 10*m.b128*m.b179 + 20*m.b128*m.b194 + 12
*m.b128*m.b208 + 20*m.b128*m.b244 + 4*m.b128*m.b254 + 20*m.b128*m.b263 + 2*m.b128*m.b271 + 10*
m.b128*m.b278 + 10*m.b128*m.b284 + 4*m.b128*m.b289 + 10*m.b128*m.b293 + 4*m.b128*m.b298 - 2*
m.b128*m.b300 + 20*m.b129*m.b147 + 20*m.b129*m.b164 + 10*m.b129*m.b180 + 20*m.b129*m.b195 + 12
*m.b129*m.b209 + 20*m.b129*m.b245 + 4*m.b129*m.b255 + 20*m.b129*m.b264 + 2*m.b129*m.b272 + 10*
m.b129*m.b279 + 10*m.b129*m.b285 + 4*m.b129*m.b290 + 10*m.b129*m.b294 + 4*m.b129*m.b299 + 20*
m.b130*m.b131 + 4*m.b130*m.b132 + 10*m.b130*m.b133 + 4*m.b130*m.b134 + 6*m.b130*m.b136 + 8*
m.b130*m.b140 + 10*m.b130*m.b142 + 10*m.b130*m.b144 + 4*m.b130*m.b145 + 4*m.b130*m.b146 + 10*
m.b130*m.b147 - 6*m.b130*m.b148 - 10*m.b130*m.b149 - 4*m.b130*m.b152 - 8*m.b130*m.b153 - 10*
m.b130*m.b154 - 20*m.b130*m.b155 - 12*m.b130*m.b156 - 10*m.b130*m.b158 - 10*m.b130*m.b159 - 10
*m.b130*m.b160 - 10*m.b130*m.b162 - 10*m.b130*m.b163 + 10*m.b131*m.b132 + 12*m.b131*m.b133 + 2
*m.b131*m.b135 + 10*m.b131*m.b136 + 10*m.b131*m.b137 + 10*m.b131*m.b138 + 4*m.b131*m.b139 + 6*
m.b131*m.b140 + 10*m.b131*m.b141 + 4*m.b131*m.b143 + 20*m.b131*m.b144 + 20*m.b131*m.b145 + 2*
m.b131*m.b146 + 10*m.b131*m.b147 + 2*m.b131*m.b148 - 10*m.b131*m.b165 - 4*m.b131*m.b168 - 8*
m.b131*m.b169 - 10*m.b131*m.b170 - 20*m.b131*m.b171 - 12*m.b131*m.b172 - 10*m.b131*m.b174 - 10
*m.b131*m.b175 - 10*m.b131*m.b176 - 10*m.b131*m.b178 - 10*m.b131*m.b179 + 2*m.b132*m.b134 + 4*
m.b132*m.b135 + 2*m.b132*m.b136 + 12*m.b132*m.b141 + 12*m.b132*m.b142 + 8*m.b132*m.b143 + 10*
m.b132*m.b144 + 6*m.b132*m.b145 + 4*m.b132*m.b146 + 4*m.b132*m.b147 + 2*m.b132*m.b149 + 6*
m.b132*m.b165 - 4*m.b132*m.b183 - 8*m.b132*m.b184 - 10*m.b132*m.b185 - 20*m.b132*m.b186 - 12*
m.b132*m.b187 - 10*m.b132*m.b189 - 10*m.b132*m.b190 - 10*m.b132*m.b191 - 10*m.b132*m.b193 - 10
*m.b132*m.b194 + 4*m.b133*m.b134 + 8*m.b133*m.b136 + 4*m.b133*m.b137 + 2*m.b133*m.b138 + 12*
m.b133*m.b140 + 4*m.b133*m.b141 + 2*m.b133*m.b142 + 10*m.b133*m.b143 + 2*m.b133*m.b146 + 10*
m.b133*m.b147 + 2*m.b133*m.b150 + 6*m.b133*m.b166 + 10*m.b133*m.b181 - 4*m.b133*m.b197 - 8*
m.b133*m.b198 - 10*m.b133*m.b199 - 20*m.b133*m.b200 - 12*m.b133*m.b201 - 10*m.b133*m.b203 - 10
*m.b133*m.b204 - 10*m.b133*m.b205 - 10*m.b133*m.b207 - 10*m.b133*m.b208 + 4*m.b134*m.b135 + 2*
m.b134*m.b136 + 6*m.b134*m.b138 + 20*m.b134*m.b139 + 8*m.b134*m.b142 + 8*m.b134*m.b145 + 4*
m.b134*m.b146 + 10*m.b134*m.b147 + 2*m.b134*m.b151 + 6*m.b134*m.b167 + 10*m.b134*m.b182 - 4*
m.b134*m.b210 - 8*m.b134*m.b211 - 10*m.b134*m.b212 - 20*m.b134*m.b213 - 12*m.b134*m.b214 - 10*
m.b134*m.b216 - 10*m.b134*m.b217 - 10*m.b134*m.b218 - 10*m.b134*m.b220 - 10*m.b134*m.b221 + 8*
m.b135*m.b136 + 10*m.b135*m.b137 + 2*m.b135*m.b139 + 10*m.b135*m.b141 + 10*m.b135*m.b145 + 2*
m.b135*m.b146 + 2*m.b135*m.b147 + 2*m.b135*m.b152 + 6*m.b135*m.b168 + 10*m.b135*m.b183 - 8*
m.b135*m.b223 - 10*m.b135*m.b224 - 20*m.b135*m.b225 - 12*m.b135*m.b226 - 10*m.b135*m.b228 - 10
*m.b135*m.b229 - 10*m.b135*m.b230 - 10*m.b135*m.b232 - 10*m.b135*m.b233 + 4*m.b136*m.b139 + 4*
m.b136*m.b140 + 4*m.b136*m.b142 + 10*m.b136*m.b143 + 10*m.b136*m.b145 + 4*m.b136*m.b146 + 10*
m.b136*m.b147 + 2*m.b136*m.b153 + 6*m.b136*m.b169 + 10*m.b136*m.b184 + 4*m.b136*m.b223 - 10*
m.b136*m.b235 - 20*m.b136*m.b236 - 12*m.b136*m.b237 - 10*m.b136*m.b239 - 10*m.b136*m.b240 - 10
*m.b136*m.b241 - 10*m.b136*m.b243 - 10*m.b136*m.b244 + 4*m.b137*m.b138 + 12*m.b137*m.b142 + 6*
m.b137*m.b143 + 10*m.b137*m.b144 + 10*m.b137*m.b147 + 2*m.b137*m.b154 + 6*m.b137*m.b170 + 10*
m.b137*m.b185 + 4*m.b137*m.b224 + 8*m.b137*m.b235 - 20*m.b137*m.b246 - 12*m.b137*m.b247 - 10*
m.b137*m.b249 - 10*m.b137*m.b250 - 10*m.b137*m.b251 - 10*m.b137*m.b253 - 10*m.b137*m.b254 + 10
*m.b138*m.b140 + 10*m.b138*m.b141 + 2*m.b138*m.b142 + 10*m.b138*m.b143 + 4*m.b138*m.b144 + 2*
m.b138*m.b145 + 4*m.b138*m.b146 + 20*m.b138*m.b147 + 2*m.b138*m.b155 + 6*m.b138*m.b171 + 10*
m.b138*m.b186 + 4*m.b138*m.b225 + 8*m.b138*m.b236 + 10*m.b138*m.b246 - 12*m.b138*m.b256 - 10*
m.b138*m.b258 - 10*m.b138*m.b259 - 10*m.b138*m.b260 - 10*m.b138*m.b262 - 10*m.b138*m.b263 + 10
*m.b139*m.b140 + 4*m.b139*m.b141 + 2*m.b139*m.b142 + 2*m.b139*m.b143 + 10*m.b139*m.b144 + 12*
m.b139*m.b145 + 10*m.b139*m.b146 + 10*m.b139*m.b147 + 2*m.b139*m.b156 + 6*m.b139*m.b172 + 10*
m.b139*m.b187 + 4*m.b139*m.b226 + 8*m.b139*m.b237 + 10*m.b139*m.b247 + 20*m.b139*m.b256 - 10*
m.b139*m.b266 - 10*m.b139*m.b267 - 10*m.b139*m.b268 - 10*m.b139*m.b270 - 10*m.b139*m.b271 + 8*
m.b140*m.b141 + 10*m.b140*m.b146 + 2*m.b140*m.b157 + 6*m.b140*m.b173 + 10*m.b140*m.b188 + 4*
m.b140*m.b227 + 8*m.b140*m.b238 + 10*m.b140*m.b248 + 20*m.b140*m.b257 + 12*m.b140*m.b265 - 10*
m.b140*m.b273 - 10*m.b140*m.b274 - 10*m.b140*m.b275 - 10*m.b140*m.b277 - 10*m.b140*m.b278 + 10
*m.b141*m.b142 + 8*m.b141*m.b143 + 8*m.b141*m.b144 + 10*m.b141*m.b145 + 4*m.b141*m.b147 + 2*
m.b141*m.b158 + 6*m.b141*m.b174 + 10*m.b141*m.b189 + 4*m.b141*m.b228 + 8*m.b141*m.b239 + 10*
m.b141*m.b249 + 20*m.b141*m.b258 + 12*m.b141*m.b266 - 10*m.b141*m.b280 - 10*m.b141*m.b281 - 10
*m.b141*m.b283 - 10*m.b141*m.b284 + 8*m.b142*m.b143 + 8*m.b142*m.b144 + 2*m.b142*m.b145 + 4*
m.b142*m.b147 +
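# The fragment above is a machine-generated objective over binary variables (m.b112,
# m.b113, ...) built from pairwise bilinear terms. A minimal sketch of the kind of
# Pyomo model that produces such expressions, assuming binary variables `b` and
# illustrative pairwise weights (indices and coefficients here are made up):
from pyomo.environ import ConcreteModel, Var, Binary, Objective, minimize

def _example_bilinear_binary_objective():
    m = ConcreteModel()
    m.b = Var(range(1, 5), domain=Binary)
    # Each weight * b[i] * b[j] term mirrors terms like 10*m.b113*m.b114 above.
    m.obj = Objective(
        expr=10 * m.b[1] * m.b[2] - 4 * m.b[2] * m.b[3] + 2 * m.b[3] * m.b[4],
        sense=minimize,
    )
    return m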
'topic': 'Web Engineering'},
{'author': 'A',
'before': [923],
'message': 'Redecentralization of the Web',
'observation_id': 924,
'receiver': 'B',
'sender': 'A',
'topic': 'Web Engineering'},
{'author': 'C',
'before': [924],
'message': 'Redecentralization of the Web',
'observation_id': 925,
'receiver': 'B',
'sender': 'C',
'topic': 'Web Engineering'},
{'author': 'A',
'before': [925],
'message': 'Redecentralization of the Web',
'observation_id': 926,
'receiver': 'B',
'sender': 'A',
'topic': 'Web Engineering'},
{'author': 'C',
'before': [926],
'message': 'Redecentralization of the Web',
'observation_id': 927,
'receiver': 'B',
'sender': 'C',
'topic': 'Web Engineering'},
{'author': 'A',
'before': [927],
'message': 'Redecentralization of the Web',
'observation_id': 928,
'receiver': 'B',
'sender': 'A',
'topic': 'Web Engineering'},
{'author': 'C',
'before': [928],
'message': 'Redecentralization of the Web',
'observation_id': 929,
'receiver': 'B',
'sender': 'C',
'topic': 'Web Engineering'},
{'author': 'A',
'before': [929],
'message': 'Redecentralization of the Web',
'observation_id': 930,
'receiver': 'B',
'sender': 'A',
'topic': 'Web Engineering'},
{'author': 'C',
'before': [930],
'message': 'Redecentralization of the Web',
'observation_id': 931,
'receiver': 'B',
'sender': 'C',
'topic': 'Web Engineering'},
{'author': 'A',
'before': [931],
'message': 'Redecentralization of the Web',
'observation_id': 932,
'receiver': 'B',
'sender': 'A',
'topic': 'Web Engineering'},
{'author': 'C',
'before': [932],
'message': 'Redecentralization of the Web',
'observation_id': 933,
'receiver': 'B',
'sender': 'C',
'topic': 'Web Engineering'},
{'author': 'A',
'before': [933],
'message': 'Redecentralization of the Web',
'observation_id': 934,
'receiver': 'B',
'sender': 'A',
'topic': 'Web Engineering'},
{'author': 'C',
'before': [934],
'message': 'Redecentralization of the Web',
'observation_id': 935,
'receiver': 'B',
'sender': 'C',
'topic': 'Web Engineering'},
{'author': 'A',
'before': [935],
'message': 'Redecentralization of the Web',
'observation_id': 936,
'receiver': 'B',
'sender': 'A',
'topic': 'Web Engineering'},
{'author': 'C',
'before': [936],
'message': 'Redecentralization of the Web',
'observation_id': 937,
'receiver': 'B',
'sender': 'C',
'topic': 'Web Engineering'},
{'author': 'A',
'before': [937],
'message': 'Redecentralization of the Web',
'observation_id': 938,
'receiver': 'B',
'sender': 'A',
'topic': 'Web Engineering'},
{'author': 'C',
'before': [938],
'message': 'Redecentralization of the Web',
'observation_id': 939,
'receiver': 'B',
'sender': 'C',
'topic': 'Web Engineering'},
{'author': 'A',
'before': [939],
'message': 'Redecentralization of the Web',
'observation_id': 940,
'receiver': 'B',
'sender': 'A',
'topic': 'Web Engineering'},
{'author': 'C',
'before': [940],
'message': 'Redecentralization of the Web',
'observation_id': 941,
'receiver': 'B',
'sender': 'C',
'topic': 'Web Engineering'},
{'author': 'A',
'before': [941],
'message': 'Redecentralization of the Web',
'observation_id': 942,
'receiver': 'B',
'sender': 'A',
'topic': 'Web Engineering'},
{'author': 'C',
'before': [942],
'message': 'Redecentralization of the Web',
'observation_id': 943,
'receiver': 'B',
'sender': 'C',
'topic': 'Web Engineering'},
{'author': 'A',
'before': [943],
'message': 'Redecentralization of the Web',
'observation_id': 944,
'receiver': 'B',
'sender': 'A',
'topic': 'Web Engineering'},
{'author': 'C',
'before': [944],
'message': 'Redecentralization of the Web',
'observation_id': 945,
'receiver': 'B',
'sender': 'C',
'topic': 'Web Engineering'},
{'author': 'A',
'before': [945],
'message': 'Redecentralization of the Web',
'observation_id': 946,
'receiver': 'B',
'sender': 'A',
'topic': 'Web Engineering'},
{'author': 'C',
'before': [946],
'message': 'Redecentralization of the Web',
'observation_id': 947,
'receiver': 'B',
'sender': 'C',
'topic': 'Web Engineering'},
{'author': 'A',
'before': [947],
'message': 'Redecentralization of the Web',
'observation_id': 948,
'receiver': 'B',
'sender': 'A',
'topic': 'Web Engineering'},
{'author': 'C',
'before': [948],
'message': 'Redecentralization of the Web',
'observation_id': 949,
'receiver': 'B',
'sender': 'C',
'topic': 'Web Engineering'},
{'author': 'A',
'before': [949],
'message': 'Redecentralization of the Web',
'observation_id': 950,
'receiver': 'B',
'sender': 'A',
'topic': 'Web Engineering'},
{'author': 'C',
'before': [950],
'message': 'Redecentralization of the Web',
'observation_id': 951,
'receiver': 'B',
'sender': 'C',
'topic': 'Web Engineering'},
{'author': 'A',
'before': [951],
'message': 'Redecentralization of the Web',
'observation_id': 952,
'receiver': 'B',
'sender': 'A',
'topic': 'Web Engineering'},
{'author': 'C',
'before': [952],
'message': 'Redecentralization of the Web',
'observation_id': 953,
'receiver': 'B',
'sender': 'C',
'topic': 'Web Engineering'},
{'author': 'A',
'before': [953],
'message': 'Redecentralization of the Web',
'observation_id': 954,
'receiver': 'B',
'sender': 'A',
'topic': 'Web Engineering'},
{'author': 'C',
'before': [954],
'message': 'Redecentralization of the Web',
'observation_id': 955,
'receiver': 'B',
'sender': 'C',
'topic': 'Web Engineering'},
{'author': 'A',
'before': [955],
'message': 'Redecentralization of the Web',
'observation_id': 956,
'receiver': 'B',
'sender': 'A',
'topic': 'Web Engineering'},
{'author': 'C',
'before': [956],
'message': 'Redecentralization of the Web',
'observation_id': 957,
'receiver': 'B',
'sender': 'C',
'topic': 'Web Engineering'},
{'author': 'A',
'before': [957],
'message': 'Redecentralization of the Web',
'observation_id': 958,
'receiver': 'B',
'sender': 'A',
'topic': 'Web Engineering'},
{'author': 'C',
'before': [958],
'message': 'Redecentralization of the Web',
'observation_id': 959,
'receiver': 'B',
'sender': 'C',
'topic': 'Web Engineering'},
{'author': 'A',
'before': [959],
'message': 'Redecentralization of the Web',
'observation_id': 960,
'receiver': 'B',
'sender': 'A',
'topic': 'Web Engineering'},
{'author': 'C',
'before': [960],
'message': 'Redecentralization of the Web',
'observation_id': 961,
'receiver': 'B',
'sender': 'C',
'topic': 'Web Engineering'},
{'author': 'A',
'before': [961],
'message': 'Redecentralization of the Web',
'observation_id': 962,
'receiver': 'B',
'sender': 'A',
'topic': 'Web Engineering'},
{'author': 'C',
'before': [962],
'message': 'Redecentralization of the Web',
'observation_id': 963,
'receiver': 'B',
'sender': 'C',
'topic': 'Web Engineering'},
{'author': 'A',
'before': [963],
'message': 'Redecentralization of the Web',
'observation_id': 964,
'receiver': 'B',
'sender': 'A',
'topic': 'Web Engineering'},
{'author': 'C',
'before': [964],
'message': 'Redecentralization of the Web',
'observation_id': 965,
'receiver': 'B',
'sender': 'C',
'topic': 'Web Engineering'},
{'author': 'A',
'before': [965],
'message': 'Redecentralization of the Web',
'observation_id': 966,
'receiver': 'B',
'sender': 'A',
'topic': 'Web Engineering'},
{'author': 'C',
'before': [966],
'message': 'Redecentralization of the Web',
'observation_id': 967,
'receiver': 'B',
'sender': 'C',
'topic': 'Web Engineering'},
{'author': 'A',
'before': [967],
'message': 'Redecentralization of the Web',
'observation_id': 968,
'receiver': 'B',
'sender': 'A',
'topic': 'Web Engineering'},
{'author': 'C',
'before': [968],
'message': 'Redecentralization of the Web',
'observation_id': 969,
'receiver': 'B',
'sender': 'C',
'topic': 'Web Engineering'},
{'author': 'A',
'before': [969],
'message': 'Redecentralization of the Web',
'observation_id': 970,
'receiver': 'B',
'sender': 'A',
'topic': 'Web Engineering'},
{'author': 'C',
'before': [970],
'message': 'Redecentralization of the Web',
'observation_id': 971,
'receiver': 'B',
'sender': 'C',
'topic': 'Web Engineering'},
{'author': 'A',
'before': [971],
'message': 'Redecentralization of the Web',
'observation_id': 972,
'receiver': 'B',
'sender': 'A',
'topic': 'Web Engineering'},
{'author': 'C',
'before': [972],
'message': 'Redecentralization of the Web',
'observation_id': 973,
'receiver': 'B',
'sender': 'C',
'topic': 'Web Engineering'},
{'author': 'A',
'before': [973],
'message': 'Redecentralization of the Web',
'observation_id': 974,
'receiver': 'B',
'sender': 'A',
'topic': 'Web Engineering'},
{'author': 'C',
'before': [974],
'message': 'Redecentralization of the Web',
'observation_id': 975,
'receiver': 'B',
'sender': 'C',
'topic': 'Web Engineering'},
{'author': 'A',
'before': [975],
'message': 'Redecentralization of the Web',
'observation_id': 976,
'receiver': 'B',
'sender': 'A',
'topic': 'Web Engineering'},
{'author': 'C',
'before': [976],
'message': 'Redecentralization of the Web',
'observation_id': 977,
'receiver': 'B',
'sender': 'C',
'topic': 'Web Engineering'},
{'author': 'A',
'before': [977],
'message': 'Redecentralization of the Web',
'observation_id': 978,
'receiver': 'B',
'sender': 'A',
'topic': 'Web Engineering'},
{'author': 'C',
'before': [978],
'message': 'Redecentralization of the Web',
'observation_id': 979,
'receiver': 'B',
'sender': 'C',
'topic': 'Web Engineering'},
{'author': 'A',
'before': [979],
'message': 'Redecentralization of the Web',
'observation_id': 980,
'receiver': 'B',
'sender': 'A',
'topic': 'Web Engineering'},
{'author': 'C',
'before': [980],
'message': 'Redecentralization of the Web',
'observation_id': 981,
'receiver': 'B',
'sender': 'C',
'topic': 'Web Engineering'},
{'author': 'A',
'before': [981],
'message': 'Redecentralization of the Web',
'observation_id': 982,
'receiver': 'B',
'sender': 'A',
'topic': 'Web Engineering'},
{'author': 'C',
'before': [982],
'message': 'Redecentralization of the Web',
'observation_id': 983,
'receiver': 'B',
'sender': 'C',
'topic': 'Web Engineering'},
{'author': 'A',
'before': [983],
'message': 'Redecentralization of the Web',
'observation_id': 984,
'receiver': 'B',
'sender': 'A',
'topic': 'Web Engineering'},
{'author': 'C',
'before': [984],
'message': 'Redecentralization of the Web',
'observation_id': 985,
'receiver': 'B',
'sender': 'C',
'topic': 'Web Engineering'},
{'author': 'A',
'before': [985],
'message': 'Redecentralization of the Web',
'observation_id': 986,
'receiver': 'B',
'sender': 'A',
'topic': 'Web Engineering'},
{'author': 'C',
'before': [986],
'message': 'Redecentralization of the Web',
'observation_id': 987,
'receiver': 'B',
'sender': 'C',
'topic': 'Web Engineering'},
{'author': 'A',
'before': [987],
'message': 'Redecentralization of the Web',
'observation_id': 988,
'receiver': 'B',
'sender': 'A',
'topic': 'Web Engineering'},
{'author': 'C',
'before': [988],
'message': 'Redecentralization of the Web',
'observation_id': 989,
'receiver': 'B',
'sender': 'C',
'topic': 'Web Engineering'},
{'author': 'A',
'before': [989],
'message': 'Redecentralization of the Web',
'observation_id': 990,
'receiver': 'B',
'sender': 'A',
'topic': 'Web Engineering'},
{'author': 'C',
'before': [990],
'message': 'Redecentralization of the Web',
'observation_id': 991,
'receiver': 'B',
'sender': 'C',
'topic': 'Web Engineering'},
{'author': 'A',
'before': [991],
'message': 'Redecentralization of the Web',
'observation_id': 992,
'receiver': 'B',
'sender': 'A',
'topic': 'Web Engineering'},
{'author': 'C',
'before': [992],
'message': 'Redecentralization of the Web',
'observation_id': 993,
'receiver': 'B',
'sender': 'C',
'topic': 'Web Engineering'},
{'author': 'A',
'before': [993],
'message': 'Redecentralization of the Web',
'observation_id': 994,
'receiver': 'B',
'sender': 'A',
'topic': 'Web Engineering'},
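# A hedged sketch (helper name and inputs are illustrative): each record above points
# at its predecessor through `before`, so the exchange can be re-threaded by following
# observation_id links.
def _example_thread_observations(observations):
    by_id = {o['observation_id']: o for o in observations}
    next_of = {o['before'][0]: o['observation_id'] for o in observations if o['before']}
    roots = [o for o in observations if not o['before'] or o['before'][0] not in by_id]
    ordered = []
    for root in roots:
        cur = root['observation_id']
        while cur in by_id:
            ordered.append(by_id[cur])
            cur = next_of.get(cur)
    return ordered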
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
import datetime
from typing import Dict, List, Optional, Union
from azure.core.exceptions import HttpResponseError
import msrest.serialization
class AgreementContent(msrest.serialization.Model):
"""The integration account agreement content.
:param a_s2: The AS2 agreement content.
:type a_s2: ~azure.mgmt.logic.models.As2AgreementContent
:param x12: The X12 agreement content.
:type x12: ~azure.mgmt.logic.models.X12AgreementContent
:param edifact: The EDIFACT agreement content.
:type edifact: ~azure.mgmt.logic.models.EdifactAgreementContent
"""
_attribute_map = {
'a_s2': {'key': 'aS2', 'type': 'As2AgreementContent'},
'x12': {'key': 'x12', 'type': 'X12AgreementContent'},
'edifact': {'key': 'edifact', 'type': 'EdifactAgreementContent'},
}
def __init__(
self,
*,
a_s2: Optional["As2AgreementContent"] = None,
x12: Optional["X12AgreementContent"] = None,
edifact: Optional["EdifactAgreementContent"] = None,
**kwargs
):
super(AgreementContent, self).__init__(**kwargs)
self.a_s2 = a_s2
self.x12 = x12
self.edifact = edifact
class ApiDeploymentParameterMetadata(msrest.serialization.Model):
"""The API deployment parameter metadata.
:param type: The type.
:type type: str
    :param is_required: Indicates whether it is required.
:type is_required: bool
:param display_name: The display name.
:type display_name: str
:param description: The description.
:type description: str
:param visibility: The visibility. Possible values include: "NotSpecified", "Default",
"Internal".
:type visibility: str or ~azure.mgmt.logic.models.ApiDeploymentParameterVisibility
"""
_attribute_map = {
'type': {'key': 'type', 'type': 'str'},
'is_required': {'key': 'isRequired', 'type': 'bool'},
'display_name': {'key': 'displayName', 'type': 'str'},
'description': {'key': 'description', 'type': 'str'},
'visibility': {'key': 'visibility', 'type': 'str'},
}
def __init__(
self,
*,
type: Optional[str] = None,
is_required: Optional[bool] = None,
display_name: Optional[str] = None,
description: Optional[str] = None,
visibility: Optional[Union[str, "ApiDeploymentParameterVisibility"]] = None,
**kwargs
):
super(ApiDeploymentParameterMetadata, self).__init__(**kwargs)
self.type = type
self.is_required = is_required
self.display_name = display_name
self.description = description
self.visibility = visibility
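# A minimal usage sketch (assuming msrest's standard Model behavior): `_attribute_map`
# above drives serialization, so the snake_case attributes come back out with the
# wire-format camelCase keys.
def _example_serialize_parameter_metadata():
    meta = ApiDeploymentParameterMetadata(
        type="string", is_required=True, display_name="Package", visibility="Default"
    )
    # serialize() returns e.g. {"type": "string", "isRequired": True, ...}
    return meta.serialize()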
class ApiDeploymentParameterMetadataSet(msrest.serialization.Model):
"""The API deployment parameters metadata.
:param package_content_link: The package content link parameter.
:type package_content_link: ~azure.mgmt.logic.models.ApiDeploymentParameterMetadata
    :param redis_cache_connection_string: The Redis cache connection string parameter.
:type redis_cache_connection_string: ~azure.mgmt.logic.models.ApiDeploymentParameterMetadata
"""
_attribute_map = {
'package_content_link': {'key': 'packageContentLink', 'type': 'ApiDeploymentParameterMetadata'},
'redis_cache_connection_string': {'key': 'redisCacheConnectionString', 'type': 'ApiDeploymentParameterMetadata'},
}
def __init__(
self,
*,
package_content_link: Optional["ApiDeploymentParameterMetadata"] = None,
redis_cache_connection_string: Optional["ApiDeploymentParameterMetadata"] = None,
**kwargs
):
super(ApiDeploymentParameterMetadataSet, self).__init__(**kwargs)
self.package_content_link = package_content_link
self.redis_cache_connection_string = redis_cache_connection_string
class Resource(msrest.serialization.Model):
"""The base resource type.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar id: The resource id.
:vartype id: str
:ivar name: Gets the resource name.
:vartype name: str
:ivar type: Gets the resource type.
:vartype type: str
:param location: The resource location.
:type location: str
:param tags: A set of tags. The resource tags.
:type tags: dict[str, str]
"""
_validation = {
'id': {'readonly': True},
'name': {'readonly': True},
'type': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'location': {'key': 'location', 'type': 'str'},
'tags': {'key': 'tags', 'type': '{str}'},
}
def __init__(
self,
*,
location: Optional[str] = None,
tags: Optional[Dict[str, str]] = None,
**kwargs
):
super(Resource, self).__init__(**kwargs)
self.id = None
self.name = None
self.type = None
self.location = location
self.tags = tags
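# A short sketch of the read-only pattern documented above (assuming msrest's as_dict()
# helper): id/name/type are marked readonly in `_validation`, start as None on the
# client, and are only populated from server responses.
def _example_resource_roundtrip():
    res = Resource(location="westus", tags={"env": "dev"})
    assert res.id is None and res.name is None and res.type is None
    return res.as_dict()  # contains location/tags; readonly fields stay server-owned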
class ApiOperation(Resource):
"""The api operation.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar id: The resource id.
:vartype id: str
:ivar name: Gets the resource name.
:vartype name: str
:ivar type: Gets the resource type.
:vartype type: str
:param location: The resource location.
:type location: str
:param tags: A set of tags. The resource tags.
:type tags: dict[str, str]
:param properties: The api operations properties.
:type properties: ~azure.mgmt.logic.models.ApiOperationPropertiesDefinition
"""
_validation = {
'id': {'readonly': True},
'name': {'readonly': True},
'type': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'location': {'key': 'location', 'type': 'str'},
'tags': {'key': 'tags', 'type': '{str}'},
'properties': {'key': 'properties', 'type': 'ApiOperationPropertiesDefinition'},
}
def __init__(
self,
*,
location: Optional[str] = None,
tags: Optional[Dict[str, str]] = None,
properties: Optional["ApiOperationPropertiesDefinition"] = None,
**kwargs
):
super(ApiOperation, self).__init__(location=location, tags=tags, **kwargs)
self.properties = properties
class ApiOperationAnnotation(msrest.serialization.Model):
"""The Api Operation Annotation.
:param status: The status annotation. Possible values include: "NotSpecified", "Preview",
"Production".
:type status: str or ~azure.mgmt.logic.models.StatusAnnotation
:param family: The family.
:type family: str
:param revision: The revision.
:type revision: int
"""
_attribute_map = {
'status': {'key': 'status', 'type': 'str'},
'family': {'key': 'family', 'type': 'str'},
'revision': {'key': 'revision', 'type': 'int'},
}
def __init__(
self,
*,
status: Optional[Union[str, "StatusAnnotation"]] = None,
family: Optional[str] = None,
revision: Optional[int] = None,
**kwargs
):
super(ApiOperationAnnotation, self).__init__(**kwargs)
self.status = status
self.family = family
self.revision = revision
class ApiOperationListResult(msrest.serialization.Model):
"""The list of managed API operations.
:param value: The api operation definitions for an API.
:type value: list[~azure.mgmt.logic.models.ApiOperation]
:param next_link: The URL to get the next set of results.
:type next_link: str
"""
_attribute_map = {
'value': {'key': 'value', 'type': '[ApiOperation]'},
'next_link': {'key': 'nextLink', 'type': 'str'},
}
def __init__(
self,
*,
value: Optional[List["ApiOperation"]] = None,
next_link: Optional[str] = None,
**kwargs
):
super(ApiOperationListResult, self).__init__(**kwargs)
self.value = value
self.next_link = next_link
class ApiOperationPropertiesDefinition(msrest.serialization.Model):
"""The api operations properties.
:param summary: The summary of the api operation.
:type summary: str
:param description: The description of the api operation.
:type description: str
:param visibility: The visibility of the api operation.
:type visibility: str
:param trigger: The trigger type of api operation.
:type trigger: str
:param trigger_hint: The trigger hint for the api operation.
:type trigger_hint: str
:param pageable: Indicates whether the api operation is pageable.
:type pageable: bool
:param annotation: The annotation of api operation.
:type annotation: ~azure.mgmt.logic.models.ApiOperationAnnotation
:param api: The api reference.
:type api: ~azure.mgmt.logic.models.ApiReference
:param inputs_definition: The operation inputs definition schema.
:type inputs_definition: ~azure.mgmt.logic.models.SwaggerSchema
:param responses_definition: The operation responses definition schemas.
:type responses_definition: dict[str, ~azure.mgmt.logic.models.SwaggerSchema]
:param is_webhook: Indicates whether the API operation is webhook or not.
:type is_webhook: bool
:param is_notification: Indicates whether the API operation is notification or not.
:type is_notification: bool
"""
_attribute_map = {
'summary': {'key': 'summary', 'type': 'str'},
'description': {'key': 'description', 'type': 'str'},
'visibility': {'key': 'visibility', 'type': 'str'},
'trigger': {'key': 'trigger', 'type': 'str'},
'trigger_hint': {'key': 'triggerHint', 'type': 'str'},
'pageable': {'key': 'pageable', 'type': 'bool'},
'annotation': {'key': 'annotation', 'type': 'ApiOperationAnnotation'},
'api': {'key': 'api', 'type': 'ApiReference'},
'inputs_definition': {'key': 'inputsDefinition', 'type': 'SwaggerSchema'},
'responses_definition': {'key': 'responsesDefinition', 'type': '{SwaggerSchema}'},
'is_webhook': {'key': 'isWebhook', 'type': 'bool'},
'is_notification': {'key': 'isNotification', 'type': 'bool'},
}
def __init__(
self,
*,
summary: Optional[str] = None,
description: Optional[str] = None,
visibility: Optional[str] = None,
trigger: Optional[str] = None,
trigger_hint: Optional[str] = None,
pageable: Optional[bool] = None,
annotation: Optional["ApiOperationAnnotation"] = None,
api: Optional["ApiReference"] = None,
inputs_definition: Optional["SwaggerSchema"] = None,
responses_definition: Optional[Dict[str, "SwaggerSchema"]] = None,
is_webhook: Optional[bool] = None,
is_notification: Optional[bool] = None,
**kwargs
):
super(ApiOperationPropertiesDefinition, self).__init__(**kwargs)
self.summary = summary
self.description = description
self.visibility = visibility
self.trigger = trigger
self.trigger_hint = trigger_hint
self.pageable = pageable
self.annotation = annotation
self.api = api
self.inputs_definition = inputs_definition
self.responses_definition = responses_definition
self.is_webhook = is_webhook
self.is_notification = is_notification
class ResourceReference(msrest.serialization.Model):
"""The resource reference.
Variables are only populated by the server, and will be ignored when sending a request.
:param id: The resource id.
:type id: str
:ivar name: Gets the resource name.
:vartype name: str
:ivar type: Gets the resource type.
:vartype type: str
"""
_validation = {
'name': {'readonly': True},
'type': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
}
def __init__(
self,
*,
id: Optional[str] = None,
**kwargs
):
super(ResourceReference, self).__init__(**kwargs)
self.id = id
self.name = None
self.type = None
class ApiReference(ResourceReference):
"""The Api reference.
Variables are only populated by the server, and will be ignored when sending a request.
:param id: The resource id.
:type id: str
:ivar name: Gets the resource name.
:vartype name: str
:ivar type: Gets the resource type.
:vartype type: str
:param display_name: The display name of the api.
:type display_name: str
:param description: The description of the api.
:type description: str
:param icon_uri: The icon uri of the api.
:type icon_uri: str
:param swagger: The swagger of the api.
:type swagger: object
:param brand_color: The brand color of the api.
:type brand_color: str
:param category: The tier. Possible values include: "NotSpecified", "Enterprise", "Standard",
"Premium".
    :type category:
glance_exceptions.NotFound:
pass
class FindOrUploadImage(BaseSymmetricSingletonTask):
def __init__(self, cfg, migration, obj, location):
self.var_name = location + '_image_id'
super(FindOrUploadImage, self).__init__(
cfg, migration, obj, location, provides=[self.var_name])
def migrate(self, *args, **kwargs):
with model.Session() as session:
image_id = self._find_supported_cirros_image(session)
if image_id is None:
try:
img = self._upload_cirros_image(session)
except clients.Timeout:
raise base.AbortMigration(
'Failed to upload transfer VM image')
image_obj = self.load_from_cloud(image.Image, self.cloud, img)
session.store(image_obj)
image_id = img.id
self.destructor = ImageDestructor(self.location, image_id)
return {self.var_name: image_id,
self.destructor_var: self.destructor}
def _find_supported_cirros_image(self, session):
image_client = clients.image_client(self.cloud)
for img in session.list(image.Image, self.cloud):
if img.checksum.lower() == _get_image_md5():
# Test if image is good
image_id = img.object_id.id
try:
next(image_client.images.data(image_id))
except Exception:
LOG.debug('Failed to download part of image %s from %s',
image_id, self.location)
continue
return image_id
return None
def _upload_cirros_image(self, session):
image_client = clients.image_client(self.cloud)
with open(_get_image_location(), 'r') as f:
img = image_client.images.create(
data=f, name=IMAGE_FILENAME,
container_format='bare',
disk_format='qcow2',
is_public=False, protected=False,
owner=_get_admin_tenant_id(self.cloud, session))
return clients.wait_for(_object_status_is, image_client, 'images',
img.id, 'active')
class FlavorDestructor(base.Destructor):
def __init__(self, location, flavor_id, object_id):
self.location = location
self.flavor_id = flavor_id
self.object_id = object_id
def get_signature(self):
return self.object_id
def run(self, cfg, migration):
cloud = cfg.clouds[getattr(migration, self.location)]
nova_client = clients.compute_client(cloud)
try:
with model.Session() as session:
session.delete(compute.Flavor, object_id=self.object_id)
clients.retry(nova_client.flavors.delete, self.flavor_id,
expected_exceptions=[nova_exceptions.NotFound])
except nova_exceptions.NotFound:
pass
class FindOrCreateFlavor(BaseSymmetricSingletonTask):
def __init__(self, cfg, migration, obj, location):
self.var_name = location + '_flavor'
super(FindOrCreateFlavor, self).__init__(
cfg, migration, obj, location,
provides=[self.var_name])
def migrate(self, *args, **kwargs):
with model.Session() as session:
flavor = self._find_existing_flavor(session)
if flavor is None:
flavor = self._create_flavor()
self.destructor = FlavorDestructor(
self.location, flavor.flavor_id, flavor.object_id)
return {self.var_name: flavor,
self.destructor_var: self.destructor}
def _find_existing_flavor(self, session):
for flavor in session.list(compute.Flavor, self.cloud):
if not flavor.is_disabled \
and not flavor.is_deleted \
and flavor.vcpus == 1 \
and 48 <= flavor.memory_mb <= 64 \
and flavor.root_gb == 0 \
and flavor.ephemeral_gb == 0 \
and flavor.swap_mb == 0:
return flavor
def _create_flavor(self):
nova_client = clients.compute_client(self.cloud)
flavor_id = str(uuid.uuid4())
clients.retry(nova_client.flavors.create, 'tmp.vol_tx', 64, 1, 0,
flavorid=flavor_id, is_public=False)
flavor_discoverer = discover.get_discoverer(self.config, self.cloud,
compute.Flavor)
flavor = flavor_discoverer.discover_by_flavor_id(flavor_id)
return flavor
class NetworkDestructor(base.Destructor):
def __init__(self, location, network_id, subnet_id):
self.location = location
self.network_id = network_id
self.subnet_id = subnet_id
def get_signature(self):
return self.location, self.network_id
def run(self, cfg, migration):
cloud = cfg.clouds[getattr(migration, self.location)]
network_client = clients.network_client(cloud)
try:
with model.Session() as session:
net_obj_id = model.ObjectId(self.network_id, cloud.name)
subnet_obj_id = model.ObjectId(self.subnet_id, cloud.name)
session.delete(network.Network, object_id=net_obj_id)
session.delete(network.Subnet, object_id=subnet_obj_id)
clients.retry(network_client.delete_network, self.network_id,
expected_exceptions=[neutron_exceptions.NotFound])
except neutron_exceptions.NotFound:
pass
class FindOrCreateNetwork(BaseSymmetricSingletonTask):
def __init__(self, cfg, migration, obj, location):
self.var_name = location + '_net'
super(FindOrCreateNetwork, self).__init__(
cfg, migration, obj, location,
provides=[self.var_name])
def migrate(self, *args, **kwargs):
with model.Session() as session:
net = self._find_existing_network(session)
if net is None:
net, net_id, subnet_id = self._create_net(session)
self.destructor = NetworkDestructor(
self.location, net_id, subnet_id)
return {self.var_name: net, self.destructor_var: self.destructor}
def _find_existing_network(self, session):
for net in session.list(network.Network, self.cloud):
if net.name == 'tmp_vol_tx' and len(net.subnets) == 1:
return net
return None
def _create_net(self, session):
network_client = clients.network_client(self.cloud)
raw_net = network_client.create_network({
'network': {
'name': 'tmp_vol_tx',
'shared': False,
},
})
raw_subnet = network_client.create_subnet({
'subnet': {
'cidr': '172.16.31.10/1',
'ip_version': 4,
'gateway_ip': None,
'network_id': raw_net['network']['id']
},
})
net = self.load_from_cloud(network.Network, self.cloud,
raw_net['network'])
session.store(net)
subnet = self.load_from_cloud(network.Subnet, self.cloud,
raw_subnet['subnet'])
session.store(subnet)
return net, raw_net['network']['id'], raw_subnet['subnet']['id']
class EnsureAdminRoleDestructor(base.Destructor):
def __init__(self, location, user_id, role_id, tenant_id):
self.location = location
self.user_id = user_id
self.role_id = role_id
self.tenant_id = tenant_id
def get_signature(self):
return self.location, self.user_id, self.role_id, self.tenant_id
def run(self, cfg, migration):
cloud = cfg.clouds[getattr(migration, self.location)]
identity_client = clients.identity_client(cloud)
try:
clients.retry(identity_client.roles.remove_user_role,
user=self.user_id, role=self.role_id,
tenant=self.tenant_id,
expected_exceptions=[
keystone_exceptions.NotFound])
except keystone_exceptions.NotFound:
pass
class EnsureAdminRole(BaseSymmetricSingletonTask):
def __init__(self, cfg, migration, obj, location):
super(EnsureAdminRole, self).__init__(cfg, migration, obj, location)
self.already_member = False
self.user_id = None
self.role_id = None
self.tenant_id = None
def get_singleton_key(self, source_obj, *args, **kwargs):
return self.location, _get_object_tenant_id(self.cloud, source_obj)
def _user_id(self, username):
with model.Session() as session:
for user in session.list(identity.User, self.cloud):
if user.name.lower() == username.lower():
return user.object_id.id
        raise base.AbortMigration('User %s not found in cloud %s', username,
self.cloud.name)
def _role_id(self, rolename):
with model.Session() as session:
for role in session.list(identity.Role, self.cloud):
if role.name.lower() == rolename.lower():
return role.object_id.id
        raise base.AbortMigration('Role %s not found in cloud %s', rolename,
self.cloud.name)
def migrate(self, source_obj, *args, **kwargs):
cloud = self.cloud
identity_client = clients.identity_client(cloud)
destructor_var = self.destructor_var
try:
self.user_id = self._user_id(cloud.credential.username)
self.role_id = self._role_id(cloud.admin_role)
self.tenant_id = _get_object_tenant_id(self.cloud, source_obj)
clients.retry(
identity_client.roles.add_user_role,
user=self.user_id, role=self.role_id, tenant=self.tenant_id,
expected_exceptions=[keystone_exceptions.Conflict])
self.destructor = EnsureAdminRoleDestructor(
self.location, self.user_id, self.role_id, self.tenant_id)
except keystone_exceptions.Conflict:
pass
return {
destructor_var: self.destructor
}
class RestoreQuotas(base.Destructor):
def __init__(self, location, admin_tenant_id, obj_tenant_id,
net_quota, compute_quota, storage_quota):
self.location = location
self.admin_tenant_id = admin_tenant_id
self.obj_tenant_id = obj_tenant_id
self.net_quota = net_quota
self.compute_quota = compute_quota
self.storage_quota = storage_quota
def get_signature(self):
return self.location, self.admin_tenant_id, self.obj_tenant_id
def run(self, cfg, migration):
cloud = cfg.clouds[getattr(migration, self.location)]
network_client = clients.network_client(cloud)
compute_client = clients.compute_client(cloud)
storage_client = clients.volume_client(cloud)
try:
if self.net_quota is None:
clients.retry(network_client.delete_quota,
self.admin_tenant_id)
else:
clients.retry(
network_client.update_quota, self.admin_tenant_id, {
'quota': {
'network': self.net_quota['network'],
'subnet': self.net_quota['subnet'],
'port': self.net_quota['port'],
}
})
except neutron_exceptions.NotFound:
pass
if self.compute_quota:
clients.retry(compute_client.quotas.update, self.admin_tenant_id,
**self.compute_quota)
if self.storage_quota:
clients.retry(storage_client.quotas.update, self.obj_tenant_id,
**self.storage_quota)
class SetUnlimitedQuotas(BaseSymmetricSingletonTask):
def __init__(self, cfg, migration, obj, location):
super(SetUnlimitedQuotas, self).__init__(cfg, migration, obj, location)
self.obj_tenant_id = None
with model.Session() as session:
self.admin_tenant_id = _get_admin_tenant_id(self.cloud, session)
def get_singleton_key(self, source_obj, *args, **kwargs):
return self.location, _get_object_tenant_id(self.cloud, source_obj)
def migrate(self, source_obj, *args, **kwargs):
self.obj_tenant_id = _get_object_tenant_id(self.cloud, source_obj)
net_quotas = self._set_network_quotas(self.admin_tenant_id)
compute_quotas = self._set_compute_quotas(self.admin_tenant_id)
storage_quotas = self._set_cinder_quotas(self.obj_tenant_id)
self.destructor = RestoreQuotas(
self.location, self.admin_tenant_id, self.obj_tenant_id,
net_quotas, compute_quotas, storage_quotas)
return {
self.destructor_var: self.destructor
}
def _set_network_quotas(self, tenant_id):
network_client = clients.network_client(self.cloud)
for quota in network_client.list_quotas(tenant_id=tenant_id)['quotas']:
if quota['tenant_id'] == tenant_id:
break
else:
quota = None
network_client.update_quota(tenant_id, {
'quota': {
'network': -1,
'subnet': -1,
'port': -1,
}
})
return quota
def _set_compute_quotas(self, tenant_id):
compute_client = clients.compute_client(self.cloud)
return self._set_quotas(compute_client, tenant_id, cores=-1, ram=-1,
injected_file_content_bytes=-1, instances=-1,
fixed_ips=-1)
def _set_cinder_quotas(self, tenant_id):
storage_client = clients.volume_client(self.cloud)
return self._set_quotas(storage_client, tenant_id, gigabytes=-1,
snapshots=-1, volumes=-1)
@staticmethod
def _set_quotas(client, tenant_id, **kwargs):
quotas = getattr(clients.retry(client.quotas.get, tenant_id), '_info')
original = {}
for item, value in kwargs.items():
if quotas[item] != value:
original[item] = quotas[item]
clients.retry(client.quotas.update, tenant_id, **kwargs)
return original
class VolumeMigrationFlowFactory(base.MigrationFlowFactory):
migrated_class = storage.Volume
def create_flow(self, cfg, migration, obj):
return [
SetUnlimitedQuotas(cfg, migration, obj, 'source'),
SetUnlimitedQuotas(cfg, migration, obj, 'destination'),
EnsureAdminRole(cfg, migration, obj, 'source'),
EnsureAdminRole(cfg, migration, obj, 'destination'),
FindOrCreateNetwork(cfg, migration, obj, 'source'),
FindOrCreateNetwork(cfg, migration, obj, 'destination'),
FindOrCreateFlavor(cfg, migration, obj, 'source'),
FindOrCreateFlavor(cfg, migration, obj, 'destination'),
FindOrUploadImage(cfg, migration, obj, 'source'),
FindOrUploadImage(cfg, migration, obj, 'destination'),
DetachMigratedVolume(cfg, migration, obj),
CreateVolume(cfg, migration, obj),
BootTransferVm(cfg, migration, obj, 'source'),
BootTransferVm(cfg, migration, obj, 'destination'),
AttachNodeLocalInterface(cfg, migration, obj, 'source'),
AttachNodeLocalInterface(cfg, migration, obj, 'destination'),
AttachSourceVolume(cfg, migration, obj),
AttachDestinationVolume(cfg, migration, obj),
TransferVolumeData(cfg, migration, obj),
DetachSourceVolume(cfg, migration, obj),
DetachDestinationVolume(cfg, migration, obj),
CleanupVms(cfg, migration, obj),
ReattachMigratedVolume(cfg, migration, obj),
base.RememberMigration(cfg, migration, obj),
]
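# A hedged usage sketch: the factory assembles the ordered task list above for a single
# volume. The parameter names below are illustrative; the surrounding migration engine
# is what actually constructs the factory and executes the tasks (and runs the collected
# destructors on failure or cleanup).
def _example_build_volume_flow(factory, cfg, migration, volume_obj):
    # `factory` is assumed to be a VolumeMigrationFlowFactory instance.
    tasks = factory.create_flow(cfg, migration, volume_obj)
    return [type(task).__name__ for task in tasks]  # e.g. ['SetUnlimitedQuotas', ...]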
def _random_mac():
mac = [0x00, 0x16, 0x3e,
random.randint(0x00, 0x7f),
random.randint(0x00, 0xff),
random.randint(0x00, 0xff)]
return ':'.join("%02x" % x for x in mac)
def _first_unused_address(cloud):
result = netaddr.IPNetwork(cloud.unused_network)
result.value += 1
return result
def _allocate_ip_address(cloud):
global _ip_counter
with LOCK:
result = netaddr.IPNetwork(cloud.unused_network)
result.value += _ip_counter
_ip_counter += 1
assert result in cloud.unused_network
return result
def _allocate_port(host, cloud):
with LOCK:
min_port, max_port = cloud.unused_port_range
used_host_ports = _used_ports.setdefault(host, set())
while True:
port = random.randint(min_port, max_port)
if port not in used_host_ports:
used_host_ports.add(port)
return port
else:
LOG.warning('Port %d already used on host %s in cloud %s, '
'generating new one', port, host, cloud.name)
def _get_private_key(rsa_key):
pkey = cStringIO.StringIO()
rsa_key.write_private_key(pkey)
return pkey.getvalue()
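def _example_generate_transfer_key():
    # A hedged sketch: paramiko.RSAKey.generate() produces a key object compatible with
    # _get_private_key() above. RSA1024_KEY, defined elsewhere in this module, is assumed
    # to be created in a similar way.
    import paramiko
    key = paramiko.RSAKey.generate(bits=1024)
    return _get_private_key(key)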
def _deploy_pkey(rexec):
key_path = rexec.run('mktemp').strip()
rexec.run('echo "{private_key}" > {key_path}; chmod 600 {key_path}',
private_key=_get_private_key(RSA1024_KEY),
key_path=key_path)
return key_path
def _wait_ip_accessible(cloud, rexec, ip_address):
waited = 0.0
while waited <= cloud.operation_timeout:
before = time.time()
try:
rexec.run('ping -c 1 -W 1 {ip_address}', ip_address=ip_address)
return
except remote.RemoteFailure:
after = time.time()
delta = after - before
if delta < 1.0:
delta = 1.0
time.sleep(1.0)
waited += delta
raise base.AbortMigration('VM couldn\'t be reached through %s', ip_address)
def _object_status_is(client, manager_name, obj_id, status):
manager = getattr(client, manager_name)
obj = clients.retry(manager.get, obj_id)
LOG.debug('Checking object %s is in status \'%s\': actual status \'%s\'',
obj_id, status.lower(), obj.status.lower())
if obj.status.lower() == status.lower():
return obj
elif obj.status.lower() == 'error':
raise base.AbortMigration('Object %s ended up in ERROR state', obj_id)
else:
return None
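# Usage sketch mirroring the call in FindOrUploadImage above: the helper is handed to
# clients.wait_for, which polls until the object reaches the requested status. The
# 'servers'/'ACTIVE' arguments below are illustrative.
def _example_wait_for_server_active(nova_client, server_id):
    return clients.wait_for(_object_status_is, nova_client, 'servers',
                            server_id, 'ACTIVE')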
def _object_is_deleted(client, manager, obj_id, expected_exception):
try:
manager_obj = getattr(client, manager)
clients.retry(manager_obj.get, obj_id,
expected_exceptions=[expected_exception])
return False
except expected_exception:
return True
def _scope(tenant_id):
return config.Scope(project_id=tenant_id,
project_name=None,
domain_id=None)
def _get_admin_tenant_id(cloud, session):
scope = cloud.scope
project_name = scope.project_name
if scope.project_id is not None:
return scope.project_id
elif project_name is not None:
for tenant in session.list(identity.Tenant, cloud):
if tenant.name.lower() == project_name.lower():
return tenant.object_id.id
raise base.AbortMigration(
'Unable to upload image: no admin tenant.')
def _get_object_tenant_id(cloud, obj):
tenant = obj.tenant
if tenant.object_id.cloud != cloud.name:
return tenant.find_link(cloud).primary_key.id
else:
return tenant.primary_key.id
def
# Leo colorizer control file for rebol mode.
# This file is in the public domain.
# Properties for rebol mode.
properties = {
"commentEnd": "}",
"commentStart": "comment {",
"indentCloseBrackets": "}])",
"indentOpenBrackets": "{[(",
"lineComment": ";",
"lineUpClosingBracket": "true",
"noWordSep": "_-",
"wordBreakChars": ",+-=<>/?^&*",
}
# Attributes dict for rebol_main ruleset.
rebol_main_attributes_dict = {
"default": "null",
"digit_re": "",
"escape": "\\",
"highlight_digits": "false",
"ignore_case": "true",
"no_word_sep": "",
}
# Dictionary of attributes dictionaries for rebol mode.
attributesDictDict = {
"rebol_main": rebol_main_attributes_dict,
}
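# A small illustrative lookup (the consumer below is hypothetical; Leo's colorizer is
# what actually reads these tables): mode properties and ruleset attributes are plain
# dicts keyed by name.
def _example_lookup_rebol_settings():
    attrs = attributesDictDict["rebol_main"]
    return properties["lineComment"], attrs["ignore_case"]  # (';', 'true')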
# Keywords dict for rebol_main ruleset.
rebol_main_keywords_dict = {
"?": "keyword2",
"??": "keyword2",
"about": "keyword1",
"abs": "keyword1",
"absolute": "keyword1",
"across": "keyword1",
"action!": "keyword3",
"action?": "keyword2",
"add": "keyword1",
"alert": "keyword1",
"alias": "keyword1",
"all": "keyword1",
"alter": "keyword1",
"and~": "keyword1",
"any": "keyword1",
"any-block!": "keyword3",
"any-block?": "keyword2",
"any-function!": "keyword3",
"any-function?": "keyword2",
"any-string!": "keyword3",
"any-string?": "keyword2",
"any-type!": "keyword3",
"any-type?": "keyword2",
"any-word!": "keyword3",
"any-word?": "keyword2",
"append": "keyword1",
"arccosine": "keyword1",
"arcsine": "keyword1",
"arctangent": "keyword1",
"array": "keyword1",
"ask": "keyword1",
"at": "keyword1",
"back": "keyword1",
"backcolor": "keyword1",
"basic-syntax-header": "keyword1",
"below": "keyword1",
"binary!": "keyword3",
"binary?": "keyword2",
"bind": "keyword1",
"bitset!": "keyword3",
"bitset?": "keyword2",
"block!": "keyword3",
"block?": "keyword2",
"boot-prefs": "keyword1",
"break": "keyword1",
"browse": "keyword1",
"build-tag": "keyword1",
"call": "keyword1",
"caret-to-offset": "keyword1",
"catch": "keyword1",
"center-face": "keyword1",
"change": "keyword1",
"change-dir": "keyword1",
"char!": "keyword3",
"char?": "keyword2",
"charset": "keyword1",
"checksum": "keyword1",
"choose": "keyword1",
"clean-path": "keyword1",
"clear": "keyword1",
"clear-fields": "keyword1",
"close": "keyword1",
"comment": "keyword1",
"complement": "keyword1",
"compose": "keyword1",
"compress": "keyword1",
"confine": "keyword1",
"confirm": "keyword1",
"connected?": "keyword2",
"context": "keyword1",
"copy": "keyword1",
"cosine": "keyword1",
"cp": "keyword1",
"crlf": "keyword1",
"crypt-strength?": "keyword2",
"cvs-date": "keyword1",
"cvs-version": "keyword1",
"datatype!": "keyword3",
"datatype?": "keyword2",
"date!": "keyword3",
"date?": "keyword2",
"debase": "keyword1",
"decimal!": "keyword3",
"decimal?": "keyword2",
"decode-cgi": "keyword1",
"decode-url": "keyword1",
"decompress": "keyword1",
"deflag-face": "keyword1",
"dehex": "keyword1",
"delete": "keyword1",
"demo": "keyword1",
"desktop": "keyword1",
"detab": "keyword1",
"dh-compute-key": "keyword1",
"dh-generate-key": "keyword1",
"dh-make-key": "keyword1",
"difference": "keyword1",
"dir?": "keyword2",
"dirize": "keyword1",
"disarm": "keyword1",
"dispatch": "keyword1",
"divide": "keyword1",
"do": "keyword1",
"do-boot": "keyword1",
"do-events": "keyword1",
"do-face": "keyword1",
"do-face-alt": "keyword1",
"does": "keyword1",
"dsa-generate-key": "keyword1",
"dsa-make-key": "keyword1",
"dsa-make-signature": "keyword1",
"dsa-verify-signature": "keyword1",
"dump-face": "keyword1",
"dump-pane": "keyword1",
"echo": "keyword1",
"editor": "keyword1",
"either": "keyword1",
"else": "keyword1",
"email!": "keyword3",
"email?": "keyword2",
"emailer": "keyword1",
"emit": "keyword1",
"empty?": "keyword2",
"enbase": "keyword1",
"entab": "keyword1",
"equal?": "keyword2",
"error!": "keyword3",
"error?": "keyword2",
"even?": "keyword2",
"event!": "keyword3",
"event?": "keyword2",
"exclude": "keyword1",
"exists-key?": "keyword2",
"exists-thru?": "keyword2",
"exists?": "keyword2",
"exit": "keyword1",
"exp": "keyword1",
"extract": "keyword1",
"false": "literal2",
"fifth": "keyword1",
"file!": "keyword3",
"file?": "keyword2",
"find": "keyword1",
"find-by-type": "keyword1",
"find-key-face": "keyword1",
"find-window": "keyword1",
"first": "keyword1",
"flag-face": "keyword1",
"flag-face?": "keyword2",
"flash": "keyword1",
"focus": "keyword1",
"font-fixed": "keyword1",
"font-sans-serif": "keyword1",
"font-serif": "keyword1",
"for": "keyword1",
"forall": "keyword1",
"foreach": "keyword1",
"forever": "keyword1",
"form": "keyword1",
"forskip": "keyword1",
"found?": "keyword2",
"fourth": "keyword1",
"free": "keyword1",
"func": "keyword1",
"function": "keyword1",
"function!": "keyword3",
"function?": "keyword2",
"get": "keyword1",
"get-modes": "keyword1",
"get-net-info": "keyword1",
"get-style": "keyword1",
"get-word!": "keyword3",
"get-word?": "keyword2",
"greater-or-equal?": "keyword2",
"greater?": "keyword2",
"guide": "keyword1",
"halt": "keyword1",
"has": "keyword1",
"hash!": "keyword3",
"hash?": "keyword2",
"head": "keyword1",
"head?": "keyword2",
"help": "keyword1",
"hide": "keyword1",
"hide-popup": "keyword1",
"if": "keyword1",
"image!": "keyword3",
"image?": "keyword2",
"import-email": "keyword1",
"in": "keyword1",
"in-window?": "keyword2",
"indent": "keyword1",
"index?": "keyword2",
"info?": "keyword2",
"inform": "keyword1",
"input": "keyword1",
"input?": "keyword2",
"insert": "keyword1",
"insert-event-func": "keyword1",
"inside?": "keyword2",
"integer!": "keyword3",
"integer?": "keyword2",
"intersect": "keyword1",
"issue!": "keyword3",
"issue?": "keyword2",
"join": "keyword1",
"last": "keyword1",
"launch": "keyword1",
"launch-thru": "keyword1",
"layout": "keyword1",
"length?": "keyword2",
"lesser-or-equal?": "keyword2",
"lesser?": "keyword2",
"library!": "keyword3",
"library?": "keyword2",
"license": "keyword1",
"link-app?": "keyword2",
"link?": "keyword2",
"list!": "keyword3",
"list-dir": "keyword1",
"list-words": "keyword1",
"list?": "keyword2",
"lit-path!": "keyword3",
"lit-path?": "keyword2",
"lit-word!": "keyword3",
"lit-word?": "keyword2",
"load": "keyword1",
"load-image": "keyword1",
"load-prefs": "keyword1",
"load-thru": "keyword1",
"log-10": "keyword1",
"log-2": "keyword1",
"log-e": "keyword1",
"logic!": "keyword3",
"logic?": "keyword2",
"loop": "keyword1",
"lowercase": "keyword1",
"make": "keyword1",
"make-dir": "keyword1",
"make-face": "keyword1",
"max": "keyword1",
"maximum": "keyword1",
"maximum-of": "keyword1",
"min": "keyword1",
"minimum": "keyword1",
"minimum-of": "keyword1",
"modified?": "keyword2",
"mold": "keyword1",
"money!": "keyword3",
"money?": "keyword2",
"multiply": "keyword1",
"native!": "keyword3",
"native?": "keyword2",
"negate": "keyword1",
"negative?": "keyword2",
"net-error": "keyword1",
"next": "keyword1",
"none": "keyword1",
"none!": "keyword3",
"none?": "keyword2",
"not": "keyword1",
"not-equal?": "keyword2",
"now": "keyword1",
"number!": "keyword3",
"number?": "keyword2",
"object!": "keyword3",
"object?": "keyword2",
"odd?": "keyword2",
"offset-to-caret": "keyword1",
"offset?": "keyword2",
"op!": "keyword3",
"op?": "keyword2",
"open": "keyword1",
"open-events": "keyword1",
"origin": "keyword1",
"or~": "keyword1",
"outside?": "keyword2",
"outstr": "keyword1",
"pad": "keyword1",
"pair!": "keyword3",
"pair?": "keyword2",
"paren!": "keyword3",
"paren?": "keyword2",
"parse": "keyword1",
"parse-email-addrs": "keyword1",
"parse-header": "keyword1",
"parse-header-date": "keyword1",
"parse-xml": "keyword1",
"path!": "keyword3",
"path-thru": "keyword1",
"path?": "keyword2",
"pick": "keyword1",
"poke": "keyword1",
"port!": "keyword3",
"port?": "keyword2",
"positive?": "keyword2",
"power": "keyword1",
"prin": "keyword1",
"print": "keyword1",
"probe": "keyword1",
"protect": "keyword1",
"protect-system": "keyword1",
"q": "keyword1",
"query": "keyword1",
"quit": "keyword1",
"random": "keyword1",
"read": "keyword1",
"read-io": "keyword1",
"read-net": "keyword1",
"read-thru": "keyword1",
"reboot": "keyword1",
"recycle": "keyword1",
"reduce": "keyword1",
"refinement!": "keyword3",
"refinement?": "keyword2",
"reform": "keyword1",
"rejoin": "keyword1",
"remainder": "keyword1",
"remold": "keyword1",
"remove": "keyword1",
"remove-event-func": "keyword1",
"rename": "keyword1",
"repeat": "keyword1",
"repend": "keyword1",
"replace": "keyword1",
"request": "keyword1",
"request-color": "keyword1",
"request-date": "keyword1",
"request-download": "keyword1",
"request-file": "keyword1",
"request-list": "keyword1",
"request-pass": "keyword1",
"request-text": "keyword1",
"resend": "keyword1",
"return": "keyword1",
"reverse": "keyword1",
"routine!": "keyword3",
"routine?": "keyword2",
"rsa-encrypt": "keyword1",
"rsa-generate-key": "keyword1",
"rsa-make-key": "keyword1",
"same?": "keyword2",
"save": "keyword1",
"save-prefs": "keyword1",
"save-user": "keyword1",
"screen-offset?": "keyword2",
"script?": "keyword2",
"scroll-para": "keyword1",
"second": "keyword1",
"secure": "keyword1",
"select": "keyword1",
"self": "literal2",
"send": "keyword1",
"sense": "keyword1",
"series!": "keyword3",
"series?": "keyword2",
"set": "keyword1",
"set-font": "keyword1",
"set-modes": "keyword1",
"set-net": "keyword1",
"set-para": "keyword1",
"set-path!": "keyword3",
"set-path?": "keyword2",
"set-style": "keyword1",
"set-user": "keyword1",
"set-user-name": "keyword1",
"set-word!": "keyword3",
"set-word?": "keyword2",
"show": "keyword1",
"show-popup": "keyword1",
"sine": "keyword1",
"size": "keyword1",
"size-text": "keyword1",
"size?": "keyword2",
"skip": "keyword1",
"sort": "keyword1",
"source": "keyword1",
"space": "keyword1",
"span?": "keyword2",
"split-path": "keyword1",
"square-root": "keyword1",
"strict-equal?": "keyword2",
"strict-not-equal?": "keyword2",
"string!": "keyword3",
"string?": "keyword2",
"struct!": "keyword3",
"struct?": "keyword2",
"style": "keyword1",
"styles": "keyword1",
"stylize": "keyword1",
"subtract": "keyword1",
"switch": "keyword1",
"symbol!": "keyword3",
"tabs": "keyword1",
"tag!": "keyword3",
"tag?": "keyword2",
"tail": "keyword1",
"tail?": "keyword2",
"tangent": "keyword1",
"textinfo": "keyword1",
"third": "keyword1",
"throw": "keyword1",
"throw-on-error": "keyword1",
"time!": "keyword3",
"time?": "keyword2",
"to": "keyword1",
"to-binary": "keyword1",
"to-bitset": "keyword1",
"to-block": "keyword1",
"to-char": "keyword1",
"to-date": "keyword1",
"to-decimal": "keyword1",
"to-email": "keyword1",
"to-event": "keyword1",
"to-file": "keyword1",
"to-get-word": "keyword1",
"to-hash": "keyword1",
"to-hex": "keyword1",
"to-idate": "keyword1",
"to-image": "keyword1",
"to-integer": "keyword1",
"to-issue": "keyword1",
"to-list": "keyword1",
"to-lit-path": "keyword1",
"to-lit-word": "keyword1",
"to-local-file": "keyword1",
"to-logic": "keyword1",
"to-money": "keyword1",
"to-none": "keyword1",
"to-pair": "keyword1",
"to-paren": "keyword1",
"to-path": "keyword1",
"to-rebol-file": "keyword1",
"to-refinement": "keyword1",
"to-set-path": "keyword1",
"to-set-word": "keyword1",
"to-string": "keyword1",
"to-tag": "keyword1",
"to-time": "keyword1",
"to-tuple": "keyword1",
"to-url": "keyword1",
"to-word": "keyword1",
"trace": "keyword1",
"trim": "keyword1",
"true": "literal2",
"try": "keyword1",
"tuple!": "keyword3",
"tuple?": "keyword2",
"type?": "keyword2",
"unfocus": "keyword1",
"uninstall": "keyword1",
"union": "keyword1",
"unique": "keyword1",
"unprotect": "keyword1",
"unset": "keyword1",
"unset!": "keyword3",
"unset?": "keyword2",
"until": "keyword1",
"unview": "keyword1",
"update": "keyword1",
"upgrade": "keyword1",
"uppercase": "keyword1",
"url!": "keyword3",
"url?": "keyword2",
"usage": "keyword1",
"use": "keyword1",
"val": "keyword1",
"value": "keyword1",
"value?": "keyword2",
"vbug": "keyword1",
"view": "keyword1",
"view-install": "keyword1",
"view-prefs": "keyword1",
"view?": "keyword2",
"viewed?": "keyword2",
"wait": "keyword1",
"what": "keyword1",
"what-dir": "keyword1",
"while": "keyword1",
"win-offset?": "keyword2",
"within?": "keyword2",
"word!": "keyword3",
"word?": "keyword2",
"write": "keyword1",
"write-io": "keyword1",
"write-user": "keyword1",
"xor~": "keyword1",
"zero?": "keyword2",
}
# Dictionary of keywords dictionaries for rebol mode.
keywordsDictDict = {
"rebol_main": rebol_main_keywords_dict,
}
# Rules for rebol_main ruleset.
def rebol_rule0(colorer, s, i):
return colorer.match_span(s, i, kind="comment1", begin="comment {", end="}",
at_line_start=False, at_whitespace_end=False, at_word_start=False,
delegate="",exclude_match=False,
no_escape=False, no_line_break=False, no_word_break=False)
def rebol_rule1(colorer, s, i):
return colorer.match_span(s, i, kind="comment1", begin="comment{", end="}",
at_line_start=False, at_whitespace_end=False, at_word_start=False,
delegate="",exclude_match=False,
no_escape=False, no_line_break=False, no_word_break=False)
def rebol_rule2(colorer, s, i):
return colorer.match_span(s, i, kind="literal1", begin="\"", end="\"",
at_line_start=False, at_whitespace_end=False, at_word_start=False,
delegate="",exclude_match=False,
no_escape=False, no_line_break=True, no_word_break=False)
def rebol_rule3(colorer, s, i):
return colorer.match_span(s, i, kind="literal1", begin="{", end="}",
at_line_start=False, at_whitespace_end=False, at_word_start=False,
delegate="",exclude_match=False,
no_escape=False, no_line_break=False, no_word_break=False)
def rebol_rule4(colorer, s, i):
return colorer.match_eol_span(s, i, kind="comment1", seq=";",
at_line_start=False, at_whitespace_end=False, at_word_start=False,
delegate="", exclude_match=False)
def rebol_rule5(colorer, s, i):
return colorer.match_seq(s, i, kind="operator", seq="=",
at_line_start=False, at_whitespace_end=False, at_word_start=False, delegate="")
def rebol_rule6(colorer, s, i):
return colorer.match_seq(s, i, kind="operator", seq=">=",
at_line_start=False, at_whitespace_end=False, at_word_start=False, delegate="")
def rebol_rule7(colorer, s, i):
return colorer.match_seq(s, i, kind="operator", seq="<=",
at_line_start=False, at_whitespace_end=False, at_word_start=False, delegate="")
def rebol_rule8(colorer, s, i):
return colorer.match_seq(s, i, kind="operator", seq="<>",
at_line_start=False, at_whitespace_end=False, at_word_start=False, delegate="")
def rebol_rule9(colorer, s, i):
return colorer.match_seq(s, i, kind="operator", seq="+",
at_line_start=False, at_whitespace_end=False, at_word_start=False, delegate="")
def rebol_rule10(colorer, s, i):
return colorer.match_seq(s, i, kind="operator", seq="/",
at_line_start=False, at_whitespace_end=False, at_word_start=False, delegate="")
def rebol_rule11(colorer, s, i):
return colorer.match_seq(s, i, kind="operator", seq="*",
at_line_start=False, at_whitespace_end=False, at_word_start=False, delegate="")
def rebol_rule12(colorer, s, i):
return colorer.match_seq(s, i, kind="operator", seq=">",
at_line_start=False, at_whitespace_end=False, at_word_start=False, delegate="")
def rebol_rule13(colorer, s, i):
return colorer.match_seq(s, i, kind="operator", seq="<",
at_line_start=False, at_whitespace_end=False, at_word_start=False, delegate="")
def rebol_rule14(colorer, s, i):
return colorer.match_mark_following(s, i, kind="literal2", pattern="'",
at_line_start=False, at_whitespace_end=False, at_word_start=False, exclude_match=True)
def rebol_rule15(colorer, s, i):
return colorer.match_keywords(s, i)
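# Hedged note (not part of the generated mode file): rulesDict1 below keys the
# first character at the scan position to the list of rule functions to try in
# order; a minimal dispatcher over such a table might look like the sketch in
# the comments here. The real Leo colorizer supplies its own matching loop, so
# this is only to show how the table is read.
#
# def apply_rules(colorer, s, i, rules_dict):
#     for rule in rules_dict.get(s[i], []):
#         n = rule(colorer, s, i)
#         if n:  # a truthy return is assumed to mean the rule matched
#             return n
#     return 0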
# Rules dict for rebol_main ruleset.
rulesDict1 = {
"!": [rebol_rule15,],
"\"": | |
# connect to Oracle
(pdb,dbcur,deftDB) = connectJEDI('R','jedi')
t_table_DEFT = "%s.%s"%(deftDB,deft_conf.daemon['t_production_task'])
t_table_JEDI = "%s.%s"%(deftDB,deft_conf.daemon['t_task'])
t_table_projects= "%s.%s"%(deftDB,deft_conf.daemon['t_projects'])
i = 0
jedi_task_params = ''
for p in user_task_list :
jedi_tid = p['taskid']
#--sql = "SELECT dbms_lob.substr(jedi_task_parameters,80000,1) FROM %s WHERE taskid=%s"%(t_table_JEDI,jedi_tid)
#--sql = "SELECT dbms_lob.substr(jedi_task_parameters) FROM %s WHERE taskid=%s"%(t_table_JEDI,jedi_tid)
sql = "SELECT jedi_task_parameters FROM %s WHERE taskid=%s"%(t_table_JEDI,jedi_tid)
if verbose == True : print sql
tasksJEDI_CLOB = DButils.QueryAll(pdb,sql)
for t in tasksJEDI_CLOB :
task_param = t[0]
task_param_dict = json.loads(str(task_param))
Skip_Record = False
for jtn in jedi_task_names :
param = task_param_dict[jtn]
jtnC = jtn.upper()
if jtnC == 'TASKPRIORITY' :
deft_task_params[i]['CURRENT_PRIORITY'] = param
else :
deft_task_params[i][jtnC] = param
deft_task_params[i]['TASKID'] = jedi_tid
deft_task_params[i]['PARENT_TID'] = jedi_tid
deft_task_params[i]['CHAIN_TID'] = jedi_tid
deft_task_params[i]['STATUS'] = p['status']
if p['start_time'] != 'None' : deft_task_params[i]['START_TIME'] = p['start_time']
if p['submit_time']!= 'None' : deft_task_params[i]['SUBMIT_TIME']= p['submit_time']
if p['total_done_jobs'] != None : deft_task_params[i]['TOTAL_DONE_JOBS'] = p['total_done_jobs']
if p['total_events'] != None : deft_task_params[i]['TOTAL_EVENTS'] = p['total_events']
jj = deft_task_params[i]['TASKNAME'].split('.')
user_project_name = jj[0]
# form insert string
deft_names_0 = "TASKID,STEP_ID,PR_ID,PARENT_TID,TASKNAME,PROJECT,STATUS,TOTAL_EVENTS,TOTAL_REQ_JOBS,TOTAL_DONE_JOBS,"
deft_names_1 = "SUBMIT_TIME, START_TIME,TIMESTAMP"
if deft_task_params[i]['TOTAL_REQ_JOBS'] == 0 :
if deft_task_params[i]['TOTAL_DONE_JOBS'] > 0 :
deft_task_params[i]['TOTAL_REQ_JOBS'] = deft_task_params[i]['TOTAL_DONE_JOBS']
sql = "INSERT INTO %s "%(t_table_DEFT)
sqlN= "(%s "%(deft_names_0)
sqlV = "VALUES(%s,%s,%s,%s,'%s',"%\
(deft_task_params[i]['TASKID'],user_task_step_id,user_task_request_id,\
deft_task_params[i]['TASKID'],deft_task_params[i]['TASKNAME'])
sqlV+="'%s','%s',%s,%s,%s,"%\
('user',deft_task_params[i]['STATUS'],deft_task_params[i]['TOTAL_EVENTS'],\
deft_task_params[i]['TOTAL_REQ_JOBS'],deft_task_params[i]['TOTAL_DONE_JOBS'])
if deft_task_params[i]['SUBMIT_TIME'] != 'None' :
sqlN += "SUBMIT_TIME,"
sqlV += "TO_TIMESTAMP('%s','YYYY-MM-DD HH24:MI:SS'),"%(deft_task_params[i]['SUBMIT_TIME'])
if deft_task_params[i]['START_TIME'] != 'None' and deft_task_params[i]['START_TIME'] != None:
sqlN += "START_TIME,"
sqlV += "TO_TIMESTAMP('%s','YYYY-MM-DD HH24:MI:SS'),"%(deft_task_params[i]['START_TIME'])
sqlN += "TIMESTAMP,"
sqlV += "current_timestamp,"
sqlN += "VO,PRODSOURCELABEL,USERNAME,CURRENT_PRIORITY,PRIORITY,CHAIN_TID,BUG_REPORT) "
sqlV += "'%s','%s','%s', %s,%s,%s,%s)"%\
(deft_task_params[i]['VO'],'user',deft_task_params[i]['USERNAME'],\
deft_task_params[i]['CURRENT_PRIORITY'],deft_task_params[i]['TASKPRIORITY'],\
deft_task_params[i]['CHAIN_TID'],-1)
sql += sqlN
sql += sqlV
#-
# # and insert the same string into t_production_task_listpart
# sqlP = sql.replace(t_table_DEFT,t_table_DEFT_P)
# print sqlP
#-
# check project
project_found = False
for proj in projects_list :
if proj == user_project_name :
project_found = True
break
if project_found : break
if project_found == False : projects_list.append(user_project_name)
sql_update.append(sql)
i += 1
DButils.closeDB(pdb,dbcur)
if dbupdate == True :
timenow = int(time.time())
(pdb,dbcur,deftDB) = connectDEFT('W')
# insert new projects (if any)
for tp in projects_list :
sql = "SELECT distinct project FROM %s ORDER by project"%(t_table_projects)
print sql
task_projects = DButils.QueryAll(pdb,sql)
project_found = False
for td in task_projects :
t_project = td[0]
if t_project == tp :
project_found = True
if project_found : break
if project_found == False :
print "INFO.SynchronizeJediDeftTasks. New project %s. Insert it into %s"%(tp,t_table_projects)
sql = "INSERT INTO %s (PROJECT,BEGIN_TIME,END_TIME,STATUS,TIMESTAMP) "%(t_table_projects)
sql+= "VALUES('%s',%s,%s,'%s',%s)"%(tp,timenow,timenow+10*365*24*60*60,'active',timenow)
print sql
sql_update.append(sql)
for sql in sql_update :
print sql
DButils.QueryUpdate(pdb,sql)
DButils.QueryCommit(pdb)
DButils.closeDB(pdb,dbcur)
def synchronizeJediDeftDatasets () :
#
# get list of all tasks updated in 12h
#
timeInterval = DATASET_SYNCH_INTERVAL # hours
JEDI_datasets_final_states = ''
for s in JEDI_datasets_final_statesL :
JEDI_datasets_final_states += "'%s',"%(s)
JEDI_datasets_final_states = JEDI_datasets_final_states[0:(len(JEDI_datasets_final_states)-1)]
DEFT_datasets_final_states = ''
for s in DEFT_datasets_final_statesL :
DEFT_datasets_final_states += "'%s',"%(s)
DEFT_datasets_final_states = DEFT_datasets_final_states[0:(len(DEFT_datasets_final_states)-1)]
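# The two loops above build comma-separated, quoted lists for SQL IN (...)
# clauses by concatenation and then trimming the trailing comma. An equivalent,
# more idiomatic construction (sketch only, not used below) would be:
#
# JEDI_datasets_final_states = ",".join("'%s'" % s for s in JEDI_datasets_final_statesL)
# DEFT_datasets_final_states = ",".join("'%s'" % s for s in DEFT_datasets_final_statesL)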
# connect to Oracle
(pdb,dbcur,deftDB) = connectDEFT('R')
t_table_DEFT = "%s.%s"%(deftDB,deft_conf.daemon['t_production_task'])
t_table_datasets_DEFT = "%s.%s"%(deftDB,deft_conf.daemon['t_production_dataset'])
sql = "SELECT taskid, status, phys_group, timestamp, project, username, pr_id FROM %s "%(t_table_DEFT)
sql+= "WHERE TIMESTAMP > current_timestamp - %s AND taskid >= %s "%(timeInterval,MIN_DEFT_TASK_ID)
sql+= "ORDER BY taskid"
print sql
tasksDEFT = DButils.QueryAll(pdb,sql)
print "%s DEFT tasks match to the criteria"%(len(tasksDEFT))
if len(tasksDEFT) > 0 :
minTaskID = -1
maxTaskID = -1
sql = "SELECT min(taskid),max(taskid) FROM %s "%(t_table_DEFT)
sql +="WHERE TIMESTAMP > current_timestamp - %s AND taskid >= %s "%(timeInterval,MIN_DEFT_TASK_ID)
print sql
MMtasks = DButils.QueryAll(pdb,sql)
for t in MMtasks :
minTaskID = t[0]
maxTaskID = t[1]
print "INFO. Check datasets produced by %s - %s tasks"%(minTaskID,maxTaskID)
sql = "SELECT taskid, name, status, phys_group, timestamp "
sql+= "FROM %s WHERE taskid >= %s and taskid <= %s "%(t_table_datasets_DEFT,minTaskID,maxTaskID)
sql += "ORDER BY taskid"
datasetsDEFT = DButils.QueryAll(pdb,sql)
DButils.closeDB(pdb,dbcur)
sql_update = []
if len(tasksDEFT) > 0 and len(datasetsDEFT) > 0 :
# step #1 : synchronize DEFT t_production_task and t_production_dataset content
for t in tasksDEFT :
t_tid = t[0]
t_status = t[1]
t_phys_group = t[2]
t_project = t[4]
t_owner = t[5]
t_reqid = t[6]
if verbose : print "INFO. check status %s"%(t_status)
if task_aborted_states.find(t_status) >= 0 :
for d in datasetsDEFT :
d_tid = d[0]
if d_tid == t_tid :
d_status = d[2]
if d_status == None or d_status=='None' : d_status='unknown'
if datasets_deletion_states.find(d_status) < 0 :
sql = "UPDATE %s SET status='toBeDeleted',timestamp=current_timestamp,pr_id=%s WHERE taskid=%s"%\
(t_table_datasets_DEFT,t_reqid,t_tid)
sql_update.append(sql)
break
elif d_status == 'unknown' :
sql= "UPDATE %s SET status='%s',TIMESTAMP=current_timestamp, pr_id=%s WHERE taskid=%s"\
(t_table_datasets_DEFT,t_status,t_tid,t_reqid)
sql_update.append(sql)
elif d_tid > t_tid :
print "WARNING. Cannot find dataset in %s for task %s (project: %s)"%\
(t_table_datasets_DEFT,t_tid,t_project)
break
if len(sql_update) :
print "INFO. synchronizeJediDeftDatasets. Step1 : Start database update"
(pdb,dbcur,deftDB) = connectDEFT('W')
for sql in sql_update :
if verbose : print sql
DButils.QueryUpdate(pdb,sql)
DButils.QueryCommit(pdb)
DButils.closeDB(pdb,dbcur)
#step #2. synchronize DEFT t_production_dataset and JEDI atlas_panda.jedi_datasets content
#connect to JEDI and get list of production datasets
#
# form DEFT datasets list
#(pdb,dbcur,deftDB) = connectDEFT('R')
#sql = "SELECT taskid,status FROM %s WHERE status IN (%s) "%(t_table_datasets_DEFT, DEFT_datasets_final_states)
#sql+= "AND (taskid >= %s and taskid <= %s) ORDER BY taskid "%(minTaskID,maxTaskID)
#if verbose : print sql
#datasetsDEFT = DButils.QueryAll(pdb,sql)
#DButils.closeDB(pdb,dbcur)
# get JEDI datasets list
sql_update = []
(pdb,dbcur,jediDB) = connectPandaJEDI('R')
t_table_datasets_JEDI = "%s.%s"%(jediDB,deft_conf.daemon['t_jedi_datasets'])
sql = "SELECT jeditaskid, datasetname, status, nfilesfinished, nevents, creationtime, frozentime "
sql+= "FROM %s "%(t_table_datasets_JEDI)
sql+= "WHERE jeditaskid >= %s AND jeditaskid <= %s "%(minTaskID,maxTaskID)
sql+= "AND datasetname NOT LIKE '%s' "%('user%')
sql+= "AND status IN (%s) "%(JEDI_datasets_final_states)
sql+= "ORDER BY jeditaskid"
print sql
datasetsJEDI = DButils.QueryAll(pdb,sql)
DButils.closeDB(pdb,dbcur)
for d in datasetsDEFT :
d_tid = d[0]
d_name = d[1]
if d[2] == None :
d_status = 'unknown'
else :
d_status = d[2]
d_phys_group = d[3]
found = False
for j in datasetsJEDI :
j_tid = j[0]
j_name = j[1]
j_status = j[2]
if d_tid == j_tid :
if d_name == j_name :
try :
j_nfiles = int(j[3])
except :
j_nfiles = 0
try :
j_nevents = int(j[4])
except :
j_nevents = 0
found = True
if j_status != d_status :
if DEFT_datasets_final_states.find(d_status) < 0 :
if DEFT_datasets_postproduction_states.find(d_status) < 0 :
sql = "UPDATE %s "%(t_table_datasets_DEFT)
sql+= "SET EVENTS = %s, FILES = %s, STATUS = '%s', "%(j_nevents, j_nfiles, j_status)
sql+= "TIMESTAMP = current_timestamp "
sql+= "WHERE taskid = %s AND name = '%s' "%(d_tid,d_name)
print sql
sql_update.append(sql)
else :
if verbose :
print "Task : ",j_tid,d_tid
print "DEFT : ",d_name
print "JEDI : ",j_name
else :
print "INFO. dataset : ",d_name
print "DEFT state : %s, JEDI state : %s"%(d_status,j_status)
print "NO %s update. DEFT dataset state is final"%(t_table_datasets_DEFT)
elif j_tid > d_tid :
print "INFO. No JEDI dataset for task %s with status in (%s)"%(d_tid,JEDI_datasets_final_states)
break
# update database
if len(sql_update) :
(pdb,dbcur,deftDB) = connectDEFT('W')
print "INFO. synchronizeJediDeftDatasets. Step2 : Start database update"
for sql in sql_update :
if verbose : print sql
DButils.QueryUpdate(pdb,sql)
DButils.QueryCommit(pdb)
DButils.closeDB(pdb,dbcur)
else :
print "INFO. No tasks or/and datasets match to time interval"
def synchronizeJediDeftTasks() :
#
# read task information from t_task and update t_production_tasks accordingly
#
user_task_list = []
user_task_params = {'taskid' : -1,'total_done_jobs':-1,'status' :'','submit_time' : -1, 'start_time' : 'None',\
'priority' : '-1','total_req_jobs':-1, 'total_events':0}
updateIntervalHours = TASK_SYNCH_INTERVAL
timeIntervalOracleHours = "%s/%s"%(updateIntervalHours,24)
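# Oracle date arithmetic works in days, so the hourly interval is passed to the
# SQL below as the expression "<hours>/24" and subtracted from current_timestamp.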
post_production_status = ['aborted','obsolete']
running_status = ['running','submitted','submitting','registered','assigned']
end_status = ['done','failed','finished','broken']
# connect to Oracle
(pdb,dbcur,deftDB) = connectDEFT('R')
t_table_DEFT = "%s.%s"%(deftDB,deft_conf.daemon['t_production_task'])
t_table_JEDI = "%s.%s"%(deftDB,deft_conf.daemon['t_task'])
sql_select = "SELECT taskid, status,total_req_jobs,total_done_jobs,submit_time, start_time, current_priority,total_events "
sql = sql_select
sql += "FROM %s WHERE timestamp > current_timestamp - %s "%(t_table_DEFT,timeIntervalOracleHours)
sql += "AND taskid > %s "%(MIN_DEFT_TASK_ID)
sql += "ORDER by taskid"
print sql
tasksDEFT = DButils.QueryAll(pdb,sql)
DButils.closeDB(pdb,dbcur)
print "%s DEFT tasks match to the criteria"%(len(tasksDEFT))
(pdb,dbcur,deftDB) = connectJEDI('R','jedi')
sql_select = "SELECT taskid, status,total_done_jobs,submit_time, start_time, prodsourcelabel,"
sql_select+= "priority,current_priority, taskname, total_req_jobs, total_events "
sql = sql_select
sql += "FROM %s WHERE timestamp > current_timestamp - %s "%(t_table_JEDI,timeIntervalOracleHours)
sql += "AND taskid > %s "%(MIN_DEFT_TASK_ID)
sql += "ORDER by taskid"
print sql
tasksJEDI = DButils.QueryAll(pdb,sql)
print "%s JEDI tasks match to the criteria"%(len(tasksJEDI))
DButils.closeDB(pdb,dbcur)
sql_update_deft = []
for tj in tasksJEDI :
tj_tid = tj[0]
tj_status = tj[1]
tj_done = tj[2]
if tj_done == None : tj_done = 0
tj_submit = tj[3]
tj_start = tj[4]
csv = luigi.Parameter(description="Path to overall_machine_util.raw.csv [output from UtilTask]")
directory = luigi.Parameter(description="Output directory", default=".")
x_type = param_x_type
y_title = luigi.Parameter(description="y-axis title", default=None)
suffix = luigi.Parameter(description="Add suffix to output files: MachineGPUUtil.{suffix}.{ext}", default=None)
# Plot attrs
rotation = luigi.FloatParameter(description="x-axis title rotation", default=45.)
width = luigi.FloatParameter(description="Width of plot in inches", default=None)
height = luigi.FloatParameter(description="Height of plot in inches", default=None)
# optional.
cupti_overhead_json = param_cupti_overhead_json_optional
LD_PRELOAD_overhead_json = param_LD_PRELOAD_overhead_json_optional
python_annotation_json = param_python_annotation_json_optional
python_clib_interception_tensorflow_json = param_python_clib_interception_tensorflow_json_optional
python_clib_interception_simulator_json = param_python_clib_interception_simulator_json_optional
debug = param_debug
debug_single_thread = param_debug_single_thread
debug_perf = param_debug_perf
line_numbers = param_line_numbers
# algo_env_from_dir = luigi.BoolParameter(description="Add algo/env columns based on directory structure of --rlscope-directories <algo>/<env>/rlscope_dir", default=True, parsing=luigi.BoolParameter.EXPLICIT_PARSING)
skip_output = False
def requires(self):
return []
def output(self):
return []
def run(self):
setup_logging_from_self(self)
kwargs = kwargs_from_task(self)
self.dumper = UtilPlot(**kwargs)
self.dumper.run()
class TrainingProgressTask(luigi.Task):
rlscope_directories = luigi.ListParameter(description="Multiple --rlscope-directory entries for finding overlap_type files: *.venn_js.js")
directory = luigi.Parameter(description="Output directory", default=".")
# suffix = luigi.Parameter(description="Add suffix to output files: TrainingProgress.{suffix}.{ext}", default=None)
debug = param_debug
debug_single_thread = param_debug_single_thread
debug_perf = param_debug_perf
line_numbers = param_line_numbers
algo_env_from_dir = luigi.BoolParameter(description="Add algo/env columns based on directory structure of --rlscope-directories <algo>/<env>/rlscope_dir", default=True, parsing=luigi.BoolParameter.EXPLICIT_PARSING)
baseline_config = luigi.Parameter(description="The baseline configuration to compare all others against; default: config_uninstrumented", default=None)
ignore_phase = luigi.BoolParameter(description="Bug workaround: for training progress files that didn't record phase, just ignore it.", default=False, parsing=luigi.BoolParameter.EXPLICIT_PARSING)
skip_output = False
def requires(self):
return []
def output(self):
return []
def run(self):
setup_logging_from_self(self)
kwargs = kwargs_from_task(self)
self.dumper = TrainingProgressParser(**kwargs)
self.dumper.run()
class ProfilingOverheadPlotTask(luigi.Task):
csv = luigi.Parameter(description="Path to overall_machine_util.raw.csv [output from UtilTask]")
directory = luigi.Parameter(description="Output directory", default=".")
x_type = param_x_type
y_title = luigi.Parameter(description="y-axis title", default='Total training time (seconds)')
suffix = luigi.Parameter(description="Add suffix to output files: MachineGPUUtil.{suffix}.{ext}", default=None)
stacked = luigi.BoolParameter(description="Make stacked bar-plot", default=False, parsing=luigi.BoolParameter.EXPLICIT_PARSING)
preset = luigi.Parameter(description="preset configuration for plot bar order and plot labels", default=None)
# Plot attrs
rotation = luigi.FloatParameter(description="x-axis title rotation", default=45.)
width = luigi.FloatParameter(description="Width of plot in inches", default=None)
height = luigi.FloatParameter(description="Height of plot in inches", default=None)
debug = param_debug
debug_single_thread = param_debug_single_thread
debug_perf = param_debug_perf
line_numbers = param_line_numbers
# algo_env_from_dir = luigi.BoolParameter(description="Add algo/env columns based on directory structure of --rlscope-directories <algo>/<env>/rlscope_dir", default=True, parsing=luigi.BoolParameter.EXPLICIT_PARSING)
skip_output = False
def requires(self):
return []
def output(self):
return []
def run(self):
setup_logging_from_self(self)
kwargs = kwargs_from_task(self)
self.dumper = ProfilingOverheadPlot(**kwargs)
self.dumper.run()
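# Hedged usage sketch (not part of this module): tasks like
# ProfilingOverheadPlotTask are normally scheduled through luigi rather than
# instantiated directly; a minimal local invocation could look like the
# commented example below, where the csv/directory values are placeholders.
#
# luigi.build(
#     [ProfilingOverheadPlotTask(csv="overall_machine_util.raw.csv", directory=".")],
#     local_scheduler=True,
# )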
class ExtrapolatedTrainingTimeTask(IMLTaskDB):
dependency = luigi.Parameter(description="JSON file containing Hard-coded computational dependencies A.phase -> B.phase", default=None)
algo_env_from_dir = luigi.BoolParameter(description="Add algo/env columns based on directory structure of --rlscope-directories <algo>/<env>/rlscope_dir", default=True, parsing=luigi.BoolParameter.EXPLICIT_PARSING)
# x_type = param_x_type
# y_title = luigi.Parameter(description="y-axis title", default='Total training time (seconds)')
# suffix = luigi.Parameter(description="Add suffix to output files: MachineGPUUtil.{suffix}.{ext}", default=None)
# stacked = luigi.BoolParameter(description="Make stacked bar-plot", default=False, parsing=luigi.BoolParameter.EXPLICIT_PARSING)
# preset = luigi.Parameter(description="preset configuration for plot bar order and plot labels", default=None)
#
# # Plot attrs
# rotation = luigi.FloatParameter(description="x-axis title rotation", default=45.)
# width = luigi.FloatParameter(description="Width of plot in inches", default=None)
# height = luigi.FloatParameter(description="Height of plot in inches", default=None)
# algo_env_from_dir = luigi.BoolParameter(description="Add algo/env columns based on directory structure of --rlscope-directories <algo>/<env>/rlscope_dir", default=True, parsing=luigi.BoolParameter.EXPLICIT_PARSING)
# Needed by mk_SQL_tasks
cupti_overhead_json = param_cupti_overhead_json
LD_PRELOAD_overhead_json = param_LD_PRELOAD_overhead_json
python_annotation_json = param_python_annotation_json
python_clib_interception_tensorflow_json = param_python_clib_interception_tensorflow_json
python_clib_interception_simulator_json = param_python_clib_interception_simulator_json
def requires(self):
return [
mk_SQL_tasks(self),
]
def rlscope_run(self):
kwargs = kwargs_from_task(self)
assert 'directory' not in kwargs
kwargs['directory'] = kwargs['rlscope_directory']
del kwargs['rlscope_directory']
# logger.info(pprint_msg({'kwargs': kwargs}))
self.dumper = ExtrapolatedTrainingTimeParser(**kwargs)
self.dumper.run()
class GeneratePlotIndexTask(luigi.Task):
rlscope_directory = luigi.Parameter(description="Location of trace-files")
# out_dir = luigi.Parameter(description="Location of trace-files", default=None)
# replace = luigi.BoolParameter(description="debug", parsing=luigi.BoolParameter.EXPLICIT_PARSING)
debug = param_debug
debug_single_thread = param_debug_single_thread
debug_perf = param_debug_perf
line_numbers = param_line_numbers
postgres_password = param_postgres_password
postgres_user = param_postgres_user
postgres_host = param_postgres_host
# # Needed by mk_SQL_tasks
# cupti_overhead_json = param_cupti_overhead_json
# LD_PRELOAD_overhead_json = param_LD_PRELOAD_overhead_json
# python_annotation_json = param_python_annotation_json
# python_clib_interception_tensorflow_json = param_python_clib_interception_tensorflow_json
# python_clib_interception_simulator_json = param_python_clib_interception_simulator_json
skip_output = False
def requires(self):
# Requires that traces files have been collected...
# So, lets just depend on the SQL parser to have loaded everything.
return [
# mk_SQL_tasks(self),
]
def output(self):
# Q: What about --replace? Conditionally include this output...?
return [
luigi.LocalTarget(_j(self.rlscope_directory, 'rlscope_plot_index_data.py')),
]
def run(self):
setup_logging_from_self(self)
cmd = ['rls-generate-plot-index']
cmd.extend(['--rlscope-directory', self.rlscope_directory])
if self.debug:
cmd.extend(['--debug'])
print_cmd(cmd)
subprocess.check_call(cmd)
param_hack_upper_right_legend_bbox_x = luigi.FloatParameter(description="matplotlib hack: add to x-position of upper right legend so it's outside the plot area", default=None)
param_xtick_expression = luigi.Parameter(description="Python expression to generate xtick labels for plot. Expression has access to individual 'row' and entire dataframe 'df'", default=None)
class OverlapStackedBarTask(luigi.Task):
rlscope_directories = luigi.ListParameter(description="Multiple --rlscope-directory entries for finding overlap_type files: *.venn_js.js")
rlscope_config_directories = luigi.ListParameter(description="Multiple --rlscope-directory entries for finding rlscope_config.json files (needed for making uncorrected plots)", default=None)
unins_rlscope_directories = luigi.ListParameter(description="Multiple --rlscope-directory entries for finding total uninstrumented training time (NOTE: every rlscope_directory should have an unins_rlscope_directory)")
directory = luigi.Parameter(description="Output directory", default=".")
xtick_expression = param_xtick_expression
title = luigi.Parameter(description="Plot title", default=None)
x_title = luigi.Parameter(description="x-axis title", default=None)
x_order_by = luigi.Parameter(description="order x-field by this dataframe field", default=None)
rotation = luigi.FloatParameter(description="x-axis title rotation", default=15.)
hack_upper_right_legend_bbox_x = param_hack_upper_right_legend_bbox_x
overlap_type = luigi.ChoiceParameter(choices=OverlapStackedBarPlot.SUPPORTED_OVERLAP_TYPES, description="What type of <overlap_type>*.venn_js.js files should we read from?")
resource_overlap = luigi.ListParameter(description="What resources are we looking at for things like --overlap-type=OperationOverlap? e.g. ['CPU'], ['CPU', 'GPU']", default=None)
operation = luigi.Parameter(description="What operation are we looking at for things like --overlap-type=CategoryOverlap? e.g. ['step'], ['sample_action']", default=None)
training_time = luigi.BoolParameter(description="Plot a second y-axis with total training time", parsing=luigi.BoolParameter.EXPLICIT_PARSING)
extrapolated_training_time = luigi.BoolParameter(description="Extrapolate total training time if full uninstrumented run is not present", parsing=luigi.BoolParameter.EXPLICIT_PARSING)
detailed = luigi.BoolParameter(description="Provide detailed operation/category breakdown in a single view", parsing=luigi.BoolParameter.EXPLICIT_PARSING)
remap_df = luigi.Parameter(description="Transform df pandas.DataFrame object; useful for remapping regions to new ones", default=None)
y2_logscale = luigi.BoolParameter(description="Show training time y-axis in logscale", parsing=luigi.BoolParameter.EXPLICIT_PARSING)
# For some reason, (ppo2, MinitaurBulletEnv-v0) only has:
# - regions: [('sample_action',)]
# Whereas, for ppo2 we expect:
# - regions: [('compute_advantage_estimates',), ('optimize_surrogate',), ('sample_action',)]
# TODO: re-run MinitaurBulletEnv-v0
ignore_inconsistent_overlap_regions = luigi.BoolParameter(description="If *.venn_js.json overlap data have inconsistent overlap regions, just use files that agree the most and ignore the rest", parsing=luigi.BoolParameter.EXPLICIT_PARSING)
skip_plot = luigi.BoolParameter(description="Don't plot *.png file; just output the *.csv file we WOULD plot", parsing=luigi.BoolParameter.EXPLICIT_PARSING)
y_type = luigi.ChoiceParameter(choices=OverlapStackedBarPlot.SUPPORTED_Y_TYPES, default='percent',
description=textwrap.dedent("""\
What should we show on the y-axis of the stacked bar-plot?
percent:
Don't show total training time.
Useful if you just want to show a percent breakdown using a partial trace of training.
seconds:
Show absolute training time on the y-axis.
TODO: we should extrapolate total training time based on the number of timesteps run so far,
and the number of timesteps that will be run.
"""))
x_type = param_x_type
# postgres_host = param_postgres_host
# postgres_user = param_postgres_user
# postgres_password = param_postgres_password
show_title = luigi.BoolParameter(description="Whether to add a title to the plot", default=False, parsing=luigi.BoolParameter.EXPLICIT_PARSING)
show_legend = luigi.BoolParameter(description="Whether show the legend", default=True, parsing=luigi.BoolParameter.EXPLICIT_PARSING)
width = luigi.FloatParameter(description="Width of plot in inches", default=None)
height = luigi.FloatParameter(description="Height of plot in inches", default=None)
long_env = luigi.BoolParameter(description="full environment name: Humanoid -> HumanoidBulletEnv-v0", default=None)
keep_zero = luigi.BoolParameter(description="If a stacked-bar element is zero in all the bar-charts, still show it in the legend.", default=True, parsing=luigi.BoolParameter.EXPLICIT_PARSING)
y_lim_scale_factor = luigi.FloatParameter(description="scale ylim.max by scale-factor (hack to make room for bar-labels)", default=None)
debug = param_debug
debug_single_thread = param_debug_single_thread
debug_perf = param_debug_perf
line_numbers = param_line_numbers
suffix = luigi.Parameter(description="Add suffix to output files: OverlapStackedBarPlot.overlap_type_*.{suffix}.{ext}", default=None)
# Needed by mk_SQL_tasks (for GeneratePlotIndexTask)
cupti_overhead_json = param_cupti_overhead_json
LD_PRELOAD_overhead_json = param_LD_PRELOAD_overhead_json
python_annotation_json = param_python_annotation_json
python_clib_interception_tensorflow_json = param_python_clib_interception_tensorflow_json
python_clib_interception_simulator_json = param_python_clib_interception_simulator_json
skip_output = False
def requires(self):
# TODO: we require (exactly 1) <overlap_type>.venn_js.js in each rlscope_dir.
# TODO: we need to sub-select if there are multiple venn_js.js files...need selector arguments
requires = []
for rlscope_dir in self.rlscope_directories:
kwargs = forward_kwargs(from_task=self, ToTaskKlass=GeneratePlotIndexTask)
requires.append(GeneratePlotIndexTask(
rlscope_directory=rlscope_dir,
**kwargs))
return requires
def output(self):
return []
def run(self):
setup_logging_from_self(self)
kwargs = kwargs_from_task(self)
self.dumper = OverlapStackedBarPlot(**kwargs)
self.dumper.run()
class GpuHwPlotTask(IMLTask):
gpu_hw_directories = luigi.ListParameter(description="Multiple --rlscope-directory containing GPUHwCounterSampler.csv from running \"rls-prof --config gpu-hw\"")
time_breakdown_directories = luigi.ListParameter(description="Multiple --rlscope-directory entries containing the time-breakdown results to combine with the GPU hardware counter data")
directory = luigi.Parameter(description="Output directory", default=".")
xtick_expression = param_xtick_expression
x_title = luigi.Parameter(description="x-axis title", default=None)
title = luigi.Parameter(description="Plot title", default=None)
width = luigi.FloatParameter(description="Width of plot in inches", default=None)
op_mapping = luigi.Parameter(description="Python expression defining a function mapping(algo) that returns a mapping that defines composite operations from rlscope.prof.operation annotations in the profiled code", default=None)
height = luigi.FloatParameter(description="Height of plot in inches", default=None)
rotation = luigi.FloatParameter(description="x-axis title rotation", default=None)
# IGNORED.
y2_logscale = luigi.BoolParameter(description="Show training time y-axis in logscale", parsing=luigi.BoolParameter.EXPLICIT_PARSING)
import abc
import asyncio
import collections
import contextlib
import fcntl
import functools
import inspect
import io
import mmap
import operator
import os
import pathlib
import random
import signal
import socket
import stat
import struct
import subprocess
import tempfile
import termios
import time
import types
import typing
import warnings
from typing import Any, BinaryIO, Callable, Dict, Generator, List, Optional, Set, Tuple
import pytest
import trio
from setuptools import Distribution
from hades import constants
from hades.leases.server import Server
# Available since CPython 3.10
F_GETPIPE_SZ = getattr(fcntl, "F_GETPIPE_SZ", 1032)
AncillaryData = List[Tuple[int, int, bytes]]
RECVMSG = Tuple[bytes, AncillaryData, int]
Result = Tuple[
int,
bytes,
bytes,
Optional[List[RECVMSG]],
Optional[bytes],
]
T = typing.TypeVar('T')
SIZEOF_INT = struct.calcsize("i")
ucred = struct.Struct("iII")
TIMEOUT = 1.0
RECVMSG_FLAGS = socket.MSG_CMSG_CLOEXEC
@pytest.fixture(scope="session")
def socket_path() -> bytes:
return os.fsencode(tempfile.mktemp(prefix="hades-", suffix=".sock"))
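# tempfile.mktemp only reserves a name, not a file; that is acceptable here
# because the path is used as the address of a fresh AF_UNIX socket, which
# bind() creates itself and the server fixture unlinks again on teardown.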
def read_int_sysctl(variable: str) -> int:
with (pathlib.PosixPath("/proc/sys") / variable).open("rb", 0) as f:
return int(f.read())
@pytest.fixture(scope="session")
def optmem_max() -> int:
return read_int_sysctl("net/core/optmem_max")
@pytest.fixture(scope="session")
def wmem_default() -> int:
return read_int_sysctl("net/core/wmem_default")
@pytest.fixture(scope="session")
def uid():
return os.getuid()
@pytest.fixture(scope="session")
def gid():
return os.getgid()
@pytest.fixture(scope="class")
def server(socket_path) -> socket.socket:
with contextlib.ExitStack() as stack:
type_ = socket.SOCK_STREAM | socket.SOCK_NONBLOCK | socket.SOCK_CLOEXEC
sock = stack.enter_context(socket.socket(socket.AF_UNIX, type_))
sock.bind(socket_path)
stack.callback(os.unlink, socket_path)
sock.listen()
yield sock
def test_short_write_possible(wmem_default):
"""On Linux only the sender can influence the size of a Unix stream socket
buffer."""
got = os.sysconf("SC_ARG_MAX")
expected = wmem_default + mmap.PAGESIZE
assert got > expected, "Cannot test short writes"
@contextlib.contextmanager
def chdir(directory):
prev_cwd = os.getcwd()
os.chdir(directory)
try:
yield directory
finally:
os.chdir(prev_cwd)
@pytest.fixture(scope="session")
def executable(request) -> pathlib.PosixPath:
"""Let setuptools compute the path to the built executable"""
with chdir(request.config.rootdir) as root_dir:
command = "build"
distribution = Distribution({
"script_name": __file__,
"script_args": [command],
})
distribution.parse_config_files()
distribution.parse_command_line()
command = distribution.get_command_obj(command)
command.ensure_finalized()
return (
pathlib.PosixPath(root_dir).absolute()
/ command.build_platlib
/ "hades-dhcp-script"
)
def test_executable_exists(executable: pathlib.PosixPath):
assert executable.exists()
class ChildStopped(Exception):
pass
class TimeoutExceeded(Exception):
pass
@contextlib.contextmanager
def pipe():
r, w = os.pipe2(os.O_CLOEXEC | os.O_NONBLOCK)
r = os.fdopen(r, "rb", buffering=0, closefd=True)
w = os.fdopen(w, "wb", buffering=0, closefd=True)
with r, w:
yield r, w
def drain_pipe(stream: io.FileIO, buffer: typing.BinaryIO) -> Optional[int]:
chunk = stream.readall()
if chunk is not None:
buffer.write(chunk)
return len(chunk)
else:
return None
async def read_pipe(stream: trio.abc.ReceiveStream, buffer: io.BytesIO):
async for chunk in stream:
buffer.write(chunk)
async def receive_messages(
client: trio.SocketStream,
wmem_default: int,
optmem_max: int,
messages: List[RECVMSG],
):
while (r := await client.socket.recvmsg(
wmem_default,
optmem_max,
RECVMSG_FLAGS,
))[0]:
messages.append(r[:3])
async def send_reply(client: trio.SocketStream, reply: bytes) -> bytes:
reply = memoryview(reply)
sent_total = 0
try:
while (
sent_total < len(reply)
and (sent := await client.socket.send(reply[sent_total:]))
):
sent_total += sent
except BrokenPipeError:
pass
return reply[:sent_total].tobytes()
async def track_process(process: trio.Process):
await process.wait()
raise ChildStopped()
async def run_with_trio(
executable: pathlib.PosixPath,
argv: List[bytes],
environ: Dict[bytes, bytes],
stdin: Tuple[io.FileIO, io.FileIO],
stdout: Tuple[io.FileIO, io.FileIO],
stderr: Tuple[io.FileIO, io.FileIO],
server: socket.socket,
reply: bytes,
wmem_default: int,
optmem_max: int,
uid: int,
gid: int,
) -> Result:
stdout_content = io.BytesIO()
stderr_content = io.BytesIO()
messages = None
sent = None
with trio.move_on_after(TIMEOUT):
process = await trio.open_process(
argv,
executable=executable,
env=environ,
bufsize=0,
text=False,
encoding=None,
stdin=os.dup(stdin[0].fileno()),
stdout=os.dup(stdout[1].fileno()),
stderr=os.dup(stderr[1].fileno()),
)
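# The pipe fds are dup()ed above, presumably because trio takes ownership of the
# descriptors handed to open_process and FdStream and closes them itself;
# keeping the pytest-scoped originals open lets drain_pipe() pick up any
# remaining output after the nursery exits.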
try:
async with trio.open_nursery() as nursery:
nursery: trio.Nursery = nursery
# Read stdout/stderr in the background
nursery.start_soon(track_process, process)
nursery.start_soon(read_pipe, trio.lowlevel.FdStream(os.dup(stdout[0].fileno())), stdout_content)
nursery.start_soon(read_pipe, trio.lowlevel.FdStream(os.dup(stderr[0].fileno())), stderr_content)
server = trio.socket.from_stdlib_socket(server)
client, _ = await server.accept()
client = trio.SocketStream(client)
credentials = ucred.unpack(client.getsockopt(
socket.SOL_SOCKET, socket.SO_PEERCRED, ucred.size,
))
assert (process.pid, uid, gid) == credentials
messages = []
await receive_messages(client, wmem_default, optmem_max, messages)
sent = await send_reply(client, reply)
await client.send_eof()
except ChildStopped:
pass
else:
process.kill()
drain_pipe(stdout[0], stdout_content)
drain_pipe(stderr[0], stderr_content)
return process.returncode, stdout_content.getvalue(), stderr_content.getvalue(), messages, sent
class BaseRun(abc.ABC):
RECVMSG_FLAGS = socket.MSG_CMSG_CLOEXEC
TIMEOUT = 5.0
@pytest.fixture(scope="class")
def environ(self, server: socket.socket) -> Dict[bytes, bytes]:
path = os.fsencode(server.getsockname())
return collections.OrderedDict((
(b"HADES_AUTH_DHCP_SCRIPT_SOCKET", path),
))
@pytest.fixture(scope="class")
def stdin(self) -> Tuple[io.FileIO, io.FileIO]:
with pipe() as p:
yield p
@pytest.fixture(scope="class")
def stdout(self) -> Tuple[io.FileIO, io.FileIO]:
with pipe() as p:
yield p
@pytest.fixture(scope="class")
def stderr(self) -> Tuple[io.FileIO, io.FileIO]:
with pipe() as p:
yield p
@pytest.fixture(scope="class")
def result(
self,
executable: pathlib.PosixPath,
argv: List[bytes],
environ: Dict[bytes, bytes],
stdin: Tuple[io.FileIO, io.FileIO],
stdout: Tuple[io.FileIO, io.FileIO],
stderr: Tuple[io.FileIO, io.FileIO],
server: socket.socket,
reply: bytes,
wmem_default: int,
optmem_max: int,
uid: int,
gid: int,
) -> Result:
return trio.run(
run_with_trio,
executable,
argv,
environ,
stdin,
stdout,
stderr,
server,
reply,
wmem_default,
optmem_max,
uid,
gid,
)
@pytest.fixture(scope="class")
def status(self, result: Result) -> int:
return result[0]
@pytest.fixture(scope="class")
def stdout_content(self, result: Result) -> bytes:
return result[1]
@pytest.fixture(scope="class")
def stderr_content(self, result: Result) -> bytes:
return result[2]
@pytest.fixture(scope="class")
def messages(self, result: Result) -> Optional[List[RECVMSG]]:
return result[3]
@pytest.fixture(scope="class")
def sent(self, result: Result) -> Optional[bytes]:
return result[4]
@property
@abc.abstractmethod
def expected_status(self) -> int:
pass
@pytest.fixture(scope="class")
def reply(self) -> bytes:
return struct.pack("b", self.expected_status)
def test_status(self, status: int):
assert status == self.expected_status
@property
@abc.abstractmethod
def expected_stdout(self) -> bytes:
pass
def test_stdout_content(self, stdout_content: bytes):
assert stdout_content == self.expected_stdout
@property
@abc.abstractmethod
def expected_stderr(self) -> bytes:
pass
def test_stderr_content(self, stderr_content: bytes):
assert stderr_content == self.expected_stderr
class SuccessfulRun(BaseRun, abc.ABC):
@property
def expected_status(self):
return os.EX_OK
class NoStdoutOutputRun(BaseRun, abc.ABC):
@property
def expected_stdout(self) -> bytes:
return b""
class PrematureExitRun(NoStdoutOutputRun, abc.ABC):
@property
def expected_stderr(self) -> bytes:
return inspect.cleandoc(
f"""
hades-dhcp-script ARGS...
Sends its command-line arguments, environment variables starting
with DNSMASQ_ and the stdin/stdout file descriptors to the UNIX
socket set via the HADES_AUTH_DHCP_SCRIPT_SOCKET environment
variable (defaults to {constants.AUTH_DHCP_SCRIPT_SOCKET}).
See the -6, --dhcp-script options of dnsmasq for details.
"""
).encode("ascii")
def test_messages(self, messages: Optional[List[RECVMSG]]):
assert messages is None
def test_sent(self, sent: Optional[bytes]):
assert sent is None
class TestUsageExit(PrematureExitRun):
@property
def expected_status(self) -> int:
return os.EX_USAGE
@pytest.fixture(scope="session")
def argv(self, executable: pathlib.PosixPath) -> List[bytes]:
return [bytes(executable)]
class TestHelpExit(PrematureExitRun, SuccessfulRun):
@pytest.fixture(
scope="session",
params=[[b"-h"], [b"--help"], [b"help"]]
)
def argv(self, request, executable: pathlib.PosixPath) -> List[bytes]:
return [bytes(executable)] + request.param
class ConnectedRun(BaseRun, abc.ABC):
@pytest.fixture(scope="class")
def messages(self, result: Result) -> List[RECVMSG]:
messages = result[3]
if messages is None:
pytest.fail("No messages")
return messages
@pytest.fixture(scope="class")
def file_descriptors(
self,
messages: List[RECVMSG],
) -> Generator[List[io.FileIO], None, None]:
streams = []
with contextlib.ExitStack() as stack:
for _, ancdata, _ in messages:
streams.extend(
stack.enter_context(stream)
for stream in Server.parse_ancillary_data(ancdata)
)
# Make received file descriptors non-blocking
for stream in streams:
os.set_blocking(stream.fileno(), False)
yield streams
@pytest.fixture(scope="class")
def passed_stdin(self, file_descriptors: List[io.FileIO]):
if len(file_descriptors) != 3:
pytest.fail("Wrong number of file descriptors")
return file_descriptors[0]
@pytest.fixture(scope="class")
def passed_stdout(self, file_descriptors: List[io.FileIO]):
if len(file_descriptors) != 3:
pytest.fail("Wrong number of file descriptors")
return file_descriptors[1]
@pytest.fixture(scope="class")
def passed_stderr(self, file_descriptors: List[io.FileIO]):
if len(file_descriptors) != 3:
pytest.fail("Wrong number of file descriptors")
return file_descriptors[2]
def test_sent(self, sent: Optional[bytes], reply: bytes):
assert sent == reply
def test_flags(self, messages: Optional[List[RECVMSG]]):
got = [flags for _, _, flags in messages]
expected = [self.RECVMSG_FLAGS for _, _, _ in messages]
assert got == expected
def test_ancillary_data(self, messages: Optional[List[RECVMSG]]):
expected = [
(
socket.SOL_SOCKET,
socket.SCM_RIGHTS,
len(cmsg_data) - len(cmsg_data) % SIZEOF_INT,
)
for _, ancdata, _ in messages
for _, _, cmsg_data in ancdata
]
got = [
(cmsg_level, cmsg_type, len(cmsg_data))
for _, ancdata, _ in messages
for cmsg_level, cmsg_type, cmsg_data in ancdata
]
assert got == expected
@pytest.mark.xfail(raises=BlockingIOError)
def test_file_descriptor_count(
self,
file_descriptors: List[BinaryIO],
):
assert len(file_descriptors) == 3
@staticmethod
def assert_file(our_file: io.FileIO, passed_file: io.FileIO):
our_readable = our_file.readable()
got_mode = passed_file.mode
our_stat = os.fstat(our_file.fileno())
passed_stat = os.fstat(passed_file.fileno())
is_fifo = stat.S_ISFIFO(passed_stat.st_mode)
expected_mode = "wb" if our_readable else "rb"
reader = our_file if our_readable else passed_file
writer = passed_file if our_readable else our_file
# Verify that we have a pipe with its two ends
if is_fifo and our_stat == passed_stat:
pipe_size = fcntl.fcntl(writer.fileno(), F_GETPIPE_SZ)
# Check for pending bytes in the pipe
pending_bytes = bytearray(SIZEOF_INT)
fcntl.ioctl(reader.fileno(), termios.FIONREAD, pending_bytes)
pending_bytes = struct.unpack_from("=i", pending_bytes)[0]
test_size = min(mmap.PAGESIZE, pipe_size - pending_bytes)
expected_bytes = random.randbytes(test_size)
writer.write(expected_bytes)
writer.flush()
got_bytes = reader.read(pipe_size)
else:
expected_bytes = None
got_bytes = None
assert (
got_mode, is_fifo, passed_stat, got_bytes
) == (
expected_mode, True, our_stat, expected_bytes
)
@pytest.mark.xfail(raises=BlockingIOError)
def test_passed_stdin(
self,
stdin: Tuple[io.FileIO, io.FileIO],
passed_stdin: io.FileIO,
):
self.assert_file(stdin[1], passed_stdin)
@pytest.mark.xfail(raises=BlockingIOError)
def test_passed_stdout(
self,
stdout: Tuple[io.FileIO, io.FileIO],
passed_stdout: io.FileIO,
):
self.assert_file(stdout[0], passed_stdout)
@pytest.mark.xfail(raises=BlockingIOError)
def test_passed_stderr(
self,
stderr: Tuple[io.FileIO, io.FileIO],
passed_stderr: io.FileIO,
):
self.assert_file(stderr[0], passed_stderr)
def test_message(
self,
argv: List[bytes],
environ: Dict[bytes, bytes],
messages: Optional[List[RECVMSG]],
):
got = b"".join(map(operator.itemgetter(0), messages))
expected = b"".join([
struct.pack("=i", len(argv)),
] + [
arg + b"\x00" for arg in argv
] + [
struct.pack("=i", len(environ)),
] + [
b"%b=%b\x00" % i for i in environ.items()
])
assert got == expected
class TestSuccess(ConnectedRun, SuccessfulRun, NoStdoutOutputRun):
@property
def expected_stderr(self) -> bytes:
return b""
@pytest.fixture(scope="session")
def argv(
self,
executable: pathlib.PosixPath,
wmem_default: int,
) -> List[bytes]:
random_args = random.randbytes(2 * wmem_default).split(b"\x00")
return [
bytes(executable),
b"add",
] + random_args
@pytest.fixture(scope="class")
def environ(self, server: socket.socket) -> Dict[bytes, bytes]:
path = os.fsencode(server.getsockname())
return collections.OrderedDict((
(b"NON_DNSMASQ_PREFIX_ENV", b"1"),
(b"DNSMASQ_PREFIX_ENV", b"2"),
(b"DNSMASQ_PREFIX_WITH_WHITESPACE", b" \twith\t whitespace\t "),
| |
# src/a1_process_model.py
import math as _math
import geometry as _g
import sys as _sys
if _sys.version_info.major != 2: reload = lambda x: 0
reload(_g)
# Author: <NAME>
# Date: 2016
# Apr 2018: modified for Python 3
class ModelState(object):
# This class is as fast for member access by name
# as tuples are for member access by index. The
# purpose is to reduce errors when modifying the
# state.
# However, creating a new instance takes 8
# times as long as creating a tuple.
__slots__ = ('p_front_right',
'axis_dir',
'center_wheel_dir',
'distance_step',
'odometer_reading',
'status') # status in progression: running or terminated
def __init__(self, p_front_right, axis_dir, center_wheel_dir, distance_step, odometer_reading, status):
self.p_front_right = tuple(p_front_right)
self.axis_dir = axis_dir
self.center_wheel_dir = center_wheel_dir
self.distance_step = distance_step # distance rotated by the 'center' wheel
self.odometer_reading = odometer_reading
self.status = status
def __repr__(self):
return ('%s(p_front_right=%r, axis_dir=%r, center_wheel_dir=%r, distance_step=%r, status=%r)' %
(self.__class__.__name__,
self.p_front_right, self.axis_dir,
self.center_wheel_dir, self.distance_step,
self.status))
def __eq__(self, other):
"""Equal compares by value"""
return ((self.p_front_right[0] == other.p_front_right[0]) and
(self.p_front_right[1] == other.p_front_right[1]) and
(self.axis_dir == other.axis_dir) and
(self.center_wheel_dir == other.center_wheel_dir) and
(self.distance_step == other.distance_step))
def __ne__(self, other):
return not self.__eq__(other)
def __copy__(self):
"""Return a shallow copy of this instance."""
return ModelState(self.p_front_right, self.axis_dir, self.center_wheel_dir, self.distance_step, self.odometer_reading, self.status)
def __lt__(self, other):
raise NotImplementedError('< comparison') # to catch typos
def __gt__(self, other):
raise NotImplementedError('> comparison') # to catch typos
def __le__(self, other):
raise NotImplementedError('<= comparison') # to catch typos
def __ge__(self, other):
raise NotImplementedError('>= comparison') # to catch typos
    def __bool__(self):
        raise NotImplementedError('bool conversion') # to catch typos
    __nonzero__ = __bool__ # Python 2 name for the same hook
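# Illustrative construction of a ModelState (not part of the original module; the
# numbers are placeholders):
#   s0 = ModelState(p_front_right=(0., 0.), axis_dir=0., center_wheel_dir=0.,
#                   distance_step=0., odometer_reading=0., status='running')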
class Car(object):
    _instantiation_counter = [0]
agent_ID = None
def __init__(self, init=None):
"""Define static parameters outside the scope of model state"""
        self._instantiation_counter[0] += 1
        self.agent_ID = hash('Car-%i' % self._instantiation_counter[0])
m=_math.pi/180.
# dimensions affecting the low-speed movement
self.wheelbase = 2.5
self.track = 1.5
self.center_angle_limits = 30*m
# dimensions used for plotting
self.overhang_front = .9
self.overhang_rear = .7
self.body_width = 2.
self.environment = None
if init is not None:
# overwrite with dictionary
for variable in init:
self.__setattr__(variable,init[variable])
def act(self, state, action):
"""Perform action on state, return new state."""
if state.status == 'terminated':
return state
if action is not None:
center_wheel_direction, center_distance_step = action
p_front_right, axis_dir = _move(self.wheelbase,self.track,
state.p_front_right,
state.axis_dir,
center_wheel_direction,
center_distance_step)
else:
# just check for crash
p_front_right = state.p_front_right
axis_dir = state.axis_dir
center_wheel_direction = state.center_wheel_dir
center_distance_step= 0. # we're actually not moving the car --> don't change odometer
new_status = self._check_crash(ModelState(p_front_right=p_front_right,
axis_dir=axis_dir,
center_wheel_dir=center_wheel_direction,
distance_step=state.distance_step,
odometer_reading=state.odometer_reading,
status=state.status))
return ModelState(p_front_right=p_front_right,
axis_dir=axis_dir,
center_wheel_dir=center_wheel_direction,
distance_step=center_distance_step,
odometer_reading=state.odometer_reading + abs(center_distance_step),
status=new_status)
def _check_crash(self, state):
corners=chassis_outline(state.p_front_right,state.axis_dir,
self.wheelbase,self.track,
self.overhang_front, self.overhang_rear,
self.body_width)
car_outline = _g._lines_from_points(corners)
car_outline_h = _g._lines_to_homogenous(car_outline)
self.environment.set_agent_properties(self, car_outline, car_outline_h)
env_lines, env_lines_h = self.environment.get_domain_lines_that_may_intersect(car_outline)
# car leaving the domain?
out_of_bounds = _g._any_intersect(car_outline, env_lines,
car_outline_h, env_lines_h)
if out_of_bounds:
return 'terminated'
if False:
# check for collision with other agents
for agent in self.environment.agents:
if agent == self: continue
collision = _g._any_intersect(car_outline, self.environment.agents[agent][0],
car_outline_h, self.environment.agents[agent][0])
if collision:
return 'terminated'
return 'running'
###################################################################
def _front_right_pos_from_center(wheelbase, track, p_center, axis_direction):
"""Specify center of car and orientation, return position of front right wheel"""
rot=lambda pt, a: (_math.cos(a)*pt[0]+ -_math.sin(a)*pt[1],
_math.sin(a)*pt[0]+ _math.cos(a)*pt[1])
vector=(wheelbase/2., -track/2.)
return _add(rot(vector, axis_direction), p_center)
def _move(wheelbase, track, p_front_right,
axis_direction, center_wheel_direction, distance_center_wheel):
"""Move car by moving front right wheel forward by 'distance'.
Specify the center wheel position."""
front_right_wheel_direction = wheel_angles_from_track_angle(wheelbase, track, center_wheel_direction)[0]
# convey reverse driving to move_on_curve() in a consistent way
used_wheel_direction = _confined_angle_pi(front_right_wheel_direction-_math.pi) if distance_center_wheel < 0 else front_right_wheel_direction
# since the left and right wheels follow different tracks,
# we use the "center" wheel to define distance (and speed)
# --> ensures that tracks are symmetric
R_right, R_center, R_left = absolute_curve_radii_from_center_angle(wheelbase, track, center_wheel_direction)
# use if/else construct since we cannot divide inf/inf when going straight
abs_right_front_wheel_distance = abs(distance_center_wheel)*R_right/R_center if R_right != R_center else abs(distance_center_wheel)
p_front_right, axis_direction = _move_on_curve(p_front_right, axis_direction, used_wheel_direction, wheelbase, abs_right_front_wheel_distance)
return p_front_right, axis_direction
def _move_on_curve(p_front,car_axis_dir,front_right_wheel_dir,axes_separation,distance):
wheel_dir = front_right_wheel_dir
if (abs(wheel_dir)<1e-6): # 1e-6 is the limit
# straight forward
return _move_straight(p_front,car_axis_dir,distance)
if abs(abs(wheel_dir)-_math.pi)<1e-6: # 1e-6 is the limit
# straight back
# change the "direction" rather than angle since the angle will be passed back to loop
return _move_straight(p_front,car_axis_dir,-distance)
curve_radius_front_wheel=axes_separation/_math.sin(wheel_dir)
#wheel_angle = car_axis_dir+wheel_dir
# The curve_angle applies to all wheels:
curve_angle = 2*_math.pi* distance/(2*_math.pi*curve_radius_front_wheel)
new_axis = car_axis_dir + curve_angle
while new_axis>=_math.pi:
new_axis-=2*_math.pi
while new_axis<-_math.pi:
new_axis+=2*_math.pi
if True:
# calculate based on front wheel
# (same result as back wheel)
dx = curve_radius_front_wheel * _math.cos(_math.pi/2+car_axis_dir+wheel_dir)
dy = curve_radius_front_wheel * _math.sin(_math.pi/2+car_axis_dir+wheel_dir)
turning_center= _add(p_front, (dx,dy))
start = _sub(p_front, turning_center)
a = curve_angle
end=[_math.cos(a)*start[0]+ -_math.sin(a)*start[1],
_math.sin(a)*start[0]+ _math.cos(a)*start[1]]
new_front = _add(end, turning_center)
else:
# calculate based on back wheel
# (same result as front wheel)
curve_radius_back_wheel=axes_separation/_math.tan(wheel_dir)
p_rear = _rear_wheel_pos(p_front,car_axis_dir,axes_separation)
dx = curve_radius_back_wheel * _math.cos(_math.pi/2+car_axis_dir)
dy = curve_radius_back_wheel * _math.sin(_math.pi/2+car_axis_dir)
turning_center= _add(p_rear, (dx,dy))
start = _sub(p_rear, turning_center)
a = curve_angle
end=[_math.cos(a)*start[0]+ -_math.sin(a)*start[1],
_math.sin(a)*start[0]+ _math.cos(a)*start[1]]
new_rear = _add(end, turning_center)
new_front__from_back = _rear_wheel_pos(new_rear,new_axis,-axes_separation)
#print 'back connected', new_front__from_back
new_front = new_front__from_back
return new_front, new_axis
def _move_straight(p_front,car_axis_dir,distance):
dx = distance * _math.cos(car_axis_dir)
dy = distance * _math.sin(car_axis_dir)
return _add(p_front, (dx,dy)), car_axis_dir
def _distance(p1,p2):
return ((p1[0]-p2[0])**2+(p1[1]-p2[1])**2)**.5
def _vector(p1,p2):
return _math.atan2(p2[1]-p1[1],p2[0]-p1[0])
def _add(p1,p2):
return (p1[0]+p2[0],p1[1]+p2[1])
def _sub(p1,p2):
return (p1[0]-p2[0],p1[1]-p2[1])
def _rear_wheel_pos(p_front,car_axis_dir,axes_separation):
x=p_front[0]-axes_separation*_math.cos(car_axis_dir)
y=p_front[1]-axes_separation*_math.sin(car_axis_dir)
return x,y
def _new_wheel_angle(start_angle, movement, max_deflection=None):
# TODO: implement different limits for in and out
# we assume that movement is increments |movement| < pi/2
# or that |movement| = +- pi (i.e., reversing direction)
# smooth transition between forward and backward is only supported
# if max_deflection==pi/2
epsilon = 1e-12
max_deflection=max_deflection if max_deflection is not None else 20*_math.pi/180.
ang=_confined_angle_pi(start_angle + movement)
if abs(max_deflection-_math.pi/2.)<epsilon: return ang
a=max_deflection
b=_math.pi-max_deflection
a_min,a_max = -a, a
b_neg,b_pos = -b, b
if (a_min<=ang<=a_max) or (ang<=b_neg) or (ang>=b_pos): return ang
reversing = abs(abs(movement)-_math.pi) < epsilon
s= -1 if reversing else 1
if (s*abs(start_angle)<=s*_math.pi/2):
# supposed to go forward after move
return min((a_max,max((a_min,ang))))
else:
# supposed to go backward after move
if (ang<0): return min((ang, b_neg))
return max((ang, b_pos))
def _confined_angle_pi(a):
"""Inclusive -pi, exclusive +pi"""
while a<-_math.pi:
a+=2*_math.pi
while a>=_math.pi:
a-=2*_math.pi
return a
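# Sanity examples for the wrapping convention above (illustrative only):
#   _confined_angle_pi(3*_math.pi/2) -> -_math.pi/2
#   _confined_angle_pi(-_math.pi)    -> -_math.pi   (the -pi endpoint is kept)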
#####################################
#
# helper functions to plot the state
# or calculate collisions
#
def chassis_outline(p_front_right,car_axis_dir,wheelbase,track,
overhang_front, overhang_rear, car_width):
chassis_lr = (car_width-track)/2.
    # positions wrt the front right corner
corners=[[0,0],
[-(overhang_front+overhang_rear+wheelbase),0],
[-(overhang_front+overhang_rear+wheelbase),car_width],
[0,car_width]]
    # translate so positions are wrt the front right wheel
corners = [_add(c,(overhang_front,-chassis_lr)) for c in corners]
# rotate according to car_axis_dir
a = car_axis_dir
rot=lambda pt: [_math.cos(a)*pt[0]+ -_math.sin(a)*pt[1],
_math.sin(a)*pt[0]+ _math.cos(a)*pt[1]]
chassis=[rot(p) for p in corners]
chassis = [_add(c,p_front_right) for c in chassis]
return chassis
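# Illustrative call (placeholder dimensions): world-frame chassis corners for a car
# whose front right wheel sits at the origin and points along +x:
#   chassis_outline((0., 0.), 0., wheelbase=2.5, track=1.5,
#                   overhang_front=.9, overhang_rear=.7, car_width=2.)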
def wheel_outlines(p_front_right,car_axis_dir,
center_wheel_dir,
wheelbase,track,
wheel_size = None):
rot=lambda pt, a: [_math.cos(a)*pt[0]+ -_math.sin(a)*pt[1],
_math.sin(a)*pt[0]+ _math.cos(a)*pt[1]]
rotate=lambda points, a: [rot(p,a) for p in points]
add = lambda many, offset: [_add(p, offset) for p in many]
sub = lambda many, offset: [_sub(p, offset) for p in many]
right_wheel_dir, left_wheel_dir = wheel_angles_from_center_angle(wheelbase, track, center_wheel_dir)
    # center position of wheels wrt the front right wheel
wheel_centers=((0,0),(-wheelbase,0),(-wheelbase,track),(0,track))
wheel_pos = rotate(wheel_centers, car_axis_dir)
wheel_pos = add(wheel_pos, p_front_right)
# rotated wheels at their respective positions
wheels = []
# calculate outlines of wheels
# wheel, origin at center
if wheel_size is None:
wheel_size = '195/55R16'
width=195/1000.
diameter=2*width*.55+16*.0254
    wh_width = float(wheel_size.split('/')[0])
    wh_rubber_scale = float(wheel_size.split('/')[1].split('R')[0]) * .01
    wh_base = float(wheel_size.split('R')[1])
width=wh_width/1000.
diameter=2*width*wh_rubber_scale+wh_base*.0254
wheel=sub(((0,0),(0,width),(diameter,width),(diameter,0)), (diameter/2.,width/2.))
# rear wheels
corners = rotate(wheel, car_axis_dir)
wheels.append(add(corners,wheel_pos[1]))
wheels.append(add(corners,wheel_pos[2]))
# front right wheel
corners = rotate(wheel, car_axis_dir+right_wheel_dir)
wheels.append(add(corners,wheel_pos[0]))
# front left wheel
corners = rotate(wheel, car_axis_dir+left_wheel_dir)
wheels.append(add(corners,wheel_pos[3]))
return wheels
def front_left_wheel_angle(wheelbase, track, right_wheel_dir):
"""Geometric calculation valid for low speeds.
Returns angle of front left tire and curve radii front right and front left."""
# cf. https://en.wikipedia.org/wiki/Ackermann_steering_geometry
if abs(right_wheel_dir)<1e-6:
return right_wheel_dir, (float('inf'),float('inf'))
if True:
self = front_left_wheel_angle
params = wheelbase, track, right_wheel_dir
cache=self.cache=self.__dict__.get('cache',{})
if params in cache:
return cache[params]
inner=right_wheel_dir if right_wheel_dir >0 else _math.pi-right_wheel_dir
curve_radius_front_right=wheelbase/_math.sin(right_wheel_dir)
curve_radius_front_left=(track**2+curve_radius_front_right**2
-2*track*abs(curve_radius_front_right)*
_math.cos(inner))**.5
curve_radius_front_left=abs(curve_radius_front_left)*(1 if right_wheel_dir>=0 else -1)
left_wheel_dir=_math.acos((track-abs(curve_radius_front_right)*_math.cos(inner))/
abs(curve_radius_front_left))
left_wheel_dir=_math.pi-left_wheel_dir if right_wheel_dir >0 else -left_wheel_dir
result = left_wheel_dir, (curve_radius_front_right, curve_radius_front_left)
cache[params]=result
return result
def wheel_angles_from_center_angle(wheelbase, track, center_wheel_angle):
if True:
self = wheel_angles_from_center_angle
params = wheelbase, track, center_wheel_angle # this assignment takes about 0.15 us (about the same as the overhead of a function call)
cache=self.cache=self.__dict__.get('cache',{})
if params in cache:
return cache[params]
if abs(center_wheel_angle)<1e-6:
result = 0., 0.
else:
R_right, R_center, R_left = absolute_curve_radii_from_center_angle(wheelbase, track, center_wheel_angle)
sig = 1. if center_wheel_angle>0 else -1.
result = sig*_math.asin(wheelbase/R_right), sig*_math.asin(wheelbase/R_left)
cache[params]=result
return result
def wheel_angles_from_track_angle(wheelbase, track, center_wheel_angle):
#coding:utf-8
import sys
#sys.path.append("../")
sys.path.insert(0,'..')
import numpy as np
import argparse
import os
import cPickle as pickle
import cv2
from train_models.mtcnn_model import P_Net,R_Net,O_Net # O_Net is used below in ONet mode (assumed to be defined alongside P_Net/R_Net)
from train_models.MTCNN_config import config
from loader import TestLoader
from Detection.detector import Detector
from Detection.fcn_detector import FcnDetector
from Detection.MtcnnDetector import MtcnnDetector
from utils import convert_to_square,IoU,convert_to_rect,IoU_self
from data_utils import get_path,read_annotation
import pdb
#net : 24(RNet)/48(ONet)
#data: dict()
'''
def args():
parser = argparse.ArgumentParser(description="gen_hard_example for Rnet Onet")
    parser.add_argument('--net',type=str,required=True,default='RNet',
                        help='should be RNet or ONet')
return parser.parse_args()
'''
def save_hard_example(gen_anno_file, gen_imgs_dir,data,save_path,test_mode):
# load ground truth from annotation file
# format of each line: image/path [x1,y1,x2,y2] for each gt_box in this image
im_idx_list = data['images']
# print(images[0])
gt_boxes_list = data['bboxes']
num_of_images = len(im_idx_list)
print("processing %d images in total" % num_of_images)
# save files
print("saved hard example dir ",net)
#neg_label_file = "%s/neg_%d.txt" % (net, image_size)
neg_label_file = gen_anno_file[0]
neg_file = open(neg_label_file, 'w')
pos_label_file = gen_anno_file[1]
pos_file = open(pos_label_file, 'w')
part_label_file = gen_anno_file[2]
part_file = open(part_label_file, 'w')
#read detect result
det_boxes = pickle.load(open(os.path.join(save_path, 'detections.pkl'), 'rb'))
print("det boxes and image num: ",len(det_boxes), num_of_images)
#print len(det_boxes)
#print num_of_images
assert len(det_boxes) == num_of_images, "incorrect detections or ground truths"
# index of neg, pos and part face, used as their image names
n_idx = 0
p_idx = 0
d_idx = 0
image_done = 0
cnt_pass =0
#im_idx_list image index(list)
#det_boxes detect result(list)
#gt_boxes_list gt(list)
neg_dir,pos_dir,part_dir = gen_imgs_dir
if test_mode == "PNet" and not config.train_face:
X1_thresh = 0.45
Y1_thresh = -0.2
elif test_mode == "RNet" and not config.train_face:
Y1_thresh = -0.2
X1_thresh = 0.45
print("generate Onet")
else:
Y1_thresh = 1
X1_thresh = 1
for im_idx, dets, gts in zip(im_idx_list, det_boxes, gt_boxes_list):
gts = np.array(gts, dtype=np.float32).reshape(-1, 4)
if image_done % 100 == 0:
print("%d images done" % image_done)
image_done += 1
if dets.shape[0] == 0:
continue
img = cv2.imread(im_idx)
#change to square
dets = convert_to_square(dets)
dets[:, 0:4] = np.round(dets[:, 0:4])
neg_num = 0
for box in dets:
x_left, y_top, x_right, y_bottom, _ = box.astype(int)
width = x_right - x_left + 1
height = y_bottom - y_top + 1
# ignore box that is too small or beyond image border
#if width < 20 or x_left < 0 or y_top < 0 or x_right > img.shape[1] - 1 or y_bottom > img.shape[0] - 1:
if x_left < 0 or y_top < 0 or x_right > img.shape[1] - 1 or y_bottom > img.shape[0] - 1 or width <=10 :
#print("pass")
cnt_pass+=1
continue
# compute intersection over union(IoU) between current box and all gt boxes
Iou_ = IoU(box, gts)
Iou_gt = IoU_self(box,gts)
cropped_im = img[y_top:y_bottom + 1, x_left:x_right + 1, :]
resized_im = cv2.resize(cropped_im, (image_size, image_size),
interpolation=cv2.INTER_LINEAR)
# save negative images and write label
# Iou with all gts must below 0.3
union_max = np.max(Iou_)
gt_max = np.max(Iou_gt)
if union_max <=0.3 and neg_num < 60:
#save the examples
                idx = np.argmax(Iou_) # index of the closest gt box (argmax over the IoU vector, not the scalar max)
assigned_gt = gts[idx]
x1, y1, x2, y2 = assigned_gt
offset_x1 = (x1 - x_left) / float(width)
offset_y1 = (y1 - y_top) / float(height)
offset_x2 = (x2 - x_right) / float(width)
offset_y2 = (y2 - y_bottom) / float(height)
save_file = get_path(neg_dir, "%s.jpg" % n_idx)
# print(save_file)
#neg_file.write(save_file + ' 0\n')
neg_file.write(save_file + ' 0 %.2f %.2f %.2f %.2f\n' % (offset_x1, offset_y1, offset_x2, offset_y2))
cv2.imwrite(save_file, resized_im)
n_idx += 1
'''
if union_max>0:
if np.abs(offset_x1) < 1 :
neg_file.write(save_file + ' 0 %.2f %.2f %.2f %.2f\n' % (offset_x1, offset_y1, offset_x2, offset_y2))
#neg_file.write(' %.2f %.2f %.2f %.2f' % (x1, y1, x2, y2))
#neg_file.write(' %.2f %.2f %.2f %.2f ' % (x_left, y_top, x_right, y_bottom))
#neg_file.write(im_idx +'\n')
cv2.imwrite(save_file, resized_im)
n_idx += 1
else:
neg_file.write(save_file + ' 0 %.2f %.2f %.2f %.2f\n' % (offset_x1, offset_y1, offset_x2, offset_y2))
cv2.imwrite(save_file, resized_im)
n_idx += 1
'''
neg_num += 1
else:
# find gt_box with the highest iou
idx = np.argmax(Iou_)
assigned_gt = gts[idx]
x1, y1, x2, y2 = assigned_gt
# compute bbox reg label
offset_x1 = (x1 - x_left) / float(width)
offset_y1 = (y1 - y_top) / float(height)
offset_x2 = (x2 - x_right) / float(width)
offset_y2 = (y2 - y_bottom) / float(height)
# save positive and part-face images and write labels
if union_max >= 0.6:
#if np.max(Iou) >= 0.65:
#if union_max >= 0.7 and offset_y1>Y1_thresh and np.abs(offset_x1)<= X1_thresh:
save_file = get_path(pos_dir, "%s.jpg" % p_idx)
pos_file.write(save_file + ' 1 %.2f %.2f %.2f %.2f\n' % (
offset_x1, offset_y1, offset_x2, offset_y2))
cv2.imwrite(save_file, resized_im)
p_idx += 1
#elif np.max(Iou) >= 0.3:
elif union_max > 0.3 and union_max <=0.4:
#elif union_max <= 0.3 and union_max >0.1 and offset_y1 <Y1_thresh and np.abs(offset_x1)<= X1_thresh:
save_file = os.path.join(part_dir, "%s.jpg" % d_idx)
part_file.write(save_file + ' -1 %.2f %.2f %.2f %.2f\n' % (
offset_x1, offset_y1, offset_x2, offset_y2))
cv2.imwrite(save_file, resized_im)
d_idx += 1
print("%s images done, pos: %s part: %s neg: %s, pass: %s"%(image_done, p_idx, d_idx, n_idx,cnt_pass))
neg_file.close()
part_file.close()
pos_file.close()
print("neg image num: ",n_idx)
print("pos image num: ",p_idx)
print("pat image num: ",d_idx)
print("pass num : ",cnt_pass)
def rd_anotation(img_saved_dir,anno_file,data_set_name):
data = dict()
image_path_list = []
boxes_gd = []
with open(anno_file, 'r') as f:
annotations = f.readlines()
for annotation in annotations:
annotation = annotation.strip().split()
im_path = annotation[0]
if data_set_name == "WiderFace":
im_path = im_path +'.jpg'
im_path = os.path.join(img_saved_dir,im_path)
#print("img path ",im_path)
image_path_list.append(im_path)
#boxed change to float type
bbox = map(float, annotation[1:])
#print("box : ",bbox)
#gt
boxes = np.array(bbox, dtype=np.float32).reshape(-1, 4)
boxes_gd.append(boxes)
data['images'] = image_path_list
data['bboxes'] = boxes_gd
return data
def t_net(prefix, epoch,batch_size, img_saved_dir,anno_file,gen_anno_file,gen_imgs_dir,data_set_name,ignore_det=False,test_mode="PNet",thresh=[0.6, 0.6, 0.7], min_face_size=25,\
stride=2):
slide_window=False
detectors = [None, None, None]
print("Test model: ", test_mode)
#PNet-echo
print("epoch num ",epoch[0])
''' #for Pnet test
epoch_num = epoch[0]
epoch_c = np.arange(2,epoch_num,2)
prefix_c = []
prefix = prefix[0]
[prefix_c.append(prefix) for i in range(len(epoch_c))]
'''
print("prefixs is ",prefix)
model_path = ['%s-%s' % (x, y) for x, y in zip(prefix, epoch)]
#print("after zip model_path is ",model_path)
#model_path[0] = prefix + '-'+str(epoch_num) #for Pnet test
print("model_path 0 is ",model_path[0])
# load pnet model
if slide_window:
PNet = Detector(P_Net, 12, batch_size[0], model_path[0])
else:
PNet = FcnDetector(P_Net, model_path[0])
detectors[0] = PNet
# load rnet model
if test_mode in ["RNet", "ONet"]:
print("==================================", test_mode)
RNet = Detector(R_Net, 24, batch_size[1], model_path[1])
detectors[1] = RNet
# load onet model
if test_mode == "ONet":
print("==================================", test_mode)
ONet = Detector(O_Net, 48, batch_size[2], model_path[2])
detectors[2] = ONet
#read annatation(type:dict)
#img_box_dic = read_annotation(img_saved_dir,anno_file)
img_box_dic = rd_anotation(img_saved_dir,anno_file,data_set_name)
print("gen_hardexample threshold ",thresh)
if not ignore_det:
mtcnn_detector = MtcnnDetector(detectors=detectors, min_face_size=min_face_size,
stride=stride, threshold=thresh)
print("==================================")
    # note: this runs in "test" mode
# imdb = IMDB("wider", image_set, root_path, dataset_path, 'test')
# gt_imdb = imdb.gt_imdb()
test_data = TestLoader(img_box_dic['images'])
#list
if not ignore_det:
detections,_ = mtcnn_detector.detect_face(test_data)
if test_mode == "PNet":
save_net = "RNet"
save_path = '24/RNet'
elif test_mode == "RNet":
save_net = "ONet"
save_path = "48/ONet"
#save detect result
#save_path = os.path.join(data_dir, save_net)
print ("save path is",save_path)
if not os.path.exists(save_path):
        os.makedirs(save_path) # create intermediate dirs too (save_path is nested, e.g. '24/RNet')
save_file = os.path.join(save_path, "detections.pkl")
if not ignore_det:
with open(save_file, 'wb') as f:
pickle.dump(detections, f,1)
f.close()
print("%s Test is Over and begin OHEM" % image_size)
save_hard_example(gen_anno_file, gen_imgs_dir,img_box_dic, save_path,test_mode)
def parse_args():
parser = argparse.ArgumentParser(description='Test mtcnn',
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('--test_mode', dest='test_mode', help='test net type, can be PNet, RNet or ONet',
default='PNet', type=str)
parser.add_argument('--prefix', dest='prefix', help='prefix of model name', nargs="+",
default=["../data/MTCNN_model/PNet_landmark/v1_trained/PNet", "../data/MTCNN_model/RNet_landmark/RNet", "../data/MTCNN_model/ONet_landmark/ONet"],
type=str)
parser.add_argument('--epoch', dest='epoch', help='epoch number of model to load', nargs="+",
default=[32, 2900, 22], type=int)
parser.add_argument('--batch_size', dest='batch_size', help='list of batch size used in prediction', nargs="+",
default=[1, 2048, 16], type=int)
parser.add_argument('--thresh', dest='thresh', help='list of thresh for pnet, rnet, onet', nargs="+",
default=[0.4, 0.6, 0.7], type=float)
parser.add_argument('--min_face', dest='min_face', help='minimum face size for detection',
default=24, type=int)
parser.add_argument('--stride', dest='stride', help='stride of sliding window',
default=2, type=int)
parser.add_argument('--anno_file',type=str,default="./wider_face_train.txt",\
help='annotation saved file path')
parser.add_argument('--img_saved_dir',type=str,default="./WIDER_train/images/",\
help='images saved path')
    parser.add_argument('--pos_txt',type=str,default="pos24.txt",\
                        help='positive images annotation file')
    parser.add_argument('--neg_txt',type=str,default="neg24.txt",\
                        help='negative images annotation file')
    parser.add_argument('--part_txt',type=str,default="part24.txt",\
                        help='part images annotation file')
    parser.add_argument('--train_data_set',type=str,default="WiderFace",\
                        help='dataset the model will be trained on')
parser.add_argument('--ignore_det',type=bool,default=False,\
help='only run save_hard_example ')
return parser.parse_args()
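# Illustrative invocation (the script name and paths below are placeholders):
#   python <this_script>.py --test_mode RNet --thresh 0.4 0.6 0.7 \
#       --img_saved_dir ./WIDER_train/images/ --anno_file ./wider_face_train.txt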
if __name__ == '__main__':
args = parse_args()
#net = 'RNet'
img_saved_dir = args.img_saved_dir
anno_file = args.anno_file
neg_label_file = args.train_data_set+"_"+args.neg_txt
pos_label_file = args.train_data_set+"_"+args.pos_txt
part_label_file = args.train_data_set+"_"+args.part_txt
prefix = args.prefix
epoch_list = args.epoch
batch_size = args.batch_size
stride = args.stride
test_mode = args.test_mode
score_thresh = args.thresh
min_face_size = args.min_face
ignore_det = args.ignore_det
if args.test_mode == "ONet":
image_size = 48
if args.test_mode =="PNet":
net = "RNet"
elif args.test_mode == "RNet":
net = "ONet"
    if net
# -*- coding: utf-8 -*-
# pylint: skip-file
"""
This module provides methods for interacting with Hashicorp Vault via the HVAC
library.
"""
from __future__ import absolute_import
from builtins import super
import logging
import hashlib
import json
from collections import OrderedDict
log = logging.getLogger(__name__)
DEPS_INSTALLED = False
try:
import hvac
DEPS_INSTALLED = True
except ImportError:
pass
class InsufficientParameters(Exception):
pass
def __virtual__():
if DEPS_INSTALLED:
return "vault"
else:
return False, "Missing required dependency, `hvac`"
def get_policies_manager():
"""Retrieve an object containing helper methods for the policy manager
Returns:
[VaultPolicyManager] -- Policy Manager
"""
return VaultPolicyManager()
def get_secret_engines_manager():
"""Retrieve an object containing helper methods for the secrets engines manager
Returns:
[VaultSecretsManager] -- Secrets Engines Manager
"""
return VaultSecretsManager()
def get_auth_methods_manager():
"""Retrieve an object containing helper methods for the auth methods manager
Returns:
[VaultAuthManager] -- Auth Methods Manager
"""
return VaultAuthManager()
def get_audit_device_manager():
"""Retrieve an object containing helper methods for the audit device manager
Returns:
[VaultAuditManager] -- Audit Device Manager
"""
return VaultAuditManager()
def get_funcs_strategy(client, engine_type, engine, function):
"""Retrieve a function to setup Auth/Secrets Vault engines
Arguments:
client {hvac} -- hvac client
engine_type {string} - The type of engine (e.g., auth, secrets, audit)
engine {string} - The mechanism within the engine to configure
function {string} - The method within the engine needed
Returns:
[function] - configuration function for Auth/Secrets Vault engines
"""
funcs = {
"auth": {
"aws": {
"list": client.auth.aws.list_roles,
"create": client.auth.aws.create_role,
"delete": client.auth.aws.delete_role,
"configure": client.auth.aws.configure,
"key": "roles",
},
"ldap": {
"list": client.auth.ldap.list_groups,
"create": client.auth.ldap.create_or_update_group,
"delete": client.auth.ldap.delete_group,
"configure": client.auth.ldap.configure,
"key": "group_policy_map",
},
},
"secrets": {
"kv": {
"list": client.secrets.kv.list_secrets,
"configure": client.secrets.kv.configure,
"create": client.secrets.kv.create_or_update_secret,
},
"ad": {
"list": client.secrets.activedirectory.list_roles,
"create": client.secrets.activedirectory.create_or_update_role,
"delete": client.secrets.activedirectory.delete_role,
"configure": client.secrets.activedirectory.configure,
},
"database": {"configure": client.secrets.database.configure},
},
}
try:
return funcs[engine_type][engine][function]
except KeyError:
raise NotImplementedError("Functionality has not yet been implemented")
class VaultConfigBase(object):
type = None
path = None
description = None
config = None
def __init__(self, type, path, description, config):
"""Initialize classs
Arguments:
type {string} -- The type of the config
path {string} -- The path in which to enable the config
            description {string} -- A human-friendly description
            config {dict} -- Configuration options for the mount (may be empty)
        """
config = config or {}
self.type = type
# Vault CLI treats a double forward slash ('//') as a single forward slash for a given path.
# To avoid issues with the requests module's redirection logic, we perform the same translation here.
self.path = str(path).replace("//", "/").strip("/")
self.description = description if description else ""
self.config = {k: v for k, v in config.items() if v != ""}
def get_unique_id(self):
"""Return a unique hash of the config by only using the type and path
Returns:
string -- unique hash of the type and path
"""
return self.hash_value(self.type + self.path)
def get_tuning_hash(self):
"""Return a unique ID per tuning configuration
Returns:
string -- unique hash of the configuration
"""
return self.hash_value(self.description + str(self.config))
def hash_value(self, value):
return hashlib.sha256(value.encode()).hexdigest()
def __eq__(self, other):
return self.get_unique_id() == other.get_unique_id()
class VaultAuthMethod(VaultConfigBase):
auth_config = None
extra_config = None
def __init__(
self, type, path, description, config=None, auth_config=None, extra_config=None
):
super().__init__(type, path, description, config)
self.auth_config = auth_config or {}
self.extra_config = extra_config or {}
class VaultSecretEngine(VaultConfigBase):
secret_config = None
extra_config = None
def __init__(
self,
type,
path,
description,
config=None,
secret_config=None,
extra_config=None,
):
super().__init__(type, path, description, config)
self.secret_config = secret_config or {}
self.extra_config = extra_config or {}
class VaultAuditDevice(VaultConfigBase):
def __init__(self, type, path, description, config=None):
super().__init__(type, path, description, config)
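# Illustrative construction of the config objects defined above (placeholder values):
#   method = VaultAuthMethod("ldap", "ldap/", "LDAP auth",
#                            config={"default_lease_ttl": "1h", "max_lease_ttl": "24h"})
#   method.get_unique_id()   # sha256 over type + normalized path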
class VaultPolicyManager:
"""Module for handling Vault Policies"""
def __init__(self):
"""Initialize Vault Policies Manager"""
log.info("Initializing Vault Policies Manager...")
def push_policies(self, client, remote_policies, local_policies):
"""Push policies from local config to remote vault server
Arguments:
client {hvac} -- hvac client
remote_policies {dict} -- policies from the remote vault server
local_policies {dict} -- policies from local config
ret {dict} -- salt state result
"""
log.info("Pushing policies from local config folder to vault...")
for name, policy in local_policies.items():
client.sys.create_or_update_policy(name, policy)
log.debug(
'Policy ["%s"] has been %s.',
name,
"updated" if name in remote_policies else "created",
)
log.info("Finished pushing policies local config folder to vault.")
def cleanup_policies(self, client, remote_policies, local_policies):
"""Removes policies that are not present in the local config
Arguments:
client {hvac} -- hvac client
remote_policies {dict} -- policies current on the remote vault server
local_policies {dict} --policies from local config
ret {dict} -- salt state result
"""
log.info("Cleaning up vault policies...")
for policy in remote_policies:
if policy not in local_policies:
log.debug(
'Policy ["%s"] is not found in local config. '
"Removing it from vault...",
policy,
)
client.sys.delete_policy(policy)
log.debug('Policy ["%s"] is removed.', policy)
log.info("Finished cleaning up vault policies.")
class VaultAuthManager:
"""Module for managing Vault Authentication Methods"""
def __init__(self):
"""Initialize Authentication Manager"""
log.info("Initializing Vault Auth Manager...")
def populate_remote_auth_methods(self, methods):
"""Populating authentication methods from remote vault server
Arguments:
methods {dict} -- authentication methods configuration from remote vault server
Returns:
dict -- auth methods
"""
log.info("Populating auth methods from Vault...")
auth_methods = []
for auth_method in methods:
auth_methods.append(
VaultAuthMethod(
type=methods[auth_method]["type"],
path=(
methods[auth_method]["path"]
if "path" in methods[auth_method]
else auth_method
),
description=methods[auth_method]["description"],
config=OrderedDict(sorted(methods[auth_method]["config"].items())),
)
)
log.info("Finished populating auth methods from Vault.")
return auth_methods
def populate_local_auth_methods(self, configs):
"""Get auth methods from local config
Arguments:
configs {dict} -- auth methods information
Returns:
dict -- auth methods
"""
log.info("Populating local auth methods...")
auth_methods = []
for auth_method in configs:
config = OrderedDict(sorted(auth_method.get("config", {}).items()))
auth_config = OrderedDict(
sorted(auth_method.get("auth_config", {}).items())
)
extra_config = OrderedDict(
sorted(auth_method.get("extra_config", {}).items())
)
auth_methods.append(
VaultAuthMethod(
type=auth_method["type"],
path=auth_method["path"],
description=auth_method["description"],
config=config,
auth_config=auth_config,
extra_config=extra_config,
)
)
log.info("Finished populating local auth methods.")
return auth_methods
def configure_auth_methods(self, client, remote_methods, local_methods):
"""Compare and configure local authentication methods with remote vault server
Arguments:
client {hvac} -- hvac client
remote_methods {dict} -- auth methods from remote vault server
local_methods {dict} -- auth methods from local config
"""
log.info("Processing and configuring auth methods...")
for auth_method in local_methods:
log.debug('Checking if auth method "%s" is enabled...', auth_method.path)
if auth_method in remote_methods:
log.debug(
'Auth method "%s" is already enabled. ' "Tuning...",
auth_method.path,
)
client.sys.tune_auth_method(
path=auth_method.path,
description=auth_method.description,
default_lease_ttl=auth_method.config["default_lease_ttl"],
max_lease_ttl=auth_method.config["max_lease_ttl"],
)
log.debug('Auth method "%s" is tuned.', auth_method.type)
else:
log.debug(
'Auth method "%s" is not enabled. ' "Enabling now...",
auth_method.path,
)
client.sys.enable_auth_method(
method_type=auth_method.type,
path=auth_method.path,
description=auth_method.description,
config=auth_method.config,
)
log.debug('Auth method "%s" is enabled.', auth_method.type)
remote_extra_config = []
# Provision config for specific auth method
if auth_method.auth_config:
log.debug("Provisioning configuration for %s...", auth_method.type)
get_funcs_strategy(client, "auth", auth_method.type, "configure")(
**auth_method.auth_config
)
log.debug("Configuration for %s is provisioned.", auth_method.type)
else:
log.debug(
'Auth method "%s" does not contain any %s configurations '
"from pillar.",
auth_method.type,
auth_method.type,
)
# Retrieve extra configuration from vault
try:
log.debug(
'Retrieving extra configuration from Vault for auth method "%s"...',
auth_method.type,
)
# get the list function for the specified auth module
remote_extra_config = get_funcs_strategy(
client, "auth", auth_method.type, "list"
)(auth_method.path)
if auth_method.type in ["ldap"]:
remote_extra_config = remote_extra_config["data"]["keys"]
else:
remote_extra_config = remote_extra_config["keys"]
log.debug(
'Provisioned extra configuration for auth method "%s": %s',
auth_method.path,
",".join(remote_extra_config),
)
except hvac.exceptions.InvalidPath:
pass
except NotImplementedError:
log.debug(
'No methods defined to retrieve extra configuration for auth method "%s"...',
auth_method.type,
)
# Provision extra config for specific auth method
if auth_method.extra_config:
log.debug(
"Provisioning extra configurations for " 'auth method "%s"...',
auth_method.type,
)
# Update groups/roles mapping
for item, config in auth_method.extra_config[
get_funcs_strategy(client, "auth", auth_method.type, "key")
].items():
log.debug('"%s" -> Config %s', str(item), config)
                    # add mount_point to each config entry
config.update({"mount_point": auth_method.path})
# call api to update the config
get_funcs_strategy(client, "auth", auth_method.type, "create")(
**config
)
log.debug(
"Finish provisioning extra configurations for "
'auth method "%s"...',
auth_method.type,
)
else:
log.debug(
'Auth method "%s" does not contain '
"any extra configurations from pillar.",
auth_method.type,
)
# Clean up groups/role mapping
if remote_extra_config:
log.debug(
'Cleaning up auth method "%s" extra configuration ' "from Vault...",
auth_method.type,
)
for item in remote_extra_config:
if (
item
in auth_method.extra_config.get(
get_funcs_strategy(client, "auth", auth_method.type, "key"),
{},
).keys()
):
log.debug(
'"%s" exists in local configuration, '
"no cleanup necessary",
item,
)
else:
log.debug(
'"%s" does not exist in configuration, ' "deleting...", item
)
get_funcs_strategy(client, "auth", auth_method.type, "delete")(
item, auth_method.path
)
log.debug('"%s" is deleted.', item)
log.debug(
'Finished cleaning up auth method "%s" extra configuration.',
auth_method.type,
)
log.info("Finished processing and configuring auth methods...")
def cleanup_auth_methods(self, client, remote_methods, local_methods):
"""Disabling any auth methods not present in the local config
Arguments:
client {hvac} -- hvac client
            remote_methods {dict} -- auth
"""Creates various Sklearn models"""
import math
import pickle
import keras
from keras import backend as K
from keras.callbacks import EarlyStopping
from keras.callbacks import History
from keras.callbacks import ReduceLROnPlateau
from keras.engine.topology import Layer
from keras.layers import AveragePooling2D
from keras.layers import BatchNormalization
from keras.layers import Concatenate
from keras.layers import Conv2D
from keras.layers import Dense
from keras.layers import Dropout
from keras.layers import Flatten
from keras.layers import Input
from keras.layers import Lambda
from keras.layers import MaxPooling2D
from keras.layers import multiply
from keras.models import load_model
from keras.models import Model
from keras.models import model_from_json
import matplotlib.pyplot as plt
import numpy as np
from sklearn.ensemble import RandomForestClassifier
from sklearn.linear_model import LogisticRegression
from sklearn.svm import LinearSVC
from sklearn.utils.class_weight import compute_class_weight
from sklearn.utils import shuffle
import tensorflow as tf
K.set_image_dim_ordering('tf')
K.set_image_data_format('channels_last')
class SklearnModel:
"""Wrapper for sklearn classifiers.
    This creates a wrapper that can be instantiated with any sklearn
    classifier that takes input data with shape (samples, features)
    and label data with shape (samples,). Its fit method generates a
    trained model, which can be saved and reloaded with the load
    method at any point in the script.
"""
def __init__(self, Model, datagen=None, nb_augment=None, seed=0, **kwargs):
"""Attributes:
Model: Sklearn classifier.
datagen: The output of the data_gen function to apply random
augmentations to our data in real time
(a keras.preprocessing.image.ImageDataGenerator object)
nb_augment: Int. Factor by which the number of samples is
increased by random augmentations.
seed: Seed value to consistently initialize the random number
generator.
**kwargs: Keyword arguments passed to sklearn.
"""
self.Model = Model
self.datagen = datagen
self.nb_augment = nb_augment
self.seed = seed
self.kwargs = kwargs
self.model = None
self.path = None
self.name = Model.__name__
def fit(self, train_x, train_y):
"""Trains sklearn model.
# Arguments:
train_x: Array of images with shape [samples, dim, dim, 1].
train_y: Array of labels with shape [samples ,].
# Returns:
self
"""
# Shuffling
train_x, train_y = shuffle(train_x, train_y, random_state=self.seed)
# Augmenting data
if self.datagen is not None:
train_x, train_y = self.augment(train_x, train_y, batch_size=
train_x.shape[0], nb_augment=self.nb_augment)
# Shuffling
train_x, train_y = shuffle(train_x, train_y, random_state=self.seed)
# Flattening images
train_x = np.reshape(train_x, (np.shape(train_x)[0], -1))
try:
model = self.Model(random_state=self.seed, class_weight='balanced',
**self.kwargs)
except TypeError:
try:
model = self.Model(class_weight='balanced', **self.kwargs)
except TypeError:
model = self.Model(**self.kwargs)
model = model.fit(train_x, train_y)
self.model = model
return self
def predict_proba(self, test_x):
""" Probability estimates for samples.
# Arguments:
test_x: Array of images with shape [samples, dim, dim, 1].
# Returns:
predictions: Probability estimates for test_x.
"""
# Flattening images
test_x = np.reshape(test_x, (np.shape(test_x)[0], -1))
predictions = self.model.predict_proba(test_x)
return predictions
def predict(self, test_x):
"""Predicting class labels for samples in test_x.
# Arguments:
test_x: Array of images with shape [samples, dim, dim, 1].
# Returns:
predictions: Class predictions for test_x.
"""
predictions = self.predict_proba(test_x)
predictions = np.around(predictions)
return predictions
def save(self, path=None):
"""Saves model as pickle file.
# Arguments:
path: File path to save the model. Must be a .pk file.
# Returns:
self
"""
self.path = path
with open(path, 'wb') as f:
pickle.dump(self.model, f)
return self
def load(self, path=None):
"""Loads trained Sklearn Model from disk.
# Arguments:
            path: File path to load the saved model from.
# Returns:
self
"""
if path is None:
if self.path is not None:
path = self.path
else:
print ("no model can be found")
with open(path, 'rb') as f:
model = pickle.load(f)
self.model = model
return self
def augment(self, images, labels, datagen=None, batch_size=32, nb_augment=None):
"""Augments data for Sklearn models.
Using base set of images, a random combination is augmentations within
a defined range is applied to generate a new batch of data.
# Arguments
images: Array of images with shape (samples, dim, dim, 1)
labels: Array of labels
datagen: The data generator outputed by the data_gen function.
batch_size: Number of sample per batch.
            nb_augment: Factor by which the data is increased via augmentation.
seed: Seed value to consistently initialize the random number
generator.
# Returns
Array of augmented images and their corresponding labels.
"""
if nb_augment is None:
nb_augment = self.nb_augment
if datagen is None:
datagen = self.datagen
# Number of images
samples = np.shape(images)[0]
# the .flow() command below generates batches of randomly transformed images
gen = datagen.flow(images, labels, batch_size=batch_size, shuffle=True, seed=self.seed)
# Generate empty data arrays
pro_images = np.zeros((images.shape[0] * nb_augment, images.shape[1],
images.shape[2], 1))
pro_labels = np.zeros((labels.shape[0] * nb_augment))
for step in range(1, nb_augment+1):
batch = 1
b = batch_size
b_start = samples * (step-1)
for X_batch, Y_batch in gen:
if batch < (samples / b):
cut_start = b_start + b * (batch-1)
cut_stop = b_start + batch * b
pro_images[cut_start:cut_stop, :, :, :] = X_batch
pro_labels[cut_start:cut_stop] = Y_batch
elif batch == samples // b:
break
else:
cut_start = b_start + b * (batch-1)
cut_stop = b_start + b * (batch-1) + X_batch.shape[0] % b
pro_images[cut_start:cut_stop, :, :, :] = X_batch
pro_labels[cut_start:cut_stop] = Y_batch
break
batch += 1
return pro_images, pro_labels
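# Illustrative end-to-end use of the wrapper above (array shapes and the file name
# are placeholders):
#   clf = SklearnModel(RandomForestClassifier, datagen=None, nb_augment=None, seed=0)
#   clf.fit(train_x, train_y).save("rf_model.pk")
#   probs = clf.load("rf_model.pk").predict_proba(test_x)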
class HOGNet:
"""Wrapper for our hognet keras model.
This creates a class that acts as a wrapper around our custom
keras model, aka hognet. The train method trains the model on our
data generator to our specifed training paramerts.
Using the load method, we can load the fully trained model from the
disk at any point in the script. This method can be useful when using
notebooks, as training time can be significant.
"""
def __init__(self, datagen=None, batch_size=32, steps_per_epoch=50,
max_epoch=100, patience=5, gap=2, seed=None):
"""Attributes:
name: name of the file at which the model is saved.
No file extension.
datagen: The output of the data_gen function to apply random
augmentations to our data in real time
(a keras.preprocessing.image.ImageDataGenerator object)
batch_size: number of images per batch (i.e number of images
generated per batch)
steps_per_epoch: number of batchs per step (i.e number of
batches generated by datagen per step)
max_epoch: maximum number of epochs the model for. The model
should stop training automatically when the loss stops
decreasing.
patience: number of epochs with no improvement after which
training will be stopped.
gap: Number of layers that have thier weights unfrozen per
training cycle.
seed: Seed value to consistently initialize the random number
generator.
"""
self.datagen = datagen
self.batch_size = batch_size
self.steps_per_epoch = steps_per_epoch
self.max_epoch = max_epoch
self.patience = patience
self.gap = gap
self.seed = seed
self.model = None
self.history = None
self.class_weight = None
self.prewitt_x = None
self.prewitt_y = None
self.cent = None
self.name = "HOGNet"
# Setting random number generator seeds for numpy and tensorflow
np.random.seed(self.seed)
tf.set_random_seed(self.seed)
# Model HyperParameters
# The following values are base on both past research and much testing
bins = 8 # number of bins in histogram
cell_dim = 8 # height and width of the cells
block_dim = 2 # if changed, must add more block layers. Don't attempt.
bs = bin_stride_length = 1
# Number of cells along each dim
cell_nb = 256 // cell_dim
assert not 256 % cell_dim
# Defining Values
w = 2*np.pi/bins # width of each bin
centers = np.arange(-np.pi, np.pi, w) + 0.5 * w # centers of each bin
# Weights for the x and y convolutions to calculate image gradients
prewitt_x = np.array([[-1, 0, 1], [-1, 0, 1], [-1, 0, 1]])
prewitt_y = np.array([[-1, -1, -1], [0, 0, 0], [1, 1, 1]])
# Reshaping Prewitt opperators to required shape
self.prewitt_x = prewitt_x.reshape((1, 3, 3, 1, 1)).astype('float64')
self.prewitt_y = prewitt_y.reshape((1, 3, 3, 1, 1)).astype('float64')
# Adding tiny gaussian noise
self.prewitt_x += 0.01 * np.random.randn(1, 3, 3, 1, 1)
self.prewitt_y += 0.01 * np.random.randn(1, 3, 3, 1, 1)
# Generating weights for histogram construction
self.cent = np.vstack((np.sin(centers), np.cos(centers)))
self.cent = self.cent.reshape((1, 1, 1, 2, bins))
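        # For the defaults above (bins=8): w = 2*pi/8 = pi/4, so the bin centers run
        # from -7*pi/8 to 7*pi/8 in steps of pi/4; self.cent stacks their sines and
        # cosines for the histogram construction (a reading of the code, not new behaviour).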
# Generating Filters for the block Operations
def create_block_filters(block_dim):
filters = np.zeros((block_dim ** 2, block_dim, block_dim))
count = 0
for i in range(block_dim):
for j in range(block_dim):
filters[count, i, j] = 1
count += 1
            return filters
self.toons.append(avId)
toon = simbase.air.doId2do.get(avId)
if toon:
if hasattr(self, 'doId'):
toon.b_setBattleId(self.doId)
else:
toon.b_setBattleId(-1)
messageToonAdded = 'Battle adding toon %s' % avId
messenger.send(messageToonAdded, [avId])
if self.fsm != None and self.fsm.getCurrentState().getName() == 'PlayMovie':
self.responses[avId] = 1
else:
self.responses[avId] = 0
self.adjustingResponses[avId] = 0
if avId not in self.toonExp:
p = []
for t in Tracks:
p.append(toon.experience.getExp(t))
self.toonExp[avId] = p
if avId not in self.toonOrigMerits:
self.toonOrigMerits[avId] = toon.cogMerits[:]
if avId not in self.toonMerits:
self.toonMerits[avId] = [0,
0,
0,
0,
0]
if avId not in self.toonOrigQuests:
flattenedQuests = []
for quest in toon.quests:
flattenedQuests.extend(quest)
self.toonOrigQuests[avId] = flattenedQuests
if avId not in self.toonItems:
self.toonItems[avId] = ([], [])
return 1
def __joinToon(self, avId, pos):
self.joiningToons.append(avId)
toPendingTime = MAX_JOIN_T + SERVER_BUFFER_TIME
taskName = self.taskName('to-pending-av-%d' % avId)
self.__addJoinResponse(avId, taskName, toon=1)
taskMgr.doMethodLater(toPendingTime, self.__serverJoinDone, taskName, extraArgs=(avId, taskName))
self.taskNames.append(taskName)
def __updateEncounteredCogs(self):
for toon in self.activeToons:
if toon in self.newToons:
for suit in self.activeSuits:
if hasattr(suit, 'dna'):
self.suitsEncountered.append({'type': suit.dna.name,
'activeToons': self.activeToons[:]})
else:
self.notify.warning('Suit has no DNA in zone %s: toons involved = %s' % (self.zoneId, self.activeToons))
return
self.newToons.remove(toon)
for suit in self.activeSuits:
if suit in self.newSuits:
if hasattr(suit, 'dna'):
self.suitsEncountered.append({'type': suit.dna.name,
'activeToons': self.activeToons[:]})
else:
self.notify.warning('Suit has no DNA in zone %s: toons involved = %s' % (self.zoneId, self.activeToons))
return
self.newSuits.remove(suit)
def __makeToonRun(self, toonId, updateAttacks):
self.activeToons.remove(toonId)
self.toonGone = 1
self.runningToons.append(toonId)
taskName = self.taskName('running-toon-%d' % toonId)
taskMgr.doMethodLater(TOON_RUN_T, self.__serverRunDone, taskName, extraArgs=(toonId, updateAttacks, taskName))
self.taskNames.append(taskName)
def __serverRunDone(self, toonId, updateAttacks, taskName):
self.notify.debug('run for toon: %d timed out on server' % toonId)
self.__removeTaskName(taskName)
self.__removeToon(toonId)
self.d_setMembers()
if len(self.toons) == 0:
self.notify.debug('last toon is gone - battle is finished')
self.b_setState('Resume')
else:
if updateAttacks == 1:
self.d_setChosenToonAttacks()
self.needAdjust = 1
self.__requestAdjust()
return Task.done
def __requestAdjust(self):
if not self.fsm:
return
cstate = self.fsm.getCurrentState().getName()
if cstate == 'WaitForInput' or cstate == 'WaitForJoin':
if self.adjustFsm.getCurrentState().getName() == 'NotAdjusting':
if self.needAdjust == 1:
self.d_adjust()
self.adjustingSuits = []
for s in self.pendingSuits:
self.adjustingSuits.append(s)
self.adjustingToons = []
for t in self.pendingToons:
self.adjustingToons.append(t)
self.adjustFsm.request('Adjusting')
else:
self.notify.debug('requestAdjust() - dont need to')
else:
self.notify.debug('requestAdjust() - already adjusting')
else:
self.notify.debug('requestAdjust() - in state: %s' % cstate)
def __handleUnexpectedExit(self, avId):
#TODO: fixme
#disconnectCode = self.air.getAvatarDisconnectReason(avId)
disconnectCode = "placeHolder dc code, need self.air.getAvatarDisconnectReason(avId)"
self.notify.warning('toon: %d exited unexpectedly, reason %s' % (avId, disconnectCode))
#userAborted = disconnectCode == ToontownGlobals.DisconnectCloseWindow
#TODO: fixme
userAborted = False
self.__handleSuddenExit(avId, userAborted)
def __handleSuddenExit(self, avId, userAborted):
self.__removeToon(avId, userAborted=userAborted)
if self.fsm.getCurrentState().getName() == 'PlayMovie' or self.fsm.getCurrentState().getName() == 'MakeMovie':
self.exitedToons.append(avId)
self.d_setMembers()
if len(self.toons) == 0:
self.notify.debug('last toon is gone - battle is finished')
self.__removeAllTasks()
self.timer.stop()
self.adjustingTimer.stop()
self.b_setState('Resume')
else:
self.needAdjust = 1
self.__requestAdjust()
def __removeSuit(self, suit):
self.notify.debug('__removeSuit(%d)' % suit.doId)
self.suits.remove(suit)
self.activeSuits.remove(suit)
if self.luredSuits.count(suit) == 1:
self.luredSuits.remove(suit)
self.suitGone = 1
del suit.battleTrap
def __removeToon(self, toonId, userAborted = 0):
self.notify.debug('__removeToon(%d)' % toonId)
if self.toons.count(toonId) == 0:
return
self.battleCalc.toonLeftBattle(toonId)
self.__removeToonTasks(toonId)
self.toons.remove(toonId)
if self.joiningToons.count(toonId) == 1:
self.joiningToons.remove(toonId)
if self.pendingToons.count(toonId) == 1:
self.pendingToons.remove(toonId)
if self.activeToons.count(toonId) == 1:
activeToonIdx = self.activeToons.index(toonId)
self.notify.debug('removing activeToons[%d], updating suitAttacks SUIT_HP_COL to match' % activeToonIdx)
for i in range(len(self.suitAttacks)):
if activeToonIdx < len(self.suitAttacks[i][SUIT_HP_COL]):
del self.suitAttacks[i][SUIT_HP_COL][activeToonIdx]
else:
self.notify.warning("suitAttacks %d doesn't have an HP column for active toon index %d" % (i, activeToonIdx))
self.activeToons.remove(toonId)
if self.runningToons.count(toonId) == 1:
self.runningToons.remove(toonId)
if self.adjustingToons.count(toonId) == 1:
self.notify.warning('removeToon() - toon: %d was adjusting!' % toonId)
self.adjustingToons.remove(toonId)
self.toonGone = 1
if toonId in self.pets:
self.pets[toonId].requestDelete()
del self.pets[toonId]
self.__removeResponse(toonId)
self.__removeAdjustingResponse(toonId)
self.__removeJoinResponses(toonId)
event = simbase.air.getAvatarExitEvent(toonId)
self.avatarExitEvents.remove(event)
self.ignore(event)
event = 'inSafezone-%s' % toonId
self.avatarExitEvents.remove(event)
self.ignore(event)
toon = simbase.air.doId2do.get(toonId)
if toon:
toon.b_setBattleId(0)
messageToonReleased = 'Battle releasing toon %s' % toon.doId
messenger.send(messageToonReleased, [toon.doId])
if not userAborted:
toon = self.getToon(toonId)
if toon != None:
toon.hpOwnedByBattle = 0
toon.d_setHp(toon.hp)
toon.d_setInventory(toon.inventory.makeNetString())
self.air.cogPageManager.toonEncounteredCogs(toon, self.suitsEncountered, self.getTaskZoneId())
elif len(self.suits) > 0 and not self.streetBattle:
self.notify.info('toon %d aborted non-street battle; clearing inventory and hp.' % toonId)
toon = DistributedToonAI.DistributedToonAI(self.air)
toon.doId = toonId
empty = InventoryBase.InventoryBase(toon)
toon.b_setInventory(empty.makeNetString())
toon.b_setHp(0)
db = DatabaseObject.DatabaseObject(self.air, toonId)
db.storeObject(toon, ['setInventory', 'setHp'])
self.notify.info('killing mem leak from temporary DistributedToonAI %d' % toonId)
toon.deleteDummy()
def getToon(self, toonId):
if toonId in self.air.doId2do:
return self.air.doId2do[toonId]
else:
self.notify.warning('getToon() - toon: %d not in repository!' % toonId)
return
def toonRequestRun(self):
toonId = self.air.getAvatarIdFromSender()
if self.ignoreResponses == 1:
self.notify.debug('ignoring response from toon: %d' % toonId)
return
self.notify.debug('toonRequestRun(%d)' % toonId)
if not self.isRunable():
self.notify.warning('toonRequestRun() - not runable')
return
updateAttacks = 0
if self.activeToons.count(toonId) == 0:
self.notify.warning('toon tried to run, but not found in activeToons: %d' % toonId)
return
for toon in self.activeToons:
if toon in self.toonAttacks:
ta = self.toonAttacks[toon]
track = ta[TOON_TRACK_COL]
level = ta[TOON_LVL_COL]
if ta[TOON_TGT_COL] == toonId or track == HEAL and attackAffectsGroup(track, level) and len(self.activeToons) <= 2:
healerId = ta[TOON_ID_COL]
self.notify.debug('resetting toon: %ds attack' % healerId)
self.toonAttacks[toon] = getToonAttack(toon, track=UN_ATTACK)
self.responses[healerId] = 0
updateAttacks = 1
self.__makeToonRun(toonId, updateAttacks)
self.d_setMembers()
self.needAdjust = 1
self.__requestAdjust()
def toonRequestJoin(self, x, y, z):
toonId = self.air.getAvatarIdFromSender()
self.notify.debug('toonRequestJoin(%d)' % toonId)
self.signupToon(toonId, x, y, z)
def toonDied(self):
toonId = self.air.getAvatarIdFromSender()
self.notify.debug('toonDied(%d)' % toonId)
if toonId in self.toons:
toon = self.getToon(toonId)
if toon:
toon.hp = -1
toon.inventory.zeroInv(1)
self.__handleSuddenExit(toonId, 0)
def signupToon(self, toonId, x, y, z):
if self.toons.count(toonId):
return
if self.toonCanJoin():
if self.addToon(toonId):
self.__joinToon(toonId, Point3(x, y, z))
self.d_setMembers()
else:
self.notify.warning('toonRequestJoin() - not joinable')
self.d_denyLocalToonJoin(toonId)
def d_denyLocalToonJoin(self, toonId):
self.notify.debug('network: denyLocalToonJoin(%d)' % toonId)
self.sendUpdateToAvatarId(toonId, 'denyLocalToonJoin', [])
def resetResponses(self):
self.responses = {}
for t in self.toons:
self.responses[t] = 0
self.ignoreResponses = 0
def allToonsResponded(self):
for t in self.toons:
if self.responses[t] == 0:
return 0
self.ignoreResponses = 1
return 1
def __allPendingActiveToonsResponded(self):
for t in self.pendingToons + self.activeToons:
if self.responses[t] == 0:
return 0
self.ignoreResponses = 1
return 1
def __allActiveToonsResponded(self):
for t in self.activeToons:
if self.responses[t] == 0:
return 0
self.ignoreResponses = 1
return 1
def __removeResponse(self, toonId):
del self.responses[toonId]
if self.ignoreResponses == 0 and len(self.toons) > 0:
currStateName = self.fsm.getCurrentState().getName()
if currStateName == 'WaitForInput':
if self.__allActiveToonsResponded():
self.notify.debug('removeResponse() - dont wait for movie')
self.__requestMovie()
elif currStateName == 'PlayMovie':
if self.__allPendingActiveToonsResponded():
self.notify.debug('removeResponse() - surprise movie done')
self.__movieDone()
elif currStateName == 'Reward' or currStateName == 'BuildingReward':
if self.__allActiveToonsResponded():
self.notify.debug('removeResponse() - surprise reward done')
self.handleRewardDone()
def __resetAdjustingResponses(self):
self.adjustingResponses = {}
for t in self.toons:
self.adjustingResponses[t] = 0
self.ignoreAdjustingResponses = 0
def __allAdjustingToonsResponded(self):
for t in self.toons:
if self.adjustingResponses[t] == 0:
return 0
self.ignoreAdjustingResponses = 1
return 1
def __removeAdjustingResponse(self, toonId):
if toonId in self.adjustingResponses:
del self.adjustingResponses[toonId]
if self.ignoreAdjustingResponses == 0 and len(self.toons) > 0:
if self.__allAdjustingToonsResponded():
self.__adjustDone()
def __addJoinResponse(self, avId, taskName, toon = 0):
if toon == 1:
for jr in self.joinResponses.values():
jr[avId] = 0
self.joinResponses[avId] = {}
for t in self.toons:
self.joinResponses[avId][t] = 0
self.joinResponses[avId]['taskName'] = taskName
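        # Shape of self.joinResponses built here (an observation of this code, not a
        # documented API): { joiningAvId: { <each toonId>: 0 or 1, 'taskName': <task name> } }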
def __removeJoinResponses(self, avId):
self.__removeJoinResponse(avId)
removedOne = 0
for j in self.joinResponses.values():
if avId in j:
del j[avId]
removedOne = 1
if removedOne == 1:
for t in self.joiningToons:
if self.__allToonsRespondedJoin(t):
self.__makeAvPending(t)
def __removeJoinResponse(self, avId):
if avId in self.joinResponses:
taskMgr.remove(self.joinResponses[avId]['taskName'])
del self.joinResponses[avId]
def __allToonsRespondedJoin(self, avId):
jr = self.joinResponses[avId]
for t in self.toons:
if jr[t] == 0:
return 0
return 1
def __cleanupJoinResponses(self):
for jr in self.joinResponses.values():
taskMgr.remove(jr['taskName'])
del jr
def adjustDone(self):
toonId = self.air.getAvatarIdFromSender()
if self.ignoreAdjustingResponses == 1:
self.notify.debug('adjustDone() - ignoring toon: %d' % toonId)
return
elif self.adjustFsm.getCurrentState().getName() != 'Adjusting':
self.notify.warning('adjustDone() - in state %s' % self.fsm.getCurrentState().getName())
return
elif self.toons.count(toonId) == 0:
self.notify.warning('adjustDone() - toon: %d not in toon list' % toonId)
return
self.adjustingResponses[toonId] += 1
self.notify.debug('toon: %d done adjusting' % toonId)
if self.__allAdjustingToonsResponded():
self.__adjustDone()
def timeout(self):
toonId = self.air.getAvatarIdFromSender()
if self.ignoreResponses == 1:
self.notify.debug('timeout() - ignoring toon: %d' % toonId)
return
elif self.fsm.getCurrentState().getName() != 'WaitForInput':
self.notify.warning('timeout() - in state: %s' % self.fsm.getCurrentState().getName())
return
elif self.toons.count(toonId) == 0:
self.notify.warning('timeout() - toon: %d not in toon list' % toonId)
return
self.toonAttacks[toonId] = getToonAttack(toonId)
self.d_setChosenToonAttacks()
self.responses[toonId] += 1
self.notify.debug('toon: %d timed out' % toonId)
if self.__allActiveToonsResponded():
self.__requestMovie(timeout=1)
def movieDone(self):
toonId = self.air.getAvatarIdFromSender()
if self.ignoreResponses == 1:
self.notify.debug('movieDone() - ignoring toon: %d' % toonId)
return
elif self.fsm.getCurrentState().getName() != 'PlayMovie':
self.notify.warning('movieDone() - in state %s' % self.fsm.getCurrentState().getName())
return
elif self.toons.count(toonId) == 0:
self.notify.warning('movieDone() - toon: %d not in toon list' % toonId)
return
self.responses[toonId] += 1
self.notify.debug('toon: %d done with movie' % toonId)
if self.__allPendingActiveToonsResponded():
self.__movieDone()
else:
self.timer.stop()
self.timer.startCallback(TIMEOUT_PER_USER, self.__serverMovieDone)
def rewardDone(self):
toonId = self.air.getAvatarIdFromSender()
stateName = self.fsm.getCurrentState().getName()
if self.ignoreResponses == 1:
self.notify.debug('rewardDone() - ignoring toon: %d' % toonId)
return
        elif
status = status and result
return status, output_dict
@mockready
def send_command(self, command, system_name, session_name=None,
start_prompt='.*', end_prompt='.*', int_timeout=60):
"""Sends a command to a system or a subsystem
:Arguments:
1. command(string) = the command to be sent to the system
2. system_name (string) = This can be name of the\
system or a subsystem. In case of subsystem only\
single subsystem is supported. Format for subsystem\
is "system_name[subsystem_name]"
3. session_name(string) = name of the session to the system
4. start_prompt(string) = starting prompt for the command
5. end_prompt(string) = ending prompt for the command
6. int_timeout (integer) = timeout for the command
:Returns:
1. command_status(bool)
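        :Example:
            (illustrative sketch only; the system, subsystem, session and
            command names below are hypothetical and must exist in the
            input datafile)
            # send a command to a system over an existing session
            status = self.send_command("show version", "server1", session_name="sess1")
            # send a command to a single subsystem using the documented format
            status = self.send_command("show status", "server1[subsystem1]")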
"""
wdesc = "Send cli command to the provided system"
Utils.testcase_Utils.pSubStep(wdesc)
Utils.testcase_Utils.pNote(system_name)
Utils.testcase_Utils.pNote(self.datafile)
session_id = Utils.data_Utils.get_session_id(system_name, session_name)
session_object = Utils.data_Utils.get_object_from_datarepository(session_id)
if session_object:
if isinstance(session_object, WarriorCli):
command_status, _ = session_object.send_command(start_prompt, end_prompt,
command, int_timeout)
else:
command_status, _ = Utils.cli_Utils.send_command(session_object, start_prompt,
end_prompt, command, int_timeout)
else:
print_warning("%s-%s is not available for use" % (system_name, session_name))
command_status = False
Utils.testcase_Utils.report_substep_status(command_status)
return command_status
@mockready
def send_all_testdata_commands(self, system_name, session_name=None, var_sub=None,
description=None, td_tag=None, vc_tag=None):
"""
Sends all commands from all rows that are marked execute=yes from the testdata
This keyword expects the usage of warrior framework's
recommended testdata xml files, sample testdata file is
available in Warriorspace/Config_files/sample/testdata_sample.xml
:Datafile usage:
Tags or attributes to be used in input datafile for the system or subsystem
        If both tag and attribute are provided, the attribute will be used.
1. testdata = absolute/relative path of the testdata file.
2. variable_config = absolute/relative path of the variable\
config file.
By default the "testdata" and "variable_config" tag/attribute
will be used to get the details of testdata and variable config file.
If a different tag/attribute name is used, provide the tagnames
as the value to the arguments td_tag and vc_tag.
:Arguments:
1. system_name (string) = This can be name of the\
system or a subsystem. In case of subsystem only\
single subsystem is supported. Format for subsystem\
is "system_name[subsystem_name]"
            2. session_name(string) = name of the session to the system
            3. var_sub(string) = the pattern [var_sub] in the testdata commands,\
               start_prompt, end_prompt, verification search\
               will be substituted with this value.
4. description(string) = optional description string that overwrites the\
default description(wdesc) of the keyword.
This string will be printed as the keyword description\
in console logs and result files.
5. td_tag = custom tag/attribute name of testdata file.
            6. vc_tag = custom tag/attribute name of variable config file.
:Returns:
1. status(bool)
2. response dictionary(dict): a dictionary having the responses of all\
commands sent to the particular system or subsystem. This dictionary\
is available in warrior frameworks global data_repository and can be\
retrieved using the key= "session_id + _td_response" where\
session_id="system_name+subsystem_name+session_name"
"""
wdesc = "Send commands from rows marked execute=yes in the test data of the system"
desc = wdesc if description is None else description
return self.send_testdata_command_kw(system_name, session_name, desc,
var_sub, td_tag, vc_tag)
@mockready
def send_commands_by_testdata_rownum(self, row_num, system_name,
session_name=None, var_sub=None,
description=None, td_tag=None, vc_tag=None):
"""Sends all the commands from testdata that has row equal to the
provided row_num
This keyword expects the usage of warrior framework's
recommended testdata xml files, sample testdata file is
available in Warriorspace/Config_files/sample/testdata_sample.xml
:Datafile usage:
Tags or attributes to be used in input datafile for the system or subsystem
        If both tag and attribute are provided, the attribute will be used.
1. testdata = absolute/relative path of the testdata file.
2. variable_config = absolute/relative path of the variable
config file.
By default the "testdata" and "variable_config" tag/attribute
will be used to get the details of testdata and variable config file.
If a different tag/attribute name is used, provide the tagnames
as the value to the arguments td_tag and vc_tag.
:Arguments:
1. row_num (string) = row number in string representation
2. system_name (string) = This can be name of the
system or a subsystem. In case of subsystem only
single subsystem is supported. Format for subsystem
is "system_name[subsystem_name]"
            3. session_name(string) = name of the session to the system
            4. var_sub(string) = the pattern [var_sub] in the testdata commands,
               start_prompt, end_prompt, verification search
               will be substituted with this value.
5. description(string) = optional description string that overwrites the
default description(wdesc) of the keyword.
This string will be printed as the keyword description
in console logs and result files.
6. td_tag = custom tag/attribute name of testdata file
7. vc_tag = custom tag/attribute name of variable config file.
:Returns:
1. status(bool)
2. response dictionary(dict): a dictionary having the responses of all
commands sent to the particular system or subsystem. This dictionary
is available in warrior frameworks global data_repository and can be
retrieved using the key= "session_id + _td_response" where
session_id="system_name+subsystem_name+session_name"
"""
wdesc = "Send commands by row num of testdata file"
desc = wdesc if description is None else description
return self.send_testdata_command_kw(system_name, session_name,
desc, var_sub, row_num=row_num,
td_tag=td_tag, vc_tag=vc_tag)
@mockready
def send_commands_by_testdata_title(self, title, system_name, session_name=None,
var_sub=None, description=None,
td_tag=None, vc_tag=None):
"""Sends all the commands from testdata that has title equal to the
provided title
This keyword expects the usage of warrior framework's
recommended testdata xml files, sample testdata file is
available in Warriorspace/Config_files/sample/testdata_sample.xml
:Datafile usage:
Tags or attributes to be used in input datafile for the system or subsystem
        If both tag and attribute are provided, the attribute will be used.
1. testdata = absolute/relative path of the testdata file.
2. variable_config = absolute/relative path of the variable
config file.
By default the "testdata" and "variable_config" tag/attribute
will be used to get the details of testdata and variable config file.
If a different tag/attribute name is used, provide the tagname
as the value to the arguments td_tag and vc_tag.
:Arguments:
1. title (string) = title in string representation
2. system_name (string) = This can be name of the
system or a subsystem. In case of subsystem only
single subsystem is supported. Format for subsystem
is "system_name[subsystem_name]"
            3. session_name(string) = name of the session to the system
            4. var_sub(string) = the pattern [var_sub] in the testdata commands,
               start_prompt, end_prompt, verification search
               will be substituted with this value.
5. description(string) = optional description string that overwrites the
default description(wdesc) of the keyword.
This string will be printed as the keyword description
in console logs and result files.
6. td_tag = custom tag/attribute name of testdata file.
7. vc_tag = custom tag/attribute name of variable config file.
:Returns:
1. status(bool)
2. response dictionary(dict): a dictionary having the responses of all
commands sent to the particular system or subsystem. This dictionary
is available in warrior frameworks global data_repository and can be
retrieved using the key= "session_id + _td_response" where
session_id="system_name+subsystem_name+session_name"
"""
wdesc = "Send commands by title of testdata file"
desc = wdesc if description is None else description
return self.send_testdata_command_kw(system_name, session_name, desc, var_sub,
title=title, td_tag=td_tag, vc_tag=vc_tag)
@mockready
def send_commands_by_testdata_title_rownum(self, title, row_num, system_name,
session_name=None, var_sub=None,
description=None, td_tag=None, vc_tag=None):
"""Sends all the commands from testdata that has title/row equal to the
provided title/row_num
This keyword expects the usage of warrior framework's
recommended testdata xml files, sample testdata file is
available in Warriorspace/Config_files/sample/testdata_sample.xml
:Datafile usage:
Tags or attributes to be used in input datafile for the system or subsystem
        If both tag and attribute are provided, the attribute will be used.
1. testdata = absolute/relative path of the testdata file.
2. variable_config = absolute/relative path of the variable
config file.
By default the "testdata" and "variable_config" tag/attribute
will be used to get the details of testdata and variable config file.
If a different tag/attribute name is used, provide the tagnames
as the value to the arguments td_tag and vc_tag.
:Arguments:
1. title = Title of the testdata block
2. row = Row number of the testdata block
3. system_name (string) = This can be name of the
system or a subsystem. In case of subsystem only
single subsystem is supported. Format for subsystem
is "system_name[subsystem_name]"
            4. session_name(string) = name of the session to the system
data directory
FilenamePrefix (str): the file name prefix
PlotDirectory (str): The directory into which the plots are saved
        CustomExtent (list): If [-9999] then just use the extent of the raster. Otherwise a four-element list with the extents of the area you want to plot
FigSizeFormat (str): The format of the figure you want. Try your favourite journal. It may or may not be there.
MDH
"""
# Save the figure
ImageName = PlotDirectory+FilenamePrefix+"_traces.png"
print(ImageName)
FigWidth_Inches = Get_FigWidth_Inches(FigSizeFormat)
HillshadeName = FilenamePrefix+"_hs.bil"
# create the map figure
MF = MapFigure(HillshadeName, DataDirectory, coord_type="UTM_km", colourbar_location='None')
#customise the extent of the plot if required
if len(CustomExtent) == 4:
xmin = CustomExtent[0]
xmax = CustomExtent[1]
ymin = CustomExtent[2]
ymax = CustomExtent[3]
MF.SetCustomExtent(xmin,xmax,ymin,ymax)
# add hilltops
HilltopPointsDF = ReadHillslopeData(DataDirectory, FilenamePrefix)
HilltopPoints = LSDP.LSDMap_PointData(HilltopPointsDF, data_type = "pandas", PANDEX = True)
MF.add_point_data(HilltopPoints,alpha=1,zorder=100,unicolor=[0.8,0,0],manual_size=2)
# add channel heads
ChannelHeadsDF = pd.read_csv(DataDirectory+FilenamePrefix+"_Wsources.csv")
ChannelHeadPoints = LSDP.LSDMap_PointData(ChannelHeadsDF, data_type = "pandas", PANDEX = True)
MF.add_point_data(ChannelHeadPoints,zorder=100,unicolor="blue",manual_size=8)
# add channels
ChannelDF = Helper.ReadChiDataMapCSV(DataDirectory,FilenamePrefix)
ChannelPoints = LSDP.LSDMap_PointData(ChannelDF, data_type = "pandas", PANDEX = True)
MF.add_point_data(ChannelPoints,show_colourbar="False", scale_points=True, max_point_size = 2.5, min_point_size = 0.5, column_for_scaling='drainage_area',zorder=90)
# add hillslope traces
ThinningFactor=1
#HillslopeTracesShp = DataDirectory+FilenamePrefix+"_hillslope_traces.shp"
#if os.path.exists(HillslopeTracesShp) == False:
WriteHillslopeTracesShp(DataDirectory,FilenamePrefix,ThinningFactor,CustomExtent)
MF.add_line_data(DataDirectory+FilenamePrefix+"_hillslope_traces.shp",zorder=80,alpha=0.9,linewidth=0.8)
#finalise and save figure
MF.SetRCParams(label_size=8)
MF.save_fig(fig_width_inches = FigWidth_Inches, FigFileName = ImageName, FigFormat="png", Fig_dpi = 300)
def PlotEStarRStarProgression(Sc=0.71):
"""
    Plots the progression of hillslopes along Bolinas in E* R* space. Note (SMM): Fiona may simply have copied this over; it is not clear where it reads its data from.
Args:
Sc (float): The critical slope
MDH, September 2017
"""
from scipy.stats import gaussian_kde
# setup the figure
Fig = CreateFigure(AspectRatio=1.2)
#choose colormap
ColourMap = cm.viridis
#Plot analytical relationship
plt.loglog()
PlotEStarRStarTheoretical()
#Store median values to plot the track through E* R* space
EStarMedian = np.zeros(NoBasins)
RStarMedian = np.zeros(NoBasins)
# Setup extent for data density calcs
ESmin = np.log10(0.1)
ESmax = np.log10(100.)
RSmin = np.log10(0.05)
RSmax = np.log10(1.5)
# setup grid for density calcs
    # the number of samples passed to np.logspace must be an integer
    ESgrid = np.logspace(ESmin, ESmax, int((ESmax - ESmin) * 100))
    RSgrid = np.logspace(RSmin, RSmax, int((RSmax - RSmin) * 100))
#loop through the basins
for Basin in range(0,NoBasins):
#for Basin in range(0,1):
# Get the hillslope data for the basin
Data = CalculateEStarRStar(Basin)
# Get the convex hull
#Points = np.column_stack((Data.EStar,Data.RStar))
#Hull = ConvexHull(Points)
# calculate the 2D density of the data given
#Counts,Xbins,Ybins=np.histogram2d(Data.EStar,Data.RStar,bins=100)
#Counts = Counts.T
#X,Y = np.meshgrid(Xbins,Ybins)
#plt.pcolormesh(X,Y,Counts)
# calculate gaussian kernel density
Values = np.vstack([np.log10(Data.EStar), np.log10(Data.RStar)])
Density = gaussian_kde(Values)
ES,RS = np.meshgrid(np.log10(ESgrid),np.log10(RSgrid))
Positions = np.vstack([ES.ravel(), RS.ravel()])
# colour code by basin number
colour = float(Basin)/float(NoBasins)
Density = np.reshape(Density(Positions).T, ES.shape)
Density /= np.max(Density)
#plt.pcolormesh(10**ES,10**RS,Density,cmap=cm.Reds)
plt.contour(10**ES,10**RS,Density,[0.2,],colors=[ColourMap(colour),],linewidths=1.,alpha=0.5)
#plt.plot(Data.EStar,Data.RStar,'k.',ms=2,zorder=32)
# make the contour plot
#plt.contour(counts.transpose(),extent=[xbins.min(),xbins.max(),
# ybins.min(),ybins.max()],linewidths=3,colors='black',
# linestyles='solid')
# colour code by basin number
#colour = float(Basin)/float(NoBasins)
# Get median EStar RStar
EStarMedian[Basin] = Data.EStar.median()
RStarMedian[Basin] = Data.RStar.median()
plt.plot(Data.EStar.median(),Data.RStar.median(),'o',ms=5,color=ColourMap(colour), zorder=32)
# Plot the Hull
#if Basin % 4 == 0:
# Ind = np.append(Hull.vertices, Hull.vertices[0])
# plt.plot(Points[Ind,0], Points[Ind,1], '-', color=ColourMap(colour), lw=1,alpha=0.5)
#plot the path
#plt.plot(EStarMedian,RStarMedian,'k-')
# Finalise the figure
    plt.xlabel(r'$E^*={{-2\:C_{HT}\:L_H}/{S_C}}$')
plt.ylabel('$R^*=S/S_C$')
plt.xlim(0.1,100)
plt.ylim(0.05,1.5)
# add colour bar
m = cm.ScalarMappable(cmap=ColourMap)
m.set_array(np.arange(0,NoBasins))
cbar = plt.colorbar(m)
tick_locator = ticker.MaxNLocator(nbins=5)
cbar.locator = tick_locator
cbar.update_ticks()
cbar.set_label('Basin No.')
#save the figure
plt.tight_layout(rect=[0, 0.03, 1, 0.95])
plt.savefig(PlotDirectory+FilenamePrefix + "_EStarRStarProgression.png", dpi=300)
plt.close(Fig)
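# Usage sketch for PlotEStarRStarProgression (illustrative only). The function
# takes only Sc and otherwise relies on module-level names being defined
# elsewhere (NoBasins, PlotDirectory, FilenamePrefix, CreateFigure,
# CalculateEStarRStar, PlotEStarRStarTheoretical), so a driver script must set
# those up first. The values below are hypothetical.
#
#   NoBasins = 15
#   PlotDirectory = "/path/to/plots/"
#   FilenamePrefix = "Bolinas"
#   PlotEStarRStarProgression(Sc=0.71)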
# SMM Checked, revised and working 13/06/2018
# This is now working and produces lots of nice profile plots.
# It could be revised so that it shows the main stem only
def PlotChiProfileHillslopeData(DataDirectory, FilenamePrefix, PlotDirectory, Basins = [], PlotKsn = False, Sc = 0.71, mainstem_only = False, minimum_traces = 50,
common_max_Es = -99, common_max_Ksn = -99):
"""
This plots the data by basin showing the E*, R* and either the chi profile or the K_sn data as a function of chi
Args:
DataDirectory (str): the data directory
FilenamePrefix (str): the file name prefix
PlotDirectory (str): The directory into which the plots are saved
Basins (list): The basins to be plotted
PlotKsn (bool): If true, the profile plot will be Ksn instead of elevation
Sc (float): The critical slope
mainstem_only (bool): If true, only plot the data from the main stem
minimum_traces (int): The minimum number of traces required to plot the hillslope data
common_max_Es (float): If this is positive, use as the maximum Es for all plots
common_max_Ksn (float): If this is positive, use as the maximum Ksn for all plots
Author: MDH
"""
print("Hi there. Let me print some basin by basin plots for you.")
if PlotKsn:
print("You are plotting chi-k_sn rather than chi-elevation")
else:
print("You are plotting chi-elevation rather than chi-k_sn")
#Load hillslope metrics data
HillslopesDF = ReadHillslopeData(DataDirectory, FilenamePrefix)
# Read in the raw channel data
ChannelsDF = ReadChannelData(DataDirectory, FilenamePrefix)
# Basins list and keys
BasinsDict = np.loadtxt(DataDirectory+FilenamePrefix+'_junctions.list',dtype=int)
# loop through basins
for key in Basins:
Basin = BasinsDict[key]
# print basin to screen
print(key, Basin)
# isolate basin data
BasinChannelData = ChannelsDF[ChannelsDF.basin_key == key]
MinimumChi = BasinChannelData.chi.min()
MaximumMChi = BasinChannelData.m_chi.max()
MinKsn = BasinChannelData.m_chi.min()
MaxKsn = BasinChannelData.m_chi.max()
# how many segments are we dealing with?
Segments = BasinChannelData.segment_number.unique()
# try to figure out the source key
mainstem_source_key = BasinChannelData.source_key.iloc[0]
print("The mainstem source key is: " +str(mainstem_source_key))
# separate into main stem and trib data
MainStemChannelData = BasinChannelData[BasinChannelData.source_key == mainstem_source_key]
MainStemSegments = MainStemChannelData.segment_number.unique()
# setup the figure
Fig = CreateFigure(FigSizeFormat="EPSL",AspectRatio=1)
ax1 = Fig.add_axes([0.1,0.1,0.8,0.35])
ax2 = Fig.add_axes([0.1,0.45,0.8,0.32])
ax3 = Fig.add_axes([0.1,0.65,0.8,0.32])
#choose colormap
ColourMap = cm.viridis
# create new dataframe for plotting
PlotDF = pd.DataFrame(columns=['Chi','Ksn','EStarMedian','EStarLower',
'EStarUpper','RStarMedian','RStarLower','RStarUpper','NTraces'])
# Get the data columns for plotting
for i, Segment in np.ndenumerate(Segments):
if mainstem_only:
if Segments[i] in MainStemSegments:
# get metrics to plot
if PlotKsn:
KKsn = BasinChannelData.m_chi[BasinChannelData.segment_number == Segment]
Ksn = BasinChannelData.m_chi[BasinChannelData.segment_number == Segment].unique()[0]
Chi = BasinChannelData.chi[BasinChannelData.segment_number == Segment]
Elevation = BasinChannelData.elevation[BasinChannelData.segment_number == Segment]
#print("Sizes are:")
#print("Ksn: "+str(Ksn.size))
#print("Chi: "+str(Chi.size))
#print("Elevation: "+str(Elevation.size))
#normalise chi by outlet chi
Chi = Chi-MinimumChi
# plot the chi data
Colour = (Ksn-MinKsn)/(MaxKsn-MinKsn)
PlotMaxKsn = int(math.ceil(MaxKsn / 10.0)) * 10
if PlotKsn:
ax1.scatter(Chi,KKsn,marker='o', edgecolors='none', lw=0.5, c=[1.0,0.0,0.0], s=20, zorder=20)
else:
ax1.plot(Chi,Elevation,'-', lw=1.5,c=ColourMap(Colour), zorder=10)
# get hillslope data
SegmentHillslopeData = HillslopesDF[HillslopesDF.StreamID == Segment]
NTraces = len(SegmentHillslopeData["i"].tolist())
if NTraces<minimum_traces:
continue
# Calculate E* R*
EStar = -2*SegmentHillslopeData.Cht*SegmentHillslopeData.Lh/Sc
RStar = SegmentHillslopeData.S/Sc
EStarMedian = EStar.median()
EStarLower = EStar.quantile(0.25)
EStarUpper = EStar.quantile(0.75)
RStarMedian = RStar.median()
RStarLower = RStar.quantile(0.25)
RStarUpper = RStar.quantile(0.75)
# add to plot dataframe
PlotDF.loc[i] = [Chi.median(),Ksn,EStarMedian,EStarLower, EStarUpper, RStarMedian, RStarLower, RStarUpper,NTraces]
else:
# get metrics to plot
if PlotKsn:
KKsn = BasinChannelData.m_chi[BasinChannelData.segment_number == Segment]
Ksn = BasinChannelData.m_chi[BasinChannelData.segment_number == Segment].unique()[0]
Chi = BasinChannelData.chi[BasinChannelData.segment_number == Segment]
Elevation = BasinChannelData.elevation[BasinChannelData.segment_number == Segment]
#print("Sizes are:")
#print("Ksn: "+str(Ksn.size))
#print("Chi: "+str(Chi.size))
#print("Elevation: "+str(Elevation.size))
#normalise chi by outlet chi
Chi = Chi-MinimumChi
# plot the chi data
Colour = (Ksn-MinKsn)/(MaxKsn-MinKsn)
PlotMaxKsn = int(math.ceil(MaxKsn / 10.0)) * 10
if PlotKsn:
ax1.scatter(Chi,KKsn,marker='o', edgecolors='none',lw=0.5, c=[1.0,0.0,0.0], s=20, zorder=20)
else:
ax1.plot(Chi,Elevation,'-', lw=1.5,c=ColourMap(Colour), zorder=10)
# get hillslope data
SegmentHillslopeData = HillslopesDF[HillslopesDF.StreamID == Segment]
NTraces = len(SegmentHillslopeData["i"].tolist())
if NTraces<minimum_traces:
continue
# Calculate E* R*
EStar = -2*SegmentHillslopeData.Cht*SegmentHillslopeData.Lh/Sc
RStar = SegmentHillslopeData.S/Sc
EStarMedian = EStar.median()
EStarLower = EStar.quantile(0.25)
EStarUpper = EStar.quantile(0.75)
RStarMedian = RStar.median()
RStarLower = RStar.quantile(0.25)
RStarUpper = RStar.quantile(0.75)
# add to plot dataframe
PlotDF.loc[i] = [Chi.median(),Ksn,EStarMedian,EStarLower, EStarUpper, RStarMedian, RStarLower, RStarUpper,NTraces]
# reset indices
PlotDF = PlotDF.reset_index(drop=True)
# Zip errors for plotting
Es_max_err = PlotDF.EStarUpper.values-PlotDF.EStarMedian
Es_min_err = PlotDF.EStarMedian-PlotDF.EStarLower.values
        Es_errors = np.array(list(zip(Es_min_err, Es_max_err))).T
Rs_max_err = PlotDF.RStarUpper.values-PlotDF.RStarMedian
Rs_min_err = PlotDF.RStarMedian-PlotDF.RStarLower.values
        Rs_errors = np.array(list(zip(Rs_min_err, Rs_max_err))).T
#Get colours for plotting from Chi
#plot ksn vs EStar and Rstar, colouring by Chi
for i, row in PlotDF.iterrows():
ax2.plot([row.Chi,row.Chi],[row.EStarLower, row.EStarUpper],'-',c=[0.5,0.9,0.7],lw=2)
ax2.scatter(row.Chi, row.EStarMedian, marker='o', edgecolors='k',lw=0.5, c=[0.5,0.9,0.7], s=20, zorder=200)
ax3.plot([row.Chi,row.Chi],[row.RStarLower, row.RStarUpper],'-',c=[0.5,0.7,0.9],lw=2)
ax3.scatter(row.Chi, row.RStarMedian, marker='o', edgecolors='k',lw=0.5, c=[0.5,0.7,0.9], s=20, zorder=200)
# Finalise the figure
if PlotKsn:
ax1.set_ylabel(r"$k_{sn}$")
else:
ax1.set_ylabel('Elevation (m)')
ax1.set_xlabel(r"$\chi$ (m)")
        ax2.set_ylabel(r'Dimensionless $C_{\mathit{HT}}$')
ax3.set_ylabel('Dimensionless Relief $(S/S_C)$')
#add colourbar if you have a profile plot
if not PlotKsn:
CAx = Fig.add_axes([0.6,0.17,0.25,0.02])
m = cm.ScalarMappable(cmap=ColourMap)
m.set_array(PlotDF.Ksn)
plt.colorbar(m, cax=CAx,orientation='horizontal')
plt.xlabel('$k_{sn}$',fontsize=8)
CAx.tick_params(axis='both', labelsize=8)
# turn off ax2 overlap and x axis for superimposed plots
ax1.patch.set_facecolor('none')
ax1.spines['right'].set_visible(False)
ax1.spines['top'].set_visible(False)
ax1.yaxis.set_ticks_position('left')
ax1.xaxis.set_ticks_position('bottom')
ax2.patch.set_facecolor('none')
ax2.spines['left'].set_visible(False)
ax2.spines['top'].set_visible(False)
ax2.spines['bottom'].set_visible(False)
ax2.xaxis.set_visible(False)
ax2.yaxis.tick_right()
ax2.yaxis.set_label_position("right")
ax3.patch.set_facecolor('none')
ax3.spines['right'].set_visible(False)
ax3.spines['top'].set_visible(False)
ax3.spines['bottom'].set_visible(False)
ax3.xaxis.set_visible(False)
ax3.yaxis.set_label_position("left")
ax3.yaxis.set_ticks_position('left')
# fix axis limits
if PlotKsn:
if common_max_Ksn > 0:
ax1.set_ylim(0,common_max_Ksn)
else:
ax1.set_ylim(0,PlotMaxKsn)
if common_max_Es > 0:
ax2.set_ylim(0,common_max_Es)
else:
ax2.set_ylim(0,PlotDF.EStarUpper.max())
ax3.set_ylim(0,1)
#save output
        plt.suptitle('Basin ID
import numpy as np
from scipy import interpolate, integrate
class Signal:
# Implements a signal that was sampled.
def __init__(self, time, samples):
if type(time) == list:
time = np.array(time)
if type(samples) == list:
samples = np.array(samples)
self.time = time
self.samples = samples
@property
def t(self):
# Returns the time samples.
return self.time
@property
def s(self):
# Returns the signal samples.
return self.samples
def signal_at(self, time):
# Returns the value of the signal at any time using a linear interpolation.
return interpolate.interp1d(self.t, self.s)(time)
class LGADSignal(Signal):
# Adapts the "Signal" class to signals that have the shape of that comming out
# from an LGAD detector, i.e. a pulse.
# It is assumed that the pulse is positive.
#
# DESIGN RULES
# ------------
#
# - Methods of type <@property> do not raise any error. They return
# the corresponding value or float('NaN') or whatever, but NO ERRORS!
# - Methods of type <@property> store calculated values in memory in
# order to increase processing speed. <find_...> methods do not
# store anything, they do the required processing each time they
# are called.
# - Methods of type <@property> call methods of type <find_...> where
# the actual calculation/processing is done. These <find_...> methods
# do raise errors if they consider it necessary.
# @property methods ------------------------------------------------
@property
def baseline(self):
if hasattr(self, '_baseline'):
return self._baseline
else:
try:
self._baseline = self.find_baseline()
except:
self._baseline = float('NaN')
return self._baseline
@property
def amplitude(self):
if hasattr(self, '_amplitude'):
return self._amplitude
else:
try:
self._amplitude = self.find_amplitude()
except:
self._amplitude = float('NaN')
return self._amplitude
@property
def noise(self):
if hasattr(self, '_noise'):
return self._noise
else:
try:
self._noise = self.find_noise()
except:
self._noise = float('NaN')
return self._noise
@property
def SNR(self):
try:
snr = self.amplitude/self.noise
except:
snr = float('NaN')
return snr
@property
def rise_time(self):
if hasattr(self, '_rise_time'):
return self._rise_time
else:
try:
self._rise_time = self.find_time_at_rising_edge(90) - self.find_time_at_rising_edge(10)
except:
self._rise_time = float('NaN')
return self._rise_time
@property
def rising_edge_indices(self):
if hasattr(self, '_rising_edge_indices'):
return self._rising_edge_indices
else:
try:
self._rising_edge_indices = self.find_rising_edge_indices(low = 10, high = 90)
except:
self._rising_edge_indices = [float('NaN'), float('NaN')]
return self._rising_edge_indices
@property
def falling_edge_indices(self):
if hasattr(self, '_falling_edge_indices'):
return self._falling_edge_indices
else:
try:
self._falling_edge_indices = self.find_falling_edge_indices(low = 10, high = 90)
except:
self._falling_edge_indices = [float('NaN'), float('NaN')]
return self._falling_edge_indices
@property
def time_over_noise(self):
if hasattr(self, '_time_over_noise'):
return self._time_over_noise
else:
try:
self._time_over_noise = self.find_time_over_threshold(threshold = self.noise/self.amplitude*100)
except:
self._time_over_noise = float('NaN')
return self._time_over_noise
@property
def collected_charge(self):
if hasattr(self, '_collected_charge'):
return self._collected_charge
else:
try:
self._collected_charge = self.find_collected_charge(threshold = self.noise/self.amplitude*100)
except:
self._collected_charge = float('NaN')
return self._collected_charge
def time_at_rising_edge(self, threshold: float):
"""Returns the time at a certain threshold value (percentage) within the rising edge doing a linear interpolation."""
try:
threshold = float(threshold)
if not isinstance(threshold, float):
raise ValueError()
except:
raise ValueError(f'<threshold> must be a float number, received object of type {type(threshold)}.')
try:
return self.find_time_at_rising_edge(threshold)
except:
return float('NaN')
def time_at_falling_edge(self, threshold: float):
"""Returns the time at a certain threshold value (percentage) within the falling edge doing a linear interpolation."""
try:
threshold = float(threshold)
if not isinstance(threshold, float):
raise ValueError()
except:
raise ValueError(f'<threshold> must be a float number, received object of type {type(threshold)}.')
try:
return self.find_time_at_falling_edge(threshold)
except:
return float('NaN')
# find_ methods ----------------------------------------------------
def find_baseline(self):
return np.median(self.samples[:np.argmax(self.samples)])
def find_amplitude(self):
return max(self.samples - self.baseline)
def find_noise(self):
k_start = self.rising_edge_indices[0]
if np.isnan(k_start):
raise RuntimeError(f'Cannot determine the begining of the rising edge.')
return self.samples[:k_start].std()
def find_rising_edge_indices(self, low: float, high: float):
# <low> and <high> are the percentage values to consider the rise window,
# e.g. low = 10 (percent) and high = 90 (percent).
low = float(low)
high = float(high)
if not low < high:
raise ValueError(f'<low> must be less than <high>, received low={low} and high={high}.')
k = self.samples.argmax()
k_start_rise = None
k_stop_rise = None
while k > 0:
if self.samples[k] - self.baseline > self.amplitude*high/100:
k_stop_rise = k+1
if self.samples[k] - self.baseline < self.amplitude*low/100:
k_start_rise = k
break
k -= 1
if k_start_rise is None or k_stop_rise is None or k_start_rise == k_stop_rise:
raise RuntimeError(f'Cannot find the rising edge of this signal. It is possible that the signal is very noisy, but please check.')
return [k for k in range(k_start_rise, k_stop_rise)]
def find_falling_edge_indices(self, low: float, high: float):
# <low> and <high> are the percentage values to consider the rise window,
# e.g. low = 10 (percent) and high = 90 (percent).
low = float(low)
high = float(high)
if not low < high:
raise ValueError(f'<low> must be less than <high>, received low={low} and high={high}.')
k = self.samples.argmax()
k_start_fall = None
k_stop_fall = None
while k < len(self.samples):
if self.samples[k] - self.baseline > self.amplitude*high/100:
k_start_fall = k
if self.samples[k] - self.baseline < self.amplitude*low/100:
k_stop_fall = k + 1
break
k += 1
if k_start_fall is None or k_stop_fall is None:
raise RuntimeError(f'Cannot find the falling edge of this signal. It is possible that the signal is very noisy, but please check.')
return [k for k in range(k_start_fall, k_stop_fall)]
def find_time_at_rising_edge(self, threshold: float):
# Returns the time at <threshold> in the rising edge using linear interpolation between the samples.
threshold = float(threshold)
_min_perc = 0
_max_perc = 90
if not _min_perc <= threshold <= _max_perc:
raise ValueError('<threshold> must be between ' + str(_min_perc) + ' and ' + str(_max_perc) + ', received ' + str(threshold))
if len(self.rising_edge_indices) == 2 and np.isnan(self.rising_edge_indices[0]) and np.isnan(self.rising_edge_indices[-1]):
raise RuntimeError('Cannot find rising edge of the signal.')
if 10 <= threshold <= 90:
rising_edge_indices = self.rising_edge_indices
else:
rising_edge_indices = self.find_rising_edge_indices(low = min(threshold, 10), high = max(threshold, 90))
time_vs_voltage_in_rising_edge = interpolate.interp1d(
x = self.samples[rising_edge_indices],
y = self.time[rising_edge_indices],
)
if np.isnan(self.amplitude):
raise RuntimeError('Cannot find the amplitude of the signal.')
if np.isnan(self.baseline):
raise RuntimeError('Cannot find the baseline of the signal.')
return float(time_vs_voltage_in_rising_edge(self.amplitude*threshold/100 + self.baseline))
def find_time_at_falling_edge(self, threshold: float):
		# Returns the time at <threshold> in the falling edge using linear interpolation between the samples.
threshold = float(threshold)
_min_perc = 0
_max_perc = 100
if not _min_perc <= threshold <= _max_perc:
raise ValueError('<threshold> must be between ' + str(_min_perc) + ' and ' + str(_max_perc) + ', received ' + str(threshold))
		if len(self.falling_edge_indices) == 2 and np.isnan(self.falling_edge_indices[0]) and np.isnan(self.falling_edge_indices[-1]):
			raise RuntimeError('Cannot find falling edge of the signal.')
if 10 <= threshold <= 90:
falling_edge_indices = self.falling_edge_indices
else:
falling_edge_indices = self.find_falling_edge_indices(low = min(threshold, 10), high = max(threshold, 90))
time_vs_voltage_in_falling_edge = interpolate.interp1d(
x = self.samples[falling_edge_indices],
y = self.time[falling_edge_indices],
)
if np.isnan(self.amplitude):
raise RuntimeError('Cannot find the amplitude of the signal.')
if np.isnan(self.baseline):
raise RuntimeError('Cannot find the baseline of the signal.')
return time_vs_voltage_in_falling_edge(self.amplitude*threshold/100 + self.baseline)
def find_indices_over_threshold(self, threshold: float):
# Threshold is a percentage.
threshold = float(threshold)
if not 0 <= threshold <= 100:
raise ValueError(f'<threshold> must be a percentage, i.e. a real number between 0 and 100. Received {threshold}.')
v_threshold = self.baseline + threshold/100*self.amplitude
if np.isnan(v_threshold):
raise RuntimeError('Cannot calculate the threshold voltage for this signal because either the amplitude and/or the baseline cannot be calculated.')
k_top = np.argmax(self.samples)
k_start = k_top
while k_start >= 0:
if self.samples[k_start] < v_threshold:
break
k_start -= 1
k_start += 1
if k_start <= 0:
raise RuntimeError('Cannot find the beginning of the pulse.')
k_stop = k_top
while k_stop < len(self.samples):
if self.samples[k_stop] < v_threshold:
break
k_stop += 1
if k_stop >= len(self.samples)-1:
raise RuntimeError('Cannot find the end of the pulse.')
if k_start == k_stop:
raise RuntimeError('Cannot find the indices over threshold.')
return [k for k in range(k_start, k_stop)]
def find_over_threshold_times(self, threshold: float):
# <threshold> is a percentage.
threshold = float(threshold)
if not 0 <= threshold <= 100:
raise ValueError(f'<threshold> must be a percentage, i.e. a real number between 0 and 100. Received {threshold}.')
t_start = self.find_time_at_rising_edge(threshold)
t_stop = self.find_time_at_falling_edge(threshold)
return t_start, t_stop
def find_time_over_threshold(self, threshold=20):
# Threshold is a percentage.
tstart, tend = self.find_over_threshold_times(threshold=threshold)
return tend-tstart
def find_collected_charge(self, threshold: float):
# Threshold: Which part of the signal do we consider for calculating the charge. It is a percentage, e.g. threshold = 10 %.
threshold = float(threshold)
t_start, t_stop = self.find_over_threshold_times(threshold=threshold)
if np.isnan(self.baseline):
raise RuntimeError('Cannot find the baseline for this signal.')
Q, *_ = integrate.quad(lambda t: (self.signal_at(time=t)-self.baseline), t_start, t_stop)
return Q
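	# Usage sketch (illustrative only): construct an LGADSignal from a synthetic
	# waveform and query the derived quantities. Real samples would come from an
	# oscilloscope or digitizer; the pulse below is made up.
	#
	#   t = np.linspace(0, 100e-9, 1000)
	#   s = np.where(t > 30e-9, np.exp(-(t - 30e-9)/10e-9) - np.exp(-(t - 30e-9)/1e-9), 0)
	#   sig = LGADSignal(time=t, samples=s + np.random.normal(0, .01, len(t)))
	#   print(sig.baseline, sig.amplitude, sig.SNR, sig.rise_time)
	#   print(sig.time_at_rising_edge(50), sig.collected_charge)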
# Other methods ----------------------------------------------------
def plot_myplotlib(self, fig):
# <fig> is a Figure object created with https://github.com/SengerM/myplotlib
from myplotlib.figure import MPLFigure
if not isinstance(fig, MPLFigure):
raise TypeError(f'<fig> must be an instance of MPLFigure, received an instance of type {type(fig)}. See https://github.com/SengerM/myplotlib')
fig.set(
xlabel = 'Time (s)',
ylabel = 'Amplitude (V)',
)
fig.plot(
[min(self.t), max(self.t)],
[self.baseline, self.baseline],
label = f'Baseline ({self.baseline:.2e} V)',
color = (0,0,0)
)
fig.plot(
[min(self.t), max(self.t)] + [max(self.t)] + [max(self.t), min(self.t)],
[self.baseline + self.noise, self.baseline + self.noise] + [float('NaN')] + [self.baseline - self.noise, self.baseline - self.noise],
label = f'Noise ({self.noise:.2e} V)',
color = (.6,)*3,
linestyle = 'dashed',
)
try:
fig.plot(
[self.t[np.argmax(self.s)-9],self.t[np.argmax(self.s)+9]] + 2*[self.t[np.argmax(self.s)]] + [self.t[np.argmax(self.s)-9],self.t[np.argmax(self.s)+9]],
2*[self.baseline] + [self.baseline, self.baseline + self.amplitude] + 2*[self.baseline+self.amplitude],
label = f'Amplitude ({self.amplitude:.2e} V)',
color = (0,.6,0),
)
except:
fig.plot(
[self.t[np.argmax(self.s)]]*2,
[self.baseline, self.baseline+self.amplitude],
label = f'Amplitude ({self.amplitude:.2e} V)',
color = (0,.6,0),
)
try:
t_start_rise = self.find_time_at_rising_edge(threshold=10)
fig.plot(
[t_start_rise, t_start_rise+self.rise_time, t_start_rise+self.rise_time, t_start_rise, t_start_rise],
self.baseline + np.array([self.amplitude*.1, self.amplitude*.1, self.amplitude*.9, self.amplitude*.9, self.amplitude*.1]),
label = f'Rise time ({self.rise_time:.2e} s)',
color = (0,0,0),
alpha = .5,
linestyle = 'dashed',
)
except:
pass
try:
threshold = 20
t_start, t_stop = self.find_over_threshold_times(threshold)
fig.plot(
[t_start,t_stop],
2*[self.baseline+threshold/100*self.amplitude],
label = f'Time over {threshold} % ({t_stop-t_start:.2e} s)',
linestyle = '--',
color = (.8,.3,.8)
)
except:
pass
fig.plot(
self.t,
self.s,
label = 'Signal',
marker = '.',
color = (.4,.5,.8),
)
# ~ fig.plot(
# ~ self.t[self.rising_edge_indices],
# ~ self.s[self.rising_edge_indices],
# ~ label = 'Rising edge',
# ~ color = (0,0,.3),
# ~ marker = 'o',
# ~ )
# ~ fig.plot(
# ~ self.t[self.falling_edge_indices],
# ~ self.s[self.falling_edge_indices],
# ~ label = 'Falling edge',
# ~ color = (0,0,.1),
# ~ marker = 'o',
# ~ )
try:
t_start, t_stop = self.find_over_threshold_times(threshold = self.noise/self.amplitude*100)
fig.plot(
[t_start] + list(self.time[(self.time>t_start)&(self.time<t_stop)]) + [t_start + self.time_over_noise] + [t_stop,t_start] + [t_start],
[self.signal_at(t_start)] + list(self.samples[(self.time>t_start)&(self.time<t_stop)]) + [self.signal_at(t_start + self.time_over_noise)] + 2*[self.baseline] + [self.signal_at(t_start)],
label = f'Collected charge ({self.collected_charge:.2e} a.u.)',
color = (1,0,0),
)
except:
pass
def plot_grafica(self, fig):
# <fig> is a Figure object created with grafica (not yet published).
from grafica.figure import Figure
if not isinstance(fig, Figure):
raise TypeError(f'<fig> must be an instance of {Figure}, received an object of type {type(fig)}.')
fig.xlabel = 'Time (s)'
fig.ylabel = 'Amplitude (V)'
fig.scatter(
[min(self.t), max(self.t)],
[self.baseline, self.baseline],
label = f'Baseline ({self.baseline:.2e} V)',
color = (0,0,0)
)
fig.scatter(
[min(self.t), max(self.t)] + [max(self.t)] + [max(self.t), min(self.t)],
			[self.baseline + self.noise, self.baseline + self.noise] + [float('NaN')] + [self.baseline - self.noise, self.baseline
# Copyright 2019, The TensorFlow Federated Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utility methods for working with Keras in TensorFlow Federated."""
import collections
import collections.abc
from typing import List, Optional, Sequence, Union
import tensorflow as tf
from tensorflow_federated.python import core as tff
from tensorflow_federated.python.common_libs import py_typecheck
from tensorflow_federated.python.common_libs import structure
from tensorflow_federated.python.learning import model as model_lib
from tensorflow_federated.python.learning import model_utils
def assign_weights_to_keras_model(keras_model, tff_weights):
"""Assigns a nested structure of TFF weights to a Keras model.
This function may be used to retrieve the model parameters trained by the
federated averaging process for use in an existing `tf.keras.models.Model`,
e.g.:
```
keras_model = tf.keras.models.Model(inputs=..., outputs=...)
def model_fn():
return tff.learning.from_keras_model(keras_model)
fed_avg = tff.learning.build_federated_averaging_process(model_fn, ...)
state = fed_avg.initialize()
state = fed_avg.next(state, ...)
...
tff.learning.assign_weights_to_keras_model(keras_model, state.model)
```
Args:
keras_model: A `tf.keras.models.Model` instance to assign weights to.
tff_weights: A TFF value representing the weights of a model.
Raises:
TypeError: if `tff_weights` is not a TFF value, or `keras_model` is not a
`tf.keras.models.Model` instance.
"""
# TODO(b/123092620): Simplify this.
py_typecheck.check_type(tff_weights,
(structure.Struct, model_utils.ModelWeights))
py_typecheck.check_type(keras_model, tf.keras.models.Model)
if isinstance(tff_weights, structure.Struct):
weights_to_assign = model_utils.ModelWeights.from_tff_result(tff_weights)
else:
weights_to_assign = tff_weights
weights_to_assign.assign_weights_to(keras_model)
Loss = Union[tf.keras.losses.Loss, List[tf.keras.losses.Loss]]
def from_keras_model(
keras_model: tf.keras.Model,
loss: Loss,
input_spec,
loss_weights: Optional[List[float]] = None,
metrics: Optional[List[tf.keras.metrics.Metric]] = None) -> model_lib.Model:
"""Builds a `tff.learning.Model` from a `tf.keras.Model`.
The `tff.learning.Model` returned by this function uses `keras_model` for
its forward pass and autodifferentiation steps.
Notice that since TFF couples the `tf.keras.Model` and `loss`,
TFF needs a slightly different notion of "fully specified type" than
pure Keras does. That is, the model `M` takes inputs of type `x` and
produces predictions of type `p`; the loss function `L` takes inputs of type
`<p, y>` and produces a scalar. Therefore in order to fully specify the type
signatures for computations in which the generated `tff.learning.Model` will
appear, TFF needs the type `y` in addition to the type `x`.
Args:
keras_model: A `tf.keras.Model` object that is not compiled.
loss: A `tf.keras.losses.Loss`, or a list of losses-per-output if the model
has multiple outputs. If multiple outputs are present, the model will
attempt to minimize the sum of all individual losses (optionally weighted
using the `loss_weights` argument).
input_spec: A structure of `tf.TensorSpec`s specifying the type of arguments
the model expects. Notice this must be a compound structure of two
elements, specifying both the data fed into the model to generate
predictions, as its first element, as well as the expected type of the
ground truth as its second.
loss_weights: (Optional) A list of Python floats used to weight the loss
contribution of each model output.
metrics: (Optional) a list of `tf.keras.metrics.Metric` objects.
Returns:
A `tff.learning.Model` object.
Raises:
TypeError: If `keras_model` is not an instance of `tf.keras.Model`.
    ValueError: If `keras_model` was compiled, or `input_spec` does not
      contain two elements.
KeyError: If `loss` is a `dict` and does not have the same keys as
`keras_model.outputs`.
"""
# Validate `keras_model`
py_typecheck.check_type(keras_model, tf.keras.Model)
if keras_model._is_compiled: # pylint: disable=protected-access
raise ValueError('`keras_model` must not be compiled')
# Validate and normalize `loss` and `loss_weights`
if len(keras_model.outputs) == 1:
py_typecheck.check_type(loss, tf.keras.losses.Loss)
if loss_weights is not None:
raise ValueError('`loss_weights` cannot be used if `keras_model` has '
'only one output.')
loss = [loss]
loss_weights = [1.0]
else:
py_typecheck.check_type(loss, list)
if len(loss) != len(keras_model.outputs):
raise ValueError('`keras_model` must have equal number of '
'outputs and losses.\nloss: {}\nof length: {}.'
'\noutputs: {}\nof length: {}.'.format(
loss, len(loss), keras_model.outputs,
len(keras_model.outputs)))
for loss_fn in loss:
py_typecheck.check_type(loss_fn, tf.keras.losses.Loss)
if loss_weights is None:
loss_weights = [1.0] * len(loss)
else:
if len(loss) != len(loss_weights):
raise ValueError(
'`keras_model` must have equal number of losses and loss_weights.'
'\nloss: {}\nof length: {}.'
'\nloss_weights: {}\nof length: {}.'.format(loss, len(loss),
loss_weights,
len(loss_weights)))
for loss_weight in loss_weights:
py_typecheck.check_type(loss_weight, float)
if len(input_spec) != 2:
raise ValueError('The top-level structure in `input_spec` must contain '
'exactly two top-level elements, as it must specify type '
'information for both inputs to and predictions from the '
'model. You passed input spec {}.'.format(input_spec))
for input_spec_member in tf.nest.flatten(input_spec):
py_typecheck.check_type(input_spec_member, tf.TensorSpec)
if metrics is None:
metrics = []
else:
py_typecheck.check_type(metrics, list)
for metric in metrics:
py_typecheck.check_type(metric, tf.keras.metrics.Metric)
return model_utils.enhance(
_KerasModel(
keras_model,
input_spec=input_spec,
loss_fns=loss,
loss_weights=loss_weights,
metrics=metrics))
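# Example usage (illustrative sketch, not part of this module's API): wrap an
# uncompiled Keras model for TFF. The layer sizes and feature shapes below are
# arbitrary placeholders.
#
#   keras_model = tf.keras.Sequential(
#       [tf.keras.layers.Dense(10, activation='softmax', input_shape=(784,))])
#   input_spec = (tf.TensorSpec(shape=[None, 784], dtype=tf.float32),
#                 tf.TensorSpec(shape=[None, 1], dtype=tf.int64))
#   tff_model = from_keras_model(
#       keras_model,
#       loss=tf.keras.losses.SparseCategoricalCrossentropy(),
#       input_spec=input_spec,
#       metrics=[tf.keras.metrics.SparseCategoricalAccuracy()])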
def federated_aggregate_keras_metric(
metrics: Union[tf.keras.metrics.Metric,
Sequence[tf.keras.metrics.Metric]], federated_values):
"""Aggregates variables a keras metric placed at CLIENTS to SERVER.
Args:
metrics: a single `tf.keras.metrics.Metric` or a `Sequence` of metrics . The
order must match the order of variables in `federated_values`.
federated_values: a single federated value, or a `Sequence` of federated
values. The values must all have `tff.CLIENTS` placement. If value is a
`Sequence` type, it must match the order of the sequence in `metrics.
Returns:
The result of performing a federated sum on federated_values, then assigning
the aggregated values into the variables of the corresponding
`tf.keras.metrics.Metric` and calling `tf.keras.metrics.Metric.result`. The
resulting structure has `tff.SERVER` placement.
"""
member_types = tf.nest.map_structure(lambda t: t.type_signature.member,
federated_values)
@tff.tf_computation
def zeros_fn():
# `member_type` is a (potentially nested) `tff.StructType`, which is an
# `structure.Struct`.
return structure.map_structure(lambda v: tf.zeros(v.shape, dtype=v.dtype),
member_types)
zeros = zeros_fn()
@tff.tf_computation(member_types, member_types)
def accumulate(accumulators, variables):
return tf.nest.map_structure(tf.add, accumulators, variables)
@tff.tf_computation(member_types, member_types)
def merge(a, b):
return tf.nest.map_structure(tf.add, a, b)
@tff.tf_computation(member_types)
def report(accumulators):
"""Insert `accumulators` back into the keras metric to obtain result."""
def finalize_metric(metric: tf.keras.metrics.Metric, values):
# Note: the following call requires that `type(metric)` have a no argument
# __init__ method, which will restrict the types of metrics that can be
# used. This is somewhat limiting, but the pattern to use default
# arguments and export the values in `get_config()` (see
# `tf.keras.metrics.TopKCategoricalAccuracy`) works well.
keras_metric = None
try:
# This is some trickery to reconstruct a metric object in the current
# scope, so that the `tf.Variable`s get created when we desire.
keras_metric = type(metric).from_config(metric.get_config())
except TypeError as e:
# Re-raise the error with a more helpful message, but the previous stack
# trace.
raise TypeError(
'Caught exception trying to call `{t}.from_config()` with '
'config {c}. Confirm that {t}.__init__() has an argument for '
'each member of the config.\nException: {e}'.format(
                t=type(metric), c=metric.get_config(), e=e))
assignments = []
for v, a in zip(keras_metric.variables, values):
assignments.append(v.assign(a))
with tf.control_dependencies(assignments):
return keras_metric.result()
if isinstance(metrics, tf.keras.metrics.Metric):
# Only a single metric to aggregate.
return finalize_metric(metrics, accumulators)
else:
# Otherwise map over all the metrics.
return collections.OrderedDict([
(name, finalize_metric(metric, values))
for metric, (name, values) in zip(metrics, accumulators.items())
])
return tff.federated_aggregate(federated_values, zeros, accumulate, merge,
report)
class _KerasModel(model_lib.Model):
"""Internal wrapper class for tf.keras.Model objects."""
def __init__(self, keras_model: tf.keras.Model, input_spec,
loss_fns: List[tf.keras.losses.Loss], loss_weights: List[float],
metrics: List[tf.keras.metrics.Metric]):
self._keras_model = keras_model
self._input_spec = input_spec
self._loss_fns = loss_fns
self._loss_weights = loss_weights
self._metrics = metrics
# This is defined here so that it closes over the `loss_fn`.
class _WeightedMeanLossMetric(tf.keras.metrics.Mean):
"""A `tf.keras.metrics.Metric` wrapper for the loss function."""
def __init__(self, name='loss', dtype=tf.float32):
super().__init__(name, dtype)
self._loss_fns = loss_fns
self._loss_weights = loss_weights
def update_state(self, y_true, y_pred, sample_weight=None):
if len(self._loss_fns) == 1:
batch_size = tf.shape(y_pred)[0]
batch_loss = self._loss_fns[0](y_true, y_pred)
else:
batch_size = tf.shape(y_pred[0])[0]
batch_loss = tf.zeros(())
for i in range(len(self._loss_fns)):
batch_loss += self._loss_weights[i] * self._loss_fns[i](y_true[i],
y_pred[i])
return super().update_state(batch_loss, batch_size)
self._loss_metric = _WeightedMeanLossMetric()
metric_variable_type_dict = tf.nest.map_structure(
tf.TensorSpec.from_tensor, self.report_local_outputs())
federated_local_outputs_type = tff.FederatedType(metric_variable_type_dict,
tff.CLIENTS)
def federated_output(local_outputs):
return federated_aggregate_keras_metric(self.get_metrics(), local_outputs)
self._federated_output_computation = tff.federated_computation(
federated_output, federated_local_outputs_type)
@property
def trainable_variables(self):
return self._keras_model.trainable_variables
@property
def non_trainable_variables(self):
return self._keras_model.non_trainable_variables
@property
def local_variables(self):
local_variables = []
for metric in self.get_metrics():
local_variables.extend(metric.variables)
return local_variables
def get_metrics(self):
if not self._keras_model._is_compiled: # pylint: disable=protected-access
return self._metrics + [self._loss_metric]
else:
return self._keras_model.metrics + [self._loss_metric]
@property
def input_spec(self):
return self._input_spec
def _forward_pass(self, batch_input, training=True):
if hasattr(batch_input, '_asdict'):
batch_input = batch_input._asdict()
    if isinstance(batch_input, collections.abc.Mapping):
inputs = batch_input.get('x')
else:
inputs = batch_input[0]
if inputs is None:
raise KeyError('Received a batch_input that is missing required key `x`. '
                     'Instead have keys
{1,S} {7,D}
5 Cdd u0 {2,D} {9,D}
6 Cdd u0 {3,D} {10,D}
7 Cdd u0 {4,D} {11,D}
8 Cb u0 {1,S}
9 O2d u0 {5,D}
10 O2d u0 {6,D}
11 O2d u0 {7,D}
""",
thermo = u'Cs-(Cds-Cds)(Cds-Cdd-O2d)(Cds-Cdd-O2d)(Cds-Cdd-O2d)',
shortDesc = u"""""",
longDesc =
u"""
""",
)
entry(
index = 757,
label = "Cs-(Cds-Cdd-O2d)(Cds-Cdd-O2d)(Cds-Cdd-Cd)Cb",
group =
"""
1 * Cs u0 {2,S} {3,S} {4,S} {8,S}
2 Cd u0 {1,S} {5,D}
3 Cd u0 {1,S} {6,D}
4 Cd u0 {1,S} {7,D}
5 Cdd u0 {2,D} {9,D}
6 Cdd u0 {3,D} {10,D}
7 Cdd u0 {4,D} {11,D}
8 Cb u0 {1,S}
9 O2d u0 {5,D}
10 O2d u0 {6,D}
11 C u0 {7,D}
""",
thermo = u'Cs-(Cds-Cds)(Cds-Cdd-O2d)(Cds-Cdd-O2d)Cb',
shortDesc = u"""""",
longDesc =
u"""
""",
)
entry(
index = 758,
label = "Cs-(Cds-Cdd-O2d)(Cds-Cdd-Cd)(Cds-Cdd-Cd)Cb",
group =
"""
1 * Cs u0 {2,S} {3,S} {4,S} {8,S}
2 Cd u0 {1,S} {5,D}
3 Cd u0 {1,S} {6,D}
4 Cd u0 {1,S} {7,D}
5 Cdd u0 {2,D} {9,D}
6 Cdd u0 {3,D} {10,D}
7 Cdd u0 {4,D} {11,D}
8 Cb u0 {1,S}
9 O2d u0 {5,D}
10 C u0 {6,D}
11 C u0 {7,D}
""",
thermo = u'Cs-(Cds-Cds)(Cds-Cds)(Cds-Cdd-O2d)Cb',
shortDesc = u"""""",
longDesc =
u"""
""",
)
entry(
index = -1,
label = "Cs-(Cds-Cdd-S2d)(Cds-Cdd-S2d)(Cds-Cdd-S2d)Cb",
group =
"""
1 * Cs u0 {2,S} {3,S} {4,S} {8,S}
2 Cd u0 {1,S} {5,D}
3 Cd u0 {1,S} {6,D}
4 Cd u0 {1,S} {7,D}
5 Cdd u0 {2,D} {9,D}
6 Cdd u0 {3,D} {10,D}
7 Cdd u0 {4,D} {11,D}
8 Cb u0 {1,S}
9 S2d u0 {5,D}
10 S2d u0 {6,D}
11 S2d u0 {7,D}
""",
thermo = None,
shortDesc = u"""""",
longDesc =
u"""
""",
)
entry(
index = -1,
label = "Cs-(Cds-Cdd-S2d)(Cds-Cdd-S2d)(Cds-Cdd-Cd)Cb",
group =
"""
1 * Cs u0 {2,S} {3,S} {4,S} {8,S}
2 Cd u0 {1,S} {5,D}
3 Cd u0 {1,S} {6,D}
4 Cd u0 {1,S} {7,D}
5 Cdd u0 {2,D} {9,D}
6 Cdd u0 {3,D} {10,D}
7 Cdd u0 {4,D} {11,D}
8 Cb u0 {1,S}
9 S2d u0 {5,D}
10 S2d u0 {6,D}
11 C u0 {7,D}
""",
thermo = None,
shortDesc = u"""""",
longDesc =
u"""
""",
)
entry(
index = -1,
label = "Cs-(Cds-Cdd-S2d)(Cds-Cdd-Cd)(Cds-Cdd-Cd)Cb",
group =
"""
1 * Cs u0 {2,S} {3,S} {4,S} {8,S}
2 Cd u0 {1,S} {5,D}
3 Cd u0 {1,S} {6,D}
4 Cd u0 {1,S} {7,D}
5 Cdd u0 {2,D} {9,D}
6 Cdd u0 {3,D} {10,D}
7 Cdd u0 {4,D} {11,D}
8 Cb u0 {1,S}
9 S2d u0 {5,D}
10 C u0 {6,D}
11 C u0 {7,D}
""",
thermo = None,
shortDesc = u"""""",
longDesc =
u"""
""",
)
entry(
index = 759,
label = "Cs-(Cds-Cdd-Cd)(Cds-Cdd-Cd)(Cds-Cdd-Cd)Cb",
group =
"""
1 * Cs u0 {2,S} {3,S} {4,S} {8,S}
2 Cd u0 {1,S} {5,D}
3 Cd u0 {1,S} {6,D}
4 Cd u0 {1,S} {7,D}
5 Cdd u0 {2,D} {9,D}
6 Cdd u0 {3,D} {10,D}
7 Cdd u0 {4,D} {11,D}
8 Cb u0 {1,S}
9 C u0 {5,D}
10 C u0 {6,D}
11 C u0 {7,D}
""",
thermo = u'Cs-(Cds-Cds)(Cds-Cds)(Cds-Cds)Cb',
shortDesc = u"""""",
longDesc =
u"""
""",
)
entry(
index = 760,
label = "Cs-CtCtCdsCds",
group =
"""
1 * Cs u0 {2,S} {3,S} {4,S} {5,S}
2 Ct u0 {1,S}
3 Ct u0 {1,S}
4 [Cd,CO] u0 {1,S}
5 [Cd,CO] u0 {1,S}
""",
thermo = u'Cs-(Cds-Cds)(Cds-Cds)CtCt',
shortDesc = u"""""",
longDesc =
u"""
""",
)
entry(
index = 761,
label = "Cs-(Cds-O2d)(Cds-O2d)CtCt",
group =
"""
1 * Cs u0 {2,S} {3,S} {4,S} {5,S}
2 CO u0 {1,S} {6,D}
3 CO u0 {1,S} {7,D}
4 Ct u0 {1,S}
5 Ct u0 {1,S}
6 O2d u0 {2,D}
7 O2d u0 {3,D}
""",
thermo = u'Cs-(Cds-O2d)(Cds-O2d)(Cds-Cds)(Cds-Cds)',
shortDesc = u"""""",
longDesc =
u"""
""",
)
entry(
index = 762,
label = "Cs-(Cds-O2d)(Cds-Cd)CtCt",
group =
"""
1 * Cs u0 {2,S} {3,S} {4,S} {5,S}
2 CO u0 {1,S} {7,D}
3 Cd u0 {1,S} {6,D}
4 Ct u0 {1,S}
5 Ct u0 {1,S}
6 C u0 {3,D}
7 O2d u0 {2,D}
""",
thermo = u'Cs-(Cds-O2d)(Cds-Cds)CtCt',
shortDesc = u"""""",
longDesc =
u"""
""",
)
entry(
index = 763,
label = "Cs-(Cds-O2d)(Cds-Cds)CtCt",
group =
"""
1 * Cs u0 {2,S} {3,S} {4,S} {5,S}
2 CO u0 {1,S} {7,D}
3 Cd u0 {1,S} {6,D}
4 Ct u0 {1,S}
5 Ct u0 {1,S}
6 Cd u0 {3,D}
7 O2d u0 {2,D}
""",
thermo = u'Cs-(Cds-O2d)(Cds-Cds)(Cds-Cds)(Cds-Cds)',
shortDesc = u"""""",
longDesc =
u"""
""",
)
entry(
index = 764,
label = "Cs-(Cds-O2d)(Cds-Cdd)CtCt",
group =
"""
1 * Cs u0 {2,S} {3,S} {4,S} {5,S}
2 CO u0 {1,S} {7,D}
3 Cd u0 {1,S} {6,D}
4 Ct u0 {1,S}
5 Ct u0 {1,S}
6 Cdd u0 {3,D}
7 O2d u0 {2,D}
""",
thermo = u'Cs-(Cds-O2d)(Cds-Cdd-Cd)CtCt',
shortDesc = u"""""",
longDesc =
u"""
""",
)
entry(
index = 765,
label = "Cs-(Cds-O2d)(Cds-Cdd-O2d)CtCt",
group =
"""
1 * Cs u0 {2,S} {3,S} {5,S} {6,S}
2 Cd u0 {1,S} {4,D}
3 CO u0 {1,S} {7,D}
4 Cdd u0 {2,D} {8,D}
5 Ct u0 {1,S}
6 Ct u0 {1,S}
7 O2d u0 {3,D}
8 O2d u0 {4,D}
""",
thermo = u'Cs-(Cds-O2d)(Cds-Cds)(Cds-Cds)(Cds-Cdd-O2d)',
shortDesc = u"""""",
longDesc =
u"""
""",
)
entry(
index = 766,
label = "Cs-(Cds-O2d)(Cds-Cdd-Cd)CtCt",
group =
"""
1 * Cs u0 {2,S} {3,S} {5,S} {6,S}
2 Cd u0 {1,S} {4,D}
3 CO u0 {1,S} {7,D}
4 Cdd u0 {2,D} {8,D}
5 Ct u0 {1,S}
6 Ct u0 {1,S}
7 O2d u0 {3,D}
8 C u0 {4,D}
""",
thermo = u'Cs-(Cds-O2d)(Cds-Cds)CtCt',
shortDesc = u"""""",
longDesc =
u"""
""",
)
entry(
index = 767,
label = "Cs-(Cds-Cd)(Cds-Cd)CtCt",
group =
"""
1 * Cs u0 {2,S} {3,S} {4,S} {5,S}
2 Cd u0 {1,S} {6,D}
3 Cd u0 {1,S} {7,D}
4 Ct u0 {1,S}
5 Ct u0 {1,S}
6 C u0 {2,D}
7 C u0 {3,D}
""",
thermo = u'Cs-(Cds-Cds)(Cds-Cds)CtCt',
shortDesc = u"""""",
longDesc =
u"""
""",
)
entry(
index = 768,
label = "Cs-(Cds-Cds)(Cds-Cds)CtCt",
group =
"""
1 * Cs u0 {2,S} {3,S} {4,S} {5,S}
2 Cd u0 {1,S} {6,D}
3 Cd u0 {1,S} {7,D}
4 Ct u0 {1,S}
5 Ct u0 {1,S}
6 Cd u0 {2,D}
7 Cd u0 {3,D}
""",
thermo = ThermoData(
Tdata = ([300,400,500,600,800,1000,1500],'K'),
Cpdata = ([3.61,7.3,8.97,9.69,9.84,9.42,7.36],'cal/(mol*K)','+|-',[0.13,0.13,0.13,0.13,0.13,0.13,0.13]),
H298 = (5.48,'kcal/mol','+|-',0.26),
S298 = (-34.5,'cal/(mol*K)','+|-',0.13),
),
shortDesc = u"""Cs-CtCtCdCd BOZZELLI =3D Cs/Cs/Cd/Ct2 + (Cs/Cs3/Cd - Cs/Cs4)""",
longDesc =
u"""
""",
)
entry(
index = 769,
label = "Cs-(Cds-Cdd)(Cds-Cds)CtCt",
group =
"""
1 * Cs u0 {2,S} {3,S} {4,S} {5,S}
2 Cd u0 {1,S} {6,D}
3 Cd u0 {1,S} {7,D}
4 Ct u0 {1,S}
5 Ct u0 {1,S}
6 Cdd u0 {2,D}
7 Cd u0 {3,D}
""",
thermo = u'Cs-(Cds-Cdd-Cd)(Cds-Cds)CtCt',
shortDesc = u"""""",
longDesc =
u"""
""",
)
entry(
index = 770,
label = "Cs-(Cds-Cdd-O2d)(Cds-Cds)CtCt",
group =
"""
1 * Cs u0 {2,S} {3,S} {5,S} {6,S}
2 Cd u0 {1,S} {4,D}
3 Cd u0 {1,S} {7,D}
4 Cdd u0 {2,D} {8,D}
5 Ct u0 {1,S}
6 Ct u0 {1,S}
7 Cd u0 {3,D}
8 O2d u0 {4,D}
""",
thermo = u'Cs-(Cds-Cds)(Cds-Cds)(Cds-Cds)(Cds-Cdd-O2d)',
shortDesc = u"""""",
longDesc =
u"""
""",
)
entry(
index = -1,
label = "Cs-(Cds-Cdd-S2d)(Cds-Cds)CtCt",
group =
"""
1 * Cs u0 {2,S} {3,S} {5,S} {6,S}
2 Cd u0 {1,S} {4,D}
3 Cd u0 {1,S} {7,D}
4 Cdd u0 {2,D} {8,D}
5 Ct u0 {1,S}
6 Ct u0 {1,S}
7 Cd u0 {3,D}
8 S2d u0 {4,D}
""",
thermo = None,
shortDesc = u"""""",
longDesc =
u"""
""",
)
entry(
index = 771,
label = "Cs-(Cds-Cdd-Cd)(Cds-Cds)CtCt",
group =
"""
1 * Cs u0 {2,S} {3,S} {5,S} {6,S}
2 Cd u0 {1,S} {4,D}
3 Cd u0 {1,S} {7,D}
4 Cdd u0 {2,D} {8,D}
5 Ct u0 {1,S}
6 Ct u0 {1,S}
7 Cd u0 {3,D}
8 C u0 {4,D}
""",
thermo = u'Cs-(Cds-Cds)(Cds-Cds)CtCt',
shortDesc = u"""""",
longDesc =
u"""
""",
)
entry(
index = 772,
label = "Cs-(Cds-Cdd)(Cds-Cdd)CtCt",
group =
"""
1 * Cs u0 {2,S} {3,S} {4,S} {5,S}
2 Cd u0 {1,S} {6,D}
3 Cd u0 {1,S} {7,D}
4 Ct u0 {1,S}
5 Ct u0 {1,S}
6 Cdd u0 {2,D}
7 Cdd u0 {3,D}
""",
thermo = u'Cs-(Cds-Cdd-Cd)(Cds-Cdd-Cd)CtCt',
shortDesc = u"""""",
longDesc =
u"""
""",
)
entry(
index = 773,
label = "Cs-(Cds-Cdd-O2d)(Cds-Cdd-O2d)CtCt",
group =
"""
1 * Cs u0 {2,S} {3,S} {6,S} {7,S}
2 Cd u0 {1,S} {4,D}
3 Cd u0 {1,S} {5,D}
4 Cdd u0 {2,D} {8,D}
5 Cdd u0 {3,D} {9,D}
6 Ct u0 {1,S}
7 Ct u0 {1,S}
8 O2d u0 {4,D}
9 O2d u0 {5,D}
""",
thermo = u'Cs-(Cds-Cds)(Cds-Cds)(Cds-Cdd-O2d)(Cds-Cdd-O2d)',
shortDesc = u"""""",
longDesc =
u"""
""",
)
entry(
index = 774,
label = "Cs-(Cds-Cdd-O2d)(Cds-Cdd-Cd)CtCt",
group =
"""
1 * Cs u0 {2,S} {3,S} {6,S} {7,S}
2 Cd u0 {1,S} {4,D}
3 Cd u0 {1,S} {5,D}
4 Cdd u0 {2,D} {8,D}
5 Cdd u0 {3,D} {9,D}
6 Ct u0 {1,S}
7 Ct u0 {1,S}
8 O2d u0 {4,D}
9 C u0 {5,D}
""",
thermo = u'Cs-(Cds-Cdd-O2d)(Cds-Cds)CtCt',
shortDesc = u"""""",
longDesc =
u"""
""",
)
entry(
index = -1,
label = "Cs-(Cds-Cdd-S2d)(Cds-Cdd-S2d)CtCt",
group =
"""
1 * Cs u0 {2,S} {3,S} {6,S} {7,S}
2 Cd u0 {1,S} {4,D}
3 Cd u0 {1,S} {5,D}
4 Cdd u0 {2,D} {8,D}
5 Cdd u0 {3,D} {9,D}
6 Ct u0 {1,S}
7 Ct u0 {1,S}
8 S2d u0 {4,D}
9 S2d u0 {5,D}
""",
thermo = None,
shortDesc = u"""""",
longDesc =
u"""
""",
)
entry(
index = -1,
label = "Cs-(Cds-Cdd-S2d)(Cds-Cdd-Cd)CtCt",
group =
"""
1 * Cs u0 {2,S} {3,S} {6,S} {7,S}
2 Cd u0 {1,S} {4,D}
3 Cd u0 {1,S} {5,D}
4 Cdd u0 {2,D} {8,D}
5 Cdd u0 {3,D} {9,D}
6 Ct u0 {1,S}
7 Ct u0 {1,S}
8 S2d u0 {4,D}
9 C u0 {5,D}
""",
thermo = None,
shortDesc = u"""""",
longDesc =
u"""
""",
)
entry(
index = 775,
    label =
<reponame>nudles/a2c
import numpy as np
import random
import math
import argparse
import logging
import pathlib
import datetime
# create logger
logger = logging.getLogger('Rafiki')
class Timer:
"""
    This is a manually driven timer: the user is responsible for ticking it.
"""
def __init__(self, start_time=0):
self.time = start_time
def tick(self, delta):
assert delta >= 0
self.time += delta
return self.time
def now(self):
return self.time
def reset(self, start_time=0):
self.time = start_time
class RequestGenerator(object):
def __init__(self, timer, rate, T, sigma=0.1, mu=0.01, seed=1):
        # timer: shared with the env
        # rate: the reference throughput
        # sigma: controls the peak arrival rate, which is (1 + sigma) times the reference throughput
        # mu: standard deviation of the Gaussian noise applied to the rate
self.timer = timer
self.T = T
self.mu = mu
        # make the arrival rate exceed the reference throughput (rate) for roughly 20% of each period:
# sin(0.5pi-0.2pi)*k + b = r; sin(0.5pi)*k + b = (1+sigma)*r
self.k = sigma * rate * 5
self.b = (1 + sigma) * rate - self.k
self.last_timestamp = timer.now()
random.seed(seed)
def num_of_new_requests(self, delta):
        # for roughly 20% of the time the arrival rate is larger than the reference throughput
x = self.timer.now()
w = 2 * np.pi / self.T
num = math.sin(w * x) * self.k + self.b
num = int(max(0, num * (1 + random.gauss(0, self.mu))))
return int(num * delta+0.5)
def reset(self):
self.last_timestamp = self.timer.now()
def get(self):
"""
return new requests per time according to inner time model
:return: (request_id, timestamp)
"""
cur_time = self.timer.now()
        # distribute the requests uniformly over [last_timestamp, now)
num = self.num_of_new_requests(cur_time - self.last_timestamp)
new_req = [(random.randint(0, 4999), # hard code the request id range, here
random.uniform(self.last_timestamp, cur_time)) for i in range(num)]
self.last_timestamp = cur_time
# sort the requests based on inqueue time
return sorted(new_req, key=lambda tup: tup[1])
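# Example (illustrative sketch only, not used elsewhere in this module): driving
# the generator by hand. With sigma=0.1 the peak arrival rate is (1+sigma)*rate,
# and the rate exceeds the reference throughput for roughly 20% of each period,
# per the k/b derivation in __init__.
def _example_request_generator():  # hypothetical helper, for illustration
    timer = Timer()
    gen = RequestGenerator(timer, rate=100, T=60, sigma=0.1, mu=0.01, seed=1)
    timer.tick(0.5)            # half a second of simulated time passes
    batch = gen.get()          # list of (request_id, inqueue_timestamp) tuples
    return len(batch), batch[:3]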
class Discrete:
    # minimal stand-in for a gym-style Discrete space: only stores the number of actions
def __init__(self, num_output):
self.n = num_output
# used to give each Env instance its own logger name
env_id = 0
class Env:
def __init__(self, requests_gen, timer, batchsz, tau, latency, max_latency, perf, beta=0.5, obs_size=50):
self.requests_gen = requests_gen
self.timer = timer
self.batchsz = batchsz # a list of candidate batchsz
self.tau = tau # time limit of each request
self.beta = beta # coefficient of the overdue requests in the reward function
self.obs_size = obs_size # valid queue length for queue feature into the RL model
self.max_latency = max_latency
self.latency = latency # a matrix with one row per model, one column per batchsz
self.perf = perf # a list performance/accuracy for (ensemble) model
self.num_models = self.latency.shape[0]
self.num_batchsz = self.latency.shape[1]
nbits = int(math.log2(self.num_batchsz))
assert (1 << nbits) == self.num_batchsz, 'num batchsz must be 2^x'
        assert (1 << self.num_models) - 1 == self.perf.size, 'num of models does not match perf file'
assert len(batchsz) == self.num_batchsz, \
'batchsz %d not match latency shape' % len(batchsz)
# 2^(self.num_models) includes all ensemble combinations. we manually
# exclude the case where no model is selected in the action
# the action for model selection and for batchsz selection is merged
# with the first num_models bits for model selection and the last
# log2(num_batchsz) for batch selection.
self.action_space = Discrete(((1 << self.num_models) - 1) * self.num_batchsz)
# the obs space includes the tau, latency for all models and all
# batchsz, waiting time of each model to finish existing requests and
# the queue states (queuing time)
self.observation_space = np.zeros((self.obs_size + self.latency.size + self.num_models + 1, ))
# self.reset()
global env_id
self.logger = logging.getLogger('Rafiki.env-%d' % env_id)
env_id += 1
def model_idx_to_model_action(self, model_idx):
# convert model selection into model action part
# model_idx is a binary array, pos k = 1 indicating k-th model is selected
# - 1 is to exclude [0,0,0...0]
return model_idx.dot(1 << np.arange(model_idx.size)[::-1]) - 1
def model_action_to_model_idx(self, action):
# extract model selection from the model action part
bstr = bin(action + 1)
pad = [False] * (self.num_models - (len(bstr) - 2)
) # pad with 0 on the left
model_idx = np.array(pad + [bool(int(x)) for x in bstr[2:]])
return model_idx
def accuracy(self, requests, model_idx):
return self.perf[self.model_idx_to_model_action(model_idx)]
def parse_action(self, action):
# parse full action from RL output into model_idx array and batchsz index
# first num_models bits for model selection; the rest bits for batchsz
# index; action value for models selection starting from 1 (0 means
# no model is selected)
batchsz_idx = action & (self.num_batchsz - 1)
nbits = int(math.log2(self.num_batchsz))
model_idx = self.model_action_to_model_idx(action >> nbits)
return model_idx > 0, batchsz_idx
def create_action(self, model_idx, batchsz_idx):
# reverse op of parse_action
nbits = int(math.log2(self.num_batchsz))
action = self.model_idx_to_model_action(model_idx) << nbits
action += batchsz_idx
return action
def step(self, action, sync=False):
"""
:return: obs s1 and cost c1
"""
model_idx, batchsz_idx = self.parse_action(action)
batchsz = self.batchsz[batchsz_idx]
self.logger.info("time: %.5f" % self.timer.now())
self.logger.info("len_req: %d" % len(self.requests))
self.logger.info("action: %d" % action)
self.logger.info("model %s batch_size %d" % (str(model_idx), self.batchsz[batchsz_idx]))
if len(self.requests)!=0:
self.logger.info("max_queue_wait: %.5f" %(float(self.timer.now()) - self.requests[0][1]))
cur_time = self.timer.now()
while len(self.requests) == 0 or (len(self.requests) < batchsz and
self.timer.now() - self.requests[0][1] +
np.max(self.waiting_time[model_idx] +
self.latency[model_idx, batchsz_idx])
+ 0.1 * self.tau < self.tau):
self.timer.tick(0.05)
self.waiting_time -= 0.05
self.update_obs()
# inc the processing time of the selected models
self.waiting_time[model_idx] += self.latency[model_idx, batchsz_idx]
# the latency of this batch of requests depends on the slowest model
max_process_time = np.max(self.waiting_time[model_idx])
cur_time = self.timer.now()
num = min(len(self.requests), batchsz)
num_overdue = 0
due_time = []
overdue_time = []
for _, inqueue_time in self.requests[:num]:
latency = cur_time - inqueue_time + max_process_time
if latency > self.max_latency:
num_overdue += 1
overdue_time.append(latency)
else:
due_time.append(latency)
acc = self.accuracy(self.requests[:num], model_idx)
reward = acc * num - self.beta * acc * num_overdue
self.logger.info('reward %.3f num %d overdue %d' % (reward, num, num_overdue))
# printing
delta = self.timer.now() - self.last_log_time
self.logr += reward
self.logt += num
self.logo += num_overdue
self.loga += acc * num
self.wait = self.waiting_time
if len(self.requests)!=0:
max_queue_wait = float(self.timer.now()) - self.requests[0][1]
else:
max_queue_wait = 0
self.wait = np.append(self.wait,max_queue_wait)
self.wait = np.append(self.wait, max_process_time)
self.wait = np.append(self.wait, max_process_time + max_queue_wait)
self.actions.append(action)
if delta >= 1:
self.wait = self.wait.reshape((-1,6))
self.wait = self.wait.T
self.info = {
'time': '%.10f' % self.timer.now(),
'num': '%.2d' % self.logt,
'overdue': '%.2d' % self.logo,
'overdue_rate': '%.5f' % (self.logo / delta),
'accu': '%.5f' % (self.loga / self.logt),
'reward': '%3.5f' % (self.logr / delta),
'throughput': '%4.5f' % (self.logt / delta),
'arrive_rate': '%.3d' % self.requests_gen.num_of_new_requests(1),
'len_q': '%.3d' % len(self.requests),
'batchsz': '%.2d' % batchsz,
'model_num': sum(model_idx),
'actions': self.actions,
'wait': self.wait
}
# self.logger.info(self.info)
self.last_log_time = self.timer.now()
self.logr, self.logt, self.logo, self.loga, self.actions, self.wait = 0, 0, 0, 0, [], []
# update timer to proceed with the next RL iter
if sync:
tick_time = np.max(self.waiting_time)
else:
tick_time = np.min(self.waiting_time)
self.timer.tick(tick_time)
# delta time has passed
self.waiting_time -= tick_time
# delete the dispatched requests from the queue
self.requests = self.requests[num:]
# update env queue status with new requests
self.update_obs()
# obs, reward, done, _
if delta >= 1:
return self.obs, reward, self.info
else:
return self.obs, reward, None
def update_obs(self):
mask = self.waiting_time >= 0
self.waiting_time *= mask
new_req = self.requests_gen.get()
total_size = len(new_req) + len(self.requests)
assert total_size < 2000 * self.obs_size, 'too many requests %d' % total_size
self.requests.extend(new_req)
size = min(self.obs_size, total_size)
        # prepare observations for the RL algorithm
self.obs = np.zeros(
(self.obs_size + self.latency.size + self.num_models + 1,))
self.obs[0] = self.tau
self.obs[1:1 + self.latency.size] = self.latency.reshape((-1,))
offset = 1 + self.latency.size + self.num_models
self.obs[1 + self.latency.size: offset] = self.waiting_time
self.obs[offset: offset + size] = self.timer.now() - \
np.array([r[1] for r in self.requests[:size]])
def reset(self):
# must be called after init env
self.timer.reset()
self.requests_gen.reset()
self.requests = []
self.waiting_time = np.zeros((self.num_models, ))
self.last_log_time = self.timer.now()
self.logr, self.logt, self.logo, self.loga, self.actions, self.wait = 0, 0, 0, 0, [], []
self.timer.tick(self.tau / 5)
self.update_obs()
return self.obs
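# Example (standalone illustration of the action layout used by
# Env.create_action / Env.parse_action; building a real Env needs the
# latency/accuracy files, so only the bit arithmetic is shown here). The low
# log2(num_batchsz) bits select the batch size; the remaining high bits, plus
# one, form the bitmask of selected models (the +1/-1 shift excludes the
# "no model selected" case).
def _example_action_encoding(num_models=3, num_batchsz=4):  # hypothetical helper
    nbits = int(math.log2(num_batchsz))
    model_idx = np.array([True, False, True])          # select models 0 and 2
    model_action = model_idx.dot(1 << np.arange(num_models)[::-1]) - 1
    action = (model_action << nbits) + 2                # batchsz index 2
    # decoding reverses the two steps
    batchsz_idx = action & (num_batchsz - 1)
    bstr = bin((action >> nbits) + 1)
    pad = [False] * (num_models - (len(bstr) - 2))
    decoded = np.array(pad + [bool(int(x)) for x in bstr[2:]])
    assert batchsz_idx == 2 and (decoded == model_idx).all()
    return action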
class Envs(object):
# a list of envs
def __init__(self, num_processes, num_models, policy, beta, obs_size, max_latency, tau_times, cycle=200):
self.num_processes = num_processes
batchsz = range(16, 65, 16)
latency = np.loadtxt('latency.txt', delimiter=',')[:num_models]
perf = np.loadtxt('accuracy.txt', delimiter=',')[:(1 << num_models) - 1]
        # max throughput when every model is serving different requests (data-parallel)
max_rate = sum([batchsz[-1] / l[-1] for l in latency])
        # min
0:
lEventNum = 4
else:
lEventNum = 5
else:
lEventNum = event.num
if lEventNum == 4:
self.set(self.value+self.oneTurn)
else:
self.set(self.value-self.oneTurn)
def get(self):
return self.type(self.value)
def printLabel(self):
if self.canvas is None:
return
self.canvas.itemconfigure(self.labelId2,
text=self.labelFormat%self.value)#newVal)
self.canvas.itemconfigure(self.labelId,
text=self.labelFormat%self.value)#newVal)
def set(self, val, update=1, force=0):
# if force is set to 1, we call this method regardless of the
# widget configuration. This is for example the case if the dial
# is set to continuous=0, but the value is set in the options panel
# snap to closest increment
if self.increment is not None and self.increment != 0. and not force:
offset = self.offsetValue%self.increment
dval = round(val/self.increment) * self.increment
if val < dval:
dval = dval + offset - self.increment
else:
dval = dval + offset
if self.min is not None and dval < self.min:
dval = self.min
elif self.max is not None and dval > self.max:
dval = self.max
# recompute vector and angle corresponding to val
self.angle = (dval%self.oneTurn)*self.threeSixtyOver1turn
if dval <0.0:
self.angle = self.angle - 360.0
a = self.angle*self.pyOver180
self.vector = [math.sin(a), math.cos(a)]
self.value = dval
self.offsetValue = dval
else:
# 'regular' mode, i.e. no step-wise increment
if self.min is not None and val < self.min: val = self.min
elif self.max is not None and val > self.max: val = self.max
# recompute vector and angle corresponding to val
self.angle = (val%self.oneTurn)*self.threeSixtyOver1turn
if val <0.0: self.angle = self.angle - 360.0
a = self.angle*self.pyOver180
self.vector = [math.sin(a), math.cos(a)]
self.value = val
self.offsetValue = val
#update arrow in display
self.drawArrow()
newVal = self.get()
if self.continuous or force:
if update and self.oldValue != newVal or force:
self.oldValue = newVal
self.callbacks.CallCallbacks(newVal)
if self.showLabel==2:
self.printLabel()
else:
if self.showLabel==2:
self.printLabel()
if self.showLabel==1:
self.printLabel()
if self.opPanel:
self.opPanel.valInput.set(self.labelFormat%newVal)
def drawArrow(self):
if self.canvas is None:
return
# end point
x1 = self.xm + self.vector[0]*self.rad
y1 = self.ym - self.vector[1]*self.rad
# point at arrow head base
xb = self.xm + self.vector[0]*self.radNoArrow
        yb = self.ym - self.vector[1]*self.radNoArrow
# vector orthogonal to arrow
n = [-self.vector[1], -self.vector[0]]
pts1 = [ self.xm+n[0]*self.arrowWidth, self.ym+n[1]*self.arrowWidth,
xb+n[0]*self.arrowWidth, yb+n[1]*self.arrowWidth,
xb+n[0]*self.arrowHeadWidth, yb+n[1]*self.arrowHeadWidth,
x1, y1 ]
pts2 = [ x1, y1,
xb-n[0]*self.arrowHeadWidth, yb-n[1]*self.arrowHeadWidth,
xb-n[0]*self.arrowWidth, yb-n[1]*self.arrowWidth,
self.xm-n[0]*self.arrowWidth, self.ym-n[1]*self.arrowWidth ]
canvas = self.canvas
if self.vector[0] > 0.0:
col1 = '#DDDDDD'
col2 = 'black'
else:
col1 = 'black'
col2 = '#DDDDDD'
apply( canvas.coords, (self.arrowPolId,) + tuple(pts1+pts2) )
apply( canvas.coords, (self.arrowPolborder1,) + tuple(pts1) )
canvas.itemconfigure( self.arrowPolborder1, fill=col1 )
apply( canvas.coords, (self.arrowPolborder2,) + tuple(pts2) )
canvas.itemconfigure( self.arrowPolborder2, fill=col2 )
canvas.itemconfigure(self.arcId, extent = 0.0-self.angle)
def createCanvas(self, master):
size = self.size
self.frame = Tkinter.Frame(self, borderwidth=3, relief='sunken')
self.canvas = Tkinter.Canvas(self.frame, width=size+2, height=size+2)
self.xm = self.ym = size/2+2
self.rad = size/2
self.radNoArrow = self.rad-self.arrowLength
self.vector = [0, 1]
x1 = self.xm + self.vector[0]*self.rad
y1 = self.ym + self.vector[1]*self.rad
canvas = self.canvas
self.circleId = canvas.create_oval(2,2,size,size, width=1,
fill=self.unusedArcColor)
self.arcId = canvas.create_arc(2,2,size,size, start=90.,
extent=0, fill=self.usedArcColor)
canvas.create_line(2, self.ym, size+2, self.ym)
canvas.create_line(self.xm, 2, self.ym, size+2)
self.arrowPolId = canvas.create_polygon( 0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,
fill='gray75' )
self.arrowPolborder1 = canvas.create_line( 0,0,0,0,0,0,0,0,
fill='black',
width = self.arrowBorderwidth)
self.arrowPolborder2 = canvas.create_line( 0,0,0,0,0,0,0,0,
fill='white',
width = self.arrowBorderwidth )
r = size/20
off = self.arrowBorderwidth
canvas.create_oval(self.xm-r,self.ym-r-off/2,self.xm+r,self.ym+r-off/2,
fill='#DDDDDD', outline='white')
canvas.create_oval(self.xm-r,self.ym-r+off,self.xm+r,self.ym+r+off,
fill='black', outline='black')
canvas.create_oval(self.xm-r,self.ym-r,self.xm+r,self.ym+r,
fill='gray70', outline='#DDDDDD')
self.labelId2 = canvas.create_text(self.xm+2, self.ym+2,
fill='black',
justify='center', text='',
font = self.labelFont)
self.labelId = canvas.create_text(self.xm, self.ym,
fill=self.labelColor,
justify='center', text='',
font = self.labelFont)
self.drawArrow()
self.opPanel = OptionsPanel(master = self, title="Dial Options")
# pack em up
self.canvas.pack(side=Tkinter.TOP)
self.frame.pack(expand=1, fill='x')
self.toggleWidgetLabel(self.showLabel)
def toggleWidgetLabel(self, val):
if val == 0:
# no widget labels
self.showLabel=0
self.canvas.itemconfigure(self.labelId2,
text='')
self.canvas.itemconfigure(self.labelId,
text='')
if val == 1:
# show always widget labels
self.showLabel=1
self.printLabel()
if val == 2:
# show widget labels only when mouse moves
self.showLabel=2
self.canvas.itemconfigure(self.labelId2,
text='')
self.canvas.itemconfigure(self.labelId,
text='')
def setValue(self, val):
if type(val) == types.StringType:
val = float(val)
assert type(val) in [types.IntType, types.FloatType],\
"Illegal type for value: expected %s or %s, got %s"%(
type(1), type(1.0), type(val) )
# setValue does NOT call a callback!
if self.min is not None and val < self.min: val = self.min
if self.max is not None and val > self.max: val = self.max
self.value = self.type(val)
self.offsetValue=self.value
self.oldValue = self.value
#update arrow in display
self.angle = (self.value%self.oneTurn)*self.threeSixtyOver1turn
if self.value <0.0: self.angle = self.angle - 360.0
a = self.angle*self.pyOver180
self.vector = [math.sin(a), math.cos(a)]
self.drawArrow()
if self.showLabel == 1:
self.printLabel()
if self.opPanel:
self.opPanel.valInput.set(self.labelFormat%self.value)
def setLabel(self, labCfg):
self.labCfg = labCfg
text = labCfg.get('text', None)
if text is None or text=='':
return
d={}
for k, w in self.labCfg.items():
if k == 'side': continue
else: d[k] = w
if not 'side' in self.labCfg.keys():
self.labCfg['side'] = 'left'
if not self.lab:
self.lab = Tkinter.Label(self, d)
self.lab.pack(side=self.labCfg['side'])
self.lab.bind("<Button-3>", self.toggleOptPanel)
else:
            self.lab.configure(text=text)
#####################################################################
# the 'configure' methods:
#####################################################################
def configure(self, **kw):
for key,value in kw.items():
# the 'set' parameter callbacks
if key=='labCfg': self.setLabel(value)
elif key=='type': self.setType(value)
elif key=='min': self.setMin(value)
elif key=='max': self.setMax(value)
elif key=='increment': self.setIncrement(value)
elif key=='precision': self.setPrecision(value)
elif key=='showLabel': self.setShowLabel(value)
elif key=='continuous': self.setContinuous(value)
elif key=='oneTurn': self.setOneTurn(value)
# the 'lock' entries callbacks
elif key=='lockType': self.lockTypeCB(value)
elif key=='lockMin': self.lockMinCB(value)
elif key=='lockBMin': self.lockBMinCB(value)
elif key=='lockMax': self.lockMaxCB(value)
elif key=='lockBMax': self.lockBMaxCB(value)
elif key=='lockIncrement': self.lockIncrementCB(value)
elif key=='lockBIncrement': self.lockBIncrementCB(value)
elif key=='lockPrecision': self.lockPrecisionCB(value)
elif key=='lockShowLabel': self.lockShowLabelCB(value)
elif key=='lockValue': self.lockValueCB(value)
elif key=='lockContinuous': self.lockContinuousCB(value)
elif key=='lockOneTurn': self.lockOneTurnCB(value)
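    # Usage sketch (illustrative only, not part of the widget code): configure()
    # simply dispatches each keyword to the matching set*/lock* method, so a
    # dial is normally adjusted after construction with calls such as
    #     dial.configure(min=0.0, max=360.0, oneTurn=360., precision=2,
    #                    showLabel=1, continuous=1)
    # Keywords not listed in the if/elif chain are silently ignored.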
def setType(self, Type):
assert type(Type) in [types.StringType, types.TypeType],\
"Illegal type for datatype. Expected %s or %s, got %s"%(
type('a'), type(type), type(Type) )
if type(Type) == type(""): # type str
assert Type in ('int','float'),\
"Illegal type descriptor. Expected 'int' or 'float', got '%s'"%Type
self.type = eval(Type)
else:
self.type = Type
if self.type == int:
self.labelFormat = "%d"
self.int_value = self.value
else:
self.labelFormat = "%."+str(self.precision)+"f"
if hasattr(self.opPanel, 'optionsForm'):
w = self.opPanel.idf.entryByName['togIntFloat']['widget']
if self.type == int:
w.setvalue('int')
            elif self.type == float:
w.setvalue('float')
if self.opPanel:
self.opPanel.updateDisplay()
# and update the printed label
if self.canvas and self.showLabel == 1:
self.printLabel()
def setMin(self, min):
if min is not None:
assert type(min) in [types.IntType, types.FloatType],\
"Illegal type for minimum. Expected type %s or %s, got %s"%(
type(0), type(0.0), type(min) )
if self.max and min > self.max:
min = self.max
self.min = self.type(min)
if self.showLabel == 1:
self.printLabel()
if self.value < self.min:
self.set(self.min)
if hasattr(self.opPanel, 'optionsForm'):
self.opPanel.minInput.set(self.labelFormat%self.min)
self.opPanel.toggleMin.set(1)
self.opPanel.min_entry.configure(state='normal', fg='gray0')
self.minOld = self.min
else:
self.min = None
if hasattr(self.opPanel, 'optionsForm'):
self.opPanel.toggleMin.set(0)
self.opPanel.min_entry.configure(state='disabled',
fg='gray40')
def setMax(self, max):
if max is not None:
assert type(max) in [types.IntType, types.FloatType],\
"Illegal type for maximum. Expected type %s or %s, got %s"%(
type(0), type(0.0), type(max) )
if self.min and max < self.min:
max = self.min
self.max = self.type(max)
if self.showLabel == 1:
self.printLabel()
if self.value > self.max:
self.set(self.max)
if hasattr(self.opPanel, 'optionsForm'):
self.opPanel.maxInput.set(self.labelFormat%self.max)
self.opPanel.toggleMax.set(1)
self.opPanel.max_entry.configure(state='normal', fg='gray0')
self.maxOld = self.max
else:
self.max = None
if hasattr(self.opPanel, 'optionsForm'):
self.opPanel.toggleMax.set(0)
self.opPanel.max_entry.configure(state='disabled', fg='gray40')
def setIncrement(self, incr):
if incr is not None:
assert type(incr) in [types.IntType, types.FloatType],\
"Illegal type for increment. Expected type %s or %s, got %s"%(
type(0), type(0.0), type(incr) )
self.increment = self.type(incr)
self.offsetValue = self.value
self.incrementOld = self.increment
if hasattr(self.opPanel, 'optionsForm'):
self.opPanel.incrInput.set(self.labelFormat%self.increment)
self.opPanel.toggleIncr.set(1)
self.opPanel.incr_entry.configure(state='normal', fg='gray0')
else:
self.increment = self.type(0)
if hasattr(self.opPanel, 'optionsForm'):
self.opPanel.toggleIncr.set(0)
self.opPanel.incrInput.set(self.labelFormat%0)
self.opPanel.incr_entry.configure(state='disabled',
fg='gray40')
def setPrecision(self, val):
assert type(val) in [types.IntType, types.FloatType],\
"Illegal type for precision. Expected type %s or %s, got %s"%(
type(0), type(0.0), type(val) )
val = int(val)
if val > 10:
val = 10
if val < 1:
val = 1
self.precision = val
if self.type == float:
self.labelFormat = "%."+str(self.precision)+"f"
else:
self.labelFormat = "%d"
if hasattr(self.opPanel, 'optionsForm'):
w = self.opPanel.idf.entryByName['selPrec']['widget']
w.setvalue(val)
if self.opPanel:
self.opPanel.updateDisplay()
# and update the printed label
if self.canvas and self.showLabel == 1:
self.printLabel()
def setContinuous(self, cont):
""" cont can be None, 0 or 1 """
assert cont in [None, 0, 1],\
"Illegal value for continuous: expected None, 0 or 1, got %s"%cont
if cont != 1:
cont = None
self.continuous = cont
if hasattr(self.opPanel, 'optionsForm'):
w = self.opPanel.idf.entryByName['togCont']['widget']
if cont:
w.setvalue('on')#i=1
else:
w.setvalue('off')#i=0
if self.opPanel:
self.opPanel.updateDisplay()
def setShowLabel(self, val):
"""Show label can be 0, 1 or 2
0: no label
1: label is always shown
        2: label is shown only while the dial is being moved
# This file is a modification of sklearn.metrics.pairwise
# Modifications by <NAME>
"""
New BSD License
Copyright (c) 2007–2018 The scikit-learn developers.
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
a. Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
b. Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
c. Neither the name of the Scikit-learn Developers nor the names of
its contributors may be used to endorse or promote products
derived from this software without specific prior written
permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR
ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
DAMAGE.
"""
from __future__ import division
import itertools
from functools import partial
from typing import Union
import numpy as np
import pandas as pd
from scipy.sparse import issparse
from scipy.spatial import distance
from sklearn.metrics.pairwise import PAIRWISE_BOOLEAN_FUNCTIONS
from sklearn.metrics.pairwise import PAIRWISE_DISTANCE_FUNCTIONS
from sklearn.metrics.pairwise import _VALID_METRICS, _return_float_dtype
from sklearn.metrics.pairwise import _parallel_pairwise
from sklearn.utils import check_array
from .utils import masked_euclidean_distances
_MASKED_METRICS = ['masked_euclidean']
_VALID_METRICS += ['masked_euclidean']
def check_pairwise_arrays(X, Y, precomputed=False, dtype=None,
accept_sparse='csr', force_all_finite=True,
copy=False):
""" Set X and Y appropriately and checks inputs
If Y is None, it is set as a pointer to X (i.e. not a copy).
If Y is given, this does not happen.
All distance metrics should use this function first to assert that the
given parameters are correct and safe to use.
Specifically, this function first ensures that both X and Y are arrays,
then checks that they are at least two dimensional while ensuring that
their elements are floats (or dtype if provided). Finally, the function
checks that the size of the second dimension of the two arrays is equal, or
the equivalent check for a precomputed distance matrix.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples_a, n_features)
Y : {array-like, sparse matrix}, shape (n_samples_b, n_features)
precomputed : bool
True if X is to be treated as precomputed distances to the samples in
Y.
dtype : string, type, list of types or None (default=None)
Data type required for X and Y. If None, the dtype will be an
appropriate float type selected by _return_float_dtype.
.. versionadded:: 0.18
accept_sparse : string, boolean or list/tuple of strings
String[s] representing allowed sparse matrix formats, such as 'csc',
'csr', etc. If the input is sparse but not in the allowed format,
it will be converted to the first listed format. True allows the input
to be any format. False means that a sparse matrix input will
raise an error.
force_all_finite : bool
Whether to raise an error on np.inf and np.nan in X (or Y if it exists)
copy : bool
Whether a forced copy will be triggered. If copy=False, a copy might
be triggered by a conversion.
Returns
-------
safe_X : {array-like, sparse matrix}, shape (n_samples_a, n_features)
An array equal to X, guaranteed to be a numpy array.
safe_Y : {array-like, sparse matrix}, shape (n_samples_b, n_features)
An array equal to Y if Y was not None, guaranteed to be a numpy array.
If Y was None, safe_Y will be a pointer to X.
"""
X, Y, dtype_float = _return_float_dtype(X, Y)
warn_on_dtype = dtype is not None
estimator = 'check_pairwise_arrays'
if dtype is None:
dtype = dtype_float
if Y is X or Y is None:
X = Y = check_array(X, accept_sparse=accept_sparse, dtype=dtype,
copy=copy, force_all_finite=force_all_finite,
warn_on_dtype=warn_on_dtype, estimator=estimator)
else:
X = check_array(X, accept_sparse=accept_sparse, dtype=dtype,
copy=copy, force_all_finite=force_all_finite,
warn_on_dtype=warn_on_dtype, estimator=estimator)
Y = check_array(Y, accept_sparse=accept_sparse, dtype=dtype,
copy=copy, force_all_finite=force_all_finite,
warn_on_dtype=warn_on_dtype, estimator=estimator)
if precomputed:
if X.shape[1] != Y.shape[0]:
raise ValueError("Precomputed metric requires shape "
"(n_queries, n_indexed). Got (%d, %d) "
"for %d indexed." %
(X.shape[0], X.shape[1], Y.shape[0]))
elif X.shape[1] != Y.shape[1]:
raise ValueError("Incompatible dimension for X and Y matrices: "
"X.shape[1] == %d while Y.shape[1] == %d" % (
X.shape[1], Y.shape[1]))
return X, Y
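# Example (illustrative sketch only, not exercised by the library): the checks
# above promote both inputs to a common float dtype and verify that the feature
# dimensions agree; with force_all_finite=False, NaN-masked data (as used by
# 'masked_euclidean') passes through unchanged.
def _example_check_pairwise():  # hypothetical helper, for illustration
    X = np.array([[1.0, np.nan], [3.0, 4.0]])
    Y = np.array([[0.0, 1.0]])
    Xc, Yc = check_pairwise_arrays(X, Y, force_all_finite=False)
    assert Xc.shape[1] == Yc.shape[1]
    return Xc, Yc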
def _pairwise_callable(X, Y, metric, **kwds):
"""Handle the callable case for pairwise_{distances,kernels}
"""
force_all_finite = False if callable(metric) else True
X, Y = check_pairwise_arrays(X, Y, force_all_finite=force_all_finite)
if X is Y:
# Only calculate metric for upper triangle
out = np.zeros((X.shape[0], Y.shape[0]), dtype='float')
iterator = itertools.combinations(range(X.shape[0]), 2)
for i, j in iterator:
out[i, j] = metric(X[i], Y[j], **kwds)
# Make symmetric
# NB: out += out.T will produce incorrect results
out = out + out.T
# Calculate diagonal
# NB: nonzero diagonals are allowed for both metrics and kernels
for i in range(X.shape[0]):
x = X[i]
out[i, i] = metric(x, x, **kwds)
else:
# Calculate all cells
out = np.empty((X.shape[0], Y.shape[0]), dtype='float')
iterator = itertools.product(range(X.shape[0]), range(Y.shape[0]))
for i, j in iterator:
out[i, j] = metric(X[i], Y[j], **kwds)
return out
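# Example (illustration of the "NB" note above, not used by the library): out.T
# is a view that shares memory with out, so the code symmetrizes into a fresh
# array rather than adding the transpose in place.
def _example_symmetrize():  # hypothetical helper, for illustration
    upper = np.array([[0.0, 1.0, 2.0],
                      [0.0, 0.0, 3.0],
                      [0.0, 0.0, 0.0]])
    full = upper + upper.T      # allocates a new array: safe regardless of aliasing
    assert (full == full.T).all()
    return full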
# Helper functions - distance
PAIRWISE_DISTANCE_FUNCTIONS['masked_euclidean'] = masked_euclidean_distances
def pairwise_distances(X, Y=None, metric="euclidean", n_jobs=1, **kwds):
""" Compute the distance matrix from a vector array X and optional Y.
This method takes either a vector array or a distance matrix, and returns
a distance matrix. If the input is a vector array, the distances are
computed. If the input is a distances matrix, it is returned instead.
This method provides a safe way to take a distance matrix as input, while
preserving compatibility with many other algorithms that take a vector
array.
If Y is given (default is None), then the returned matrix is the pairwise
distance between the arrays from both X and Y.
Valid values for metric are:
- From scikit-learn: ['cityblock', 'cosine', 'euclidean', 'l1', 'l2',
'manhattan']. These metrics support sparse matrix
inputs.
Also, ['masked_euclidean'] but it does not yet support sparse matrices.
- From scipy.spatial.distance: ['braycurtis', 'canberra', 'chebyshev',
'correlation', 'dice', 'hamming', 'jaccard', 'kulsinski', 'mahalanobis',
'minkowski', 'rogerstanimoto', 'russellrao', 'seuclidean',
'sokalmichener', 'sokalsneath', 'sqeuclidean', 'yule']
See the documentation for scipy.spatial.distance for details on these
metrics. These metrics do not support sparse matrix inputs.
Note that in the case of 'cityblock', 'cosine' and 'euclidean' (which are
valid scipy.spatial.distance metrics), the scikit-learn implementation
will be used, which is faster and has support for sparse matrices (except
for 'cityblock'). For a verbose description of the metrics from
scikit-learn, see the __doc__ of the sklearn.pairwise.distance_metrics
function.
Read more in the :ref:`User Guide <metrics>`.
Parameters
----------
X : array [n_samples_a, n_samples_a] if metric == "precomputed", or, \
[n_samples_a, n_features] otherwise
Array of pairwise distances between samples, or a feature array.
Y : array [n_samples_b, n_features], optional
An optional second feature array. Only allowed if
metric != "precomputed".
metric : string, or callable
The metric to use when calculating distance between instances in a
feature array. If metric is a string, it must be one of the options
allowed by scipy.spatial.distance.pdist for its metric parameter, or
a metric listed in pairwise.PAIRWISE_DISTANCE_FUNCTIONS.
If metric is "precomputed", X is assumed to be a distance matrix.
Alternatively, if metric is a callable function, it is called on each
pair of instances (rows) and the resulting value recorded. The callable
should take two arrays from X as input and return a value indicating
the distance between them.
n_jobs : int
The number of jobs to use for the computation. This works by breaking
down the pairwise matrix into n_jobs even slices and computing them in
parallel.
If -1 all CPUs are used. If 1 is given, no parallel computing code is
used at all, which is useful for debugging. For n_jobs below -1,
        (n_cpus + 1 + n_jobs) are used. Thus for n_jobs = -2, all CPUs but one
        are used.
<gh_stars>0
import requests
from urllib.parse import quote_plus
import time
import re
import logging
from ssl import SSLError
from requests_oauthlib import OAuth1
from birbapi.resource_urls import SEARCH_TWEETS, FAVORITES_CREATE, FAVORITES_DESTROY, \
STATUSES_RETWEET, STATUSES_DESTROY, FRIENDSHIPS_CREATE, FRIENDSHIPS_DESTROY, STATUSES_UPDATE, \
FRIENDS_IDS, FOLLOWERS_IDS, USERS_LOOKUP, USERS_SHOW, FRIENDSHIPS_SHOW, RATE_LIMIT_STATUS, \
OAUTH_ACCESS_TOKEN, OAUTH_REQUEST_TOKEN
def timestr_to_timestamp(created_at):
""" Convert a Twitter-supplied 'created_at' time field to a timestamp """
regex = re.compile(r'(\+|\-)\d\d\d\d')
match = regex.search(created_at)
if not match:
logging.warning('Twitter gave unsupported time string')
return time.time()
return time.mktime(time.strptime(created_at, '%a %b %d %H:%M:%S ' + match.group() + ' %Y'))
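# Example (illustration only): Twitter 'created_at' strings look like
# "Wed Aug 27 13:08:45 +0000 2008"; the regex above extracts the "+0000" offset
# so strptime can match it as a literal.
def _example_created_at():  # hypothetical helper, for illustration
    return timestr_to_timestamp('Wed Aug 27 13:08:45 +0000 2008')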
class TwitterError(Exception):
def __init__(self, response):
self.response_raw = response
self.response = response.json()
self.http_code = response.status_code
self.error_msg = self.get_msg()
self.error_code = self.get_code()
def get_msg(self):
error_msg = 'Unknown Twitter Error'
if 'errors' in self.response:
if len(self.response['errors']) > 0:
if 'message' in self.response['errors'][0]:
error_msg = self.response['errors'][0]['message']
return error_msg
def get_code(self):
error_code = 0
if 'errors' in self.response:
if len(self.response['errors']) > 0:
if 'code' in self.response['errors'][0]:
error_code = self.response['errors'][0]['code']
return error_code
class RequestsError(Exception):
def __init__(self, msg=None):
self.error_msg = 'Requests Unknown/Catchall Error'
if msg:
self.error_msg = msg
def __str__(self):
return repr(self.error_msg)
class Twitter():
""" A wrapper interface to the Twitter API """
def __init__(self, conkey, consec, otoken=None, osecret=None, verifier=None, timeout=10, testing=False):
self.consumer_key = conkey
self.consumer_secret = consec
self.oauth_token = otoken
self.oauth_secret = osecret
self.verifier = verifier
self.timeout = timeout
self.testing = testing
# configure OAuth1 depending on what arguments are present
if otoken is None or osecret is None:
self.oauth = OAuth1(conkey, client_secret=consec)
elif verifier is not None:
self.oauth = OAuth1(conkey, client_secret=consec,
resource_owner_key=otoken, resource_owner_secret=osecret,
verifier=verifier)
else:
self.oauth = OAuth1(conkey, client_secret=consec,
resource_owner_key=otoken, resource_owner_secret=osecret)
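    # Construction sketch (illustrative only): the three OAuth1 setups above
    # correspond to the usual three-legged flow, e.g.
    #     Twitter(conkey, consec)                             # request token step
    #     Twitter(conkey, consec, otoken, osecret, verifier)  # access token step
    #     Twitter(conkey, consec, otoken, osecret)            # signed API calls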
def build_uri(self, args_dict):
uri = ''
for key, value in list(args_dict.items()):
uri = uri + '&' + '%s=%s' % (key, value)
return uri
def search_tweets(self, q, **kwargs):
""" GET search/tweets
q: Search query string
**kwargs: Arbitrary number of keyword arguments as specified by
the Twitter API, such as:
lang='en', result_type='popular', count=25
"""
# TODO: implement since_id on subsequent searches to avoid dupes?
if 'lang' in kwargs:
if kwargs['lang'] is None:
del kwargs['lang']
query = quote_plus(q)
uri = self.build_uri(kwargs)
try:
response = requests.get(SEARCH_TWEETS + '?q=' + query + uri,
auth=self.oauth, timeout=self.timeout)
except (requests.exceptions.ConnectionError,
requests.exceptions.HTTPError,
requests.exceptions.Timeout,
requests.exceptions.RequestException,
requests.exceptions.URLRequired,
requests.exceptions.TooManyRedirects, SSLError) as e:
raise RequestsError(str(e))
if response.status_code != 200:
raise TwitterError(response)
return response
def favorites_create(self, id):
""" Add favorite specified by id """
try:
response = requests.post(FAVORITES_CREATE,
data={ 'id' : id }, auth=self.oauth, timeout=self.timeout)
except (requests.exceptions.ConnectionError,
requests.exceptions.HTTPError,
requests.exceptions.Timeout,
requests.exceptions.RequestException,
requests.exceptions.URLRequired,
requests.exceptions.TooManyRedirects, SSLError) as e:
raise RequestsError(str(e))
if response.status_code != 200:
raise TwitterError(response)
return response
def favorites_destroy(self, id):
""" Remove favorite specified by id """
try:
response = requests.post(FAVORITES_DESTROY,
data={ 'id' : id }, auth=self.oauth, timeout=self.timeout)
except (requests.exceptions.ConnectionError,
requests.exceptions.HTTPError,
requests.exceptions.Timeout,
requests.exceptions.RequestException,
requests.exceptions.URLRequired,
requests.exceptions.TooManyRedirects, SSLError) as e:
raise RequestsError(str(e))
        if response.status_code not in (200, 404):
raise TwitterError(response)
return response
def retweet(self, id):
""" Retweet the status specified by id """
argdict = { 'trim_user' : 1 }
uri = self.build_uri(argdict)
try:
response = requests.post(STATUSES_RETWEET + str(id) + '.json' + '?' + uri,
auth=self.oauth, timeout=self.timeout)
except (requests.exceptions.ConnectionError,
requests.exceptions.HTTPError,
requests.exceptions.Timeout,
requests.exceptions.RequestException,
requests.exceptions.URLRequired,
requests.exceptions.TooManyRedirects, SSLError) as e:
raise RequestsError(str(e))
if response.status_code != 200:
raise TwitterError(response)
return response
def statuses_destroy(self, id):
""" Destroy the status or retweet specified by id """
argdict = { 'trim_user' : 1 }
uri = self.build_uri(argdict)
try:
response = requests.post(STATUSES_DESTROY + str(id) + '.json' + '?' + uri,
auth=self.oauth, timeout=self.timeout)
except (requests.exceptions.ConnectionError,
requests.exceptions.HTTPError,
requests.exceptions.Timeout,
requests.exceptions.RequestException,
requests.exceptions.URLRequired,
requests.exceptions.TooManyRedirects, SSLError) as e:
raise RequestsError(str(e))
if response.status_code != 200:
raise TwitterError(response)
return response
def follow_user(self, user_id):
""" Follow the user specified by user_id """
try:
response = requests.post(FRIENDSHIPS_CREATE,
data={ 'user_id' : user_id }, auth=self.oauth, timeout=self.timeout)
except (requests.exceptions.ConnectionError,
requests.exceptions.HTTPError,
requests.exceptions.Timeout,
requests.exceptions.RequestException,
requests.exceptions.URLRequired,
requests.exceptions.TooManyRedirects, SSLError) as e:
raise RequestsError(str(e))
if response.status_code != 200:
raise TwitterError(response)
return response
def unfollow_user(self, user_id):
""" Unfollow the user specified by user_id """
try:
response = requests.post(FRIENDSHIPS_DESTROY,
data={ 'user_id' : user_id }, auth=self.oauth, timeout=self.timeout)
except (requests.exceptions.ConnectionError,
requests.exceptions.HTTPError,
requests.exceptions.Timeout,
requests.exceptions.RequestException,
requests.exceptions.URLRequired,
requests.exceptions.TooManyRedirects, SSLError) as e:
raise RequestsError(str(e))
if response.status_code != 200:
raise TwitterError(response)
return response
def send_tweet(self, status, reply_to=None, trim_user=1):
""" Send the tweets.
status: the text of the status update
reply_to: the ID of an existing status being replied to (optional)
trim_user: don't return full user object if 1 or true (optional)
"""
try:
if reply_to is None:
response = requests.post(STATUSES_UPDATE,
data={ 'status' : status, 'trim_user' : trim_user }, auth=self.oauth,
timeout=self.timeout)
else:
response = requests.post(STATUSES_UPDATE,
data={ 'status' : status, 'in_reply_to_status_id' : reply_to, 'trim_user' : trim_user },
auth=self.oauth, timeout=self.timeout)
except (requests.exceptions.ConnectionError,
requests.exceptions.HTTPError,
requests.exceptions.Timeout,
requests.exceptions.RequestException,
requests.exceptions.URLRequired,
requests.exceptions.TooManyRedirects, SSLError) as e:
raise RequestsError(str(e))
if response.status_code != 200:
raise TwitterError(response)
return response
def friends_ids(self, user_id, cursor=-1):
""" Return list of IDs of each user the specified user is following
Should be called from get_friends_list. """
try:
response = requests.get(FRIENDS_IDS + '?user_id=' + user_id +
'&cursor=' + str(cursor), auth=self.oauth, timeout=self.timeout)
except (requests.exceptions.ConnectionError,
requests.exceptions.HTTPError,
requests.exceptions.Timeout,
requests.exceptions.RequestException,
requests.exceptions.URLRequired,
requests.exceptions.TooManyRedirects, SSLError) as e:
raise RequestsError(str(e))
if response.status_code != 200:
raise TwitterError(response)
return response
def get_friends_recursive(self, twitter_id, cursor=-1, friends_list=[]):
""" Recursive function to assemble a list of users the specified
user is following (friends).
"""
if int(cursor) == 0 or len(friends_list) >= 75000:
return { 'friends' : friends_list, 'cursor' : cursor }
response = self.friends_ids(twitter_id, cursor)
friends_json = response.json()
cursor = friends_json['next_cursor_str']
for id in friends_json['ids']:
friends_list.append(str(id))
return self.get_friends_recursive(twitter_id, cursor, friends_list)
def followers_ids(self, user_id, cursor=-1):
""" Return list of IDs of each user the specified user is following.
Should only be called from get_followers_list.
"""
try:
response = requests.get(FOLLOWERS_IDS + '?user_id=' + user_id +
'&cursor=' + str(cursor), auth=self.oauth, timeout=self.timeout)
except (requests.exceptions.ConnectionError,
requests.exceptions.HTTPError,
requests.exceptions.Timeout,
requests.exceptions.RequestException,
requests.exceptions.URLRequired,
requests.exceptions.TooManyRedirects, SSLError) as e:
raise RequestsError(str(e))
if response.status_code != 200:
raise TwitterError(response)
return response
def get_followers_recursive(self, twitter_id, cursor=-1, followers_list=[]):
""" Recursive function to assemble a list of users who follow the
specified user.
"""
if int(cursor) == 0 or len(followers_list) >= 75000:
return { 'followers' : followers_list, 'cursor' : cursor }
response = self.followers_ids(twitter_id, cursor)
followers_json = response.json()
cursor = followers_json['next_cursor_str']
for id in followers_json['ids']:
followers_list.append(str(id))
return self.get_followers_recursive(twitter_id, cursor, followers_list)
def oauth_request_token(self, callback_url):
""" Step 1/3 in Twitter auth process """
try:
response = requests.post(url=OAUTH_REQUEST_TOKEN, auth=self.oauth,
data={ 'oauth_callback' : callback_url })
except (requests.exceptions.ConnectionError,
requests.exceptions.HTTPError,
requests.exceptions.Timeout,
requests.exceptions.RequestException,
requests.exceptions.URLRequired,
requests.exceptions.TooManyRedirects, SSLError) as e:
raise RequestsError(str(e))
if response.status_code != 200:
print(response.status_code)
print(response.text)
raise TwitterError(response)
return response
def oauth_access_token(self):
""" Step 3/3 in Twitter auth process """
try:
response = requests.post(url=OAUTH_ACCESS_TOKEN, auth=self.oauth)
except (requests.exceptions.ConnectionError,
requests.exceptions.HTTPError,
requests.exceptions.Timeout,
requests.exceptions.RequestException,
requests.exceptions.URLRequired,
requests.exceptions.TooManyRedirects, SSLError) as e:
raise RequestsError(str(e))
if response.status_code != 200:
raise TwitterError(response)
return response
def get_rate_limit_status(self, resources):
""" Return current rate limits for the specified resource families.
        resources: string of comma-separated resource families """
try:
response = requests.get(RATE_LIMIT_STATUS + '?resources=' + resources,
auth=self.oauth, timeout=self.timeout)
except (requests.exceptions.ConnectionError,
requests.exceptions.HTTPError,
requests.exceptions.Timeout,
requests.exceptions.RequestException,
requests.exceptions.URLRequired,
requests.exceptions.TooManyRedirects, SSLError) as e:
raise RequestsError(str(e))
if response.status_code != 200:
raise TwitterError(response)
return response
def get_rate_limit_status_all(self):
uri = quote_plus('help,users,search,statuses')
response = requests.get(RATE_LIMIT_STATUS + '?resources=' + uri,
auth=self.oauth, timeout=self.timeout)
return response
def friendships_show(self, source_id=None, target_id=None, source_name=None, target_name=None):
""" Return info about the relationship between two users """
if source_id and target_id:
argdict = { 'source_id' : source_id, 'target_id': target_id }
elif source_name and target_name:
argdict = { 'source_screen_name' : source_name, 'target_screen_name': target_name }
else:
logging.error('Creating argdict failed')
return None
uri = self.build_uri(argdict)
try:
response = requests.get(FRIENDSHIPS_SHOW + '?' + uri, auth=self.oauth,
timeout=self.timeout)
except (requests.exceptions.ConnectionError,
requests.exceptions.HTTPError,
requests.exceptions.Timeout,
requests.exceptions.RequestException,
requests.exceptions.URLRequired,
requests.exceptions.TooManyRedirects, SSLError) as e:
raise RequestsError(str(e))
if response.status_code != 200:
raise TwitterError(response)
return response
def users_show(self, user_id):
""" Return details on a single user specified by user_id """
try:
response = requests.get(USERS_SHOW + '?user_id=' + str(user_id),
auth=self.oauth, timeout=self.timeout)
except (requests.exceptions.ConnectionError,
requests.exceptions.HTTPError,
requests.exceptions.Timeout,
requests.exceptions.RequestException,
requests.exceptions.URLRequired,
requests.exceptions.TooManyRedirects, SSLError) as e:
raise RequestsError(str(e))
if response.status_code != 200:
raise TwitterError(response)
return response
def users_lookup(self, userlist, entities=False):
""" Return fully-hydrated user objects for up to 100 users per request """
if len(userlist) > 100:
raise Exception("userlist length must be <= 100")
# convert list to a CSV string
csv_list = ','.join(userlist)
try:
response = requests.post(url=USERS_LOOKUP, auth=self.oauth,
data={'user_id' : csv_list, 'include_entities' : entities})
except (requests.exceptions.ConnectionError,
requests.exceptions.HTTPError,
requests.exceptions.Timeout,
requests.exceptions.RequestException,
requests.exceptions.URLRequired,
requests.exceptions.TooManyRedirects, SSLError) as e:
            raise RequestsError(str(e))
        if response.status_code != 200:
            raise TwitterError(response)
        return response
| |
# Copyright 2014 Red Hat, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import logging
import os
import re
import stat
import time
import warnings
from oslo_concurrency import processutils
from oslo_config import cfg
from oslo_utils import excutils
from oslo_utils import imageutils
from oslo_utils import units
import tenacity
from ironic_lib.common.i18n import _
from ironic_lib import disk_partitioner
from ironic_lib import exception
from ironic_lib import utils
opts = [
cfg.IntOpt('efi_system_partition_size',
default=200,
help='Size of EFI system partition in MiB when configuring '
'UEFI systems for local boot.'),
cfg.IntOpt('bios_boot_partition_size',
default=1,
help='Size of BIOS Boot partition in MiB when configuring '
'GPT partitioned systems for local boot in BIOS.'),
cfg.StrOpt('dd_block_size',
default='1M',
help='Block size to use when writing to the nodes disk.'),
cfg.IntOpt('partition_detection_attempts',
default=3,
min=1,
help='Maximum attempts to detect a newly created partition.'),
cfg.IntOpt('partprobe_attempts',
default=10,
help='Maximum number of attempts to try to read the '
'partition.'),
cfg.IntOpt('image_convert_memory_limit',
default=2048,
help='Memory limit for "qemu-img convert" in MiB. Implemented '
'via the address space resource limit.'),
cfg.IntOpt('image_convert_attempts',
default=3,
help='Number of attempts to convert an image.'),
]
CONF = cfg.CONF
CONF.register_opts(opts, group='disk_utils')
LOG = logging.getLogger(__name__)
_PARTED_PRINT_RE = re.compile(r"^(\d+):([\d\.]+)MiB:"
r"([\d\.]+)MiB:([\d\.]+)MiB:(\w*):(.*):(.*);")
_PARTED_TABLE_TYPE_RE = re.compile(r'^.*partition\s+table\s*:\s*(gpt|msdos)',
re.IGNORECASE | re.MULTILINE)
CONFIGDRIVE_LABEL = "config-2"
MAX_CONFIG_DRIVE_SIZE_MB = 64
GPT_SIZE_SECTORS = 33
# Maximum disk size supported by MBR is 2TB (2 * 1024 * 1024 MB)
MAX_DISK_SIZE_MB_SUPPORTED_BY_MBR = 2097152
# Limit the memory address space to 1 GiB when running qemu-img
QEMU_IMG_LIMITS = None
def _qemu_img_limits():
global QEMU_IMG_LIMITS
if QEMU_IMG_LIMITS is None:
QEMU_IMG_LIMITS = processutils.ProcessLimits(
address_space=CONF.disk_utils.image_convert_memory_limit
* units.Mi)
return QEMU_IMG_LIMITS
def list_partitions(device):
"""Get partitions information from given device.
:param device: The device path.
:returns: list of dictionaries (one per partition) with keys:
number, start, end, size (in MiB), filesystem, partition_name,
flags, path.
"""
output = utils.execute(
'parted', '-s', '-m', device, 'unit', 'MiB', 'print',
use_standard_locale=True, run_as_root=True)[0]
if isinstance(output, bytes):
output = output.decode("utf-8")
lines = [line for line in output.split('\n') if line.strip()][2:]
# Example of line: 1:1.00MiB:501MiB:500MiB:ext4::boot
fields = ('number', 'start', 'end', 'size', 'filesystem', 'partition_name',
'flags')
result = []
for line in lines:
match = _PARTED_PRINT_RE.match(line)
if match is None:
LOG.warning("Partition information from parted for device "
"%(device)s does not match "
"expected format: %(line)s",
dict(device=device, line=line))
continue
# Cast int fields to ints (some are floats and we round them down)
groups = [int(float(x)) if i < 4 else x
for i, x in enumerate(match.groups())]
item = dict(zip(fields, groups))
item['path'] = partition_index_to_path(device, item['number'])
result.append(item)
return result
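# Example (illustration only, needs neither root nor parted): what the regex
# above extracts from the sample line quoted in the comment; the trailing ';'
# is part of parted's machine-readable output.
def _example_parted_line():  # hypothetical helper, for illustration
    line = '1:1.00MiB:501MiB:500MiB:ext4::boot;'
    match = _PARTED_PRINT_RE.match(line)
    fields = ('number', 'start', 'end', 'size', 'filesystem',
              'partition_name', 'flags')
    groups = [int(float(x)) if i < 4 else x
              for i, x in enumerate(match.groups())]
    return dict(zip(fields, groups))   # {'number': 1, 'start': 1, 'end': 501, ...}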
def count_mbr_partitions(device):
"""Count the number of primary and logical partitions on a MBR
:param device: The device path.
:returns: A tuple with the number of primary partitions and logical
partitions.
:raise: ValueError if the device does not have a valid MBR partition
table.
"""
# -d do not update the kernel table
# -s print a summary of the partition table
output, err = utils.execute('partprobe', '-d', '-s', device,
run_as_root=True, use_standard_locale=True)
if 'msdos' not in output:
raise ValueError('The device %s does not have a valid MBR '
'partition table' % device)
# Sample output: /dev/vdb: msdos partitions 1 2 3 <5 6 7>
# The partitions with number > 4 (and inside <>) are logical partitions
output = output.replace('<', '').replace('>', '')
partitions = [int(s) for s in output.split() if s.isdigit()]
return(sum(i < 5 for i in partitions), sum(i > 4 for i in partitions))
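# Example (illustration only): the primary/logical split applied to the sample
# partprobe output quoted in the comment above.
def _example_count_partitions():  # hypothetical helper, for illustration
    output = '/dev/vdb: msdos partitions 1 2 3 <5 6 7>'
    output = output.replace('<', '').replace('>', '')
    partitions = [int(s) for s in output.split() if s.isdigit()]
    return sum(i < 5 for i in partitions), sum(i > 4 for i in partitions)  # (3, 3)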
def get_disk_identifier(dev):
"""Get the disk identifier from the disk being exposed by the ramdisk.
This disk identifier is appended to the pxe config which will then be
used by chain.c32 to detect the correct disk to chainload. This is helpful
in deployments to nodes with multiple disks.
http://www.syslinux.org/wiki/index.php/Comboot/chain.c32#mbr:
:param dev: Path for the already populated disk device.
:raises OSError: When the hexdump binary is unavailable.
:returns: The Disk Identifier.
"""
disk_identifier = utils.execute('hexdump', '-s', '440', '-n', '4',
'-e', '''\"0x%08x\"''',
dev, run_as_root=True,
attempts=5, delay_on_retry=True)
return disk_identifier[0]
def get_partition_table_type(device):
"""Get partition table type, msdos or gpt.
:param device: the name of the device
    :return: 'msdos', 'gpt' or 'unknown'
"""
out = utils.execute('parted', '--script', device, '--', 'print',
run_as_root=True, use_standard_locale=True)[0]
m = _PARTED_TABLE_TYPE_RE.search(out)
if m:
return m.group(1)
LOG.warning("Unable to get partition table type for device %s", device)
return 'unknown'
def _blkid(device, probe=False, fields=None):
args = []
if probe:
args.append('--probe')
if fields:
args += sum((['--match-tag', field] for field in fields), [])
output, err = utils.execute('blkid', device, *args,
use_standard_locale=True, run_as_root=True)
if output.strip():
return output.split(': ', 1)[1]
else:
return ""
def _lsblk(device, deps=True, fields=None):
args = ['--pairs', '--bytes', '--ascii']
if not deps:
args.append('--nodeps')
if fields:
args.extend(['--output', ','.join(fields)])
else:
args.append('--output-all')
output, err = utils.execute('lsblk', device, *args,
use_standard_locale=True, run_as_root=True)
return output.strip()
def get_device_information(device, probe=False, fields=None):
"""Get information about a device using blkid.
Can be applied to all block devices: disks, RAID, partitions.
:param device: Device name.
:param probe: DEPRECATED, do not use.
:param fields: A list of fields to request (all by default).
:return: A dictionary with requested fields as keys.
:raises: ProcessExecutionError
"""
if probe:
output = _blkid(device, probe=True, fields=fields)
else:
output = _lsblk(device, fields=fields, deps=False)
if output:
return next(utils.parse_device_tags(output))
else:
return {}
def find_efi_partition(device):
"""Looks for the EFI partition on a given device.
A boot partition on a GPT disk is assumed to be an EFI partition as well.
:param device: the name of the device
:return: the EFI partition record from `list_partitions` or None
"""
is_gpt = get_partition_table_type(device) == 'gpt'
for part in list_partitions(device):
flags = {x.strip() for x in part['flags'].split(',')}
if 'esp' in flags or ('boot' in flags and is_gpt):
LOG.debug("Found EFI partition %s on device %s", part, device)
return part
else:
LOG.debug("No efi partition found on device %s", device)
def get_uefi_disk_identifier(dev):
"""Get the uuid from the disk being exposed by the ramdisk.
DEPRECATED: use find_efi_partition with get_device_information instead.
:param dev: Path for the already populated disk device.
:raises InstanceDeployFailure: Image is not UEFI bootable.
:returns: The UUID of the partition.
"""
warnings.warn("get_uefi_disk_identifier is deprecated, use "
"find_efi_partition and get_partition_information instead",
DeprecationWarning)
partition_id = None
try:
report, _ = utils.execute('fdisk', '-l', dev, run_as_root=True)
except processutils.ProcessExecutionError as e:
msg = _('Failed to find the partition on the disk %s ') % e
LOG.error(msg)
raise exception.InstanceDeployFailure(msg)
for line in report.splitlines():
if line.startswith(dev) and 'EFI System' in line:
vals = line.split()
partition_id = vals[0]
try:
lsblk_output = _lsblk(partition_id, fields=['UUID'])
disk_identifier = lsblk_output.split("=")[1].strip()
disk_identifier = disk_identifier.strip('"')
except processutils.ProcessExecutionError as e:
raise exception.InstanceDeployFailure("Image is not UEFI bootable. "
"Error: %s " % e)
return disk_identifier
_ISCSI_PREFIX = "iqn.2008-10.org.openstack:"
# TODO(dtantsur): deprecate node_uuid here, it's not overly useful (any iSCSI
# device should get the same treatment).
def is_iscsi_device(dev, node_uuid=None):
"""Check whether the device path belongs to an iSCSI device.
If node UUID is provided, checks that the device belongs to this UUID.
"""
if node_uuid:
return (_ISCSI_PREFIX + node_uuid) in dev
else:
return _ISCSI_PREFIX in dev
def is_last_char_digit(dev):
"""check whether device name ends with a digit"""
if len(dev) >= 1:
return dev[-1].isdigit()
return False
def partition_index_to_path(device, index):
"""Guess a partition path based on its device and index.
:param device: Device path.
:param index: Partition index.
"""
# the actual device names in the baremetal are like /dev/sda, /dev/sdb etc.
# While for the iSCSI device, the naming convention has a format which has
# iqn also embedded in it.
# When this function is called by ironic-conductor, the iSCSI device name
# should be appended by "part%d". While on the baremetal, it should name
# the device partitions as /dev/sda1 and not /dev/sda-part1.
if is_iscsi_device(device):
part_template = '%s-part%d'
elif is_last_char_digit(device):
part_template = '%sp%d'
else:
part_template = '%s%d'
return part_template % (device, index)
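# Illustrative sketch of the three naming conventions handled above (device
# paths are hypothetical):
def _example_partition_index_to_path():
    assert partition_index_to_path('/dev/sda', 1) == '/dev/sda1'
    # a device name ending in a digit gets a 'p' separator
    assert partition_index_to_path('/dev/nvme0n1', 2) == '/dev/nvme0n1p2'
    # an iSCSI device (its path contains the iqn prefix) gets '-part<N>'
    iscsi_dev = '/dev/disk/by-path/iqn.2008-10.org.openstack:node-1'
    assert partition_index_to_path(iscsi_dev, 1) == iscsi_dev + '-part1'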
def make_partitions(dev, root_mb, swap_mb, ephemeral_mb,
configdrive_mb, node_uuid, commit=True,
boot_option="netboot", boot_mode="bios",
disk_label=None, cpu_arch=""):
"""Partition the disk device.
Create partitions for root, swap, ephemeral and configdrive on a
disk device.
:param dev: Path for the device to work on.
    :param root_mb: Size of the root partition in MiB.
# asmd/dataset_utils.py
from copy import deepcopy
import numpy as np
from sklearn.utils import check_random_state
from . import utils
def choice(dataset, p=[0.6, 0.2, 0.2], random_state=None):
"""
    Returns N non-overlapping datasets randomly sampled from `dataset`, where
    N is `len(p)`; each song belongs to one of the datasets according to the
    probability distribution `p`. Note that `p` is always normalized to sum to 1.
`random_state` is an int or a np.random.RandomState object.
"""
# normalize p
p = np.asarray(p, dtype=np.float32)
p /= p.sum()
# generating non-overlapping splits
random_state = check_random_state(random_state)
splits = random_state.choice(np.arange(len(p)), p=p, size=(len(dataset), ))
# creating output datasets
out = []
for i in range(len(p)):
d = deepcopy(dataset)
d.paths = np.asarray(dataset.paths, dtype=object)[splits == i].tolist()
# excluding/including songs
for j, song in enumerate(d.get_songs()):
if splits[j] == i:
song['included'] = True
else:
song['included'] = False
out.append(d)
return tuple(out)
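# Hedged usage sketch (assumes the asmd package and its dataset definitions
# are installed; `asmd.asmd.Dataset` is the class referenced by docstrings
# below):
def _example_choice():
    from asmd import asmd
    full = asmd.Dataset()
    train, validation, test = choice(full, p=[0.6, 0.2, 0.2], random_state=42)
    # the three views are non-overlapping subsets of `full`
    return len(train.paths), len(validation.paths), len(test.paths)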
def chose_score_type(score_type, gts):
"""
Return the proper score type according to the following rules
Parameters
---
score_type : list of str
The key to retrieve the list of notes from the ground_truths. If
multiple keys are provided, only one is retrieved by using the
following criteria: if there is `precise_alignment` in the list of
keys and in the ground truth, use that; otherwise, if there is
`broad_alignment` in the list of keys and in the ground truth, use
    that; otherwise, if there is `misaligned` in the list of keys and in
    the ground truth, use that; otherwise use `score`.
gts : list of dict
        The list of ground truths from which you want to choose a score_type
"""
if len(score_type) > 1:
if 'precise_alignment' in score_type and len(
gts[0]['precise_alignment']['pitches']) > 0:
score_type = 'precise_alignment'
elif 'broad_alignment' in score_type and len(
gts[0]['broad_alignment']['pitches']) > 0:
score_type = 'broad_alignment'
elif 'misaligned' in score_type and len(
gts[0]['misaligned']['pitches']) > 0:
score_type = 'misaligned'
else:
score_type = 'score'
else:
score_type = score_type[0]
return score_type
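# Minimal illustration of the fall-back order with hypothetical ground truths:
# 'precise_alignment' is requested but empty, so 'broad_alignment' is chosen.
def _example_chose_score_type():
    gts = [{
        'precise_alignment': {'pitches': []},
        'broad_alignment': {'pitches': [60, 62, 64]},
    }]
    assert chose_score_type(['precise_alignment', 'broad_alignment'],
                            gts) == 'broad_alignment'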
def filter(dataset,
instruments=[],
ensemble=None,
mixed=True,
sources=False,
all=False,
composer='',
datasets=[],
groups=[],
ground_truth=[],
copy=False):
"""
    Filter the paths of the songs which satisfy the filters described
    by the arguments. If this dataset was already filtered, only filters
    those paths that are already included.
For advanced usage:
So that a dataset can be filtered, it must have the following keys:
* songs
* name
* included
All the attributes are checked at the song level, except for:
* `ensemble`: this is checked at the dataset-level (i.e. each dataset can
      be for ensemble or not). This may change in future releases.
* `ground_truth`: this is checked at group level (i.e. each subgroup can
have different annotations)
Similarly, each song must have the key ``included`` and optionally the
other keys that you want to filter, as described by the arguments of
this function.
Arguments
---------
instruments : list of str
a list of strings representing the instruments that you
want to select (exact match with song)
ensemble : bool
if loading songs which are composed for an ensemble of
instrument. If None, ensemble field will not be checked and will
select both (default None)
mixed : bool
if returning the mixed track for ensemble song
(default True )
sources : bool
if returning the source track for ensemble recording
which provide it (default False )
all : bool
        only valid if `sources` is True: if True, all
        sources (audio and ground-truth) are returned; if
        False, only the first target instrument is returned. Default False.
composer : string
the surname of the composer to filter
groups : list of strings
a list of strings containing the name of the groups that you want
        to retrieve, with a logical 'AND' among them. If empty, all groups are
        used. Examples of groups are: 'train', 'validation', 'test'. The
        available groups depend on the dataset. Only the Maestro dataset
        is supported for now.
datasets : list of strings
a list of strings containing the name of the datasets to be used.
If empty, all datasets are used. See :doc:`License` for the
list of default datasets. The matching is case insensitive.
ground_truth : dict[str, int]
a dictionary (string, int) representing the type of ground-truths
needed (logical AND among list elements).
Each entry has the form `needed_ground_truth_type` as key
and `level_of_truth` as value, where `needed_ground_truth_type` is the
key of the ground_truth dictionary and `level_of_truth` is an int
ranging from 0 to 2 (0->False, 1->True (manual annotation),
2->True(automatic annotation)).
If only part of a dataset contains a certain ground-truth type, you
should use the `group` attribute to only select those songs.
copy : bool
If True, a new Dataset object is returned, and the calling one is
        left untouched.
Returns
-------
The input dataset as modified: `d = Dataset().filter(...)`
If ``copy`` is True, return a new Dataset object.
"""
if copy:
ret = deepcopy(dataset)
else:
ret = dataset
# let's remove everything and put only the wanted ones
ret.paths = []
datasets = [d.lower() for d in datasets]
end = 0
for mydataset in ret.datasets:
FLAG = True
if not mydataset['included']:
FLAG = False
if len(datasets) > 0:
if mydataset['name'].lower() in datasets:
FLAG = True
else:
FLAG = False
# checking dataset-level filters
if ensemble is not None:
if ensemble != mydataset['ensemble']:
FLAG = False
# adding groups if ground_truth is checked
groups_gt = set()
for gt, val in ground_truth:
for group, group_gt in mydataset['ground_truth']:
if group_gt[gt] == val:
groups_gt.add(group)
if FLAG:
ret._chunks[mydataset['name']] = [end, end]
for song in mydataset['songs']:
FLAG = True
if not song['included']:
FLAG = False
# checking song levels filters
if instruments:
if instruments != song['instruments']:
FLAG = False
if composer:
if composer not in song['composer']:
FLAG = False
if groups:
for group in groups:
if group not in song['groups']:
FLAG = False
break
# checking groups taken for group-level filtering
if groups_gt:
if len(groups_gt.intersection(song['groups'])) == 0:
FLAG = False
if FLAG:
gts = song['ground_truth']
source = []
mix = []
if sources and "sources" in song.keys():
if all:
source = song['sources']['path']
else:
# find the index of the instrument
instrument = instruments[0]
idx = song['instruments'].index(instrument)
# take index of the target instrument
source = song['sources']['path'][idx]
gts = song['ground_truth'][idx]
if mixed:
mix = song['recording']['path']
ret.paths.append([mix, source, gts])
end += 1
else:
song['included'] = False
ret._chunks[mydataset['name']][1] = end
else:
# exclude dataset and all its songs
mydataset['included'] = False
for song in mydataset['songs']:
song['included'] = False
_check_consistency(ret, fix=True)
return ret
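# Hedged usage sketch (instrument and dataset names are illustrative; the
# docstring above notes that only the Maestro dataset supports groups):
def _example_filter():
    from asmd import asmd
    d = asmd.Dataset()
    train_piano = filter(d,
                         instruments=['piano'],
                         datasets=['Maestro'],
                         groups=['train'],
                         copy=True)
    # `d` is left untouched because copy=True; the returned view only keeps
    # songs whose instrument list is exactly ['piano'] and group is 'train'
    return len(train_piano.paths)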
def _check_consistency(dataset, fix=False):
"""
    Checks that if a dataset is included, then at least one of its songs is
    included, and that if a dataset is excluded, then all of its songs are
    excluded.
    If `fix` is True, it fixes the dataset inclusion flags, otherwise raises a
    `RuntimeError`
"""
for d in dataset.datasets:
included_songs = [s['included'] for s in d['songs']]
if d['included']:
if not any(included_songs):
if fix:
d['included'] = False
else:
raise RuntimeError(
f"{d['name']} is included but no song is included")
else:
if any(included_songs):
if fix:
d['included'] = True
else:
raise RuntimeError(
f"{d['name']} is excluded but at least one song is included"
)
def get_score_mat(dataset, idx, score_type=['misaligned'], return_notes=''):
"""
    Get the score of a certain song, with times of `score_type`
Arguments
---------
idx : int
The index of the song to retrieve.
score_type : list of str
The key to retrieve the list of notes from the ground_truths. see
`chose_score_type` for explanation
return_notes : str
``'missing'``, ``'extra'`` or ``'both'``; the notes that will be
returned together with the score; see
``asmd.asmd.Dataset.get_missing_extra_notes`` for more info
Returns
-------
numpy.ndarray :
A (n x 6) array where columns represent pitches, onsets (seconds),
offsets (seconds), velocities, MIDI program instrument and number of
the instrument. Ordered by onsets. If some information is not
available, value -255 is used.
The array is sorted by onset, pitch and offset (in this order)
numpy.ndarray :
A boolean array with True if the note is missing or extra (depending on
``return_notes``); only if ``return_notes is not None``
numpy.ndarray :
Another boolean array with True if the note is missing or extra (depending on
        ``return_notes``)
    """
map.get('Fingerprint')
self.common = map.get('Common')
self.issuer = map.get('Issuer')
self.last_time = map.get('LastTime')
return self
class DescribeVodCertificateListResponseCertificateListModelCertList(TeaModel):
def __init__(self, cert=None):
self.cert = []
def validate(self):
self.validate_required(self.cert, 'cert')
if self.cert:
for k in self.cert:
if k :
k.validate()
def to_map(self):
result = {}
result['Cert'] = []
if self.cert is not None:
for k in self.cert:
result['Cert'].append(k.to_map() if k else None)
else:
result['Cert'] = None
return result
def from_map(self, map={}):
self.cert = []
if map.get('Cert') is not None:
for k in map.get('Cert'):
temp_model = DescribeVodCertificateListResponseCertificateListModelCertListCert()
temp_model = temp_model.from_map(k)
self.cert.append(temp_model)
else:
self.cert = None
return self
class DescribeVodCertificateListResponseCertificateListModel(TeaModel):
def __init__(self, count=None, cert_list=None):
self.count = count
self.cert_list = cert_list
def validate(self):
self.validate_required(self.count, 'count')
self.validate_required(self.cert_list, 'cert_list')
if self.cert_list:
self.cert_list.validate()
def to_map(self):
result = {}
result['Count'] = self.count
if self.cert_list is not None:
result['CertList'] = self.cert_list.to_map()
else:
result['CertList'] = None
return result
def from_map(self, map={}):
self.count = map.get('Count')
if map.get('CertList') is not None:
temp_model = DescribeVodCertificateListResponseCertificateListModelCertList()
self.cert_list = temp_model.from_map(map['CertList'])
else:
self.cert_list = None
return self
class BatchStopVodDomainRequest(TeaModel):
def __init__(self, owner_id=None, security_token=None, domain_names=None):
self.owner_id = owner_id
self.security_token = security_token
self.domain_names = domain_names
def validate(self):
self.validate_required(self.domain_names, 'domain_names')
def to_map(self):
result = {}
result['OwnerId'] = self.owner_id
result['SecurityToken'] = self.security_token
result['DomainNames'] = self.domain_names
return result
def from_map(self, map={}):
self.owner_id = map.get('OwnerId')
self.security_token = map.get('SecurityToken')
self.domain_names = map.get('DomainNames')
return self
class BatchStopVodDomainResponse(TeaModel):
def __init__(self, request_id=None):
self.request_id = request_id
def validate(self):
self.validate_required(self.request_id, 'request_id')
def to_map(self):
result = {}
result['RequestId'] = self.request_id
return result
def from_map(self, map={}):
self.request_id = map.get('RequestId')
return self
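# Hedged sketch (not part of the generated SDK): the request/response models
# in this module all follow the same to_map()/from_map() round-trip pattern.
def _example_tea_model_round_trip():
    req = BatchStopVodDomainRequest(domain_names='example.com,example.org')
    wire = req.to_map()
    # wire == {'OwnerId': None, 'SecurityToken': None,
    #          'DomainNames': 'example.com,example.org'}
    same = BatchStopVodDomainRequest().from_map(wire)
    return same.domain_names == req.domain_names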
class DeleteVodDomainRequest(TeaModel):
def __init__(self, owner_id=None, owner_account=None, security_token=None, domain_name=None):
self.owner_id = owner_id
self.owner_account = owner_account
self.security_token = security_token
self.domain_name = domain_name
def validate(self):
self.validate_required(self.domain_name, 'domain_name')
def to_map(self):
result = {}
result['OwnerId'] = self.owner_id
result['OwnerAccount'] = self.owner_account
result['SecurityToken'] = self.security_token
result['DomainName'] = self.domain_name
return result
def from_map(self, map={}):
self.owner_id = map.get('OwnerId')
self.owner_account = map.get('OwnerAccount')
self.security_token = map.get('SecurityToken')
self.domain_name = map.get('DomainName')
return self
class DeleteVodDomainResponse(TeaModel):
def __init__(self, request_id=None):
self.request_id = request_id
def validate(self):
self.validate_required(self.request_id, 'request_id')
def to_map(self):
result = {}
result['RequestId'] = self.request_id
return result
def from_map(self, map={}):
self.request_id = map.get('RequestId')
return self
class SetVodDomainCertificateRequest(TeaModel):
def __init__(self, owner_id=None, security_token=None, domain_name=None, cert_name=None, sslprotocol=None, sslpub=None, sslpri=None, region=None):
self.owner_id = owner_id
self.security_token = security_token
self.domain_name = domain_name
self.cert_name = cert_name
self.sslprotocol = sslprotocol
self.sslpub = sslpub
self.sslpri = sslpri
self.region = region
def validate(self):
self.validate_required(self.domain_name, 'domain_name')
self.validate_required(self.sslprotocol, 'sslprotocol')
def to_map(self):
result = {}
result['OwnerId'] = self.owner_id
result['SecurityToken'] = self.security_token
result['DomainName'] = self.domain_name
result['CertName'] = self.cert_name
result['SSLProtocol'] = self.sslprotocol
result['SSLPub'] = self.sslpub
result['SSLPri'] = self.sslpri
result['Region'] = self.region
return result
def from_map(self, map={}):
self.owner_id = map.get('OwnerId')
self.security_token = map.get('SecurityToken')
self.domain_name = map.get('DomainName')
self.cert_name = map.get('CertName')
self.sslprotocol = map.get('SSLProtocol')
self.sslpub = map.get('SSLPub')
self.sslpri = map.get('SSLPri')
self.region = map.get('Region')
return self
class SetVodDomainCertificateResponse(TeaModel):
def __init__(self, request_id=None):
self.request_id = request_id
def validate(self):
self.validate_required(self.request_id, 'request_id')
def to_map(self):
result = {}
result['RequestId'] = self.request_id
return result
def from_map(self, map={}):
self.request_id = map.get('RequestId')
return self
class DeleteVodSpecificConfigRequest(TeaModel):
def __init__(self, owner_id=None, security_token=None, domain_name=None, config_id=None):
self.owner_id = owner_id
self.security_token = security_token
self.domain_name = domain_name
self.config_id = config_id
def validate(self):
self.validate_required(self.domain_name, 'domain_name')
self.validate_required(self.config_id, 'config_id')
def to_map(self):
result = {}
result['OwnerId'] = self.owner_id
result['SecurityToken'] = self.security_token
result['DomainName'] = self.domain_name
result['ConfigId'] = self.config_id
return result
def from_map(self, map={}):
self.owner_id = map.get('OwnerId')
self.security_token = map.get('SecurityToken')
self.domain_name = map.get('DomainName')
self.config_id = map.get('ConfigId')
return self
class DeleteVodSpecificConfigResponse(TeaModel):
def __init__(self, request_id=None):
self.request_id = request_id
def validate(self):
self.validate_required(self.request_id, 'request_id')
def to_map(self):
result = {}
result['RequestId'] = self.request_id
return result
def from_map(self, map={}):
self.request_id = map.get('RequestId')
return self
class BatchSetVodDomainConfigsRequest(TeaModel):
def __init__(self, owner_id=None, owner_account=None, security_token=None, domain_names=None, functions=None):
self.owner_id = owner_id
self.owner_account = owner_account
self.security_token = security_token
self.domain_names = domain_names
self.functions = functions
def validate(self):
self.validate_required(self.domain_names, 'domain_names')
self.validate_required(self.functions, 'functions')
def to_map(self):
result = {}
result['OwnerId'] = self.owner_id
result['OwnerAccount'] = self.owner_account
result['SecurityToken'] = self.security_token
result['DomainNames'] = self.domain_names
result['Functions'] = self.functions
return result
def from_map(self, map={}):
self.owner_id = map.get('OwnerId')
self.owner_account = map.get('OwnerAccount')
self.security_token = map.get('SecurityToken')
self.domain_names = map.get('DomainNames')
self.functions = map.get('Functions')
return self
class BatchSetVodDomainConfigsResponse(TeaModel):
def __init__(self, request_id=None):
self.request_id = request_id
def validate(self):
self.validate_required(self.request_id, 'request_id')
def to_map(self):
result = {}
result['RequestId'] = self.request_id
return result
def from_map(self, map={}):
self.request_id = map.get('RequestId')
return self
class AddVodDomainRequest(TeaModel):
def __init__(self, owner_id=None, owner_account=None, security_token=None, domain_name=None, sources=None, check_url=None, scope=None):
self.owner_id = owner_id
self.owner_account = owner_account
self.security_token = security_token
self.domain_name = domain_name
self.sources = sources
self.check_url = check_url
self.scope = scope
def validate(self):
self.validate_required(self.domain_name, 'domain_name')
self.validate_required(self.sources, 'sources')
def to_map(self):
result = {}
result['OwnerId'] = self.owner_id
result['OwnerAccount'] = self.owner_account
result['SecurityToken'] = self.security_token
result['DomainName'] = self.domain_name
result['Sources'] = self.sources
result['CheckUrl'] = self.check_url
result['Scope'] = self.scope
return result
def from_map(self, map={}):
self.owner_id = map.get('OwnerId')
self.owner_account = map.get('OwnerAccount')
self.security_token = map.get('SecurityToken')
self.domain_name = map.get('DomainName')
self.sources = map.get('Sources')
self.check_url = map.get('CheckUrl')
self.scope = map.get('Scope')
return self
class AddVodDomainResponse(TeaModel):
def __init__(self, request_id=None):
self.request_id = request_id
def validate(self):
self.validate_required(self.request_id, 'request_id')
def to_map(self):
result = {}
result['RequestId'] = self.request_id
return result
def from_map(self, map={}):
self.request_id = map.get('RequestId')
return self
class DescribeVodRefreshQuotaRequest(TeaModel):
def __init__(self, owner_id=None, security_token=None):
self.owner_id = owner_id
self.security_token = security_token
def validate(self):
pass
def to_map(self):
result = {}
result['OwnerId'] = self.owner_id
result['SecurityToken'] = self.security_token
return result
def from_map(self, map={}):
self.owner_id = map.get('OwnerId')
self.security_token = map.get('SecurityToken')
return self
class DescribeVodRefreshQuotaResponse(TeaModel):
def __init__(self, request_id=None, url_quota=None, dir_quota=None, url_remain=None, dir_remain=None, preload_quota=None, block_quota=None, preload_remain=None, block_remain=None):
self.request_id = request_id
self.url_quota = url_quota
self.dir_quota = dir_quota
self.url_remain = url_remain
self.dir_remain = dir_remain
self.preload_quota = preload_quota
self.block_quota = block_quota
self.preload_remain = preload_remain
self.block_remain = block_remain
def validate(self):
self.validate_required(self.request_id, 'request_id')
self.validate_required(self.url_quota, 'url_quota')
self.validate_required(self.dir_quota, 'dir_quota')
self.validate_required(self.url_remain, 'url_remain')
self.validate_required(self.dir_remain, 'dir_remain')
self.validate_required(self.preload_quota, 'preload_quota')
self.validate_required(self.block_quota, 'block_quota')
self.validate_required(self.preload_remain, 'preload_remain')
self.validate_required(self.block_remain, 'block_remain')
def to_map(self):
result = {}
result['RequestId'] = self.request_id
result['UrlQuota'] = self.url_quota
result['DirQuota'] = self.dir_quota
result['UrlRemain'] = self.url_remain
result['DirRemain'] = self.dir_remain
result['PreloadQuota'] = self.preload_quota
result['BlockQuota'] = self.block_quota
result['PreloadRemain'] = self.preload_remain
result['blockRemain'] = self.block_remain
return result
def from_map(self, map={}):
self.request_id = map.get('RequestId')
self.url_quota = map.get('UrlQuota')
self.dir_quota = map.get('DirQuota')
self.url_remain = map.get('UrlRemain')
self.dir_remain = map.get('DirRemain')
self.preload_quota = map.get('PreloadQuota')
self.block_quota = map.get('BlockQuota')
self.preload_remain = map.get('PreloadRemain')
self.block_remain = map.get('blockRemain')
return self
class DescribeVodRefreshTasksRequest(TeaModel):
def __init__(self, owner_id=None, security_token=None, task_id=None, object_path=None, page_number=None, object_type=None, domain_name=None, status=None, page_size=None, start_time=None, end_time=None, resource_group_id=None):
self.owner_id = owner_id
self.security_token = security_token
self.task_id = task_id
self.object_path = object_path
self.page_number = page_number
self.object_type = object_type
self.domain_name = domain_name
self.status = status
self.page_size = page_size
self.start_time = start_time
self.end_time = end_time
self.resource_group_id = resource_group_id
def validate(self):
pass
def to_map(self):
result = {}
result['OwnerId'] = self.owner_id
result['SecurityToken'] = self.security_token
result['TaskId'] = self.task_id
result['ObjectPath'] = self.object_path
result['PageNumber'] = self.page_number
result['ObjectType'] = self.object_type
result['DomainName'] = self.domain_name
result['Status'] = self.status
result['PageSize'] = self.page_size
result['StartTime'] = self.start_time
result['EndTime'] = self.end_time
result['ResourceGroupId'] = self.resource_group_id
return result
def from_map(self, map={}):
self.owner_id = map.get('OwnerId')
self.security_token = map.get('SecurityToken')
self.task_id = map.get('TaskId')
self.object_path = map.get('ObjectPath')
self.page_number = map.get('PageNumber')
self.object_type = map.get('ObjectType')
self.domain_name = map.get('DomainName')
self.status = map.get('Status')
self.page_size = map.get('PageSize')
self.start_time = map.get('StartTime')
self.end_time = map.get('EndTime')
self.resource_group_id = map.get('ResourceGroupId')
return self
class DescribeVodRefreshTasksResponse(TeaModel):
def __init__(self, request_id=None, page_number=None, page_size=None, total_count=None, tasks=None):
self.request_id = request_id
self.page_number = page_number
self.page_size = page_size
self.total_count = total_count
self.tasks = tasks
def validate(self):
self.validate_required(self.request_id, 'request_id')
self.validate_required(self.page_number, 'page_number')
self.validate_required(self.page_size, 'page_size')
self.validate_required(self.total_count, 'total_count')
self.validate_required(self.tasks, 'tasks')
if self.tasks:
self.tasks.validate()
def to_map(self):
result = {}
result['RequestId'] = self.request_id
result['PageNumber'] = self.page_number
result['PageSize'] = self.page_size
result['TotalCount'] = self.total_count
if self.tasks is not None:
result['Tasks'] = self.tasks.to_map()
else:
result['Tasks'] = None
return result
def from_map(self, map={}):
self.request_id = map.get('RequestId')
self.page_number = map.get('PageNumber')
self.page_size = map.get('PageSize')
self.total_count = map.get('TotalCount')
if map.get('Tasks') is not None:
temp_model = DescribeVodRefreshTasksResponseTasks()
self.tasks = temp_model.from_map(map['Tasks'])
else:
self.tasks = None
return self
class DescribeVodRefreshTasksResponseTasksTask(TeaModel):
def __init__(self, task_id=None, object_path=None, process=None, status=None, creation_time=None, description=None, object_type=None):
self.task_id = task_id
self.object_path = object_path
self.process = process
self.status = status
self.creation_time = creation_time
self.description = description
self.object_type = object_type
def validate(self):
self.validate_required(self.task_id, 'task_id')
self.validate_required(self.object_path, 'object_path')
self.validate_required(self.process, 'process')
self.validate_required(self.status, 'status')
self.validate_required(self.creation_time, 'creation_time')
self.validate_required(self.description, 'description')
self.validate_required(self.object_type, 'object_type')
def to_map(self):
result = {}
result['TaskId'] = self.task_id
result['ObjectPath'] = self.object_path
result['Process'] = self.process
result['Status'] = self.status
result['CreationTime'] = self.creation_time
result['Description'] = self.description
result['ObjectType'] = self.object_type
return result
def from_map(self, map={}):
self.task_id = map.get('TaskId')
self.object_path = map.get('ObjectPath')
self.process = map.get('Process')
self.status = map.get('Status')
self.creation_time = map.get('CreationTime')
self.description = map.get('Description')
self.object_type = map.get('ObjectType')
return self
class DescribeVodRefreshTasksResponseTasks(TeaModel):
def __init__(self, task=None):
self.task = []
def validate(self):
self.validate_required(self.task, 'task')
if self.task:
for k in self.task:
                if k :
                    k.validate()
# coding: utf-8
"""
Copyright 2015 SmartBear Software
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
Ref: https://github.com/swagger-api/swagger-codegen
"""
from datetime import datetime
from pprint import pformat
from six import iteritems
class BuildConfiguration(object):
"""
NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
def __init__(self):
"""
BuildConfiguration - a model defined in Swagger
:param dict swaggerTypes: The key is attribute name
and the value is attribute type.
:param dict attributeMap: The key is attribute name
and the value is json key in definition.
"""
self.swagger_types = {
'id': 'int',
'name': 'str',
'build_script': 'str',
'repository_configuration': 'RepositoryConfiguration',
'scm_revision': 'str',
'description': 'str',
'product_version': 'ProductVersion',
'project': 'Project',
'build_environment': 'BuildEnvironment',
'build_configuration_sets': 'list[BuildConfigurationSet]',
'creation_time': 'datetime',
'last_modification_time': 'datetime',
'dependencies': 'list[BuildConfiguration]',
'dependants': 'list[BuildConfiguration]',
'generic_parameters': 'dict(str, str)',
'all_dependencies': 'list[BuildConfiguration]',
'indirect_dependencies': 'list[BuildConfiguration]',
'archived': 'bool',
'field_handler': 'FieldHandler',
'current_product_milestone': 'ProductMilestone'
}
self.attribute_map = {
'id': 'id',
'name': 'name',
'build_script': 'buildScript',
'repository_configuration': 'repositoryConfiguration',
'scm_revision': 'scmRevision',
'description': 'description',
'product_version': 'productVersion',
'project': 'project',
'build_environment': 'buildEnvironment',
'build_configuration_sets': 'buildConfigurationSets',
'creation_time': 'creationTime',
'last_modification_time': 'lastModificationTime',
'dependencies': 'dependencies',
'dependants': 'dependants',
'generic_parameters': 'genericParameters',
'all_dependencies': 'allDependencies',
'indirect_dependencies': 'indirectDependencies',
'archived': 'archived',
'field_handler': 'fieldHandler',
'current_product_milestone': 'currentProductMilestone'
}
self._id = None
self._name = None
self._build_script = None
self._repository_configuration = None
self._scm_revision = None
self._description = None
self._product_version = None
self._project = None
self._build_environment = None
self._build_configuration_sets = None
self._creation_time = None
self._last_modification_time = None
self._dependencies = None
self._dependants = None
self._generic_parameters = None
self._all_dependencies = None
self._indirect_dependencies = None
self._archived = None
self._field_handler = None
self._current_product_milestone = None
@property
def id(self):
"""
Gets the id of this BuildConfiguration.
:return: The id of this BuildConfiguration.
:rtype: int
"""
return self._id
@id.setter
def id(self, id):
"""
Sets the id of this BuildConfiguration.
:param id: The id of this BuildConfiguration.
:type: int
"""
self._id = id
@property
def name(self):
"""
Gets the name of this BuildConfiguration.
:return: The name of this BuildConfiguration.
:rtype: str
"""
return self._name
@name.setter
def name(self, name):
"""
Sets the name of this BuildConfiguration.
:param name: The name of this BuildConfiguration.
:type: str
"""
self._name = name
@property
def build_script(self):
"""
Gets the build_script of this BuildConfiguration.
:return: The build_script of this BuildConfiguration.
:rtype: str
"""
return self._build_script
@build_script.setter
def build_script(self, build_script):
"""
Sets the build_script of this BuildConfiguration.
:param build_script: The build_script of this BuildConfiguration.
:type: str
"""
self._build_script = build_script
@property
def repository_configuration(self):
"""
Gets the repository_configuration of this BuildConfiguration.
:return: The repository_configuration of this BuildConfiguration.
:rtype: RepositoryConfiguration
"""
return self._repository_configuration
@repository_configuration.setter
def repository_configuration(self, repository_configuration):
"""
Sets the repository_configuration of this BuildConfiguration.
:param repository_configuration: The repository_configuration of this BuildConfiguration.
:type: RepositoryConfiguration
"""
self._repository_configuration = repository_configuration
@property
def scm_revision(self):
"""
Gets the scm_revision of this BuildConfiguration.
:return: The scm_revision of this BuildConfiguration.
:rtype: str
"""
return self._scm_revision
@scm_revision.setter
def scm_revision(self, scm_revision):
"""
Sets the scm_revision of this BuildConfiguration.
:param scm_revision: The scm_revision of this BuildConfiguration.
:type: str
"""
self._scm_revision = scm_revision
@property
def description(self):
"""
Gets the description of this BuildConfiguration.
:return: The description of this BuildConfiguration.
:rtype: str
"""
return self._description
@description.setter
def description(self, description):
"""
Sets the description of this BuildConfiguration.
:param description: The description of this BuildConfiguration.
:type: str
"""
self._description = description
@property
def product_version(self):
"""
Gets the product_version of this BuildConfiguration.
:return: The product_version of this BuildConfiguration.
:rtype: ProductVersion
"""
return self._product_version
@product_version.setter
def product_version(self, product_version):
"""
Sets the product_version of this BuildConfiguration.
:param product_version: The product_version of this BuildConfiguration.
:type: ProductVersion
"""
self._product_version = product_version
@property
def project(self):
"""
Gets the project of this BuildConfiguration.
:return: The project of this BuildConfiguration.
:rtype: Project
"""
return self._project
@project.setter
def project(self, project):
"""
Sets the project of this BuildConfiguration.
:param project: The project of this BuildConfiguration.
:type: Project
"""
self._project = project
@property
def build_environment(self):
"""
Gets the build_environment of this BuildConfiguration.
:return: The build_environment of this BuildConfiguration.
:rtype: BuildEnvironment
"""
return self._build_environment
@build_environment.setter
def build_environment(self, build_environment):
"""
Sets the build_environment of this BuildConfiguration.
:param build_environment: The build_environment of this BuildConfiguration.
:type: BuildEnvironment
"""
self._build_environment = build_environment
@property
def build_configuration_sets(self):
"""
Gets the build_configuration_sets of this BuildConfiguration.
:return: The build_configuration_sets of this BuildConfiguration.
:rtype: list[BuildConfigurationSet]
"""
return self._build_configuration_sets
@build_configuration_sets.setter
def build_configuration_sets(self, build_configuration_sets):
"""
Sets the build_configuration_sets of this BuildConfiguration.
:param build_configuration_sets: The build_configuration_sets of this BuildConfiguration.
:type: list[BuildConfigurationSet]
"""
self._build_configuration_sets = build_configuration_sets
@property
def creation_time(self):
"""
Gets the creation_time of this BuildConfiguration.
:return: The creation_time of this BuildConfiguration.
:rtype: datetime
"""
return self._creation_time
@creation_time.setter
def creation_time(self, creation_time):
"""
Sets the creation_time of this BuildConfiguration.
:param creation_time: The creation_time of this BuildConfiguration.
:type: datetime
"""
self._creation_time = creation_time
@property
def last_modification_time(self):
"""
Gets the last_modification_time of this BuildConfiguration.
:return: The last_modification_time of this BuildConfiguration.
:rtype: datetime
"""
return self._last_modification_time
@last_modification_time.setter
def last_modification_time(self, last_modification_time):
"""
Sets the last_modification_time of this BuildConfiguration.
:param last_modification_time: The last_modification_time of this BuildConfiguration.
:type: datetime
"""
self._last_modification_time = last_modification_time
@property
def dependencies(self):
"""
Gets the dependencies of this BuildConfiguration.
:return: The dependencies of this BuildConfiguration.
:rtype: list[BuildConfiguration]
"""
return self._dependencies
@dependencies.setter
def dependencies(self, dependencies):
"""
Sets the dependencies of this BuildConfiguration.
:param dependencies: The dependencies of this BuildConfiguration.
:type: list[BuildConfiguration]
"""
self._dependencies = dependencies
@property
def dependants(self):
"""
Gets the dependants of this BuildConfiguration.
:return: The dependants of this BuildConfiguration.
:rtype: list[BuildConfiguration]
"""
return self._dependants
@dependants.setter
def dependants(self, dependants):
"""
Sets the dependants of this BuildConfiguration.
:param dependants: The dependants of this BuildConfiguration.
:type: list[BuildConfiguration]
"""
self._dependants = dependants
@property
def generic_parameters(self):
"""
Gets the generic_parameters of this BuildConfiguration.
:return: The generic_parameters of this BuildConfiguration.
:rtype: dict(str, str)
"""
return self._generic_parameters
@generic_parameters.setter
def generic_parameters(self, generic_parameters):
"""
Sets the generic_parameters of this BuildConfiguration.
:param generic_parameters: The generic_parameters of this BuildConfiguration.
:type: dict(str, str)
"""
self._generic_parameters = generic_parameters
@property
def all_dependencies(self):
"""
Gets the all_dependencies of this BuildConfiguration.
:return: The all_dependencies of this BuildConfiguration.
:rtype: list[BuildConfiguration]
"""
return self._all_dependencies
@all_dependencies.setter
def all_dependencies(self, all_dependencies):
"""
Sets the all_dependencies of this BuildConfiguration.
:param all_dependencies: The all_dependencies of this BuildConfiguration.
:type: list[BuildConfiguration]
"""
self._all_dependencies = all_dependencies
@property
def indirect_dependencies(self):
"""
Gets the indirect_dependencies of this BuildConfiguration.
:return: The indirect_dependencies of this BuildConfiguration.
:rtype: list[BuildConfiguration]
"""
return self._indirect_dependencies
@indirect_dependencies.setter
def indirect_dependencies(self, indirect_dependencies):
"""
Sets the indirect_dependencies of this BuildConfiguration.
:param indirect_dependencies: The indirect_dependencies of this BuildConfiguration.
:type: list[BuildConfiguration]
"""
self._indirect_dependencies = indirect_dependencies
@property
def archived(self):
"""
Gets the archived of this BuildConfiguration.
:return: The archived of this BuildConfiguration.
:rtype: bool
"""
return self._archived
@archived.setter
def archived(self, archived):
"""
Sets the archived of this BuildConfiguration.
:param archived: The archived of this BuildConfiguration.
:type: bool
"""
self._archived = archived
@property
def field_handler(self):
"""
Gets the field_handler of this BuildConfiguration.
:return: The field_handler of this BuildConfiguration.
:rtype: FieldHandler
"""
return self._field_handler
@field_handler.setter
def field_handler(self, field_handler):
"""
Sets the field_handler of this BuildConfiguration.
:param field_handler: The field_handler of this BuildConfiguration.
:type: FieldHandler
"""
self._field_handler = field_handler
@property
def current_product_milestone(self):
"""
Gets the current_product_milestone of this BuildConfiguration.
:return: The current_product_milestone of this BuildConfiguration.
:rtype: ProductMilestone
"""
return self._current_product_milestone
@current_product_milestone.setter
def current_product_milestone(self, current_product_milestone):
"""
Sets the current_product_milestone of this BuildConfiguration.
:param current_product_milestone: The current_product_milestone of this BuildConfiguration.
:type: ProductMilestone
"""
self._current_product_milestone = current_product_milestone
def to_dict(self):
"""
Returns the model properties as a dict
"""
result = {}
for attr, _ in iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, datetime):
result[attr] = str(value.date())
else:
result[attr] = value
return result
def to_str(self):
"""
Returns the string representation of the model
"""
return pformat(self.to_dict())
def __repr__(self):
"""
        For `print` and `pprint`
        """
        return self.to_str()
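# Hedged sketch (not part of the generated client): the properties above write
# to the private attributes declared in __init__, and to_dict() serializes
# whatever has been set, leaving untouched fields as None.
def _example_build_configuration_to_dict():
    bc = BuildConfiguration()
    bc.name = 'demo-build'          # hypothetical values
    bc.build_script = 'mvn deploy'
    as_dict = bc.to_dict()
    return as_dict['name'], as_dict['build_script'], as_dict['id']  # id is None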
# coding: utf-8
# Copyright (c) 2016, 2022, Oracle and/or its affiliates. All rights reserved.
# This software is dual-licensed to you under the Universal Permissive License (UPL) 1.0 as shown at https://oss.oracle.com/licenses/upl or Apache License 2.0 as shown at http://www.apache.org/licenses/LICENSE-2.0. You may choose either license.
from oci.util import formatted_flat_dict, NONE_SENTINEL, value_allowed_none_or_none_sentinel # noqa: F401
from oci.decorators import init_model_state_from_kwargs
@init_model_state_from_kwargs
class TaskRun(object):
"""
The information about a task run.
"""
#: A constant which can be used with the status property of a TaskRun.
#: This constant has a value of "NOT_STARTED"
STATUS_NOT_STARTED = "NOT_STARTED"
#: A constant which can be used with the status property of a TaskRun.
#: This constant has a value of "QUEUED"
STATUS_QUEUED = "QUEUED"
#: A constant which can be used with the status property of a TaskRun.
#: This constant has a value of "RUNNING"
STATUS_RUNNING = "RUNNING"
#: A constant which can be used with the status property of a TaskRun.
#: This constant has a value of "TERMINATING"
STATUS_TERMINATING = "TERMINATING"
#: A constant which can be used with the status property of a TaskRun.
#: This constant has a value of "TERMINATED"
STATUS_TERMINATED = "TERMINATED"
#: A constant which can be used with the status property of a TaskRun.
#: This constant has a value of "SUCCESS"
STATUS_SUCCESS = "SUCCESS"
#: A constant which can be used with the status property of a TaskRun.
#: This constant has a value of "ERROR"
STATUS_ERROR = "ERROR"
#: A constant which can be used with the expected_duration_unit property of a TaskRun.
#: This constant has a value of "SECONDS"
EXPECTED_DURATION_UNIT_SECONDS = "SECONDS"
#: A constant which can be used with the expected_duration_unit property of a TaskRun.
#: This constant has a value of "MINUTES"
EXPECTED_DURATION_UNIT_MINUTES = "MINUTES"
#: A constant which can be used with the expected_duration_unit property of a TaskRun.
#: This constant has a value of "HOURS"
EXPECTED_DURATION_UNIT_HOURS = "HOURS"
#: A constant which can be used with the expected_duration_unit property of a TaskRun.
#: This constant has a value of "DAYS"
EXPECTED_DURATION_UNIT_DAYS = "DAYS"
#: A constant which can be used with the auth_mode property of a TaskRun.
#: This constant has a value of "OBO"
AUTH_MODE_OBO = "OBO"
#: A constant which can be used with the auth_mode property of a TaskRun.
#: This constant has a value of "RESOURCE_PRINCIPAL"
AUTH_MODE_RESOURCE_PRINCIPAL = "RESOURCE_PRINCIPAL"
#: A constant which can be used with the auth_mode property of a TaskRun.
#: This constant has a value of "USER_CERTIFICATE"
AUTH_MODE_USER_CERTIFICATE = "USER_CERTIFICATE"
#: A constant which can be used with the task_type property of a TaskRun.
#: This constant has a value of "INTEGRATION_TASK"
TASK_TYPE_INTEGRATION_TASK = "INTEGRATION_TASK"
#: A constant which can be used with the task_type property of a TaskRun.
#: This constant has a value of "DATA_LOADER_TASK"
TASK_TYPE_DATA_LOADER_TASK = "DATA_LOADER_TASK"
#: A constant which can be used with the task_type property of a TaskRun.
#: This constant has a value of "PIPELINE_TASK"
TASK_TYPE_PIPELINE_TASK = "PIPELINE_TASK"
#: A constant which can be used with the task_type property of a TaskRun.
#: This constant has a value of "SQL_TASK"
TASK_TYPE_SQL_TASK = "SQL_TASK"
#: A constant which can be used with the task_type property of a TaskRun.
#: This constant has a value of "OCI_DATAFLOW_TASK"
TASK_TYPE_OCI_DATAFLOW_TASK = "OCI_DATAFLOW_TASK"
#: A constant which can be used with the task_type property of a TaskRun.
#: This constant has a value of "REST_TASK"
TASK_TYPE_REST_TASK = "REST_TASK"
def __init__(self, **kwargs):
"""
Initializes a new TaskRun object with values from keyword arguments.
The following keyword arguments are supported (corresponding to the getters/setters of this class):
:param key:
The value to assign to the key property of this TaskRun.
:type key: str
:param model_type:
The value to assign to the model_type property of this TaskRun.
:type model_type: str
:param model_version:
The value to assign to the model_version property of this TaskRun.
:type model_version: str
:param parent_ref:
The value to assign to the parent_ref property of this TaskRun.
:type parent_ref: oci.data_integration.models.ParentReference
:param name:
The value to assign to the name property of this TaskRun.
:type name: str
:param description:
The value to assign to the description property of this TaskRun.
:type description: str
:param object_version:
The value to assign to the object_version property of this TaskRun.
:type object_version: int
:param config_provider:
The value to assign to the config_provider property of this TaskRun.
:type config_provider: oci.data_integration.models.ConfigProvider
:param status:
The value to assign to the status property of this TaskRun.
Allowed values for this property are: "NOT_STARTED", "QUEUED", "RUNNING", "TERMINATING", "TERMINATED", "SUCCESS", "ERROR", 'UNKNOWN_ENUM_VALUE'.
Any unrecognized values returned by a service will be mapped to 'UNKNOWN_ENUM_VALUE'.
:type status: str
:param start_time_millis:
The value to assign to the start_time_millis property of this TaskRun.
:type start_time_millis: int
:param end_time_millis:
The value to assign to the end_time_millis property of this TaskRun.
:type end_time_millis: int
:param last_updated:
The value to assign to the last_updated property of this TaskRun.
:type last_updated: int
:param records_written:
The value to assign to the records_written property of this TaskRun.
:type records_written: int
:param bytes_processed:
The value to assign to the bytes_processed property of this TaskRun.
:type bytes_processed: int
:param error_message:
The value to assign to the error_message property of this TaskRun.
:type error_message: str
:param expected_duration:
The value to assign to the expected_duration property of this TaskRun.
:type expected_duration: float
:param expected_duration_unit:
The value to assign to the expected_duration_unit property of this TaskRun.
Allowed values for this property are: "SECONDS", "MINUTES", "HOURS", "DAYS", 'UNKNOWN_ENUM_VALUE'.
Any unrecognized values returned by a service will be mapped to 'UNKNOWN_ENUM_VALUE'.
:type expected_duration_unit: str
:param task_key:
The value to assign to the task_key property of this TaskRun.
:type task_key: str
:param external_id:
The value to assign to the external_id property of this TaskRun.
:type external_id: str
:param retry_attempt:
The value to assign to the retry_attempt property of this TaskRun.
:type retry_attempt: int
:param task_schedule:
The value to assign to the task_schedule property of this TaskRun.
:type task_schedule: oci.data_integration.models.TaskSchedule
:param metrics:
The value to assign to the metrics property of this TaskRun.
:type metrics: dict(str, float)
:param outputs:
The value to assign to the outputs property of this TaskRun.
:type outputs: dict(str, ParameterValue)
:param execution_errors:
The value to assign to the execution_errors property of this TaskRun.
:type execution_errors: list[str]
:param termination_errors:
The value to assign to the termination_errors property of this TaskRun.
:type termination_errors: list[str]
:param auth_mode:
The value to assign to the auth_mode property of this TaskRun.
Allowed values for this property are: "OBO", "RESOURCE_PRINCIPAL", "USER_CERTIFICATE", 'UNKNOWN_ENUM_VALUE'.
Any unrecognized values returned by a service will be mapped to 'UNKNOWN_ENUM_VALUE'.
:type auth_mode: str
:param opc_request_id:
The value to assign to the opc_request_id property of this TaskRun.
:type opc_request_id: str
:param object_status:
The value to assign to the object_status property of this TaskRun.
:type object_status: int
:param task_type:
The value to assign to the task_type property of this TaskRun.
Allowed values for this property are: "INTEGRATION_TASK", "DATA_LOADER_TASK", "PIPELINE_TASK", "SQL_TASK", "OCI_DATAFLOW_TASK", "REST_TASK", 'UNKNOWN_ENUM_VALUE'.
Any unrecognized values returned by a service will be mapped to 'UNKNOWN_ENUM_VALUE'.
:type task_type: str
:param identifier:
The value to assign to the identifier property of this TaskRun.
:type identifier: str
:param metadata:
The value to assign to the metadata property of this TaskRun.
:type metadata: oci.data_integration.models.ObjectMetadata
:param key_map:
The value to assign to the key_map property of this TaskRun.
:type key_map: dict(str, str)
"""
self.swagger_types = {
'key': 'str',
'model_type': 'str',
'model_version': 'str',
'parent_ref': 'ParentReference',
'name': 'str',
'description': 'str',
'object_version': 'int',
'config_provider': 'ConfigProvider',
'status': 'str',
'start_time_millis': 'int',
'end_time_millis': 'int',
'last_updated': 'int',
'records_written': 'int',
'bytes_processed': 'int',
'error_message': 'str',
'expected_duration': 'float',
'expected_duration_unit': 'str',
'task_key': 'str',
'external_id': 'str',
'retry_attempt': 'int',
'task_schedule': 'TaskSchedule',
'metrics': 'dict(str, float)',
'outputs': 'dict(str, ParameterValue)',
'execution_errors': 'list[str]',
'termination_errors': 'list[str]',
'auth_mode': 'str',
'opc_request_id': 'str',
'object_status': 'int',
'task_type': 'str',
'identifier': 'str',
'metadata': 'ObjectMetadata',
'key_map': 'dict(str, str)'
}
self.attribute_map = {
'key': 'key',
            'model_type': 'modelType',
"""Contains functions that are used in the dynamic location and creation
of tabs and datatypes. Can be used both internally and externally.
"""
import os
import json
import pkg_resources
import importlib
import logging
from PyQt5 import QtWidgets
from meggie.utilities.uid import generate_uid
from meggie.utilities.messaging import exc_messagebox
from meggie.utilities.messaging import messagebox
from meggie.utilities.threading import threaded
def find_all_plugins():
"""Looks for plugins (installed packages with name meggie_*), that
can contain tabs or datatypes"""
plugins = []
package_keys = [dist.key.replace('-', '_') for dist
in pkg_resources.working_set]
for key in package_keys:
try:
if key.startswith('meggie_'):
# check that there exists configuration.json
if not os.path.exists(
pkg_resources.resource_filename(key, 'configuration.json')):
continue
plugins.append(key)
except Exception as exc:
logging.getLogger('ui_logger').exception('')
return plugins
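# Hedged sketch: print whatever plugin packages were found (the names depend
# entirely on what is installed, e.g. packages called meggie_<something>).
def _example_find_all_plugins():
    for plugin in find_all_plugins():
        print(plugin)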
def find_all_sources():
"""Returns all packages where to look for actions / datatypes.
"""
return ['meggie'] + find_all_plugins()
def find_all_package_specs():
"""Returns all package specifications found.
"""
package_specs = {}
sources = find_all_sources()
for source in sources:
config_path = pkg_resources.resource_filename(source, 'configuration.json')
if not os.path.exists(config_path):
continue
with open(config_path, 'r') as f:
config = json.load(f)
if config:
package_specs[source] = config
# add possibly missing fields
for package_spec in package_specs.values():
if 'name' not in package_spec:
package_spec['name'] = ""
if 'author' not in package_spec:
package_spec['author'] = ""
return package_specs
def find_all_datatype_specs():
"""Returns all datatype specifications found.
"""
datatype_specs = {}
found_keys = []
sources = find_all_sources()
for source in sources:
datatype_path = pkg_resources.resource_filename(source, 'datatypes')
if not os.path.exists(datatype_path):
continue
for package in os.listdir(datatype_path):
config_path = os.path.join(
datatype_path, package, 'configuration.json')
if os.path.exists(config_path):
with open(config_path, 'r') as f:
config = json.load(f)
if config:
if config['id'] in found_keys:
raise Exception('Datatype with the same id found ' +
'in multiple packages')
else:
found_keys.append(config['id'])
datatype_specs[config['id']] = source, package, config
# add possibly missing fields
for source, package, datatype_spec in datatype_specs.values():
if 'name' not in datatype_spec:
datatype_spec['name'] = datatype_spec['id']
return datatype_specs
def find_all_action_specs():
"""Returns all action specifications found.
"""
action_specs = {}
found_keys = []
sources = find_all_sources()
for source in sources:
action_path = pkg_resources.resource_filename(source, 'actions')
if not os.path.exists(action_path):
continue
for package in os.listdir(action_path):
config_path = os.path.join(action_path, package, 'configuration.json')
if os.path.exists(config_path):
with open(config_path, 'r') as f:
config = json.load(f)
if config:
if config['id'] in found_keys:
raise Exception('Action with the same id found ' +
'in multiple packages')
else:
found_keys.append(config['id'])
action_specs[config['id']] = source, package, config
# add possibly missing fields
for source, package, action_spec in action_specs.values():
if 'name' not in action_spec:
action_spec['name'] = action_spec['id']
return action_specs
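# Hedged sketch: each entry maps an action id to (source package, action
# package, parsed configuration.json), so the available actions can be
# inspected like this:
def _example_list_action_specs():
    for action_id, (source, package, spec) in find_all_action_specs().items():
        print(action_id, '<-', source + '.actions.' + package, spec.get('name'))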
def construct_tab(tab_spec, action_specs, datatype_specs, parent):
"""Constructs analysis tab dynamically. Returns a QDialog
that can be used within a QTabDialog of the main window.
Parameters
----------
tab_spec : dict
The specification of the tab read to a dict
action_specs : dict
Specifications of actions
datatype_specs : dict
Specifications of datatypes
parent : instance of main window
The main window, is passed to the handlers in the ui.py.
Returns
-------
instance of QDialog
        The constructed tab that can be added to
        the main window.
"""
class DynamicTab(QtWidgets.QDialog):
""" Class defining a tab.
Parameters
----------
parent : instance of main window
The main window, is passed to the handlers in actions.
"""
def __init__(self, parent):
QtWidgets.QDialog.__init__(self)
self.parent = parent
self.tab_spec = tab_spec
self.action_specs = action_specs
self.datatype_specs = datatype_specs
# first create basic layout
self.gridLayoutContainer = QtWidgets.QGridLayout(self)
self.gridLayoutRoot = QtWidgets.QGridLayout()
self.gridLayoutContainer.addLayout(self.gridLayoutRoot, 0, 0, 1, 1)
if self.tab_spec['outputs']:
self.groupBoxOutputs = QtWidgets.QGroupBox(self)
self.groupBoxOutputs.setTitle('Outputs')
self.gridLayoutOutputs = QtWidgets.QGridLayout(
self.groupBoxOutputs)
if self.tab_spec['output_actions']:
self.groupBoxOutputActions = QtWidgets.QGroupBox(self)
self.groupBoxOutputActions.setTitle('')
self.gridLayoutOutputActions = QtWidgets.QGridLayout(
self.groupBoxOutputActions)
if self.tab_spec['input_actions']:
self.groupBoxInputActions = QtWidgets.QGroupBox(self)
self.groupBoxInputActions.setTitle('')
self.gridLayoutInputActions = QtWidgets.QGridLayout(
self.groupBoxInputActions)
if self.tab_spec['inputs']:
self.groupBoxInputs = QtWidgets.QGroupBox(self)
self.groupBoxInputs.setTitle('Inputs')
self.gridLayoutInputs = QtWidgets.QGridLayout(
self.groupBoxInputs)
if self.tab_spec['info']:
self.groupBoxInfo = QtWidgets.QGroupBox(self)
self.groupBoxInfo.setTitle('Info')
self.gridLayoutInfo = QtWidgets.QGridLayout(self.groupBoxInfo)
# add the (empty) input lists
for idx, input_name in enumerate(self.tab_spec['inputs']):
title = input_name.capitalize()
for source, package, datatype_spec in datatype_specs.values():
if datatype_spec['id'] == input_name:
title = datatype_spec['name']
groupBoxInputElement = QtWidgets.QGroupBox(self.groupBoxInputs)
groupBoxInputElement.setTitle(title)
gridLayoutInputElement = QtWidgets.QGridLayout(
groupBoxInputElement)
listWidgetInputElement = QtWidgets.QListWidget(
groupBoxInputElement)
listWidgetInputElement.setSelectionMode(
QtWidgets.QAbstractItemView.ExtendedSelection)
gridLayoutInputElement.addWidget(
listWidgetInputElement, idx, 0, 1, 1)
self.gridLayoutInputs.addWidget(groupBoxInputElement)
setattr(self, 'groupBoxInputElement_' + str(idx + 1),
groupBoxInputElement)
setattr(self, 'gridLayoutInputElement_' + str(idx + 1),
gridLayoutInputElement)
setattr(self, 'listWidgetInputElement_' + str(idx + 1),
listWidgetInputElement)
# add the (empty) output lists
for idx, output_name in enumerate(self.tab_spec['outputs']):
title = output_name.capitalize()
for source, package, datatype_spec in datatype_specs.values():
if datatype_spec['id'] == output_name:
title = datatype_spec['name']
groupBoxOutputElement = QtWidgets.QGroupBox(
self.groupBoxOutputs)
groupBoxOutputElement.setTitle(title)
gridLayoutOutputElement = QtWidgets.QGridLayout(
groupBoxOutputElement)
listWidgetOutputElement = QtWidgets.QListWidget(
groupBoxOutputElement)
listWidgetOutputElement.setSelectionMode(
QtWidgets.QAbstractItemView.ExtendedSelection)
gridLayoutOutputElement.addWidget(
listWidgetOutputElement, idx, 0, 1, 1)
self.gridLayoutOutputs.addWidget(groupBoxOutputElement)
setattr(self, 'groupBoxOutputElement_' + str(idx + 1),
groupBoxOutputElement)
setattr(self, 'gridLayoutOutputElement_' + str(idx + 1),
gridLayoutOutputElement)
setattr(self, 'listWidgetOutputElement_' + str(idx + 1),
listWidgetOutputElement)
# add input action buttons
for idx, action_name in enumerate(self.tab_spec['input_actions']):
action_spec = self.action_specs[action_name][2]
title = action_spec['name']
pushButtonInputActionElement = QtWidgets.QPushButton(
self.groupBoxInputActions)
pushButtonInputActionElement.setText(title)
self.gridLayoutInputActions.addWidget(
pushButtonInputActionElement, idx, 0, 1, 1)
setattr(self, 'pushButtonInputActionElement_' + str(idx + 1),
pushButtonInputActionElement)
if getattr(self, 'gridLayoutInputActions', None):
spacer = QtWidgets.QSpacerItem(20, 10, QtWidgets.QSizePolicy.Minimum,
QtWidgets.QSizePolicy.Expanding)
self.gridLayoutInputActions.addItem(spacer, idx + 1, 0, 1, 1)
# add action buttons
for idx, action_name in enumerate(self.tab_spec['output_actions']):
action_spec = self.action_specs[action_name][2]
title = action_spec['name']
pushButtonOutputActionElement = QtWidgets.QPushButton(
self.groupBoxOutputActions)
pushButtonOutputActionElement.setText(title)
self.gridLayoutOutputActions.addWidget(
pushButtonOutputActionElement, idx, 0, 1, 1)
setattr(self, 'pushButtonOutputActionElement_' + str(idx + 1),
pushButtonOutputActionElement)
if getattr(self, 'gridLayoutOutputActions', None):
spacer = QtWidgets.QSpacerItem(20, 10, QtWidgets.QSizePolicy.Minimum,
QtWidgets.QSizePolicy.Expanding)
self.gridLayoutOutputActions.addItem(spacer, idx + 1, 0, 1, 1)
# add info text boxes
for idx, info_name in enumerate(self.tab_spec['info']):
action_spec = self.action_specs[info_name][2]
title = action_spec['name']
groupBoxInfoElement = QtWidgets.QGroupBox(self.groupBoxInfo)
groupBoxInfoElement.setTitle(title)
gridLayoutInfoElement = QtWidgets.QGridLayout(
groupBoxInfoElement)
plainTextEditInfoElement = QtWidgets.QTextBrowser(
groupBoxInfoElement)
gridLayoutInfoElement.addWidget(
plainTextEditInfoElement, idx, 0, 1, 1)
self.gridLayoutInfo.addWidget(groupBoxInfoElement)
setattr(self, 'groupBoxInfoElement_' + str(idx + 1),
groupBoxInfoElement)
setattr(self, 'gridLayoutInfoElement_' + str(idx + 1),
gridLayoutInfoElement)
setattr(self, 'plainTextEditInfoElement_' + str(idx + 1),
plainTextEditInfoElement)
# lay out inputs, outputs, actions and info elements
# in a nice way to a grid
if self.tab_spec['inputs'] and not self.tab_spec['input_actions']:
self.gridLayoutRoot.addWidget(self.groupBoxInputs, 0, 0, 2, 1)
elif self.tab_spec['inputs']:
self.gridLayoutRoot.addWidget(self.groupBoxInputs, 0, 0, 1, 1)
if self.tab_spec['outputs'] and not self.tab_spec['output_actions']:
self.gridLayoutRoot.addWidget(self.groupBoxOutputs, 0, 1, 2, 1)
elif self.tab_spec['outputs']:
self.gridLayoutRoot.addWidget(self.groupBoxOutputs, 0, 1, 1, 1)
if self.tab_spec['input_actions'] and not self.tab_spec['inputs']:
self.gridLayoutRoot.addWidget(
self.groupBoxInputActions, 0, 0, 2, 1)
elif self.tab_spec['input_actions']:
self.gridLayoutRoot.addWidget(
self.groupBoxInputActions, 1, 0, 1, 1)
if self.tab_spec['output_actions'] and not self.tab_spec['outputs']:
self.gridLayoutRoot.addWidget(self.groupBoxOutputActions, 0, 1, 2, 1)
elif self.tab_spec['output_actions']:
self.gridLayoutRoot.addWidget(self.groupBoxOutputActions, 1, 1, 1, 1)
if self.tab_spec['info']:
self.gridLayoutRoot.addWidget(self.groupBoxInfo, 0, 2, 2, 1)
        # add spacers to the bottom and right to keep the window compact
spacerItemVertical = QtWidgets.QSpacerItem(
20, 10, QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.Expanding)
self.gridLayoutContainer.addItem(spacerItemVertical, 1, 0, 1, 1)
spacerItemHorizontal = QtWidgets.QSpacerItem(
10, 20, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Minimum)
self.gridLayoutContainer.addItem(spacerItemHorizontal, 0, 1, 1, 1)
# add handlers for list selection changed -> info updates
def connect_to_handler(list_element, info_element, info_name):
source, package, action_spec = self.action_specs[info_name]
module = importlib.import_module(
'.'.join([source, 'actions', package]))
entry = action_spec['entry']
@threaded
def handler(*args):
return getattr(module, entry)(*args).run()
def handler_wrapper():
experiment = self.parent.experiment
if not experiment:
return
subject = experiment.active_subject
if not subject:
return
data = self._get_data()
try:
info_content = handler(experiment, data,
parent, action_spec,
do_meanwhile=parent.update_ui)
info_element.setPlainText(info_content)
except Exception as exc:
exc_messagebox(self, exc)
list_element.itemSelectionChanged.connect(handler_wrapper)
for idx, info_name in enumerate(self.tab_spec['info']):
info_element = getattr(self, 'plainTextEditInfoElement_' +
str(idx + 1))
for idx, input_name in enumerate(self.tab_spec['inputs']):
input_element = getattr(
self, 'listWidgetInputElement_' + str(idx + 1))
connect_to_handler(input_element, info_element, info_name)
for idx, output_name in enumerate(self.tab_spec['outputs']):
output_element = getattr(
self, 'listWidgetOutputElement_' + str(idx + 1))
connect_to_handler(output_element, info_element, info_name)
# add button handlers
def connect_to_handler(button, name):
source, package, action_spec = self.action_specs[name]
module = importlib.import_module(
'.'.join([source, 'actions', package]))
entry = action_spec['entry']
handler = getattr(module, entry)
def handler_wrapper(checked):
experiment = self.parent.experiment
if not experiment:
return
subject = experiment.active_subject
if not subject:
return
data = self._get_data()
try:
handler(experiment, data, parent, action_spec).run()
except Exception as exc:
exc_messagebox(self, exc)
button.clicked.connect(handler_wrapper)
for idx, action_name in enumerate(self.tab_spec['input_actions']):
action_element = getattr(
self, 'pushButtonInputActionElement_' + str(idx + 1))
connect_to_handler(action_element, action_name)
for idx, action_name in enumerate(self.tab_spec['output_actions']):
action_element = getattr(
self, 'pushButtonOutputActionElement_' + str(idx + 1))
connect_to_handler(action_element, action_name)
def _get_data(self):
"""Returns data from input and output lists.
"""
data = {'inputs': {},
'outputs': {}}
data['tab_id'] = self.tab_spec['id']
inputs = []
for idx, name in enumerate(self.tab_spec['inputs']):
ui_element = getattr(
self, 'listWidgetInputElement_' + str(idx + 1))
try:
selected_items = [item.text()
for item in ui_element.selectedItems()]
except BaseException:
continue
data['inputs'][name] = selected_items
for idx, name in enumerate(self.tab_spec['outputs']):
ui_element = getattr(
self, 'listWidgetOutputElement_' + str(idx + 1))
try:
selected_items = [item.text() for item in
ui_element.selectedItems()]
except BaseException:
continue
data['outputs'][name] = selected_items
return data
def initialize_ui(self):
"""Updates (empties and refills) ui contents | |
# Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
System tests for Create, Update, Delete. (CRUD)
"""
import datetime
import functools
import operator
import os
import random
import threading
import zlib
try:
from unittest import mock
except ImportError:
import mock
import pytest
import test_utils.system
from google.cloud import ndb
from google.cloud.ndb import _cache
from google.cloud.ndb import global_cache as global_cache_module
from tests.system import KIND, eventually
USE_REDIS_CACHE = bool(os.environ.get("REDIS_CACHE_URL"))
def _equals(n):
return functools.partial(operator.eq, n)
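# Usage sketch: `eventually` (imported above) repeatedly polls a callable until the
# given predicate holds, e.g. `eventually(key.get, _equals(None))` waits for a
# deletion to become visible before a test continues.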
@pytest.mark.usefixtures("client_context")
def test_retrieve_entity(ds_entity):
entity_id = test_utils.system.unique_resource_id()
ds_entity(KIND, entity_id, foo=42, bar="none", baz=b"night")
class SomeKind(ndb.Model):
foo = ndb.IntegerProperty()
bar = ndb.StringProperty()
baz = ndb.StringProperty()
key = ndb.Key(KIND, entity_id)
entity = key.get()
assert isinstance(entity, SomeKind)
assert entity.foo == 42
assert entity.bar == "none"
assert entity.baz == "night"
def test_retrieve_entity_with_caching(ds_entity, client_context):
entity_id = test_utils.system.unique_resource_id()
ds_entity(KIND, entity_id, foo=42, bar="none", baz=b"night")
class SomeKind(ndb.Model):
foo = ndb.IntegerProperty()
bar = ndb.StringProperty()
baz = ndb.StringProperty()
client_context.set_cache_policy(None) # Use default
key = ndb.Key(KIND, entity_id)
entity = key.get()
assert isinstance(entity, SomeKind)
assert entity.foo == 42
assert entity.bar == "none"
assert entity.baz == "night"
assert key.get() is entity
def test_retrieve_entity_with_global_cache(ds_entity, client_context):
entity_id = test_utils.system.unique_resource_id()
ds_entity(KIND, entity_id, foo=42, bar="none", baz=b"night")
class SomeKind(ndb.Model):
foo = ndb.IntegerProperty()
bar = ndb.StringProperty()
baz = ndb.StringProperty()
global_cache = global_cache_module._InProcessGlobalCache()
cache_dict = global_cache_module._InProcessGlobalCache.cache
with client_context.new(global_cache=global_cache).use() as context:
context.set_global_cache_policy(None) # Use default
key = ndb.Key(KIND, entity_id)
entity = key.get()
assert isinstance(entity, SomeKind)
assert entity.foo == 42
assert entity.bar == "none"
assert entity.baz == "night"
cache_key = _cache.global_cache_key(key._key)
assert cache_key in cache_dict
patch = mock.patch("google.cloud.ndb._datastore_api._LookupBatch.add")
patch.side_effect = Exception("Shouldn't call this")
with patch:
entity = key.get()
assert isinstance(entity, SomeKind)
assert entity.foo == 42
assert entity.bar == "none"
assert entity.baz == "night"
@pytest.mark.skipif(not USE_REDIS_CACHE, reason="Redis is not configured")
def test_retrieve_entity_with_redis_cache(ds_entity, redis_context):
entity_id = test_utils.system.unique_resource_id()
ds_entity(KIND, entity_id, foo=42, bar="none", baz=b"night")
class SomeKind(ndb.Model):
foo = ndb.IntegerProperty()
bar = ndb.StringProperty()
baz = ndb.StringProperty()
key = ndb.Key(KIND, entity_id)
entity = key.get()
assert isinstance(entity, SomeKind)
assert entity.foo == 42
assert entity.bar == "none"
assert entity.baz == "night"
cache_key = _cache.global_cache_key(key._key)
assert redis_context.global_cache.redis.get(cache_key) is not None
patch = mock.patch("google.cloud.ndb._datastore_api._LookupBatch.add")
patch.side_effect = Exception("Shouldn't call this")
with patch:
entity = key.get()
assert isinstance(entity, SomeKind)
assert entity.foo == 42
assert entity.bar == "none"
assert entity.baz == "night"
@pytest.mark.usefixtures("client_context")
def test_retrieve_entity_not_found(ds_entity):
entity_id = test_utils.system.unique_resource_id()
class SomeKind(ndb.Model):
foo = ndb.IntegerProperty()
bar = ndb.StringProperty()
key = ndb.Key(KIND, entity_id)
assert key.get() is None
@pytest.mark.usefixtures("client_context")
def test_nested_tasklet(ds_entity):
entity_id = test_utils.system.unique_resource_id()
ds_entity(KIND, entity_id, foo=42, bar="none")
class SomeKind(ndb.Model):
foo = ndb.IntegerProperty()
bar = ndb.StringProperty()
@ndb.tasklet
def get_foo(key):
entity = yield key.get_async()
raise ndb.Return(entity.foo)
key = ndb.Key(KIND, entity_id)
assert get_foo(key).result() == 42
@pytest.mark.usefixtures("client_context")
def test_retrieve_two_entities_in_parallel(ds_entity):
entity1_id = test_utils.system.unique_resource_id()
ds_entity(KIND, entity1_id, foo=42, bar="none")
entity2_id = test_utils.system.unique_resource_id()
ds_entity(KIND, entity2_id, foo=65, bar="naan")
class SomeKind(ndb.Model):
foo = ndb.IntegerProperty()
bar = ndb.StringProperty()
key1 = ndb.Key(KIND, entity1_id)
key2 = ndb.Key(KIND, entity2_id)
@ndb.tasklet
def get_two_entities():
entity1, entity2 = yield key1.get_async(), key2.get_async()
raise ndb.Return(entity1, entity2)
entity1, entity2 = get_two_entities().result()
assert isinstance(entity1, SomeKind)
assert entity1.foo == 42
assert entity1.bar == "none"
assert isinstance(entity2, SomeKind)
assert entity2.foo == 65
assert entity2.bar == "naan"
@pytest.mark.usefixtures("client_context")
def test_insert_entity(dispose_of, ds_client):
class SomeKind(ndb.Model):
foo = ndb.IntegerProperty()
bar = ndb.StringProperty()
entity = SomeKind(foo=42, bar="none")
key = entity.put()
dispose_of(key._key)
retrieved = key.get()
assert retrieved.foo == 42
assert retrieved.bar == "none"
# Make sure strings are stored as strings in datastore
ds_entity = ds_client.get(key._key)
assert ds_entity["bar"] == "none"
@pytest.mark.usefixtures("client_context")
def test_insert_entity_with_stored_name_property(dispose_of, ds_client):
class SomeKind(ndb.Model):
foo = ndb.StringProperty()
bar = ndb.StringProperty(name="notbar")
entity = SomeKind(foo="something", bar="or other")
key = entity.put()
dispose_of(key._key)
retrieved = key.get()
assert retrieved.foo == "something"
assert retrieved.bar == "or other"
ds_entity = ds_client.get(key._key)
assert ds_entity["notbar"] == "or other"
@pytest.mark.usefixtures("client_context")
def test_insert_roundtrip_naive_datetime(dispose_of, ds_client):
class SomeKind(ndb.Model):
foo = ndb.DateTimeProperty()
entity = SomeKind(foo=datetime.datetime(2010, 5, 12, 2, 42))
key = entity.put()
dispose_of(key._key)
retrieved = key.get()
assert retrieved.foo == datetime.datetime(2010, 5, 12, 2, 42)
@pytest.mark.usefixtures("client_context")
def test_datetime_w_tzinfo(dispose_of, ds_client):
class timezone(datetime.tzinfo):
def __init__(self, offset):
self.offset = datetime.timedelta(hours=offset)
def utcoffset(self, dt):
return self.offset
def dst(self, dt):
return datetime.timedelta(0)
mytz = timezone(-4)
class SomeKind(ndb.Model):
foo = ndb.DateTimeProperty(tzinfo=mytz)
bar = ndb.DateTimeProperty(tzinfo=mytz)
entity = SomeKind(
foo=datetime.datetime(2010, 5, 12, 2, 42, tzinfo=timezone(-5)),
bar=datetime.datetime(2010, 5, 12, 2, 42),
)
key = entity.put()
dispose_of(key._key)
retrieved = key.get()
assert retrieved.foo == datetime.datetime(2010, 5, 12, 3, 42, tzinfo=mytz)
assert retrieved.bar == datetime.datetime(2010, 5, 11, 22, 42, tzinfo=mytz)
def test_parallel_threads(dispose_of, namespace):
client = ndb.Client(namespace=namespace)
class SomeKind(ndb.Model):
foo = ndb.IntegerProperty()
bar = ndb.StringProperty()
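    # Each worker thread opens its own client context below: google-cloud-ndb
    # contexts are not shared between threads, so every thread must create one.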
def insert(foo):
with client.context(cache_policy=False):
entity = SomeKind(foo=foo, bar="none")
key = entity.put()
dispose_of(key._key)
retrieved = key.get()
assert retrieved.foo == foo
assert retrieved.bar == "none"
thread1 = threading.Thread(target=insert, args=[42], name="one")
thread2 = threading.Thread(target=insert, args=[144], name="two")
thread1.start()
thread2.start()
thread1.join()
thread2.join()
@pytest.mark.usefixtures("client_context")
def test_large_json_property(dispose_of, ds_client):
class SomeKind(ndb.Model):
foo = ndb.JsonProperty()
foo = {str(i): i for i in range(500)}
entity = SomeKind(foo=foo)
key = entity.put()
dispose_of(key._key)
retrieved = key.get()
assert retrieved.foo == foo
@pytest.mark.usefixtures("client_context")
def test_compressed_json_property(dispose_of, ds_client):
class SomeKind(ndb.Model):
foo = ndb.JsonProperty(compressed=True)
foo = {str(i): i for i in range(500)}
entity = SomeKind(foo=foo)
key = entity.put()
dispose_of(key._key)
retrieved = key.get()
assert retrieved.foo == foo
@pytest.mark.usefixtures("client_context")
def test_compressed_blob_property(dispose_of, ds_client):
class SomeKind(ndb.Model):
foo = ndb.BlobProperty(compressed=True)
foo = b"abc" * 100
entity = SomeKind(foo=foo)
key = entity.put()
dispose_of(key._key)
retrieved = key.get()
assert retrieved.foo == foo
@pytest.mark.usefixtures("client_context")
def test_compressed_repeated_local_structured_property(dispose_of, ds_client):
class Dog(ndb.Model):
name = ndb.StringProperty()
class House(ndb.Model):
dogs = ndb.LocalStructuredProperty(Dog, repeated=True, compressed=True)
entity = House()
dogs = [Dog(name="Mika"), Dog(name="Mocha")]
entity.dogs = dogs
key = entity.put()
dispose_of(key._key)
retrieved = key.get()
assert retrieved.dogs == dogs
@pytest.mark.usefixtures("client_context")
def test_retrieve_entity_with_legacy_compressed_property(
ds_entity_with_meanings,
):
class SomeKind(ndb.Model):
blob = ndb.BlobProperty()
value = b"abc" * 1000
compressed_value = zlib.compress(value)
entity_id = test_utils.system.unique_resource_id()
ds_entity_with_meanings(
{"blob": (22, compressed_value)},
KIND,
entity_id,
**{"blob": compressed_value}
)
key = ndb.Key(KIND, entity_id)
retrieved = key.get()
assert retrieved.blob == value
@pytest.mark.usefixtures("client_context")
def test_large_pickle_property(dispose_of, ds_client):
class SomeKind(ndb.Model):
foo = ndb.PickleProperty()
foo = {str(i): i for i in range(500)}
entity = SomeKind(foo=foo)
key = entity.put()
dispose_of(key._key)
retrieved = key.get()
assert retrieved.foo == foo
@pytest.mark.usefixtures("client_context")
def test_key_property(dispose_of, ds_client):
class SomeKind(ndb.Model):
foo = ndb.KeyProperty()
key_value = ndb.Key("Whatevs", 123)
entity = SomeKind(foo=key_value)
key = entity.put()
dispose_of(key._key)
retrieved = key.get()
assert retrieved.foo == key_value
@pytest.mark.usefixtures("client_context")
def test_multiple_key_properties(dispose_of, ds_client):
class SomeKind(ndb.Model):
foo = ndb.KeyProperty(kind="Whatevs")
bar = ndb.KeyProperty(kind="Whatevs")
foo = ndb.Key("Whatevs", 123)
bar = ndb.Key("Whatevs", 321)
entity = SomeKind(foo=foo, bar=bar)
key = entity.put()
dispose_of(key._key)
retrieved = key.get()
assert retrieved.foo == foo
assert retrieved.bar == bar
assert retrieved.foo != retrieved.bar
def test_insert_entity_with_caching(client_context):
class SomeKind(ndb.Model):
foo = ndb.IntegerProperty()
bar = ndb.StringProperty()
client_context.set_cache_policy(None) # Use default
entity = SomeKind(foo=42, bar="none")
key = entity.put()
with client_context.new(cache_policy=False).use():
        # Sneaky. Delete the entity out from under the cache so we know we're
        # getting the cached copy.
key.delete()
eventually(key.get, _equals(None))
retrieved = key.get()
assert retrieved.foo == 42
assert retrieved.bar == "none"
def test_insert_entity_with_global_cache(dispose_of, client_context):
class SomeKind(ndb.Model):
foo = ndb.IntegerProperty()
bar = ndb.StringProperty()
global_cache = global_cache_module._InProcessGlobalCache()
cache_dict = global_cache_module._InProcessGlobalCache.cache
with client_context.new(global_cache=global_cache).use() as context:
context.set_global_cache_policy(None) # Use default
entity = SomeKind(foo=42, bar="none")
key = entity.put()
dispose_of(key._key)
cache_key = _cache.global_cache_key(key._key)
assert not cache_dict
retrieved = key.get()
assert retrieved.foo == 42
assert retrieved.bar == "none"
assert cache_key in cache_dict
entity.foo = 43
entity.put()
# This is py27 behavior. I can see a case being made for caching the
# entity on write rather than waiting for a subsequent lookup.
assert cache_key not in cache_dict
@pytest.mark.skipif(not USE_REDIS_CACHE, reason="Redis is not configured")
def test_insert_entity_with_redis_cache(dispose_of, redis_context):
class SomeKind(ndb.Model):
foo = ndb.IntegerProperty()
bar = ndb.StringProperty()
entity = SomeKind(foo=42, bar="none")
key = entity.put()
dispose_of(key._key)
cache_key = _cache.global_cache_key(key._key)
assert redis_context.global_cache.redis.get(cache_key) is None
retrieved = key.get()
assert retrieved.foo == 42
assert retrieved.bar == "none"
assert redis_context.global_cache.redis.get(cache_key) is not None
entity.foo = 43
entity.put()
# This is py27 behavior. I can see a case being made for caching the
# entity on write rather than waiting for a subsequent lookup.
assert redis_context.global_cache.redis.get(cache_key) is None
@pytest.mark.usefixtures("client_context")
def test_update_entity(ds_entity):
entity_id = test_utils.system.unique_resource_id()
ds_entity(KIND, entity_id, foo=42, bar="none")
class SomeKind(ndb.Model):
foo = ndb.IntegerProperty()
bar = ndb.StringProperty()
key = ndb.Key(KIND, entity_id)
entity = key.get()
entity.foo = 56
entity.bar = "high"
assert entity.put() == key
retrieved = key.get()
assert retrieved.foo == 56
assert retrieved.bar == "high"
@pytest.mark.usefixtures("client_context")
def test_insert_entity_in_transaction(dispose_of):
commit_callback = mock.Mock()
class SomeKind(ndb.Model):
foo = ndb.IntegerProperty()
bar = ndb.StringProperty()
of addresses that have been reserved for the interconnect
               attachment. Used only for interconnect attachments that have the
               encryption option set to IPSEC.
The addresses must be RFC 1918 IP address ranges. When creating HA
VPN gateway over the interconnect attachment, if the attachment is
configured to use an RFC 1918 IP address, then the VPN gateway's IP
address will be allocated from the IP address range specified
here.
For example, if the HA VPN gateway's interface 0 is paired to this
interconnect attachment, then an RFC 1918 IP address for the VPN
gateway interface 0 will be allocated from the IP address specified
for this interconnect attachment.
If this field is not specified for interconnect attachment that has
encryption option as IPSEC, later on when creating HA VPN gateway on
this interconnect attachment, the HA VPN gateway's IP address will be
allocated from regional external IP address pool.
:param pulumi.Input[str] mtu: Maximum Transmission Unit (MTU), in bytes, of packets passing through
this interconnect attachment. Currently, only 1440 and 1500 are allowed. If not specified, the value will default to 1440.
:param pulumi.Input[str] name: Name of the resource. Provided by the client when the resource is created. The
name must be 1-63 characters long, and comply with RFC1035. Specifically, the
name must be 1-63 characters long and match the regular expression
               `[a-z]([-a-z0-9]*[a-z0-9])?` which means the first character must be a
lowercase letter, and all following characters must be a dash, lowercase
letter, or digit, except the last character, which cannot be a dash.
:param pulumi.Input[str] project: The ID of the project in which the resource belongs.
If it is not provided, the provider project is used.
:param pulumi.Input[str] region: Region where the regional interconnect attachment resides.
:param pulumi.Input[str] router: URL of the cloud router to be used for dynamic routing. This router must be in
the same region as this InterconnectAttachment. The InterconnectAttachment will
automatically connect the Interconnect to the network & region within which the
Cloud Router is configured.
:param pulumi.Input[str] type: The type of InterconnectAttachment you wish to create. Defaults to
DEDICATED.
Possible values are `DEDICATED`, `PARTNER`, and `PARTNER_PROVIDER`.
:param pulumi.Input[int] vlan_tag8021q: The IEEE 802.1Q VLAN tag for this attachment, in the range 2-4094. When
using PARTNER type this will be managed upstream.
"""
...
@overload
def __init__(__self__,
resource_name: str,
args: InterconnectAttachmentArgs,
opts: Optional[pulumi.ResourceOptions] = None):
"""
Represents an InterconnectAttachment (VLAN attachment) resource. For more
information, see Creating VLAN Attachments.
## Example Usage
### Interconnect Attachment Basic
```python
import pulumi
import pulumi_gcp as gcp
foobar_network = gcp.compute.Network("foobarNetwork", auto_create_subnetworks=False)
foobar_router = gcp.compute.Router("foobarRouter",
network=foobar_network.name,
bgp=gcp.compute.RouterBgpArgs(
asn=16550,
))
on_prem = gcp.compute.InterconnectAttachment("onPrem",
edge_availability_domain="AVAILABILITY_DOMAIN_1",
type="PARTNER",
router=foobar_router.id,
mtu="1500")
```
### Compute Interconnect Attachment Ipsec Encryption
```python
import pulumi
import pulumi_gcp as gcp
network = gcp.compute.Network("network", auto_create_subnetworks=False)
address = gcp.compute.Address("address",
address_type="INTERNAL",
purpose="IPSEC_INTERCONNECT",
address="192.168.1.0",
prefix_length=29,
network=network.self_link)
router = gcp.compute.Router("router",
network=network.name,
encrypted_interconnect_router=True,
bgp=gcp.compute.RouterBgpArgs(
asn=16550,
))
ipsec_encrypted_interconnect_attachment = gcp.compute.InterconnectAttachment("ipsec-encrypted-interconnect-attachment",
edge_availability_domain="AVAILABILITY_DOMAIN_1",
type="PARTNER",
router=router.id,
encryption="IPSEC",
ipsec_internal_addresses=[address.self_link])
```
## Import
InterconnectAttachment can be imported using any of these accepted formats
```sh
$ pulumi import gcp:compute/interconnectAttachment:InterconnectAttachment default projects/{{project}}/regions/{{region}}/interconnectAttachments/{{name}}
```
```sh
$ pulumi import gcp:compute/interconnectAttachment:InterconnectAttachment default {{project}}/{{region}}/{{name}}
```
```sh
$ pulumi import gcp:compute/interconnectAttachment:InterconnectAttachment default {{region}}/{{name}}
```
```sh
$ pulumi import gcp:compute/interconnectAttachment:InterconnectAttachment default {{name}}
```
:param str resource_name: The name of the resource.
:param InterconnectAttachmentArgs args: The arguments to use to populate this resource's properties.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
...
def __init__(__self__, resource_name: str, *args, **kwargs):
resource_args, opts = _utilities.get_resource_args_opts(InterconnectAttachmentArgs, pulumi.ResourceOptions, *args, **kwargs)
if resource_args is not None:
__self__._internal_init(resource_name, opts, **resource_args.__dict__)
else:
__self__._internal_init(resource_name, *args, **kwargs)
def _internal_init(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
admin_enabled: Optional[pulumi.Input[bool]] = None,
bandwidth: Optional[pulumi.Input[str]] = None,
candidate_subnets: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
description: Optional[pulumi.Input[str]] = None,
edge_availability_domain: Optional[pulumi.Input[str]] = None,
encryption: Optional[pulumi.Input[str]] = None,
interconnect: Optional[pulumi.Input[str]] = None,
ipsec_internal_addresses: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
mtu: Optional[pulumi.Input[str]] = None,
name: Optional[pulumi.Input[str]] = None,
project: Optional[pulumi.Input[str]] = None,
region: Optional[pulumi.Input[str]] = None,
router: Optional[pulumi.Input[str]] = None,
type: Optional[pulumi.Input[str]] = None,
vlan_tag8021q: Optional[pulumi.Input[int]] = None,
__props__=None):
if opts is None:
opts = pulumi.ResourceOptions()
if not isinstance(opts, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
if opts.version is None:
opts.version = _utilities.get_version()
if opts.id is None:
if __props__ is not None:
raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
__props__ = InterconnectAttachmentArgs.__new__(InterconnectAttachmentArgs)
__props__.__dict__["admin_enabled"] = admin_enabled
__props__.__dict__["bandwidth"] = bandwidth
__props__.__dict__["candidate_subnets"] = candidate_subnets
__props__.__dict__["description"] = description
__props__.__dict__["edge_availability_domain"] = edge_availability_domain
__props__.__dict__["encryption"] = encryption
__props__.__dict__["interconnect"] = interconnect
__props__.__dict__["ipsec_internal_addresses"] = ipsec_internal_addresses
__props__.__dict__["mtu"] = mtu
__props__.__dict__["name"] = name
__props__.__dict__["project"] = project
__props__.__dict__["region"] = region
if router is None and not opts.urn:
raise TypeError("Missing required property 'router'")
__props__.__dict__["router"] = router
__props__.__dict__["type"] = type
__props__.__dict__["vlan_tag8021q"] = vlan_tag8021q
__props__.__dict__["cloud_router_ip_address"] = None
__props__.__dict__["creation_timestamp"] = None
__props__.__dict__["customer_router_ip_address"] = None
__props__.__dict__["google_reference_id"] = None
__props__.__dict__["pairing_key"] = None
__props__.__dict__["partner_asn"] = None
__props__.__dict__["private_interconnect_infos"] = None
__props__.__dict__["self_link"] = None
__props__.__dict__["state"] = None
super(InterconnectAttachment, __self__).__init__(
'gcp:compute/interconnectAttachment:InterconnectAttachment',
resource_name,
__props__,
opts)
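    # Illustrative usage (hypothetical names): an already-provisioned attachment can
    # be adopted into state with the static `get` method defined below, e.g.
    #   InterconnectAttachment.get(
    #       "imported",
    #       id="projects/my-proj/regions/us-central1/interconnectAttachments/my-attachment")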
@staticmethod
def get(resource_name: str,
id: pulumi.Input[str],
opts: Optional[pulumi.ResourceOptions] = None,
admin_enabled: Optional[pulumi.Input[bool]] = None,
bandwidth: Optional[pulumi.Input[str]] = None,
candidate_subnets: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
cloud_router_ip_address: Optional[pulumi.Input[str]] = None,
creation_timestamp: Optional[pulumi.Input[str]] = None,
customer_router_ip_address: Optional[pulumi.Input[str]] = None,
description: Optional[pulumi.Input[str]] = None,
edge_availability_domain: Optional[pulumi.Input[str]] = None,
encryption: Optional[pulumi.Input[str]] = None,
google_reference_id: Optional[pulumi.Input[str]] = None,
interconnect: Optional[pulumi.Input[str]] = None,
ipsec_internal_addresses: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
mtu: Optional[pulumi.Input[str]] = None,
name: Optional[pulumi.Input[str]] = None,
pairing_key: Optional[pulumi.Input[str]] = None,
partner_asn: Optional[pulumi.Input[str]] = None,
private_interconnect_infos: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['InterconnectAttachmentPrivateInterconnectInfoArgs']]]]] = None,
project: Optional[pulumi.Input[str]] = None,
region: Optional[pulumi.Input[str]] = None,
router: Optional[pulumi.Input[str]] = None,
self_link: Optional[pulumi.Input[str]] = None,
state: Optional[pulumi.Input[str]] = None,
type: Optional[pulumi.Input[str]] = None,
vlan_tag8021q: Optional[pulumi.Input[int]] = None) -> 'InterconnectAttachment':
"""
Get an existing InterconnectAttachment resource's state with the given name, id, and optional extra
properties used to qualify the lookup.
:param str resource_name: The unique name of the resulting resource.
:param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[bool] admin_enabled: Whether the VLAN attachment is enabled or disabled. When using
PARTNER type this will Pre-Activate the interconnect attachment
:param pulumi.Input[str] bandwidth: Provisioned bandwidth capacity for the interconnect attachment.
For attachments of type DEDICATED, the user can set the bandwidth.
For attachments of type PARTNER, the Google Partner that is operating the interconnect must set the bandwidth.
Output only for PARTNER type, mutable for PARTNER_PROVIDER and DEDICATED,
Defaults to BPS_10G
Possible values are `BPS_50M`, `BPS_100M`, `BPS_200M`, `BPS_300M`, `BPS_400M`, `BPS_500M`, `BPS_1G`, `BPS_2G`, `BPS_5G`, `BPS_10G`, `BPS_20G`, and `BPS_50G`.
:param pulumi.Input[Sequence[pulumi.Input[str]]] candidate_subnets: Up to 16 candidate prefixes that can be used to restrict the allocation
of cloudRouterIpAddress and customerRouterIpAddress for this attachment.
All prefixes must be within link-local address space (169.254.0.0/16)
and must be /29 or shorter (/28, /27, etc). Google will attempt to select
an unused /29 from the supplied candidate prefix(es). The request will
fail if all possible /29s are in use on Google's edge. If not supplied,
Google will randomly select an unused /29 from all of link-local space.
:param pulumi.Input[str] cloud_router_ip_address: IPv4 address + prefix length to be configured on Cloud Router Interface for this interconnect attachment.
:param pulumi.Input[str] creation_timestamp: Creation timestamp in RFC3339 text format.
:param pulumi.Input[str] customer_router_ip_address: IPv4 address + prefix length to be configured on the customer router subinterface for this interconnect attachment.
:param pulumi.Input[str] description: An optional description of this resource.
:param pulumi.Input[str] edge_availability_domain: Desired availability domain for the attachment. Only available for type
PARTNER, at creation time. For improved reliability, customers should
configure a pair of attachments with one per availability domain. The
selected availability domain will be provided to the Partner via the
pairing key so that the provisioned circuit will lie in the specified
domain. If not specified, the value will default to AVAILABILITY_DOMAIN_ANY.
:param pulumi.Input[str] encryption: Indicates the user-supplied encryption option of this interconnect
attachment:
NONE is the default value, which means that the attachment carries
unencrypted traffic. VMs can send traffic to, or receive traffic
from, this type of attachment.
description(self) -> str:
"""
Description of the entry direction rule.
"""
return pulumi.get(self, "description")
@property
@pulumi.getter(name="networkAclEntryName")
def network_acl_entry_name(self) -> str:
"""
        The name of the entry direction rule.
"""
return pulumi.get(self, "network_acl_entry_name")
@property
@pulumi.getter
def policy(self) -> str:
"""
The authorization policy.
"""
return pulumi.get(self, "policy")
@property
@pulumi.getter
def port(self) -> str:
"""
Source port range.
"""
return pulumi.get(self, "port")
@property
@pulumi.getter
def protocol(self) -> str:
"""
Transport layer protocol.
"""
return pulumi.get(self, "protocol")
@property
@pulumi.getter(name="sourceCidrIp")
def source_cidr_ip(self) -> str:
"""
The source address field.
"""
return pulumi.get(self, "source_cidr_ip")
@pulumi.output_type
class GetNetworkAclsAclResourceResult(dict):
def __init__(__self__, *,
resource_id: str,
resource_type: str,
status: str):
"""
:param str resource_id: The ID of the associated resource.
:param str resource_type: The type of the associated resource.
:param str status: The state of the network ACL.
"""
pulumi.set(__self__, "resource_id", resource_id)
pulumi.set(__self__, "resource_type", resource_type)
pulumi.set(__self__, "status", status)
@property
@pulumi.getter(name="resourceId")
def resource_id(self) -> str:
"""
The ID of the associated resource.
"""
return pulumi.get(self, "resource_id")
@property
@pulumi.getter(name="resourceType")
def resource_type(self) -> str:
"""
The type of the associated resource.
"""
return pulumi.get(self, "resource_type")
@property
@pulumi.getter
def status(self) -> str:
"""
The state of the network ACL.
"""
return pulumi.get(self, "status")
@pulumi.output_type
class GetNetworksVpcResult(dict):
def __init__(__self__, *,
cidr_block: str,
creation_time: str,
description: str,
id: str,
ipv6_cidr_block: str,
is_default: bool,
region_id: str,
resource_group_id: str,
route_table_id: str,
router_id: str,
secondary_cidr_blocks: Sequence[str],
status: str,
tags: Mapping[str, Any],
user_cidrs: Sequence[str],
vpc_id: str,
vpc_name: str,
vrouter_id: str,
vswitch_ids: Sequence[str]):
"""
:param str cidr_block: Filter results by a specific CIDR block. For example: "172.16.0.0/12".
:param str creation_time: Time of creation.
:param str description: Description of the VPC
:param str id: ID of the VPC.
:param str ipv6_cidr_block: The IPv6 CIDR block of the VPC.
:param bool is_default: Indicate whether the VPC is the default one in the specified region.
:param str region_id: ID of the region where the VPC is located.
        :param str resource_group_id: The ID of the resource group to which the VPC belongs.
:param str route_table_id: Route table ID of the VRouter.
:param str router_id: The ID of the VRouter.
:param Sequence[str] secondary_cidr_blocks: A list of secondary IPv4 CIDR blocks of the VPC.
        :param str status: Filter results by a specific status. Valid values are `Pending` and `Available`.
:param Mapping[str, Any] tags: A mapping of tags to assign to the resource.
:param Sequence[str] user_cidrs: A list of user CIDRs.
:param str vpc_id: ID of the VPC.
:param str vpc_name: The name of the VPC.
:param str vrouter_id: ID of the VRouter.
:param Sequence[str] vswitch_ids: List of VSwitch IDs in the specified VPC
"""
pulumi.set(__self__, "cidr_block", cidr_block)
pulumi.set(__self__, "creation_time", creation_time)
pulumi.set(__self__, "description", description)
pulumi.set(__self__, "id", id)
pulumi.set(__self__, "ipv6_cidr_block", ipv6_cidr_block)
pulumi.set(__self__, "is_default", is_default)
pulumi.set(__self__, "region_id", region_id)
pulumi.set(__self__, "resource_group_id", resource_group_id)
pulumi.set(__self__, "route_table_id", route_table_id)
pulumi.set(__self__, "router_id", router_id)
pulumi.set(__self__, "secondary_cidr_blocks", secondary_cidr_blocks)
pulumi.set(__self__, "status", status)
pulumi.set(__self__, "tags", tags)
pulumi.set(__self__, "user_cidrs", user_cidrs)
pulumi.set(__self__, "vpc_id", vpc_id)
pulumi.set(__self__, "vpc_name", vpc_name)
pulumi.set(__self__, "vrouter_id", vrouter_id)
pulumi.set(__self__, "vswitch_ids", vswitch_ids)
@property
@pulumi.getter(name="cidrBlock")
def cidr_block(self) -> str:
"""
Filter results by a specific CIDR block. For example: "172.16.0.0/12".
"""
return pulumi.get(self, "cidr_block")
@property
@pulumi.getter(name="creationTime")
def creation_time(self) -> str:
"""
Time of creation.
"""
return pulumi.get(self, "creation_time")
@property
@pulumi.getter
def description(self) -> str:
"""
Description of the VPC
"""
return pulumi.get(self, "description")
@property
@pulumi.getter
def id(self) -> str:
"""
ID of the VPC.
"""
return pulumi.get(self, "id")
@property
@pulumi.getter(name="ipv6CidrBlock")
def ipv6_cidr_block(self) -> str:
"""
The IPv6 CIDR block of the VPC.
"""
return pulumi.get(self, "ipv6_cidr_block")
@property
@pulumi.getter(name="isDefault")
def is_default(self) -> bool:
"""
Indicate whether the VPC is the default one in the specified region.
"""
return pulumi.get(self, "is_default")
@property
@pulumi.getter(name="regionId")
def region_id(self) -> str:
"""
ID of the region where the VPC is located.
"""
return pulumi.get(self, "region_id")
@property
@pulumi.getter(name="resourceGroupId")
def resource_group_id(self) -> str:
"""
        The ID of the resource group to which the VPC belongs.
"""
return pulumi.get(self, "resource_group_id")
@property
@pulumi.getter(name="routeTableId")
def route_table_id(self) -> str:
"""
Route table ID of the VRouter.
"""
return pulumi.get(self, "route_table_id")
@property
@pulumi.getter(name="routerId")
def router_id(self) -> str:
"""
The ID of the VRouter.
"""
return pulumi.get(self, "router_id")
@property
@pulumi.getter(name="secondaryCidrBlocks")
def secondary_cidr_blocks(self) -> Sequence[str]:
"""
A list of secondary IPv4 CIDR blocks of the VPC.
"""
return pulumi.get(self, "secondary_cidr_blocks")
@property
@pulumi.getter
def status(self) -> str:
"""
        Filter results by a specific status. Valid values are `Pending` and `Available`.
"""
return pulumi.get(self, "status")
@property
@pulumi.getter
def tags(self) -> Mapping[str, Any]:
"""
A mapping of tags to assign to the resource.
"""
return pulumi.get(self, "tags")
@property
@pulumi.getter(name="userCidrs")
def user_cidrs(self) -> Sequence[str]:
"""
A list of user CIDRs.
"""
return pulumi.get(self, "user_cidrs")
@property
@pulumi.getter(name="vpcId")
def vpc_id(self) -> str:
"""
ID of the VPC.
"""
return pulumi.get(self, "vpc_id")
@property
@pulumi.getter(name="vpcName")
def vpc_name(self) -> str:
"""
The name of the VPC.
"""
return pulumi.get(self, "vpc_name")
@property
@pulumi.getter(name="vrouterId")
def vrouter_id(self) -> str:
"""
ID of the VRouter.
"""
return pulumi.get(self, "vrouter_id")
@property
@pulumi.getter(name="vswitchIds")
def vswitch_ids(self) -> Sequence[str]:
"""
List of VSwitch IDs in the specified VPC
"""
return pulumi.get(self, "vswitch_ids")
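# Consumption sketch (assumes `import pulumi` and `import pulumi_alicloud as alicloud`,
# and that the `alicloud.vpc.get_networks` data source returns a result whose `vpcs`
# list holds objects of this shape):
#
#   vpcs = alicloud.vpc.get_networks(status="Available")
#   pulumi.export("first_vpc_cidr", vpcs.vpcs[0].cidr_block)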
@pulumi.output_type
class GetRouteEntriesEntryResult(dict):
def __init__(__self__, *,
cidr_block: str,
instance_id: str,
next_hop_type: str,
route_table_id: str,
status: str,
type: str):
"""
:param str cidr_block: The destination CIDR block of the route entry.
:param str instance_id: The instance ID of the next hop.
:param str next_hop_type: The type of the next hop.
:param str route_table_id: The ID of the router table to which the route entry belongs.
:param str status: The status of the route entry.
:param str type: The type of the route entry.
"""
pulumi.set(__self__, "cidr_block", cidr_block)
pulumi.set(__self__, "instance_id", instance_id)
pulumi.set(__self__, "next_hop_type", next_hop_type)
pulumi.set(__self__, "route_table_id", route_table_id)
pulumi.set(__self__, "status", status)
pulumi.set(__self__, "type", type)
@property
@pulumi.getter(name="cidrBlock")
def cidr_block(self) -> str:
"""
The destination CIDR block of the route entry.
"""
return pulumi.get(self, "cidr_block")
@property
@pulumi.getter(name="instanceId")
def instance_id(self) -> str:
"""
The instance ID of the next hop.
"""
return pulumi.get(self, "instance_id")
@property
@pulumi.getter(name="nextHopType")
def next_hop_type(self) -> str:
"""
The type of the next hop.
"""
return pulumi.get(self, "next_hop_type")
@property
@pulumi.getter(name="routeTableId")
def route_table_id(self) -> str:
"""
The ID of the router table to which the route entry belongs.
"""
return pulumi.get(self, "route_table_id")
@property
@pulumi.getter
def status(self) -> str:
"""
The status of the route entry.
"""
return pulumi.get(self, "status")
@property
@pulumi.getter
def type(self) -> str:
"""
The type of the route entry.
"""
return pulumi.get(self, "type")
@pulumi.output_type
class GetRouteTablesTableResult(dict):
def __init__(__self__, *,
description: str,
id: str,
name: str,
resource_group_id: str,
route_table_id: str,
route_table_name: str,
route_table_type: str,
router_id: str,
router_type: str,
status: str,
tags: Mapping[str, Any],
vpc_id: str,
vswitch_ids: Sequence[str]):
"""
:param str description: The description of the route table instance.
:param str id: ID of the Route Table.
:param str name: Name of the route table.
        :param str resource_group_id: The ID of the resource group to which the route table belongs.
:param str route_table_id: The route table id.
:param str route_table_name: The route table name.
:param str route_table_type: The type of route table.
:param str router_id: The router ID.
:param str router_type: The route type of route table. Valid values: `VRouter` and `VBR`.
:param str status: The status of resource. Valid values: `Available` and `Pending`.
:param Mapping[str, Any] tags: A mapping of tags to assign to the resource.
:param str vpc_id: Vpc id of the route table.
:param Sequence[str] vswitch_ids: A list of vswitch id.
"""
pulumi.set(__self__, "description", description)
pulumi.set(__self__, "id", id)
pulumi.set(__self__, "name", name)
pulumi.set(__self__, "resource_group_id", resource_group_id)
pulumi.set(__self__, "route_table_id", route_table_id)
pulumi.set(__self__, "route_table_name", route_table_name)
pulumi.set(__self__, "route_table_type", route_table_type)
pulumi.set(__self__, "router_id", router_id)
pulumi.set(__self__, "router_type", router_type)
pulumi.set(__self__, "status", status)
pulumi.set(__self__, "tags", tags)
pulumi.set(__self__, "vpc_id", vpc_id)
pulumi.set(__self__, "vswitch_ids", vswitch_ids)
@property
@pulumi.getter
def description(self) -> str:
"""
The description of the route table instance.
"""
return pulumi.get(self, "description")
@property
@pulumi.getter
def id(self) -> str:
"""
ID of the Route Table.
"""
return pulumi.get(self, "id")
@property
@pulumi.getter
def name(self) -> str:
"""
Name of the route table.
"""
return pulumi.get(self, "name")
@property
@pulumi.getter(name="resourceGroupId")
def resource_group_id(self) -> str:
"""
        The ID of the resource group to which the route table belongs.
"""
return pulumi.get(self, "resource_group_id")
@property
@pulumi.getter(name="routeTableId")
def route_table_id(self) -> str:
"""
The route table id.
"""
return pulumi.get(self, "route_table_id")
@property
"""Class to automatically select available and new primers for sequencing runs.
"""
from copy import deepcopy
import re
from collections import defaultdict
import pandas
from matplotlib.backends.backend_pdf import PdfPages
import matplotlib.pyplot as plt
import numpy as np
from Bio import SeqIO
import flametree
from proglog import TqdmProgressBarLogger, ProgressBarLogger
from dnachisel import (AvoidPattern, RepeatedKmerPattern, HomopolymerPattern,
DnaOptimizationProblem)
from .sequencing_simulation import simulate_sequencing
from .biotools import (reverse_complement, find_non_unique_segments,
find_best_primer_locations)
from .tools import minimal_cover, segments_to_array, group_overlapping_segments
from .Primer import Primer
class PrimerSelectorLogger(TqdmProgressBarLogger):
"""Custom logger class adapted to the logger selector."""
def __init__(self, bars=('record', 'primer'), notebook='default'):
ignored_bars = set(('record', 'primer')).difference(bars)
TqdmProgressBarLogger.__init__(self, bars=bars, notebook=notebook,
ignored_bars=ignored_bars,
min_time_interval=0.2)
class PrimerSelector:
"""A selector to compute the best primers to sequence a set of constructs.
Examples
--------
>>> selector = PrimerSelector()
>>> selected_primers = selector.select_primers(records, available_primers)
    >>> selector.plot_coverage(records, selected_primers, 'my_report.pdf')
Parameters
-----------
read_range
The experimentally measured range (start, end) so that, when a primer
anneals in the sequence at index i, the range ``[i + start, i + end]`` will
be correctly sequenced.
size_range
Size range (min, max) for the size of the primers, in nucleotides.
tm_range
      Acceptable melting temperature range for the primers (in Celsius), as
      computed using the simple rule A/T=2C, G/C=4C.
primer_conditions
A list of functions of the form ``primer_sequence => True/False``.
Primers for which at least one condition returns False will not be
considered.
primer_reuse_bonus
Weight that the availability of the primer should have in the decision
to select this primer. A higher value of this parameter leads to
      solutions where fewer new primers have to be ordered, but more
      sequencing reactions have to be done. Set to e.g. 200 to test whether
      there exist solutions involving solely already-available primers.
logger
Leave to 'bars' for default progress-bar logger, to None for no logger,
or any Proglog ProgressBarLogger object.
coverage_resolution
When the user provides a record with "cover" features to indicate where
to cover, the coverage points used by the algorithm are the 1-in-N
nucleotides along the feature region, where N is this parameter.
nucleotide_resolution
      If above 1, only one in every N candidate primers will be considered
      when listing all the potential new primers (one around each
      nucleotide), where N is this number.
"""
def __init__(self, read_range=(150, 800), size_range=(16, 25),
tm_range=(55, 70), primer_conditions=(),
primer_reuse_bonus=2, logger='bars',
homology_percentage=80,
nucleotide_resolution=1,
coverage_resolution=5):
self.read_range = read_range
self.size_range = size_range
self.tm_range = tm_range
self.homology_percentage = homology_percentage
self.primers_conditions = primer_conditions
self.nucleotide_resolution = nucleotide_resolution
self.coverage_resolution = coverage_resolution
        self.primer_reuse_bonus = primer_reuse_bonus  # honor the constructor argument
if logger == 'bars':
logger = PrimerSelectorLogger()
if logger is None:
logger = ProgressBarLogger()
self.logger = logger
self.extension = coverage_resolution + 1
def select_primers(self, records, available_primers=(),
strand='any', new_primers_prefix='P',
new_primers_digits=6):
"""Select primers to sequence the given records.
Parameters
----------
records
A list of biopython records to sequence. The zones to cover in the
record should be indicated by a feature of type ``misc_feature`` and
label ``cover``. The zones where no primers are desired should be
indicated by a feature of type ``misc_feature`` and label
``no_primer``.
available_primers
List of Primer objects representing the available primers.
new_primers_prefix
Prefix to use for names of the new primers
new_primers_digits
The new primers will have names of the form P000435, with a number
of digits provided by this parameter.
Returns
-------
selected_primers
          A list of lists of primers, one list of primers for each construct.
"""
if strand == 'both':
reverse_records = []
for record in records:
new_record = record.reverse_complement()
new_record.linear = record.linear
new_record.id = record.id
reverse_records.append(new_record)
primers_sets = []
for strand in self.logger.iter_bar(strand=['5-3', '3-5']):
extra_prefix, recs = {'3-5': ('_r', reverse_records),
'5-3': ('', records)}[strand]
primers_sets.append(self.select_primers(
recs,
available_primers=available_primers,
strand='5-3',
new_primers_prefix=new_primers_prefix + extra_prefix,
new_primers_digits=new_primers_digits
))
forward_primers, reverse_primers = primers_sets
return [
forward + rev
for forward, rev in zip(forward_primers, reverse_primers)
]
available_primers_dict = {p.sequence: p for p in available_primers}
available_primers_seqs = set([p.sequence for p in available_primers])
# COMPUTE PRIMERS AND COVERAGES
indices_to_cover = {}
primers_coverages = defaultdict(lambda *a: {'primary': set(),
'extended': set()})
self.logger(message='Analyzing the records...')
for record in self.logger.iter_bar(record=records):
indices_to_cover[record.id] = {
ind: '%s_%03d' % (record.id, ind)
for ind in self.compute_indices_to_cover(record)
}
coverages = self.compute_all_primers_coverage_on_record(
record, available_primers=available_primers_seqs,
indices_to_cover=indices_to_cover[record.id],
strand=strand)
for primer, cov in coverages.items():
primers_coverages[primer]['primary'].update(cov['primary'])
primers_coverages[primer]['extended'].update(cov['extended'])
# FIND GLOBAL MINIMAL COVER
self.last_coverages = coverages
self.logger(message='Selecting primers, please wait...')
elements_set = set(
index
for rec_id, named_indices in indices_to_cover.items()
for index in named_indices.values()
)
if len(elements_set) == 0:
raise ValueError(
"The provided problem has no specified locations "
"to cover. Make sure you specify which parts of the sequence "
"to cover, e.g. by adding annotations with label `cover` and "
"type misc_feature in your records.")
def heuristic(named_subset, selected):
name, subset = named_subset
primer_is_reused = name in available_primers_seqs
min_index = min([
int(re.match(r'.*_(\d*)', name_).groups()[0])
for name_ in subset['primary']
])
reuse_bonus = self.primer_reuse_bonus * primer_is_reused
return (len(subset['primary']) + reuse_bonus, -min_index)
subsets = primers_coverages.items()
primers_cover = minimal_cover(elements_set, subsets=subsets,
heuristic=heuristic)
# print(list(primers_coverages.keys()))
# REORGANIZE AND NAME THE SELECTED PRIMERS
available_primers_names = [p.name for p in available_primers]
selected_primers = []
selected_primer_from_seq = {}
for primer_seq in primers_cover:
if primer_seq in available_primers_dict:
name = available_primers_dict[primer_seq].name
primer = available_primers_dict[primer_seq]
infos = primer.metadata.get('infos', '')
meta = {'available': True, 'infos': infos}
meta.update(primer.metadata)
primer = Primer(primer.name, primer.sequence, metadata=meta)
else:
name = self.generate_primer_name(
available_primers_names=available_primers_names,
prefix=new_primers_prefix,
n_digits=new_primers_digits
)
try:
part, index = self.find_subsequence_in_records(
sequence=primer_seq, records=records)
infos = "From part %s (at position %d)" % (part, index)
except ValueError:
infos = "No containing part could be identified"
available_primers_names.append(name)
primer = Primer(name, primer_seq,
metadata={'available': False, 'infos': infos})
selected_primers.append(primer)
selected_primer_from_seq[primer_seq] = primer
# CHOOSE A MINIMAL PRIMER COVER FOR EACH CONSTRUCT
per_record_primers = []
self.logger(message='Finding minimal covers for each construct...')
for record in self.logger.iter_bar(record=records):
elements = set(indices_to_cover[record.id].values())
subcovers = {
prim_seq: {
'primary': primers_coverages[prim_seq]['primary']
.intersection(elements),
'extended': primers_coverages[prim_seq]['extended']
.intersection(elements)
}
for prim_seq in primers_cover
}
subsets = deepcopy(list(subcovers.items()))
# subsets = list(subcovers.items())
sub_primers_cover = minimal_cover(elements, subsets=subsets,
heuristic=heuristic)
# All primers selected for this construct, sorted by availability.
sub_selected_primers = sorted([
selected_primer_from_seq[primer_seq]
for primer_seq in sub_primers_cover
], key=lambda p: p.metadata['available'])
per_record_primers.append(sub_selected_primers)
return per_record_primers
def compute_indices_to_cover(self, record):
"""List all indices in the record which should be covered.
These indices are equidistant points inside the user-defined zones to
cover in the record.
        The user determines the zones to cover via features of
type ``misc_feature`` and label 'cover'.
"""
segments_to_cover = [
sorted([int(f.location.start), int(f.location.end)])
for f in record.features
if f.location is not None
and f.type == 'misc_feature'
and "".join(f.qualifiers.get('label', '')) == 'cover'
]
res = self.coverage_resolution
return set([
int(indice) % len(record)
for (start, end) in segments_to_cover
for indice in np.linspace(start, end, int((end - start) / res))
])
@staticmethod
def locate_primer_sequence(primer, sequence):
"""Find the location (start, end, strand) of a primer in the sequence.
        Return None if neither the primer sequence nor its reverse complement
        is found in the sequence.
"""
ind = sequence.find(primer)
strand = 1
if ind == -1:
ind = sequence.find(reverse_complement(primer))
if ind == -1:
return None
else:
strand = -1
start, end = ind, ind + len(primer)
return start, end, strand
def compute_forbidden_patterns_locations(self, record):
"""Return an array where ``arr[i] == 1`` means that i is surrounded by
        a forbidden pattern (homopolymer or repeated k-mer)."""
pattern_constraints = [AvoidPattern(HomopolymerPattern(c, 5))
for c in 'ATGC']
kmer_constraints = [
AvoidPattern(RepeatedKmerPattern(n_repeats, kmer_size))
for n_repeats, kmer_size in [(4, 2), (3, 3), (2, 4)]
]
problem = DnaOptimizationProblem(
sequence=record,
constraints=pattern_constraints + kmer_constraints
)
constraints_breaches = group_overlapping_segments([
(f.location.start, f.location.end)
for ev in problem.constraints_evaluations()
for f in ev.locations_to_biopython_features()
if not ev.passes
])
return segments_to_array(constraints_breaches, len(record))
def compute_user_forbidden_locations(self, record):
"""Return an array where ``arr[i] == 1`` means that i is surrounded by
a user-forbidden location."""
forbidden_segments = [
sorted([int(f.location.start), int(f.location.end)])
for f in record.features
if f.location is not None
and f.type == 'misc_feature'
and "".join(f.qualifiers.get('label', '')) == 'no_primer'
]
return segments_to_array(forbidden_segments, len(record))
def compute_nonunique_segments_locations(self, record):
"""Return an array where ``arr[i] == 1`` means that i is surrounded by
a non-unique location."""
sequence = str(record.seq)
non_unique_segments = find_non_unique_segments(
sequence, perc_identity=self.homology_percentage)
return segments_to_array(non_unique_segments, len(record))
def compute_all_forbidden_locations(self, record):
"""Return an array indicating which positions should be avoided.
We take into account forbidden patterns, user-forbidden locations,
and non-unique locations.
``arr[i] == 1`` indicates that position i should be avoided in the
record.
"""
return np.maximum(*(f(record) for f in (
self.compute_nonunique_segments_locations,
self.compute_forbidden_patterns_locations,
self.compute_user_forbidden_locations
        )))
Column(Int, Check(lambda s: s >= 0)),
"col2": Column(String, Check(lambda x: x <= 0)),
"col3": Column(Object, Check(lambda x: x == 0)),
},
strict=True,
)
assert schema2 == expected_schema_2
# test that remove_columns doesn't modify schema2:
schema3 = schema2.remove_columns(["col2"])
assert schema2 == schema2_exact_copy
# test that remove_columns has removed the changes as expected:
expected_schema_3 = DataFrameSchema(
{
"col1": Column(Int, Check(lambda s: s >= 0)),
"col3": Column(Object, Check(lambda x: x == 0)),
},
strict=True,
)
assert schema3 == expected_schema_3
# test that remove_columns can remove two columns:
schema4 = schema2.remove_columns(["col2", "col3"])
expected_schema_4 = DataFrameSchema(
{"col1": Column(Int, Check(lambda s: s >= 0))}, strict=True
)
assert schema4 == expected_schema_4 == schema1
# test raising error if column name is not in the schema
with pytest.raises(errors.SchemaInitError):
schema2.remove_columns(["foo", "bar"])
def test_schema_get_dtype() -> None:
"""Test that schema dtype and get_dtype methods handle regex columns."""
schema = DataFrameSchema(
{
"col1": Column(Int),
"var*": Column(Float, regex=True),
}
)
data = pd.DataFrame(
{
"col1": [1, 2, 3],
"var1": [1.0, 1.1, 1.2],
"var2": [1.0, 1.1, 1.2],
"var3": [1.0, 1.1, 1.2],
}
)
with pytest.warns(UserWarning) as record:
assert schema.dtype == {"col1": Int.str_alias}
assert len(record) == 1
assert (
record[0] # type: ignore[union-attr]
.message.args[0]
.startswith("Schema has columns specified as regex column names:")
)
assert schema.get_dtype(data) == {
"col1": Int.str_alias,
"var1": Float.str_alias,
"var2": Float.str_alias,
"var3": Float.str_alias,
}
def _boolean_update_column_case(
bool_kwarg: str,
) -> Tuple[
Column,
str,
Dict[str, bool],
Callable[[DataFrameSchema, DataFrameSchema], None],
]:
def _assert_bool_case(old_schema, new_schema):
assert not getattr(old_schema.columns["col"], bool_kwarg)
assert getattr(new_schema.columns["col"], bool_kwarg)
return (
Column(Int, **{bool_kwarg: False}), # type: ignore[arg-type]
"col",
{bool_kwarg: True},
_assert_bool_case,
)
@pytest.mark.parametrize(
"column, column_to_update, update, assertion_fn",
[
[
Column(Int),
"col",
{"pandas_dtype": String},
lambda old, new: [
old.columns["col"].pandas_dtype is Int,
new.columns["col"].pandas_dtype is String,
],
],
*[
_boolean_update_column_case(bool_kwarg)
for bool_kwarg in [
"nullable",
"allow_duplicates",
"coerce",
"required",
"regex",
]
],
[
Column(Int, checks=Check.greater_than(0)),
"col",
{"checks": Check.less_than(10)},
lambda old, new: [
old.columns["col"].checks == [Check.greater_than(0)],
new.columns["col"].checks == [Check.less_than(10)],
],
],
# error cases
[Column(Int), "col", {"name": "renamed_col"}, ValueError],
[Column(Int), "foobar", {}, ValueError],
],
)
def test_dataframe_schema_update_column(
column: Column,
column_to_update: str,
update: Dict[str, Any],
assertion_fn: Callable[[DataFrameSchema, DataFrameSchema], None],
) -> None:
"""Test that DataFrameSchema columns create updated copies."""
schema = DataFrameSchema({"col": column})
if assertion_fn is ValueError:
with pytest.raises(ValueError):
schema.update_column(column_to_update, **update)
return
new_schema = schema.update_column(column_to_update, **update)
assertion_fn(schema, new_schema)
def test_rename_columns() -> None:
"""Check that DataFrameSchema.rename_columns() method does its job"""
rename_dict = {"col1": "col1_new_name", "col2": "col2_new_name"}
schema_original = DataFrameSchema(
columns={"col1": Column(Int), "col2": Column(Float)}
)
schema_renamed = schema_original.rename_columns(rename_dict)
# Check if new column names are indeed present in the new schema
assert all(
col_name in rename_dict.values() for col_name in schema_renamed.columns
)
# Check if original schema didn't change in the process
assert all(col_name in schema_original.columns for col_name in rename_dict)
with pytest.raises(errors.SchemaInitError):
schema_original.rename_columns({"foo": "bar"})
# Test raising error if new column name is already in schema
for rename_dict in [{"col1": "col2"}, {"col2": "col1"}]:
with pytest.raises(errors.SchemaInitError):
schema_original.rename_columns(rename_dict)
@pytest.mark.parametrize(
"select_columns, schema",
[
(
["col1", "col2"],
DataFrameSchema(
columns={
"col1": Column(Int),
"col2": Column(Int),
"col3": Column(Int),
}
),
),
(
[("col1", "col1b"), ("col2", "col2b")],
DataFrameSchema(
columns={
("col1", "col1a"): Column(Int),
("col1", "col1b"): Column(Int),
("col2", "col2a"): Column(Int),
("col2", "col2b"): Column(Int),
}
),
),
],
)
def test_select_columns(
select_columns: List[Union[str, Tuple[str, str]]], schema: DataFrameSchema
) -> None:
"""Check that select_columns method correctly creates new subset schema."""
original_columns = list(schema.columns)
schema_selected = schema.select_columns(select_columns)
assert all(x in select_columns for x in schema_selected.columns)
assert all(x in original_columns for x in schema.columns)
with pytest.raises(errors.SchemaInitError):
schema.select_columns(["foo", "bar"])
def test_lazy_dataframe_validation_error() -> None:
"""Test exceptions on lazy dataframe validation."""
schema = DataFrameSchema(
columns={
"int_col": Column(Int, Check.greater_than(5)),
"int_col2": Column(Int),
"float_col": Column(Float, Check.less_than(0)),
"str_col": Column(String, Check.isin(["foo", "bar"])),
"not_in_dataframe": Column(Int),
},
checks=Check(
lambda df: df != 1, error="dataframe_not_equal_1", ignore_na=False
),
index=Index(String, name="str_index"),
strict=True,
)
dataframe = pd.DataFrame(
data={
"int_col": [1, 2, 6],
"int_col2": ["a", "b", "c"],
"float_col": [1.0, -2.0, 3.0],
"str_col": ["foo", "b", "c"],
"unknown_col": [None, None, None],
},
index=pd.Index(["index0", "index1", "index2"], name="str_index"),
)
expectation = {
# schema object context -> check failure cases
"DataFrameSchema": {
# check name -> failure cases
"column_in_schema": ["unknown_col"],
"dataframe_not_equal_1": [1],
"column_in_dataframe": ["not_in_dataframe"],
},
"Column": {
"greater_than(5)": [1, 2],
"pandas_dtype('int64')": ["object"],
"less_than(0)": [1, 3],
},
}
with pytest.raises(
errors.SchemaErrors, match="^A total of .+ schema errors were found"
):
schema.validate(dataframe, lazy=True)
try:
schema.validate(dataframe, lazy=True)
except errors.SchemaErrors as err:
# data in the caught exception should be equal to the dataframe
# passed into validate
assert err.data.equals(dataframe)
# make sure all expected check errors are in schema errors
for schema_context, check_failure_cases in expectation.items():
err_df = err.failure_cases.loc[
err.failure_cases.schema_context == schema_context
]
for check, failure_cases in check_failure_cases.items():
assert check in err_df.check.values
assert (
err_df.loc[err_df.check == check]
.failure_case.isin(failure_cases)
.all()
)
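# Hedged usage sketch (added; not part of the test suite): outside of tests, lazy validation
# is typically used to collect every failure at once instead of stopping at the first error.
# SchemaErrors exposes the failing rows and the original data, as the assertions above rely on.
# try:
#     validated = schema.validate(dataframe, lazy=True)
# except errors.SchemaErrors as err:
#     print(err.failure_cases)   # one row per failing check
#     print(err.data.head())     # the dataframe that was validated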
def test_lazy_validation_multiple_checks() -> None:
"""Lazy validation with multiple checks should report all failures."""
schema = DataFrameSchema(
{
"col1": Column(
Int,
checks=[
Check.in_range(1, 4),
Check(lambda s: s % 2 == 0, name="is_even"),
],
coerce=True,
nullable=False,
),
"col2": Column(Int, Check.gt(3), coerce=True, nullable=False),
}
)
data = pd.DataFrame(
{"col1": [0, 1, 2, 3, 4], "col2": [np.nan, 53, 23, np.nan, 2]}
)
expectation = {
"col1": {
"in_range(1, 4)": [0],
"is_even": [1, 3],
},
"col2": {
"coerce_dtype('int64')": ["float64"],
},
}
try:
schema.validate(data, lazy=True)
except errors.SchemaErrors as err:
for column_name, check_failure_cases in expectation.items():
err_df = err.failure_cases.loc[
err.failure_cases.column == column_name
]
for check, failure_cases in check_failure_cases.items(): # type: ignore
assert check in err_df.check.values
assert (
list(err_df.loc[err_df.check == check].failure_case)
== failure_cases
)
def test_lazy_dataframe_validation_nullable() -> None:
"""
Test that non-nullable column failure cases are correctly processed during
lazy validation.
"""
schema = DataFrameSchema(
columns={
"int_column": Column(Int, nullable=False),
"float_column": Column(Float, nullable=False),
"str_column": Column(String, nullable=False),
},
strict=True,
)
df = pd.DataFrame(
{
"int_column": [1, None, 3],
"float_column": [0.1, 1.2, None],
"str_column": [None, "foo", "bar"],
}
)
try:
schema.validate(df, lazy=True)
except errors.SchemaErrors as err:
# report not_nullable checks
assert (
err.failure_cases.query("check == 'not_nullable'")
.failure_case.isna()
.all()
)
# report invalid type in int_column
assert (
err.failure_cases.query(
"check == \"pandas_dtype('int64')\""
).failure_case
== "float64"
).all()
for col, index in [
("int_column", 1),
("float_column", 2),
("str_column", 0),
]:
# pylint: disable=cell-var-from-loop
assert (
err.failure_cases.loc[
lambda df: df.column == col, "index"
].iloc[0]
== index
)
def test_lazy_dataframe_validation_with_checks() -> None:
"""Test that all failure cases are reported for schemas with checks."""
schema = DataFrameSchema(
columns={
"analysis_path": Column(String),
"run_id": Column(String),
"sample_type": Column(String, Check.isin(["DNA", "RNA"])),
"sample_valid": Column(String, Check.isin(["Yes", "No"])),
},
strict=False,
coerce=True,
)
df = pd.DataFrame.from_dict(
{
"analysis_path": ["/", "/", "/", "/", "/"],
"run_id": ["1", "2", "3", "4", "5"],
"sample_type": ["DNA", "RNA", "DNA", "RNA", "RNA"],
"sample_valid": ["Yes", "YES", "YES", "NO", "NO"],
}
)
try:
schema(df, lazy=True)
except errors.SchemaErrors as err:
failure_case = err.failure_cases.failure_case.tolist()
assert failure_case == ["YES", "YES", "NO", "NO"]
def test_lazy_dataframe_validation_nullable_with_checks() -> None:
"""
Test that checks in non-nullable column failure cases are correctly
processed during lazy validation.
"""
schema = DataFrameSchema(
{
"id": Column(
String,
checks=Check.str_matches(r"^ID[\d]{3}$"),
name="id",
required=True,
allow_duplicates=False,
)
}
)
df = pd.DataFrame({"id": ["ID001", None, "XXX"]})
try:
schema(df, lazy=True)
except errors.SchemaErrors as err:
expected_failure_cases = pd.DataFrame.from_dict(
{
0: {
"schema_context": "Column",
"column": "id",
"check": "not_nullable",
"check_number": None,
"failure_case": None,
"index": 1,
},
1: {
"schema_context": "Column",
"column": "id",
"check": r"str_matches(re.compile('^ID[\\d]{3}$'))",
"check_number": 0,
"failure_case": "XXX",
"index": 2,
},
},
orient="index",
).astype({"check_number": object})
pd.testing.assert_frame_equal(
err.failure_cases, expected_failure_cases
)
@pytest.mark.parametrize(
"schema_cls, data",
[
[DataFrameSchema, pd.DataFrame({"column": [1]})],
[SeriesSchema, pd.Series([1, 2, 3])],
[partial(Column, name="column"), pd.DataFrame({"column": [1]})],
[
partial(Index, name="index"),
pd.DataFrame(index=pd.Index([1, 2, 3], name="index")),
],
],
)
def test_lazy_dataframe_scalar_false_check(
schema_cls: Type[Union[DataFrameSchema, SeriesSchema, Column, Index]],
data: Union[pd.DataFrame, pd.Series, pd.Index],
) -> None:
"""Lazy validation handles checks returning scalar False values."""
    # define a check that always returns a scalar False value
check = Check(
check_fn=lambda _: False, element_wise=False, error="failing check"
)
schema = schema_cls(checks=check)
with pytest.raises(errors.SchemaErrors):
schema(data, lazy=True)
@pytest.mark.parametrize(
"schema, data, expectation",
[
# case: series name doesn't match schema name
[
SeriesSchema(name="foobar"),
pd.Series(range(3)),
{
"data": pd.Series(range(3)),
"schema_errors": {
"SeriesSchema": {"field_name('foobar')": [None]},
},
},
],
# case: series type doesn't match schema type
[
SeriesSchema(int),
pd.Series([0.1]),
{
"data": pd.Series([0.1]),
"schema_errors": {
"SeriesSchema": {"pandas_dtype('int64')": ["float64"]},
},
},
],
# case: series index doesn't satisfy schema index
[
SeriesSchema(index=Index(int)),
pd.Series([1, 2, 3], index=list("abc")),
{
"data": pd.Series([1, 2, 3], index=list("abc")),
"schema_errors": {
"Index": {"pandas_dtype('int64')": ["object"]},
},
},
],
# case: SeriesSchema data-type coercion error
[
SeriesSchema(float, coerce=True),
pd.Series(["1", "foo", "bar"]),
{
"data": pd.Series(["1", "foo", "bar"]),
"schema_errors": {
# -*- coding: utf-8 -*-
"""
Plugin to create a Quantum Espresso ph.x input file.
"""
import os
import numpy
from aiida.common.utils import classproperty
from aiida.common.exceptions import UniquenessError, InputValidationError,ValidationError
from aiida.common.datastructures import CalcInfo, CodeInfo
from aiida.orm.data.remote import RemoteData
from aiida.orm.data.parameter import ParameterData
from aiida.orm.data.array.kpoints import KpointsData
from aiida.orm.calculation.job import JobCalculation
from aiida_quantumespresso.calculations.pw import PwCalculation
from aiida_quantumespresso.calculations import BasePwCpInputGenerator
from aiida_quantumespresso.calculations import _lowercase_dict, _uppercase_dict
from aiida_quantumespresso.utils.convert import convert_input_to_namelist_entry
# List of namelists (uppercase) that are allowed to be found in the
# input_data, in the correct order
# in restarts, will not copy but use symlinks
_default_symlink_usage = False
class PhCalculation(JobCalculation):
"""
Phonon code (ph.x) of the Quantum ESPRESSO distribution.
For more information, refer to http://www.quantum-espresso.org/
"""
def _init_internal_params(self):
super(PhCalculation, self)._init_internal_params()
self._PREFIX = 'aiida'
self._INPUT_FILE_NAME = 'aiida.in'
self._OUTPUT_FILE_NAME = 'aiida.out'
self._OUTPUT_XML_TENSOR_FILE_NAME = 'tensors.xml'
# Default PH output parser provided by AiiDA
self._default_parser = 'quantumespresso.ph'
self._compulsory_namelists = ['INPUTPH']
# Keywords that cannot be set manually, only by the plugin
self._blocked_keywords = [('INPUTPH', 'outdir'),
('INPUTPH', 'iverbosity'),
('INPUTPH', 'prefix'),
('INPUTPH', 'fildyn'),
('INPUTPH', 'ldisp'),
('INPUTPH', 'nq1'),
('INPUTPH', 'nq2'),
('INPUTPH', 'nq3'),
('INPUTPH', 'qplot'),
]
# Default input and output files
self._DEFAULT_INPUT_FILE = 'aiida.in'
self._DEFAULT_OUTPUT_FILE = 'aiida.out'
@classproperty
def _OUTPUT_SUBFOLDER(cls):
return './out/'
@classproperty
def _FOLDER_DRHO(cls):
return 'FILDRHO'
@classproperty
def _DRHO_PREFIX(cls):
return 'drho'
@classproperty
def _DRHO_STAR_EXT(cls):
return 'drho_rot'
@classproperty
def _FOLDER_DYNAMICAL_MATRIX(cls):
return 'DYN_MAT'
@classproperty
def _OUTPUT_DYNAMICAL_MATRIX_PREFIX(cls):
return os.path.join(cls._FOLDER_DYNAMICAL_MATRIX,
'dynamical-matrix-')
@classproperty
def _use_methods(cls):
"""
Additional use_* methods for the ph class.
"""
retdict = JobCalculation._use_methods
retdict.update({
"settings": {
'valid_types': ParameterData,
'additional_parameter': None,
'linkname': 'settings',
'docstring': "Use an additional node for special settings",
},
"parameters": {
'valid_types': ParameterData,
'additional_parameter': None,
'linkname': 'parameters',
'docstring': ("Use a node that specifies the input parameters "
"for the namelists"),
},
"parent_folder": {
'valid_types': RemoteData,
'additional_parameter': None,
'linkname': 'parent_calc_folder',
'docstring': ("Use a remote folder as parent folder (for "
"restarts and similar"),
},
"qpoints": {
'valid_types': KpointsData,
'additional_parameter': None,
'linkname': 'qpoints',
'docstring': ("Specify the Qpoints on which to compute phonons"),
},
})
return retdict
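    # Hedged usage sketch (assumption, based on the AiiDA 0.x JobCalculation convention that
    # each _use_methods entry is exposed as a use_<name>() method; not part of this plugin):
    # calc = PhCalculation()
    # calc.use_parameters(ParameterData(dict={'INPUTPH': {'tr2_ph': 1e-8}}))
    # calc.use_qpoints(qpoints_node)            # a KpointsData mesh or explicit q-point list
    # calc.use_parent_folder(parent_remote)     # RemoteData from a parent PwCalculation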
def _prepare_for_submission(self,tempfolder,inputdict):
"""
This is the routine to be called when you want to create
the input files and related stuff with a plugin.
:param tempfolder: a aiida.common.folders.Folder subclass where
the plugin should put all its files.
:param inputdict: a dictionary with the input nodes, as they would
be returned by get_inputdata_dict (without the Code!)
"""
try:
code = inputdict.pop(self.get_linkname('code'))
except KeyError:
raise InputValidationError("No code specified for this calculation")
local_copy_list = []
remote_copy_list = []
remote_symlink_list = []
try:
parameters = inputdict.pop(self.get_linkname('parameters'))
except KeyError:
raise InputValidationError("No parameters specified for this calculation")
if not isinstance(parameters, ParameterData):
raise InputValidationError("parameters is not of type ParameterData")
try:
qpoints = inputdict.pop(self.get_linkname('qpoints'))
except KeyError:
raise InputValidationError("No qpoints specified for this calculation")
if not isinstance(qpoints, KpointsData):
raise InputValidationError("qpoints is not of type KpointsData")
# Settings can be undefined, and defaults to an empty dictionary.
        # They will be used for any input that doesn't fit elsewhere.
settings = inputdict.pop(self.get_linkname('settings'),None)
if settings is None:
settings_dict = {}
else:
if not isinstance(settings, ParameterData):
raise InputValidationError("settings, if specified, must be of "
"type ParameterData")
# Settings converted to uppercase
settings_dict = _uppercase_dict(settings.get_dict(),
dict_name='settings')
parent_calc_folder = inputdict.pop(self.get_linkname('parent_folder'),None)
if parent_calc_folder is None:
raise InputValidationError("No parent calculation found, needed to "
"compute phonons")
# TODO: to be a PwCalculation is not sufficient: it could also be a nscf
# calculation that is invalid for phonons
if not isinstance(parent_calc_folder, RemoteData):
raise InputValidationError("parent_calc_folder, if specified,"
"must be of type RemoteData")
restart_flag = False
# extract parent calculation
parent_calcs = parent_calc_folder.get_inputs(node_type=JobCalculation)
n_parents = len(parent_calcs)
if n_parents != 1:
raise UniquenessError("Input RemoteData is child of {} "
"calculation{}, while it should have "
"a single parent".format(n_parents,
"" if n_parents==0 else "s"))
parent_calc = parent_calcs[0]
# check that it is a valid parent
self._check_valid_parent(parent_calc)
if not isinstance(parent_calc, PwCalculation):
restart_flag = True
# Also, the parent calculation must be on the same computer
new_comp = self.get_computer()
old_comp = parent_calc.get_computer()
if ( not new_comp.uuid == old_comp.uuid ):
raise InputValidationError("PhCalculation must be launched on the same computer"
" of the parent: {}".format(old_comp.get_name()))
# put by default, default_parent_output_folder = ./out
try:
default_parent_output_folder = parent_calc._OUTPUT_SUBFOLDER
except AttributeError:
try:
default_parent_output_folder = parent_calc._get_output_folder()
except AttributeError:
raise InputValidationError("Parent of PhCalculation does not "
"have a default output subfolder")
#os.path.join(
# parent_calc.OUTPUT_SUBFOLDER,
# '{}.save'.format(parent_calc.PREFIX))
parent_calc_out_subfolder = settings_dict.pop('PARENT_CALC_OUT_SUBFOLDER',
default_parent_output_folder)
# Here, there should be no other inputs
if inputdict:
raise InputValidationError("The following input data nodes are "
"unrecognized: {}".format(inputdict.keys()))
##############################
# END OF INITIAL INPUT CHECK #
##############################
# I put the first-level keys as uppercase (i.e., namelist and card names)
# and the second-level keys as lowercase
# (deeper levels are unchanged)
input_params = _uppercase_dict(parameters.get_dict(),
dict_name='parameters')
input_params = {k: _lowercase_dict(v, dict_name=k)
for k, v in input_params.iteritems()}
prepare_for_d3 = settings_dict.pop('PREPARE_FOR_D3',False)
if prepare_for_d3:
self._blocked_keywords += [('INPUTPH', 'fildrho'),
('INPUTPH', 'drho_star%open'),
('INPUTPH', 'drho_star%ext'),
('INPUTPH', 'drho_star%dir')]
# I remove unwanted elements (for the moment, instead, I stop; to change when
# we setup a reasonable logging)
for nl, flag in self._blocked_keywords:
if nl in input_params:
if flag in input_params[nl]:
raise InputValidationError(
"You cannot specify explicitly the '{}' flag in the '{}' "
"namelist or card.".format(flag, nl))
# Set some variables (look out at the case! NAMELISTS should be uppercase,
# internal flag names must be lowercase)
if 'INPUTPH' not in input_params:
raise InputValidationError("No namelist INPUTPH found in input") # I cannot decide what to do in the calculation
input_params['INPUTPH']['outdir'] = self._OUTPUT_SUBFOLDER
input_params['INPUTPH']['iverbosity'] = 1 # in human language 1=high
input_params['INPUTPH']['prefix'] = self._PREFIX
input_params['INPUTPH']['fildyn'] = self._OUTPUT_DYNAMICAL_MATRIX_PREFIX
if prepare_for_d3:
input_params['INPUTPH']['fildrho'] = self._DRHO_PREFIX
input_params['INPUTPH']['drho_star%open'] = True
input_params['INPUTPH']['drho_star%ext'] = self._DRHO_STAR_EXT
input_params['INPUTPH']['drho_star%dir'] = self._FOLDER_DRHO
# qpoints part
try:
mesh,offset = qpoints.get_kpoints_mesh()
if any([i!=0. for i in offset]):
raise NotImplementedError("Computation of phonons on a mesh with"
" non zero offset is not implemented, at the level of ph.x")
input_params["INPUTPH"]["ldisp"] = True
input_params["INPUTPH"]["nq1"] = mesh[0]
input_params["INPUTPH"]["nq2"] = mesh[1]
input_params["INPUTPH"]["nq3"] = mesh[2]
postpend_text = None
except AttributeError:
# this is the case where no mesh was set. Maybe it's a list
try:
list_of_points = qpoints.get_kpoints(cartesian=True)
except AttributeError as e:
# In this case, there are no info on the qpoints at all
raise InputValidationError("Neither a qpoints mesh or a valid "
"list of qpoints was found in input",
e.message)
# change to 2pi/a coordinates
lattice_parameter = numpy.linalg.norm(qpoints.cell[0])
list_of_points *= lattice_parameter / (2.*numpy.pi)
# add here the list of point coordinates
if len(list_of_points)>1:
input_params["INPUTPH"]["qplot"] = True
input_params["INPUTPH"]["ldisp"] = True
postpend_text = "{}\n".format(len(list_of_points))
for points in list_of_points:
postpend_text += "{} {} {} 1\n".format(*points)
# Note: the weight is fixed to 1, because ph.x calls these
# things weights but they are not such. If they are going to
# exist with the meaning of weights, they will be supported
else:
input_params["INPUTPH"]["ldisp"] = False
postpend_text = ""
for points in list_of_points:
postpend_text += "{} {} {}\n".format(*points)
# =================== NAMELISTS ========================
# customized namelists, otherwise not present in the distributed ph code
try:
namelists_toprint = settings_dict.pop('NAMELISTS')
if not isinstance(namelists_toprint, list):
raise InputValidationError(
"The 'NAMELISTS' value, if specified in the settings input "
"node, must be a list of strings")
except KeyError: # list of namelists not specified in the settings; do automatic detection
namelists_toprint = self._compulsory_namelists
input_filename = tempfolder.get_abs_path(self._INPUT_FILE_NAME)
# create a folder for the dynamical matrices
if not restart_flag: # if it is a restart, it will be copied over
tempfolder.get_subfolder(self._FOLDER_DYNAMICAL_MATRIX,
create=True)
with open(input_filename,'w') as infile:
infile.write('AiiDA calculation\n')
for namelist_name in namelists_toprint:
infile.write("&{0}\n".format(namelist_name))
# namelist content; set to {} if not present, so that we leave an
# empty namelist
namelist = input_params.pop(namelist_name,{})
for k, v in sorted(namelist.iteritems()):
infile.write(convert_input_to_namelist_entry(k,v))
infile.write("/\n")
# add list of qpoints if required
if postpend_text is not None:
infile.write(postpend_text)
#TODO: write nat_todo
if input_params:
raise InputValidationError(
"The following namelists are specified in input_params, but are "
"not valid namelists for the current type of calculation: "
"{}".format(",".join(input_params.keys())))
# copy the parent scratch
symlink = settings_dict.pop('PARENT_FOLDER_SYMLINK',
_default_symlink_usage) # a | |
-1 else None
if road is not None and (road.__isDuplex__() or road.__to__() == self.id_):
self.providerIndex.append(index)
if road is not None and (road.__isDuplex__() or road.__from__() == self.id_):
self.receiverIndex.append(index)
if road is not None:
self.validRoadIndex.append(index)
self.provider = [self.roadIds[index] for index in self.providerIndex]
self.receiver = [self.roadIds[index] for index in self.receiverIndex]
self.validRoad = [self.roadIds[index] for index in self.validRoadIndex]
self.provider.sort()
# **** dynamic parameters ****#
self.readyCars = []
self.carportCarNum = 0
self.finishCarNum = 0
# **** flag ****#
self.done = False
# main functions
def step(self):
for roadId in self.validRoad:
ROADDICT[roadId].setBucket(self.id_)
        # data prepare
nextCarId, nextCar, nextRoad, nextCarPriority = [], [], [], []
for provideIndex in range(self.provider.__len__()):
nextCarId.append(ROADDICT[self.provider[provideIndex]].firstPriorityCar())
# if first priority car exists
if nextCarId[provideIndex] != -1:
nextCar.append(CARDICT[nextCarId[provideIndex]])
nextRoad.append(nextCar[provideIndex].__nextRoad__())
# nextRoad == -1 => terminal
if nextRoad[provideIndex] == -1:
nextCarPriority.append(2)
else:
nextCarPriority.append(self.prority(self.provider[provideIndex], nextRoad[provideIndex]))
else:
nextCar.append(-1)
nextRoad.append(-1)
nextCarPriority.append(-1)
# loop
for provideIndex in range(self.provider.__len__()):
conflict = False
while nextCar[provideIndex] != -1:
# same next road and high priority lead to conflict
provider = ROADDICT[self.provider[provideIndex]]
for i in range(self.provider.__len__()):
if nextRoad[i] == nextRoad[provideIndex] and nextCarPriority[i] > nextCarPriority[provideIndex]:
conflict = True
break
if conflict:
break
#
if nextRoad[provideIndex] == -1:
provider.firstPriorityCarAct(0)
CARDISTRIBUTION[1] -= 1
CARDISTRIBUTION[2] += 1
else:
nextroad_ = ROADDICT[nextRoad[provideIndex]]
action = nextroad_.receiveCar(nextCar[provideIndex].__id__())
if action == 2:
break
provider.firstPriorityCarAct(action)
nextCarId[provideIndex] = provider.firstPriorityCar()
if nextCarId[provideIndex] != -1:
nextCar[provideIndex] = CARDICT[nextCarId[provideIndex]]
nextRoad[provideIndex] = nextCar[provideIndex].__nextRoad__()
# nextRoad == -1 => terminal
if nextRoad[provideIndex] == -1:
nextCarPriority[provideIndex] = 2
else:
nextCarPriority[provideIndex] = self.prority(self.provider[provideIndex],
nextRoad[provideIndex])
else:
nextCar[provideIndex] = -1
nextRoad[provideIndex] = -1
nextCarPriority[provideIndex] = -1
done = True
for provideIndex in range(self.provider.__len__()):
if nextCar[provideIndex] != -1:
done = False
self.done = done
def outOfCarport(self):
self.readyCars = self.left
self.left = []
if TIME[0] in self.carport.keys():
self.carport[TIME[0]].sort()
self.readyCars.extend(self.carport[TIME[0]])
if self.readyCars.__len__() == 0:
return
# self.readyCars.sort()
for roadId in self.receiver:
ROADDICT[roadId].setBucket(self.id_)
for i in range(self.readyCars.__len__()):
carId = self.readyCars[i]
roadId = CARDICT[carId].__nextRoad__()
road = ROADDICT[roadId]
if roadId not in self.receiver:
print("Car(%d).Road(%d) not in cross(%d).function:class.outOfCarport" % (carId, roadId, self.id_))
act = road.receiveCar(carId)
if act != 0:
self.left = self.readyCars[i:]
break
# assert act==0, print("Time(%d),Cross(%d),Road(%d),Car(%d) can't pull away from carport"%(TIME[0],self.id_,roadId,carId))
self.carportCarNum -= 1
CARDISTRIBUTION[0] -= 1
CARDISTRIBUTION[1] += 1
#
# other functions
#
def prority(self, providerId, receiverId):
return self.priorityMap[providerId][receiverId]
def setDone(self, bool):
self.done = bool
def setLoc(self, x, y):
self.x, self.y = x, y
def setMapLoc(self, mapX, mapY):
self.mapX, self.mapY = mapX, mapY
def roadDirection(self, roadId):
if self.roadIds[0] == roadId:
return 0
elif self.roadIds[1] == roadId:
return 1
elif self.roadIds[2] == roadId:
return 2
elif self.roadIds[3] == roadId:
return 3
else:
return -1
def carportInitial(self, timePlan, carId):
if timePlan not in self.carport.keys():
self.carport[timePlan] = [carId]
else:
self.carport[timePlan].append(carId)
self.carportCarNum += 1
#
# show statistic parameters
#
def __id__(self):
return self.id_
def __roadIds__(self):
return self.roadIds
def __providerIndex__(self):
return self.providerIndex
def __receiverIndex__(self):
return self.receiverIndex
def __validRoadIndex__(self):
return self.validRoadIndex
def __provider__(self):
return self.provider
def __receiver__(self):
return self.receiver
def __validRoad__(self):
return self.validRoad
def __x__(self):
return self.x
def __y__(self):
return self.y
def __mapX__(self):
return self.mapX
def __mapY__(self):
return self.mapY
def __done__(self):
return self.done
#
# show dynamic parameters
#
def __carportCarNum__(self):
return self.carportCarNum
def __finishCarNum__(self):
return self.finishCarNum
#
# show some important info
#
def __loc__(self):
return self.x, self.y
def __mapLoc__(self):
return self.mapX, self.mapY
class simulation(object):
def __init__(self):
self.dead = False
def step(self):
print("time:%d" % TIME[0])
for crossId in CROSSNAMESPACE:
CROSSDICT[crossId].setDone(False)
print("pre-movement...")
for road in ROADNAMESPACE:
ROADDICT[road].stepInit()
print("while loop...")
crossDone = 0
while crossDone < CROSSNAMESPACE.__len__():
for crossId in CROSSNAMESPACE:
cross = CROSSDICT[crossId]
if not cross.__done__():
cross.step()
if cross.__done__():
crossDone += 1
print("car pulling away from carport")
for i in range(CROSSNAMESPACE.__len__()):
crossId = CROSSNAMESPACE[i]
for roadId in CROSSDICT[crossId].__validRoad__():
ROADDICT[roadId].setBucket(crossId)
CROSSDICT[crossId].outOfCarport()
def simulate(self, saveImagePath=None, draw=False):
visualize = visualization(saveImagePath)
visualize.crossLocGen()
while True:
self.step()
            # commented out by huguodong
if draw:
visualize.drawMap()
if CARDISTRIBUTION[2] == CARNAMESPACE.__len__():
print(CARDISTRIBUTION[2])
break
if self.dead:
break
TIME[0] += 1
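# Hedged usage sketch (added; not part of the original file): once the global tables
# (CARNAMESPACE, CARDICT, ROADDICT, CROSSDICT, ...) have been filled from the input files,
# a whole run is driven by a single simulation object; the image path is a placeholder.
# scheduler = simulation()
# scheduler.simulate(saveImagePath='./img', draw=False)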
class visualization(object):
def __init__(self, saveImagePath):
self.maxX, self.maxY = 0, 0
        # image output path: where the rendered frames are saved
if saveImagePath:
self.savePath = saveImagePath
            shutil.rmtree(saveImagePath)  # clear the existing folder
            os.mkdir(saveImagePath)  # recreate the directory
# ** cross param **#
self.crossRadius = 14
self.crossDistance = 150
self.crossColor = [25, 200, 0]
# ** road param **#
self.roadColor = [0, 0, 0] # black
self.roadLineType = 4
self.channelWidth = 5
self.channelDistance = 3
self.lineWidth = 2
self.time = 0
#
# cross location gen
#
def crossLocGen(self):
# **** relative location ****#
# denote the first cross as the origin of coordinates
for crossId in CROSSNAMESPACE:
CROSSDICT[crossId].setDone(False)
crossList = [CROSSNAMESPACE[0]]
minX, minY = 0, 0
while (crossList.__len__() > 0):
nextCrossList = []
for crossId in crossList:
                presentX, presentY = CROSSDICT[crossId].__loc__()
validRoad = CROSSDICT[crossId].__validRoad__()
for roadId in validRoad:
# next cross id
nextCrossId = ROADDICT[roadId].__from__() if ROADDICT[roadId].__from__() != crossId \
else ROADDICT[roadId].__to__()
# if next cross is visited
if not CROSSDICT[nextCrossId].__done__():
# visit sets true
CROSSDICT[nextCrossId].setDone(True)
# relative location of nextcross
                        nextX, nextY = self.crossRelativeLoc(presentX, presentY, crossId, roadId)
# update location
CROSSDICT[nextCrossId].setLoc(nextX, nextY)
minX, minY, self.maxX, self.maxY = \
min(nextX, minX), min(nextY, minY), max(nextX, self.maxX), max(nextY, self.maxY)
nextCrossList.append(nextCrossId)
crossList = nextCrossList
self.maxX, self.maxY = (self.maxX - minX + 2) * self.crossDistance, (self.maxY - minY + 2) * self.crossDistance
for crossId in CROSSNAMESPACE:
x, y = CROSSDICT[crossId].__loc__()
CROSSDICT[crossId].setLoc(x - minX, y - minY)
CROSSDICT[crossId].setMapLoc((x - minX + 1) * self.crossDistance, (y - minY + 1) * self.crossDistance)
def crossRelativeLoc(self, x, y, crossId, roadId):
roadDirection = CROSSDICT[crossId].roadDirection(roadId)
if roadDirection == 0:
return x, y - 1
elif roadDirection == 1:
return x + 1, y
elif roadDirection == 2:
return x, y + 1
elif roadDirection == 3:
return x - 1, y
else:
print("Cross(%d) don't interact with road(%d)" % (self.id_, roadId))
#
# draw functions
#
def drawMap(self):
img = np.ones((self.maxX, self.maxY, 3), np.uint8) * 255
# draw road
for roadId in ROADNAMESPACE:
self.plotRoad(roadId, img)
# draw cross
for crossId in CROSSNAMESPACE:
self.plotCross(crossId, img)
# plot info
self.plotInfo(img)
cv.imwrite(self.savePath + '/%d.jpg' % TIME[0], img)
def plotCross(self, crossId, img):
x, y = CROSSDICT[crossId].__mapLoc__()
cv.circle(img, (x, y), self.crossRadius, color=self.crossColor, thickness=-1, lineType=-1)
if crossId >= 10:
xx, yy = int(x - 4 * self.crossRadius / 5), int(y + self.crossRadius / 2)
else:
xx, yy = int(x - self.crossRadius / 2), int(y + self.crossRadius / 2)
cv.putText(img, str(crossId), (xx, yy), cv.FONT_HERSHEY_SIMPLEX, 0.6, [0, 0, 255], 2)
def plotRoad(self, roadId, img):
# get road info
road = ROADDICT[roadId]
fromX, fromY = CROSSDICT[road.__from__()].__mapLoc__()
toX, toY = CROSSDICT[road.__to__()].__mapLoc__()
# plot line
cv.line(img, (fromX, fromY), (toX, toY), color=self.roadColor, thickness=2)
# plot bucket
self.drawBucket(road, 'forward', img)
if road.__isDuplex__():
self.drawBucket(road, 'backward', img)
def drawBucket(self, road, lane, img):
bucket = road.__forwardBucket__() if lane != 'backward' else road.__backwardBucket__()
length = road.__length__()
channel = road.__channel__()
fromX, fromY = CROSSDICT[road.__from__()].__mapLoc__()
toX, toY = CROSSDICT[road.__to__()].__mapLoc__()
XY, intervalXY, rectangleSize, channel2XY, length2XY = self.bucketDrawInitial(fromX, fromY, toX, toY, lane,
length)
for i in range(length):
for j in range(channel):
xRD, yRD = int(XY[0] + rectangleSize[0]), int(XY[1] + rectangleSize[1])
if bucket[i][j] is None:
cv.rectangle(img, (int(XY[0]), int(XY[1])), (xRD, yRD), (0, 0, 0), 1)
else:
color = CARDICT[bucket[i][j]].__carColor__()
cv.rectangle(img, (int(XY[0]), int(XY[1])), (xRD, yRD), color=color, thickness=-1)
XY[channel2XY] = XY[channel2XY] + intervalXY[channel2XY]
XY[channel2XY] = XY[channel2XY] - intervalXY[channel2XY] * channel
XY[length2XY] = XY[length2XY] + intervalXY[length2XY]
def bucketDrawInitial(self, fromX, fromY, toX, toY, lane, length):
direction = self.bucketDirection(fromX, fromY, toX, toY, lane)
unitLength = (self.crossDistance - self.crossRadius * 4) / length
if lane == 'backward':
toY = fromY
toX = fromX
if direction == 'north':
XY = [fromX + self.channelDistance, toY + self.crossRadius * 2]
intervalXY = self.channelDistance + self.channelWidth, unitLength
rectangleSize = self.channelWidth, unitLength
channel2XY, length2XY = 0, 1
elif direction == 'south':
XY = [fromX - self.channelDistance - self.channelWidth, toY - self.crossRadius * 2 - unitLength]
intervalXY = -(self.channelDistance + self.channelWidth), -unitLength
rectangleSize = self.channelWidth, unitLength
channel2XY, length2XY = 0, 1
elif direction == 'east':
XY = [toX - self.crossRadius * 2 - unitLength, fromY + self.channelDistance]
intervalXY = -unitLength, self.channelDistance + self.channelWidth
rectangleSize = unitLength, self.channelWidth
channel2XY, length2XY = 1, 0
elif direction == 'west':
XY = [toX + self.crossRadius * 2, fromY - self.channelDistance - self.channelWidth]
intervalXY = unitLength, -(self.channelDistance + self.channelWidth)
            rectangleSize = unitLength, self.channelWidth
            channel2XY, length2XY = 1, 0
        return XY, intervalXY, rectangleSize, channel2XY, length2XY
tags = filter_text.split(' ')
for tag in tags:
if tag not in text:
return False
return True
def hex_space(string):
if not app_config['hex_space_separation']:
return string
return ' '.join([string[i:i+2] for i in range(0, len(string), 2)])
def space_bindies(bindie):
if not app_config['hex_space_separation']:
return bindie
return ' '.join([bindie[i:i+8] for i in range(0, len(bindie), 8)])
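# Illustrative examples (added, assuming hex_space_separation is enabled in app_config):
# hex_space('3c088034')                               ->  '3c 08 80 34'
# space_bindies('00111100000010001000000000110100')   ->  '00111100 00001000 10000000 00110100'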
def clear_error(key):
if isinstance(key, int):
key = '{}'.format(key)
if key in user_errors:
del user_errors[key]
def cursor_value(line, column):
return '{}.{}'.format(line, column)
def get_cursor(handle, cursor_tag = tk.INSERT):
cursor = handle.index(cursor_tag)
dot = cursor.find('.')
line = int(cursor[:dot])
column = int(cursor[dot + 1:])
return cursor, line, column
# To easily move cursor by x,y amount or floor/ceil the column
def modify_cursor(cursor, line_amount, column_amount, text):
# cursor value format:
# '{line}.{column}'
# 1-base 0-base
if isinstance(text, str):
text = text.split('\n')
dot = cursor.find('.')
line = int(cursor[:dot])
column = int(cursor[dot + 1:])
line = keep_within(line + line_amount, 1, len(text))
line_length = len(text[line - 1])
if isinstance(column_amount, int):
column = keep_within(column + column_amount, 0, line_length)
else:
if column_amount == 'min':
column = 0
if column_amount == 'max':
column = line_length
return cursor_value(line, column), line, column
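# Illustrative example (added): moving the cursor one line down and snapping the column to
# the end of that line.
# modify_cursor('2.5', 1, 'max', 'first\nsecond\nthird')   ->  ('3.5', 3, 5)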
def geometry(geo):
# geometry format:
# '{width}x{height}+{x_pos}+{y_pos}'
# |---optional---|
# |-when setting-|
mul_symbol = geo.find('x')
plus_symbol_one = geo.find('+')
plus_symbol_two = geo.find('+', plus_symbol_one + 1)
window_w = int(geo[:mul_symbol])
window_h = int(geo[mul_symbol + 1:plus_symbol_one])
window_x = int(geo[plus_symbol_one + 1:plus_symbol_two])
window_y = int(geo[plus_symbol_two + 1:])
return window_w, window_h, window_x, window_y
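# Illustrative example (added): unpacking a tkinter geometry string.
# geometry('800x600+100+50')   ->  (800, 600, 100, 50)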
def get_word_at(list, line, column):
line_text = list[line - 1]
lower_bound_punc = line_text.rfind('(', 0, column)
# if lower_bound_punc < 0:
# lower_bound_punc = line_text.rfind('[', 0, column)
if lower_bound_punc < 0:
lower_bound_punc = line_text.rfind(' ', 0, column)
upper_bound_punc = line_text.find(',', column)
upp_punc_2 = line_text.find('], ', column)
if upper_bound_punc < 0 or upp_punc_2 >= 0:
upper_bound_punc = line_text.find('[', column)
if upper_bound_punc < 0:
upper_bound_punc = line_text.find('], ', column)
if upper_bound_punc < 0:
upper_bound_punc = line_text.find(')', column)
if upper_bound_punc < 0:
upper_bound_punc = len(line_text)
return list[line - 1][lower_bound_punc + 1: upper_bound_punc], lower_bound_punc
# Without this, if the mouse_x location is anywhere beyond halfway between the x positions of
# the end character and the right end of the text box, the text input cursor will move to
# the beginning of the next line
def correct_cursor(event):
try:
handle, cursor_x, cursor_y = event.widget, event.x, event.y
x, y, w, h = handle.bbox(tk.INSERT)
if cursor_x != keep_within(cursor_x, x, x+w) or cursor_y != keep_within(cursor_y, y, y+h):
cursor, line, column = get_cursor(handle)
if not column:
text_content = get_text_content(handle)
handle.mark_set(tk.INSERT, modify_cursor(cursor, -1, 'max', text_content)[0])
except:
''
# Is called pretty much after every time the view is changed
prev_reg_target, prev_address_target, prev_cursor_location = '', 0, 0
def highlight_stuff(widget=None, skip_moving_cursor=False, ctrl_held=False):
global prev_reg_target, prev_address_target, prev_cursor_location, targ_direction
if not disassembler_loaded():
return
[hack_file_text_box.tag_remove(tag, '1.0', tk.END) for tag in app_config['tag_config']
if tag not in ['highlighted_text_bg']]
[text_box.tag_remove(string, '1.0', tk.END) for text_box in ALL_TEXT_BOXES
for string in ['text_pasting', 'cursor_line', 'highlighted_text']]
[widg.place_forget() for widg in [paste_consume_comment_label, paste_consume_hack_label]]
if not widget:
cursor, c_line, column = get_cursor(hack_file_text_box)
hack_function = True
else:
cursor, c_line, column = get_cursor(widget)
hack_function = True if widget is hack_file_text_box else False
text = get_text_content(hack_file_text_box).split('\n')
targeting, _ = get_word_at(text, c_line, column)
if not prev_cursor_location:
prev_cursor_location = navigation + c_line - 1
elif not skip_moving_cursor:
this_handle = hack_file_text_box if not widget else widget
new_cursor = None
if prev_cursor_location <= navigation:
new_cursor = '1.0'
elif prev_cursor_location > navigation + max_lines:
new_cursor = cursor_value(max_lines, 0)
elif prev_cursor_location in range(navigation, navigation + max_lines):
new_cursor = cursor_value(prev_cursor_location - navigation, 0)
new_cursor = modify_cursor(new_cursor, 1, 'max', get_text_content(this_handle))[0]
if new_cursor:
this_handle.mark_set(tk.INSERT, new_cursor)
cursor, c_line, column = get_cursor(this_handle)
new_text = ''
if (widget is hack_file_text_box or widget is base_file_text_box) and prev_cursor_location >= 0x10:
file_navi = prev_cursor_location << 2
file = disasm.hack_file if widget is hack_file_text_box else disasm.base_file
decoded = disasm.decode(int_of_4_byte_aligned_region(file[file_navi:file_navi+4]), prev_cursor_location)
cut = decoded.find(' ')
if cut < 0:
cut = len(decoded)
mnemonic = decoded[:cut]
if mnemonic in disasm.documentation:
new_text = '{}: {}'.format(mnemonic, disasm.documentation[mnemonic])
if widget is hack_file_text_box and mnemonic in LOAD_AND_STORE_FUNCTIONS and \
not app_config['hex_mode'] and not app_config['bin_mode']:
pointers = disasm.get_pointers_in(prev_cursor_location)
if str(prev_cursor_location) in pointers:
targ_pointer = pointers[str(prev_cursor_location)]
_pointers = pointers.copy()
for key in _pointers:
if pointers[key] != targ_pointer:
del pointers[key]
if len(pointers) == 1:
pointers = {}
else:
pointers = {}
else:
pointers = {}
else:
pointers = {}
if new_text:
status_text.set(new_text)
if widget in [hack_file_text_box, comments_text_box] and ctrl_held:
try:
clip = window.clipboard_get() # Conventionally raises an exception if there is no clipboard contents
clip_count = clip.count('\n') + 1
lines_dif = min([clip_count, (max_lines - c_line) + 1])
widget.tag_add('text_pasting',
cursor_value(c_line, 0),
cursor_value(c_line + lines_dif, 0))
if lines_dif != clip_count:
extra_lines = clip_count - lines_dif
label_text = '+{} line{}'.format(extra_lines, 's' if extra_lines > 1 else '')
_x, _y, _w, _h, label = (paste_consume_hack_x, paste_consume_label_y,
paste_consume_hack_w, paste_consume_label_h, paste_consume_hack_label) \
if widget is hack_file_text_box else \
(paste_consume_comment_x, paste_consume_label_y,
paste_consume_comment_w, paste_consume_label_h, paste_consume_comment_label)
label.config(text=label_text, anchor=tk.N, pady=0)
label.place(x=_x, y=_y, width=_w)
# status_text.set('Lines that fall outside of your view will not be pasted.')
except:
''
jumps_from = {}
if prev_cursor_location in range(navigation, navigation + max_lines):
[text_box.tag_add('cursor_line',
cursor_value(c_line, 0),
cursor_value(c_line + 1, 0))
for text_box in ALL_TEXT_BOXES]
if prev_address_target or not column:
c_line = 0
# differ_labels[i][0].place(x=dif_label_x, y=y_pos, width=2, height=font_h - 6)
font_w, font_h = font_dimension(main_font_size)
for i in range(max_lines):
navi = (navigation + i) << 2
if int_of_4_byte_aligned_region(disasm.base_file[navi:navi+4]) != int_of_4_byte_aligned_region(disasm.hack_file[navi:navi+4]):
differ_labels[i][0].place(x=dif_label_x, y=differ_labels[i][1], width=2, height=font_h - 6)
else:
differ_labels[i][0].place_forget()
address = None if c_line else prev_address_target
for i in range(len(text)):
navi = navigation + i
key = str(navi)
if not app_config['hex_mode'] and not app_config['bin_mode']:
line = i + 1
line_text = text[i]
this_word = line_text[:line_text.find(' ')]
imm_id = text[i].find(app_config['immediate_identifier'])
# Highlight load/store instructions immediate and base parameters if they
# point to the same data as the selected load/store instruction
if key in pointers and imm_id > 0:
hack_file_text_box.tag_add('same_pointer',
cursor_value(line, imm_id),
cursor_value(line, len(text[i])))
# Highlight the end of each function
if text[i] == 'JR RA':
hack_file_text_box.tag_add('function_end',
cursor_value(line, 0),
cursor_value(line, 5))
# Highlight branch functions
elif this_word in BRANCH_FUNCTIONS:
hack_file_text_box.tag_add('branch',
cursor_value(line, 0),
cursor_value(line, len(text[i])))
hex_address = line_text[imm_id + 1:]
if line == c_line:
address = hex_address
try:
possibly_value_error = (deci(hex_address) - (disasm.game_offset if disasm.game_address_mode else 0)) >> 2
jumps_from[str(navi)] = possibly_value_error
except ValueError:
''
# Highlight jump functions
elif this_word in JUMP_FUNCTIONS:
hack_file_text_box.tag_add('jump',
cursor_value(line, 0),
cursor_value(line, len(text[i])))
hex_address = line_text[imm_id + 1:]
if line == c_line and this_word in ['J', 'JAL']:
address = hex_address
try:
possibly_value_error = (deci(hex_address) - (disasm.game_offset if disasm.game_address_mode else 0)) >> 2
jumps_from[str(navi)] = possibly_value_error
except ValueError as e:
''
elif line_text == 'NOP':
hack_file_text_box.tag_add('nop',
cursor_value(line, 0),
cursor_value(line, len(text[i])))
# Highlight instructions in which are a target of any jump or branch
if key in disasm.branches_to:
hack_file_text_box.tag_add('branch_to',
cursor_value(line, 0),
cursor_value(line + 1, 0))
navi_offset = navi << 2
navi_offset = disasm.region_align(navi_offset)
key2 = str(navi_offset >> 2)
if key2 in disasm.jumps_to:
hack_file_text_box.tag_add('jump_to',
cursor_value(line, 0),
cursor_value(line + 1, 0))
# End if not hex_mode
# Highlight errors
if key in user_errors:
err_code = user_errors[key][0]
hack_file_text_box.tag_add('bad' if err_code > -3 else 'out_of_range',
cursor_value(i + 1, 0),
cursor_value(i + 2, 0))
# Highlight jumps/branches to target instruction
if hack_function:
reg_offset_key = str(disasm.region_align(prev_cursor_location << 2) >> 2)
this_key = str(prev_cursor_location)
dictie = None
if this_key in disasm.branches_to:
dictie = disasm.branches_to
elif reg_offset_key in disasm.jumps_to:
dictie = disasm.jumps_to
this_key = reg_offset_key
if dictie:
had_ups = False
had_downs = False
for i in dictie[this_key]:
place = i - navigation
if place in range(max_lines):
hack_file_text_box.tag_add('jump_from',
cursor_value(place + 1, 0),
cursor_value(place + 2, 0))
elif place >= max_lines:
had_downs = True
target_of_down()
else:
had_ups = True
target_of_up()
if not had_ups:
target_of_up_label.place_forget()
if not had_downs:
target_of_down_label.place_forget()
else:
target_of_down_label.place_forget()
target_of_up_label.place_forget()
if address:
if hack_function:
if c_line:
try:
# Raises error if user types non-hex characters where an address/offset is
address = deci(address)
except:
address = -1
else:
address = disasm.region_unalign(address, game_offset=not disasm.game_address_mode)
if disasm.game_address_mode:
address -= disasm.game_offset
address >>= 2
prev_address_target = address
jumps_from[str((c_line + navigation) - 1)] = address
if address in range(navigation, navigation + max_lines):
target_none()
place = address - navigation
hack_file_text_box.tag_add('target',
cursor_value(place + 1, 0),
cursor_value(place + 2, 0))
elif address < navigation:
target_up()
            elif address >= navigation + max_lines:
                target_down()
import os
from datetime import datetime
from tqdm import tqdm
import pickle
import json
import numpy as np
import torch
def conceptgraph2id(
root='../data/conceptgraphEmbedding/TransE_l2_concetgraph_2/',
name1='entities', name2='relations', format=".tsv"):
'''
    Load the entities and relations in conceptgraph together with their corresponding ids
'''
print('starting:', datetime.now())
entity2id = {}
relation2id = {}
path1 = os.path.join(root, name1 + format)
path2 = os.path.join(root, name2 + format)
if not os.path.exists(path1):
print(path1)
print("[ERROR] {} file does not exist!".format(name1))
assert (0)
# if not os.path.exists(path2):
# print("[ERROR] {} file does not exist!".format(name2))
# assert (0)
with open(path1, mode='r') as f1:
for line in tqdm(f1):
id1, entity = line.split('\t')
entity = entity.strip('\n')
entity2id[entity] = int(id1)
# with open(path2, mode='r') as f2:
# for line in tqdm(f2):
# id2, relaiton = line.split('\t')
# relaiton = relaiton.strip('\n')
# relation2id[relaiton] = int(id2)
print('store entity2id')
with open(root + 'entities2id.pickle', mode='wb') as f3:
pickle.dump(entity2id, f3)
# print('store relaiont2id')
# with open(root + 'relations2id.pickle', mode='wb') as f4:
# pickle.dump(relation2id, f4)
# with open(root + 'entities2id.json', mode='w') as f3:
# json.dump(entity2id, f3)
# print('store relaiont2id')
# with open(root + 'relations2id.json', mode='w') as f4:
# json.dump(relation2id, f4)
loadingtime = datetime.now()
# with open(root + 'entities2id.pickle', mode='rb') as f5:
# entity2id = pickle.load(f5)
# with open(root + 'relations2id.pickle', mode='rb') as f6:
# relation2id = pickle.load(f6)
# with open(root + 'entities2id.json', mode='r') as f5:
# entity2id = json.load(f5)
# with open(root + 'relations2id.json', mode='r') as f6:
# relation2id = json.load(f6)
#
# donetime = datetime.now()
# print('loading time', donetime - loadingtime)
# print(len(entity2id))
# print(len(relation2id))
# print('ending:', datetime.now())
def entity2vec(entity: str, entity2id, entityEmbedding):
entityID = entity2id[entity]
entityVec = entityEmbedding[entityID, :]
return entityVec
def relation2vec(relation: str, relation2id, relationEmbedding):
relaiontID = relation2id[relation]
relationVec = relationEmbedding[relaiontID, :]
return relationVec
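# Illustrative sketch (added, toy data only; not part of the original project): the lookup
# is a plain row selection from the embedding matrix.
# toy_entity2id = {'apple': 0, 'fruit': 1}
# toy_embedding = np.arange(6).reshape(2, 3)
# entity2vec('fruit', toy_entity2id, toy_embedding)   ->  array([3, 4, 5])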
def conceptgraphInitial(root='../data/conceptgraphEmbedding/TransE_l2_concetgraph_2/'):
    '''Load the entity2id, relation2id, entityEmbedding and relationEmbedding files'''
loadingtime = datetime.now()
with tqdm(total=4, desc=f'loading entity2id,relaion2id,entityEmbedding,relationEmbedding file') as pbar:
with open(root + 'entities2id.pickle', mode='rb') as f5:
entity2id = pickle.load(f5)
pbar.update(1)
with open(root + 'relations2id.pickle', mode='rb') as f6:
relation2id = pickle.load(f6)
pbar.update(1)
entityEmbedding = np.load(root + 'concetgraph_TransE_l2_entity.npy')
pbar.update(1)
relaitonEmbedding = np.load(root + 'concetgraph_TransE_l2_relation.npy')
pbar.update(1)
donetime = datetime.now()
print('initializing time', donetime - loadingtime)
return entity2id, relation2id, entityEmbedding, relaitonEmbedding
def loadingConceptGraphEntity(root='../data/conceptgraphEmbedding/TransE_l2_concetgraph_2/'):
    '''Load the conceptgraph entities and their embeddings'''
with tqdm(total=2, desc=f'loading entity2id, entityEmbeddingfile') as pbar:
with open(root + 'entities2id.pickle', mode='rb') as f5:
entity2id = pickle.load(f5)
pbar.update(1)
entityEmbedding = np.load(root + 'concetgraph_TransE_l2_entity.npy')
pbar.update(1)
return entity2id, entityEmbedding
def loadingConceptGraphEntity2ID(root, path='conceptgraphEmbedding/TransE_l2_concetgraph_2/'):
file = root + path + 'entities2id.pickle'
with tqdm(total=1, desc=f'loading entity2id in conceptgraph') as pbar:
with open(file, mode='rb') as f5:
entity2id = pickle.load(f5)
pbar.update(1)
return entity2id
def loadingInstance2concept(path='../data/conceptgraph/instance2concept.pickle'):
with tqdm(total=1, desc=f'loading Instance2concept file') as pbar:
with open(path, mode='rb') as f:
instance2concept = pickle.load(f)
pbar.update(1)
return instance2concept
def instance2conept(ins2cpt: dict, instance: str, top=2) -> list:
    '''Given an instance, return its corresponding concepts (at most two)'''
concept = ins2cpt.get(instance)
if concept == None:
concept = ['unknowConcept1', 'unknowConcept2']
elif len(concept) == 1:
concept.append('unknowConcept1')
else:
concept = concept[:top]
return concept
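# Illustrative examples (added, toy dictionary only):
# instance2conept({'apple': ['fruit', 'company', 'tree']}, 'apple')   ->  ['fruit', 'company']
# instance2conept({}, 'apple')                   ->  ['unknowConcept1', 'unknowConcept2']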
def instance2coneptPlus(ins2cpt: dict, instance: str, top=2) -> list:
    '''Given an instance, return its corresponding concepts (at most top of them)'''
concept = ins2cpt.get(instance)
if concept == None:
        '''Look up concepts in the knowledge base for the words inside the entity'''
cpt_list = word2concept(ins2cpt, instance, top=top)
if len(cpt_list) == 0:
concept = ['unknowConcept']
else:
# print('zhao dao la')
# print(instance,cpt_list)
concept = cpt_list
else:
concept = concept[:top]
return concept
def entity2id(root='../data/conceptgraphEmbedding/TransE_l2_concetgraph_2', name='entities'):
path = os.path.join(root, name + ".tsv")
e2id = {}
if not os.path.exists(path):
print('file path', path)
print("[ERROR] Data file does not exist!")
assert (0)
with open(path, mode='r', encoding='utf-8') as f:
for line in f:
entityID, entity = line.split('\t')
entityID = int(entityID)
entity = entity.strip('\n')
e2id[entity] = entityID
return e2id
def generateZeroVec(shape=(256,), dtype="float32"):
zeroVec = np.zeros(shape, dtype)
return zeroVec
def concept2vec(cpt: list, entity2id, entityEmbedding):
cpt2vec = {}
for cpt in cpt:
if (cpt == 'unknowConcept1') or (cpt == 'unknowConcept2'):
cpt2vec[cpt] = generateZeroVec()
else:
cpt2vec[cpt] = entity2vec(cpt, entity2id, entityEmbedding)
return cpt2vec
def getConceptVec(entities: list, ins2cpt: dict, entity2id: dict, entityEmbedding):
'''
    Given the entities, look up their corresponding concepts and return the concept embeddings.
    entities: ((h1, r1), (h2, r2))
    entity2id and entityEmbedding are the files produced by training the conceptgraph pre-training KG embedding.
'''
h1 = entities[0][0]
r1 = entities[0][1]
h2 = entities[1][0]
r2 = entities[1][1]
h1_cpt = instance2conept(ins2cpt, h1, top=2)
r1_cpt = instance2conept(ins2cpt, r1, top=2)
h2_cpt = instance2conept(ins2cpt, h2, top=2)
r2_cpt = instance2conept(ins2cpt, r2, top=2)
h1_cpt2vec = concept2vec(h1_cpt, entity2id, entityEmbedding)
r1_cpt2vec = concept2vec(r1_cpt, entity2id, entityEmbedding)
h2_cpt2vec = concept2vec(h2_cpt, entity2id, entityEmbedding)
r2_cpt2vec = concept2vec(r2_cpt, entity2id, entityEmbedding)
    '''Keep only the concept embedding vectors, dropping the concept names'''
h1_cpt2vec = [vec for vec in h1_cpt2vec.values()]
r1_cpt2vec = [vec for vec in r1_cpt2vec.values()]
h2_cpt2vec = [vec for vec in h2_cpt2vec.values()]
r2_cpt2vec = [vec for vec in r2_cpt2vec.values()]
h1_cpt2vec = np.array(h1_cpt2vec)
r1_cpt2vec = np.array(r1_cpt2vec)
h2_cpt2vec = np.array(h2_cpt2vec)
r2_cpt2vec = np.array(r2_cpt2vec)
return (h1_cpt2vec, r1_cpt2vec, h2_cpt2vec, r2_cpt2vec)
def getBatchConceptVec(batchEntities: list, ins2cpt: dict, e2id: dict, entityEmbedding):
'''
    Get the KG embeddings of the concepts for the head and tail entities of the sentences in one batch
'''
batch_h_r2vec = []
for entities in batchEntities[0]:
(h1_cpt2vec, r1_cpt2vec, h2_cpt2vec, r2_cpt2vec) = getConceptVec(entities, ins2cpt, e2id, entityEmbedding)
batch_h_r2vec.append((h1_cpt2vec, r1_cpt2vec, h2_cpt2vec, r2_cpt2vec))
batch_h_r2vec = np.array(batch_h_r2vec)
return batch_h_r2vec
def word2concept(instance2concept, word, top=2):
    '''Given a word, look up its concepts in conceptgraph'''
word = word.split(' ')
concept = []
for w in word:
cpt = instance2concept.get(w)
if cpt == None:
continue
else:
for c in cpt[:top]:
concept.append(c)
return concept
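# Illustrative example (added, toy dictionary only): concepts are collected word by word,
# keeping at most `top` concepts per word.
# word2concept({'new': ['recent'], 'york': ['city', 'state', 'club']}, 'new york')
#   ->  ['recent', 'city', 'state']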
def load(model_path=None, first=0, normalize=False, log_every=0, load_concepts=True, format='bin',
concepts_pattern='id[0-9]+di'):
"""
load word2vec vocabulary vectors from binary/text file
"""
if format == 'txt':
return load_text(model_path, first, load_concepts, normalize, log_every, concepts_pattern)
else:
return load_binary(model_path, first, load_concepts, normalize, log_every, concepts_pattern)
if log_every > 0:
print('done loading!')
return titles, redirects, vector_size, W, id2word, word2id, get_all_titles(W, titles, redirects, word2id)
def load_text(model_path=None, first=0, load_concepts=True, normalize=False, log_every=0,
              concepts_pattern='id[0-9]+di'):
    pass  # text-format loading is not implemented here; load() forwards the same arguments
def load_binary(model_path=None, first=0, load_concepts=True, normalize=False, log_every=0,
concepts_pattern='id[0-9]+di'):
"""
load word2vec vocabulary vectors from binary file
    This part of the code comes from the open-source code of the paper "Beyond Word Embeddings: Learning Entity and Concept Representations from Large Scale Knowledge Bases"
"""
import pickle
import numpy as np
import re
import os
if load_concepts == False:
concepts_re = re.compile(concepts_pattern)
with open(model_path, 'rb') as inp:
if log_every > 0:
print('start loading!')
# read titles meta
titles = pickle.load(inp)
if log_every > 0:
print('loaded ({0}) titles'.format(len(titles)))
# read redirects meta
redirects = pickle.load(inp)
if log_every > 0:
print('loaded ({0}) redirects'.format(len(redirects)))
# read vectors
vectors_pairs = []
while inp.tell() < os.fstat(inp.fileno()).st_size:
vectors_pairs.extend(pickle.load(inp))
num = len(vectors_pairs)
if num > 0:
vector_size = len(vectors_pairs[0][1])
else:
vector_size = 0
if log_every > 0:
print('loading ({0}) vectors of size ({1})'.format(len(vectors_pairs), vector_size))
W = np.zeros((num, vector_size))
id2word = []
word2id = {}
total = 0
for i in range(num):
term = vectors_pairs[i][0]
if load_concepts == False:
if concepts_re.match(term) != None:
continue
vec = vectors_pairs[i][1]
W[total] = vec
id2word.append(term)
word2id[term] = total
total += 1
if first > 0 and total >= first:
break
if log_every > 0 and total > 0 and total % log_every == 0:
print('loaded ({0}) vectors'.format(total))
if load_concepts == False:
W = W[:total, ] # take only loaded vectors
if normalize == True:
W = (W.T / (np.linalg.norm(W, axis=1))).T
if log_every > 0:
print('done loading ({0}) vectors!'.format(total))
return titles, redirects, vector_size, W, id2word, word2id, get_all_titles(W, titles, redirects, word2id)
def get_all_titles(model, titles, redirects, word2id, orig_titles=True, lower=True, prefix='', postfix=''):
"""
return a map of all wikipedia titles and redirects existing in the model
as keys and article id as values
"""
all_pairs = []
all_titles = {}
for i, j in sorted(titles.items()):
all_pairs.append((i, prefix + j + postfix, i))
for i, j in sorted(redirects.items()):
all_pairs.append((i, prefix + titles[j] + postfix, j))
for i, id, j in all_pairs:
if model is None or id in word2id:
if lower == True:
newi = i.lower()
if orig_titles == True:
oldval = all_titles.setdefault(newi, (id, j))
if oldval != (id, j): # this is a duplicate
if i.isupper() == False: # keep the lower version Iowa vs. IOWA and America vs. AMERICA
all_titles[newi] = (id, j)
# print('unexpected duplicate title ({0}) for orginal title ({1}) where old title ({2})'.format(i,j,oldval[1]))
else:
oldval = all_titles.setdefault(i, (id,))
# if oldval!= (id,):
# print('unexpected duplicate title ({0}) for orginal title ({1})'.format(i,j))
return all_titles
def loadJson(root, name):
path = os.path.join(root, name + ".json")
if not os.path.exists(path):
print("[ERROR] Data file does not exist!", path)
assert (0)
with tqdm(total=1, desc=f'loading' + path) as pbar:
with open(path, mode='r', encoding='utf-8') as fr:
data = json.load(fr)
pbar.update(1)
return data
def load_numpy_file_to_tensor(root,name):
path = os.path.join(root, name + ".npy")
if not os.path.exists(path):
print("[ERROR] Data file does not exist!", path)
assert (0)
with tqdm(total=1, desc=f'loading' + path) as pbar:
matrix = np.load(path,allow_pickle=True)
matrix = torch.from_numpy(matrix)
pbar.update(1)
return matrix
if __name__ == '__main__':
# print('starting loading')
# entity2id, relation2id, entityEmbedding, relaitonEmbedding = conceptgraphInitial()
e2id, entityEmbedding = loadingConceptGraphEntity()
# entity = 'age'
# entityVec = entity2vec(entity, entity2id, entityEmbedding)
# print(entityVec)
import scipy.signal
import numpy as np
from .cltools import HAVE_PYOPENCL, OpenCL_Helper
if HAVE_PYOPENCL:
import pyopencl
mf = pyopencl.mem_flags
#~ from pyacq.dsp.overlapfiltfilt import SosFiltfilt_Scipy
from .tools import FifoBuffer, median_mad
def offline_signal_preprocessor(sigs, sample_rate, common_ref_removal=True,
highpass_freq=300., lowpass_freq=None, output_dtype='float32', normalize=True, **unused):
#cast
sigs = sigs.astype(output_dtype)
#filter
if highpass_freq is not None:
b, a = scipy.signal.iirfilter(5, highpass_freq/sample_rate*2, analog=False,
btype = 'highpass', ftype = 'butter', output = 'ba')
filtered_sigs = scipy.signal.filtfilt(b, a, sigs, axis=0)
else:
filtered_sigs = sigs.copy()
if lowpass_freq is not None:
b, a = scipy.signal.iirfilter(5, lowpass_freq/sample_rate*2, analog=False,
btype = 'lowpass', ftype = 'butter', output = 'ba')
filtered_sigs = scipy.signal.filtfilt(b, a, filtered_sigs, axis=0)
# common reference removal
if common_ref_removal:
filtered_sigs = filtered_sigs - np.median(filtered_sigs, axis=1)[:, None]
# normalize
if normalize:
#~ med = np.median(filtered_sigs, axis=0)
#~ mad = np.median(np.abs(filtered_sigs-med),axis=0)*1.4826
med, mad = median_mad(filtered_sigs, axis=0)
normed_sigs = (filtered_sigs - med)/mad
else:
normed_sigs = filtered_sigs
return normed_sigs.astype(output_dtype)
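# Minimal usage sketch (illustrative values, not from the original code): run the
# offline chain on one second of synthetic 4-channel noise sampled at 10 kHz.
def _demo_offline_signal_preprocessor():
    sample_rate = 10000.
    rng = np.random.RandomState(0)
    sigs = rng.randn(int(sample_rate), 4).astype('float32')
    normed = offline_signal_preprocessor(sigs, sample_rate, common_ref_removal=True,
                    highpass_freq=300., lowpass_freq=None, normalize=True)
    # after normalization each channel has roughly zero median and unit MAD
    return normed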
def estimate_medians_mads_after_preprocesing(sigs, sample_rate, **params):
params2 = dict(params)
params2['normalize'] = False
filtered_sigs = offline_signal_preprocessor(sigs, sample_rate, **params2)
med, mad = median_mad(filtered_sigs, axis=0)
return med, mad
class SignalPreprocessor_base:
def __init__(self,sample_rate, nb_channel, chunksize, input_dtype):
self.sample_rate = sample_rate
self.nb_channel = nb_channel
self.chunksize = chunksize
self.input_dtype = input_dtype
def change_params(self, common_ref_removal=True,
highpass_freq=300.,
lowpass_freq=None,
smooth_size=0,
output_dtype='float32',
normalize=True,
pad_width = None,
signals_medians=None, signals_mads=None):
self.signals_medians = signals_medians
self.signals_mads = signals_mads
self.common_ref_removal = common_ref_removal
self.highpass_freq = highpass_freq
self.lowpass_freq = lowpass_freq
self.smooth_size = int(smooth_size)
self.output_dtype = np.dtype(output_dtype)
self.normalize = normalize
self.pad_width = pad_width
# set default pad_width if none is provided
if self.pad_width is None or self.pad_width<=0:
assert self.highpass_freq is not None, 'pad_width=None needs a highpass_freq'
self.pad_width = int(self.sample_rate/self.highpass_freq*3)
#~ print('self.pad_width', self.pad_width)
self.chunksize_1pad = self.chunksize + self.pad_width
self.chunksize_2pad = self.chunksize + 2 * self.pad_width
#~ print('self.pad_width', self.pad_width)
#~ print('self.chunksize_1pad', self.chunksize_1pad)
#~ assert self.chunksize_1pad>self.chunksize
self.coefficients = np.zeros((0, 6))
nyquist = self.sample_rate/2.
if self.highpass_freq is not None:
if self.highpass_freq>0 and self.highpass_freq<nyquist:
coeff_hp = scipy.signal.iirfilter(5, highpass_freq/self.sample_rate*2, analog=False,
btype = 'highpass', ftype = 'butter', output = 'sos')
self.coefficients = np.concatenate((self.coefficients, coeff_hp))
if self.lowpass_freq is not None:
if self.lowpass_freq>0 and self.lowpass_freq<nyquist:
#~ if self.lowpass_freq>(self.sample_rate/2.):
#~ self.lowpass_freq=(self.sample_rate/2.01)
coeff_lp = scipy.signal.iirfilter(5, lowpass_freq/self.sample_rate*2, analog=False,
btype = 'lowpass', ftype = 'butter', output = 'sos')
self.coefficients = np.concatenate((self.coefficients, coeff_lp))
if self.smooth_size>0:
b0 = (1./3)**.5
b1 = (1-b0)
b2 = 0.
coeff_smooth = np.array([[b0, b1, b2, 1,0,0]], dtype=self.output_dtype)
coeff_smooth = np.tile(coeff_smooth, (self.smooth_size, 1))
self.coefficients = np.concatenate((self.coefficients, coeff_smooth))
if self.coefficients.shape[0]==0:
#this is the null filter
self.coefficients = np.array([[1, 0, 0, 1,0,0]], dtype=self.output_dtype)
        self.nb_section = self.coefficients.shape[0]
self.forward_buffer = FifoBuffer((self.chunksize_1pad, self.nb_channel), self.output_dtype)
self.zi = np.zeros((self.nb_section, 2, self.nb_channel), dtype= self.output_dtype)
#~ print('self.normalize', self.normalize)
if self.normalize:
assert self.signals_medians is not None
assert self.signals_mads is not None
def process_buffer(self, data):
        # used for offline processing, where parallelisation is possible
        raise NotImplementedError
def initialize_stream(self):
        # must be called for each new segment, when the sample index
        # starts back at zero
        raise NotImplementedError
def process_buffer_stream(self, pos, data):
        # used in real-time mode, when chunks are given one after another
        raise NotImplementedError
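# Illustrative sketch (an assumption, not part of the original API): the cascade
# built by change_params() is a stack of second-order sections, so the high-pass
# stage can be reproduced directly with scipy for comparison.
def _demo_sos_highpass(sigs, sample_rate=10000., highpass_freq=300.):
    coeff_hp = scipy.signal.iirfilter(5, highpass_freq / sample_rate * 2, analog=False,
                    btype='highpass', ftype='butter', output='sos')
    return scipy.signal.sosfiltfilt(coeff_hp, sigs, axis=0)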
class SignalPreprocessor_Numpy(SignalPreprocessor_base):
"""
    This applies, chunk by chunk, to a multichannel signal:
    * baseline removal
    * high-pass filtfilt
    * normalization (optional)
"""
def process_buffer(self, data):
data = data.astype(self.output_dtype)
processed_data = scipy.signal.sosfiltfilt(self.coefficients, data, axis=0)
# TODO find why sosfiltfilt reverse strides!!!
processed_data = np.ascontiguousarray(processed_data, dtype=self.output_dtype)
# removal ref
if self.common_ref_removal:
processed_data -= np.median(processed_data, axis=1)[:, None]
#normalize
if self.normalize:
processed_data -= self.signals_medians
processed_data /= self.signals_mads
return processed_data
def process_buffer_stream(self, pos, data):
# TODO rewrite this with self.process_buffer()
#Online filtfilt
chunk = data.astype(self.output_dtype)
forward_chunk_filtered, self.zi = scipy.signal.sosfilt(self.coefficients, chunk, zi=self.zi, axis=0)
forward_chunk_filtered = forward_chunk_filtered.astype(self.output_dtype)
self.forward_buffer.new_chunk(forward_chunk_filtered, index=pos)
backward_chunk = self.forward_buffer.buffer
backward_filtered = scipy.signal.sosfilt(self.coefficients, backward_chunk[::-1, :], zi=None, axis=0)
backward_filtered = backward_filtered[::-1, :]
backward_filtered = backward_filtered.astype(self.output_dtype)
pos2 = pos-self.pad_width
if pos2<0:
return None, None
i1 = self.chunksize_1pad-self.pad_width-chunk.shape[0]
i2 = self.chunksize
assert i1<i2
data2 = backward_filtered[i1:i2]
if (pos2-data2.shape[0])<0:
data2 = data2[data2.shape[0]-pos2:]
# removal ref
if self.common_ref_removal:
data2 -= np.median(data2, axis=1)[:, None]
#normalize
if self.normalize:
data2 -= self.signals_medians
data2 /= self.signals_mads
return pos2, data2
def initialize_stream(self):
self.forward_buffer.reset()
self.zi[:] = 0
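# Streaming usage sketch (illustrative parameter values): the stream API is fed
# fixed-size chunks and returns positions that lag the input by pad_width samples,
# because the backward filtering pass needs that margin.
def _demo_stream_numpy(sigs, sample_rate=10000., chunksize=1024):
    med, mad = estimate_medians_mads_after_preprocesing(sigs, sample_rate,
                    highpass_freq=300., common_ref_removal=False)
    proc = SignalPreprocessor_Numpy(sample_rate, sigs.shape[1], chunksize, sigs.dtype)
    proc.change_params(common_ref_removal=False, highpass_freq=300., normalize=True,
                    signals_medians=med, signals_mads=mad)
    proc.initialize_stream()
    all_chunks = []
    for start in range(0, sigs.shape[0] - chunksize + 1, chunksize):
        pos2, data2 = proc.process_buffer_stream(start + chunksize, sigs[start:start + chunksize])
        if pos2 is not None:
            all_chunks.append(data2)
    return np.concatenate(all_chunks) if all_chunks else None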
class SignalPreprocessor_OpenCL(SignalPreprocessor_base, OpenCL_Helper):
"""
    Implementation in OpenCL. Depending on the hardware and on nb_channel,
    this can lead to a small speed improvement...
"""
def __init__(self,sample_rate, nb_channel, chunksize, input_dtype):
SignalPreprocessor_base.__init__(self,sample_rate, nb_channel, chunksize, input_dtype)
def _check_data(self, data):
if not data.flags['C_CONTIGUOUS'] or data.dtype!=self.output_dtype:
data = np.ascontiguousarray(data, dtype=self.output_dtype)
return data
def process_buffer(self, data):
data = self._check_data(data)
#~ print(data.shape, self.chunksize, self.chunksize_2pad, self.pad_width)
#~ assert data.shape[0] == self.chunksize_2pad
if data.shape[0] == self.chunksize_2pad:
# OK
unpad = 0
        elif data.shape[0] < self.chunksize_2pad:
            # pad the chunk with zeros up to chunksize_2pad, keeping the real samples
            unpad = self.chunksize_2pad - data.shape[0]
            data_pad = np.zeros((self.chunksize_2pad, data.shape[1]), dtype=data.dtype)
            data_pad[:data.shape[0], :] = data
            #~ print('Apply a data pad')
            data = data_pad
        else:
            raise ValueError(f'data has wrong shape {data.shape[0]} (expected at most {self.chunksize_2pad})')
event = pyopencl.enqueue_copy(self.queue, self.input_2pad_cl, data)
event = self.kern_forward_backward_filter(self.queue, (self.nb_channel,), (self.nb_channel,),
self.input_2pad_cl, self.coefficients_cl, self.zi1_cl, self.zi2_cl,
self.signals_medians_cl, self.signals_mads_cl, self.output_2pad_cl)
#~ event.wait()
event = pyopencl.enqueue_copy(self.queue, self.output_2pad, self.output_2pad_cl)
event.wait()
data2 = self.output_2pad.copy()
if self.common_ref_removal:
# at the moment common_ref_removal is done on CPU
# and so to avoid transfer normalize is also done on CPU
#TODO implement OpenCL for removal ref
if self.common_ref_removal:
data2 -= np.median(data2, axis=1)[:, None]
#normalize
if self.normalize:
# OpenCL for this when no common_ref_removal
data2 -= self.signals_medians
data2 /= self.signals_mads
if unpad > 0:
data2 = data2[:-unpad, :]
return data2
def process_buffer_stream(self, pos, data):
assert data.shape[0]==self.chunksize
data = self._check_data(data)
#Online filtfilt
event = pyopencl.enqueue_copy(self.queue, self.input_cl, data)
event = self.kern_stream_forward_backward_filter(self.queue, (self.nb_channel,), (self.nb_channel,),
self.input_cl, self.coefficients_cl, self.zi1_cl, self.zi2_cl,
self.fifo_input_backward_cl, self.signals_medians_cl, self.signals_mads_cl, self.output_backward_cl)
event.wait()
#~ event.wait()
start = pos-self.chunksize_1pad
if start<-self.pad_width:
return None, None
pos2 = pos-self.pad_width
event = pyopencl.enqueue_copy(self.queue, self.output_backward, self.output_backward_cl)
if start>0:
data2 = self.output_backward[:self.chunksize, :]
else:
data2 = self.output_backward[self.pad_width:self.chunksize, :]
data2 = data2.copy()
if self.common_ref_removal:
# at the moment common_ref_removal is done on CPU
# and so to avoid transfer normalize is also done on CPU
#TODO implement OpenCL for removal ref
if self.common_ref_removal:
data2 -= np.median(data2, axis=1)[:, None]
#normalize
if self.normalize:
# OpenCL for this when no common_ref_removal
data2 -= self.signals_medians
data2 /= self.signals_mads
return pos2, data2
def change_params(self, **kargs):
cl_platform_index=kargs.pop('cl_platform_index', None)
cl_device_index=kargs.pop('cl_device_index', None)
ctx=kargs.pop('ctx', None)
queue=kargs.pop('queue', None)
OpenCL_Helper.initialize_opencl(self,cl_platform_index=cl_platform_index, cl_device_index=cl_device_index, ctx=ctx, queue=queue)
SignalPreprocessor_base.change_params(self, **kargs)
assert self.output_dtype=='float32', 'SignalPreprocessor_OpenCL support only float32 at the moment'
assert self.pad_width<self.chunksize, 'OpenCL fifo work only for self.pad_width<self.chunksize'
self.coefficients = np.ascontiguousarray(self.coefficients, dtype=self.output_dtype)
#~ print(self.coefficients.shape)
# this is for stream processing
self.zi1 = np.zeros((self.nb_channel, self.nb_section, 2), dtype= self.output_dtype)
self.zi2 = np.zeros((self.nb_channel, self.nb_section, 2), dtype= self.output_dtype)
self.output_forward = np.zeros((self.chunksize, self.nb_channel), dtype= self.output_dtype)
self.fifo_input_backward = np.zeros((self.chunksize_1pad, self.nb_channel), dtype= self.output_dtype)
self.output_backward = np.zeros((self.chunksize_1pad, self.nb_channel), dtype= self.output_dtype)
#GPU buffers
self.coefficients_cl = pyopencl.Buffer(self.ctx, mf.READ_ONLY | mf.COPY_HOST_PTR, hostbuf=self.coefficients)
self.zi1_cl = pyopencl.Buffer(self.ctx, mf.READ_WRITE | mf.COPY_HOST_PTR, hostbuf=self.zi1)
self.zi2_cl = pyopencl.Buffer(self.ctx, mf.READ_WRITE | mf.COPY_HOST_PTR, hostbuf=self.zi2)
self.input_cl = pyopencl.Buffer(self.ctx, mf.READ_WRITE, size=self.output_forward.nbytes)
self.output_forward_cl = pyopencl.Buffer(self.ctx, mf.READ_WRITE, size=self.output_forward.nbytes)
self.fifo_input_backward_cl = pyopencl.Buffer(self.ctx, mf.READ_ONLY | mf.COPY_HOST_PTR, hostbuf=self.fifo_input_backward)
self.output_backward_cl = pyopencl.Buffer(self.ctx, mf.READ_WRITE, size=self.output_backward.nbytes)
if self.signals_medians is not None:
self.signals_medians_cl = pyopencl.Buffer(self.ctx, mf.READ_ONLY | mf.COPY_HOST_PTR, hostbuf=self.signals_medians)
self.signals_mads_cl = pyopencl.Buffer(self.ctx, mf.READ_ONLY | mf.COPY_HOST_PTR, hostbuf=self.signals_mads)
else:
self.signals_medians_cl = pyopencl.Buffer(self.ctx, mf.READ_ONLY | mf.COPY_HOST_PTR, hostbuf=np.zeros(self.nb_channel, dtype= self.output_dtype))
self.signals_mads_cl = pyopencl.Buffer(self.ctx, mf.READ_ONLY | mf.COPY_HOST_PTR, hostbuf=np.zeros(self.nb_channel, dtype= self.output_dtype))
# this is for offline processing
self.input_2pad = np.zeros((self.chunksize_2pad, self.nb_channel), dtype= self.output_dtype)
self.output_2pad = np.zeros((self.chunksize_2pad, self.nb_channel), dtype= self.output_dtype)
self.input_2pad_cl = pyopencl.Buffer(self.ctx, mf.READ_WRITE | mf.COPY_HOST_PTR, hostbuf=self.input_2pad)
self.output_2pad_cl = pyopencl.Buffer(self.ctx, mf.READ_WRITE | mf.COPY_HOST_PTR, hostbuf=self.output_2pad)
#CL prog
if not self.common_ref_removal and self.normalize:
extra_code_nomalize = _extra_code_nomalize
extra_code_nomalize2 = _extra_code_nomalize2
else:
extra_code_nomalize = ''
extra_code_nomalize2 = ''
kernel_formated = processor_kernel%dict(chunksize=self.chunksize, chunksize_1pad=self.chunksize_1pad,
chunksize_2pad=self.chunksize_2pad,
pad_width=self.pad_width, nb_section=self.nb_section, nb_channel=self.nb_channel,
extra_code_nomalize=extra_code_nomalize, extra_code_nomalize2=extra_code_nomalize2)
#~ print(kernel_formated)
prg = pyopencl.Program(self.ctx, kernel_formated)
self.opencl_prg = prg.build(options='-cl-mad-enable')
self.max_wg_size = self.ctx.devices[0].get_info(pyopencl.device_info.MAX_WORK_GROUP_SIZE)
self.kern_stream_forward_backward_filter = getattr(self.opencl_prg, 'stream_forward_backward_filter')
self.kern_forward_backward_filter = getattr(self.opencl_prg, 'forward_backward_filter')
def initialize_stream(self):
self.output_forward[:] = 0
event = pyopencl.enqueue_copy(self.queue, self.output_backward_cl, self.output_backward)
event.wait()
self.zi1[:] = 0
event = pyopencl.enqueue_copy(self.queue, self.zi1_cl, self.zi1)
event.wait()
self.zi2[:] = 0
event = pyopencl.enqueue_copy(self.queue, self.zi2_cl, self.zi2)
event.wait()
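# Hypothetical selection helper (not part of the original module): the OpenCL
# variant is only worth instantiating when pyopencl is importable; otherwise the
# numpy/scipy implementation above is the natural fallback.
def _choose_signalpreprocessor_class(prefer_opencl=False):
    if prefer_opencl and HAVE_PYOPENCL:
        return SignalPreprocessor_OpenCL
    return SignalPreprocessor_Numpy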
processor_kernel = """
#define chunksize %(chunksize)d
#define chunksize_1pad %(chunksize_1pad)d
#define chunksize_2pad %(chunksize_2pad)d
#define | |
import colorsys
import json
import socket
import logging
from enum import Enum
from .decorator import decorator
from .flow import Flow
MUSIC_PORT = 37657
_LOGGER = logging.getLogger(__name__)
@decorator
def _command(f, *args, **kw):
"""
A decorator that wraps a function and enables effects.
"""
self = args[0]
effect = kw.get("effect", self.effect)
duration = kw.get("duration", self.duration)
method, params = f(*args, **kw)
if method in ["set_ct_abx", "set_rgb", "set_hsv", "set_bright",
"set_power"]:
# Add the effect parameters.
params += [effect, duration]
result = self.send_command(method, params).get("result", [])
if result:
return result[0]
class BulbException(Exception):
"""
The exception is raised when bulb informs about errors, e.g., when
trying to issue unsupported command on the bulb.
"""
pass
class BulbType(Enum):
"""
The BulbType enum specifies bulb's type, either White or Color,
or Unknown if the properties have not been fetched yet.
"""
Unknown = -1
White = 0
Color = 1
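# Usage sketch for the class defined below (illustrative; the IP address is made
# up): a typical session turns the bulb on, then adjusts brightness and colour.
def _demo_bulb_usage(ip="192.168.0.19"):
    bulb = Bulb(ip, effect="smooth", duration=500, auto_on=True)
    bulb.turn_on()
    bulb.set_brightness(60)
    bulb.set_rgb(255, 120, 0)
    return bulb.get_properties()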
class Bulb(object):
def __init__(self, ip, port=55443, effect="smooth",
duration=300, auto_on=False):
"""
The main controller class of a physical YeeLight bulb.
:param str ip: The IP of the bulb.
:param int port: The port to connect to on the bulb.
:param str effect: The type of effect. Can be "smooth" or "sudden".
:param int duration: The duration of the effect, in milliseconds. The
minimum is 30. This is ignored for sudden effects.
:param bool auto_on: Whether to call :py:meth:`ensure_on()
<yeelight.Bulb.ensure_on>` to turn the bulb on
automatically before each operation, if it is off.
This renews the properties of the bulb before each
message, costing you one extra message per command.
Turn this off and do your own checking with
:py:meth:`get_properties()
<yeelight.Bulb.get_properties()>` or run
:py:meth:`ensure_on() <yeelight.Bulb.ensure_on>`
yourself if you're worried about rate-limiting.
"""
self._ip = ip
self._port = port
self.effect = effect
self.duration = duration
self.auto_on = auto_on
self.__cmd_id = 0 # The last command id we used.
self._last_properties = {} # The last set of properties we've seen.
self._music_mode = False # Whether we're currently in music mode.
self.__socket = None # The socket we use to communicate.
@property
def _cmd_id(self):
"""
Return the next command ID and increment the counter.
:rtype: int
"""
self.__cmd_id += 1
return self.__cmd_id - 1
@property
def _socket(self):
"Return, optionally creating, the communication socket."
if self.__socket is None:
self.__socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.__socket.settimeout(5)
self.__socket.connect((self._ip, self._port))
return self.__socket
def ensure_on(self):
"""Turn the bulb on if it is off."""
if self._music_mode is True or self.auto_on is False:
return
self.get_properties()
if self._last_properties["power"] != "on":
self.turn_on()
@property
def last_properties(self):
"""
The last properties we've seen the bulb have.
This might potentially be out of date, as there's no background listener
for the bulb's notifications. To update it, call
:py:meth:`get_properties <yeelight.Bulb.get_properties()>`.
"""
return self._last_properties
@property
def bulb_type(self):
"""
Returns BulbType :py:enum:`BulbType <yeelight.BulbType>` describing type
of the bulb. Currently this is Color or White.
When trying to access before properties are known, the bulb type is unknown.
:rtype: BulbType
:return: The bulb's type.
"""
if not self._last_properties:
return BulbType.Unknown
if not all(name in self.last_properties for name in ['ct', 'rgb', 'hue', 'sat']):
return BulbType.White
else:
return BulbType.Color
def get_properties(self):
"""
Retrieve and return the properties of the bulb, additionally updating
``last_properties``.
:returns: A dictionary of param: value items.
:rtype: dict
"""
requested_properties = [
"power", "bright", "ct", "rgb", "hue", "sat",
"color_mode", "flowing", "delayoff", "flow_params",
"music_on", "name"
]
response = self.send_command("get_prop", requested_properties)
properties = response["result"]
properties = [x if x else None for x in properties]
self._last_properties = dict(zip(requested_properties, properties))
return self._last_properties
def send_command(self, method, params=None):
"""
Send a command to the bulb.
:param str method: The name of the method to send.
:param list params: The list of parameters for the method.
:raises BulbException: When the bulb indicates an error condition.
:returns: The response from the bulb.
"""
command = {
"id": self._cmd_id,
"method": method,
"params": params,
}
_LOGGER.debug("%s > %s", self, command)
try:
self._socket.send((json.dumps(command) + "\r\n").encode("utf8"))
except socket.error:
# Some error occurred, remove this socket in hopes that we can later
# create a new one.
self.__socket.close()
self.__socket = None
raise
if self._music_mode:
# We're in music mode, nothing else will happen.
return {"result": ["ok"]}
# The bulb will send us updates on its state in addition to responses,
# so we want to make sure that we read until we see an actual response.
response = None
while response is None:
try:
data = self._socket.recv(16 * 1024)
            except socket.error:
                # Some error occurred, like above: close the socket so a fresh
                # one is created for the next command, and re-raise.
                self.__socket.close()
                self.__socket = None
                raise
for line in data.split(b"\r\n"):
if not line:
continue
try:
line = json.loads(line.decode("utf8"))
_LOGGER.debug("%s < %s", self, line)
except ValueError:
line = {"result": ["invalid command"]}
if line.get("method") != "props":
# This is probably the response we want.
response = line
else:
self._last_properties.update(line["params"])
if "error" in response:
raise BulbException(response["error"])
return response
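    # Wire-format note (illustrative example, not taken from official docs): a
    # command serialises to a single JSON object terminated by CRLF, e.g.
    #   {"id": 3, "method": "set_power", "params": ["on", "smooth", 300]}
    # and the bulb replies with {"id": 3, "result": ["ok"]} or an {"error": ...}
    # object, interleaved with unsolicited {"method": "props", ...} notifications
    # that update last_properties.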
@_command
def set_color_temp(self, degrees, **kwargs):
"""
Set the bulb's color temperature.
:param int degrees: The degrees to set the color temperature to
(1700-6500).
"""
self.ensure_on()
degrees = max(1700, min(6500, degrees))
return "set_ct_abx", [degrees]
@_command
def set_rgb(self, red, green, blue, **kwargs):
"""
Set the bulb's RGB value.
:param int red: The red value to set (0-255).
:param int green: The green value to set (0-255).
:param int blue: The blue value to set (0-255).
"""
self.ensure_on()
red = max(0, min(255, red))
green = max(0, min(255, green))
blue = max(0, min(255, blue))
return "set_rgb", [red * 65536 + green * 256 + blue]
@_command
def set_adjust(self, action, prop):
"""
Adjust a parameter.
I don't know what this is good for. I don't know how to use it, or why.
I'm just including it here for completeness, and because it was easy,
but it won't get any particular love.
:param str action: The direction of adjustment. Can be "increase",
"decrease" or "circle".
:param str prop: The property to adjust. Can be "bright" for
brightness, "ct" for color temperature and "color"
for color. The only action for "color" can be
"circle". Why? Who knows.
"""
return "set_adjust", [action, prop]
@_command
def set_hsv(self, hue, saturation, value=None, **kwargs):
"""
Set the bulb's HSV value.
:param int hue: The hue to set (0-359).
:param int saturation: The saturation to set (0-100).
:param int value: The value to set (0-100). If omitted, the bulb's
brightness will remain the same as before the
change.
"""
self.ensure_on()
# We fake this using flow so we can add the `value` parameter.
hue = max(0, min(359, hue))
saturation = max(0, min(100, saturation))
if value is None:
# If no value was passed, use ``set_hsv`` to preserve luminance.
return "set_hsv", [hue, saturation]
else:
# Otherwise, use flow.
value = max(0, min(100, value))
if kwargs.get("effect", self.effect) == "sudden":
duration = 50
else:
duration = kwargs.get("duration", self.duration)
hue = max(0, min(359, hue)) / 359.0
saturation = max(0, min(100, saturation)) / 100.0
red, green, blue = [int(round(col * 255)) for col in colorsys.hsv_to_rgb(hue, saturation, 1)]
rgb = red * 65536 + green * 256 + blue
return "start_cf", [1, 1, "%s, 1, %s, %s" % (duration, rgb, value)]
@_command
def set_brightness(self, brightness, **kwargs):
"""
Set the bulb's brightness.
:param int brightness: The brightness value to set (1-100).
"""
self.ensure_on()
brightness = int(max(1, min(100, brightness)))
return "set_bright", [brightness]
@_command
def turn_on(self, **kwargs):
"Turn the bulb on."
return "set_power", ["on"]
@_command
def turn_off(self, **kwargs):
"Turn the bulb off."
return "set_power", ["off"]
@_command
def toggle(self):
"Toggle the bulb on or off."
return "toggle", []
@_command
def set_default(self):
"Set the bulb's current state as default."
return "set_default", []
@_command
def set_name(self, name):
"""
Set the bulb's name.
:param str name: The string you want to set as the bulb's name.
"""
return "set_name", [name]
@_command
def start_flow(self, flow):
"""
Start a flow.
:param yeelight.Flow flow: The Flow instance to start.
"""
if not isinstance(flow, Flow):
raise ValueError("Argument is not a Flow instance.")
self.ensure_on()
return "start_cf", [flow.count * len(flow.transitions), flow.action.value, flow.expression]
@_command
def stop_flow(self):
"""Stop a flow."""
return "stop_cf", []
def start_music(self):
"""
Start music mode.
Music mode essentially upgrades | |
which region will reside
:type division_name: String
:param data: The update action. QueueUpdateRequest() can be used for this argument also.
:type data: Dict[str, Any]
:rtype: Tuple[Union[Dict[str, Any], List[Dict[str, Any]]], requests.Response]
:raises: Union[LoginError, OperationError]
"""
#
# Roles
#
@api_request(u'get', u'roles', (u'1.1', u'1.2', u'1.3',))
def get_roles(self):
"""
Get Roles.
:ref:`to-api-roles`
:rtype: Tuple[Union[Dict[str, Any], List[Dict[str, Any]]], requests.Response]
:raises: Union[LoginError, OperationError]
"""
#
# Server
#
@api_request(u'get', u'servers', (u'1.1', u'1.2', u'1.3',))
def get_servers(self, query_params=None):
"""
Get Servers.
:ref:`to-api-servers`
:param query_params: The optional url query parameters for the call
:type query_params: Dict[str, Any]
:rtype: Tuple[Union[Dict[str, Any], List[Dict[str, Any]]], requests.Response]
:raises: Union[LoginError, OperationError]
"""
@api_request(u'get', u'servers/{server_id:d}', (u'1.1', u'1.2', u'1.3',))
def get_server_by_id(self, server_id=None):
"""
Get Server by Server ID
:ref:`to-api-servers-id`
:param server_id: The server id to retrieve
:type server_id: int
:rtype: Tuple[Union[Dict[str, Any], List[Dict[str, Any]]], requests.Response]
:raises: Union[LoginError, OperationError]
"""
@api_request(u'get', u'servers/{server_id:d}/deliveryservices', (u'1.1', u'1.2', u'1.3',))
def get_server_delivery_services(self, server_id=None):
"""
Retrieves all delivery services assigned to the server
:ref:`to-api-servers-id-deliveryservices`
:param server_id: The server id to retrieve
:type server_id: int
:rtype: Tuple[Union[Dict[str, Any], List[Dict[str, Any]]], requests.Response]
:raises: Union[LoginError, OperationError]
"""
@api_request(u'get', u'servers/total', (u'1.1', u'1.2', u'1.3',))
def get_server_type_count(self):
"""
Retrieves a count of CDN servers by type
:ref:`to-api-servers-total`
:rtype: Tuple[Union[Dict[str, Any], List[Dict[str, Any]]], requests.Response]
:raises: Union[LoginError, OperationError]
"""
@api_request(u'get', u'servers/status', (u'1.1', u'1.2', u'1.3',))
def get_server_status_count(self):
"""
Retrieves a count of CDN servers by status
:ref:`to-api-servers-status`
:rtype: Tuple[Union[Dict[str, Any], List[Dict[str, Any]]], requests.Response]
:raises: Union[LoginError, OperationError]
"""
@api_request(u'get', u'servers/hostname/{name}/details', (u'1.1', u'1.2', u'1.3',))
def get_server_details(self, name=None):
"""
Get server details from trafficOps
:ref:`to-api-servers-hostname-name-details`
		:param name: The server hostname
		:type name: str
:rtype: Tuple[Union[Dict[str, Any], List[Dict[str, Any]]], requests.Response]
:raises: Union[LoginError, OperationError]
"""
@api_request(u'post', u'servercheck', (u'1.1', u'1.2', u'1.3',))
def create_servercheck(self, data=None):
"""
Post a server check result to the serverchecks table.
:ref:`to-api-servercheck`
:param data: The parameter data to use for server creation
:type data: Dict[str, Any]
:rtype: Tuple[Union[Dict[str, Any], List[Dict[str, Any]]], requests.Response]
:raises: Union[LoginError, OperationError]
"""
@api_request(u'post', u'servers', (u'1.1', u'1.2', u'1.3',))
def create_server(self, data=None):
"""
Create a new Server.
:ref:`to-api-servers`
:param data: The parameter data to use for server creation
:type data: Dict[str, Any]
:rtype: Tuple[Union[Dict[str, Any], List[Dict[str, Any]]], requests.Response]
:raises: Union[LoginError, OperationError]
"""
@api_request(u'put', u'servers/{server_id:d}', (u'1.1', u'1.2', u'1.3',))
def update_server_by_id(self, server_id=None, data=None):
"""
Update a Server by Id.
:ref:`to-api-servers-id`
:param server_id: The server Id
:type server_id: int
:param data: The parameter data to edit
:type data: Dict[str, Any]
:rtype: Tuple[Union[Dict[str, Any], List[Dict[str, Any]]], requests.Response]
:raises: Union[LoginError, OperationError]
"""
@api_request(u'put', u'servers/{server_id:d}/status', (u'1.1', u'1.2', u'1.3',))
def update_server_status_by_id(self, server_id=None, data=None):
"""
Update server_status by Id.
:ref:`to-api-servers-id-status`
:param server_id: The server Id
:type server_id: int
:status: https://traffic-control-cdn.readthedocs.io/en/latest/api/server.html
:rtype: Tuple[Union[Dict[str, Any], List[Dict[str, Any]]], requests.Response]
:raises: Union[LoginError, OperationError]
"""
@api_request(u'delete', u'servers/{server_id:d}', (u'1.1', u'1.2', u'1.3',))
def delete_server_by_id(self, server_id=None):
"""
Delete a Server by Id.
:ref:`to-api-servers-id`
:param server_id: The server Id
:type server_id: int
:rtype: Tuple[Union[Dict[str, Any], List[Dict[str, Any]]], requests.Response]
:raises: Union[LoginError, OperationError]
"""
@api_request(u'post', u'servers/{server_id:d}/queue_update', (u'1.1', u'1.2', u'1.3',))
def servers_queue_update(self, server_id=None, data=None):
"""
Queue Updates by Server Id.
:ref:`to-api-servers-id-queue_update`
:param server_id: The server Id
:type server_id: int
:param data: The update action. QueueUpdateRequest() can be used for this argument also.
:type data: Dict[str, Any]
:rtype: Tuple[Dict[str, Any], requests.Response]
:raises: Union[LoginError, OperationError]
"""
#
# Static DNS Entries
#
@api_request(u'get', u'staticdnsentries', (u'1.1', u'1.2', ))
def get_static_dns_entries(self):
"""
Get Static DNS Entries.
:ref:`to-api-staticdnsentries`
:rtype: Tuple[Union[Dict[str, Any], List[Dict[str, Any]]], requests.Response]
:raises: Union[LoginError, OperationError]
"""
@api_request(u'get', u'staticdnsentries', (u'1.1', u'1.2', u'1.3',))
def get_staticdnsentries(self, query_params=None):
"""
Get static DNS entries associated with the delivery service
:ref:`to-api-staticdnsentries`
:param query_params: The optional url query parameters for the call
:type query_params: Dict[str, Any]
:rtype: Tuple[Union[Dict[str, Any], List[Dict[str, Any]]], requests.Response]
:raises: Union[LoginError, OperationError]
"""
@api_request(u'post', u'staticdnsentries', (u'1.3',))
def create_staticdnsentries(self, data=None):
"""
Create static DNS entries associated with the delivery service
:ref:`to-api-staticdnsentries`
		:param data: The parameter data to use for static DNS entry creation
:type data: Dict[str, Any]
:rtype: Tuple[Union[Dict[str, Any], List[Dict[str, Any]]], requests.Response]
:raises: Union[LoginError, OperationError]
"""
@api_request(u'put', u'staticdnsentries', (u'1.3',))
def update_staticdnsentries(self, data=None, query_params=None):
"""
Update static DNS entries associated with the delivery service
:ref:`to-api-staticdnsentries`
		:param data: The parameter data to use for the static DNS entry update
:type data: Dict[str, Any]
:param query_params: The optional url query parameters for the call
:type query_params: Dict[str, Any]
:rtype: Tuple[Union[Dict[str, Any], List[Dict[str, Any]]], requests.Response]
:raises: Union[LoginError, OperationError]
"""
@api_request(u'delete', u'staticdnsentries', (u'1.3',))
def delete_staticdnsentries(self, query_params=None):
"""
Delete static DNS entries associated with the delivery service
:ref:`to-api-staticdnsentries`
:param query_params: The optional url query parameters for the call
:type query_params: Dict[str, Any]
:rtype: Tuple[Union[Dict[str, Any], List[Dict[str, Any]]], requests.Response]
:raises: Union[LoginError, OperationError]
"""
#
# Status
#
@api_request(u'get', u'statuses', (u'1.1', u'1.2', u'1.3',))
def get_statuses(self):
"""
Retrieves a list of the server status codes available.
:ref:`to-api-statuses`
:rtype: Tuple[Union[Dict[str, Any], List[Dict[str, Any]]], requests.Response]
:raises: Union[LoginError, OperationError]
"""
@api_request(u'get', u'statuses/{status_id:d}', (u'1.1', u'1.2', u'1.3',))
def get_statuses_by_id(self, status_id=None):
"""
Retrieves a server status by ID.
:ref:`to-api-statuses-id`
:param status_id: The status id to retrieve
:type status_id: int
:rtype: Tuple[Union[Dict[str, Any], List[Dict[str, Any]]], requests.Response]
:raises: Union[LoginError, OperationError]
"""
#
# System
#
@api_request(u'get', u'system/info', (u'1.1', u'1.2', u'1.3',))
def get_system_info(self):
"""
Get information on the traffic ops system.
:ref:`to-api-system-info`
:rtype: Tuple[Union[Dict[str, Any], List[Dict[str, Any]]], requests.Response]
:raises: Union[LoginError, OperationError]
"""
#
# Tenants
#
@api_request(u'get', u'tenants', (u'1.1', u'1.2', u'1.3',))
def get_tenants(self):
"""
Get all tenants.
:ref:`to-api-tenants`
:rtype: Tuple[Union[Dict[str, Any], List[Dict[str, Any]]], requests.Response]
:raises: Union[LoginError, OperationError]
"""
@api_request(u'get', u'tenants/{tenant_id:d}', (u'1.1', u'1.2', u'1.3',))
def get_tenant_by_id(self, tenant_id=None):
"""
Get a tenant by ID.
:ref:`to-api-tenants-id`
:param tenant_id: The tenant to retrieve
:type tenant_id: int
:rtype: Tuple[Union[Dict[str, Any], List[Dict[str, Any]]], requests.Response]
:raises: Union[LoginError, OperationError]
"""
@api_request(u'put', u'tenants/{tenant_id:d}', (u'1.1', u'1.2', u'1.3',))
def update_tenant(self, tenant_id=None):
"""
Update a tenant
:ref:`to-api-tenants-id`
:param tenant_id: The tenant to update
:type tenant_id: int
:rtype: Tuple[Union[Dict[str, Any], List[Dict[str, Any]]], requests.Response]
:raises: Union[LoginError, OperationError]
"""
@api_request(u'post', u'tenants', (u'1.1', u'1.2', u'1.3',))
def create_tenant(self, data=None):
"""
Create a tenant
:ref:`to-api-tenants`
		:param data: The parameter data to use for tenant creation
:type data: Dict[str, Any]
:rtype: Tuple[Union[Dict[str, Any], List[Dict[str, Any]]], requests.Response]
:raises: Union[LoginError, OperationError]
"""
#
# TO Extensions
#
@api_request(u'get', u'to_extensions', (u'1.1', u'1.2', u'1.3',))
def get_to_extensions(self):
"""
Retrieves the list of extensions.
:ref:`to-api-to_extensions`
:rtype: Tuple[Union[Dict[str, Any], List[Dict[str, Any]]], requests.Response]
:raises: Union[LoginError, OperationError]
"""
@api_request(u'post', u'to_extensions', (u'1.1', u'1.2', u'1.3',))
def create_to_extension(self, data=None):
"""
Creates a Traffic Ops extension.
:ref:`to-api-to_extensions`
		:param data: The parameter data to use for extension creation
:type data: Dict[str, Any]
:rtype: Tuple[Union[Dict[str, Any], List[Dict[str, Any]]], requests.Response]
:raises: Union[LoginError, OperationError]
"""
@api_request(u'post', u'to_extensions/{extension_id:d}/delete', (u'1.1', u'1.2', u'1.3',))
def delete_to_extension(self, extension_id=None):
"""
Deletes a Traffic Ops extension.
:ref:`to-api-to_extensions-id-delete`
:param extension_id: The extension id to delete
:type extension_id: int
:rtype: Tuple[Union[Dict[str, Any], List[Dict[str, Any]]], requests.Response]
:raises: Union[LoginError, OperationError]
"""
#
# Types
#
@api_request(u'get', u'types', (u'1.1', u'1.2', u'1.3',))
def get_types(self, query_params=None):
"""
Get Data Types.
:ref:`to-api-types`
:rtype: Tuple[Union[Dict[str, Any], List[Dict[str, Any]]], requests.Response]
:raises: Union[LoginError, OperationError]
"""
@api_request(u'get', u'types/trimmed', (u'1.1', u'1.2', u'1.3',))
def get_types_only_names(self):
"""
Get Data Types with only the Names
:ref:`to-api-types-trimmed`
:rtype: Tuple[Union[Dict[str, Any], List[Dict[str, Any]]], requests.Response]
:raises: Union[LoginError, OperationError]
"""
@api_request(u'get', u'types/{type_id:d}', (u'1.1', u'1.2', u'1.3',))
def get_type_by_id(self, type_id=None):
"""
Get Data Type with the given type id
:ref:`to-api-types-id`
:param type_id: The ID of the type to retrieve
:type type_id: int
:rtype: Tuple[Union[Dict[str, Any], List[Dict[str, Any]]], requests.Response]
:raises: Union[LoginError, OperationError]
"""
#
# Users
#
@api_request(u'get', u'users', (u'1.1', u'1.2', u'1.3',))
def get_users(self):
"""
Retrieves all users.
:ref:`to-api-users`
:rtype: Tuple[Union[Dict[str, Any], List[Dict[str, Any]]], requests.Response]
:raises: Union[LoginError, OperationError]
"""
@api_request(u'get', u'users/{user_id:d}', (u'1.1', u'1.2', u'1.3',))
def get_user_by_id(self, user_id=None):
"""
Retrieves user by ID.
:ref:`to-api-users-id`
:param user_id: The user to retrieve
:type user_id: int
:rtype: Tuple[Union[Dict[str, Any], List[Dict[str, Any]]], requests.Response]
:raises: Union[LoginError, OperationError]
"""
@api_request(u'post', u'users', (u'1.1', u'1.2', u'1.3',))
def create_user(self, data=None):
"""
Create a user.
:ref:`to-api-users`
		:param data: The parameter data to use for user creation
:type data: Dict[str, Any]
:rtype: Tuple[Union[Dict[str, Any], List[Dict[str, Any]]], requests.Response]
:raises: Union[LoginError, OperationError]
"""
@api_request(u'post', u'users/register', (u'1.1', u'1.2', u'1.3',))
def create_user_with_registration(self, data=None):
"""
Register a user and send registration email
:ref:`to-api-users-register`
		:param data: The parameter data to use for user registration
:type data: Dict[str, Any]
:rtype: Tuple[Union[Dict[str, Any], List[Dict[str, Any]]], requests.Response]
:raises: Union[LoginError, OperationError]
"""
@api_request(u'get', u'users/{user_id:d}/deliveryservices', (u'1.1', u'1.2', u'1.3',))
def get_user_delivery_services(self, user_id=None):
"""
Retrieves all delivery services assigned to the user.
:ref:`to-api-users-id-deliveryservices`
:param user_id: The user to retrieve
:type user_id: int
:rtype: Tuple[Union[Dict[str, Any], List[Dict[str, Any]]], requests.Response]
:raises: Union[LoginError, OperationError]
"""
@api_request(u'get', u'user/current', (u'1.1', u'1.2', u'1.3',))
def get_authenticated_user(self):
"""
Retrieves the profile for the authenticated user.
:ref:`to-api-user-current`
:rtype: Tuple[Union[Dict[str, Any], List[Dict[str, Any]]], requests.Response]
:raises: Union[LoginError, OperationError]
"""
@api_request(u'get', u'user/current/jobs', (u'1.1', u'1.2', u'1.3',))
def get_authenticated_user_jobs(self):
"""
Retrieves the user's list of jobs.
:ref:`to-api-user-current-jobs`
:rtype: Tuple[Union[Dict[str, Any], List[Dict[str, Any]]], requests.Response]
:raises: Union[LoginError, OperationError]
"""
@api_request(u'post', u'user/current/jobs', (u'1.1', u'1.2', u'1.3',))
def create_invalidation_job(self, data=None):
"""
Invalidating content on the CDN is sometimes necessary when the origin was mis-configured
and something is cached in the CDN that needs to be removed. Given the size of a typical
Traffic Control CDN and the amount of content that can be cached in it, removing the content
from all the caches may take a long time. To speed up content invalidation, Traffic Ops will
not try to remove the content from the caches, but it makes the content inaccessible using
the `regex_revalidate ATS plugin <https://docs.trafficserver.apache.org/en/latest/admin-guide/plugins/regex_revalidate.en.html>`_
This forces a revalidation of the content, rather than a new get.
:ref:`to-api-user-current-jobs`
		:param data: The parameter data to use for the content invalidation job
:type data: Dict[str, Any]
:rtype: Tuple[Union[Dict[str, Any], List[Dict[str, Any]]], requests.Response]
:raises: Union[LoginError, OperationError]
"""
#
# Snapshot CRConfig
#
@api_request(u'get', u'cdns/{cdn_name}/snapshot', (u'1.2', u'1.3',))
def get_current_snapshot_crconfig(self, cdn_name=None):
"""
Retrieves the CURRENT snapshot for a CDN which doesn't necessarily represent the current
state of the CDN. The contents of this snapshot are currently used by Traffic Monitor and
Traffic Router.
:ref:`to-api-cdns-name-snapshot`
:param cdn_name: The CDN name
:type cdn_name: str
:rtype: Tuple[Dict[str, Any], requests.Response]
:raises: Union[LoginError, OperationError]
"""
@api_request(u'get', u'cdns/{cdn_name}/snapshot/new', (u'1.2', u'1.3',))
def get_pending_snapshot_crconfig(self, cdn_name=None):
"""
Retrieves a PENDING snapshot for a CDN which represents the current state of the CDN. The
contents of this snapshot are NOT currently used by Traffic Monitor and Traffic Router. Once
a snapshot is performed, this snapshot will become the CURRENT snapshot and will be used by
Traffic Monitor and Traffic Router.
:ref:`to-api-cdns-name-snapshot-new`
:param cdn_name: The CDN name
:type cdn_name: str
:rtype: Tuple[Dict[str, Any], requests.Response]
:raises: Union[LoginError, OperationError]
"""
@api_request(u'put', u'snapshot/{cdn_name}', (u'1.2', u'1.3',))
def snapshot_crconfig(self, cdn_name=None):
"""
Snapshot CRConfig by CDN Name.
:ref:`to-api-snapshot-name`
:param cdn_name: The CDN name
:type cdn_name: str
:rtype: Tuple[Dict[str, Any], requests.Response]
:raises: Union[LoginError, OperationError]
"""
#
# Coordinate
#
@api_request(u'get', u'coordinates', (u'1.3',))
def get_coordinates(self, query_params=None):
"""
Get all coordinates associated with the cdn
:ref:`to-api-coordinates`
:param query_params: The optional url query parameters for the call
:type query_params: Dict[str, Any]
:rtype: Tuple[Union[Dict[str, Any], List[Dict[str, Any]]], requests.Response]
:raises: Union[LoginError, OperationError]
"""
	@api_request(u'post', u'coordinates', (u'1.3',))
def create_coordinates(self, data=None):
"""
Create coordinates
:ref:`to-api-coordinates`
:param data: The update action. QueueUpdateRequest() can be used for this argument also.
:type data: Dict[str, Any]
:rtype: | |
OFF_pt[i * delta_u + j] = pt[i * delta_u + j] + (0.1 * normals[i * delta_u + j])
# #
# OFF_pt_2[i * delta_u + j] = OFF_pt[i * delta_u + j] + (0.1 * normals[i * delta_u + j])
pass
# count = np.sum(edge_bool)
return pt, normals
pass
def compute_surf_offset(off_layers, ctrl_pts, knot_u, knot_v, degree_u, degree_v):
delta_u = 64
delta_v = 64
grid_1, grid_2 = np.meshgrid(np.linspace(0.0, 1.0, delta_u), np.linspace(0.0, 1.0, delta_v))
off_pts = np.empty([off_layers, grid_1.shape[0], grid_1.shape[1], 3], dtype=np.float32)
thickness = 5.0
for i in range(0, grid_1.shape[0]):
for j in range(0, grid_1.shape[1]):
# ptS, normals = compute_normal_surface(ctrl_pts, knot_u, knot_v, grid_1, grid_2)
pt = surf_pt(grid_1[i][j], grid_2[i][j], ctrl_pts, knot_u, knot_v, degree_u, degree_v)
deri = deri_surf(grid_1[i][j], grid_2[i][j], 1, ctrl_pts, knot_u, knot_v, degree_u, degree_v)
temp = np.cross(deri[0][1], deri[1][0])
normals = temp / np.linalg.norm(temp)
# if pt[1] < 12:
# thickness = 0.5
# elif 40 > pt[1] > 12:
# thickness = 0.25
# else:
# thickness = 0.1
for layer in range(0, off_layers):
off_pts[layer][i][j] = pt + (thickness * layer * normals)
return off_pts
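# Minimal sketch (assumptions: surf_pt/deri_surf accept a flat (count_u*count_v, 3)
# control net ordered u-major and NumPy knot vectors, as used above): offset a
# single flat bicubic patch into two layers along its surface normals.
def _demo_compute_surf_offset():
    count_u = count_v = 4
    degree_u = degree_v = 3
    knot = np.array([0.0, 0.0, 0.0, 0.0, 1.0, 1.0, 1.0, 1.0])
    u, v = np.meshgrid(np.linspace(0.0, 3.0, count_u), np.linspace(0.0, 3.0, count_v), indexing='ij')
    ctrl_pts = np.stack([u.ravel(), v.ravel(), np.zeros(count_u * count_v)], axis=1)
    return compute_surf_offset(2, ctrl_pts, knot, knot, degree_u, degree_v)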
def compute_volumes(ctrl_pts, Count_u, Count_v, Edge_point_count):
map_size = 4
normals = np.empty([ctrl_pts.shape[0], ctrl_pts.shape[1], 3])
    edge_pts_idx = np.empty([ctrl_pts.shape[0], Edge_point_count, map_size], dtype=int)
knot_u = [0.0, 0.0, 0.0, 0.0, 1.0, 1.0, 1.0, 1.0]
knot_v = [0.0, 0.0, 0.0, 0.0, 1.0, 1.0, 1.0, 1.0]
for i in range(0, ctrl_pts.shape[0]): # ctrl_pts.shape[0]):
normals[i, :], edge_pts_idx[i, :] = compute_cntrlpts_normals(ctrl_pts[i], Count_u, Count_v,
Edge_point_count, map_size)
edge_pts_idx = map_edge_points(ctrl_pts, edge_pts_idx)
normals = normals_reassign(edge_pts_idx, normals)
compute_model_offset(ctrl_pts, normals, Count_u, Count_v, 3, knot_u, knot_v, off_layer=2, thickness=10)
pass
def compute_model_offset(cntrlpts, cntrlpts_normals, Count_u, Count_v, Degree, knot_u, knot_v, off_layer, thickness, ):
ctrlpts_offset = np.empty([cntrlpts.shape[0], cntrlpts.shape[1], 3], dtype=np.float32)
msurf = multi.SurfaceContainer()
msurf.delta = 0.05
Multi = multi.SurfaceContainer()
Multi.delta = 0.1
for i in range(0, cntrlpts_normals.shape[0]):
# Multi.delta_u = 0.1
# Multi.delta_v = 0.1
for off in range(1, off_layer):
ctrlpts_offset[i] = compute_cntrlpt_offset(cntrlpts[i], cntrlpts_normals[i], off, thickness)
# surf = NURBS.Surface()
# surf.delta = 0.1
# # surf.delta_u = 0.1
# # surf.delta_v = 0.1
#
# surf.degree_u = 3
# surf.degree_v = 3
#
# surf.set_ctrlpts(ctrlpts_offset[i].tolist(), Count_u, Count_v)
#
# surf.knotvector_u = knot_u
# surf.knotvector_v = knot_v
#
# Multi.add(surf)
# for i in range(1, off_layer):
# volume = construct.construct_volume('w', Multi[i - 1], Multi[i], degree=1)
# nvolume = convert.bspline_to_nurbs(volume)
#
# surfvol = construct.extract_isosurface(nvolume)
# msurf.add(surfvol)
# print(len(msurf))
# # vis_config = VisMpL.VisConfig(legend=False, axes=False, ctrlpts=False)
# msurf.vis = vis.VisSurface(vis.VisConfig(ctrlpts=False, figure_size=[1024, 768]))
# msurf.render() # , plot=False, filename="VLM_3DBeam_Before.pdf")
# exchange.export_obj(Multi, 'Wind_blade_%d_layers.obj' % off_layer)
# exchange.export_obj(Multi, 'HeartModel_layers_%d.obj' % off_layer)
# exchange.export_obj(msurf, 'Blade_combined_layer.obj')
# exchange.export_csv(Multi, 'WindBlade_surfpoint.csv', point_type='evalpts')
# vis_config = VisMpL.VisConfig(legend=False, axes=False, ctrlpts=False)
# vis_comp = VisMpL.VisSurface(vis_config)
# Multi.vis = vis.VisSurface(vis.VisConfig(ctrlpts=False, figure_size=[1024, 768]))
# Multi.render()
return ctrlpts_offset
pass
def compute_cntrlpt_offset(cntrlpts, cntrlpts_normals, off_layer, thickness):
off_cntrlpts = np.empty(cntrlpts.shape)
for i in range(0, cntrlpts.shape[0]):
off_cntrlpts[i][0:3] = (thickness * off_layer * cntrlpts_normals[i]) + cntrlpts[i][0:3]
# off_cntrlpts[i][3] = 1.0
return off_cntrlpts
def map_edge_points(cntrlpts, cntrlpts_edge_map):
for i in range(0, cntrlpts.shape[0]):
for j in range(0, cntrlpts.shape[0]):
if i != j:
for k in range(0, cntrlpts_edge_map[i].shape[0]):
pt1 = cntrlpts[i][cntrlpts_edge_map[i][k][0]]
for l in range(0, cntrlpts_edge_map[j].shape[0]):
pt2 = cntrlpts[j][cntrlpts_edge_map[j][l][0]]
if np.linalg.norm(pt1 - pt2) == 0:
cntrlpts_edge_map[i][k][1] += 1
if cntrlpts_edge_map[i][k][1] == 6:
print("5 Common")
cntrlpts_edge_map[i][k][(2 * cntrlpts_edge_map[i][k][1])] = j
cntrlpts_edge_map[i][k][(2 * cntrlpts_edge_map[i][k][1]) + 1] = cntrlpts_edge_map[j][l][0]
# if cntrlpts_edge_idk[i][k][1] == 0 and cntrlpts_edge_idk[j][l][1] == 0:
# # cntrlpts_edge_idk[i][k][1] = 1
# # cntrlpts_edge_idk[j][l][1] = 1
#
# temp_norm = (cntrlpts_normals[i][cntrlpts_edge_idk[i][k][0]] +
# cntrlpts_normals[j][cntrlpts_edge_idk[j][l][0]])
# temp_norm_mag = np.linalg.norm(temp_norm)
# if temp_norm_mag != 0.0:
# cntrlpts_normals[i][cntrlpts_edge_idk[i][k][0]] = temp_norm / temp_norm_mag
# cntrlpts_normals[j][cntrlpts_edge_idk[j][l][0]] = temp_norm / temp_norm_mag
# else:
# cntrlpts_normals[i][cntrlpts_edge_idk[i][k][0]] = temp_norm
# cntrlpts_normals[j][cntrlpts_edge_idk[j][l][0]] = temp_norm
return cntrlpts_edge_map
pass
def normals_reassign(cntrlpts_edge_map, cntrlpts_normals):
for i in range(0, len(cntrlpts_edge_map)):
for j in range(0, cntrlpts_edge_map[i].shape[0]):
temp = cntrlpts_normals[i][cntrlpts_edge_map[i][j][0]]
for k in range(0, cntrlpts_edge_map[i][j][1]):
idx_patch = cntrlpts_edge_map[i][j][(k + 1) * 2]
idx_cntrl_pt = cntrlpts_edge_map[i][j][((k + 1) * 2) + 1]
temp += cntrlpts_normals[idx_patch][idx_cntrl_pt]
temp_norm = np.linalg.norm(temp)
if temp_norm != 0.0:
temp = temp / temp_norm
# cntrlpts_normals[i][cntrlpts_edge_map[i][j][0]] = temp / temp_norm
cntrlpts_normals[i][cntrlpts_edge_map[i][j][0]] = temp
for k in range(0, cntrlpts_edge_map[i][j][1]):
idx_patch = cntrlpts_edge_map[i][j][(k + 1) * 2]
idx_cntrl_pt = cntrlpts_edge_map[i][j][((k + 1) * 2) + 1]
cntrlpts_normals[idx_patch][idx_cntrl_pt] = temp
pass
return cntrlpts_normals
pass
def compute_cntrlpts_normals(ctrl_pts, count_u, count_v, edge_pts_count, map_size):
vec_combo = [[0, 1], [1, 2], [2, 3], [3, 0]]
normals_res = np.empty([ctrl_pts.shape[0], 3])
edge_pts_idx = np.empty([edge_pts_count, map_size], dtype=np.int32)
count = 0
for i in range(0, ctrl_pts.shape[0]):
adj_pts = np.empty(4, dtype=np.int16)
normals = np.zeros([4, 3])
adj_pts[0] = i + count_v
adj_pts[1] = i + 1
adj_pts[2] = i - count_v
adj_pts[3] = i - 1
if adj_pts[0] >= count_u * count_v:
adj_pts[0] = -1
if adj_pts[1] == count_v * ((i // count_v) + 1):
adj_pts[1] = -1
if adj_pts[2] < 0:
adj_pts[2] = -1
if adj_pts[3] == (count_v * (i // count_v)) - 1:
adj_pts[3] = -1
for vec in range(0, 4):
if adj_pts[vec_combo[vec][0]] != -1 and adj_pts[vec_combo[vec][1]] != -1:
normals[vec] = unit_normal(np.array(ctrl_pts[i, 0:3]),
np.array(ctrl_pts[adj_pts[vec_combo[vec][0]], 0:3]),
np.array(ctrl_pts[adj_pts[vec_combo[vec][1]], 0:3]))
res_vec = [np.sum(normals[:, 0]), np.sum(normals[:, 1]), np.sum(normals[:, 2])]
if np.linalg.norm(res_vec) != 0.0:
normals_res[i] = res_vec / np.linalg.norm(res_vec)
else:
normals_res[i] = np.array(res_vec)
if np.any(adj_pts == -1):
edge_pts_idx[count, 0] = i
edge_pts_idx[count, 1] = 0
edge_pts_idx[count, 2:map_size] = -1
count += 1
pass
return normals_res, edge_pts_idx
def unit_normal(pt_0, pt_1, pt_2):
a = pt_1[0:3] - pt_0[0:3]
b = pt_2[0:3] - pt_0[0:3]
normal = np.zeros(3)
if np.all(a == 0.0) or np.all(b == 0.0):
return normal
else:
normal = np.cross(a, b)
return normal
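# Quick check (illustrative): despite its name, unit_normal returns the raw cross
# product; callers accumulate and normalise the result themselves.
def _demo_unit_normal():
    n = unit_normal(np.array([0.0, 0.0, 0.0]),
                    np.array([1.0, 0.0, 0.0]),
                    np.array([0.0, 1.0, 0.0]))
    return n / np.linalg.norm(n)  # -> array([0., 0., 1.])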
# A3.6
def deri_surf(u, v, order, p, knot_u, knot_v, degree_u, degree_v):
d = np.array([min(3, order), min(3, order)])
count = knot_v.shape[0] - degree_v - 1
span_u = span_linear(knot_u.shape[0] - degree_u - 1, knot_u, u)
span_v = span_linear(knot_v.shape[0] - degree_v - 1, knot_v, v)
skl = np.zeros([2, 2, 3])
ders_u = basis_deri(u, order, span_u, degree_u, knot_u)
ders_v = basis_deri(v, order, span_v, degree_v, knot_v)
for k in range(0, d[0] + 1):
temp = np.zeros([4, 3])
for s in range(0, degree_v + 1):
for r in range(0, degree_u + 1):
temp[s][0] = temp[s][0] + ders_u[k][r] * p[((span_u - degree_u + r) * count) + (span_v - degree_v + s)][
0]
temp[s][1] = temp[s][1] + ders_u[k][r] * p[((span_u - degree_u + r) * count) + (span_v - degree_v + s)][
1]
temp[s][2] = temp[s][2] + ders_u[k][r] * p[((span_u - degree_u + r) * count) + (span_v - degree_v + s)][
2]
dd = min(order - k, d[1])
for l in range(0, dd + 1):
# skl[(k * 3) + l][0] = 0.0 ; skl[(k * 3) + l][1] = 0.0 ; skl[(k * 3) + l][2] = 0.0
for s in range(0, degree_v + 1):
skl[k][l][0] = skl[k][l][0] + (ders_v[l][s] * temp[s][0])
skl[k][l][1] = skl[k][l][1] + (ders_v[l][s] * temp[s][1])
skl[k][l][2] = skl[k][l][2] + (ders_v[l][s] * temp[s][2])
return skl
def basis_deri(u, order, span, degree, knot_v):
left = np.empty([4], dtype=np.float32)
right = np.empty([4], dtype=np.float32)
ndu = np.full([4, 4], 1.0) # ndu[0][0] = 1.0
ders = np.zeros([2, 4])
for j in range(1, degree + 1):
left[j] = u - knot_v[span + 1 - j]
right[j] = knot_v[span + j] - u
saved = 0.0
for r in range(0, j):
ndu[j][r] = right[r + 1] + left[j - r]
temp = ndu[r][j - 1] / ndu[j][r]
ndu[r][j] = saved + (right[r + 1] * temp)
saved = left[j - r] * temp
ndu[j][j] = saved
for j in range(0, degree + 1):
ders[0][j] = ndu[j][degree]
a = np.full([4, 2], 1.0)
for r in range(0, degree + 1):
s1 = 0
s2 = 1
a[0][0] = 1.0
for k in range(1, order + 1):
d = 0.0
rk = r - k
pk = degree - k
if r >= k:
a[s2][0] = a[s1][0] / ndu[pk + 1][rk]
d = a[s2][0] * ndu[rk][pk]
if rk >= -1:
j1 = 1
else:
j1 = -rk
if (r - 1) <= pk:
j2 = k - 1
else:
j2 = degree - r
for j in range(j1, j2 + 1):
a[s2][j] = (a[s1][j] - a[s1][j - 1]) / ndu[pk + 1][rk + j]
d += (a[s2][j] | |
#!/usr/bin/env python3.7
# Copyright [2020] EMBL-European Bioinformatics Institute
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os, sys, shutil, re
from datetime import datetime
import cx_Oracle, argparse
from getpass import getpass
import pandas as pd
import json
description = """
Setup
-----
This script uses the cx_Oracle python module, which requires a little setup. For details, see:
https://cx-oracle.readthedocs.io/en/latest/user_guide/installation.html
The Oracle Instant Client is a requirement of this module. Please set the location of this library
using the $ORACLE_CLIENT_LIB environment variable before using this script.
Description
-----
This script will query ERAREAD for COVID-related projects and split the results into 5 logs:
- log1 : sars-cov-2 sequences
- log2 : other coronaviral sequences
- log3 : metagenomes
- log4 : human sequences
- log5 : other host sequences
The script will create an output directory containing:
- an .xlsx spreadsheet for import into Excel or similar (one sheet per-log)
- per-log
- list of project accessions that are not yet in an umbrella project
- an updated umbrella project XML listing new child projects
- a submission XML calling the MODIFY action to add new child projects
- if --submit flag is on, a receipt XML
"""
usage = """
Usage: fetch_covid_projects_from_db.py <OPTIONS>
Options:
--outdir : (optional) name of output directory (default: covid_logs_<timestamp>)
--ignore_projects : (optional) file containing list of project ids to ignore
--webin : (optional) username and password for your Webin account. Format 'User:Password'
--environment : (optional) 'dev' or 'prod' (default: dev)
--submit : (optional) run umbrella update submission automatically (default: printed curl commands)
"""
parser = argparse.ArgumentParser(
description=description+usage
)
parser.add_argument('--outdir', help="(optional) name of output directory (default: covid_logs_<timestamp>)");
parser.add_argument('--ignore_projects', help="(optional) file containing list of project ids to ignore (default: none)")
parser.add_argument('--webin', help="(optional) username and password for your Webin account. Format 'User:Password'")
parser.add_argument('--environment', help="(optional) 'dev' or 'prod' (default: dev)", default='dev')
parser.add_argument('--submit', help="(optional) run umbrella update submission automatically (default: printed curl commands)", action='store_true', default=False)
parser.add_argument('--debug', help="(optional) print additional debugging statements", action='store_true', default=False)
opts = parser.parse_args(sys.argv[1:])
# global variables for use throughout the script
umbrella_project_ids = ['PRJEB39908', 'PRJEB40349', 'PRJEB40770', 'PRJEB40771', 'PRJEB40772']
log1, log2, log3, log4, log5 = [], [], [], [], []
sars_tax_id, human_tax_id = '2697049', '9606'
# some very large projects take a long time to join to the sample tables - predefine their taxonomy info
custom_project_tax_info = {
'PRJEB37886' : [sars_tax_id, 'Severe acute respiratory syndrome coronavirus 2'], # COG-UK
'PRJEB44987' : [sars_tax_id, 'Severe acute respiratory syndrome coronavirus 2'], # RKI
'PRJEB43828' : [sars_tax_id, 'Severe acute respiratory syndrome coronavirus 2'], # Swiss
'PRJEB44803' : [sars_tax_id, 'Severe acute respiratory syndrome coronavirus 2'] # Iceland
}
# initialise libraries to query Oracle DBs
client_lib_dir = os.getenv('ORACLE_CLIENT_LIB')
if not client_lib_dir or not os.path.isdir(client_lib_dir):
sys.stderr.write("ERROR: Environment variable $ORACLE_CLIENT_LIB must point at a valid directory\n")
exit(1)
cx_Oracle.init_oracle_client(lib_dir=client_lib_dir)
"""
Return connection object given db credentials
"""
def setup_connection(host,port,service_name,user,pwd):
connection = None
try:
dsn = cx_Oracle.makedsn(host, port, service_name=service_name)
connection = cx_Oracle.connect(user, pwd, dsn, encoding="UTF-8")
return connection
except cx_Oracle.Error as error:
sys.stderr.write("Could not connect to {}...\n{}\n".format(service_name, error))
sys.exit(1)
return connection
"""
Return read-only connection to ERAREAD
"""
def setup_ERA_connection():
return setup_connection("ora-vm-069.ebi.ac.uk", 1541, "ERAREAD", 'era_reader', 'reader')
"""
Return read-only connection to ENAPRO
"""
def setup_ENA_connection():
return setup_connection("ora-vm5-008.ebi.ac.uk", 1531, "ENAPRO", 'ena_reader', 'reader')
"""
Query ERA to find projects (+ project metadata) that may contain COVID data
"""
def fetch_studies(connection):
where_clause_studies = " AND ".join([
" OR ".join([
"p.tax_id = 2697049",
"(lower(s.study_title) like '%sars%cov%2%'",
"lower(s.study_title) like '%covid%'",
"lower(s.study_title) like '%coronavirus%'",
"lower(s.study_title) like '%severe acute respiratory%')"
]),
"p.status_id not in (2, 3, 5)", # no private, cancelled or suppressed projects
"(s.study_id not like 'EGA%' AND s.project_id not like 'EGA%') "
])
# where_clause_studies = "s.project_id IN ('PRJNA656810', 'PRJEB43555', 'PRJNA683801')"
sql = """
SELECT p.project_id, s.study_id, p.tax_id, s.study_title, p.project_title, p.first_created,
p.scientific_name, l.to_id as umbrella_project_id, p.status_id
FROM project p LEFT JOIN study s ON p.project_id = s.project_id
LEFT JOIN (select * from ena_link where to_id in ({0})) l on l.from_id = s.project_id
WHERE {1} ORDER BY p.first_created, p.project_id DESC
""".format(",".join([f"'{u}'" for u in umbrella_project_ids]), where_clause_studies)
print(sql)
ignore_projects = []
if opts.ignore_projects:
ignore_projects = [line.strip() for line in open(opts.ignore_projects, 'r')]
    # batch the ids into groups of 500 for use in the SQL queries later;
    # performance of the `WHERE ... IN ()` clause degrades past 500 elements
covid_study_list = [[]]
cursor = connection.cursor()
for row in cursor.execute(sql):
[project_id, study_id, tax_id, s_title, p_title, first_created, sci_name, umbrella, status] = row
if project_id in ignore_projects:
continue
try:
if project_id == covid_study_list[-1][-1]['project']:
covid_study_list[-1][-1]['umbrella'] += ",{}".format(umbrella)
continue
except IndexError:
pass
title = s_title if s_title else p_title.read() # .read to convert CLOB to string
this_study = {
'project': project_id, 'study': study_id, 'tax': tax_id,
'title': title, 'first_created': first_created.strftime("%Y-%m-%d"),
'sci_name': sci_name, 'umbrella': umbrella, 'status': status
}
covid_study_list[-1].append(this_study)
if len(covid_study_list[-1]) >= 500:
covid_study_list.append([])
cursor.close()
return covid_study_list
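# Hedged illustration (not part of the original script): the 500-id batching done in
# fetch_studies() can be expressed as a small generic helper; the name _chunk and the
# default batch size shown here are assumptions for clarity only.
def _chunk(items, size=500):
    """Yield successive sub-lists of at most `size` items, mirroring the batching above."""
    for start in range(0, len(items), size):
        yield items[start:start + size]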
"""
Query ENA for number of sequences in (nested) list of projects
"""
def sequence_count(connection, study_lists):
cursor = connection.cursor()
seq_counts = {}
for batch in study_lists:
string_list = ",".join(["'{}'".format(i['project']) for i in batch])
for row in cursor.execute(f"SELECT study_id, count(*) FROM dbentry WHERE study_id IN ({string_list}) group by study_id"):
seq_counts[row[0]] = row[1]
cursor.close()
return seq_counts
"""
Query ERA for number of reads in (nested) list of projects
"""
def read_count(connection, study_lists):
cursor = connection.cursor()
read_counts = {}
for batch in study_lists:
string_list = ",".join(["'{0}'".format(i['study']) for i in batch])
for row in cursor.execute(f"SELECT study_id, count(*) FROM experiment e JOIN run r ON e.experiment_id = r.experiment_id WHERE e.study_id IN ({string_list}) group by study_id"):
read_counts[row[0]] = row[1]
cursor.close()
return read_counts
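# Hedged usage sketch (assumption: this mirrors how the script's main flow, which is not
# shown here, wires the pieces together): the read-only connections feed the study query,
# and the per-batch counts come back keyed by project/study accession.
def _example_count_covid_data():  # hypothetical helper, never called by the original code
    era = setup_ERA_connection()
    ena = setup_ENA_connection()
    batches = fetch_studies(era)
    seq_counts = sequence_count(ena, batches)
    read_counts = read_count(era, batches)
    return seq_counts, read_counts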
"""
Join ENA to ERA to link sequences to their samples and fetch taxon info
"""
def fetch_tax_from_seqs(connection, project_id):
cursor = connection.cursor()
tax_info = []
sql = """
SELECT s.tax_id, s.scientific_name
FROM dbentry d JOIN ena_xref x ON d.primaryacc# = x.acc
JOIN <EMAIL> s ON s.biosample_id = x.xref_acc
WHERE d.study_id = '{0}' group by s.tax_id, s.scientific_name
""".format(project_id)
for row in cursor.execute(sql):
tax_info.append((row[0], row[1] if row[1] else ''))
cursor.close()
return tax_info
"""
Link reads to their samples and fetch taxon info
"""
def fetch_tax_from_reads(connection, study_id):
cursor = connection.cursor()
tax_info = []
sql = """
SELECT s.tax_id, s.scientific_name FROM <EMAIL> e
LEFT JOIN <EMAIL> es ON es.experiment_id = e.experiment_id
LEFT JOIN <EMAIL> s ON es.sample_id = s.sample_id
WHERE e.study_id = '{0}' group by s.tax_id, s.scientific_name
""".format(study_id)
for row in cursor.execute(sql):
tax_info.append((row[0], row[1] if row[1] else ''))
cursor.close()
return tax_info
"""
Add a record to a log, with some formatting adjustments
"""
def add_to_log(log, entry):
try:
if type(entry['s_tax']) == list:
entry['s_tax'] = ','.join(entry['s_tax'])
if type(entry['s_sci_name']) == list:
entry['s_sci_name'] = ','.join(entry['s_sci_name'])
except KeyError:
entry['s_tax'] = 'NULL'
entry['s_sci_name'] = 'NULL'
for k in entry.keys():
        if entry[k] is None:
entry[k] = 'NULL'
log.append(entry)
"""
Check project metadata and filter into 5 logs:
* log1 : sars-cov-2 sequences
* log2 : other coronaviral sequences
* log3 : metagenomes
* log4 : human sequences (infected with covid-19)
* log5 : other host sequences
"""
def filter_into_logs(entry_raw):
entry = entry_raw.copy()
# filter into different logs on taxon id, scientific name and study title
study_title = entry['title'] if entry['title'] else 'None'
[project_taxon_id, project_scientific_name] = [entry['tax'], entry['sci_name'] if entry['sci_name'] else '']
[sample_taxon_ids, sample_scientific_names] = [[],[]]
try:
[sample_taxon_ids, sample_scientific_names] = [entry['s_tax'], entry['s_sci_name']]
except KeyError: # no sample taxon info fetched - just use project info
pass
# first, find SARS-CoV-2
if project_taxon_id == sars_tax_id:
add_to_log(log1, entry)
print("--> A. assigned to log 1 (sars-cov-2)") if opts.debug else ''
project_scientific_name = ''
elif sars_tax_id in sample_taxon_ids:
add_to_log(log1, entry)
print("--> B. assigned to log 1 (sars-cov-2)") if opts.debug else ''
idx = sample_taxon_ids.index(sars_tax_id)
del sample_taxon_ids[idx]
del sample_scientific_names[idx]
# next, find human
if project_taxon_id == human_tax_id and 'metagenom' not in study_title:
add_to_log(log4, entry)
print("--> A. assigned to log 4 (human)") if opts.debug else ''
project_scientific_name = ''
elif human_tax_id in sample_taxon_ids and 'metagenom' not in study_title:
add_to_log(log4, entry)
print("--> B. assigned to log 4 (human)") if opts.debug else ''
idx = sample_taxon_ids.index(human_tax_id)
del sample_taxon_ids[idx]
del sample_scientific_names[idx]
if project_scientific_name == '' and | |
##
# @file This file is part of the ExaHyPE project.
# @author ExaHyPE Group (<EMAIL>)
#
# @section LICENSE
#
# Copyright (c) 2016 http://exahype.eu
# All rights reserved.
#
# The project has received funding from the European Union's Horizon
# 2020 research and innovation programme under grant agreement
# No 671698. For copyrights and licensing, please consult the webpage.
#
# Released under the BSD 3 Open Source License.
# For the full license text, see LICENSE.txt
#
#
# @section DESCRIPTION
#
# Generates the fused SpaceTimePredictor / VolumeIntegral kernel
#
# Calls the user functions flux, source, and ncp
#
from .abstractModelBaseClass import AbstractModelBaseClass
import copy
from ..utils import MatmulConfig
class FusedSpaceTimePredictorVolumeIntegralModel(AbstractModelBaseClass):
def generateCode(self):
self.context["nVarMinusOne_seq"] = range(self.context["nVar"] - 1)
self.context["nDataMinusOne_seq"] = range(self.context["nData"] - 1)
if self.context["isLinear"]:
if self.context["useSplitCK"]:
if self.context["useVectPDE"]:
if self.context["useAoSoA2"]:
template = "fusedSPTVI_linear_split_ck_aosoa2_cpp.template"
else:
template = "fusedSPTVI_linear_split_ck_vect_cpp.template"
else:
template = "fusedSPTVI_linear_split_ck_cpp.template"
self.render(("aderdg", template), "fusedSpaceTimePredictorVolumeIntegral.cpp")
if self.context["usePointSources"]:
self.context["usePointSources"] = False
self.context["nameSuffix"] = "_WithoutPS"
self.render(("aderdg", template), "fusedSpaceTimePredictorVolumeIntegral_WithoutPS.cpp")
else:
# size of the tmpArray
self.context["tmpArraySize"] = max((self.context["nDof"]*self.context["nVarPad"] if self.context["useFlux"] else 0), \
(self.context["nDim"]*self.context["nVarPad"] if self.context["useNCP"] else 0))
self.render(("aderdg", "fusedSPTVI_linear_v2_cpp.template"), "fusedSpaceTimePredictorVolumeIntegral.cpp")
if self.context["usePointSources"]:
self.context["usePointSources"] = False
self.context["nameSuffix"] = "_WithoutPS"
self.render(("aderdg", "fusedSPTVI_linear_cpp.template"), "fusedSpaceTimePredictorVolumeIntegral_WithoutPS.cpp")
else: #nonlinear
self.context["nDof_seq"] = range(0,self.context["nDof"])
self.context["i_seq"] = range(0,self.context["nDof"])
self.context["j_seq"] = range(0,self.context["nDof"]) if (self.context["nDim"] >= 3) else [0]
if self.context["predictorRecompute"]:
if self.context["useVectPDE"]:
self.render(("aderdg", "fusedSPTVI_nonlinear_mem_vect_cpp.template"), "fusedSpaceTimePredictorVolumeIntegral.cpp")
else:
self.render(("aderdg", "fusedSPTVI_nonlinear_mem_cpp.template"), "fusedSpaceTimePredictorVolumeIntegral.cpp")
else:
self.render(("aderdg", "fusedSPTVI_nonlinear_cpp.template"), "fusedSpaceTimePredictorVolumeIntegral.cpp")
# return required gemms
return {"matmulList": self.context["matmulConfigs"].values(), "fileName": "asm_fstpvi.c"}
def buildGemmsConfig(self):
# MatmulConfig: M, N, K, LDA, LDB, LDC, alpha, beta, Align A, Align B, Align C, name, prefetching, type
self.context["matmulConfigs"] = {}
# shortcut
nVar = self.context["nVar"]
nVarPad = self.context["nVarPad"]
nDataPad = self.context["nDataPad"]
nData = self.context["nData"]
nDof = self.context["nDof"]
nDof2 = nDof*nDof
nDof3 = nDof2*nDof
nDof3D = self.context["nDof3D"]
nDofPad = self.context["nDofPad"]
nDof2Pad = self.context["nDof2Pad"]
nDim = self.context["nDim"]
if self.context["isLinear"]:
if self.context["useSplitCK"]:
if self.context["useVectPDE"]:
if self.context["useAoSoA2"]: #split_ck aosoa2
if self.context["useFlux"]:
if self.context["useMaterialParam"]:
self.context["matmulConfigs"]["flux_x_sck_aosoa2"] = MatmulConfig(nDof, nDof, nDof, nDofPad , nDof, nDof , 1, 0, 1, 1, 1, "flux_x_sck_aosoa2") # beta, 0 => overwrite C
self.context["matmulConfigs"]["flux_y_sck_aosoa2"] = MatmulConfig(nDof, nDof, nDof, nDof , nDofPad, nDof , 1, 0, 1, 1, 1, "flux_y_sck_aosoa2") # beta, 0 => overwrite C
self.context["matmulConfigs"]["flux_z_sck_aosoa2"] = MatmulConfig(nDof2Pad*nVar, nDof, nDof, nDof2Pad*nVar , nDofPad, nDof2Pad*nVar , 1, 0, 1, 1, 1, "flux_z_sck_aosoa2") # beta, 0 => overwrite C
else:
self.context["matmulConfigs"]["flux_x_sck_aosoa2"] = MatmulConfig(nDof, nDof, nDof, nDofPad , nDof, nDof , 1, 1, 1, 1, 1, "flux_x_sck_aosoa2")
self.context["matmulConfigs"]["flux_y_sck_aosoa2"] = MatmulConfig(nDof, nDof, nDof, nDof , nDofPad, nDof , 1, 1, 1, 1, 1, "flux_y_sck_aosoa2")
self.context["matmulConfigs"]["flux_z_sck_aosoa2"] = MatmulConfig(nDof2Pad*nVar, nDof, nDof, nDof2Pad*nVar , nDofPad, nDof2Pad*nVar , 1, 1, 1, 1, 1, "flux_z_sck_aosoa2")
self.context["matmulConfigs"]["gradQ_x_sck_aosoa2"] = MatmulConfig(nDof, nDof, nDof, nDofPad , nDof, nDof , 1, 0, 1, 1, 1, "gradQ_x_sck_aosoa2") # beta, 0 => overwrite C
self.context["matmulConfigs"]["gradQ_y_sck_aosoa2"] = MatmulConfig(nDof, nDof, nDof, nDof , nDofPad, nDof , 1, 0, 1, 1, 1, "gradQ_y_sck_aosoa2") # beta, 0 => overwrite C
self.context["matmulConfigs"]["gradQ_z_sck_aosoa2"] = MatmulConfig(nDof2Pad*nVar, nDof, nDof, nDof2Pad*nVar, nDofPad, nDof2Pad*nVar, 1, 0, 1, 1, 1, "gradQ_z_sck_aosoa2") # beta, 0 => overwrite C
else:# split_ck vect
if self.context["useFlux"]:
if self.context["useMaterialParam"]:
self.context["matmulConfigs"]["flux_x_sck_vect"] = MatmulConfig(nDofPad, nVar, nDof, nDofPad , nDofPad, nDofPad , 1, 0, 1, 1, 1, "flux_x_sck_vect") # beta, 0 => overwrite C
self.context["matmulConfigs"]["flux_y_or_z_sck_vect"] = MatmulConfig(nDofPad*nVar, nVar, nDof, nDofPad*nVar , nDofPad, nDofPad*nVar , 1, 0, 1, 1, 1, "flux_y_or_z_sck_vect") # beta, 0 => overwrite C
else:
self.context["matmulConfigs"]["flux_x_sck_vect"] = MatmulConfig(nDofPad, nVar, nDof, nDofPad , nDofPad, nDofPad , 1, 1, 1, 1, 1, "flux_x_sck_vect")
self.context["matmulConfigs"]["flux_y_sck_vect"] = MatmulConfig(nDofPad*nVar, nDof, nDof, nDofPad*nVar , nDofPad, nDofPad*nVar , 1, 1, 1, 1, 1, "flux_y_sck_vect")
if self.context["nDim"]>=3:
self.context["matmulConfigs"]["flux_z_sck_vect"] = MatmulConfig(nDofPad*nVar, nDof, nDof, nDofPad*nVar , nDofPad, nDofPad*nVar*nDof, 1, 1, 1, 1, 1, "flux_z_sck_vect")
self.context["matmulConfigs"]["gradQ_x_sck_vect"] = MatmulConfig(nDofPad, nVar*nDof*nDof3D, nDof, nDofPad , nDofPad, nDofPad , 1, 0, 1, 1, 1, "gradQ_x_sck_vect") # beta, 0 => overwrite C
self.context["matmulConfigs"]["gradQ_y_sck_vect"] = MatmulConfig(nDofPad*nVar, nDof, nDof, nDofPad*nVar , nDofPad, nDofPad*nVar , 1, 0, 1, 1, 1, "gradQ_y_sck_vect") # beta, 0 => overwrite C
if self.context["nDim"]>=3:
self.context["matmulConfigs"]["gradQ_z_sck_vect"] = MatmulConfig(nDofPad*nVar*nDof, nDof, nDof, nDofPad*nVar*nDof, nDofPad, nDofPad*nVar*nDof, 1, 0, 1, 1, 1, "gradQ_z_sck_vect") # beta, 0 => overwrite C
else: # split_ck scalar
if self.context["useFlux"]:
self.context["matmulConfigs"]["flux_x_sck"] = MatmulConfig(nVarPad, nDof, nDof, nVarPad , nDofPad, nVarPad , 1, 1, 1, 1, 1, "flux_x_sck")
self.context["matmulConfigs"]["flux_y_sck"] = MatmulConfig(nVarPad, nDof, nDof, nVarPad , nDofPad, nVarPad*nDof , 1, 1, 1, 1, 1, "flux_y_sck")
if self.context["nDim"]>=3:
self.context["matmulConfigs"]["flux_z_sck"] = MatmulConfig(nVarPad, nDof, nDof, nVarPad , nDofPad, nVarPad*nDof2, 1, 1, 1, 1, 1, "flux_z_sck")
self.context["matmulConfigs"]["gradQ_x_sck"] = MatmulConfig(nVarPad, nDof, nDof, nVarPad , nDofPad, nVarPad , 1, 0, 1, 1, 1, "gradQ_x_sck") # beta, 0 => overwrite C
self.context["matmulConfigs"]["gradQ_y_sck"] = MatmulConfig(nVarPad, nDof, nDof, nVarPad*nDof , nDofPad, nVarPad*nDof , 1, 0, 1, 1, 1, "gradQ_y_sck") # beta, 0 => overwrite C
if self.context["nDim"]>=3:
self.context["matmulConfigs"]["gradQ_z_sck"] = MatmulConfig(nVarPad, nDof, nDof, nVarPad*nDof2, nDofPad, nVarPad*nDof2, 1, 0, 1, 1, "gradQ_z_sck", "nopf", "gemm") # beta, 0 => overwrite C
else: # default linear
if self.context["useFlux"]:
self.context["matmulConfigs"]["flux_x"] = MatmulConfig(nVarPad, nDof, nDof, nVarPad , nDofPad, nVarPad, 1, 0, 1, 1, 1, "flux_x") # beta, 0 => overwrite C
self.context["matmulConfigs"]["flux_y"] = MatmulConfig(nVarPad, nDof, nDof, nVarPad*nDof , nDofPad, nVarPad, 1, 0, 1, 1, 1, "flux_y") # beta, 0 => overwrite C
if self.context["nDim"]>=3:
self.context["matmulConfigs"]["flux_z"] = MatmulConfig(nVarPad, nDof, nDof, nVarPad*nDof2, nDofPad, nVarPad, 1, 0, 1, 1, 1, "flux_z") # beta, 0 => overwrite C
if self.context["useNCP"]:
self.context["matmulConfigs"]["gradQ_x"] = MatmulConfig(nVar, nDof, nDof, nDataPad , nDofPad, nVarPad , 1, 1, 1, 1, 1, "gradQ_x")
self.context["matmulConfigs"]["gradQ_y"] = MatmulConfig(nVar, nDof, nDof, nDataPad*nDof , nDofPad, nVarPad*nDof , 1, 1, 1, 1, 1, "gradQ_y")
if self.context["nDim"]>=3:
self.context["matmulConfigs"]["gradQ_z"] = MatmulConfig(nVar, nDof, nDof, nDataPad*nDof2, nDofPad, nVarPad*nDof2, 1, 1, 1, 1, 1, "gradQ_z")
else: #NonLinear
if self.context["predictorRecompute"]: # TODO JMG matmuls for gradQ, rhs and lduh are exactly the same...
if self.context["useVectPDE"]:
if self.context["useFlux"]:
self.context["matmulConfigs"]["rhs_x"] = MatmulConfig(nDofPad, nVar, nDof, nDofPad, nDofPad, nDofPad , 1, 1, 1, 1, 1, "rhs_x", prefetchInput="B", prefetchOutput="C")
self.context["matmulConfigs"]["rhs_y"] = MatmulConfig(nDofPad*nVar, nDof, nDof, nDofPad*nVar, nDofPad, nDofPad*nVar , 1, 1, 1, 1, 1, "rhs_y", prefetchInput="A", prefetchOutput="C")
if self.context["nDim"]>=3:
self.context["matmulConfigs"]["rhs_z"] = MatmulConfig(nDofPad*nVar*nDof, nDof, nDof, nDofPad*nVar*nDof, nDofPad, nDofPad*nVar*nDof , 1, 1, 1, 1, 1, "rhs_z")
self.context["matmulConfigs"]["lduh_x"] = MatmulConfig(nDofPad, nVar, nDof, nDofPad, nDofPad, nDofPad , 1, 1, 1, 1, 1, "lduh_x", prefetchInput="B", prefetchOutput="C")
self.context["matmulConfigs"]["lduh_y"] = MatmulConfig(nDofPad*nVar, nDof, nDof, nDofPad*nVar, nDofPad, nDofPad*nVar , 1, 1, 1, 1, 1, "lduh_y", prefetchInput="A", prefetchOutput="C")
if self.context["nDim"]>=3:
self.context["matmulConfigs"]["lduh_z"] = MatmulConfig(nDofPad*nVar*nDof, nDof, nDof, nDofPad*nVar*nDof, nDofPad, nDofPad*nVar*nDof, 1, 1, 1, 1, 1, "lduh_z")
if self.context["useNCP"] or self.context['useViscousFlux']:
self.context["matmulConfigs"]["gradQ_x"] = MatmulConfig(nDofPad, nVar, nDof, nDofPad, nDofPad, nDofPad , 1, 1, 1, 1, 1, "gradQ_x")
self.context["matmulConfigs"]["gradQ_y"] = MatmulConfig(nDofPad*nVar, nDof, nDof, nDofPad*nVar, nDofPad, nDofPad*nVar , 1, 1, 1, 1, 1, "gradQ_y")
if self.context["nDim"]>=3:
self.context["matmulConfigs"]["gradQ_z"] = MatmulConfig(nDofPad*nVar*nDof, nDof, nDof, nDofPad*nVar*nDof, nDofPad, nDofPad*nVar*nDof, 1, 1, 1, 1, 1, "gradQ_z")
self.context["matmulConfigs"]["lqi"] = MatmulConfig(nDofPad*nVar, nDof, nDof, nDofPad*nVar*nDof*nDof3D, nDofPad, nDofPad*nVar, 1, 0, 1, 1, 1, "lqi") # beta, 0 => overwrite C
else: #scalar predictor recompute
if self.context["useFlux"]:
self.context["matmulConfigs"]["rhs_x"] = MatmulConfig(nVarPad, nDof, nDof, nVarPad , nDofPad, nVarPad , 1, 1, 1, 1, 1, "rhs_x")
self.context["matmulConfigs"]["rhs_y"] = MatmulConfig(nVarPad, nDof, nDof, nVarPad*nDof , nDofPad, nVarPad*nDof , 1, 1, 1, 1, 1, "rhs_y")
if self.context["nDim"]>=3:
self.context["matmulConfigs"]["rhs_z"] = MatmulConfig(nVarPad, nDof, nDof, nVarPad*nDof2, nDofPad, nVarPad*nDof2, 1, 1, 1, 1, 1, "rhs_z")
self.context["matmulConfigs"]["lduh_x"] = MatmulConfig(nVarPad, nDof, nDof, nVarPad, nDofPad, nVarPad , 1, 1, 1, 1, 1, "lduh_x")
self.context["matmulConfigs"]["lduh_y"] = MatmulConfig(nVarPad, nDof, nDof, nVarPad*nDof, nDofPad, nVarPad*nDof , 1, 1, 1, 1, 1, "lduh_y")
if self.context["nDim"]>=3:
self.context["matmulConfigs"]["lduh_z"] = MatmulConfig(nVarPad, nDof, nDof, nVarPad*nDof2, nDofPad, nVarPad*nDof2, 1, 1, 1, 1, 1, "lduh_z")
if self.context["useNCP"] or self.context['useViscousFlux']:
self.context["matmulConfigs"]["gradQ_x"] = MatmulConfig(nVarPad, nDof, nDof, nVarPad , nDofPad, nVarPad , 1, 1, 1, 1, 1, "gradQ_x")
self.context["matmulConfigs"]["gradQ_y"] = MatmulConfig(nVarPad, nDof, nDof, nVarPad*nDof, nDofPad, nVarPad*nDof , 1, 1, 1, 1, 1, "gradQ_y")
if self.context["nDim"]>=3:
self.context["matmulConfigs"]["gradQ_z"] = MatmulConfig(nVarPad, nDof, nDof, nVarPad*nDof2, nDofPad, nVarPad*nDof2, 1, 1, 1, 1, 1, "gradQ_z")
self.context["matmulConfigs"]["lqi"] = MatmulConfig(nVarPad, nDof, nDof, nVarPad*(nDof**nDim), nDofPad, nVarPad, 1, 0, 1, 1, 1, "lqi") # beta, 0 => overwrite C
else: # default nonlinear
if self.context["useFlux"]:
self.context["matmulConfigs"]["rhs_x"] = MatmulConfig(nVarPad, nDof, nDof, nVarPad , nDofPad, nVarPad , 1, 1, 1, 1, 1, "rhs_x")
self.context["matmulConfigs"]["rhs_y"] = MatmulConfig(nVarPad, nDof, nDof, nVarPad*nDof , nDofPad, nVarPad*nDof , 1, 1, 1, 1, | |
indexes[idx] += 1
if indexes[idx] < len(self.cgi_options[idx][1]):
break
indexes[idx] = 0
idx += 1
if idx==len(self.cgi_options):
done=True
self.progress(total_tests, total_tests)
def test_availability_start_time(self):
"""Control of MPD@availabilityStartTime using the start parameter"""
self.setup_media()
self.logoutCurrentUser()
drm_options = None
for o in self.cgi_options:
if o[0] == 'drm':
drm_options = o[1]
break
self.assertIsNotNone(drm_options)
pr = drm.PlayReady(self.templates)
media_files = models.MediaFile.all()
self.assertGreaterThan(len(media_files), 0)
filename = 'hand_made.mpd'
manifest = manifests.manifest[filename]
ref_now = real_datetime_class(2019,1,1,4,5,6, tzinfo=utils.UTC())
ref_today = real_datetime_class(2019,1,1, tzinfo=utils.UTC())
ref_yesterday = ref_today - datetime.timedelta(days=1)
testcases = [
('', ref_now, ref_today),
('today', ref_now, ref_today),
('2019-09-invalid-iso-datetime', ref_now, ref_today),
('now', ref_now, ref_now),
('epoch', ref_now, datetime.datetime(1970, 1, 1, 0, 0, tzinfo=utils.UTC())),
('2009-02-27T10:00:00Z', ref_now, datetime.datetime(2009,2,27,10,0,0, tzinfo=utils.UTC()) ),
('2013-07-25T09:57:31Z', ref_now, datetime.datetime(2013,7,25,9,57,31, tzinfo=utils.UTC()) ),
# special case when "now" is midnight, use yesterday midnight as availabilityStartTime
('', ref_today, ref_yesterday),
]
msg=r'When start="%s" is used, expected MPD@availabilityStartTime to be %s but was %s'
for option, now, start_time in testcases:
with mock_datetime_now(now):
baseurl = self.from_uri('dash-mpd-v3', manifest=filename, stream='bbb', mode='live')
if option:
baseurl += '?start=' + option
response = self.app.get(baseurl)
dv = ViewsTestDashValidator(self.app, 'live', response.xml, baseurl)
dv.validate(depth=3)
if option=='now':
start_time = dv.manifest.publishTime - dv.manifest.timeShiftBufferDepth
self.assertEqual(dv.manifest.availabilityStartTime, start_time,
msg=msg%(option, start_time.isoformat(),
dv.manifest.availabilityStartTime.isoformat()))
def test_get_vod_media_using_live_profile(self):
"""Get VoD segments for each DRM type (live profile)"""
self.setup_media()
self.logoutCurrentUser()
drm_options = None
for o in self.cgi_options:
if o[0] == 'drm':
drm_options = o[1]
break
self.assertIsNotNone(drm_options)
pr = drm.PlayReady(self.templates)
media_files = models.MediaFile.all()
self.assertGreaterThan(len(media_files), 0)
total_tests = len(drm_options)
test_count = 0
filename = 'hand_made.mpd'
manifest = manifests.manifest[filename]
for drm_opt in drm_options:
self.progress(test_count, total_tests)
test_count += 1
baseurl = self.from_uri('dash-mpd-v2', manifest=filename, stream='bbb')
baseurl += '?mode=vod&' + drm_opt
response = self.app.get(baseurl)
mpd = ViewsTestDashValidator(self.app, 'vod', response.xml, baseurl)
mpd.validate()
self.progress(total_tests, total_tests)
def test_get_live_media_using_live_profile(self):
"""Get segments from a live stream for each DRM type (live profile)"""
self.setup_media()
self.logoutCurrentUser()
drm_options = None
for o in self.cgi_options:
if o[0] == 'drm':
drm_options = o[1]
break
self.assertIsNotNone(drm_options)
pr = drm.PlayReady(self.templates)
media_files = models.MediaFile.all()
self.assertGreaterThan(len(media_files), 0)
total_tests = len(drm_options)
test_count = 0
filename = 'hand_made.mpd'
manifest = manifests.manifest[filename]
for drm_opt in drm_options:
self.progress(test_count, total_tests)
test_count += 1
now = datetime.datetime.now(tz=utils.UTC())
availabilityStartTime = now - datetime.timedelta(minutes=test_count)
availabilityStartTime = utils.toIsoDateTime(availabilityStartTime)
baseurl = self.from_uri('dash-mpd-v2', manifest=filename, stream='bbb')
baseurl += '?mode=live&' + drm_opt + '&start='+availabilityStartTime
response = self.app.get(baseurl)
self.assertEqual(response.status_int, 200)
mpd = ViewsTestDashValidator(self.app, "live", response.xml, baseurl)
mpd.validate()
self.progress(total_tests, total_tests)
def test_get_vod_media_using_on_demand_profile(self):
"""Get VoD segments (on-demand profile)"""
self.logoutCurrentUser()
self.setup_media()
media_files = models.MediaFile.all()
self.assertGreaterThan(len(media_files), 0)
for filename, manifest in manifests.manifest.iteritems():
if 'odvod' not in manifest['modes']:
continue
baseurl = self.from_uri('dash-mpd-v3', mode='odvod', manifest=filename, stream='bbb')
response = self.app.get(baseurl)
self.assertIn("urn:mpeg:dash:profile:isoff-on-demand:2011", response.xml.get('profiles'))
mpd = ViewsTestDashValidator(self.app, "odvod", response.xml, baseurl)
mpd.validate()
def test_request_unknown_media(self):
url = self.from_uri("dash-media", mode="vod", filename="notfound", segment_num=1, ext="mp4")
response = self.app.get(url, status=404)
def test_injected_http_error_codes(self):
self.setup_media()
self.logoutCurrentUser()
media_files = models.MediaFile.all()
self.assertGreaterThan(len(media_files), 0)
for seg in range(1,5):
url = self.from_uri("dash-media", mode="vod",
filename=media_files[0].representation.id,
segment_num=seg, ext="mp4", absolute=True)
response = self.app.get(url)
for code in [404, 410, 503, 504]:
if seg in [1,3]:
status=code
else:
status=200
response = self.app.get(url, {str(code): '1,3'}, status=status)
def test_video_corruption(self):
self.setup_media()
self.logoutCurrentUser()
media_files = models.MediaFile.all()
self.assertGreaterThan(len(media_files), 0)
for seg in range(1,5):
url = self.from_uri("dash-media", mode="vod",
filename=media_files[0].representation.id,
segment_num=seg, ext="mp4", absolute=True)
clean = self.app.get(url)
corrupt = self.app.get(url, {'corrupt': '1,2'})
if seg < 3:
self.assertNotEqual(clean.body, corrupt.body)
else:
self.assertEqual(clean.body, corrupt.body)
@staticmethod
def cgi_combinations(cgi_options):
"""convert a list of CGI options into a set of all possible combinations"""
indexes = [0] * len(cgi_options)
result = set()
done = False
while not done:
params = {}
mode = None
for idx, option in enumerate(cgi_options):
name, values = option
value = values[indexes[idx]]
if name=='mode':
mode = value[5:]
if value:
params[name] = value
if mode in manifests.manifest[filename]['modes']:
if mode != "live":
if params.has_key("mup"):
del params["mup"]
if params.has_key("time"):
del params["time"]
cgi = '&'.join(params.values())
result.add(cgi)
idx = 0
while idx < len(cgi_options):
indexes[idx] += 1
if indexes[idx] < len(cgi_options[idx][1]):
break
indexes[idx] = 0
idx += 1
if idx == len(cgi_options):
done = True
return result
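    # Hedged alternative sketch (assumption, not part of the original test suite): the
    # odometer-style loop above enumerates the Cartesian product of the CGI option value
    # lists, which itertools.product expresses directly; the per-manifest mode filtering
    # done above is omitted here for brevity.
    @staticmethod
    def _cgi_combinations_via_product(cgi_options):
        import itertools
        result = set()
        for values in itertools.product(*[opt_values for _, opt_values in cgi_options]):
            params = {name: value
                      for (name, _), value in zip(cgi_options, values) if value}
            result.add('&'.join(params.values()))
        return result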
def test_video_playback(self):
"""Test generating the video HTML page.
Checks every manifest with every CGI parameter causes a valid
HTML page that allows the video to be watched using a <video> element.
"""
def opt_choose(item):
return item[0] in ['mode', 'acodec', 'drm']
self.setup_media()
self.logoutCurrentUser()
media_files = models.MediaFile.all()
self.assertGreaterThan(len(media_files), 0)
url = self.from_uri("video", absolute=True)
options = filter(opt_choose, self.cgi_options)
options = self.cgi_combinations(options)
num_tests = (len(options) * len(models.Stream.all()) *
len(manifests.manifest))
count = 0
for filename, manifest in manifests.manifest.iteritems():
for stream in models.Stream.all():
for opt in options:
html_url = url+'?mpd={prefix}/{mpd}&{opt}'.format(
prefix=stream.prefix, mpd=filename, opt=opt)
self.progress(count, num_tests)
response = self.app.get(html_url)
html = response.html
self.assertEqual(html.title.string, manifest['title'])
for script in html.find_all('script'):
if script.get("src"):
continue
script = script.get_text()
self.assertIn('var dashParameters', script)
start = script.index('{')
end = script.rindex('}') + 1
script = json.loads(script[start:end])
for field in ['title', 'prefix', 'playready_la_url', 'marlin_la_url']:
self.assertEqual(script['stream'][field], getattr(stream, field))
count += 1
self.progress(num_tests, num_tests)
def test_add_stream(self):
self.assertEqual(len(models.Stream.all()), 0)
request = {
'title': 'Big Buck Bunny',
'prefix': 'bbb',
'marlin_la_url': 'ms3://unit.test/bbb.sas',
'playready_la_url':''
}
params = []
for k, v in request.iteritems():
params.append('{0}={1}'.format(k, urllib.quote(v)))
url = self.from_uri('stream', absolute=True)
url = '{0}?{1}'.format(url, '&'.join(params))
# user must be logged in to use stream API
self.logoutCurrentUser()
response = self.app.put(url, status=401)
# user must be logged in as admin to use stream API
self.setCurrentUser(is_admin=False)
response = self.app.put(url, status=401)
# user must be logged in as admin to use stream API
self.setCurrentUser(is_admin=True)
# request should fail due to lack of CSRF token
response = self.app.put(url)
self.assertTrue(response.json.has_key("error"))
self.assertIn("CsrfFailureException", response.json["error"])
media_url = self.from_uri('media-index', absolute=True)
media = self.app.get(media_url)
streams_table = media.html.find(id="streams")
request['csrf_token'] = streams_table.get('data-csrf')
url += '&csrf_token=' + request['csrf_token']
response = self.app.put(url)
expected_result = copy.deepcopy(request)
expected_result['playready_la_url'] = None
del expected_result['csrf_token']
expected_result['csrf'] = response.json["csrf"]
expected_result['id'] = response.json["id"]
self.assertEqual(expected_result, response.json)
streams = models.Stream.all()
self.assertEqual(len(streams), 1)
for k, v in request.iteritems():
if k == 'csrf_token':
continue
self.assertEqual(getattr(streams[0], k), expected_result[k],
'Field {0}: expected "{1}" got "{2}"'.format(k, getattr(streams[0], k),
expected_result[k]))
url = self.from_uri('media-index', absolute=True)
response = self.app.get(url)
response.mustcontain(expected_result['title'], expected_result['prefix'])
def test_delete_stream(self):
self.assertEqual(len(models.Stream.all()), 0)
bbb = models.Stream(title='Big Buck Bunny', prefix='bbb')
bbb.put()
tears = models.Stream(title='Tears of Steel', prefix='tears')
tears.put()
self.assertEqual(len(models.Stream.all()), 2)
url = self.from_uri('del-stream', id=bbb.key.urlsafe(), absolute=True)
# user must be logged in to use stream API
self.logoutCurrentUser()
response = self.app.delete(url, status=401)
self.assertEqual(len(models.Stream.all()), 2)
# user must be logged in as admin to use stream API
self.setCurrentUser(is_admin=False)
response = self.app.delete(url, status=401)
self.assertEqual(response.status_int,401)
self.assertEqual(len(models.Stream.all()), 2)
# user must be logged in as admin to use stream API
self.setCurrentUser(is_admin=True)
# request without CSRF token should fail
response = self.app.delete(url)
self.assertTrue(response.json.has_key("error"))
self.assertIn("CsrfFailureException", response.json["error"])
self.assertEqual(len(models.Stream.all()), 2)
media_url = self.from_uri('media-index', absolute=True)
media = self.app.get(media_url)
streams_table = media.html.find(id="streams")
csrf_url = url + '?csrf_token=' + streams_table.get('data-csrf')
response = self.app.delete(csrf_url)
streams = models.Stream.all()
self.assertEqual(len(streams), 1)
next_csrf_token = response.json["csrf"]
# try to re-use a CSRF token
reuse_url = self.from_uri('del-stream', id=tears.key.urlsafe(), absolute=True)
reuse_url += '?csrf_token=' + streams_table.get('data-csrf')
response = self.app.delete(reuse_url)
self.assertTrue(response.json.has_key("error"))
self.assertIn("CsrfFailureException", response.json["error"])
# try to delete a stream that does not exist
response = self.app.delete(url+'?csrf_token='+next_csrf_token, status=404)
def test_add_full_key_pair(self):
self.assertEqual(len(models.Key.all()), 0)
url = self.from_uri('key', absolute=True)
request = {
'kid': '1AB45440532C439994DC5C5AD9584BAC',
'key': '<KEY>',
}
url = '{}?kid={}&key={}'.format(url, request['kid'], request['key'])
# user must be logged in to use keys API
self.logoutCurrentUser()
response = self.app.put(url, status=401)
self.assertEqual(response.status_int,401)
# user must be logged in as admin to use keys API
self.setCurrentUser(is_admin=False)
response = self.app.put(url, status=401)
self.assertEqual(response.status_int,401)
# user must be logged in as admin to use keys API
self.setCurrentUser(is_admin=True)
# request should fail due to lack of CSRF token
response = self.app.put(url)
self.assertTrue(response.json.has_key("error"))
self.assertIn("CsrfFailureException", response.json["error"])
media_url = self.from_uri('media-index', absolute=True)
media = self.app.get(media_url)
keys_table = media.html.find(id="keys")
url += '&csrf_token=' + keys_table.get('data-csrf')
response = self.app.put(url)
expected_result = {
'kid': request['kid'].lower(),
'key': request['key'].lower(),
'computed': False,
'csrf': response.json["csrf"],
}
self.assertEqual(expected_result, response.json)
keys = models.Key.all()
self.assertEqual(len(keys), 1)
self.assertEqual(keys[0].hkid, expected_result['kid'])
self.assertEqual(keys[0].hkey, expected_result['key'])
self.assertEqual(keys[0].computed, False)
url = self.from_uri('media-index', absolute=True)
response = self.app.get(url)
self.assertEqual(response.status_int, 200)
response.mustcontain(expected_result['kid'], expected_result['key'])
def test_add_computed_keys(self):
self.assertEqual(len(models.Key.all()), 0)
kid='01020304-0506-0708-090A-AABBCCDDEEFF'.replace('-','').lower()
url = '{}?kid={}'.format(self.from_uri('key', absolute=True), kid)
self.setCurrentUser(is_admin=True)
response = self.app.put(url)
# request without CSRF token should fail
self.assertTrue(response.json.has_key("error"))
self.assertIn("CsrfFailureException", response.json["error"])
media_url = self.from_uri('media-index', absolute=True)
media = self.app.get(media_url)
keys_table = media.html.find(id="keys")
url += '&csrf_token=' + keys_table.get('data-csrf')
response = self.app.put(url)
expected_result = {
'kid': kid,
'key': base64.b64decode('GUf166PQbx+sgBADjyBMvw==').encode('hex'),
'computed': True,
'csrf': response.json["csrf"],
}
self.assertEqual(expected_result, response.json)
keys = models.Key.all()
self.assertEqual(len(keys), 1)
self.assertEqual(keys[0].hkid, expected_result['kid'])
self.assertEqual(keys[0].hkey, expected_result['key'])
self.assertEqual(keys[0].computed, True)
def test_delete_key(self):
self.assertEqual(len(models.Key.all()), 0)
kid='1AB45440532C439994DC5C5AD9584BAC'.lower()
keypair = models.Key(hkid=kid,
| |
3614288.9880884974, 3448028.61050569,
2613897.3925167606, 10.676487893570695, 0.013473647569965668, 2.369752188687355e-06],
[1121.556326096255, 1225.477552275013, 1423.939707863312, 2833336.6064214385, 4056131.6666533365,
4235528.204404335, 11.244808146052, 0.018099038041, 2.432848e-06],
[1006.917429294439, 1253.4752967539239, 1434.96949675091, 3991524.1814932027, 2715754.5081717256,
5499059.652247055, 10.200095357793, 0.013625707946, 2.3722169999999998e-06],
[1147.501727915812, 1122.316640929817, 1304.635397750834, 2667086.1590371584, 3207967.3793078577,
4107081.9288311386, 10.765864310135, 0.018402202852, 1.7812109999999999e-06],
[1006.7804880189733, 1107.897956860713, 1436.8036091630076, 3991524.1814932027, 2715483.6685275612,
5499059.652247055, 10.200095357793, 0.013625707946, 1.8527773810483195e-06],
[1157.058011260272, 1240.3487101897, 1332.891017396078, 2522732.7973377593, 6921050.360550507,
3461589.4238329353, 11.919213951914514, 0.018048349898, 2.019091e-06],
[1002.3679828289813, 1233.8201449012863, 1434.5695663290069, 2867030.052889139, 5041021.649316114,
3083413.354012654, 9.962370627938, 0.016812713242489365, 2.110682e-06],
[1154.407498959948, 1236.655253905034, 1351.174773511498, 4487711.842441496, 2586063.573298348,
2756512.928705282, 11.43677145574, 0.013341161615, 1.716072e-06],
[1017.0400643510857, 1253.4752967539239, 1434.96949675091, 3991524.1814932027, 2715754.5081717256,
5499059.652247055, 10.200095357793, 0.013625707946, 2.3722169999999998e-06],
[1002.8735112225839, 1222.315873787476, 1434.5695663290069, 2867030.052889139, 5041021.649316114,
3083413.354012654, 9.962370627938, 0.016459337889, 2.110682e-06],
[1089.956830650977, 1114.607364489448, 1419.872158616546, 3617780.108636307, 3381410.7237955173,
5793876.509322431, 9.551867507206268, 0.013809671326999999, 1.937855678932045e-06],
[1148.9327261066758, 1153.6304141964065, 1304.551838382366, 2667086.1590371584, 3207967.3793078577,
4107081.9288311386, 10.76748429421807, 0.018402202852, 1.7815010318259866e-06],
[1098.218987361545, 1228.2800522789857, 1362.310913050378, 3949022.787889535, 2765020.178343959,
5685500.530032963, 9.477858635073746, 0.017868050845, 2.167855466685067e-06],
[1137.9157350997789, 1264.202938587985, 1362.310913050378, 3935633.4189007236, 3092961.8274809974,
5685500.530032963, 10.713570074941, 0.017868050845, 2.165278e-06],
[1011.1651636778292, 1219.449161088396, 1351.174773511498, 4531951.9996198295, 3494078.511359096,
2756512.928705282, 11.845560061499354, 0.013221990874991763, 1.716072e-06],
[1011.1651636778292, 1219.449161088396, 1351.174773511498, 4531951.9996198295, 3494078.511359096,
2756512.928705282, 11.845560061499354, 0.013705077041655573, 1.718482291015711e-06],
[1121.556326096255, 1225.477552275013, 1423.939707863312, 2500575.673267536, 4056131.6666533365,
4235528.204404335, 11.244808146052, 0.018099038041, 2.432848e-06],
[1157.058011260272, 1240.3487101897, 1332.891017396078, 2533132.283573338, 6921050.360550507,
3461589.4238329353, 9.457999534029, 0.018048349898, 2.019091e-06],
[1154.5397873167135, 1264.202938587985, 1362.310913050378, 3921988.0613203924, 3092961.8274809974,
5772927.094828865, 10.634894158475426, 0.017868050845, 2.165278e-06],
[1119.645645441935, 1166.051027068192, 1410.400490357985, 2839590.7843750915, 3105552.0740890396,
4280687.504420381, 9.013062737532, 0.015566562652, 1.816343e-06],
[1103.683044712124, 1117.8172959384456, 1419.872158616546, 3589704.202519048, 3381410.7237955173,
5785882.1544099115, 12.013233977881999, 0.013809671326999999, 1.912446838761154e-06],
[1128.0511659807232, 1264.202938587985, 1362.310913050378, 3935633.4189007236, 3381585.1063012956,
5685500.530032963, 10.713570074941, 0.017868050845, 2.1650025557814735e-06],
[1170.0160238258507, 1219.449161088396, 1351.174773511498, 4565079.831118726, 3553816.363672882,
2822066.382887337, 11.43677145574, 0.013222653340200709, 1.7776952010619029e-06],
[1137.9157350997789, 1264.202938587985, 1362.310913050378, 3935633.4189007236, 3381585.1063012956,
5685500.530032963, 10.713570074941, 0.017868050845, 2.165278e-06],
[1103.683044712124, 1114.607364489448, 1419.872158616546, 3589704.202519048, 3381410.7237955173,
5793876.509322431, 12.013233977881999, 0.013809671326999999, 1.936649e-06],
[1128.0511659807232, 1257.4239927153367, 1362.310913050378, 3935633.4189007236, 3381585.1063012956,
5685500.530032963, 10.713570074941, 0.017868050845, 2.1650025557814735e-06],
[1103.683044712124, 1114.607364489448, 1419.872158616546, 3625963.6402100134, 3381410.7237955173,
5793876.509322431, 11.71199876673876, 0.01367658030408831, 2.4001855210031537e-06],
[1039.018276207625, 1145.028891106496, 1313.910403182829, 2933184.899608702, 5890961.479807982,
5730384.392981274, 11.257555580852, 0.015413925091, 1.781494e-06],
[1143.4387951608267, 1264.202938587985, 1362.310913050378, 3935633.4189007236, 3381585.1063012956,
5920157.272578891, 10.713570074941, 0.018024370678357375, 2.1650025557814735e-06],
[1104.4239043486102, 1114.607364489448, 1419.872158616546, 3625963.6402100134, 3381410.7237955173,
5793876.509322431, 11.71199876673876, 0.01367658030408831, 2.4001855210031537e-06],
[1039.018276207625, 1153.8582415325982, 1313.910403182829, 2933184.899608702, 2850090.527418211,
2951607.045186893, 11.257555580852, 0.015413925091, 1.781494e-06],
[1183.4996967827772, 1219.449161088396, 1351.174773511498, 4892238.0689851735, 3021703.552791283,
2825500.380147861, 11.266590332781, 0.013341161615, 1.716072e-06],
[1025.56871224164, 1109.5634116524457, 1306.648311432572, 4065276.9287967673, 2715754.5081717256,
5499059.652247055, 10.200095357793, 0.013625707946, 2.3722169999999998e-06],
[1181.71642134566, 1106.1331358152229, 1300.392678923062, 3188724.243480558, 3611549.2338148607,
6040859.706667053, 11.608820336322, 0.015677646923999998, 2.337928e-06],
[1000.135095386044, 1105.4306348685059, 1330.5493242905914, 4713546.073109317, 2662353.0558696883,
4484561.382528685, 10.334296325294885, 0.014366939106, 2.0449849189212174e-06],
[1000.6018390865513, 1100.7447212558154, 1381.4951036145428, 4846321.381285451, 2701805.7086814954,
4484561.382528685, 10.352115864391, 0.014327089983809603, 1.9053230239915903e-06],
[1039.018276207625, 1122.5444686286983, 1326.5430142598552, 2791094.7810776485, 2697731.1452343613,
3158915.6229445105, 11.255935596768929, 0.015413925091, 1.7812039681740136e-06],
[1113.362945387948, 1156.142543911252, 1318.7628524468, 4025728.5804803325, 6917121.327522967,
4324002.329611495, 8.410287456499, 0.013058706477, 1.860162e-06],
[1170.0160238258507, 1219.449161088396, 1351.174773511498, 4892238.0689851735, 3553816.363672882,
2825500.380147861, 11.43677145574, 0.013341161615, 1.716072e-06],
[1100.1063624541289, 1228.280521663599, 1327.008153045644, 3627678.357077309, 3038662.9625544758,
2532740.0207836498, 9.697531154957, 0.013744620276, 2.340356e-06],
[1162.985408391334, 1271.107772815279, 1426.265554917064, 2849919.1458090344, 6292860.96194586,
4700731.238906128, 10.835973545147, 0.016005238592, 1.891574e-06],
[1100.1063624541289, 1228.280521663599, 1327.008153045644, 3627678.357077309, 3552984.980490215,
2532740.0207836498, 9.440776451404325, 0.013744620276, 2.369752188687355e-06],
[1099.8334006943442, 1228.280521663599, 1319.2890643444373, 3627678.357077309, 3552984.980490215,
2532740.0207836498, 9.578804891430824, 0.013744620276, 2.369752188687355e-06],
[1099.8334006943442, 1228.280521663599, 1319.2890643444373, 3627678.357077309, 3552984.980490215,
2554942.3385524517, 9.340562434534428, 0.013744620276, 2.381564648675539e-06],
[1100.1063624541289, 1228.280521663599, 1327.008153045644, 3627678.357077309, 3552984.980490215,
2532740.0207836498, 9.440776451404325, 0.013744620276, 2.369752188687355e-06],
[1115.466323398068, 1257.584793961963, 1424.6533094655529, 2711709.7515290566, 5422188.248604289,
6125197.144713911, 9.869052328516, 0.016459766461, 1.64751e-06],
[1010.594960195638, 1114.607364489448, 1440.140121879473, 3826649.954397131, 3381410.7237955173,
2541919.4174867366, 11.71199876673876, 0.01367658030408831, 2.4001855210031537e-06],
[1115.466323398068, 1246.5797646495644, 1424.6533094655529, 2727002.312869729, 5177667.749296468,
6055735.99685296, 9.869052328516, 0.01678981800236394, 1.64751e-06],
[1181.71642134566, 1241.2917167358653, 1430.9982219213007, 3130727.9042818854, 3611549.2338148607,
6040859.706667053, 11.216198135126099, 0.015677646923999998, 2.337928e-06],
[1056.2370978971562, 1228.0621474080876, 1331.3214771996602, 3627678.357077309, 2998055.7538120616,
2532740.0207836498, 9.697531154957, 0.01319801981388231, 2.340356e-06],
[1115.466323398068, 1246.5797646495644, 1424.6533094655529, 2711709.7515290566, 5177667.749296468,
6125197.144713911, 9.869052328516, 0.01678981800236394, 1.64751e-06],
[1115.466323398068, 1246.5797646495644, 1424.6533094655529, 2711709.7515290566, 5177667.749296468,
6125197.144713911, 9.869052328516, 0.01678981800236394, 1.64751e-06],
[1100.1063624541289, 1228.280521663599, 1327.008153045644, 3627678.357077309, 2562820.6700545694,
2532740.0207836498, 9.697531154957, 0.013744620276, 2.340356e-06],
[1115.466323398068, 1257.584793961963, 1424.6533094655529, 2711709.7515290566, 5177667.749296468,
6125197.144713911, 9.869052328516, 0.016459766461, 1.64751e-06],
[1064.317474852996, 1125.693111980194, 1373.150130234432, 3726676.39046288, 4629853.564052632,
2709285.3525210335, 11.562097953987161, 0.012851621732462407, 2.065492e-06],
[1115.4440971511028, 1257.584793961963, 1335.811778824276, 2711709.7515290566, 5177667.749296468,
6125197.144713911, 8.41622197839763, 0.01810061756308735, 1.6652576526071669e-06],
[1121.556326096255, 1225.477552275013, 1419.57153976711, 2824159.735864557, 4668788.44424598,
4235528.204404335, 11.244808146052, 0.01805712000324544, 2.432848e-06],
[1134.3052340247398, 1266.4455156803888, 1430.258879312566, 2849919.1458090344, 5771719.995759816,
4700731.238906128, 9.427550057683757, 0.01638956964339133, 1.8465579641647771e-06],
[1003.6429065603127, 1107.897956860713, 1436.8036091630076, 3773196.911885251, 2715483.6685275612,
5499059.652247055, 10.200095357793, 0.013625707946, 1.8527773810483195e-06],
[1112.3524010915671, 1257.584793961963, 1310.427373334314, 2711709.7515290566, 5177667.749296468,
6125197.144713911, 8.41622197839763, 0.016637333018644673, 1.6652576526071669e-06],
[1128.0891752962004, 1257.584793961963, 1310.427373334314, 2711709.7515290566, 5177667.749296468,
6125197.144713911, 8.399765828035274, 0.017857864436581865, 1.6872256181843123e-06],
[1162.985408391334, 1271.107772815279, 1426.265554917064, 2849919.1458090344, 5911610.327873674,
4700731.238906128, 9.427550057683757, 0.015939573082178627, 1.8770206541805113e-06],
[1053.745648916394, 1125.693111980194, 1373.150130234432, 3726676.39046288, 4629853.564052632,
2709285.3525210335, 11.369921485045, 0.013701864499999999, 2.065492e-06],
[1147.501727915812, 1122.316640929817, 1304.635397750834, 2667086.1590371584, 3207967.3793078577,
4107081.9288311386, 11.198032372030648, 0.018402202852, 1.7812109999999999e-06],
[1006.917429294439, 1253.4752967539239, 1434.96949675091, 3991524.1814932027, 2715754.5081717256,
5499059.652247055, 10.200095357793, 0.013625707946, 2.3722169999999998e-06],
[1138.8994908064808, 1264.2034079590524, 1326.3993608570645, 3614288.9880884974, 3448028.61050569,
2613897.3925167606, 10.676487893570695, 0.013473647569965668, 2.369752188687355e-06],
[1089.945804052426, 1238.8150696891787, 1419.872158616546, 3617780.108636307, 3381410.7237955173,
5793876.509322431, 9.940957317476279, 0.013890054540421578, 1.937855678932045e-06],
[1121.556326096255, 1225.477552275013, 1423.939707863312, 2833336.6064214385, 4056131.6666533365,
4235528.204404335, 11.244808146052, 0.018099038041, 2.432848e-06],
[1135.680586055101, 1122.316640929817, 1304.635397750834, 2667086.1590371584, 3207967.3793078577,
4087908.14487714, 10.765864310135, 0.018402202852, 1.7812109999999999e-06],
[1017.0400643510857, 1253.4752967539239, 1434.96949675091, 3991524.1814932027, 2715754.5081717256,
5499059.652247055, 10.200095357793, 0.013625707946, 2.3722169999999998e-06],
[1098.218987361545, 1228.3923530007805, 1362.8611286943965, 3750480.6814249475, 2857960.75831493,
2974008.506144651, 9.477858635073746, 0.013821536692075082, 1.7741876259115089e-06],
[1154.407498959948, 1236.655253905034, 1351.174773511498, 4427474.235537298, 2542437.981751782,
2756512.928705282, 11.43677145574, 0.013341161615, 1.716072e-06],
[1006.7804880189733, 1107.897956860713, 1436.8036091630076, 3991524.1814932027, 2715483.6685275612,
5499059.652247055, 10.200095357793, 0.013625707946, 1.8527773810483195e-06],
[1157.058011260272, 1240.3487101897, 1332.891017396078, 2522732.7973377593, 6921050.360550507,
3461589.4238329353, 11.919213951914514, 0.018048349898, 2.019091e-06],
[1017.0400643510857, 1253.4752967539239, 1434.96949675091, 3991524.1814932027, 2715754.5081717256,
5499059.652247055, 10.200095357793, 0.013625707946, 2.3722169999999998e-06],
[1147.501727915812, 1122.316640929817, 1304.635397750834, 2667086.1590371584, 3207967.3793078577,
4107081.9288311386, 10.765864310135, 0.018402202852, 1.7812109999999999e-06],
[1002.8735112225839, 1222.315873787476, 1434.5695663290069, 2867030.052889139, 5041021.649316114,
3083413.354012654, 9.962370627938, 0.016459337889, 2.110682e-06],
[1015.7284995164233, 1253.4752967539239, 1434.96949675091, 3991524.1814932027, 2715754.5081717256,
5607520.642758014, 10.386857748765365, 0.013625707946, 2.3722169999999998e-06],
[1002.3679828289813, 1233.8201449012863, 1434.5695663290069, 2867030.052889139, 5041021.649316114,
3083413.354012654, 9.962370627938, 0.016812713242489365, 2.110682e-06],
[1147.501727915812, 1114.1127163538993, 1304.635397750834, 2667086.1590371584, 3207967.3793078577,
4107081.9288311386, 10.765864310135, 0.01837329413225449, 1.7812109999999999e-06],
[1154.407498959948, 1236.655253905034, 1351.174773511498, 4487711.842441496, 2586063.573298348,
2756512.928705282, 11.43677145574, 0.013341161615, 1.716072e-06],
[1157.058011260272, 1240.3487101897, 1307.5075760627185, 2522732.7973377593, 6921050.360550507,
3461589.4238329353, 11.919213951914514, 0.016585063159245916, 2.019091e-06],
[1135.736010842282, 1117.8172959384456, 1419.872158616546, 3589704.202519048, 3381410.7237955173,
5785882.1544099115, 12.013233977881999, 0.013809671326999999, 1.8866730357254492e-06],
[1154.407498959948, 1236.655253905034, 1342.9555486456536, 4487711.842441496, 2586063.573298348,
2756512.928705282, 11.43677145574, 0.013355360412473294, 1.716072e-06],
[1157.058011260272, 1240.3487101897, 1325.7833258513153, 2526832.2962593758, 6921050.360550507,
3555721.1998439874, 10.398248441320256, 0.018048349898, 2.019091e-06],
[1089.956830650977, 1114.607364489448, 1419.872158616546, 3617780.108636307, 3381410.7237955173,
5793876.509322431, 9.551867507206268, 0.013809671326999999, 1.937855678932045e-06],
[1148.9327261066758, 1153.6304141964065, 1304.551838382366, 2667086.1590371584, 3207967.3793078577,
4107081.9288311386, 10.76748429421807, 0.018402202852, 1.7815010318259866e-06],
[1121.556326096255, 1225.477552275013, 1423.939707863312, 2624686.8443433163, 4056131.6666533365,
4235528.204404335, 11.244808146052, 0.01801296639855309, 2.4490968385460116e-06],
[1098.218987361545, 1228.2800522789857, 1362.310913050378, 3949022.787889535, 2765020.178343959,
5685500.530032963, 9.477858635073746, 0.017868050845, 2.167855466685067e-06],
[1100.1063624541289, 1228.280521663599, 1327.008153045644, 3627678.357077309, 3038662.9625544758,
2532740.0207836498, 9.697531154957, 0.013744620276, 2.340356e-06]]
self.y_data = [[2612.6108434344833, 130.39766484633668, 0.09942136188993068],
[0.0008582001632409618, 69.44727686412561, 0.3071014948342446],
[84.39742275948912, 33.78314907324829, 265.4014460620532],
[932.6069641175573, 400.59055218545626, 749.2704358802002],
[1219.4217619547032, 483.40873541839017, 827.8547810464862],
[590.3969996969831, 361.1751758113917, 897.3239825360729],
[28.069171949228203, 1003.1312033418135, 1027.3516684209444],
[3293.6417749350526, 4000.296651387464, 2749.9811204802127],
[2689.888797413033, 1995.423712500253, 2913.153419949472],
[3641.408349259386, 4149.027032556554, 2979.8648961738068],
[64.95870380673843, 2037.0352581288441, 3142.3452838768544],
[5050.042406848, 3353.631362261292, 4046.1731460186083],
[3211.138664604769, 5346.419128787817, 4918.9261583296375],
[4258.619318468715, 5989.02266715898, 5220.525994374762],
[8936.189342070667, 9965.239758350204, 7563.000813830158],
[6021.767634834487, 8903.495264146026, 7814.127457858662],
[6017.482913124648, 9878.662058584368, 9219.928973267353],
[10383.663899872603, 11692.453275702828, 9778.423392895807],
[9959.714499578859, 12100.473650714735, 10815.113656904918],
[4728.725795408572, 9833.320115679087, 11373.539981017966],
[7040.3231430446585, 11659.214148088358, 11790.152463711856],
[6625.1594545319385, 10968.926234932243, 12625.282931427295],
[15642.10277555957, 16893.988266075045, 14863.564593947294],
[18607.931674621763, 19082.778723217332, 15013.161810006111],
[10626.98560266889, 14816.17950387497, 15506.383314966086],
[19339.61783872236, 19682.730138362174, 15674.580866406783],
[22189.138934738672, 21973.758051439312, 17867.900673260734],
[23690.59769921251, 25783.93771324694, 20809.23996609462],
[14967.7196505778, 22248.082939376614, 22731.8285500197],
[23423.407106244136, 28591.62813777676, 26664.35093389845],
[19075.645612367698, 27207.437259519545, 28128.538674419888],
[29997.930337349753, 33405.527835033296, 28873.62124640369],
[24820.89628524226, 33586.311728094224, 31719.51145267233],
[37282.62405854246, 38046.09208233556, 33387.387843928715],
[28410.627000789853, 35686.40139112738, 36220.30055895746],
[40471.242966725076, 43007.140870292154, 37868.621210244215],
[37398.14699282434, 42565.10811820573, 37870.35351039514],
[45479.57029289774, 47732.36757601205, 41595.563139189384],
[32552.29129704946, 44403.59499708624, 45751.504789978455],
[45786.72576366185, 50791.11058567032, 48223.58538251199],
[51020.88136265659, 54357.122848508094, 50465.40361704311],
[61881.73733921197, 59644.849846935205, 51400.45602940709],
[60457.47777367473, 60647.47618547739, 51432.39988001411],
[62646.79677225086, 58733.17045634589, 53097.700319228185],
[71247.0421208742, 72098.19782720233, 67199.0151309894],
[63957.85300621107, 77501.03448860586, 81158.49902713434],
[93260.27723239115, 94410.9579982488, 86854.8936256963],
[69247.51335865381, 84142.67184010833, 89262.52599409055],
[96023.63022667098, 99075.37552357498, 97413.0256048264],
[115089.56740480401, 109259.95186664716, 99354.46311566826],
[2612.6108434344833, 130.39766484633668, 0.09942136188993068],
[0.0008582001632409618, 69.44727686412561, 0.3071014948342446],
[1865.6343979646495, 4.4856785492880515, 112.17277986515508],
[23.975982777695172, 6.757366123987301, 191.0233853494245],
[84.39742275948912, 33.78314907324829, 265.4014460620532],
[28.069171949228203, 1003.1312033418135, 1027.3516684209444],
[3293.5437591554783, 130.18670022608515, 25.37306097654482],
[932.6069641175573, 400.59055218545626, 749.2704358802002],
[64.95870380673843, 2037.0352581288441, 3142.3452838768544],
[590.3969996969831, 361.1751758113917, 897.3239825360729],
[1195.5075793930594, 526.1504936826086, 932.2775739448363],
[615.7980966174894, 2371.6328956458747, 2197.19994390353],
[84.94494447429759, 2139.8419082286405, 3265.87568834472],
[1219.4217619547032, 483.40873541839017, 827.8547810464862],
[2724.5059366381524, 3420.7700687544716, 2292.325941125391],
[2144.00329464219, 4811.868522654193, 4324.293371510733],
[2689.888797413033, 1995.423712500253, 2913.153419949472],
[3227.723554235312, 3931.1954332489513, 2694.6979836887685],
[3102.5415684431687, 5156.512214548655, 4696.911509918105],
[5050.042406848, 3353.631362261292, 4046.1731460186083],
[3211.138664604769, 5346.419128787817, 4918.9261583296375],
[3293.6417749350526, 4000.296651387464, 2749.9811204802127],
[3636.4519255889963, 7701.036499127755, 8649.018536861058],
[3641.408349259386, 4149.027032556554, 2979.8648961738068],
[7094.934117160581, 4963.715116596285, 5681.255547927741],
[4258.619318468715, 5989.02266715898, 5220.525994374762],
[4728.725795408572, 9833.320115679087, 11373.539981017966],
[6021.767634834487, 8903.495264146026, 7814.127457858662],
[8936.189342070667, 9965.239758350204, 7563.000813830158],
[5108.537042399923, 8846.416734250804, 8264.988876899557],
[6017.482913124648, 9878.662058584368, 9219.928973267353],
[6749.343731353359, 9786.717933043677, 8863.214695950994],
[8699.036586466838, 11186.733448158204, 9991.249310163385],
[9685.64062202062, 10986.969888115693, 9156.021071890882],
[6854.662907936784, 11100.471712774037, 10984.746603997097],
[6625.1594545319385, 10968.926234932243, 12625.282931427295],
[7040.3231430446585, 11659.214148088358, 11790.152463711856],
[9955.857397038104, 11269.85863024913, 9404.157063167368],
[10383.663899872603, 11692.453275702828, 9778.423392895807],
[9959.714499578859, 12100.473650714735, 10815.113656904918],
[7855.450025109316, 13741.228884635286, 13987.2032403042],
[9539.580737744978, 15726.640872161857, 18121.942676224597],
[15272.943154076545, 16049.89530306775, 12544.873510185207],
[10626.98560266889, 14816.17950387497, 15506.383314966086],
[15642.10277555957, 16893.988266075045, 14863.564593947294],
[11972.885691299613, 16740.618540004456, 15604.373301592324],
[14967.7196505778, 22248.082939376614, 22731.8285500197],
[18607.931674621763, 19082.778723217332, 15013.161810006111],
[15600.764007884087, 20229.29363534731, 19865.958258264185],
[17299.3462527832, 22185.51377411193, 20908.129208334474],
[2612.6108434344833, 130.39766484633668, 0.09942136188993068],
[0.0008582001632409618, 69.44727686412561, 0.3071014948342446],
[1865.6343979646495, 4.4856785492880515, 112.17277986515508],
[1595.095376997128, 0.06101481612750454, 150.23668667291665],
[23.975982777695172, 6.757366123987301, 191.0233853494245],
[84.39742275948912, 33.78314907324829, 265.4014460620532],
[0.8807537900138819, 308.8706699734264, 244.54305628933315],
[348.7682075032219, 79.03123825619325, 251.45129387305434],
[3293.5437591554783, 130.18670022608515, 25.37306097654482],
[222.55731422260303, 146.9065009953099, 554.3482836092844],
[28.069171949228203, 1003.1312033418135, 1027.3516684209444],
[875.379542803012, 99.28324447428258, 466.83123685949204],
[932.6069641175573, 400.59055218545626, 749.2704358802002],
[64.95870380673843, 2037.0352581288441, 3142.3452838768544],
[590.3969996969831, 361.1751758113917, 897.3239825360729],
[419.4517590701555, 1833.403663275858, 1907.3039740444506],
[666.5041189662938, 369.17520959179217, 852.6852888192585],
[1195.5075793930594, 526.1504936826086, 932.2775739448363],
[615.7980966174894, 2371.6328956458747, 2197.19994390353],
[84.94494447429759, 2139.8419082286405, 3265.87568834472],
[1219.4217619547032, 483.40873541839017, 827.8547810464862],
[806.4250159418375, 2573.897296351246, 2788.011669077916],
[2189.475081270695, 1751.4863945013165, 2779.906539602099],
[1511.207677762512, 2088.7074393216253, 1254.3211433445492],
[2724.5059366381524, 3420.7700687544716, 2292.325941125391],
[2144.00329464219, 4811.868522654193, 4324.293371510733],
[2265.587989319462, 4618.719697500087, 3911.8825835162193],
[3337.817233042934, 2204.27579141391, 2869.87751326041],
[2689.888797413033, 1995.423712500253, 2913.153419949472],
[2850.780079119669, 3539.006290856695, 2384.2371130748134],
[5050.042406848, 3353.631362261292, 4046.1731460186083],
[3102.5415684431687, 5156.512214548655, 4696.911509918105],
[3227.723554235312, 3931.1954332489513, 2694.6979836887685],
[3155.6670736396673, 6458.660214702686, 6019.507672510742],
[3293.6417749350526, 4000.296651387464, 2749.9811204802127],
[3211.138664604769, 5346.419128787817, 4918.9261583296375],
[3333.657830110825, 4036.580611849707, 2778.6004285400895],
[3636.4519255889963, 7701.036499127755, 8649.018536861058],
[3641.408349259386, 4149.027032556554, 2979.8648961738068],
[3881.6456447322603, 4524.469986396184, 3141.099951069475],
[3641.863977178802, 7708.587433808557, 8656.844594135235],
[7094.934117160581, 4963.715116596285, 5681.255547927741],
[4028.4909074635952, 7529.888366003115, 7053.717520267663],
[3904.834571608711, 8901.031785138372, 10520.56571402266],
[4258.619318468715, 5989.02266715898, 5220.525994374762],
[4714.951634369974, 7990.049683599002, 7473.374103872339],
[4896.420563119286, 7895.896204411419, 7067.661070062295],
[8830.145285436984, 6453.843953175076, 7234.323435982808],
[4728.725795408572, 9833.320115679087, 11373.539981017966],
[5108.537042399923, 8846.416734250804, 8264.988876899557],
[2612.6108434344833, 130.39766484633668, 0.09942136188993068],
[0.0008582001632409618, 69.44727686412561, 0.3071014948342446],
[1865.6343979646495, 4.4856785492880515, 112.17277986515508],
[1595.095376997128, 0.06101481612750454, 150.23668667291665],
[1698.424036289286, 0.01299795111754957, 168.37538497510388],
[1865.6343979646495, 4.4856785492880515, 112.17277986515508],
[10.666841821962958, 0.10829104953011984, 120.397148384552],
| |
#!/usr/bin/env python
import pandas as pd
import numpy as np
import os, sys, enum, re, json, logging, time
import functools as fct
from .utils import timeit
logger = logging.getLogger(__name__)
__all__ = [
'RecomposeReplayResultProcessor',
'MonolithicReplayResultProcessor'
]
class AlignExitCode(enum.Enum):
OPTIMAL_ALIGNMENT = 1
FAILED_ALIGNMENT = 2
ENABLING_BLOCKED_BY_OUTPUT = 4
COST_FUNCTION_OVERFLOW = 8
HEURISTIC_FUNCTION_OVERFLOW = 16
TIMEOUT_REACHED = 32
STATE_LIMITED_REACHED = 64
COST_LIMIT_REACHED = 128
CANCELED = 256
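# Hedged helper sketch (assumption suggested by the power-of-two values above: exit codes
# may be combined as bit flags): decode a combined integer code into its AlignExitCode members.
def _decode_align_exitcode(code):
    """Return the list of AlignExitCode flags whose bit is set in `code`."""
    return [flag for flag in AlignExitCode if code & flag.value]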
class StatsColname(enum.Enum):
SP_LABEL = 'SP label'
ALIGNMENT_EXITCODE = 'Exit code for alignment'
ALIGNMENT_COST = 'Cost of the alignment'
TRANS_FIRED = 'Transitions fired'
MARKINGS_POLLED = 'Markings polled from queue'
MARKINGS_CLOSED = 'Markings added to closed set'
MARKINGS_QUEUED = 'Markings queued'
MARKINGS_REACHED = 'Markings reached'
HEURISTICS_COMPUTED = 'Heuristics computed'
HEURISTICS_ESTIMATED = 'Heuristics estimated'
HEURISTICS_DERIVED = 'Heuristics derived'
ALIGNMENT_COMPUTE_TIME = 'Time to compute alignment (us)'
HEURISTICS_COMPUTE_TIME = 'Time to compute heuristics (us)'
SETUP_TIME = 'Time to setup algorithm (us)'
TOTAL_TIME = 'Total Time including setup (us)'
N_SPLITS = 'Number of splits when splitting marking'
LOG_MOVE_COST = 'Log move cost of alignment'
MODEL_MOVE_COST = 'Model move cost of alignment'
SYNC_MOVE_COST = 'Synchronous move cost of alignment'
PREPROCESS_TIME = 'Pre-processing time (us)'
CONSTRAINTSET_SIZE = 'Size of the constraintset'
N_RESTARTS = 'Number of times replay was restarted'
MEMORY_TOTAL = 'total Memory (MB)'
MAX_QUEUE_LENGTH = 'Maximum queue length (elts)'
MAX_QUEUE_CAPACITY = 'Maximum queue capacity (elts)'
MAX_VISITED_SET_CAPACITY = 'Maximum capacity visited set (elts)'
MEMORY_APPROX_PEAK = 'Approximate peak memory used (kb)'
MEMORY_MAX = 'max Memory (MB)'
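# The enum values above are the literal column headers written by the replay
# tool, so an alignment statistics dataframe can be indexed with them directly.
# A minimal sketch (the csv path is illustrative, not part of this module):
#
#   df = pd.read_csv('trace-stats.csv')
#   optimal = df[df[StatsColname.ALIGNMENT_EXITCODE.value] ==
#                AlignExitCode.OPTIMAL_ALIGNMENT.value]
#   total_cost = optimal[StatsColname.ALIGNMENT_COST.value].sum()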
class MonolithicConfigToGrab(enum.Enum):
__order__ = 'MODEL ' \
'LOG ' \
'ALGORITHM_TYPE ' \
'DEBUG ' \
'MOVE_ON_LOG_COSTS ' \
'MOVE_ON_MODEL_COSTS ' \
'TIMEOUT_PER_TRACE_IN_SECS ' \
'MOVE_SORT ' \
'QUEUE_SORT ' \
'USE_INT ' \
'MAXIMUM_NUMBER_OF_STATES ' \
'COST_UPPER_BOUND ' \
'PREPROCESS_USING_PLACE_BASED_CONSTRAINTS ' \
'PREFER_EXACT ' \
'PARTIALLY_ORDER_EVENTS ' \
'INITIAL_SPLIT ' \
'N_THREADS'
MODEL = 'model'
LOG = 'log'
ALGORITHM_TYPE = 'algorithmType'
DEBUG = 'debug'
MOVE_ON_LOG_COSTS = 'moveOnLogCosts'
MOVE_ON_MODEL_COSTS = 'moveOnModelCosts'
TIMEOUT_PER_TRACE_IN_SECS = 'timeoutPerTraceInSecs'
MOVE_SORT = 'moveSort'
QUEUE_SORT = 'queueSort'
USE_INT = 'useInt'
MAXIMUM_NUMBER_OF_STATES = 'maximumNumberOfStates'
COST_UPPER_BOUND = 'costUpperBound'
PREPROCESS_USING_PLACE_BASED_CONSTRAINTS = 'preProcessUsingPlaceBasedConstraints'
PREFER_EXACT = 'preferExact'
PARTIALLY_ORDER_EVENTS = 'partiallyOrderEvents'
INITIAL_SPLIT = 'initialSplits'
N_THREADS = 'nThreads'
class RecomposeConfigToGrab(enum.Enum):
__order__ = 'MODEL ' \
'LOG ' \
'ALGORITHM_TYPE ' \
'DEBUG ' \
'MOVE_ON_LOG_COSTS ' \
'MOVE_ON_MODEL_COSTS ' \
'TIMEOUT_PER_TRACE_IN_SECS ' \
'MOVE_SORT ' \
'QUEUE_SORT ' \
'USE_INT ' \
'MAXIMUM_NUMBER_OF_STATES ' \
'COST_UPPER_BOUND ' \
'PREPROCESS_USING_PLACE_BASED_CONSTRAINTS ' \
'PREFER_EXACT ' \
'PARTIALLY_ORDER_EVENTS ' \
'INITIAL_SPLIT ' \
'N_THREADS ' \
'GLOBAL_DURATION ' \
'ALIGNMENT_PERCENTAGE ' \
'INTERVAL_ABSOLUTE ' \
'INTERVAL_RELATIVE ' \
'MAX_CONFLICTS ' \
'USE_HIDE_AND_REDUCE_ABSTRACTION ' \
'PREFER_BORDER_TRANS ' \
'DECOMPOSITION ' \
'LOG_CREATION_STRATEGY ' \
'RECOMPOSE_STRATEGY'
MODEL = 'model'
LOG = 'log'
ALGORITHM_TYPE = 'algorithmType'
DEBUG = 'debug'
MOVE_ON_LOG_COSTS = 'moveOnLogCosts'
MOVE_ON_MODEL_COSTS = 'moveOnModelCosts'
TIMEOUT_PER_TRACE_IN_SECS = 'timeoutPerTraceInSecs'
MOVE_SORT = 'moveSort'
QUEUE_SORT = 'queueSort'
USE_INT = 'useInt'
MAXIMUM_NUMBER_OF_STATES = 'maximumNumberOfStates'
COST_UPPER_BOUND = 'costUpperBound'
PREPROCESS_USING_PLACE_BASED_CONSTRAINTS = 'preProcessUsingPlaceBasedConstraints'
PREFER_EXACT = 'preferExact'
PARTIALLY_ORDER_EVENTS = 'partiallyOrderEvents'
INITIAL_SPLIT = 'initialSplits'
N_THREADS = 'nThreads'
GLOBAL_DURATION = 'globalDuration'
ALIGNMENT_PERCENTAGE = 'alignmentPercentage'
INTERVAL_ABSOLUTE = 'intervalAbsolute'
INTERVAL_RELATIVE = 'intervalRelative'
MAX_CONFLICTS = 'maxConflicts'
USE_HIDE_AND_REDUCE_ABSTRACTION = 'useHideAndReduceAbstraction'
PREFER_BORDER_TRANS = 'preferBorderTransitions'
DECOMPOSITION = 'decomposition'
LOG_CREATION_STRATEGY = 'logCreationStrategy'
RECOMPOSE_STRATEGY = 'recomposeStrategy'
class MonolithicReplayResultProcessor:
PROM_LOG_FILENAME = 'prom.log'
REPLAY_CONFIGS_FILENAME = 'configs.json'
N_ALIGNS = 'n_aligns'
N_VALID_ALIGNS = 'n_valid_aligns'
CLOCK_TIME = 'clock_time(ms)'
LOG_ALIGN_COST = 'log_align_cost'
# additional alignment statistics
N_LOG_MOVE = 'n_log_move'
N_MODEL_MOVE = 'n_model_move'
N_INVIS_MOVE = 'n_invis_move'
N_SYNC_MOVE = 'n_sync_move'
LOG_TRACE_LENGTH = 'log_trace_length'
MODEL_TRACE_LENGTH = 'model_trace_length'
def __init__(self, output_trace_df = True, output_log_df = True):
self.output_trace_df = output_trace_df
self.output_log_df = output_log_df
def get_alignment_stats(self, align_dirpath, log_move_cost, model_move_cost) -> pd.DataFrame:
"""Get statistics from alignments
:param align_dirpath: directory path to alignments
:param log_move_cost: log move cost
:param model_move_cost: model move cost
:return: dataframe containing statistics on alignments
"""
alignment_stats = dict()
        for fname in os.listdir(align_dirpath):
            fpath = os.path.join(align_dirpath, fname)
            if os.path.isfile(fpath):
                with open(fpath, 'r') as f:
_exitcode_name = f.readline()
exitcode = int(float(f.readline().rsplit()[0]))
_representative_caseid_name = f.readline()
representative_caseid = f.readline().rsplit()[0]
_caseids_name = f.readline()
_caseids = f.readline()
_move_name = f.readline()
if exitcode == AlignExitCode.OPTIMAL_ALIGNMENT.value:
cost, n_log, n_model, n_invis, n_sync, align_length, log_length, model_length = 0, 0, 0, 0, 0, 0, 0, 0
while True:
line = f.readline()
if not line:
break
align_length += 1
move_type, log_step, model_step = line.split(',')
if move_type == 'LMGOOD':
n_sync += 1
log_length += 1
model_length += 1
elif move_type == 'MINVI':
n_invis += 1
model_length += 1
elif move_type == 'L':
n_log += 1
log_length += 1
elif move_type == 'MREAL':
n_model += 1
model_length += 1
assert n_sync + n_invis + n_log + n_model == align_length, 'Sum of all move types should equal alignment length'
alignment_stats[representative_caseid] = [n_log, n_model, n_invis, n_sync, log_length, model_length]
else:
                        # placeholder values, one per statistic column, for non-optimal alignments
                        alignment_stats[representative_caseid] = [-1, -1, -1, -1, -1, -1]
# create dataframe
column_names = [
self.N_LOG_MOVE,
self.N_MODEL_MOVE,
self.N_INVIS_MOVE,
self.N_SYNC_MOVE,
self.LOG_TRACE_LENGTH,
self.MODEL_TRACE_LENGTH
]
column_values = list(zip(*list(alignment_stats.values())))
column_dict = dict(zip(column_names, column_values))
column_dict[StatsColname.SP_LABEL.value] = list(alignment_stats.keys())
df = pd.DataFrame(column_dict)
return df
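    # Usage sketch for the method above (paths and cost values are hypothetical):
    # the returned frame is keyed by StatsColname.SP_LABEL, so it can be merged
    # back onto the per-trace statistics, which is what enrich_trace_stats()
    # below does.
    #
    #   proc = MonolithicReplayResultProcessor()
    #   align_df = proc.get_alignment_stats('./replay/alignment', 1, 1)
    #   trace_df = pd.read_csv('./replay/trace-stats.csv')
    #   trace_df = trace_df.merge(align_df, on=StatsColname.SP_LABEL.value, how='left')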
def get_clock_time(self, replay_dirpath) -> float:
"""Get clock time of replay
:param replay_dirpath: directory path of replay results
:return: clock time in milliseconds
"""
SEGMENT = 'Clock time (ms): '
clocktime = -1
fpath = os.path.join(replay_dirpath, self.PROM_LOG_FILENAME)
with open(fpath, 'r') as f:
lines = f.readlines()
for line in lines:
if line.startswith(SEGMENT):
search_results = re.search(r'[^0-9]+([0-9]+\.[0-9]+)[^0-9]?', line)
clocktime = float(search_results.group(1))
break
if clocktime < 0:
raise RuntimeError('Cannot get clock time')
return clocktime
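    # The parser above expects a prom.log line of the form (value illustrative):
    #
    #   Clock time (ms): 123456.789
    #
    # i.e. the SEGMENT prefix followed by a float; a purely integral value would
    # not match the regex and would leave clocktime at -1.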
def get_log_alignment_cost(self, replay_dirpath) -> int:
"""Get log alignment cost
:param replay_dirpath: directory path to replay results
:return: alignment cost
"""
SEGMENT = 'Total costs: '
cost = -1
fpath = os.path.join(replay_dirpath, self.PROM_LOG_FILENAME)
with open(fpath, 'r') as f:
lines = f.readlines()
for line in lines:
if line.startswith(SEGMENT):
search_results = re.search(r'[^0-9]+([0-9]+)[^0-9]?', line)
cost = int(search_results.group(1))
break
if cost < 0:
raise RuntimeError('Cannot get log alignment cost')
return cost
def get_replay_configs(self, replay_dirpath) -> dict:
"""Get replay configurations
:param replay_dirpath: directory path to replay results
:return: replay configurations
"""
result = dict()
fpath = os.path.join(replay_dirpath, self.REPLAY_CONFIGS_FILENAME)
with open(fpath, 'r') as f:
json_dict = json.load(f)
for key in MonolithicConfigToGrab:
result[key.value] = json_dict[key.value]
return result
def get_n_valid_alignments(self, trace_stats_df) -> int:
"""Get the number of valid alignments. Assuming the input is a dataframe containing alignment statistics, we
just need to get the number of rows excluding the empty trace alignment that has optimal alignment exitcode.
:param trace_stats_df: dataframe containing alignment statistics on the trace level
:return: number of valid alignments
"""
assert isinstance(trace_stats_df, pd.DataFrame)
valid_aligns_df = trace_stats_df[(trace_stats_df[StatsColname.ALIGNMENT_EXITCODE.value] == AlignExitCode.OPTIMAL_ALIGNMENT.value)]
return valid_aligns_df.shape[0]
def get_log_size(self, replay_dirpath) -> int:
"""Get the size of the log, i.e., number of cases in the event log. Note that multiple cases can have the same
unique traces.
:param replay_dirpath: directory path to replay results
:return: log size
"""
SEGMENT = 'Aligned: '
size = -1
fpath = os.path.join(replay_dirpath, self.PROM_LOG_FILENAME)
with open(fpath, 'r') as f:
lines = f.readlines()
for line in lines:
if line.startswith(SEGMENT):
search_results = re.search(r'[^0-9]+([0-9]+)[^0-9]?', line)
size = int(search_results.group(1))
return size
def enrich_trace_stats(self, replay_dirpath) -> pd.DataFrame:
"""Enrich trace statistics with additional alignment statistics
:param replay_dirpath: directory path to replay results
        :return: dataframe containing replay statistics per trace
"""
fpath = os.path.join(replay_dirpath, 'trace-stats.csv')
df = pd.read_csv(fpath)
align_dirpath = os.path.join(replay_dirpath, 'alignment')
replay_configs = self.get_replay_configs(replay_dirpath)
log_move_cost = int(replay_configs[MonolithicConfigToGrab.MOVE_ON_LOG_COSTS.value])
model_move_cost = int(replay_configs[MonolithicConfigToGrab.MOVE_ON_MODEL_COSTS.value])
alignment_stats_df = self.get_alignment_stats(align_dirpath, log_move_cost, model_move_cost)
df = df.merge(alignment_stats_df, on=StatsColname.SP_LABEL.value, how='left')
return df
# getting stats dataframes
def get_log_stats(self, replay_dirpath) -> pd.DataFrame:
"""Get the aggregated alignment statistics dataframe on the log level. Aggregation is done across all traces in
the log.
:param replay_dirpath: directory path to replay experiment results
:return: dataframe containing aggregated alignment statistics
"""
clock_time = self.get_clock_time(replay_dirpath)
config_dict = self.get_replay_configs(replay_dirpath)
log_align_cost = self.get_log_alignment_cost(replay_dirpath)
trace_stats_df = self.process_trace_stats(replay_dirpath)
# aggregate across traces
to_sum = [
StatsColname.ALIGNMENT_COST.value,
StatsColname.TRANS_FIRED.value,
StatsColname.MARKINGS_POLLED.value,
StatsColname.MARKINGS_CLOSED.value,
StatsColname.MARKINGS_QUEUED.value,
StatsColname.MARKINGS_REACHED.value,
StatsColname.HEURISTICS_COMPUTED.value,
StatsColname.HEURISTICS_ESTIMATED.value,
StatsColname.HEURISTICS_DERIVED.value,
StatsColname.ALIGNMENT_COMPUTE_TIME.value,
StatsColname.HEURISTICS_COMPUTE_TIME.value,
StatsColname.SETUP_TIME.value,
StatsColname.TOTAL_TIME.value,
StatsColname.N_SPLITS.value,
StatsColname.LOG_MOVE_COST.value,
StatsColname.MODEL_MOVE_COST.value,
StatsColname.SYNC_MOVE_COST.value,
StatsColname.PREPROCESS_TIME.value,
StatsColname.CONSTRAINTSET_SIZE.value,
StatsColname.N_RESTARTS.value,
StatsColname.MEMORY_TOTAL.value
]
to_max = [
StatsColname.MAX_QUEUE_LENGTH.value,
StatsColname.MAX_QUEUE_CAPACITY.value,
StatsColname.MAX_VISITED_SET_CAPACITY.value,
StatsColname.MEMORY_MAX.value
]
no_empty_df = trace_stats_df[trace_stats_df[StatsColname.SP_LABEL.value] != 'Empty']
assert no_empty_df.shape[0] == trace_stats_df.shape[0] - 1, 'dataframe excluding empty trace should have 1 less row'
log_sum_df = no_empty_df[to_sum].sum(axis=0).to_frame().transpose()
log_max_df = no_empty_df[to_max].max(axis=0).to_frame().transpose()
log_stats_df = pd.concat([log_sum_df, log_max_df], axis=1)
log_stats_df[self.N_ALIGNS] = no_empty_df.shape[0]
n_valid_aligns_df = no_empty_df[StatsColname.ALIGNMENT_EXITCODE.value].value_counts()
if AlignExitCode.OPTIMAL_ALIGNMENT.value in n_valid_aligns_df.index:
n_valid_aligns = n_valid_aligns_df.loc[AlignExitCode.OPTIMAL_ALIGNMENT.value]
else:
n_valid_aligns = 0
log_stats_df[self.CLOCK_TIME] = clock_time
[6], [4], [4]],
[[4], [4], [6], [6], [4], [4]],
[[6], [6], [9], [9], [6], [6]],
[[6], [6], [9], [9], [6], [6]],
[[4], [4], [6], [6], [4], [4]],
[[4], [4], [6], [6], [4], [4]]]])
def test_depthwise_conv2d_3x3_dilation2_stride2_error(self):
with self.assertRaises(ValueError):
_ = layers.DepthwiseConv2D(
kernel_size=(3, 3),
strides=2,
dilation_rates=2,
depthwise_initializer=tf.initializers.ones())
def test_depthwise_conv2d_3x3_stride2_int_kernel_size_and_strides(self):
layer = layers.DepthwiseConv2D(
kernel_size=3,
strides=2,
depthwise_initializer=tf.initializers.ones())
inputs = tf.ones([1, 6, 6, 1])
output_shape = layer.build(inputs.shape)
output = layer.apply(inputs, training=True)
self.assertEqual(output_shape, output.shape)
self.evaluate(tf.global_variables_initializer())
self.assertAllEqual(
self.evaluate(output),
[[[[9], [9], [6]],
[[9], [9], [6]],
[[6], [6], [4]]]])
def test_depthwise_conv2d_output_shape(self):
for image_size in [1, 2, 3, 32, 201, 224]:
inputs = tf.ones([32, image_size, image_size, 1])
for kernel_size in [1, 2, 3, 4, 5]:
for strides in [1, 2, 3]:
layer = layers.DepthwiseConv2D(
kernel_size=(kernel_size, kernel_size),
strides=(strides, strides),
depthwise_initializer=tf.initializers.ones())
output_shape = layer.build(inputs.shape)
output = layer.apply(inputs, training=True)
self.assertEqual(output.shape, output_shape)
def test_depthwise_conv2d_trainable_tensors(self):
layer = layers.DepthwiseConv2D(
kernel_size=(3, 3),
depthwise_initializer=tf.initializers.ones())
input_shape = tf.TensorShape([1, 6, 6, 1])
layer.build(input_shape)
trainable_tensors = layer.trainable_tensors()
self.assertNotEmpty(trainable_tensors)
for tensor in trainable_tensors:
self.assertIsInstance(tensor, tf.Tensor)
self.evaluate(tf.global_variables_initializer())
self.assertAllClose(
self.evaluate(layer.trainable_tensors()),
self.evaluate(layer.trainable_variables()))
def test_depthwise_conv2d_kernel_regularization_loss(self):
layer = layers.DepthwiseConv2D(
kernel_size=(3, 3),
depthwise_initializer=tf.initializers.constant(0.5),
depthwise_regularizer=tf.keras.regularizers.l2(3.0))
inputs = tf.random_uniform([32, 28, 28, 8])
layer.build(inputs.shape)
layer.apply(inputs, training=True)
# Number of parameters in the convolutional kernel.
self.evaluate(tf.global_variables_initializer())
kernel_params = 3 * 3 * 8 * 1
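    # With every kernel weight initialized to 0.5, the expected L2 penalty is
    # l2_coefficient * sum(w**2) = 3.0 * kernel_params * 0.5**2, which is what
    # the assertion below checks.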
self.assertAllClose(
kernel_params * 3.0 * 0.5**2,
self.evaluate(layer.regularization_loss()))
def test_stateless_batch_norm(self):
layer = layers.BatchNorm(
center=True,
scale=True,
beta_initializer=tf.initializers.zeros(),
gamma_initializer=tf.initializers.ones(),
epsilon=1e-12,
stateful=False)
inputs = tf.random_uniform([32, 28, 28, 16])
output_shape = layer.build(inputs.shape)
output = layer.apply(inputs, training=True)
self.assertEqual(output.shape, output_shape)
mean, std = tf.nn.moments(output, axes=[0, 1, 2])
self.evaluate(tf.global_variables_initializer())
self.assertAllClose(
self.evaluate(mean),
np.zeros([16]),
rtol=1e-3,
atol=1e-3)
self.assertAllClose(
self.evaluate(std),
np.ones([16]),
rtol=1e-3,
atol=1e-3)
self.assertLen(layer.trainable_tensors(), 2)
self.assertAllClose(
self.evaluate(layer.trainable_tensors()),
self.evaluate(layer.trainable_variables()))
def test_stateful_batch_norm(self):
layer = layers.BatchNorm(
momentum=0.0,
center=True,
scale=True,
beta_initializer=tf.initializers.zeros(),
gamma_initializer=tf.initializers.ones(),
epsilon=1e-12,
stateful=True)
inputs_bias = tf.placeholder(dtype=tf.float32, shape=())
inputs = tf.random_normal([32, 28, 28, 16]) + inputs_bias
output_shape = layer.build(inputs.shape)
train_output = layer.apply(inputs, training=True)
eval_output = layer.apply(inputs, training=False)
self.assertEqual(train_output.shape, output_shape)
self.assertEqual(eval_output.shape, output_shape)
update_ops = layer.updates()
self.assertLen(update_ops, 2)
self.assertCountEqual(
update_ops,
tf.get_collection(tf.GraphKeys.UPDATE_OPS))
with self.cached_session() as sess:
sess.run(tf.global_variables_initializer())
# After initialization, moving average will be 0s and moving variance will
# be 1s. Evaluating with training=False on any input should return similar
# input values (also assuming gamma=1 and beta=0).
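      # Concretely, with gamma = 1, beta = 0, moving_mean = 0, moving_var = 1:
      #   output = gamma * (x - moving_mean) / sqrt(moving_var + eps) + beta ~ x
      #   (eps = 1e-12, so the normalization is effectively the identity here).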
inputs_array, eval_array = sess.run([inputs, eval_output],
{inputs_bias: 5.0})
self.assertAllClose(eval_array, inputs_array, atol=0.0001)
# Since the batch norm momentum is 0, we'll set the moving average
# statistics for the batch norm equal to the statistics for the current
# batch.
sess.run(update_ops, {inputs_bias: 2.0})
# Evaluate a batch of input examples with the same input distribution
# that was seen during training.
eval_array = sess.run(eval_output, {inputs_bias: 2.0})
self.assertAllClose(
np.mean(eval_array, axis=(0, 1, 2)),
np.zeros([16]),
rtol=0.1,
atol=0.1)
self.assertAllClose(
np.std(eval_array, axis=(0, 1, 2)),
np.ones([16]),
rtol=0.1,
atol=0.1)
# Verify that the batch norm op is actually stateful and running in eval
# mode by changing the mean of the input distribution and verifying that
# the mean of the output distribution also changes.
eval_array = sess.run(eval_output, {inputs_bias: 4.0})
self.assertAllClose(
np.mean(eval_array, axis=(0, 1, 2)),
np.full([16], 2.0),
rtol=0.1,
atol=0.1)
def test_masked_conv2d_3x3(self):
layer = layers.MaskedConv2D(
kernel_size=(3, 3),
input_mask=layers.create_mask([1, 2], 0),
output_mask=layers.create_mask([1, 2], 0),
kernel_initializer=tf.initializers.ones())
inputs = tf.ones([1, 6, 6, 2])
output_shape = layer.build(inputs.shape)
output = layer.apply(inputs, training=True)
self.assertEqual(output_shape, output.shape)
self.evaluate(tf.global_variables_initializer())
self.assertAllEqual(
self.evaluate(output),
[[[[4, 0], [6, 0], [6, 0], [6, 0], [6, 0], [4, 0]],
[[6, 0], [9, 0], [9, 0], [9, 0], [9, 0], [6, 0]],
[[6, 0], [9, 0], [9, 0], [9, 0], [9, 0], [6, 0]],
[[6, 0], [9, 0], [9, 0], [9, 0], [9, 0], [6, 0]],
[[6, 0], [9, 0], [9, 0], [9, 0], [9, 0], [6, 0]],
[[4, 0], [6, 0], [6, 0], [6, 0], [6, 0], [4, 0]]]])
def test_masked_conv2d_3x3_explicit_padding(self):
layer = layers.MaskedConv2D(
kernel_size=(3, 3),
input_mask=layers.create_mask([1, 2], 0),
output_mask=layers.create_mask([1, 2], 0),
kernel_initializer=tf.initializers.ones(),
use_explicit_padding=True)
inputs = tf.ones([1, 6, 6, 2])
output_shape = layer.build(inputs.shape)
output = layer.apply(inputs, training=True)
self.assertEqual(output_shape, output.shape)
self.evaluate(tf.global_variables_initializer())
self.assertAllEqual(
self.evaluate(output),
[[[[4, 0], [6, 0], [6, 0], [6, 0], [6, 0], [4, 0]],
[[6, 0], [9, 0], [9, 0], [9, 0], [9, 0], [6, 0]],
[[6, 0], [9, 0], [9, 0], [9, 0], [9, 0], [6, 0]],
[[6, 0], [9, 0], [9, 0], [9, 0], [9, 0], [6, 0]],
[[6, 0], [9, 0], [9, 0], [9, 0], [9, 0], [6, 0]],
[[4, 0], [6, 0], [6, 0], [6, 0], [6, 0], [4, 0]]]])
def test_masked_conv2d_3x3_stride2_explicit_padding(self):
layer = layers.MaskedConv2D(
kernel_size=(3, 3),
strides=(2, 2),
input_mask=layers.create_mask([1, 2], 0),
output_mask=layers.create_mask([1, 2], 0),
kernel_initializer=tf.initializers.ones(),
use_explicit_padding=True)
inputs = tf.ones([1, 6, 6, 2])
output_shape = layer.build(inputs.shape)
output = layer.apply(inputs, training=True)
self.assertEqual(output_shape, output.shape)
self.evaluate(tf.global_variables_initializer())
self.assertAllEqual(
self.evaluate(output),
[[[[4, 0], [6, 0], [6, 0]],
[[6, 0], [9, 0], [9, 0]],
[[6, 0], [9, 0], [9, 0]]]])
def test_masked_conv2d_3x3_with_none_input_mask(self):
layer = layers.MaskedConv2D(
kernel_size=(3, 3),
input_mask=None,
output_mask=layers.create_mask([1, 2], 0),
kernel_initializer=tf.initializers.ones())
inputs = tf.ones([1, 6, 6, 2])
output_shape = layer.build(inputs.shape)
output = layer.apply(inputs, training=True)
self.assertEqual(output_shape, output.shape)
self.evaluate(tf.global_variables_initializer())
self.assertAllEqual(
self.evaluate(output),
[[[[8, 0], [12, 0], [12, 0], [12, 0], [12, 0], [8, 0]],
[[12, 0], [18, 0], [18, 0], [18, 0], [18, 0], [12, 0]],
[[12, 0], [18, 0], [18, 0], [18, 0], [18, 0], [12, 0]],
[[12, 0], [18, 0], [18, 0], [18, 0], [18, 0], [12, 0]],
[[12, 0], [18, 0], [18, 0], [18, 0], [18, 0], [12, 0]],
[[8, 0], [12, 0], [12, 0], [12, 0], [12, 0], [8, 0]]]])
def test_masked_conv2d_3x3_bias(self):
layer = layers.MaskedConv2D(
kernel_size=(3, 3),
input_mask=layers.create_mask([1, 2], 0),
output_mask=layers.create_mask([1, 2], 0),
kernel_initializer=tf.initializers.ones(),
bias_initializer=tf.initializers.constant(0.5),
use_bias=True)
inputs = tf.ones([1, 6, 6, 2])
output_shape = layer.build(inputs.shape)
output = layer.apply(inputs, training=True)
self.assertEqual(output_shape, output.shape)
self.evaluate(tf.global_variables_initializer())
self.assertAllEqual(
self.evaluate(output),
[[[[4.5, 0], [6.5, 0], [6.5, 0], [6.5, 0], [6.5, 0], [4.5, 0]],
[[6.5, 0], [9.5, 0], [9.5, 0], [9.5, 0], [9.5, 0], [6.5, 0]],
[[6.5, 0], [9.5, 0], [9.5, 0], [9.5, 0], [9.5, 0], [6.5, 0]],
[[6.5, 0], [9.5, 0], [9.5, 0], [9.5, 0], [9.5, 0], [6.5, 0]],
[[6.5, 0], [9.5, 0], [9.5, 0], [9.5, 0], [9.5, 0], [6.5, 0]],
[[4.5, 0], [6.5, 0], [6.5, 0], [6.5, 0], [6.5, 0], [4.5, 0]]]])
def test_masked_conv2d_3x3_int_kernel_size_and_strides(self):
layer = layers.MaskedConv2D(
kernel_size=3,
input_mask=layers.create_mask([1, 2], 0),
output_mask=layers.create_mask([1, 2], 0),
strides=2,
kernel_initializer=tf.initializers.ones())
inputs = tf.ones([1, 6, 6, 2])
output_shape = layer.build(inputs.shape)
output = layer.apply(inputs, training=True)
self.assertEqual(output_shape, output.shape)
self.evaluate(tf.global_variables_initializer())
self.assertAllEqual(
self.evaluate(output),
[[[[9, 0], [9, 0], [6, 0]],
[[9, 0], [9, 0], [6, 0]],
[[6, 0], [6, 0], [4, 0]]]])
def test_masked_conv2d_output_shape(self):
for image_size in [1, 2, 3, 32, 201, 224]:
inputs = tf.ones([32, image_size, image_size, 2])
for kernel_size in [1, 2, 3, 4, 5]:
for strides in [1, 2, 3]:
layer = layers.MaskedConv2D(
kernel_size=(kernel_size, kernel_size),
input_mask=layers.create_mask([1, 2], 0),
output_mask=layers.create_mask([1, 2], 0),
strides=(strides, strides),
kernel_initializer=tf.initializers.ones())
output_shape = layer.build(inputs.shape)
output = layer.apply(inputs, training=True)
self.assertEqual(output.shape, output_shape)
def test_masked_conv2d_trainable_tensors(self):
layer = layers.MaskedConv2D(
kernel_size=(3, 3),
input_mask=layers.create_mask([1, 2], 0),
output_mask=layers.create_mask([1, 2], 0),
kernel_initializer=tf.initializers.ones())
input_shape = tf.TensorShape([1, 6, 6, 2])
layer.build(input_shape)
trainable_tensors = layer.trainable_tensors()
self.assertNotEmpty(trainable_tensors)
for tensor in trainable_tensors:
self.assertIsInstance(tensor, tf.Tensor)
self.evaluate(tf.global_variables_initializer())
self.assertAllClose(
self.evaluate(layer.trainable_tensors()),
self.evaluate(layer.trainable_variables()))
def test_masked_conv2d_kernel_regularization_loss(self):
layer = layers.MaskedConv2D(
kernel_size=(3, 3),
input_mask=layers.create_mask([2, 4, 8], 1),
output_mask=layers.create_mask([3, 6, 12], 1),
kernel_initializer=tf.initializers.constant(0.5),
kernel_regularizer=tf.keras.regularizers.l2(3.0))
inputs = tf.random_uniform([32, 28, 28, 8])
layer.build(inputs.shape)
layer.apply(inputs, training=True)
# Number of parameters in the convolutional kernel.
self.evaluate(tf.global_variables_initializer())
kernel_params = 3 * 3 * 4 * 6
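    # 3x3 kernel over the masked channel counts: create_mask([2, 4, 8], 1)
    # presumably leaves 4 of the 8 input channels active and
    # create_mask([3, 6, 12], 1) 6 of the 12 output channels, so only
    # 3 * 3 * 4 * 6 weights contribute to the L2 penalty of
    # l2_coefficient * num_params * 0.5**2 asserted below.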
self.assertAllClose(
kernel_params * 3.0 * 0.5**2,
self.evaluate(layer.regularization_loss()))
def test_masked_conv2d_kernel_regularization_loss_with_none_input_mask(self):
layer = layers.MaskedConv2D(
kernel_size=(3, 3),
input_mask=None,
output_mask=layers.create_mask([3, 6, 12], 1),
kernel_initializer=tf.initializers.constant(0.5),
kernel_regularizer=tf.keras.regularizers.l2(3.0))
inputs = tf.random_uniform([32, 28, 28, 8])
layer.build(inputs.shape)
layer.apply(inputs, training=True)
# Number of parameters in the convolutional kernel.
self.evaluate(tf.global_variables_initializer())
kernel_params = 3 * 3 * 8 * 6
self.assertAllClose(
kernel_params * 3.0 * 0.5**2,
self.evaluate(layer.regularization_loss()))
def test_masked_conv2d_bias_regularization_loss(self):
layer = layers.MaskedConv2D(
kernel_size=(3, 3),
input_mask=layers.create_mask([2, 4, 8], 1),
output_mask=layers.create_mask([3, 6, 12], 1),
bias_initializer=tf.initializers.constant(0.5),
bias_regularizer=tf.keras.regularizers.l2(3.0),
use_bias=True)
inputs = tf.random_uniform([32, 28, 28, 8])
layer.build(inputs.shape)
layer.apply(inputs, training=True)
# Number of parameters in the convolutional kernel.
self.evaluate(tf.global_variables_initializer())
bias_params = 6
self.assertAllClose(
bias_params * 3.0 * 0.5**2,
self.evaluate(layer.regularization_loss()))
def test_masked_depthwise_conv2d_3x3(self):
layer = layers.MaskedDepthwiseConv2D(
kernel_size=(3, 3),
mask=layers.create_mask([1, 2], 0),
depthwise_initializer=tf.initializers.ones())
inputs = tf.ones([1, 6, 6, 2])
output_shape = layer.build(inputs.shape)
output = layer.apply(inputs, training=True)
self.assertEqual(output_shape, output.shape)
self.evaluate(tf.global_variables_initializer())
self.assertAllEqual(
self.evaluate(output),
[[[[4, 0], [6, 0], [6, 0], [6, 0], [6, 0], [4, 0]],
[[6, 0], [9, 0], [9, 0], [9, 0], [9, 0], [6, 0]],
[[6, 0], [9, 0], [9, 0], [9, 0], [9, 0], | |
not report_buffer:
return -1
print("\nReading Input Report (length {})...".format(size))
r = usb.control_transfer(handle,
usb.LIBUSB_ENDPOINT_IN |
usb.LIBUSB_REQUEST_TYPE_CLASS |
usb.LIBUSB_RECIPIENT_INTERFACE,
HID_GET_REPORT,
(HID_REPORT_TYPE_INPUT << 8) | 0x00, 0,
report_buffer, ct.c_uint16(size),
5000)
if r >= 0:
display_buffer_hex(report_buffer, size)
else:
if r == usb.LIBUSB_ERROR_TIMEOUT:
print(" Timeout! Please make sure you act on the device within the 5 seconds allocated...")
elif r == usb.LIBUSB_ERROR_PIPE:
print(" Detected stall - resetting pipe...")
usb.clear_halt(handle, 0)
else:
print(" Error: {}".format(usb.strerror(usb.error(r))))
# Attempt a bulk read from endpoint 0 (this should just return a raw input report)
print("\nTesting interrupt read using endpoint {:02X}...".format(endpoint_in))
    # the number of bytes actually transferred is reported through an int pointer
    transferred = ct.c_int(0)
    r = usb.interrupt_transfer(handle, endpoint_in, report_buffer, size, ct.byref(transferred), 5000)
if r >= 0:
display_buffer_hex(report_buffer, size)
else:
print(" {}".format(usb.strerror(usb.error(r))))
free(report_buffer)
return 0
#static
#@annotate(handle=ct.POINTER(usb.device_handle), ct.c_uint8 bRequest, int iface_number)
def read_ms_winsub_feature_descriptors(handle, bRequest, iface_number):
# Read the MS WinUSB Feature Descriptors, that are used on Windows 8 for automated driver installation
MAX_OS_FD_LENGTH = 256
#int r;
os_desc = (ct.c_uint8 * MAX_OS_FD_LENGTH)()
class struct_os_fd(ct.Structure):
_fields_ = [
("desc", ct.c_char_p),
("recipient", ct.c_uint8),
("index", ct.c_uint16),
("header_size", ct.c_uint16),
]
os_fd = [
struct_os_fd(b"Extended Compat ID", usb.LIBUSB_RECIPIENT_DEVICE, 0x0004, 0x10),
struct_os_fd(b"Extended Properties", usb.LIBUSB_RECIPIENT_INTERFACE, 0x0005, 0x0A),
]
if iface_number < 0:
return
# WinUSB has a limitation that forces wIndex to the interface number when issuing
# an Interface Request. To work around that, we can force a Device Request for
# the Extended Properties, assuming the device answers both equally.
if force_device_request:
os_fd[1].recipient = usb.LIBUSB_RECIPIENT_DEVICE
for i in range(2):
print("\nReading {} OS Feature Descriptor (wIndex = 0x%04d):".format(
os_fd[i].desc, os_fd[i].index))
# Read the header part
r = usb.control_transfer(handle,
ct.c_uint8(usb.LIBUSB_ENDPOINT_IN |
usb.LIBUSB_REQUEST_TYPE_VENDOR |
os_fd[i].recipient),
bRequest,
ct.c_uint16((iface_number << 8) | 0x00), os_fd[i].index,
os_desc, os_fd[i].header_size,
1000)
if r < os_fd[i].header_size:
perr(" Failed: {}", usb.strerror(usb.error(r)) if r < 0 else "header size is too small")
return
le_type_punning_IS_fine = ct.cast(os_desc, ct.c_void_p)
length = ct.cast(le_type_punning_IS_fine, ct.POINTER(ct.c_uint32))[0].value # ct.c_uint32
length = min(length, MAX_OS_FD_LENGTH)
# Read the full feature descriptor
r = usb.control_transfer(handle,
ct.c_uint8(usb.LIBUSB_ENDPOINT_IN |
usb.LIBUSB_REQUEST_TYPE_VENDOR |
os_fd[i].recipient),
bRequest,
ct.c_uint16((iface_number << 8) | 0x00), os_fd[i].index,
os_desc, ct.c_uint16(length),
1000)
if r < 0:
perr(" Failed: {}", usb.strerror(usb.error(r)))
return
else:
display_buffer_hex(os_desc, r)
#@annotate(dev_cap=ct.POINTER(usb.bos_dev_capability_descriptor))
def print_device_cap(dev_cap):
if dev_cap[0].bDevCapabilityType == usb.LIBUSB_BT_USB_2_0_EXTENSION:
usb_2_0_ext = ct.POINTER(usb.usb_2_0_extension_descriptor)()
usb.get_usb_2_0_extension_descriptor(None, dev_cap, ct.byref(usb_2_0_ext))
if usb_2_0_ext:
print(" USB 2.0 extension:")
print(" attributes : {:02X}".format(usb_2_0_ext[0].bmAttributes))
usb.free_usb_2_0_extension_descriptor(usb_2_0_ext)
elif dev_cap[0].bDevCapabilityType == usb.LIBUSB_BT_SS_USB_DEVICE_CAPABILITY:
ss_usb_device_cap = ct.POINTER(usb.ss_usb_device_capability_descriptor)()
usb.get_ss_usb_device_capability_descriptor(None, dev_cap, ct.byref(ss_usb_device_cap))
if ss_usb_device_cap:
print(" USB 3.0 capabilities:")
print(" attributes : {:02X}".format(ss_usb_device_cap[0].bmAttributes))
print(" supported speeds : {:04X}".format(ss_usb_device_cap[0].wSpeedSupported))
print(" supported functionality: {:02X}".format(ss_usb_device_cap[0].bFunctionalitySupport))
usb.free_ss_usb_device_capability_descriptor(ss_usb_device_cap)
elif dev_cap[0].bDevCapabilityType == usb.LIBUSB_BT_CONTAINER_ID:
container_id = ct.POINTER(usb.container_id_descriptor)()
usb.get_container_id_descriptor(None, dev_cap, ct.byref(container_id))
if container_id:
print(" Container ID:\n {}".format(uuid_to_string(container_id[0].ContainerID)))
usb.free_container_id_descriptor(container_id)
else:
print(" Unknown BOS device capability {:02x}:".format(dev_cap[0].bDevCapabilityType))
#static
#@annotate(int, ct.c_uint16 vid, ct.c_uint16 pid)
def test_device(vid, pid):
#int r;
speed_name = [
"Unknown",
"1.5 Mbit/s (USB LowSpeed)",
"12 Mbit/s (USB FullSpeed)",
"480 Mbit/s (USB HighSpeed)",
"5000 Mbit/s (USB SuperSpeed)",
"10000 Mbit/s (USB SuperSpeedPlus)",
]
print("Opening device {:04X}:{:04X}...".format(vid, pid))
#handle = ct.POINTER(usb.device_handle)()
handle = usb.open_device_with_vid_pid(None, vid, pid)
if not handle:
perr(" Failed.\n")
return -1
endpoint_in = 0 # default IN endpoint
endpoint_out = 0 # default OUT endpoint
try:
dev = usb.get_device(handle) # usb.device*
bus = usb.get_bus_number(dev) # ct.c_uint8
if extra_info:
port_path = (ct.c_uint8 * 8)()
r = usb.get_port_numbers(dev, port_path, ct.sizeof(port_path))
if r > 0:
print("\nDevice properties:")
print(" bus number: {}".format(bus))
print(" port path: {}".format(port_path[0]), end="")
for i in range(1, r):
print("->{}".format(port_path[i]), end="")
print(" (from root hub)")
r = usb.get_device_speed(dev)
if r < 0 or r > 5: r = 0
print(" speed: {}".format(speed_name[r]))
print("\nReading device descriptor:")
dev_desc = usb.device_descriptor()
r = usb.get_device_descriptor(dev, ct.byref(dev_desc))
if r < 0:
return err_exit(r)
print(" length: {}".format(dev_desc.bLength))
print(" device class: {}".format(dev_desc.bDeviceClass))
print(" S/N: {}".format(dev_desc.iSerialNumber))
print(" VID:PID: {:04X}:{:04X}".format(dev_desc.idVendor,
dev_desc.idProduct))
print(" bcdDevice: {:04X}".format(dev_desc.bcdDevice))
print(" iMan:iProd:iSer: {}:{}:{}".format(
dev_desc.iManufacturer, dev_desc.iProduct, dev_desc.iSerialNumber))
print(" nb confs: {}".format(dev_desc.bNumConfigurations))
# Copy the string descriptors for easier parsing
string_index = (ct.c_uint8 * 3)() # indexes of the string descriptors
string_index[0] = dev_desc.iManufacturer
string_index[1] = dev_desc.iProduct
string_index[2] = dev_desc.iSerialNumber
print("\nReading BOS descriptor: ", end="")
        bos_desc = ct.POINTER(usb.bos_descriptor)()
if usb.get_bos_descriptor(handle, ct.byref(bos_desc)) == usb.LIBUSB_SUCCESS:
print("{} caps".format(bos_desc[0].bNumDeviceCaps))
for i in range(bos_desc[0].bNumDeviceCaps):
print_device_cap(bos_desc[0].dev_capability[i])
usb.free_bos_descriptor(bos_desc)
else:
print("no descriptor")
print("\nReading first configuration descriptor:")
        conf_desc = ct.POINTER(usb.config_descriptor)()
r = usb.get_config_descriptor(dev, 0, ct.byref(conf_desc))
if r < 0:
return err_exit(r)
nb_ifaces = conf_desc[0].bNumInterfaces # int
print(" nb interfaces: {}".format(nb_ifaces))
first_iface = (conf_desc[0].usb_interface[0].altsetting[0].bInterfaceNumber
if nb_ifaces > 0 else -1)
for i in range(nb_ifaces):
usb_interface = conf_desc[0].usb_interface[i]
print(" interface[{}]: id = {}".format(
i, usb_interface.altsetting[0].bInterfaceNumber))
for j in range(usb_interface.num_altsetting):
altsetting = usb_interface.altsetting[j]
print("interface[{}].altsetting[{}]: num endpoints = {}".format(
i, j, altsetting.bNumEndpoints))
print(" Class.SubClass.Protocol: {:02X}.{:02X}.{:02X}".format(
altsetting.bInterfaceClass,
altsetting.bInterfaceSubClass,
altsetting.bInterfaceProtocol))
if (altsetting.bInterfaceClass == usb.LIBUSB_CLASS_MASS_STORAGE and
(altsetting.bInterfaceSubClass == 0x01 or
altsetting.bInterfaceSubClass == 0x06) and
altsetting.bInterfaceProtocol == 0x50):
# Mass storage devices that can use basic SCSI commands
test_mode = USE_SCSI
for k in range(altsetting.bNumEndpoints):
endpoint = altsetting.endpoint[k] # const usb.endpoint_descriptor*
print(" endpoint[{}].address: {:02X}".format(
k, endpoint.bEndpointAddress))
# Use the first interrupt or bulk IN/OUT endpoints as default for testing
if ((endpoint.bmAttributes & usb.LIBUSB_TRANSFER_TYPE_MASK) &
(usb.LIBUSB_TRANSFER_TYPE_BULK | usb.LIBUSB_TRANSFER_TYPE_INTERRUPT)):
if endpoint.bEndpointAddress & usb.LIBUSB_ENDPOINT_IN:
if not endpoint_in:
endpoint_in = endpoint.bEndpointAddress
else:
if not endpoint_out:
endpoint_out = endpoint.bEndpointAddress
print(" max packet size: {:04X}".format(endpoint.wMaxPacketSize))
print(" polling interval: {:02X}".format(endpoint.bInterval))
ep_comp = ct.POINTER(usb.ss_endpoint_companion_descriptor)()
usb.get_ss_endpoint_companion_descriptor(None, ct.byref(altsetting.endpoint[k]),
ct.byref(ep_comp))
if ep_comp:
print(" max burst: {:02X} (USB 3.0)".format(ep_comp[0].bMaxBurst))
print(" bytes per interval: {:04X} (USB 3.0)".format(ep_comp[0].wBytesPerInterval))
usb.free_ss_endpoint_companion_descriptor(ep_comp)
usb.free_config_descriptor(conf_desc)
usb.set_auto_detach_kernel_driver(handle, 1)
for iface in range(nb_ifaces):
print("\nClaiming interface {}...".format(iface))
r = usb.claim_interface(handle, iface)
if r != usb.LIBUSB_SUCCESS:
perr(" Failed.\n")
print("\nReading string descriptors:")
string = (ct.c_char * 128)()
for i in range(3):
if string_index[i] == 0:
continue
if usb.get_string_descriptor_ascii(handle, string_index[i],
ct.cast(string, ct.POINTER(ct.c_ubyte)), ct.sizeof(string)) > 0:
print(" String ({:#04X}): \"{}\"".format(string_index[i], string))
# Read the OS String Descriptor
r = usb.get_string_descriptor(handle, MS_OS_DESC_STRING_INDEX, 0,
ct.cast(string, ct.POINTER(ct.c_ubyte)), MS_OS_DESC_STRING_LENGTH)
        if r == MS_OS_DESC_STRING_LENGTH and string[:len(ms_os_desc_string)] == bytes(ms_os_desc_string):
# If this is a Microsoft OS String Descriptor,
# attempt to read the WinUSB extended Feature Descriptors
read_ms_winsub_feature_descriptors(handle, string[MS_OS_DESC_VENDOR_CODE_OFFSET], first_iface)
if test_mode == USE_PS3:
r = display_ps3_status(handle)
if r < 0:
return err_exit(r)
elif test_mode == USE_XBOX:
r = display_xbox_status(handle)
if r < 0:
return err_exit(r)
r = set_xbox_actuators(handle, 128, 222)
if r < 0:
return err_exit(r)
msleep(2000)
r = set_xbox_actuators(handle, 0, 0)
if r < 0:
return err_exit(r)
elif test_mode == USE_HID:
test_hid(handle, endpoint_in)
elif test_mode == USE_SCSI:
r = test_mass_storage(handle, endpoint_in, endpoint_out)
if r < 0:
return err_exit(r)
elif test_mode == USE_GENERIC:
pass
print()
for iface in range(nb_ifaces):
print("Releasing interface {}...".format(iface))
usb.release_interface(handle, iface)
print("Closing device...")
finally:
usb.close(handle)
return 0
def main(argv=sys.argv):
    global VID, PID
    global test_mode
    global binary_dump
    global binary_name
    global extra_info, force_device_request
show_help = False # bool
debug_mode = False # bool
error_lang = None # char*
# Default to generic, expecting VID:PID
VID = 0
PID = 0
test_mode = USE_GENERIC
endian_test = ct.c_uint16(0xBE00)
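    # The pointer cast below reinterprets the first byte of the 16-bit value
    # 0xBE00: it is 0x00 on a little-endian host and 0xBE on a big-endian one,
    # which is how the check detects byte order.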
if ct.cast(ct.pointer(endian_test), ct.POINTER(ct.c_uint8))[0] == 0xBE:
print("Despite their natural superiority for end users, big endian\n"
"CPUs are not supported with this program, sorry.")
return 0
#if len(argv) >= 2:
for j in range(1, len(argv)):
arglen = len(argv[j])
if argv[j][0] in ('-', '/') and arglen >= 2:
opt = argv[j][1]
if opt == 'd':
debug_mode = True
elif opt == 'i':
extra_info = True
elif opt == 'w':
force_device_request = True
elif opt == 'b':
j += 1
if j >= len(argv) or argv[j][0] in ('-', '/'):
print(" Option -b requires a file name")
return 1
binary_name = argv[j]
binary_dump = True
elif opt == 'l':
j += 1
if j >= len(argv) or argv[j][0] in ('-', '/'):
print(" Option -l requires an ISO 639-1 language parameter")
return 1
error_lang = argv[j]
elif opt == 'j':
# OLIMEX ARM-USB-TINY JTAG, 2 channel composite device - 2 interfaces
if not VID and not PID:
VID = 0x15BA
PID = 0x0004
elif opt == 'k':
# Generic 2 GB USB Key (SCSI Transparent/Bulk Only) - 1 interface
if not VID and not PID:
VID = 0x0204
PID = 0x6025
# The following tests will force VID:PID if already provided
elif opt == 'p':
# Sony PS3 Controller - 1 interface
VID = 0x054C
PID = 0x0268
test_mode = USE_PS3
elif opt == 's':
# Microsoft Sidewinder Precision Pro Joystick - 1 HID interface
VID = 0x045E
PID = 0x0008
| |
# tests/python/relay/test_vm.py
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import os
import tvm
import numpy as np
from tvm import relay
from tvm.relay.scope_builder import ScopeBuilder
from tvm.relay.testing.config import ctx_list
from tvm.relay.prelude import Prelude
import pytest
def check_result(args, expected_result, mod=None):
"""
    Check that evaluating the `main` function of `mod` applied to the
    arguments produces `expected_result` on the Relay VM.
    Parameters
    ----------
    args: list of Expr
        The arguments to supply to the expression.
    expected_result:
        The expected result of running the expression.
    mod: relay.Module, optional
        The module whose `main` function is evaluated.
"""
for target, ctx in ctx_list():
vm = relay.create_executor('vm', ctx=ctx, target=target, mod=mod)
rts_result = vm.evaluate()(*args)
tvm.testing.assert_allclose(expected_result, rts_result.asnumpy())
def veval(f, *args, ctx=tvm.cpu(), target="llvm"):
if isinstance(f, relay.Expr):
mod = relay.Module()
mod["main"] = f
else:
assert isinstance(f, relay.Module), "expected expression or module"
mod = f
exe = relay.vm.compile(mod, target)
vm = relay.vm.VirtualMachine(exe)
vm.init(ctx)
return vm.invoke("main", *args)
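# Note: veval() compiles the module with relay.vm.compile and drives a
# VirtualMachine on a single target/context, whereas check_result() above goes
# through the 'vm' executor for every (target, ctx) pair from ctx_list(); the
# tests below use whichever of the two fits the value being checked.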
def vmobj_to_list(o):
if isinstance(o, tvm.relay.backend.vm.Tensor):
return [o.asnumpy().tolist()]
elif isinstance(o, tvm.relay.backend.vm.ADT):
result = []
for f in o:
result.extend(vmobj_to_list(f))
return result
else:
raise RuntimeError("Unknown object type: %s" % type(o))
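# vmobj_to_list() flattens a (possibly nested) VM ADT into a plain Python list,
# e.g. the cons-list built in test_list_constructor() below comes back as [3, 2, 1].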
def test_split():
x = relay.var('x', shape=(12,))
y = relay.split(x, 3, axis=0).astuple()
f = relay.Function([x], y)
x_data = np.random.rand(12,).astype('float32')
res = veval(f, x_data)
ref_res = np.split(x_data, 3, axis=0)
for i in range(3):
tvm.testing.assert_allclose(res[i].asnumpy(), ref_res[i])
def test_split_no_fuse():
x = relay.var('x', shape=(12,))
y = relay.split(x, 3, axis=0).astuple()
z = relay.concatenate([relay.TupleGetItem(y, 0)], axis=0)
z = relay.annotation.stop_fusion(z)
f = relay.Function([x], z)
x_data = np.random.rand(12,).astype('float32')
res = veval(f, x_data)
tvm.testing.assert_allclose(res.asnumpy(), np.split(x_data, 3, axis=0)[0])
def test_id():
x = relay.var('x', shape=(10, 10), dtype='float64')
f = relay.Function([x], x)
x_data = np.random.rand(10, 10).astype('float64')
mod = relay.Module()
mod["main"] = f
check_result([x_data], x_data, mod=mod)
def test_op():
x = relay.var('x', shape=(10, 10))
f = relay.Function([x], x + x)
x_data = np.random.rand(10, 10).astype('float32')
mod = relay.Module()
mod["main"] = f
check_result([x_data], 2 * x_data, mod=mod)
def any(x):
x = relay.op.nn.batch_flatten(x)
return relay.op.min(x, axis=[0, 1])
def test_cond():
x = relay.var('x', shape=(10, 10))
y = relay.var('y', shape=(10, 10))
# f = relay.Function([x, y], relay.op.equal(x, y))
f = relay.Function([x, y], any(relay.op.equal(x, y)))
x_data = np.random.rand(10, 10).astype('float32')
y_data = np.random.rand(10, 10).astype('float32')
mod = relay.Module()
mod["main"] = f
# same
check_result([x_data, x_data], True, mod=mod)
# diff
check_result([x_data, y_data], False, mod=mod)
def test_simple_if():
x = relay.var('x', shape=(10, 10))
y = relay.var('y', shape=(10, 10))
f = relay.Function([x, y],
relay.If(any(relay.op.equal(x, y)), x, y))
x_data = np.random.rand(10, 10).astype('float32')
y_data = np.random.rand(10, 10).astype('float32')
mod = relay.Module()
mod["main"] = f
# same
check_result([x_data, x_data], x_data, mod=mod)
# diff
check_result([x_data, y_data], y_data, mod=mod)
def test_simple_call():
mod = relay.module.Module({})
sum_up = relay.GlobalVar('sum_up')
i = relay.var('i', shape=[], dtype='int32')
sb = ScopeBuilder()
sb.ret(i)
func = relay.Function([i], sb.get(), ret_type=relay.TensorType([], 'int32'))
mod[sum_up] = func
i_data = np.array(0, dtype='int32')
iarg = relay.var('iarg', shape=[], dtype='int32')
mod["main"] = relay.Function([iarg], sum_up(iarg))
check_result([i_data], i_data, mod=mod)
def test_count_loop():
mod = relay.module.Module({})
sum_up = relay.GlobalVar('sum_up')
i = relay.var('i', shape=[], dtype='int32')
sb = ScopeBuilder()
with sb.if_scope(relay.equal(i, relay.const(0, dtype='int32'))):
sb.ret(i)
with sb.else_scope():
one_less = relay.subtract(i, relay.const(1, dtype='int32'))
rec_call = relay.Call(sum_up, [one_less])
sb.ret(relay.add(rec_call, i))
func = relay.Function([i], sb.get(), ret_type=relay.TensorType([], 'int32'))
mod[sum_up] = func
i_data = np.array(0, dtype='int32')
iarg = relay.var('i', shape=[], dtype='int32')
mod["main"] = relay.Function([iarg], sum_up(iarg))
result = veval(mod, i_data)
tvm.testing.assert_allclose(result.asnumpy(), i_data)
check_result([i_data], i_data, mod=mod)
def test_sum_loop():
mod = relay.module.Module({})
sum_up = relay.GlobalVar('sum_up')
i = relay.var('i', shape=[], dtype='int32')
accum = relay.var('accum', shape=[], dtype='int32')
sb = ScopeBuilder()
with sb.if_scope(relay.equal(i, relay.const(0, 'int32'))):
sb.ret(accum)
with sb.else_scope():
one_less = relay.subtract(i, relay.const(1, 'int32'))
new_accum = relay.add(accum, i)
sb.ret(relay.Call(sum_up, [one_less, new_accum]))
func = relay.Function([i, accum], sb.get())
mod[sum_up] = func
loop_bound = 0
i_data = np.array(loop_bound, dtype='int32')
accum_data = np.array(0, dtype='int32')
iarg = relay.var('i', shape=[], dtype='int32')
aarg = relay.var('accum', shape=[], dtype='int32')
mod["main"] = relay.Function([iarg, aarg], sum_up(iarg, aarg))
check_result([i_data, accum_data], sum(range(1, loop_bound + 1)), mod=mod)
def test_tuple_fst():
ttype = relay.TupleType([relay.TensorType((1,)), relay.TensorType((10,))])
tup = relay.var('tup', type_annotation=ttype)
f = relay.Function([tup], relay.TupleGetItem(tup, 0))
i_data = np.random.rand(41).astype('float32')
j_data = np.random.rand(10).astype('float32')
mod = relay.Module()
mod["main"] = f
check_result([(i_data, j_data)], i_data, mod=mod)
def test_tuple_second():
ttype = relay.TupleType([relay.TensorType((1,)), relay.TensorType((10,))])
tup = relay.var('tup', type_annotation=ttype)
f = relay.Function([tup], relay.TupleGetItem(tup, 1))
i_data = np.random.rand(41).astype('float32')
j_data = np.random.rand(10).astype('float32')
mod = relay.Module()
mod["main"] = f
check_result([(i_data, j_data)], j_data, mod=mod)
def test_list_constructor():
mod = relay.Module()
p = Prelude(mod)
nil = p.nil
cons = p.cons
l = p.l
one2 = cons(relay.const(1), nil())
one3 = cons(relay.const(2), one2)
one4 = cons(relay.const(3), one3)
f = relay.Function([], one4)
mod["main"] = f
result = veval(mod)
assert len(result) == 2
assert len(result[1]) == 2
obj = vmobj_to_list(result)
tvm.testing.assert_allclose(obj, np.array([3,2,1]))
def test_let_tensor():
sb = relay.ScopeBuilder()
shape = (1,)
x = relay.var('x', shape=shape, dtype='float32')
x1 = relay.var('x1', shape=shape, dtype='float32')
x1 = sb.let(x1, x)
xplusone = x1 + relay.const(42.0, 'float32')
sb.ret(xplusone)
body = sb.get()
f = relay.Function([x], body)
x_data = np.random.rand(*shape).astype('float32')
mod = relay.Module()
mod["main"] = f
check_result([x_data], x_data + 42.0, mod=mod)
def test_let_scalar():
sb = relay.ScopeBuilder()
x = relay.var('x', 'float32')
x1 = sb.let('x1', x)
xplusone = x1 + relay.const(42.0, 'float32')
sb.ret(xplusone)
body = sb.get()
f = relay.Function([x], body)
x_data = np.array(np.random.rand()).astype('float32')
mod = relay.Module()
mod["main"] = f
check_result([x_data], x_data + 42.0, mod=mod)
def test_compose():
mod = relay.Module()
p = Prelude(mod)
compose = p.compose
# add_one = fun x -> x + 1
sb = relay.ScopeBuilder()
x = relay.var('x', 'float32')
x1 = sb.let('x1', x)
xplusone = x1 + relay.const(1.0, 'float32')
sb.ret(xplusone)
body = sb.get()
add_one = relay.GlobalVar("add_one")
add_one_func = relay.Function([x], body)
# add_two = compose(add_one, add_one)
sb = relay.ScopeBuilder()
y = relay.var('y', 'float32')
add_two_func = sb.let('add_two', compose(add_one_func, add_one_func))
add_two_res = add_two_func(y)
sb.ret(add_two_res)
add_two_body = sb.get()
mod[add_one] = add_one_func
f = relay.Function([y], add_two_body)
mod["main"] = f
x_data = np.array(np.random.rand()).astype('float32')
result = veval(mod, [x_data])
tvm.testing.assert_allclose(result.asnumpy(), x_data + 2.0)
def test_list_hd():
mod = relay.Module()
p = Prelude(mod)
nil = p.nil
cons = p.cons
l = p.l
hd = p.hd
one2 = cons(relay.const(1), nil())
one3 = cons(relay.const(2), one2)
one4 = cons(relay.const(3), one3)
three = hd(one4)
f = relay.Function([], three)
mod["main"] = f
result = veval(mod)
tvm.testing.assert_allclose(result.asnumpy(), 3)
@pytest.mark.xfail
def test_list_tl_empty_list():
mod = relay.Module()
p = Prelude(mod)
nil = p.nil
l = p.l
tl = p.tl
f = relay.Function([], tl(nil()))
mod["main"] = f
result = veval(mod)
print(result)
def test_list_tl():
mod = relay.Module()
p = Prelude(mod)
nil = p.nil
cons = p.cons
l = p.l
tl = p.tl
one2 = cons(relay.const(1), nil())
one3 = cons(relay.const(2), one2)
one4 = cons(relay.const(3), one3)
f = relay.Function([], tl(one4))
mod["main"] = f
result = veval(mod)
tvm.testing.assert_allclose(vmobj_to_list(result), np.array([2,1]))
def test_list_nth():
expected = list(range(10))
for i in range(len(expected)):
mod = relay.Module()
p = Prelude(mod)
nil = p.nil
cons = p.cons
nth = p.nth
l = nil()
        for v in reversed(expected):
            l = cons(relay.const(v), l)
f = relay.Function([], nth(l, relay.const(i)))
mod["main"] = f
result = veval(mod)
tvm.testing.assert_allclose(result.asnumpy(), expected[i])
def test_list_update():
expected = list(range(10))
mod = relay.Module()
p = Prelude(mod)
nil = p.nil
cons = p.cons
update = p.update
l = nil()
# create zero initialized list
for i in range(len(expected)):
l = cons(relay.const(0), l)
# set value
for i, v in enumerate(expected):
l = update(l, relay.const(i), relay.const(v))
f = relay.Function([], l)
mod["main"] = f
result = veval(mod)
tvm.testing.assert_allclose(vmobj_to_list(result), np.array(expected))
def test_list_length():
expected = list(range(10))
mod = relay.Module()
p = Prelude(mod)
nil = p.nil
cons = p.cons
length = p.length
l = nil()
# create zero initialized list
for i in range(len(expected)):
l = cons(relay.const(0), l)
l = length(l)
f = relay.Function([], l)
mod["main"] = f
result = veval(mod)
tvm.testing.assert_allclose(result.asnumpy(), 10)
def test_list_map():
mod = relay.Module()
p = Prelude(mod)
x = relay.var('x', 'int32')
add_one_func = relay.Function([x], relay.const(1) + x)
nil = p.nil
cons = p.cons
map = p.map
l = cons(relay.const(2), cons(relay.const(1), nil()))
f = relay.Function([], map(add_one_func, l))
mod["main"] = f
result = veval(mod)
tvm.testing.assert_allclose(vmobj_to_list(result), np.array([3, 2]))
def test_list_foldl():
mod = relay.Module()
p = Prelude(mod)
nil = p.nil
cons = p.cons
foldl = p.foldl
x = | |
'INSERTIONS_IN_GENES', 'INSERTIONS_IN_MATURE_MIRNA',
'INSERTIONS_IN_SPLICE_SITE_REGIONS',
'INSERTIONS_IN_UTR_REGIONS',
'INSERTIONS_PERCENT_FOUND_IN_DBSNP',
'INSERTION_HET_HOM_RATIO', 'MEAN_COVERAGE',
'MISMATCH_RATE_READ_1', 'MISMATCH_RATE_READ_2',
'NON_SYNONYMOUS_DELETIONS', 'NON_SYNONYMOUS_INSERTIONS',
'NON_SYNONYMOUS_SNVS', 'PAIRED_END',
'PERCENT_ALIGNED_BASES_READ_1',
'PERCENT_ALIGNED_BASES_READ_2', 'PERCENT_ALIGNED_READ_1',
'PERCENT_ALIGNED_READ_2', 'PERCENT_DUPLICATE_PAIRED_READS',
'PERCENT_Q30_BASES', 'PERCENT_Q30_BASES_READ_1',
'PERCENT_Q30_BASES_READ_2', 'REFERENCE_GENOME', 'RUNFOLDER',
'SAMPLE_ID', 'SAMPLE_NAME', 'SNVS', 'SNVS_ALL',
'SNVS_IN_CODING_REGIONS', 'SNVS_IN_EXONS', 'SNVS_IN_GENES',
'SNVS_IN_MATURE_MIRNA', 'SNVS_IN_SPLICE_SITE_REGIONS',
'SNVS_IN_UTR_REGIONS', 'SNVS_PERCENT_FOUND_IN_DBSNP',
'SNV_HET_HOM_RATIO', 'SNV_TS_TV_RATIO',
'STOP_GAINED_DELETIONS', 'STOP_GAINED_INSERTIONS',
'STOP_GAINED_SNVS', 'STOP_LOST_DELETIONS',
'STOP_LOST_INSERTIONS', 'STOP_LOST_SNVS', 'SV_DELETIONS',
'SV_DELETIONS_IN_GENES', 'SV_INSERTIONS',
'SV_INSERTIONS_IN_GENES', 'SV_INVERSIONS',
'SV_INVERSIONS_IN_GENES', 'SV_PERCENT_DELETIONS_IN_GENES',
'SV_PERCENT_INSERTIONS_IN_GENES',
'SV_PERCENT_INVERSIONS_IN_GENES',
'SV_PERCENT_TANDEM_DUPLICATIONS_IN_GENES',
'SV_TANDEM_DUPLICATIONS', 'SV_TANDEM_DUPLICATIONS_IN_GENES',
'SYNONYMOUS_SNVS', 'TOTAL_ALIGNED_BASES_READ_1',
'TOTAL_ALIGNED_BASES_READ_2', 'TOTAL_ALIGNED_READ_1',
'TOTAL_ALIGNED_READ_2', 'TOTAL_PF_BASES',
'TOTAL_PF_BASES_READ_1', 'TOTAL_PF_BASES_READ_2',
'TOTAL_PF_READS', 'illumina_version'
]
def __init__(self, **kwargs):
self.CNV = kwargs.get(
'CNV', None)
self.CNV_IN_GENES = kwargs.get(
'CNV_IN_GENES', None)
self.CNV_PERCENT_IN_GENES = kwargs.get(
'CNV_PERCENT_IN_GENES', None)
self.DELETIONS = kwargs.get(
'DELETIONS', None)
self.DELETIONS_ALL = kwargs.get(
'DELETIONS_ALL', None)
self.DELETIONS_IN_CODING_REGIONS = kwargs.get(
'DELETIONS_IN_CODING_REGIONS', None)
self.DELETIONS_IN_EXONS = kwargs.get(
'DELETIONS_IN_EXONS', None)
self.DELETIONS_IN_GENES = kwargs.get(
'DELETIONS_IN_GENES', None)
self.DELETIONS_IN_MATURE_MIRNA = kwargs.get(
'DELETIONS_IN_MATURE_MIRNA', None)
self.DELETIONS_IN_SPLICE_SITE_REGIONS = kwargs.get(
'DELETIONS_IN_SPLICE_SITE_REGIONS', None)
self.DELETIONS_IN_UTR_REGIONS = kwargs.get(
'DELETIONS_IN_UTR_REGIONS', None)
self.DELETIONS_PERCENT_FOUND_IN_DBSNP = kwargs.get(
'DELETIONS_PERCENT_FOUND_IN_DBSNP', None)
self.DELETION_HET_HOM_RATIO = kwargs.get(
'DELETION_HET_HOM_RATIO', None)
self.DIVERSITY = kwargs.get(
'DIVERSITY', None)
self.FRAGMENT_LENGTH_MAX = kwargs.get(
'FRAGMENT_LENGTH_MAX', None)
self.FRAGMENT_LENGTH_MEDIAN = kwargs.get(
'FRAGMENT_LENGTH_MEDIAN', None)
self.FRAGMENT_LENGTH_MIN = kwargs.get(
'FRAGMENT_LENGTH_MIN', None)
self.FRAGMENT_LENGTH_SD = kwargs.get(
'FRAGMENT_LENGTH_SD', None)
self.FRAMESHIFT_DELETIONS = kwargs.get(
'FRAMESHIFT_DELETIONS', None)
self.FRAMESHIFT_INSERTIONS = kwargs.get(
'FRAMESHIFT_INSERTIONS', None)
self.INDELS = kwargs.get(
'INDELS', None)
self.INDELS_ALL = kwargs.get(
'INDELS_ALL', None)
self.INDELS_PERCENT_FOUND_IN_DBSNP = kwargs.get(
'INDELS_PERCENT_FOUND_IN_DBSNP', None)
self.INDEL_HET_HOM_RATIO = kwargs.get(
'INDEL_HET_HOM_RATIO', None)
self.INSERTIONS = kwargs.get(
'INSERTIONS', None)
self.INSERTIONS_ALL = kwargs.get(
'INSERTIONS_ALL', None)
self.INSERTIONS_IN_CODING_REGIONS = kwargs.get(
'INSERTIONS_IN_CODING_REGIONS', None)
self.INSERTIONS_IN_EXONS = kwargs.get(
'INSERTIONS_IN_EXONS', None)
self.INSERTIONS_IN_GENES = kwargs.get(
'INSERTIONS_IN_GENES', None)
self.INSERTIONS_IN_MATURE_MIRNA = kwargs.get(
'INSERTIONS_IN_MATURE_MIRNA', None)
self.INSERTIONS_IN_SPLICE_SITE_REGIONS = kwargs.get(
'INSERTIONS_IN_SPLICE_SITE_REGIONS', None)
self.INSERTIONS_IN_UTR_REGIONS = kwargs.get(
'INSERTIONS_IN_UTR_REGIONS', None)
self.INSERTIONS_PERCENT_FOUND_IN_DBSNP = kwargs.get(
'INSERTIONS_PERCENT_FOUND_IN_DBSNP', None)
self.INSERTION_HET_HOM_RATIO = kwargs.get(
'INSERTION_HET_HOM_RATIO', None)
self.MEAN_COVERAGE = kwargs.get(
'MEAN_COVERAGE', None)
self.MISMATCH_RATE_READ_1 = kwargs.get(
'MISMATCH_RATE_READ_1', None)
self.MISMATCH_RATE_READ_2 = kwargs.get(
'MISMATCH_RATE_READ_2', None)
self.NON_SYNONYMOUS_DELETIONS = kwargs.get(
'NON_SYNONYMOUS_DELETIONS', None)
self.NON_SYNONYMOUS_INSERTIONS = kwargs.get(
'NON_SYNONYMOUS_INSERTIONS', None)
self.NON_SYNONYMOUS_SNVS = kwargs.get(
'NON_SYNONYMOUS_SNVS', None)
self.PAIRED_END = kwargs.get(
'PAIRED_END', None)
self.PERCENT_ALIGNED_BASES_READ_1 = kwargs.get(
'PERCENT_ALIGNED_BASES_READ_1', None)
self.PERCENT_ALIGNED_BASES_READ_2 = kwargs.get(
'PERCENT_ALIGNED_BASES_READ_2', None)
self.PERCENT_ALIGNED_READ_1 = kwargs.get(
'PERCENT_ALIGNED_READ_1', None)
self.PERCENT_ALIGNED_READ_2 = kwargs.get(
'PERCENT_ALIGNED_READ_2', None)
self.PERCENT_DUPLICATE_PAIRED_READS = kwargs.get(
'PERCENT_DUPLICATE_PAIRED_READS', None)
self.PERCENT_Q30_BASES = kwargs.get(
'PERCENT_Q30_BASES', None)
self.PERCENT_Q30_BASES_READ_1 = kwargs.get(
'PERCENT_Q30_BASES_READ_1', None)
self.PERCENT_Q30_BASES_READ_2 = kwargs.get(
'PERCENT_Q30_BASES_READ_2', None)
self.REFERENCE_GENOME = kwargs.get(
'REFERENCE_GENOME', None)
self.RUNFOLDER = kwargs.get(
'RUNFOLDER', None)
self.SAMPLE_ID = kwargs.get(
'SAMPLE_ID', None)
self.SAMPLE_NAME = kwargs.get(
'SAMPLE_NAME', None)
self.SNVS = kwargs.get(
'SNVS', None)
self.SNVS_ALL = kwargs.get(
'SNVS_ALL', None)
self.SNVS_IN_CODING_REGIONS = kwargs.get(
'SNVS_IN_CODING_REGIONS', None)
self.SNVS_IN_EXONS = kwargs.get(
'SNVS_IN_EXONS', None)
self.SNVS_IN_GENES = kwargs.get(
'SNVS_IN_GENES', None)
self.SNVS_IN_MATURE_MIRNA = kwargs.get(
'SNVS_IN_MATURE_MIRNA', None)
self.SNVS_IN_SPLICE_SITE_REGIONS = kwargs.get(
'SNVS_IN_SPLICE_SITE_REGIONS', None)
self.SNVS_IN_UTR_REGIONS = kwargs.get(
'SNVS_IN_UTR_REGIONS', None)
self.SNVS_PERCENT_FOUND_IN_DBSNP = kwargs.get(
'SNVS_PERCENT_FOUND_IN_DBSNP', None)
self.SNV_HET_HOM_RATIO = kwargs.get(
'SNV_HET_HOM_RATIO', None)
self.SNV_TS_TV_RATIO = kwargs.get(
'SNV_TS_TV_RATIO', None)
self.STOP_GAINED_DELETIONS = kwargs.get(
'STOP_GAINED_DELETIONS', None)
self.STOP_GAINED_INSERTIONS = kwargs.get(
'STOP_GAINED_INSERTIONS', None)
self.STOP_GAINED_SNVS = kwargs.get(
'STOP_GAINED_SNVS', None)
self.STOP_LOST_DELETIONS = kwargs.get(
'STOP_LOST_DELETIONS', None)
self.STOP_LOST_INSERTIONS = kwargs.get(
'STOP_LOST_INSERTIONS', None)
self.STOP_LOST_SNVS = kwargs.get(
'STOP_LOST_SNVS', None)
self.SV_DELETIONS = kwargs.get(
'SV_DELETIONS', None)
self.SV_DELETIONS_IN_GENES = kwargs.get(
'SV_DELETIONS_IN_GENES', None)
self.SV_INSERTIONS = kwargs.get(
'SV_INSERTIONS', None)
self.SV_INSERTIONS_IN_GENES = kwargs.get(
'SV_INSERTIONS_IN_GENES', None)
self.SV_INVERSIONS = kwargs.get(
'SV_INVERSIONS', None)
self.SV_INVERSIONS_IN_GENES = kwargs.get(
'SV_INVERSIONS_IN_GENES', None)
self.SV_PERCENT_DELETIONS_IN_GENES = kwargs.get(
'SV_PERCENT_DELETIONS_IN_GENES', None)
self.SV_PERCENT_INSERTIONS_IN_GENES = kwargs.get(
'SV_PERCENT_INSERTIONS_IN_GENES', None)
self.SV_PERCENT_INVERSIONS_IN_GENES = kwargs.get(
'SV_PERCENT_INVERSIONS_IN_GENES', None)
self.SV_PERCENT_TANDEM_DUPLICATIONS_IN_GENES = kwargs.get(
'SV_PERCENT_TANDEM_DUPLICATIONS_IN_GENES', None)
self.SV_TANDEM_DUPLICATIONS = kwargs.get(
'SV_TANDEM_DUPLICATIONS', None)
self.SV_TANDEM_DUPLICATIONS_IN_GENES = kwargs.get(
'SV_TANDEM_DUPLICATIONS_IN_GENES', None)
self.SYNONYMOUS_SNVS = kwargs.get(
'SYNONYMOUS_SNVS', None)
self.TOTAL_ALIGNED_BASES_READ_1 = kwargs.get(
'TOTAL_ALIGNED_BASES_READ_1', None)
self.TOTAL_ALIGNED_BASES_READ_2 = kwargs.get(
'TOTAL_ALIGNED_BASES_READ_2', None)
self.TOTAL_ALIGNED_READ_1 = kwargs.get(
'TOTAL_ALIGNED_READ_1', None)
self.TOTAL_ALIGNED_READ_2 = kwargs.get(
'TOTAL_ALIGNED_READ_2', None)
self.TOTAL_PF_BASES = kwargs.get(
'TOTAL_PF_BASES', None)
self.TOTAL_PF_BASES_READ_1 = kwargs.get(
'TOTAL_PF_BASES_READ_1', None)
self.TOTAL_PF_BASES_READ_2 = kwargs.get(
'TOTAL_PF_BASES_READ_2', None)
self.TOTAL_PF_READS = kwargs.get(
'TOTAL_PF_READS', None)
self.illumina_version = kwargs.get(
'illumina_version', None)
class IlluminaSummaryV4(ProtocolElement):
"""
This is the summary provided by Illumina V4 germline samples in
file SAMPLE_ID.summary.csv
"""
_schemaSource = """
{"type": "record", "name": "IlluminaSummaryV4", "namespace": "org.gel.models.report.avro", "doc":
"", "fields": [{"name": "illumina_version", "type": {"type": "enum", "name": "IlluminaVersion",
"symbols": ["IlluminaSummaryV1", "IlluminaSummaryV2", "IlluminaSummaryV4",
"IlluminaSummaryCancerV2", "IlluminaSummaryCancerV4"]}}, {"name": "ARRAY_CONCORDANCE", "type":
["null", "double"]}, {"name": "ARRAY_CONCORDANCE_USAGE", "type": ["null", "double"]}, {"name":
"AUTOSOME_CALLABILITY", "type": "double"}, {"name": "AUTOSOME_COVERAGE_AT_10X", "type": "double"},
{"name": "AUTOSOME_COVERAGE_AT_15X", "type": "double"}, {"name": "AUTOSOME_COVERAGE_AT_1X", "type":
"double"}, {"name": "AUTOSOME_EXON_CALLABILITY", "type": "double"}, {"name":
"AUTOSOME_EXON_COVERAGE_AT_10X", "type": "double"}, {"name": "AUTOSOME_EXON_COVERAGE_AT_15X",
"type": "double"}, {"name": "AUTOSOME_EXON_COVERAGE_AT_1X", "type": "double"}, {"name":
"AUTOSOME_EXON_MEAN_COVERAGE", "type": "double"}, {"name": "AUTOSOME_MEAN_COVERAGE", "type":
"double"}, {"name": "CALLABILITY", "type": "double"}, {"name": "CNVS", "type": "long"}, {"name":
"CNVS_ALL", "type": "long"}, {"name": "CNVS_IN_GENES", "type": "long"}, {"name": "CONTAMINATION",
"type": "double"}, {"name": "COVERAGE_AT_10X", "type": "double"}, {"name": "COVERAGE_AT_15X",
"type": "double"}, {"name": "COVERAGE_AT_1X", "type": "double"}, {"name": "DELETIONS", "type":
"long"}, {"name": "DELETIONS_ALL", "type": "long"}, {"name": "DELETIONS_IN_CODING_REGIONS", "type":
"long"}, {"name": "DELETIONS_IN_EXONS", "type": "long"}, {"name": "DELETIONS_IN_GENES", "type":
"long"}, {"name": "DELETIONS_IN_MATURE_MIRNA", "type": "long"}, {"name":
"DELETIONS_IN_SPLICE_SITE_REGIONS", "type": "long"}, {"name": "DELETIONS_IN_UTR_REGIONS", "type":
"long"}, {"name": "DELETIONS_PERCENT_FOUND_IN_DBSNP", "type": "double"}, {"name":
"DELETION_HET_HOM_RATIO", "type": "double"}, {"name": "DIVERSITY", "type": "long"}, {"name":
"FRAGMENT_LENGTH_MAX", "type": "long"}, {"name": "FRAGMENT_LENGTH_MEDIAN", "type": "long"}, {"name":
"FRAGMENT_LENGTH_MIN", "type": "long"}, {"name": "FRAGMENT_LENGTH_SD", "type": "long"}, {"name":
"FRAMESHIFT_DELETIONS", "type": "long"}, {"name": "FRAMESHIFT_INSERTIONS", "type": "long"}, {"name":
"INDELS", "type": "long"}, {"name": "INDELS_ALL", "type": "long"}, {"name":
"INDELS_PERCENT_FOUND_IN_DBSNP", "type": "double"}, {"name": "INDEL_HET_HOM_RATIO", "type":
"double"}, {"name": "INSERTIONS", "type": "long"}, {"name": "INSERTIONS_ALL", "type": "long"},
{"name": "INSERTIONS_IN_CODING_REGIONS", "type": "long"}, {"name": "INSERTIONS_IN_EXONS", "type":
"long"}, {"name": "INSERTIONS_IN_GENES", "type": "long"}, {"name": "INSERTIONS_IN_MATURE_MIRNA",
"type": "long"}, {"name": "INSERTIONS_IN_SPLICE_SITE_REGIONS", "type": "long"}, {"name":
"INSERTIONS_IN_UTR_REGIONS", "type": "long"}, {"name": "INSERTIONS_PERCENT_FOUND_IN_DBSNP", "type":
"double"}, {"name": "INSERTION_HET_HOM_RATIO", "type": "double"}, {"name":
"MAPQ_GT_10_AUTOSOME_COVERAGE_AT_15X", "type": "double"}, {"name":
"MAPQ_GT_10_AUTOSOME_EXON_COVERAGE_AT_15X", "type": "double"}, {"name":
"MAPQ_GT_10_AUTOSOME_MEDIAN_COVERAGE", "type": "double"}, {"name": "MEAN_COVERAGE", "type":
"double"}, {"name": "MEDIAN_READ_LENGTH", "type": "string"}, {"name": "MEDIAN_READ_LENGTH_READ_1",
"type": "string"}, {"name": "MEDIAN_READ_LENGTH_READ_2", "type": "string"}, {"name":
"METRICS_DELIVERABLE", "type": "string"}, {"name": "METRICS_VERSION", "type": "string"}, {"name":
"MISMATCH_RATE", "type": "double"}, {"name": "MISMATCH_RATE_READ_1", "type": "double"}, {"name":
"MISMATCH_RATE_READ_2", "type": "double"}, {"name": "NON_SYNONYMOUS_DELETIONS", "type": "long"},
{"name": "NON_SYNONYMOUS_INSERTIONS", "type": "long"}, {"name": "NON_SYNONYMOUS_SNVS", "type":
"long"}, {"name": "PAIRED_END", "type": "boolean"}, {"name": "PERCENT_ALIGNED_BASES", "type":
"double"}, {"name": "PERCENT_ALIGNED_BASES_READ_1", "type": "double"}, {"name":
"PERCENT_ALIGNED_BASES_READ_2", "type": "double"}, {"name": "PERCENT_ALIGNED_READS", "type":
"double"}, {"name": "PERCENT_ALIGNED_READ_1", "type": "double"}, {"name": "PERCENT_ALIGNED_READ_2",
"type": "double"}, {"name": "PERCENT_AT_DROPOUT", "type": "double"}, {"name":
"PERCENT_CNVS_IN_GENES", "type": "double"}, {"name": "PERCENT_DUPLICATE_ALIGNED_READS", "type":
"double"}, {"name": "PERCENT_DUPLICATE_PROPER_READ_PAIRS", "type": "double"}, {"name":
"PERCENT_GC_DROPOUT", "type": "double"}, {"name": "PERCENT_NON_SPATIAL_DUPLICATE_READ_PAIRS",
"type": "double"}, {"name": "PERCENT_OVERLAPPING_BASES", "type": "double"}, {"name":
"PERCENT_Q25_BASES_READ_1", "type": "double"}, {"name": "PERCENT_Q25_BASES_READ_2", "type":
"double"}, {"name": "PERCENT_Q30_BASES", "type": "double"}, {"name": "PERCENT_Q30_BASES_READ_1",
"type": "double"}, {"name": "PERCENT_Q30_BASES_READ_2", "type": "double"}, {"name":
"PERCENT_READ_PAIRS_ALIGNED_TO_DIFFERENT_CHROMOSOMES", "type": "double"}, {"name":
"PERCENT_SOFT_CLIPPED_BASES", "type": "double"}, {"name":
"Q30_BASES_EXCLUDING_CLIPPED_AND_DUPLICATE_READ_BASES", "type": "long"}, {"name":
"READ_ENRICHMENT_AT_75_GC", "type": "double"}, {"name": "READ_ENRICHMENT_AT_80_GC", "type":
"double"}, {"name": "REFERENCE_GENOME", "type": "string"}, {"name": "RUN_FOLDER", "type": "string"},
{"name": "SAMPLE_ID", "type": "string"}, {"name": "SAMPLE_NAME", "type": "string"}, {"name": "SNVS",
"type": "long"}, {"name": "SNVS_ALL", "type": "long"}, {"name": "SNVS_IN_CODING_REGIONS", "type":
"long"}, {"name": "SNVS_IN_EXONS", "type": "long"}, {"name": "SNVS_IN_GENES", "type": "long"},
{"name": "SNVS_IN_MATURE_MIRNA", "type": "long"}, {"name": "SNVS_IN_SPLICE_SITE_REGIONS", "type":
"long"}, {"name": "SNVS_IN_UTR_REGIONS", "type": "long"}, {"name": "SNVS_PERCENT_FOUND_IN_DBSNP",
"type": "double"}, {"name": "SNV_HET_HOM_RATIO", "type": "double"}, {"name": "SNV_TS_TV_RATIO",
"type": "double"}, {"name": "STOP_GAINED_DELETIONS", "type": "long"}, {"name":
"STOP_GAINED_INSERTIONS", "type": "long"}, {"name": "STOP_GAINED_SNVS", "type": "long"}, {"name":
"STOP_LOST_DELETIONS", "type": "long"}, {"name": "STOP_LOST_INSERTIONS", "type": "long"}, {"name":
"STOP_LOST_SNVS", "type": "long"}, {"name": "SV_BREAKENDS", "type": "long"}, {"name":
"SV_BREAKENDS_ALL", "type": "long"}, {"name": "SV_BREAKENDS_IN_GENES", "type": "long"}, {"name":
"SV_DELETIONS", "type": "long"}, {"name": "SV_DELETIONS_ALL", "type": "long"}, {"name":
"SV_DELETIONS_IN_GENES", "type": "long"}, {"name": "SV_INSERTIONS", "type": "long"}, {"name":
"SV_INSERTIONS_ALL", "type": "long"}, {"name": "SV_INSERTIONS_IN_GENES", "type": "long"}, {"name":
"SV_INVERSIONS", "type": "long"}, {"name": "SV_INVERSIONS_ALL", "type": "long"}, {"name":
"SV_INVERSIONS_IN_GENES", "type": "long"}, {"name": "SV_PERCENT_BREAKENDS_IN_GENES", "type":
"double"}, {"name": "SV_PERCENT_DELETIONS_IN_GENES", "type": "double"}, {"name":
"SV_PERCENT_INSERTIONS_IN_GENES", "type": "double"}, {"name": "SV_PERCENT_INVERSIONS_IN_GENES",
"type": "double"}, {"name": "SV_PERCENT_TANDEM_DUPLICATIONS_IN_GENES", "type": "double"}, {"name":
"SV_TANDEM_DUPLICATIONS", "type": "long"}, {"name": "SV_TANDEM_DUPLICATIONS_ALL", "type": "long"},
{"name": "SV_TANDEM_DUPLICATIONS_IN_GENES", "type": "long"}, {"name": "SYNONYMOUS_SNVS", "type":
"long"}, {"name": "TOTAL_ALIGNED_BASES", "type": "long"}, {"name": "TOTAL_ALIGNED_BASES_READ_1",
"type": "long"}, {"name": "TOTAL_ALIGNED_BASES_READ_2", "type": "long"}, {"name":
"TOTAL_ALIGNED_READS", "type": "long"}, {"name": "TOTAL_ALIGNED_READ_1", "type": "long"}, {"name":
"TOTAL_ALIGNED_READ_2", "type": "long"}, {"name": "TOTAL_ALIGNED_READ_PAIRS", "type": "long"},
{"name": "TOTAL_DUPLICATE_ALIGNED_READS", "type": "long"}, {"name":
"TOTAL_DUPLICATE_PROPER_READ_PAIRS", "type": "long"}, {"name": "TOTAL_MAPQ_GT_10_READS", "type":
"long"}, {"name": "TOTAL_PF_BASES", "type": "long"}, {"name": "TOTAL_PF_BASES_READ_1", "type":
"long"}, {"name": "TOTAL_PF_BASES_READ_2", "type": "long"}, {"name": "TOTAL_PF_READS", "type":
"long"}, {"name": "TOTAL_PF_READ_1", "type": "long"}, {"name": "TOTAL_PF_READ_2", "type": "long"},
{"name": "TOTAL_PROPER_READ_PAIRS", "type": "long"}, {"name": "UNIQUE_ALIGNED_READS", "type":
"long"}]}
"""
schema = avro_parse(_schemaSource)
requiredFields = {
"ARRAY_CONCORDANCE",
"ARRAY_CONCORDANCE_USAGE",
"AUTOSOME_CALLABILITY",
"AUTOSOME_COVERAGE_AT_10X",
"AUTOSOME_COVERAGE_AT_15X",
"AUTOSOME_COVERAGE_AT_1X",
"AUTOSOME_EXON_CALLABILITY",
"AUTOSOME_EXON_COVERAGE_AT_10X",
"AUTOSOME_EXON_COVERAGE_AT_15X",
"AUTOSOME_EXON_COVERAGE_AT_1X",
"AUTOSOME_EXON_MEAN_COVERAGE",
"AUTOSOME_MEAN_COVERAGE",
"CALLABILITY",
"CNVS",
"CNVS_ALL",
"CNVS_IN_GENES",
"CONTAMINATION",
"COVERAGE_AT_10X",
"COVERAGE_AT_15X",
"COVERAGE_AT_1X",
"DELETIONS",
"DELETIONS_ALL",
"DELETIONS_IN_CODING_REGIONS",
"DELETIONS_IN_EXONS",
"DELETIONS_IN_GENES",
"DELETIONS_IN_MATURE_MIRNA",
"DELETIONS_IN_SPLICE_SITE_REGIONS",
"DELETIONS_IN_UTR_REGIONS",
"DELETIONS_PERCENT_FOUND_IN_DBSNP",
"DELETION_HET_HOM_RATIO",
"DIVERSITY",
"FRAGMENT_LENGTH_MAX",
"FRAGMENT_LENGTH_MEDIAN",
"FRAGMENT_LENGTH_MIN",
"FRAGMENT_LENGTH_SD",
"FRAMESHIFT_DELETIONS",
"FRAMESHIFT_INSERTIONS",
"INDELS",
"INDELS_ALL",
"INDELS_PERCENT_FOUND_IN_DBSNP",
"INDEL_HET_HOM_RATIO",
"INSERTIONS",
"INSERTIONS_ALL",
"INSERTIONS_IN_CODING_REGIONS",
"INSERTIONS_IN_EXONS",
"INSERTIONS_IN_GENES",
"INSERTIONS_IN_MATURE_MIRNA",
"INSERTIONS_IN_SPLICE_SITE_REGIONS",
"INSERTIONS_IN_UTR_REGIONS",
"INSERTIONS_PERCENT_FOUND_IN_DBSNP",
"INSERTION_HET_HOM_RATIO",
"MAPQ_GT_10_AUTOSOME_COVERAGE_AT_15X",
"MAPQ_GT_10_AUTOSOME_EXON_COVERAGE_AT_15X",
"MAPQ_GT_10_AUTOSOME_MEDIAN_COVERAGE",
"MEAN_COVERAGE",
"MEDIAN_READ_LENGTH",
"MEDIAN_READ_LENGTH_READ_1",
"MEDIAN_READ_LENGTH_READ_2",
"METRICS_DELIVERABLE",
"METRICS_VERSION",
"MISMATCH_RATE",
"MISMATCH_RATE_READ_1",
"MISMATCH_RATE_READ_2",
"NON_SYNONYMOUS_DELETIONS",
"NON_SYNONYMOUS_INSERTIONS",
"NON_SYNONYMOUS_SNVS",
"PAIRED_END",
"PERCENT_ALIGNED_BASES",
"PERCENT_ALIGNED_BASES_READ_1",
"PERCENT_ALIGNED_BASES_READ_2",
"PERCENT_ALIGNED_READS",
"PERCENT_ALIGNED_READ_1",
"PERCENT_ALIGNED_READ_2",
"PERCENT_AT_DROPOUT",
"PERCENT_CNVS_IN_GENES",
"PERCENT_DUPLICATE_ALIGNED_READS",
"PERCENT_DUPLICATE_PROPER_READ_PAIRS",
"PERCENT_GC_DROPOUT",
"PERCENT_NON_SPATIAL_DUPLICATE_READ_PAIRS",
"PERCENT_OVERLAPPING_BASES",
"PERCENT_Q25_BASES_READ_1",
"PERCENT_Q25_BASES_READ_2",
"PERCENT_Q30_BASES",
"PERCENT_Q30_BASES_READ_1",
"PERCENT_Q30_BASES_READ_2",
"PERCENT_READ_PAIRS_ALIGNED_TO_DIFFERENT_CHROMOSOMES",
"PERCENT_SOFT_CLIPPED_BASES",
"Q30_BASES_EXCLUDING_CLIPPED_AND_DUPLICATE_READ_BASES",
"READ_ENRICHMENT_AT_75_GC",
"READ_ENRICHMENT_AT_80_GC",
"REFERENCE_GENOME",
"RUN_FOLDER",
"SAMPLE_ID",
"SAMPLE_NAME",
"SNVS",
"SNVS_ALL",
"SNVS_IN_CODING_REGIONS",
"SNVS_IN_EXONS",
"SNVS_IN_GENES",
"SNVS_IN_MATURE_MIRNA",
"SNVS_IN_SPLICE_SITE_REGIONS",
"SNVS_IN_UTR_REGIONS",
"SNVS_PERCENT_FOUND_IN_DBSNP",
"SNV_HET_HOM_RATIO",
"SNV_TS_TV_RATIO",
"STOP_GAINED_DELETIONS",
"STOP_GAINED_INSERTIONS",
"STOP_GAINED_SNVS",
"STOP_LOST_DELETIONS",
"STOP_LOST_INSERTIONS",
"STOP_LOST_SNVS",
"SV_BREAKENDS",
"SV_BREAKENDS_ALL",
"SV_BREAKENDS_IN_GENES",
"SV_DELETIONS",
"SV_DELETIONS_ALL",
"SV_DELETIONS_IN_GENES",
"SV_INSERTIONS",
"SV_INSERTIONS_ALL",
"SV_INSERTIONS_IN_GENES",
"SV_INVERSIONS",
"SV_INVERSIONS_ALL",
"SV_INVERSIONS_IN_GENES",
"SV_PERCENT_BREAKENDS_IN_GENES",
"SV_PERCENT_DELETIONS_IN_GENES",
"SV_PERCENT_INSERTIONS_IN_GENES",
"SV_PERCENT_INVERSIONS_IN_GENES",
"SV_PERCENT_TANDEM_DUPLICATIONS_IN_GENES",
"SV_TANDEM_DUPLICATIONS",
"SV_TANDEM_DUPLICATIONS_ALL",
"SV_TANDEM_DUPLICATIONS_IN_GENES",
"SYNONYMOUS_SNVS",
"TOTAL_ALIGNED_BASES",
"TOTAL_ALIGNED_BASES_READ_1",
"TOTAL_ALIGNED_BASES_READ_2",
"TOTAL_ALIGNED_READS",
"TOTAL_ALIGNED_READ_1",
"TOTAL_ALIGNED_READ_2",
"TOTAL_ALIGNED_READ_PAIRS",
"TOTAL_DUPLICATE_ALIGNED_READS",
"TOTAL_DUPLICATE_PROPER_READ_PAIRS",
"TOTAL_MAPQ_GT_10_READS",
"TOTAL_PF_BASES",
"TOTAL_PF_BASES_READ_1",
"TOTAL_PF_BASES_READ_2",
"TOTAL_PF_READS",
"TOTAL_PF_READ_1",
"TOTAL_PF_READ_2",
"TOTAL_PROPER_READ_PAIRS",
"UNIQUE_ALIGNED_READS",
"illumina_version",
}
@classmethod
def isEmbeddedType(cls, fieldName):
embeddedTypes = {}
return fieldName in embeddedTypes
@classmethod
def getEmbeddedType(cls, fieldName):
embeddedTypes = {}
return embeddedTypes[fieldName]
__slots__ = [
'ARRAY_CONCORDANCE', 'ARRAY_CONCORDANCE_USAGE',
'AUTOSOME_CALLABILITY', 'AUTOSOME_COVERAGE_AT_10X',
'AUTOSOME_COVERAGE_AT_15X', 'AUTOSOME_COVERAGE_AT_1X',
'AUTOSOME_EXON_CALLABILITY', 'AUTOSOME_EXON_COVERAGE_AT_10X',
'AUTOSOME_EXON_COVERAGE_AT_15X',
'AUTOSOME_EXON_COVERAGE_AT_1X', 'AUTOSOME_EXON_MEAN_COVERAGE',
'AUTOSOME_MEAN_COVERAGE', 'CALLABILITY', 'CNVS', 'CNVS_ALL',
'CNVS_IN_GENES', 'CONTAMINATION', 'COVERAGE_AT_10X',
#####################################################################################
#
# Copyright (c) Microsoft Corporation. All rights reserved.
#
# This source code is subject to terms and conditions of the Apache License, Version 2.0. A
# copy of the license can be found in the License.html file at the root of this distribution. If
# you cannot locate the Apache License, Version 2.0, please send an email to
# <EMAIL>. By using this source code in any fashion, you are agreeing to be bound
# by the terms of the Apache License, Version 2.0.
#
# You must not remove this notice, or any other, from this software.
#
#
#####################################################################################
import errno
import os
import random
import sys
import unittest
from exceptions import IOError
from iptest import IronPythonTestCase, is_cli, is_netcoreapp, is_posix, run_test, skipUnlessIronPython, stderr_trapper
if not is_posix:
import nt
@unittest.skipIf(is_posix, 'Windows specific test')
class NtTest(IronPythonTestCase):
def test_computername(self):
self.assertEqual(nt.environ.has_key('COMPUTERNAME') or nt.environ.has_key('computername'), True)
def test_mkdir(self):
nt.mkdir('dir_create_test')
self.assertEqual(nt.listdir(nt.getcwd()).count('dir_create_test'), 1)
nt.rmdir('dir_create_test')
self.assertEqual(nt.listdir(nt.getcwd()).count('dir_create_test'), 0)
def test_mkdir_negative(self):
nt.mkdir("dir_create_test")
try:
nt.mkdir("dir_create_test")
self.assertUnreachable("Cannot create the same directory twice")
except WindowsError, e:
self.assertEqual(e.errno, 17)
#if it fails once...it should fail again
self.assertRaises(WindowsError, nt.mkdir, "dir_create_test")
nt.rmdir('dir_create_test')
nt.mkdir("dir_create_test")
self.assertRaises(WindowsError, nt.mkdir, "dir_create_test")
nt.rmdir('dir_create_test')
def test_listdir(self):
self.assertRaises(TypeError, nt.listdir, None)
self.assertEqual(nt.listdir(nt.getcwd()), nt.listdir('.'))
# stat,lstat
def test_stat(self):
# stat
self.assertRaises(nt.error, nt.stat, 'doesnotexist.txt')
#lstat
self.assertRaises(nt.error, nt.lstat, 'doesnotexist.txt')
self.assertRaisesNumber(WindowsError, 2, nt.stat, 'doesnotexist.txt')
self.assertRaisesNumber(WindowsError, 22, nt.stat, 'bad?path.txt')
# stat should accept bytes as argument
def test_stat_cp34910(self):
self.assertEqual(nt.stat('/'), nt.stat(b'/'))
self.assertEqual(nt.lstat('/'), nt.lstat(b'/'))
# getcwdu test
def test_getcwdu(self):
self.assertEqual(nt.getcwd(),nt.getcwdu())
nt.mkdir('dir_create_test')
self.assertEqual(nt.listdir(nt.getcwdu()).count('dir_create_test'), 1)
nt.rmdir('dir_create_test')
# getpid test
def test_getpid(self):
result = None
result = nt.getpid()
self.assertTrue(result>=0,
"processPID should not be less than zero")
result2 = nt.getpid()
self.assertTrue(result2 == result,
"The processPID in one process should be same")
# environ test
def test_environ(self):
non_exist_key = "_NOT_EXIST_"
iron_python_string = "Iron_pythoN"
try:
nt.environ[non_exist_key]
raise AssertionError
except KeyError:
pass
# set
nt.environ[non_exist_key] = iron_python_string
self.assertEqual(nt.environ[non_exist_key], iron_python_string)
import sys
if is_cli:
import System
self.assertEqual(System.Environment.GetEnvironmentVariable(non_exist_key), iron_python_string)
# update again
swapped = iron_python_string.swapcase()
nt.environ[non_exist_key] = swapped
self.assertEqual(nt.environ[non_exist_key], swapped)
if is_cli:
self.assertEqual(System.Environment.GetEnvironmentVariable(non_exist_key), swapped)
# remove
del nt.environ[non_exist_key]
if is_cli :
self.assertEqual(System.Environment.GetEnvironmentVariable(non_exist_key), None)
self.assertEqual(type(nt.environ), type({}))
# startfile
def test_startfile(self):
self.assertRaises(OSError, nt.startfile, "not_exist_file.txt")
self.assertRaises(WindowsError, nt.startfile, __file__, 'bad')
# chdir tests
def test_chdir(self):
currdir = nt.getcwd()
nt.mkdir('tsd')
nt.chdir('tsd')
self.assertEqual(os.path.join(currdir, 'tsd'), nt.getcwd())
nt.chdir(currdir)
self.assertEqual(currdir, nt.getcwd())
nt.rmdir('tsd')
# the directory is empty or does not exist
self.assertRaisesNumber(WindowsError, 22, lambda:nt.chdir(''))
self.assertRaisesNumber(WindowsError, 2, lambda:nt.chdir('tsd'))
# fdopen tests
def test_fdopen(self):
fd_lambda = lambda x: nt.dup(x)
# fd = 0
result = None
result = nt.fdopen(fd_lambda(0),"r",1024)
self.assertTrue(result!=None,"1,The file object was not returned correctly")
result = None
result = nt.fdopen(fd_lambda(0),"w",2048)
self.assertTrue(result!=None,"2,The file object was not returned correctly")
result = None
result = nt.fdopen(fd_lambda(0),"a",512)
self.assertTrue(result!=None,"3,The file object was not returned correctly")
# fd = 1
result = None
result = nt.fdopen(fd_lambda(1),"a",1024)
self.assertTrue(result!=None,"4,The file object was not returned correctly")
result = None
result = nt.fdopen(fd_lambda(1),"r",2048)
self.assertTrue(result!=None,"5,The file object was not returned correctly")
result = None
result = nt.fdopen(fd_lambda(1),"w",512)
self.assertTrue(result!=None,"6,The file object was not returned correctly")
# fd = 2
result = None
result = nt.fdopen(fd_lambda(2),"r",1024)
self.assertTrue(result!=None,"7,The file object was not returned correctly")
result = None
result = nt.fdopen(fd_lambda(2),"a",2048)
self.assertTrue(result!=None,"8,The file object was not returned correctly")
result = None
result = nt.fdopen(fd_lambda(2),"w",512)
self.assertTrue(result!=None,"9,The file object was not returned correctly")
if not is_cli:
result.close()
# The file descriptor is not valid
self.assertRaises(OSError,nt.fdopen,3000)
self.assertRaises(OSError,nt.fdopen,-1)
self.assertRaises(OSError,nt.fdopen,3000, "w")
self.assertRaises(OSError,nt.fdopen,3000, "w", 1024)
# The file mode does not exist
self.assertRaises(ValueError,nt.fdopen,0,"p")
stuff = "\x00a\x01\x02b\x03 \x04 \x05\n\x06_\0xFE\0xFFxyz"
name = "cp5633.txt"
fd = nt.open(name, nt.O_CREAT | nt.O_BINARY | nt.O_TRUNC | nt.O_WRONLY)
f = nt.fdopen(fd, 'wb')
f.write(stuff)
f.close()
f = file(name, 'rb')
try:
self.assertEqual(stuff, f.read())
finally:
f.close()
nt.remove(name)
# fstat,unlink tests
def test_fstat(self):
result = nt.fstat(1)
self.assertTrue(result!=0,"0,The file stat object was not returned correctly")
result = None
tmpfile = "tmpfile1.tmp"
f = open(tmpfile, "w")
result = nt.fstat(f.fileno())
self.assertTrue(result!=None,"0,The file stat object was not returned correctly")
f.close()
nt.unlink(tmpfile)
# stdxx file descriptor
self.assertEqual(10, len(nt.fstat(0)))
self.assertEqual(10, len(nt.fstat(1)))
self.assertEqual(10, len(nt.fstat(2)))
# invalid file descriptor
self.assertRaises(OSError,nt.fstat,3000)
self.assertRaises(OSError,nt.fstat,-1)
def test_chmod(self):
# chmod tests:
# BUG 828,830
nt.mkdir('tmp2')
nt.chmod('tmp2', 256) # NOTE: change to flag when stat is implemented
self.assertRaises(OSError, lambda:nt.rmdir('tmp2'))
nt.chmod('tmp2', 128)
nt.rmdir('tmp2')
# /BUG
################################################################################################
# popen/popen2/popen3/unlink tests
def test_popen(self):
# open a pipe just for reading...
pipe_modes = [["ping 127.0.0.1 -n 1", "r"],
["ping 127.0.0.1 -n 1"]]
if is_cli:
pipe_modes.append(["ping 127.0.0.1 -n 1", ""])
for args in pipe_modes:
x = nt.popen(*args)
text = x.read()
self.assertTrue(text.lower().index('pinging') != -1)
self.assertEqual(x.close(), None)
# write to a pipe
x = nt.popen('sort', 'w')
x.write('hello\nabc\n')
x.close()
# bug 1146
#x = nt.popen('sort', 'w')
#x.write('hello\nabc\n')
#self.assertEqual(x.close(), None)
# once w/ default mode
self.assertRaises(ValueError, nt.popen, "ping 127.0.0.1 -n 1", "a")
# popen uses cmd.exe to run stuff -- at least sometimes
dir_pipe = nt.popen('dir')
dir_pipe.read()
dir_pipe.close()
# once w/ no mode
stdin, stdout = nt.popen2('sort')
stdin.write('hello\nabc\n')
self.assertEqual(stdin.close(), None)
self.assertEqual(stdout.read(), 'abc\nhello\n')
self.assertEqual(stdout.close(), None)
# bug 1146
# and once w/ each mode
#for mode in ['b', 't']:
# stdin, stdout = nt.popen2('sort', mode)
# stdin.write('hello\nabc\n')
# self.assertEqual(stdin.close(), None)
# self.assertEqual(stdout.read(), 'abc\nhello\n')
# self.assertEqual(stdout.close(), None)
# popen3: once w/ no mode
stdin, stdout, stderr = nt.popen3('sort')
stdin.write('hello\nabc\n')
self.assertEqual(stdin.close(), None)
self.assertEqual(stdout.read(), 'abc\nhello\n')
self.assertEqual(stdout.close(), None)
self.assertEqual(stderr.read(), '')
self.assertEqual(stderr.close(), None)
# bug 1146
# popen3: and once w/ each mode
#for mode in ['b', 't']:
# stdin, stdout, stderr = nt.popen3('sort', mode)
# stdin.write('hello\nabc\n')
# self.assertEqual(stdin.close(), None)
# self.assertEqual(stdout.read(), 'abc\nhello\n')
# self.assertEqual(stdout.close(), None)
# self.assertEqual(stderr.read(), '')
# self.assertEqual(stderr.close(), None)
tmpfile = 'tmpfile.tmp'
f = open(tmpfile, 'w')
f.close()
nt.unlink(tmpfile)
try:
nt.chmod('tmpfile.tmp', 256)
except Exception:
pass #should throw when trying to access file deleted by unlink
else:
self.assertTrue(False,"Error! Trying to access file deleted by unlink should have thrown.")
try:
tmpfile = "tmpfile2.tmp"
f = open(tmpfile, "w")
f.write("testing chmod")
f.close()
nt.chmod(tmpfile, 256)
self.assertRaises(OSError, nt.unlink, tmpfile)
nt.chmod(tmpfile, 128)
nt.unlink(tmpfile)
self.assertRaises(IOError, file, tmpfile)
finally:
try:
nt.chmod(tmpfile, 128)
nt.unlink(tmpfile)
except Exception, e:
print "exc", e
# verify that nt.stat reports times in seconds, not ticks...
import time
tmpfile = 'tmpfile.tmp'
f = open(tmpfile, 'w')
f.close()
t = time.time()
mt = nt.stat(tmpfile).st_mtime
nt.unlink(tmpfile) # this deletes the file
self.assertTrue(abs(t-mt) < 60, "time differs by too much " + str(abs(t-mt)))
tmpfile = 'tmpfile.tmp' # need to open it again since we deleted it with 'unlink'
f = open(tmpfile, 'w')
f.close()
nt.chmod('tmpfile.tmp', 256)
nt.chmod('tmpfile.tmp', 128)
nt.unlink('tmpfile.tmp')
# utime tests
def test_utime(self):
f = file('temp_file_does_not_exist.txt', 'w')
f.close()
import nt
x = nt.stat('.')
nt.utime('temp_file_does_not_exist.txt', (x[7], x[8]))
y = nt.stat('temp_file_does_not_exist.txt')
self.assertEqual(x[7], y[7])
self.assertEqual(x[8], y[8])
nt.unlink('temp_file_does_not_exist.txt')
def test_tempnam_broken_prefixes(self):
for prefix in ["pre", None]:
self.assertEqual(type(nt.tempnam("", prefix)), str)
def test_tempnam(self):
#sanity checks
self.assertEqual(type(nt.tempnam()), str)
self.assertEqual(type(nt.tempnam("garbage name should still work")), str)
#Very basic case
joe = nt.tempnam()
last_dir = joe.rfind("\\")
temp_dir = joe[:last_dir+1]
self.assertTrue(os.path.exists(temp_dir))
self.assertTrue(not os.path.exists(joe))
#Basic case where we give it an existing directory and ensure
#it uses that directory
joe = nt.tempnam(self.temp_dir)
last_dir = joe.rfind("\\")
temp_dir = joe[:last_dir+1]
self.assertTrue(os.path.exists(temp_dir))
self.assertTrue(not os.path.exists(joe))
# The next line is not guaranteed to be true in some scenarios.
#self.assertEqual(nt.stat(temp_dir.strip("\\")), nt.stat(get_temp_dir()))
#few random prefixes
prefix_names = ["", "a", "1", "_", ".", "sillyprefix",
" ",
"aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
]
#test a few directory names that shouldn't really work
dir_names = ["b", "2", "_", ".", "anotherprefix",
"bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb",
None]
for dir_name in dir_names:
#just try the directory name on its own
joe = nt.tempnam(dir_name)
last_dir = joe.rfind("\\")
temp_dir = joe[:last_dir+1]
self.assertTrue(os.path.exists(temp_dir))
self.assertTrue(not os.path.exists(joe))
self.assertTrue(temp_dir != dir_name)
#now try every prefix
for prefix_name in prefix_names:
joe = nt.tempnam(dir_name, prefix_name)
last_dir = joe.rfind("\\")
temp_dir = joe[:last_dir+1]
file_name = joe[last_dir+1:]
self.assertTrue(os.path.exists(temp_dir))
self.assertTrue(not os.path.exists(joe))
self.assertTrue(temp_dir != dir_name)
self.assertTrue(file_name.startswith(prefix_name))
@unittest.skipIf(is_cli, 'CodePlex 24299')
def test_tempnam_warning(self):
with stderr_trapper() as trapper:
temp = nt.tempnam()
self.assertTrue(trapper.messages[0].endswith("RuntimeWarning: tempnam is a potential security risk to your program"), trapper.messages)
# BUG 8777,Should IronPython throw a warning when tmpnam is called ?
# tmpnam test
def test_tmpnam(self):
str = nt.tmpnam()
self.assertEqual(isinstance(str,type("string")),True)
if is_cli:
self.assertTrue(str.find(':')!=-1,
"1,the returned path is invalid")
self.assertTrue(str.find(os.sep)!=-1,
"2,the returned path is invalid")
# times test
def test_times(self):
'''
'''
#simple sanity check
utime, stime, zero1, zero2, zero3 = nt.times()
import os
import sys
import re
import json
import math
from difflib import SequenceMatcher
import plotly.graph_objects as go
import requests
import networkx as nx
import pandas as pd
import numpy as np
import scipy
import matplotlib
import matplotlib.pyplot as plt
from ipywidgets import interactive, HBox, VBox
import ipywidgets as widgets
from IPython.display import HTML, display
import tabulate
from dotenv import dotenv_values
# load REST API creds from .env file
dcat_config = dotenv_values(".env")
def show_iris_query_ui(domain_list_ui, search_hash_ui):
lookup_ui = widgets.VBox([
widgets.Label(value="Enter a return delimited list of domains to lookup (no commas, no quotes)"),
domain_list_ui,
widgets.Label(value="Or..."),
widgets.Label(value="Enter an Iris search hassh to lookup"),
search_hash_ui,
])
return lookup_ui
def clean_domain_list(domain_list_ui):
# remove any quotes, spaces, or defanging square brackets
full_domain_list = domain_list_ui.value.strip().replace(' ', '').replace('"', '').replace("'", "").replace('[', '').replace(']', '')
# replace commas with new lines
full_domain_list = full_domain_list.replace(",", "\n")
# update the widget
domain_list_ui.value = full_domain_list
# split into array
return full_domain_list.split("\n")
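def _example_clean_domain_list():
    # Illustrative sketch, not part of the original notebook: shows how a
    # defanged, comma-separated domain list is normalized by clean_domain_list.
    # types.SimpleNamespace stands in for the ipywidgets Textarea, which only
    # needs a mutable `.value` attribute here.
    from types import SimpleNamespace
    fake_ui = SimpleNamespace(value='example[.]com, "test.org"\nevil.net')
    assert clean_domain_list(fake_ui) == ["example.com", "test.org", "evil.net"]
    return fake_ui.value  # the widget text is rewritten to one domain per line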
def get_rest_api_creds(api_username_ui, api_pw_ui):
api_username = api_username_ui.value
if len(api_username) == 0:
api_username = dcat_config["IRIS_API_USERNAME"]
api_key = api_pw_ui.value
if len(api_key) == 0:
api_key = dcat_config["IRIS_API_KEY"]
return api_username, api_key
def query_iris_rest_api(api_username_ui, api_pw_ui, domain_list_ui, search_hash_ui):
api_username, api_key = get_rest_api_creds(api_username_ui, api_pw_ui)
if len(domain_list_ui.value) > 0:
# split list of domains into groups of 100 because of API restrictions
results = []
full_domain_list = clean_domain_list(domain_list_ui)
max_domains = 100
start = 0
end = max_domains
for x in range(math.ceil(len(full_domain_list) / max_domains)):
# slice out max domains to query
partial_domain_list = full_domain_list[start:end]
# build query string
domain_list = ",".join(partial_domain_list)
iris_query = {"api_username": api_username, "api_key": api_key, "domain": domain_list}
# query rest api
print(f"...querying Iris REST API for {len(partial_domain_list)} domains")
iris_results = _query_iris_rest_api(api_username, api_key, iris_query)
# build up the set of return domain objects
results = results + iris_results["response"]["results"]
# update slice indexes
start = end
end += max_domains
return results
elif len(search_hash_ui.value) > 0:
iris_query = {"api_username": api_username, "api_key": api_key, "search_hash": search_hash_ui.value}
iris_results = _query_iris_rest_api(api_username, api_key, iris_query)
iris_results = iris_results["response"]["results"]
return iris_results
else:
print("Domain List and Search Hash text boxes are empty. Please enter either a list of domains or search hash to lookup")
raise Exception("Domain List and Search Hash text boxes are empty")
def _query_iris_rest_api(api_username: str, api_key: str, iris_query: str):
root_api_url = "https://api.domaintools.com/v1/iris-investigate/"
resp = requests.post(root_api_url, data=iris_query)
if resp.status_code != 200:
raise Exception(f'POST /iris-investigate/ {resp.status_code}: {resp.text}')
iris_results = resp.json()
return iris_results
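def _example_iris_query_payloads():
    # Illustrative sketch only: the two payload shapes that query_iris_rest_api
    # builds before handing off to _query_iris_rest_api above. The credentials
    # and the search hash are placeholders, so this just returns the dicts and
    # does not call the DomainTools API.
    domain_query = {
        "api_username": "your_api_username",
        "api_key": "your_api_key",
        "domain": "example.com,example.org",  # at most 100 domains per request
    }
    hash_query = {
        "api_username": "your_api_username",
        "api_key": "your_api_key",
        "search_hash": "U2FsdGVkX1...",  # hypothetical Iris search hash
    }
    return domain_query, hash_query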
def remove_domains_from_graph(graph, remove_domains_ui):
domains = clean_domain_list(remove_domains_ui)
for domain in domains:
if graph.has_node(domain):
graph.remove_node(domain)
return graph
class Config(object):
""" Little helper class to hold all the config values"""
class Domain(object):
""" Little helper class to hold the domain name and risk score
"""
def __init__(self, domain_json):
self.json = domain_json
self.name = domain_json["domain"]
self.risk_score = domain_json["domain_risk"]['risk_score']
self.pivot_categories = {}
self.label=f"{self.name} ({self.risk_score})"
def __str__(self):
return f"name: {self.name}, risk: {self.risk_score}"
def __repr__(self):
return str(self)
class DomainRelationship(object):
def __init__(self, weight: float, category: str):
# this is the maximum weight that an edge can have.
# Adjust this if you want to play around with stronger edge weights
self.max_weight = 5.0
self.weight = weight
self.categories = [category]
def __str__(self):
return f"weight: {self.weight}, categories: {self.categories}"
def __repr__(self):
return str(self)
def add(self, weight: float, category: str):
""" Note: certain pivot categories can be added more than once for 2 domains;
things like IP and name server. For example, two domains could be on the same set of 5
IP addresses. For now the weights are just summed if there is more than one pivot of
the same category, but maybe we need a different strategy. Since IPs have multiple pivots
(ip address, country code, asn, isp) this means if there were 5 shared IPs between two
domains, the weight would be: 4 * 5 * pivot_weight.
This might over-amplify the edge strength.
"""
if category not in self.categories:
# this helps by not overly boosting the edge weight if two domains share
# multiple IP addresses
self.weight += weight
if self.weight > self.max_weight:
self.weight = self.max_weight
self.categories.append(category)
def get_description(self):
return "<br>".join(sorted(self.categories))
class PivotValue(object):
def __init__(self, pivot_value, pivot_count):
self.pivot_value = pivot_value
self.pivot_count = pivot_count
self.domains = set()
def union(self, other: "PivotValue"):
self.domains.union(other.domains)
def __str__(self):
return f"pivot_value: {self.pivot_value}, " \
f"pivot_count: {self.pivot_count}, " \
f"domains: {self.domains}"
def __repr__(self):
return str(self)
def get_edge_count(n: int):
# for a complete graph, the edge count is: n(n-1)/2
return n * (n - 1) / 2
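def _example_get_edge_count():
    # Illustrative check of the complete-graph formula n(n-1)/2 used above:
    # four fully connected domains form K4, which has 6 edges.
    assert get_edge_count(4) == 6
    assert get_edge_count(10) == 45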
def build_domain_pivot_graph(iris_results: list, config: "Config"):
""" Main workflow function that takes the results from an Iris Investigate query and
builds the graph object of how each of the domains in the query are connected to each other"""
# parse the Iris API Result to build the pivot data structure
graph, pivot_categories = init_local_pivot_graph(iris_results, config)
# normalize registrar pivots (see note in function comments)
#if "registrar" in pivot_categories and config.normalize_registrars:
# normalize_similar_registrars(pivot_categories["registrar"])
# create pivots for longest common substrings
pivot_on_matching_substrings(graph, pivot_categories, config)
# trim pivots from graph that have less than the set count threshold or contain all domains
trim_pivots(pivot_categories, len(graph.nodes), config)
# trim unconnected domains and domains with only a create date pivot
trimmed_unconnected_domains = trim_unconnected_domains(graph, pivot_categories, config)
trimmed_create_date_domains = trim_domains_with_only_create_date_pivot(graph, pivot_categories)
print(f"{len(trimmed_unconnected_domains)} "
f"domains trimmed because they were not connected to other domains")
print(f"{len(trimmed_create_date_domains)} "
f"domains trimmed because create_date was the only pivit")
print(f"{len(graph.nodes)} domains in pivot structure \n")
# build the graph structure based on the domain pivots
graph = build_domain_graph(graph, pivot_categories, config)
return (graph,
pivot_categories,
{"unconnected": trimmed_unconnected_domains,
"create_date": trimmed_create_date_domains})
def init_local_pivot_graph(iris_results: list, config: "Config"):
""" Collect pivot categories found in result set ("ssl_hash" for example)"""
# init empty graph
graph = nx.Graph()
# init pivot categories dict
pivot_categories = {}
for domain_json in iris_results:
# check if domain is active or not
if domain_json['active'] == False and config.active_domains_only:
continue
# create a domain object
domain = Domain(domain_json)
# add domain node to graph
graph.add_node(domain.name, domain=domain)
append_value_with_count(pivot_categories, 'adsense', domain_json, domain, config)
append_value_with_count(pivot_categories, 'google_analytics', domain_json, domain, config)
append_value_with_count(pivot_categories, 'create_date', domain_json, domain, config)
append_value_with_count(pivot_categories, 'redirect_domain', domain_json, domain, config)
append_value_with_count(pivot_categories, 'registrar', domain_json, domain, config)
# haven't seen "ssl_email" in the wild yet, so not sure if it is a value/count or just value
append_values_with_counts(pivot_categories, 'ssl_email', domain_json, domain, config)
# IPs are composite objects, so pull out each value for each IP
for ip_json in domain_json["ip"]:
# at some point add logic to add /24 in here
append_value_with_count(pivot_categories, 'address', ip_json, domain, config, 'ip_address')
append_value_with_count(pivot_categories, 'country_code', ip_json, domain, config, 'ip_country_code')
append_value_with_count(pivot_categories, 'isp', ip_json, domain, config, 'ip_isp')
append_values_with_counts(pivot_categories, 'asn', ip_json, domain, config, 'ip_asn')
# name servers are composite objects, so pull out each value for each name server
for ns_json in domain_json["name_server"]:
append_value_with_count(pivot_categories, 'host', ns_json, domain, config, 'ns_host')
append_value_with_count(pivot_categories, 'domain', ns_json, domain, config, 'ns_domain')
append_values_with_counts(pivot_categories, 'ip', ns_json, domain, config, 'ns_ip')
append_value(pivot_categories, 'tld', domain_json, domain, config)
# ssl certs are composite objects, so pull out each value for each ssl cert
for ssl_json in domain_json['ssl_info']:
append_value_with_count(pivot_categories, 'hash', ssl_json, domain, config, "ssl_hash")
append_value_with_count(pivot_categories, 'subject', ssl_json, domain, config, "ssl_subject")
append_value_with_count(pivot_categories, 'organization', ssl_json, domain, config, "ssl_org")
# mx servers are composite objects, so pull out each value for each mx server
for mx_json in domain_json['mx']:
append_value_with_count(pivot_categories, 'host', mx_json, domain, config, "mx_host")
append_value_with_count(pivot_categories, 'domain', mx_json, domain, config, "mx_domain")
append_values_with_counts(pivot_categories, 'ip', mx_json, domain, config, "mx_ip")
# mx priority might be interesting at some point for node strength
return graph, pivot_categories
def append_value(pivot_categories: dict,
pivot_category: str,
json_data: dict,
domain: "Domain",
config: "Config",
new_pivot_category: str = None):
# check if pivot is in domain json
if pivot_category in json_data:
pivot_value = str(json_data[pivot_category]).strip()
# check we have a value to add
if len(pivot_value) > 0:
_append_value_to_pivot(pivot_categories, pivot_category, pivot_value, None,
domain, config, new_pivot_category)
def append_value_with_count(pivot_categories: dict,
pivot_category: str,
json_data: dict,
domain: "Domain",
config: "Config",
new_pivot_category: str = None):
# check if pivot is in domain json
if pivot_category in json_data:
if isinstance(json_data[pivot_category], dict):
pivot_value = str(json_data[pivot_category]["value"]).strip()
global_pivot_count = json_data[pivot_category]["count"]
# trim pivots that are above the threshold (except create_date)
if global_pivot_count < config.global_count_threshold or pivot_category == "create_date":
# check we have a value to add
if len(pivot_value) > 0 and global_pivot_count > 0:
_append_value_to_pivot(pivot_categories, pivot_category, pivot_value,
global_pivot_count, domain, config, new_pivot_category)
def append_values_with_counts(pivot_categories: dict,
#!/usr/bin/env python
import csv
import json
import operator
import os
import re
from argparse import ArgumentParser
from collections import defaultdict
import atexit
import pandas as pd
import psycopg2
import psycopg2.extras
import requests
from ebi_eva_common_pyutils.config_utils import get_pg_metadata_uri_for_eva_profile
from ebi_eva_common_pyutils.logger import logging_config
from ebi_eva_common_pyutils.pg_utils import execute_query, get_all_results_for_query
logger = logging_config.get_logger(__name__)
logging_config.add_stdout_handler()
eutils_url = 'https://eutils.ncbi.nlm.nih.gov/entrez/eutils/'
esearch_url = eutils_url + 'esearch.fcgi'
esummary_url = eutils_url + 'esummary.fcgi'
efetch_url = eutils_url + 'efetch.fcgi'
ensembl_url = 'http://rest.ensembl.org/info/assembly'
cache_file = 'cache.json'
def load_cache():
global cache
if os.path.exists(cache_file):
with open(cache_file) as open_file:
cache.update(json.load(open_file))
def save_cache():
with open(cache_file, 'w') as open_file:
json.dump(cache, open_file)
atexit.register(save_cache)
cache = defaultdict(dict)
load_cache()
def retrieve_assembly_summary_from_species_name(species):
"""Search for all ids of assemblies associated with a species by depaginating the results of the search query"""
payload = {'db': 'Assembly', 'term': '"{}[ORGN]"'.format(species), 'retmode': 'JSON', 'retmax': 100}
response = requests.get(esearch_url, params=payload)
data = response.json()
search_results = data.get('esearchresult', {})
id_list = search_results.get('idlist', [])
while int(search_results.get('retstart')) + int(search_results.get('retmax')) < int(search_results.get('count')):
payload['retstart'] = int(search_results.get('retstart')) + int(search_results.get('retmax'))
response = requests.get(esearch_url, params=payload)
data = response.json()
search_results = data.get('esearchresult', {})
id_list += search_results.get('idlist', [])
response = requests.get(esummary_url, params={'db': 'Assembly', 'id': ','.join(id_list), 'retmode': 'JSON'})
summary_list = response.json()
if summary_list and 'result' in summary_list:
return [summary_list.get('result').get(uid) for uid in summary_list.get('result').get('uids')]
def most_recent_assembly(assembly_list):
"""Based on assembly summaries find the one submitted the most recently"""
if assembly_list:
return sorted(assembly_list, key=operator.itemgetter('submissiondate'))[-1]
def best_assembly(assembly_list):
"""Based on assembly summaries find the one with the highest scaffold N50"""
if assembly_list:
return sorted(assembly_list, key=operator.itemgetter('scaffoldn50'))[-1]
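def _example_assembly_selection():
    # Illustrative sketch with made-up assembly summaries (only the keys read
    # by the two helpers above plus an 'assemblyaccession' label are included):
    # most_recent_assembly() keeps the latest submission date, while
    # best_assembly() keeps the highest scaffold N50.
    summaries = [
        {"assemblyaccession": "GCA_000000001.1", "submissiondate": "2018/01/05 00:00", "scaffoldn50": 100000},
        {"assemblyaccession": "GCA_000000002.1", "submissiondate": "2021/06/30 00:00", "scaffoldn50": 50000},
    ]
    assert most_recent_assembly(summaries)["assemblyaccession"] == "GCA_000000002.1"
    assert best_assembly(summaries)["assemblyaccession"] == "GCA_000000001.1"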
def retrieve_species_names_from_tax_id(taxid):
"""Search for a species scientific name based on the taxonomy id"""
if str(taxid) not in cache['taxid_to_name']:
logger.info(f'Query NCBI for taxonomy {taxid}', )
payload = {'db': 'Taxonomy', 'id': taxid}
r = requests.get(efetch_url, params=payload)
match = re.search('<Rank>(.+?)</Rank>', r.text, re.MULTILINE)
rank = None
if match:
rank = match.group(1)
if rank not in ['species', 'subspecies']:
logger.warning('Taxonomy id %s does not point to a species', taxid)
match = re.search('<ScientificName>(.+?)</ScientificName>', r.text, re.MULTILINE)
if match:
cache['taxid_to_name'][str(taxid)] = match.group(1)
else:
logger.warning('No species found for %s' % taxid)
cache['taxid_to_name'][str(taxid)] = None
return taxid, cache['taxid_to_name'].get(str(taxid))
def retrieve_species_name_from_assembly_accession(assembly_accession):
"""Search for a species scientific name based on an assembly accession"""
if assembly_accession not in cache['assembly_to_species']:
logger.info(f'Query NCBI for assembly {assembly_accession}', )
payload = {'db': 'Assembly', 'term': '"{}"'.format(assembly_accession), 'retmode': 'JSON'}
data = requests.get(esearch_url, params=payload).json()
if data:
assembly_id_list = data.get('esearchresult').get('idlist')
payload = {'db': 'Assembly', 'id': ','.join(assembly_id_list), 'retmode': 'JSON'}
summary_list = requests.get(esummary_url, params=payload).json()
all_species_names = set()
for assembly_id in summary_list.get('result', {}).get('uids', []):
assembly_info = summary_list.get('result').get(assembly_id)
all_species_names.add((assembly_info.get('speciestaxid'), assembly_info.get('speciesname')))
if len(all_species_names) == 1:
cache['assembly_to_species'][assembly_accession] = all_species_names.pop()
else:
logger.warning('%s taxons found for assembly %s ' % (len(all_species_names), assembly_accession))
return cache['assembly_to_species'].get(assembly_accession) or (None, None)
def retrieve_current_ensembl_assemblies(taxid_or_assembly):
"""
Retrieve the assembly accession currently supported by Ensembl for the provided taxid or assembly accession.
In both cases it looks up the associated species name in NCBI and, using the species name, returns the currently
supported assembly for this species.
"""
logger.debug('Search for species name for %s', taxid_or_assembly)
scientific_name = None
if taxid_or_assembly and str(taxid_or_assembly).isdigit():
# assume it is a taxid
taxid, scientific_name = retrieve_species_names_from_tax_id(taxid_or_assembly)
elif taxid_or_assembly:
# assume it is an assembly accession
taxid, scientific_name = retrieve_species_name_from_assembly_accession(taxid_or_assembly)
if scientific_name:
logger.debug('Found %s', scientific_name)
if scientific_name not in cache['scientific_name_to_ensembl']:
logger.info(f'Query Ensembl for species {scientific_name}', )
url = ensembl_url + '/' + scientific_name.lower().replace(' ', '_')
response = requests.get(url, params={'content-type': 'application/json'})
data = response.json()
assembly_accession = str(data.get('assembly_accession'))
cache['scientific_name_to_ensembl'][scientific_name] = assembly_accession
return [str(taxid), str(scientific_name), cache['scientific_name_to_ensembl'].get(scientific_name)]
return ['NA', 'NA', 'NA']
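# Illustrative usage sketch (commented out because it hits the NCBI and
# Ensembl REST APIs): both call forms return
# [taxid, scientific_name, currently_supported_ensembl_assembly] as strings,
# or ['NA', 'NA', 'NA'] when the species cannot be resolved.
#
#   retrieve_current_ensembl_assemblies(9913)               # from a taxonomy id
#   retrieve_current_ensembl_assemblies('GCA_000003055.3')  # from an assembly accession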
def find_all_eva_studies(accession_counts, private_config_xml_file):
metadata_uri = get_pg_metadata_uri_for_eva_profile("development", private_config_xml_file)
with psycopg2.connect(metadata_uri, user="evadev") as pg_conn:
query = (
'SELECT DISTINCT a.vcf_reference_accession, pt.taxonomy_id, p.project_accession '
'FROM project p '
'LEFT OUTER JOIN project_analysis pa ON p.project_accession=pa.project_accession '
'LEFT OUTER JOIN analysis a ON pa.analysis_accession=a.analysis_accession '
'LEFT OUTER JOIN project_taxonomy pt ON p.project_accession=pt.project_accession '
'WHERE p.ena_status=4 ' # Ensure that the project is public
'ORDER BY pt.taxonomy_id, a.vcf_reference_accession'
)
data = []
for assembly, tax_id, study in filter_studies(get_all_results_for_query(pg_conn, query)):
taxid_from_ensembl, scientific_name, ensembl_assembly_from_taxid = retrieve_current_ensembl_assemblies(tax_id)
_, _, ensembl_assembly_from_assembly = retrieve_current_ensembl_assemblies(assembly)
count_ssid = 0
if study in accession_counts:
assembly_from_mongo, taxid_from_mongo, project_accession, count_ssid = accession_counts.pop(study)
if assembly_from_mongo != assembly:
logger.error(
'For study %s, assembly from accessioning (%s) is different'
' from assembly from metadata (%s) database.', study, assembly_from_mongo, assembly
)
if taxid_from_mongo != tax_id:
logger.error(
'For study %s, taxonomy from accessioning (%s) is different'
' from taxonomy from metadata (%s) database.', study, taxid_from_mongo, tax_id
)
data.append({
'Source': 'EVA',
'Assembly': assembly,
'Taxid': tax_id,
'Scientific Name': scientific_name,
'Study': study,
'Number Of Variants (submitted variants)': count_ssid or 0,
'Ensembl assembly from taxid': ensembl_assembly_from_taxid,
'Ensembl assembly from assembly': ensembl_assembly_from_assembly,
'Target Assembly': ensembl_assembly_from_taxid or ensembl_assembly_from_assembly
})
if len(accession_counts) > 0:
logger.error('Accessioning database has studies (%s) absent from the metadata database', ', '.join(accession_counts))
df = pd.DataFrame(data)
df = df.groupby(
['Source', 'Assembly', 'Taxid', 'Scientific Name', 'Ensembl assembly from taxid',
'Ensembl assembly from assembly', 'Target Assembly']
).agg(
{'Study': 'count', 'Number Of Variants (submitted variants)': 'sum'}
)
df.rename(columns={'Study': 'number Of Studies'}, inplace=True)
return df.reset_index()
def parse_accession_counts(accession_counts_file):
accession_count = {}
with open(accession_counts_file) as open_file:
for line in open_file:
sp_line = line.strip().split()
accession_count[sp_line[0]] = int(sp_line[1])
return accession_count
def get_accession_counts_per_study(private_config_xml_file, source):
accession_count = {}
with psycopg2.connect(get_pg_metadata_uri_for_eva_profile("development", private_config_xml_file),
user="evadev") as pg_conn:
query = (
'SELECT assembly_accession, taxid, project_accession, SUM(number_submitted_variants) '
'FROM eva_stats.submitted_variants_load_counts '
"WHERE source='%s'"
'GROUP BY assembly_accession, taxid, project_accession ' % source
)
for assembly_accession, taxid, project_accession, count_ssid in get_all_results_for_query(pg_conn, query):
accession_count[project_accession] = (assembly_accession, taxid, project_accession, count_ssid)
return accession_count
def get_accession_counts_per_assembly(private_config_xml_file, source):
accession_count = {}
with psycopg2.connect(get_pg_metadata_uri_for_eva_profile("development", private_config_xml_file),
user="evadev") as pg_conn:
query = (
'SELECT assembly_accession, taxid, SUM(number_submitted_variants) '
'FROM eva_stats.submitted_variants_load_counts '
"WHERE source='%s'"
'GROUP BY assembly_accession, taxid ' % source
)
for assembly_accession, taxid, count_ssid in get_all_results_for_query(pg_conn, query):
accession_count[assembly_accession] = (assembly_accession, taxid, count_ssid)
return accession_count
def filter_studies(query_results):
"""
Remove studies from the EVA list that are either missing information (assembly or taxid)
or that are human and therefore cannot be released
"""
for assembly, tax_id, study in query_results:
if not assembly or not tax_id:
logger.error('Study %s is missing assembly (%s) or taxonomy id (%s)', study, assembly, tax_id)
elif tax_id == 9606:
logger.debug("Study %s is human and won't be released", study)
else:
yield assembly, tax_id, study
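def _example_filter_studies():
    # Illustrative sketch with made-up rows: filter_studies drops studies that
    # are missing an assembly or taxonomy id, and human (taxid 9606) studies.
    rows = [
        ("GCA_000001635.9", 10090, "PRJEB0001"),   # mouse study: kept
        (None, 10090, "PRJEB0002"),                # no assembly: dropped (and logged)
        ("GCA_000001405.27", 9606, "PRJEB0003"),   # human: dropped
    ]
    assert list(filter_studies(rows)) == [("GCA_000001635.9", 10090, "PRJEB0001")]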
def parse_dbsnp_csv(input_file, accession_counts):
"""Parse the CSV file generated in the past year to get the DBSNP data"""
df = pd.read_csv(input_file)
df = df[df.Source != 'EVA']
taxids = []
scientific_names = []
ensembl_assemblies_from_taxid = []
ensembl_assemblies_from_assembly = []
target_assemblies = []
for index, record in df.iterrows():
taxid, scientific_name, ensembl_assembly_from_taxid = retrieve_current_ensembl_assemblies(record['Taxid'])
_, _, ensembl_assembly_from_assembly = retrieve_current_ensembl_assemblies(record['Assembly'])
taxids.append(taxid)
scientific_names.append(scientific_name)
ensembl_assemblies_from_taxid.append(ensembl_assembly_from_taxid)
ensembl_assemblies_from_assembly.append(ensembl_assembly_from_assembly)
target_assemblies.append(ensembl_assembly_from_taxid or ensembl_assembly_from_assembly)
if record['Assembly'] != 'Unmapped':
_, _, count = accession_counts[record['Assembly']]
if count != int(record['Number Of Variants (submitted variants)'].replace(',', '')):
logger.error(
'Count in spreadsheet (%s) and in database (%s) are different for accession %s',
record['Number Of Variants (submitted variants)'], count, record['Assembly']
)
df['Scientific Name'] = scientific_names
df['Ensembl assembly from taxid'] = ensembl_assemblies_from_taxid
df['Ensembl assembly from assembly'] = ensembl_assemblies_from_assembly
df['Target Assembly'] = target_assemblies
df.replace(',', '', inplace=True)
return df
def create_table_for_progress(private_config_xml_file):
with psycopg2.connect(get_pg_metadata_uri_for_eva_profile("development", private_config_xml_file),
user="evadev") as metadata_connection_handle:
query_create_table = (
'CREATE TABLE IF NOT EXISTS remapping_progress '
'(source TEXT, taxid INTEGER, scientific_name TEXT, assembly_accession TEXT, number_of_study INTEGER NOT NULL,'
'number_submitted_variants BIGINT NOT NULL, release_number INTEGER, target_assembly_accession TEXT, '
'report_time TIMESTAMP DEFAULT NOW(), progress_status TEXT, start_time TIMESTAMP, '
'completion_time TIMESTAMP, remapping_version TEXT, nb_variant_extracted INTEGER, '
'nb_variant_remapped INTEGER, nb_variant_ingested INTEGER, '
'primary key(source, taxid, assembly_accession, release_number))'
)
execute_query(metadata_connection_handle, query_create_table)
def insert_remapping_progress_to_db(private_config_xml_file, dataframe):
list_to_remap = dataframe.values.tolist()
if len(list_to_remap) > 0:
with psycopg2.connect(get_pg_metadata_uri_for_eva_profile("development", private_config_xml_file),
user="evadev") as metadata_connection_handle:
with metadata_connection_handle.cursor() as cursor:
query_insert = (
'INSERT INTO remapping_progress '
'(source, taxid, scientific_name, assembly_accession, number_of_study, '
'number_submitted_variants, target_assembly_accession, release_number) '
'VALUES %s'
)
psycopg2.extras.execute_values(cursor, query_insert, list_to_remap)
def main():
argparse = ArgumentParser(
description='Gather the current set of studies from both EVA and dbSNP that can be remapped, clustered '
'and released. The source of EVA studies is the metadata database and the source of dbSNP studies '
"is last year's spreadsheet. The number of variants are populated from counts retrieved from "
' eva_stats.')
argparse.add_argument('--input', help='Path to the file containing the taxonomies and assemblies', required=True)
argparse.add_argument('--output', help='Path to the file that will contain the input plus annotation',
required=True)
argparse.add_argument('--private_config_xml_file', required=True,
help='Path to the file containing the username/passwords to access '
'production and development databases')
args = argparse.parse_args()
output_header = ['Source', 'Taxid', 'Scientific Name', 'Assembly', 'number Of Studies',
'Number Of Variants (submitted variants)', 'Ensembl assembly from taxid',
'Ensembl assembly from assembly', 'Target Assembly']
import numpy as np
import torch
from torch.autograd import Variable
from tqdm import tqdm
import utils
from Classifiers.Fashion_Classifier import Fashion_Classifier
from Classifiers.Mnist_Classifier import Mnist_Classifier
from Classifiers.Cifar_Classifier import Cifar_Classifier
from Data.load_dataset import load_dataset_full, load_dataset_test, get_iter_dataset
from log_utils import *
from Data.data_loader import DataLoader
from Evaluation.tools import calculate_frechet_distance
import matplotlib as mpl
mpl.use('Agg')
class Reviewer(object):
def __init__(self, args):
# parameters
self.args = args
self.epoch_Review = args.epoch_Review
self.sample_num = 64
self.batch_size = args.batch_size
self.save_dir = args.save_dir
self.result_dir = args.result_dir
self.sample_dir = args.sample_dir
self.dataset = args.dataset
self.log_dir = args.log_dir
self.gpu_mode = args.gpu_mode
self.model_name = args.gan_type
self.data_dir = args.data_dir
self.gen_dir = args.gen_dir
self.verbose = args.verbose
self.lr = args.lrC
self.momentum = args.momentum
self.log_interval = 100
self.sample_num = 100
self.size_epoch = args.size_epoch
self.gan_type = args.gan_type
self.conditional = args.conditional
self.device = args.device
self.trainEval = args.trainEval
self.num_task = args.num_task
self.task_type = args.task_type
self.context = args.context
self.seed = args.seed
if self.conditional:
self.model_name = 'C' + self.model_name
# Load the generator parameters
# The reviewer evaluate generate dataset (loader train) on true data (loader test)
# not sur yet if valid should be real or not (it was before)
dataset_train, dataset_valid, list_class_train, list_class_valid = load_dataset_full(self.data_dir,
args.dataset)
dataset_test, list_class_test = load_dataset_test(self.data_dir, args.dataset, args.batch_size)
# create data loader for validation and testing
self.valid_loader = get_iter_dataset(dataset_valid)
self.test_loader = get_iter_dataset(dataset_test)
if self.dataset == 'mnist':
self.input_size = 1
self.size = 28
elif self.dataset == 'fashion':
self.input_size = 1
self.size = 28
elif self.dataset == 'cifar10':
self.input_size = 3
self.size = 32
if self.dataset == 'mnist':
self.Classifier = Mnist_Classifier(args)
elif self.dataset == 'fashion':
self.Classifier = Fashion_Classifier(args)
elif self.dataset == 'cifar10':
self.Classifier = Cifar_Classifier(args)
else:
print('Not implemented')
# this should be train on task
def train_classifier(self, epoch, data_loader_train, ind_task):
self.Classifier.net.train()
train_loss_classif, train_accuracy = self.Classifier.train_on_task(data_loader_train, ind_task=ind_task,
epoch=epoch,
additional_loss=None)
val_loss_classif, valid_accuracy, classe_prediction, classe_total, classe_wrong = self.Classifier.eval_on_task(
self.valid_loader, self.verbose)
if self.verbose:
print(
'Epoch: {} Train set: Average loss: {:.4f}, Accuracy: ({:.2f}%)\n Valid set: Average loss: {:.4f}, Accuracy: ({:.2f}%)'.format(
epoch, train_loss_classif, train_accuracy, val_loss_classif, valid_accuracy))
return train_loss_classif, train_accuracy, val_loss_classif, valid_accuracy, (
100. * classe_prediction) / classe_total
def compute_all_tasks_FID(self, args, Best=False):
if Best:
id = "Best_"
else:
id = ''
list_FID = []
for ind_task in range(self.num_task):
list_FID.append(self.compute_FID(args, ind_task, Best))
assert len(list_FID) == self.num_task
list_FID = np.array(list_FID)
np.savetxt(os.path.join(self.log_dir, id + 'Frechet_Inception_Distance_All_Tasks.txt'), list_FID)
def compute_FID(self, args, ind_task, Best=False):
if Best:
id = "Best_"
else:
id = ''
# load true data : upperbound_disjoint
if 'upperbound' in self.task_type:
test_file = self.task_type + '_' + str(self.num_task) + '_test.pt'
else:
test_file = 'upperbound_' + self.task_type + '_' + str(self.num_task) + '_test.pt'
true_DataLoader = DataLoader(torch.load(os.path.join(self.data_dir, 'Tasks', self.dataset, test_file)), args)[
self.num_task-1]
# load generated data
path = os.path.join(self.gen_dir, id + 'train_Task_' + str(ind_task) + '.pt')
gen_DataLoader = DataLoader(torch.load(path), args)
# compute FID
return self.Frechet_Inception_Distance(gen_DataLoader, true_DataLoader, ind_task)
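def _frechet_distance_example(self):
    # Illustrative sketch (assumed maths, not the project's exact call): the
    # Frechet distance between Gaussians (mu1, C1) and (mu2, C2), which FID
    # applies to real vs generated feature statistics, is
    #   ||mu1 - mu2||^2 + Tr(C1 + C2 - 2 * sqrt(C1 @ C2)).
    # For two 1-D Gaussians with equal unit variance it reduces to (mu1 - mu2)^2.
    from scipy import linalg
    mu1, mu2 = np.array([0.0]), np.array([3.0])
    c1 = c2 = np.array([[1.0]])
    covmean = linalg.sqrtm(c1 @ c2)
    return float(np.sum((mu1 - mu2) ** 2) + np.trace(c1 + c2 - 2 * covmean))  # -> 9.0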
def review(self, data_loader_train, task, Best=False):
if Best:
id = "Best_"
else:
id = ''
if self.dataset == 'mnist':
self.Classifier = Mnist_Classifier(self.args)
elif self.dataset == 'fashion':
self.Classifier = Fashion_Classifier(self.args)
else:
print('Not implemented')
best_accuracy = -1
train_loss = []
train_acc = []
val_loss = []
val_acc = []
valid_acc = []
valid_acc_classes = []
if self.verbose:
print("some sample from the generator")
path = os.path.join(self.sample_dir, id + 'samples4review_task_' + str(task) + '.png')
data_loader_train.visualize_sample(path, self.sample_num, [self.size, self.size, self.input_size])
print("Task : " + str(task))
early_stop = 0.
# Training classifier
for epoch in range(self.epoch_Review):
tr_loss, tr_acc, v_loss, v_acc, v_acc_classes = self.train_classifier(epoch, data_loader_train, task)
train_loss.append(tr_loss)
train_acc.append(tr_acc)
val_loss.append(v_loss)
val_acc.append(v_acc)
# Save best model
if v_acc > best_accuracy:
if self.verbose:
print("New Best Classifier")
print(v_acc)
best_accuracy = v_acc
self.save(best=True)
early_stop = 0.
if early_stop == 60:
break
else:
early_stop += 1
valid_acc.append(np.array(v_acc))
valid_acc_classes.append(np.array(v_acc_classes))
# Then load best model
self.load()
loss, test_acc, classe_prediction, classe_total, classe_wrong = self.Classifier.eval_on_task(
self.test_loader, self.verbose)
test_acc_classes = 100. * classe_prediction / classe_total
if self.verbose:
print('\nTest set: Average loss: {:.4f}, Accuracy : ({:.2f}%)'.format(
loss, test_acc))
for i in range(10):
print('Classe {} Accuracy: {}/{} ({:.3f}%, Wrong : {})'.format(
i, classe_prediction[i], classe_total[i],
100. * classe_prediction[i] / classe_total[i], classe_wrong[i]))
print('\n')
# loss, test_acc, test_acc_classes = self.test() # self.test_classifier(epoch)
np.savetxt(os.path.join(self.log_dir, id + 'data_classif_' + self.dataset + '-task' + str(task) + '.txt'),
np.transpose([train_loss, train_acc, val_loss, val_acc]))
np.savetxt(os.path.join(self.log_dir, id + 'best_score_classif_' + self.dataset + '-task' + str(task) + '.txt'),
np.transpose([test_acc]))
np.savetxt(
os.path.join(self.log_dir, id + 'data_classif_classes' + self.dataset + '-task' + str(task) + '.txt'),
np.transpose([test_acc_classes]))
return valid_acc, valid_acc_classes
def eval_on_train(self, data_loader_train, task):
if self.dataset == 'mnist':
self.Classifier = Mnist_Classifier(self.args)
elif self.dataset == 'fashion':
self.Classifier = Fashion_Classifier(self.args)
else:
print('Not implemented')
self.Classifier.load_expert()
self.Classifier.net.eval()
print("trainEval Task : " + str(task))
loss, train_acc, classe_prediction, classe_total, classe_wrong = self.Classifier.eval_on_task(data_loader_train, self.verbose)
train_acc_classes = 100. * classe_prediction / classe_total
if self.verbose:
print('\nTest set: Average loss: {:.4f}, Accuracy : ({:.2f}%)'.format(
loss, train_acc))
for i in range(10):
print('Classe {} Accuracy: {}/{} ({:.3f}%, Wrong : {})'.format(
i, classe_prediction[i], classe_total[i],
100. * classe_prediction[i] / classe_total[i], classe_wrong[i]))
print('\n')
return train_acc, train_acc_classes
def eval_balanced_on_train(self, data_loader_train):
cpt_classes = np.zeros(10)
for i, (data, target) in enumerate(data_loader_train):
for i in range(target.shape[0]):
cpt_classes[target[i]] += 1
print(cpt_classes.astype(int))
return cpt_classes.astype(int)
def review_all_tasks(self, args, Best=False):
        # before launching the program, check that all files are present so we do not lose time
for i in range(self.num_task):
            if Best:  # Best can be used only for the Baseline
path = os.path.join(self.gen_dir, 'Best_train_Task_' + str(i) + '.pt')
else:
path = os.path.join(self.gen_dir, 'train_Task_' + str(i) + '.pt')
assert os.path.isfile(path)
for i in range(self.num_task):
            if Best:  # Best can be used only for the Baseline
path = os.path.join(self.gen_dir, 'Best_train_Task_' + str(i) + '.pt')
else:
path = os.path.join(self.gen_dir, 'train_Task_' + str(i) + '.pt')
data_loader_train = DataLoader(torch.load(path), args)
self.review(data_loader_train, i, Best)
def review_all_trainEval(self, args, Best=False):
if Best:
id = "Best_"
else:
id = ''
list_trainEval = []
list_trainEval_classes = []
list_balance_classes = []
        # before launching the program, check that all files are present so we do not lose time
for i in range(self.num_task):
            if Best:  # Best can be used only for the Baseline
path = os.path.join(self.gen_dir, 'Best_train_Task_' + str(i) + '.pt')
else:
path = os.path.join(self.gen_dir, 'train_Task_' + str(i) + '.pt')
assert os.path.isfile(path)
for i in range(self.num_task):
            if Best:  # Best can be used only for the Baseline
path = os.path.join(self.gen_dir, 'Best_train_Task_' + str(i) + '.pt')
else:
path = os.path.join(self.gen_dir, 'train_Task_' + str(i) + '.pt')
data_loader_train = DataLoader(torch.load(path), args)
if self.conditional or Best:
                train_acc, train_acc_classes = self.eval_on_train(data_loader_train, i)
list_trainEval.append(train_acc)
list_trainEval_classes.append(train_acc_classes)
else:
classe_balance = self.eval_balanced_on_train(data_loader_train)
list_balance_classes.append(classe_balance)
if self.conditional or Best:
assert len(list_trainEval) == self.num_task
list_trainEval = np.array(list_trainEval)
            list_trainEval_classes = np.array(list_trainEval_classes)
np.savetxt(os.path.join(self.log_dir, id + 'TrainEval_All_Tasks.txt'), list_trainEval)
np.savetxt(os.path.join(self.log_dir, id + 'TrainEval_classes_All_Tasks.txt'), list_trainEval_classes)
else:
assert len(list_balance_classes) == self.num_task
np.savetxt(os.path.join(self.log_dir, id + 'Balance_classes_All_Tasks.txt'), list_balance_classes)
# save a classifier or the best classifier
def save(self, best=False):
if not os.path.exists(self.save_dir):
os.makedirs(self.save_dir)
if best:
torch.save(self.Classifier.net.state_dict(),
os.path.join(self.save_dir, self.model_name + '_Classifier_Best.pkl'))
else:
torch.save(self.Classifier.net.state_dict(),
os.path.join(self.save_dir, self.model_name + '_Classifier.pkl'))
# load the best classifier or the reference classifier trained on true data only
def load(self, reference=False):
if reference:
save_dir = os.path.join(self.save_dir, "..", "..", "..", "Classifier", 'seed_' + str(self.seed))
self.Classifier.net.load_state_dict(torch.load(os.path.join(save_dir, 'Classifier_Classifier_Best.pkl')))
else:
self.Classifier.net.load_state_dict(
torch.load(os.path.join(self.save_dir, self.model_name + '_Classifier_Best.pkl')))
def load_best_baseline(self):
        # best seed searched in the list defined in the get_best_baseline function, liste_seed = [1, 2, 3, 4, 5, 6, 7, 8]
best_seed = utils.get_best_baseline(self.log_dir, self.dataset)
save_dir = os.path.join(self.save_dir, "..", "..", "..", "Classifier", 'seed_' + str(best_seed))
self.Classifier.net.load_state_dict(torch.load(os.path.join(save_dir, 'Classifier_Classifier_Best.pkl')))
def Frechet_Inception_Distance(self, Gen_DataLoader, True_DataLoader, ind_task):
eval_size = 50
# 0. load reference classifier
# self.load_best_baseline() # we load the best classifier
self.Classifier.load_expert()
self.Classifier.net.eval()
if self.dataset == "mnist":
latent_size = 320
elif self.dataset == "fashion":
latent_size = 320
real_output_table = torch.FloatTensor(eval_size * self.batch_size, latent_size)
gen_output_table = torch.FloatTensor(eval_size * self.batch_size, latent_size)
# print("get activations on test data")
for i, (data, target) in enumerate(True_DataLoader):
if i >= eval_size or i >= (
int(len(True_DataLoader) / self.batch_size) - 1): # (we throw away the last batch)
break
if self.gpu_mode:
data, target = data.cuda(self.device), target.cuda(self.device)
batch = Variable(data)
label = Variable(target.squeeze())
activation = self.Classifier.net(batch, FID=True)
real_output_table[i * self.batch_size:(i + 1) * self.batch_size, :] = activation.data
# print("get activations on generated data")
Gen_DataLoader.shuffle_task()
for i, (data, target) in enumerate(Gen_DataLoader):
if i >= eval_size or i >= (
int(len(Gen_DataLoader) / self.batch_size) - 1): # (we throw away the last batch)
break
# 2. use the reference classifier to compute the output vector
if self.gpu_mode:
data, target = data.cuda(self.device), target.cuda(self.device)
            batch = Variable(data)
import sys
from copy import deepcopy
import pytest
import numpy as np
from numpy.random import random, randint
from numpy import isclose
from flare.kernels.utils import from_mask_to_args, str_to_kernel_set
from flare.kernels.cutoffs import quadratic_cutoff_bound
from flare.parameters import Parameters
from flare.utils.parameter_helper import ParameterHelper
from .fake_gp import generate_mb_envs, generate_mb_twin_envs
list_to_test = [
["twobody"],
["threebody"],
["twobody", "threebody"],
["twobody", "threebody", "manybody"],
]
multi_cut = [False, True]
@pytest.mark.parametrize("kernels", list_to_test)
@pytest.mark.parametrize("multi_cutoff", multi_cut)
def test_force_en_multi_vs_simple(kernels, multi_cutoff):
"""Check that the analytical kernel matches the one implemented
in mc_simple.py"""
d1 = 1
d2 = 2
tol = 1e-4
cell = 1e7 * np.eye(3)
# set hyperparameters
cutoffs, hyps1, hyps2, hm1, hm2 = generate_same_hm(kernels, multi_cutoff)
delta = 1e-8
env1 = generate_mb_envs(cutoffs, cell, delta, d1)
env2 = generate_mb_envs(cutoffs, cell, delta, d2)
env1 = env1[0][0]
env2 = env2[0][0]
# mc_simple
kernel0, kg0, en_kernel0, force_en_kernel0, _, _, _ = str_to_kernel_set(
kernels, "mc", None
)
args0 = from_mask_to_args(hyps1, cutoffs)
# mc_sephyps
    # args1 and args2 use 1 and 2 groups of hyper-parameters
    # if (diff_cutoff), 1 or 2 groups of cutoffs
    # but the same values as in args0
kernel, kg, en_kernel, force_en_kernel, _, _, _ = str_to_kernel_set(
kernels, "mc", hm2
)
args1 = from_mask_to_args(hyps1, cutoffs, hm1)
args2 = from_mask_to_args(hyps2, cutoffs, hm2)
funcs = [
[kernel0, kg0, en_kernel0, force_en_kernel0],
[kernel, kg, en_kernel, force_en_kernel],
]
# compare whether mc_sephyps and mc_simple
# yield the same values
i = 2
reference = funcs[0][i](env1, env2, *args0)
result = funcs[1][i](env1, env2, *args1)
print(kernels, i, reference, result)
assert isclose(reference, result, rtol=tol)
result = funcs[1][i](env1, env2, *args2)
print(kernels, i, reference, result)
assert isclose(reference, result, rtol=tol)
i = 3
reference = funcs[0][i](env1, env2, d1, *args0)
result = funcs[1][i](env1, env2, d1, *args1)
print(kernels, i, reference, result)
assert isclose(reference, result, rtol=tol)
result = funcs[1][i](env1, env2, d1, *args2)
print(kernels, i, reference, result)
assert isclose(reference, result, rtol=tol)
i = 0
reference = funcs[0][i](env1, env2, d1, d2, *args0)
result = funcs[1][i](env1, env2, d1, d2, *args1)
assert isclose(reference, result, rtol=tol)
print(kernels, i, reference, result)
result = funcs[1][i](env1, env2, d1, d2, *args2)
assert isclose(reference, result, rtol=tol)
print(kernels, i, reference, result)
i = 1
reference = funcs[0][i](env1, env2, d1, d2, *args0)
result = funcs[1][i](env1, env2, d1, d2, *args1)
print(kernels, i, reference, result)
assert isclose(reference[0], result[0], rtol=tol)
assert isclose(reference[1], result[1], rtol=tol).all()
result = funcs[1][i](env1, env2, d1, d2, *args2)
print(kernels, i, reference, result)
assert isclose(reference[0], result[0], rtol=tol)
joint_grad = np.zeros(len(result[1]) // 2)
for i in range(joint_grad.shape[0]):
joint_grad[i] = result[1][i * 2] + result[1][i * 2 + 1]
assert isclose(reference[1], joint_grad, rtol=tol).all()
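# Illustrative sketch, not part of the original test suite: the pairwise
# folding of the two-group hyperparameter gradient above is equivalent to a
# reshape/sum in NumPy. The function name and values below are hypothetical.
def _example_fold_paired_gradients():
    grad_two_groups = np.array([0.1, 0.2, 0.3, 0.4, 0.5, 0.6])
    # explicit loop, mirroring the folding done in the test above
    folded_loop = np.zeros(len(grad_two_groups) // 2)
    for j in range(folded_loop.shape[0]):
        folded_loop[j] = grad_two_groups[j * 2] + grad_two_groups[j * 2 + 1]
    # equivalent vectorized form
    folded_vec = grad_two_groups.reshape(-1, 2).sum(axis=1)
    assert np.allclose(folded_loop, folded_vec)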
@pytest.mark.parametrize("kernels", list_to_test)
@pytest.mark.parametrize("diff_cutoff", multi_cut)
def test_check_sig_scale(kernels, diff_cutoff):
"""Check whether the grouping is properly assign
with four environments
* env1 and env2 are computed from two structures with four
atoms each. There are two species 1, 2
* env1_t and env2_t are derived from the same structure, but
species 2 atoms are removed.
* only the sigma of 1-1 are non-zero
* so using env1 and env1_t should produce the same value
* if the separate group of hyperparameter is properly
applied, the result should be 2**2 times of
the reference
"""
d1 = 1
d2 = 2
tol = 1e-4
scale = 2
cutoffs, hyps0, hm = generate_diff_hm(kernels, diff_cutoff)
delta = 1e-8
env1, env1_t = generate_mb_twin_envs(cutoffs, np.eye(3) * 100, delta, d1, hm)
env2, env2_t = generate_mb_twin_envs(cutoffs, np.eye(3) * 100, delta, d2, hm)
env1 = env1[0][0]
env2 = env2[0][0]
env1_t = env1_t[0][0]
env2_t = env2_t[0][0]
# make the second sigma zero
hyps1 = np.copy(hyps0)
hyps0[0::4] = 0 # 1e-8
hyps1[0::4] = 0 # 1e-8
hyps1[1::4] *= scale
kernel, kg, en_kernel, force_en_kernel, _, _, _ = str_to_kernel_set(
kernels, "mc", hm
)
args0 = from_mask_to_args(hyps0, cutoffs, hm)
args1 = from_mask_to_args(hyps1, cutoffs, hm)
reference = en_kernel(env1, env2, *args0)
result = en_kernel(env1_t, env2_t, *args1)
print(en_kernel.__name__, result, reference)
if reference != 0:
assert isclose(result / reference, scale ** 2, rtol=tol)
reference = force_en_kernel(env1, env2, d1, *args0)
result = force_en_kernel(env1_t, env2_t, d1, *args1)
print(force_en_kernel.__name__, result, reference)
if reference != 0:
assert isclose(result / reference, scale ** 2, rtol=tol)
reference = kernel(env1, env2, d1, d2, *args0)
result = kernel(env1_t, env2_t, d1, d2, *args1)
print(kernel.__name__, result, reference)
if reference != 0:
assert isclose(result / reference, scale ** 2, rtol=tol)
reference = kg(env1, env2, d1, d2, *args0)
result = kg(env1_t, env2_t, d1, d2, *args1)
print(kg.__name__, result, reference)
if reference[0] != 0:
assert isclose(result[0] / reference[0], scale ** 2, rtol=tol)
for idx in range(reference[1].shape[0]):
# check sig0
if reference[1][idx] != 0 and (idx % 4) == 0:
assert isclose(result[1][idx] / reference[1][idx], scale, rtol=tol)
# check the rest, but skip sig 1
elif reference[1][idx] != 0 and (idx % 4) != 1:
assert isclose(result[1][idx] / reference[1][idx], scale ** 2, rtol=tol)
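# Illustrative sketch, not part of the original test suite: the scale**2
# ratio checked above follows from an overall sigma**2 prefactor in the
# kernel value (an assumption here, demonstrated with a generic
# squared-exponential kernel rather than a FLARE kernel).
def _example_sigma_scaling():
    def sq_exp(r, sig, ls):
        return sig ** 2 * np.exp(-r ** 2 / (2 * ls ** 2))

    r, ls = 0.7, 1.3
    reference = sq_exp(r, sig=1.0, ls=ls)
    scaled = sq_exp(r, sig=2.0, ls=ls)
    assert np.isclose(scaled / reference, 2 ** 2)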
@pytest.mark.parametrize("kernels", list_to_test)
@pytest.mark.parametrize("diff_cutoff", multi_cut)
def test_force_bound_cutoff_compare(kernels, diff_cutoff):
"""Check that the analytical kernel matches the one implemented
in mc_simple.py"""
d1 = 1
d2 = 2
tol = 1e-4
cell = 1e7 * np.eye(3)
delta = 1e-8
cutoffs, hyps, hm = generate_diff_hm(kernels, diff_cutoff)
kernel, kg, en_kernel, force_en_kernel, _, _, _ = str_to_kernel_set(
kernels, "mc", hm
)
args = from_mask_to_args(hyps, cutoffs, hm)
np.random.seed(10)
env1 = generate_mb_envs(cutoffs, cell, delta, d1, hm)
env2 = generate_mb_envs(cutoffs, cell, delta, d2, hm)
env1 = env1[0][0]
env2 = env2[0][0]
reference = kernel(env1, env2, d1, d2, *args, quadratic_cutoff_bound)
result = kernel(env1, env2, d1, d2, *args)
assert isclose(reference, result, rtol=tol)
reference = kg(env1, env2, d1, d2, *args, quadratic_cutoff_bound)
result = kg(env1, env2, d1, d2, *args)
assert isclose(reference[0], result[0], rtol=tol)
assert isclose(reference[1], result[1], rtol=tol).all()
reference = en_kernel(env1, env2, *args, quadratic_cutoff_bound)
result = en_kernel(env1, env2, *args)
assert isclose(reference, result, rtol=tol)
reference = force_en_kernel(env1, env2, d1, *args, quadratic_cutoff_bound)
result = force_en_kernel(env1, env2, d1, *args)
assert isclose(reference, result, rtol=tol)
@pytest.mark.parametrize("kernels", [["twobody", "threebody"]])
@pytest.mark.parametrize("diff_cutoff", multi_cut)
def test_constraint(kernels, diff_cutoff):
"""Check that the analytical force/en kernel matches finite difference of
energy kernel."""
if "manybody" in kernels:
return
d1 = 1
d2 = 2
cell = 1e7 * np.eye(3)
delta = 1e-8
cutoffs, hyps, hm = generate_diff_hm(
kernels, diff_cutoff=diff_cutoff, constraint=True
)
_, __, en_kernel, force_en_kernel, _, _, _ = str_to_kernel_set(kernels, "mc", hm)
args0 = from_mask_to_args(hyps, cutoffs, hm)
np.random.seed(10)
env1 = generate_mb_envs(cutoffs, cell, delta, d1, hm)
env2 = generate_mb_envs(cutoffs, cell, delta, d2, hm)
kern_finite_diff = 0
if "twobody" in kernels:
_, _, en2_kernel, fek2, _, _, _ = str_to_kernel_set(["twobody"], "mc", hm)
calc1 = en2_kernel(env1[1][0], env2[0][0], *args0)
calc2 = en2_kernel(env1[0][0], env2[0][0], *args0)
kern_finite_diff += 4 * (calc1 - calc2) / 2.0 / delta
if "threebody" in kernels:
_, _, en3_kernel, fek3, _, _, _ = str_to_kernel_set(["threebody"], "mc", hm)
calc1 = en3_kernel(env1[1][0], env2[0][0], *args0)
calc2 = en3_kernel(env1[0][0], env2[0][0], *args0)
kern_finite_diff += 9 * (calc1 - calc2) / 3.0 / delta
kern_analytical = force_en_kernel(env1[0][0], env2[0][0], d1, *args0)
tol = 1e-4
print(kern_finite_diff, kern_analytical)
assert isclose(-kern_finite_diff, kern_analytical, rtol=tol)
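# Illustrative sketch, not part of the original test suite: the tests above
# validate analytical derivatives against a central difference of the energy
# kernel, (E(+delta) - E(-delta)) / (2 * delta). The kernel-specific
# prefactors are not reproduced here; sin/cos stand in for energy/force.
def _example_central_difference():
    x, delta = 0.4, 1e-6
    finite_diff = (np.sin(x + delta) - np.sin(x - delta)) / (2 * delta)
    assert np.isclose(finite_diff, np.cos(x), rtol=1e-6)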
@pytest.mark.parametrize("kernels", list_to_test)
@pytest.mark.parametrize("diff_cutoff", multi_cut)
def test_force_en(kernels, diff_cutoff):
"""Check that the analytical force/en kernel matches finite difference of
energy kernel."""
delta = 1e-5
d1 = 1
d2 = 2
cell = 1e7 * np.eye(3)
np.random.seed(0)
cutoffs, hyps, hm = generate_diff_hm(kernels, diff_cutoff)
args = from_mask_to_args(hyps, cutoffs, hm)
env1 = generate_mb_envs(cutoffs, cell, delta, d1, hm)
env2 = generate_mb_envs(cutoffs, cell, delta, d2, hm)
_, _, en_kernel, force_en_kernel, _, _, _ = str_to_kernel_set(kernels, "mc", hm)
kern_analytical = force_en_kernel(env1[0][0], env2[0][0], d1, *args)
kern_finite_diff = 0
if "manybody" in kernels:
kernel, _, enm_kernel, efk, _, _, _ = str_to_kernel_set(["manybody"], "mc", hm)
calc = 0
for i in range(len(env1[0])):
calc += enm_kernel(env1[1][i], env2[0][0], *args)
calc -= enm_kernel(env1[2][i], env2[0][0], *args)
kern_finite_diff += (calc) / (2 * delta)
if "twobody" in kernels or "threebody" in kernels:
args23 = from_mask_to_args(hyps, cutoffs, hm)
if "twobody" in kernels:
kernel, _, en2_kernel, efk, _, _, _ = str_to_kernel_set(["2b"], "mc", hm)
calc1 = en2_kernel(env1[1][0], env2[0][0], *args23)
calc2 = en2_kernel(env1[2][0], env2[0][0], *args23)
diff2b = 4 * (calc1 - calc2) / 2.0 / delta / 2.0
kern_finite_diff += diff2b
if "threebody" in kernels:
kernel, _, en3_kernel, efk, _, _, _ = str_to_kernel_set(["3b"], "mc", hm)
calc1 = en3_kernel(env1[1][0], env2[0][0], *args23)
calc2 = en3_kernel(env1[2][0], env2[0][0], *args23)
diff3b = 9 * (calc1 - calc2) / 3.0 / delta / 2.0
kern_finite_diff += diff3b
tol = 1e-3
print("\nforce_en", kernels, kern_finite_diff, kern_analytical)
assert isclose(-kern_finite_diff, kern_analytical, rtol=tol)
@pytest.mark.parametrize("kernels", list_to_test)
@pytest.mark.parametrize("diff_cutoff", multi_cut)
def test_force(kernels, diff_cutoff):
"""Check that the analytical force kernel matches finite difference of
energy kernel."""
    d1 = 1
    R = np.asarray(R, dtype=float)
if R.ndim != 2 or R.shape[0] != 3 or R.shape[1] != 3:
raise ValueError("Expected rotation matrix with shape (3, 3), got "
"array-like object with shape %s" % (R.shape,))
RRT = np.dot(R, R.T)
if not np.allclose(RRT, np.eye(3), atol=tolerance):
error_msg = ("Expected rotation matrix, but it failed the test "
"for inversion by transposition. np.dot(R, R.T) "
"gives %r" % RRT)
if strict_check:
raise ValueError(error_msg)
else:
warnings.warn(error_msg)
R_det = np.linalg.det(R)
if abs(R_det - 1) > tolerance:
error_msg = ("Expected rotation matrix, but it failed the test "
"for the determinant, which should be 1 but is %g; "
"that is, it probably represents a rotoreflection"
% R_det)
if strict_check:
raise ValueError(error_msg)
else:
warnings.warn(error_msg)
return R
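# Illustrative sketch, not part of the original module: the two conditions
# verified above (R R^T = I and det(R) = +1) in standalone NumPy form. A
# reflection is still orthogonal but has det(R) = -1, i.e. it is a
# rotoreflection. The function name and matrices below are hypothetical.
def _example_rotation_vs_rotoreflection():
    import numpy as np

    rotation = np.array([[0.0, -1.0, 0.0],
                         [1.0, 0.0, 0.0],
                         [0.0, 0.0, 1.0]])  # 90 degrees about the z-axis
    reflection = np.diag([1.0, 1.0, -1.0])  # mirror in the xy-plane
    for M in (rotation, reflection):
        assert np.allclose(np.dot(M, M.T), np.eye(3))
    assert np.isclose(np.linalg.det(rotation), 1.0)
    assert np.isclose(np.linalg.det(reflection), -1.0)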
def check_axis_angle(a):
"""Input validation of axis-angle representation.
Parameters
----------
a : array-like, shape (4,)
Axis of rotation and rotation angle: (x, y, z, angle)
Returns
-------
a : array, shape (4,)
Validated axis of rotation and rotation angle: (x, y, z, angle)
"""
    a = np.asarray(a, dtype=float)
if a.ndim != 1 or a.shape[0] != 4:
raise ValueError("Expected axis and angle in array with shape (4,), "
"got array-like object with shape %s" % (a.shape,))
return norm_axis_angle(a)
def check_compact_axis_angle(a):
"""Input validation of compact axis-angle representation.
Parameters
----------
a : array-like, shape (3,)
Axis of rotation and rotation angle: angle * (x, y, z)
Returns
-------
a : array, shape (3,)
Validated axis of rotation and rotation angle: angle * (x, y, z)
"""
    a = np.asarray(a, dtype=float)
if a.ndim != 1 or a.shape[0] != 3:
raise ValueError("Expected axis and angle in array with shape (3,), "
"got array-like object with shape %s" % (a.shape,))
return norm_compact_axis_angle(a)
def check_quaternion(q, unit=True):
"""Input validation of quaternion representation.
Parameters
----------
q : array-like, shape (4,)
Quaternion to represent rotation: (w, x, y, z)
unit : bool, optional (default: True)
Normalize the quaternion so that it is a unit quaternion
Returns
-------
q : array-like, shape (4,)
Validated quaternion to represent rotation: (w, x, y, z)
"""
    q = np.asarray(q, dtype=float)
if q.ndim != 1 or q.shape[0] != 4:
raise ValueError("Expected quaternion with shape (4,), got "
"array-like object with shape %s" % (q.shape,))
if unit:
return norm_vector(q)
else:
return q
def check_quaternions(Q, unit=True):
"""Input validation of quaternion representation.
Parameters
----------
Q : array-like, shape (n_steps, 4)
Quaternions to represent rotations: (w, x, y, z)
unit : bool, optional (default: True)
Normalize the quaternions so that they are unit quaternions
Returns
-------
Q : array-like, shape (n_steps, 4)
Validated quaternions to represent rotations: (w, x, y, z)
"""
    Q_checked = np.asarray(Q, dtype=float)
if Q_checked.ndim != 2 or Q_checked.shape[1] != 4:
raise ValueError(
"Expected quaternion array with shape (n_steps, 4), got "
"array-like object with shape %s" % (Q_checked.shape,))
if unit:
for i in range(len(Q)):
Q_checked[i] = norm_vector(Q_checked[i])
return Q_checked
def matrix_from_two_vectors(a, b):
"""Compute rotation matrix from two vectors.
We assume that the two given vectors form a plane so that we can compute
a third, orthogonal vector with the cross product.
The x-axis will point in the same direction as a, the y-axis corresponds
to the normalized vector rejection of b on a, and the z-axis is the
cross product of the other basis vectors.
Parameters
----------
a : array-like, shape (3,)
First vector, must not be 0
b : array-like, shape (3,)
        Second vector, must not be 0 or parallel to a
Returns
-------
R : array, shape (3, 3)
Rotation matrix
"""
if np.linalg.norm(a) == 0:
raise ValueError("a must not be the zero vector.")
if np.linalg.norm(b) == 0:
raise ValueError("b must not be the zero vector.")
c = perpendicular_to_vectors(a, b)
if np.linalg.norm(c) == 0:
raise ValueError("a and b must not be parallel.")
a = norm_vector(a)
b_on_a_projection = vector_projection(b, a)
b_on_a_rejection = b - b_on_a_projection
b = norm_vector(b_on_a_rejection)
c = norm_vector(c)
return np.column_stack((a, b, c))
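# Illustrative usage sketch, not part of the original module, for
# matrix_from_two_vectors defined above; the input vectors are arbitrary.
def _example_matrix_from_two_vectors():
    import numpy as np

    a = np.array([1.0, 0.0, 0.0])
    b = np.array([1.0, 1.0, 0.0])  # not parallel to a
    R = matrix_from_two_vectors(a, b)
    # the columns form an orthonormal, right-handed basis
    assert np.allclose(np.dot(R, R.T), np.eye(3))
    assert np.isclose(np.linalg.det(R), 1.0)
    # the first column points along a
    assert np.allclose(R[:, 0], a / np.linalg.norm(a))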
def matrix_from_axis_angle(a):
"""Compute rotation matrix from axis-angle.
This is called exponential map or Rodrigues' formula.
This typically results in an active rotation matrix.
Parameters
----------
a : array-like, shape (4,)
Axis of rotation and rotation angle: (x, y, z, angle)
Returns
-------
R : array-like, shape (3, 3)
Rotation matrix
"""
a = check_axis_angle(a)
ux, uy, uz, theta = a
c = math.cos(theta)
s = math.sin(theta)
ci = 1.0 - c
R = np.array([[ci * ux * ux + c,
ci * ux * uy - uz * s,
ci * ux * uz + uy * s],
[ci * uy * ux + uz * s,
ci * uy * uy + c,
ci * uy * uz - ux * s],
[ci * uz * ux - uy * s,
ci * uz * uy + ux * s,
ci * uz * uz + c],
])
# This is equivalent to
# R = (np.eye(3) * np.cos(a[3]) +
# (1.0 - np.cos(a[3])) * a[:3, np.newaxis].dot(a[np.newaxis, :3]) +
# cross_product_matrix(a[:3]) * np.sin(a[3]))
return R
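# Illustrative usage sketch, not part of the original module: a 90 degree
# rotation about the z-axis maps the x-axis onto the y-axis.
def _example_matrix_from_axis_angle():
    import numpy as np

    R = matrix_from_axis_angle([0.0, 0.0, 1.0, 0.5 * np.pi])
    assert np.allclose(np.dot(R, [1.0, 0.0, 0.0]), [0.0, 1.0, 0.0])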
def matrix_from_compact_axis_angle(a):
"""Compute rotation matrix from compact axis-angle.
This is called exponential map or Rodrigues' formula.
This typically results in an active rotation matrix.
Parameters
----------
a : array-like, shape (3,)
Axis of rotation and rotation angle: angle * (x, y, z)
Returns
-------
R : array-like, shape (3, 3)
Rotation matrix
"""
a = axis_angle_from_compact_axis_angle(a)
return matrix_from_axis_angle(a)
def matrix_from_quaternion(q):
"""Compute rotation matrix from quaternion.
This typically results in an active rotation matrix.
Parameters
----------
q : array-like, shape (4,)
Unit quaternion to represent rotation: (w, x, y, z)
Returns
-------
R : array-like, shape (3, 3)
Rotation matrix
"""
q = check_quaternion(q)
uq = norm_vector(q)
w, x, y, z = uq
x2 = 2.0 * x * x
y2 = 2.0 * y * y
z2 = 2.0 * z * z
xy = 2.0 * x * y
xz = 2.0 * x * z
yz = 2.0 * y * z
xw = 2.0 * x * w
yw = 2.0 * y * w
zw = 2.0 * z * w
R = np.array([[1.0 - y2 - z2, xy - zw, xz + yw],
[xy + zw, 1.0 - x2 - z2, yz - xw],
[xz - yw, yz + xw, 1.0 - x2 - y2]])
return R
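# Illustrative sketch, not part of the original module: the unit quaternion
# for a rotation by angle t about a unit axis u is (cos(t/2), u * sin(t/2)),
# so the quaternion and axis-angle paths should yield the same matrix.
def _example_quaternion_axis_angle_consistency():
    import numpy as np

    t = 0.5 * np.pi
    q = [np.cos(t / 2.0), 0.0, 0.0, np.sin(t / 2.0)]  # rotation about z
    assert np.allclose(matrix_from_quaternion(q),
                       matrix_from_axis_angle([0.0, 0.0, 1.0, t]))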
def matrix_from_angle(basis, angle):
"""Compute passive rotation matrix from rotation about basis vector.
Parameters
----------
basis : int from [0, 1, 2]
The rotation axis (0: x, 1: y, 2: z)
angle : float
Rotation angle
Returns
-------
R : array-like, shape (3, 3)
Rotation matrix
"""
c = np.cos(angle)
s = np.sin(angle)
if basis == 0:
R = np.array([[1.0, 0.0, 0.0],
[0.0, c, s],
[0.0, -s, c]])
elif basis == 1:
R = np.array([[c, 0.0, -s],
[0.0, 1.0, 0.0],
[s, 0.0, c]])
elif basis == 2:
R = np.array([[c, s, 0.0],
[-s, c, 0.0],
[0.0, 0.0, 1.0]])
else:
raise ValueError("Basis must be in [0, 1, 2]")
return R
passive_matrix_from_angle = matrix_from_angle
def active_matrix_from_angle(basis, angle):
"""Compute active rotation matrix from rotation about basis vector.
Parameters
----------
basis : int from [0, 1, 2]
The rotation axis (0: x, 1: y, 2: z)
angle : float
Rotation angle
Returns
-------
R : array-like, shape (3, 3)
Rotation matrix
"""
c = np.cos(angle)
s = np.sin(angle)
if basis == 0:
R = np.array([[1.0, 0.0, 0.0],
[0.0, c, -s],
[0.0, s, c]])
elif basis == 1:
R = np.array([[c, 0.0, s],
[0.0, 1.0, 0.0],
[-s, 0.0, c]])
elif basis == 2:
R = np.array([[c, -s, 0.0],
[s, c, 0.0],
[0.0, 0.0, 1.0]])
else:
raise ValueError("Basis must be in [0, 1, 2]")
return R
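# Illustrative sketch, not part of the original module: for a single basis
# axis, the active and passive rotation matrices defined above are transposes
# (and hence inverses) of each other.
def _example_active_vs_passive():
    import numpy as np

    angle = 0.3
    for basis in (0, 1, 2):
        assert np.allclose(active_matrix_from_angle(basis, angle),
                           passive_matrix_from_angle(basis, angle).T)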
def matrix_from_euler_xyz(e):
"""Compute passive rotation matrix from intrinsic xyz Tait-Bryan angles.
Parameters
----------
e : array-like, shape (3,)
Angles for rotation around x-, y'-, and z''-axes (intrinsic rotations)
Returns
-------
R : array-like, shape (3, 3)
Rotation matrix
"""
alpha, beta, gamma = e
R = passive_matrix_from_angle(0, alpha).dot(
passive_matrix_from_angle(1, beta)).dot(
passive_matrix_from_angle(2, gamma))
return R
def matrix_from_euler_zyx(e):
"""Compute passive | |
"""
Copyright (c) 2015, 2016, 2017 Red Hat, Inc
All rights reserved.
This software may be modified and distributed under the terms
of the BSD license. See the LICENSE file for details.
"""
import copy
import glob
import json
import os
import fnmatch
from pkg_resources import parse_version
import shutil
import six
import yaml
from copy import deepcopy
from osbs.build.build_request import BuildRequest
from osbs.api import OSBS
from osbs.constants import (DEFAULT_OUTER_TEMPLATE,
DEFAULT_INNER_TEMPLATE, SECRETS_PATH,
ORCHESTRATOR_INNER_TEMPLATE, WORKER_INNER_TEMPLATE,
DEFAULT_ARRANGEMENT_VERSION,
REACTOR_CONFIG_ARRANGEMENT_VERSION,
BUILD_TYPE_WORKER, BUILD_TYPE_ORCHESTRATOR,
ADDITIONAL_TAGS_FILE)
from osbs.exceptions import OsbsValidationException
from osbs import __version__ as expected_version
from osbs.conf import Configuration
from osbs.repo_utils import RepoInfo, RepoConfiguration, AdditionalTagsConfig
from flexmock import flexmock
import pytest
from tests.constants import (INPUTS_PATH, TEST_BUILD_CONFIG, TEST_BUILD_JSON,
TEST_COMPONENT, TEST_GIT_BRANCH, TEST_GIT_REF,
TEST_GIT_URI, TEST_GIT_URI_HUMAN_NAME,
TEST_FILESYSTEM_KOJI_TASK_ID, TEST_SCRATCH_BUILD_NAME,
TEST_ISOLATED_BUILD_NAME, TEST_FLATPAK_BASE_IMAGE)
USE_DEFAULT_TRIGGERS = object()
# Don't use REACTOR_CONFIG_ARRANGEMENT templates
TEST_ARRANGEMENT_VERSION = min(DEFAULT_ARRANGEMENT_VERSION,
REACTOR_CONFIG_ARRANGEMENT_VERSION - 1)
class NoSuchPluginException(Exception):
pass
def get_sample_prod_params():
return {
'git_uri': TEST_GIT_URI,
'git_ref': TEST_GIT_REF,
'git_branch': TEST_GIT_BRANCH,
'user': 'john-foo',
'component': TEST_COMPONENT,
'base_image': 'fedora:latest',
'name_label': 'fedora/resultingimage',
'registry_uri': 'registry.example.com',
'source_registry_uri': 'registry.example.com',
'openshift_uri': 'http://openshift/',
'builder_openshift_url': 'http://openshift/',
'koji_target': 'koji-target',
'kojiroot': 'http://root/',
'kojihub': 'http://hub/',
'sources_command': 'make',
'vendor': 'Foo Vendor',
'authoritative_registry': 'registry.example.com',
'distribution_scope': 'authoritative-source-only',
'registry_api_versions': ['v1'],
'smtp_host': 'smtp.example.com',
'smtp_from': '<EMAIL>',
'proxy': 'http://proxy.example.com',
'platforms': ['x86_64'],
'filesystem_koji_task_id': TEST_FILESYSTEM_KOJI_TASK_ID,
'build_from': 'image:buildroot:latest',
'osbs_api': MockOSBSApi()
}
def MockOSBSApi(config_map_data=None):
class MockConfigMap(object):
def __init__(self, data):
self.data = data or {}
def get_data_by_key(self, key=None):
return self.data
mock_osbs = flexmock(OSBS)
flexmock(mock_osbs).should_receive('get_config_map').and_return(MockConfigMap(config_map_data))
return mock_osbs
def get_plugins_from_build_json(build_json):
env_vars = build_json['spec']['strategy']['customStrategy']['env']
plugins = None
for d in env_vars:
if d['name'] == 'ATOMIC_REACTOR_PLUGINS':
plugins = json.loads(d['value'])
break
assert plugins is not None
return plugins
def get_plugin(plugins, plugin_type, plugin_name):
plugins = plugins[plugin_type]
for plugin in plugins:
if plugin["name"] == plugin_name:
return plugin
else:
raise NoSuchPluginException()
def has_plugin(plugins, plugin_type, plugin_name):
try:
get_plugin(plugins, plugin_type, plugin_name)
except NoSuchPluginException:
return False
return True
def plugin_value_get(plugins, plugin_type, plugin_name, *args):
result = get_plugin(plugins, plugin_type, plugin_name)
for arg in args:
result = result[arg]
return result
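# Illustrative sketch, not part of the original test module: how the plugin
# helpers above are meant to be used. The plugin layout is a dict of phases,
# each holding a list of {"name": ..., "args": ...} entries; the plugin name
# and argument below are made up.
def _example_plugin_helpers():
    plugins = {
        "prebuild_plugins": [
            {"name": "example_plugin", "args": {"url": "http://openshift/"}},
        ],
    }
    assert has_plugin(plugins, "prebuild_plugins", "example_plugin")
    assert not has_plugin(plugins, "prebuild_plugins", "missing_plugin")
    assert plugin_value_get(plugins, "prebuild_plugins", "example_plugin",
                            "args", "url") == "http://openshift/"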
def get_secret_mountpath_by_name(build_json, name):
secrets = build_json['spec']['strategy']['customStrategy']['secrets']
named_secrets = [secret for secret in secrets
if secret['secretSource']['name'] == name]
assert len(named_secrets) == 1
secret = named_secrets[0]
assert 'mountPath' in secret
return secret['mountPath']
class TestBuildRequest(object):
def assert_import_image_plugin(self, plugins, name_label, registry_uri,
openshift_uri, use_auth, insecure_registry):
phase = 'postbuild_plugins'
plugin = 'import_image'
assert get_plugin(plugins, phase, plugin)
plugin_args = plugin_value_get(plugins, phase, plugin, 'args')
assert plugin_args['imagestream'] == name_label.replace('/', '-')
expected_repo = os.path.join(registry_uri, name_label)
expected_repo = expected_repo.replace('https://', '')
expected_repo = expected_repo.replace('http://', '')
assert plugin_args['docker_image_repo'] == expected_repo
assert plugin_args['url'] == openshift_uri
assert plugin_args.get('use_auth') == use_auth
assert plugin_args.get('insecure_registry', False) == insecure_registry
def assert_koji_upload_plugin(self, plugins, use_auth, prefer_schema1_digest, valid=True):
phase = 'postbuild_plugins'
name = 'koji_upload'
if not valid:
with pytest.raises(NoSuchPluginException):
get_plugin(plugins, phase, name)
else:
assert get_plugin(plugins, phase, name)
plugin_args = plugin_value_get(plugins, phase, name, 'args')
assert plugin_args.get('koji_upload_dir')
if use_auth is not None:
assert plugin_args['use_auth'] == use_auth
else:
assert 'use_auth' not in plugin_args
if prefer_schema1_digest is not None:
assert plugin_args['prefer_schema1_digest'] == prefer_schema1_digest
else:
assert 'prefer_schema1_digest' not in plugin_args
@pytest.mark.parametrize('kojihub', ("http://hub/", None))
@pytest.mark.parametrize('use_auth', (True, False, None))
@pytest.mark.parametrize('prefer_schema1_digest', (True, False, None))
def test_render_koji_upload(self, use_auth, kojihub, prefer_schema1_digest):
inner_template = WORKER_INNER_TEMPLATE.format(
arrangement_version=TEST_ARRANGEMENT_VERSION)
build_request = BuildRequest(INPUTS_PATH, inner_template=inner_template,
outer_template=None, customize_conf=None)
kwargs = {
'git_uri': TEST_GIT_URI,
'git_ref': TEST_GIT_REF,
'user': "john-foo",
'component': TEST_COMPONENT,
'registry_uris': [],
'openshift_uri': "http://openshift/",
'builder_openshift_url': "http://openshift/",
'base_image': 'fedora:latest',
'name_label': 'fedora/resultingimage',
'registry_api_versions': ['v1'],
'kojihub': kojihub,
'koji_upload_dir': 'upload',
'build_from': 'image:buildroot:latest',
'osbs_api': MockOSBSApi(),
}
if use_auth is not None:
kwargs['use_auth'] = use_auth
if prefer_schema1_digest is not None:
kwargs['prefer_schema1_digest'] = prefer_schema1_digest
build_request.set_params(**kwargs)
build_json = build_request.render()
plugins = get_plugins_from_build_json(build_json)
self.assert_koji_upload_plugin(plugins, use_auth, prefer_schema1_digest, kojihub)
@pytest.mark.parametrize(('koji_hub', 'base_image', 'scratch', 'enabled'), (
("http://hub/", 'fedora:latest', False, True),
(None, 'fedora:latest', False, False),
("http://hub/", 'fedora:latest', True, False),
(None, 'fedora:latest', True, False),
("http://hub/", 'koji/image-build', False, False),
("http://hub/", 'koji/image-build', True, False),
))
@pytest.mark.parametrize(('certs_dir', 'certs_dir_set'), (
('/my/super/secret/dir', True),
(None, False),
))
def test_render_koji_parent(self, koji_hub, base_image, scratch, enabled, certs_dir,
certs_dir_set):
plugin_type = 'prebuild_plugins'
plugin_name = 'koji_parent'
build_request = BuildRequest(INPUTS_PATH)
kwargs = {
'git_uri': TEST_GIT_URI,
'git_ref': TEST_GIT_REF,
'user': "john-foo",
'openshift_uri': "http://openshift/",
'builder_openshift_url': "http://openshift/",
'base_image': base_image,
'name_label': 'fedora/resultingimage',
'registry_api_versions': ['v1', 'v2'],
'kojihub': koji_hub,
'koji_certs_secret': certs_dir,
'scratch': scratch,
'build_from': 'image:buildroot:latest',
'osbs_api': MockOSBSApi(),
}
build_request.set_params(**kwargs)
build_json = build_request.render()
plugins = get_plugins_from_build_json(build_json)
if not enabled:
with pytest.raises(NoSuchPluginException):
get_plugin(plugins, plugin_type, plugin_name)
return
assert get_plugin(plugins, plugin_type, plugin_name)
actual_plugin_args = plugin_value_get(plugins, plugin_type, plugin_name, 'args')
expected_plugin_args = {'koji_hub': koji_hub}
if certs_dir_set:
expected_plugin_args['koji_ssl_certs_dir'] = certs_dir
assert actual_plugin_args == expected_plugin_args
@pytest.mark.parametrize(('koji_hub', 'base_image', 'scratch', 'enabled'), (
("http://hub/", 'fedora:latest', False, True),
(None, 'fedora:latest', False, False),
("http://hub/", 'fedora:latest', True, False),
(None, 'fedora:latest', True, False),
("http://hub/", 'koji/image-build', False, False),
("http://hub/", 'koji/image-build', True, False),
))
@pytest.mark.parametrize(('certs_dir', 'certs_dir_set'), (
('/my/super/secret/dir', True),
(None, False),
))
def test_render_koji_import(self, koji_hub, base_image, scratch, enabled, certs_dir,
certs_dir_set):
plugin_type = 'exit_plugins'
plugin_name = 'koji_import'
if enabled:
inner_template = ORCHESTRATOR_INNER_TEMPLATE.format(
arrangement_version=TEST_ARRANGEMENT_VERSION)
else:
inner_template = None
build_request = BuildRequest(INPUTS_PATH, inner_template=inner_template)
kwargs = {
'git_uri': TEST_GIT_URI,
'git_ref': TEST_GIT_REF,
'user': "john-foo",
'openshift_uri': "http://openshift/",
'builder_openshift_url': "http://openshift/",
'base_image': base_image,
'name_label': 'fedora/resultingimage',
'registry_api_versions': ['v1', 'v2'],
'kojihub': koji_hub,
'koji_certs_secret': certs_dir,
'scratch': scratch,
'build_from': 'image:buildroot:latest',
'osbs_api': MockOSBSApi(),
}
build_request.set_params(**kwargs)
build_json = build_request.render()
plugins = get_plugins_from_build_json(build_json)
if not enabled:
with pytest.raises(NoSuchPluginException):
get_plugin(plugins, plugin_type, plugin_name)
return
assert get_plugin(plugins, plugin_type, plugin_name)
actual_plugin_args = plugin_value_get(plugins, plugin_type, plugin_name, 'args')
expected_plugin_args = {'kojihub': koji_hub,
'koji_keytab': False,
'url': 'http://openshift/',
'verify_ssl': False}
if certs_dir_set:
expected_plugin_args['koji_ssl_certs'] = certs_dir
assert actual_plugin_args == expected_plugin_args
def test_build_request_has_ist_trigger(self):
build_json = copy.deepcopy(TEST_BUILD_JSON)
br = BuildRequest('something')
flexmock(br).should_receive('template').and_return(build_json)
assert br.has_ist_trigger() is True
def test_build_request_isnt_auto_instantiated(self):
build_json = copy.deepcopy(TEST_BUILD_JSON)
build_json['spec']['triggers'] = []
br = BuildRequest('something')
flexmock(br).should_receive('template').and_return(build_json)
assert br.has_ist_trigger() is False
def test_set_label(self):
build_json = copy.deepcopy(TEST_BUILD_JSON)
br = BuildRequest('something')
flexmock(br).should_receive('template').and_return(build_json)
assert br.template['metadata'].get('labels') is None
br.set_label('label-1', 'value-1')
br.set_label('label-2', 'value-2')
br.set_label('label-3', 'value-3')
assert br.template['metadata']['labels'] == {
'label-1': 'value-1',
'label-2': 'value-2',
'label-3': 'value-3',
}
@pytest.mark.parametrize(('extra_kwargs', 'valid'), (
({'scratch': True}, True),
({'is_auto': True}, True),
({'isolated': True, 'release': '1.0'}, True),
({'scratch': True, 'isolated': True, 'release': '1.0'}, False),
({'scratch': True, 'is_auto': True}, False),
({'is_auto': True, 'isolated': True, 'release': '1.0'}, False),
))
def test_mutually_exclusive_build_variation(self, extra_kwargs, valid):
kwargs = get_sample_prod_params()
kwargs.update(extra_kwargs)
build_request = BuildRequest(INPUTS_PATH)
if valid:
build_request.set_params(**kwargs)
build_request.render()
else:
with pytest.raises(OsbsValidationException) as exc_info:
build_request.set_params(**kwargs)
assert 'mutually exclusive' in str(exc_info.value)
@pytest.mark.parametrize('registry_uris', [
[],
["registry.example.com:5000"],
["registry.example.com:5000", "localhost:6000"],
])
def test_render_simple_request(self, registry_uris):
build_request = BuildRequest(INPUTS_PATH)
kwargs = {
'git_uri': TEST_GIT_URI,
'git_ref': TEST_GIT_REF,
'user': "john-foo",
'component': TEST_COMPONENT,
'registry_uris': registry_uris,
'openshift_uri': "http://openshift/",
'builder_openshift_url': "http://openshift/",
'build_image': 'fancy_buildroot:latestest',
'base_image': 'fedora:latest',
'name_label': 'fedora/resultingimage',
'registry_api_versions': ['v1'],
'osbs_api': MockOSBSApi(),
}
build_request.set_params(**kwargs)
build_json = build_request.render()
assert build_json["metadata"]["name"] is not None
assert "triggers" not in build_json["spec"]
assert build_json["spec"]["source"]["git"]["uri"] == TEST_GIT_URI
assert build_json["spec"]["source"]["git"]["ref"] == TEST_GIT_REF
expected_output = "john-foo/component:none-"
if registry_uris:
expected_output = registry_uris[0] + "/" + expected_output
assert build_json["spec"]["output"]["to"]["name"].startswith(expected_output)
plugins = get_plugins_from_build_json(build_json)
pull_base_image = get_plugin(plugins, "prebuild_plugins",
"pull_base_image")
assert pull_base_image is not None
assert ('args' not in pull_base_image or
'parent_registry' not in pull_base_image['args'] or
not pull_base_image['args']['parent_registry'])
assert plugin_value_get(plugins, "exit_plugins", "store_metadata_in_osv3", "args",
"url") == "http://openshift/"
for r in registry_uris:
assert plugin_value_get(plugins, "postbuild_plugins", "tag_and_push", "args",
"registries", r) == {"insecure": True}
rendered_build_image = build_json["spec"]["strategy"]["customStrategy"]["from"]["name"]
assert rendered_build_image == 'fancy_buildroot:latestest'
@pytest.mark.parametrize('proxy', [
None,
'http://proxy.example.com',
])
@pytest.mark.parametrize(('build_image', 'build_imagestream', 'valid'), (
(None, None, False),
('ultimate-buildroot:v1.0', None, True),
(None, 'buildroot-stream:v1.0', True),
('ultimate-buildroot:v1.0', 'buildroot-stream:v1.0', False)
))
def test_render_prod_request_with_repo(self, build_image, build_imagestream, proxy, valid):
build_request = BuildRequest(INPUTS_PATH)
name_label = "fedora/resultingimage"
vendor = "Foo Vendor"
authoritative_registry = "registry.example.com"
distribution_scope = "authoritative-source-only"
koji_task_id = 4756
assert isinstance(build_request, BuildRequest)
kwargs = {
'git_uri': TEST_GIT_URI,
'git_ref': TEST_GIT_REF,
'git_branch': TEST_GIT_BRANCH,
'user': "john-foo",
'component': TEST_COMPONENT,
'base_image': 'fedora:latest',
'name_label': name_label,
'registry_uri': "registry.example.com",
'source_registry_uri': "registry.example.com",
'openshift_uri': "http://openshift/",
'builder_openshift_url': "http://openshift/",
'koji_target': "koji-target",
'kojiroot': "http://root/",
'kojihub': "http://hub/",
'koji_task_id': koji_task_id,
'sources_command': "make",
'vendor': vendor,
'authoritative_registry': authoritative_registry,
'distribution_scope': distribution_scope,
'yum_repourls': ["http://example.com/my.repo"],
'registry_api_versions': ['v1'],
'build_image': build_image,
'build_imagestream': build_imagestream,
'proxy': proxy,
'osbs_api': MockOSBSApi(),
}
if valid:
build_request.set_params(**kwargs)
else:
with pytest.raises(OsbsValidationException):
build_request.set_params(**kwargs)
return
build_json = build_request.render()
assert fnmatch.fnmatch(build_json["metadata"]["name"], TEST_BUILD_CONFIG)
assert build_json["metadata"]["labels"]["koji-task-id"] == str(koji_task_id)
assert "triggers" not in build_json["spec"]
assert build_json["spec"]["source"]["git"]["uri"] == TEST_GIT_URI
assert build_json["spec"]["source"]["git"]["ref"] == TEST_GIT_REF
assert build_json["spec"]["output"]["to"]["name"].startswith(
"registry.example.com/john-foo/component:"
)
plugins = get_plugins_from_build_json(build_json)
with pytest.raises(NoSuchPluginException):
get_plugin(plugins, "prebuild_plugins", "check_and_set_rebuild")
with pytest.raises(NoSuchPluginException):
get_plugin(plugins, "prebuild_plugins",
"stop_autorebuild_if_disabled")
assert plugin_value_get(plugins, "prebuild_plugins", "bump_release",
"args", "hub") == "http://hub/"
assert plugin_value_get(plugins, "prebuild_plugins", "distgit_fetch_artefacts",
"args", "command") == "make"
assert plugin_value_get(plugins, "prebuild_plugins", "pull_base_image",
"args", "parent_registry") == "registry.example.com"
assert plugin_value_get(plugins, "exit_plugins", "store_metadata_in_osv3",
"args", "url") == "http://openshift/"
assert plugin_value_get(plugins, "postbuild_plugins", "tag_and_push", "args",
"registries", "registry.example.com") == {"insecure": True}
with pytest.raises(NoSuchPluginException):
get_plugin(plugins, "prebuild_plugins", "koji")
with pytest.raises(NoSuchPluginException):
get_plugin(plugins, "postbuild_plugins", "pulp_push")
with pytest.raises(NoSuchPluginException):
get_plugin(plugins, "postbuild_plugins", "pulp_sync")
with pytest.raises(NoSuchPluginException):
get_plugin(plugins, "postbuild_plugins", "pulp_pull")
with pytest.raises(NoSuchPluginException):
get_plugin(plugins, 'exit_plugins', 'delete_from_registry')
assert get_plugin(plugins, "postbuild_plugins", "import_image")
assert plugin_value_get(plugins, "prebuild_plugins", "add_yum_repo_by_url",
"args", "repourls") == ["http://example.com/my.repo"]
if proxy:
assert plugin_value_get(plugins, "prebuild_plugins", "add_yum_repo_by_url",
"args", "inject_proxy") == proxy
else:
with pytest.raises(KeyError):
plugin_value_get(plugins, "prebuild_plugins", "add_yum_repo_by_url",
"args", "inject_proxy")
labels = plugin_value_get(plugins, "prebuild_plugins", "add_labels_in_dockerfile",
"args", "labels")
assert labels is not None
        assert labels['authoritative-source-url'] == authoritative_registry
m.b829**2 + m.b142**2 + m.b683**2 + m.b684**2 + m.b696**2 + m.b707**2 + m.b712**2 + m.b719**2 +
m.b721**2 + m.b735**2 + m.b747**2 + m.b752**2 + m.b782**2 + m.b807**2 + m.b818**2 + m.b143**2 +
m.b690**2 + m.b695**2 + m.b731**2 + m.b834**2 + m.b144**2 + m.b686**2 + m.b687**2 + m.b737**2 +
m.b739**2 + m.b814**2 + m.b145**2 + m.b322**2 + m.b404**2 + m.b418**2 + m.b457**2 + m.b513**2 +
m.b626**2 + m.b146**2 + m.b317**2 + m.b330**2 + m.b346**2 + m.b353**2 + m.b385**2 + m.b409**2 +
m.b496**2 + m.b527**2 + m.b532**2 + m.b534**2 + m.b539**2 + m.b560**2 + m.b617**2 + m.b649**2 +
m.b147**2 + m.b407**2 + m.b412**2 + m.b427**2 + m.b449**2 + m.b557**2 + m.b704**2 + m.b749**2 +
m.b759**2 + m.b788**2 + m.b789**2 + m.b148**2 + m.b250**2 + m.b258**2 + m.b260**2 + m.b279**2 +
m.b291**2 + m.b367**2 + m.b368**2 + m.b388**2 + m.b435**2 + m.b443**2 + m.b445**2 + m.b446**2 +
m.b464**2 + m.b494**2 + m.b551**2 + m.b564**2 + m.b573**2 + m.b593**2 + m.b598**2 + m.b622**2 +
m.b643**2 + m.x847**2 + m.x848**2 + m.x849**2 + m.b187**2 + m.b691**2 + m.b701**2 + m.b710**2 +
m.b716**2 + m.b718**2 + m.b725**2 + m.b746**2 + m.b753**2 + m.b764**2 + m.b783**2 + m.b793**2 +
m.b797**2 + m.b806**2 + m.b828**2 + m.b830**2 + m.b833**2 + m.b188**2 + m.b590**2 + m.b189**2 +
m.b259**2 + m.b272**2 + m.b276**2 + m.b281**2 + m.b303**2 + m.b324**2 + m.b351**2 + m.b355**2 +
m.b372**2 + m.b376**2 + m.b390**2 + m.b394**2 + m.b423**2 + m.b424**2 + m.b428**2 + m.b458**2 +
m.b467**2 + m.b477**2 + m.b482**2 + m.b488**2 + m.b490**2 + m.b497**2 + m.b499**2 + m.b500**2 +
m.b526**2 + m.b531**2 + m.b566**2 + m.b570**2 + m.b572**2 + m.b587**2 + m.b603**2 + m.b608**2 +
m.b623**2 + m.b628**2 + m.b664**2 + m.b670**2 + m.b673**2 + m.b674**2 + m.b676**2 + m.b681**2 +
m.b190**2 + m.b706**2 + m.b723**2 + m.b754**2 + m.b778**2 + m.b784**2 + m.b191**2 + m.b192**2 +
m.b693**2 + m.b697**2 + m.b709**2 + m.b794**2 + m.b805**2 + m.b193**2 + m.b692**2 + m.b703**2 +
m.b722**2 + m.b732**2 + m.b742**2 + m.b772**2 + m.b774**2 + m.b796**2 + m.b800**2 + m.b802**2 +
m.b810**2 + m.b815**2 + m.b824**2 + m.b831**2 + m.b835**2 + m.b194**2 + m.b195**2 + m.b776**2 +
m.b196**2 + m.b386**2 + m.b393**2 + m.b440**2 + m.b450**2 + m.b465**2 + m.b549**2 + m.b561**2 +
m.b567**2 + m.b581**2 + m.b582**2 + m.b595**2 + m.b611**2 + m.b614**2 + m.b619**2 + m.b627**2 +
m.b632**2 + m.b635**2 + m.b657**2 + m.b197**2 + m.b198**2 + m.b773**2 + m.b199**2 + m.b200**2 +
m.b771**2 + m.b201**2 + m.b254**2 + m.b262**2 + m.b266**2 + m.b286**2 + m.b289**2 + m.b321**2 +
m.b328**2 + m.b331**2 + m.b335**2 + m.b347**2 + m.b348**2 + m.b352**2 + m.b354**2 + m.b362**2 +
m.b365**2 + m.b371**2 + m.b430**2 + m.b436**2 + m.b456**2 + m.b461**2 + m.b470**2 + m.b472**2 +
m.b474**2 + m.b480**2 + m.b492**2 + m.b515**2 + m.b555**2 + m.b633**2 + m.b660**2 + m.b677**2 +
m.b202**2 + m.b341**2 + m.b360**2 + m.b380**2 + m.b401**2 + m.b405**2 + m.b447**2 + m.b528**2 +
m.b575**2 + m.b577**2 + m.b579**2 + m.b667**2 + m.b203**2 + m.b327**2 + m.b343**2 + m.b344**2 +
m.b350**2 + m.b364**2 + m.b429**2 + m.b455**2 + m.b506**2 + m.b516**2 + m.b517**2 + m.b548**2 +
m.b596**2 + m.b204**2 + m.b319**2 + m.b325**2 + m.b338**2 + m.b378**2 + m.b384**2 + m.b415**2 +
m.b433**2 + m.b459**2 + m.b495**2 + m.b504**2 + m.b508**2 + m.b518**2 + m.b543**2 + m.b634**2 +
m.b640**2 + m.b654**2 + m.b655**2 + m.b665**2 + m.b675**2 + m.b205**2 + m.b399**2 + m.b554**2 +
m.b558**2 + m.b206**2 + m.b379**2 + m.b417**2 + m.b466**2 + m.b514**2 + m.b521**2 + m.b523**2 +
m.b612**2 + m.b207**2 + m.b332**2 + m.b333**2 + m.b340**2 + m.b448**2 + m.b463**2 + m.b503**2 +
m.b542**2 + m.b644**2 + m.b668**2 + m.b679**2 + m.b208**2 + m.b209**2 + m.b210**2 + m.b342**2 +
m.b361**2 + m.b363**2 + m.b419**2 + m.b485**2 + m.b493**2 + m.b537**2 + m.b638**2 + m.b639**2 +
m.b646**2 + m.b211**2 + m.b212**2 + m.b213**2 + m.b214**2 + m.b215**2 + m.b216**2 + m.b217**2 +
m.b218**2 + m.b219**2 + m.b220**2 + m.b221**2 + m.b222**2 + m.b223**2 + m.b224**2 + m.b225**2 +
m.b226**2 + m.b227**2 + m.b228**2 + m.b229**2 + m.b230**2 + m.b231**2 + m.b232**2 + m.b233**2 +
m.b234**2 + m.b235**2 + m.b236**2 + m.b237**2 + m.b238**2 + m.b239**2 + m.b240**2 + m.b241**2 +
m.b242**2 + m.b243**2 + m.b244**2 + m.b245**2 + m.b246**2 + m.b247**2 + m.b248**2 + m.b249**2 +
m.x850**2 + m.x851**2 + m.b306**2 + m.b307**2 + m.b308**2 + m.b309**2 + m.b310**2 + m.b311**2 +
m.b312**2 + m.b313**2 + m.b314**2 + m.b315**2 + m.b316**2 + m.x852**2 + m.x853**2 + m.x854**2 +
m.x855**2 + m.x856**2 + m.x857**2 + m.x858**2 + m.x859**2 + m.x860**2 + m.x861**2 + m.x862**2 +
m.x863**2 + m.x864**2 + m.x865**2 + m.x866**2 + m.x867**2 + m.x868**2 + m.x869**2 + m.x870**2 +
m.x871**2 + m.x872**2 + m.x873**2 + m.x874**2 + m.x875**2 + m.x876**2 + m.x877**2 + m.x878**2 +
m.x879**2 + m.x880**2 + m.x881**2 + m.x882**2 + m.x883**2 + m.x884**2 + m.x885**2 + m.x886**2 +
m.x887**2 + m.x888**2 + m.x889**2 + m.x890**2 + m.x891**2 + m.x892**2 + m.x893**2 + m.x894**2 +
m.x895**2 + m.x896**2 + m.x897**2 + m.x898**2 + m.x899**2 + m.x900**2 + m.x901**2 + m.x902**2 +
m.x903**2 + m.x904**2 + m.x905**2 + m.x906**2 + m.x907**2 + m.x908**2 + m.x909**2 + m.x910**2 +
m.x911**2 + m.x912**2 + m.x913**2 + m.x914**2 + m.x915**2 + m.x916**2 + m.x917**2 + m.x918**2 +
m.x919**2 + m.x920**2 + m.x921**2 + m.x922**2 + m.x923**2 + m.x924**2 + m.x925**2 + m.x926**2 +
m.x927**2 + m.x928**2 + m.x929**2 + m.x930**2 + m.x931**2 + m.x932**2 + m.x933**2 + m.x934**2 +
m.x935**2 + m.x936**2 + m.x937**2 + m.x938**2 + m.x939**2 + m.x940**2 + m.x941**2 + m.x942**2 +
m.x943**2 + m.x944**2 + m.x945**2 + m.x946**2 + m.x947**2 + m.x948**2 + m.x949**2 + m.x950**2 +
m.x951**2 + m.x952**2 + m.x953**2 + m.x954**2 + m.x955**2 + m.x956**2 + m.x957**2 + m.x958**2 +
m.x959**2 + m.x960**2 + m.x961**2 + m.x962**2 + m.x963**2 + m.x964**2 + m.x965**2 + m.x966**2 +
m.x967**2 + m.x968**2 + m.b4*m.b345 + m.b4*m.b406 + m.b4*m.b413 + m.b5*m.b62 + m.b5*m.b63 + m.b5*
m.b64 + m.b5*m.b65 + m.b5*m.b66 + m.b5*m.b127 + m.b5*m.b128 + m.b5*m.b132 + m.b5*m.b139 + m.b5*
m.b150 + m.b5*m.b152 + m.b5*m.b154 + m.b5*m.b155 + m.b5*m.b158 + m.b5*m.b161 + m.b5*m.b163 + m.b5
*m.b164 + m.b5*m.b165 + m.b5*m.b166 + m.b5*m.b180 + m.b5*m.b181 + m.b5*m.b252 + m.b5*m.b253 +
m.b5*m.b265 + m.b5*m.b298 + m.b5*m.b300 + m.b5*m.b318 + m.b5*m.b334 + m.b5*m.b337 + m.b5*m.b357
+ m.b5*m.b374 + m.b5*m.b383 + m.b5*m.b397 + m.b5*m.b402 + m.b5*m.b410 + m.b5*m.b411 + m.b5*
m.b505 + m.b5*m.b509 + m.b5*m.b510 + m.b5*m.b530 + m.b5*m.b536 + m.b5*m.b544 + m.b5*m.b547 + m.b5
*m.b553 + m.b5*m.b562 + m.b5*m.b569 + m.b5*m.b574 + m.b5*m.b576 + m.b5*m.b583 + m.b5*m.b586 +
m.b5*m.b591 + m.b5*m.b602 + m.b5*m.b605 + m.b5*m.b641 + m.b5*m.b645 + m.b5*m.b648 + m.b5*m.b650
+ m.b5*m.b656 + m.b5*m.b658 + m.b5*m.b662 + m.b5*m.b666 + m.b6*m.b688 + m.b6*m.b689 + m.b6*
m.b699 + m.b6*m.b705 + m.b6*m.b708 + m.b6*m.b717 + m.b6*m.b738 + m.b6*m.b744 + m.b6*m.b751 + m.b6
*m.b777 + m.b6*m.b785 + m.b6*m.b799 + m.b6*m.b817 + m.b6*m.b827 + m.b6*m.b832 + m.b7*m.b124 +
m.b7*m.b136 + m.b9*m.b182 + m.b9*m.b252 + m.b9*m.b253 + m.b9*m.b265 + m.b9*m.b298 + m.b9*m.b300
+ m.b9*m.b373 + m.b9*m.b391 + m.b9*m.b408 + m.b9*m.b414 + m.b9*m.b420 + m.b9*m.b421 + m.b9*
m.b438 + m.b9*m.b444 + m.b9*m.b454 + m.b9*m.b462 + m.b9*m.b473 + m.b9*m.b481 + m.b9*m.b486 + m.b9
*m.b524 + m.b9*m.b540 + m.b9*m.b541 + m.b9*m.b550 + m.b9*m.b563 + m.b9*m.b568 + m.b9*m.b588 +
m.b9*m.b601 + m.b9*m.b606 + m.b9*m.b613 + m.b9*m.b620 + m.b9*m.b647 + m.b11*m.b45 + m.b11*m.b58
+ m.b11*m.b75 + m.b11*m.b77 + m.b11*m.b78 + m.b11*m.b84 + m.b11*m.b86 + m.b11*m.b88 + m.b11*
    m.b92
data_type : str or int
The data_type of the prep template
investigation_type : str, optional
The investigation type, if relevant
Returns
-------
A new instance of `cls` to access to the PrepTemplate stored in the DB
Raises
------
QiitaDBColumnError
If the investigation_type is not valid
If a required column is missing in md_template
"""
# If the investigation_type is supplied, make sure it is one of
# the recognized investigation types
if investigation_type is not None:
cls.validate_investigation_type(investigation_type)
invalid_ids = get_invalid_sample_names(md_template.index)
if invalid_ids:
raise QiitaDBColumnError("The following sample names in the prep"
" template contain invalid characters "
"(only alphanumeric characters or periods"
" are allowed): %s." %
", ".join(invalid_ids))
# We are going to modify the md_template. We create a copy so
# we don't modify the user one
md_template = deepcopy(md_template)
# Prefix the sample names with the study_id
_prefix_sample_names_with_id(md_template, study.id)
# In the database, all the column headers are lowercase
md_template.columns = [c.lower() for c in md_template.columns]
# Check that we don't have duplicate columns
if len(set(md_template.columns)) != len(md_template.columns):
raise QiitaDBDuplicateHeaderError(
find_duplicates(md_template.columns))
# Get a connection handler
conn_handler = SQLConnectionHandler()
queue_name = "CREATE_PREP_TEMPLATE_%d" % raw_data.id
conn_handler.create_queue(queue_name)
# Check if the data_type is the id or the string
if isinstance(data_type, (int, long)):
data_type_id = data_type
data_type_str = convert_from_id(data_type, "data_type",
conn_handler)
else:
data_type_id = convert_to_id(data_type, "data_type", conn_handler)
data_type_str = data_type
        # We need to check for some special columns that are not present in
        # the database but are required depending on the data type.
missing = cls._check_special_columns(md_template, data_type_str)
# Get some useful information from the metadata template
sample_ids = md_template.index.tolist()
num_samples = len(sample_ids)
# Get the required columns from the DB
db_cols = get_table_cols(cls._table, conn_handler)
# Remove the sample_id and study_id columns
db_cols.remove('sample_id')
db_cols.remove(cls._id_column)
# Retrieve the headers of the metadata template
headers = list(md_template.keys())
# Check that md_template has the required columns
remaining = set(db_cols).difference(headers)
missing = missing.union(remaining)
missing = missing.difference(cls.translate_cols_dict)
if missing:
raise QiitaDBColumnError("Missing columns: %s"
% ', '.join(missing))
# Insert the metadata template
# We need the prep_id for multiple calls below, which currently is not
# supported by the queue system. Thus, executing this outside the queue
prep_id = conn_handler.execute_fetchone(
"INSERT INTO qiita.prep_template (data_type_id, raw_data_id, "
"investigation_type) VALUES (%s, %s, %s) RETURNING "
"prep_template_id", (data_type_id, raw_data.id,
investigation_type))[0]
# Insert values on required columns
values = _as_python_types(md_template, db_cols)
values.insert(0, sample_ids)
values.insert(0, [prep_id] * num_samples)
values = [v for v in zip(*values)]
conn_handler.add_to_queue(
queue_name,
"INSERT INTO qiita.{0} ({1}, sample_id, {2}) "
"VALUES (%s, %s, {3})".format(
cls._table, cls._id_column, ', '.join(db_cols),
', '.join(['%s'] * len(db_cols))),
values, many=True)
# Insert rows on *_columns table
headers = list(set(headers).difference(db_cols))
datatypes = _get_datatypes(md_template.ix[:, headers])
# psycopg2 requires a list of tuples, in which each tuple is a set
# of values to use in the string formatting of the query. We have all
# the values in different lists (but in the same order) so use zip
# to create the list of tuples that psycopg2 requires.
values = [
v for v in zip([prep_id] * len(headers), headers, datatypes)]
conn_handler.add_to_queue(
queue_name,
"INSERT INTO qiita.{0} ({1}, column_name, column_type) "
"VALUES (%s, %s, %s)".format(cls._column_table, cls._id_column),
values, many=True)
# Create table with custom columns
table_name = cls._table_name(prep_id)
column_datatype = ["%s %s" % (col, dtype)
for col, dtype in zip(headers, datatypes)]
conn_handler.add_to_queue(
queue_name,
"CREATE TABLE qiita.{0} (sample_id varchar, "
"{1})".format(table_name, ', '.join(column_datatype)))
# Insert values on custom table
values = _as_python_types(md_template, headers)
values.insert(0, sample_ids)
values = [v for v in zip(*values)]
conn_handler.add_to_queue(
queue_name,
"INSERT INTO qiita.{0} (sample_id, {1}) "
"VALUES (%s, {2})".format(table_name, ", ".join(headers),
', '.join(["%s"] * len(headers))),
values, many=True)
try:
conn_handler.execute_queue(queue_name)
except Exception:
# Clean up row from qiita.prep_template
conn_handler.execute(
"DELETE FROM qiita.prep_template where "
"{0} = %s".format(cls._id_column), (prep_id,))
# Check if sample IDs present here but not in sample template
sql = ("SELECT sample_id from qiita.required_sample_info WHERE "
"study_id = %s")
            # Get the prep template sample IDs and find those that are not
            # part of the study's sample IDs
prep_samples = set(md_template.index.values)
unknown_samples = prep_samples.difference(
s[0] for s in conn_handler.execute_fetchall(sql, [study.id]))
if unknown_samples:
raise QiitaDBExecutionError(
'Samples found in prep template but not sample template: '
'%s' % ', '.join(unknown_samples))
# some other error we haven't seen before so raise it
raise
# figuring out the filepath of the backup
_id, fp = get_mountpoint('templates')[0]
fp = join(fp, '%d_prep_%d_%s.txt' % (study.id, prep_id,
strftime("%Y%m%d-%H%M%S")))
# storing the backup
pt = cls(prep_id)
pt.to_file(fp)
# adding the fp to the object
pt.add_filepath(fp)
# creating QIIME mapping file
pt.create_qiime_mapping_file(fp)
return pt
@classmethod
    def validate_investigation_type(cls, investigation_type):
"""Simple investigation validation to avoid code duplication
Parameters
----------
investigation_type : str
The investigation type, should be part of the ENA ontology
Raises
-------
QiitaDBColumnError
The investigation type is not in the ENA ontology
"""
ontology = Ontology(convert_to_id('ENA', 'ontology'))
terms = ontology.terms + ontology.user_defined_terms
if investigation_type not in terms:
            raise QiitaDBColumnError("'%s' is not a valid investigation_type. "
"Choose from: %s" % (investigation_type,
', '.join(terms)))
@classmethod
def _check_template_special_columns(cls, md_template, data_type):
r"""Checks for special columns based on obj type
Parameters
----------
md_template : DataFrame
The metadata template file contents indexed by sample ids
data_type : str
The data_type of the template.
Returns
-------
set
The set of missing columns
Notes
-----
Sometimes people use different names for the same columns. We just
rename them to use the naming that we expect, so this is normalized
across studies.
"""
# We only have column requirements if the data type of the raw data
# is one of the target gene types
missing_cols = set()
if data_type in TARGET_GENE_DATA_TYPES:
md_template.rename(columns=RENAME_COLS_DICT, inplace=True)
# Check for all required columns for target genes studies
missing_cols = REQUIRED_TARGET_GENE_COLS.difference(
md_template.columns)
return missing_cols
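    # Hypothetical illustration of the check above (the '16S' data type and the
    # sample metadata are assumptions; RENAME_COLS_DICT and
    # REQUIRED_TARGET_GENE_COLS come from the module this excerpt belongs to):
    #
    #   md = pd.DataFrame({'center_name': ['ANL']}, index=['sample.1'])
    #   missing = cls._check_template_special_columns(md, '16S')
    #   # 'missing' now holds every required target-gene column absent from md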
@classmethod
def delete(cls, id_):
r"""Deletes the table from the database
Parameters
----------
id_ : obj
The object identifier
Raises
------
QiitaDBError
If the prep template already has a preprocessed data
QiitaDBUnknownIDError
If no prep template with id = id_ exists
"""
table_name = cls._table_name(id_)
conn_handler = SQLConnectionHandler()
if not cls.exists(id_):
raise QiitaDBUnknownIDError(id_, cls.__name__)
# TODO: Should we cascade to preprocessed data? See issue #537
preprocessed_data_exists = conn_handler.execute_fetchone(
"SELECT EXISTS(SELECT * FROM qiita.prep_template_preprocessed_data"
" WHERE prep_template_id=%s)", (id_,))[0]
if preprocessed_data_exists:
raise QiitaDBError("Cannot remove prep template %d because a "
"preprocessed data has been already generated "
"using it." % id_)
# Delete the prep template filepaths
conn_handler.execute(
"DELETE FROM qiita.prep_template_filepath WHERE "
"prep_template_id = %s", (id_, ))
# Drop the prep_X table
conn_handler.execute(
"DROP TABLE qiita.{0}".format(table_name))
# Remove the rows from common_prep_info
conn_handler.execute(
"DELETE FROM qiita.{0} where {1} = %s".format(cls._table,
cls._id_column),
(id_,))
# Remove the rows from prep_columns
conn_handler.execute(
"DELETE FROM qiita.{0} where {1} = %s".format(cls._column_table,
cls._id_column),
(id_,))
# Remove the row from prep_template
conn_handler.execute(
"DELETE FROM qiita.prep_template where "
"{0} = %s".format(cls._id_column), (id_,))
def data_type(self, ret_id=False):
"""Returns the data_type or the data_type id
Parameters
----------
ret_id : bool, optional
If true, return the id instead of the string, default false.
Returns
-------
str or int
string value of data_type or data_type_id if ret_id is True
"""
ret = "_id" if ret_id else ""
conn_handler = SQLConnectionHandler()
return conn_handler.execute_fetchone(
"SELECT d.data_type{0} FROM qiita.data_type d JOIN "
"qiita.prep_template p ON p.data_type_id = d.data_type_id WHERE "
"p.prep_template_id=%s".format(ret), (self.id,))[0]
@property
def raw_data(self):
conn_handler = SQLConnectionHandler()
return conn_handler.execute_fetchone(
"SELECT raw_data_id FROM qiita.prep_template "
"WHERE prep_template_id=%s", (self.id,))[0]
@property
def preprocessed_data(self):
conn_handler = SQLConnectionHandler()
prep_datas = conn_handler.execute_fetchall(
"SELECT preprocessed_data_id FROM "
"qiita.prep_template_preprocessed_data WHERE prep_template_id=%s",
(self.id,))
return [x[0] for x in prep_datas]
@property
def preprocessing_status(self):
r"""Tells if the data has been preprocessed or not
Returns
-------
str
One of {'not_preprocessed', 'preprocessing', 'success', 'failed'}
"""
conn_handler = SQLConnectionHandler()
return conn_handler.execute_fetchone(
"SELECT preprocessing_status FROM qiita.prep_template "
"WHERE {0}=%s".format(self._id_column), (self.id,))[0]
@preprocessing_status.setter
def preprocessing_status(self, state):
r"""Update the preprocessing status
Parameters
----------
        state : str, {'not_preprocessed', 'preprocessing', 'success',
                      'failed: <error message>'}
The current status of preprocessing
Raises
------
ValueError
If the state is not known.
"""
if (state not in ('not_preprocessed', 'preprocessing', 'success') and
not state.startswith('failed:')):
raise ValueError('Unknown state: %s' % state)
conn_handler | |
in
the System QoS Policy. An example entry is:
[{"Name": "Best Effort",
"AdminState": "Enabled",
"Cos": 255,
"Weight": 5,
"PacketDrop": True,
"Mtu": 1500
},
{"Name": "FC",
"AdminState": "Enabled",
"Cos": 3,
"Weight": 5,
"PacketDrop": False,
"Mtu": 2240
},
{"Name": "Platinum",
"AdminState": "Disabled",
"Cos": 5,
"Weight": 10,
"PacketDrop": True,
"Mtu": 1500
},
{"Name": "Gold",
"AdminState": "Disabled",
"Cos": 4,
"Weight": 9,
"PacketDrop": True,
"Mtu": 1500
},
{"Name": "Silver",
"AdminState": "Disabled",
"Cos": 2,
"Weight": 8,
"PacketDrop": True,
"Mtu": 1500
},
{"Name": "Bronze",
"AdminState": "Disabled",
"Cos": 1,
"Weight": 7,
"PacketDrop": True,
"Mtu": 1500
}
].
policy_description (str):
Optional; The description of the policy to be created. The default
value is an empty string ("").
organization (str):
Optional; The Intersight account organization of the policy.
The default value is "default".
intersight_base_url (str):
Optional; The base URL for Intersight API paths. The default value
is "https://www.intersight.com/api/v1". This value typically only
needs to be changed if using the Intersight Virtual Appliance.
tags (dict):
Optional; The Intersight account tags that will be assigned to the
policy. The default value is None.
preconfigured_api_client ("ApiClient"):
Optional; An ApiClient class instance which handles
Intersight client-server communication through the use of API keys.
The default value is None. If a preconfigured_api_client argument
is provided, empty strings ("") or None can be provided for the
intersight_api_key_id, intersight_api_key, and intersight_base_url
arguments.
ucs_domain_profile_name (str):
Optional; The UCS Domain Profile the policy should be attached to.
The default value is an empty string ("").
"""
def builder(target_object):
"""This is a function used to build the objects that are components of
an overarching pool or policy on Cisco Intersight.
Args:
target_object (class):
The class representing the object to be built on Intersight.
Raises:
Exception:
An exception occurred due to an issue accessing the Intersight
API path. The status code or error message will be specified.
"""
try:
target_object.object_maker()
except Exception:
print("\nA configuration error has occurred!\n")
print("The builder function failed to configure the "
f"{target_object.object_type} settings.")
print("Please check the provided arguments for the "
f"{target_object.object_type} settings.\n")
print("Exception Message: ")
traceback.print_exc()
# Define and create System QoS Policy object in Intersight
builder(SystemQosPolicy(intersight_api_key_id=intersight_api_key_id,
intersight_api_key=intersight_api_key,
policy_name=policy_name,
policy_description=policy_description,
organization=organization,
intersight_base_url=intersight_base_url,
tags=tags,
preconfigured_api_client=preconfigured_api_client,
ucs_domain_profile_name=ucs_domain_profile_name,
qos_class_settings_list=qos_class_settings_list
))
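# Hypothetical usage sketch of the maker defined above. The function is assumed
# to be named system_qos_policy_maker (its def line sits outside this excerpt,
# named by analogy with the other *_maker functions here); the key ID, key file
# path, policy name and Domain Profile name are placeholders and the call is
# only defined, not executed, by this module.
def _example_system_qos_policy():
    example_qos_classes = [
        {"Name": "Best Effort", "AdminState": "Enabled", "Cos": 255,
         "Weight": 5, "PacketDrop": True, "Mtu": 9216},
        {"Name": "FC", "AdminState": "Enabled", "Cos": 3,
         "Weight": 5, "PacketDrop": False, "Mtu": 2240},
    ]
    system_qos_policy_maker(
        intersight_api_key_id="<your-api-key-id>",
        intersight_api_key="~/keys/intersight_secret.txt",
        policy_name="Example-System-QoS-Policy",
        qos_class_settings_list=example_qos_classes,
        policy_description="Sketch only",
        ucs_domain_profile_name="Example-UCS-Domain-Profile",
        )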
# Establish classes and functions to make Switch Control Policy
class SwitchControlPolicy(DirectlyAttachedUcsDomainPolicy):
"""This class is used to configure a Switch Control Policy in Intersight.
"""
object_type = "Switch Control Policy"
intersight_api_path = "fabric/SwitchControlPolicies"
def __init__(self,
intersight_api_key_id,
intersight_api_key,
policy_name,
policy_description="",
organization="default",
intersight_base_url="https://www.intersight.com/api/v1",
tags=None,
preconfigured_api_client=None,
ucs_domain_profile_name="",
enable_vlan_port_count_optimization=False,
mac_address_table_aging_time_option="Default",
mac_address_table_aging_time_custom_value_in_seconds=14500,
link_control_message_interval_in_seconds=15,
link_control_recovery_action="None"
):
super().__init__(intersight_api_key_id,
intersight_api_key,
policy_name,
policy_description,
organization,
intersight_base_url,
tags,
preconfigured_api_client,
ucs_domain_profile_name,
fabric_interconnect="AB"
)
self.enable_vlan_port_count_optimization = enable_vlan_port_count_optimization
self.mac_address_table_aging_time_option = mac_address_table_aging_time_option
self.mac_address_table_aging_time_custom_value_in_seconds = mac_address_table_aging_time_custom_value_in_seconds
self.link_control_message_interval_in_seconds = link_control_message_interval_in_seconds
self.link_control_recovery_action = link_control_recovery_action
self.intersight_api_body = {
"Name": self.policy_name,
"Description": self.policy_description,
"VlanPortOptimizationEnabled": self.enable_vlan_port_count_optimization,
"MacAgingSettings": {
"MacAgingOption": self.mac_address_table_aging_time_option,
"MacAgingTime": self.mac_address_table_aging_time_custom_value_in_seconds
}
}
def __repr__(self):
return (
f"{self.__class__.__name__}"
f"('{self.intersight_api_key_id}', "
f"'{self.intersight_api_key}', "
f"'{self.policy_name}', "
f"'{self.policy_description}', "
f"'{self.organization}', "
f"'{self.intersight_base_url}', "
f"{self.tags}, "
f"{self.api_client}, "
f"'{self.ucs_domain_profile_name}', "
f"'{self.enable_vlan_port_count_optimization}', "
f"'{self.mac_address_table_aging_time_option}', "
f"{self.mac_address_table_aging_time_custom_value_in_seconds}, "
f"{self.link_control_message_interval_in_seconds}, "
f"'{self.link_control_recovery_action}')"
)
def object_maker(self):
"""This function makes the targeted policy object.
"""
print(f"\nConfiguring the {self.object_type} named "
f"{self.policy_name}...")
# Reformat the provided Link Control Recovery Action value to lowercase format for back-end Intersight API compatibility
try:
lowercase_link_control_recovery_action = self.link_control_recovery_action.lower()
except Exception:
print("\nA configuration error has occurred!\n")
print(f"During the configuration of the {self.object_type} named "
f"{self.policy_name}, there was an issue with the value "
"provided for the Link Control Recovery Action settings.")
print("The value provided was "
f"{self.link_control_recovery_action}.")
print("To proceed, the value provided for the Link Control "
"Recovery Action settings should be updated to an accepted "
"string format.")
print("The recommended values are 'None' or 'Reset' in string "
"format.")
print("Please update the configuration, then re-attempt "
"execution.\n")
sys.exit(0)
# Update the API body with the Switch Control Unidirectional Link Detection (UDLD) Settings
self.intersight_api_body["UdldSettings"] = {
"MessageInterval": self.link_control_message_interval_in_seconds,
"RecoveryAction": lowercase_link_control_recovery_action
}
# Update the API body with general attributes
self._update_api_body_general_attributes()
# Update the API body with individual subobject attributes
self._update_api_body_subobject_attributes()
# Update the API body with a UCS Domain Profile attached, if specified
self._attach_ucs_domain_profile()
# POST the API body to Intersight
self._post_intersight_object()
def switch_control_policy_maker(intersight_api_key_id,
intersight_api_key,
policy_name,
enable_vlan_port_count_optimization=False,
mac_address_table_aging_time_option="Default",
mac_address_table_aging_time_custom_value_in_seconds=14500,
link_control_message_interval_in_seconds=15,
link_control_recovery_action="None",
policy_description="",
organization="default",
intersight_base_url="https://www.intersight.com/api/v1",
tags=None,
preconfigured_api_client=None,
ucs_domain_profile_name=""
):
"""This is a function used to make a Switch Control Policy on Cisco Intersight.
Args:
intersight_api_key_id (str):
The ID of the Intersight API key.
intersight_api_key (str):
The system file path of the Intersight API key.
policy_name (str):
The name of the policy to be created.
enable_vlan_port_count_optimization (bool):
            Optional; A setting to enable VLAN port count optimization. The default
value is False.
mac_address_table_aging_time_option (str):
Optional; A setting to determine the MAC address table aging time
in seconds. The accepted values are "Default", which sets the
default time of 14500 seconds, "Custom", which allows a custom time
in seconds to be set using the
'mac_address_table_aging_time_custom_value_in_seconds' argument or
"Never", which disables the MAC address table aging time. The
default value is "Default"
mac_address_table_aging_time_custom_value_in_seconds (int):
Optional; The custom MAC address table aging time in seconds. This
setting is only valid if the 'mac_address_table_aging_time_option'
            argument has been set to "Custom". The accepted range of values is
1 - 1000000 seconds. The default value is 14500.
link_control_message_interval_in_seconds (int):
Optional; Configures the time between Link Control probe messages
on ports that are in advertisement mode and are currently
            determined to be bidirectional. The accepted range of values is
7 - 90 seconds. The default value is 15.
link_control_recovery_action (str):
Optional; Configures the Link Control recovery action. When
enabled, Link Control recovery attempts to bring a Link Control
error-disabled port out of reset. The accepted values are "None" or
"Reset". The default value is "None".
policy_description (str):
Optional; The description of the policy to be created. The default
value is an empty string ("").
organization (str):
Optional; The Intersight account organization of the policy.
The default value is "default".
intersight_base_url (str):
Optional; The base URL for Intersight API paths. The default value
is "https://www.intersight.com/api/v1". This value typically only
needs to be changed if using the Intersight Virtual Appliance.
tags (dict):
Optional; The Intersight account tags that will be assigned to the
policy. The default value is None.
preconfigured_api_client ("ApiClient"):
Optional; An ApiClient class instance which handles
Intersight client-server communication through the use of API keys.
The default value is None. If a preconfigured_api_client argument
is provided, empty strings ("") or None can be provided for the
intersight_api_key_id, intersight_api_key, and intersight_base_url
arguments.
ucs_domain_profile_name (str):
Optional; The UCS Domain Profile the policy should be attached to.
The default value is an empty string ("").
"""
def builder(target_object):
"""This is a function used to build the objects that are components of
an overarching pool or policy on Cisco Intersight.
Args:
target_object (class):
The class representing the object to be built on Intersight.
Raises:
Exception:
An exception occurred due to an issue accessing the Intersight
API path. The status code or error message will be specified.
"""
try:
target_object.object_maker()
except Exception:
print("\nA configuration error has occurred!\n")
print("The builder function failed to configure the "
f"{target_object.object_type} settings.")
print("Please check the provided arguments for the "
f"{target_object.object_type} settings.\n")
print("Exception Message: ")
traceback.print_exc()
# Define and create Switch Control Policy object in Intersight
builder(SwitchControlPolicy(intersight_api_key_id=intersight_api_key_id,
intersight_api_key=intersight_api_key,
policy_name=policy_name,
policy_description=policy_description,
organization=organization,
intersight_base_url=intersight_base_url,
tags=tags,
preconfigured_api_client=preconfigured_api_client,
ucs_domain_profile_name=ucs_domain_profile_name,
enable_vlan_port_count_optimization=enable_vlan_port_count_optimization,
mac_address_table_aging_time_option=mac_address_table_aging_time_option,
mac_address_table_aging_time_custom_value_in_seconds=mac_address_table_aging_time_custom_value_in_seconds,
link_control_message_interval_in_seconds=link_control_message_interval_in_seconds,
link_control_recovery_action=link_control_recovery_action
))
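# Hypothetical usage sketch of switch_control_policy_maker() defined above. The
# key ID, key file path and names are placeholders; the values respect the
# documented ranges (custom aging 1 - 1000000 s, probe interval 7 - 90 s). The
# function is only defined here, not called.
def _example_switch_control_policy():
    switch_control_policy_maker(
        intersight_api_key_id="<your-api-key-id>",
        intersight_api_key="~/keys/intersight_secret.txt",
        policy_name="Example-Switch-Control-Policy",
        enable_vlan_port_count_optimization=True,
        mac_address_table_aging_time_option="Custom",
        mac_address_table_aging_time_custom_value_in_seconds=30000,
        link_control_message_interval_in_seconds=15,
        link_control_recovery_action="Reset",
        ucs_domain_profile_name="Example-UCS-Domain-Profile",
        )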
def main():
# Establish UCS Domain Deployment Tool specific variables
deployment_tool_type = "Intersight UCS Domain Deployment Tool"
deployment_tool_ucs_domain_profile_name = f"{deployment_name_prefix}{ucs_domain_profile_name}{deployment_name_suffix}"
# Establish Intersight SDK for Python API client instance
main_intersight_api_client = get_api_client(api_key_id=key_id,
api_secret_file=key,
endpoint=intersight_base_url
)
# Starting the UCS Domain Deployment Tool for Cisco Intersight
print(f"\nStarting the {deployment_tool_type} for Cisco Intersight.\n")
# Run the Intersight API and Account Availability Test
print("Running the Intersight API and Account Availability Test.")
test_intersight_api_service(
intersight_api_key_id=None,
intersight_api_key=None,
preconfigured_api_client=main_intersight_api_client
)
# Establish the initial UCS Domain Deployment Tool default pre-requisite and support Policy names
deployment_tool_multicast_policy_name = ""
deployment_tool_flow_control_policy_name = ""
deployment_tool_link_control_policy_name = ""
deployment_tool_link_aggregation_policy_name = ""
deployment_tool_ethernet_network_group_policy_name = ""
deployment_tool_ethernet_network_control_policy_name = ""
| |
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
plt.style.use('ggplot')
from mpl_toolkits import mplot3d
from mpl_toolkits.mplot3d import Axes3D
from matplotlib import cm
import matplotlib.ticker as mtick
import BS
import bootstrapping
######################################################################### Training loss
#Plot loss for each epoch
def plotEpochLoss(lossSerie):
fig = plt.figure(figsize=(20,10))
ax = fig.gca()
ax.plot(lossSerie , "-", color="black")
ax.set_xlabel("Epoch number", fontsize=18, labelpad=20)
ax.set_ylabel("Logarithmic Loss", fontsize=18, labelpad=20)
ax.set_title("Training Loss evolution", fontsize=24)
ax.tick_params(labelsize=16)
ax.set_facecolor('white')
plt.show()
return
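# Hypothetical usage sketch: plotting a synthetic, exponentially decaying
# training-loss curve. The loss values are made up.
def _example_plot_epoch_loss():
    losses = pd.Series(np.exp(-np.linspace(0.0, 5.0, 50)))
    plotEpochLoss(losses)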
######################################################################### Plotting smiles
#Plot a surface as a superposition of curves
def plotMultipleCurve(data,
Title = 'True Price Surface',
yMin = 0,
yMax = 1,
zAsPercent = False):
dataCurve = data[(data.index.get_level_values("Strike") <= yMax) * (data.index.get_level_values("Strike") >= yMin)]
fig = plt.figure(figsize=(20,10))
ax = fig.gca()
for t in np.linspace(0,0.8,9) :
k = dataCurve[dataCurve.index.get_level_values("Maturity") >= t].index.get_level_values("Maturity").unique().min()
curveK = dataCurve[dataCurve.index.get_level_values("Maturity")==k]
dataSerie = pd.Series(curveK.values * (100 if zAsPercent else 1) ,
index = curveK.index.get_level_values("Strike"))
ax.plot(dataSerie , "--+", label=str(k))
ax.legend()
ax.set_xlabel(data.index.names[0], fontsize=18, labelpad=20)
ax.set_ylabel(data.name, fontsize=18, labelpad=20)
if zAsPercent :
ax.yaxis.set_major_formatter(mtick.PercentFormatter())
ax.set_title(Title, fontsize=24)
ax.tick_params(labelsize=16)
ax.set_facecolor('white')
plt.show()
return
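# Hypothetical usage sketch: a synthetic series indexed by (Strike, Maturity)
# plotted maturity slice by maturity slice. All values are made up.
def _example_plot_multiple_curve():
    idx = pd.MultiIndex.from_product([np.linspace(0.2, 0.9, 15),
                                      np.linspace(0.1, 1.0, 10)],
                                     names=("Strike", "Maturity"))
    prices = pd.Series(np.random.uniform(0.0, 1.0, len(idx)),
                       index=idx, name="Price")
    plotMultipleCurve(prices, Title="Synthetic Price Curves", yMin=0.0, yMax=1.0)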
######################################################################### Plot surface
#Plotting function for surface
#xTitle : title for x axis
#yTitle : title for y axis
#zTitle : title for z axis
#Title : plot title
#az : azimuth i.e. angle of view for surface
#yMin : minimum value for y axis
#yMax : maximum value for y axis
#zAsPercent : boolean, if true format zaxis as percentage
def plot2GridCustom(coordinates, zValue,
coordinates2, zValue2,
xTitle = "Maturity",
yTitle = "Strike",
zTitle = "Price",
Title = 'True Price Surface',
az=320,
yMin = 0,
yMax = 1,
zAsPercent = False):
y = coordinates[:,0]
filteredValue = (y > yMin) & (y < yMax)
x = coordinates[:,1][filteredValue]
y = coordinates[:,0][filteredValue]
z = zValue[filteredValue].flatten()
y2 = coordinates2[:,0]
filteredValue2 = (y2 > yMin) & (y2 < yMax)
x2 = coordinates2[:,1][filteredValue2]
y2 = coordinates2[:,0][filteredValue2]
z2 = zValue2[filteredValue2].flatten()
fig = plt.figure(figsize=(20,10))
ax = fig.gca(projection='3d')
ax.set_xlabel(xTitle, fontsize=18, labelpad=20)
ax.set_ylabel(yTitle, fontsize=18, labelpad=20)
ax.set_zlabel(zTitle, fontsize=18, labelpad=10)
cmap=plt.get_cmap("inferno")
colors=cmap(z * 100 if zAsPercent else z)[np.newaxis, :, :3]
ax.scatter(x2, y2, z2, marker='o', color="r", alpha=1, s=40)
ax.scatter(x, y, z, marker='o', color="b", alpha=1, s=40)
#surf = ax.plot_trisurf(x, y,
# z * 100 if zAsPercent else z ,
# linewidth=1.0,
# antialiased=True,
# cmap = cmap,
# color=(0,0,0,0))
#scaleEdgeValue = surf.to_rgba(surf.get_array())
#surf.set_edgecolors(scaleEdgeValue)
#surf.set_alpha(0)
if zAsPercent :
ax.zaxis.set_major_formatter(mtick.PercentFormatter())
ax.view_init(elev=10., azim=az)
#ax.set_title(Title, fontsize=24)
ax.set_facecolor('white')
plt.tick_params(labelsize=16)
plt.show()
return
#Plotting function from a pandas series
def plot2Series(data,
data2,
Title = 'True Price Surface',
az=320,
yMin = 0,
yMax = 1,
zAsPercent = False):
plot2GridCustom(data.index.to_frame().values,
data.values,
data2.index.to_frame().values,
data2.values,
xTitle = data.index.names[1],
yTitle = data.index.names[0],
zTitle = data.name,
Title = Title,
az=az,
yMin = yMin,
yMax = yMax,
zAsPercent = zAsPercent)
return
def convertToLogMoneyness(formerSerie, S0):
maturity = formerSerie.index.get_level_values("Maturity")
logMoneyness = np.log(S0 / formerSerie.index.get_level_values("Strike"))
newIndex = pd.MultiIndex.from_arrays([np.array(logMoneyness.values), np.array(maturity.values)],
names=('LogMoneyness', 'Maturity'))
    if isinstance(formerSerie, pd.Series):
return pd.Series(formerSerie.values , index=newIndex)
return pd.DataFrame(formerSerie.values, index = newIndex, columns= formerSerie.columns)
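# Hypothetical usage sketch: re-indexing a (Strike, Maturity) series by
# log-moneyness for an assumed spot of S0 = 100. Values are made up.
def _example_convert_to_log_moneyness():
    idx = pd.MultiIndex.from_product([[80.0, 100.0, 120.0], [0.5, 1.0]],
                                     names=("Strike", "Maturity"))
    impliedVol = pd.Series(np.full(len(idx), 0.2), index=idx, name="ImpliedVol")
    return convertToLogMoneyness(impliedVol, S0=100.0)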
#Plotting function for surface
#xTitle : title for x axis
#yTitle : title for y axis
#zTitle : title for z axis
#Title : plot title
#az : azimuth i.e. angle of view for surface
#yMin : minimum value for y axis
#yMax : maximum value for y axis
#zAsPercent : boolean, if true format zaxis as percentage
def plotGridCustom(coordinates, zValue,
xTitle = "Maturity",
yTitle = "Strike",
zTitle = "Price",
Title = 'True Price Surface',
az=320,
yMin = 0,
yMax = 1,
zAsPercent = False):
y = coordinates[:,0]
filteredValue = (y > yMin) & (y < yMax)
x = coordinates[:,1][filteredValue]
y = coordinates[:,0][filteredValue]
z = zValue[filteredValue].flatten()
fig = plt.figure(figsize=(15,9))
ax = fig.gca(projection='3d')
fontsize = 15
pad = 20
ax.set_xlabel(xTitle, color = "k", fontsize=fontsize, labelpad=pad * 1.0)
ax.set_ylabel(yTitle, color = "k", fontsize=fontsize, labelpad=pad * 1.0)
ax.set_zlabel(zTitle, color = "k", fontsize=fontsize, labelpad=pad * 1.0)
cmap=plt.get_cmap("jet")#("inferno")
colors=cmap(z * 100 if zAsPercent else z)[np.newaxis, :, :3]
surf = ax.plot_trisurf(x, y,
z * 100 if zAsPercent else z ,
linewidth=1.0,
antialiased=True,
cmap = cmap,
color=(0,0,0,0))
scaleEdgeValue = surf.to_rgba(surf.get_array())
surf.set_edgecolors(scaleEdgeValue)
surf.set_alpha(0)
if zAsPercent :
ax.zaxis.set_major_formatter(mtick.PercentFormatter())
ax.view_init(elev=40., azim=az)
ax.set_ylim(np.amax(y), np.amin(y))
ax.set_title(Title, fontsize=fontsize * 1.2)#, rotation='vertical', x=0.1, y=0.8)
ax.set_facecolor('white')
plt.tick_params(axis = "y", labelsize=fontsize * 0.9, pad = pad * 0.4, color = [1,0,0,1])
plt.tick_params(axis = "z", labelsize=fontsize * 0.9, pad = pad * 0.5, color = [1,0,0,1])
plt.tick_params(axis = "x", labelsize=fontsize * 0.9, pad = pad * 0.05, color = [1,0,0,1])
plt.tight_layout()
plt.show()
return
#Plotting function from a dataframe
def plotSurface(data,
zName,
Title = 'True Price Surface',
az=320,
yMin = 0,
yMax = 1,
zAsPercent = False):
    plotGridCustom(data.index.to_frame().values,
                   data[zName].values,
                   xTitle = data.index.names[1],
                   yTitle = data.index.names[0],
zTitle = zName,
Title = Title,
az=az,
yMin = yMin,
yMax = yMax,
zAsPercent=zAsPercent)
return
#Plotting function from a pandas series
def plotSerie(data,
Title = 'True Price Surface',
az=320,
yMin = 0,
yMax = 1,
zAsPercent = False):
plotGridCustom(data.index.to_frame().values,
data.values,
xTitle = data.index.names[1],
yTitle = data.index.names[0],
zTitle = data.name,
Title = Title,
az=az,
yMin = yMin,
yMax = yMax,
zAsPercent = zAsPercent)
return
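# Hypothetical usage sketch: plotting one column of a (Strike, Maturity)-indexed
# DataFrame with plotSurface(), then the same data as a Series with plotSerie().
# All values are made up.
def _example_plot_surface_and_serie():
    idx = pd.MultiIndex.from_product([np.linspace(0.3, 0.9, 12),
                                      np.linspace(0.1, 2.0, 8)],
                                     names=("Strike", "Maturity"))
    df = pd.DataFrame({"Price": np.random.uniform(0.0, 1.0, len(idx))}, index=idx)
    plotSurface(df, "Price", Title="Synthetic Price Surface", yMin=0.0, yMax=1.0)
    plotSerie(df["Price"], Title="Synthetic Price Surface", yMin=0.0, yMax=1.0)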
######################################################################### Training Diagnostic
def selectIndex(df, indexToKeep):
return df.loc[indexToKeep][ ~df.loc[indexToKeep].index.duplicated(keep='first') ]
#Plot predicted value, benchmark value, absolute error and relative error
#It also computes RMSE between predValue and refValue
#predValue : approximated value
#refValue : benchmark value
#quantityName : name for approximated quantity
#az : azimuth i.e. angle of view for surface
#yMin : minimum value for y axis
#yMax : maximum value for y axis
def predictionDiagnosis(predValue,
refValue,
quantityName,
az=320,
yMin = 0,
yMax = 1,
threshold = None):
if threshold is not None :
filterIndex = refValue[refValue >= threshold].index
predictionDiagnosis(selectIndex(predValue, filterIndex),
selectIndex(refValue, filterIndex),
quantityName,
az=az,
yMin = yMin,
yMax = yMax,
threshold = None)
return
predValueFiltered = predValue[predValue.index.get_level_values("Maturity") > 0.001]
refValueFiltered = refValue[refValue.index.get_level_values("Maturity") > 0.001]
title = "Predicted " + quantityName + " surface"
plotSerie(predValueFiltered.rename(quantityName),
Title = title,
az=az,
yMin = yMin,
yMax = yMax)
title = "True " + quantityName + " surface"
plotSerie(refValueFiltered.rename(quantityName),
Title = title,
az=az,
yMin = yMin,
yMax = yMax)
title = quantityName + " surface error"
absoluteError = np.abs(predValueFiltered - refValueFiltered)
plotSerie(absoluteError.rename(quantityName + " Absolute Error"),
Title = title,
az=az,
yMin = yMin,
yMax = yMax)
title = quantityName + " surface error"
relativeError = np.abs(predValueFiltered - refValueFiltered) / refValueFiltered
plotSerie(relativeError.rename(quantityName + " Relative Error (%)"),
Title = title,
az=az,
yMin = yMin,
yMax = yMax,
zAsPercent = True)
print("RMSE : ", np.sqrt(np.mean(np.square(absoluteError))) )
print("RMSE Relative: ", np.sqrt(np.mean(np.square(relativeError))) )
return
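# Hypothetical usage sketch: comparing a noisy "prediction" against a synthetic
# reference surface. All values are made up.
def _example_prediction_diagnosis():
    idx = pd.MultiIndex.from_product([np.linspace(0.3, 0.9, 12),
                                      np.linspace(0.1, 2.0, 8)],
                                     names=("Strike", "Maturity"))
    refValue = pd.Series(np.random.uniform(0.1, 1.0, len(idx)), index=idx)
    predValue = refValue * (1.0 + 0.01 * np.random.randn(len(idx)))
    predictionDiagnosis(predValue, refValue, "Price", yMin=0.0, yMax=1.0)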
def saveDataModel(predictedPrices, volLocal, impliedVol, name):
data = np.vstack([predictedPrices.sort_index().values, volLocal.sort_index().values, impliedVol.sort_index().values]).T
dataDf = pd.DataFrame(data, index = predictedPrices.sort_index().index,
columns = ["Price", "LocalVolatility", "ImpliedVol"])
dataDf.to_csv(name + ".csv")
return
def removeDuplicateIndex(df):
return selectIndex(df, df.index)
#Diagnose Price, theta, gamma and local volatility
def modelSummary(price,
volLocale,
delta_T,
gamma_K,
benchDataset,
S0,
bootstrap,
sigma=0.3,
az=40,
yMin = 0,
yMax = 1,
thresholdPrice = None,
removeNaN = False,
savePath = None):
if thresholdPrice is not None :
filterPrice = benchDataset["Price"] >= thresholdPrice
keptPrices = benchDataset["Price"][filterPrice].index
modelSummary(selectIndex(price, keptPrices),
selectIndex(volLocale, keptPrices),
selectIndex(delta_T, keptPrices),
selectIndex(gamma_K, keptPrices),
selectIndex(benchDataset, keptPrices),
S0,
bootstrap,
sigma=sigma,
az=az,
yMin = yMin,
yMax = yMax,
thresholdPrice = None,
removeNaN = removeNaN,
savePath = None)
return
nbArbitrageViolations = ((delta_T < 0) + (gamma_K < 0)).sum()
print("Number of static arbitrage violations : ", nbArbitrageViolations)
print("Arbitrable total variance : ", price[((delta_T < 0) + (gamma_K < 0))])
priceRef = benchDataset["Price"]
predictionDiagnosis(price,
priceRef,
"Price",
az=320,
yMin = yMin,
yMax = yMax)
volLocaleRef = benchDataset["locvol"] if "locvol" in benchDataset.columns else pd.Series(np.ones_like(priceRef),
index = priceRef.index)
predictionDiagnosis(volLocale,
volLocaleRef,
"Local volatility",
az=az,
yMin = yMin,
yMax = yMax)
dTRef = benchDataset["Theta"]
predictionDiagnosis(delta_T,
dTRef,
"Theta",
az=340,
yMin = yMin,
yMax = yMax)
gKRef = benchDataset["Gamma Strike"]
predictionDiagnosis(gamma_K,
gKRef,
"Gamma Strike",
az=340,
yMin = yMin,
yMax = yMax)
#Calibrate implied volatilities for each predicted price in testing set
ImpVol = BS.vectorizedImpliedVolatilityCalibration(S0, bootstrap,
benchDataset["Maturity"],
benchDataset["Strike"],
benchDataset["OptionType"],
price,
removeNaN = removeNaN)
ImpVol = pd.Series(ImpVol, index = price.index).sort_index().dropna()
predictionDiagnosis(ImpVol,
selectIndex(benchDataset['ImpliedVol'], ImpVol.index),
" Implied vol ",
yMin=yMin,
yMax=yMax,
| |
for i in reversed(sorted(to_remove)):
data.pop(i)
return index
def _get_complex_date_index(self, data, col_names=None, parse_dates=True):
def _get_name(icol):
if isinstance(icol, basestring):
return icol
if col_names is None:
raise ValueError(('Must supply column order to use %s as '
'index') % str(icol))
for i, c in enumerate(col_names):
if i == icol:
return c
index = None
if np.isscalar(self.index_col):
name = _get_name(self.index_col)
index = data.pop(name)
if col_names is not None:
col_names.remove(name)
else: # given a list of index
to_remove = []
index = []
for idx in self.index_col:
c = _get_name(idx)
to_remove.append(c)
index.append(data[c])
# remove index items from content and columns, don't pop in
# loop
for c in reversed(sorted(to_remove)):
data.pop(c)
if col_names is not None:
col_names.remove(c)
return index
def _agg_index(self, index, try_parse_dates=True):
if np.isscalar(self.index_col):
if try_parse_dates and self._should_parse_dates(self.index_col):
index = self._conv_date(index)
na_values = self.na_values
if isinstance(na_values, dict):
na_values = _get_na_values(self.index_name, na_values)
index, na_count = _convert_types(index, na_values)
index = Index(index, name=self.index_name)
if self.verbose and na_count:
print 'Found %d NA values in the index' % na_count
else:
arrays = []
for i, arr in enumerate(index):
if (try_parse_dates and
self._should_parse_dates(self.index_col[i])):
arr = self._conv_date(arr)
col_na_values = self.na_values
if isinstance(self.na_values, dict):
col_name = self.index_name[i]
if col_name is not None:
col_na_values = _get_na_values(col_name,
self.na_values)
arr, _ = _convert_types(arr, col_na_values)
arrays.append(arr)
index = MultiIndex.from_arrays(arrays, names=self.index_name)
return index
def _should_parse_dates(self, i):
if isinstance(self.parse_dates, bool):
return self.parse_dates
else:
if np.isscalar(self.index_col):
name = self.index_name
else:
name = self.index_name[i]
if np.isscalar(self.parse_dates):
return (i == self.parse_dates) or (name == self.parse_dates)
else:
return (i in self.parse_dates) or (name in self.parse_dates)
def _conv_date(self, *date_cols):
if self.date_parser is None:
return lib.try_parse_dates(_concat_date_cols(date_cols),
dayfirst=self.dayfirst)
else:
try:
return self.date_parser(*date_cols)
except Exception, inst:
try:
return generic_parser(self.date_parser, *date_cols)
except Exception, inst:
return lib.try_parse_dates(_concat_date_cols(date_cols),
parser=self.date_parser,
dayfirst=self.dayfirst)
def _process_date_conversion(self, data_dict):
new_cols = []
new_data = {}
columns = self.columns
date_cols = set()
if self.parse_dates is None or isinstance(self.parse_dates, bool):
return data_dict, columns
if isinstance(self.parse_dates, list):
# list of column lists
for colspec in self.parse_dates:
if np.isscalar(colspec):
if isinstance(colspec, int) and colspec not in data_dict:
colspec = self.orig_columns[colspec]
if self._isindex(colspec):
continue
data_dict[colspec] = self._conv_date(data_dict[colspec])
else:
new_name, col, old_names = _try_convert_dates(
self._conv_date, colspec, data_dict, self.orig_columns)
if new_name in data_dict:
raise ValueError('New date column already in dict %s' %
new_name)
new_data[new_name] = col
new_cols.append(new_name)
date_cols.update(old_names)
elif isinstance(self.parse_dates, dict):
# dict of new name to column list
for new_name, colspec in self.parse_dates.iteritems():
if new_name in data_dict:
raise ValueError('Date column %s already in dict' %
new_name)
_, col, old_names = _try_convert_dates(
self._conv_date, colspec, data_dict, self.orig_columns)
new_data[new_name] = col
new_cols.append(new_name)
date_cols.update(old_names)
data_dict.update(new_data)
new_cols.extend(columns)
if not self.keep_date_col:
for c in list(date_cols):
data_dict.pop(c)
new_cols.remove(c)
return data_dict, new_cols
def _isindex(self, colspec):
return (colspec == self.index_col or
(isinstance(self.index_col, list) and
colspec in self.index_col) or
(colspec == self.index_name or
(isinstance(self.index_name, list) and
colspec in self.index_name)))
def _get_lines(self, rows=None):
source = self.data
lines = self.buf
# already fetched some number
if rows is not None:
rows -= len(self.buf)
if isinstance(source, list):
if self.pos > len(source):
raise StopIteration
if rows is None:
lines.extend(source[self.pos:])
self.pos = len(source)
else:
lines.extend(source[self.pos:self.pos+rows])
self.pos += rows
else:
new_rows = []
try:
if rows is not None:
for _ in xrange(rows):
new_rows.append(next(source))
lines.extend(new_rows)
else:
rows = 0
while True:
try:
new_rows.append(next(source))
rows += 1
except csv.Error, inst:
if 'newline inside string' in inst.message:
row_num = str(self.pos + rows)
msg = ('EOF inside string starting with line '
+ row_num)
raise Exception(msg)
raise
except StopIteration:
lines.extend(new_rows)
if len(lines) == 0:
raise
self.pos += len(new_rows)
self.buf = []
if self.skip_footer:
lines = lines[:-self.skip_footer]
lines = self._check_comments(lines)
return self._check_thousands(lines)
def _get_na_values(col, na_values):
if isinstance(na_values, dict):
if col in na_values:
return set(list(na_values[col]))
else:
return _NA_VALUES
else:
return na_values
def _convert_to_ndarrays(dct, na_values, verbose=False):
result = {}
for c, values in dct.iteritems():
col_na_values = _get_na_values(c, na_values)
cvals, na_count = _convert_types(values, col_na_values)
result[c] = cvals
if verbose and na_count:
print 'Filled %d NA values in column %s' % (na_count, str(c))
return result
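# Hypothetical usage sketch: converting a dict of object arrays where 'NA'
# should be treated as missing in column 'a' only; column 'b' falls back to the
# module-level _NA_VALUES set used above. Input values are made up.
def _example_convert_to_ndarrays():
    dct = {'a': np.array(['1', '2', 'NA'], dtype=object),
           'b': np.array(['x', 'y', 'z'], dtype=object)}
    return _convert_to_ndarrays(dct, na_values={'a': ['NA']}, verbose=True)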
def _convert_types(values, na_values):
na_count = 0
if issubclass(values.dtype.type, (np.number, np.bool_)):
mask = lib.ismember(values, na_values)
na_count = mask.sum()
if na_count > 0:
if com.is_integer_dtype(values):
values = values.astype(np.float64)
np.putmask(values, mask, np.nan)
return values, na_count
try:
result = lib.maybe_convert_numeric(values, na_values, False)
except Exception:
na_count = lib.sanitize_objects(values, na_values, False)
result = values
if result.dtype == np.object_:
result = lib.maybe_convert_bool(values)
return result, na_count
def _get_col_names(colspec, columns):
colset = set(columns)
colnames = []
for c in colspec:
if c in colset:
colnames.append(str(c))
elif isinstance(c, int):
colnames.append(str(columns[c]))
return colnames
def _try_convert_dates(parser, colspec, data_dict, columns):
colspec = _get_col_names(colspec, columns)
new_name = '_'.join(colspec)
to_parse = [data_dict[c] for c in colspec if c in data_dict]
try:
new_col = parser(*to_parse)
except DateConversionError:
new_col = parser(_concat_date_cols(to_parse))
return new_name, new_col, colspec
def _concat_date_cols(date_cols):
if len(date_cols) == 1:
return date_cols[0]
# stripped = [map(str.strip, x) for x in date_cols]
return np.array([' '.join(x) for x in zip(*date_cols)], dtype=object)
class FixedWidthReader(object):
"""
A reader of fixed-width lines.
"""
def __init__(self, f, colspecs, filler, thousands=None):
self.f = f
self.colspecs = colspecs
self.filler = filler # Empty characters between fields.
self.thousands = thousands
assert isinstance(colspecs, (tuple, list))
for colspec in colspecs:
assert isinstance(colspec, (tuple, list))
assert len(colspec) == 2
assert isinstance(colspec[0], int)
assert isinstance(colspec[1], int)
def next(self):
line = next(self.f)
# Note: 'colspecs' is a sequence of half-open intervals.
return [line[fromm:to].strip(self.filler or ' ')
for (fromm, to) in self.colspecs]
# Iterator protocol in Python 3 uses __next__()
__next__ = next
class FixedWidthFieldParser(TextParser):
"""
    Specialization that converts fixed-width fields into DataFrames.
See TextParser for details.
"""
def __init__(self, f, **kwds):
# Support iterators, convert to a list.
self.colspecs = list(kwds.pop('colspecs'))
TextParser.__init__(self, f, **kwds)
def _make_reader(self, f):
self.data = FixedWidthReader(f, self.colspecs, self.delimiter)
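# Hypothetical usage sketch of FixedWidthReader (the reader wired up by
# FixedWidthFieldParser above): two columns spanning characters 0-5 and 5-10.
# StringIO is chosen to match the Python 2 idioms of the surrounding code.
def _example_fixed_width_reader():
    from StringIO import StringIO
    buf = StringIO("aaaaa    1\nbbbbb    2\n")
    reader = FixedWidthReader(buf, colspecs=[(0, 5), (5, 10)], filler=' ')
    return [reader.next(), reader.next()]  # [['aaaaa', '1'], ['bbbbb', '2']]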
#----------------------------------------------------------------------
# ExcelFile class
_openpyxl_msg = ("\nFor parsing .xlsx files 'openpyxl' is required.\n"
"You can install it via 'easy_install openpyxl' or "
"'pip install openpyxl'.\nAlternatively, you could save"
" the .xlsx file as a .xls file.\n")
class ExcelFile(object):
"""
Class for parsing tabular excel sheets into DataFrame objects.
Uses xlrd for parsing .xls files or openpyxl for .xlsx files.
See ExcelFile.parse for more documentation
Parameters
----------
    path_or_buf : string or file-like object
        Path to an xls or xlsx file, or a file-like object with the workbook
        contents. Paths ending in '.xls' are read with xlrd, other paths with
        openpyxl; file-like objects are tried with xlrd first, then openpyxl.
"""
def __init__(self, path_or_buf):
self.use_xlsx = True
self.path_or_buf = path_or_buf
self.tmpfile = None
if isinstance(path_or_buf, basestring):
if path_or_buf.endswith('.xls'):
self.use_xlsx = False
import xlrd
self.book = xlrd.open_workbook(path_or_buf)
else:
try:
from openpyxl.reader.excel import load_workbook
self.book = load_workbook(path_or_buf, use_iterators=True)
except ImportError: # pragma: no cover
raise ImportError(_openpyxl_msg)
else:
data = path_or_buf.read()
try:
import xlrd
self.book = xlrd.open_workbook(file_contents=data)
self.use_xlsx = False
except Exception:
from openpyxl.reader.excel import load_workbook
buf = py3compat.BytesIO(data)
self.book = load_workbook(buf, use_iterators=True)
def __repr__(self):
return object.__repr__(self)
def parse(self, sheetname, header=0, skiprows=None, index_col=None,
parse_cols=None, parse_dates=False, date_parser=None,
na_values=None, thousands=None, chunksize=None):
"""
Read Excel table into DataFrame
Parameters
----------
sheetname : string
Name of Excel sheet
header : int, default 0
Row to use for the column labels of the parsed DataFrame
skiprows : list-like
Row numbers to skip (0-indexed)
index_col : int, default None
Column to use as the row labels of the DataFrame. Pass None if
there is no such column
parse_cols : int or list, default None
If None then parse all columns,
If int then indicates last column to be parsed
If list of ints then indicates list of column numbers to be parsed
na_values : list-like, default None
List of additional strings to recognize as NA/NaN
Returns
-------
parsed : DataFrame
"""
choose = {True:self._parse_xlsx,
False:self._parse_xls}
return choose[self.use_xlsx](sheetname, header=header,
skiprows=skiprows, index_col=index_col,
parse_cols=parse_cols,
parse_dates=parse_dates,
date_parser=date_parser,
na_values=na_values,
thousands=thousands,
chunksize=chunksize)
def _should_parse(self, i, parse_cols):
if isinstance(parse_cols, int):
return i <= parse_cols
else:
return i in parse_cols
def _parse_xlsx(self, sheetname, header=0, skiprows=None, index_col=None,
parse_cols=None, parse_dates=False, date_parser=None,
na_values=None, thousands=None, chunksize=None):
sheet = self.book.get_sheet_by_name(name=sheetname)
data = []
        # openpyxl's iterator-based workbook exposes iter_rows() for row access
should_parse = {}
for row in sheet.iter_rows():
row_data = []
for j, cell in enumerate(row):
if parse_cols is not None and j not in should_parse:
should_parse[j] = self._should_parse(j, parse_cols)
if parse_cols is None or should_parse[j]:
row_data.append(cell.internal_value)
data.append(row_data)
if header is not None:
data[header] = _trim_excel_header(data[header])
parser = TextParser(data, header=header, index_col=index_col,
na_values=na_values,
thousands=thousands,
parse_dates=parse_dates,
date_parser=date_parser,
skiprows=skiprows,
chunksize=chunksize)
return parser.get_chunk()
def _parse_xls(self, sheetname, | |
node, 'tpRegJor')
self.tpRegJor = ival_
elif nodeName_ == 'natAtividade':
sval_ = child_.text
try:
ival_ = int(sval_)
except (TypeError, ValueError) as exp:
raise_parse_error(child_, 'requires integer: %s' % exp)
ival_ = self.gds_validate_integer(ival_, node, 'natAtividade')
self.natAtividade = ival_
elif nodeName_ == 'dtBase':
sval_ = child_.text
try:
ival_ = int(sval_)
except (TypeError, ValueError) as exp:
raise_parse_error(child_, 'requires integer: %s' % exp)
ival_ = self.gds_validate_integer(ival_, node, 'dtBase')
self.dtBase = ival_
elif nodeName_ == 'cnpjSindCategProf':
cnpjSindCategProf_ = child_.text
cnpjSindCategProf_ = self.gds_validate_string(cnpjSindCategProf_, node, 'cnpjSindCategProf')
self.cnpjSindCategProf = cnpjSindCategProf_
elif nodeName_ == 'trabTemp':
obj_ = trabTemp.factory()
obj_.build(child_)
self.trabTemp = obj_
obj_.original_tagname_ = 'trabTemp'
elif nodeName_ == 'aprend':
obj_ = aprend.factory()
obj_.build(child_)
self.aprend = obj_
obj_.original_tagname_ = 'aprend'
# end class infoCeletista
class tpRegJor(GeneratedsSuper):
subclass = None
superclass = None
def __init__(self):
self.original_tagname_ = None
def factory(*args_, **kwargs_):
if CurrentSubclassModule_ is not None:
subclass = getSubclassFromModule_(
CurrentSubclassModule_, tpRegJor)
if subclass is not None:
return subclass(*args_, **kwargs_)
if tpRegJor.subclass:
return tpRegJor.subclass(*args_, **kwargs_)
else:
return tpRegJor(*args_, **kwargs_)
factory = staticmethod(factory)
def hasContent_(self):
if (
):
return True
else:
return False
def export(self, outfile, level, namespace_='', name_='tpRegJor', namespacedef_='', pretty_print=True):
imported_ns_def_ = GenerateDSNamespaceDefs_.get('tpRegJor')
if imported_ns_def_ is not None:
namespacedef_ = imported_ns_def_
if pretty_print:
eol_ = '\n'
else:
eol_ = ''
if self.original_tagname_ is not None:
name_ = self.original_tagname_
showIndent(outfile, level, pretty_print)
outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
already_processed = set()
self.exportAttributes(outfile, level, already_processed, namespace_, name_='tpRegJor')
if self.hasContent_():
outfile.write('>%s' % (eol_, ))
self.exportChildren(outfile, level + 1, namespace_='', name_='tpRegJor', pretty_print=pretty_print)
outfile.write('</%s%s>%s' % (namespace_, name_, eol_))
else:
outfile.write('/>%s' % (eol_, ))
def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='tpRegJor'):
pass
def exportChildren(self, outfile, level, namespace_='', name_='tpRegJor', fromsubclass_=False, pretty_print=True):
pass
def build(self, node):
already_processed = set()
self.buildAttributes(node, node.attrib, already_processed)
for child in node:
nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
self.buildChildren(child, node, nodeName_)
return self
def buildAttributes(self, node, attrs, already_processed):
pass
def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
pass
# end class tpRegJor
class natAtividade(GeneratedsSuper):
subclass = None
superclass = None
def __init__(self):
self.original_tagname_ = None
def factory(*args_, **kwargs_):
if CurrentSubclassModule_ is not None:
subclass = getSubclassFromModule_(
CurrentSubclassModule_, natAtividade)
if subclass is not None:
return subclass(*args_, **kwargs_)
if natAtividade.subclass:
return natAtividade.subclass(*args_, **kwargs_)
else:
return natAtividade(*args_, **kwargs_)
factory = staticmethod(factory)
def hasContent_(self):
if (
):
return True
else:
return False
def export(self, outfile, level, namespace_='', name_='natAtividade', namespacedef_='', pretty_print=True):
imported_ns_def_ = GenerateDSNamespaceDefs_.get('natAtividade')
if imported_ns_def_ is not None:
namespacedef_ = imported_ns_def_
if pretty_print:
eol_ = '\n'
else:
eol_ = ''
if self.original_tagname_ is not None:
name_ = self.original_tagname_
showIndent(outfile, level, pretty_print)
outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
already_processed = set()
self.exportAttributes(outfile, level, already_processed, namespace_, name_='natAtividade')
if self.hasContent_():
outfile.write('>%s' % (eol_, ))
self.exportChildren(outfile, level + 1, namespace_='', name_='natAtividade', pretty_print=pretty_print)
outfile.write('</%s%s>%s' % (namespace_, name_, eol_))
else:
outfile.write('/>%s' % (eol_, ))
def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='natAtividade'):
pass
def exportChildren(self, outfile, level, namespace_='', name_='natAtividade', fromsubclass_=False, pretty_print=True):
pass
def build(self, node):
already_processed = set()
self.buildAttributes(node, node.attrib, already_processed)
for child in node:
nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
self.buildChildren(child, node, nodeName_)
return self
def buildAttributes(self, node, attrs, already_processed):
pass
def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
pass
# end class natAtividade
class dtBase(GeneratedsSuper):
subclass = None
superclass = None
def __init__(self):
self.original_tagname_ = None
def factory(*args_, **kwargs_):
if CurrentSubclassModule_ is not None:
subclass = getSubclassFromModule_(
CurrentSubclassModule_, dtBase)
if subclass is not None:
return subclass(*args_, **kwargs_)
if dtBase.subclass:
return dtBase.subclass(*args_, **kwargs_)
else:
return dtBase(*args_, **kwargs_)
factory = staticmethod(factory)
def hasContent_(self):
if (
):
return True
else:
return False
def export(self, outfile, level, namespace_='', name_='dtBase', namespacedef_='', pretty_print=True):
imported_ns_def_ = GenerateDSNamespaceDefs_.get('dtBase')
if imported_ns_def_ is not None:
namespacedef_ = imported_ns_def_
if pretty_print:
eol_ = '\n'
else:
eol_ = ''
if self.original_tagname_ is not None:
name_ = self.original_tagname_
showIndent(outfile, level, pretty_print)
outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
already_processed = set()
self.exportAttributes(outfile, level, already_processed, namespace_, name_='dtBase')
if self.hasContent_():
outfile.write('>%s' % (eol_, ))
self.exportChildren(outfile, level + 1, namespace_='', name_='dtBase', pretty_print=pretty_print)
outfile.write('</%s%s>%s' % (namespace_, name_, eol_))
else:
outfile.write('/>%s' % (eol_, ))
def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='dtBase'):
pass
def exportChildren(self, outfile, level, namespace_='', name_='dtBase', fromsubclass_=False, pretty_print=True):
pass
def build(self, node):
already_processed = set()
self.buildAttributes(node, node.attrib, already_processed)
for child in node:
nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
self.buildChildren(child, node, nodeName_)
return self
def buildAttributes(self, node, attrs, already_processed):
pass
def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
pass
# end class dtBase
class cnpjSindCategProf(GeneratedsSuper):
subclass = None
superclass = None
def __init__(self):
self.original_tagname_ = None
def factory(*args_, **kwargs_):
if CurrentSubclassModule_ is not None:
subclass = getSubclassFromModule_(
CurrentSubclassModule_, cnpjSindCategProf)
if subclass is not None:
return subclass(*args_, **kwargs_)
if cnpjSindCategProf.subclass:
return cnpjSindCategProf.subclass(*args_, **kwargs_)
else:
return cnpjSindCategProf(*args_, **kwargs_)
factory = staticmethod(factory)
def hasContent_(self):
if (
):
return True
else:
return False
def export(self, outfile, level, namespace_='', name_='cnpjSindCategProf', namespacedef_='', pretty_print=True):
imported_ns_def_ = GenerateDSNamespaceDefs_.get('cnpjSindCategProf')
if imported_ns_def_ is not None:
namespacedef_ = imported_ns_def_
if pretty_print:
eol_ = '\n'
else:
eol_ = ''
if self.original_tagname_ is not None:
name_ = self.original_tagname_
showIndent(outfile, level, pretty_print)
outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
already_processed = set()
self.exportAttributes(outfile, level, already_processed, namespace_, name_='cnpjSindCategProf')
if self.hasContent_():
outfile.write('>%s' % (eol_, ))
self.exportChildren(outfile, level + 1, namespace_='', name_='cnpjSindCategProf', pretty_print=pretty_print)
outfile.write('</%s%s>%s' % (namespace_, name_, eol_))
else:
outfile.write('/>%s' % (eol_, ))
def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='cnpjSindCategProf'):
pass
def exportChildren(self, outfile, level, namespace_='', name_='cnpjSindCategProf', fromsubclass_=False, pretty_print=True):
pass
def build(self, node):
already_processed = set()
self.buildAttributes(node, node.attrib, already_processed)
for child in node:
nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
self.buildChildren(child, node, nodeName_)
return self
def buildAttributes(self, node, attrs, already_processed):
pass
def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
pass
# end class cnpjSindCategProf
class trabTemp(GeneratedsSuper):
"""Dados sobre trabalho temporário. Preenchimento obrigatório na
prorrogação de contrato de trabalhador temporário"""
subclass = None
superclass = None
def __init__(self, justProrr=None):
self.original_tagname_ = None
self.justProrr = justProrr
def factory(*args_, **kwargs_):
if CurrentSubclassModule_ is not None:
subclass = getSubclassFromModule_(
CurrentSubclassModule_, trabTemp)
if subclass is not None:
return subclass(*args_, **kwargs_)
if trabTemp.subclass:
return trabTemp.subclass(*args_, **kwargs_)
else:
return trabTemp(*args_, **kwargs_)
factory = staticmethod(factory)
def get_justProrr(self): return self.justProrr
def set_justProrr(self, justProrr): self.justProrr = justProrr
def hasContent_(self):
if (
self.justProrr is not None
):
return True
else:
return False
def export(self, outfile, level, namespace_='', name_='trabTemp', namespacedef_='', pretty_print=True):
imported_ns_def_ = GenerateDSNamespaceDefs_.get('trabTemp')
if imported_ns_def_ is not None:
namespacedef_ = imported_ns_def_
if pretty_print:
eol_ = '\n'
else:
eol_ = ''
if self.original_tagname_ is not None:
name_ = self.original_tagname_
showIndent(outfile, level, pretty_print)
outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
already_processed = set()
self.exportAttributes(outfile, level, already_processed, namespace_, name_='trabTemp')
if self.hasContent_():
outfile.write('>%s' % (eol_, ))
self.exportChildren(outfile, level + 1, namespace_='', name_='trabTemp', pretty_print=pretty_print)
showIndent(outfile, level, pretty_print)
outfile.write('</%s%s>%s' % (namespace_, name_, eol_))
else:
outfile.write('/>%s' % (eol_, ))
def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='trabTemp'):
pass
def exportChildren(self, outfile, level, namespace_='', name_='trabTemp', fromsubclass_=False, pretty_print=True):
if pretty_print:
eol_ = '\n'
else:
eol_ = ''
if self.justProrr is not None:
showIndent(outfile, level, pretty_print)
outfile.write('<%sjustProrr>%s</%sjustProrr>%s' % (namespace_, self.gds_encode(self.gds_format_string(quote_xml(self.justProrr), input_name='justProrr')), namespace_, eol_))
def build(self, node):
already_processed = set()
self.buildAttributes(node, node.attrib, already_processed)
for child in node:
nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
self.buildChildren(child, node, nodeName_)
return self
def buildAttributes(self, node, attrs, already_processed):
pass
def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
if nodeName_ == 'justProrr':
justProrr_ = child_.text
justProrr_ = self.gds_validate_string(justProrr_, node, 'justProrr')
self.justProrr = justProrr_
# end class trabTemp
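# Hypothetical usage sketch: building a trabTemp element and serialising it to
# stdout. Relies on the generateDS helpers (quote_xml, showIndent, ...) defined
# earlier in this generated module; the justification text is a placeholder.
def _example_export_trabTemp():
    import sys
    obj = trabTemp.factory(justProrr="Continuation of the original service demand")
    obj.export(sys.stdout, level=0, name_='trabTemp')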
class justProrr(GeneratedsSuper):
subclass = None
superclass = None
def __init__(self):
self.original_tagname_ = None
def factory(*args_, **kwargs_):
if CurrentSubclassModule_ is not None:
subclass = getSubclassFromModule_(
CurrentSubclassModule_, justProrr)
if subclass is not None:
return subclass(*args_, **kwargs_)
if justProrr.subclass:
return justProrr.subclass(*args_, **kwargs_)
else:
return justProrr(*args_, **kwargs_)
factory = staticmethod(factory)
def hasContent_(self):
if (
):
return True
else:
return False
def export(self, outfile, level, namespace_='', name_='justProrr', namespacedef_='', pretty_print=True):
imported_ns_def_ = GenerateDSNamespaceDefs_.get('justProrr')
if imported_ns_def_ is not None:
namespacedef_ = imported_ns_def_
if pretty_print:
eol_ = '\n'
else:
eol_ = ''
if self.original_tagname_ is not None:
name_ = self.original_tagname_
showIndent(outfile, level, pretty_print)
outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
already_processed = set()
self.exportAttributes(outfile, level, already_processed, namespace_, name_='justProrr')
if self.hasContent_():
outfile.write('>%s' % (eol_, ))
self.exportChildren(outfile, level + 1, namespace_='', name_='justProrr', pretty_print=pretty_print)
outfile.write('</%s%s>%s' % (namespace_, name_, eol_))
else:
outfile.write('/>%s' % (eol_, ))
| |
import logging
import os
import io
from typing import List, Tuple
import unittest
import traceback
from time import sleep
import toml
from scalablerunner.util import info, warning, error, type_check, UtilLogger
from scalablerunner.ssh import SSH
from scalablerunner.dbrunner import DBRunner
from scalablerunner.taskrunner import TaskRunner
from scalablerunner.adapter import DBRunnerAdapter
# logging.basicConfig(filename='temp/test.log',
# filemode='a',
# format='%(asctime)s,%(msecs)d %(name)s %(levelname)s %(message)s',
# datefmt='%H:%M:%S',
# level=logging.DEBUG)
# Global variables
temp_dir = 'temp'
host_infos_file = 'host_infos.toml'
test_log = 'test.log'
def get_temp_dir() -> str:
# Create 'temp' directory
if not os.path.exists(temp_dir):
os.makedirs(temp_dir)
return temp_dir
def get_host_infos() -> Tuple[str, str, str, str]:
config = toml.load(host_infos_file)
hostname = str(config['hostname'])
username = str(config['username'])
password = str(config['password'])
port = int(config['port'])
print(f"[Test] Hostname: {info(hostname)} | Username: {(info(username))} | Password: {info(password)} | Port: {info(str(port))}")
return hostname, username, password, port
def get_server_infos() -> Tuple[str, List[str], List[str], str]:
config = toml.load(host_infos_file)
sequencer = str(config['sequencer'])
servers = config['servers']
clients = config['clients']
package_path = config['package_path']
print(f"[Test] Sequencer: {info(sequencer)} | Servers: {(info(', '.join(servers)))} | Clients: {info(', '.join(clients))} | Package Path: {info(package_path)}")
return sequencer, servers, clients, package_path
def get_rpyc_server_infos() -> Tuple[str, int]:
config = toml.load(host_infos_file)
rpyc_ip = str(config['rpyc_ip'])
rpyc_port = int(config['rpyc_port'])
print(f"[Test] RPYC IP: {info(rpyc_ip)} | RPYC Port: {info(str(rpyc_port))}")
return rpyc_ip, rpyc_port
def get_log_path() -> str:
return os.path.join(get_temp_dir(), test_log)
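# Hypothetical example of the host_infos.toml layout expected by the accessors
# above (field names taken from the code; every value is a placeholder):
#
# hostname = "10.0.0.1"
# username = "db_runner"
# password = "secret"
# port = 22
# sequencer = "10.0.0.2"
# servers = ["10.0.0.3", "10.0.0.4"]
# clients = ["10.0.0.5"]
# package_path = "/home/db_runner/package.tgz"
# rpyc_ip = "10.0.0.6"
# rpyc_port = 18861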
class TestUtil(unittest.TestCase):
def test_info(self):
msg = info(f"Unit test on info()")
assert isinstance(msg, str)
    def test_warning(self):
        msg = warning(f"Unit test on warning()")
        assert isinstance(msg, str)
    def test_error(self):
        msg = error(f"Unit test on error()")
        assert isinstance(msg, str)
    def test_Logger(self):
        assert UtilLogger is not None
class TestSSH(unittest.TestCase):
TEST_LOOP_COUNT = 10
TIMEOUT = 20
RETRY_COUNT = 3
CMD_RETRY_COUNT = 2
# Default values
DEFAULT_IS_RAISE_ERR = True
DEFAULT_RETRY_COUNT = 3
DEFAULT_TIMEOUT = 20
def __init__(self, methodName: str) -> None:
super().__init__(methodName=methodName)
# Get host infos
self.HOSTNAME, self.USERNAME, self.PASSWORD, self.PORT = get_host_infos()
self.client = SSH(hostname=self.HOSTNAME, username=self.USERNAME, password=self.PASSWORD, port=self.PORT)
self.client.output_log(file_name=get_log_path())
# self.client.set_default_is_raise_err(default_is_raise_err=self.DEFAULT_IS_RAISE_ERR)
def __info(self, *args, **kwargs) -> None:
print(f"[Test SSH] Info: {info(*args, **kwargs)}")
def __warning(self, *args, **kwargs) -> None:
print(f"[Test SSH] Warning: {warning(*args, **kwargs)}")
def __error(self, *args, **kwargs) -> None:
print(f"[Test SSH] Error: {error(*args, **kwargs)}")
def __type_check(self, *args, **kwargs) -> None:
type_check(*args, **kwargs)
def setUp(self):
self.client.set_default_is_raise_err(default_is_raise_err=self.DEFAULT_IS_RAISE_ERR)
self.client.set_default_retry_count(default_retry_count=self.DEFAULT_RETRY_COUNT)
self.client.set_default_timeout(default_timeout=self.DEFAULT_TIMEOUT)
self.client.connect(retry_count=self.RETRY_COUNT)
def tearDown(self):
self.client.close()
def test_connect(self):
for i in range(self.TEST_LOOP_COUNT):
self.client.reconnect()
def test_set_default_is_raise_err(self):
is_passed = False
# Turn off is_raise_err, shouldn't raise error
self.client.set_default_is_raise_err(default_is_raise_err=False)
self.client.exec_command(command='rm new_dir_test', retry_count=self.RETRY_COUNT, cmd_retry_count=True)
# Turn on is_raise_err, should raise error
self.client.set_default_is_raise_err(default_is_raise_err=True)
try:
self.client.exec_command(command='rm new_dir_test', retry_count=self.RETRY_COUNT, cmd_retry_count=True)
is_passed = False
except:
is_passed = True
if not is_passed:
self.__error(f"Failed to pass test_set_default_is_raise_err()")
traceback.print_exc()
raise BaseException(f"Failed to pass test_set_default_is_raise_err()")
def test_exec_command(self):
for i in range(self.TEST_LOOP_COUNT):
stdin, stdout, stderr, is_successed = self.client.exec_command(command='ls -la; mkdir new_dir_test; rm -rf new_dir_test',
bufsize=-1, get_pty=False, environment=None,
retry_count=self.RETRY_COUNT, cmd_retry_count=self.CMD_RETRY_COUNT)
assert is_successed is True
stdin, stdout, stderr, is_successed = self.client.exec_command(command='rm new_dir_test1', retry_count=self.RETRY_COUNT,
cmd_retry_count=self.CMD_RETRY_COUNT, is_raise_err=False)
assert is_successed is False
is_passed = True
# Test on turning on both is_raise_err and is_show_result
try:
self.client.exec_command(command='rm new_dir_test2', retry_count=self.RETRY_COUNT, cmd_retry_count=1, is_raise_err=True)
is_passed = False
except:
traceback.print_exc()
# Test on turning on is_raise_err and turning off is_show_result
try:
self.client.exec_command(command='rm new_dir_test3', retry_count=self.RETRY_COUNT, cmd_retry_count=1, is_show_result=False, is_raise_err=True)
is_passed = False
except:
traceback.print_exc()
# Test on timeout
try:
self.client.exec_command(command='ls -lh; sleep 50', timeout=1, retry_count=self.RETRY_COUNT, cmd_retry_count=1, is_show_result=True, is_raise_err=True)
is_passed = False
except:
traceback.print_exc()
is_passed = True
# Test on default_timeout
try:
self.client.set_default_timeout(default_timeout=1)
self.client.reconnect()
self.client.exec_command(command='ls -lh; sleep 60', cmd_retry_count=1, is_show_result=True, is_raise_err=False)
except:
traceback.print_exc()
is_passed = False
# Check whether passed the test
if not is_passed:
self.__error(f"Failed to pass test_exec_command()")
traceback.print_exc()
raise BaseException(f"Failed to pass test_exec_command()")
def test_put(self):
for i in range(self.TEST_LOOP_COUNT):
# server.jar
self.client.put(files='data/jars/server.jar', remote_path='./', recursive=False, preserve_times=False, retry_count=3)
# bench.toml
# self.client.put(files='data/config/bench.toml', remote_path='./', recursive=False, preserve_times=False, retry_count=3)
# jars.zip
# self.client.put(files='data/jars/jars.zip', remote_path='./', recursive=False, preserve_times=False, retry_count=3)
for i in range(self.TEST_LOOP_COUNT // 2):
# server.jar
# self.client.put(files='data/jars/server.jar', remote_path='./', recursive=False, preserve_times=False, retry_count=3, is_raise_err=False)
# self.client.put(files='data/jars/server.jar', remote_path='./', recursive=False, preserve_times=False, retry_count=3, is_raise_err=True)
# bench.toml
self.client.put(files='data/config/bench.toml', remote_path='./', recursive=False, preserve_times=False, retry_count=3, is_raise_err=False)
self.client.put(files='data/config/bench.toml', remote_path='./', recursive=False, preserve_times=False, retry_count=3, is_raise_err=True)
# jars.zip
# self.client.put(files='data/jars/jars.zip', remote_path='./', recursive=False, preserve_times=False, retry_count=3, is_raise_err=False)
# self.client.put(files='data/jars/jars.zip', remote_path='./', recursive=False, preserve_times=False, retry_count=3, is_raise_err=True)
def test_putfo(self):
# Test file
test_file = io.StringIO('HI Test\n' * 100000)
for i in range(self.TEST_LOOP_COUNT):
self.client.putfo(fl=test_file, remote_path='./test.txt', retry_count=3)
for i in range(self.TEST_LOOP_COUNT // 2):
self.client.putfo(fl=test_file, remote_path='./test.txt', retry_count=3, is_raise_err=False)
self.client.putfo(fl=test_file, remote_path='./test.txt', retry_count=3, is_raise_err=True)
def test_large_put(self):
for i in range(self.TEST_LOOP_COUNT):
# server.jar
self.client.large_put(files='data/jars/server.jar', remote_path='./server.jar', recursive=False, retry_count=3)
# bench.toml
# self.client.put(files='data/config/bench.toml', remote_path='./bench.toml', recursive=False, retry_count=3)
# jars.zip
# self.client.put(files='data/jars/jars.zip', remote_path='./jars.zip', recursive=False, retry_count=3)
for i in range(self.TEST_LOOP_COUNT // 2):
# server.jar
# self.client.put(files='data/jars/server.jar', remote_path='./server.jar', recursive=False, retry_count=3, is_raise_err=False)
# self.client.put(files='data/jars/server.jar', remote_path='./server.jar', recursive=False, retry_count=3, is_raise_err=True)
# bench.toml
self.client.put(files='data/config/bench.toml', remote_path='./bench.toml', recursive=False, retry_count=3, is_raise_err=False)
self.client.put(files='data/config/bench.toml', remote_path='./bench.toml', recursive=False, retry_count=3, is_raise_err=True)
# jars.zip
# self.client.put(files='data/jars/jars.zip', remote_path='./jars.zip', recursive=False, retry_count=3, is_raise_err=False)
# self.client.put(files='data/jars/jars.zip', remote_path='./jars.zip', recursive=False, retry_count=3, is_raise_err=True)
def test_get(self):
for i in range(self.TEST_LOOP_COUNT):
self.client.get(local_path='/opt/shared-disk2/sychou/', remote_path='./server.jar', recursive=False, preserve_times=False, retry_count=3)
self.client.get(local_path='/opt/shared-disk2/sychou', remote_path='./db_runner_workspace_cw/auto-bencher/src', recursive=True, mode=SSH.STABLE)
self.client.get(local_path='/opt/shared-disk2/sychou', remote_path='./db_runner_workspace_cw/auto-bencher/src', recursive=True, mode=SSH.SFTP)
# self.client.get(local_path='/opt/shared-disk2/sychou/jars.zip', remote_path='./jars.zip', recursive=False, mode=SSH.STABLE)
# self.client.get(local_path='/opt/shared-disk2/sychou/transaction-features.csv', remote_path='./db_runner_workspace_test/temp/reports/transaction-features.csv', recursive=False, mode=SSH.STABLE)
for i in range(self.TEST_LOOP_COUNT // 2):
self.client.get(local_path=get_temp_dir(), remote_path='./server.jar', recursive=False, preserve_times=False, retry_count=3, is_raise_err=False)
self.client.get(local_path=get_temp_dir(), remote_path='./server.jar', recursive=False, preserve_times=False, retry_count=3, is_raise_err=True)
def test_task(epoch :int, decay: str, machine: int, gpu: int, dataset_size: int):
import os
# import jax.numpy as np
import numpy as np
from time import sleep
os.environ["CUDA_VISIBLE_DEVICES"] = f'{gpu}'
print(f"Epoch: {epoch}, Decay: {decay}, Dataset Size: {dataset_size}, Machine: {machine}, GPU: {gpu}")
sleep(5)
def test_err(flag: str, gpu: int):
raise BaseException(f"Something wrong with the flag {flag} on gpu {gpu}")
class TestTaskRunner(unittest.TestCase):
def __init__(self, methodName: str) -> None:
super().__init__(methodName=methodName)
def test_run(self):
rpyc_ip, rpyc_port = get_rpyc_server_infos()
config = {
            'section-1': { # Each section is executed sequentially (see the minimal config sketch after this class).
                'group-1': { # The groups under the same section are executed concurrently
                    'Call': test_task, # Call can be either a function call or a command in string
                    'Param': { # The TaskRunner lists every combination of the parameters and executes each of them once
'decay': ['exp', 'anneal', 'static'],
'epoch': [100, 1000, 10000],
'dataset_size': [1000, 2000, 3000]
},
                    'Async': { # Tasks in the same group are scheduled onto these resources by the TaskRunner at runtime.
'machine': [0, 1],
'gpu': [0, 1]
},
},
                'group-2':{ # 'group-2' can be seen as another resource group that handles different tasks from 'group-1' during 'section-1'
'Call': 'ls',
'Param': {
'': ['-l', '-a', '-la']
},
'Async': {
'': []
}
},
                'group-3':{ # 'group-3' can be seen as another resource group that handles different tasks from 'group-1' during 'section-1'
'Call': 'ls',
'Param': {
'': ['-l', '-a', '-la']
}
},
'group-error': {
'Call': test_err,
'Param': {
'flag': ['-a', '-l', '-la']
},
'Async': {
'gpu': [0, 1, 2]
}
},
'group-bug': {
'Call': [],
'Param': {
'flag': ['-a', '-l', '-la']
},
'Async': {
'gpu': [0, 1, 2]
}
}
},
'section-error': {
'group-1': [],
'group-2': []
},
'section-2': {
'group-1': {
'Call': 'ls',
'Param': {
'': ['-a']
}
},
'group-wrong-cmd': {
'Call': 'lsa',
'Param': {
'': ['-a', '-l', '-la']
},
'Async': {
'': [0, 1, 2]
}
},
},
'section-remote': {
'group-remote 18180': {
'Call': test_task,
'Param': {
'decay': ['exp', 'anneal', 'static'],
'epoch': [100, 1000, 10000],
'dataset_size': [1000, 2000, 3000]
},
'Async': {
'machine': [0, 1],
'gpu': [0, 1]
},
'Remote': rpyc_ip, # RPYC server IP
'Port': rpyc_port # RPYC server port
},
'group-remote default': {
'Call': test_task,
'Param': {
'decay': ['exp', 'anneal', 'static'],
'epoch': [100, 1000, 10000],
'dataset_size': [1000, 2000, 3000]
},
'Async': {
'machine': [0, 1],
'gpu': [0, 1]
},
'Remote': rpyc_ip, # RPYC server IP
},
}
}
tr = TaskRunner(config=config, delay=0.5)
tr.output_log(file_name=get_log_path())
tr.run()
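# A minimal configuration sketch distilled from the comments in TestTaskRunner.test_run()
# above (hypothetical values, not part of the original test suite): sections run one after
# another, groups inside a section run concurrently, 'Param' entries are expanded into
# their full cross-product, and 'Async' lists the resources the resulting tasks are
# scheduled onto at runtime.
def _minimal_taskrunner_config() -> dict:
    return {
        'section-1': {
            'group-1': {
                'Call': test_task,
                # 3 decays x 1 epoch x 1 dataset size -> 3 tasks in total
                'Param': {'decay': ['exp', 'anneal', 'static'],
                          'epoch': [100],
                          'dataset_size': [1000]},
                # the 3 tasks are scheduled onto machine 0's two GPUs as they free up
                'Async': {'machine': [0], 'gpu': [0, 1]},
            }
        }
    }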
def config_db_runner(db_runner: DBRunner) -> DBRunner:
sequencer, servers, clients, package_path = get_server_infos()
db_runner.config_bencher(sequencer=sequencer,
servers=servers,
clients=clients,
package_path=package_path)
db_runner.config_cluster(server_count=4, jar_dir='latest')
return db_runner
def get_workspace_name():
return 'db_runner_workspace_test'
class TestDBRunner(unittest.TestCase):
# SSH default value
SSH_DEFAULT_RETRY_COUT = 3
SSH_DEFAULT_IS_RAISE_ERR = True
# Configurations
VANILLABENCH_NAME = "vanillabench"
ELASQL_NAME = "elasql"
ELASQLBENCH_NAME = "elasqlbench"
BENCHMARK_INTERVAL_NAME = "org.vanilladb.bench.BenchmarkerParameters.BENCHMARK_INTERVAL"
INIT_RECORD_PER_PART_NAME = "org.elasql.bench.benchmarks.ycsb.ElasqlYcsbConstants.INIT_RECORD_PER_PART"
RW_TX_RATE_NAME = "org.elasql.bench.benchmarks.ycsb.ElasqlYcsbConstants.RW_TX_RATE"
ENABLE_COLLECTING_DATA_NAME = "org.elasql.perf.tpart.TPartPerformanceManager.ENABLE_COLLECTING_DATA"
BENCHMARK_INTERVAL = "120000"
INIT_RECORD_PER_PART = "100000"
ENABLE_COLLECTING_DATA = "true"
RW_TX_RATE = "1"
ARGS_LOAD = {
ELASQLBENCH_NAME: {
INIT_RECORD_PER_PART_NAME: INIT_RECORD_PER_PART
}
}
ARGS_BENCH = {
VANILLABENCH_NAME: {
BENCHMARK_INTERVAL_NAME: BENCHMARK_INTERVAL
},
ELASQL_NAME: {
ENABLE_COLLECTING_DATA_NAME: ENABLE_COLLECTING_DATA
},
ELASQLBENCH_NAME: {
INIT_RECORD_PER_PART_NAME: INIT_RECORD_PER_PART,
RW_TX_RATE_NAME: RW_TX_RATE
}
}
def __info(self, *args, **kwargs) -> None:
print(f"[Test DB Runner] Info: {info(*args, **kwargs)}")
def __warning(self, *args, **kwargs) -> None:
print(f"[Test DB Runner] Warning: {warning(*args, **kwargs)}")
def | |
to the server
persist until the process is completed. The entity returned with
this response SHOULD include an indication of the request's
current status and either a pointer to a status monitor or some
estimate of when the user can expect the request to be fulfilled.
"""
# end class Accepted
class Non_Authoritative_Information (Successful) :
"""The returned metainformation in the entity-header is not the
definitive set as available from the origin server, but is gathered from
a local or a third-party copy.
"""
status_code = 203
_spec = \
""" The set presented MAY be a subset or superset of the original
version. For example, including local annotation information
about the resource might result in a superset of the
metainformation known by the origin server. Use of this response
code is not required and is only appropriate when the response
would otherwise be 200 (OK).
"""
# end class Non_Authoritative_Information
class No_Content (Successful) :
"""The server has fulfilled the request but does not need to return an
entity-body, and might want to return updated metainformation.
"""
status_code = 204
_spec = \
""" The response MAY include new or updated metainformation in the
form of entity-headers, which if present SHOULD be associated with
the requested variant.
If the client is a user agent, it SHOULD NOT change its document
view from that which caused the request to be sent. This response
is primarily intended to allow input for actions to take place
without causing a change to the user agent's active document
view, although any new or updated metainformation SHOULD be
applied to the document currently in the user agent's active
view.
The 204 response MUST NOT include a message-body, and thus is
always terminated by the first empty line after the header fields.
"""
# end class No_Content
class Reset_Content (Successful) :
"""The server has fulfilled the request and the user agent SHOULD reset
the document view which caused the request to be sent.
"""
status_code = 205
_spec = \
""" This response is primarily intended to allow input for actions to
take place via user input, followed by a clearing of the form in
which the input is given so that the user can easily initiate
another input action.
The response MUST NOT include an entity.
"""
# end class Reset_Content
class Partial_Content (Successful) :
"""The server has fulfilled the partial GET request for the resource."""
status_code = 206
_spec = \
""" The request MUST have included a Range header field (section
14.35) indicating the desired range, and MAY have included an
If-Range header field (section 14.27) to make the request
conditional.
The response MUST include the following header fields:
- Either a Content-Range header field (section 14.16)
indicating the range included with this response, or a
multipart/byteranges Content-Type including Content-Range
fields for each part. If a Content-Length header field is
present in the response, its value MUST match the actual
number of OCTETs transmitted in the message-body.
- Date
- ETag and/or Content-Location, if the header would have
been sent in a 200 response to the same request
- Expires, Cache-Control, and/or Vary, if the field-value might
differ from that sent in any previous response for the same
variant
If the 206 response is the result of an If-Range request that
used a strong cache validator (see section 13.3.3), the response
SHOULD NOT include other entity-headers. If the response is the
result of an If-Range request that used a weak validator, the
response MUST NOT include other entity-headers; this prevents
inconsistencies between cached entity-bodies and updated headers.
Otherwise, the response MUST include all of the entity-headers
that would have been returned with a 200 (OK) response to the
same request.
"""
# end class Partial_Content
class Redirection (Status) :
"""Base class for HTTP status classes indicating redirection [3xx]."""
def __init__ (self, location, message = None, ** kw) :
self.location = location
self.__super.__init__ (message, ** kw)
# end def __init__
def _add_response_headers (self, resource, request, response) :
self.__super._add_response_headers (resource, request, response)
response.set_header ("Location", self.location)
# end def _add_response_headers
# end class Redirection
class Multiple_Choices (Redirection) :
"""The requested resource corresponds to any one of a set of
representations, each with its own specific location, and agent-driven
negotiation information (section 12) is being provided so that the user
(or user agent) can select a preferred representation and redirect its
request to that location.
"""
status_code = 300
_spec = \
""" Unless it was a HEAD request, the response SHOULD include an
entity containing a list of resource characteristics and
location(s) from which the user or user agent can choose the one
most appropriate. The entity format is specified by the media type
given in the Content-Type header field. Depending upon the format
and the capabilities of the user agent, selection of the most
appropriate choice MAY be performed automatically. However, this
specification does not define any standard for such automatic
selection.
If the server has a preferred choice of representation, it SHOULD
include the specific URI for that representation in the Location
field; user agents MAY use the Location field value for automatic
redirection. This response is cacheable unless indicated
otherwise.
"""
# end class Multiple_Choices
class Moved_Permanently (Redirection) :
"""The requested resource has been assigned a new permanent URI and any
future references to this resource SHOULD use one of the returned URIs.
"""
status_code = 301
_spec = \
""" The new permanent URI SHOULD be given by the Location field in
the response. Unless the request method was HEAD, the entity of the
response SHOULD contain a short hypertext note with a hyperlink to
the new URI(s).
If the 301 status code is received in response to a request other
than GET or HEAD, the user agent MUST NOT automatically redirect
the request unless it can be confirmed by the user, since this
might change the conditions under which the request was issued.
Note: When automatically redirecting a POST request after
receiving a 301 status code, some existing HTTP/1.0 user agents
will erroneously change it into a GET request.
"""
# end class Moved_Permanently
class Found (Redirection) :
"""The requested resource resides temporarily under a different URI."""
status_code = 302
description = "Found (moved temporarily)"
_spec = \
""" Since the redirection might be altered on occasion, the client
SHOULD continue to use the Request-URI for future requests. This
response is only cacheable if indicated by a Cache-Control or
Expires header field.
The temporary URI SHOULD be given by the Location field in the
response. Unless the request method was HEAD, the entity of the
response SHOULD contain a short hypertext note with a hyperlink
to the new URI(s).
If the 302 status code is received in response to a request other
than GET or HEAD, the user agent MUST NOT automatically redirect
the request unless it can be confirmed by the user, since this
might change the conditions under which the request was issued.
Note: RFC 1945 and RFC 2068 specify that the client is not
allowed to change the method on the redirected request.
However, most existing user agent implementations treat 302 as
if it were a 303 response, performing a GET on the Location
field-value regardless of the original request method. The
status codes 303 and 307 have been added for servers that wish
to make unambiguously clear which kind of reaction is expected
of the client.
http://insanecoding.blogspot.co.at/2014/02/http-308-incompetence-expected.html::
Since 302 was being used in two different ways, two new codes
were created, one for each technique, to ensure proper use in
the future. 302 retained its definition, but with so many
incorrect implementations out there, 302 should essentially
never be used if you want to ensure correct semantics are
followed, instead use 303 - See Other (processing, move on...),
                or 307 Temporary Redirect (The real version
#!/usr/bin/env python3
# Repository: lemassykoi/XTBApi
# Adaptation of the FXCM script for XTB
##
debug = 1 ## DEBUG ENABLED OR DISABLED
from XTBApi.api import *
import time
import pandas as pd
import datetime as dt
import talib.abstract as ta
## Maths modules
import pyti.bollinger_bands as bb
from pyti.relative_strength_index import relative_strength_index as rsi
from pyti.bollinger_bands import upper_bollinger_band as ubb
from pyti.bollinger_bands import middle_bollinger_band as mbb
from pyti.bollinger_bands import lower_bollinger_band as lbb
from pyti.bollinger_bands import percent_bandwidth as percent_b
import requests
import sys, traceback
from os import system
from pprint import pprint
##
## SPINNER FUNC
##
import threading
import itertools
class Spinner:
def __init__(self, message, delay=0.05):
        #self.spinner = itertools.cycle(['-', '/', '|', '\\']) # counter-clockwise
        self.spinner = itertools.cycle(['-', '\\', '|', '/']) # clockwise
self.delay = delay
self.busy = False
self.spinner_visible = False
sys.stdout.write(message)
def write_next(self):
with self._screen_lock:
if not self.spinner_visible:
sys.stdout.write(next(self.spinner))
self.spinner_visible = True
sys.stdout.flush()
def remove_spinner(self, cleanup=False):
with self._screen_lock:
if self.spinner_visible:
sys.stdout.write('\b')
self.spinner_visible = False
if cleanup:
sys.stdout.write(' ') # overwrite spinner with blank
                sys.stdout.write('\r')           # return to the start of the line
sys.stdout.flush()
def spinner_task(self):
while self.busy:
self.write_next()
time.sleep(self.delay)
self.remove_spinner()
def __enter__(self):
if sys.stdout.isatty():
self._screen_lock = threading.Lock()
self.busy = True
self.thread = threading.Thread(target=self.spinner_task)
self.thread.start()
def __exit__(self, exception, value, tb):
if sys.stdout.isatty():
self.busy = False
self.remove_spinner(cleanup=True)
else:
sys.stdout.write('\r')
##
class bcolors:
HEADER = '\033[95m'
OKBLUE = '\033[94m'
OKCYAN = '\033[96m'
OKGREEN = '\033[92m'
WARNING = '\033[93m'
FAIL = '\033[91m'
ENDC = '\033[0m'
BOLD = '\033[1m'
UNDERLINE = '\033[4m'
def NotifyLogDebug(Message):
LOGGER.debug(Message)
requests.get('https://api.telegram.org/bot' + TG_token + '/sendMessage?chat_id=' + TG_chat_id + '&text=' + Message)
def NotifyLogInfo(Message):
LOGGER.info(Message)
requests.get('https://api.telegram.org/bot' + TG_token + '/sendMessage?chat_id=' + TG_chat_id + '&text=' + Message)
def NotifyLogWarning(Message):
LOGGER.warning(Message)
requests.get('https://api.telegram.org/bot' + TG_token + '/sendMessage?chat_id=' + TG_chat_id + '&text=' + Message)
def NotifyLogError(Message):
LOGGER.error(Message)
requests.get('https://api.telegram.org/bot' + TG_token + '/sendMessage?chat_id=' + TG_chat_id + '&text=' + Message)
def NotifyLogCritical(Message):
LOGGER.critical(Message)
requests.get('https://api.telegram.org/bot' + TG_token + '/sendMessage?chat_id=' + TG_chat_id + '&text=' + Message)
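# A sketch of a safer variant (an addition, not used by the notifiers above; the name
# NotifyRaw is hypothetical): letting `requests` build the query string avoids broken
# notifications when the message contains spaces, '&' or non-ASCII characters.
def NotifyRaw(Message):
    requests.get(
        'https://api.telegram.org/bot' + TG_token + '/sendMessage',
        params={'chat_id': TG_chat_id, 'text': Message},
    )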
def NormalExit():
client.logout()
LOGGER.info('Logged Out : Script Exited Normally')
sys.exit()
if debug == 1: print(f"{bcolors.WARNING} DEBUG IS ON{bcolors.ENDC}")
## LOGGER LEVEL
LOGGER.setLevel(logging.INFO)
##
pricedata = None
timeframe = 'm1' ## TIMEFRAME (m1, m5, m15, m30, H1,H2,H3,H4,H6,H8,D1, W1, M1)
mn_timeframe = 60 ## Minutes (60, 300, 900, 1800, 3600, 14400, 86400, 604800, 2592000)
numberofcandles = 300 ## minimum 35 pour calcul MACD
symbol = 'EURUSD'
xtb_login = '1234567'
xtb_pass = '<PASSWORD>'
TG_chat_id='123456789'
TG_token='<PASSWORD>'
amount = 0.1
objectif_percent_sell = 1.02
objectif_percent_buy = 0.98
min_objectif_amount_sell = 50
trailing_step = 150
##
rsi_periods = 14
bb_periods = 20
bb_standard_deviations = 2.0
upper_rsi = 72
lower_rsi = 28
version = '20210127-0110'
## INIT XTB CONNECTION
NotifyLogInfo('Starting XTB Bot Tests')
client = Client()
client.login(xtb_login, xtb_pass, mode='real')
## Check if Market is Opened or Closed # return an array with 'symbol : Bool'
is_opened = client.check_if_market_open([symbol])
if is_opened[symbol] == False:
print('==MARKET IS CLOSED==')
NormalExit()
# This function runs once at the beginning of the strategy to run initial one-time processes
def Prepare():
global pricedata
if debug == 1: print(f"{bcolors.HEADER}Requesting Initial Price Data...{bcolors.ENDC}")
d = client.get_lastn_candle_history([symbol], mn_timeframe, numberofcandles)
pricedata = pd.DataFrame(data=d)
if debug == 1: print(f"{bcolors.OKGREEN}Initial Price Data Received...{bcolors.ENDC}")
print('')
## DEBUG LIGHT
#print(pricedata)
## DEBUG FULL
#print(pricedata.to_string())
print('')
# Get latest close bar prices and run Update() function every close of bar/candle
def StrategyHeartBeat():
while True:
currenttime = dt.datetime.now()
if timeframe == "m1" and currenttime.second == 0 and getLatestPriceData():
Update()
elif timeframe == "m5" and currenttime.second == 0 and currenttime.minute % 5 == 0 and getLatestPriceData():
Update()
with Spinner('Waiting for m5 bar...'):
time.sleep(240)
elif timeframe == "m15" and currenttime.second == 0 and currenttime.minute % 15 == 0 and getLatestPriceData():
Update()
with Spinner('Waiting for m15 bar...'):
time.sleep(840)
elif timeframe == "m30" and currenttime.second == 0 and currenttime.minute % 30 == 0 and getLatestPriceData():
Update()
with Spinner('Waiting for m30 bar...'):
time.sleep(1740)
elif currenttime.second == 0 and currenttime.minute == 0 and getLatestPriceData():
Update()
with Spinner('Waiting for H1 bar...'):
time.sleep(3540)
with Spinner('Waiting for m1 bar...'):
time.sleep(1)
# Returns True when pricedata is properly updated
def getLatestPriceData():
global pricedata
# Normal operation will update pricedata on first attempt
d = client.get_lastn_candle_history([symbol], mn_timeframe, numberofcandles)
new_pricedata = pd.DataFrame(data=d)
if new_pricedata['timestamp'][len(new_pricedata['timestamp'])-1] != pricedata['timestamp'][len(pricedata['timestamp'])-1]:
pricedata = new_pricedata
return True
counter = 0
# If data is not available on first attempt, try up to 6 times to update pricedata
while new_pricedata['timestamp'][len(new_pricedata['timestamp'])-1] == pricedata['timestamp'][len(pricedata['timestamp'])-1] and counter < 6:
print(f"{bcolors.BOLD}No updated prices found, trying again in 10 seconds...{bcolors.ENDC}")
print("")
counter+=1
with Spinner('Still waiting for next bar...'):
time.sleep(10)
d = client.get_lastn_candle_history([symbol], mn_timeframe, numberofcandles)
new_pricedata = pd.DataFrame(data=d)
if new_pricedata['timestamp'][len(new_pricedata['timestamp'])-1] != pricedata['timestamp'][len(pricedata['timestamp'])-1]:
pricedata = new_pricedata
return True
else:
return False
# Returns true if stream1 crossed over stream2 in most recent candle, stream2 can be integer/float or data array
def crossesOver(stream1, stream2):
# If stream2 is an int or float, check if stream1 has crossed over that fixed number
if isinstance(stream2, int) or isinstance(stream2, float):
if stream1[len(stream1)-1] <= stream2:
return False
else:
if stream1[len(stream1)-2] > stream2:
return False
elif stream1[len(stream1)-2] < stream2:
return True
else:
x = 2
while stream1[len(stream1)-x] == stream2:
x = x + 1
if stream1[len(stream1)-x] < stream2:
return True
else:
return False
# Check if stream1 has crossed over stream2
else:
if stream1[len(stream1)-1] <= stream2[len(stream2)-1]:
return False
else:
if stream1[len(stream1)-2] > stream2[len(stream2)-2]:
return False
elif stream1[len(stream1)-2] < stream2[len(stream2)-2]:
return True
else:
x = 2
while stream1[len(stream1)-x] == stream2[len(stream2)-x]:
x = x + 1
if stream1[len(stream1)-x] < stream2[len(stream2)-x]:
return True
else:
return False
# Returns true if stream1 crossed under stream2 in most recent candle, stream2 can be integer/float or data array
def crossesUnder(stream1, stream2):
# If stream2 is an int or float, check if stream1 has crossed under that fixed number
if isinstance(stream2, int) or isinstance(stream2, float):
if stream1[len(stream1)-1] >= stream2:
return False
else:
if stream1[len(stream1)-2] < stream2:
return False
elif stream1[len(stream1)-2] > stream2:
return True
else:
x = 2
while stream1[len(stream1)-x] == stream2:
x = x + 1
if stream1[len(stream1)-x] > stream2:
return True
else:
return False
# Check if stream1 has crossed under stream2
else:
if stream1[len(stream1)-1] >= stream2[len(stream2)-1]:
return False
else:
if stream1[len(stream1)-2] < stream2[len(stream2)-2]:
return False
elif stream1[len(stream1)-2] > stream2[len(stream2)-2]:
return True
else:
x = 2
while stream1[len(stream1)-x] == stream2[len(stream2)-x]:
x = x + 1
if stream1[len(stream1)-x] > stream2[len(stream2)-x]:
return True
else:
return False
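# A small illustrative sketch (not called anywhere, sample values are made up): with the
# series below the last bar closes above the fixed threshold while the previous bar was
# below it, so crossesOver() reports a cross and crossesUnder() does not.
def _demo_crossing_helpers():
    rsi_series = [55.0, 69.0, 73.5]
    assert crossesOver(rsi_series, upper_rsi) is True    # 69.0 < 72 and 73.5 > 72
    assert crossesUnder(rsi_series, upper_rsi) is False  # last value is above the threshold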
# This function places a market order in the direction BuySell, "B" = Buy, "S" = Sell, uses symbol, amount, stop, limit
def enter(BuySell, stop, limit):
volume = amount
order = 'buy'
if BuySell == "S":
order = 'sell'
try:
msg = ' Opening tradeID for symbol ' + symbol
NotifyLogInfo(msg)
opentrade = client.open_trade(order, symbol, amount)
except:
msg = ' Error Opening Trade.'
NotifyLogError(msg)
else:
msg = ' Trade Opened Successfully.'
LOGGER.info(msg)
# This function closes all positions that are in the direction BuySell, "B" = Close All Buy Positions, "S" = Close All Sell Positions, uses symbol
def exit(BuySell=None):
openpositions = client.get_trades()
isbuy = 0
if BuySell == "S":
isbuy = 1
for position in openpositions:
if position['symbol'] == symbol:
if BuySell is None or position['cmd'] == isbuy:
msg = ' Closing tradeID : ' + str(position['order'])
NotifyLogInfo(msg)
try:
closetrade = client.close_trade(position['order'])
except:
msg = " Error Closing Trade."
NotifyLogError(msg)
else:
msg = " Trade Closed Successfully."
LOGGER.info(msg)
# Returns number of Open Positions for symbol in the direction BuySell, returns total number of both Buy and Sell positions if no direction is specified
def countOpenTrades(BuySell=None):
openpositions = client.get_trades()
counter = 0
isbuy = 0
if BuySell == "S":
isbuy = 1
for keys in openpositions:
if keys['symbol'] == symbol:
if BuySell is None or keys['cmd'] == isbuy:
counter+=1
return counter
def Update():
print(f"{bcolors.HEADER}==================================================================================={bcolors.ENDC}")
print(f"{bcolors.BOLD}" + str(dt.datetime.now()) + f"{bcolors.ENDC}" + " " + timeframe + " Bar Closed - Running Update Function...")
print("Version : " + f"{bcolors.BOLD}" + version + ' ' + sys.argv[0] + f"{bcolors.ENDC}")
print("Symbol : " + f"{bcolors.BOLD}" + symbol + f"{bcolors.ENDC}")
# Calculate Indicators
macd = ta.MACD(pricedata['close'])
pricedata['cci'] = ta.CCI(pricedata['high'],pricedata['low'],pricedata['close'])
iBBUpper = bb.upper_bollinger_band(pricedata['close'], bb_periods, bb_standard_deviations)
iBBMiddle = bb.middle_bollinger_band(pricedata['close'], bb_periods, bb_standard_deviations)
iBBLower = bb.lower_bollinger_band(pricedata['close'], bb_periods, bb_standard_deviations)
iRSI = rsi(pricedata['close'], rsi_periods)
# Declare simplified variable names for most recent close candle
pricedata['macd'] = macd[0]
pricedata['macdsignal'] = macd[1]
pricedata['macdhist'] = macd[2]
BBUpper = iBBUpper[len(iBBUpper)-1]
BBMiddle = iBBMiddle[len(iBBMiddle)-1]
BBLower = iBBLower[len(iBBLower)-1]
close_price = pricedata['close'][len(pricedata)-1]
last_close_price = pricedata['close'][len(pricedata)-2]
macd_now = pricedata['macd'][len(pricedata)-1]
macdsignal = pricedata['macdsignal'][len(pricedata)-1]
macdhist = pricedata['macdhist'][len(pricedata)-1]
cci = pricedata['cci'][len(pricedata)-1]
rsi_now = iRSI[len(iRSI)-1]
## DEBUG FULL
#print(pricedata.to_string())
# Print Price/Indicators
if close_price > last_close_price:
print(f"Close Price : | |
visibility):
""" Creates a tuple of (key, value) from the attribute text.
If visibility is '0' the attribute key is prefixed with '_'
to make it a hidden attribute.
Returns a tuple of key and value.
"""
key, value = text.split('=', 1)
## prefix attributes that are marked as invisible
if visibility == 0:
key = "_"+key
## these are special attributes that are treated differently
elif key in ['netname', 'pinnumber', 'pinlabel', 'refdes']:
key = "_"+key
return key.strip(), value.strip()
def skip_embedded_section(self, stream):
""" Reads the *stream* line by line until the end of an
embedded section (``]``) is found. This method is used
to skip over embedded sections of already known
components.
"""
pos = stream.tell()
typ = stream.readline().split(self.DELIMITER, 1)[0].strip()
## return with stream reset to previous position if not
## an embedded section
if typ != '[':
stream.seek(pos)
return
while typ != ']':
typ = stream.readline().split(self.DELIMITER, 1)[0].strip()
def get_netpoint(self, x, y):
""" Creates a new NetPoint at coordinates *x*,*y* and stores
it in the net point lookup table. If a NetPoint does already
exist, the existing point is returned.
Returns a NetPoint object at coordinates *x*,*y*
"""
if (x, y) not in self.net_points:
self.net_points[(x, y)] = net.NetPoint('%da%d' % (x, y), x, y)
return self.net_points[(x, y)]
@staticmethod
def intersects_segment(segment, pt_c):
""" Checks if point *pt_c* lays on the *segment*. This code is
adapted from the kiCAD parser.
Returns True if *pt_c* is on *segment*, False otherwise.
"""
pt_a, pt_b = segment
#check vertical segment
if pt_a.x == pt_b.x == pt_c.x:
if min(pt_a.y, pt_b.y) < pt_c.y < max(pt_a.y, pt_b.y):
return True
        #check horizontal segment
elif pt_a.y == pt_b.y == pt_c.y:
if min(pt_a.x, pt_b.x) < pt_c.x < max(pt_a.x, pt_b.x):
return True
#check diagonal segment
elif (pt_c.x-pt_a.x)*(pt_b.y-pt_a.y) == (pt_b.x-pt_a.x)*(pt_c.y-pt_a.y):
if min(pt_a.x, pt_b.x) < pt_c.x < max(pt_a.x, pt_b.x):
return True
## point C not on segment
return False
def _parse_environment(self, stream):
""" Checks if attribute environment starts in the next line
(marked by '{'). Environment only contains text elements
interpreted as text.
Returns a dictionary of attributes.
"""
current_pos = stream.tell()
typ, params = self._parse_command(stream)
#go back to previous position when no environment in stream
if typ != '{':
stream.seek(current_pos)
return None
typ, params = self._parse_command(stream)
attributes = {}
while typ is not None:
if typ == 'T':
key, value = self._parse_text(stream, params)
attributes[key] = value
typ, params = self._parse_command(stream)
return attributes
def calculate_nets(self):
""" Calculate connected nets from previously stored segments
and netpoints. The code has been adapted from the kiCAD
            parser since the definition of segments in the schematic
file are similar. The segments are checked against
existing nets and added when they touch it. For this
to work, it is required that intersecting segments are
divided prior to this method.
Returns a list of valid nets and its net points.
"""
nets = []
# Iterate over the segments, removing segments when added to a net
while self.segments:
            seg = self.segments.pop() # pick a segment
net_name = ''
pt_a, pt_b = seg
if pt_a.point_id in self.net_names:
net_name = self.net_names[pt_a.point_id]
elif pt_b.point_id in self.net_names:
net_name = self.net_names[pt_b.point_id]
new_net = net.Net(net_name)
new_net.attributes['_name'] = net_name
new_net.connect(seg)
found = True
while found:
found = set()
for seg in self.segments: # iterate over segments
if new_net.connected(seg): # segment touching the net
new_net.connect(seg) # add the segment
found.add(seg)
for seg in found:
self.segments.remove(seg)
nets.append(new_net)
## check if names are available for calculated nets
for net_obj in nets:
for point_id in net_obj.points:
## check for stored net names based on pointIDs
if point_id in self.net_names:
net_obj.net_id = self.net_names[point_id]
net_obj.attributes['_name'] = self.net_names[point_id]
if '_name' in net_obj.attributes:
annotation = Annotation(
"{{_name}}", ## annotation referencing attribute '_name'
0, 0,
self.conv_angle(0.0),
self.conv_bool(1),
)
net_obj.add_annotation(annotation)
return nets
def _open_file_or_zip(self, filename, mode='rU'):
if self.geda_zip is not None:
temp_dir = tempfile.mkdtemp()
self.geda_zip.extract(filename, temp_dir)
filename = os.path.join(temp_dir, filename)
return open(filename, mode)
def _parse_bus(self, params):
""" Processing a bus instance with start end end coordinates
at (x1, y1) and (x2, y2). *color* is ignored. *ripperdir*
defines the direction in which the bus rippers are oriented
relative to the direction of the bus.
"""
x1, x2 = params['x1'], params['x2']
y1, y2 = params['y1'], params['y2']
## ignore bus when length is zero
if x1 == x2 and y1 == y2:
return
pta_x, pta_y = self.conv_coords(x1, y1)
ptb_x, ptb_y = self.conv_coords(x2, y2)
self.segments.add((
self.get_netpoint(pta_x, pta_y),
self.get_netpoint(ptb_x, ptb_y)
))
def _parse_segment(self, stream, params):
""" Creates a segment from the command *params* and
stores it in the global segment list for further
processing in :py:method:divide_segments and
:py:method:calculate_nets. It also extracts the
net name from the attribute environment if
present.
"""
        ## store segment for processing later
x1, y1 = self.conv_coords(params['x1'], params['y1'])
x2, y2 = self.conv_coords(params['x2'], params['y2'])
## store segment points in global point list
pt_a = self.get_netpoint(x1, y1)
pt_b = self.get_netpoint(x2, y2)
## add segment to global list for later processing
self.segments.add((pt_a, pt_b))
attributes = self._parse_environment(stream)
if attributes is not None:
## create net with name in attributes
            if '_netname' in attributes:
net_name = attributes['_netname']
if net_name not in self.net_names.values():
self.net_names[pt_a.point_id] = net_name
def _parse_path(self, stream, params, mirrored=False):
""" Parses a SVG-like path provided path into a list
of simple shapes. The gEDA formats allows only line
and curve segments with absolute coordinates. Hence,
shapes are either Line or BezierCurve objects.
The method processes the stream data according to
the number of lines in *params*.
Returns a list of Line and BezierCurve shapes.
"""
num_lines = params['num_lines']
command = stream.readline().strip().split(self.DELIMITER)
if command[0] != 'M':
raise GEDAError('found invalid path in gEDA file')
def get_coords(string, mirrored):
""" Get coordinates from string with comma-sparated notation."""
x, y = [int(value) for value in string.strip().split(',')]
if mirrored:
x = 0-x
return (self.x_to_px(x), self.y_to_px(y))
shapes = []
current_pos = initial_pos = (get_coords(command[1], mirrored))
## loop over the remaining lines of commands (after 'M')
for dummy in range(num_lines-1):
command = stream.readline().strip().split(self.DELIMITER)
## draw line from current to given position
if command[0] == 'L':
assert(len(command) == 2)
end_pos = get_coords(command[1], mirrored)
line = shape.Line(current_pos, end_pos)
shapes.append(line)
current_pos = end_pos
## draw curve from current to given position
elif command[0] == 'C':
assert(len(command) == 4)
control1 = get_coords(command[1], mirrored)
control2 = get_coords(command[2], mirrored)
end_pos = get_coords(command[3], mirrored)
curve = shape.BezierCurve(
control1,
control2,
current_pos,
end_pos
)
shapes.append(curve)
current_pos = end_pos
## end of sub-path, straight line from current to initial position
elif command[0] in ['z', 'Z']:
shapes.append(
shape.Line(current_pos, initial_pos)
)
else:
raise GEDAError(
"invalid command type in path '%s'" % command[0]
)
return shapes
def _parse_arc(self, params, mirrored=False):
""" Creates an Arc object from the parameter in *params*. All
style related parameters are ignored.
Returns Arc object.
"""
arc_x = params['x']
start_angle = params['startangle']
sweep_angle = params['sweepangle']
if mirrored:
arc_x = 0 - arc_x
start_angle = start_angle + sweep_angle
if start_angle <= 180:
start_angle = 180 - start_angle
else:
start_angle = (360 - start_angle) + 180
return shape.Arc(
self.x_to_px(arc_x),
self.y_to_px(params['y']),
self.conv_angle(start_angle),
self.conv_angle(start_angle+sweep_angle),
self.to_px(params['radius']),
)
def _parse_line(self, params, mirrored=None):
""" Creates a Line object from the parameters in *params*. All
style related parameters are ignored.
Returns a Line object.
"""
line_x1 = params['x1']
line_x2 = params['x2']
if mirrored:
line_x1 = 0 - params['x1']
line_x2 = 0 - params['x2']
return shape.Line(
self.conv_coords(line_x1, params['y1']),
self.conv_coords(line_x2, params['y2']),
)
def _parse_box(self, params, mirrored=False):
""" Creates rectangle from gEDA box with origin in bottom left
corner. All style related values are ignored.
Returns a Rectangle object.
"""
rect_x = params['x']
if mirrored:
rect_x = 0-(rect_x+params['width'])
return shape.Rectangle(
self.x_to_px(rect_x),
self.y_to_px(params['y']+params['height']),
self.to_px(params['width']),
self.to_px(params['height'])
)
def _parse_circle(self, params, mirrored=False):
""" Creates a Circle object from the gEDA parameters in *params. All
style related parameters are ignored.
Returns a Circle object.
"""
vertex_x = params['x']
if mirrored:
vertex_x = 0-vertex_x
return shape.Circle(
self.x_to_px(vertex_x),
self.y_to_px(params['y']),
self.to_px(params['radius']),
| |
information can be extracted from other job object fields.
In the case of ATLAS, information is extracted from the metadata field and
added to other job object fields.
:param job: job object
:return:
"""
## comment from Alexey:
    ## it would be better to relocate this logic (as well as the parsing of
    ## metadata values) directly to the Job object since in general it is a
    ## Job-related part. Later on, once we introduce a VO-specific Job class
    ## (inherited from JobData), this can be easily customized
# get label "all" or "log"
stageout = get_stageout_label(job)
if 'exeErrorDiag' in job.metadata:
job.exeerrordiag = job.metadata['exeErrorDiag']
if job.exeerrordiag:
logger.warning('payload failed: exeErrorDiag=%s', job.exeerrordiag)
# determine what should be staged out
job.stageout = stageout # output and log file or only log file
work_attributes = None
try:
work_attributes = parse_jobreport_data(job.metadata)
except Exception as exc:
logger.warning('failed to parse job report (cannot set job.nevents): %s', exc)
else:
# note: the number of events can be set already at this point
# if the value was extracted from the job report (a more thorough
# search for this value is done later unless it was set here)
nevents = work_attributes.get('nEvents', 0)
if nevents:
job.nevents = nevents
# extract output files from the job report if required, in case the trf
# has created additional (overflow) files. Also make sure all guids are
# assigned (use job report value if present, otherwise generate the guid)
if job.metadata and not job.is_eventservice:
# keep this for now, complicated to merge with verify_output_files?
extract_output_file_guids(job)
try:
verify_output_files(job)
except Exception as exc:
logger.warning('exception caught while trying verify output files: %s', exc)
else:
if not job.allownooutput: # i.e. if it's an empty list/string, do nothing
logger.debug((
"will not try to extract output files from jobReport "
"for user job (and allowNoOut list is empty)"))
else:
# remove the files listed in allowNoOutput if they don't exist
remove_no_output_files(job)
## validate output data (to be moved into the JobData)
    ## warning: do not execute this code unless guid lookup in the job report
    ## has failed - the pilot should only generate guids
    ## if they are not present in the job report
for dat in job.outdata:
if not dat.guid:
dat.guid = get_guid()
logger.warning(
'guid not set: generated guid=%s for lfn=%s',
dat.guid,
dat.lfn
)
def get_stageout_label(job):
"""
Get a proper stage-out label.
:param job: job object.
:return: "all"/"log" depending on stage-out type (string).
"""
stageout = "all"
if job.is_eventservice:
logger.info('event service payload, will only stage-out log')
stageout = "log"
else:
# handle any error codes
if 'exeErrorCode' in job.metadata:
job.exeerrorcode = job.metadata['exeErrorCode']
if job.exeerrorcode == 0:
stageout = "all"
else:
logger.info('payload failed: exeErrorCode=%d', job.exeerrorcode)
stageout = "log"
return stageout
def update_output_for_hpo(job):
"""
Update the output (outdata) for HPO jobs.
:param job: job object.
:return:
"""
try:
new_outdata = discover_new_outdata(job)
except Exception as exc:
logger.warning('exception caught while discovering new outdata: %s', exc)
else:
if new_outdata:
logger.info((
'replacing job outdata with discovered output '
'(%d file(s))'), len(new_outdata))
job.outdata = new_outdata
def discover_new_outdata(job):
"""
Discover new outdata created by HPO job.
:param job: job object.
:return: new_outdata (list of FileSpec objects)
"""
from pilot.info.filespec import FileSpec
new_outdata = []
for outdata_file in job.outdata:
new_output = discover_new_output(outdata_file.lfn, job.workdir)
if new_output:
# create new FileSpec objects out of the new output
for outfile in new_output:
# note: guid will be taken from job report
# after this function has been called
files = [{
'scope': outdata_file.scope,
'lfn': outfile,
'workdir': job.workdir,
'dataset': outdata_file.dataset,
'ddmendpoint': outdata_file.ddmendpoint,
'ddmendpoint_alt': None,
'filesize': new_output[outfile]['filesize'],
'checksum': new_output[outfile]['checksum'],
'guid': ''
}]
# do not abbreviate the following two lines as otherwise
# the content of xfiles will be a list of generator objects
_xfiles = [FileSpec(type='output', **f) for f in files]
new_outdata += _xfiles
return new_outdata
def discover_new_output(name_pattern, workdir):
"""
Discover new output created by HPO job in the given work dir.
name_pattern for known 'filename' is 'filename_N' (N = 0, 1, 2, ..).
Example: name_pattern = 23578835.metrics.000001.tgz
should discover files with names 23578835.metrics.000001.tgz_N (N = 0, 1, ..)
new_output = { lfn: {'path': path, 'size': size, 'checksum': checksum}, .. }
:param name_pattern: assumed name pattern for file to discover (string).
:param workdir: work directory (string).
:return: new_output (dictionary).
"""
new_output = {}
outputs = glob("%s/%s_*" % (workdir, name_pattern))
if outputs:
lfns = [os.path.basename(path) for path in outputs]
for lfn, path in list(zip(lfns, outputs)):
# get file size
filesize = get_local_file_size(path)
# get checksum
try:
checksum = calculate_checksum(path)
except (FileHandlingFailure, NotImplementedError, Exception) as exc:
logger.warning(
'failed to create file info (filesize=%d) for lfn=%s: %s',
filesize,
lfn,
exc
)
else:
if filesize and checksum:
new_output[lfn] = {'path': path, 'filesize': filesize, 'checksum': checksum}
else:
logger.warning(
'failed to create file info (filesize=%d, checksum=%s) for lfn=%s',
filesize,
checksum,
lfn
)
return new_output
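# An illustrative sketch (hypothetical helper, not pilot production code): create two
# overflow files matching the '<name>_N' pattern in a scratch directory and let
# discover_new_output() pick them up; the expected keys are the two '_0'/'_1' file names.
def _demo_discover_new_output():
    import tempfile
    scratch = tempfile.mkdtemp()
    for i in range(2):
        with open(os.path.join(scratch, '23578835.metrics.000001.tgz_%d' % i), 'w') as fh:
            fh.write('dummy payload')
    return discover_new_output('23578835.metrics.000001.tgz', scratch)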
def extract_output_file_guids(job):
"""
Extract output file info from the job report and make sure all guids\
are assigned (use job report value if present, otherwise generate the guid.\
Note: guid generation is done later, not in this function since
this function might not be called if metadata info is not found prior
to the call).
:param job: job object.
:return:
"""
# make sure there is a defined output file list in the job report -
# unless it is allowed by task parameter allowNoOutput
if not job.allownooutput:
output = job.metadata.get('files', {}).get('output', [])
if output:
logger.info((
'verified that job report contains metadata '
'for %d file(s)'), len(output))
else:
#- will fail job since allowNoOutput is not set')
logger.warning((
'job report contains no output '
'files and allowNoOutput is not set'))
#job.piloterrorcodes, job.piloterrordiags =
# errors.add_error_code(errors.NOOUTPUTINJOBREPORT)
return
# extract info from metadata (job report JSON)
data = dict([e.lfn, e] for e in job.outdata)
#extra = []
for dat in job.metadata.get('files', {}).get('output', []):
for fdat in dat.get('subFiles', []):
lfn = fdat['name']
# verify the guid if the lfn is known
# only extra guid if the file is known by the
# job definition (March 18 change, v 2.5.2)
if lfn in data:
data[lfn].guid = fdat['file_guid']
logger.info((
'set guid=%s for lfn=%s '
'(value taken from job report)'), data[lfn].guid, lfn)
else: # found new entry
logger.warning((
'pilot no longer considers output files not mentioned '
'in job definition (lfn=%s)'), lfn)
continue
#if job.outdata:
# kw = {'lfn': lfn,
# . # take value from 1st output file?
# 'scope': job.outdata[0].scope,
# 'guid': fdat['file_guid'],
# 'filesize': fdat['file_size'],
# # take value from 1st output file?
# 'dataset': dat.get('dataset') or job.outdata[0].dataset
# }
# spec = FileSpec(filetype='output', **kw)
# extra.append(spec)
# make sure the output list has set guids from job report
for fspec in job.outdata:
if fspec.guid != data[fspec.lfn].guid:
fspec.guid = data[fspec.lfn].guid
logger.debug('reset guid=%s for lfn=%s', fspec.guid, fspec.lfn)
else:
if fspec.guid:
logger.debug('verified guid=%s for lfn=%s', fspec.guid, fspec.lfn)
else:
logger.warning('guid not set for lfn=%s', fspec.lfn)
#if extra:
#logger.info('found extra output files in job report,
# will overwrite output file list: extra=%s' % extra)
#job.outdata = extra
def verify_output_files(job):
"""
Make sure that the known output files from the job definition are listed
in the job report and number of processed events is greater than zero.
If the output file is not listed in the job report, then if the file is
listed in allowNoOutput remove it from stage-out, otherwise fail the job.
Note from Rod: fail scenario: The output file is not in output:[] or is
there with zero events. Then if allownooutput is not set - fail the job.
If it is set, then do not store the output, and finish ok.
:param job: job object.
:return: Boolean (and potentially updated job.outdata list)
"""
failed = False
# get list of output files from the job definition
lfns_jobdef = []
for fspec in job.outdata:
lfns_jobdef.append(fspec.lfn)
if not lfns_jobdef:
logger.debug('empty output file list from job definition (nothing to verify)')
return True
# get list of output files from job report
# (if None is returned, it means the job report | |
"""Util functions for CIFAR10/100."""
from collections import OrderedDict
from pathlib import Path
from typing import Callable, Dict, List, Optional, Tuple
import matplotlib.pyplot as plt
import numpy as np
import numpy.typing as npt
import torch
from flwr.common.parameter import weights_to_parameters
from flwr.common.typing import Parameters, Scalar, Weights
from flwr.dataset.utils.common import (
XY,
create_lda_partitions,
shuffle,
sort_by_label,
split_array_at_indices,
)
from flwr.server.history import History
from PIL import Image
from torch import Tensor, load
from torch.nn import GroupNorm, Module
from torch.utils.data import DataLoader, Dataset
from torchvision.datasets import CIFAR10, CIFAR100
from torchvision.models import ResNet, resnet18
from torchvision.transforms import (
CenterCrop,
Compose,
Normalize,
RandomCrop,
RandomHorizontalFlip,
ToTensor,
)
CIFAR100_NUM_COARSE_CLASSES = 20
CIFAR100_NUM_FINE_CLASSES = 5
cifar100_coarse_to_real = [
[4, 30, 55, 72, 95],
[1, 32, 67, 73, 91],
[54, 62, 70, 82, 92],
[9, 10, 16, 28, 61],
[0, 51, 53, 57, 83],
[22, 39, 40, 86, 87],
[5, 20, 25, 84, 94],
[6, 7, 14, 18, 24],
[3, 42, 43, 88, 97],
[12, 17, 37, 68, 76],
[23, 33, 49, 60, 71],
[15, 19, 21, 31, 38],
[34, 63, 64, 66, 75],
[26, 45, 77, 79, 99],
[2, 11, 35, 46, 98],
[27, 29, 44, 78, 93],
[36, 50, 65, 74, 80],
[47, 52, 56, 59, 96],
[8, 13, 48, 58, 90],
[41, 69, 81, 85, 89],
]
# fmt: off
cifar100_real_to_coarse = [
4, 1, 14, 8, 0, 6, 7, 7, 18, 3,
3, 14, 9, 18, 7, 11, 3, 9, 7, 11,
6, 11, 5, 10, 7, 6, 13, 15, 3, 15,
0, 11, 1, 10, 12, 14, 16, 9, 11, 5,
5, 19, 8, 8, 15, 13, 14, 17, 18, 10,
16, 4, 17, 4, 2, 0, 17, 4, 18, 17,
10, 3, 2, 12, 12, 16, 12, 1, 9, 19,
2, 10, 0, 1, 16, 12, 9, 13, 15, 13,
16, 19, 2, 4, 6, 19, 5, 5, 8, 19,
18, 1, 2, 15, 6, 0, 17, 8, 14, 13,
]
# fmt: on
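# A small sanity-check sketch (an addition, not part of the original module): the two
# lookup tables above are meant to be inverses of each other, i.e. every fine label
# listed under a coarse class must map back to that same coarse class.
def _check_cifar100_label_maps() -> bool:
    for coarse, fine_labels in enumerate(cifar100_coarse_to_real):
        for fine in fine_labels:
            if cifar100_real_to_coarse[fine] != coarse:
                return False
    return True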
# transforms
def get_transforms(num_classes: int = 10) -> Dict[str, Compose]:
"""Returns the right Transform Compose for both train and evaluation.
Args:
num_classes (int, optional): Defines whether CIFAR10 or CIFAR100. Defaults to 10.
Returns:
Dict[str, Compose]: Dictionary with 'train' and 'test' keywords and Transforms
for each
"""
normalize_cifar10 = Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010))
normalize_cifar100 = Normalize((0.5071, 0.4865, 0.4409), (0.2673, 0.2564, 0.2762))
normalize_cifar = normalize_cifar10 if num_classes == 10 else normalize_cifar100
train_transform = Compose(
[RandomCrop(24), RandomHorizontalFlip(), ToTensor(), normalize_cifar]
)
test_transform = Compose([CenterCrop(24), ToTensor(), normalize_cifar])
return {"train": train_transform, "test": test_transform}
def get_cifar_model(num_classes: int = 10) -> Module:
"""Generates ResNet18 model using GroupNormalization rather than
BatchNormalization. Two groups are used.
Args:
num_classes (int, optional): Number of classes {10,100}. Defaults to 10.
Returns:
Module: ResNet18 network.
"""
model: ResNet = resnet18(
norm_layer=lambda x: GroupNorm(2, x), num_classes=num_classes
)
return model
class ClientDataset(Dataset):
"""Client Dataset."""
def __init__(self, path_to_data: Path, transform: Compose = None):
"""Implements local dataset.
Args:
path_to_data (Path): Path to local '.pt' file is located.
transform (Compose, optional): Transforms to be used when sampling.
Defaults to None.
"""
super().__init__()
self.transform = transform
self.inputs, self.labels = load(path_to_data)
def __len__(self) -> int:
"""Size of the dataset.
Returns:
int: Number of samples in the dataset.
"""
return len(self.labels)
def __getitem__(self, idx: int) -> Tuple[Tensor, int]:
"""Fetches item in dataset.
Args:
idx (int): Position of item being fetched.
Returns:
Tuple[Tensor, int]: Tensor image and respective label
"""
this_input = Image.fromarray(self.inputs[idx])
this_label = self.labels[idx]
if self.transform:
this_input = self.transform(this_input)
return this_input, this_label
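# A minimal usage sketch (the directory layout is assumed, following save_partitions()
# below): pair one client's partition file with the matching train-time transforms and
# wrap it in a DataLoader for local training.
def _example_client_loader(fed_dir: Path, client_id: int = 0) -> DataLoader:
    train_set = ClientDataset(
        path_to_data=fed_dir / f"{client_id}" / "train.pt",
        transform=get_transforms(num_classes=10)["train"],
    )
    return DataLoader(train_set, batch_size=32, shuffle=True)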
def save_partitions(
list_partitions: List[XY], fed_dir: Path, partition_type: str = "train"
):
"""Saves partitions to individual files.
Args:
        list_partitions (List[XY]): List of partitions to be saved
fed_dir (Path): Root directory where to save partitions.
partition_type (str, optional): Partition type ("train" or "test"). Defaults to "train".
"""
for idx, partition in enumerate(list_partitions):
path_dir = fed_dir / f"{idx}"
path_dir.mkdir(exist_ok=True, parents=True)
torch.save(partition, path_dir / f"{partition_type}.pt")
def partition_cifar10_and_save(
dataset: XY,
fed_dir: Path,
dirichlet_dist: Optional[npt.NDArray[np.float32]] = None,
num_partitions: int = 500,
concentration: float = 0.1,
) -> np.ndarray:
"""Creates and saves partitions for CIFAR10.
Args:
dataset (XY): Original complete dataset.
fed_dir (Path): Root directory where to save partitions.
dirichlet_dist (Optional[npt.NDArray[np.float32]], optional):
Pre-defined distributions to be used for sampling if exist. Defaults to None.
num_partitions (int, optional): Number of partitions. Defaults to 500.
concentration (float, optional): Alpha value for Dirichlet. Defaults to 0.1.
Returns:
np.ndarray: Generated dirichlet distributions.
"""
# Create partitions
clients_partitions, dist = create_lda_partitions(
dataset=dataset,
dirichlet_dist=dirichlet_dist,
num_partitions=num_partitions,
concentration=concentration,
)
    # Save partitions
save_partitions(list_partitions=clients_partitions, fed_dir=fed_dir)
return dist
def gen_cifar10_partitions(
path_original_dataset: Path,
dataset_name: str,
num_total_clients: int,
lda_concentration: float,
) -> Path:
"""Defines root path for partitions and calls functions to create them.
Args:
path_original_dataset (Path): Path to original (unpartitioned) dataset.
dataset_name (str): Friendly name to dataset.
num_total_clients (int): Number of clients.
        lda_concentration (float): Concentration (alpha) used when generating
            Dirichlet distributions.
Returns:
        Path: Root directory where the generated partitions are saved.
"""
fed_dir = (
path_original_dataset
/ f"{dataset_name}"
/ "partitions"
/ f"{num_total_clients}"
/ f"{lda_concentration:.2f}"
)
trainset = CIFAR10(root=path_original_dataset, train=True, download=True)
flwr_trainset = (trainset.data, np.array(trainset.targets, dtype=np.int32))
partition_cifar10_and_save(
dataset=flwr_trainset,
fed_dir=fed_dir,
dirichlet_dist=None,
num_partitions=num_total_clients,
concentration=lda_concentration,
)
return fed_dir
def shuffle_and_create_cifar100_lda_dists(
dataset: XY,
lda_concentration_coarse: float,
lda_concentration_fine: float,
num_partitions: int,
) -> Tuple[np.ndarray, np.ndarray, np.ndarray, np.ndarray]:
"""Shuffles the original dataset and creates the two-level LDA
distributions.
Args:
dataset (XY): original dataset in XY format
lda_concentration_coarse (float): Concentration for coarse (first) level
lda_concentration_fine (float): Concentration for coarse (second) level
num_partitions (int): Number of partitions
Returns:
        Tuple[np.ndarray, np.ndarray, np.ndarray, np.ndarray]: organized lists of the
        yet-to-be-partitioned dataset and the two LDA distributions.
"""
x_orig, y_orig = shuffle(dataset[0], np.array(dataset[1], dtype=np.int32))
x_orig, y_orig = sort_by_label(x_orig, y_orig)
_, start_idx = np.unique(y_orig, return_index=True)
x_list = split_array_at_indices(x_orig, start_idx)
y_list = split_array_at_indices(y_orig, start_idx)
lda_concentration_coarse_vector = np.repeat(
lda_concentration_coarse, CIFAR100_NUM_COARSE_CLASSES
)
lda_concentration_fine_vector = np.repeat(
lda_concentration_fine, CIFAR100_NUM_FINE_CLASSES
)
coarse_dist = np.random.default_rng().dirichlet(
alpha=lda_concentration_coarse_vector, size=num_partitions
)
fine_dist = np.random.default_rng().dirichlet(
alpha=lda_concentration_fine_vector,
size=(num_partitions, CIFAR100_NUM_COARSE_CLASSES),
)
return x_list, y_list, coarse_dist, fine_dist
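# Shape note (follows directly from the code above): coarse_dist has shape
# (num_partitions, CIFAR100_NUM_COARSE_CLASSES) and fine_dist has shape
# (num_partitions, CIFAR100_NUM_COARSE_CLASSES, CIFAR100_NUM_FINE_CLASSES),
# i.e. one fine-label distribution per client and per coarse class.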
def partition_cifar100_and_save(
dataset: XY,
fed_dir: Path,
num_partitions: int,
lda_concentration_coarse: float,
lda_concentration_fine: float,
):
# pylint: disable-msg=too-many-locals
"""Partitions CIFAR100 and saves local datasets.
Args:
dataset (XY): Dataset to be partitioned
fed_dir (Path): Root directory where to save partitions
num_partitions (int): Number of partitions
        lda_concentration_coarse (float): Concentration for the higher-level (coarse) classes
lda_concentration_fine (float): Concentration for fine labels.
"""
x_list, y_list, coarse_dist, fine_dist = shuffle_and_create_cifar100_lda_dists(
dataset, lda_concentration_coarse, lda_concentration_fine, num_partitions
)
# Assuming balanced distribution
len_dataset = len(dataset[1])
remaining_samples_counter = (len_dataset // 100) * np.ones(
(CIFAR100_NUM_COARSE_CLASSES, CIFAR100_NUM_FINE_CLASSES)
)
partitions = []
for client_id in range(num_partitions):
x_this_client, y_this_client = [], []
for _ in range(len_dataset // num_partitions):
coarse_class = np.random.choice(
CIFAR100_NUM_COARSE_CLASSES, p=coarse_dist[client_id]
)
fine_class = np.random.choice(
CIFAR100_NUM_FINE_CLASSES, p=fine_dist[client_id][coarse_class]
)
real_class = cifar100_coarse_to_real[coarse_class][fine_class]
# obtain sample
sample_x: np.ndarray = x_list[real_class][0]
x_list[real_class] = np.delete(x_list[real_class], 0, 0)
sample_y: np.ndarray = y_list[real_class][0]
y_list[real_class] = np.delete(y_list[real_class], 0, 0)
x_this_client.append(sample_x)
y_this_client.append(sample_y)
            # Update the remaining-sample counter and renormalize the
            # distributions once a fine class is exhausted
remaining_samples_counter[coarse_class, fine_class] -= 1
if remaining_samples_counter[coarse_class, fine_class] == 0:
for k in range(num_partitions):
fine_dist[k][coarse_class][fine_class] = 0.0
norm_factor = np.sum(fine_dist[k][coarse_class])
if norm_factor > 0:
fine_dist[k][coarse_class] = (
fine_dist[k][coarse_class] / norm_factor
)
# Check coarse class is empty
if np.sum(remaining_samples_counter[coarse_class]) == 0:
for k in range(num_partitions):
coarse_dist[k][coarse_class] = 0.0
norm_factor = np.sum(coarse_dist[k])
if norm_factor > 0.0:
coarse_dist[k] = coarse_dist[k] / norm_factor
partitions.append(
(np.array(x_this_client), np.array(y_this_client, dtype=np.int64))
)
save_partitions(list_partitions=partitions, fed_dir=fed_dir, partition_type="train")
def gen_cifar100_partitions(
path_original_dataset: Path,
dataset_name: str,
num_total_clients: int,
lda_concentration_coarse: float,
lda_concentration_fine: float,
) -> Path:
"""Generates CIFAR100 partitions and return root directory where the
partitions are.
Args:
path_original_dataset (Path): Path to original dataset
dataset_name (str): Dataset name
num_total_clients (int): Number of total clients/partitions
lda_concentration_coarse (float): Concentration for first level LDA
lda_concentration_fine (float): Concentration for second level LDA
Returns:
Path: Path to where partitions are saved
"""
fed_dir = (
path_original_dataset
/ f"{dataset_name}"
/ "partitions"
/ f"{num_total_clients}"
/ f"{lda_concentration_coarse:.2f}_{lda_concentration_fine:.2f}"
)
trainset = CIFAR100(root=path_original_dataset, train=True, download=True)
trainset_xy = (trainset.data, trainset.targets)
partition_cifar100_and_save(
dataset=trainset_xy,
fed_dir=fed_dir,
num_partitions=num_total_clients,
lda_concentration_coarse=lda_concentration_coarse,
lda_concentration_fine=lda_concentration_fine,
)
return fed_dir
def train(
net: Module,
trainloader: DataLoader,
epochs: int,
device: str,
learning_rate: float = 0.01,
) -> None:
"""Train the network on the training set."""
criterion = torch.nn.CrossEntropyLoss()
optimizer = torch.optim.SGD(net.parameters(), lr=learning_rate)
net.train()
for _ in range(epochs):
for images, labels in trainloader:
images, labels = images.to(device), labels.to(device)
optimizer.zero_grad()
loss = criterion(net(images), labels)
loss.backward()
optimizer.step()
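# Minimal usage sketch (not part of the original module; the model and data
# loaders below are assumptions):
#
#   net = Net().to("cuda")      # any torch.nn.Module suited to CIFAR10
#   train(net, trainloader, epochs=1, device="cuda", learning_rate=0.01)
#   loss, acc = test(net, testloader, device="cuda")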
def test(net: Module, testloader: DataLoader, device: str) -> Tuple[float, float]:
"""Validate the network on the entire test set."""
criterion = torch.nn.CrossEntropyLoss()
correct, total, loss = 0, 0, 0.0
net.eval()
    with torch.no_grad():
        for images, labels in testloader:
            images, labels = images.to(device), labels.to(device)
            outputs = net(images)
            loss += criterion(outputs, labels).item()
            correct += (outputs.argmax(dim=1) == labels).sum().item()
            total += labels.size(0)
    accuracy = correct / total
    return loss, accuracy
# coding: utf-8
# Copyright (c) 2016, 2022, Oracle and/or its affiliates. All rights reserved.
# This software is dual-licensed to you under the Universal Permissive License (UPL) 1.0 as shown at https://oss.oracle.com/licenses/upl or Apache License 2.0 as shown at http://www.apache.org/licenses/LICENSE-2.0. You may choose either license.
import oci # noqa: F401
from oci.util import WAIT_RESOURCE_NOT_FOUND # noqa: F401
class DataSafeClientCompositeOperations(object):
"""
This class provides a wrapper around :py:class:`~oci.data_safe.DataSafeClient` and offers convenience methods
for operations that would otherwise need to be chained together. For example, instead of performing an action
on a resource (e.g. launching an instance, creating a load balancer) and then using a waiter to wait for the resource
to enter a given state, you can call a single method in this class to accomplish the same functionality
"""
def __init__(self, client, **kwargs):
"""
Creates a new DataSafeClientCompositeOperations object
:param DataSafeClient client:
The service client which will be wrapped by this object
"""
self.client = client
def activate_target_database_and_wait_for_state(self, activate_target_database_details, target_database_id, wait_for_states=[], operation_kwargs={}, waiter_kwargs={}):
"""
Calls :py:func:`~oci.data_safe.DataSafeClient.activate_target_database` and waits for the :py:class:`~oci.data_safe.models.WorkRequest`
to enter the given state(s).
:param oci.data_safe.models.ActivateTargetDatabaseDetails activate_target_database_details: (required)
The details used to reactivate a target database in Data Safe.
:param str target_database_id: (required)
The OCID of the Data Safe target database.
:param list[str] wait_for_states:
An array of states to wait on. These should be valid values for :py:attr:`~oci.data_safe.models.WorkRequest.status`
:param dict operation_kwargs:
A dictionary of keyword arguments to pass to :py:func:`~oci.data_safe.DataSafeClient.activate_target_database`
:param dict waiter_kwargs:
            A dictionary of keyword arguments to pass to the :py:func:`oci.wait_until` function. For example, you could pass ``max_interval_seconds`` or ``max_wait_seconds``
as dictionary keys to modify how long the waiter function will wait between retries and the maximum amount of time it will wait
"""
operation_result = self.client.activate_target_database(activate_target_database_details, target_database_id, **operation_kwargs)
if not wait_for_states:
return operation_result
lowered_wait_for_states = [w.lower() for w in wait_for_states]
wait_for_resource_id = operation_result.headers['opc-work-request-id']
try:
waiter_result = oci.wait_until(
self.client,
self.client.get_work_request(wait_for_resource_id),
evaluate_response=lambda r: getattr(r.data, 'status') and getattr(r.data, 'status').lower() in lowered_wait_for_states,
**waiter_kwargs
)
result_to_return = waiter_result
return result_to_return
except Exception as e:
raise oci.exceptions.CompositeOperationError(partial_results=[operation_result], cause=e)
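    # Illustrative usage sketch (not part of the generated SDK; the OCID and
    # state string below are placeholders):
    #
    #   composite = DataSafeClientCompositeOperations(oci.data_safe.DataSafeClient(config))
    #   composite.activate_target_database_and_wait_for_state(
    #       activate_details, "ocid1.datasafetargetdatabase.oc1..example",
    #       wait_for_states=["SUCCEEDED"])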
def add_masking_columns_from_sdm_and_wait_for_state(self, masking_policy_id, wait_for_states=[], operation_kwargs={}, waiter_kwargs={}):
"""
Calls :py:func:`~oci.data_safe.DataSafeClient.add_masking_columns_from_sdm` and waits for the :py:class:`~oci.data_safe.models.WorkRequest`
to enter the given state(s).
:param str masking_policy_id: (required)
The OCID of the masking policy.
:param list[str] wait_for_states:
An array of states to wait on. These should be valid values for :py:attr:`~oci.data_safe.models.WorkRequest.status`
:param dict operation_kwargs:
A dictionary of keyword arguments to pass to :py:func:`~oci.data_safe.DataSafeClient.add_masking_columns_from_sdm`
:param dict waiter_kwargs:
            A dictionary of keyword arguments to pass to the :py:func:`oci.wait_until` function. For example, you could pass ``max_interval_seconds`` or ``max_wait_seconds``
as dictionary keys to modify how long the waiter function will wait between retries and the maximum amount of time it will wait
"""
operation_result = self.client.add_masking_columns_from_sdm(masking_policy_id, **operation_kwargs)
if not wait_for_states:
return operation_result
lowered_wait_for_states = [w.lower() for w in wait_for_states]
wait_for_resource_id = operation_result.headers['opc-work-request-id']
try:
waiter_result = oci.wait_until(
self.client,
self.client.get_work_request(wait_for_resource_id),
evaluate_response=lambda r: getattr(r.data, 'status') and getattr(r.data, 'status').lower() in lowered_wait_for_states,
**waiter_kwargs
)
result_to_return = waiter_result
return result_to_return
except Exception as e:
raise oci.exceptions.CompositeOperationError(partial_results=[operation_result], cause=e)
def apply_discovery_job_results_and_wait_for_state(self, sensitive_data_model_id, apply_discovery_job_results_details, wait_for_states=[], operation_kwargs={}, waiter_kwargs={}):
"""
Calls :py:func:`~oci.data_safe.DataSafeClient.apply_discovery_job_results` and waits for the :py:class:`~oci.data_safe.models.WorkRequest`
to enter the given state(s).
:param str sensitive_data_model_id: (required)
The OCID of the sensitive data model.
:param oci.data_safe.models.ApplyDiscoveryJobResultsDetails apply_discovery_job_results_details: (required)
Details to apply the discovery results to a sensitive data model.
:param list[str] wait_for_states:
An array of states to wait on. These should be valid values for :py:attr:`~oci.data_safe.models.WorkRequest.status`
:param dict operation_kwargs:
A dictionary of keyword arguments to pass to :py:func:`~oci.data_safe.DataSafeClient.apply_discovery_job_results`
:param dict waiter_kwargs:
            A dictionary of keyword arguments to pass to the :py:func:`oci.wait_until` function. For example, you could pass ``max_interval_seconds`` or ``max_wait_seconds``
as dictionary keys to modify how long the waiter function will wait between retries and the maximum amount of time it will wait
"""
operation_result = self.client.apply_discovery_job_results(sensitive_data_model_id, apply_discovery_job_results_details, **operation_kwargs)
if not wait_for_states:
return operation_result
lowered_wait_for_states = [w.lower() for w in wait_for_states]
wait_for_resource_id = operation_result.headers['opc-work-request-id']
try:
waiter_result = oci.wait_until(
self.client,
self.client.get_work_request(wait_for_resource_id),
evaluate_response=lambda r: getattr(r.data, 'status') and getattr(r.data, 'status').lower() in lowered_wait_for_states,
**waiter_kwargs
)
result_to_return = waiter_result
return result_to_return
except Exception as e:
raise oci.exceptions.CompositeOperationError(partial_results=[operation_result], cause=e)
def calculate_audit_volume_available_and_wait_for_state(self, audit_profile_id, calculate_audit_volume_available_details, wait_for_states=[], operation_kwargs={}, waiter_kwargs={}):
"""
Calls :py:func:`~oci.data_safe.DataSafeClient.calculate_audit_volume_available` and waits for the :py:class:`~oci.data_safe.models.WorkRequest`
to enter the given state(s).
:param str audit_profile_id: (required)
            The OCID of the audit profile.
:param oci.data_safe.models.CalculateAuditVolumeAvailableDetails calculate_audit_volume_available_details: (required)
Details for the calculation of audit volume available on target database.
:param list[str] wait_for_states:
An array of states to wait on. These should be valid values for :py:attr:`~oci.data_safe.models.WorkRequest.status`
:param dict operation_kwargs:
A dictionary of keyword arguments to pass to :py:func:`~oci.data_safe.DataSafeClient.calculate_audit_volume_available`
:param dict waiter_kwargs:
            A dictionary of keyword arguments to pass to the :py:func:`oci.wait_until` function. For example, you could pass ``max_interval_seconds`` or ``max_wait_seconds``
as dictionary keys to modify how long the waiter function will wait between retries and the maximum amount of time it will wait
"""
operation_result = self.client.calculate_audit_volume_available(audit_profile_id, calculate_audit_volume_available_details, **operation_kwargs)
if not wait_for_states:
return operation_result
lowered_wait_for_states = [w.lower() for w in wait_for_states]
wait_for_resource_id = operation_result.headers['opc-work-request-id']
try:
waiter_result = oci.wait_until(
self.client,
self.client.get_work_request(wait_for_resource_id),
evaluate_response=lambda r: getattr(r.data, 'status') and getattr(r.data, 'status').lower() in lowered_wait_for_states,
**waiter_kwargs
)
result_to_return = waiter_result
return result_to_return
except Exception as e:
raise oci.exceptions.CompositeOperationError(partial_results=[operation_result], cause=e)
def calculate_audit_volume_collected_and_wait_for_state(self, audit_profile_id, calculate_audit_volume_collected_details, wait_for_states=[], operation_kwargs={}, waiter_kwargs={}):
"""
Calls :py:func:`~oci.data_safe.DataSafeClient.calculate_audit_volume_collected` and waits for the :py:class:`~oci.data_safe.models.WorkRequest`
to enter the given state(s).
:param str audit_profile_id: (required)
            The OCID of the audit profile.
:param oci.data_safe.models.CalculateAuditVolumeCollectedDetails calculate_audit_volume_collected_details: (required)
Details for the calculation of audit volume collected by data safe.
:param list[str] wait_for_states:
An array of states to wait on. These should be valid values for :py:attr:`~oci.data_safe.models.WorkRequest.status`
:param dict operation_kwargs:
A dictionary of keyword arguments to pass to :py:func:`~oci.data_safe.DataSafeClient.calculate_audit_volume_collected`
:param dict waiter_kwargs:
            A dictionary of keyword arguments to pass to the :py:func:`oci.wait_until` function. For example, you could pass ``max_interval_seconds`` or ``max_wait_seconds``
as dictionary keys to modify how long the waiter function will wait between retries and the maximum amount of time it will wait
"""
operation_result = self.client.calculate_audit_volume_collected(audit_profile_id, calculate_audit_volume_collected_details, **operation_kwargs)
if not wait_for_states:
return operation_result
lowered_wait_for_states = [w.lower() for w in wait_for_states]
wait_for_resource_id = operation_result.headers['opc-work-request-id']
try:
waiter_result = oci.wait_until(
self.client,
self.client.get_work_request(wait_for_resource_id),
evaluate_response=lambda r: getattr(r.data, 'status') and getattr(r.data, 'status').lower() in lowered_wait_for_states,
**waiter_kwargs
)
result_to_return = waiter_result
return result_to_return
except Exception as e:
raise oci.exceptions.CompositeOperationError(partial_results=[operation_result], cause=e)
def change_audit_archive_retrieval_compartment_and_wait_for_state(self, audit_archive_retrieval_id, change_audit_archive_retrieval_compartment_details, wait_for_states=[], operation_kwargs={}, waiter_kwargs={}):
"""
Calls :py:func:`~oci.data_safe.DataSafeClient.change_audit_archive_retrieval_compartment` and waits for the :py:class:`~oci.data_safe.models.WorkRequest`
to enter the given state(s).
:param str audit_archive_retrieval_id: (required)
OCID of the archive retrieval.
:param oci.data_safe.models.ChangeAuditArchiveRetrievalCompartmentDetails change_audit_archive_retrieval_compartment_details: (required)
            The details used to change the compartment of an archive retrieval.
:param list[str] wait_for_states:
An array of states to wait on. These should be valid values for :py:attr:`~oci.data_safe.models.WorkRequest.status`
:param dict operation_kwargs:
A dictionary of keyword arguments to pass to :py:func:`~oci.data_safe.DataSafeClient.change_audit_archive_retrieval_compartment`
:param dict waiter_kwargs:
            A dictionary of keyword arguments to pass to the :py:func:`oci.wait_until` function. For example, you could pass ``max_interval_seconds`` or ``max_wait_seconds``
as dictionary keys to modify how long the waiter function will wait between retries and the maximum amount of time it will wait
"""
operation_result = self.client.change_audit_archive_retrieval_compartment(audit_archive_retrieval_id, change_audit_archive_retrieval_compartment_details, **operation_kwargs)
if not wait_for_states:
return operation_result
lowered_wait_for_states = [w.lower() for w in wait_for_states]
wait_for_resource_id = operation_result.headers['opc-work-request-id']
try:
waiter_result = oci.wait_until(
self.client,
self.client.get_work_request(wait_for_resource_id),
evaluate_response=lambda r: getattr(r.data, 'status') and getattr(r.data, 'status').lower() in lowered_wait_for_states,
**waiter_kwargs
)
result_to_return = waiter_result
return result_to_return
except Exception as e:
raise oci.exceptions.CompositeOperationError(partial_results=[operation_result], cause=e)
def change_audit_policy_compartment_and_wait_for_state(self, audit_policy_id, change_audit_policy_compartment_details, wait_for_states=[], operation_kwargs={}, waiter_kwargs={}):
"""
Calls :py:func:`~oci.data_safe.DataSafeClient.change_audit_policy_compartment` and waits for the :py:class:`~oci.data_safe.models.WorkRequest`
to enter the given state(s).
:param str audit_policy_id: (required)
Unique audit policy identifier.
:param oci.data_safe.models.ChangeAuditPolicyCompartmentDetails change_audit_policy_compartment_details: (required)
Details for the compartment move.
:param list[str] wait_for_states:
An array of states to wait on. These should be valid values for :py:attr:`~oci.data_safe.models.WorkRequest.status`
:param dict operation_kwargs:
A dictionary of keyword arguments to pass to :py:func:`~oci.data_safe.DataSafeClient.change_audit_policy_compartment`
:param dict waiter_kwargs:
            A dictionary of keyword arguments to pass to the :py:func:`oci.wait_until` function. For example, you could pass ``max_interval_seconds`` or ``max_wait_seconds``
as dictionary keys to modify how long the waiter function will wait between retries and the maximum amount of time it will wait
"""
operation_result = self.client.change_audit_policy_compartment(audit_policy_id, change_audit_policy_compartment_details, **operation_kwargs)
if not wait_for_states:
return operation_result
lowered_wait_for_states = [w.lower() for w in wait_for_states]
wait_for_resource_id = | |
<filename>skybright/skybright.py
#!/usr/bin/env python
"""A model for the sky brightness
"""
from functools import partial
from math import pi, cos, acos, sin, sqrt, log10
from datetime import datetime, tzinfo, timedelta
from time import strptime
from calendar import timegm
from copy import deepcopy
from sys import argv
from collections import namedtuple, OrderedDict
from argparse import ArgumentParser
try:
from ConfigParser import ConfigParser
except:
from configparser import ConfigParser
import numexpr
from numexpr import NumExpr
import warnings
from warnings import warn
import numpy as np
try:
from palpy import rdplan as rdplan_not_vectorized
from palpy import gmst as gmst_not_vectorized
from palpy import dmoon
from palpy import evp
except ImportError:
from pyslalib.slalib import sla_rdplan as rdplan_not_vectorized
from pyslalib.slalib import sla_gmst as gmst_not_vectorized
from pyslalib.slalib import sla_dmoon as dmoon
from pyslalib.slalib import sla_evp as evp
palpy_body = {'sun': 0,
'moon': 3}
MAG0 = 23.9
# warnings.simplefilter("always")
rdplan = np.vectorize(rdplan_not_vectorized)
def gmst(mjd):
# Follow Meeus chapter 12
big_t = numexpr.evaluate("(mjd - 51544.5)/36525")
st = np.radians(np.mod(numexpr.evaluate("280.46061837 + 360.98564736629*(mjd-51544.5) + 0.000387933*big_t*big_t - big_t*big_t*big_t/38710000"), 360))
return st
def ang_sep(ra1, decl1, ra2, decl2):
# haversine formula
return numexpr.evaluate("2*arcsin(sqrt(cos(decl1)*cos(decl2)*(sin(((ra1-ra2)/2))**2) + (sin((decl1-decl2)/2))**2))")
## Works and is trivially faster, but less flexible w.r.t. data types
#
# ang_sep = NumExpr("2*arcsin(sqrt(cos(decl1)*cos(decl2)*(sin(((ra1-ra2)/2))**2) + (sin((decl1-decl2)/2))**2))",
# (('ra1', np.float64), ('decl1', np.float64), ('ra2', np.float64), ('decl2', np.float64)))
def calc_zd(latitude, ha, decl):
# zenith is always at ha=0, dec=latitude, by defn.
return ang_sep(ha, decl, 0, latitude)
def calc_airmass(cos_zd):
a = numexpr.evaluate("462.46 + 2.8121/(cos_zd**2 + 0.22*cos_zd + 0.01)")
airmass = numexpr.evaluate("sqrt((a*cos_zd)**2 + 2*a + 1) - a * cos_zd")
airmass[cos_zd < 0] = np.nan
return airmass
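# Quick check of calc_airmass (illustrative, not part of the original module):
# at the zenith (cos_zd = 1) the fitted formula returns exactly 1, and at a
# zenith distance of 60 degrees (cos_zd = 0.5) it returns ~2, close to sec(zd).
#
#   calc_airmass(np.array([1.0, 0.5]))   # -> approx. [1.0, 2.0]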
def calc_airglow(r0, h, m_zen, k, sin_zd, airmass):
airglow = numexpr.evaluate("10**(-0.4*(m_zen + 1.25*log10(1.0 - (r0/(h+r0))*(sin_zd**2)) + k*(airmass-1) - MAG0))")
return airglow
def calc_scat_extinction(k, x0, x):
if len(np.shape(x0)) == 0:
x0p = calc_airmass(0) if np.isnan(x0) else x0
else:
x0p = np.where(np.isnan(x0), calc_airmass(0), x0)
extinct = numexpr.evaluate("(10**(-0.4*k*x) - 10**(-0.4*k*x0p))/(-0.4*k*(x-x0p))")
return extinct
def elongation_not_vectorized(mjd):
"Calculate the elongation of the moon in radians"
pv = dmoon(mjd)
moon_distance = (sum([x**2 for x in pv[:3]]))**0.5
dvb, dpb, dvh, dph = evp(mjd,-1)
sun_distance = (sum([x**2 for x in dph[:3]]))**0.5
a = np.degrees(np.arccos(
(-pv[0]*dph[0] - pv[1]*dph[1] - pv[2]*dph[2])/
(moon_distance*sun_distance)))
return a
elongation = np.vectorize(elongation_not_vectorized)
def calc_moon_brightness(mjd, moon_elongation=None):
"""The brightness of the moon (relative to full)
The value here matches about what I expect from the value in
Astrophysical Quantities corresponding to the elongation calculated by
http://ssd.jpl.nasa.gov/horizons.cgi
>>> mjd = 51778.47
>>> print "%3.2f" % moon_brightness(mjd)
0.10
"""
if moon_elongation is None:
moon_elongation = elongation(mjd)
alpha = 180.0-moon_elongation
# Allen's _Astrophysical Quantities_, 3rd ed., p. 144
return 10**(-0.4*(0.026*abs(alpha) + 4E-9*(alpha**4)))
def one_calc_twilight_fract(z, twi1=-2.52333, twi2=0.01111):
if z<90:
return 1.0
if z>108:
return 0.0
if z>100:
twi0 = -1*(twi1*90+ twi2*90*90)
logfrac = twi0 + twi1*z + twi2*z*z
else:
logfrac = 137.11-2.52333*z+0.01111*z*z
frac = 10**logfrac
frac = 1.0 if frac>1.0 else frac
frac = 0.0 if frac<0.0 else frac
return frac
def calc_twilight_fract(zd, twi1=-2.52333, twi2=0.01111):
z = zd if len(np.shape(zd)) > 0 else np.array(zd)
logfrac = numexpr.evaluate("137.11-2.52333*z+0.01111*z*z")
logfrac[z>100] = numexpr.evaluate("twi1*z + twi2*z*z - (twi1*90 + twi2*90*90)")[z>100]
frac = 10**logfrac
frac = np.where(z<90, 1.0, frac)
frac = np.where(z>108, 0.0, frac)
frac = np.where(frac>1.0, 1.0, frac)
frac = np.where(frac<0.0, 0.0, frac)
return frac
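# Example values (illustrative, not part of the original module): the fraction
# is 1 before the sun sets (zd < 90 deg), drops steeply through twilight, and
# is 0 once zd > 108 deg.
#
#   calc_twilight_fract(np.array([85.0, 95.0, 110.0]))  # -> approx. [1.0, 5e-3, 0.0]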
def calc_body_scattering(brightness, body_zd_deg, cos_zd, body_ra, body_decl, ra, decl,
twi1, twi2, k, airmass, body_airmass, rayl_m, mie_m, g,
rayleigh=True, mie=True):
if len(np.shape(brightness)) == 0:
brightness = np.array(brightness)
brightness = np.where(body_zd_deg > 107.8, 0, brightness)
body_twi = body_zd_deg > 90
brightness[body_twi] = brightness[body_twi]*calc_twilight_fract(body_zd_deg[body_twi], twi1, twi2)
extinct = calc_scat_extinction(k, body_airmass, airmass)
cos_rho = numexpr.evaluate("cos(2*arcsin(sqrt(cos(decl)*cos(body_decl)*(sin(((ra-body_ra)/2))**2) + (sin((decl-body_decl)/2))**2)))")
rayleigh_frho = numexpr.evaluate("0.75*(1.0+cos_rho**2)") if rayleigh else np.zeros_like(cos_rho)
mie_frho = numexpr.evaluate("1.5*((1.0-g**2)/(2.0+g**2)) * (1.0 + cos_rho) * (1.0 + g**2 - 2.0*g*cos_rho*cos_rho)**(-1.5)") if mie else np.zeros_like(cos_rho)
mie_frho = np.where(mie_frho<0, 0.0, mie_frho)
# Fitter sometimes explores values of g resulting mie_frho being negative.
# Force a physical result.
mie_frho = np.where(mie_frho<0, 0.0, mie_frho)
rayl_c = 10**(-0.4*(rayl_m-MAG0))
mie_c = 10**(-0.4*(mie_m-MAG0))
flux = brightness*extinct*(rayl_c*rayleigh_frho + mie_c*mie_frho)
return flux
class MoonSkyModel(object):
def __init__(self, model_config):
self.longitude = model_config.getfloat("Observatory Position",
"longitude")
self.latitude = model_config.getfloat("Observatory Position",
"latitude")
self.k = OrderedDict()
self.m_inf = OrderedDict()
self.m_zen = OrderedDict()
self.h = OrderedDict()
self.rayl_m = OrderedDict()
self.g = OrderedDict()
self.mie_m = OrderedDict()
self.offset = OrderedDict()
self.sun_dm = OrderedDict()
self.twi1 = OrderedDict()
self.twi2 = OrderedDict()
        for i, band in enumerate(model_config.get("sky", "filters").split()):
self.k[band] = float(model_config.get("sky","k").split()[i])
self.m_inf[band] = float(model_config.get("sky","m_inf").split()[i])
self.m_zen[band] = float(model_config.get("sky","m_zen").split()[i])
self.h[band] = float(model_config.get("sky","h").split()[i])
self.rayl_m[band] = float(model_config.get("sky","rayl_m").split()[i])
self.g[band] = float(model_config.get("sky","g").split()[i])
self.mie_m[band] = float(model_config.get("sky","mie_m").split()[i])
self.offset[band] = 0.0
self.sun_dm[band] = float(model_config.get("sky","sun_dm").split()[i])
self.twi1[band] = float(model_config.get("sky","twi1").split()[i])
self.twi2[band] = float(model_config.get("sky","twi2").split()[i])
self.calc_zd = partial(calc_zd, np.radians(self.latitude))
self.r0 = 6375.0
self.twilight_nan = True
def __call__(self, mjd, ra_deg, decl_deg, band, sun=True, moon=True,
moon_crds=None, moon_elongation=None, sun_crds=None, lst=None):
if len(np.shape(band)) < 1:
return self.single_band_call(
mjd, ra_deg, decl_deg, band, sun=sun, moon=moon,
moon_crds=moon_crds, moon_elongation=moon_elongation, sun_crds=sun_crds,
lst=lst)
mags = np.empty_like(ra_deg, dtype=np.float64)
mags.fill(np.nan)
for this_band in np.unique(band):
these = band == this_band
mjd_arg = mjd if len(np.shape(mjd))==0 else mjd[these]
mags[these] = self.single_band_call(
mjd_arg, ra_deg[these], decl_deg[these], this_band, sun=sun, moon=moon,
moon_crds=moon_crds, moon_elongation=moon_elongation, sun_crds=sun_crds,
lst=lst
)
return mags
def single_band_call(self, mjd, ra_deg, decl_deg, band, sun=True, moon=True,
moon_crds=None, moon_elongation=None, sun_crds=None, lst=None):
longitude = np.radians(self.longitude)
latitude = np.radians(self.latitude)
ra = np.radians(ra_deg)
decl = np.radians(decl_deg)
k = self.k[band]
twi1 = self.twi1[band]
twi2 = self.twi2[band]
m_inf = self.m_inf[band]
lst = gmst(mjd) + longitude if lst is None else np.radians(lst)
ha = lst - ra
if sun_crds is None:
sun_ra, sun_decl, diam = rdplan(mjd, 0, longitude, latitude)
else:
sun_ra = sun_crds.ra.rad
sun_decl = sun_crds.dec.rad
sun_ha = lst - sun_ra
sun_zd = self.calc_zd(sun_ha, sun_decl)
sun_zd_deg = np.degrees(sun_zd)
if len(np.shape(sun_zd_deg)) == 0 and self.twilight_nan:
if sun_zd_deg < 98:
m = np.empty_like(ra)
m.fill(np.nan)
return m
sun_cos_zd = np.cos(sun_zd)
sun_airmass = calc_airmass(sun_cos_zd)
if moon_crds is None:
moon_ra, moon_decl, diam = rdplan(mjd, 3, longitude, latitude)
else:
moon_ra = moon_crds.ra.rad
moon_decl = moon_crds.dec.rad
moon_ha = lst - moon_ra
moon_zd = self.calc_zd(moon_ha, moon_decl)
moon_cos_zd = np.cos(moon_zd)
moon_airmass = calc_airmass(moon_cos_zd)
moon_zd_deg = np.degrees(moon_zd)
# Flux from infinity
sky_flux = np.empty_like(ra)
sky_flux.fill(10**(-0.4*(m_inf-MAG0)))
# Airglow
zd = self.calc_zd(ha, decl)
sin_zd = np.sin(zd)
cos_zd = np.cos(zd)
airmass = calc_airmass(cos_zd)
airglow_flux = calc_airglow(self.r0, self.h[band], self.m_zen[band], k, sin_zd, airmass)
sky_flux += airglow_flux
# Needed for both scattering calculations
zd_deg = np.degrees(zd)
# Add scattering of moonlight
if moon:
moon_flux = calc_body_scattering(
calc_moon_brightness(mjd, moon_elongation),
moon_zd_deg, cos_zd, moon_ra, moon_decl, ra, decl, twi1, twi2, k, airmass, moon_airmass,
self.rayl_m[band], self.mie_m[band], self.g[band])
sky_flux += moon_flux
# Add scattering of sunlight
if sun:
sun_flux = calc_body_scattering(
10**(-0.4*(self.sun_dm[band])),
sun_zd_deg, cos_zd, sun_ra, sun_decl, ra, decl, twi1, twi2, k, airmass, sun_airmass,
self.rayl_m[band], self.mie_m[band], self.g[band])
sky_flux += sun_flux
m = MAG0 - 2.5*np.log10(sky_flux)
if len(np.shape(m)) > 0 and self.twilight_nan:
m[sun_zd_deg < 98] = np.nan
return m
#
# Included for backward compatibility with the previous implementation
#
def skymag(m_inf, m_zen, h, g, mie_m, rayl_m, ra, decl, mjd, k, latitude, longitude, offset=0.0,
sun_dm=-14.0, twi1=-2.52333, twi2=0.01111):
config = ConfigParser()
sect = "Observatory Position"
config.add_section(sect)
config.set(sect, 'longitude', longitude)
config.set(sect, 'latitude', latitude)
sect = "sky"
config.add_section(sect)
config.set(sect, 'filters', 'x')
config.set(sect, 'k', k)
config.set(sect, 'm_inf', m_inf)
config.set(sect, 'm_zen', m_zen)
config.set(sect, 'h', h)
config.set(sect, 'rayl_m', rayl_m)
config.set(sect, 'g', g)
config.set(sect, 'mie_m', mie_m)
config.set(sect, 'sun_dm', sun_dm)
config.set(sect, 'twi1', twi1)
config.set(sect, 'twi2', twi2)
calc_sky = MoonSkyModel(config)
sky = calc_sky(mjd, ra, decl, 'x')
return sky
if __name__=='__main__':
parser = ArgumentParser('Estimate the sky brightness')
parser.add_argument("-m", "--mjd", type=float,
help="Modified Julian Date (float) (UTC)")
parser.add_argument("-r", "--ra", type=float,
help="the RA (decimal degrees)")
parser.add_argument("-d", "--dec", type=float,
help="the declination (decimal degrees)")
parser.add_argument("-f", "--filter",
help="the filter")
parser.add_argument("-c", "--config",
help="the configuration file")
args = parser.parse_args()
model_config = ConfigParser()
model_config.read(args.config)
longitude = model_config.getfloat("Observatory Position",
"longitude")
latitude = model_config.getfloat("Observatory Position",
"latitude")
lst = gmst(args.mjd) + np.radians(longitude)
print("GMST: %f" % np.degrees(gmst(args.mjd)))
print("LST: %f" % np.degrees(lst))
sun_ra, sun_decl, diam = rdplan(args.mjd, 0, np.radians(longitude), np.radians(latitude))
sun_ha = lst - sun_ra
sun_zd = np.degrees(calc_zd(np.radians(latitude), sun_ha, sun_decl))
print("Sun zenith distance: %f" % sun_zd)
    moon_ra, moon_decl, diam = rdplan(args.mjd, 3, np.radians(longitude), np.radians(latitude))
moon_ha = lst - moon_ra
moon_zd = np.degrees(calc_zd(np.radians(latitude), moon_ha, moon_decl))
print("Moon zenith distance: %f" % moon_zd)
print("Elongation of the moon: %f" % elongation(args.mjd))
print("Moon brightness: %f" % calc_moon_brightness(args.mjd))
sep = ang_sep(moon_ra, moon_decl, np.radians(args.ra), np.radians(args.dec))
print("Pointing angle with moon: %f" % sep)
ha = lst - np.radians(args.ra)
print("Hour angle: %f" % np.degrees(ha))
z = calc_zd(np.radians(latitude), ha, np.radians(args.dec))
print("Pointing zenith distance: %f" % np.degrees(z))
print("Airmass: %f" % calc_airmass(np.cos(z)))
<gh_stars>1-10
# -*- coding: utf-8 -*-
"""Global addon constants.
Everything that is to be globally accessible must be defined in this module
and initialized in GlobalVariables.init_globals.
When reusing Kodi languageInvokers, only the code in the main module
(addon.py or service.py) will be run every time the addon is called.
All other code executed on module level will only be executed once, when
the module is first imported on the first addon invocation."""
from __future__ import unicode_literals
import os
from urlparse import urlparse, parse_qsl
from urllib import unquote
import collections
import xbmc
import xbmcaddon
import xbmcvfs
import resources.lib.cache as cache
class GlobalVariables(object):
"""Encapsulation for global variables to work around quirks with
Kodi's reuseLanguageInvoker behavior"""
# pylint: disable=attribute-defined-outside-init
# pylint: disable=invalid-name, too-many-instance-attributes
# Values in the variables VIEW_* stand for a partial menu id,
# contained in the settings xml, example 'profiles' stand for id 'viewmodeprofiles'
VIEW_PROFILES = 'profiles'
VIEW_MAINMENU = 'mainmenu'
VIEW_MYLIST = 'mylist'
VIEW_FOLDER = 'folder'
VIEW_MOVIE = 'movie'
VIEW_SHOW = 'show'
VIEW_SEASON = 'season'
VIEW_EPISODE = 'episode'
VIEW_SEARCH = 'search'
VIEW_EXPORTED = 'exported'
CONTENT_IMAGES = 'images'
CONTENT_FOLDER = 'files'
CONTENT_MOVIE = 'movies'
CONTENT_SHOW = 'tvshows'
CONTENT_SEASON = 'seasons'
CONTENT_EPISODE = 'episodes'
'''
--Main Menu key infos--
    path : information passed to the called method, generally structured as: [func. name, menu id, context id]
    lolomo_contexts : contexts used to obtain the list of contents (use only one context when lolomo_known = True)
    lolomo_known : if True, the keys label_id/description_id/icon are ignored and the values are obtained from the lolomo list
    label_id : menu title
    description_id : description info text
    icon : set a default image
    view : overrides the default "partial menu id" of the view
    content_type : overrides the default content type (CONTENT_SHOW)
    Explanation of function names in the 'path' key:
    video_list: automatically gets the list_id by making a lolomo request;
    the list_id lookup uses the value specified in the lolomo_contexts key
    video_list_sorted: requires a third path argument (the context_id) or,
    alternatively, the request_context_name key
'''
MAIN_MENU_ITEMS = collections.OrderedDict([
('myList', {'path': ['video_list_sorted', 'myList'],
'lolomo_contexts': ['queue'],
'lolomo_known': True,
'request_context_name': 'mylist',
'view': VIEW_MYLIST}),
('continueWatching', {'path': ['video_list', 'continueWatching'],
'lolomo_contexts': ['continueWatching'],
'lolomo_known': True}),
('chosenForYou', {'path': ['video_list', 'chosenForYou'],
'lolomo_contexts': ['topTen'],
'lolomo_known': True}),
('recentlyAdded', {'path': ['video_list_sorted', 'recentlyAdded', '1592210'],
'lolomo_contexts': None,
'lolomo_known': False,
'request_context_name': 'genres',
'label_id': 30145,
'description_id': 30146,
'icon': 'DefaultRecentlyAddedMovies.png'}),
('newRelease', {'path': ['video_list_sorted', 'newRelease'],
'lolomo_contexts': ['newRelease'],
'lolomo_known': True,
'request_context_name': 'newrelease'}),
('currentTitles', {'path': ['video_list', 'currentTitles'],
'lolomo_contexts': ['trendingNow'],
'lolomo_known': True}),
('mostViewed', {'path': ['video_list', 'mostViewed'],
'lolomo_contexts': ['popularTitles'],
'lolomo_known': True}),
('netflixOriginals', {'path': ['video_list_sorted', 'netflixOriginals', '839338'],
'lolomo_contexts': ['netflixOriginals'],
'lolomo_known': True,
'request_context_name': 'genres'}),
('assistiveAudio', {'path': ['video_list_sorted', 'assistiveAudio', 'None'],
'lolomo_contexts': None,
'lolomo_known': False,
'request_context_name': 'assistiveAudio',
'label_id': 30163,
'description_id': 30164,
'icon': 'DefaultTVShows.png'}),
('recommendations', {'path': ['recommendations', 'recommendations'],
'lolomo_contexts': ['similars', 'becauseYouAdded'],
'lolomo_known': False,
'label_id': 30001,
'description_id': 30094,
'icon': 'DefaultUser.png',
'content_type': CONTENT_FOLDER}),
('tvshowsGenres', {'path': ['subgenres', 'tvshowsGenres', '83'],
'lolomo_contexts': None,
'lolomo_known': False,
'request_context_name': 'genres', # Used for sub-menus
'label_id': 30174,
'description_id': None,
'icon': 'DefaultTVShows.png',
'content_type': CONTENT_FOLDER}),
('moviesGenres', {'path': ['subgenres', 'moviesGenres', '34399'],
'lolomo_contexts': None,
'lolomo_known': False,
'request_context_name': 'genres', # Used for sub-menus
'label_id': 30175,
'description_id': None,
'icon': 'DefaultMovies.png',
'content_type': CONTENT_FOLDER}),
('tvshows', {'path': ['genres', 'tvshows', '83'],
'lolomo_contexts': None,
'lolomo_known': False,
'request_context_name': 'genres', # Used for sub-menus
'label_id': 30095,
'description_id': None,
'icon': 'DefaultTVShows.png',
'content_type': CONTENT_FOLDER}),
('movies', {'path': ['genres', 'movies', '34399'],
'lolomo_contexts': None,
'lolomo_known': False,
'request_context_name': 'genres', # Used for sub-menus
'label_id': 30096,
'description_id': None,
'icon': 'DefaultMovies.png',
'content_type': CONTENT_FOLDER}),
('genres', {'path': ['genres', 'genres'],
'lolomo_contexts': ['genre'],
'lolomo_known': False,
'request_context_name': 'genres', # Used for sub-menus
'label_id': 30010,
'description_id': 30093,
'icon': 'DefaultGenre.png',
'content_type': CONTENT_FOLDER}),
('search', {'path': ['search', 'search'],
'lolomo_contexts': None,
'lolomo_known': False,
'label_id': 30011,
'description_id': 30092,
'icon': None,
'view': VIEW_SEARCH}),
('exported', {'path': ['exported', 'exported'],
'lolomo_contexts': None,
'lolomo_known': False,
'label_id': 30048,
'description_id': 30091,
'icon': 'DefaultHardDisk.png',
'view': VIEW_EXPORTED})
])
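    # Illustrative note (not part of the original addon): a new main-menu entry
    # only needs a dict that follows the scheme documented above, e.g. (all
    # values below are placeholders):
    #
    #   ('documentaries', {'path': ['video_list_sorted', 'documentaries', '6839'],
    #                      'lolomo_contexts': None,
    #                      'lolomo_known': False,
    #                      'request_context_name': 'genres',
    #                      'label_id': 30999,
    #                      'description_id': None,
    #                      'icon': 'DefaultMovies.png'})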
MODE_DIRECTORY = 'directory'
MODE_HUB = 'hub'
MODE_ACTION = 'action'
MODE_PLAY = 'play'
MODE_LIBRARY = 'library'
def __init__(self):
"""Do nothing on constructing the object"""
pass
def init_globals(self, argv):
"""Initialized globally used module variables.
Needs to be called at start of each plugin instance!
This is an ugly hack because Kodi doesn't execute statements defined on
module level if reusing a language invoker."""
self._library = None
self.SETTINGS_MONITOR_IGNORE = False
self.COOKIES = {}
self.ADDON = xbmcaddon.Addon()
self.ADDON_ID = self.ADDON.getAddonInfo('id')
self.PLUGIN = self.ADDON.getAddonInfo('name')
self.VERSION = self.ADDON.getAddonInfo('version')
self.DEFAULT_FANART = self.ADDON.getAddonInfo('fanart')
self.ICON = self.ADDON.getAddonInfo('icon')
self.DATA_PATH = self.ADDON.getAddonInfo('profile')
self.CACHE_PATH = os.path.join(self.DATA_PATH, 'cache')
self.COOKIE_PATH = os.path.join(self.DATA_PATH, 'COOKIE')
self.CACHE_TTL = self.ADDON.getSettingInt('cache_ttl') * 60
self.CACHE_METADATA_TTL = (
self.ADDON.getSettingInt('cache_metadata_ttl') * 24 * 60 * 60)
self.URL = urlparse(argv[0])
try:
self.PLUGIN_HANDLE = int(argv[1])
except IndexError:
self.PLUGIN_HANDLE = 0
self.BASE_URL = '{scheme}://{netloc}'.format(scheme=self.URL[0],
netloc=self.URL[1])
self.PATH = unquote(self.URL[2][1:]).decode('utf-8')
try:
self.PARAM_STRING = argv[2][1:]
except IndexError:
self.PARAM_STRING = ''
self.REQUEST_PARAMS = dict(parse_qsl(self.PARAM_STRING))
self.reset_time_trace()
self.TIME_TRACE_ENABLED = self.ADDON.getSettingBool('enable_timing')
self.IPC_OVER_HTTP = self.ADDON.getSettingBool('enable_ipc_over_http')
try:
os.mkdir(self.DATA_PATH)
except OSError:
pass
self._init_cache()
self.init_persistent_storage()
def _init_cache(self):
if not os.path.exists(
xbmc.translatePath(self.CACHE_PATH).decode('utf-8')):
self._init_filesystem_cache()
# This is ugly: Pass the common module into Cache.__init__ to work
# around circular import dependencies.
import resources.lib.common as common
self.CACHE = cache.Cache(common, self.CACHE_PATH, self.CACHE_TTL,
self.CACHE_METADATA_TTL, self.PLUGIN_HANDLE)
def _init_filesystem_cache(self):
# pylint: disable=broad-except
for bucket in cache.BUCKET_NAMES:
if bucket != cache.CACHE_LIBRARY:
# Library gets special location in DATA_PATH root because
# we don't want users accidentally deleting it.
xbmcvfs.mkdirs(
xbmc.translatePath(
os.path.join(self.CACHE_PATH, bucket)))
def initial_addon_configuration(self):
"""
Initial addon configuration,
helps users to automatically configure addon parameters for proper viewing of videos
"""
run_initial_config = self.ADDON.getSettingBool('run_init_configuration')
if run_initial_config:
import resources.lib.common as common
import resources.lib.kodi.ui as ui
self.SETTINGS_MONITOR_IGNORE = True
system = common.get_system_platform()
common.debug('Running initial addon configuration dialogs on system: {}'.format(system))
if system in ['osx','ios','xbox']:
self.ADDON.setSettingBool('enable_vp9_profiles', False)
self.ADDON.setSettingBool('enable_hevc_profiles', True)
elif system == 'windows':
# Currently inputstream does not support hardware video acceleration on windows,
# there is no guarantee that we will get 4K without video hardware acceleration,
# so no 4K configuration
self.ADDON.setSettingBool('enable_vp9_profiles', True)
self.ADDON.setSettingBool('enable_hevc_profiles', False)
elif system == 'android':
ultrahd_capable_device = False
premium_account = ui.ask_for_confirmation(common.get_local_string(30154),
common.get_local_string(30155))
if premium_account:
ultrahd_capable_device = ui.ask_for_confirmation(common.get_local_string(30154),
common.get_local_string(30156))
if ultrahd_capable_device:
ui.show_ok_dialog(common.get_local_string(30154),
common.get_local_string(30157))
ia_enabled = xbmc.getCondVisibility('System.HasAddon(inputstream.adaptive)')
if ia_enabled:
xbmc.executebuiltin('Addon.OpenSettings(inputstream.adaptive)')
else:
ui.show_ok_dialog(common.get_local_string(30154),
common.get_local_string(30046))
self.ADDON.setSettingBool('enable_vp9_profiles', False)
self.ADDON.setSettingBool('enable_hevc_profiles', True)
else:
# VP9 should have better performance since there is no need for 4k
self.ADDON.setSettingBool('enable_vp9_profiles', True)
self.ADDON.setSettingBool('enable_hevc_profiles', False)
self.ADDON.setSettingBool('enable_force_hdcp', ultrahd_capable_device)
elif system == 'linux':
            # Too many different Linux systems; we cannot predict all behaviors.
            # Some Linux distributions have encountered problems with VP9,
            # and OSMC users report that HEVC causes problems
self.ADDON.setSettingBool('enable_vp9_profiles', False)
self.ADDON.setSettingBool('enable_hevc_profiles', False)
else:
self.ADDON.setSettingBool('enable_vp9_profiles', False)
self.ADDON.setSettingBool('enable_hevc_profiles', False)
self.ADDON.setSettingBool('run_init_configuration', False)
self.SETTINGS_MONITOR_IGNORE = False
def init_persistent_storage(self):
"""
        Save to disk the data that needs to survive between invocations;
        Kodi reinitializes the addon at each screen change,
        so ordinary in-memory variables are not persistent
"""
# This is ugly: Pass the common module into Cache.__init__ to work
# around circular import dependencies.
import resources.lib.common as common
        # PersistentStorage's "save on destroy" causes problems here because the object often gets destroyed in unpredictable ways
self.PERSISTENT_STORAGE = common.PersistentStorage(__name__, no_save_on_destroy=True)
# If missing create necessary keys
if not self.PERSISTENT_STORAGE.get('show_menus'):
self.PERSISTENT_STORAGE['show_menus'] = {}
if not self.PERSISTENT_STORAGE.get('menu_sortorder'):
self.PERSISTENT_STORAGE['menu_sortorder'] = {}
if not self.PERSISTENT_STORAGE.get('menu_titles'):
self.PERSISTENT_STORAGE['menu_titles'] = {}
if not self.PERSISTENT_STORAGE.get('sub_menus'):
self.PERSISTENT_STORAGE['sub_menus'] = {}
def get_menu_title(self, menu_key, fallback_title=''):
"""
        Get the menu title from persistent storage.
        In some situations (e.g. the persistent file was deleted, or a
        context_id/menu_id changed due to Netflix changes or addon menu code changes)
        the list or key may no longer be present
"""
if not g.PERSISTENT_STORAGE.get('menu_titles'):
return fallback_title
return g.PERSISTENT_STORAGE['menu_titles'].get(menu_key, fallback_title)
def library(self):
"""Get the current library instance"""
# pylint: disable=global-statement, attribute-defined-outside-init
if not self._library:
try:
self._library = self.CACHE.get(cache.CACHE_LIBRARY, 'library')
except cache.CacheMiss:
self._library = {}
return self._library
def save_library(self):
"""Save the library to disk via cache"""
if self._library is not None:
self.CACHE.add(cache.CACHE_LIBRARY, 'library', self._library,
ttl=cache.TTL_INFINITE, to_disk=True)
def get_esn(self):
"""Get the ESN from settings"""
return self.ADDON.getSetting('esn')
def set_esn(self, esn):
"""
Set the ESN in settings if it hasn't been set yet.
Return True if the new ESN has been set, False otherwise
"""
if not self.get_esn() and esn:
self.ADDON.setSetting('esn', esn)
return True
return False
def get_edge_esn(self):
"""Get a previously generated edge ESN from the settings or generate
a new one if none exists"""
return self.ADDON.getSetting('edge_esn') or self.generate_edge_esn()
def generate_edge_esn(self):
"""Generate a random EDGE ESN and save it to | |
<filename>src/main/python/extract_results.py
#!/usr/bin/env python
# Extract results from a number of runs. Converted from shell script
# 'tg-extract-results'. The intent was to speed things up but unfortunately
# it doesn't always work. It seems the majority of the time is spent reading
# the file from disk, and that's the same for both versions. However, it
# does noticeably speed up reading 31 relatively short files in Maverick
# (max size about 3 MB). It runs in less than half a second, vs. about
# 12 seconds for the old version. This suggests it will yield dramatic
# speedups on Maverick, exactly as I would like. It also correctly sorts by
# CPUTIME/RUNTIME/RSS.
# Given output from a run-* front end, extract the mean and median errors
# from each specified file, compute the avg mean/median error, and output
# a line giving the errors along with relevant parameters for that particular
# run.
import re
import argparse
# Equivalent of $(grep -m 1 $regex)
def findfirst(lines, regex):
for line in lines:
if re.search(regex, line):
return line
return ""
# Equivalent of $(grep $regex)
def find(lines, regex):
ret = []
for line in lines:
if re.search(regex, line):
ret.append(line)
return ret
def sub(line, r1, r2):
return re.sub(r1, r2, line)
def findsub(lines, find, r1, r2):
return sub(findfirst(lines, find), r1, r2)
# Equivalent of $(sed -n "/$r1/,/$r2/p")
def findsection(lines, r1, r2):
ret = []
start = False
for line in lines:
if not start and re.search(r1, line):
start = True
if start:
ret.append(line)
if start and re.search(r2, line):
start = False
return ret
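# Example (illustrative, not part of the original script): findsection returns
# the lines from the first match of r1 through the next match of r2, inclusive.
#
#   findsection(["a", "START", "x", "END", "b"], "START", "END")
#   # -> ["START", "x", "END"]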
def safefloat(arg):
arg = "%s" % arg
arg = re.sub("[^-0-9.]", "", arg)
try:
return float(arg)
except:
return 0.0
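# Example (illustrative): any character that is not a digit, '-', or '.' is
# stripped before parsing, and unparseable input falls back to 0.0.
#
#   safefloat("1,234.56 km")  # -> 1234.56
#   safefloat("N/A")          # -> 0.0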
parser = argparse.ArgumentParser(description="Extract results from a number of runs.")
def flag(*args):
parser.add_argument(*args, action="store_true")
flag("--no-sort")
flag("--sort-average", "--sort-avg", "--sort")
flag("--sort-mean")
flag("--sort-median")
flag("--sort-accuracy", "--sort-acc")
flag("--sort-acc161")
flag("--sort-numeval")
flag("--sort-numtrain")
flag("--sort-file", "--sort-name")
flag("--sort-runtime")
flag("--sort-cputime")
#It's tricky to sort the way we output it; need to sort on raw number
#and then convert to human-readable, but just sorting on the "cooked"
#number works if everything is in the same units e.g. GB.
flag("--sort-rss")
flag("--omit-average", "--omit-avg")
flag("--omit-mean")
flag("--omit-median")
flag("--omit-accuracy", "--omit-acc")
flag("--omit-acc161")
flag("--omit-numeval")
flag("--omit-numtrain")
flag("--omit-file", "--omit-name")
flag("--omit-runtime")
flag("--omit-cputime")
flag("--omit-rss")
flag("--verbose")
flag("--debug")
parser.add_argument("files", nargs="+")
pp = parser.parse_args()
if not pp.verbose:
#print " #Eval #Train %Acc. Acc@161 Mean Median Average Runtime Cputime RSS File"
if not pp.omit_numeval:
print " #Eval",
if not pp.omit_numtrain:
print " #Train",
if not pp.omit_accuracy:
print " %Acc.",
if not pp.omit_acc161:
print "Acc@161",
if not pp.omit_mean:
print " Mean ",
if not pp.omit_median:
print " Median",
if not pp.omit_average:
print "Average",
if not pp.omit_runtime:
print " Runtime",
if not pp.omit_cputime:
print " Cputime",
if not pp.omit_rss:
print " RSS ",
if not pp.omit_file:
print "File",
print ""
def formattime(time0, time0prompt):
# Convert time from "24 minutes 5 seconds" or "0 minutes 6 seconds"
# (old style) or "24 min 5 sec" or "6 sec" (new style) to a raw version
# of HH:MM:SS.
time0 = sub(time0, " (hour|hr)s? ", ":")
time0 = sub(time0, " min(ute)?s? ", ":")
time0 = sub(time0, " sec(ond)s?", "")
if pp.debug:
print "%s: [%s]" % (time0prompt, time0)
# Reformat to proper HH:MM:SS, making sure to have two digits for the
# minutes.
if not time0:
return 0, "NA"
else:
m = re.match("(?:(.*?):)?(?:(.*?):)?(.*?)$", time0)
# We may have only a number of seconds; check for this.
hrs = m.group(1)
if hrs == None:
hrs = 0
else:
hrs = float(hrs)
mins = m.group(2)
if mins == None:
mins = hrs
hrs = 0
else:
mins = float(mins)
secs = float(re.sub(" .*", "", m.group(3)))
totalsecs = hrs*3600 + mins*60 + secs
if hrs == 0 and mins == 0:
secs = "%04.1f" % secs
else:
secs += 0.5
while secs >= 60:
secs -= 60
mins += 1
while mins >= 60:
mins -= 60
hrs += 1
secs = "%02d" % secs
mins = "%02d" % mins
# secs = "%04.1f" % secs
if hrs == 0:
hrs = ""
else:
hrs = "%s:" % int(hrs)
return totalsecs, "%s%s:%s" % (hrs, mins, secs)
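# Example (illustrative, not part of the original script): new-style duration
# strings are reformatted to zero-padded [HH:]MM:SS, and an empty string maps
# to the "NA" placeholder.
#
#   formattime("2 min 5 sec", "Runtime0")   # -> (125.0, "02:05")
#   formattime("", "Runtime0")              # -> (0, "NA")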
def output(files):
retval = []
for fil in files:
if pp.debug:
print "[%s]" % fil
print ""
contents = [x.rstrip("\n") for x in open(fil).readlines()]
args = findfirst(contents, r"^Nohup script invoked as:.* --full-id ")
if args:
args = re.sub(r"^.*? --full-id (\S+) .*$", r"\1", args)
else:
args = findfirst(contents, "^Arguments:")
args = re.sub(r"^Arguments: ", "", args)
args = re.sub(r"--input(-corpus)? [^ ]*/", "", args)
args = re.sub(r" *--*([^ ]*) *([^-][^ ]*)", r" \1=\2", args)
args = re.sub(r" *--*([^ ]*)", r" \1", args)
args = re.sub(" ", ".", args)
args = re.sub("/", "-", args)
results = findsection(contents, "^Final results", "^Ending final results")
mean = findsub(results, "Mean true error", r"^.*distance = +(\S+).*", r"\1")
median = findsub(results, "Median true error", r"^.*distance = +(\S+).*", r"\1")
avg = None
if mean and median:
if pp.debug:
print "Extracted mean: [%s]" % mean
print "Extracted median: [%s]" % median
avg = "%.2f" % (0.5*(float(mean) + float(median)))
acc = findsub(results, "Percent correct at rank <= 1 =", r".*= (.*?)%.*", r"\1")
acc161 = findsub(results, "Accuracy@161 =", r".*= (.*?)%.*", r"\1")
numeval = findsub(results, "instances.total", r".*= (.*?) *$", r"\1")
numtrain = findsub(results, r"bytask.*num_training_documents_by_split\.training =",
r".*= (.*?) *$", r"\1")
finalstats = findsection(contents, "^Ending final results", "^Program running time/p")
# Note: This used to grep for 'Program running time:'; the elapsed time
        # below is similar but doesn't include time spent determining memory usage,
# etc.
runtimeraw = findsub(finalstats, "Total elapsed time since program start:",
".*: ", "")
runtimesecs, runtime = formattime(runtimeraw, "Runtime0")
cputimeraw = findsub(finalstats, "Total CPU time since program start with children:",
".*: ", "")
cputimesecs, cputime = formattime(cputimeraw, "Cputime0")
# Check for the old way of outputting and convert to new-style (618.63 MB).
# The old style had various lines beginning "Memory usage" for different
# usage stats, and had the resident size given as something like
#
# Memory usage, actual (i.e. resident set) (proc): 618,635,264 bytes
#
rss = findsub(finalstats, "Memory usage, actual", ".*: (.*?) bytes", r"\1")
if rss:
rss = re.sub(",", "", rss)
rss = float(rss)
rssbytes = rss
if rss >= 1000000000:
rss = "%.2fGB" % (rss/1000000000.0)
elif rss >= 1000000:
rss = "%.2fMB" % (rss/1000000.0)
else:
rss = "%.2fKB" % (rss/1000.0)
else:
# The new way, which lines like
#
# Memory usage: virtual: 13.51 GB, resident: 1.49 GB, Java heap: 59.61 MB
#
rss = findsub(finalstats, "Memory usage:", ".*resident: (.*?) (.B).*$",
r"\1\2")
rssbytes = 0
if len(rss) >= 2 and rss[-1] == 'B':
if rss[-2] == 'G':
rssbytes = 1000000000 * float(rss[0:-2])
elif rss[-2] == 'M':
rssbytes = 1000000 * float(rss[0:-2])
elif rss[-2] == 'K':
rssbytes = 1000 * float(rss[0:-2])
if mean or median or avg or acc:
skip = False
else:
skip = True
if not numeval:
numeval="NA"
if not numtrain:
numtrain="NA"
if not acc:
acc="NA"
if not acc161:
acc161="NA"
if not mean:
mean="NA"
if not median:
median="NA"
if not avg:
avg="NA"
if not rss:
rss="NA"
numeval="%7s" % numeval
numtrain="%8s" % numtrain
acc="%6s" % acc
acc161="%6s" % acc161
mean="%7s" % mean
median="%7s" % median
avg="%7s" % avg
runtime="%8s" % runtime
cputime="%8s" % cputime
rss="%8s" % rss
if pp.debug:
print "Args: [%s]" % args
print "Mean: [%s]" % mean
print "Median: [%s]" % median
print "Average: [%s]" % avg
print "%Accuracy: [%s]" % acc
print "Acc@161: [%s]" % acc161
print "#Eval: [%s]" % numeval
print "#Train: [%s]" % numtrain
print "Runtime: [%s]" % runtime
print "Cputime: [%s]" % cputime
print "RSS: [%s]" % rss
if pp.verbose:
print args, findfirst(results, "true error")
if avg:
print "%s Avg-mean-median true error distance = %s km" % (args, avg)
elif not skip:
#echo "$numeval $numtrain $acc $mean $median $avg $runtime $cputime $rss $args"
line = ""
if not pp.omit_numeval:
line += "%s " % numeval
if not pp.omit_numtrain:
line += "%s " % numtrain
if not pp.omit_accuracy:
line += "%s " % acc
if not pp.omit_acc161:
line += "%s " % acc161
if not pp.omit_mean:
line += "%s " % mean
if not pp.omit_median:
line += "%s " % median
if not pp.omit_average:
line += "%s " % avg
if not pp.omit_runtime:
line += "%s " % runtime
if not pp.omit_cputime:
line += "%s " | |
<reponame>Duseong/my_packages
'''
VisIt_code_generator.py
this code is designed for generating VisIt codes for command line interface option
(1) 2D map plotting (class Map_2D)
(2) Quick plotting using class Map_2D
MODIFICATION HISTORY:
dsj, 30, DEC, 2020: VERSION 1.00
- 2D map plotting code generation
dsj, 04, JAN, 2021: VERSION 2.00
- Add Quick plot class
'''
import numpy as np
import datetime
from PIL import ImageFont
from dsj.io.NetCDF_IO import Write_NC
import os
from dsj.plot.map import get_cbar_prop
class Map_2D(object):
'''
NAME:
Map_2D
PURPOSE:
Code generation for 2D map plotting
INPUTS:
filename: a file name which has values for plotting
field_name: variable name in the file for plotting
winsize: VisIt window size for plotting
slicing: in case the variable is a 3D array with (X,Y,Z)
Currently, only Intercept method is supported
slice_index: layer number on Z-axis to be plotted (31 is the SFC for CESM/CAM-chem 32L)
drawlines: draw coastlines, country boundaries, state boundaries, etc.
can be provided as either number or string
(0) no - no lines at all
(1) coast - coastlines
(2) country - country boundaries
(3) country_lake - country boundaries + lake (default)
(4) state - state boundaries
(5) state_lake - states boundaries + lake
lines_resolution: can be 10m (fine), 50m (intermediate), or 110m (coarse)
default=10m
lon_range: longitude ranges for 2D map
lat_range: latitude ranges for 2D map
show_grid: show grid
plot_position: position of 2D map plot in the window [left, right, bottom, top]
plot_min: minimum value of the plot
plot_max: maximum value of the plot
color_scale: can be 'Linear' or 'Log'
color_table: color table name
scale_factor: scaling factor for variable
colorbar: Colorbar toggle (True or False)
colorbar_position: position of colorbar in the window [left, bottom]
colorbar_orientation: orientation of the colorbar
- VerticalRight, VerticalLeft, HozirontalTop, HorizontalBottom
colorbar_scale_x: a scale factor for horizontal length of the colorbar
colorbar_scale_y: a scale factor for vertical length of the colorbar
colorbar_nticks: number of ticks in the colorbar
colorbar_ticks: locations of ticks
colorbar_labels: labels of ticks
number_format: number format used in colorbar values
e.g., %5.2f, %1.1e, %3i
https://en.wikipedia.org/wiki/Printf_format_string#Type_field
fort_family: font family for texts in the plot (Arial, Courier, Times)
unit: unit for the plot
title: plot title
output_filename: image filename to be saved
noadd_date: in case you don't want to add the creation date at the end of the filename
smoothing/grid-preserve?
'''
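    # Illustrative instantiation sketch (not part of the original module; the
    # file, field, and output names are assumptions):
    #
    #   m2d = Map_2D(filename='CAM_chem_output.nc', field_name='PM25_SRF',
    #                slicing=True, slice_index=31, unit='ug m-3',
    #                title='Surface PM2.5', output_filename='pm25_srf')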
def __init__(self, filename='', field_name='', winsize=[1600,900], slicing=False, slice_index=31,
drawlines='country', lines_resolution='10m', lon_range=[0,358.75], lat_range=[-90, 90],
show_grid=False, plot_position=[0.10, 0.95, 0.25, 0.90], plot_min=0, plot_max=None,
color_scale='Linear', color_table='myct_cont', scale_factor=1., colorbar=True,
colorbar_position=[0.12,0.13], colorbar_orientation='HorizontalBottom', colorbar_scale_x=3,
colorbar_scale_y=0.7, colorbar_nticks=5, colorbar_ticks=[], colorbar_labels=[],
number_format='%7.2f', font_family='Times', title='', unit='',
output_filename='', noadd_date=False ):
self.filename = filename
self.field_name = field_name
self.winsize = winsize
self.slicing = slicing
self.slice_index = slice_index
#self.drawlines = drawlines -> checked below
self.lines_resolution = lines_resolution
self.lon_range = lon_range
self.lat_range = lat_range
self.show_grid = show_grid
self.plot_min = plot_min
self.plot_max = plot_max
self.color_scale = color_scale
self.color_table = color_table
self.scale_factor = scale_factor
        self.colorbar = colorbar
        self.plot_position = plot_position
self.colorbar_position = colorbar_position
self.colorbar_orientation = colorbar_orientation
self.colorbar_scale_x = colorbar_scale_x
self.colorbar_scale_y = colorbar_scale_y
self.colorbar_nticks = colorbar_nticks
self.colorbar_ticks = colorbar_ticks
self.colorbar_labels = colorbar_labels
self.number_format = number_format
self.font_family = font_family
self.title = title
self.unit = unit
self.output_filename = output_filename
self.noadd_date = noadd_date
# ========================
# ===== Check Errors =====
# ========================
# --- Filename ---
if self.filename == '':
raise ValueError( '"filename" must be provided!' )
# --- longitude & latitude ranges ---
if (self.lon_range[0] < -180.) or (self.lon_range[1] > 360.):
print( 'Check "lon_range!"')
raise ValueError( 'current values: ', lon_range )
if (self.lat_range[0] < -90. ) or (self.lat_range[1] > 90.):
print( 'Check "lat_range!"')
raise ValueError( 'current values: ', lat_range )
if (self.plot_position[0] < 0.) or (self.plot_position[2] < 0.) or \
(self.plot_position[1] > 1.) or (self.plot_position[3] > 1.) or \
(self.plot_position[1] <= self.plot_position[0] ) or \
(self.plot_position[3] <= self.plot_position[2] ):
print( 'Check "plot_position!"')
raise ValueError( 'current values: ', plot_position )
if self.colorbar_orientation not in ['VerticalRight', 'VerticalLeft',
'HorizontalTop', 'HorizontalBottom']:
print( 'Check "colorbar_orientation!"')
raise ValueError( 'current values: ', colorbar_orientation )
if (self.colorbar_ticks != []) and (len(self.colorbar_ticks) != self.colorbar_nticks):
print( 'number of elements in colorbar_ticks must be the same as colorbar_nticks')
raise ValueError( 'current values: ', colorbar_ticks )
if (self.colorbar_labels != []) and (len(self.colorbar_labels) != self.colorbar_nticks):
print( 'number of elements in colorbar_labels must be the same as colorbar_nticks')
raise ValueError( 'current values: ', colorbar_labels )
# ===== END Check Errors =====
# =============================
# ===== Initial Setting ======
# =============================
# --- longitude range check ---
if np.min( lon_range ) < 0:
self.lon_range_flag = 0
else:
self.lon_range_flag = 1
# --- convert drawlines number to string ---
drawlines_conv_dict_num_str = {0:'no',
1:'coast',
2:'country',
3:'country_lake',
4:'state',
5:'state_lake'}
if type(drawlines) == int:
self.drawlines = drawlines_conv_dict_num_str[drawlines]
elif type(drawlines) == str:
self.drawlines = drawlines
else:
print( 'Check drawlines!' )
raise ValueError( 'It must be string or integer number' )
if self.drawlines != 'no': # Skip below if drawlines="no"
# --- drawlines file location ---
shp_base_dir = '/glade/u/home/cdswk/python/Shape_files/NaturalEarth/'
shp_files = { 'coast':'RES_physical/ne_RES_coastline.shp',
'country':'RES_cultural/ne_RES_admin_0_countries.shp',
'country_lake':'RES_cultural/ne_RES_admin_0_countries_lakes.shp',
'state':'RES_cultural/ne_RES_admin_1_states_provinces.shp',
'state_lake':'RES_cultural/ne_RES_admin_1_states_provinces_lakes.shp',
'bounding_box':'RES_physical/ne_RES_wgs84_bounding_box.shp' }
self.drawlines_file = shp_base_dir + shp_files[drawlines].replace('RES',lines_resolution)
if (self.lon_range_flag):
self.drawlines_file = self.drawlines_file.replace('.shp','_0_360_processed.shp')
# --- Construct output filename ---
if self.output_filename == '':
self.save_image = False
else:
self.save_image = True
if not noadd_date:
CTime = datetime.datetime.now()
CDate = str(CTime.year).zfill(4) + str(CTime.month).zfill(2) + \
str(CTime.day).zfill(2)
self.CDate = CDate
self.output_filename += '_c' + CDate
self.output_filename += '_'
# ===== END Initial Setting =====
self.code_gen()
def code_gen(self):
# ===== Check list =====
print( "# ===== Check list =====")
print( '# - Options -> Appearance -> uncheck use default system')
# ===== END Check list =====
# Buffer zone
print( "" )
print( "" )
print("# ▼▼▼▼▼▼▼ COPY BELOW ▼▼▼▼▼▼▼")
print( "" )
print( "" )
# ==============================================
# ===== print code for VisIt CLI interface =====
# ==============================================
# ===== Basic plotting =====
print( '# === Resize Window === ' )
print( 'ResizeWindow(1,' + str(self.winsize[0]) + ',' + str(self.winsize[1]) + ')' )
print( '# === Read files === ' )
print( 'OpenDatabase("localhost:' + self.filename + '"' + ', 0)' )
if self.drawlines != 'no':
print( 'OpenDatabase("localhost:' + self.drawlines_file + '"' + ', 0)' )
print( '# === Draw plots === ' )
print( 'ActivateDatabase("localhost:' + self.filename + '")')
print( 'DefineScalarExpression("' + self.field_name + '_scaled", "' + self.field_name + ' * ' \
+ str(self.scale_factor) + '")')
print( 'AddPlot("Pseudocolor", "' + self.field_name + '_scaled' + '", 1, 0)' )
if self.slicing:
print( 'AddOperator("Slice", 0)' )
print( 'SetActivePlots(0)' )
print( 'SliceAtts = SliceAttributes()' )
print( 'SliceAtts.originType = SliceAtts.Intercept # Point, Intercept, Percent, Zone, Node' )
print( 'SliceAtts.originPoint = (0, 0, 0)' )
print( 'SliceAtts.originIntercept = ' + str(self.slice_index) )
print( 'SliceAtts.originPercent = 0' )
print( 'SliceAtts.originZone = 0' )
print( 'SliceAtts.originNode = 0' )
print( 'SliceAtts.normal = (0, 0, 1)' )
print( 'SliceAtts.axisType = SliceAtts.ZAxis # XAxis, YAxis, ZAxis, Arbitrary, ThetaPhi' )
print( 'SliceAtts.upAxis = (0, 1, 0)' )
print( 'SliceAtts.project2d = 1' )
print( 'SliceAtts.interactive = 1' )
print( 'SliceAtts.flip = 0' )
print( 'SliceAtts.originZoneDomain = 0' )
print( 'SliceAtts.originNodeDomain = 0' )
print( 'SliceAtts.theta = 0' )
print( 'SliceAtts.phi = 90' )
print( 'SetOperatorOptions(SliceAtts, 0, 0)' )
if self.drawlines != 'no':
print( 'ActivateDatabase("localhost:' + self.drawlines_file + '")')
print( 'AddPlot("Mesh", "polygon", 1, 0)' )
# ===== END Basic plotting =====
# ===== Adjustplot and legend, remove redundant info. =====
print( '# === Plot Position ===' )
print( 'View2DAtts = View2DAttributes()' )
print( 'View2DAtts.viewportCoords = (' + str(self.plot_position)[1:-1] + ')' )
print( 'View2DAtts.windowCoords = (' + str(self.lon_range[0]) + ',' + str(self.lon_range[1]) + ',' + \
str(self.lat_range[0]) + ',' + str(self.lat_range[1]) + ' )' )
print( 'SetView2D(View2DAtts)' )
print( '# === Toggle information about user, database, and time ===' )
print( 'AnnotationAtts = AnnotationAttributes()' )
print( 'AnnotationAtts.userInfoFlag = 0' )
print( 'AnnotationAtts.databaseInfoFlag = 0' )
print( 'AnnotationAtts.timeInfoFlag = 0' )
# --- Longitude & Latitude ---
print( '# === Longitude & Latitude adjustment ===' )
print ('# --- Longitude | |
from __future__ import absolute_import
from __future__ import print_function
from __future__ import division
import functools
import math
import numpy as np
from collections import OrderedDict
import veriloggen as vg
import nngen.basic_types as bt
import nngen.util as util
class slice_(bt._Operator):
"""
Create a sliced tensor with a similar API to the numpy slice.
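Example (an illustrative sketch; 'x' stands for any upstream nngen operator
or placeholder with shape (1, 8, 8, 64)):

    # equivalent of x[:, 2:6, 2:6, :] in numpy
    y = slice_(x,
               begins=(0, 2, 2, 0),
               ends=(1, 6, 6, 64),
               strides=(1, 1, 1, 1))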
"""
input_chainable = False
output_chainable = False
def __sub_str__(self):
begins = str(self.begins)
ends = str(self.ends)
strides = str(self.strides)
par = ' par:%d' % self.par if self.par > 1 else ''
value_ram_size = (' value_ram_size:%d' % self.value_ram_size
if self.value_ram_size is not None else '')
out_ram_size = (' out_ram_size:%d' % self.out_ram_size
if self.out_ram_size is not None else '')
return (' begins:%s ends:%s strides:%s %s%s%s' %
(begins, ends, strides, par, value_ram_size, out_ram_size))
def __init__(self, value, begins, ends, strides,
dtype=None, name=None, par=1,
value_ram_size=None, out_ram_size=None):
if not isinstance(begins, (tuple, list)):
raise TypeError('begins must be tuple or list.')
if not isinstance(ends, (tuple, list)):
raise TypeError('ends must be tuple or list.')
if not isinstance(strides, (tuple, list)):
raise TypeError('strides must be tuple or list.')
if len(value.shape) != len(begins):
raise ValueError('length mismatch between value.shape and begins: %d != %d' %
(len(value.shape), len(begins)))
if len(value.shape) != len(ends):
raise ValueError('length mismatch between value.shape and ends: %d != %d' %
(len(value.shape), len(ends)))
if len(value.shape) != len(strides):
raise ValueError('length mismatch between value.shape and strides: %d != %d' %
(len(value.shape), len(strides)))
for begin in begins:
begin = int(begin)
if not isinstance(begin, int):
raise TypeError('values of begins must be int, not %s' % str(type(begin)))
for end in ends:
end = int(end)
if not isinstance(end, int):
raise TypeError('values of ends must be int, not %s' % str(type(end)))
for stride in strides:
stride = int(stride)
if not isinstance(stride, int):
raise TypeError('values of strides must be int, not %s' % str(type(stride)))
if strides[-1] != 1 and par != 1:
raise ValueError("par must be 1 when strides[-1] is not 1")
if value_ram_size is not None and value_ram_size < 1:
raise ValueError('value_ram_size must be greater than 0')
if out_ram_size is not None and out_ram_size < 1:
raise ValueError('out_ram_size must be greater than 0')
# delegate a shape calculation to numpy
slices = to_slices(begins, ends, strides)
shape = np.zeros(value.shape)[slices].shape
bt._Operator.__init__(self, value,
dtype=dtype, shape=shape, name=name, par=par)
self.begins = tuple(begins)
self.ends = tuple(ends)
self.strides = tuple(strides)
# attribute
self.value_ram_size = value_ram_size
self.out_ram_size = out_ram_size
slice_.attribute(self, par, value_ram_size, out_ram_size)
def attribute(self, par=None, value_ram_size=None, out_ram_size=None):
if par is not None:
if (par - 1) & par != 0:
raise ValueError('par must be power of 2.')
self.par = par
for arg in self.args:
arg.add_alignment_request(self.par)
self.add_alignment_request(self.par)
if value_ram_size is not None:
if value_ram_size < 1:
raise ValueError('value_ram_size must be greater than 0')
self.value_ram_size = value_ram_size
if out_ram_size is not None:
if out_ram_size < 1:
raise ValueError('out_ram_size must be greater than 0')
self.out_ram_size = out_ram_size
def get_required_rams(self):
act = self.args[0]
act_shape = act.get_aligned_shape()
out_shape = self.get_aligned_shape()
input_min_size = ((act_shape[-1] // self.par) *
(act_shape[-2] if len(act_shape) > 1 else 1) * 2)
if self.value_ram_size is not None and input_min_size < self.value_ram_size:
input_min_size = self.value_ram_size
input_width = act.get_ram_width() * self.par
output_min_size = ((out_shape[-1] // self.par) *
(out_shape[-2] if len(out_shape) > 1 else 1) * 2)
if self.out_ram_size is not None and output_min_size < self.out_ram_size:
output_min_size = self.out_ram_size
output_width = self.get_ram_width() * self.par
inputs = []
inputs.append((input_width, input_min_size))
outputs = []
outputs.append((output_width, output_min_size))
temps = []
return inputs, outputs, temps
def get_stream_hash(self):
base = bt._Operator.get_stream_hash(self)
rank = len(self.shape)
return (base, rank, self.par)
def get_stream_func(self):
def func(strm):
arg = self.args[0]
datawidth = arg.get_op_width()
vec_datawidth = datawidth * self.par
point = arg.get_op_point()
signed = arg.get_signed()
vec_act_var = strm.source(datawidth=vec_datawidth, signed=False)
strm.sink(vec_act_var)
return func
def get_control_param_values(self):
act = self.args[0]
act_shape = act.get_aligned_shape()
act_num_ch = act_shape[-1]
out_shape = self.get_aligned_shape()
out_num_ch = out_shape[-1]
act_offset_base = bt.to_byte(act_num_ch * act.get_ram_width())
act_offset_begins = []
act_offset_strides = []
for i, (begin, stride) in enumerate(zip(reversed(self.begins[:-2]), reversed(self.strides[:-2]))):
mul = functools.reduce(lambda x, y: x * y, act_shape[-i - 2:-1], 1)
act_offset_begin = act_offset_base * mul * begin
act_offset_begins.append(act_offset_begin)
act_offset_stride = act_offset_base * mul * stride
act_offset_strides.append(act_offset_stride)
act_offset_begins.reverse()
act_offset_strides.reverse()
act_read_size = ((act_num_ch // self.par) *
(act_shape[-2] if len(act_shape) > 1 else 1))
out_offset_base = bt.to_byte(out_num_ch * self.get_ram_width())
out_offset_strides = []
for i in range(len(out_shape) - 2):
mul = functools.reduce(lambda x, y: x * y, out_shape[-i - 2:-1], 1)
out_offset_stride = out_offset_base * mul
out_offset_strides.append(out_offset_stride)
out_offset_strides.reverse()
out_write_size = ((out_num_ch // self.par) *
(out_shape[-2] if len(out_shape) > 1 else 1))
stream_size = out_num_ch // self.par
if len(self.strides) > 1:
stream_stride = self.strides[-2] * (act_num_ch // self.par)
stream_local = self.begins[-2] * (act_num_ch // self.par) + self.begins[-1]
else:
stream_stride = 0
stream_local = self.begins[-1]
return OrderedDict([('act_shape', act_shape),
('out_shape', out_shape),
('act_begins', self.begins),
('act_strides', self.strides),
('act_offset_begins', act_offset_begins),
('act_offset_strides', act_offset_strides),
('act_read_size', act_read_size),
('out_offset_strides', out_offset_strides),
('out_write_size', out_write_size),
('stream_size', stream_size),
('stream_stride', stream_stride),
('stream_local', stream_local)])
def control_sequence(self, fsm):
act_ram = self.input_rams[0]
out_ram = self.output_rams[0]
act_base_offset = self.m.Wire(self._name('act_base_offset'),
self.maxi.addrwidth, signed=True)
act_offsets = [self.m.Reg(self._name('act_offset_%d' % i),
self.maxi.addrwidth, initval=0, signed=True)
for i, _ in enumerate(self.act_shape[:-2])]
if act_offsets:
v = act_offsets[0]
for act_offset in act_offsets[1:]:
v += act_offset
act_base_offset.assign(v)
else:
act_base_offset.assign(0)
out_base_offset = self.m.Wire(self._name('out_base_offset'),
self.maxi.addrwidth, signed=True)
out_offsets = [self.m.Reg(self._name('out_offset_%d' % i),
self.maxi.addrwidth, initval=0, signed=True)
for i, _ in enumerate(self.out_shape[:-2])]
if out_offsets:
v = out_offsets[0]
for out_offset in out_offsets[1:]:
v += out_offset
out_base_offset.assign(v)
else:
out_base_offset.assign(0)
counts = [self.m.Reg(self._name('count_%d' % i),
self.maxi.addrwidth, initval=0)
for i, _ in enumerate(self.act_shape[:-2])]
prev_counts = [self.m.Reg(self._name('prev_count_%d' % i),
self.maxi.addrwidth, initval=0)
for i, _ in enumerate(self.act_shape[:-2])]
stream_act_local = self.m.Reg(self._name('stream_act_local'),
self.maxi.addrwidth, initval=0)
stream_out_local = self.m.Reg(self._name('stream_out_local'),
self.maxi.addrwidth, initval=0)
comp_count = self.m.Reg(self._name('comp_count'),
self.maxi.addrwidth, initval=0)
out_count = self.m.Reg(self._name('out_count'),
self.maxi.addrwidth, initval=0)
act_page = self.m.Reg(self._name('act_page'), initval=0)
act_page_comp_offset = self.m.Reg(self._name('act_page_comp_offset'),
self.maxi.addrwidth, initval=0)
act_page_dma_offset = self.m.Reg(self._name('act_page_dma_offset'),
self.maxi.addrwidth, initval=0)
out_page = self.m.Reg(self._name('out_page'), initval=0)
out_page_comp_offset = self.m.Reg(self._name('out_page_comp_offset'),
self.maxi.addrwidth, initval=0)
out_page_dma_offset = self.m.Reg(self._name('out_page_dma_offset'),
self.maxi.addrwidth, initval=0)
act_page_size = act_ram.length // 2
out_page_size = out_ram.length // 2
skip_read_act = self.m.Reg(self._name('skip_read_act'), initval=0)
skip_comp = self.m.Reg(self._name('skip_comp'), initval=0)
skip_write_out = self.m.Reg(self._name('skip_write_out'), initval=0)
# --------------------
# initialization phase
# --------------------
# ReadAct: offset
for act_offset, act_offset_begin in zip(act_offsets, self.act_offset_begins):
fsm(
act_offset(act_offset_begin)
)
# ReadAct: double buffer control
fsm(
act_page(0),
act_page_comp_offset(0),
act_page_dma_offset(0)
)
# WriteOutput: offset
for out_offset in out_offsets:
fsm(
out_offset(0)
)
out_offset = out_base_offset
# WriteOutput: double buffer control
fsm(
out_page(0),
out_page_comp_offset(0),
out_page_dma_offset(0)
)
# counter
fsm(
[count(0) for count in counts],
[prev_count(0) for prev_count in prev_counts]
)
# double buffer control
fsm(
skip_read_act(0),
skip_comp(0),
skip_write_out(1)
)
fsm(
out_count(0)
)
state_init = fsm.current
fsm.goto_next()
# --------------------
# ReadAct phase
# --------------------
state_read_act = fsm.current
act_gaddr = self.arg_objaddrs[0] + act_base_offset
bt.bus_lock(self.maxi, fsm)
act_laddr = act_page_dma_offset
begin_state_read = fsm.current
fsm.goto_next()
bt.dma_read(self.maxi, fsm, act_ram, act_laddr,
act_gaddr, self.act_read_size, port=1)
end_state_read = fsm.current
# --------------------
# Comp phase
# --------------------
state_comp = fsm.current
# Stream Control FSM
comp_fsm = vg.FSM(self.m, self._name('comp_fsm'), self.clk, self.rst)
comp_state_init = comp_fsm.current
comp_fsm.If(fsm.state == state_comp, vg.Not(skip_comp)).goto_next()
fsm.If(comp_fsm.state == comp_state_init).goto_next()
# waiting for previous DMA write completion
bt.dma_wait_write_idle(self.maxi, comp_fsm)
# local address
comp_fsm(
stream_act_local(self.stream_local),
stream_out_local(0)
)
act_page_comp_offset_buf = self.m.Reg(self._name('act_page_comp_offset_buf'),
self.maxi.addrwidth, initval=0)
out_page_comp_offset_buf = self.m.Reg(self._name('out_page_comp_offset_buf'),
self.maxi.addrwidth, initval=0)
comp_fsm(
act_page_comp_offset_buf(act_page_comp_offset),
out_page_comp_offset_buf(out_page_comp_offset)
)
comp_fsm.goto_next()
# busy check
self.stream.source_join(comp_fsm)
# set_source
name = list(self.stream.sources.keys())[0]
local = stream_act_local + act_page_comp_offset_buf
if len(self.out_shape) > 1:
pat = ((self.stream_size, self.act_strides[-1]),
(self.out_shape[-2], self.stream_stride))
else:
pat = ((self.stream_size, self.act_strides[-1]),)
self.stream.set_source_pattern(comp_fsm, name, act_ram,
local, pat)
comp_fsm.set_index(comp_fsm.current - 1)
# set_sink
name = list(self.stream.sinks.keys())[0]
local = stream_out_local + out_page_comp_offset_buf
if len(self.out_shape) > 1:
pat = ((self.stream_size, 1),
(self.out_shape[-2], self.stream_size))
else:
pat = ((self.stream_size, 1),)
self.stream.set_sink_pattern(comp_fsm, name, out_ram,
local, pat)
# stream run (async)
self.stream.run(comp_fsm)
comp_fsm.goto_init()
# sync with WriteOut control
comp_fsm.seq.If(fsm.state == state_init)(
comp_count(0)
)
comp_fsm.seq.If(self.stream.source_stop)(
comp_count.inc()
)
# --------------------
# WriteOut phase
# --------------------
state_write_out = fsm.current
# sync with Comp control
fsm.If(comp_count > out_count).goto_next()
out_laddr = out_page_dma_offset
out_gaddr = self.objaddr + out_offset
bt.bus_lock(self.maxi, fsm)
bt.dma_write(self.maxi, fsm, out_ram, out_laddr,
out_gaddr, self.out_write_size, port=1, use_async=True)
bt.bus_unlock(self.maxi, fsm)
fsm(
out_count.inc()
)
fsm.goto_next()
state_write_out_end = fsm.current
fsm.If(skip_write_out).goto_from(state_write_out, state_write_out_end)
# --------------------
# update for next iteration
# --------------------
# ReadAct: count
cond = None
for size, count in zip(reversed(self.out_shape[:-2]), reversed(counts)):
fsm.If(cond)(
count.inc()
)
fsm.If(cond, count >= size - 1)(
count(0)
)
if cond is not None:
cond = vg.Ands(cond, count >= size - 1)
else:
cond = count >= | |
# ReactivityProfile.py
###############################################################################
#
# Basic info for reading in profile files from shapemapper and
# performing various operations on them
#
# <NAME>
# Copyright 2018
#
# This file is licensed under the terms of the MIT license
#
# Change-log
#
#
###############################################################################
# import other packages
import sys
import numpy as np
ntorder = ('A','C','G','U')
class ReactivityProfile(object):
""" Object containing reactivity profile data
Can contain:
sequence = nt sequence
nts = nt numbering
rawprofile = rxn rate
rawerror = estimated error of rxn rate
subprofile = background subtracted profile
suberror = background subtracted errors
normprofile = normalized profile
normerror = normed errors
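Example (minimal usage sketch; 'sample_profile.txt' is a hypothetical
shapemapper profile file, not part of this repository):
>>> rp = ReactivityProfile('sample_profile.txt')
>>> shape, err = rp.profile('norm', err=True)  # normalized profile + errors
>>> sub = rp.cutProfile(start=10, stop=50)     # nts 10-50, renumbered from 1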
"""
def __init__(self, plusfile = None, minusfile = None, **kwargs):
self.sequence = None
self.nts = None
self.rawprofile = None
self.rawerror = None
self.backprofile = None
self.backerror = None
self.normprofile = None
self.normerror = None
self.subprofile = None
self.suberror = None
# set the default normMethod
self.normMethod = self.normBox
# set the default profile
self.defaultProfile = 'norm'
if plusfile:
self.readProfile(plusfile, **kwargs)
if minusfile:
self.backgroundSubtract(minusfile, **kwargs)
def initArray(self, name, mollen=None):
if mollen is None:
mollen = len(self.profile())
prof = np.empty(mollen)
prof[:] = np.nan
setattr(self, name+'profile', prof)
err = np.empty(mollen)
err[:] = np.nan
setattr(self, name+'error', err)
def copy(self):
out = ReactivityProfile()
out.sequence = np.copy(self.sequence)
out.nts = np.copy(self.nts)
out.rawprofile = np.copy(self.rawprofile)
out.backprofile = np.copy(self.backprofile)
out.subprofile = np.copy(self.subprofile)
out.normprofile = np.copy(self.normprofile)
out.rawerror = np.copy(self.rawerror)
out.backerror = np.copy(self.backerror)
out.suberror = np.copy(self.suberror)
out.normerror = np.copy(self.normerror)
out.normMethod = self.normMethod
# set the default profile
out.defaultProfile = self.defaultProfile
return out
def cutProfile(self, start=None, stop=None):
"""return new ReactivityProfile Object
Start is the nt number (typically 1-based)
Stop is the nt number (inclusive, typically 1-based)
"""
out = self.copy()
try:
start = start-out.nts[0]
except TypeError:
assert start is None
try:
stop = stop+1-out.nts[0]
except TypeError:
assert stop is None
sel = slice(start, stop)
out.sequence = out.sequence[sel]
out.nts = np.arange(1, len(out.sequence)+1)
out.rawprofile = out.rawprofile[sel]
out.backprofile = out.backprofile[sel]
out.subprofile = out.subprofile[sel]
out.normprofile = out.normprofile[sel]
out.rawerror = out.rawerror[sel]
out.backerror = out.backerror[sel]
out.suberror = out.suberror[sel]
out.normerror = out.normerror[sel]
return out
def profile(self, name = None, err=False):
"""return either the default or specified profile"""
if name is None:
name = self.defaultProfile
if not err:
return getattr(self, name+'profile')
else:
return getattr(self, name+'profile'), getattr(self, name+'error')
def readProfile(self, filename, **kwargs):
"""determine from the file extension what type of data it has, and then read"""
ext = filename.split('.')[-1].lower()
if ext == 'txt':
# assume it has profile format
prof = self.readProfileFile(filename, **kwargs) #
elif ext == 'csv':
# test whether it is a normal or pivoted file
with open(filename,'rU') as f:
spl = f.readline().split(',')[2]
if spl=='number':
self.readMutationCSV(filename, **kwargs) #
else:
self.readMutationCSVpivot(filename, **kwargs) #
elif ext == 'map':
self.readMapFile(filename, **kwargs) #
elif ext == 'tab':
prof = self.readTabFile(filename, **kwargs)
else:
raise IOError('unrecognized profile file extension :: %s' % ext)
self.convertSequence()
def convertSequence(self):
"""Make sure the sequence contains 'Us' vs. T'"""
mask = self.sequence == 'T'
self.sequence[mask] = 'U'
def readProfileFile(self, filepath, bg=0.02, ignorents =[], **kwargs):
"""read in Profile file output by new shapemapper"""
seq = []
num = []
plus = []
minus = []
pdepth = []
mdepth = []
shape = []
shapeerr = []
with open(filepath, 'rU') as f:
header = f.readline().split()
header = [x.lower() for x in header]
nt_idx = header.index('nucleotide')
seq_idx = header.index('sequence')
pd_idx = header.index('modified_effective_depth')
pr_idx = header.index('modified_rate')
md_idx = header.index('untreated_effective_depth')
mr_idx = header.index('untreated_rate')
s_idx = header.index('norm_profile')
se_idx = header.index('norm_stderr')
try:
for line in f:
spl=line.split()
num.append( spl[nt_idx] )
seq.append(spl[seq_idx].upper())
pdepth.append( spl[pd_idx] )
plus.append( spl[pr_idx] )
mdepth.append( spl[md_idx] )
minus.append( spl[mr_idx] )
shape.append( spl[s_idx] )
shapeerr.append( spl[se_idx] )
except:
raise IOError("Unrecognized profile file format")
self.sequence = np.array(seq)
self.nts = np.array(num, dtype=int)
self.backprofile = np.array(minus, dtype=float)
self.rawprofile = np.array(plus, dtype=float)
self.normprofile = np.array(shape, dtype=float)
self.normerror = np.array(shapeerr, dtype=float)
# compute rawerror
arr = np.array(pdepth, dtype=float)
self.rawprofile[arr<1000] = np.nan
self.rawerror = np.sqrt(self.rawprofile/arr)
arr = np.array(mdepth, dtype=float)
self.backprofile[arr<1000] = np.nan
self.backerror = np.sqrt(self.backprofile/arr)
self.backgroundSubtract(normalize=False)
with np.errstate(invalid='ignore'):
mask = (self.backprofile>bg) | np.isnan(self.normprofile) | (self.normprofile<-10)
self.subprofile[mask] = np.nan
self.normprofile[mask] = np.nan
self.maskignore(ignorents)
return None
def maskignore(self, ignorents):
mask = np.zeros(self.nts.size, dtype=bool)
for i in ignorents:
mask = mask | (self.nts == i)
self.subprofile[mask] = np.nan
self.normprofile[mask] = np.nan
self.backprofile[mask] = np.nan
self.rawprofile[mask] = np.nan
def readMutationCSV(self, filepath, exclude = [], **kwargs):
"""read in a mutation count"""
with open(filepath,'rU') as f:
for line in f:
line = line.strip(', \n')
spl = line.split(',')
if spl[2] == 'number':
nucs = np.array(spl[3:], dtype=int)
mutations = np.zeros(len(nucs))
excludeCount = np.zeros(len(nucs))
elif spl[2] == 'sequence':
seq = np.array(spl[3:])
elif ('del' in spl[2] or '->' in spl[2]):
if not (spl[2][0:3] in exclude or spl[2] in exclude):
mutations+=np.array(spl[3:], dtype=float)
else:
# tally excluded events so that we can subtract them from depth
excludeCount += np.array(spl[3:], dtype=float)
elif spl[2] == 'depth':
depth = np.array(spl[3:], dtype = float)
break
depth -= excludeCount
# find locations where depth is zero and set to nan
depth[depth==0] = np.nan
# find locations where mutation rate is zero and set to nan
mutations[mutations==0] = np.nan
mutations /= depth
stderr = np.sqrt(mutations)/np.sqrt(depth)
self.sequence = seq
self.nts = nucs
self.rawprofile = mutations
self.rawerror = stderr
def readMutationCSVpivot(self, filepath, exclude=[], **kwargs):
"""read in a pivoted mutation count file"""
f = open(filepath,'rU')
# pop off the first two lines
for i in range(2):
f.readline()
dkey = f.readline().strip(', \n').split(',')
data = [[] for i in dkey]
for line in f:
line = line.strip(', \n')
if len(line)==0:
break
spl = line.split(',')
for i,v in enumerate(spl):
data[i].append(v)
for i,k in enumerate(dkey):
if k == 'number':
nucs = np.array(data[i], dtype=int)
excludeCount = np.zeros(len(nucs))
mutations = np.zeros(len(nucs))
elif k== 'sequence':
seq = np.array(data[i])
elif ('del' in k or '->' in k):
if not (k[0:3] in exclude or k in exclude):
mutations += np.array(data[i], dtype=float)
else:
excludeCount += np.array(data[i], dtype=float)
elif k == 'depth':
depth = np.array(data[i], dtype=float)
break
depth -= excludeCount
# find locations where depth is zero and set to nan
depth[depth==0] = np.nan
# find locations where mutation rate is zero and set to nan
mutations[mutations==0] = np.nan
mutations /= depth
stderr = np.sqrt(mutations)/np.sqrt(depth)
self.sequence = seq
self.nts = nucs
self.rawprofile = mutations
self.rawerror = stderr
def readMapFile(self, filepath):
"""convert the .map file to a list of tuples"""
seq, nucs, prof, err = '', [], [], []
with open(filepath) as f:
for line in f:
spl = line.split()
nucs.append(int(spl[0]))
prof.append(float(spl[1]))
err.append(float(spl[2]))
seq+=spl[3]
seq = np.array(list(seq))
nucs = np.array(nucs)
prof = np.array(prof)
# convert -999 to nan
prof[prof <= -10] = np.nan
err = np.array(err)
# convert errors to nan as well
err[np.isnan(prof)] = np.nan
self.sequence = seq
self.nts = nucs
self.normprofile = prof
self.normerror = err
def readTabFile(self, filepath, bg=0.02, **kwargs):
"""read in tab file"""
with open(filepath, 'rU') as f:
header = f.readline().split('\t')
header = [x.strip() for x in header]
data = [ [] for i in header ]
for line in f:
spl = line.split('\t')
for i,v in enumerate(spl):
data[i].append(v)
self.sequence = np.array(data[1])
self.nts = np.array(data[0], dtype=int)
idx = header.index('rx rate')
arr = np.array(data[idx], dtype=float)
arr[arr<-10] = np.nan
self.rawprofile = arr
idx = header.index('bg rate')
arr = np.array(data[idx], dtype=float)
arr[arr<-10] = np.nan
self.backprofile = arr
idx = header.index('Normalized Reactivity')
arr = np.array(data[idx], dtype=float)
arr[arr<-10] = np.nan
self.normprofile = arr
idx = header.index('rx depth')
arr = np.array(data[idx], dtype=float)
arr[arr<1000] = 0
self.rawprofile[arr<1000] = np.nan
self.rawerror = np.sqrt(self.rawprofile/arr)
idx = header.index('bg depth')
arr = np.array(data[idx], dtype=float)
arr[arr<1000] = 0
self.backprofile[arr<1000] = np.nan
self.backerror = np.sqrt(self.backprofile/arr)
self.backgroundSubtract(normalize=False)
with np.errstate(invalid='ignore'):
mask = (self.backprofile>bg) | np.isnan(self.normprofile) | (self.normprofile<-10)
self.subprofile[mask] = np.nan
self.normprofile[mask] = np.nan
def normalize(self, DMS=False, byNT=False, normfactors = None, errfactors = None, **kwargs):
| |
import pandas as pd
import codecs as cd
import numpy as np
#### NEW ROUTINES JAN2020 ####
def header_keywords(X):
keywords = {}
defaults = {}
keywords['units'] = ('Units of measure ','Units of measure:')
defaults['units'] = 'SI'
keywords['mass'] = ('Mass:','Mass ','Mass =')
defaults['mass'] = 'N/A'
keywords['pause at reversal'] = ('Pause at reversal fields:','PauseRvrsl','PauseNtl =','PauseNtl ','PauseNtl =')
defaults['pause at reversal'] = 'N/A'
keywords['averaging time'] = ('Averaging time ','Averaging time:','Averaging time ','Averaging time =','Averaging time =')
defaults['averaging time'] = 'N/A'
keywords['pause at calibration'] = ('Pause at calibration field:','PauseCal','PauseCal =')
defaults['pause at calibration'] = 'N/A'
keywords['pause at saturation'] = ('Pause at saturation field:','PauseSat','PauseSat =')
defaults['pause at saturation'] = 'N/A'
keywords['field slewrate'] = ('SlewRate ','SlewRate =')
defaults['field slewrate'] = 1.0
keywords['saturation field'] = ('Saturation field:','HSat =','HSat')
defaults['saturation field'] = 'N/A'
keywords['Hb2'] = ('Max Hu field:','Hb2','Hb2 =')
defaults['Hb2'] = 'N/A'
keywords['Hb1'] = ('Min Hu field:','Hb1','Hb1 =')
defaults['Hb1'] = 'N/A'
keywords['Hc2'] = ('Max Hc field:','Hc2','Hc2 =')
defaults['Hc2'] = 'N/A'
keywords['Hc1'] = ('Min Hc field:','Hc1','Hc1 =')
defaults['Hc1'] = 0.0
keywords['number of FORCs'] = ('Number of FORCs:','NForc','NCrv')
defaults['number of FORCs'] = 'N/A'
keywords['number of rows'] = ('Number of data','NData')
defaults['number of rows'] = 'N/A'
keywords['calibration field'] = ('HCal','HCal =','HCal =')
defaults['calibration field'] = 'N/A'
X['keywords'] = keywords
X['defaults'] = defaults
return X
def line_num_for_phrase_in_file(phrase, filename):
with cd.open(filename,'r',encoding='latin9') as f:
for (i, line) in enumerate(f):
if phrase in line:
return i
return False
def line_num_for_starting_data_line(filename):
with cd.open(filename,'r',encoding='latin9') as f:
for (i, line) in enumerate(f):
if (line.startswith('+')) or (line.startswith('-')):
return i
return False
def line_for_phrase_in_file(phrase, filename):
with cd.open(filename,'r',encoding='latin9') as f:
for (i, line) in enumerate(f):
if phrase in line:
return line
return False
def end_of_line_for_phrase_in_file(phrase, filename):
with cd.open(filename,'r',encoding='latin9') as f:
for (i, line) in enumerate(f):
if phrase in line:
line = line[len(phrase):]
return line.strip()
return -1
def parse_digits(string,keywords0):
string = string[len(keywords0):]
if any(char.isdigit() for char in string):
idxS = next(i for i,j in list(enumerate(string,1))[::1] if j.isdigit())
idxF = next(i for i,j in list(enumerate(string,1))[::-1] if j.isdigit())
if idxS>1:
if string[idxS-2]=='-':
idxS -= 1
return float(string[idxS-1:idxF])
else:
return False
def result_by_condition(listOfElements, condition):
''' Return the first element in the list that satisfies condition(),
together with its index '''
for i in range(len(listOfElements)):
if condition(listOfElements[i]) == True:
output = listOfElements[i]
return output, i
def parse_header(keywords,defaults,key,fn):
keywords0 = keywords[key]
defaults0 = defaults[key]
if not isinstance(keywords0, tuple):
keywords0 = (keywords0,'dum')
output = []
for string in keywords0:
output.append(line_for_phrase_in_file(string, fn))
if not any(output):
return defaults0
line, idx = result_by_condition(output, lambda x: x != False)
result = parse_digits(line,keywords0)
if result is False:
result = line[len(keywords0[idx]):].strip()
elif 'mT' in line:
result /= 1000.0
return result
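# Example (sketch): parse_header(X['keywords'], X['defaults'], 'saturation field', X['fn'])
# returns the numeric value found after any of the 'HSat'/'Saturation field' keyword
# variants, converted from mT to T when the header reports mT, or the default value
# when the keyword is absent from the file header.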
def parse_measurements(X):
Hcalib = parse_header(X['keywords'],X['defaults'],'calibration field',X['fn'])
skiprows = line_num_for_phrase_in_file('Time Stamp',X['fn'])
if skiprows is not False: #timestamp exists - new file format
df = pd.read_csv(X['fn'],skiprows=skiprows,encoding='latin9')
Htemp = np.array(df['Field (µ0H) [T]'])
Mtemp = np.array(df['Moment (m) [A·m²]'])
ttemp = np.array(df['Time Stamp [s]'])
#if Hcalib == 'N/A':
Hcalib = Htemp[0]
idx = np.argwhere((Htemp[1:]<Htemp[:-1]) & (np.abs(Htemp[1:]-Hcalib)/Hcalib>0.001)) #index of all calibration points
#create last FORC first
M = Mtemp[int(idx[-1])+1:]
H = Htemp[int(idx[-1])+1:]
Ft = ttemp[int(idx[-1])+1:]
Hr = np.ones(len(M))*Htemp[int(idx[-1])+1]
Fk = np.ones(len(M))*len(idx)
Fj = np.arange(len(M))+1
for i in range(len(idx)-1):
Mappend = Mtemp[int(idx[i])+1:int(idx[i+1])]
M = np.append(M,Mappend)
H = np.append(H,Htemp[int(idx[i])+1:int(idx[i+1])])
Hr = np.append(Hr,np.ones(len(Mappend))*Htemp[int(idx[i])+1])
Ft = np.append(Ft,ttemp[int(idx[i])+1:int(idx[i+1])])
Fk = np.append(Fk,np.ones(len(Mappend))+i)
Fj = np.append(Fj,np.arange(len(Mappend))+1)
else:
skiprows = line_num_for_starting_data_line(X['fn'])
if skiprows>200:
skiprows = line_num_for_phrase_in_file(',', X['fn'])
nrows = parse_header(X['keywords'],X['defaults'],'number of rows',X['fn'])
if nrows == "N/A":
f = cd.open(X['fn'],'r',encoding='latin9')
Xf = f.read()
nrows = Xf.count(",")
df = pd.read_csv(X['fn'],skiprows=skiprows,encoding='latin9',header=None,nrows=nrows)
temp = np.array(df)
Htemp = temp[:,0]
Mtemp = temp[:,1]
#if Hcalib == 'N/A':
Hcalib = Htemp[0]
#idx = np.argwhere((Htemp[1:]<Htemp[:-1]) & (np.abs(Htemp[1:]-Hcalib)>0.005)) #index of all calibration points
idx = np.argwhere((Htemp[1:]<Htemp[:-1]) & (np.abs(Htemp[1:]-Hcalib)/Hcalib>0.001)) #index of all calibration points - testing
#create last FORC first
M = Mtemp[int(idx[-1])+1:]
H = Htemp[int(idx[-1])+1:]
Hr = np.ones(len(M))*Htemp[int(idx[-1])+1]
Fk = np.ones(len(M))*len(idx)
Fj = np.arange(len(M))+1
for i in range(len(idx)-1):
Mappend = Mtemp[int(idx[i])+1:int(idx[i+1])]
M = np.append(M,Mappend)
H = np.append(H,Htemp[int(idx[i])+1:int(idx[i+1])])
Hr = np.append(Hr,np.ones(len(Mappend))*Htemp[int(idx[i])+1])
Fk = np.append(Fk,np.ones(len(Mappend))+i)
Fj = np.append(Fj,np.arange(len(Mappend))+1)
Ft=measurement_times(X,Fk,Fj)
#sort results into increasing time order
idx = np.argsort(Ft)
M, H, Hr, Ft, Fk, Fj = M[idx], H[idx], Hr[idx], Ft[idx], Fk[idx], Fj[idx]
unit = parse_header(X['keywords'],X['defaults'],'units',X['fn']) #Ensure use of SI units
if unit=='Cgs':
H=H/1E4 #Convert Oe into T
Hr=Hr/1E4 #Convert Oe into T
M=M/1E3 #Convert emu to Am^2
dH = np.mean(np.diff(H[Fk==np.max(Fk)])) #mean field spacing
return H, Hr, M, Fk, Fj, Ft, dH
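# Illustrative usage sketch (the file name below is hypothetical):
# X = {'fn': 'sample_forc_run.frc'}
# X = header_keywords(X)  # attach the keyword/default tables
# H, Hr, M, Fk, Fj, Ft, dH = parse_measurements(X)
# H holds the applied fields, Hr the reversal fields, M the moments,
# Fk/Fj the FORC and within-FORC indices, Ft the estimated measurement
# times and dH the mean field spacing, all converted to SI units.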
def parse_units(X):
"""Function to extract instrument unit settings ('') from FORC data file header
Inputs:
file: name of data file (string)
Outputs:
CGS [Cgs setting] or SI [Hybrid SI] (string)
"""
unit = parse_header(X['keywords'],X['defaults'],'units',X['fn']) #Ensure use of SI units
if 'Hybrid' in unit:
unit = 'SI'
return unit
def parse_mass(X):
mass = parse_header(X['keywords'],X['defaults'],'mass',X['fn']) #sample mass from the file header (may be 'N/A')
return mass
def measurement_times(X,Fk,Fj):
"""Function to estimate the time at which magnetization points were measured in a FORC sequence
Follows the procedure given in:
Egli, R. (2013) VARIFORC: An optimized protocol for calculating non-regular first-order reversal curve (FORC) diagrams. Global and Planetary Change, 110, 302-320, doi:10.1016/j.gloplacha.2013.08.003.
Inputs:
file: name of data file (string)
Fk: FORC indicies (int)
Fj: Measurement indicies within given FORC
Outputs:
Ft: Estimated times at which the magnetization points were measured (float)
"""
unit = parse_header(X['keywords'],X['defaults'],'units',X['fn'])
tr=parse_header(X['keywords'],X['defaults'],'pause at reversal',X['fn'])
tau=parse_header(X['keywords'],X['defaults'],'averaging time',X['fn'])
if tau=="N/A":
tau = 0.2
tcal=parse_header(X['keywords'],X['defaults'],'pause at calibration',X['fn'])
ts=parse_header(X['keywords'],X['defaults'],'pause at saturation',X['fn'])
alpha=parse_header(X['keywords'],X['defaults'],'field slewrate',X['fn'])
Hs=parse_header(X['keywords'],X['defaults'],'saturation field',X['fn'])
Hb2=parse_header(X['keywords'],X['defaults'],'Hb2',X['fn'])
Hb1=parse_header(X['keywords'],X['defaults'],'Hb1',X['fn'])
Hc2=parse_header(X['keywords'],X['defaults'],'Hc2',X['fn'])
N=parse_header(X['keywords'],X['defaults'],'number of FORCs',X['fn'])
if unit=='Cgs':
alpha=alpha/1E4 #convert from Oe to T
Hs=Hs/1E4 #convert from Oe to T
Hb2=Hb2/1E4 #convert from Oe to T
Hb1=Hb1/1E4 #convert from Oe to T
dH = (Hc2-Hb1+Hb2)/N #estimated field spacing
#now following Egli's estimate of the measurement time
nc2 = Hc2/dH
Dt1 = tr + tau + tcal + ts + 2.*(Hs-Hb2-dH)/alpha
Dt3 = Hb2/alpha
Npts=int(Fk.size)
Ft=np.zeros(Npts)
for i in range(Npts):
if Fk[i]<=1+nc2:
Ft[i]=Fk[i]*Dt1+Dt3+Fj[i]*tau+dH/alpha*(Fk[i]*(Fk[i]-1))+(tau-dH/alpha)*(Fk[i]-1)**2
else:
Ft[i]=Fk[i]*Dt1+Dt3+Fj[i]*tau+dH/alpha*(Fk[i]*(Fk[i]-1))+(tau-dH/alpha)*((Fk[i]-1)*(1+nc2)-nc2)
return Ft
def parse_calibration(X):
Hcalib = parse_header(X['keywords'],X['defaults'],'calibration field',X['fn'])
skiprows = line_num_for_phrase_in_file('Time Stamp',X['fn'])
if skiprows is not False: #timestamp exists - new file format
df = pd.read_csv(X['fn'],skiprows=skiprows,encoding='latin9')
Htemp = np.array(df['Field (µ0H) [T]'])
Mtemp = np.array(df['Moment (m) [A·m²]'])
ttemp = np.array(df['Time Stamp [s]'])
#if Hcalib == 'N/A':
Hcalib = Htemp[0]
#idx = np.argwhere(np.abs(Htemp-Hcalib)<0.001)
idx = np.argwhere(np.abs(Htemp-Hcalib)/Hcalib<0.001)
Hcal, Mcal, tcal = Htemp[idx], Mtemp[idx], ttemp[idx]
else: #no timestamp - old file format, find line starting with "+"
skiprows = line_num_for_starting_data_line(X['fn'])
if skiprows>200:
skiprows = line_num_for_phrase_in_file(',', X['fn'])
nrows = parse_header(X['keywords'],X['defaults'],'number of rows',X['fn'])
if nrows == "N/A":
f = cd.open(X['fn'],'r',encoding='latin9')
Xf = f.read()
nrows = Xf.count(",")
df = pd.read_csv(X['fn'],skiprows=skiprows,encoding='latin9',header=None,nrows=nrows)
temp = np.array(df)
Htemp = temp[:,0]
Mtemp = temp[:,1]
#if Hcalib == 'N/A':
Hcalib = Htemp[0]
#idx = np.argwhere(np.abs(Htemp-Hcalib)<0.001)
idx = np.argwhere(np.abs(Htemp-Hcalib)/Hcalib<0.001)
Hcal, Mcal = Htemp[idx], Mtemp[idx]
tcal = calibration_times(X, len(Hcal))
return Hcal, Mcal, tcal
def calibration_times(X, Npts):
"""Function to estimate the time at which calibration points were measured in a FORC sequence
Follows the procedure given in:
Egli, R. (2013) VARIFORC: An optimized protocol for calculating non-regular first-order reversal curve (FORC) diagrams. Global and Planetary Change, 110, 302-320, doi:10.1016/j.gloplacha.2013.08.003.
Inputs:
file: name of data file (string)
Npts: number of calibration points (int)
Outputs:
tcal_k: Estimated times at which the calibration points were measured (float)
"""
#unit=parse_units(file) #determine measurement system (CGS or SI)
unit = parse_header(X['keywords'],X['defaults'],'units',X['fn'])
tr=parse_header(X['keywords'],X['defaults'],'pause at reversal',X['fn'])
tau=parse_header(X['keywords'],X['defaults'],'averaging time',X['fn'])
if tau=="N/A":
tau = 0.2
tcal=parse_header(X['keywords'],X['defaults'],'pause at calibration',X['fn'])
ts=parse_header(X['keywords'],X['defaults'],'pause at saturation',X['fn'])
alpha=parse_header(X['keywords'],X['defaults'],'field slewrate',X['fn'])
Hs=parse_header(X['keywords'],X['defaults'],'saturation field',X['fn'])
Hb2=parse_header(X['keywords'],X['defaults'],'Hb2',X['fn'])
Hb1=parse_header(X['keywords'],X['defaults'],'Hb1',X['fn'])
Hc2=parse_header(X['keywords'],X['defaults'],'Hc2',X['fn'])
N=parse_header(X['keywords'],X['defaults'],'number of FORCs',X['fn'])
if unit=='Cgs':
alpha=alpha/1E4 #convert from Oe to T
Hs=Hs/1E4 #convert from Oe to T
Hb2=Hb2/1E4 #convert from Oe to T
Hb1=Hb1/1E4 #convert from Oe to T
dH = (Hc2-Hb1+Hb2)/N #estimated field spacing
#now following Egli's estimate of the measurement time
nc2 = Hc2/dH
Dt1 = tr + tau + tcal + ts + 2.*(Hs-Hb2-dH)/alpha
Dt2 = tr + tau + (Hc2-Hb2-dH)/alpha
Npts=int(Npts)
tcal_k=np.zeros(Npts)
for k in range(1,Npts+1):
if k<=1+nc2:
tcal_k[k-1]=k*Dt1-Dt2+dH/alpha*k**2+(tau-dH/alpha)*(k-1)**2
else:
| |
"""
<NAME>
<EMAIL>
@shackoverflow
surrender_index_bot.py
A Twitter bot that tracks every live game in the NFL,
and tweets out the "Surrender Index" of every punt
as it happens.
Inspired by SB Nation's Jon Bois, @jon_bois.
"""
import argparse
from base64 import urlsafe_b64encode
import chromedriver_autoinstaller
from datetime import datetime, timedelta, timezone
from dateutil import parser, tz
from email.mime.text import MIMEText
import espn_scraper as espn
from googleapiclient.discovery import build
from google_auth_oauthlib.flow import InstalledAppFlow
from google.auth.transport.requests import Request
import json
import numpy as np
import os
import pickle
import scipy.stats as stats
from selenium import webdriver
from selenium.webdriver.support.select import Select
from selenium.common.exceptions import StaleElementReferenceException
from subprocess import Popen, PIPE
import sys
import threading
import time
import tweepy
from twilio.rest import Client
import traceback
# A dictionary of plays that have already been tweeted.
tweeted_plays = None
# A dictionary of the currently active games.
games = {}
# The authenticated Tweepy APIs.
api, ninety_api = None, None
# NPArray of historical surrender indices.
historical_surrender_indices = None
# Whether the bot should tweet out any punts
should_tweet = True
### SELENIUM FUNCTIONS ###
def get_game_driver(headless=True):
global debug
global not_headless
options = webdriver.ChromeOptions()
if headless and not debug and not not_headless:
options.add_argument("headless")
return webdriver.Chrome(options=options)
def get_twitter_driver(link, headless=False):
with open('credentials.json', 'r') as f:
credentials = json.load(f)
email = credentials['cancel_email']
username = credentials['cancel_username']
password = credentials['cancel_password']
driver = get_game_driver(headless=headless)
driver.implicitly_wait(60)
driver.get(link)
driver.find_element_by_xpath("//div[@aria-label='Reply']").click()
time.sleep(1)
login_button = driver.find_element_by_xpath("//a[@data-testid='login']")
time.sleep(1)
driver.execute_script("arguments[0].click();", login_button)
email_field = driver.find_element_by_xpath(
"//input[@name='session[username_or_email]']")
password_field = driver.find_element_by_xpath(
"//input[@name='session[password]']")
email_field.send_keys(email)
password_field.send_keys(password)
driver.find_element_by_xpath(
"//div[@data-testid='LoginForm_Login_Button']").click()
time.sleep(1)
if 'email_disabled=true' in driver.current_url:
username_field = driver.find_element_by_xpath(
"//input[@name='session[username_or_email]']")
password_field = driver.find_element_by_xpath(
"//input[@name='session[password]']")
username_field.send_keys(username)
password_field.send_keys(password)
driver.find_element_by_xpath(
"//div[@data-testid='LoginForm_Login_Button']").click()
return driver
def get_inner_html_of_element(element):
return element.get_attribute("innerHTML")
def get_inner_html_of_elements(elements):
return list(map(get_inner_html_of_element, elements))
def construct_play_from_element(element):
title = get_inner_html_of_element(element.find_element_by_tag_name("h3"))
desc = get_inner_html_of_element(
element.find_element_by_tag_name("p").find_element_by_tag_name("span"))
desc = desc.lstrip().rstrip()
play = {}
if len(title) > 5:
down_dist, yrdln = title.split("at")
play['yard_line'] = yrdln.lstrip(" ")
play['down'] = down_dist[:3]
play['dist'] = down_dist.rstrip(" ").split(" ")[-1]
if 'goal' in play['dist'].lower():
play['dist'] = play['yard_line'].split(" ")[1]
start_index = desc.find("(") + 1
end_index = desc.find(")")
time_qtr = desc[start_index:end_index]
play['time'] = time_qtr.split("-")[0].rstrip(" ")
play['qtr'] = time_qtr.split("-")[1].lstrip(" ")
play['text'] = desc[end_index + 1:].lstrip(" ")
return play
def get_plays_from_drive(drive, game):
all_plays = drive.find_elements_by_tag_name("li")
good_plays = []
if is_final(game):
relevant_plays = all_plays[-3:]
else:
relevant_plays = all_plays[:3]
for play in relevant_plays:
if play.get_attribute("class") == '' or play.get_attribute(
"class") == 'video':
play_dct = construct_play_from_element(play)
if 'yard_line' in play_dct:
good_plays.append(play_dct)
return good_plays
def get_all_drives(game):
all_drives = game.find_elements_by_class_name("drive-list")
for drive in all_drives:
accordion_content = drive.find_element_by_xpath(
'..').find_element_by_xpath('..')
if "in" not in accordion_content.get_attribute("class"):
accordion_content.find_element_by_xpath('..').click()
time.sleep(0.5)
return all_drives
### POSSESSION DETERMINATION FUNCTIONS ###
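# Possession is resolved in three tiers (see get_possessing_team below): first
# by matching a rostered punter's name in the play text, then by inferring it
# from the punt distance relative to the yard line, and finally by falling back
# to the team logo shown in the drive's accordion header.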
def get_possessing_team_from_play_roster(play, game):
global punters
home, away = get_home_team(game), get_away_team(game)
home_punters, away_punters = punters[home], punters[away]
home_possession, away_possession = False, False
for home_punter in home_punters:
if home_punter in play['text']:
home_possession = True
for away_punter in away_punters:
if away_punter in play['text']:
away_possession = True
if home_possession == away_possession:
return ''
else:
return home if home_possession else away
def get_possessing_team_from_punt_distance(play, game):
try:
split = play['text'].split(" ")
if split[1] == 'punts':
if int(split[2]) > int(play['yard_line'].split(" ")[1]):
return play['yard_line'].split(" ")[0]
if 'touchback' in play['text'].lower():
punt_distance = int(split[2])
if punt_distance > 50:
return play['yard_line'].split(" ")[0]
else:
return return_other_team(game,
play['yard_line'].split(" ")[0])
punt_distance = int(split[2]) + int(split[6])
if punt_distance > 50:
return play['yard_line'].split(" ")[0]
else:
return return_other_team(game, play['yard_line'].split(" ")[0])
return ''
except BaseException:
return ''
def get_possessing_team_from_drive(drive):
accordion_header = drive.find_element_by_xpath('../../..')
team_logo = accordion_header.find_element_by_class_name('team-logo')
if team_logo.get_attribute("src") is None:
team_logo = team_logo.find_element_by_tag_name('img')
img_name = team_logo.get_attribute("src")
index = img_name.find(".png")
return img_name[index - 3:index].lstrip("/").upper()
def get_possessing_team(play, drive, game):
possessing_team = get_possessing_team_from_play_roster(play, game)
if possessing_team != '':
return possessing_team
possessing_team = get_possessing_team_from_punt_distance(play, game)
return possessing_team if possessing_team != '' else get_possessing_team_from_drive(
drive)
### TEAM ABBREVIATION FUNCTIONS ###
def get_abbreviations(game):
return get_inner_html_of_elements(
game.find_elements_by_class_name("abbrev"))
def get_home_team(game):
return get_abbreviations(game)[1]
def get_away_team(game):
return get_abbreviations(game)[0]
def return_other_team(game, team):
return get_away_team(game) if get_home_team(
game) == team else get_home_team(game)
### GAME INFO FUNCTIONS ###
def get_game_id(game):
return game.current_url[-14:-5]
def get_game_header(game):
header_eles = game.find_elements_by_css_selector('div.game-details.header')
return get_inner_html_of_element(
header_eles[0]) if len(header_eles) > 0 else ""
def is_final(game):
element = game.find_element_by_class_name("status-detail")
is_final = 'final' in get_inner_html_of_element(element).lower()
if debug:
time_print(("is final", is_final))
return is_final
def is_postseason(game):
header = get_game_header(game).lower()
is_postseason = 'playoff' in header or 'championship' in header or 'super bowl' in header
if debug:
time_print(("is postseason", is_postseason))
return is_postseason
### SCORE FUNCTIONS ###
def get_scores(game):
parent_elements = game.find_elements_by_class_name("score-container")
elements = list(
map(lambda x: x.find_element_by_tag_name("div"), parent_elements))
return get_inner_html_of_elements(elements)
def get_home_score(play, drive, drives, game):
drive_index = drives.index(drive)
return get_drive_scores(drives, drive_index, game)[1]
def get_away_score(play, drive, drives, game):
drive_index = drives.index(drive)
return get_drive_scores(drives, drive_index, game)[0]
def get_drive_scores(drives, index, game):
if is_final(game):
if index == 0:
drive = drives[0]
else:
drive = drives[index - 1]
else:
if index == len(drives) - 1:
drive = drives[-1]
else:
drive = drives[index + 1]
accordion_header = drive.find_element_by_xpath('../../..')
away_parent = accordion_header.find_element_by_class_name(
'home') # this is intentional, ESPN is dumb
home_parent = accordion_header.find_element_by_class_name(
'away') # this is intentional, ESPN is dumb
away_score_element = away_parent.find_element_by_class_name('team-score')
home_score_element = home_parent.find_element_by_class_name('team-score')
away_score, home_score = int(
get_inner_html_of_element(away_score_element)), int(
get_inner_html_of_element(home_score_element))
if debug:
time_print(("away score", away_score))
time_print(("home score", home_score))
return away_score, home_score
### PLAY FUNCTIONS ###
def is_punt(play):
text = play['text'].lower()
if 'fake punt' in text:
return False
if 'punts' in text:
return True
if 'punt is blocked' in text:
return True
if 'punt for ' in text:
return True
return False
def is_penalty(play):
return 'penalty' in play['text'].lower()
def get_yrdln_int(play):
return int(play['yard_line'].split(" ")[-1])
def get_field_side(play):
if '50' in play['yard_line']:
return None
else:
return play['yard_line'].split(" ")[0]
def get_time_str(play):
return play['time']
def get_qtr_num(play):
qtr = play['qtr']
if qtr == 'OT':
return 5
elif qtr == '2OT':
return 6
elif qtr == '3OT':
return 7
else:
return int(qtr[0])
def is_in_opposing_territory(play, drive, game):
is_in_opposing_territory = get_field_side(play) != get_possessing_team(
play, drive, game)
if debug:
time_print(("is in opposing territory", is_in_opposing_territory))
return is_in_opposing_territory
def get_dist_num(play):
return int(play['dist'])
### CALCULATION HELPER FUNCTIONS ###
def calc_seconds_from_time_str(time_str):
minutes, seconds = map(int, time_str.split(":"))
return minutes * 60 + seconds
def calc_seconds_since_halftime(play, game):
# Regular season games have only one overtime of length 10 minutes
if not is_postseason(game) and get_qtr_num(play) == 5:
seconds_elapsed_in_qtr = (10 * 60) - calc_seconds_from_time_str(
get_time_str(play))
else:
seconds_elapsed_in_qtr = (15 * 60) - calc_seconds_from_time_str(
get_time_str(play))
seconds_since_halftime = max(
seconds_elapsed_in_qtr + (15 * 60) * (get_qtr_num(play) - 3), 0)
if debug:
time_print(("seconds since halftime", seconds_since_halftime))
return seconds_since_halftime
def calc_score_diff(play, drive, drives, game):
drive_index = drives.index(drive)
away, home = get_drive_scores(drives, drive_index, game)
if get_possessing_team(play, drive, game) == get_home_team(game):
score_diff = int(home) - int(away)
else:
score_diff = int(away) - int(home)
if debug:
time_print(("score diff", score_diff))
return score_diff
### SURRENDER INDEX FUNCTIONS ###
def calc_field_pos_score(play, drive, game):
try:
if get_yrdln_int(play) == 50:
return (1.1)**10.
if not is_in_opposing_territory(play, drive, game):
return max(1., (1.1)**(get_yrdln_int(play) - 40))
else:
return (1.2)**(50 - get_yrdln_int(play)) * ((1.1)**(10))
except BaseException:
return 0.
def calc_yds_to_go_multiplier(play):
dist = get_dist_num(play)
if dist >= 10:
return 0.2
elif dist >= 7:
return 0.4
elif dist >= 4:
return 0.6
elif dist >= 2:
return 0.8
else:
return 1.
def calc_score_multiplier(play, drive, drives, game):
score_diff = calc_score_diff(play, drive, drives, game)
if score_diff > 0:
return 1.
elif score_diff == 0:
return 2.
elif score_diff < -8.:
return 3.
else:
return 4.
def calc_clock_multiplier(play, drive, drives, game):
if calc_score_diff(play, drive, drives,
game) <= 0 and get_qtr_num(play) > 2:
seconds_since_halftime = calc_seconds_since_halftime(play, game)
return ((seconds_since_halftime * 0.001)**3.) + 1.
else:
return 1.
def calc_surrender_index(play, drive, drives, game):
field_pos_score = calc_field_pos_score(play, drive, game)
yds_to_go_mult = calc_yds_to_go_multiplier(play)
score_mult = calc_score_multiplier(play, drive, drives, game)
clock_mult = calc_clock_multiplier(play, drive, drives, game)
if debug:
time_print(play)
time_print("")
time_print(("field pos score", field_pos_score))
time_print(("yds to go mult", yds_to_go_mult))
time_print(("score mult", score_mult))
time_print(("clock mult", clock_mult))
return field_pos_score * yds_to_go_mult * score_mult * clock_mult
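# Worked example of the formula above (hypothetical numbers, not from a real
# game): a punt on 4th-and-8 from the opponent's 45 in a tied game with 5:00
# left in the 3rd quarter (600 s past halftime):
#   field_pos_score = 1.2**(50 - 45) * 1.1**10  ~= 6.45
#   yds_to_go_mult  = 0.4   (7 <= distance < 10)
#   score_mult      = 2.0   (tied game)
#   clock_mult      = (600 * 0.001)**3 + 1  ~= 1.22
#   surrender index ~= 6.45 * 0.4 * 2.0 * 1.22  ~= 6.3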
### PUNTER FUNCTIONS ###
def find_punters_for_team(team, roster):
base_link = 'https://www.espn.com/nfl/team/roster/_/name/'
roster_link = base_link + team
roster.get(roster_link)
header = roster.find_element_by_css_selector("div.Special.Teams")
parents = header.find_elements_by_css_selector(
"td.Table__TD:not(.Table__TD--headshot)")
punters = set()
for parent in parents:
try:
ele = parent.find_element_by_class_name("AnchorLink")
full_name = ele.get_attribute("innerHTML")
split = full_name.split(" ")
first_initial_last = full_name[0] + '.' + split[-1]
punters.add(first_initial_last)
except BaseException:
pass
return punters
def download_punters():
global punters
punters = {}
if os.path.exists('punters.json'):
file_mod_time = os.path.getmtime('punters.json')
else:
file_mod_time = 0.
if time.time() - file_mod_time < 60 * 60 * 12:
# if file modified within past 12 hours
with open('punters.json', 'r') as f:
punters_list = json.load(f)
for key, value in punters_list.items():
punters[key] = set(value)
else:
team_abbreviations = [
'ARI',
'ATL',
'BAL',
'BUF',
'CAR',
'CHI',
'CIN',
'CLE',
'DAL',
'DEN',
'DET',
'GB',
'HOU',
'IND',
'JAX',
'KC',
'LAC',
'LAR',
'LV',
'MIA',
'MIN',
'NE',
'NO',
'NYG',
'NYJ',
'PHI',
'PIT',
'SEA',
'SF',
'TB',
'TEN',
'WSH',
]
roster = get_game_driver()
for team in team_abbreviations:
time_print("Downloading punters for " + team)
punters[team] = find_punters_for_team(team, roster)
| |
# sdk/python/pulumi_ucloud/ipsecvpn/vpn_gateway.py
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from .. import _utilities
__all__ = ['VPNGatewayArgs', 'VPNGateway']
@pulumi.input_type
class VPNGatewayArgs:
def __init__(__self__, *,
eip_id: pulumi.Input[str],
grade: pulumi.Input[str],
vpc_id: pulumi.Input[str],
charge_type: Optional[pulumi.Input[str]] = None,
duration: Optional[pulumi.Input[int]] = None,
name: Optional[pulumi.Input[str]] = None,
remark: Optional[pulumi.Input[str]] = None,
tag: Optional[pulumi.Input[str]] = None):
"""
The set of arguments for constructing a VPNGateway resource.
        :param pulumi.Input[str] eip_id: The ID of the EIP associated with the VPN Gateway.
        :param pulumi.Input[str] grade: The type of the VPN Gateway. Possible values: `standard`, `enhanced`. `standard` is recommended for services with a bidirectional peak bandwidth of 1M~50M; `enhanced` is recommended for services with a bidirectional peak bandwidth of 50M~100M.
        :param pulumi.Input[str] vpc_id: The ID of VPC linked to the VPN Gateway.
        :param pulumi.Input[str] charge_type: The charge type of the VPN Gateway. Possible values are: `year`, `month` and `dynamic` (pay by hour, specific permission required). (Default: `month`).
        :param pulumi.Input[int] duration: The duration for which you purchase the VPN Gateway (Default: `1`). The value is `0` when paying by month, and the instance will be valid until the last day of that month. It is not required when the charge type is `dynamic` (pay by hour).
        :param pulumi.Input[str] remark: The remarks of the VPN Gateway. (Default: `""`).
        :param pulumi.Input[str] tag: A tag assigned to the VPN Gateway. It may contain at most 63 characters and only supports Chinese, English, numbers, '-', '_' and '.'. If it is not filled in or an empty string is filled in, the default tag will be assigned. (Default: `Default`).
"""
pulumi.set(__self__, "eip_id", eip_id)
pulumi.set(__self__, "grade", grade)
pulumi.set(__self__, "vpc_id", vpc_id)
if charge_type is not None:
pulumi.set(__self__, "charge_type", charge_type)
if duration is not None:
pulumi.set(__self__, "duration", duration)
if name is not None:
pulumi.set(__self__, "name", name)
if remark is not None:
pulumi.set(__self__, "remark", remark)
if tag is not None:
pulumi.set(__self__, "tag", tag)
@property
@pulumi.getter(name="eipId")
def eip_id(self) -> pulumi.Input[str]:
"""
        The ID of the EIP associated with the VPN Gateway.
"""
return pulumi.get(self, "eip_id")
@eip_id.setter
def eip_id(self, value: pulumi.Input[str]):
pulumi.set(self, "eip_id", value)
@property
@pulumi.getter
def grade(self) -> pulumi.Input[str]:
"""
        The type of the VPN Gateway. Possible values: `standard`, `enhanced`. `standard` is recommended for services with a bidirectional peak bandwidth of 1M~50M; `enhanced` is recommended for services with a bidirectional peak bandwidth of 50M~100M.
"""
return pulumi.get(self, "grade")
@grade.setter
def grade(self, value: pulumi.Input[str]):
pulumi.set(self, "grade", value)
@property
@pulumi.getter(name="vpcId")
def vpc_id(self) -> pulumi.Input[str]:
"""
The ID of VPC linked to the VPN Gateway.
"""
return pulumi.get(self, "vpc_id")
@vpc_id.setter
def vpc_id(self, value: pulumi.Input[str]):
pulumi.set(self, "vpc_id", value)
@property
@pulumi.getter(name="chargeType")
def charge_type(self) -> Optional[pulumi.Input[str]]:
"""
        The charge type of the VPN Gateway. Possible values are: `year`, `month` and `dynamic` (pay by hour, specific permission required). (Default: `month`).
"""
return pulumi.get(self, "charge_type")
@charge_type.setter
def charge_type(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "charge_type", value)
@property
@pulumi.getter
def duration(self) -> Optional[pulumi.Input[int]]:
"""
        The duration for which you purchase the VPN Gateway (Default: `1`). The value is `0` when paying by month, and the instance will be valid until the last day of that month. It is not required when the charge type is `dynamic` (pay by hour).
"""
return pulumi.get(self, "duration")
@duration.setter
def duration(self, value: Optional[pulumi.Input[int]]):
pulumi.set(self, "duration", value)
@property
@pulumi.getter
def name(self) -> Optional[pulumi.Input[str]]:
return pulumi.get(self, "name")
@name.setter
def name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "name", value)
@property
@pulumi.getter
def remark(self) -> Optional[pulumi.Input[str]]:
"""
The remarks of the VPN Gateway. (Default: `""`).
"""
return pulumi.get(self, "remark")
@remark.setter
def remark(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "remark", value)
@property
@pulumi.getter
def tag(self) -> Optional[pulumi.Input[str]]:
"""
        A tag assigned to the VPN Gateway. It may contain at most 63 characters and only supports Chinese, English, numbers, '-', '_' and '.'. If it is not filled in or an empty string is filled in, the default tag will be assigned. (Default: `Default`).
"""
return pulumi.get(self, "tag")
@tag.setter
def tag(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "tag", value)
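# Illustrative usage of the resource configured by these arguments (not part of the
# generated SDK; a minimal sketch kept in comments so the module stays import-safe).
# It assumes the usual Pulumi constructor shape for bridged providers; the EIP and VPC
# references below are placeholders.
#
#   import pulumi_ucloud as ucloud
#
#   gateway = ucloud.ipsecvpn.VPNGateway("example-gateway",
#       eip_id=example_eip.id,
#       vpc_id=example_vpc.id,
#       grade="standard",
#       charge_type="month")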
@pulumi.input_type
class _VPNGatewayState:
def __init__(__self__, *,
charge_type: Optional[pulumi.Input[str]] = None,
create_time: Optional[pulumi.Input[str]] = None,
duration: Optional[pulumi.Input[int]] = None,
eip_id: Optional[pulumi.Input[str]] = None,
expire_time: Optional[pulumi.Input[str]] = None,
grade: Optional[pulumi.Input[str]] = None,
name: Optional[pulumi.Input[str]] = None,
remark: Optional[pulumi.Input[str]] = None,
tag: Optional[pulumi.Input[str]] = None,
vpc_id: Optional[pulumi.Input[str]] = None):
"""
Input properties used for looking up and filtering VPNGateway resources.
        :param pulumi.Input[str] charge_type: The charge type of the VPN Gateway. Possible values are: `year`, `month` and `dynamic` (pay by hour, specific permission required). (Default: `month`).
:param pulumi.Input[str] create_time: The creation time for VPN Gateway, formatted in RFC3339 time string.
        :param pulumi.Input[int] duration: The duration for which you purchase the VPN Gateway (Default: `1`). The value is `0` when paying by month, and the instance will be valid until the last day of that month. It is not required when the charge type is `dynamic` (pay by hour).
        :param pulumi.Input[str] eip_id: The ID of the EIP associated with the VPN Gateway.
:param pulumi.Input[str] expire_time: The expiration time for VPN Gateway, formatted in RFC3339 time string.
        :param pulumi.Input[str] grade: The type of the VPN Gateway. Possible values: `standard`, `enhanced`. `standard` is recommended for services with a bidirectional peak bandwidth of 1M~50M; `enhanced` is recommended for services with a bidirectional peak bandwidth of 50M~100M.
:param pulumi.Input[str] remark: The remarks of the VPN Gateway. (Default: `""`).
        :param pulumi.Input[str] tag: A tag assigned to the VPN Gateway. It may contain at most 63 characters and only supports Chinese, English, numbers, '-', '_' and '.'. If it is not filled in or an empty string is filled in, the default tag will be assigned. (Default: `Default`).
:param pulumi.Input[str] vpc_id: The ID of VPC linked to the VPN Gateway.
"""
if charge_type is not None:
pulumi.set(__self__, "charge_type", charge_type)
if create_time is not None:
pulumi.set(__self__, "create_time", create_time)
if duration is not None:
pulumi.set(__self__, "duration", duration)
if eip_id is not None:
pulumi.set(__self__, "eip_id", eip_id)
if expire_time is not None:
pulumi.set(__self__, "expire_time", expire_time)
if grade is not None:
pulumi.set(__self__, "grade", grade)
if name is not None:
pulumi.set(__self__, "name", name)
if remark is not None:
pulumi.set(__self__, "remark", remark)
if tag is not None:
pulumi.set(__self__, "tag", tag)
if vpc_id is not None:
pulumi.set(__self__, "vpc_id", vpc_id)
@property
@pulumi.getter(name="chargeType")
def charge_type(self) -> Optional[pulumi.Input[str]]:
"""
        The charge type of the VPN Gateway. Possible values are: `year`, `month` and `dynamic` (pay by hour, specific permission required). (Default: `month`).
"""
return pulumi.get(self, "charge_type")
@charge_type.setter
def charge_type(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "charge_type", value)
@property
@pulumi.getter(name="createTime")
def create_time(self) -> Optional[pulumi.Input[str]]:
"""
The creation time for VPN Gateway, formatted in RFC3339 time string.
"""
return pulumi.get(self, "create_time")
@create_time.setter
def create_time(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "create_time", value)
@property
@pulumi.getter
def duration(self) -> Optional[pulumi.Input[int]]:
"""
        The duration for which you purchase the VPN Gateway (Default: `1`). The value is `0` when paying by month, and the instance will be valid until the last day of that month. It is not required when the charge type is `dynamic` (pay by hour).
"""
return pulumi.get(self, "duration")
@duration.setter
def duration(self, value: Optional[pulumi.Input[int]]):
pulumi.set(self, "duration", value)
@property
@pulumi.getter(name="eipId")
def eip_id(self) -> Optional[pulumi.Input[str]]:
"""
        The ID of the EIP associated with the VPN Gateway.
"""
return pulumi.get(self, "eip_id")
@eip_id.setter
def eip_id(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "eip_id", value)
@property
@pulumi.getter(name="expireTime")
def expire_time(self) -> Optional[pulumi.Input[str]]:
"""
The expiration time for VPN Gateway, formatted in RFC3339 time string.
"""
return pulumi.get(self, "expire_time")
@expire_time.setter
def expire_time(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "expire_time", value)
@property
@pulumi.getter
def grade(self) -> Optional[pulumi.Input[str]]:
"""
        The type of the VPN Gateway. Possible values: `standard`, `enhanced`. `standard` is recommended for services with a bidirectional peak bandwidth of 1M~50M; `enhanced` is recommended for services with a bidirectional peak bandwidth of 50M~100M.
"""
return pulumi.get(self, "grade")
@grade.setter
def grade(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "grade", value)
@property
@pulumi.getter
def name(self) -> Optional[pulumi.Input[str]]:
return pulumi.get(self, "name")
@name.setter
def name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "name", value)
@property
@pulumi.getter
def remark(self) -> Optional[pulumi.Input[str]]:
"""
        The remarks of the VPN Gateway. (Default: `""`).
        """
        return pulumi.get(self, "remark")
        # Call this signal/indicator's plot item (plotDataItem.setData()) to update its data, pen color and name
if indicator in self.sub_indicator_plots:
self.sub_indicator_plots[indicator].setData(self.sub_indicator_data[indicator],
pen=self.sub_indicator_colors[indicator][0],
name=indicator)
# ----------------------------------------------------------------------
def add_indicator(self, indicator, is_main=True):
"""
        Add an indicator/signal plot
        :param indicator: name of the indicator/signal, e.g. ma10
        :param is_main: whether it belongs to the main chart
:return:
"""
if is_main:
if indicator in self.main_indicator_plots:
                self.pi_main.removeItem(self.main_indicator_plots[indicator])  # the indicator/signal already exists, so remove its old plot item first
            self.main_indicator_plots[indicator] = self.pi_main.plot()  # create a new main-chart plot item for this indicator/signal and register it
            self.main_indicator_colors[indicator] = self.main_color_pool[0]  # record the color used by this indicator/signal
            self.main_color_pool.append(self.main_color_pool.popleft())  # rotate the remaining colors
if indicator not in self.main_indicator_data:
self.main_indicator_data[indicator] = []
else:
if indicator in self.sub_indicator_plots:
                self.pi_sub.removeItem(self.sub_indicator_plots[indicator])  # if the indicator/signal already exists, remove its old sub-chart plot item first
            self.sub_indicator_plots[indicator] = self.pi_sub.plot()  # create a new sub-chart plot item for this indicator/signal and register it
            self.sub_indicator_colors[indicator] = self.sub_color_pool[0]  # record the color used by this indicator/signal
            self.sub_color_pool.append(self.sub_color_pool.popleft())  # rotate the remaining colors
if indicator not in self.sub_indicator_data:
self.sub_indicator_data[indicator] = []
# ----------------------------------------------------------------------
def plot_indicator(self, datas, is_main=True, clear=False):
"""
        Refresh the indicator/signal plots (with new data)
        :param datas: all of the data
        :param is_main: whether this is the main chart
        :param clear: whether to clear the old data first
:return:
"""
if clear:
            self.clear_indicator(is_main)  # clear the main/sub chart indicators
if is_main:
for indicator in datas:
                self.add_indicator(indicator, is_main)  # add each main-chart signal/indicator one by one
                self.main_indicator_data[indicator] = datas[indicator]  # update the widget's data dict
                # Call this signal/indicator's plot item (plotDataItem.setData()) to update its data, pen color and name
self.main_indicator_plots[indicator].setData(datas[indicator], pen=self.main_indicator_colors[indicator][0], name=indicator)
else:
for indicator in datas:
                self.add_indicator(indicator, is_main)  # add each sub-chart indicator/signal one by one
                self.sub_indicator_data[indicator] = datas[indicator]  # update the widget's data dict
                # Call this signal/indicator's plot item (plotDataItem.setData()) to update its data, pen color and name
self.sub_indicator_plots[indicator].setData(datas[indicator], pen=self.sub_indicator_colors[indicator][0], name=indicator)
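    # Illustrative usage (a sketch; the indicator names and value lists are placeholders):
    #
    #   widget.plot_indicator({'ma10': ma10_values, 'ma20': ma20_values}, is_main=True, clear=True)
    #   widget.plot_indicator({'openInterest': oi_values}, is_main=False)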
#----------------------------------------------------------------------
def update_all(self):
"""
        Manually refresh all candlestick graphics; needed in bar-playback mode
"""
datas = self.datas
if self.display_vol:
self.ci_volume.pictrue = None
self.ci_volume.update()
self.ci_candle.pictrue = None
self.ci_candle.update()
def update(view, low, high):
"""
            Update the view
:param view: viewbox
:param low:
:param high:
:return:
"""
vRange = view.viewRange()
xmin = max(0,int(vRange[0][0]))
xmax = max(0,int(vRange[0][1]))
xmax = min(xmax,len(datas))
if len(datas)>0 and xmax > xmin:
ymin = min(datas[xmin:xmax][low])
ymax = max(datas[xmin:xmax][high])
view.setRange(yRange = (ymin,ymax))
else:
view.setRange(yRange = (0,1))
update(self.pi_main.getViewBox(), 'low', 'high')
update(self.pi_volume.getViewBox(), 'volume', 'volume')
#----------------------------------------------------------------------
def plot_all(self, redraw=True, xMin=0, xMax=-1):
"""
        Redraw the whole chart
        redraw: False = redraw only the last bar; True = redraw everything
        xMin, xMax: data range
"""
xMax = len(self.datas) if xMax < 0 else xMax
self.countK = xMax-xMin
        self.index = int((xMax+xMin)/2)  # set the current index to the midpoint of the data
self.pi_sub.setLimits(xMin=xMin, xMax=xMax)
self.pi_main.setLimits(xMin=xMin, xMax=xMax)
        self.plot_kline(redraw, xMin, xMax)  # candlestick main chart
if self.display_vol:
self.pi_volume.setLimits(xMin=xMin, xMax=xMax)
            self.plot_volume(redraw, xMin, xMax)  # candlestick sub-chart: volume
        self.plot_sub(0, len(self.datas))  # candlestick sub-chart: open interest
self.refresh()
#----------------------------------------------------------------------
def refresh(self):
"""
        Refresh the display range of the three sub-plots
"""
datas = self.datas
        # compute the min/max x for the number of bars shown on screen
minutes = int(self.countK/2)
xmin = max(0,self.index-minutes)
xmax = xmin+2*minutes
        # update the x range of the main chart / sub chart / volume chart
self.pi_sub.setRange(xRange = (xmin, xmax))
self.pi_main.setRange(xRange = (xmin, xmax))
self.pi_volume.setRange(xRange = (xmin, xmax))
#----------------------------------------------------------------------
    # Keyboard shortcut handlers
#----------------------------------------------------------------------
    def onNxt(self):
        """Jump to the next open/close trade point"""
try:
if len(self.x_t_trade_map)>0 and self.index is not None:
datalen = len(self.datas)
self.index+=1
                while self.index < datalen and self.index not in self.x_t_trade_map:
self.index+=1
self.refresh()
x = self.index
y = self.datas[x]['close']
self.crosshair.signal.emit((x,y))
except Exception as ex:
            print(u'{} onNxt() exception:{},trace:{}'.format(self.title, str(ex), traceback.format_exc()))
#----------------------------------------------------------------------
    def onPre(self):
        """Jump to the previous open/close trade point"""
try:
if len(self.x_t_trade_map)>0 and not self.index is None:
self.index-=1
                while self.index > 0 and self.index not in self.x_t_trade_map:
self.index-=1
self.refresh()
x = self.index
y = self.datas[x]['close']
self.crosshair.signal.emit((x,y))
except Exception as ex:
            print(u'{}.onPre() exception:{},trace:{}'.format(self.title, str(ex), traceback.format_exc()))
# ----------------------------------------------------------------------
    def onDown(self):
        """Enlarge the displayed interval (show more bars)"""
try:
self.countK = min(len(self.datas),int(self.countK*1.2)+1)
self.refresh()
if len(self.datas)>0:
x = self.index-self.countK/2+2 if int(self.crosshair.xAxis)<self.index-self.countK/2+2 else int(self.crosshair.xAxis)
x = self.index+self.countK/2-2 if x >self.index+self.countK/2-2 else x
x = int(x)
y = self.datas[x][2]
self.crosshair.signal.emit((x,y))
print(u'onDown:countK:{},x:{},y:{},index:{}'.format(self.countK,x,y,self.index))
except Exception as ex:
print(u'{}.onDown() exception:{},trace:{}'.format(self.title, str(ex), traceback.format_exc()))
# ----------------------------------------------------------------------
    def onUp(self):
        """Shrink the displayed interval (show fewer bars)"""
        try:
            # reduce the number of bars shown on screen
self.countK = max(3,int(self.countK/1.2)-1)
self.refresh()
if len(self.datas) > 0:
x = self.index-int(self.countK/2)+2 if int(self.crosshair.xAxis)<self.index- int(self.countK/2)+2 else int(self.crosshair.xAxis)
x = self.index+ int(self.countK/2)-2 if x > self.index+ (self.countK/2)-2 else x
x = int(x)
y = self.datas[x]['close']
self.crosshair.signal.emit((x,y))
print(u'onUp:countK:{},x:{},y:{},index:{}'.format(self.countK, x, y, self.index))
except Exception as ex:
            print(u'{}.onUp() exception:{},trace:{}'.format(self.title, str(ex), traceback.format_exc()))
#----------------------------------------------------------------------
    def onLeft(self):
        """Move left"""
try:
if len(self.datas)>0 and int(self.crosshair.xAxis)>2:
x = int(self.crosshair.xAxis)-1
y = self.datas[x]['close']
if x <= self.index-self.countK/2+2 and self.index>1:
self.index -= 1
self.refresh()
self.crosshair.signal.emit((x,y))
print(u'onLeft:countK:{},x:{},y:{},index:{}'.format(self.countK, x, y, self.index))
except Exception as ex:
print(u'{}.onLeft() exception:{},trace:{}'.format(self.title, str(ex), traceback.format_exc()))
# ----------------------------------------------------------------------
    def onRight(self):
        """Move right"""
try:
if len(self.datas)>0 and int(self.crosshair.xAxis)<len(self.datas)-1:
x = int(self.crosshair.xAxis)+1
y = self.datas[x]['close']
if x >= self.index+int(self.countK/2)-2:
self.index += 1
self.refresh()
self.crosshair.signal.emit((x,y))
except Exception as ex:
            print(u'{}.onRight() exception:{},trace:{}'.format(self.title, str(ex), traceback.format_exc()))
def onDoubleClick(self, pos):
"""
        Mouse double-click event
:param pos:
:return:
"""
try:
if len(self.datas) > 0 and int(self.crosshair.xAxis) >= 0:
x = int(self.crosshair.xAxis)
time_value = self.axisTime.xdict.get(x, None)
self.index = x
print(u'{} doubleclick: {},x:{},index:{}'.format(self.title, time_value, x, self.index))
if self.relocate_notify_func is not None and time_value is not None:
self.relocate_notify_func(self.windowId, time_value, self.countK)
except Exception as ex:
print(u'{}.onDoubleClick() exception:{},trace:{}'.format(self.title, str(ex), traceback.format_exc()))
def relocate(self, window_id, t_value, count_k):
"""
        Relocate to the x coordinate closest to t_value
:param window_id:
:param t_value:
:param count_k
:return:
"""
if self.windowId == window_id or count_k < 2:
return
try:
x_value = self.axisTime.get_x_by_time(t_value)
self.countK = count_k
if 0 < x_value <= len(self.datas):
self.index = x_value
x = self.index
y = self.datas[x]['close']
self.refresh()
self.crosshair.signal.emit((x, y))
                print(u'{} relocate to :{},{}'.format(self.title, x, y))
except Exception as ex:
print(u'{}.relocate() exception:{},trace:{}'.format(self.title, str(ex), traceback.format_exc()))
# ----------------------------------------------------------------------
    # UI callback handlers
#----------------------------------------------------------------------
    def onPaint(self):
        """UI repaint callback"""
view = self.pi_main.getViewBox()
vRange = view.viewRange()
xmin = max(0,int(vRange[0][0]))
xmax = max(0,int(vRange[0][1]))
self.index = int((xmin+xmax)/2)+1
#----------------------------------------------------------------------
    def resignData(self,datas):
        """Update the data used for Y-axis auto-scaling"""
self.crosshair.datas = datas
def viewXRangeChanged(low,high,self):
vRange = self.viewRange()
xmin = max(0,int(vRange[0][0]))
xmax = max(0,int(vRange[0][1]))
xmax = min(xmax,len(datas))
if len(datas)>0 and xmax > xmin:
ymin = min(datas[xmin:xmax][low])
ymax = max(datas[xmin:xmax][high])
self.setRange(yRange = (ymin,ymax))
else:
self.setRange(yRange = (0,1))
view = self.pi_main.getViewBox()
view.sigXRangeChanged.connect(partial(viewXRangeChanged,'low','high'))
if self.display_vol:
view = self.pi_volume.getViewBox()
view.sigXRangeChanged.connect(partial(viewXRangeChanged,'volume','volume'))
if self.display_sub:
view = self.pi_sub.getViewBox()
#view.sigXRangeChanged.connect(partial(viewXRangeChanged,'openInterest','openInterest'))
view.setRange(yRange = (0,100))
#----------------------------------------------------------------------
    # Data handling
#----------------------------------------------------------------------
    def clearData(self):
        """Clear all data"""
        # clear the data and redraw
self.time_index = []
self.listBar = []
self.listVol = []
self.list_trade_arrow = []
self.x_t_trade_map = OrderedDict()
self.t_trade_dict = OrderedDict()
self.list_trans = []
self.list_trans_lines = []
self.list_markup = []
self.x_t_markup_map = OrderedDict()
self.t_markup_dict = OrderedDict()
        # clear main-chart indicators
self.main_indicator_data = {}
        # clear sub-chart indicators
self.sub_indicator_data = {}
self.datas = None
#----------------------------------------------------------------------
    def clear_indicator(self, main=True):
        """Clear the indicator graphics"""
        # clear the signal plots
if main:
for indicator in self.main_indicator_plots:
self.pi_main.removeItem(self.main_indicator_plots[indicator])
self.main_indicator_data = {}
self.main_indicator_plots = {}
else:
for indicator in self.sub_indicator_plots:
self.pi_sub.removeItem(self.sub_indicator_plots[indicator])
self.sub_indicator_data = {}
self.sub_indicator_plots = {}
#----------------------------------------------------------------------
def onBar(self, bar, main_indicator_datas, sub_indicator_datas, nWindow = 20, inited=False):
"""
        Append new bar data (bar-playback mode)
:param bar: VtBarData
:param main_indicator_datas:
:param sub_indicator_datas:
:param nWindow:
        :return: nWindow : maximum data window
"""
        # decide whether this is a new bar or an update of the last one
newBar = False if len(self.datas)>0 and bar.datetime==self.datas[-1].datetime else True
nrecords = len(self.datas) if newBar else len(self.datas)-1
bar.openInterest = np.random.randint(0,3) if bar.openInterest==np.inf or bar.openInterest==-np.inf else bar.openInterest
recordVol = (nrecords, bar.volume,0,0,bar.volume) if bar.close < bar.open else (nrecords,0,bar.volume,0,bar.volume)
if newBar and any(self.datas):
            # main-chart data: add one entry
self.datas.resize(nrecords+1, refcheck=0)
self.listBar.resize(nrecords+1, refcheck=0)
            # volume indicator: add one entry
self.listVol.resize(nrecords+1, refcheck=0)
            # main-chart indicators: add one entry each
for indicator in list(self.main_indicator_data.keys()):
indicator_data = self.main_indicator_data.get(indicator,[])
indicator_data.append(0)
            # sub-chart indicators: add one entry each
for indicator in list(self.sub_indicator_data.keys()):
indicator_data = self.sub_indicator_data.get(indicator, [])
indicator_data.append(0)
elif any(self.datas):
            # main-chart indicators: drop the last item (it will be rewritten below)
for indicator in list(self.main_indicator_data.keys()):
indicator_data = self.main_indicator_data.get(indicator, [])
indicator_data.pop()
            # sub-chart indicators: drop the last item (it will be rewritten below)
for indicator in list(self.sub_indicator_data.keys()):
indicator_data = self.sub_indicator_data.get(indicator, [])
indicator_data.pop()
if any(self.datas):
self.datas[-1] = (bar.datetime, bar.open, bar.close, bar.low, bar.high, bar.volume, bar.openInterest)
self.listBar[-1] = (nrecords, bar.open, bar.close, bar.low, bar.high)
self.listVol[-1] = recordVol
            # main-chart indicators: update the last record
for indicator in list(self.main_indicator_data.keys()):
indicator_data = self.main_indicator_data.get(indicator, [])
indicator_data[-1] = main_indicator_datas.get(indicator, 0)
            # sub-chart indicators: update the last record
for indicator in list(self.sub_indicator_data.keys()):
indicator_data = self.sub_indicator_data.get(indicator, [])
indicator_data[-1] = sub_indicator_datas.get(indicator, 0)
else:
            self.datas = np.rec.array([(bar.datetime, bar.open, bar.close, bar.low, bar.high, bar.volume, bar.openInterest)],\
names=('datetime', 'open','close','low','high','volume','openInterest'))
self.listBar = np.rec.array([(nrecords, bar.open, bar.close, bar.low, bar.high)],\
names=('time_int', 'open','close','low','high'))
self.listVol = np.rec.array([recordVol], names=('time_int','open','close','low','high'))
            # main-chart indicators: append the data
for indicator in list(self.main_indicator_data.keys()):
indicator_data = self.main_indicator_data.get(indicator, [])
indicator_data.append(main_indicator_datas.get(indicator, 0))
            # sub-chart indicators: append the data
for indicator in list(self.sub_indicator_data.keys()):
indicator_data = self.sub_indicator_data.get(indicator, [])
indicator_data.append(sub_indicator_datas.get(indicator, 0))
self.resignData(self.datas)
self.axisTime.update_xdict({nrecords:bar.datetime})
if 'openInterest' in self.sub_indicator_data:
self.sub_indicator_data['openInterest'].append(bar.openInterest)
self.resignData(self.datas)
nWindow0 = min(nrecords,nWindow)
xMax = nrecords+2
xMin = max(0, nrecords-nWindow0)
if inited:
self.plot_all(False, xMin, xMax)
if not newBar:
self.update_all()
self.index = 0
self.crosshair.signal.emit((None,None))
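    # Illustrative playback loop (a sketch; assumes `bars` is a sequence of VtBarData and
    # the indicator values are computed elsewhere):
    #
    #   for i, bar in enumerate(bars):
    #       widget.onBar(bar,
    #                    main_indicator_datas={'ma10': ma10_values[i]},
    #                    sub_indicator_datas={'openInterest': bar.openInterest},
    #                    inited=(i == len(bars) - 1))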
def add_signal(self, t_value, direction, offset, price, volume):
"""
        Add a trade signal to the chart
:param t_value:
:param direction:
:param offset:
:param price:
:param volume:
:return:
"""
        # find the bar x position closest to the signal time
x = self.axisTime.get_x_by_time(t_value)
need_plot_arrow = False
        # align the signal time to the bar's time
if x not in self.x_t_trade_map:
bar_time = self.axisTime.xdict.get(x, t_value)
else:
            # if the mapping already exists, update it
bar_time = self.x_t_trade_map[x]
trade_node = self.t_trade_dict.get(bar_time, None)
if trade_node is None:
            # no trade signal at this bar time yet
self.t_trade_dict[bar_time] = {'x': x, 'signals': [{'direction': direction, 'offset':offset,'price': price,'volume': volume}]}
self.x_t_trade_map[x] = bar_time
need_plot_arrow = True
else:
#match_signals = [t for t in trade_node['signals'] if t['direction'] == direction and t['offset'] == offset]
#if len(match_signals) == 0:
need_plot_arrow = True
trade_node['signals'].append({'direction': direction, 'offset':offset, 'price': price, 'volume': volume})
self.x_t_trade_map[x] = bar_time
        # the arrow icon needs to be drawn
if need_plot_arrow:
arrow = None
            # long signals
if direction == DIRECTION_LONG:
if offset == OFFSET_OPEN:
# buy
arrow = pg.ArrowItem(pos=(x, price), angle=135, brush=None, pen={'color':'r','width':1}, tipAngle=30, baseAngle=20, tailLen=10, tailWidth=2)
else:
# cover
arrow = pg.ArrowItem(pos=(x, price), angle=0, brush=(255, 0, 0),pen=None, headLen=20, headWidth=20, tailLen=10, tailWidth=2)
            # short signals
elif direction == DIRECTION_SHORT:
if offset == OFFSET_CLOSE:
# sell
arrow = pg.ArrowItem(pos=(x, price), angle=0, brush=(0, 255, 0),pen=None ,headLen=20, headWidth=20, tailLen=10, tailWidth=2)
else:
# short
                    arrow = pg.ArrowItem(pos=(x, price), angle=135, brush=None, pen={'color': 'g', 'width': 1}, tipAngle=30, baseAngle=20, tailLen=10, tailWidth=2)  # assumption: the original line is truncated here; the short arrow is sketched as the green mirror of the buy arrow
<gh_stars>1-10
# -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from collections import OrderedDict
import functools
import re
from typing import Dict, Sequence, Tuple, Type, Union
import pkg_resources
import google.api_core.client_options as ClientOptions # type: ignore
from google.api_core import exceptions as core_exceptions # type: ignore
from google.api_core import gapic_v1 # type: ignore
from google.api_core import retry as retries # type: ignore
from google.auth import credentials as ga_credentials # type: ignore
from google.oauth2 import service_account # type: ignore
from google.api_core import operation # type: ignore
from google.api_core import operation_async # type: ignore
from google.cloud.dialogflowcx_v3.services.test_cases import pagers
from google.cloud.dialogflowcx_v3.types import test_case
from google.cloud.dialogflowcx_v3.types import test_case as gcdc_test_case
from google.protobuf import field_mask_pb2 # type: ignore
from google.protobuf import timestamp_pb2 # type: ignore
from .transports.base import TestCasesTransport, DEFAULT_CLIENT_INFO
from .transports.grpc_asyncio import TestCasesGrpcAsyncIOTransport
from .client import TestCasesClient
class TestCasesAsyncClient:
"""Service for managing [Test
Cases][google.cloud.dialogflow.cx.v3.TestCase] and [Test Case
Results][google.cloud.dialogflow.cx.v3.TestCaseResult].
"""
_client: TestCasesClient
DEFAULT_ENDPOINT = TestCasesClient.DEFAULT_ENDPOINT
DEFAULT_MTLS_ENDPOINT = TestCasesClient.DEFAULT_MTLS_ENDPOINT
agent_path = staticmethod(TestCasesClient.agent_path)
parse_agent_path = staticmethod(TestCasesClient.parse_agent_path)
entity_type_path = staticmethod(TestCasesClient.entity_type_path)
parse_entity_type_path = staticmethod(TestCasesClient.parse_entity_type_path)
environment_path = staticmethod(TestCasesClient.environment_path)
parse_environment_path = staticmethod(TestCasesClient.parse_environment_path)
flow_path = staticmethod(TestCasesClient.flow_path)
parse_flow_path = staticmethod(TestCasesClient.parse_flow_path)
intent_path = staticmethod(TestCasesClient.intent_path)
parse_intent_path = staticmethod(TestCasesClient.parse_intent_path)
page_path = staticmethod(TestCasesClient.page_path)
parse_page_path = staticmethod(TestCasesClient.parse_page_path)
test_case_path = staticmethod(TestCasesClient.test_case_path)
parse_test_case_path = staticmethod(TestCasesClient.parse_test_case_path)
test_case_result_path = staticmethod(TestCasesClient.test_case_result_path)
parse_test_case_result_path = staticmethod(TestCasesClient.parse_test_case_result_path)
transition_route_group_path = staticmethod(TestCasesClient.transition_route_group_path)
parse_transition_route_group_path = staticmethod(TestCasesClient.parse_transition_route_group_path)
webhook_path = staticmethod(TestCasesClient.webhook_path)
parse_webhook_path = staticmethod(TestCasesClient.parse_webhook_path)
common_billing_account_path = staticmethod(TestCasesClient.common_billing_account_path)
parse_common_billing_account_path = staticmethod(TestCasesClient.parse_common_billing_account_path)
common_folder_path = staticmethod(TestCasesClient.common_folder_path)
parse_common_folder_path = staticmethod(TestCasesClient.parse_common_folder_path)
common_organization_path = staticmethod(TestCasesClient.common_organization_path)
parse_common_organization_path = staticmethod(TestCasesClient.parse_common_organization_path)
common_project_path = staticmethod(TestCasesClient.common_project_path)
parse_common_project_path = staticmethod(TestCasesClient.parse_common_project_path)
common_location_path = staticmethod(TestCasesClient.common_location_path)
parse_common_location_path = staticmethod(TestCasesClient.parse_common_location_path)
@classmethod
def from_service_account_info(cls, info: dict, *args, **kwargs):
"""Creates an instance of this client using the provided credentials
info.
Args:
info (dict): The service account private key info.
args: Additional arguments to pass to the constructor.
kwargs: Additional arguments to pass to the constructor.
Returns:
TestCasesAsyncClient: The constructed client.
"""
return TestCasesClient.from_service_account_info.__func__(TestCasesAsyncClient, info, *args, **kwargs) # type: ignore
@classmethod
def from_service_account_file(cls, filename: str, *args, **kwargs):
"""Creates an instance of this client using the provided credentials
file.
Args:
filename (str): The path to the service account private key json
file.
args: Additional arguments to pass to the constructor.
kwargs: Additional arguments to pass to the constructor.
Returns:
TestCasesAsyncClient: The constructed client.
"""
return TestCasesClient.from_service_account_file.__func__(TestCasesAsyncClient, filename, *args, **kwargs) # type: ignore
from_service_account_json = from_service_account_file
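    # Illustrative usage (a sketch; the key file path is a placeholder):
    #
    #   client = TestCasesAsyncClient.from_service_account_file("service-account.json")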
@property
def transport(self) -> TestCasesTransport:
"""Returns the transport used by the client instance.
Returns:
TestCasesTransport: The transport used by the client instance.
"""
return self._client.transport
get_transport_class = functools.partial(type(TestCasesClient).get_transport_class, type(TestCasesClient))
def __init__(self, *,
credentials: ga_credentials.Credentials = None,
transport: Union[str, TestCasesTransport] = "grpc_asyncio",
client_options: ClientOptions = None,
client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
) -> None:
"""Instantiates the test cases client.
Args:
credentials (Optional[google.auth.credentials.Credentials]): The
authorization credentials to attach to requests. These
credentials identify the application to the service; if none
are specified, the client will attempt to ascertain the
credentials from the environment.
transport (Union[str, ~.TestCasesTransport]): The
transport to use. If set to None, a transport is chosen
automatically.
client_options (ClientOptions): Custom options for the client. It
won't take effect if a ``transport`` instance is provided.
(1) The ``api_endpoint`` property can be used to override the
default endpoint provided by the client. GOOGLE_API_USE_MTLS_ENDPOINT
environment variable can also be used to override the endpoint:
"always" (always use the default mTLS endpoint), "never" (always
use the default regular endpoint) and "auto" (auto switch to the
default mTLS endpoint if client certificate is present, this is
the default value). However, the ``api_endpoint`` property takes
precedence if provided.
(2) If GOOGLE_API_USE_CLIENT_CERTIFICATE environment variable
is "true", then the ``client_cert_source`` property can be used
to provide client certificate for mutual TLS transport. If
not provided, the default SSL client certificate will be used if
present. If GOOGLE_API_USE_CLIENT_CERTIFICATE is "false" or not
set, no client certificate will be used.
Raises:
google.auth.exceptions.MutualTlsChannelError: If mutual TLS transport
creation failed for any reason.
"""
self._client = TestCasesClient(
credentials=credentials,
transport=transport,
client_options=client_options,
client_info=client_info,
)
async def list_test_cases(self,
request: test_case.ListTestCasesRequest = None,
*,
parent: str = None,
retry: retries.Retry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> pagers.ListTestCasesAsyncPager:
r"""Fetches a list of test cases for a given agent.
Args:
request (:class:`google.cloud.dialogflowcx_v3.types.ListTestCasesRequest`):
The request object. The request message for
[TestCases.ListTestCases][google.cloud.dialogflow.cx.v3.TestCases.ListTestCases].
parent (:class:`str`):
                Required. The agent to list all test cases for. Format:
``projects/<Project ID>/locations/<Location ID>/agents/<Agent ID>``.
This corresponds to the ``parent`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.cloud.dialogflowcx_v3.services.test_cases.pagers.ListTestCasesAsyncPager:
The response message for
[TestCases.ListTestCases][google.cloud.dialogflow.cx.v3.TestCases.ListTestCases].
Iterating over this object will yield results and
resolve additional pages automatically.
"""
# Create or coerce a protobuf request object.
# Sanity check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([parent])
if request is not None and has_flattened_params:
raise ValueError("If the `request` argument is set, then none of "
"the individual field arguments should be set.")
request = test_case.ListTestCasesRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if parent is not None:
request.parent = parent
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = gapic_v1.method_async.wrap_method(
self._client._transport.list_test_cases,
default_timeout=None,
client_info=DEFAULT_CLIENT_INFO,
)
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((
("parent", request.parent),
)),
)
# Send the request.
response = await rpc(
request,
retry=retry,
timeout=timeout,
metadata=metadata,
)
# This method is paged; wrap the response in a pager, which provides
# an `__aiter__` convenience method.
response = pagers.ListTestCasesAsyncPager(
method=rpc,
request=request,
response=response,
metadata=metadata,
)
# Done; return the response.
return response
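    # Illustrative usage (not part of the generated client; a minimal sketch assuming
    # default application credentials; the project/location/agent IDs are placeholders):
    #
    #   async def print_test_case_names():
    #       client = TestCasesAsyncClient()
    #       pager = await client.list_test_cases(
    #           parent="projects/my-project/locations/global/agents/my-agent")
    #       async for test_case_ in pager:
    #           print(test_case_.name)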
async def batch_delete_test_cases(self,
request: test_case.BatchDeleteTestCasesRequest = None,
*,
parent: str = None,
retry: retries.Retry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> None:
r"""Batch deletes test cases.
Args:
request (:class:`google.cloud.dialogflowcx_v3.types.BatchDeleteTestCasesRequest`):
The request object. The request message for
[TestCases.BatchDeleteTestCases][google.cloud.dialogflow.cx.v3.TestCases.BatchDeleteTestCases].
parent (:class:`str`):
Required. The agent to delete test cases from. Format:
``projects/<Project ID>/locations/<Location ID>/agents/<Agent ID>``.
This corresponds to the ``parent`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
"""
# Create or coerce a protobuf request object.
# Sanity check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([parent])
if request is not None and has_flattened_params:
raise ValueError("If the `request` argument is set, then none of "
"the individual field arguments should be set.")
request = test_case.BatchDeleteTestCasesRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if parent is not None:
request.parent = parent
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = gapic_v1.method_async.wrap_method(
self._client._transport.batch_delete_test_cases,
default_timeout=None,
client_info=DEFAULT_CLIENT_INFO,
)
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((
("parent", request.parent),
)),
)
# Send the request.
await rpc(
request,
retry=retry,
timeout=timeout,
metadata=metadata,
)
async def get_test_case(self,
request: test_case.GetTestCaseRequest = None,
*,
name: str = None,
retry: retries.Retry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> test_case.TestCase:
r"""Gets a test case.
Args:
request (:class:`google.cloud.dialogflowcx_v3.types.GetTestCaseRequest`):
                The request object.
<filename>conans/client/conan_api.py
import hashlib
import os
import sys
import requests
import conans
from conans import __version__ as client_version, tools
from conans.client.client_cache import ClientCache
from conans.client.conf import MIN_SERVER_COMPATIBLE_VERSION, ConanClientConfigParser
from conans.client.conf.detect import detect_defaults_settings
from conans.client.manager import ConanManager, existing_info_files
from conans.client.migrations import ClientMigrator
from conans.client.output import ConanOutput, ScopedOutput
from conans.client.profile_loader import read_profile, get_profile_path, profile_from_args, \
read_conaninfo_profile
from conans.client.remote_manager import RemoteManager
from conans.client.remote_registry import RemoteRegistry
from conans.client.rest.auth_manager import ConanApiAuthManager
from conans.client.rest.rest_client import RestApiClient
from conans.client.rest.version_checker import VersionCheckerRequester
from conans.client.runner import ConanRunner
from conans.client.store.localdb import LocalDB
from conans.client.cmd.test import PackageTester
from conans.client.userio import UserIO
from conans.errors import ConanException
from conans.model.env_info import EnvValues
from conans.model.options import OptionsValues
from conans.model.profile import Profile
from conans.model.ref import ConanFileReference
from conans.model.scope import Scopes
from conans.model.version import Version
from conans.paths import CONANFILE, get_conan_user_home, CONANFILE_TXT, CONANINFO, BUILD_INFO
from conans.search.search import DiskSearchManager, DiskSearchAdapter
from conans.util.env_reader import get_env
from conans.util.files import rmdir, save_files, exception_message_safe, save, mkdir
from conans.util.log import configure_logger
from conans.util.tracer import log_command, log_exception
from conans.client.loader_parse import load_conanfile_class
from conans.client import settings_preprocessor
from conans.tools import set_global_instances
from conans.client.cmd.uploader import CmdUpload
default_manifest_folder = '.conan_manifests'
def get_basic_requester(client_cache):
requester = requests.Session()
proxies = client_cache.conan_config.proxies
if proxies:
        # Honor the requests NO_PROXY env variable, which is not defined as a proxy entry like http=
no_proxy = proxies.pop("no_proxy", None)
if no_proxy:
os.environ["NO_PROXY"] = no_proxy
requester.proxies = proxies
return requester
def api_method(f):
def wrapper(*args, **kwargs):
the_self = args[0]
try:
log_command(f.__name__, kwargs)
with tools.environment_append(the_self._client_cache.conan_config.env_vars):
# Patch the globals in tools
return f(*args, **kwargs)
except Exception as exc:
msg = exception_message_safe(exc)
try:
log_exception(exc, msg)
except:
pass
raise
return wrapper
def prepare_cwd(cwd):
if cwd:
if os.path.isabs(cwd):
return cwd
else:
return os.path.abspath(cwd)
else:
return os.getcwd()
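# Illustrative use of the API facade defined below (a sketch; assumes a normal Conan
# user home with default configuration, and the package reference is a placeholder):
#
#   conan_api, client_cache, user_io = ConanAPIV1.factory()
#   conan_api.new("mypkg/0.1@user/testing", header=True)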
class ConanAPIV1(object):
@staticmethod
def factory():
"""Factory"""
def instance_remote_manager(client_cache):
requester = get_basic_requester(client_cache)
# Verify client version against remotes
version_checker_req = VersionCheckerRequester(requester, Version(client_version),
Version(MIN_SERVER_COMPATIBLE_VERSION),
out)
# To handle remote connections
put_headers = client_cache.read_put_headers()
rest_api_client = RestApiClient(out, requester=version_checker_req,
put_headers=put_headers)
# To store user and token
localdb = LocalDB(client_cache.localdb)
# Wraps RestApiClient to add authentication support (same interface)
auth_manager = ConanApiAuthManager(rest_api_client, user_io, localdb)
# Handle remote connections
remote_manager = RemoteManager(client_cache, auth_manager, out)
return remote_manager
use_color = get_env("CONAN_COLOR_DISPLAY", 1)
if use_color and hasattr(sys.stdout, "isatty") and sys.stdout.isatty():
import colorama
colorama.init()
color = True
else:
color = False
out = ConanOutput(sys.stdout, color)
user_io = UserIO(out=out)
try:
client_cache = migrate_and_get_client_cache(get_conan_user_home(), out)
except Exception as e:
out.error(str(e))
raise
with tools.environment_append(client_cache.conan_config.env_vars):
            # Adjust CONAN_LOGGING_LEVEL with the env just read
conans.util.log.logger = configure_logger()
# Get the new command instance after migrations have been done
remote_manager = instance_remote_manager(client_cache)
# Get a search manager
search_adapter = DiskSearchAdapter()
search_manager = DiskSearchManager(client_cache, search_adapter)
# Settings preprocessor
conan = Conan(client_cache, user_io, get_conan_runner(), remote_manager, search_manager,
settings_preprocessor)
return conan, client_cache, user_io
def __init__(self, client_cache, user_io, runner, remote_manager, search_manager,
settings_preprocessor):
assert isinstance(user_io, UserIO)
assert isinstance(client_cache, ClientCache)
self._client_cache = client_cache
self._user_io = user_io
self._runner = runner
self._manager = ConanManager(client_cache, user_io, runner, remote_manager, search_manager,
settings_preprocessor)
# Patch the tools module with a good requester and user_io
set_global_instances(self._user_io.out, get_basic_requester(self._client_cache))
@api_method
def new(self, name, header=False, pure_c=False, test=False, exports_sources=False, bare=False,
cwd=None, visual_versions=None, linux_gcc_versions=None, linux_clang_versions=None,
osx_clang_versions=None, shared=None, upload_url=None, gitignore=None,
gitlab_gcc_versions=None, gitlab_clang_versions=None):
from conans.client.cmd.new import cmd_new
cwd = prepare_cwd(cwd)
files = cmd_new(name, header=header, pure_c=pure_c, test=test,
exports_sources=exports_sources, bare=bare,
visual_versions=visual_versions,
linux_gcc_versions=linux_gcc_versions,
linux_clang_versions=linux_clang_versions,
osx_clang_versions=osx_clang_versions, shared=shared,
upload_url=upload_url, gitignore=gitignore,
gitlab_gcc_versions=gitlab_gcc_versions,
gitlab_clang_versions=gitlab_clang_versions)
save_files(cwd, files)
for f in sorted(files):
self._user_io.out.success("File saved: %s" % f)
@api_method
def test(self, path, profile_name=None, settings=None, options=None, env=None, remote=None,
update=False, user=None, channel=None, name=None,
version=None, build_modes=None):
settings = settings or []
options = options or []
env = env or []
cwd = os.getcwd()
base_folder = self._abs_relative_to(path, cwd, default=cwd)
conanfile_abs_path = self._get_conanfile_path(base_folder, "conanfile.py")
profile = profile_from_args(profile_name, settings, options, env, None, cwd,
self._client_cache)
pt = PackageTester(self._manager, self._user_io)
pt.install_build_and_test(conanfile_abs_path, profile, name, version, user, channel, remote,
update, build_modes=build_modes)
@api_method
def test_package(self, profile_name=None, settings=None, options=None, env=None,
scope=None, test_folder=None, not_export=False, build=None, keep_source=False,
verify=None, manifests=None,
manifests_interactive=None,
remote=None, update=False, cwd=None, user=None, channel=None, name=None,
version=None):
self._user_io.out.warn("THIS METHOD IS DEPRECATED and will be removed. "
"Use 'conan create' to generate binary packages for a "
"recipe. If you want to test a package you can use 'conan test' "
"command.")
settings = settings or []
options = options or []
env = env or []
cwd = prepare_cwd(cwd)
if name and version:
package_name = name
package_version = version
else:
conanfile_path = os.path.join(cwd, "conanfile.py")
conanfile = load_conanfile_class(conanfile_path)
package_name = getattr(conanfile, "name", None)
package_version = getattr(conanfile, "version", None)
if not package_name or not package_version:
raise ConanException("conanfile.py doesn't declare package name or version")
test_folders = [test_folder] if test_folder else ["test_package", "test"]
for test_folder_name in test_folders:
test_folder = os.path.join(cwd, test_folder_name)
test_conanfile_path = os.path.join(test_folder, "conanfile.py")
if os.path.exists(test_conanfile_path):
break
else:
raise ConanException("test folder '%s' not available, "
"or it doesn't have a conanfile.py" % test_folder_name)
sha = hashlib.sha1("".join(options + settings).encode()).hexdigest()
build_folder = os.path.join(test_folder, "build", sha)
rmdir(build_folder)
# shutil.copytree(test_folder, build_folder)
profile = profile_from_args(profile_name, settings, options, env, scope, cwd,
self._client_cache)
loader = self._manager.get_loader(profile)
test_conanfile = loader.load_conan(test_conanfile_path, self._user_io.out, consumer=True)
try:
if hasattr(test_conanfile, "requirements"):
test_conanfile.requirements()
except Exception as e:
raise ConanException("Error in test_package/conanfile.py requirements(). %s" % str(e))
requirement = test_conanfile.requires.get(package_name)
if requirement:
if requirement.conan_reference.version != package_version:
raise ConanException("package version is '%s', but test_package/conanfile "
"is requiring version '%s'\n"
"You can remove this requirement and use "
"'conan test_package user/channel' instead"
% (package_version, requirement.conan_reference.version))
user = user or requirement.conan_reference.user
channel = channel or requirement.conan_reference.channel
if not user or not channel:
raise ConanException("Please specify user and channel")
conanfile_reference = ConanFileReference(package_name, package_version, user, channel)
# Forcing an export!
if not not_export:
self._user_io.out.info("Exporting package recipe")
self._manager.export(user, channel, cwd, keep_source=keep_source)
if build is None: # Not specified, force build the tested library
build = [package_name]
manifests = _parse_manifests_arguments(verify, manifests, manifests_interactive, cwd)
manifest_folder, manifest_interactive, manifest_verify = manifests
self._manager.install(inject_require=conanfile_reference,
reference=test_folder,
install_folder=build_folder,
manifest_folder=manifest_folder,
manifest_verify=manifest_verify,
manifest_interactive=manifest_interactive,
remote=remote,
profile=profile,
build_modes=build,
update=update,
generators=["txt"]
)
test_conanfile = os.path.join(test_folder, CONANFILE)
self._manager.build(test_conanfile, test_folder, build_folder, package_folder=None,
install_folder=build_folder,
test=str(conanfile_reference))
@api_method
def create(self, profile_name=None, settings=None,
options=None, env=None, scope=None, test_folder=None, not_export=False,
build_modes=None,
keep_source=False, verify=None,
manifests=None, manifests_interactive=None,
remote=None, update=False, conan_file_path=None, filename=None,
user=None, channel=None, name=None, version=None, werror=False):
settings = settings or []
options = options or []
env = env or []
self._user_io.out.werror_active = werror
cwd = os.getcwd()
conanfile_folder = self._abs_relative_to(conan_file_path, cwd, default=cwd)
if not name or not version:
conanfile_abs_path = self._get_conanfile_path(conanfile_folder, filename or CONANFILE)
conanfile = load_conanfile_class(conanfile_abs_path)
name, version = conanfile.name, conanfile.version
if not name or not version:
raise ConanException("conanfile.py doesn't declare package name or version")
reference = ConanFileReference(name, version, user, channel)
scoped_output = ScopedOutput(str(reference), self._user_io.out)
# Forcing an export!
if not not_export:
scoped_output.highlight("Exporting package recipe")
self._manager.export(user, channel, conanfile_folder, keep_source=keep_source,
name=name, version=version, filename=filename)
if build_modes is None: # Not specified, force build the tested library
build_modes = [name]
manifests = _parse_manifests_arguments(verify, manifests, manifests_interactive, cwd)
manifest_folder, manifest_interactive, manifest_verify = manifests
profile = profile_from_args(profile_name, settings, options, env, scope,
cwd, self._client_cache)
self._manager.install(reference=reference,
install_folder=None, # Not output anything
manifest_folder=manifest_folder,
manifest_verify=manifest_verify,
manifest_interactive=manifest_interactive,
remote=remote,
profile=profile,
build_modes=build_modes,
update=update,
filename=filename)
base_folder = self._abs_relative_to(conan_file_path, cwd, default=cwd)
        def get_test_conanfile_path(tf):
            """Searches in the declared test_folder or in the standard locations"""
test_folders = [tf] if tf else ["test_package", "test"]
for test_folder_name in test_folders:
test_folder = os.path.join(base_folder, test_folder_name)
test_conanfile_path = os.path.join(test_folder, "conanfile.py")
if os.path.exists(test_conanfile_path):
return test_conanfile_path
else:
if tf:
raise ConanException("test folder '%s' not available, "
"or it doesn't have a conanfile.py" % tf)
test_conanfile_path = get_test_conanfile_path(test_folder)
if test_conanfile_path:
pt = PackageTester(self._manager, self._user_io)
scoped_output.highlight("Testing with 'test_package'")
pt.install_build_and_test(test_conanfile_path, profile, name, version, user,
channel, remote, update)
def _get_profile(self, profile_name, settings, options, env, cwd, install_folder):
infos_present = existing_info_files(install_folder)
if not infos_present:
profile = profile_from_args(profile_name, settings, options, env=env, scope=None,
cwd=cwd, client_cache=self._client_cache)
else:
profile = read_conaninfo_profile(install_folder)
return profile
def _validate_can_read_infos(self, install_folder, cwd):
if install_folder and not existing_info_files(self._abs_relative_to(install_folder, cwd)):
raise ConanException("The specified --install-folder doesn't contain '%s' and '%s' "
"files" % (CONANINFO, BUILD_INFO))
@staticmethod
def _validate_one_settings_source(install_folder, profile_name, settings, options, env):
if install_folder and existing_info_files(install_folder) and \
(profile_name or settings or options or env):
raise ConanException("%s and %s are found, at '%s' folder, so specifying profile, "
"settings, options or env is not allowed" % (CONANINFO, BUILD_INFO,
install_folder))
@api_method
def export_pkg(self, path, name, channel, source_folder=None, build_folder=None,
install_folder=None, profile_name=None, settings=None, options=None,
env=None, force=False, user=None, version=None):
settings = settings or []
options = options or []
env = env or []
cwd = os.getcwd()
# Checks that info files exists if the install folder is specified
self._validate_can_read_infos(install_folder, cwd)
path = self._abs_relative_to(path, cwd)
build_folder = self._abs_relative_to(build_folder, cwd, default=cwd)
install_folder = self._abs_relative_to(install_folder, cwd, default=build_folder)
source_folder = self._abs_relative_to(source_folder, cwd, default=build_folder)
        # Checks that settings and info files are not both specified
: ( 25599, 25600 ),
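    # Illustrative use of these (start, end) column spans (a sketch; `columns` stands for
    # this dict and `record` for one fixed-width data line -- both names are placeholders):
    #
    #   def extract_field(record, name):
    #       start, end = columns[name]
    #       return record[start:end].strip()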
"AL37a3" : ( 25600, 25601 ),
"AL37a4" : ( 25601, 25602 ),
"AL37a5" : ( 25602, 25603 ),
"AL37a6" : ( 25603, 25604 ),
"AL37a7" : ( 25604, 25605 ),
"AL37a8" : ( 25605, 25606 ),
"AL37a9" : ( 25606, 25607 ),
"AL37a10" : ( 25607, 25608 ),
"AL37b" : ( 25608, 25611 ),
"AL37c" : ( 25611, 25612 ),
"AL37aAgeOns" : ( 25612, 25614 ),
"AL37aOns" : ( 25614, 25615 ),
"AL37aAgeRec" : ( 25615, 25617 ),
"AL37aRec" : ( 25617, 25618 ),
"AL37eAgeOns" : ( 25618, 25620 ),
"AL37eOns" : ( 25620, 25621 ),
"AL37eAgeRec" : ( 25621, 25623 ),
"AL37eRec" : ( 25623, 25624 ),
"Al37aCount" : ( 25624, 25626 ),
"al37TOT" : ( 25626, 25628 ),
"AL37aCluster" : ( 25628, 25629 ),
"AL37bCluster" : ( 25629, 25630 ),
"Al37cCluster" : ( 25630, 25631 ),
"AL37f" : ( 25631, 25634 ),
"AL37f1" : ( 25634, 25635 ),
"AL37g" : ( 25635, 25636 ),
"AL37h" : ( 25636, 25637 ),
"AL37hAgeOns" : ( 25637, 25639 ),
"AL37hOns" : ( 25639, 25640 ),
"AL37hAgeRec" : ( 25640, 25642 ),
"AL37hRec" : ( 25642, 25643 ),
"AL37i" : ( 25643, 25644 ),
"AL37j" : ( 25644, 25645 ),
"Al37Another" : ( 25645, 25646 ),
"AL37_Specify" : ( 25646, 25726 ),
"AL37_Code" : ( 25726, 25745 ),
"Al37Another2" : ( 25745, 25746 ),
"AL37_Specify2" : ( 25746, 25826 ),
"AL37_Code2" : ( 25826, 25845 ),
"Al37Another3" : ( 25845, 25846 ),
"AL37_Specify3" : ( 25846, 25926 ),
"AL37_Code3" : ( 25926, 25945 ),
"Al37Another4" : ( 25945, 25946 ),
"AL37_Specify4" : ( 25946, 26026 ),
"AL37_Code4" : ( 26026, 26045 ),
"AL38" : ( 26045, 26046 ),
"AL38aAgeOns" : ( 26046, 26048 ),
"AL38aOns" : ( 26048, 26049 ),
"AL38aAgeRec" : ( 26049, 26051 ),
"AL38aRec" : ( 26051, 26052 ),
"AL38b" : ( 26052, 26055 ),
"AL38b1" : ( 26055, 26056 ),
"AL38c" : ( 26056, 26057 ),
"AL38cAgeOns" : ( 26057, 26059 ),
"AL38cOns" : ( 26059, 26060 ),
"AL38cAgeRec" : ( 26060, 26062 ),
"AL38cRec" : ( 26062, 26063 ),
"AL38d" : ( 26063, 26064 ),
"Al38Another" : ( 26064, 26065 ),
"AL38_Specify" : ( 26065, 26145 ),
"AL38_Code" : ( 26145, 26164 ),
"Al38Another2" : ( 26164, 26165 ),
"AL38_Specify2" : ( 26165, 26245 ),
"AL38_Code2" : ( 26245, 26264 ),
"Al38Another3" : ( 26264, 26265 ),
"AL38_Specify3" : ( 26265, 26345 ),
"AL38_Code3" : ( 26345, 26364 ),
"Al38Another4" : ( 26364, 26365 ),
"AL38_Specify4" : ( 26365, 26445 ),
"AL38_Code4" : ( 26445, 26464 ),
"AL39Another" : ( 26464, 26465 ),
"AL39_Specify" : ( 26465, 26545 ),
"AL39_Code" : ( 26545, 26564 ),
"AL39Another2" : ( 26564, 26565 ),
"AL39_Specify2" : ( 26565, 26645 ),
"AL39_Code2" : ( 26645, 26664 ),
"AL39Another3" : ( 26664, 26665 ),
"AL39_Specify3" : ( 26665, 26745 ),
"AL39_Code3" : ( 26745, 26764 ),
"AL39Another4" : ( 26764, 26765 ),
"AL39_Specify4" : ( 26765, 26845 ),
"AL39_Code4" : ( 26845, 26864 ),
"AL39" : ( 26864, 26865 ),
"AL39aAgeOns" : ( 26865, 26867 ),
"AL39aOns" : ( 26867, 26868 ),
"AL39aAgeRec" : ( 26868, 26870 ),
"AL39aRec" : ( 26870, 26871 ),
"AL39b" : ( 26871, 26874 ),
"AL39b1" : ( 26874, 26875 ),
"AL39c" : ( 26875, 26876 ),
"AL39cAgeOns" : ( 26876, 26878 ),
"AL39cOns" : ( 26878, 26879 ),
"AL39cAgeRec" : ( 26879, 26881 ),
"AL39cRec" : ( 26881, 26882 ),
"AL39d" : ( 26882, 26883 ),
"AL39_Specify1" : ( 26883, 26963 ),
"AL39_code1" : ( 26963, 26982 ),
"AL39_specify5" : ( 26982, 27062 ),
"AL39_code5" : ( 27062, 27081 ),
"ALSxYrCount" : ( 27081, 27083 ),
"ALSxMnthCount" : ( 27083, 27085 ),
"AL40AgeOns" : ( 27085, 27087 ),
"AL40Ons" : ( 27087, 27088 ),
"AL40AgeRec" : ( 27088, 27090 ),
"AL40Rec" : ( 27090, 27091 ),
"Al40Qsx" : ( 27091, 27092 ),
"Al40Qsx2" : ( 27092, 27093 ),
"Al40Qsx3" : ( 27093, 27094 ),
"Al40Qsx4" : ( 27094, 27095 ),
"Al40Qsx5" : ( 27095, 27096 ),
"Al40Qsx6" : ( 27096, 27097 ),
"Al40Qsx7" : ( 27097, 27098 ),
"Al40Qsx8" : ( 27098, 27099 ),
"Al40Qsx9" : ( 27099, 27100 ),
"Al40Qsx10" : ( 27100, 27101 ),
"Al40Qsx11" : ( 27101, 27102 ),
"Al40Qsx12" : ( 27102, 27103 ),
"Al40Qsx13" : ( 27103, 27104 ),
"Al40Qsx14" : ( 27104, 27105 ),
"Al40Qsx15" : ( 27105, 27106 ),
"Al40Qsx16" : ( 27106, 27107 ),
"Al40Qsx17" : ( 27107, 27108 ),
"Al40Qsx18" : ( 27108, 27109 ),
"Al40Qsx19" : ( 27109, 27110 ),
"Al40Qsx20" : ( 27110, 27111 ),
"Al40Qsx21" : ( 27111, 27112 ),
"Al40Qsx22" : ( 27112, 27113 ),
"Al40Qsx23" : ( 27113, 27114 ),
"Al40Qsx24" : ( 27114, 27115 ),
"Al40Qsx25" : ( 27115, 27116 ),
"Al40Qsx26" : ( 27116, 27117 ),
"Al40Qsx27" : ( 27117, 27118 ),
"Al40Qsx28" : ( 27118, 27119 ),
"Al40Qsx29" : ( 27119, 27120 ),
"Al40Qsx30" : ( 27120, 27121 ),
"Al40Qsx31" : ( 27121, 27122 ),
"Al40Qsx32" : ( 27122, 27123 ),
"Al40aQsx" : ( 27123, 27124 ),
"Al40aQsx2" : ( 27124, 27125 ),
"Al40aQsx3" : ( 27125, 27126 ),
"Al40aQsx4" : ( 27126, 27127 ),
"Al40aQsx5" : ( 27127, 27128 ),
"Al40aQsx6" : ( 27128, 27129 ),
"Al40aQsx7" : ( 27129, 27130 ),
"Al40aQsx8" : ( 27130, 27131 ),
"Al40aQsx9" : ( 27131, 27132 ),
"Al40aQsx10" : ( 27132, 27133 ),
"Al40aQsx11" : ( 27133, 27134 ),
"Al40aQsx12" : ( 27134, 27135 ),
"Al40aQsx13" : ( 27135, 27136 ),
"Al40aQsx14" : ( 27136, 27137 ),
"Al40aQsx15" : ( 27137, 27138 ),
"Al40aQsx16" : ( 27138, 27139 ),
"Al40aQsx17" : ( 27139, 27140 ),
"Al40aQsx18" : ( 27140, 27141 ),
"Al40aQsx19" : ( 27141, 27142 ),
"Al40aQsx20" : ( 27142, 27143 ),
"Al40aQsx21" : ( 27143, 27144 ),
"Al40aQsx22" : ( 27144, 27145 ),
"Al40aQsx23" : ( 27145, 27146 ),
"Al40aQsx24" : ( 27146, 27147 ),
"Al40aQsx25" : ( 27147, 27148 ),
"Al40aQsx26" : ( 27148, 27149 ),
"Al40aQsx27" : ( 27149, 27150 ),
"Al40aQsx28" : ( 27150, 27151 ),
"Al40aQsx29" : ( 27151, 27152 ),
"Al40aQsx30" : ( 27152, 27153 ),
"Al40aQsx31" : ( 27153, 27154 ),
"Al40aQsx32" : ( 27154, 27155 ),
"AL9dYrCl" : ( 27155, 27156 ),
"AL9iYrCl" : ( 27156, 27157 ),
"AL10YrCl" : ( 27157, 27158 ),
"AL10cYrCl" : ( 27158, 27159 ),
"AL10dYrCl" : ( 27159, 27160 ),
"AL12cYrCl" : ( 27160, 27161 ),
"AL13bYrCl" : ( 27161, 27162 ),
"AL14bYrCl" : ( 27162, 27163 ),
"AL15aYrCl" : ( 27163, 27164 ),
"AL16bYrCl" : ( 27164, 27165 ),
"AL21YrCl" : ( 27165, 27166 ),
"AL21dYrCl" : ( 27166, 27167 ),
"AL22bYrCl" : ( 27167, 27168 ),
"AL23bYrCl" : ( 27168, 27169 ),
"AL24bYrCl" : ( 27169, 27170 ),
"AL25YrCl" : ( 27170, 27171 ),
"AL29bYrCl" : ( 27171, 27172 ),
"AL26a1YrCl" : ( 27172, 27173 ),
"AL26a2YrCl" : ( 27173, 27174 ),
"AL26a3YrCl" : ( 27174, 27175 ),
"AL27cYrCl" : ( 27175, 27176 ),
"AL28bYrCl" : ( 27176, 27177 ),
"AL31bYrCl" : ( 27177, 27178 ),
"AL32YrCl" : ( 27178, 27179 ),
"AL33aYrCl" : ( 27179, 27180 ),
"AL37dYrCl" : ( 27180, 27181 ),
"AL38YrCl" : ( 27181, 27182 ),
"AL39YrCl" : ( 27182, 27183 ),
"AL37iYrCl" : ( 27183, 27184 ),
"AL38cYrCl" : ( 27184, 27185 ),
"AL39cYrCl" : ( 27185, 27186 ),
"Al19YrCl" : ( 27186, 27187 ),
"AL9dMnthCl" : ( 27187, 27188 ),
"AL9iMnthCl" : ( 27188, 27189 ),
"AL10MnthCl" : ( 27189, 27190 ),
"AL10cMnthCl" : ( 27190, 27191 ),
"AL10dMnthCl" : ( 27191, 27192 ),
"AL12cMnthCl" : ( 27192, 27193 ),
"AL13bMnthCl" : ( 27193, 27194 ),
"AL14bMnthCl" : ( 27194, 27195 ),
"AL15aMnthCl" : ( 27195, 27196 ),
"AL16bMnthCl" : ( 27196, 27197 ),
"AL21MnthCl" : ( 27197, 27198 ),
"AL21dMnthCl" : ( | |
#!/usr/bin/python
import sys, getopt, locale
from scipy.stats.stats import pearsonr
import numpy
from datetime import datetime
import matplotlib.pyplot as plt
import matplotlib.dates
import os.path
from sys import getsizeof
from scipy.spatial import distance
import platform
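# The module-level constants below are referenced throughout this file but are
# defined outside the part shown here. The values given are assumed
# placeholders (a hedged guess, not the original configuration): TIME_COL and
# ROWID_COL are dictionary key names, STATDUMP_TIME_FORMAT1/2 bracket the
# timezone token in statdump's timestamp line, and PLOT_DPI / graph_mode
# control plotting. Adjust them if the original script defines them differently.
TIME_COL = 'time'
ROWID_COL = 'rowid'
STATDUMP_TIME_FORMAT1 = '%a %B %d %H:%M:%S '  # assumed prefix of the statdump time format
STATDUMP_TIME_FORMAT2 = ' %Y'                 # assumed suffix of the statdump time format
PLOT_DPI = 300                                # assumed output resolution for saved plots
graph_mode = 1                                # assumed: 1 or 2 saves figures as .png files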
def replace_whitespaces(str):
res = str
res = res.replace (' ','')
    res = res.replace (',', '')
res = res.replace (';', '')
res = res.replace('\t', '')
res = res.replace('\n', '')
res = res.replace('/', '')
return res
def parse_sar(filepath, date_MMDDYY, has_multiple_inputs = 0):
data = []
dict = {}
ref_dict = {}
cnt_lines = 1
ref_dict[TIME_COL] = ''
ignored_empty_cnt = 0
ignored_useless_cnt = 0
ignored_error_cnt = 0
print ('parse_sar : ' + filepath + ' date_MMDDYY:' + str (date_MMDDYY))
if ('Linux' in platform.system()):
locale.setlocale(locale.LC_ALL, 'en_US')
else:
locale.setlocale(locale.LC_ALL, 'English')
ll = locale.getlocale(locale.LC_ALL)
if (has_multiple_inputs):
prefix = os.path.basename (filepath) + "."
else:
prefix = ''
with open(filepath, 'r') as file_object:
line = file_object.readline()
cnt_lines = 1
rowid = 0
while line:
if line.isspace():
#
ignored_empty_cnt = ignored_empty_cnt + 1
elif 'Linux' in line:
ignored_useless_cnt = ignored_useless_cnt + 1
elif 'CPU'.lower() in line.lower():
words = line.split()
for col in words:
if (':' in col):
ref_dict[TIME_COL] = ''
elif '%' in col:
ref_dict[col] = ''
else:
if (ref_dict.keys ().__len__() < 2):
                    raise Exception("sar file : cannot find header")
words = line.split()
time_val = datetime.strptime (words[0], '%H:%M:%S')
datetime_val = date_MMDDYY + ' ' + time_val.strftime ('%H:%M:%S')
datetime_val = datetime.strptime (datetime_val, '%m/%d/%y %H:%M:%S')
key = prefix + TIME_COL
dict[key] = datetime_val
#dict[CPU] = words[1]
key = prefix + '%user'
dict[key] = float (words[2])
key = prefix + '%nice'
dict[key] = float (words[3])
key = prefix + '%system'
dict[key] = float (words[4])
key = prefix + '%iowait'
dict[key] = float (words[5])
key = prefix + '%steal'
dict[key] = float (words[6])
key = prefix + '%idle'
dict[key] = float (words[7])
key = prefix + '%working'
dict[key] = 1 - dict[prefix + '%idle']
if has_multiple_inputs:
dict[ROWID_COL] = rowid
data.append (dict)
dict = {}
rowid = rowid + 1
line = file_object.readline()
cnt_lines = cnt_lines + 1
print('Read lines:', cnt_lines)
print('ignored_empty_cnt:', ignored_empty_cnt)
print('ignored_useless_cnt:', ignored_useless_cnt)
print('ignored_error_cnt:', ignored_error_cnt)
print('Keys:', ref_dict.keys().__len__())
return data
def parse_iostat(filepath, device_name, has_multiple_inputs = 0):
data = []
dict = {}
ref_dict = {}
cnt_lines = 1
ref_dict[TIME_COL] = ''
ignored_empty_cnt = 0
ignored_useless_cnt = 0
ignored_error_cnt = 0
print('parse_iostat : ' + filepath + ' device_name:' + device_name)
if ('Linux' in platform.system()):
locale.setlocale(locale.LC_ALL, 'en_US')
else:
locale.setlocale(locale.LC_ALL, 'English')
ll = locale.getlocale(locale.LC_ALL)
if (has_multiple_inputs):
prefix = os.path.basename (filepath) + "."
else:
prefix = ''
with open(filepath, 'r') as file_object:
line = file_object.readline()
cnt_lines = 1
rowid = 0
while line:
if line.isspace():
ignored_empty_cnt = ignored_empty_cnt + 1
            elif "Device" in line:
                ignored_useless_cnt = ignored_useless_cnt + 1
elif '/' in line and ':' in line:
line = line.strip()
datetime_val = datetime.strptime (line, '%m/%d/%y %H:%M:%S')
key = prefix + TIME_COL
dict[key] = datetime_val
elif device_name in line:
if dict.keys ().__len__ () != 1:
                    raise Exception("Time row not found in iostat file")
words = line.split()
key = prefix + 'rkB/s'
dict[key] = float (words[5])
key = prefix + 'wkB/s'
dict[key] = float (words[6])
key = prefix + '<KEY>'
dict[key] = float(words[7])
key = prefix + '<KEY>'
dict[key] = float(words[8])
key = prefix + 'util'
dict[key] = float(words[13])
if has_multiple_inputs:
dict[ROWID_COL] = rowid
data.append (dict)
dict = {}
rowid = rowid + 1
else:
ignored_useless_cnt = ignored_useless_cnt + 1
line = file_object.readline()
cnt_lines = cnt_lines + 1
print('Read lines:', cnt_lines)
print('ignored_empty_cnt:', ignored_empty_cnt)
print('ignored_useless_cnt:', ignored_useless_cnt)
print('ignored_error_cnt:', ignored_error_cnt)
print('Keys:', ref_dict.keys().__len__())
return data
def parse_statdump(filepath, has_multiple_inputs = 0, stats_filter_list = []):
complex_stats_header_list = ['Num_data_page_fix_ext:',
'Num_data_page_promote_ext:',
'Num_data_page_promote_time_ext:',
'Num_data_page_unfix_ext:',
'Time_data_page_lock_acquire_time:',
'Time_data_page_hold_acquire_time:',
'Time_data_page_fix_acquire_time:',
'Time_data_page_unfix_time:',
'Num_mvcc_snapshot_ext:',
'Time_obj_lock_acquire_time:']
data = []
dict = {}
ref_dict = {}
cnt_lines = 1
ref_dict[TIME_COL] = ''
ignored_empty_cnt = 0
ignored_useless_cnt = 0
ignored_error_cnt = 0
ignored_filter_cnt = 0
row_cnt = 0
GET_LOCAL = 'GET_LOCAL'
local_timezone = ''
print('parse_statdump : ' + filepath)
if (has_multiple_inputs):
prefix = os.path.basename (filepath) + "."
else:
prefix = ''
if ('Linux' in platform.system()):
locale.setlocale(locale.LC_ALL, 'en_US')
else:
locale.setlocale(locale.LC_ALL, 'English')
ll = locale.getlocale(locale.LC_ALL)
with open(filepath, 'r') as file_object:
line = file_object.readline()
cnt_lines = 1
complex_stat_idx = -1
stripped_line = line.strip ()
while line:
if line.isspace ():
#
ignored_empty_cnt = ignored_empty_cnt + 1
elif 'The' in line:
ignored_useless_cnt = ignored_useless_cnt + 1
elif '***' in line:
ignored_useless_cnt = ignored_useless_cnt + 1
elif GET_LOCAL in line:
words = line.split('=')
local_timezone = words[1].strip()
elif line in complex_stats_header_list or stripped_line in complex_stats_header_list:
try:
complex_stat_idx = complex_stats_header_list.index (line)
except:
complex_stat_idx = complex_stats_header_list.index(stripped_line)
if complex_stat_idx == -1:
complex_stat_idx = complex_stats_header_list.index(stripped_line)
elif '=' in line:
words = line.split ('=')
key_base = words[0].strip()
if (complex_stat_idx != -1):
key = prefix + complex_stats_header_list[complex_stat_idx]+ '.' + key_base
key = key.replace (':','.')
else:
key = prefix + key_base
value = words[1].strip ()
accept_stat = 1
if len (stats_filter_list) > 0:
accept_stat = 0
if complex_stat_idx != -1:
curr_complex_stat = complex_stats_header_list[complex_stat_idx]
curr_complex_stat2 = curr_complex_stat.replace (':', '')
if curr_complex_stat in stats_filter_list:
accept_stat = 1
elif curr_complex_stat2 in stats_filter_list:
accept_stat = 1
elif complex_stats_header_list[complex_stat_idx] + '.' + key_base in stats_filter_list:
accept_stat = 1
elif key_base in stats_filter_list:
accept_stat = 1
if accept_stat:
# only numeric values are added in dictionaries
try:
num_val = float (value)
dict[key] = num_val
if not key in ref_dict.keys ():
ref_dict[key] = num_val
except:
ignored_useless_cnt = ignored_useless_cnt + 1
else:
ignored_filter_cnt = ignored_filter_cnt + 1
else:
line = line.strip()
try:
#dt = parser.parse (line)
if local_timezone == '':
                        raise Exception('Local timezone not found in statdump file; missing line containing: ' + GET_LOCAL)
dt = datetime.strptime (line, STATDUMP_TIME_FORMAT1 + local_timezone + STATDUMP_TIME_FORMAT2)
if (not dt is None):
if dict.__len__() > 0:
data.append (dict)
row_cnt = row_cnt + 1
dict = {}
key = TIME_COL
dict[key] = dt
complex_stat_idx = -1
except:
if ':' in line:
ignored_useless_cnt = ignored_useless_cnt + 1
else:
print (line)
ignored_error_cnt = ignored_error_cnt + 1
line = file_object.readline()
stripped_line = line.strip()
cnt_lines = cnt_lines + 1
if dict.__len__() > 0:
data.append (dict)
row_cnt = row_cnt + 1
print ('Read lines:', cnt_lines)
print ('ignored_empty_cnt:', ignored_empty_cnt)
print ('ignored_useless_cnt:', ignored_useless_cnt)
print ('ignored_error_cnt:', ignored_error_cnt)
print ('ignored_filter_cnt:', ignored_filter_cnt)
print ('Keys:', ref_dict.keys ().__len__ ())
print ('Rows:' + str (len (data)) + ' ; ' + str (row_cnt))
return data, ref_dict, local_timezone
def extend_data(data, ref_dict, add_row_id = 0):
data_adj = []
rowid = 0
prev_row = {}
print ('extend_data : Keys before extending :' + str (data[0].keys ().__len__()))
for row in data:
for k in ref_dict.keys():
if not k in row.keys():
row[k] = 0
k_DELTA = k + '_DELTA'
k_ACCUM = k + '_ACCUM'
if k == TIME_COL:
pass
elif rowid == 0:
row[k_DELTA] = 0
row[k_ACCUM] = 0
else:
row[k_DELTA] = row[k] - prev_row[k]
row[k_ACCUM] = row[k] + prev_row[k]
if (add_row_id):
row[ROWID_COL] = rowid
data_adj.append(row)
if row.keys().__len__() - add_row_id != 1 + 3 * (ref_dict.keys().__len__() - 1):
            raise Exception("row size should match")
rowid = rowid + 1
prev_row = row
print ('Keys after extending:', prev_row.keys().__len__())
print ('Rows:' + str (len (data_adj)) + ' ; ' + str (rowid))
return data_adj
def plot_two_graphs(time_array, k1,k2,array1,array2, text_string):
#fig = plt.figure ()
axes = plt.axes ()
myFmt = matplotlib.dates.DateFormatter('%H:%M:%S')
axes.xaxis.set_major_formatter(myFmt)
my_graph = plt.plot(time_array, array1, label=k1, color='b')
plt.setp(my_graph, linewidth=1)
my_graph = plt.plot(time_array, array2, label=k2, color='r')
plt.setp(my_graph, linewidth=1)
plt.legend(loc='lower left', title=text_string, bbox_to_anchor=(0.0, 1.01), ncol=1, borderaxespad=0, frameon=False,
fontsize='xx-small')
    if ('Windows' in platform.system()):
        filename = k1 + k2
        filename = filename[:200] + '.png'
else:
filename = k1 + k2 + '.png'
filename = replace_whitespaces (filename)
if (graph_mode == 1 or graph_mode == 2):
plt.savefig (filename, format = 'png', dpi=PLOT_DPI)
plt.close()
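# Hedged usage sketch: the helper below is not called anywhere; it only
# illustrates how the parsers above are typically combined. The file names,
# the date string and the device name are hypothetical.
def _example_usage():
    sar_rows = parse_sar('sar.log', '01/01/21')
    iostat_rows = parse_iostat('iostat.log', 'sda')
    stat_rows, stat_ref, tz = parse_statdump('statdump.log')
    stat_rows = extend_data(stat_rows, stat_ref, add_row_id=1)
    print('iostat rows:', len(iostat_rows), 'statdump rows:', len(stat_rows))
    # plot two sar CPU metrics against time
    times = [row[TIME_COL] for row in sar_rows]
    plot_two_graphs(times,
                    '%user', '%system',
                    [row['%user'] for row in sar_rows],
                    [row['%system'] for row in sar_rows],
                    'sar CPU usage')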
#merges data2 into data1 by key 'merge_key'
#both data1 and data2 are arrays of dictionary
#if values of merge_key from data2 does not exist in data1, data is not merged
#if values of merge_key from data1 does not exist in data2, the first value of data2 is used to populate rows of merged_data
def merge_data (data1, data2, merge_key):
merged_data = []
rowid2=0
rowid1=0
while rowid1 < len (data1):
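        # Assumed reconstruction of the remainder of this function, based only
        # on the comments above (a hedged sketch, not the author's code). It
        # assumes both inputs are ordered by merge_key.
        row1 = data1[rowid1]
        merged_row = dict(row1)
        while rowid2 < len(data2) and data2[rowid2][merge_key] != row1[merge_key]:
            rowid2 = rowid2 + 1
        if rowid2 < len(data2):
            merged_row.update(data2[rowid2])
        else:
            # no matching merge_key value in data2: use its first row instead
            merged_row.update(data2[0])
            rowid2 = 0
        merged_data.append(merged_row)
        rowid1 = rowid1 + 1
    return merged_data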
| |
# !/usr/bin/env python3
"""
author: Apeiria
contact: <EMAIL>
file: GeneticAlgorithm.py
function: Genetic algorithm correlation
"""
# ---------------------------------- import --------------------------------- #
import os
import sys
import time
import random
import copy
import numpy as np
import ray
from Method.GenerationSolve import GenerationSolve
# ---------------------------------- Tool Class ----------------------------- #
class Genetic_Base(object):
"""Real Genetic algorithm, Standard tool class"""
@classmethod
def __param_check(cls, param,
type,
param_name):
"""Parameter validity check
Args:
param (Any): Param
type (Any): Param type
param_name (string): Param Name
"""
assert isinstance(param, type), "[ERROR] {} need {}".format(param_name, type)
return
@classmethod
def init_population(cls,
population_size,
individual_member):
"""Initialize population
Args:
population_size (int): population size
individual_member (list): individual member element
Returns:
[list, list]: [Initialize population, Individual optimal record]
"""
# Param Check
cls.__param_check(population_size, int, "population_size")
cls.__param_check(individual_member, list, "individual_member")
for element in individual_member:
cls.__param_check(element, int, "individual_member element")
# Main Process
population, hist_individual_best = [], []
for _ in range(0, population_size):
individual = copy.deepcopy(individual_member)
random.shuffle(individual)
population.append(individual)
hist_individual_best.append(copy.deepcopy(individual))
return population, hist_individual_best
@classmethod
def update_histinfo(cls,
population,
hist_info,
fitFunction,
fitFunctionInput,
output=False):
"""Update history information
Args:
population (list): Population list
hist_info (list): Historical information list
fitFunction (function): Fitness calculation function
fitFunctionInput (list): Auxiliary input of fitness function
output (bool, optional): Output tag. Defaults to False.
Returns:
[bool, double, list]: Update marker, optimal fitness of current population, updated historical information
"""
# Param Check
cls.__param_check(population, list, "population")
for element in population:
cls.__param_check(element, list, "population element")
cls.__param_check(hist_info, list, "hist_info")
# Main Process
[hist_best_individual, hist_best_fit] = hist_info
update_flag = False
populationFit = []
for individual in population:
Fit = fitFunction(individual, fitFunctionInput)
populationFit.append(Fit)
Fit = np.max(populationFit)
if hist_best_fit == None or hist_best_fit < np.max(populationFit):
update_flag = True
hist_best_fit = np.max(populationFit)
hist_best_individual = copy.deepcopy(population[np.argmax(populationFit)])
hist_info = [hist_best_individual, hist_best_fit]
# Output control
if output == True:
populationFit = sorted(populationFit, reverse=True)
output_str = "Best: "
for i in range(0, 15):
output_str += "{:2.2f}%, ".format(populationFit[i])
print(output_str[0:-2])
output_str = "Low: "
for i in range(0, 15):
output_str += "{:2.2f}%, ".format(populationFit[-i - 1])
print(output_str[0:-2])
return update_flag, Fit, hist_info
@classmethod
def Cross(cls,
individual1,
individual2,
cross_mode,
fitFunction,
fitFunctionInput):
"""Genetic algorithm, single point crossover process
Args:
individual1 (list): Parent individual 1
individual2 (list): Parent individual 2
cross_mode (string): Crossover mode, optional "no_bias", "bias"
fitFunction (function): Fitness calculation function
fitFunctionInput (list): Auxiliary input of fitness function
Raises:
Exception: Overlapping problem
Returns:
list: New individuals after crossover
"""
# Param Check
cls.__param_check(individual1, list, "individual1")
cls.__param_check(individual2, list, "individual2")
assert len(individual1) > 0, "[ERROR] individual1 is null."
assert len(individual2) > 0, "[ERROR] individual2 is null."
for i in range(0, len(individual1)):
cls.__param_check(individual1[i], int, "individual1 element")
for i in range(0, len(individual2)):
cls.__param_check(individual2[i], int, "individual2 element")
# Main Process
if cross_mode == "no_bias":
# Unbiased crossover mode
new_individual = []
if len(individual1) == len(individual2):
temp = len(individual1) - 1
else:
temp = min(len(individual1) - 1, len(individual2) - 1)
cross_position = random.randint(1, temp)
new_individual.extend(individual1[0:cross_position])
new_individual.extend(individual2[cross_position:])
# 1. Filter duplicate members
repeat_member_index = {}
for i in range(0, len(new_individual)):
if new_individual[i] not in repeat_member_index.keys():
repeat_member_index[new_individual[i]] = [i]
else:
repeat_member_index[new_individual[i]].append(i)
# 2. Find missing members
replace_index = []
for i in individual1:
if i not in new_individual:
replace_index.append(i)
# 3. Replace conflicting duplicate elements
for key in repeat_member_index.keys():
if len(repeat_member_index[key]) == 2:
choice_index = random.choice(repeat_member_index[key])
choice_member = random.choice(replace_index)
new_individual[choice_index] = choice_member
repeat_member_index[key].remove(choice_index)
replace_index.remove(choice_member)
elif len(repeat_member_index[key]) > 2:
raise Exception("[ERROR] In 2 individuals, 1 index cannot appear more than 3 times.")
elif cross_mode == "bias":
# Bias crossover mode
new_individual = []
if len(individual1) == len(individual2):
temp = len(individual1) - 1
else:
temp = min(len(individual1) - 1, len(individual2) - 1)
cross_position = random.randint(1, temp)
Fit1, _ = fitFunction(individual1, fitFunctionInput)
Fit2, _ = fitFunction(individual2, fitFunctionInput)
better_individual = 1 if Fit1 > Fit2 else 2
new_individual.extend(individual1[0:cross_position])
new_individual.extend(individual2[cross_position:])
# 1. Filter duplicate members
repeat_member_index = {}
for i in range(0, len(new_individual)):
if new_individual[i] not in repeat_member_index.keys():
repeat_member_index[new_individual[i]] = [i]
else:
repeat_member_index[new_individual[i]].append(i)
# 2. Find missing members
replace_index = []
for i in individual1:
if i not in new_individual:
replace_index.append(i)
# 3. Replace conflicting duplicate elements
for key in repeat_member_index.keys():
if len(repeat_member_index[key]) == 2:
                    # Bias: keep the duplicate occurrence that came from the better
                    # parent (positions before cross_position come from individual1)
                    tt = 1 if repeat_member_index[key][0] < cross_position else 2
choice_index = repeat_member_index[key][0] if tt != better_individual else repeat_member_index[key][1]
choice_member = random.choice(replace_index)
new_individual[choice_index] = choice_member
repeat_member_index[key].remove(choice_index)
replace_index.remove(choice_member)
elif len(repeat_member_index[key]) > 2:
raise Exception("[ERROR] In 2 individuals, 1 index cannot appear more than 3 times.")
else:
raise Exception("[ERROR] Unknown Param: cross_mode({})".format(cross_mode))
return new_individual
@classmethod
def Mutation(cls,
individual):
"""Genetic algorithm, mutation process
Args:
individual (list): individual individual
Returns:
list: new individual
"""
# Param Check
cls.__param_check(individual, list, "individual")
assert len(individual) > 0, "[ERROR] individual is null."
for i in range(0, len(individual)):
cls.__param_check(individual[i], int, "individual element")
# Main Process
if len(individual) > 10:
p = random.random()
if p > 0.5:
# 50% Swap an individual element
new_individual = GenerationSolve.get_NewSeq_RandomSwitchOne(individual)
elif p > 0.3:
# 20% Partial exchange
new_individual = GenerationSolve.get_NewSeq_RandomSwitchPart(individual)
elif p > 0.1:
# 20% Make a jump switch
new_individual = GenerationSolve.get_NewSeq_PartJump(individual)
else:
# 10% Partial inversion
new_individual = GenerationSolve.get_NewSeq_PartReverse(individual)
else:
new_individual = GenerationSolve.get_NewSeq_RandomSwitchOne(individual)
return new_individual
@classmethod
def Select(cls,
population,
population_size,
select_mode,
fitFunction,
fitFunctionInput,
candidate_parameters,
epoch,
output=True):
"""Genetic algorithm, selection process
Args:
population (list): Population list
population_size (int): Population size
select_mode (string): Select mode, optional "greedy","championships","roulette"
fitFunction (function): Fitness calculation function
fitFunctionInput (list): Auxiliary input of fitness function
candidate_parameters (list): Candidate parameter list
epoch (int): Number of iterations, which provides the current number of iterations for select_mode
            output (bool, optional): Output tag. Defaults to True.
Raises:
Exception: Mismatch data type
Returns:
list: new Population
"""
# Param Check
cls.__param_check(population, list, "population")
for element in population:
cls.__param_check(element, list, "population element")
cls.__param_check(population_size, int, "population_size")
cls.__param_check(candidate_parameters, list, "candidate_parameters")
# Main Process
# 1. Calculation of fitness in population
populationFit = []
for individual in population:
Fit = fitFunction(individual, fitFunctionInput)
populationFit.append(Fit)
temp = []
for i in range(0, len(population)):
temp.append((population[i], populationFit[i]))
# 2. Population screening (maximum screening)
new_population = []
if select_mode == "greedy":
# A. Greedy strategy, retain the optimal individual
temp = sorted(temp, key=lambda x: x[1], reverse=True)
temp = temp[0:population_size]
for element in temp:
new_population.append(element[0])
elif select_mode == "championships":
# B. Championships strategy (no put back)
[error_prob] = candidate_parameters
cls.__param_check(error_prob, float, "error_prob")
for _ in range(0, population_size):
sample = random.sample(temp, 2)
sample = sorted(sample, key=lambda x: x[1], reverse=True)
if random.random() <= error_prob:
individual = sample[1][0]
temp.remove(sample[1])
else:
individual = sample[0][0]
temp.remove(sample[0])
new_population.append(individual)
elif select_mode == "championships_back":
# C. Championships strategy (put back)
[error_prob] = candidate_parameters
cls.__param_check(error_prob, float, "error_prob")
for _ in range(0, population_size):
sample = random.sample(temp, 2)
sample = sorted(sample, key=lambda x: x[1], reverse=True)
if random.random() <= error_prob:
new_population.append(sample[1][0])
else:
new_population.append(sample[0][0])
elif select_mode == "championships_adaption":
# D. Championships strategy (self-adaption, no put back)
[error_prob_init, sigma] = candidate_parameters
cls.__param_check(error_prob_init, float, "error_prob_init")
cls.__param_check(sigma, float, "sigma")
error_prob = error_prob_init * (sigma ** (epoch + 1))
for _ in range(0, population_size):
sample = random.sample(temp, 2)
sample = sorted(sample, key=lambda x: x[1], reverse=True)
if random.random() <= error_prob:
individual = sample[1][0]
temp.remove(sample[1])
else:
individual = sample[0][0]
temp.remove(sample[0])
new_population.append(individual)
elif select_mode == "roulette":
# E. Roulette strategy (no put back)
[exponent_coefficient] = candidate_parameters
cls.__param_check(exponent_coefficient, float, "exponent_coefficient")
for i in range(0, len(populationFit)):
populationFit[i] = exponent_coefficient ** populationFit[i]
for _ in range(0, population_size):
assert len(population) == len(populationFit)
individual = random.choices(population, weights=populationFit, k=1)[0]
t = population.index(individual)
population.remove(individual)
del populationFit[t]
new_population.append(individual)
elif select_mode == "roulette_back":
# F. Roulette strategy (put back)
[exponent_coefficient] = candidate_parameters
cls.__param_check(exponent_coefficient, float, "exponent_coefficient")
for i in range(0, len(populationFit)):
populationFit[i] = exponent_coefficient ** populationFit[i]
for _ in range(0, population_size):
individual = random.choices(population, weights=populationFit, k=1)[0]
new_population.append(individual)
else:
raise Exception("[ERROR] Unknown Param: select_mode({})".format(select_mode))
return copy.deepcopy(new_population)
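    # Hedged usage sketch (comments only): one possible driver loop built from
    # the classmethods above. `fitFunction(individual, aux)` is assumed to
    # return a scalar fitness and `aux` is whatever auxiliary input it needs.
    #
    #   population, _ = Genetic_Base.init_population(50, list(range(20)))
    #   hist_info = [None, None]
    #   for epoch in range(100):
    #       children = [Genetic_Base.Mutation(
    #                       Genetic_Base.Cross(random.choice(population),
    #                                          random.choice(population),
    #                                          "no_bias", fitFunction, aux))
    #                   for _ in range(len(population))]
    #       population = Genetic_Base.Select(population + children, 50, "greedy",
    #                                        fitFunction, aux, [], epoch)
    #       _, _, hist_info = Genetic_Base.update_histinfo(population, hist_info,
    #                                                      fitFunction, aux)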
@ray.remote
def population_fit_feature_cal(population,
fitFunction,
fitFunctionInput,
ite):
"""Parallel population fitness calculation function
Args:
population (list): Single population
fitFunction (function): Fitness calculation function
fitFunctionInput (list): Auxiliary input of fitness calculation function
ite (int): Current population number
Returns:
[int, list]: Current population number, population characteristics (mean, variance, maximum)
"""
# Calculate population fitness
populationFit = []
for individual in population:
Fit = fitFunction(individual, | |
# -*- coding: utf-8 -*-
from subprocess import Popen
import os
from datetime import date
import shlex
import shutil
import re
import codecs
import traceback
import sys
import urlparse
import ftplib
import json
import smtplib
from email.mime.multipart import MIMEMultipart
from email.mime.text import MIMEText
import webbrowser
import httplib2
from apiclient.discovery import build
from apiclient.http import MediaFileUpload
from apiclient.http import BatchHttpRequest
from oauth2client.client import OAuth2WebServerFlow
from oauth2client.file import Storage
CREDENTIALS_FILE = 'credentials'
OAUTH_SCOPE = 'https://www.googleapis.com/auth/drive'
REDIRECT_URI = 'urn:ietf:wg:oauth:2.0:oob'
class BaseEditor(object):
def __init__(self, filePath):
self.fileHandle = codecs.open(filePath, 'r+', 'utf-8')
self.fileData = self.fileHandle.read()
def commit(self):
self.fileHandle.seek(0)
self.fileHandle.truncate()
self.fileHandle.write(self.fileData)
self.fileHandle.close()
def discard(self):
self.fileHandle.close()
class PlistEditor(BaseEditor):
def __init__(self, filePath):
super(PlistEditor, self).__init__(filePath)
def replaceSimpleValue(self, key, value, valueType = 'string'):
pattern = r'<key>%s</key>\s*<%s>[^<>]+</%s>' % (key, valueType, valueType)
substitute = '<key>%s</key>\n\t<%s>%s</%s>' % (key, valueType, value, valueType)
self.fileData = re.sub(pattern, substitute, self.fileData, flags = re.IGNORECASE)
def readSimpleValue(self, key, valueType = 'string'):
pattern = r'<key>%s</key>\s*<%s>([^<>]+)</%s>' % (key, valueType, valueType)
match = re.search(pattern, self.fileData, flags = re.IGNORECASE)
if match is not None:
return match.group(1)
else:
return None
class BaseBuilderModel(object):
def __init__(self, buildInfo):
self.buildInfo = buildInfo
self.buildNumber = None
self.buildName = None
def __getitem__(self, key):
return self.buildInfo[key]
def __contains__(self, key):
return key in self.buildInfo
def __unicode__(self):
return u'%s(%s)' % (self.buildName, self.buildNumber)
def getBuildName(self, buildProfile, appBuild = None):
pass
def incrementBuildNumber(self, appBuild):
pass
def nextBuildPathInfo(self, currentAppBuild, buildProfile):
self.buildNumber = self.incrementBuildNumber(currentAppBuild)
self.buildName = self.getBuildName(buildProfile, self.buildNumber)
class IpaBuilderModel(BaseBuilderModel):
def __init__(self, buildInfo):
super(IpaBuilderModel, self).__init__(buildInfo)
self.outputFolder = os.path.join(self['EXPORT_PATH_PREFIX'], self['BUILD_FOLDER'])
self.archivePath = None
self.exportPath = None
def getBuildName(self, buildProfile, appBuild = None):
def generateCurrentDateString():
currentDate = date.today()
return '%04d%02d%02d' % (currentDate.year, currentDate.month, currentDate.day)
appName = self['FRIENDLY_APP_NAME'].replace(' ', '')
appVersion = self['APP_VERSION']
suffix = buildProfile['ipaNameSuffix']
ipaNameComponents = [appName, generateCurrentDateString(), appVersion]
isValidComponent = lambda c: bool(isinstance(c, basestring) and c)
if isValidComponent(appBuild):
ipaNameComponents.append(appBuild)
if isValidComponent(suffix):
ipaNameComponents.append(suffix)
return '_'.join(ipaNameComponents)
def incrementBuildNumber(self, appBuild):
if not appBuild:
return
return str(int(appBuild) + 1)
def nextBuildPathInfo(self, currentAppBuild, buildProfile):
super(IpaBuilderModel, self).nextBuildPathInfo(currentAppBuild if self['INCREMENT_BUILD_NUMBER'] else None, buildProfile)
self.archivePath = os.path.join(self.outputFolder, 'archives', self.buildName + '.xcarchive')
self.exportPath = os.path.join(self.outputFolder, self.buildName + '.ipa')
def generateUniqueFileName(fileName):
nameTuple = os.path.splitext(fileName)
number = 1
while os.path.exists(fileName):
number += 1
fileName = '%s_%d%s' % (nameTuple[0], number, nameTuple[1])
return fileName
def issueCommand(command):
print 'issue', command
if isinstance(command, unicode):
command = command.encode('utf-8')
arguments = shlex.split(command)
logFile = generateUniqueFileName('issueCommandLog')
pOut = open(logFile, 'w+')
p = Popen(arguments, stdout = pOut)
p.wait()
pOut.close()
if p.returncode != os.EX_OK:
print 'returns %s. Refer to %s for details.' % (str(p.returncode), logFile)
else:
os.remove(logFile)
return os.EX_OK == p.returncode
optionGenerator = lambda name, value: '%s "%s"' % (name, value) if name and value else name or '"%s"' % value
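# e.g. optionGenerator('-scheme', 'MyApp')   -> '-scheme "MyApp"'
#      optionGenerator('-exportArchive', '') -> '-exportArchive'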
class BaseBuilder(object):
def __init__(self, model, verbose):
self.model = model
self.verbose = verbose
def prepareRun(self):
pass
def run(self):
if not self.prepareRun():
return
results = []
for profile in self.getProfiles():
self.prepareRunProfile(profile)
result = self.runProfile(profile)
if not result:
return
results.append(result)
self.runDone()
return results
def runDone(self):
pass
def prepareRunProfile(self, profile):
self.model.nextBuildPathInfo(self.getCurrentAppBuild(), profile)
if self.verbose:
print 'Build %s' % unicode(self.model)
def runProfile(self, profile):
pass
def getProfiles(self):
pass
def getCurrentAppBuild(self):
pass
class IpaBuilder(BaseBuilder):
def __init__(self, model, verbose):
super(IpaBuilder, self).__init__(model, verbose)
self.plistEditor = None
def prepareRun(self):
if not issueCommand('svn update'):
return False
# version should be fixed
self.plistEditor = PlistEditor(self.model['INFO_PLIST_PATH'])
self.plistEditor.replaceSimpleValue('CFBundleShortVersionString', self.model['APP_VERSION'])
self.plistEditor.commit()
return True
def runDone(self):
# commit the info plist
logMessage = self.model['COMMIT_LOG_TEMPLATE'].format(**self.model.buildInfo)
commitOptions = []
commitOptions.append(optionGenerator('-m', logMessage))
if 'SVN_USER' in self.model and 'SVN_PASSWORD' in self.model:
commitOptions.append(optionGenerator('--username', self.model['SVN_USER']))
commitOptions.append(optionGenerator('--password', self.model['SVN_PASSWORD']))
commitCommand = 'svn commit %s "%s"' % (' '.join(commitOptions), self.model['INFO_PLIST_PATH'])
issueCommand(commitCommand)
def prepareRunProfile(self, profile):
self.plistEditor = PlistEditor(self.model['INFO_PLIST_PATH'])
super(IpaBuilder, self).prepareRunProfile(profile)
def runProfile(self, profile):
self.updatePlist(self.plistEditor, profile)
if not self.issueClean():
return
if not self.issueArchive(profile):
return
return self.issueExport(profile)
def getProfiles(self):
return self.model['BUILD_PROFILES']
def getCurrentAppBuild(self):
return self.plistEditor.readSimpleValue('CFBundleVersion')
def updatePlist(self, plistEditor, profile):
plistEditor.replaceSimpleValue('CFBundleIdentifier', profile['bundleIdentifier'])
if self.model.buildNumber:
plistEditor.replaceSimpleValue('CFBundleVersion', self.model.buildNumber)
self.plistEditor.commit()
def issueClean(self):
cleanCommand = 'xcodebuild clean'
return issueCommand(cleanCommand)
def issueArchive(self, profile):
scheme = profile['scheme']
archiveCommand = 'xcodebuild -scheme "%s" archive -archivePath "%s"' % (scheme, self.model.archivePath)
if os.path.exists(self.model.archivePath):
shutil.rmtree(self.model.archivePath)
return issueCommand(archiveCommand)
def issueExport(self, profile):
exportOptions = []
exportOptions.append(optionGenerator('-exportArchive', ''))
optionsPlist = profile.get('exportOptionsPlist')
if optionsPlist:
optionsPlist = os.path.join(self.model['THIS_FILE_FOLDER'], optionsPlist)
exportOptions.append(optionGenerator('-exportOptionsPlist', optionsPlist))
else:
exportOptions.append(optionGenerator('-exportFormat', 'ipa'))
exportProvisioningProfile = profile['provisioningProfile']
if exportProvisioningProfile:
exportOptions.append(optionGenerator('-exportProvisioningProfile', exportProvisioningProfile))
else:
exportOptions.append(optionGenerator('-exportSigningIdentity', profile['signingIdentity']))
exportOptions.append(optionGenerator('-archivePath', self.model.archivePath))
exportOptions.append(optionGenerator('-exportPath', self.model.exportPath))
exportCommand = 'xcodebuild %s' % ' '.join(exportOptions)
if os.path.exists(self.model.exportPath):
if os.path.isdir(self.model.exportPath):
shutil.rmtree(self.model.exportPath)
else:
os.remove(self.model.exportPath)
if not issueCommand(exportCommand):
return
if os.path.isdir(self.model.exportPath):
self.moveProduct()
return self.model.exportPath
def moveProduct(self):
for fileName in os.listdir(self.model.exportPath):
if os.path.splitext(fileName)[1].lower() != '.ipa' or not os.path.isfile(os.path.join(self.model.exportPath, fileName)):
continue
parentDirectory = os.path.split(self.model.exportPath)[0]
shutil.copy2(os.path.join(self.model.exportPath, fileName), parentDirectory)
shutil.rmtree(self.model.exportPath)
os.rename(os.path.join(parentDirectory, fileName), self.model.exportPath)
return
def printProgress(progress, ongoing):
message = '%3d%%' % min(progress, 100)
if ongoing:
sys.stdout.write(message)
sys.stdout.write('\b' * len(message))
sys.stdout.flush()
else:
print message
def splitPathIntoComponents(path):
    components = []
    if not isinstance(path, basestring):
        return components
    while True:
        pathTuple = os.path.split(path)
        if pathTuple[1]:
            components.insert(0, pathTuple[1])
        # stop once splitting makes no further progress; without this check an
        # absolute path would loop forever after being reduced to a bare '/'
        if not pathTuple[0] or pathTuple[0] == path:
            break
        path = pathTuple[0]
    return components
class GoogleDriveManager(object):
def __init__(self):
self.service = None
self.http = httplib2.Http()
def authorize(self, credentials):
self.http = credentials.authorize(self.http)
self.service = build('drive', 'v2', http=self.http)
def makeDirectory(self, directory):
components = splitPathIntoComponents(directory)
folderID = None
folderCreated = False
for component in components:
if not folderCreated:
if folderID:
queriedFolder = self.service.children().list(folderId = folderID, q = 'mimeType=\'application/vnd.google-apps.folder\' and title=\'%s\'' % component).execute()
else:
queriedFolder = self.service.files().list(q = 'mimeType=\'application/vnd.google-apps.folder\' and title=\'%s\'' % component).execute()
if folderCreated or len(queriedFolder['items']) < 1:
body = {
'title': component,
'mimeType': 'application/vnd.google-apps.folder'
}
if folderID:
body['parents'] = [{
'id': folderID
}]
folderID = self.service.files().insert(body = body).execute()['id']
folderCreated = True
else:
folderID = queriedFolder['items'][0]['id']
return folderID
def insertFile(self, filePath, folderID, progressCallback = None):
media_body = MediaFileUpload(filePath, mimetype='application/octet-stream', resumable=True)
body = {
'title': os.path.split(filePath)[1],
'mimeType': 'application/octet-stream',
'parents': [{
'kind': 'drive#fileLink',
'id': folderID
}]
}
uploadRequest = self.service.files().insert(body = body, media_body = media_body)
uploadedFile = None
if callable(progressCallback):
while uploadedFile is None:
uploadStatus, uploadedFile = uploadRequest.next_chunk()
if uploadStatus:
progressCallback(uploadStatus.progress())
elif uploadedFile:
progressCallback(1)
else:
uploadedFile = uploadRequest.execute()
return uploadedFile['id']
def insertPermission(self, fileIDs, permission):
makeRequest = lambda i: self.service.permissions().insert(fileId = fileIDs[i], body = permission)
return GoogleDriveManager.executeMultipleRequests(fileIDs, makeRequest)
def getFileInfo(self, fileIDs):
makeRequest = lambda i: self.service.files().get(fileId = fileIDs[i])
return GoogleDriveManager.executeMultipleRequests(fileIDs, makeRequest)
@staticmethod
def executeMultipleRequests(responseOrder, makeRequest):
responses = [None for i in xrange(len(responseOrder))]
def batchCallback(request_id, response, exception):
if exception:
return
responses[responseOrder.index(request_id)] = response
batch = BatchHttpRequest()
for i in xrange(len(responseOrder)):
batch.add(makeRequest(i), request_id = responseOrder[i], callback = batchCallback)
batch.execute()
return responses
def uploadToGoogleDrive(filePaths, transferInfo):
if not filePaths:
return []
credentialsStorage = Storage(CREDENTIALS_FILE)
credentials = credentialsStorage.get()
if not credentials or not credentials.refresh_token:
flow = OAuth2WebServerFlow(transferInfo['CLIENT_ID'], transferInfo['CLIENT_SECRET'], OAUTH_SCOPE, REDIRECT_URI)
authorize_url = flow.step1_get_authorize_url()
webbrowser.open_new(authorize_url)
print 'Could not find valid credentials. Re-request access rights.'
code = raw_input('Enter verification code: ').strip()
credentials = flow.step2_exchange(code)
credentialsStorage.put(credentials)
driveManager = GoogleDriveManager()
driveManager.authorize(credentials)
fileIDs = []
targetFolderID = driveManager.makeDirectory(transferInfo['GOOGLE_DRIVE_PATH'])
for filePath in filePaths:
print 'uploading %s......' % filePath,
uploadedFileID = driveManager.insertFile(filePath, targetFolderID, lambda progress: printProgress(progress * 100, True))
printProgress(100, False)
fileIDs.append(uploadedFileID)
new_permission = {
'type': 'anyone',
'role': 'reader',
'withLink': True
}
driveManager.insertPermission(fileIDs, new_permission)
# get the link
uploadedFileInfo = driveManager.getFileInfo(fileIDs)
return map(lambda fileInfo: fileInfo['webContentLink'], uploadedFileInfo)
class FTPUploadProgressHandler(object):
def __init__(self, expectedSize):
self.__totalUploadedSize = 0
self.expectedSize = expectedSize
def update(self, uploadedSize):
self.__totalUploadedSize += uploadedSize
printProgress(int(self.__totalUploadedSize / float(self.expectedSize) * 100), self.__totalUploadedSize < self.expectedSize)
def FTPMakeWholeDirectory(FTPClient, directory):
components = splitPathIntoComponents(directory)
for component in components:
try:
FTPClient.cwd(component)
except ftplib.error_perm:
FTPClient.mkd(component)
FTPClient.cwd(component)
def uploadToFTPServer(filePaths, transferInfo):
if not filePaths:
return []
loginInfo = urlparse.urlparse(transferInfo['FTP_SERVER_URL'])
FTPClient = None
fileHandles = []
filesUploaded = []
try:
FTPClient = ftplib.FTP(loginInfo.hostname, loginInfo.username, loginInfo.password)
buildDir = transferInfo['FTP_SERVER_BUILD_DIRECTORY']
# create directory if it does not exist
try:
FTPClient.cwd(buildDir)
except ftplib.error_perm:
print '%s may not exist. Create one' % buildDir
FTPMakeWholeDirectory(FTPClient, buildDir)
buildDir = FTPClient.pwd()
for filePath in filePaths:
print 'uploading %s......' % filePath,
fileHandles.append(open(filePath, 'rb'))
fileName = os.path.split(filePath)[1]
FTPCommand = 'STOR %s' % (fileName.encode('utf-8') if isinstance(fileName, unicode) else fileName,)
blockSize = 8192
progressHandler = FTPUploadProgressHandler(os.path.getsize(filePath))
            FTPClient.storbinary(FTPCommand, fileHandles[-1], blockSize, lambda block: progressHandler.update(len(block)))
FTPLink = '%s://%s%s' % (loginInfo.scheme, loginInfo.hostname, os.path.join(buildDir, os.path.split(filePath)[1]))
filesUploaded.append(FTPLink)
except:
excInfo = sys.exc_info()
traceback.print_exception(excInfo[0], excInfo[1], excInfo[2], limit = 2, file = sys.stdout)
# cleanup
if fileHandles:
for fileHandle in fileHandles:
fileHandle.close()
| |
`tf.data` dataset.
- A generator or `keras.utils.Sequence` instance.
y: Target data. Like the input data `x`,
it could be either Numpy array(s) or TensorFlow tensor(s).
It should be consistent with `x` (you cannot have Numpy inputs and
tensor targets, or inversely).
If `x` is a dataset, generator or
`keras.utils.Sequence` instance, `y` should not be specified (since
targets will be obtained from the iterator/dataset).
batch_size: Integer or `None`.
Number of samples per batch of computation.
If unspecified, `batch_size` will default to 32.
Do not specify the `batch_size` if your data is in the
form of symbolic tensors, dataset,
generators, or `keras.utils.Sequence` instances (since they generate
batches).
verbose: 0 or 1. Verbosity mode.
0 = silent, 1 = progress bar.
sample_weight: Optional Numpy array of weights for
the test samples, used for weighting the loss function.
You can either pass a flat (1D)
Numpy array with the same length as the input samples
(1:1 mapping between weights and samples),
or in the case of temporal data,
you can pass a 2D array with shape
`(samples, sequence_length)`,
to apply a different weight to every timestep of every sample.
In this case you should make sure to specify
`sample_weight_mode="temporal"` in `compile()`. This argument is not
supported when `x` is a dataset, instead pass
sample weights as the third element of `x`.
steps: Integer or `None`.
Total number of steps (batches of samples)
before declaring the evaluation round finished.
Ignored with the default value of `None`.
If x is a `tf.data` dataset and `steps` is
None, 'evaluate' will run until the dataset is exhausted.
This argument is not supported with array inputs.
callbacks: List of `keras.callbacks.Callback` instances.
List of callbacks to apply during evaluation.
See [callbacks](/api_docs/python/tf/keras/callbacks).
max_queue_size: Integer. Used for generator or `keras.utils.Sequence`
input only. Maximum size for the generator queue.
If unspecified, `max_queue_size` will default to 10.
workers: Integer. Used for generator or `keras.utils.Sequence` input
only. Maximum number of processes to spin up when using
process-based threading. If unspecified, `workers` will default
to 1. If 0, will execute the generator on the main thread.
use_multiprocessing: Boolean. Used for generator or
`keras.utils.Sequence` input only. If `True`, use process-based
threading. If unspecified, `use_multiprocessing` will default to
`False`. Note that because this implementation relies on
multiprocessing, you should not pass non-picklable arguments to
the generator as they can't be passed easily to children processes.
Returns:
Scalar test loss (if the model has a single output and no metrics)
or list of scalars (if the model has multiple outputs
and/or metrics). The attribute `model.metrics_names` will give you
the display labels for the scalar outputs.
Raises:
ValueError: in case of invalid arguments.
"""
_keras_api_gauge.get_cell('evaluate_v1').set(True)
self._assert_compile_was_called()
self._check_call_args('evaluate')
func = self._select_training_loop(x)
return func.evaluate(
self,
x=x,
y=y,
batch_size=batch_size,
verbose=verbose,
sample_weight=sample_weight,
steps=steps,
callbacks=callbacks,
max_queue_size=max_queue_size,
workers=workers,
use_multiprocessing=use_multiprocessing)
def predict(self,
x,
batch_size=None,
verbose=0,
steps=None,
callbacks=None,
max_queue_size=10,
workers=1,
use_multiprocessing=False):
"""Generates output predictions for the input samples.
Computation is done in batches (see the `batch_size` arg.)
Arguments:
x: Input samples. It could be:
- A Numpy array (or array-like), or a list of arrays
(in case the model has multiple inputs).
- A TensorFlow tensor, or a list of tensors
(in case the model has multiple inputs).
- A `tf.data` dataset.
- A generator or `keras.utils.Sequence` instance.
batch_size: Integer or `None`.
Number of samples per batch of computation.
If unspecified, `batch_size` will default to 32.
Do not specify the `batch_size` if your data is in the
form of symbolic tensors, dataset,
generators, or `keras.utils.Sequence` instances (since they generate
batches).
verbose: Verbosity mode, 0 or 1.
steps: Total number of steps (batches of samples)
before declaring the prediction round finished.
Ignored with the default value of `None`. If x is a `tf.data`
dataset and `steps` is None, `predict` will
run until the input dataset is exhausted.
callbacks: List of `keras.callbacks.Callback` instances.
List of callbacks to apply during prediction.
See [callbacks](/api_docs/python/tf/keras/callbacks).
max_queue_size: Integer. Used for generator or `keras.utils.Sequence`
input only. Maximum size for the generator queue.
If unspecified, `max_queue_size` will default to 10.
workers: Integer. Used for generator or `keras.utils.Sequence` input
only. Maximum number of processes to spin up when using
process-based threading. If unspecified, `workers` will default
to 1. If 0, will execute the generator on the main thread.
use_multiprocessing: Boolean. Used for generator or
`keras.utils.Sequence` input only. If `True`, use process-based
threading. If unspecified, `use_multiprocessing` will default to
`False`. Note that because this implementation relies on
multiprocessing, you should not pass non-picklable arguments to
the generator as they can't be passed easily to children processes.
Returns:
Numpy array(s) of predictions.
Raises:
ValueError: In case of mismatch between the provided
input data and the model's expectations,
or in case a stateful model receives a number of samples
that is not a multiple of the batch size.
"""
_keras_api_gauge.get_cell('predict_v1').set(True)
self._check_call_args('predict')
func = self._select_training_loop(x)
return func.predict(
self,
x=x,
batch_size=batch_size,
verbose=verbose,
steps=steps,
callbacks=callbacks,
max_queue_size=max_queue_size,
workers=workers,
use_multiprocessing=use_multiprocessing)
def reset_metrics(self):
"""Resets the state of metrics."""
metrics = self._get_training_eval_metrics()
for m in metrics:
m.reset_states()
# Reset metrics on all the distributed (cloned) models.
if self._distribution_strategy:
distributed_training_utils._reset_metrics(self) # pylint: disable=protected-access
def train_on_batch(self,
x,
y=None,
sample_weight=None,
class_weight=None,
reset_metrics=True):
"""Runs a single gradient update on a single batch of data.
Arguments:
x: Input data. It could be:
- A Numpy array (or array-like), or a list of arrays
(in case the model has multiple inputs).
- A TensorFlow tensor, or a list of tensors
(in case the model has multiple inputs).
- A dict mapping input names to the corresponding array/tensors,
if the model has named inputs.
- A `tf.data` dataset.
y: Target data. Like the input data `x`, it could be either Numpy
array(s) or TensorFlow tensor(s). It should be consistent with `x`
(you cannot have Numpy inputs and tensor targets, or inversely). If
`x` is a dataset, `y` should not be specified
(since targets will be obtained from the iterator).
sample_weight: Optional array of the same length as x, containing
weights to apply to the model's loss for each sample. In the case of
temporal data, you can pass a 2D array with shape (samples,
sequence_length), to apply a different weight to every timestep of
every sample. In this case you should make sure to specify
sample_weight_mode="temporal" in compile(). This argument is not
supported when `x` is a dataset.
class_weight: Optional dictionary mapping class indices (integers) to a
weight (float) to apply to the model's loss for the samples from this
class during training. This can be useful to tell the model to "pay
more attention" to samples from an under-represented class.
reset_metrics: If `True`, the metrics returned will be only for this
batch. If `False`, the metrics will be statefully accumulated across
batches.
Returns:
Scalar training loss
(if the model has a single output and no metrics)
or list of scalars (if the model has multiple outputs
and/or metrics). The attribute `model.metrics_names` will give you
the display labels for the scalar outputs.
Raises:
ValueError: In case of invalid user-provided arguments.
"""
self._assert_compile_was_called()
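    # Usage sketch (assumed caller code, not part of this library): with a
    # compiled model and one batch of numpy data,
    #   results = model.train_on_batch(x_batch, y_batch)
    # where the returned scalars line up with `model.metrics_names`.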
self._check_call_args('train_on_batch')
# If at this point we are in the replica context, then it is okay to execute
# the Eager code path. The expected way to get here is to call `fit` that
# calls `train_on_batch` on each replica.
if (self._distribution_strategy and
distribution_strategy_context.in_cross_replica_context()):
raise NotImplementedError('`train_on_batch` is not supported for models '
'distributed with tf.distribute.Strategy.')
# Validate and standardize user data.
x, y, sample_weights = self._standardize_user_data(
x, y, sample_weight=sample_weight, class_weight=class_weight,
extract_tensors_from_dataset=True)
# If `self._distribution_strategy` is True, then we are in a replica context
# at this point because of the check above. `train_on_batch` is being run
# for each replica by `self._distribution_strategy` and the same code path
# as Eager is expected to be taken.
if self.run_eagerly or self._distribution_strategy:
output_dict = training_eager.train_on_batch(
self,
x,
y,
sample_weights=sample_weights,
output_loss_metrics=self._output_loss_metrics)
outputs = (output_dict['total_loss'] + output_dict['output_losses']
+ output_dict['metrics'])
outputs = [_non_none_constant_value(v) for v in outputs] # | |
Pressure in bar
eq_fug : numpy array
Equilibrium fugacity of each non-water component that will
be in equilibrium within some other phase
"""
error = 1e6
TOL = 1e-8
if hasattr(self, 'C_small'):
C_small = self.C_small
C_large = self.C_large
else:
C_small = np.zeros(self.num_comps)
C_large = np.zeros(self.num_comps)
if hasattr(self.kappa_tmp, 'copy'):
kappa = self.kappa_tmp.copy()
else:
kappa = self.kappa_tmp
lattice_sz = self.a_0
while error > TOL:
out = self.iterate_function(comps, T, P, eq_fug,
lattice_sz, kappa)
# Update these on every iteration
C_small_new = out[0]
C_large_new = out[1]
self.Y_small = out[2]
self.Y_large = out[3]
kappa = self.kappa_func(self.Y_large)
error = 0.0
for ii, comp in enumerate(comps):
if comp.compname != 'h2o':
error += (abs(C_small_new[ii]
- C_small[ii])/C_small_new[ii]
+ abs(C_large_new[ii]
- C_large[ii])/C_large_new[ii])
C_small = C_small_new
C_large = C_large_new
self.C_small = C_small
self.C_large = C_large
if hasattr(self.kappa_tmp, 'copy'):
kappa = self.kappa_tmp.copy()
else:
kappa = self.kappa_tmp
self.v_H = self.hydrate_size(T, P, self.v_H_0, kappa)
def kappa_func(self, Y_large):
"""Compressibility of hydrate function
Parameters
----------
Y_large : numpy array
Fractional occupancy of large hydrate cages
"""
if self.num_comps > 2:
kappa = 3.0*np.sum(self.kappa_vec*Y_large)
else:
kappa = self.kappa0
return kappa
def hydrate_size(self, T, P, v, kappa, dim='volumetric'):
"""Hydrate properties at T, P, and with specific composition
Parameters
----------
T : float
Temperature in Kelvin
P : float
Pressure in bar
v : float
Size of hydrate (either volumetric or linear)
kappa : float
Compressibility of hydrate
dim : str
            Dimension of interest ('volumetric' or 'linear')
Returns
----------
H_size : float
Size of hydrate in dimension specified in argument list
Notes
----------
In various places, either the linear or volumetric sizes are necessary
components in a particular calculation. A small, constant difference is
required to switch between the two.
"""
if dim == 'volumetric':
factor = 1.0
elif dim == 'linear':
factor = 1.0/3.0
else:
raise ValueError("Invalid option for optional argument 'dim'!")
H_size = (v*np.exp(factor*(self.Hs.alf[1]*(T - T_0)
+ self.Hs.alf[2]*(T - T_0)**2
+ self.Hs.alf[3]*(T - T_0)**3
- kappa*(P - P_0))))
return H_size
def h_vol_Pint(self, T, P, v, kappa):
"""Hydrate volume integrated with respect to pressure
Parameters
----------
T : float
Temperature in Kelvin
P : float
Pressure in bar
v : float
Size of hydrate (either volumetric or linear)
kappa : float
Compressibility of hydrate
Returns
----------
v : float
Hydrate volume integrated wrt pressure in cm^3 - bar /mol
"""
v = self.hydrate_size(T, P, v, kappa) / (-kappa)
return v
def lattice_to_volume(self, lattice_sz):
"""Conversion of linear hydrate size to volumetric size
Parameters
----------
lattice_sz : float
Size of hydrate as a radius in angstrom
Returns
----------
v : float
Volume of hydrate in cm^3/mol
"""
if self.Hs.hydstruc != 'sH':
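            # lattice_sz**3 is the unit-cell volume in Angstrom^3 containing
            # Num_h2o water molecules; multiplying by Avogadro's number
            # (6.0221413e23) and by 1e-24 cm^3 per Angstrom^3 gives the molar
            # volume in cm^3 per mole of water.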
v = 6.0221413e23/self.Hs.Num_h2o/1e24*lattice_sz**3
else:
v = self.Hs.v0
return v
def cage_distortion(self):
"""Distortion of cages when filled with guests
Notes
----------
This depends on composition, but not on temperature or pressure.
"""
small_const = (
(1 + self.Hs.etam['small']/self.Hs.Num_h2o)*self.Y_small_0
/ (1 + (self.Hs.etam['small']/self.Hs.Num_h2o)*self.Y_small_0)
)
# Ballard did not differentiate between multiple or single guests
# as we do here. In his version, the weighted exponential is always
# used. However, we saw better agreement by separating single and
# multiple guests.
if self.num_comps > 1:
            # Results of flash calculations for hydrocarbon systems do not produce results that are consistent
            # with CSMGem. On August 31, 2017, I commented out the following lines to see if they are consistent.
# if self.Hs.hydstruc == 's1':
# self.repulsive_small = (small_const * np.exp(
# self.D_vec - np.sum(self.Y_small_0 * self.D_vec) / np.sum(self.Y_small_0)
# ))
# else:
# self.repulsive_small = small_const
# self.repulsive_small = (small_const * np.exp(
# self.D_vec - np.sum(self.Y_small_0 * self.D_vec)
# ))
self.repulsive_small = (small_const * np.exp(
-(self.D_vec - np.sum(self.Y_small_0 * self.D_vec))
))
self.repulsive_small[self.water_ind] = 0.0
else:
self.repulsive_small = small_const
self.repulsive_large = (
(1 + self.Hs.etam['large']/self.Hs.Num_h2o) * self.Y_large_0
/ (1 + (self.Hs.etam['large']/self.Hs.Num_h2o) * self.Y_large_0)
)
self.repulsive_large[self.water_ind] = 0.0
# Determine size of lattice due to the filling of cages at T_0, P_0
def filled_lattice_size(self):
"""Size of hydrate lattice when filled
Returns
----------
lattice sz : float
Size of lattice when filled by guests
"""
self.cage_distortion()
lattice_sz = (self.Hs.a0_ast
+ (self.Hs.Nm['small']
* np.sum(self.repulsive_small * self.rep_sm_vec))
+ (self.Hs.Nm['large']
* np.sum(self.repulsive_large * self.rep_lg_vec)))
return lattice_sz
def iterate_function(self, comps, T, P, fug, lattice_sz, kappa):
"""Function that must be iterated until convergence due to nonlinearity
Parameters
----------
comps : list
List of components as 'Component' objects created with
'component_properties.py'
T : float
Temperature in Kelvin
P : float
Pressure in bar
fug : numpy array
Fugacity of each non-water component that will
be in equilibrium within some other phase
lattice_sz : float
Size of filled hydrate lattice
kappa : float
Compressibility of filled hydrate
Returns
----------
calc_list : list of numpy arrays
List of output variables such that each element is a
numpy array. First element is a numpy array of small cage
langmuir constants, second element is a numpy array of large
cage langmuir constants, third elements is a numpy array
of small cage fractional occupancies, and fourth element is
a numpy array of large cage fractional occupancies.
"""
C_small, C_large = self.langmuir_consts(comps, T, P,
lattice_sz, kappa)
Y_small = self.calc_langmuir(C_small, fug)
Y_large = self.calc_langmuir(C_large, fug)
calc_list = [C_small, C_large, Y_small, Y_large]
return calc_list
def integrand(self, r, Rn, z, eps_k, sigma, aj, T):
"""Kihara spherical potential function
Parameters
----------
r : float
General radius
Rn : list, numpy array
Radius of each cage in hydrate structure
z : int
Number of water molecules
eps_k : float
Kihara potential parameter, \epsilon (normalized by
Boltzmann's constant), of guest
sigma : float
            Kihara potential parameter, sigma, of guest
aj : float
Radius of guest molecule
T : float
Temperature in Kelvin
Returns
----------
integrand_w : numpy array
Integrand as a function of radius
"""
integrand_sum = 0
for ii in range(len(Rn)):
integrand_sum += w_func(z[ii], eps_k, r, Rn[ii], sigma, aj)
integrand_w = r**2*np.exp((-1.0/T)*integrand_sum)
return integrand_w
def compute_integral_constants(self, T, P, lattice_sz, kappa):
"""Function to compute integral for langmuir constant calculation
Parameters
----------
T : float
Temperature in Kelvin
P : float
Pressure in bar
lattice_sz : float
Size of filled hydrate lattice
kappa : float
Compressibility of filled hydrate
"""
Pfactor = self.hydrate_size(T_0, P, 1.0, kappa, dim='linear')
a_factor = (lattice_sz/self.Hs.a_norm)*self.lattice_Tfactor*Pfactor
for ii in range(len(self.Hs.R['sm'])):
self.R_sm[ii] = self.Hs.R['sm'][ii + 1]*a_factor
for ii in range(len(self.Hs.R['lg'])):
self.R_lg[ii] = self.Hs.R['lg'][ii + 1]*a_factor
def langmuir_consts(self, comps, T, P, lattice_sz, kappa):
"""Calculates langmuir constant through many interior calculations
Parameters
----------
comps : list
List of components as 'Component' objects created with
'component_properties.py'
T : float
Temperature in Kelvin
P : float
Pressure in bar
lattice_sz : float
Size of filled hydrate lattice
kappa : float
Compressibility of filled hydrate
Returns
----------
C_small : numpy array
Langmuir constants for each guest in small cage
C_large : numpy array
Langmuir constants for each guest in large cage
Notes
----------
Calculation will perform numerical integration and is numerically
expensive. Other methods are possible, but not as accurate given
the accompanying empirically fit parameter set.
"""
self.compute_integral_constants(T, P, lattice_sz, kappa)
C_small = np.zeros(self.num_comps)
C_large = np.zeros(self.num_comps)
C_const = 1e-10**3*4*np.pi/(k*T)*1e5
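# Explanatory note (not in the original source): this is the prefactor of the
# Langmuir-constant integral, C = 4*pi/(k*T) * integral( exp(-w(r)/kT) r^2 dr ).
# The 1e-10**3 factor presumably converts the cage radii from Angstrom^3 to m^3
# (1 Angstrom = 1e-10 m), and the trailing 1e5 presumably converts Pa to bar so
# that C * fugacity (in bar) is dimensionless.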
for ii, comp in enumerate(comps):
if ii != self.water_ind:
small_int = quad(self.integrand,
0,
min(self.R_sm) - comp.HvdWPM['kih']['a'],
args=(self.R_sm,
self.z_sm,
comp.HvdWPM['kih']['epsk'],
comp.HvdWPM['kih']['sig'],
comp.HvdWPM['kih']['a'],
T,))
large_int = quad(self.integrand,
0,
min(self.R_lg) - comp.HvdWPM['kih']['a'],
args=(self.R_lg,
self.z_lg,
comp.HvdWPM['kih']['epsk'],
comp.HvdWPM['kih']['sig'],
comp.HvdWPM['kih']['a'],
T,))
# Quad returns a tuple of the integral and the error.
# We want to retrieve the integrated value.
C_small[ii] = C_const*small_int[0]
C_large[ii] = C_const*large_int[0]
else:
C_small[ii] = 0
C_large[ii] = 0
return C_small, C_large
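# Note (illustrative, not from the original module): in the van der Waals-Platteeuw
# framework the fractional occupancy that ``calc_langmuir`` presumably evaluates
# from these constants is
#     Y_i = C_i * f_i / (1 + sum_j C_j * f_j)
# for each guest i in a given cage type. With illustrative numbers only:
#
#     C = np.array([0.0, 1.2e-2])          # 1/bar, water entry zeroed as above
#     f = np.array([0.0, 35.0])            # bar
#     Y = C * f / (1.0 + np.sum(C * f))    # ~0.30 occupancy for the guest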
def activity_func(self, T, P, v_H_0):
"""Calculates activity of water between aqueous phase and filled hydrate
Parameters
----------
T : float
Temperature in Kelvin
P : float
Pressure in bar
v_H_0 : float
Volume of hydrate at standard state
Returns
----------
activity : float
Activity of water
"""
kappa_wtavg = self.kappa_func(self.Y_large)
activity = (
(v_H_0 - self.a0_cubed) / R * (self.Hs.a_fit/T_0
+ self.Hs.b_fit*(1/T - 1/T_0))
+ ((self.h_vol_Pint(T, P, v_H_0, kappa_wtavg)
- self.h_vol_Pint(T, P,
# Copyright (c) 2021 <NAME>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
"""Module with generic coins configuration for all other modules."""
# Imports
from bip_utils.coin_conf.coin_conf import CoinConf
from bip_utils.utils.conf import CoinNames
# Bitcoin constants used by different coins
# Main net
_BTC_P2PKH_NET_VER_MN: bytes = b"\x00"
_BTC_P2SH_NET_VER_MN: bytes = b"\x05"
_BTC_P2WPKH_HRP_MN: str = "bc"
_BTC_P2WPKH_WIT_VER_MN: int = 0
_BTC_P2TR_HRP_MN: str = "bc"
_BTC_P2TR_WIT_VER_MN: int = 1
_BTC_WIF_NET_VER_MN: bytes = b"\x80"
# Test net
_BTC_P2PKH_NET_VER_TN: bytes = b"\x6f"
_BTC_P2SH_NET_VER_TN: bytes = b"\xc4"
_BTC_P2WPKH_HRP_TN: str = "tb"
_BTC_P2WPKH_WIT_VER_TN: int = 0
_BTC_P2TR_HRP_TN: str = "tb"
_BTC_P2TR_WIT_VER_TN: int = 1
_BTC_WIF_NET_VER_TN: bytes = b"\xef"
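# Note (added for context): these are the standard Bitcoin network constants. The
# P2PKH/P2SH bytes are version prefixes prepended to the hash before Base58Check
# encoding, the HRPs ("bc"/"tb") are the bech32 human-readable parts used for
# SegWit addresses, the witness versions distinguish P2WPKH (v0) from P2TR/Taproot
# (v1), and the WIF bytes prefix private keys in Wallet Import Format.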
class CoinsConf:
"""Class container for coins configuration."""
# Configuration for Acala
Acala: CoinConf = CoinConf(
coin_name=CoinNames("Acala", "ACA"),
params={
"addr_ss58_format": 10,
},
)
# Configuration for Akash Network
AkashNetwork: CoinConf = CoinConf(
coin_name=CoinNames("Akash Network", "AKT"),
params={
"addr_hrp": "akash",
},
)
# Configuration for Algorand
Algorand: CoinConf = CoinConf(
coin_name=CoinNames("Algorand", "ALGO"),
params={},
)
# Configuration for Avax C-Chain
AvaxCChain: CoinConf = CoinConf(
coin_name=CoinNames("Avax C-Chain", "AVAX"),
params={},
)
# Configuration for Avax P-Chain
AvaxPChain: CoinConf = CoinConf(
coin_name=CoinNames("Avax P-Chain", "AVAX"),
params={
"addr_hrp": "avax",
"addr_prefix": "P-",
},
)
# Configuration for Avax X-Chain
AvaxXChain: CoinConf = CoinConf(
coin_name=CoinNames("Avax X-Chain", "AVAX"),
params={
"addr_hrp": "avax",
"addr_prefix": "X-",
},
)
# Configuration for Band Protocol
BandProtocol: CoinConf = CoinConf(
coin_name=CoinNames("Band Protocol", "BAND"),
params={
"addr_hrp": "band",
},
)
# Configuration for Bifrost
Bifrost: CoinConf = CoinConf(
coin_name=CoinNames("Bifrost", "BNC"),
params={
"addr_ss58_format": 6,
},
)
# Configuration for Binance Chain
BinanceChain: CoinConf = CoinConf(
coin_name=CoinNames("Binance Chain", "BNB"),
params={
"addr_hrp": "bnb",
},
)
# Configuration for Binance Smart Chain
BinanceSmartChain: CoinConf = CoinConf(
coin_name=CoinNames("Binance Smart Chain", "BNB"),
params={},
)
# Configuration for Bitcoin main net
BitcoinMainNet: CoinConf = CoinConf(
coin_name=CoinNames("Bitcoin", "BTC"),
params={
"p2pkh_net_ver": _BTC_P2PKH_NET_VER_MN,
"p2sh_net_ver": _BTC_P2SH_NET_VER_MN,
"p2wpkh_hrp": _BTC_P2WPKH_HRP_MN,
"p2wpkh_wit_ver": _BTC_P2WPKH_WIT_VER_MN,
"p2tr_hrp": _BTC_P2TR_HRP_MN,
"p2tr_wit_ver": _BTC_P2TR_WIT_VER_MN,
"wif_net_ver": _BTC_WIF_NET_VER_MN,
},
)
# Configuration for Bitcoin test net
BitcoinTestNet: CoinConf = CoinConf(
coin_name=CoinNames("Bitcoin TestNet", "BTC"),
params={
"p2pkh_net_ver": _BTC_P2PKH_NET_VER_TN,
"p2sh_net_ver": _BTC_P2SH_NET_VER_TN,
"p2wpkh_hrp": _BTC_P2WPKH_HRP_TN,
"p2wpkh_wit_ver": _BTC_P2WPKH_WIT_VER_TN,
"p2tr_hrp": _BTC_P2TR_HRP_TN,
"p2tr_wit_ver": _BTC_P2TR_WIT_VER_MN,
"wif_net_ver": _BTC_WIF_NET_VER_TN,
},
)
# Configuration for Bitcoin Cash main net
BitcoinCashMainNet: CoinConf = CoinConf(
coin_name=CoinNames("Bitcoin Cash", "BCH"),
params={
"p2pkh_std_hrp": "bitcoincash",
"p2pkh_std_net_ver": _BTC_P2PKH_NET_VER_MN,
"p2pkh_legacy_net_ver": _BTC_P2PKH_NET_VER_MN,
"p2sh_std_hrp": "bitcoincash",
"p2sh_std_net_ver": b"\x08",
"p2sh_legacy_net_ver": _BTC_P2SH_NET_VER_MN,
"wif_net_ver": _BTC_WIF_NET_VER_MN,
},
)
# Configuration for Bitcoin Cash test net
BitcoinCashTestNet: CoinConf = CoinConf(
coin_name=CoinNames("Bitcoin Cash TestNet", "BCH"),
params={
"p2pkh_std_hrp": "bchtest",
"p2pkh_std_net_ver": b"\x00",
"p2pkh_legacy_net_ver": _BTC_P2PKH_NET_VER_TN,
"p2sh_std_hrp": "bchtest",
"p2sh_std_net_ver": b"\x08",
"p2sh_legacy_net_ver": _BTC_P2SH_NET_VER_TN,
"wif_net_ver": _BTC_WIF_NET_VER_TN,
},
)
# Configuration for Bitcoin Cash Simple Ledger Protocol main net
BitcoinCashSlpMainNet: CoinConf = CoinConf(
coin_name=CoinNames("Bitcoin Cash SLP", "SLP"),
params={
"p2pkh_std_hrp": "simpleledger",
"p2pkh_std_net_ver": b"\x00",
"p2pkh_legacy_net_ver": _BTC_P2PKH_NET_VER_MN,
"p2sh_std_hrp": "simpleledger",
"p2sh_std_net_ver": b"\x08",
"p2sh_legacy_net_ver": _BTC_P2SH_NET_VER_MN,
"wif_net_ver": _BTC_WIF_NET_VER_MN,
},
)
# Configuration for Bitcoin Cash Simple Ledger Protocol test net
BitcoinCashSlpTestNet: CoinConf = CoinConf(
coin_name=CoinNames("Bitcoin Cash SLP TestNet", "SLP"),
params={
"p2pkh_std_hrp": "slptest",
"p2pkh_std_net_ver": b"\x00",
"p2pkh_legacy_net_ver": _BTC_P2PKH_NET_VER_TN,
"p2sh_std_hrp": "slptest",
"p2sh_std_net_ver": b"\x08",
"p2sh_legacy_net_ver": _BTC_P2SH_NET_VER_TN,
"wif_net_ver": _BTC_WIF_NET_VER_TN,
},
)
# Configuration for Bitcoin SV main net
BitcoinSvMainNet: CoinConf = CoinConf(
coin_name=CoinNames("BitcoinSV", "BSV"),
params={
"p2pkh_net_ver": _BTC_P2PKH_NET_VER_MN,
"p2sh_net_ver": _BTC_P2SH_NET_VER_MN,
"wif_net_ver": _BTC_WIF_NET_VER_MN,
},
)
# Configuration for Bitcoin SV test net
BitcoinSvTestNet: CoinConf = CoinConf(
coin_name=CoinNames("BitcoinSV TestNet", "BSV"),
params={
"p2pkh_net_ver": _BTC_P2PKH_NET_VER_TN,
"p2sh_net_ver": _BTC_P2SH_NET_VER_TN,
"wif_net_ver": _BTC_WIF_NET_VER_TN,
},
)
# Configuration for Celo
Celo: CoinConf = CoinConf(
coin_name=CoinNames("Celo", "CELO"),
params={},
)
# Configuration for Certik
Certik: CoinConf = CoinConf(
coin_name=CoinNames("Certik", "CTK"),
params={
"addr_hrp": "certik",
},
)
# Configuration for ChainX
ChainX: CoinConf = CoinConf(
coin_name=CoinNames("ChainX", "PCX"),
params={
"addr_ss58_format": 44,
},
)
# Configuration for Chihuahua
Chihuahua: CoinConf = CoinConf(
coin_name=CoinNames("Chihuahua", "HUAHUA"),
params={
"addr_hrp": "chihuahua",
},
)
# Configuration for Cosmos
Cosmos: CoinConf = CoinConf(
coin_name=CoinNames("Cosmos", "ATOM"),
params={
"addr_hrp": "cosmos",
},
)
# Configuration for Dash main net
DashMainNet: CoinConf = CoinConf(
coin_name=CoinNames("Dash", "DASH"),
params={
"p2pkh_net_ver": b"\x4c",
"p2sh_net_ver": b"\x10",
"wif_net_ver": b"\xcc",
},
)
# Configuration for Dash test net
DashTestNet: CoinConf = CoinConf(
coin_name=CoinNames("Dash TestNet", "DASH"),
params={
"p2pkh_net_ver": b"\x8c",
"p2sh_net_ver": b"\x13",
"wif_net_ver": _BTC_WIF_NET_VER_TN,
},
)
# Configuration for Dogecoin main net
DogecoinMainNet: CoinConf = CoinConf(
coin_name=CoinNames("Dogecoin", "DOGE"),
params={
"p2pkh_net_ver": b"\x1e",
"p2sh_net_ver": b"\x16",
"wif_net_ver": b"\x9e",
},
)
# Configuration for Dogecoin test net
DogecoinTestNet: CoinConf = CoinConf(
coin_name=CoinNames("Dogecoin TestNet", "DOGE"),
params={
"p2pkh_net_ver": b"\x71",
"p2sh_net_ver": _BTC_P2SH_NET_VER_TN,
"wif_net_ver": b"\xf1",
},
)
# Configuration for eCash main net
EcashMainNet: CoinConf = CoinConf(
coin_name=CoinNames("eCash", "XEC"),
params={
"p2pkh_std_hrp": "ecash",
"p2pkh_std_net_ver": b"\x00",
"p2pkh_legacy_net_ver": _BTC_P2PKH_NET_VER_MN,
"p2sh_std_hrp": "ecash",
"p2sh_std_net_ver": b"\x08",
"p2sh_legacy_net_ver": _BTC_P2SH_NET_VER_MN,
"wif_net_ver": _BTC_WIF_NET_VER_MN,
},
)
# Configuration for eCash test net
EcashTestNet: CoinConf = CoinConf(
coin_name=CoinNames("eCash TestNet", "XEC"),
params={
"p2pkh_std_hrp": "ectest",
"p2pkh_std_net_ver": b"\x00",
"p2pkh_legacy_net_ver": _BTC_P2PKH_NET_VER_TN,
"p2sh_std_hrp": "ectest",
"p2sh_std_net_ver": b"\x08",
"p2sh_legacy_net_ver": _BTC_P2SH_NET_VER_TN,
"wif_net_ver": _BTC_WIF_NET_VER_TN,
},
)
# Configuration for Edgeware
Edgeware: CoinConf = CoinConf(
coin_name=CoinNames("Edgeware", "EDG"),
params={
"addr_ss58_format": 7,
},
)
# Configuration for Elrond
Elrond: CoinConf = CoinConf(
coin_name=CoinNames("Elrond eGold", "eGLD"),
params={
"addr_hrp": "erd",
},
)
# Configuration for Eos
Eos: CoinConf = CoinConf(
coin_name=CoinNames("EOS", "EOS"),
params={
"addr_prefix": "EOS",
},
)
# Configuration for Ethereum
Ethereum: CoinConf = CoinConf(
coin_name=CoinNames("Ethereum", "ETH"),
params={
"addr_prefix": "0x",
},
)
# Configuration for Ethereum Classic
EthereumClassic: CoinConf = CoinConf(
coin_name=CoinNames("Ethereum Classic", "ETC"),
params={},
)
# Configuration for Fantom Opera
FantomOpera: CoinConf = CoinConf(
coin_name=CoinNames("Fantom Opera", "FTM"),
params={},
)
# Configuration for Filecoin
Filecoin: CoinConf = CoinConf(
coin_name=CoinNames("Filecoin", "FIL"),
params={
"addr_prefix": "f",
},
)
# Configuration for generic Substrate coin
GenericSubstrate: CoinConf = CoinConf(
coin_name=CoinNames("Generic Substrate", ""),
params={
"addr_ss58_format": 42,
},
)
# Configuration for Harmony One
HarmonyOne: CoinConf = CoinConf(
coin_name=CoinNames("Harmony One", "ONE"),
params={
"addr_hrp": "one",
},
)
# Configuration for Huobi Chain
HuobiChain: CoinConf = CoinConf(
coin_name=CoinNames("Huobi Token", "HT"),
params={},
)
# Configuration for IRISnet
IrisNet: CoinConf = CoinConf(
coin_name=CoinNames("IRIS Network", "IRIS"),
params={
"addr_hrp": "iaa",
},
)
# Configuration for Karura
Karura: CoinConf = CoinConf(
coin_name=CoinNames("Karura", "KAR"),
params={
"addr_ss58_format": 8,
},
)
# Configuration for Kava
Kava: CoinConf = CoinConf(
coin_name=CoinNames("Kava", "KAVA"),
params={
"addr_hrp": "kava",
},
)
# Configuration for Kusama
Kusama: CoinConf = CoinConf(
coin_name=CoinNames("Kusama", "KSM"),
params={
"addr_ss58_format": 2,
},
)
# Configuration for Litecoin main net
LitecoinMainNet: CoinConf = CoinConf(
coin_name=CoinNames("Litecoin", "LTC"),
params={
"p2pkh_std_net_ver": b"\x30",
"p2pkh_depr_net_ver": _BTC_P2PKH_NET_VER_MN,
"p2sh_std_net_ver": b"\x32",
"p2sh_depr_net_ver": _BTC_P2SH_NET_VER_MN,
"p2wpkh_hrp": "ltc",
"p2wpkh_wit_ver": _BTC_P2WPKH_WIT_VER_MN,
"wif_net_ver": b"\xb0",
},
)
# Configuration for Litecoin test net
LitecoinTestNet: CoinConf = CoinConf(
coin_name=CoinNames("Litecoin TestNet", "LTC"),
params={
"p2pkh_std_net_ver": b"\x6f",
"p2pkh_depr_net_ver": _BTC_P2PKH_NET_VER_TN,
"p2sh_std_net_ver": b"\x3a",
"p2sh_depr_net_ver": _BTC_P2SH_NET_VER_TN,
"p2wpkh_hrp": "tltc",
"p2wpkh_wit_ver": _BTC_P2WPKH_WIT_VER_TN,
"wif_net_ver": _BTC_WIF_NET_VER_TN,
},
)
# Configuration for Monero main net
MoneroMainNet: CoinConf = CoinConf(
coin_name=CoinNames("Monero", "XMR"),
params={
"addr_net_ver": b"\x12",
"addr_int_net_ver": b"\x13",
"subaddr_net_ver": b"\x2a",
},
)
# Configuration for Monero stage net
MoneroStageNet: CoinConf = CoinConf(
coin_name=CoinNames("Monero StageNet", "XMR"),
params={
"addr_net_ver": b"\x18",
"addr_int_net_ver": b"\x19",
"subaddr_net_ver": b"\x24",
},
)
# Configuration for Monero test net
MoneroTestNet: CoinConf = CoinConf(
coin_name=CoinNames("Monero TestNet", "XMR"),
params={
"addr_net_ver": b"\x35",
"addr_int_net_ver": b"\x36",
"subaddr_net_ver": b"\x3f",
},
)
# Configuration for Moonbeam
Moonbeam: CoinConf = CoinConf(
coin_name=CoinNames("Moonbeam", "GLMR"),
params={
"addr_ss58_format": 1284,
},
)
# Configuration for Moonriver
Moonriver: CoinConf = CoinConf(
coin_name=CoinNames("Moonriver", "MOVR"),
params={
"addr_ss58_format": 1285,
},
)
# Configuration for Nano
Nano: CoinConf = CoinConf(
coin_name=CoinNames("Nano", "NANO"),
params={
"addr_prefix": "nano_",
},
)
# Configuration for Near Protocol
NearProtocol: CoinConf = CoinConf(
coin_name=CoinNames("Near Protocol", "NEAR"),
params={},
)
# Configuration for Neo
Neo: CoinConf = CoinConf(
coin_name=CoinNames("NEO", "NEO"),
params={
"addr_ver": b"\x17",
},
)
# Configuration for Nine Chronicles Gold
NineChroniclesGold: CoinConf = CoinConf(
coin_name=CoinNames("NineChroniclesGold", "NCG"),
name
paste $folder*concoct.stats|tail -n +2 | sed "s/^/$var.bin./g" >> concoct.checkm
paste $folder*metabat.stats|tail -n +2 | sed "s/^/$var./g" >> metabat.checkm
paste $folder*maxbin.stats|tail -n +2 >> maxbin.checkm
paste $folder*metawrap_*_bins.stats|tail -n +2|sed "s/^/$var./g" >> refined.checkm
done
echo "Done reading metawrap refined output, moving refined_bins.stats, concoct.checkm, metabat.checkm, maxbin.checkm, and refined.checkm files to $(echo {input}/{config[folder][reassembled]}) ."
mv *.stats *.checkm {input}/{config[folder][reassembled]}
# READ METAWRAP REASSEMBLED BINS
echo "Generating reassembled_bins.stats file containing bin ID, number of contigs, and length ... "
cd {input}/{config[folder][reassembled]}
for folder in */;do
samp=$(echo $folder | sed 's|/||'); # Define sample name
for bin in $folder*reassembled_bins/*.fa;do
name=$(echo $bin | sed 's/.fa//g' | sed 's|^.*/||g' | sed "s/^/$samp./g"); # Define bin name
N=$(less $bin | grep -c ">");
# Need to check if bins are original (megahit-assembled) or strict/permissive (metaspades-assembled)
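# (Assumed example) The strict/permissive branch below expects SPAdes-style contig
# headers such as ">NODE_1_length_52374_cov_7.5", where the 4th "_"-separated
# field (52374) is the contig length; the exact header layout is an assumption.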
if [[ $name == *.strict ]] || [[ $name == *.permissive ]];then
L=$(less $bin |grep ">"|cut -d '_' -f4|awk '{{sum+=$1}}END{{print sum}}')
else
L=$(less $bin |grep ">"|cut -d '-' -f4|sed 's/len_//g'|awk '{{sum+=$1}}END{{print sum}}')
fi
echo "Reading bin $bin ... Contigs: $N , Length: $L "
echo $name $N $L >> reassembled_bins.stats;
done;
done
echo "Done reading metawrap reassembled bins ... "
# READ METAWRAP REFINED CHECKM OUTPUT
echo "Generating CheckM summary file reassembled.checkm across samples for reassembled bins ... "
for folder in */;do
var=$(echo $folder|sed 's|/||g');
paste $folder*reassembled_bins.stats|tail -n +2|sed "s/^/$var./g";
done >> reassembled.checkm
echo "Done generating all statistics files for binning results ... running plotting script ... "
# RUN PLOTTING R SCRIPT
mv *.stats *.checkm {config[path][root]}/{config[folder][stats]}
cd {config[path][root]}/{config[folder][stats]}
Rscript {config[path][root]}/{config[folder][scripts]}/{config[scripts][binningVis]}
rm Rplots.pdf # Delete redundant pdf file
echo "Done. "
"""
rule GTDBtk:
input:
f'{config["path"]["root"]}/{config["folder"]["reassembled"]}/{{IDs}}/reassembled_bins'
output:
directory(f'{config["path"]["root"]}/GTDBtk/{{IDs}}')
benchmark:
f'{config["path"]["root"]}/benchmarks/{{IDs}}.GTDBtk.benchmark.txt'
message:
"""
The folder dna_bins_organized is assumed to contain subfolders with DNA bins for refined and reassembled bins.
Note: inputs/outputs were slightly modified for the European dataset.
"""
shell:
"""
set +u;source activate gtdbtk-tmp;set -u;
export GTDBTK_DATA_PATH=/g/scb2/patil/zorrilla/conda/envs/gtdbtk/share/gtdbtk-1.1.0/db/
cd $SCRATCHDIR
cp -r {input} .
gtdbtk classify_wf --genome_dir $(basename {input}) --out_dir GTDBtk -x fa --cpus {config[cores][gtdbtk]}
mkdir -p {output}
mv GTDBtk/* {output}
"""
rule classifyGenomes:
input:
bins = f'{config["path"]["root"]}/{config["folder"]["reassembled"]}/{{IDs}}/reassembled_bins',
script = f'{config["path"]["root"]}/{config["folder"]["scripts"]}/classify-genomes'
output:
directory(f'{config["path"]["root"]}/{config["folder"]["classification"]}/{{IDs}}')
benchmark:
f'{config["path"]["root"]}/benchmarks/{{IDs}}.classify-genomes.benchmark.txt'
shell:
"""
set +u;source activate {config[envs][metabagpipes]};set -u;
mkdir -p {output}
cd $TMPDIR
cp -r {input.script}/* {input.bins}/* .
echo "Begin classifying bins ... "
for bin in *.fa; do
echo -e "\nClassifying $bin ... "
$PWD/classify-genomes $bin -t {config[cores][classify]} -o $(echo $bin|sed 's/.fa/.taxonomy/')
cp *.taxonomy {output}
rm *.taxonomy
rm $bin
done
echo "Done classifying bins. "
"""
rule taxonomyVis:
input:
f'{config["path"]["root"]}/{config["folder"]["classification"]}'
output:
text = f'{config["path"]["root"]}/{config["folder"]["stats"]}/classification.stats',
plot = f'{config["path"]["root"]}/{config["folder"]["stats"]}/taxonomyVis.pdf'
message:
"""
Generate bar plot with most common taxa (n>15) and density plots with mapping statistics.
"""
shell:
"""
set +u;source activate {config[envs][metabagpipes]};set -u;
cd {input}
echo -e "\nBegin reading classification result files ... \n"
for folder in */;do
for file in $folder*.taxonomy;do
# Define sample ID to append to start of each bin name in summary file
sample=$(echo $folder|sed 's|/||')
# Define bin name with sample ID, shorten metaWRAP naming scheme (orig/permissive/strict)
fasta=$(echo $file | sed 's|^.*/||' | sed 's/.taxonomy//g' | sed 's/orig/o/g' | sed 's/permissive/p/g' | sed 's/strict/s/g' | sed "s/^/$sample./g");
# Extract NCBI ID
NCBI=$(less $file | grep NCBI | cut -d ' ' -f4);
# Extract consensus taxonomy
tax=$(less $file | grep tax | sed 's/Consensus taxonomy: //g');
# Extract consensus motus
motu=$(less $file | grep mOTUs | sed 's/Consensus mOTUs: //g');
# Extract number of detected genes
detect=$(less $file | grep detected | sed 's/Number of detected genes: //g');
# Extract percentage of agreeing genes
percent=$(less $file | grep agreeing | sed 's/Percentage of agreeing genes: //g' | sed 's/%//g');
# Extract number of mapped genes
map=$(less $file | grep mapped | sed 's/Number of mapped genes: //g');
# Extract COG IDs, need to use set +e;...;set -e to avoid erroring out when reading .taxonomy result file for bin with no taxonomic annotation
set +e
cog=$(less $file | grep COG | cut -d$'\t' -f1 | tr '\n' ',' | sed 's/,$//g');
set -e
# Display and store extracted results
echo -e "$fasta \t $NCBI \t $tax \t $motu \t $detect \t $map \t $percent \t $cog"
echo -e "$fasta \t $NCBI \t $tax \t $motu \t $detect \t $map \t $percent \t $cog" >> classification.stats;
done;
done
echo -e "\nDone generating classification.stats summary file, moving to stats/ directory and running taxonomyVis.R script ... "
mv classification.stats {config[path][root]}/{config[folder][stats]}
cd {config[path][root]}/{config[folder][stats]}
Rscript {config[path][root]}/{config[folder][scripts]}/{config[scripts][taxonomyVis]}
rm Rplots.pdf # Delete redundant pdf file
echo "Done. "
"""
rule abundance:
input:
bins = f'{config["path"]["root"]}/{config["folder"]["reassembled"]}/{{IDs}}/reassembled_bins',
READS = rules.qfilter.output
output:
directory(f'{config["path"]["root"]}/{config["folder"]["abundance"]}/{{IDs}}')
benchmark:
f'{config["path"]["root"]}/benchmarks/{{IDs}}.abundance.benchmark.txt'
message:
"""
Calculate bin abundance fraction using the following:
binAbundanceFraction = ( X / Y / Z) * 1000000
X = # of reads mapped to bin_i from sample_k
Y = length of bin_i (bp)
Z = # of reads mapped to all bins in sample_k
Note: 1000000 scaling factor converts length in bp to Mbp
"""
shell:
"""
set +u;source activate {config[envs][metabagpipes]};set -u;
mkdir -p {output}
cd $TMPDIR
echo -e "\nCopying quality filtered single end reads and generated MAGs to TMPDIR ... "
cp {input.READS} {input.bins}/* .
echo -e "\nConcatenating all bins into one FASTA file ... "
cat *.fa > $(basename {output}).fa
echo -e "\nCreating bwa index for concatenated FASTA file ... "
bwa index $(basename {output}).fa
echo -e "\nMapping quality filtered single end reads to concatenated FASTA file with bwa mem ... "
bwa mem -t {config[cores][abundance]} $(basename {output}).fa \
$(basename {input.READS}) > $(basename {output}).sam
echo -e "\nConverting SAM to BAM with samtools view ... "
samtools view -@ {config[cores][abundance]} -Sb $(basename {output}).sam > $(basename {output}).bam
echo -e "\nSorting BAM file with samtools sort ... "
samtools sort -@ {config[cores][abundance]} -o $(basename {output}).sort.bam $(basename {output}).bam
echo -e "\nExtracting stats from sorted BAM file with samtools flagstat ... "
samtools flagstat $(basename {output}).sort.bam > map.stats
echo -e "\nCopying sample_map.stats file to root/abundance/sample for bin concatenation and deleting temporary FASTA file ... "
cp map.stats {output}/$(basename {output})_map.stats
rm $(basename {output}).fa
echo -e "\nRepeat procedure for each bin ... "
for bin in *.fa;do
echo -e "\nSetting up temporary sub-directory to map against bin $bin ... "
mkdir -p $(echo "$bin"| sed "s/.fa//")
mv $bin $(echo "$bin"| sed "s/.fa//")
cd $(echo "$bin"| sed "s/.fa//")
echo -e "\nCreating bwa index for bin $bin ... "
bwa index $bin
echo -e "\nMapping quality filtered single end reads to bin $bin with bwa mem ... "
bwa mem -t {config[cores][abundance]} $bin ../$(basename {input.READS}) > $(echo "$bin"|sed "s/.fa/.sam/")
echo -e "\nConverting SAM to BAM with samtools view ... "
samtools view -@ {config[cores][abundance]} -Sb $(echo "$bin"|sed "s/.fa/.sam/") > $(echo "$bin"|sed "s/.fa/.bam/")
echo -e "\nSorting BAM file with samtools sort ... "
samtools sort -@ {config[cores][abundance]} -o $(echo "$bin"|sed "s/.fa/.sort.bam/") $(echo "$bin"|sed "s/.fa/.bam/")
echo -e "\nExtracting stats from sorted BAM file with samtools flagstat ... "
samtools flagstat $(echo "$bin"|sed "s/.fa/.sort.bam/") > $(echo "$bin"|sed "s/.fa/.map/")
echo -e "\nAppending bin length to bin.map stats file ... "
echo -n "Bin Length = " >> $(echo "$bin"|sed "s/.fa/.map/")
# Need to check if bins are original (megahit-assembled) or strict/permissive (metaspades-assembled)
if [[ $bin == *.strict.fa ]] || [[ $bin == *.permissive.fa ]];then
less $bin |grep ">"|cut -d '_' -f4|awk '{{sum+=$1}}END{{print sum}}' >> $(echo "$bin"|sed "s/.fa/.map/")
else
less $bin |grep ">"|cut -d '-' -f4|sed 's/len_//g'|awk '{{sum+=$1}}END{{print sum}}' >> $(echo "$bin"|sed "s/.fa/.map/")
fi
paste $(echo "$bin"|sed "s/.fa/.map/")
echo -e "\nCalculating abundance for bin $bin ... "
echo -n "$bin"|sed "s/.fa//" >> $(echo "$bin"|sed "s/.fa/.abund/")
echo -n $'\t' >> $(echo "$bin"|sed "s/.fa/.abund/")
X=$(less $(echo "$bin"|sed "s/.fa/.map/")|grep "mapped ("|awk -F' ' '{{print $1}}')
Y=$(less $(echo "$bin"|sed "s/.fa/.map/")|tail -n 1|awk -F' ' '{{print $4}}')
Z=$(less "../map.stats"|grep "mapped ("|awk -F' ' '{{print $1}}')
awk -v x="$X" -v y="$Y" -v z="$Z" 'BEGIN{{print (x/y/z) * 1000000}}' >> $(echo "$bin"|sed "s/.fa/.abund/")
paste $(echo "$bin"|sed "s/.fa/.abund/")
echo -e "\nRemoving | |
import string
from scml.nlp import (
RepeatingCharacter,
RepeatingSubstring,
collapse_whitespace,
count_alpha,
count_digit,
count_punctuation,
count_space,
count_upper,
emoji_shortcode_to_text,
has_1a1d,
ngrams,
sentences,
split,
strip_ip_address,
strip_punctuation,
strip_url,
strip_xml,
to_str,
)
class TestToStr:
def test_case_1(self):
assert to_str("a1") == "a1"
# noinspection PyTypeChecker
def test_numerics_cast_to_string(self):
assert to_str(1) == "1"
assert to_str(1.2) == "1.2"
class TestCountDigit:
def test_case_1(self):
assert count_digit(" aA!") == 0
assert count_digit(" a12A!") == 2
class TestCountSpace:
def test_case_1(self):
assert count_space("a1A!") == 0
assert count_space(" a1A! ") == 2
class TestCountAlpha:
def test_case_1(self):
assert count_alpha(" !") == 0
assert count_alpha(" a1A!") == 2
class TestCountUpper:
def test_case_1(self):
assert count_upper(" a1!") == 0
assert count_upper(" Ba1A!") == 2
class TestCountPunctuation:
def test_case_1(self):
assert count_punctuation(" a1A") == 0
assert count_punctuation(" ?a1A!") == 2
class TestCollapseWhitespace:
def test_no_replacement(self):
assert collapse_whitespace("") == ""
assert collapse_whitespace("a") == "a"
def test_convert_whitespace_to_space_char(self):
assert collapse_whitespace("1\t2") == "1 2"
assert collapse_whitespace("1\r2") == "1 2"
assert collapse_whitespace("1\n2") == "1 2"
assert collapse_whitespace("1\f2") == "1 2"
assert collapse_whitespace("1\t2\r3\n4\f5") == "1 2 3 4 5"
assert collapse_whitespace("1\t 2\r 3\n 4\f 5") == "1 2 3 4 5"
def test_string_is_trimmed_on_both_ends(self):
assert collapse_whitespace(" ") == ""
assert collapse_whitespace("\t\r\n\f") == ""
assert collapse_whitespace("\na \t\r\n\fb\n") == "a b"
assert collapse_whitespace(" a \t\r\n\fb ") == "a b"
class TestRepeatingCharacter:
def test_count(self):
assert RepeatingCharacter.count("") == 0
assert RepeatingCharacter.count("a") == 0
assert RepeatingCharacter.count("aa") == 1
assert RepeatingCharacter.count("1") == 0
assert RepeatingCharacter.count("11") == 0
for p in string.punctuation:
assert RepeatingCharacter.count(p) == 0
assert RepeatingCharacter.count(p * 2) == 1
assert RepeatingCharacter.count("aa\n\naa\t\t!!") == 3
def test_no_replacement(self):
max_times = 2
f = RepeatingCharacter(max_times=max_times, letters=True, punctuation=True)
assert f.collapse("") == ""
assert f.collapse("a") == "a"
assert f.collapse("aa") == "aa"
for p in string.punctuation:
inp = p * max_times
assert f.collapse(inp) == inp
def test_repeating_letter(self):
f = RepeatingCharacter(max_times=2, letters=True, punctuation=False)
assert f.collapse("aaa") == "aa"
assert f.collapse("aaabbb") == "aabb"
assert f.collapse("abbba") == "abba"
assert f.collapse("abbba abbba") == "abba abba"
def test_repeating_letter_is_case_preserving(self):
f = RepeatingCharacter(max_times=2, letters=True, punctuation=False)
assert f.collapse("AAA") == "AA"
def test_repeating_punctuation(self):
max_times = 2
f = RepeatingCharacter(max_times=max_times, letters=False, punctuation=True)
for p in string.punctuation:
inp = p * (max_times + 1)
e = p * max_times
assert f.collapse(inp) == e
assert f.collapse("a!!! b??? ***c*** --->d") == "a!! b?? **c** -->d"
class TestRepeatingSubstring:
def test_count(self):
f = RepeatingSubstring(
min_length=2,
max_times=1,
letters=True,
punctuation=True,
whitespace=True,
)
assert f.count("") == 0
assert f.count("\n") == 0
assert f.count("\n\n") == 0
assert f.count("\n\n\n") == 0
assert f.count("a") == 0
assert f.count("aa") == 0
assert f.count("aaa") == 0
assert f.count("ab ab") == 0
assert f.count("abab") == 1
assert f.count("ababab") == 2
assert f.count("abababab") == 3
assert f.count("ab cdab cd") == 1
assert f.count("ab cdab cdab cd") == 2
assert f.count(" ab cd ab cd") == 1
assert f.count(" ab cd ab cd ab cd") == 2
assert f.count("ab?cd!ab?cd!") == 1
assert f.count("ab?cd!ab?cd!ab?cd!") == 2
assert f.count("ab? cd!ab? cd!") == 1
assert f.count("ab? cd!ab? cd!ab? cd!") == 2
assert f.count(" ab? cd! ab? cd! ab? cd!") == 2
def test_count_char(self):
f = RepeatingSubstring(
min_length=2,
max_times=1,
letters=True,
punctuation=True,
whitespace=True,
)
assert f.count_char("") == 0
assert f.count_char("\n") == 0
assert f.count_char("\n\n") == 0
assert f.count_char("\n\n\n") == 0
assert f.count_char("a") == 0
assert f.count_char("aa") == 0
assert f.count_char("aaa") == 0
assert f.count_char("ab ab") == 0
assert f.count_char("abab") == 2
assert f.count_char("ababab") == 4
assert f.count_char("abababab") == 6
assert f.count_char("ab cdab cd") == 5
assert f.count_char("ab cdab cdab cd") == 10
assert f.count_char(" ab cd ab cd") == 6
assert f.count_char(" ab cd ab cd ab cd") == 12
assert f.count_char("ab?cd!ab?cd!") == 6
assert f.count_char("ab?cd!ab?cd!ab?cd!") == 12
assert f.count_char("ab? cd!ab? cd!") == 7
assert f.count_char("ab? cd!ab? cd!ab? cd!") == 14
assert f.count_char(" ab? cd! ab? cd! ab? cd!") == 16
def test_no_replacement(self):
min_length = 2
max_times = 1
f = RepeatingSubstring(
min_length=min_length,
max_times=max_times,
letters=True,
punctuation=True,
whitespace=True,
)
assert f.collapse("") == ""
assert f.collapse("\n") == "\n"
assert f.collapse("\n\n") == "\n\n"
assert f.collapse("\n\n\n") == "\n\n\n"
assert f.collapse("a") == "a"
assert f.collapse("aa") == "aa"
assert f.collapse("aaa") == "aaa"
assert f.collapse("ab ab") == "ab ab"
for p in string.punctuation:
inp = (p * min_length) * max_times
assert f.collapse(inp) == inp
def test_repeating_letter(self):
f = RepeatingSubstring(
min_length=2,
max_times=1,
letters=True,
punctuation=False,
whitespace=True,
)
assert f.collapse("abab") == "ab"
assert f.collapse("ab cdab cd") == "ab cd"
assert f.collapse(" ab cd ab cd") == " ab cd"
def test_repeating_letter_is_case_preserving(self):
f = RepeatingSubstring(
min_length=2,
max_times=1,
letters=True,
punctuation=False,
whitespace=False,
)
assert f.collapse("ABAB") == "AB"
def test_repeating_punctuation(self):
min_length = 2
max_times = 1
f = RepeatingSubstring(
min_length=min_length,
max_times=max_times,
letters=False,
punctuation=True,
whitespace=True,
)
for p in string.punctuation:
e = p * min_length
inp = e * (max_times + 1)
assert f.collapse(inp) == e
assert f.collapse("!?!?") == "!?"
assert f.collapse("!? $#!? $#") == "!? $#"
assert f.collapse(" !? $# !? $#") == " !? $#"
def test_all_allowed_chars(self):
f = RepeatingSubstring(
min_length=2,
max_times=1,
letters=True,
punctuation=True,
whitespace=True,
)
assert f.collapse("ab?cd!ab?cd!") == "ab?cd!"
assert f.collapse("ab? cd!ab? cd!") == "ab? cd!"
assert f.collapse(" ab? cd! ab? cd!") == " ab? cd!"
class TestSplit:
def test_delimiter_length_equals_1(self):
assert split(
delimiters=["a"],
s="a1a2a",
) == ["", "1", "2", ""]
assert split(delimiters=["a", "b"], s="ab1ba2ab",) == [
"",
"",
"1",
"",
"2",
"",
"",
]
def test_delimiter_length_greater_than_1(self):
assert split(
delimiters=["a", "...", "(c)"],
s="stackoverflow (c) is awesome... isn't it?",
) == ["st", "ckoverflow ", " is ", "wesome", " isn't it?"]
def test_punctuation(self):
assert split(
delimiters=["!", ".", "?", ")", "(", ","],
s="hi, there! greetings. how are you? (foo) end",
) == ["hi", " there", " greetings", " how are you", " ", "foo", " end"]
class TestNgrams:
def test_gram_number(self):
assert ngrams(["hello", "world", "foo", "bar"], n=1) == [
("hello",),
("world",),
("foo",),
("bar",),
]
assert ngrams(["hello", "world", "foo", "bar"], n=2) == [
("hello", "world"),
("world", "foo"),
("foo", "bar"),
]
assert ngrams(["hello", "world", "foo", "bar"], n=3) == [
("hello", "world", "foo"),
("world", "foo", "bar"),
]
assert ngrams(["hello", "world", "foo", "bar"], n=4) == [
("hello", "world", "foo", "bar"),
]
assert ngrams(["hello", "world", "foo", "bar"], n=5) == []
def test_skip_set(self):
assert ngrams(["hello", "world", "foo", "bar"], n=2, skip={"hello", "foo"}) == [
("world", "bar"),
]
class TestSentences:
def test_end_of_sentence_punctuation(self):
assert sentences("Full stop. Question mark? Exclamation mark! The end.") == [
"Full stop.",
"Question mark?",
"Exclamation mark!",
"The end.",
]
def test_salutations(self):
assert sentences("Mr. <NAME> met Dr. Watson for coffee.") == [
"Mr. <NAME> met Dr. Watson for coffee."
]
def test_period_delimited_strings(self):
assert sentences("foo 123.456 bar") == ["foo 123.456 bar"]
assert sentences("foo 123.456.789.123 bar") == ["foo 123.456.789.123 bar"]
assert sentences("foo abc.def.ghk bar") == ["foo abc.def.ghk bar"]
class TestHasAtLeastOneDigitAndOneLetter:
def test_no_matches(self):
assert not has_1a1d("")
assert not has_1a1d("A")
assert not has_1a1d("a")
assert not has_1a1d("1")
assert not has_1a1d("Aa")
assert not has_1a1d("aA")
assert not has_1a1d("12")
assert not has_1a1d("1.2")
assert not has_1a1d("1,234")
def test_matches(self):
assert has_1a1d("A1")
assert has_1a1d("a1")
assert has_1a1d("1A")
assert has_1a1d("1a")
assert has_1a1d("10x20")
def test_include_chars(self):
include = ":-"
assert has_1a1d("a-1", include=include)
assert has_1a1d("A-1", include=include)
assert has_1a1d("1-a", include=include)
assert has_1a1d("1-A", include=include)
assert has_1a1d("a:1", include=include)
assert has_1a1d("A:1", include=include)
assert has_1a1d("1:a", include=include)
assert has_1a1d("1:A", include=include)
assert has_1a1d("-a1", include=include)
assert has_1a1d("a1-", include=include)
assert has_1a1d(":a1", include=include)
assert has_1a1d("a1:", include=include)
# Allow only chars inside the whitelist
assert not has_1a1d(",a1", include=include)
assert not has_1a1d("a,1", include=include)
assert not has_1a1d("a1,", include=include)
# Missing either letter or digit
assert not has_1a1d('15"', include='"')
class TestStripPunctuation:
def test_no_replacement(self):
assert strip_xml("") == ""
assert strip_xml("a1") == "a1"
def test_replacement(self):
for p in string.punctuation:
assert strip_punctuation(p) == ""
class TestStripXml:
def test_no_replacement(self):
assert strip_xml("") == ""
assert strip_xml("a") == "a"
assert strip_xml("1 < 2 and 2 > 1") == "1 < 2 and 2 > 1"
assert strip_xml("1<2 and 2>1") == "1<2 and 2>1"
def test_replacement(self):
assert strip_xml("<strong>a</strong>") == "a"
assert strip_xml("<p>a</p><p>b</p>") == "ab"
assert strip_xml("<br />") == ""
class TestStripUrl:
def test_no_replacement(self):
assert strip_url("") == ""
assert strip_url("a") == "a"
assert strip_url(".com") == ".com"
assert strip_url("a.com") == "a.com"
assert strip_url("www.a") == "www.a"
assert strip_url("sub1.a.com") == "sub1.a.com"
assert strip_url("www.a#.com") == "www.a#.com"
assert strip_url("www.a-.com") == "www.a-.com"
assert strip_url("www.-a.com") == "www.-a.com"
assert strip_url("http://www.a") == "http://www.a"
assert strip_url("http://a") == "http://a"
assert strip_url("s3://a.com") == "s3://a.com"
assert strip_url("a.com/dir1") == "a.com/dir1"
assert strip_url("a.com/file.html") == "a.com/file.html"
def test_scheme_and_domain_name(self):
assert strip_url("http://a.com") == ""
assert strip_url("https://a.com") == ""
assert strip_url("https://mp3.com") == ""
assert strip_url("1 https://mp3.com 2") == "1 2"
def test_subdomain(self):
# coding: utf-8
"""
NiFi Rest Api
The Rest Api provides programmatic access to command and control a NiFi instance in real time. Start and stop processors, monitor queues, query provenance data, and more. Each endpoint below includes a description, definitions of the expected input and output, potential response codes, and the authorizations required to invoke each service.
OpenAPI spec version: 1.9.1
Contact: <EMAIL>
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from pprint import pformat
from six import iteritems
import re
class ControllerServiceDTO(object):
"""
NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'id': 'str',
'versioned_component_id': 'str',
'parent_group_id': 'str',
'position': 'PositionDTO',
'name': 'str',
'type': 'str',
'bundle': 'BundleDTO',
'controller_service_apis': 'list[ControllerServiceApiDTO]',
'comments': 'str',
'state': 'str',
'persists_state': 'bool',
'restricted': 'bool',
'deprecated': 'bool',
'multiple_versions_available': 'bool',
'properties': 'dict(str, str)',
'descriptors': 'dict(str, PropertyDescriptorDTO)',
'custom_ui_url': 'str',
'annotation_data': 'str',
'referencing_components': 'list[ControllerServiceReferencingComponentEntity]',
'validation_errors': 'list[str]',
'validation_status': 'str',
'extension_missing': 'bool'
}
attribute_map = {
'id': 'id',
'versioned_component_id': 'versionedComponentId',
'parent_group_id': 'parentGroupId',
'position': 'position',
'name': 'name',
'type': 'type',
'bundle': 'bundle',
'controller_service_apis': 'controllerServiceApis',
'comments': 'comments',
'state': 'state',
'persists_state': 'persistsState',
'restricted': 'restricted',
'deprecated': 'deprecated',
'multiple_versions_available': 'multipleVersionsAvailable',
'properties': 'properties',
'descriptors': 'descriptors',
'custom_ui_url': 'customUiUrl',
'annotation_data': 'annotationData',
'referencing_components': 'referencingComponents',
'validation_errors': 'validationErrors',
'validation_status': 'validationStatus',
'extension_missing': 'extensionMissing'
}
def __init__(self, id=None, versioned_component_id=None, parent_group_id=None, position=None, name=None, type=None, bundle=None, controller_service_apis=None, comments=None, state=None, persists_state=None, restricted=None, deprecated=None, multiple_versions_available=None, properties=None, descriptors=None, custom_ui_url=None, annotation_data=None, referencing_components=None, validation_errors=None, validation_status=None, extension_missing=None):
"""
ControllerServiceDTO - a model defined in Swagger
"""
self._id = None
self._versioned_component_id = None
self._parent_group_id = None
self._position = None
self._name = None
self._type = None
self._bundle = None
self._controller_service_apis = None
self._comments = None
self._state = None
self._persists_state = None
self._restricted = None
self._deprecated = None
self._multiple_versions_available = None
self._properties = None
self._descriptors = None
self._custom_ui_url = None
self._annotation_data = None
self._referencing_components = None
self._validation_errors = None
self._validation_status = None
self._extension_missing = None
if id is not None:
self.id = id
if versioned_component_id is not None:
self.versioned_component_id = versioned_component_id
if parent_group_id is not None:
self.parent_group_id = parent_group_id
if position is not None:
self.position = position
if name is not None:
self.name = name
if type is not None:
self.type = type
if bundle is not None:
self.bundle = bundle
if controller_service_apis is not None:
self.controller_service_apis = controller_service_apis
if comments is not None:
self.comments = comments
if state is not None:
self.state = state
if persists_state is not None:
self.persists_state = persists_state
if restricted is not None:
self.restricted = restricted
if deprecated is not None:
self.deprecated = deprecated
if multiple_versions_available is not None:
self.multiple_versions_available = multiple_versions_available
if properties is not None:
self.properties = properties
if descriptors is not None:
self.descriptors = descriptors
if custom_ui_url is not None:
self.custom_ui_url = custom_ui_url
if annotation_data is not None:
self.annotation_data = annotation_data
if referencing_components is not None:
self.referencing_components = referencing_components
if validation_errors is not None:
self.validation_errors = validation_errors
if validation_status is not None:
self.validation_status = validation_status
if extension_missing is not None:
self.extension_missing = extension_missing
@property
def id(self):
"""
Gets the id of this ControllerServiceDTO.
The id of the component.
:return: The id of this ControllerServiceDTO.
:rtype: str
"""
return self._id
@id.setter
def id(self, id):
"""
Sets the id of this ControllerServiceDTO.
The id of the component.
:param id: The id of this ControllerServiceDTO.
:type: str
"""
self._id = id
@property
def versioned_component_id(self):
"""
Gets the versioned_component_id of this ControllerServiceDTO.
The ID of the corresponding component that is under version control
:return: The versioned_component_id of this ControllerServiceDTO.
:rtype: str
"""
return self._versioned_component_id
@versioned_component_id.setter
def versioned_component_id(self, versioned_component_id):
"""
Sets the versioned_component_id of this ControllerServiceDTO.
The ID of the corresponding component that is under version control
:param versioned_component_id: The versioned_component_id of this ControllerServiceDTO.
:type: str
"""
self._versioned_component_id = versioned_component_id
@property
def parent_group_id(self):
"""
Gets the parent_group_id of this ControllerServiceDTO.
The id of parent process group of this component if applicable.
:return: The parent_group_id of this ControllerServiceDTO.
:rtype: str
"""
return self._parent_group_id
@parent_group_id.setter
def parent_group_id(self, parent_group_id):
"""
Sets the parent_group_id of this ControllerServiceDTO.
The id of parent process group of this component if applicable.
:param parent_group_id: The parent_group_id of this ControllerServiceDTO.
:type: str
"""
self._parent_group_id = parent_group_id
@property
def position(self):
"""
Gets the position of this ControllerServiceDTO.
The position of this component in the UI if applicable.
:return: The position of this ControllerServiceDTO.
:rtype: PositionDTO
"""
return self._position
@position.setter
def position(self, position):
"""
Sets the position of this ControllerServiceDTO.
The position of this component in the UI if applicable.
:param position: The position of this ControllerServiceDTO.
:type: PositionDTO
"""
self._position = position
@property
def name(self):
"""
Gets the name of this ControllerServiceDTO.
The name of the controller service.
:return: The name of this ControllerServiceDTO.
:rtype: str
"""
return self._name
@name.setter
def name(self, name):
"""
Sets the name of this ControllerServiceDTO.
The name of the controller service.
:param name: The name of this ControllerServiceDTO.
:type: str
"""
self._name = name
@property
def type(self):
"""
Gets the type of this ControllerServiceDTO.
The type of the controller service.
:return: The type of this ControllerServiceDTO.
:rtype: str
"""
return self._type
@type.setter
def type(self, type):
"""
Sets the type of this ControllerServiceDTO.
The type of the controller service.
:param type: The type of this ControllerServiceDTO.
:type: str
"""
self._type = type
@property
def bundle(self):
"""
Gets the bundle of this ControllerServiceDTO.
The details of the artifact that bundled this processor type.
:return: The bundle of this ControllerServiceDTO.
:rtype: BundleDTO
"""
return self._bundle
@bundle.setter
def bundle(self, bundle):
"""
Sets the bundle of this ControllerServiceDTO.
The details of the artifact that bundled this processor type.
:param bundle: The bundle of this ControllerServiceDTO.
:type: BundleDTO
"""
self._bundle = bundle
@property
def controller_service_apis(self):
"""
Gets the controller_service_apis of this ControllerServiceDTO.
Lists the APIs this Controller Service implements.
:return: The controller_service_apis of this ControllerServiceDTO.
:rtype: list[ControllerServiceApiDTO]
"""
return self._controller_service_apis
@controller_service_apis.setter
def controller_service_apis(self, controller_service_apis):
"""
Sets the controller_service_apis of this ControllerServiceDTO.
Lists the APIs this Controller Service implements.
:param controller_service_apis: The controller_service_apis of this ControllerServiceDTO.
:type: list[ControllerServiceApiDTO]
"""
self._controller_service_apis = controller_service_apis
@property
def comments(self):
"""
Gets the comments of this ControllerServiceDTO.
The comments for the controller service.
:return: The comments of this ControllerServiceDTO.
:rtype: str
"""
return self._comments
@comments.setter
def comments(self, comments):
"""
Sets the comments of this ControllerServiceDTO.
The comments for the controller service.
:param comments: The comments of this ControllerServiceDTO.
:type: str
"""
self._comments = comments
@property
def state(self):
"""
Gets the state of this ControllerServiceDTO.
The state of the controller service.
:return: The state of this ControllerServiceDTO.
:rtype: str
"""
return self._state
@state.setter
def state(self, state):
"""
Sets the state of this ControllerServiceDTO.
The state of the controller service.
:param state: The state of this ControllerServiceDTO.
:type: str
"""
allowed_values = ["ENABLED", "ENABLING", "DISABLED", "DISABLING"]
if state not in allowed_values:
raise ValueError(
"Invalid value for `state` ({0}), must be one of {1}"
.format(state, allowed_values)
)
self._state = state
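# Usage example (added, not part of the generated code): only the four lifecycle
# states listed above are accepted, e.g.
#     dto = ControllerServiceDTO()
#     dto.state = "ENABLED"   # ok
#     dto.state = "STOPPED"   # raises ValueError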
@property
def persists_state(self):
"""
Gets the persists_state of this ControllerServiceDTO.
Whether the controller service persists state.
:return: The persists_state of this ControllerServiceDTO.
:rtype: bool
"""
return self._persists_state
@persists_state.setter
def persists_state(self, persists_state):
"""
Sets the persists_state of this ControllerServiceDTO.
Whether the controller service persists state.
:param persists_state: The persists_state of this ControllerServiceDTO.
:type: bool
"""
self._persists_state = persists_state
@property
def restricted(self):
"""
Gets the restricted of this ControllerServiceDTO.
Whether the controller service requires elevated privileges.
:return: The restricted of this ControllerServiceDTO.
:rtype: bool
"""
return self._restricted
@restricted.setter
def restricted(self, restricted):
"""
Sets the restricted of this ControllerServiceDTO.
Whether the controller service requires elevated privileges.
:param restricted: The restricted of this ControllerServiceDTO.
:type: bool
"""
self._restricted = restricted
@property
def deprecated(self):
"""
Gets the deprecated of this ControllerServiceDTO.
Whether the controller service has been deprecated.
:return: The deprecated of this ControllerServiceDTO.
:rtype: bool
"""
return self._deprecated
@deprecated.setter
def deprecated(self, deprecated):
"""
Sets
= None
elif latest["time"] is None:
clrs.update(dict(_sym=cfg.dark, _sepl="", _sepr="",
_prc=(cfg.faint_shade if lnum % 2 else
cfg.faint_tint), _vol="", _chg=""))
change, pulse = 0, None
# Must divide by 100 because ``_pulse_over`` is a %
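# Example (added): with last_seen["last"] == 250.0 and _pulse_over == 0.5,
# the threshold is 0.5 / 100 * 250.0 == 1.25, so smaller moves do not pulse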
elif (abs(abs(latest["last"]) - abs(last_seen["last"])) >
abs(_pulse_over / 100 * last_seen["last"])):
pulse = None
_wait = 0.0764 if PULSE == "fast" else 0.124
if change - last_seen["chg"] > 0:
pulse = "+"
clrs["_beg"] = cbg.green
if not HAS_24:
clrs.update(dict(_sym=cfg.green, _sepl="", _sepr="",
_vol="", _prc="", _chg=""))
else:
pulse = "-"
clrs["_beg"] = cbg.red
if not HAS_24:
clrs.update(dict(_sym=cfg.red, _sepl="", _sepr="",
_vol="", _prc="", _chg=""))
try:
with await semaphore:
print(up,
fmt.format("", "", base=base.lower(), sep=sep,
quote=quote.lower(), **clrs, **latest,
volconv=volconv),
down,
sep="", end="", flush=True)
except asyncio.CancelledError:
break
last_seen.update(latest)
#
return "Cancelled _paint_ticker_line for: %s" % sym
async def do_run_ticker(ranked, client, loop, manage_subs=True,
manage_sigs=True):
"""
Common keys::
"ask", "bid", "last", "open", "volB",
"volQ", "time", "sym", "chg", "chgP"
The value of ``open`` is that of ``last`` from 24 hours ago and is
continuous/"moving". This can't be gotten with the various ``*Candle``
calls because the limit for ``period="M1"`` is 1000, but we'd need 1440.
"""
if manage_sigs:
# Actually unnecessary since existing uses default handler
old_sig_info = remove_async_sig_handlers("SIGINT", loop=loop).pop()
def rt_sig_cb(**kwargs):
kwargs.setdefault("msg", "Received SIGINT, quitting")
out_futs.update(kwargs)
if not all(t.cancelled() for t in tasks):
client.echo("Cancelling tasks")
for task in tasks:
task.cancel()
# Not sure this can ever run; the thinking is that a user might send multiple
# SIGINTs in rapid succession. A naive test with the kill utility did not
# trigger it, but this still needs verification.
else:
client.echo("Already cancelled: %r" % gathered)
loop.call_later(0.1,
client.echo, "Cancelled tasks: %r" % tasks)
add_async_sig_handlers(old_sig_info, loop=loop)
# No need to partialize since ``gathered``, which ``rt_sig_cb``
# should have closure over once initialized below, will be the same
# object when the trap is sprung
add_async_sig_handlers(("SIGINT", rt_sig_cb), loop=loop)
#
c_fg = client.foreground_256
c_bg = client.background_256
if HAS_24:
if client.foreground_24 is None:
globals()["HAS_24"] = False
else:
c_fg = client.foreground_24
c_bg = client.background_24
#
all_subs = set(ranked)
# Ensure conversion pairs available for all volume units
if VOL_UNIT:
if "USD" not in VOL_UNIT and VOL_UNIT not in client.markets:
# XXX should eventually move this block somewhere else
return {"error": "%r is not a market currency supported by %s" %
(VOL_UNIT, client.exchange)}
if manage_subs:
if VOL_UNIT == "USD" and "USD" not in client.markets:
assert "USDT" in client.markets
globals()["VOL_UNIT"] = "USDT"
all_subs |= await client.get_market_conversion_pairs(VOL_UNIT)
else:
client.echo("The ``VOL_UNIT`` option requires ``manage_subs``", 3)
globals()["VOL_UNIT"] = None
#
# Results to return
out_futs = {}
#
# Abbreviations
cls, clt = client.symbols, client.ticker
#
if manage_subs:
await asyncio.gather(*map(client.subscribe_ticker, all_subs))
max_tries = 3
while max_tries:
if all(s in clt and s in cls for s in ranked):
break
await asyncio.sleep(1)
max_tries -= 1
else:
out_futs["subs"] = await asyncio.gather(
*map(client.unsubscribe_ticker, all_subs)
)
out_futs["error"] = "Problem subscribing to remote service"
return out_futs
#
# TODO determine practicality of using existing volume rankings reaped
# during arg parsing in ``choose_pairs()``
if VOL_UNIT and VOL_SORTED:
vr = sorted((_convert_volume(client, s, cls[s]["curB"], cls[s]["curQ"],
decimate(clt[s])), s)
for s in ranked)
ranked = [s for v, s in vr]
#
# Arbitrarily assume biggest volume and/or change could grow 10x between
# open/close, so +1 for those.
#
# TODO move all this widths figuring to a separate coro that updates some
# shared location at regular intervals. If max column width is exceeded,
# just lower precision for the offending item. So, if some "change" value
# were to grow from 99.99 to 100.00, make it 100.0 instead.
sep = "/"
volstr = "Vol (%s)" % (VOL_UNIT or "base") + (" " if VOL_UNIT else "")
if VOL_UNIT:
try:
vprec = "USD ETH BTC".split().index(VOL_UNIT)
except ValueError:
vprec = 0 # Covers USDT and corners like BNB, XRP, BCH
# Market (symbol) pairs will be "concatenated" (no intervening padding)
sym_widths = (
# Base
max(len(cls[s]["curB"]) for s in ranked),
# Sep
len(sep),
# Quote (corner case: left-justifying, so need padding)
max(len(cls[s]["curQ"]) for s in ranked)
)
# Can't decide among exchange name, "" (blank), "Pair," and "Product"
widths = (
# 1: Exchange name
max(sum(sym_widths), len(client.exchange)),
# 2: Price
max(len("{:.2f}".format(Dec(clt[s]["last"])) if
"USD" in s else clt[s]["last"]) for s in ranked),
# 3: Volume
max(*(len("{:,.{pc}f}"
.format(_convert_volume(client, s, cls[s]["curB"],
cls[s]["curQ"], decimate(clt[s])),
pc=vprec) if VOL_UNIT else clt[s]["volB"])
for s in ranked), len(volstr)),
# 4: Bid
max(len("{:.2f}".format(Dec(clt[s]["bid"])) if
"USD" in s else clt[s]["bid"]) for s in ranked),
# 5: Ask
max(len("{:.2f}".format(Dec(clt[s]["ask"])) if
"USD" in s else clt[s]["ask"]) for s in ranked),
# 6: Change (should maybe do max++ for breathing room)
max(len("{:+.3f}%".format(
(Dec(clt[s]["last"]) - Dec(clt[s]["open"])) / Dec(clt[s]["open"])
)) for s in ranked),
)
pad = 2
widths = (pad, # <- 0: Left padding
*(l + pad for l in widths),
pad) # <- 7: Right padding
del cls, clt
#
# Die nicely when needed width exceeds what's available
if sum(widths) > os.get_terminal_size().columns:
msg = ("Insufficient terminal width. Need %d more column(s)."
% (sum(widths) - os.get_terminal_size().columns))
out_futs["error"] = msg
if manage_subs:
out_futs["subs"] = await asyncio.gather(
*map(client.unsubscribe_ticker, all_subs)
)
return out_futs
# Format string for actual line items.
fmt_parts = [
"{_beg}{:%d}" % widths[0],
"{_sym}{base}{_sepl}{sep}{_sepr}{quote:<{quote_w}}",
"{_prc}{last:<%df}" % widths[2],
"{_vol}" + ("{volconv:>%d,.%df}%s" %
(widths[3] - pad, vprec, " " * pad) if
VOL_UNIT else "{volB:<%df}" % widths[3]),
"{bid:<%df}" % widths[4],
"{ask:<%df}" % widths[5],
"{_chg}{chg:>+%d.3%%}" % widths[6],
"{:%d}{_end}" % widths[7]
]
fmt = "".join(fmt_parts)
#
_print_heading(client, (c_bg, c_fg), widths, len(ranked), volstr)
#
semaphore = asyncio.Semaphore(1)
snapshots = {}
coros = []
for lnum, sym in enumerate(ranked):
base = client.symbols[sym]["curB"]
quote = client.symbols[sym]["curQ"]
fmt_nudge = (
"".join(
(fmt_parts[n].replace("f}", ".2f}") if n in (1, 4, 5) else
fmt_parts[n] for n in range(len(fmt_parts)))
)
if "USD" in quote and Dec(client.ticker[sym]["last"]) >= Dec(10)
else fmt
).replace("{quote_w}", "%d" % (widths[1] - len(base) - len(sep)))
#
coros.append(_paint_ticker_line(
client, lnum, sym, semaphore, snapshots, fmt_nudge,
(c_bg, c_fg), (base, quote), wait=(0.1 * len(ranked)),
pulse_over=(PULSE_OVER if PULSE else 100.0)
))
# Should conversion pairs (all_subs) be included here if not displayed?
ts_chk = _check_timestamps(all_subs, client, rt_sig_cb, STRICT_TIME)
#
tasks = [asyncio.ensure_future(c) for c in (*coros, ts_chk)]
gathered = asyncio.gather(*tasks)
#
try:
out_futs["gathered"] = await gathered
except Exception as exc:
# Repr of ``Future.exception`` only contains exc name
out_futs["gathered"] = gathered.exception()
from traceback import print_exc, format_exc
if LOGFILE:
print_exc(file=LOGFILE)
elif not isinstance(exc, asyncio.CancelledError):
out_futs["gathered"] = {"error": format_exc()}
finally:
if manage_subs:
client.echo("Unsubscribing", 6)
gunsubs = asyncio.gather(*map(client.unsubscribe_ticker, all_subs))
try:
out_futs["subs"] = await gunsubs
# Catch network/inet errors, etc.
except Exception:
from traceback import print_exc, format_exc
if LOGFILE:
out_futs["subs"] = gunsubs.exception()
print_exc(file=LOGFILE)
else:
tb_str = format_exc()
if "ConnectionClosed" not in tb_str:
out_futs["subs"] = {"error": tb_str}
if manage_sigs:
add_async_sig_handlers(old_sig_info, loop=loop)
return out_futs
async def choose_pairs(client):
"""
If the length of named pairs alone exceeds the terminal height, trim
from the end (rightmost args). Afterwards, reduce NUM leaders, as
required. Print a warning for dropped syms if AUTO_CULL is on,
otherwise raise a ValueError. Note: This will probably have to be
redone when argparse stuff is added.
"""
num = None
syms = []
msg = []
#
if len(sys.argv) == 1:
num = min(MAX_FILL, MAX_HEIGHT)
elif sys.argv[1].isdigit():
num = int(sys.argv[1])
if num == 0: # Don't auto-fill regardless of AUTO_FILL
num = None
syms = sys.argv[2:]
else:
syms = sys.argv[1:]
if AUTO_FILL: # ... till MAX_FILL (or MAX_HEIGHT)
num = 0
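# Examples of the argument handling above (illustrative; assumes the script is
# invoked as ``python ticker.py ...``):
#     ticker.py                -> num = min(MAX_FILL, MAX_HEIGHT), syms = []
#     ticker.py 5 BTCUSD       -> num = 5, syms = ["BTCUSD"]
#     ticker.py ETHBTC XRPBTC  -> syms = ["ETHBTC", "XRPBTC"], num = 0 if AUTO_FILL else None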
#
ranked = []
num_skipped = 0
# Need to preserve order, so can't use set union here
for sym in reversed(syms):
try:
symbol = await client.canonicalize_pair(sym)
except ValueError as e:
# Could use ``warnings.warn`` for stuff like this
msg += ["%r not found, removing..." % sym]
if AUTO_FILL:
num_skipped += 1
else:
if symbol not in ranked:
ranked.append(symbol)
#
if len(ranked) > MAX_HEIGHT:
msg += ["Too many pairs requested for current terminal height. "
"Over by %d." % (len(ranked) - MAX_HEIGHT)]
if not AUTO_CULL:
raise ValueError(msg)
culled = ranked[-1 * (len(ranked) - MAX_HEIGHT):]
ranked = ranked[:-1 * len(culled)]
msg += ["\nAUTO_CULL is on; | |
# Copyright 2016-2020 Swiss National Supercomputing Centre (CSCS/ETH Zurich)
# ReFrame Project Developers. See the top-level LICENSE file for details.
#
# SPDX-License-Identifier: BSD-3-Clause
import abc
import os
import pytest
import re
import socket
import tempfile
import time
import unittest
from datetime import datetime
import reframe.core.runtime as rt
import reframe.utility.os_ext as os_ext
import unittests.fixtures as fixtures
from reframe.core.environments import Environment
from reframe.core.exceptions import JobError, JobNotStartedError
from reframe.core.launchers.local import LocalLauncher
from reframe.core.launchers.registry import getlauncher
from reframe.core.schedulers import Job
from reframe.core.schedulers.registry import getscheduler
from reframe.core.schedulers.slurm import _SlurmNode, _create_nodes
class _TestJob(abc.ABC):
def setUp(self):
self.workdir = tempfile.mkdtemp(dir='unittests')
self.testjob = Job.create(
self.scheduler, self.launcher,
name='testjob',
workdir=self.workdir,
script_filename=os_ext.mkstemp_path(
dir=self.workdir, suffix='.sh'
),
stdout=os_ext.mkstemp_path(dir=self.workdir, suffix='.out'),
stderr=os_ext.mkstemp_path(dir=self.workdir, suffix='.err'),
)
self.environs = [Environment(name='foo', modules=['testmod_foo'])]
self.pre_run = ['echo prerun']
self.post_run = ['echo postrun']
self.parallel_cmd = 'hostname'
def tearDown(self):
os_ext.rmtree(self.workdir)
def prepare(self):
with rt.module_use('unittests/modules'):
self.testjob.prepare(self.commands, self.environs)
@property
def commands(self):
runcmd = self.launcher.run_command(self.testjob)
return [*self.pre_run,
runcmd + ' ' + self.parallel_cmd,
*self.post_run]
@property
def scheduler(self):
return getscheduler(self.sched_name)()
@property
@abc.abstractmethod
def sched_name(self):
'''Return the registered name of the scheduler.'''
@property
def sched_configured(self):
return True
@property
def launcher(self):
return getlauncher(self.launcher_name)()
@property
@abc.abstractmethod
def launcher_name(self):
'''Return the registered name of the launcher.'''
@abc.abstractmethod
def setup_user(self, msg=None):
'''Configure the test for running with the user supplied job scheduler
configuration or skip it.
'''
partition = fixtures.partition_with_scheduler(self.sched_name)
if partition is None:
msg = msg or "scheduler '%s' not configured" % self.sched_name
pytest.skip(msg)
self.testjob._sched_access = partition.access
def assertScriptSanity(self, script_file):
'''Assert the sanity of the produced script file.'''
with open(self.testjob.script_filename) as fp:
matches = re.findall(r'echo prerun|echo postrun|hostname',
fp.read())
assert ['echo prerun', 'hostname', 'echo postrun'] == matches
def setup_job(self):
# Mock up a job submission
self.testjob.time_limit = '5m'
self.testjob.num_tasks = 16
self.testjob.num_tasks_per_node = 2
self.testjob.num_tasks_per_core = 1
self.testjob.num_tasks_per_socket = 1
self.testjob.num_cpus_per_task = 18
self.testjob.use_smt = True
self.testjob.options = ['--gres=gpu:4',
'#DW jobdw capacity=100GB',
'#DW stage_in source=/foo']
self.testjob._sched_nodelist = 'nid000[00-17]'
self.testjob._sched_exclude_nodelist = 'nid00016'
self.testjob._sched_partition = 'foo'
self.testjob._sched_reservation = 'bar'
self.testjob._sched_account = 'spam'
self.testjob._sched_exclusive_access = True
def test_prepare(self):
self.prepare()
self.assertScriptSanity(self.testjob.script_filename)
@fixtures.switch_to_user_runtime
def test_submit(self):
self.setup_user()
self.prepare()
assert self.testjob.nodelist is None
self.testjob.submit()
assert self.testjob.jobid is not None
self.testjob.wait()
@fixtures.switch_to_user_runtime
def test_submit_timelimit(self, check_elapsed_time=True):
self.setup_user()
self.parallel_cmd = 'sleep 10'
self.testjob.time_limit = '2s'
self.prepare()
t_job = datetime.now()
self.testjob.submit()
assert self.testjob.jobid is not None
self.testjob.wait()
t_job = datetime.now() - t_job
if check_elapsed_time:
assert t_job.total_seconds() >= 2
assert t_job.total_seconds() < 3
with open(self.testjob.stdout) as fp:
assert re.search('postrun', fp.read()) is None
@fixtures.switch_to_user_runtime
def test_cancel(self):
self.setup_user()
self.parallel_cmd = 'sleep 30'
self.prepare()
t_job = datetime.now()
self.testjob.submit()
self.testjob.cancel()
self.testjob.wait()
t_job = datetime.now() - t_job
assert self.testjob.finished()
assert t_job.total_seconds() < 30
def test_cancel_before_submit(self):
self.parallel_cmd = 'sleep 3'
self.prepare()
with pytest.raises(JobNotStartedError):
self.testjob.cancel()
def test_wait_before_submit(self):
self.parallel_cmd = 'sleep 3'
self.prepare()
with pytest.raises(JobNotStartedError):
self.testjob.wait()
@fixtures.switch_to_user_runtime
def test_poll(self):
self.setup_user()
self.parallel_cmd = 'sleep 2'
self.prepare()
self.testjob.submit()
assert not self.testjob.finished()
self.testjob.wait()
def test_poll_before_submit(self):
self.parallel_cmd = 'sleep 3'
self.prepare()
with pytest.raises(JobNotStartedError):
self.testjob.finished()
def test_no_empty_lines_in_preamble(self):
for l in self.testjob.scheduler.emit_preamble(self.testjob):
assert l != ''
def test_guess_num_tasks(self):
self.testjob.num_tasks = 0
with pytest.raises(NotImplementedError):
self.testjob.guess_num_tasks()
class TestLocalJob(_TestJob, unittest.TestCase):
def assertProcessDied(self, pid):
try:
os.kill(pid, 0)
pytest.fail('process %s is still alive' % pid)
except (ProcessLookupError, PermissionError):
pass
@property
def sched_name(self):
return 'local'
@property
def launcher_name(self):
return 'local'
@property
def sched_configured(self):
return True
def setup_user(self, msg=None):
# Local scheduler is by definition available
pass
def test_submit(self):
super().test_submit()
assert 0 == self.testjob.exitcode
assert [socket.gethostname()] == self.testjob.nodelist
def test_submit_timelimit(self):
super().test_submit_timelimit()
assert self.testjob.state == 'TIMEOUT'
def test_cancel_with_grace(self):
# This test emulates a spawned process that ignores the SIGTERM signal
# and also spawns another process:
#
# reframe --- local job script --- sleep 10
# (TERM IGN)
#
# We expect the job not to be cancelled immediately, since it ignores
# the gracious signal we are sending it. However, we expect it to be
# killed immediately after the grace period of 2 seconds expires.
#
# We also check that the additional spawned process is also killed.
self.parallel_cmd = 'sleep 5 &'
self.pre_run = ['trap -- "" TERM']
self.post_run = ['echo $!', 'wait']
self.testjob.time_limit = '1m'
self.testjob.scheduler._cancel_grace_period = 2
self.prepare()
self.testjob.submit()
# Stall a bit here to let the spawned process start and install its
# signal handler for SIGTERM
time.sleep(1)
t_grace = datetime.now()
self.testjob.cancel()
t_grace = datetime.now() - t_grace
self.testjob.wait()
# Read pid of spawned sleep
with open(self.testjob.stdout) as f:
sleep_pid = int(f.read())
assert t_grace.total_seconds() >= 2
assert t_grace.total_seconds() < 5
assert self.testjob.state == 'TIMEOUT'
# Verify that the spawned sleep is killed, too
self.assertProcessDied(sleep_pid)
def test_cancel_term_ignore(self):
# This test emulates a descendant process of the spawned job that
# ignores the SIGTERM signal:
#
# reframe --- local job script --- sleep_deeply.sh --- sleep
# (TERM IGN)
#
# Since the "local job script" does not ignore SIGTERM, it will be
# terminated immediately after we cancel the job. However, the deeply
# spawned sleep will ignore it. We need to make sure that our
# implementation grants the sleep process a grace period and then
# kills it.
self.pre_run = []
self.post_run = []
self.parallel_cmd = os.path.join(fixtures.TEST_RESOURCES_CHECKS,
'src', 'sleep_deeply.sh')
self.testjob._cancel_grace_period = 2
self.prepare()
self.testjob.submit()
# Stall a bit here to let the spawned process start and install its
# signal handler for SIGTERM
time.sleep(1)
t_grace = datetime.now()
self.testjob.cancel()
t_grace = datetime.now() - t_grace
self.testjob.wait()
# Read pid of spawned sleep
with open(self.testjob.stdout) as f:
sleep_pid = int(f.read())
assert t_grace.total_seconds() >= 2
assert self.testjob.state == 'TIMEOUT'
# Verify that the spawned sleep is killed, too
self.assertProcessDied(sleep_pid)
def test_guess_num_tasks(self):
# We want to trigger bug #1087 (Github); that's why we set the allocation
# policy to idle.
self.testjob.num_tasks = 0
self.testjob._sched_flex_alloc_nodes = 'idle'
self.prepare()
self.testjob.submit()
self.testjob.wait()
assert self.testjob.num_tasks == 1
class TestSlurmJob(_TestJob, unittest.TestCase):
@property
def sched_name(self):
return 'slurm'
@property
def launcher_name(self):
return 'local'
@property
def sched_configured(self):
return fixtures.partition_with_scheduler('slurm') is not None
def setup_user(self, msg=None):
super().setup_user(msg='SLURM (with sacct) not configured')
def test_prepare(self):
self.setup_job()
super().test_prepare()
expected_directives = set([
'#SBATCH --job-name="testjob"',
'#SBATCH --time=0:5:0',
'#SBATCH --output=%s' % self.testjob.stdout,
'#SBATCH --error=%s' % self.testjob.stderr,
'#SBATCH --ntasks=%s' % self.testjob.num_tasks,
'#SBATCH --ntasks-per-node=%s' % self.testjob.num_tasks_per_node,
'#SBATCH --ntasks-per-core=%s' % self.testjob.num_tasks_per_core,
('#SBATCH --ntasks-per-socket=%s' %
self.testjob.num_tasks_per_socket),
'#SBATCH --cpus-per-task=%s' % self.testjob.num_cpus_per_task,
'#SBATCH --hint=multithread',
'#SBATCH --nodelist=%s' % self.testjob.sched_nodelist,
'#SBATCH --exclude=%s' % self.testjob.sched_exclude_nodelist,
'#SBATCH --partition=%s' % self.testjob.sched_partition,
'#SBATCH --reservation=%s' % self.testjob.sched_reservation,
'#SBATCH --account=%s' % self.testjob.sched_account,
'#SBATCH --exclusive',
# Custom options and directives
'#SBATCH --gres=gpu:4',
'#DW jobdw capacity=100GB',
'#DW stage_in source=/foo'
])
with open(self.testjob.script_filename) as fp:
found_directives = set(re.findall(r'^\#\w+ .*', fp.read(),
re.MULTILINE))
assert expected_directives == found_directives
def test_prepare_no_exclusive(self):
self.setup_job()
self.testjob._sched_exclusive_access = False
super().test_prepare()
with open(self.testjob.script_filename) as fp:
assert re.search(r'--exclusive', fp.read()) is None
def test_prepare_no_smt(self):
self.setup_job()
self.testjob.use_smt = None
super().test_prepare()
with open(self.testjob.script_filename) as fp:
assert re.search(r'--hint', fp.read()) is None
def test_prepare_with_smt(self):
self.setup_job()
self.testjob.use_smt = True
super().test_prepare()
with open(self.testjob.script_filename) as fp:
assert re.search(r'--hint=multithread', fp.read()) is not None
def test_prepare_without_smt(self):
self.setup_job()
self.testjob.use_smt = False
super().test_prepare()
with open(self.testjob.script_filename) as fp:
assert re.search(r'--hint=nomultithread', fp.read()) is not None
def test_submit(self):
super().test_submit()
assert 0 == self.testjob.exitcode
num_tasks_per_node = self.testjob.num_tasks_per_node or 1
num_nodes = self.testjob.num_tasks // num_tasks_per_node
assert num_nodes == len(self.testjob.nodelist)
def test_submit_timelimit(self):
# Skip this test for Slurm, since the minimum time limit is 1min
pytest.skip("SLURM's minimum time limit is 60s")
def test_cancel(self):
super().test_cancel()
assert self.testjob.state == 'CANCELLED'
def test_guess_num_tasks(self):
self.testjob.num_tasks = 0
self.testjob._sched_flex_alloc_nodes = 'all'
# Monkey patch `allnodes()` to simulate extraction of
# slurm nodes through the use of `scontrol show`
self.testjob.scheduler.allnodes = lambda: set()
# monkey patch `_get_default_partition()` to simulate extraction
# of the default partition through the use of `scontrol show`
self.testjob.scheduler._get_default_partition = lambda: 'pdef'
assert self.testjob.guess_num_tasks() == 0
def test_submit_job_array(self):
self.testjob.options = ['--array=0-1']
self.parallel_cmd = 'echo "Task id: ${SLURM_ARRAY_TASK_ID}"'
super().test_submit()
assert self.testjob.exitcode == 0
with open(self.testjob.stdout) as fp:
output = fp.read()
assert all([re.search('Task id: 0', output),
re.search('Task id: 1', output)])
class TestSqueueJob(TestSlurmJob):
@property
def sched_name(self):
return 'squeue'
def setup_user(self, msg=None):
partition = (fixtures.partition_with_scheduler(self.sched_name) or
fixtures.partition_with_scheduler('slurm'))
if partition is None:
pytest.skip('SLURM not configured')
self.testjob.options += partition.access
def test_submit(self):
# Squeue backend may not set the exitcode; bypass our parent's submit
_TestJob.test_submit(self)
class TestPbsJob(_TestJob, unittest.TestCase):
@property
def sched_name(self):
return 'pbs'
@property
def launcher_name(self):
return 'local'
@property
def sched_configured(self):
return fixtures.partition_with_scheduler('pbs') is not None
def setup_user(self, msg=None):
super().setup_user(msg='PBS not configured')
def test_prepare(self):
self.setup_job()
self.testjob.options += ['mem=100GB', 'cpu_type=haswell']
super().test_prepare()
num_nodes = self.testjob.num_tasks // self.testjob.num_tasks_per_node
num_cpus_per_node = (self.testjob.num_cpus_per_task *
self.testjob.num_tasks_per_node)
import os
import fcntl
from os.path import join as pjoin
import shutil
import subprocess
from glob import glob
from string import Template
from pprint import pformat
import tempfile
import errno
import select
from StringIO import StringIO
import json
from ..hdist_logging import CRITICAL, ERROR, WARNING, INFO, DEBUG
from .common import working_directory
LOG_PIPE_BUFSIZE = 4096
class InvalidJobSpecError(ValueError):
pass
class JobFailedError(RuntimeError):
pass
def run_job(logger, build_store, job_spec, override_env, virtuals, cwd, config):
"""Runs a job in a controlled environment, according to rules documented above.
Parameters
----------
logger : Logger
build_store : BuildStore
BuildStore to find referenced artifacts in.
job_spec : document
See above
override_env : dict
Extra environment variables not present in job_spec, these will be added
last and overwrite existing ones.
virtuals : dict
Maps virtual artifact to real artifact IDs.
cwd : str
The starting working directory of the script. Currently this
cannot be changed (though a ``cd`` command may be implemented in
the future if necessary)
config : dict
Configuration from :mod:`hashdist.core.config`. This will be
serialized and put into the HDIST_CONFIG environment variable
for use by ``hdist``.
Returns
-------
out_env: dict
The environment with modifications done by "root scope" of
the script (modifications done in nested scopes are intentionally
discarded).
"""
job_spec = canonicalize_job_spec(job_spec)
env = get_imports_env(build_store, virtuals, job_spec['import'])
env.update(job_spec['env'])
env.update(job_spec['env_nohash'])
env.update(override_env)
env['HDIST_VIRTUALS'] = pack_virtuals_envvar(virtuals)
env['HDIST_CONFIG'] = json.dumps(config, separators=(',', ':'))
executor = ScriptExecution(logger)
try:
out_env = executor.run(job_spec['script'], env, cwd)
finally:
executor.close()
return out_env
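# A minimal sketch of driving run_job (illustrative only; ``logger``,
# ``build_store`` and ``config`` are assumed to come from the caller's
# hashdist setup, and the spec below is made up for the example). Since the
# spec has no imports, a usable PATH is supplied through ``override_env``.
def _example_run_job(logger, build_store, config):
    job_spec = {
        "import": [],                            # no artifact dependencies
        "env": {"GREETING": "hello"},
        "script": [
            ["OUT=$(echo", "$GREETING", ")"],    # capture stdout into OUT
            ["echo", "$OUT"],
        ],
    }
    return run_job(logger, build_store, job_spec,
                   override_env={"PATH": "/usr/bin:/bin"},
                   virtuals={}, cwd=".", config=config)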
def canonicalize_job_spec(job_spec):
"""Returns a copy of job_spec with default values filled in.
Also performs a tiny bit of validation.
"""
def canonicalize_import(item):
item = dict(item)
item.setdefault('in_env', True)
if item.setdefault('ref', None) == '':
raise ValueError('Empty ref should be None, not ""')
item['before'] = sorted(item.get('before', []))
return item
result = dict(job_spec)
result['import'] = [
canonicalize_import(item) for item in result.get('import', ())]
result['import'].sort(key=lambda item: item['id'])
result.setdefault("env", {})
result.setdefault("env_nohash", {})
result.setdefault("script", [])
return result
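# Hedged illustration of the canonicalization (made-up spec): defaults are
# filled in and the imports are sorted by their "id".
def _example_canonicalize_job_spec():
    spec = {"import": [{"id": "b", "ref": "B"}, {"id": "a"}]}
    result = canonicalize_job_spec(spec)
    assert [imp["id"] for imp in result["import"]] == ["a", "b"]
    assert result["import"][1] == {"id": "b", "ref": "B",
                                   "in_env": True, "before": []}
    assert result["env"] == {} and result["script"] == []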
def substitute(x, env):
"""
Substitute environment variable into a string following the rules
documented above.
Raises KeyError if a referenced variable is not present in env
(``$$`` always raises KeyError)
"""
if '$$' in x:
# it's the escape character of string.Template, hence the special case
raise KeyError('$$ is not allowed (no variable can be named $): %s' % x)
x = x.replace(r'\$', '$$')
return Template(x).substitute(env)
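# Hedged illustration of the substitution rules (hypothetical values): a
# referenced variable is expanded, an escaped dollar survives literally,
# and a missing variable raises KeyError.
def _example_substitute():
    env = {"FOO": "bar"}
    assert substitute("$FOO/baz", env) == "bar/baz"
    assert substitute(r"\$FOO", env) == "$FOO"   # escaped dollar kept literal
    try:
        substitute("$MISSING", env)
    except KeyError:
        pass  # referenced-but-missing variable raises KeyError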
def get_imports_env(build_store, virtuals, imports):
"""
Sets up environment variables given by the 'import' section
of the job spec (see above).
Parameters
----------
build_store : BuildStore object
Build store to look up artifacts in
virtuals : dict
Maps virtual artifact IDs (including "virtual:" prefix) to concrete
artifact IDs.
imports : list
'import' section of job spec document as documented above.
Returns
-------
env : dict
Environment variables to set containing variables for the dependency
artifacts
"""
# do a topological sort of imports
imports = stable_topological_sort(imports)
env = {}
# Build the environment variables due to imports, and complain if
# any dependency is not built
PATH = []
HDIST_CFLAGS = []
HDIST_LDFLAGS = []
HDIST_IMPORT = []
for dep in imports:
dep_ref = dep['ref']
dep_id = dep['id']
HDIST_IMPORT.append(dep_id)
# Resolutions of virtual imports should be provided by the user
# at the time of build
if dep_id.startswith('virtual:'):
try:
dep_id = virtuals[dep_id]
except KeyError:
raise ValueError('build spec contained a virtual dependency "%s" that was not '
'provided' % dep_id)
dep_dir = build_store.resolve(dep_id)
if dep_dir is None:
raise InvalidJobSpecError('Dependency "%s"="%s" not already built, please build it first' %
(dep_ref, dep_id))
if dep_ref is not None:
env[dep_ref] = dep_dir
env['%s_ID' % dep_ref] = dep_id
if dep['in_env']:
bin_dir = pjoin(dep_dir, 'bin')
if os.path.exists(bin_dir):
PATH.append(bin_dir)
libdirs = glob(pjoin(dep_dir, 'lib*'))
if len(libdirs) == 1:
HDIST_LDFLAGS.append('-L' + libdirs[0])
HDIST_LDFLAGS.append('-Wl,-R,' + libdirs[0])
elif len(libdirs) > 1:
raise InvalidJobSpecError('in_hdist_compiler_paths set for artifact %s with '
'more than one library dir (%r)' % (dep_id, libdirs))
incdir = pjoin(dep_dir, 'include')
if os.path.exists(incdir):
HDIST_CFLAGS.append('-I' + incdir)
env['PATH'] = os.path.pathsep.join(PATH)
env['HDIST_CFLAGS'] = ' '.join(HDIST_CFLAGS)
env['HDIST_LDFLAGS'] = ' '.join(HDIST_LDFLAGS)
env['HDIST_IMPORT'] = ' '.join(HDIST_IMPORT)
return env
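# Sketch of the variables produced for a single import (illustrative only;
# ``_FakeBuildStore`` is a made-up stand-in whose only assumed interface is
# ``resolve(artifact_id) -> path or None``, matching the call above).
class _FakeBuildStore(object):
    def resolve(self, artifact_id):
        return '/store/' + artifact_id.replace('/', '-')

def _example_get_imports_env():
    imports = [{"id": "zlib/abc123", "ref": "ZLIB", "in_env": True,
                "before": []}]
    env = get_imports_env(_FakeBuildStore(), {}, imports)
    assert env["ZLIB"] == "/store/zlib-abc123"
    assert env["ZLIB_ID"] == "zlib/abc123"
    assert env["HDIST_IMPORT"] == "zlib/abc123"
    # PATH / HDIST_CFLAGS / HDIST_LDFLAGS stay empty here because the fake
    # artifact directory has no bin/, lib*/ or include/ subdirectories.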
def pack_virtuals_envvar(virtuals):
return ';'.join('%s=%s' % tup for tup in sorted(virtuals.items()))
def unpack_virtuals_envvar(x):
if not x:
return {}
else:
return dict(tuple(tup.split('=')) for tup in x.split(';'))
def stable_topological_sort(problem):
"""Topologically sort items with dependencies
The concrete algorithm is to first identify all roots, then
do a DFS. Children are visited in the order they appear in
the input. This ensures that there is a predictable output
for every input. If no constraints are given the output order
is the same as the input order.
The items to sort must be hashable and unique.
Parameters
----------
problem : list of dict(id=..., before=..., ...)
Each object is a dictionary which is preserved to the output.
The `id` key is each objects identity, and the `before` is a list
of ids of objects that a given object must come before in
the ordered output.
Returns
-------
solution : list
The input `problem` in a possibly different order
"""
# record order to use for sorting `before`
id_to_obj = {}
order = {}
for i, obj in enumerate(problem):
if obj['id'] in order:
raise ValueError('%r appears twice in input' % obj['id'])
order[obj['id']] = i
id_to_obj[obj['id']] = obj
# turn into dict-based graph, and find the roots
graph = {}
roots = set(order.keys())
for obj in problem:
graph[obj['id']] = sorted(obj['before'], key=order.__getitem__)
roots.difference_update(obj['before'])
result = []
def dfs(obj_id):
if obj_id not in result:
result.append(obj_id)
for child in graph[obj_id]:
dfs(child)
for obj_id in sorted(roots, key=order.__getitem__):
dfs(obj_id)
# cycles will have been left entirely out at this point
if len(result) != len(problem):
raise ValueError('provided constraints form a graph with cycles')
return [id_to_obj[obj_id] for obj_id in result]
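# A small illustration (made-up ids): "a" must come before "b", so the two
# items are reordered; unconstrained input keeps its original order.
def _example_stable_topological_sort():
    problem = [{"id": "b", "before": []}, {"id": "a", "before": ["b"]}]
    assert [o["id"] for o in stable_topological_sort(problem)] == ["a", "b"]
    free = [{"id": "x", "before": []}, {"id": "y", "before": []}]
    assert [o["id"] for o in stable_topological_sort(free)] == ["x", "y"]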
class ScriptExecution(object):
"""
Class for maintaining state (in particular logging pipes) while
executing script. Note that the environment is passed around as
parameters instead.
Executing :meth:`run` multiple times amounts to executing
different variable scopes (but with same logging pipes set up).
Parameters
----------
logger : Logger
rpc_dir : str
A temporary directory on a local filesystem. Currently used for creating
pipes with the "hdist logpipe" command.
"""
def __init__(self, logger):
self.logger = logger
self.log_fifo_filenames = {}
self.rpc_dir = tempfile.mkdtemp(prefix='hdist-sandbox-')
def close(self):
"""Removes log FIFOs; should always be called when one is done
"""
shutil.rmtree(self.rpc_dir)
def run(self, script, env, cwd):
"""Executes script, given as the 'script' part of the job spec.
Parameters
----------
script : document
The 'script' part of the job spec
env : dict
The starting process environment
cwd : str
Working directory
Returns
-------
out_env : dict
The environment as modified by the script.
"""
env = dict(env)
for script_line in script:
if not isinstance(script_line, list):
raise TypeError("expected a list but got %r: %r" % (type(script_line), script_line))
if len(script_line) == 0:
continue
if isinstance(script_line[0], list):
if any(not isinstance(x, list) for x in script_line):
raise ValueError("mixing list and str at same level in script")
# sub-scope; recurse and discard the modified environment
self.run(script_line, env, cwd)
else:
cmd = script_line[0]
silent = cmd.startswith('@')
if silent:
cmd = cmd[1:]
args = [substitute(x, env) for x in script_line[1:]]
if '=$(' in cmd:
# a=$(command)
varname, cmd = cmd.split('=$(')
if args[-1] != ')':
raise ValueError("opens with $( but no closing ): %r" % script_line)
del args[-1]
cmd = substitute(cmd, env)
stdout = StringIO()
self.run_command([cmd] + args, env, cwd, stdout_to=stdout, silent=silent)
env[varname] = stdout.getvalue().strip()
elif '=' in cmd:
# VAR=value
varname, value = cmd.split('=')
if args:
raise ValueError('assignment takes no extra arguments')
env[varname] = substitute(value, env)
elif '>' in cmd:
# program>out
cmd, stdout_filename = cmd.split('>')
cmd = substitute(cmd, env)
stdout_filename = substitute(stdout_filename, env)
if not os.path.isabs(stdout_filename):
stdout_filename = pjoin(cwd, stdout_filename)
stdout_filename = os.path.realpath(stdout_filename)
if stdout_filename.startswith(self.rpc_dir):
raise NotImplementedError("Cannot currently use stream re-direction to write to "
"a log-pipe (doing the write from a "
"sub-process is OK)")
stdout = open(stdout_filename, 'a')
try:
self.run_command([cmd] + args, env, cwd, stdout_to=stdout, silent=silent)
finally:
stdout.close()
elif cmd == 'cd':
# cd command just affects cwd on this scope
if len(args) != 1:
raise ValueError("wrong number of arguments to cd")
cwd = os.path.realpath(pjoin(cwd, args[0]))
else:
# program
cmd = substitute(cmd, env)
self.run_command([cmd] + args, env, cwd, silent=silent)