a namespace
# Apply namespace from nsmap
colonoffset=attrname.find(':')
namespaceuri=namespaces[attrname[:colonoffset]]
tag.attrib["{%s}%s" % (namespaceuri,attrname[(colonoffset+1):])]=value
pass
else :
tag.attrib[attrname]=value
pass
self.modified=True;
# if self.autoflush:
# self.flush()
# pass
pass
def remattr(self,tag,attrname,namespaces=None) :
"""Remove an attribute of an element.
Use namespace prefixes as usual.
tag: The element itself, or path to it from the main tag
attrname: Name of attribute to remove
"""
if isinstance(tag,basestring):
tag=self.find(tag,namespaces=namespaces);
pass
self.element_in_doc(tag)
if namespaces is not None:
# merge namespaces dictionaries
namespaces=dict(list(self.namespaces.items())+list(namespaces.items()))
pass
else :
namespaces=self.namespaces
pass
if ":" in attrname: # colon in attrname means this is in a namespace
# Apply namespace from nsmap
colonoffset=attrname.find(':')
namespaceuri=namespaces[attrname[:colonoffset]]
ETattrname="{%s}%s" % (namespaceuri,attrname[(colonoffset+1):])
if ETattrname in tag.attrib:
del tag.attrib[ETattrname]
pass
else:
raise ValueError("Attempt to remove nonexistent attribute %s from element" % (attrname))
pass
else :
if attrname in tag.attrib:
del tag.attrib[attrname]
pass
else:
raise ValueError("Attempt to remove nonexistent attribute %s from element" % (attrname))
pass
self.modified=True;
# if self.autoflush:
# self.flush()
# pass
pass
def removeelement(self,element):
# Remove the specified element.
self.element_in_doc(element)
provenance.element_to_be_removed(self,element)
element.getparent().remove(element)
pass
def listattrs(self,element,namespaces=None,noprovenance=False):
"""List the attributes of the specified element or path
Use namespace prefixes as usual. Any attributes in unknown
namespaces will be ignored
element: The element itself, or path to it from the main tag,
or None to get attributes of the main tag
namespaces: Additional namespaces for attribute evaluation
returns: list of attribute names with prefixes
"""
if isinstance(element,basestring):
element=self.find(element);
pass
if element is None:
element=self.doc.getroot()
pass
if not noprovenance:
provenance.xmldocelementaccessed(self,element)
pass
self.element_in_doc(element)
nsdict=copy.copy(self.namespaces)
if namespaces is not None:
nsdict.update(namespaces)
pass
reverse_nsdict = { nsurl: nspre for (nspre,nsurl) in nsdict.items() }
attrlist = []
for attrname in element.attrib:
if attrname[0]=='{': # prefixed namespace
closebraceoffset = attrname.find('}')
if attrname[1:closebraceoffset] in reverse_nsdict:
# Found suitable prefix
prefixedattrname = reverse_nsdict[attrname[1:closebraceoffset]] + ":" + attrname[(closebraceoffset+1):]
pass
else:
# Did not find suitable prefix -- ignore this attribute
continue
pass
else:
# No namespace
prefixedattrname = attrname
pass
attrlist.append(prefixedattrname)
pass
return attrlist
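# Illustrative note (not in the original source): lxml stores namespaced
# attribute names in Clark notation, e.g. "{http://purl.org/dc/elements/1.1/}lang";
# the reverse_nsdict lookup above maps that URI back to a registered prefix so
# the method can return "dc:lang", and attributes whose namespace URI has no
# registered prefix are silently skipped.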
def getattr(self,tag,attrname,default=IndexError("Attribute not found"),namespaces=None,noprovenance=False) :
"""Get the attribute of the specified element or path
Use namespace prefixes as usual.
tag: The element itself, or path to it from the main tag,
or None to get attributes of the main tag
attrname: Name of attribute to get
default: Default value of the attribute to return. If
this is not provided, IndexError
will be raised.
namespaces: Additional namespaces for attribute evaluation
"""
if isinstance(tag,basestring):
tag=self.find(tag);
pass
if tag is None:
tag=self.doc.getroot()
pass
if not noprovenance:
provenance.xmldocelementaccessed(self,tag)
pass
self.element_in_doc(tag)
if ":" in attrname: # colon in attrname means this is in a namespace
# Apply namespace from nsmap
colonoffset=attrname.find(':')
namespaceuri=self.nsmap[attrname[:colonoffset]]
fullattrname="{%s}%s" % (namespaceuri,attrname[(colonoffset+1):])
pass
else :
fullattrname=attrname
pass
if fullattrname in tag.attrib:
return tag.attrib[fullattrname]
elif isinstance(default,BaseException):
raise default
else:
return default
pass
def hasattr(self,tag,attrname,noprovenance=False) :
"""Check if the attribute of the specified element or path
exists. Use namespace prefixes as usual.
tag: The element itself, or path to it from the main tag,
or None to reference the main tag
attrname: Name of attribute to check the existence of
"""
if isinstance(tag,basestring):
tag=self.find(tag);
pass
if tag is None:
tag=self.doc.getroot()
pass
if not noprovenance:
provenance.xmldocelementaccessed(self,tag)
pass
self.element_in_doc(tag)
if ":" in attrname: # colon in attrname means this is in a namespace
# Apply namespace from nsmap
colonoffset=attrname.find(':')
namespaceuri=self.nsmap[attrname[:colonoffset]]
return ("{%s}%s" % (namespaceuri,attrname[(colonoffset+1):])) in tag.attrib
else :
return attrname in tag.attrib
pass
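# Illustrative sketch (not in the original source): how the prefix-based
# attribute helpers above combine. The element path "dc:summary", the "dc"
# prefix and the attribute names are hypothetical and assume the prefix is
# registered in the document's namespace map (self.namespaces):
#
#   if doc.hasattr("dc:summary", "dc:lang"):
#       lang = doc.getattr("dc:summary", "dc:lang", default="en")
#   print(doc.listattrs("dc:summary"))    # e.g. ['dc:lang']
#   doc.remattr("dc:summary", "dc:lang")  # raises ValueError if absent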
def _flush(self,rolock=False,rwlock=False,ok_to_be_unlocked=False) :
"""Flush file to disk, whether or not
any explicit changes have been made.
Creates backups according to num_backups
If rwlock is set it creates a low level read/write lock, leaves
this lock set, and assigns self.lockfd. Does not adjust or pay attention
to lock counts
"""
# print "Flush: self._filename=%s" % (self._filename)
if self.debug:
if self.debug_last_serialized is not None and not self.modified:
debugdoc=self.doc
if debugdoc is None:
debugdoc=self.olddoc
pass
if debugdoc is not None and etree.tostring(debugdoc,encoding="utf-8") != self.debug_last_serialized:
raise ValueError("Document content has changed without modified flag being set")
pass
pass
if self.filehref is not None:
if self.readonly:
raise IOError('xmldoc: attempt to flush in readonly mode')
if self.use_locking and self.lockfd < 0 and self.filehref is not None and not(rolock) and not(rwlock) and not(ok_to_be_unlocked):
sys.stderr.write("flush() when not locked!\n")
traceback.print_stack()
pass
lockfd=-1
lockfh=None
if os.name=="posix" and rolock or rwlock :
# This stuff is to support rename-based backups, which we don't do on non-POSIX platforms (NT)
assert(self.lockfd < 0)
#assert(os.name=="posix")
try :
lockfd=os.open(self._filename,os.O_RDONLY)
if rwlock:
self._lock_rw(lockfd) # pass ownership of lockfd
pass
else :
self._lock_ro(lockfd) # pass ownership of lockfd
pass
pass
except OSError:
# can not lock if file does not exist
pass
pass
# Check if we have something to write!
if self.doc is None and ( not(ok_to_be_unlocked) or self.olddoc is None):
raise ValueError("No document available to write!")
# flush changes to disk
(filenamepath,filenamefile)=os.path.split(self._filename)
# save backup first
if os.name=="posix":
for baknum in range(self.num_backups,0,-1):
bakname=os.path.join(filenamepath,"."+filenamefile+(".bak%d" % (baknum)))
nextbakname=os.path.join(filenamepath,"."+filenamefile+(".bak%d" % (baknum+1)))
if baknum==self.num_backups and os.path.exists(bakname):
try :
os.remove(bakname)
pass
except :
(exctype,value)=sys.exc_info()[:2]
sys.stderr.write("%s: %s removing old backup %s\n" % (unicode(exctype.__name__),unicode(value),bakname))
pass
pass
elif os.path.exists(bakname):
try:
os.rename(bakname,nextbakname)
pass
except :
(exctype,value)=sys.exc_info()[:2]
sys.stderr.write("%s: %s renaming old backup %s to %s\n" % (unicode(exctype.__name__),unicode(value),bakname,nextbakname))
pass
pass
pass
bakname=os.path.join(filenamepath,"."+filenamefile+(".bak1"))
if self.num_backups > 0 and os.path.exists(self._filename):
try :
shutil.copyfile(self._filename,bakname);
pass
except :
(exctype,value)=sys.exc_info()[:2]
sys.stderr.write("%s: %s renaming %s to %s to save as backup\n" % (unicode(exctype.__name__),unicode(value),self._filename,bakname))
pass
pass
pass
# put temporary SIGINT handler in place that
# ignores during critical writing code
gotsigints=[0] # Storage for counter of how many SIGINTS we got
def sigintholdoff(signalnum,stackframe): # signal handler
gotsigints[0]+=1
pass
oldsiginthandler=None
try:
oldsiginthandler=signal.signal(signal.SIGINT,sigintholdoff)
pass
except ValueError:
sys.stderr.write("xmldoc _flush() cannot hold off SIGINT for critical output section (not running in main thread?)\n")
pass
if os.name=="nt" and self.lockfd >= 0:
# reuse file handle
OutFH=self.lockfh
OutFH.seek(0)
OutFH.truncate()
reused_fh = True
pass
else:
OutFH=open(self._filename,"wb");
reused_fh = False
pass
if (rolock or rwlock) and lockfd < 0:
# try again to lock
if os.name=="posix":
lockfd=os.dup(OutFH.fileno())
pass
else:
lockfd=OutFH.fileno()
pass
if rwlock:
self._lock_rw(lockfd,OutFH) # pass ownership of dup'd file descriptor
pass
else:
# Shouldn't this be dependent on the rolock parameter??
self._lock_ro(lockfd,OutFH) # pass ownership of dup'd file descriptor
pass
pass
if self.doc is None and ok_to_be_unlocked:
# if we are unlocked and we don't have a current document, use olddoc
self.olddoc.write(OutFH,encoding='utf-8',pretty_print=True,xml_declaration=True)
pass
else :
self.doc.write(OutFH,encoding='utf-8',pretty_print=True,xml_declaration=True)
pass
#if os.name=="posix": # Close non-lock copy
if not reused_fh:
OutFH.close();
pass
if self.lockfd >= 0:
# if locked, save mtime, etc.
self.lastfileinfo=fileinfo(self.lockfd)
pass
else:
self.lastfileinfo=None
pass
# put old SIGINT handler back in place
if oldsiginthandler is not None:
try:
signal.signal(signal.SIGINT,oldsiginthandler)
pass
except ValueError:
pass
if gotsigints[0] > 0:
raise KeyboardInterrupt("Deferred during xmldoc write")
if self.debug:
self.debug_last_serialized=etree.tostring(self.doc,encoding="utf-8")
pass
pass
self.modified=False
pass
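# Illustrative note (not in the original source): on POSIX the rotation logic
# in _flush() keeps hidden backup siblings of the target file. With
# num_backups=2 and a file named data.xml, repeated flushes maintain roughly:
#
#   data.xml        <- contents of the latest flush
#   .data.xml.bak1  <- the previous flush
#   .data.xml.bak2  <- the flush before that (the oldest copy is deleted)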
def __lock_ro(self):
# super-low-level file locking
if os.name=='nt':
hfile=win32file._get_osfhandle(self.lockfd)
flags=0
win32file.LockFileEx(hfile, flags, 0, -0x10000, pwt__overlapped)
pass
else:
fcntl.flock(self.lockfd,fcntl.LOCK_SH)
pass
pass
def _lock_ro(self,fd,fh):
# low-level non recursive file locking
# NOTE: This takes ownership of fd and will close it on unlock
assert(self.lockfd==-1)
self.lockfd=fd
self.lockfh=fh
# !!!*** bug: Should handle receipt of signal
# during flock() call...
# fcntl.flock(self.lockfd,fcntl.LOCK_SH)
self.__lock_ro()
pass
def _lock_convert_rw_to_ro(self):
# !!!*** WARNING ***!!!! non-atomic !!!***
assert(self.lockfd > 0)
#fcntl.flock(self.lockfd,fcntl.LOCK_UN)
self.__unlock()
# Somebody else could modify the file right now!!!
# (according to man page we don't actually need to unlock it first
# .... should probably fix this)
# !!!*** bug: Should handle receipt of signal
# during flock() call...
#fcntl.flock(self.lockfd,fcntl.LOCK_SH)
self.__lock_ro()
pass
def __unlock(self):
# super-low-level file locking
if os.name=='nt':
hfile=win32file._get_osfhandle(self.lockfd)
win32file.UnlockFileEx(hfile, 0, -0x10000, pwt__overlapped)
pass
else:
fcntl.flock(self.lockfd,fcntl.LOCK_UN)
pass
pass
def _unlock_ro(self):
# low-level
"""Accounts forms module."""
from math import floor
from django import forms
from django.db.models import Q
from django.contrib.auth import forms as auth_forms
from django.contrib.auth import password_validation
from django.contrib.sites.shortcuts import get_current_site
from django.conf import settings
from django.core.mail import EmailMultiAlternatives
from django.template import loader
from django.urls import reverse
from django.utils import timezone
from django.utils.html import escape, html_safe, format_html
from django.utils.safestring import mark_safe
from django.utils.http import urlsafe_base64_encode
from django.utils.encoding import force_bytes
from django.utils.translation import gettext_lazy as _
from crispy_forms.helper import FormHelper
from crispy_forms.layout import (
ButtonHolder,
Field,
Fieldset,
HTML,
Button,
Layout,
Submit,
Div,
)
from accounts.models import AreaCode, PhoneNumber, NationalId, Profile, User, reduce_to_alphanum
from accounts.tokens import verify_token_generator, reset_token_generator
from admin_console.models import CityTown, Address
EIGHTEEN_YEARS_AGO = (timezone.now() - timezone.timedelta(days=((365*18)+5))
).date()
EIGHTEEN_YEARS_AGO_STR = EIGHTEEN_YEARS_AGO.strftime('%m/%d/%Y')
class NationalIdForm(forms.ModelForm):
national_id_type = forms.ChoiceField(label=_('ID Type'), required=True,
choices=NationalId.ID_TYPE_CHOICES)
national_id_number = forms.CharField(label=_('ID Number'), required=True)
class RegistrationForm(forms.ModelForm):
error_messages = {
'password_mismatch': _("The two password fields didn't match."),
}
first_names = forms.CharField(label=_('First names'), required=True)
last_names = forms.CharField(label=_('Last names'), required=True)
email = forms.EmailField(label=_('Email'), required=True)
username = forms.CharField(label=_('Username'), required=True)
accepted_tos = forms.BooleanField(label=_('Accept Terms of Service'),
required=True,)
birth_date = forms.DateField(label=_('Birth date'),
required=True,)
national_id_type = forms.ChoiceField(label=_('ID Type'), required=True,
choices=NationalId.ID_TYPE_CHOICES)
national_id_number = forms.CharField(label=_('ID Number'), required=True)
password1 = forms.CharField(
label=_("Password"),
strip=False,
widget=forms.PasswordInput(attrs={'class': 'form-control'}),
)
password2 = forms.CharField(
label=_("Password confirmation"),
widget=forms.PasswordInput(attrs={'class': 'form-control'}),
strip=False,
)
class Meta:
model = User
fields = (
'first_names',
'last_names',
'email',
'username',
'birth_date',
'national_id_type',
'national_id_number',
'accepted_tos',
'<PASSWORD>',
'<PASSWORD>',
)
def __init__(self, *args, **kwargs):
super(RegistrationForm, self).__init__(*args, **kwargs)
self.helper = FormHelper()
self.helper.form_id = 'id-registration-form'
self.helper.form_class = 'needs-validation'
self.helper.form_method = 'post'
self.helper.form_action = reverse('accounts:register')
self.helper.html5_required = False
self.helper.form_tag = True
self.helper.layout = Layout(
Fieldset(
_('{{ COMPANY_NAME }}: Register'),
Div(
Field('first_names', id='first-names',
wrapper_class='col-sm-12 col-md-6 mb-3'),
Field('last_names', id='last-names',
wrapper_class='col-sm-12 col-md-6 mb-3'),
css_class='form-row'
),
Div(
Field('email', id='email',
wrapper_class='col-sm-12 col-md-6 mb-3'),
Field('username', id='username',
wrapper_class='col-sm-12 col-md-6 mb-3'),
css_class='form-row'
),
Div(
Field(
'birth_date',
placeholder='mm/dd/yyyy',
css_class='datepicker',
wrapper_class='col-sm-12 col-md-3 mb-3'
),
Field(
'national_id_type',
css_class='dropdown',
wrapper_class='col-sm-12 col-md-3 mb-3'
),
Field(
'national_id_number',
placeholder='000-0000000-0',
wrapper_class='col-sm-12 col-md-6 mb-3'
),
css_class='form-row'
),
Div(
Field(
'<PASSWORD>',
id='<PASSWORD>',
data_toggle='tooltip',
data_placement='top',
data_html="true",
title=password_validation.password_validators_help_text_html(),
wrapper_class='col-sm-12 col-md-6 mb-3'
),
Field(
'<PASSWORD>',
id='<PASSWORD>',
data_toggle='tooltip',
data_placement='top',
data_html="true",
title=_('Enter the same password as before, for verification.'),
wrapper_class='col-sm-12 col-md-6 mb-3'
),
css_class='form-row'
),
Div(
Field(
'accepted_tos',
template='prettycheckbox_fill.html',
label=_('I have read and accepted the Terms of Service.'),
wrapper_class='mb-2',
),
css_class='form-row'
),
Div(
ButtonHolder(
Submit('submit', _('Submit'), css_class='button white')
),
css_class='form-row'
),
),
)
# pylint: disable=E1101
# if self._meta.model.USERNAME_FIELD in self.fields:
# self.fields[self._meta.model.USERNAME_FIELD].widget.attrs.update({'autofocus': True})
def _post_clean(self):
super()._post_clean()
# Validate the password after self.instance is updated with form data
# by super().
password = self.cleaned_data.get('password2')
if password:
try:
password_validation.validate_password(password, self.instance)
except forms.ValidationError as error:
self.add_error('password2', error)
def send_mail(self, subject_template_name, email_template_name,
context, from_email, to_email, html_email_template_name=None):
"""
Send a django.core.mail.EmailMultiAlternatives to `to_email`.
"""
subject = loader.render_to_string(subject_template_name, context)
# Email subject *must not* contain newlines
subject = ''.join(subject.splitlines())
body = loader.render_to_string(email_template_name, context)
email_message = EmailMultiAlternatives(subject, body, from_email, [to_email])
if html_email_template_name is not None:
html_email = loader.render_to_string(html_email_template_name, context)
email_message.attach_alternative(html_email, 'text/html')
# for testing only
email_message.send()
def get_inactive_users(self, username):
"""Given an username, return matching user(s) who should receive a reset.
This allows subclasses to more easily customize the default policies
that prevent inactive users and users with unusable passwords from
resetting their password.
"""
inactive_users = User.objects.filter(**{
'username__iexact': username,
'is_active': False,
})
return (u for u in inactive_users if u.has_usable_password())
def clean_password2(self):
password1 = self.cleaned_data.get("password1")
password2 = self.cleaned_data.get("password2")
if password1 and password2 and password1 != password2:
raise forms.ValidationError(
self.error_messages['password_mismatch'],
code='password_mismatch',
)
return password2
def clean_national_id_number(self):
natid = reduce_to_alphanum(
self.cleaned_data.get('national_id_number')
)
if NationalId.objects.filter(id_number=natid).exists():
raise forms.ValidationError(
_('This National ID already exists.'),
code='national_id_exists',
)
return natid
def clean_birth_date(self):
# cleaned_data = super(RegistrationForm, self).clean()
birth_date = self.cleaned_data.get('birth_date')
# import pdb; pdb.set_trace()
if settings.ENFORCE_MIN_AGE:
min_age = (
timezone.now() - timezone.timedelta(days=(
365*settings.MINIMUM_AGE_ALLOWED +
floor(settings.MINIMUM_AGE_ALLOWED / 4)
))
).date()
if min_age < birth_date:
raise forms.ValidationError(
_('You must be %(value)s years old to enroll.'),
code='age_restricted',
params={'value': settings.MINIMUM_AGE_ALLOWED},
)
return birth_date
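# Worked example (illustrative, not in the original source): with
# settings.MINIMUM_AGE_ALLOWED = 18 the cutoff computed above is
# 365*18 + floor(18/4) = 6570 + 4 = 6574 days before now, i.e. roughly 18
# years with a leap-day correction; any birth_date later than that cutoff
# fails validation with the 'age_restricted' error.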
# def clean(self, *args, **kwargs):
def save(self, domain_override=None,
subject_template_name='accounts/registration_subject.txt',
email_template_name='accounts/registration_email.html',
use_https=False, token_generator=verify_token_generator,
from_email=None, request=None, html_email_template_name=None,
extra_email_context=None, commit=True):
"""
Generate a one-use only link for resetting password and send it to the
user.
"""
self.full_clean()
if self.is_valid():
# import pdb; pdb.set_trace()
user = super(RegistrationForm, self).save(commit=False)
user.set_password(self.cleaned_data["password1"])
if commit:
user.save()
NationalId.objects.create(
id_type=self.cleaned_data['national_id_type'],
id_number=self.cleaned_data['national_id_number'],
user=user,
is_verified=False
)
email = self.cleaned_data['email']
username = self.cleaned_data['username']
for user in self.get_inactive_users(username):
if not domain_override:
current_site = get_current_site(request)
site_name = current_site.name
domain = current_site.domain
else:
site_name = domain = domain_override
context = {
'email': email,
'domain': domain,
'site_name': site_name,
'uid': urlsafe_base64_encode(force_bytes(user.pk)).decode(),
'user': user,
'token': token_generator.make_token(user),
'protocol': 'https' if use_https else 'http',
**(extra_email_context or {}),
}
self.send_mail(
subject_template_name, email_template_name, context, from_email,
email, html_email_template_name=html_email_template_name,
)
return None
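# Illustrative view-level sketch (not in the original source): one way this
# form is typically driven. The view name, template path and the
# render/redirect helpers (django.shortcuts) are assumptions, not part of
# this module:
#
#   from django.shortcuts import redirect, render
#
#   def register(request):
#       form = RegistrationForm(request.POST or None)
#       if request.method == 'POST' and form.is_valid():
#           form.save(request=request, use_https=request.is_secure())
#           return redirect('accounts:login')
#       return render(request, 'accounts/register.html', {'form': form})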
class LoginForm(auth_forms.AuthenticationForm):
"""This class merely modifies the widgets in the Django's
AuthenticationForm"""
def __init__(self, *args, **kwargs):
super(LoginForm, self).__init__(*args, **kwargs)
# pylint: disable=E1101
self.fields['username'].widget.attrs.update({'autofocus': True})
self.helper = FormHelper()
self.helper.form_id = 'id-login-form'
self.helper.form_method = 'post'
self.helper.form_action = reverse('accounts:login')
self.helper.html5_required = False
self.helper.form_tag = True
self.helper.layout = Layout(
Fieldset(
'',
Div(
Field('username', id='username',
wrapper_class='col-sm-12 mb-3'),
Field('password', id='last-names',
wrapper_class='col-sm-12 mb-3'),
HTML(_("""<p><a href="{% url 'accounts:password_reset' %}">Forgot your password?</a></p>""")),
css_class='form-row'
),
Div(
ButtonHolder(
Submit('submit', _('Login'), css_class='btn btn-primary col-sm-12'),
css_class='col-sm-12'
),
css_class='form-row'
),
),
)
class PasswordChangeForm(auth_forms.PasswordChangeForm):
"""This class merely modifies the widgets in the Django's
PasswordChangeForm"""
old_password = forms.CharField(
label=_("Old password"),
strip=False,
widget=forms.PasswordInput(attrs={'class': 'form-control'}),
)
new_password1 = forms.CharField(
label=_("New password"),
widget=forms.PasswordInput(attrs={
'class': 'form-control',
}),
strip=False,
)
new_password2 = forms.CharField(
label=_("New password confirmation"),
widget=forms.PasswordInput(attrs={'class': 'form-control'}),
strip=False,
help_text=_("Enter the same password as before, for verification."),
)
def __init__(self, *args, **kwargs):
super(PasswordChangeForm, self).__init__(*args, **kwargs)
self.helper = FormHelper()
self.helper.form_id = 'id-password-change-form'
self.helper.form_method = 'post'
self.helper.form_action = reverse('accounts:password_change')
self.helper.html5_required = False
self.helper.form_tag = True
self.helper.layout = Layout(
Fieldset(
'',
Div(
Field('old_password', id='old-password',
wrapper_class='col-sm-12 mb-3'),
Field(
'new_password1',
id='new-password',
data_toggle='tooltip',
data_placement='top',
data_html="true",
title=password_validation.password_validators_help_text_html(),
wrapper_class='col-sm-12 mb-3'
),
Field('new_password2', id='new-password-confirm',
wrapper_class='col-sm-12 mb-3'),
css_class='form-row'
),
Div(
ButtonHolder(
Submit('submit', _('Submit'), css_class='btn btn-primary col-sm-12'),
Button('cancel', _('Cancel'), css_class='btn btn-default col-sm-12 mt-1'),
css_class='col-sm-12'
),
css_class='form-row'
),
),
)
class PasswordResetForm(forms.Form):
"""Own implementation of django's PasswordResetForm."""
email_or_username = forms.CharField(label=_('Email or username'),
max_length=254)
def __init__(self, *args, **kwargs):
super(PasswordResetForm, self).__init__(*args, **kwargs)
self.helper = FormHelper()
self.helper.form_id = 'id-password-reset-form'
self.helper.form_method = 'post'
self.helper.form_action = reverse('accounts:password_reset')
self.helper.html5_required = False
self.helper.form_tag = True
self.helper.layout = Layout(
Fieldset(
'',
Div(
Field('email_or_username', id='email',
wrapper_class='col-sm-12 mb-3'),
css_class='form-row'
),
Div(
ButtonHolder(
Submit('submit', _('Submit'), css_class='btn btn-primary col-sm-12'),
css_class='col-sm-12'
),
css_class='form-row'
),
),
)
def send_mail(self, subject_template_name, email_template_name,
context, from_email, to_email, html_email_template_name=None):
"""
Send a django.core.mail.EmailMultiAlternatives to `to_email`.
"""
subject = loader.render_to_string(subject_template_name, context)
# Email subject *must not* contain newlines
subject = ''.join(subject.splitlines())
body = loader.render_to_string(email_template_name, context)
email_message = EmailMultiAlternatives(subject, body, from_email, [to_email])
if html_email_template_name is not None:
html_email = loader.render_to_string(html_email_template_name, context)
email_message.attach_alternative(html_email, 'text/html')
email_message.send()
def get_active_users(self, email_or_username):
"""Given an email or username, return matching user(s) who should
receive a reset.
"""
active_users = User.objects.filter(
Q(username=email_or_username) |
Q(email=email_or_username),
Q(is_active=True)
)
return [u for u in active_users if u.has_usable_password()]
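# Illustrative note (not in the original source): the Q objects above OR the
# username and email lookups, so a call such as
# self.get_active_users('alice') returns active users whose username or
# email exactly equals 'alice' (case-sensitive, unlike the __iexact lookup
# used by RegistrationForm.get_inactive_users).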
def save(self, domain_override=None,
subject_template_name='registration/password_reset_subject.txt',
email_template_name='registration/password_reset_email.html',
use_https=False, token_generator=reset_token_generator,
from_email=None, request=None, html_email_template_name=None,
extra_email_context=None):
"""
Generate a one-use only link for resetting password and send it to the
user.
"""
self.full_clean()
if self.is_valid():
email_or_username = self.cleaned_data["email_or_username"]
for user in self.get_active_users(email_or_username):
if not domain_override:
current_site = get_current_site(request)
site_name = current_site.name
domain = current_site.domain
else:
site_name = domain = domain_override
context = {
'email': user.email,
'domain': domain,
'site_name': site_name,
'uid': urlsafe_base64_encode(force_bytes(user.pk)).decode(),
'user': user,
'token': token_generator.make_token(user),
'protocol': 'https' if use_https else 'http',
**(extra_email_context or {}),
}
self.send_mail(
subject_template_name, email_template_name, context, from_email,
user.email, html_email_template_name=html_email_template_name,
)
return None
class PasswordSetForm(auth_forms.SetPasswordForm):
"""
This class merely modifies the widgets in the Django's
SetPasswordForm. Used after the user follows the link sent after
the password reset process is initiated.
"""
new_password1 = forms.CharField(
label=_("New password"),
widget=forms.PasswordInput(attrs={
'class': 'form-control',
}),
strip=False,
)
def __init__(self, *args, **kwargs):
super(PasswordSetForm, self).__init__(*args, **kwargs)
self.helper = FormHelper()
self.helper.form_id = 'id-password-set-form'
self.helper.form_method = 'post'
self.helper.html5_required = False
self.helper.form_tag = True
self.helper.layout = Layout(
Fieldset(
'',
Div(
Field(
'new_password1',
id='new-password',
data_toggle='tooltip',
data_placement='bottom',
data_html="true",
title=password_validation.password_validators_help_text_html(),
wrapper_class='col-sm-12 mb-3'
),
Field('new_password2', id='new-password-confirm',
wrapper_class='col-sm-12 mb-3'),
css_class='form-row'
),
Div(
ButtonHolder(
Submit('submit', _('Change my password'), css_class='btn btn-primary col-sm-6'),
css_class='col-sm-12'
),
css_class='form-row'
),
),
)
# class ConfirmPasswordForm(forms.Form):
# password = forms.CharField(
# label=_("Password"),
# strip=False,
# widget=forms.PasswordInput(attrs={'class': 'form-control'}),
# )
# def __init__(self, *args, **kwargs):
# super(ConfirmPasswordForm, self).__init__(*args, **kwargs)
# self.helper = FormHelper()
# self.helper.form_id = 'id-password-confirm-form'
# self.helper.form_method = 'post'
# self.helper.html5_required = False
# self.helper.form_tag = True
# self.helper.layout = Layout(
# Fieldset(
# HTML("user: {{user.username}}"),
# Div(
# Field('username', id='username',
# wrapper_class='col-sm-12 mb-3'),
# Field('password', id='password',
# wrapper_class='col-sm-12 mb-3'),
# css_class='form-row'
#
# Copyright 2004-2008 <NAME>.
# Distributed under the Boost Software License, Version 1.0. (See
# accompanying file LICENSE_1_0.txt or copy at
# http://www.boost.org/LICENSE_1_0.txt)
"""
This file contains indexing suite v2 code
"""
file_name = "indexing_suite/algorithms.hpp"
code = """// Header file algorithms.hpp
//
// Uniform interface layer for all containers.
//
// Copyright (c) 2003 <NAME>
//
// Use, modification and distribution is subject to the Boost Software
// License, Version 1.0. (See accompanying file LICENSE_1_0.txt or copy
// at http://www.boost.org/LICENSE_1_0.txt)
//
// History
// =======
// 2003/ 9/11 rmg File creation from suite_utils.hpp
// 2003/10/28 rmg Split container-specific versions into separate headers
// 2006/10/25 Roman Adding keys function to assoc_algorithms class
// 2008/12/08 Roman Change indexing suite layout
//
// $Id: algorithms.hpp,v 1.1.2.15 2004/02/08 18:57:42 raoulgough Exp $
//
#ifndef BOOST_PYTHON_INDEXING_ALGORITHMS_HPP
#define BOOST_PYTHON_INDEXING_ALGORITHMS_HPP
#include <indexing_suite/suite_utils.hpp>
#include <boost/type_traits.hpp>
#include <boost/python/errors.hpp>
#include <indexing_suite/int_slice_helper.hpp>
#include <indexing_suite/slice.hpp>
#include <boost/mpl/if.hpp>
#include <boost/limits.hpp>
#include <algorithm>
#include <functional>
#include <stdexcept>
#include <string>
#include <set>
namespace boost { namespace python { namespace indexing {
template<typename ContainerTraits, typename Ovr = detail::no_override>
class default_algorithms
{
typedef default_algorithms<ContainerTraits, Ovr> self_type;
typedef typename detail::maybe_override<self_type, Ovr>
::type most_derived;
public:
typedef ContainerTraits container_traits;
// Import typedefs from the container_traits for convenience
typedef typename ContainerTraits::container container;
typedef typename ContainerTraits::iterator iterator;
typedef typename ContainerTraits::const_iterator const_iterator;
typedef typename ContainerTraits::reference reference;
typedef typename ContainerTraits::size_type size_type;
typedef typename ContainerTraits::value_type value_type;
typedef typename ContainerTraits::value_param value_param;
typedef typename ContainerTraits::index_param index_param;
typedef typename ContainerTraits::key_param key_param;
// Defer selection of supported_methods to the ContainerTraits
// template argument. This makes sense because default_algorithms
// derives all of its other information from this argument, and
// can't decide which of the static member functions will
// instantiate successfully for the container. Obviously a
// custom-written Algorithms implementation could choose to
// provide the supported_methods directly.
BOOST_STATIC_CONSTANT(
method_set_type,
supported_methods = ContainerTraits::supported_methods);
static size_type size (container &);
static iterator find (container &, key_param);
static size_type get_index (container &, key_param);
static size_type count (container &, key_param);
static bool contains (container &, key_param);
static void reverse (container &);
static reference get (container &, index_param);
static void assign (container &, index_param, value_param);
static void insert (container &, index_param, value_param);
static void erase_one (container &, index_param);
static void erase_range(container &, index_param, index_param);
static void push_back (container &, value_param);
static void sort (container &);
// static void sort (container &, PyObject *);
static bool equal (container &, const boost::python::object & obj);
static iterator begin (container &c) { return c.begin(); }
static iterator end (container &c) { return c.end(); }
static const_iterator begin (const container &c) { return c.begin(); }
static const_iterator end (const container &c) { return c.end(); }
// Reasonable defaults for slice handling
typedef int_slice_helper<self_type, integer_slice> slice_helper;
static slice_helper make_slice_helper (container &c, slice const &);
// Default visit_container_class
template<typename PythonClass, typename Policy>
static void visit_container_class(
PythonClass &pyClass, Policy const &policy)
{
container_traits::visit_container_class (pyClass, policy);
}
#if BOOST_WORKAROUND(BOOST_MSVC, <= 1300)
// MSVC6 and 7.0 seem to complain about most_derived::bounds_check
// for an instantiation of list_algorithms.
public:
#else
private:
#endif
static size_type bounds_check(
container &, index_param, char const *msg,
bool one_past = false,
bool truncate = false);
// Throws std::out_of_range if necessary. If one_past is set, then
// indexes up to container.size() *inclusive* are allowed. If
// truncate is set, then out of bounds values are reset to the
// nearest in-bound value (and if none exists, throws an
// exception). If truncate is *not* set, then negative values index
// from the upper bound backwards and are bounds-checked.
};
/////////////////////////////////////////////////////////////////////////
// Base class for associative containers
/////////////////////////////////////////////////////////////////////////
template<typename ContainerTraits, typename Ovr = detail::no_override>
class assoc_algorithms
: public default_algorithms
<ContainerTraits,
BOOST_DEDUCED_TYPENAME detail::maybe_override
<assoc_algorithms<ContainerTraits, Ovr>, Ovr>
::type>
{
typedef assoc_algorithms<ContainerTraits, Ovr> self_type;
typedef typename detail::maybe_override<self_type, Ovr>
::type most_derived;
typedef default_algorithms<ContainerTraits, most_derived> Parent;
public:
typedef typename Parent::iterator iterator;
typedef typename Parent::size_type size_type;
typedef typename Parent::container container;
typedef typename Parent::reference reference;
typedef typename Parent::key_param key_param;
typedef typename Parent::value_param value_param;
typedef typename Parent::index_param index_param;
static reference get (container &, index_param);
// Use member functions for the following (hiding base class versions)
static void erase_one (container &, key_param);
static iterator find (container &, key_param);
static size_type count (container &, key_param);
static bool contains (container &, key_param);
// Default visit_container_class
template<typename PythonClass, typename Policy>
static void visit_container_class( PythonClass &pyClass, Policy const &policy)
{
ContainerTraits::visit_container_class (pyClass, policy);
}
protected:
static iterator find_or_throw (container &, index_param);
};
/////////////////////////////////////////////////////////////////////////
// Get the size of a container
/////////////////////////////////////////////////////////////////////////
template<typename ContainerTraits, typename Ovr>
BOOST_DEDUCED_TYPENAME default_algorithms<ContainerTraits, Ovr>::size_type
default_algorithms<ContainerTraits, Ovr>::size (container &c)
{
return c.size();
}
/////////////////////////////////////////////////////////////////////////
// Range check an index and throw out_of_range if necessary
/////////////////////////////////////////////////////////////////////////
template<typename ContainerTraits, typename Ovr>
BOOST_DEDUCED_TYPENAME default_algorithms<ContainerTraits, Ovr>::size_type
default_algorithms<ContainerTraits, Ovr>::bounds_check(
container &c,
index_param ix,
char const *msg,
bool one_past,
bool truncate)
{
size_type bound = most_derived::size(c) + (one_past ? 1 : 0);
size_type result;
if (truncate)
{
if (ix < 0)
{
result = 0;
}
else
{
result = ix;
if ((result >= bound) && (bound > 0))
{
result = bound - 1;
}
}
}
else if (ix < 0)
{
if (size_type(-ix) > bound)
{
throw std::out_of_range (msg);
}
result = bound + ix;
}
else
{
result = ix;
}
if (result >= bound)
{
throw std::out_of_range (msg);
}
return result;
}
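// Illustrative examples (not part of the original header): for a container
// c with most_derived::size(c) == 5,
//   bounds_check (c, 4, "get")               -> 4
//   bounds_check (c, -1, "get")              -> 4  (negative indexes count
//                                                   back from the end)
//   bounds_check (c, 5, "insert", true)      -> 5  (one_past admits size())
//   bounds_check (c, 7, "get", false, true)  -> 4  (truncate clamps in-bounds)
//   bounds_check (c, 5, "get")               -> throws std::out_of_range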
/////////////////////////////////////////////////////////////////////////
// Find an element in a container (std algorithm version)
/////////////////////////////////////////////////////////////////////////
template<typename ContainerTraits, typename Ovr>
BOOST_DEDUCED_TYPENAME default_algorithms<ContainerTraits, Ovr>::iterator
default_algorithms<ContainerTraits, Ovr>::find(
container &c, key_param key)
{
typedef typename container_traits::value_traits_type vtraits;
typedef typename vtraits::equal_to comparison;
return std::find_if(
most_derived::begin(c),
most_derived::end(c),
std::bind1st (comparison(), key));
}
/////////////////////////////////////////////////////////////////////////
// Find an element and return its index (std algorithm version)
/////////////////////////////////////////////////////////////////////////
template<typename ContainerTraits, typename Ovr>
BOOST_DEDUCED_TYPENAME default_algorithms<ContainerTraits, Ovr>::size_type
default_algorithms<ContainerTraits, Ovr>::get_index(
container &c, key_param key)
{
iterator found (most_derived::find (c, key));
if (found == most_derived::end(c))
{
PyErr_SetString(
PyExc_ValueError, "get_index: element not found");
boost::python::throw_error_already_set ();
}
iterator start (most_derived::begin (c));
return std::distance (start, found);
}
/////////////////////////////////////////////////////////////////////////
// Count occurrences of an element in a container (std algorithm version)
/////////////////////////////////////////////////////////////////////////
template<typename ContainerTraits, typename Ovr>
BOOST_DEDUCED_TYPENAME default_algorithms<ContainerTraits, Ovr>::size_type
default_algorithms<ContainerTraits, Ovr>::count(
container &c, key_param key)
{
typedef typename container_traits::value_traits_type vtraits;
typedef typename vtraits::equal_to comparison;
return std::count_if(
most_derived::begin(c),
most_derived::end(c),
std::bind1st (comparison(), key));
}
/////////////////////////////////////////////////////////////////////////
// Check whether a container contains the given element (std algo ver)
/////////////////////////////////////////////////////////////////////////
template<typename ContainerTraits, typename Ovr>
bool
default_algorithms<ContainerTraits, Ovr>::contains(
container &c, key_param key)
{
return most_derived::find (c, key) != most_derived::end(c);
}
/////////////////////////////////////////////////////////////////////////
// Index into a container (generic version)
/////////////////////////////////////////////////////////////////////////
template<typename ContainerTraits, typename Ovr>
BOOST_DEDUCED_TYPENAME default_algorithms<ContainerTraits, Ovr>::reference
default_algorithms<ContainerTraits, Ovr>::get(
container &c, index_param ix)
{
return c[most_derived::bounds_check (c, ix, "get")];
}
/////////////////////////////////////////////////////////////////////////
// Assign a value at a particular index (generic version)
/////////////////////////////////////////////////////////////////////////
template<typename ContainerTraits, typename Ovr>
void
default_algorithms<ContainerTraits, Ovr>::assign(
container &c, index_param ix, value_param val)
{
c[most_derived::bounds_check (c, ix, "assign")] = val;
}
/////////////////////////////////////////////////////////////////////////
// Insert at end of a container (generic version)
/////////////////////////////////////////////////////////////////////////
template<typename ContainerTraits, typename Ovr>
void
default_algorithms<ContainerTraits, Ovr>::push_back(
container &c, value_param v)
{
c.push_back (v);
}
/////////////////////////////////////////////////////////////////////////
// Insert at an index in the container (generic version)
/////////////////////////////////////////////////////////////////////////
template<typename ContainerTraits, typename Ovr>
void
default_algorithms<ContainerTraits, Ovr>::insert(
container &c, index_param i, value_param v)
{
iterator insert_pos (most_derived::begin(c));
// Index may range up to c.size() inclusive to allow inserting at end
std::advance(
insert_pos, most_derived::bounds_check (c, i, "insert", true, true));
c.insert (insert_pos, v);
}
/////////////////////////////////////////////////////////////////////////
// Erase between given indexes in the container (generic version)
/////////////////////////////////////////////////////////////////////////
template<typename ContainerTraits, typename Ovr>
void
default_algorithms<ContainerTraits, Ovr>::erase_range(
container &c, index_param from, index_param to)
{
iterator start (most_derived::begin(c));
iterator finish (most_derived::begin(c));
// Start index must be properly in bounds
std::advance
(start, most_derived::bounds_check (c, from, "erase_range (from)"));
// End index is one-past-the-end, so may range up to c.size() inclusive
std::advance
(finish, most_derived::bounds_check (c, to, "erase_range (to)", true));
c.erase (start, finish);
}
/////////////////////////////////////////////////////////////////////////
// Erase one element at the given index in the container (generic version)
/////////////////////////////////////////////////////////////////////////
template<typename ContainerTraits, typename Ovr>
void
default_algorithms<ContainerTraits, Ovr>::erase_one(
container &c, index_param ix)
{
iterator iter (most_derived::begin(c));
std::advance (iter, most_derived::bounds_check (c, ix, "erase_one"));
c.erase (iter);
}
/////////////////////////////////////////////////////////////////////////
// Reverse the contents of a container (std algorithm version)
/////////////////////////////////////////////////////////////////////////
template<typename ContainerTraits, typename Ovr>
void default_algorithms<ContainerTraits, Ovr>::reverse (container &c)
{
std::reverse (most_derived::begin(c), most_derived::end(c));
}
/////////////////////////////////////////////////////////////////////////
// Sort the contents of a container (std algorithm version)
/////////////////////////////////////////////////////////////////////////
template<typename ContainerTraits, typename Ovr>
void default_algorithms<ContainerTraits, Ovr>::sort (container &c)
{
typedef
is not fed.
shape: A `tf.TensorShape` or list of `ints`.
The (possibly partial) shape of the tensor.
name: A name for the operation (optional).
Returns:
A `Tensor`. Has the same type as `input`.
"""
_ctx = _context._context or _context.context()
tld = _ctx._thread_local_data
if tld.is_eager:
try:
_result = pywrap_tfe.TFE_Py_FastPathExecute(
_ctx._context_handle, tld.device_name, "PlaceholderWithDefault", name,
tld.op_callbacks, input, "shape", shape)
return _result
except _core._NotOkStatusException as e:
_ops.raise_from_not_ok_status(e, name)
except _core._FallbackException:
pass
try:
return placeholder_with_default_eager_fallback(
input, shape=shape, name=name, ctx=_ctx)
except _core._SymbolicException:
pass # Add nodes to the TensorFlow graph.
# Add nodes to the TensorFlow graph.
shape = _execute.make_shape(shape, "shape")
_, _, _op, _outputs = _op_def_library._apply_op_helper(
"PlaceholderWithDefault", input=input, shape=shape, name=name)
_result = _outputs[:]
if _execute.must_record_gradient():
_attrs = ("dtype", _op._get_attr_type("dtype"), "shape",
_op.get_attr("shape"))
_inputs_flat = _op.inputs
_execute.record_gradient(
"PlaceholderWithDefault", _inputs_flat, _attrs, _result)
_result, = _result
return _result
PlaceholderWithDefault = tf_export("raw_ops.PlaceholderWithDefault")(_ops.to_raw_op(placeholder_with_default))
def placeholder_with_default_eager_fallback(input, shape, name, ctx):
shape = _execute.make_shape(shape, "shape")
_attr_dtype, (input,) = _execute.args_to_matching_eager([input], ctx)
_inputs_flat = [input]
_attrs = ("dtype", _attr_dtype, "shape", shape)
_result = _execute.execute(b"PlaceholderWithDefault", 1,
inputs=_inputs_flat, attrs=_attrs, ctx=ctx,
name=name)
if _execute.must_record_gradient():
_execute.record_gradient(
"PlaceholderWithDefault", _inputs_flat, _attrs, _result)
_result, = _result
return _result
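# Illustrative usage (not part of the generated file): the public entry point
# is the raw-ops export registered above; in eager mode the default value is
# simply passed through.
#
#   import tensorflow as tf
#   out = tf.raw_ops.PlaceholderWithDefault(
#       input=tf.constant([1, 2, 3]), shape=[3])
#   # out holds [1, 2, 3] unless a feed overrides the placeholder (graph mode)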
def prevent_gradient(input, message="", name=None):
r"""An identity op that triggers an error if a gradient is requested.
When executed in a graph, this op outputs its input tensor as-is.
When building ops to compute gradients, the TensorFlow gradient system
will return an error when trying to lookup the gradient of this op,
because no gradient must ever be registered for this function. This
op exists to prevent subtle bugs from silently returning unimplemented
gradients in some corner cases.
Args:
input: A `Tensor`. any tensor.
message: An optional `string`. Defaults to `""`.
Will be printed in the error when anyone tries to differentiate
this operation.
name: A name for the operation (optional).
Returns:
A `Tensor`. Has the same type as `input`.
"""
_ctx = _context._context or _context.context()
tld = _ctx._thread_local_data
if tld.is_eager:
try:
_result = pywrap_tfe.TFE_Py_FastPathExecute(
_ctx._context_handle, tld.device_name, "PreventGradient", name,
tld.op_callbacks, input, "message", message)
return _result
except _core._NotOkStatusException as e:
_ops.raise_from_not_ok_status(e, name)
except _core._FallbackException:
pass
try:
return prevent_gradient_eager_fallback(
input, message=message, name=name, ctx=_ctx)
except _core._SymbolicException:
pass # Add nodes to the TensorFlow graph.
# Add nodes to the TensorFlow graph.
if message is None:
message = ""
message = _execute.make_str(message, "message")
_, _, _op, _outputs = _op_def_library._apply_op_helper(
"PreventGradient", input=input, message=message, name=name)
_result = _outputs[:]
if _execute.must_record_gradient():
_attrs = ("T", _op._get_attr_type("T"), "message",
_op.get_attr("message"))
_inputs_flat = _op.inputs
_execute.record_gradient(
"PreventGradient", _inputs_flat, _attrs, _result)
_result, = _result
return _result
PreventGradient = tf_export("raw_ops.PreventGradient")(_ops.to_raw_op(prevent_gradient))
def prevent_gradient_eager_fallback(input, message, name, ctx):
if message is None:
message = ""
message = _execute.make_str(message, "message")
_attr_T, (input,) = _execute.args_to_matching_eager([input], ctx)
_inputs_flat = [input]
_attrs = ("T", _attr_T, "message", message)
_result = _execute.execute(b"PreventGradient", 1, inputs=_inputs_flat,
attrs=_attrs, ctx=ctx, name=name)
if _execute.must_record_gradient():
_execute.record_gradient(
"PreventGradient", _inputs_flat, _attrs, _result)
_result, = _result
return _result
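# Illustrative usage (not part of the generated file): the op is an identity
# in the forward pass and only matters when a gradient is requested through it.
#
#   import tensorflow as tf
#   x = tf.constant([1.0, 2.0])
#   y = tf.raw_ops.PreventGradient(input=x, message="no gradient here")
#   # y has the same values as x; differentiating through y raises an error
#   # whose text includes the given message.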
def quantize_and_dequantize(input, signed_input=True, num_bits=8, range_given=False, input_min=0, input_max=0, name=None):
r"""Use QuantizeAndDequantizeV2 instead.
Args:
input: A `Tensor`. Must be one of the following types: `bfloat16`, `half`, `float32`, `float64`.
signed_input: An optional `bool`. Defaults to `True`.
num_bits: An optional `int`. Defaults to `8`.
range_given: An optional `bool`. Defaults to `False`.
input_min: An optional `float`. Defaults to `0`.
input_max: An optional `float`. Defaults to `0`.
name: A name for the operation (optional).
Returns:
A `Tensor`. Has the same type as `input`.
"""
_ctx = _context._context or _context.context()
tld = _ctx._thread_local_data
if tld.is_eager:
try:
_result = pywrap_tfe.TFE_Py_FastPathExecute(
_ctx._context_handle, tld.device_name, "QuantizeAndDequantize", name,
tld.op_callbacks, input, "signed_input", signed_input, "num_bits",
num_bits, "range_given", range_given, "input_min", input_min,
"input_max", input_max)
return _result
except _core._NotOkStatusException as e:
_ops.raise_from_not_ok_status(e, name)
except _core._FallbackException:
pass
try:
return quantize_and_dequantize_eager_fallback(
input, signed_input=signed_input, num_bits=num_bits,
range_given=range_given, input_min=input_min, input_max=input_max,
name=name, ctx=_ctx)
except _core._SymbolicException:
pass # Add nodes to the TensorFlow graph.
# Add nodes to the TensorFlow graph.
if signed_input is None:
signed_input = True
signed_input = _execute.make_bool(signed_input, "signed_input")
if num_bits is None:
num_bits = 8
num_bits = _execute.make_int(num_bits, "num_bits")
if range_given is None:
range_given = False
range_given = _execute.make_bool(range_given, "range_given")
if input_min is None:
input_min = 0
input_min = _execute.make_float(input_min, "input_min")
if input_max is None:
input_max = 0
input_max = _execute.make_float(input_max, "input_max")
_, _, _op, _outputs = _op_def_library._apply_op_helper(
"QuantizeAndDequantize", input=input, signed_input=signed_input,
num_bits=num_bits, range_given=range_given,
input_min=input_min, input_max=input_max,
name=name)
_result = _outputs[:]
if _execute.must_record_gradient():
_attrs = ("signed_input", _op._get_attr_bool("signed_input"), "num_bits",
_op._get_attr_int("num_bits"), "range_given",
_op._get_attr_bool("range_given"), "input_min",
_op.get_attr("input_min"), "input_max",
_op.get_attr("input_max"), "T", _op._get_attr_type("T"))
_inputs_flat = _op.inputs
_execute.record_gradient(
"QuantizeAndDequantize", _inputs_flat, _attrs, _result)
_result, = _result
return _result
QuantizeAndDequantize = tf_export("raw_ops.QuantizeAndDequantize")(_ops.to_raw_op(quantize_and_dequantize))
def quantize_and_dequantize_eager_fallback(input, signed_input, num_bits, range_given, input_min, input_max, name, ctx):
if signed_input is None:
signed_input = True
signed_input = _execute.make_bool(signed_input, "signed_input")
if num_bits is None:
num_bits = 8
num_bits = _execute.make_int(num_bits, "num_bits")
if range_given is None:
range_given = False
range_given = _execute.make_bool(range_given, "range_given")
if input_min is None:
input_min = 0
input_min = _execute.make_float(input_min, "input_min")
if input_max is None:
input_max = 0
input_max = _execute.make_float(input_max, "input_max")
_attr_T, (input,) = _execute.args_to_matching_eager([input], ctx)
_inputs_flat = [input]
_attrs = ("signed_input", signed_input, "num_bits", num_bits, "range_given",
range_given, "input_min", input_min, "input_max", input_max, "T", _attr_T)
_result = _execute.execute(b"QuantizeAndDequantize", 1, inputs=_inputs_flat,
attrs=_attrs, ctx=ctx, name=name)
if _execute.must_record_gradient():
_execute.record_gradient(
"QuantizeAndDequantize", _inputs_flat, _attrs, _result)
_result, = _result
return _result
def quantize_and_dequantize_v2(input, input_min, input_max, signed_input=True, num_bits=8, range_given=False, round_mode="HALF_TO_EVEN", narrow_range=False, axis=-1, name=None):
r"""Quantizes then dequantizes a tensor.
This op simulates the precision loss from the quantized forward pass by:
1. Quantizing the tensor to fixed point numbers, which should match the target
quantization method when it is used in inference.
2. Dequantizing it back to floating point numbers for the following ops, most
likely matmul.
There are different ways to quantize. This version uses only scaling, so 0.0
maps to 0.
From the specified 'num_bits' in the quantized output type, it determines
minimum and maximum representable quantized values.
e.g.
* [-128, 127] for signed, num_bits = 8, or
* [0, 255] for unsigned, num_bits = 8.
If range_given == False, the initial input_min, input_max will be determined
automatically as the minimum and maximum values in the input tensor, otherwise
the specified values of input_min, input_max are used.
Note: If the input_min, input_max are specified, they do not need to equal the
actual minimum and maximum values in the tensor. e.g. in some cases it may be
beneficial to specify these values such that the low probability extremes of the
input distribution are clipped.
This op determines the maximum scale_factor that would map the initial
[input_min, input_max] range to a range that lies within the representable
quantized range.
It determines the scale from one of input_min and input_max, then updates the
other one to maximize the representable range.
e.g.
* if the output is signed, num_bits = 8, [input_min, input_max] = [-10.0,
5.0]: it would use a scale_factor of -128 / -10.0 = 12.8 In this case, it
would update input_max to be 127 / 12.8 = 9.921875
* if the output is signed, num_bits = 8, [input_min, input_max] = [-10.0,
10.0]: it would use a scale_factor of 127 / 10.0 = 12.7 In this case, it
would update input_min to be 128.0 / 12.7 = -10.07874
* if the output is unsigned, input_min is forced to be 0, and only the
specified input_max is used.
After determining the scale_factor and updating the input range, it applies the
following to each value in the 'input' tensor.
output = round(clamp(value, input_min, input_max) * scale_factor) / scale_factor.
The above round function rounds the value based on the given round_mode.
Args:
input: A `Tensor`. Must be one of the following types: `bfloat16`, `half`, `float32`, `float64`.
Tensor to quantize and then dequantize.
input_min: A `Tensor`. Must have the same type as `input`.
If `range_given == True`, this specifies the minimum input value that needs to
be represented, otherwise it is determined from the min value of the `input`
tensor.
input_max: A `Tensor`. Must have the same type as `input`.
If `range_given == True`, this specifies the maximum input value that needs to
be represented, otherwise it is determined from the max value of the `input`
tensor.
signed_input: An optional `bool`. Defaults to `True`.
Whether the quantization is signed or unsigned.
<gh_stars>1-10
# -*- coding: utf-8 -*-
"""
A Gherkin parser written using pyparsing.
"""
import os
from collections import OrderedDict
from copy import copy
from gherkin.dialect import Dialect
from gherkin.errors import ParserError
from gherkin.parser import Parser
from gherkin.token_matcher import TokenMatcher
from aloe import strings
from aloe.exceptions import AloeSyntaxError
from aloe.utils import memoizedproperty
# Pylint can't figure out methods vs. properties and which classes are
# abstract
# pylint:disable=abstract-method
class LanguageTokenMatcher(TokenMatcher):
"""Gherkin 3 token matcher that always uses the given language."""
def __init__(self, dialect_name="en"):
self.actual_dialect_name = dialect_name
super().__init__(dialect_name=dialect_name)
def _change_dialect(self, dialect_name, location=None):
"""Force the dialect name given in the constructor."""
super()._change_dialect(self.actual_dialect_name, location=location)
def cell_values(row):
"""Extract cell values from a table header or row."""
return tuple(cell["value"] for cell in row["cells"])
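# Illustrative example (not in the original source): a parsed Gherkin row such
# as {"cells": [{"value": "apples"}, {"value": "oranges"}]} yields
# ("apples", "oranges").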
class Node(object):
"""
A base parse node.
"""
def __init__(self, parsed, filename=None):
"""Construct the node from Gherkin parse results."""
self.line = parsed["location"]["line"]
self.col = parsed["location"]["column"]
self.filename = filename
@property
def feature(self):
"""The feature this node ultimately belongs to."""
raise NotImplementedError
@property
def location(self):
"""Location as 'filename:line'"""
return "{filename}:{line}".format(filename=os.path.relpath(self.filename), line=self.line)
@property
def text(self):
"""The text for this node."""
raise NotImplementedError
indent = 0 # The indent to use when printing the node
def represented(self):
"""A representation of the node."""
result = " " * self.indent + self.text.strip()
return result
class Step(Node):
"""
A single statement within a test.
A :class:`Scenario` or :class:`Background` is composed of multiple
:class:`Step`.
"""
table = None
"""
A Gherkin table as an iterable of rows, themselves iterables of cells.
e.g.:
.. code-block:: gherkin
Then I have fruit:
| apples | oranges |
| 0 | 2 |
Becomes:
.. code-block:: python
(('apples', 'oranges'), ('0', '2'))
"""
multiline = None
"""
A Gherkin multiline string with the appropriate indenting removed.
.. code-block:: gherkin
Then I have poem:
\"\"\"
Glittering-Minded deathless Aphrodite,
I beg you, Zeus’s daughter, weaver of snares,
Don’t shatter my heart with fierce
Pain, goddess,
\"\"\"
"""
outline = None
"""
If this step is a part of an outline, the reference to the outline.
"""
def __init__(self, parsed, background=None, scenario=None, **kwargs):
super().__init__(parsed, **kwargs)
if background:
self.background = background
elif scenario:
self.scenario = scenario
else:
raise ValueError("Step must belong to either a scenario or a background.")
self.sentence = parsed["keyword"] + parsed["text"]
"""The sentence parsed for this step."""
try:
argument_type = parsed["argument"]["type"]
except KeyError:
argument_type = None
if argument_type == "DataTable":
self.table = tuple(cell_values(row) for row in parsed["argument"]["rows"])
elif argument_type == "DocString":
self.multiline = parsed["argument"]["content"]
@property
def text(self):
return self.sentence
def __str__(self):
return '<Step: "%s">' % self.sentence
def __repr__(self):
return str(self)
@property
def container(self):
"""The background or scenario that contains this step."""
try:
return self.background
except AttributeError:
return self.scenario
def parse_steps_from_string(self, string, **kwargs):
"""
Parse a number of steps, returns an iterable of :class:`Step`.
This is used by :func:`step.behave_as`.
"""
try:
self.scenario # pylint:disable=pointless-statement
container_text = "%s: scenario" % self.feature.dialect.scenario_keywords[0]
is_scenario = True
except AttributeError:
container_text = "%s: " % self.feature.dialect.background_keywords[0]
is_scenario = False
# Gherkin can't parse anything other than complete features
feature_string = """
# language: {feature.language}
{feature.keyword}: feature
{container_text}
{string}
""".format(
container_text=container_text, feature=self.feature, string=string
)
feature = self.feature.parse(string=feature_string, filename=self.filename)
if is_scenario:
return feature.scenarios[0].steps
else:
return feature.background.steps
@property
def feature(self):
"""
The :class:`Feature` this step is a part of.
"""
return self.container.feature
@memoizedproperty
def keys(self):
"""
Return the first row of a table if this statement contains one.
"""
if self.table:
return tuple(self.table[0])
else:
return ()
@memoizedproperty
def hashes(self):
"""
Return the table attached to the step as an iterable of hashes, where
the first row - the column headings - supplies keys for all the others.
e.g.:
.. code-block:: gherkin
Then I have fruit:
| apples | oranges |
| 0 | 2 |
Becomes:
.. code-block:: python
({
'apples': '0',
'oranges': '2',
},)
"""
if not self.table:
return ()
keys = self.keys
return tuple(dict(zip(keys, row)) for row in self.table[1:])
@memoizedproperty
def max_length(self):
"""
The maximum line width of the rendered step sentence and its table.
"""
return max(
0,
strings.get_terminal_width(self.represented(table=False, multiline=False)),
*[strings.get_terminal_width(line) for line in self.represent_table().splitlines()]
)
indent = 4
# pylint:disable=arguments-differ
def represented(self, table=True, multiline=True, color=str):
"""
Render the line.
"""
lines = [color(super().represented())]
if table and self.table:
lines.append(self.represent_table(cell_wrap=color))
if multiline and self.multiline:
lines.append(self.represent_multiline(string_wrap=color))
return "\n".join(lines)
# pylint:enable=arguments-differ
def represent_table(self, **kwargs):
"""
Render the table.
:param cell_wrap: color to use inside the table cells
"""
return strings.represent_table(self.table, indent=self.indent + 2, **kwargs)
def represent_multiline(self, string_wrap=str):
"""
Render the multiline.
:param string_wrap: color to use inside the string
"""
indent = self.indent + 2
lines = [" " * indent + '"""']
lines += [" " * indent + string_wrap(line) for line in self.multiline.splitlines()]
lines += [" " * indent + '"""']
return "\n".join(lines)
def resolve_substitutions(self, outline):
"""
Creates a copy of the step with any <variables> resolved.
"""
replaced = copy(self)
def replace_vars(string):
"""Replace all the variables in a string."""
for key, value in outline.items():
key = "<{key}>".format(key=key)
string = string.replace(key, value)
return string
replaced.sentence = replace_vars(self.sentence)
if self.multiline:
replaced.multiline = replace_vars(self.multiline)
if self.table:
replaced.table = tuple(tuple(replace_vars(cell) for cell in row) for row in self.table)
replaced.outline = outline
return replaced
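# Illustrative sketch (hypothetical values): for a step whose sentence is
# 'Given I have <count> apples' and an outline {'count': '3'}, the copy returned
# above has the sentence 'Given I have 3 apples'; table cells and multiline text
# containing '<count>' are substituted the same way.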
def step_keyword(self, kind):
"""
An appropriate keyword for a particular kind of step
(Given, When, Then) for the language the current step is written in.
"""
dialect = self.feature.dialect
keywords = {"given": dialect.given_keywords, "when": dialect.when_keywords, "then": dialect.then_keywords}[kind]
# Gherkin allows '*' as a keyword; skip it to be sure the keyword is
# specifically for the given kind
return next(keyword for keyword in keywords if not keyword.startswith("*"))
class StepContainer(Node):
"""A node containing steps, e.g. Feature:, Scenario:"""
step_class = Step
container_name = "container" # override in subclasses
@property
def feature(self):
return self._feature
def __init__(self, parsed, feature=None, filename=None, **kwargs):
super().__init__(parsed, filename=filename, **kwargs)
self._feature = feature
# Put a reference to the parent node into all the steps
parent_ref = {self.container_name: self}
self.steps = tuple(self.step_class(step, filename=filename, **parent_ref) for step in parsed["steps"])
indent = 2
class HeaderNode(Node):
"""A node with a header consisting of a keyword and a name."""
name_required = True
def __init__(self, parsed, **kwargs):
super().__init__(parsed, **kwargs)
self.keyword = parsed["keyword"]
self.name = parsed["name"].strip()
if self.name_required and self.name == "":
raise AloeSyntaxError(
self.filename,
"{line}:{col} {klass} must have a name".format(
line=self.line, col=self.col, klass=self.__class__.__name__
),
)
def __str__(self):
return '<{klass}: "{name}">'.format(klass=self.__class__.__name__, name=self.name)
def __repr__(self):
return str(self)
@property
def text(self):
"""The text for this block."""
return "{keyword}: {name}".format(keyword=self.keyword, name=self.name).strip()
class TaggedNode(Node):
"""A node with attached tags."""
def __init__(self, parsed, **kwargs):
super().__init__(parsed, **kwargs)
self._tags = tuple(tag["name"][1:] for tag in parsed["tags"])
@property
def tags(self):
"""
Tags for a feature.
Tags are applied to a feature using the appropriate Gherkin syntax:
.. code-block:: gherkin
@tag1 @tag2
Feature: Eat leaves
"""
return self._tags
def represent_tags(self):
"""
Render the tags of a tagged block.
"""
return " " * self.indent + " ".join("@%s" % tag for tag in self.tags)
class Background(HeaderNode, StepContainer):
"""The background of all :class:`Scenario` in a :class:`Feature`."""
container_name = "background"
name_required = False
class Outline(OrderedDict, Node):
"""An outline within a :class:`Scenario`."""
def __init__(self, keys, table_row, filename=None):
"""Construct the outline."""
# Extract values
OrderedDict.__init__(self, zip(keys, cell_values(table_row)))
# Store the file and line information
Node.__init__(self, table_row, filename=filename)
class Scenario(HeaderNode, TaggedNode, StepContainer):
"""A scenario within a :class:`Feature`."""
container_name = "scenario"
def __init__(self, parsed, **kwargs):
super().__init__(parsed, **kwargs)
# Build a list of outline hashes
# A single scenario can have multiple example blocks; the parsed
# result is a list of table tokens
self.outlines = ()
for example_table in parsed.get("examples", ()):
# the first row of the table is the column headings
keys = cell_values(example_table["tableHeader"])
self.outlines += tuple(Outline(keys, row) for row in example_table["tableBody"])
indent = 2
def represent_outlines(self):
"""
Render the outlines table.
"""
return strings.represent_table(self.outlines_table, indent=self.indent + 2)
@memoizedproperty
def max_length(self):
"""
The maximum horizontal width of the scenario header, its steps and its outline tables.
"""
return max(
0,
strings.get_terminal_width(self.represented()),
*(
[step.max_length for step in self.steps]
+ [strings.get_terminal_width(line) for line in self.represent_outlines().splitlines()]
)
)
@memoizedproperty
def outlines_table(self):
"""
Return the scenario outline examples as a table.
"""
# get the list of column headings
headings_dict = OrderedDict()
for outline in self.outlines:
headings_dict.update(outline)
headings = list(headings_dict.keys())
table =
# coding: utf-8
"""
Prisma Cloud Compute API
No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
The version of the OpenAPI document: 21.04.439
Generated by: https://openapi-generator.tech
"""
from __future__ import absolute_import
import re # noqa: F401
# python 2 and python 3 compatibility library
import six
from openapi_client.api_client import ApiClient
from openapi_client.exceptions import ( # noqa: F401
ApiTypeError,
ApiValueError
)
class HostsApi(object):
"""NOTE: This class is auto generated by OpenAPI Generator
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
def __init__(self, api_client=None):
if api_client is None:
api_client = ApiClient()
self.api_client = api_client
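# Usage sketch (hedged): host, auth and other client configuration is assumed to
# be done on the ApiClient passed in above; it is not shown here.
#
#   api = HostsApi()
#   api.api_v1_hosts_download_get(limit=50, compact=True)    # synchronous; returns None
#   thread = api.api_v1_hosts_download_get(async_req=True)   # asynchronous variant
#   thread.get()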
def api_v1_hosts_download_get(self, **kwargs): # noqa: E501
"""api_v1_hosts_download_get # noqa: E501
DownloadHosts downloads the hosts data # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.api_v1_hosts_download_get(async_req=True)
>>> result = thread.get()
:param offset: Offset from the start of the list from which to retrieve documents.
:type offset: int
:param limit: Number of documents to return.
:type limit: int
:param search: Search term.
:type search: str
:param sort: Key on which to sort.
:type sort: str
:param reverse: Sort order.
:type reverse: bool
:param collections: Scopes the query by collection.
:type collections: list[str]
:param account_ids: Scopes the query by account ID.
:type account_ids: list[str]
:param fields: List of fields to retrieve.
:type fields: list[str]
:param hostname: Filters results by hostname.
:type hostname: list[str]
:param distro: Filters results by OS distro.
:type distro: list[str]
:param provider: Filters results by cloud provider.
:type provider: list[str]
:param compact: Indicates if only minimal image data is to be sent (i.e., vulnerabilities, compliance, and extended image metadata should be skipped) (true) or not (false).
:type compact: bool
:param clusters: Filters results by cluster name.
:type clusters: list[str]
:param compliance_ids: Filters results by compliance ID.
:type compliance_ids: list[int]
:param async_req: Whether to execute the request asynchronously.
:type async_req: bool, optional
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:type _preload_content: bool, optional
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: Returns the result object.
If the method is called asynchronously,
returns the request thread.
:rtype: None
"""
kwargs['_return_http_data_only'] = True
return self.api_v1_hosts_download_get_with_http_info(**kwargs) # noqa: E501
def api_v1_hosts_download_get_with_http_info(self, **kwargs): # noqa: E501
"""api_v1_hosts_download_get # noqa: E501
DownloadHosts downloads the hosts data # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.api_v1_hosts_download_get_with_http_info(async_req=True)
>>> result = thread.get()
:param offset: Offset from the start of the list from which to retrieve documents.
:type offset: int
:param limit: Number of documents to return.
:type limit: int
:param search: Search term.
:type search: str
:param sort: Key on which to sort.
:type sort: str
:param reverse: Sort order.
:type reverse: bool
:param collections: Scopes the query by collection.
:type collections: list[str]
:param account_ids: Scopes the query by account ID.
:type account_ids: list[str]
:param fields: List of fields to retrieve.
:type fields: list[str]
:param hostname: Filters results by hostname.
:type hostname: list[str]
:param distro: Filters results by OS distro.
:type distro: list[str]
:param provider: Filters results by cloud provider.
:type provider: list[str]
:param compact: Indicates if only minimal image data is to be sent (i.e., vulnerabilities, compliance, and extended image metadata should be skipped) (true) or not (false).
:type compact: bool
:param clusters: Filters results by cluster name.
:type clusters: list[str]
:param compliance_ids: Filters results by compliance ID.
:type compliance_ids: list[int]
:param async_req: Whether to execute the request asynchronously.
:type async_req: bool, optional
:param _return_http_data_only: response data without HTTP status code
and headers
:type _return_http_data_only: bool, optional
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:type _preload_content: bool, optional
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:param _request_auth: set to override the auth_settings for a single
request; this effectively ignores the authentication
in the spec for a single request.
:type _request_auth: dict, optional
:return: Returns the result object.
If the method is called asynchronously,
returns the request thread.
:rtype: None
"""
local_var_params = locals()
all_params = [
'offset',
'limit',
'search',
'sort',
'reverse',
'collections',
'account_ids',
'fields',
'hostname',
'distro',
'provider',
'compact',
'clusters',
'compliance_ids'
]
all_params.extend(
[
'async_req',
'_return_http_data_only',
'_preload_content',
'_request_timeout',
'_request_auth'
]
)
for key, val in six.iteritems(local_var_params['kwargs']):
if key not in all_params:
raise ApiTypeError(
"Got an unexpected keyword argument '%s'"
" to method api_v1_hosts_download_get" % key
)
local_var_params[key] = val
del local_var_params['kwargs']
collection_formats = {}
path_params = {}
query_params = []
if 'offset' in local_var_params and local_var_params['offset'] is not None: # noqa: E501
query_params.append(('offset', local_var_params['offset'])) # noqa: E501
if 'limit' in local_var_params and local_var_params['limit'] is not None: # noqa: E501
query_params.append(('limit', local_var_params['limit'])) # noqa: E501
if 'search' in local_var_params and local_var_params['search'] is not None: # noqa: E501
query_params.append(('search', local_var_params['search'])) # noqa: E501
if 'sort' in local_var_params and local_var_params['sort'] is not None: # noqa: E501
query_params.append(('sort', local_var_params['sort'])) # noqa: E501
if 'reverse' in local_var_params and local_var_params['reverse'] is not None: # noqa: E501
query_params.append(('reverse', local_var_params['reverse'])) # noqa: E501
if 'collections' in local_var_params and local_var_params['collections'] is not None: # noqa: E501
query_params.append(('collections', local_var_params['collections'])) # noqa: E501
collection_formats['collections'] = 'multi' # noqa: E501
if 'account_ids' in local_var_params and local_var_params['account_ids'] is not None: # noqa: E501
query_params.append(('accountIDs', local_var_params['account_ids'])) # noqa: E501
collection_formats['accountIDs'] = 'multi' # noqa: E501
if 'fields' in local_var_params and local_var_params['fields'] is not None: # noqa: E501
query_params.append(('fields', local_var_params['fields'])) # noqa: E501
collection_formats['fields'] = 'multi' # noqa: E501
if 'hostname' in local_var_params and local_var_params['hostname'] is not None: # noqa: E501
query_params.append(('hostname', local_var_params['hostname'])) # noqa: E501
collection_formats['hostname'] = 'multi' # noqa: E501
if 'distro' in local_var_params and local_var_params['distro'] is not None: # noqa: E501
query_params.append(('distro', local_var_params['distro'])) # noqa: E501
collection_formats['distro'] = 'multi' # noqa: E501
if 'provider' in local_var_params and local_var_params['provider'] is not None: # noqa: E501
query_params.append(('provider', local_var_params['provider'])) # noqa: E501
collection_formats['provider'] = 'multi' # noqa: E501
if 'compact' in local_var_params and local_var_params['compact'] is not None: # noqa: E501
query_params.append(('compact', local_var_params['compact'])) # noqa: E501
if 'clusters' in local_var_params and local_var_params['clusters'] is not None: # noqa: E501
query_params.append(('clusters', local_var_params['clusters'])) # noqa: E501
collection_formats['clusters'] = 'multi' # noqa: E501
if 'compliance_ids' in local_var_params and local_var_params['compliance_ids'] is not None: # noqa: E501
query_params.append(('complianceIDs', local_var_params['compliance_ids'])) # noqa: E501
collection_formats['complianceIDs'] = 'multi' # noqa: E501
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# Authentication setting
auth_settings = [] # noqa: E501
response_types_map = {}
return self.api_client.call_api(
'/api/v1/hosts/download', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_types_map=response_types_map,
auth_settings=auth_settings,
async_req=local_var_params.get('async_req'),
_return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
_preload_content=local_var_params.get('_preload_content', True),
_request_timeout=local_var_params.get('_request_timeout'),
collection_formats=collection_formats,
_request_auth=local_var_params.get('_request_auth'))
def api_v1_hosts_evaluate_post(self, **kwargs): # noqa: E501
"""api_v1_hosts_evaluate_post # noqa: E501
ResolveHosts adds vulnerability data for the given host # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.api_v1_hosts_evaluate_post(async_req=True)
>>> result = thread.get()
:param api_resolve_images_req:
:type api_resolve_images_req: ApiResolveImagesReq
:param async_req: Whether to execute the request asynchronously.
:type async_req: bool, optional
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:type _preload_content: bool, optional
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: Returns the result object.
If the method is called asynchronously,
returns the request thread.
:rtype: ApiResolveImagesResp
"""
kwargs['_return_http_data_only'] = True
return self.api_v1_hosts_evaluate_post_with_http_info(**kwargs) # noqa: E501
def api_v1_hosts_evaluate_post_with_http_info(self, **kwargs): # noqa: E501
"""api_v1_hosts_evaluate_post # noqa: | |
# Copyright 2021 <NAME> <<EMAIL>>. All Rights Reserved.
# Author: <NAME> <<EMAIL>>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Representation of the CAD model in Simple Edge configuration.
BRepNet: A topological message passing system for solid models.
https://arxiv.org/pdf/2104.00706.pdf
"""
from typing import List, Tuple
import numpy as np
def simple_edge(
face_features: np.ndarray,
edge_features: np.ndarray,
coedge_features: np.ndarray,
coedge_to_next: np.ndarray,
coedge_to_mate: np.ndarray,
coedge_to_face: np.ndarray,
coedge_to_edge: np.ndarray,
):
"""Create graph according to the `simple edge` configuration."""
del coedge_to_next
faces_num = face_features.shape[0]
edges_num = edge_features.shape[0]
coedges_num = coedge_features.shape[0]
face_to_node = np.arange(faces_num)
edge_to_node = np.arange(edges_num) + faces_num
coedge_to_node = np.arange(coedges_num) + (faces_num + edges_num)
edges = []
# Faces
_f(coedge_to_face, coedge_to_node, face_to_node, edges)
_mf(coedge_to_mate, coedge_to_node, coedge_to_face, face_to_node, edges)
# Edges
_e(coedge_to_edge, coedge_to_node, edge_to_node, edges)
# CoEdges
_i(coedges_num, coedge_to_node, edges)
_m(coedge_to_mate, coedge_to_node, edges)
return _create_graph(face_features, edge_features, coedge_features, edges)
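# Usage sketch (hedged): a toy solid with two faces sharing one edge. The arrays
# below are hypothetical placeholders; real inputs come from a B-rep feature
# extractor. It assumes the full module, including the _i/_m helpers defined
# further down, is available.
#
#   import numpy as np
#   face_features = np.zeros((2, 7))       # 2 faces, 7 features each
#   edge_features = np.zeros((1, 10))      # 1 edge, 10 features
#   coedge_features = np.zeros((2, 1))     # 2 coedges (one per side of the edge)
#   coedge_to_next = np.array([0, 1])      # next coedge in each loop
#   coedge_to_mate = np.array([1, 0])      # the two coedges are mates
#   coedge_to_face = np.array([0, 1])      # coedge i lies on face i
#   coedge_to_edge = np.array([0, 0])      # both coedges map to edge 0
#   graph = simple_edge(face_features, edge_features, coedge_features,
#                       coedge_to_next, coedge_to_mate, coedge_to_face,
#                       coedge_to_edge)
#   print(graph["n_node"], graph["n_edge"])  # node and edge counts of the graph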
def assymetric(
face_features: np.ndarray,
edge_features: np.ndarray,
coedge_features: np.ndarray,
coedge_to_next: np.ndarray,
coedge_to_mate: np.ndarray,
coedge_to_face: np.ndarray,
coedge_to_edge: np.ndarray,
):
"""Create graph according to the `assymetric` configuration."""
faces_num = face_features.shape[0]
edges_num = edge_features.shape[0]
coedges_num = coedge_features.shape[0]
face_to_node = np.arange(faces_num)
edge_to_node = np.arange(edges_num) + faces_num
coedge_to_node = np.arange(coedges_num) + (faces_num + edges_num)
edges = []
# Faces
_f(coedge_to_face, coedge_to_node, face_to_node, edges)
_mf(coedge_to_mate, coedge_to_node, coedge_to_face, face_to_node, edges)
# Edges
_e(coedge_to_edge, coedge_to_node, edge_to_node, edges)
# CoEdges
_i(coedges_num, coedge_to_node, edges)
_n(coedge_to_next, coedge_to_node, edges)
return _create_graph(face_features, edge_features, coedge_features, edges)
def assymetric_plus(
face_features: np.ndarray,
edge_features: np.ndarray,
coedge_features: np.ndarray,
coedge_to_next: np.ndarray,
coedge_to_mate: np.ndarray,
coedge_to_face: np.ndarray,
coedge_to_edge: np.ndarray,
):
"""Create graph according to the `assymetric_plus` configuration."""
faces_num = face_features.shape[0]
edges_num = edge_features.shape[0]
coedges_num = coedge_features.shape[0]
face_to_node = np.arange(faces_num)
edge_to_node = np.arange(edges_num) + faces_num
coedge_to_node = np.arange(coedges_num) + (faces_num + edges_num)
edges = []
# Faces
_f(coedge_to_face, coedge_to_node, face_to_node, edges)
_mf(coedge_to_mate, coedge_to_node, coedge_to_face, face_to_node, edges)
# Edges
_e(coedge_to_edge, coedge_to_node, edge_to_node, edges)
# CoEdges
_i(coedges_num, coedge_to_node, edges)
_m(coedge_to_mate, coedge_to_node, edges)
_n(coedge_to_next, coedge_to_node, edges)
return _create_graph(face_features, edge_features, coedge_features, edges)
def assymetric_plus_plus(
face_features: np.ndarray,
edge_features: np.ndarray,
coedge_features: np.ndarray,
coedge_to_next: np.ndarray,
coedge_to_mate: np.ndarray,
coedge_to_face: np.ndarray,
coedge_to_edge: np.ndarray,
):
"""Create graph according to the `assymetric++` configuration."""
faces_num = face_features.shape[0]
edges_num = edge_features.shape[0]
coedges_num = coedge_features.shape[0]
face_to_node = np.arange(faces_num)
edge_to_node = np.arange(edges_num) + faces_num
coedge_to_node = np.arange(coedges_num) + (faces_num + edges_num)
edges = []
# Faces
_f(coedge_to_face, coedge_to_node, face_to_node, edges)
_mf(coedge_to_mate, coedge_to_node, coedge_to_face, face_to_node, edges)
# Edges
_e(coedge_to_edge, coedge_to_node, edge_to_node, edges)
_ne(coedge_to_next, coedge_to_node, coedge_to_edge, edges)
# CoEdges
_i(coedges_num, coedge_to_node, edges)
_m(coedge_to_mate, coedge_to_node, edges)
_n(coedge_to_next, coedge_to_node, edges)
return _create_graph(face_features, edge_features, coedge_features, edges)
def winged_edge(
face_features: np.ndarray,
edge_features: np.ndarray,
coedge_features: np.ndarray,
coedge_to_next: np.ndarray,
coedge_to_mate: np.ndarray,
coedge_to_face: np.ndarray,
coedge_to_edge: np.ndarray,
):
"""Create graph according to the `winged edge` configuration."""
coedge_to_prev = np.zeros_like(coedge_to_next)
for (from_ix, to_ix) in enumerate(coedge_to_next):
coedge_to_prev[to_ix] = from_ix
faces_num = face_features.shape[0]
edges_num = edge_features.shape[0]
coedges_num = coedge_features.shape[0]
face_to_node = np.arange(faces_num)
edge_to_node = np.arange(edges_num) + faces_num
coedge_to_node = np.arange(coedges_num) + (faces_num + edges_num)
edges = []
# Faces
_f(coedge_to_face, coedge_to_node, face_to_node, edges)
_mf(coedge_to_mate, coedge_to_node, coedge_to_face, face_to_node, edges)
# Edges
_e(coedge_to_edge, coedge_to_node, edge_to_node, edges)
_ne(coedge_to_next, coedge_to_node, coedge_to_edge, edges)
_pe(coedge_to_prev, coedge_to_node, coedge_to_edge, edges)
_mne(coedge_to_next, coedge_to_node, coedge_to_mate, coedge_to_edge, edges)
_mpe(coedge_to_prev, coedge_to_node, coedge_to_mate, coedge_to_edge, edges)
# CoEdges
_i(coedges_num, coedge_to_node, edges)
_m(coedge_to_mate, coedge_to_node, edges)
_n(coedge_to_next, coedge_to_node, edges)
_p(coedge_to_prev, coedge_to_node, edges)
_mn(coedge_to_next, coedge_to_node, coedge_to_mate, edges)
_mp(coedge_to_prev, coedge_to_node, coedge_to_mate, edges)
return _create_graph(face_features, edge_features, coedge_features, edges)
def winged_edge_plus(
face_features: np.ndarray,
edge_features: np.ndarray,
coedge_features: np.ndarray,
coedge_to_next: np.ndarray,
coedge_to_mate: np.ndarray,
coedge_to_face: np.ndarray,
coedge_to_edge: np.ndarray,
):
"""Create graph according to the `winged edge+` configuration."""
coedge_to_prev = np.zeros_like(coedge_to_next)
for (from_ix, to_ix) in enumerate(coedge_to_next):
coedge_to_prev[to_ix] = from_ix
faces_num = face_features.shape[0]
edges_num = edge_features.shape[0]
coedges_num = coedge_features.shape[0]
face_to_node = np.arange(faces_num)
edge_to_node = np.arange(edges_num) + faces_num
coedge_to_node = np.arange(coedges_num) + (faces_num + edges_num)
edges = []
# Faces
_f(coedge_to_face, coedge_to_node, face_to_node, edges)
_mf(coedge_to_mate, coedge_to_node, coedge_to_face, face_to_node, edges)
# Edges
_e(coedge_to_edge, coedge_to_node, edge_to_node, edges)
_ne(coedge_to_next, coedge_to_node, coedge_to_edge, edges)
_pe(coedge_to_prev, coedge_to_node, coedge_to_edge, edges)
_mne(coedge_to_next, coedge_to_node, coedge_to_mate, coedge_to_edge, edges)
_mpe(coedge_to_next, coedge_to_node, coedge_to_mate, coedge_to_edge, edges)
# CoEdges
_i(coedges_num, coedge_to_node, edges)
_m(coedge_to_mate, coedge_to_node, edges)
_n(coedge_to_next, coedge_to_node, edges)
_nm(coedge_to_mate, coedge_to_node, coedge_to_next, edges)
_p(coedge_to_prev, coedge_to_node, edges)
_pm(coedge_to_prev, coedge_to_node, coedge_to_next, edges)
_mn(coedge_to_next, coedge_to_node, coedge_to_mate, edges)
_mnm(coedge_to_mate, coedge_to_node, coedge_to_next, edges)
_mp(coedge_to_next, coedge_to_node, coedge_to_mate, edges)
_mpm(coedge_to_mate, coedge_to_node, coedge_to_next, edges)
return _create_graph(face_features, edge_features, coedge_features, edges)
def winged_edge_plus_plus(
face_features: np.ndarray,
edge_features: np.ndarray,
coedge_features: np.ndarray,
coedge_to_next: np.ndarray,
coedge_to_mate: np.ndarray,
coedge_to_face: np.ndarray,
coedge_to_edge: np.ndarray,
):
"""Create graph according to the `winged edge++` configuration."""
coedge_to_prev = np.zeros_like(coedge_to_next)
for (from_ix, to_ix) in enumerate(coedge_to_next):
coedge_to_prev[to_ix] = from_ix
faces_num = face_features.shape[0]
edges_num = edge_features.shape[0]
coedges_num = coedge_features.shape[0]
face_to_node = np.arange(faces_num)
edge_to_node = np.arange(edges_num) + faces_num
coedge_to_node = np.arange(coedges_num) + (faces_num + edges_num)
edges = []
# Faces
_f(coedge_to_face, coedge_to_node, face_to_node, edges)
_mf(coedge_to_mate, coedge_to_node, coedge_to_face, face_to_node, edges)
# Edges
_e(coedge_to_edge, coedge_to_node, edge_to_node, edges)
_ne(coedge_to_next, coedge_to_node, coedge_to_edge, edges)
_pe(coedge_to_prev, coedge_to_node, coedge_to_edge, edges)
_mne(coedge_to_next, coedge_to_node, coedge_to_mate, coedge_to_edge, edges)
_mpe(coedge_to_prev, coedge_to_node, coedge_to_mate, coedge_to_edge, edges)
_nmne(coedge_to_node, coedge_to_next, coedge_to_mate, coedge_to_edge,
edges)
_pmpe(coedge_to_node, coedge_to_prev, coedge_to_mate, coedge_to_edge,
edges)
_mpmpe(coedge_to_node, coedge_to_prev, coedge_to_mate, coedge_to_edge,
edges)
_mnmne(coedge_to_node, coedge_to_next, coedge_to_mate, coedge_to_edge,
edges)
# CoEdges
_i(coedges_num, coedge_to_node, edges)
_m(coedge_to_mate, coedge_to_node, edges)
_n(coedge_to_next, coedge_to_node, edges)
_nm(coedge_to_mate, coedge_to_node, coedge_to_next, edges)
_p(coedge_to_prev, coedge_to_node, edges)
_pm(coedge_to_mate, coedge_to_node, coedge_to_prev, edges)
_mn(coedge_to_next, coedge_to_node, coedge_to_mate, edges)
_mnm(coedge_to_mate, coedge_to_node, coedge_to_next, edges)
_mp(coedge_to_next, coedge_to_node, coedge_to_mate, edges)
_mpm(coedge_to_mate, coedge_to_node, coedge_to_next, edges)
_nmn(coedge_to_next, coedge_to_mate, coedge_to_node, edges)
_pmp(coedge_to_prev, coedge_to_mate, coedge_to_node, edges)
_mpmp(coedge_to_prev, coedge_to_mate, coedge_to_node, edges)
_mnmn(coedge_to_next, coedge_to_mate, coedge_to_node, edges)
return _create_graph(face_features, edge_features, coedge_features, edges)
def _create_graph(
face_features: np.ndarray,
edge_features: np.ndarray,
coedge_features: np.ndarray,
edges: List[Tuple[int, int]],
):
"""Create the graph."""
faces_num = face_features.shape[0]
edges_num = edge_features.shape[0]
coedges_num = coedge_features.shape[0]
n_node = faces_num + edges_num + coedges_num
senders = []
receivers = []
for (f, t) in edges:
senders.append(f)
receivers.append(t)
# don't add self-loops more than once
if f != t:
senders.append(t)
receivers.append(f)
assert len(senders) == len(receivers)
n_edge = len(senders)
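# Build a single node-feature matrix: face, edge and coedge feature blocks are
# zero-padded into disjoint column ranges, so every node row has
# face_dim + edge_dim + coedge_dim columns regardless of its entity type.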
nodes = np.concatenate(
(np.pad(face_features,
((0, 0),
(0, edge_features.shape[1] + coedge_features.shape[1]))),
np.pad(edge_features,
((0, 0), (face_features.shape[1], coedge_features.shape[1]))),
np.pad(coedge_features,
((0, 0),
(face_features.shape[1] + edge_features.shape[1], 0)))))
return {
"n_node": np.array([n_node], dtype=np.int32),
"n_edge": np.array([n_edge], dtype=np.int32),
"nodes": nodes,
"senders": np.array(senders, dtype=np.int32),
"receivers": np.array(receivers, dtype=np.int32),
}
def _f(
coedge_to_face: np.ndarray,
coedge_to_node: np.ndarray,
face_to_node: np.ndarray,
edges: List[Tuple[int, int]],
):
"""F.
Creates an edge between coedge and corresponding face.
"""
for coedge_ix, face_ix in enumerate(coedge_to_face):
edges.append((coedge_to_node[coedge_ix], face_to_node[face_ix]))
def _mf(
coedge_to_mate: np.ndarray,
coedge_to_node: np.ndarray,
coedge_to_face: np.ndarray,
face_to_node: np.ndarray,
edges: List[Tuple[int, int]],
):
"""MF.
Creates an edge between coedge and face of the mate of the coedge.
"""
for coedge_from_ix, coedge_to_ix in enumerate(coedge_to_mate):
edges.append((coedge_to_node[coedge_from_ix],
face_to_node[coedge_to_face[coedge_to_ix]]))
def _e(
coedge_to_edge: np.ndarray,
coedge_to_node: np.ndarray,
edge_to_node: np.ndarray,
edges: List[Tuple[int, int]],
):
"""E.
Creates an edge between coedge and corresponding edge.
"""
for coedge_ix, edge_ix in enumerate(coedge_to_edge):
edges.append((coedge_to_node[coedge_ix], edge_to_node[edge_ix]))
def _ne(
coedge_to_next: np.ndarray,
coedge_to_node: np.ndarray,
coedge_to_edge: np.ndarray,
edges: List[Tuple[int, int]],
):
"""NE.
Creates an edge between coedge and edge of the next coedge.
"""
for coedge_from_ix, coedge_to_ix in enumerate(coedge_to_next):
edges.append(
(coedge_to_node[coedge_from_ix], coedge_to_edge[coedge_to_ix]))
def _pe(
coedge_to_prev: np.ndarray,
coedge_to_node: np.ndarray,
coedge_to_edge: np.ndarray,
edges: List[Tuple[int, int]],
):
"""PE.
Creates an edge between coedge and previous edge.
"""
for coedge_from_ix, coedge_to_ix in enumerate(coedge_to_prev):
edges.append(
(coedge_to_node[coedge_from_ix], coedge_to_edge[coedge_to_ix]))
def _mne(
coedge_to_next: np.ndarray,
coedge_to_node: np.ndarray,
coedge_to_mate: np.ndarray,
coedge_to_edge: np.ndarray,
edges: List[Tuple[int, int]],
):
"""MN.
Creates an edge between coedge and edge of the mate next coedge.
"""
for coedge_from_ix, coedge_to_ix in enumerate(coedge_to_mate):
edges.append((coedge_to_node[coedge_from_ix],
coedge_to_edge[coedge_to_next[coedge_to_ix]]))
def _mpe(
coedge_to_prev: np.ndarray,
coedge_to_node: np.ndarray,
coedge_to_mate: np.ndarray,
coedge_to_edge: np.ndarray,
edges: List[Tuple[int, int]],
):
"""MP.
Creates an edge between coedge and edge of the mate previous coedge.
"""
for coedge_from_ix, coedge_to_ix in enumerate(coedge_to_mate):
edges.append((coedge_to_node[coedge_from_ix],
coedge_to_edge[coedge_to_prev[coedge_to_ix]]))
def _nmne(
coedge_to_node: np.ndarray,
coedge_to_next: np.ndarray,
coedge_to_mate: np.ndarray,
coedge_to_edge: np.ndarray,
edges: List[Tuple[int, int]],
):
"""NMNE.
Creates an edge between coedge and edge of next mate next coedge.
"""
for coedge_from_ix, coedge_to_ix in enumerate(coedge_to_next):
edges.append(
(coedge_to_node[coedge_from_ix],
coedge_to_edge[coedge_to_next[coedge_to_mate[coedge_to_ix]]]))
def _pmpe(
coedge_to_node: np.ndarray,
coedge_to_prev: np.ndarray,
coedge_to_mate: np.ndarray,
coedge_to_edge: np.ndarray,
edges: List[Tuple[int, int]],
):
"""PMPE.
Creates an edge between coedge and edge of previous mate previous coedge.
"""
for coedge_from_ix, coedge_to_ix in enumerate(coedge_to_prev):
edges.append(
(coedge_to_node[coedge_from_ix],
coedge_to_edge[coedge_to_prev[coedge_to_mate[coedge_to_ix]]]))
def _mpmpe(
coedge_to_node: np.ndarray,
coedge_to_prev: np.ndarray,
coedge_to_mate: np.ndarray,
coedge_to_edge: np.ndarray,
edges: List[Tuple[int, int]],
):
"""PMPE.
Creates an edge between coedge and edge | |
# applying mask for "apparent horizon"
#
dens = ne.evaluate("rho * w_lorentz * vol") # conserved density (?)
#
mode0 = dxyz * ne.evaluate("sum(dens)") # total mass of the BH
int_modes[0][i_it] = mode0
#
Ix = dxyz * ne.evaluate("sum(dens * x)") # computing inertia center
Iy = dxyz * ne.evaluate("sum(dens * y)")
xc = Ix / mode0 # computing center of mass
yc = Iy / mode0
xcs.append(xc)
ycs.append(yc)
#
phi = ne.evaluate("arctan2(y - yc, x - xc)") # shifting coordinates for center of mass
r = ne.evaluate("sqrt(x**2 + y**2)")
#
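# Azimuthal density modes: C_m = dxyz * sum(dens * exp(-i*m*phi)), a Riemann-sum
# approximation of the integral of the conserved density weighted by exp(-i*m*phi).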
for m in range(1, mmax + 1):
_mode = dxyz * np.sum(dens * np.exp(-1j * m * phi)) # = C_m that is not f(r)
int_modes[m][i_it] = _mode # np.complex128 number
#
shells = np.linspace(r.min(), r.max(), nshells) # for C_m that is a f(r)
rbins[i_it] = 0.5 * (shells[:-1] + shells[1:]) # middle of every shell
for i_shell, inner, outer in zip(range(nshells), shells[:-1], shells[1:]):
mask = ((r > inner) & (r <= outer)) # mask out all points outside the i-th shell
for m in range(0, mmax + 1):
_mode = dxyz * np.sum(dens[mask] * np.exp(-1j * m * phi[mask])) # complex128 number
r_modes[m][i_it].append(_mode)
return times, iterations, xcs, ycs, int_modes, rbins, r_modes
# # int_modes = [modes][iterations] -> number
# # r_modes = [modes][iteratiopns] -> array for every 'r'
# # plot(radii[it], rmodes[mode][it])
#
# r_res = []
# for m in range(mmax + 1):
# for i_it, it in enumerate(iterations):
# for m in range(mmax + 1):
#
#
#
# for m in range(mmax + 1):
# combined = np.zeros(len(iterations))
# for ir in range(nshells):
# combined = np.vstack((combined, r_modes[m][:][ir]))
# combined = np.delete(combined, 0, 0)
#
# for m in range(mmax + 1):
# combined = np.zeros(len(iterations))
# for ir in range(nshells):
# combined = np.vstack((combined, r_modes[m][:][ir]))
# combined = np.delete(combined, 0, 0)
# r_res.append(combined)
#
# return times, iterations, xcs, ycs, int_modes, rs, mmodes
# #
# exit(1)
#
#
# # times = []
# # modes = [[] for m in range(mmax + 1)]
# # mmodes = [[[] for p in range(nshells)] for m in range(mmax + 1)]
# # xcs = []
# # ycs = []
# #
#
# for idx, it in enumerate(iterations):
# print("\tcomputing {} modes, it: {}/{}".format(rho_dens, idx, len(iterations)))
# #
# delta = self.get_grid_data(it, rl, "delta")[:-1]
# # print(delta)
# dxyz = np.prod(delta)
# # print(dxyz); exit(0)
# x = self.get_grid_data(it, rl, 'x')
# y = self.get_grid_data(it, rl, 'y')
# z = self.get_grid_data(it, rl, 'z')
# x = x[:, :, 0]
# y = y[:, :, 0]
#
# # get z=0 slice
# rho = self.get_prof_arr(it, rl, "rho")[:, :, 0]
#
# # Exclude region outside refinement levels
# idx = np.isnan(rho)
# rho[idx] = 0.0
# #
# lapse = self.get_prof_arr(it, rl, "lapse")[:, :, 0]
# vol = self.get_prof_arr(it, rl, "vol")[:, :, 0]
# w_lorentz = self.get_prof_arr(it, rl, "w_lorentz")[:, :, 0]
# # Exclude region outside refinement levels
# vol[idx] = 0.0
# w_lorentz[idx] = 0.0
# # apply mask to cut off the horizon
# rho[lapse < 0.15] = 0
#
# dens = ne.evaluate("rho * w_lorentz * vol")
#
# # Compute center of mass
# print(idx)
# int_modes[0][idx] = dxyz * ne.evaluate("sum(dens)")
# # modes[0].append(dxyz * ne.evaluate("sum(rho)"))
# Ix = dxyz * ne.evaluate("sum(dens * x)")
# Iy = dxyz * ne.evaluate("sum(dens * y)")
# xc = Ix / int_modes[0][-1]
# yc = Iy / int_modes[0][-1]
# phi = ne.evaluate("arctan2(y - yc, x - xc)")
# r = ne.evaluate("sqrt(x**2 + y**2)")
# print(r.max(), r.min())
# # phi = ne.evaluate("arctan2(y, x)")
# xcs.append(xc)
# ycs.append(yc)
#
# times.append(self.get_time_for_it(it, d1d2d3prof="prof"))
# print('\tm:'),
# for m in range(1, mmax + 1):
# print(m),
# # _mode1 = dxyz * ne.evaluate("sum(rho * w_lorentz * vol * exp(-1j * m * phi))")
# _mode = dxyz * np.sum(dens * np.exp(-1j * m * phi))
# # print(_mode2)
# # exit(1)
# int_modes[m][idx] = _mode
#
# #
# print('r:'),
# shells = np.linspace(r.min(), r.max(), nshells)
# for i_shell, inner, outer in zip(range(nshells), shells[:-1], shells[1:]):
# print(i_shell),
# for m in range(0, mmax + 1):
# mask = ((r > inner) & (r <= outer))
# # _mode1 = dxyz * ne.evaluate("sum(rho * w_lorentz * vol * exp(-1j * m * phi))")
# _mode = dxyz * np.sum(dens[mask] * np.exp(-1j * m * phi[mask]))
# # print(_mode1, _mode2)
# # exit(1)
# r_modes[m][idx].append(_mode)
#
# rs = 0.5 * (shells[:-1] + shells[1:])
# # print(len(rs), len(mmodes))
# # assert len(rs) == len(mmodes)
# print('done')
# # exit(0)
# #
#
#
# # r_modes = np.vstack((r_modes[][][:]))
#
# # for i_shell in range(shells):
#
#
# return times, iterations, xcs, ycs, modes, rs, mmodes
# def __delete__(self, instance):
# # instance.dfile.close()
# instance.data_matrix = [[np.zeros(0, )
# for x in range(self.nlevels)]
# for y in range(len(self.list_all_v_ns))]
# instance.mask_matrix = [np.ones(0, dtype=bool) for x in range(self.nlevels)]
def delete_for_it(self, it, except_v_ns, rm_masks=True, rm_comp=True, rm_prof=True):
self.check_it(it)
nlevels = self.get_nlevels(it)
# clean up mask array
if rm_masks:
for v_n in self.list_mask_names:
for rl in range(nlevels):
# print("it:{} rl:{} v_n:{} [all len(rls):{}]".format(it, rl, v_n, nlevels))
self.mask_matrix[self.i_it(it)][rl][self.i_mask_v_n(v_n)] = np.ones(0, dtype=bool)
# clean up data
if rm_comp:
for v_n in self.list_all_v_ns:
if v_n not in except_v_ns:
self.check_v_n(v_n)
for rl in range(nlevels):
self.data_matrix[self.i_it(it)][rl][self.i_v_n(v_n)] = np.zeros(0, )
# clean up the initial data
if rm_prof:
self.dfile_matrix[self.i_it(it)] = 0
self.grid_matrix[self.i_it(it)] = 0
for v_n in self.list_grid_v_ns:
if not v_n in except_v_ns:
for rl in range(nlevels):
self.grid_data_matrix[self.i_it(it)][rl][self.i_grid_v_n(v_n)] = np.zeros(0,)
class INTERPOLATE_STORE(MAINMETHODS_STORE):
def __init__(self, sim, grid_object, symmetry=None):
"""
fname - of the profile
sim - name of the simulation (for directory searching)
grid_object -
object of the class with the interpolated grid. Must contain:
list(list_grid_v_ns) that contains the list of variable names of the new grid,
for example x_cyl ... z_cyl, r_cyl ... z_cyl, dr_cyl ... dz_cyl
get_xi() function that returns array of the type
return np.column_stack([self.x_cyl_3d.flatten(),
self.y_cyl_3d.flatten(),
self.z_cyl_3d.flatten()])
get_shape() function that returns the shape of the new grid, for
example: self.x_cyl_3d.shape
get_int_grid(v_n) function that returns the array of the new grid
for variable v_n. For example for v_n = "r_cyl"
:param fname:
:param sim:
:param grid_object:
"""
MAINMETHODS_STORE.__init__(self, sim, symmetry)
self.new_grid = grid_object
self.list_int_grid_v_ns = grid_object.list_int_grid_v_ns
self.list_int_v_ns = self.list_prof_v_ns + \
self.list_comp_v_ns + \
self.list_grid_v_ns
self.int_data_matrix = [[np.zeros(0,)
for y in range(len(self.list_int_v_ns))]
for x in range(len(self.list_iterations))]
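# A minimal sketch (hypothetical, for illustration only) of a grid object that
# satisfies the interface described in the docstring above; the real
# cylindrical/spherical/cartesian grid classes are defined elsewhere.
#
#   class CartesianGridSketch:
#       grid_type = "cart"
#       list_int_grid_v_ns = ["x_cart", "y_cart", "z_cart"]
#       def __init__(self, n=32, extent=50.0):
#           x = np.linspace(-extent, extent, n)
#           self.xf, self.yf, self.zf = np.meshgrid(x, x, x, indexing="ij")
#       def get_shape(self):
#           return self.xf.shape
#       def get_xi(self):
#           return np.column_stack([self.xf.flatten(),
#                                   self.yf.flatten(),
#                                   self.zf.flatten()])
#       def get_int_grid(self, v_n):
#           return {"x_cart": self.xf, "y_cart": self.yf, "z_cart": self.zf}[v_n]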
def check_int_v_n(self, v_n):
if v_n not in self.list_int_v_ns:
raise NameError("v_n: '{}' not in the v_n list \n{}"
.format(v_n, self.list_int_v_ns))
def i_int_v_n(self, v_n):
self.check_int_v_n(v_n)
return int(self.list_int_v_ns.index(v_n))
def do_append_grid_var(self, it, v_n):
self.int_data_matrix[self.i_it(it)][self.i_int_v_n(v_n)] = \
self.new_grid.get_int_grid(v_n)
# ---
def do_interpolate(self, it, v_n):
tmp = []
nlevels = self.get_nlevels(it)
for rl in range(nlevels):
data = self.get_comp_data(it, rl, v_n)
if self.new_grid.grid_type == "pol":
tmp.append(data)
else:
tmp.append(data)
xi = self.new_grid.get_xi()
shape = self.new_grid.get_shape()
# print(xi.shape)
print("\t\tInterpolating: it:{} v_n:{} -> {} grid"
.format(it, v_n, self.new_grid.grid_type))
# carpet_grid = self.get_grid(it)
if self.enforce_xy_grid:
carpet_grid = self.get_grid(it)
else:
carpet_grid = self.get_grid(it)
# exit(1)
F = Interpolator(carpet_grid, tmp, interp=1)
arr = F(xi).reshape(shape)
self.int_data_matrix[self.i_it(it)][self.i_int_v_n(v_n)] = arr
# ----
def is_data_interpolated(self, it, v_n):
if len(self.int_data_matrix[self.i_it(it)][self.i_int_v_n(v_n)]) == 0:
if v_n in self.list_int_grid_v_ns:
self.do_append_grid_var(it, v_n)
else:
self.do_interpolate(it, v_n)
def get_int(self, it, v_n):
self.check_it(it)
self.check_int_v_n(v_n)
self.is_data_interpolated(it, v_n)
return self.int_data_matrix[self.i_it(it)][self.i_int_v_n(v_n)]
class INTMETHODS_STORE(INTERPOLATE_STORE):
"""
Interpolates the data for any variable onto one of the
grids: cylindrical, spherical, cartesian (see classes above).
"""
def __init__(self, sim, grid_object, symmetry=None):
INTERPOLATE_STORE.__init__(self, sim, grid_object, symmetry)
def save_new_grid(self, it, outdir="profiles/"):
self.check_it(it)
grid_type = self.new_grid.grid_info['type']
if not os.path.exists(Paths.ppr_sims + self.sim + '/' + outdir):
os.makedirs(Paths.ppr_sims + self.sim + '/' + outdir)
path = Paths.ppr_sims + self.sim + '/' + outdir + str(it) + '/'
if os.path.isfile(path + grid_type + '_grid.h5'):
os.remove(path + grid_type + '_grid.h5')
outfile = h5py.File(path + grid_type + '_grid.h5', "w")
if not os.path.exists(path):
os.makedirs(path)
# print("Saving grid...")
for v_n in self.list_int_grid_v_ns:
outfile.create_dataset(v_n, data=self.new_grid.get_int_grid(v_n))
outfile.close()
#!/usr/bin/env python3
# -*- coding:utf-8 -*-
# @miemie2013
import os
import sys
import random
import torch
import torch.distributed as dist
import torch.nn as nn
import numpy as np  # used for np.stack and the dtype constants in the collaters below
from mmdet.data import *
from mmdet.exp.datasets.coco_base import COCOBaseExp
class FCOSEvalCollater():
def __init__(self, context, batch_transforms):
self.context = context
self.batch_transforms = batch_transforms
def __call__(self, batch):
# regroup the batch items into sample dicts
samples = []
for i, item in enumerate(batch):
sample = {}
sample['image'] = item[0]
sample['im_info'] = item[1]
sample['im_id'] = item[2]
samples.append(sample)
# batch_transforms
for batch_transform in self.batch_transforms:
samples = batch_transform(samples, self.context)
# extract the fields of interest
images = []
im_scales = []
im_ids = []
for i, sample in enumerate(samples):
images.append(sample['image'])
im_scales.append(sample['im_info'][2:3])
im_ids.append(sample['im_id'])
images = np.stack(images, axis=0)
im_scales = np.stack(im_scales, axis=0)
im_ids = np.stack(im_ids, axis=0)
images = torch.Tensor(images)
im_scales = torch.Tensor(im_scales)
im_ids = torch.Tensor(im_ids)
return images, im_scales, im_ids
class FCOSTrainCollater():
def __init__(self, context, batch_transforms, n_layers):
self.context = context
self.batch_transforms = batch_transforms
self.n_layers = n_layers
def __call__(self, batch):
# When resuming training, skip this batch and return placeholder tensors.
image_shape = batch[0][0].shape
if len(image_shape) == 1:
images = torch.zeros((1, ), dtype=torch.float32)
labels0 = torch.zeros((1, ), dtype=torch.float32)
reg_target0 = torch.zeros((1, ), dtype=torch.float32)
centerness0 = torch.zeros((1, ), dtype=torch.float32)
labels1 = torch.zeros((1, ), dtype=torch.float32)
reg_target1 = torch.zeros((1, ), dtype=torch.float32)
centerness1 = torch.zeros((1, ), dtype=torch.float32)
labels2 = torch.zeros((1, ), dtype=torch.float32)
reg_target2 = torch.zeros((1, ), dtype=torch.float32)
centerness2 = torch.zeros((1, ), dtype=torch.float32)
if self.n_layers == 5:
labels3 = torch.zeros((1, ), dtype=torch.float32)
reg_target3 = torch.zeros((1, ), dtype=torch.float32)
centerness3 = torch.zeros((1, ), dtype=torch.float32)
labels4 = torch.zeros((1, ), dtype=torch.float32)
reg_target4 = torch.zeros((1, ), dtype=torch.float32)
centerness4 = torch.zeros((1, ), dtype=torch.float32)
return images, labels0, reg_target0, centerness0, labels1, reg_target1, centerness1, labels2, reg_target2, centerness2, labels3, reg_target3, centerness3, labels4, reg_target4, centerness4
return images, labels0, reg_target0, centerness0, labels1, reg_target1, centerness1, labels2, reg_target2, centerness2
# regroup the batch items into sample dicts
samples = []
for i, item in enumerate(batch):
sample = {}
sample['image'] = item[0]
sample['im_info'] = item[1]
sample['im_id'] = item[2]
sample['h'] = item[3]
sample['w'] = item[4]
sample['is_crowd'] = item[5]
sample['gt_class'] = item[6]
sample['gt_bbox'] = item[7]
sample['gt_score'] = item[8]
samples.append(sample)
# batch_transforms
for batch_transform in self.batch_transforms:
samples = batch_transform(samples, self.context)
# extract the fields of interest
images = []
labels0 = []
reg_target0 = []
centerness0 = []
labels1 = []
reg_target1 = []
centerness1 = []
labels2 = []
reg_target2 = []
centerness2 = []
labels3 = []
reg_target3 = []
centerness3 = []
labels4 = []
reg_target4 = []
centerness4 = []
for i, sample in enumerate(samples):
images.append(sample['image'].astype(np.float32))
labels0.append(sample['labels0'].astype(np.int32))
reg_target0.append(sample['reg_target0'].astype(np.float32))
centerness0.append(sample['centerness0'].astype(np.float32))
labels1.append(sample['labels1'].astype(np.int32))
reg_target1.append(sample['reg_target1'].astype(np.float32))
centerness1.append(sample['centerness1'].astype(np.float32))
labels2.append(sample['labels2'].astype(np.int32))
reg_target2.append(sample['reg_target2'].astype(np.float32))
centerness2.append(sample['centerness2'].astype(np.float32))
if self.n_layers == 5:
labels3.append(sample['labels3'].astype(np.int32))
reg_target3.append(sample['reg_target3'].astype(np.float32))
centerness3.append(sample['centerness3'].astype(np.float32))
labels4.append(sample['labels4'].astype(np.int32))
reg_target4.append(sample['reg_target4'].astype(np.float32))
centerness4.append(sample['centerness4'].astype(np.float32))
images = np.stack(images, axis=0)
labels0 = np.stack(labels0, axis=0)
reg_target0 = np.stack(reg_target0, axis=0)
centerness0 = np.stack(centerness0, axis=0)
labels1 = np.stack(labels1, axis=0)
reg_target1 = np.stack(reg_target1, axis=0)
centerness1 = np.stack(centerness1, axis=0)
labels2 = np.stack(labels2, axis=0)
reg_target2 = np.stack(reg_target2, axis=0)
centerness2 = np.stack(centerness2, axis=0)
images = torch.Tensor(images)
labels0 = torch.Tensor(labels0)
reg_target0 = torch.Tensor(reg_target0)
centerness0 = torch.Tensor(centerness0)
labels1 = torch.Tensor(labels1)
reg_target1 = torch.Tensor(reg_target1)
centerness1 = torch.Tensor(centerness1)
labels2 = torch.Tensor(labels2)
reg_target2 = torch.Tensor(reg_target2)
centerness2 = torch.Tensor(centerness2)
if self.n_layers == 5:
labels3 = np.stack(labels3, axis=0)
reg_target3 = np.stack(reg_target3, axis=0)
centerness3 = np.stack(centerness3, axis=0)
labels4 = np.stack(labels4, axis=0)
reg_target4 = np.stack(reg_target4, axis=0)
centerness4 = np.stack(centerness4, axis=0)
labels3 = torch.Tensor(labels3)
reg_target3 = torch.Tensor(reg_target3)
centerness3 = torch.Tensor(centerness3)
labels4 = torch.Tensor(labels4)
reg_target4 = torch.Tensor(reg_target4)
centerness4 = torch.Tensor(centerness4)
return images, labels0, reg_target0, centerness0, labels1, reg_target1, centerness1, labels2, reg_target2, centerness2, labels3, reg_target3, centerness3, labels4, reg_target4, centerness4
return images, labels0, reg_target0, centerness0, labels1, reg_target1, centerness1, labels2, reg_target2, centerness2
class FCOS_Method_Exp(COCOBaseExp):
def __init__(self):
super().__init__()
# ---------------- architecture name ---------------- #
self.archi_name = 'FCOS'
# -------------- training config --------------------- #
self.max_epoch = 48
self.aug_epochs = 48 # number of initial epochs that use mixup, cutmix and mosaic
self.ema = True
self.ema_decay = 0.9998
self.weight_decay = 1e-4
self.momentum = 0.9
self.print_interval = 20
self.eval_interval = 2
self.exp_name = os.path.split(os.path.realpath(__file__))[1].split(".")[0]
# learning_rate
self.scheduler = "warm_piecewisedecay"
self.warmup_epochs = 1
self.basic_lr_per_img = 0.01 / 16.0
self.start_factor = 0.3333333333333333
self.decay_gamma = 0.1
self.milestones_epoch = [32, 44]
# ----------------- testing config ------------------ #
self.test_size = (512, 736)
# ---------------- model config ---------------- #
self.output_dir = "FCOS_outputs"
self.backbone_type = 'Resnet50Vb'
self.backbone = dict(
norm_type='bn',
feature_maps=[3, 4, 5],
dcn_v2_stages=[],
downsample_in3x3=False, # note this detail: downsampling is done in the 1x1 conv layer, i.e. Resnet50Va.
freeze_at=2,
fix_bn_mean_var_at=0,
freeze_norm=False,
norm_decay=0.,
)
self.fpn_type = 'FPN'
self.fpn = dict(
in_channels=[2048, 1024, 512],
num_chan=256,
min_level=3,
max_level=5,
spatial_scale=[0.03125, 0.0625, 0.125],
has_extra_convs=False,
use_c5=False,
reverse_out=False,
)
self.head_type = 'FCOSHead'
self.head = dict(
in_channel=256,
num_classes=self.num_classes,
fpn_stride=[8, 16, 32],
num_convs=4,
norm_type='gn',
norm_reg_targets=True,
thresh_with_ctr=True,
centerness_on_reg=True,
use_dcn_in_tower=False,
)
self.fcos_loss_type = 'FCOSLoss'
self.fcos_loss = dict(
loss_alpha=0.25,
loss_gamma=2.0,
iou_loss_type='giou', # linear_iou/giou/iou/ciou
reg_weights=1.0,
)
self.nms_cfg = dict(
nms_type='matrix_nms',
score_threshold=0.01,
post_threshold=0.01,
nms_top_k=500,
keep_top_k=100,
use_gaussian=False,
gaussian_sigma=2.,
)
# ---------------- preprocessing ---------------- #
self.context = {'fields': ['image', 'im_info', 'fcos_target']}
# DecodeImage
self.decodeImage = dict(
to_rgb=True,
with_mixup=True,
with_cutmix=False,
with_mosaic=False,
)
# MixupImage
self.mixupImage = dict(
alpha=1.5,
beta=1.5,
)
# CutmixImage
self.cutmixImage = dict(
alpha=1.5,
beta=1.5,
)
# MosaicImage
self.mosaicImage = dict(
alpha=1.5,
beta=1.5,
)
# RandomFlipImage
self.randomFlipImage = dict(
prob=0.5,
)
# NormalizeImage
self.normalizeImage = dict(
is_channel_first=False,
is_scale=False,
mean=[123.675, 116.28, 103.53],
std=[1.0, 1.0, 1.0],
)
# ResizeImage
# The shorter side of the image is scaled to the chosen target_size and the longer side is scaled by the same factor;
# if the longer side would then exceed max_size, the longer side is scaled to max_size instead and the shorter side
# follows proportionally. In both cases im_scale_x = im_scale and im_scale_y = im_scale.
# resize_box=True means the ground-truth boxes (x0y0x1y1 format) are scaled as well, with x and y multiplied by im_scale_x and im_scale_y respectively.
# resize_box=False means the ground-truth boxes are not scaled here, because Gt2FCOSTarget scales them later.
self.resizeImage = dict(
target_size=[256, 288, 320, 352, 384, 416, 448, 480, 512, 544, 576, 608],
max_size=900,
interp=1,
use_cv2=True,
resize_box=False,
)
# Permute
self.permute = dict(
to_bgr=False,
channel_first=True,
)
# PadBatch
self.padBatch = dict(
pad_to_stride=32, # pad with black borders so the image sides are divisible by pad_to_stride. pad_to_stride is the maximum downsampling ratio; this model goes up to p5, i.e. 32.
use_padded_im_info=False,
)
# Gt2FCOSTarget
self.gt2FCOSTarget = dict(
object_sizes_boundary=[64, 128],
center_sampling_radius=1.5,
downsample_ratios=[8, 16, 32],
norm_reg_targets=True,
)
# Preprocessing order. When adding data augmentations, register them here as well, otherwise train.py effectively ignores them!
self.sample_transforms_seq = []
self.sample_transforms_seq.append('decodeImage')
if self.decodeImage['with_mixup']:
self.sample_transforms_seq.append('mixupImage')
elif self.decodeImage['with_cutmix']:
self.sample_transforms_seq.append('cutmixImage')
elif self.decodeImage['with_mosaic']:
self.sample_transforms_seq.append('mosaicImage')
self.sample_transforms_seq.append('randomFlipImage')
self.sample_transforms_seq.append('normalizeImage')
self.sample_transforms_seq.append('resizeImage')
self.sample_transforms_seq.append('permute')
self.batch_transforms_seq = []
self.batch_transforms_seq.append('padBatch')
self.batch_transforms_seq.append('gt2FCOSTarget')
# ---------------- dataloader config ---------------- #
# Default is 4. If you get "OSError: [WinError 1455] The paging file is too small for this operation to complete", set it to 2 or 0.
self.data_num_workers = 2
def get_model(self):
from mmdet.models import Resnet50Vd, Resnet18Vd, Resnet50Vb
from mmdet.models.necks.fpn import FPN
from mmdet.models import FCOS, FCOSHead
from mmdet.models import FCOSLoss
if getattr(self, "model", None) is None:
Backbone = None
if self.backbone_type == 'Resnet50Vd':
Backbone = Resnet50Vd
elif self.backbone_type == 'Resnet18Vd':
Backbone = Resnet18Vd
elif self.backbone_type == 'Resnet50Vb':
Backbone = Resnet50Vb
backbone = Backbone(**self.backbone)
# freeze the backbone network
backbone.freeze()
backbone.fix_bn()
Fpn = None
if self.fpn_type == 'FPN':
Fpn = FPN
fpn = Fpn(**self.fpn)
fcos_loss = FCOSLoss(**self.fcos_loss)
head = FCOSHead(fcos_loss=fcos_loss, nms_cfg=self.nms_cfg, **self.head)
self.model = FCOS(backbone, fpn, head)
return self.model
def get_data_loader(
self, batch_size, is_distributed, cache_img=False
):
from mmdet.data import (
FCOS_COCOTrainDataset,
InfiniteSampler,
worker_init_reset_seed,
)
from mmdet.utils import (
wait_for_the_master,
get_local_rank,
)
local_rank = get_local_rank()
with wait_for_the_master(local_rank):
# data preprocessing for training
sample_transforms = get_sample_transforms(self)
batch_transforms = get_batch_transforms(self)
train_dataset = FCOS_COCOTrainDataset(
data_dir=self.data_dir,
json_file=self.train_ann,
ann_folder=self.ann_folder,
name=self.train_image_folder,
cfg=self,
sample_transforms=sample_transforms,
batch_size=batch_size,
)
self.dataset = train_dataset
self.n_layers = train_dataset.n_layers
if is_distributed:
batch_size = batch_size // dist.get_world_size()
sampler = InfiniteSampler(len(self.dataset), shuffle=True, seed=self.seed if self.seed else 0)
batch_sampler = torch.utils.data.sampler.BatchSampler(
sampler=sampler,
batch_size=batch_size,
drop_last=True,
)
dataloader_kwargs = {"num_workers": self.data_num_workers, "pin_memory": True}
dataloader_kwargs["batch_sampler"] = batch_sampler
# Make sure each process has different random seed, especially for 'fork' method.
# Check https://github.com/pytorch/pytorch/issues/63311 for more details.
dataloader_kwargs["worker_init_fn"] = worker_init_reset_seed
collater = FCOSTrainCollater(self.context, batch_transforms, self.n_layers)
train_loader = torch.utils.data.DataLoader(self.dataset, collate_fn=collater, **dataloader_kwargs)
return train_loader
def random_resize(self, data_loader, epoch, rank, is_distributed):
return 1
def preprocess(self, inputs, targets, tsize):
return 1
def get_optimizer(self, batch_size, param_groups, momentum, weight_decay):
if "optimizer" not in self.__dict__:
if self.warmup_epochs > 0:
lr = self.basic_lr_per_img * batch_size * self.start_factor
else:
lr = self.basic_lr_per_img * batch_size
optimizer = torch.optim.SGD(
param_groups, lr=lr, momentum=momentum, weight_decay=weight_decay
)
self.optimizer = optimizer
return self.optimizer
def get_lr_scheduler(self, lr, iters_per_epoch):
from mmdet.utils import LRScheduler
scheduler = LRScheduler(
self.scheduler,
lr,
iters_per_epoch,
self.max_epoch,
warmup_epochs=self.warmup_epochs,
warmup_lr_start=lr * self.start_factor,
milestones=self.milestones_epoch,
gamma=self.decay_gamma,
)
return scheduler
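# Usage sketch (hedged; assumes a single-GPU, non-distributed run and that the
# COCO-style dataset paths configured in COCOBaseExp exist on disk):
#
#   exp = FCOS_Method_Exp()
#   model = exp.get_model()
#   train_loader = exp.get_data_loader(batch_size=8, is_distributed=False)
#   optimizer = exp.get_optimizer(8, model.parameters(), exp.momentum, exp.weight_decay)
#   scheduler = exp.get_lr_scheduler(exp.basic_lr_per_img * 8, iters_per_epoch=500)  # iters_per_epoch is illustrative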
def get_eval_loader(self, batch_size, is_distributed, testdev=False):
from mmdet.data import FCOS_COCOEvalDataset
# data preprocessing for inference
# sample_transforms
decodeImage = DecodeImage(**self.decodeImage)
normalizeImage = NormalizeImage(**self.normalizeImage)
target_size = self.test_size[0]
max_size = self.test_size[1]
resizeImage = ResizeImage(target_size=target_size, max_size=max_size, interp=self.resizeImage['interp'],
use_cv2=self.resizeImage['use_cv2'])
permute = Permute(**self.permute)
# batch_transforms
padBatch = PadBatch(**self.padBatch)
sample_transforms = [decodeImage, normalizeImage, resizeImage, permute]
batch_transforms = [padBatch]
val_dataset = FCOS_COCOEvalDataset(
data_dir=self.data_dir,
json_file=self.val_ann if not testdev else "image_info_test-dev2017.json",
ann_folder=self.ann_folder,
name=self.val_image_folder if not testdev else "test2017",
cfg=self,
sample_transforms=sample_transforms,
)
if is_distributed:
batch_size = batch_size // dist.get_world_size()
sampler = torch.utils.data.distributed.DistributedSampler(
val_dataset, shuffle=False
)
else:
sampler = torch.utils.data.SequentialSampler(val_dataset)
# If validation after the configured number of FCOS training epochs raises BrokenPipeError: [Errno 32] Broken pipe, set num_workers to 0.
dataloader_kwargs = {
# "num_workers": self.data_num_workers,
"num_workers": 0,
"pin_memory": True,
"sampler": sampler,
}
dataloader_kwargs["batch_size"] = batch_size
collater = FCOSEvalCollater(self.context, batch_transforms)
val_loader = torch.utils.data.DataLoader(val_dataset, collate_fn=collater, **dataloader_kwargs)
return val_loader
def get_evaluator(self, batch_size, is_distributed, testdev=False):
from mmdet.evaluators import COCOEvaluator
val_loader = self.get_eval_loader(batch_size, is_distributed, testdev)
evaluator
is useful when a series of tasks with prerequisites must be run
sequentially. The prerequisites may be finished in any order, but the
tasks may only be run when all prerequisites are complete, and the
dependent task is also complete. Tasks may only depend on one other task.
For example, you might want to download block bodies and receipts at
random, but need to import them sequentially. Importing blocks is the ``task``,
downloading the parts is the ``prerequisite``, and a block's parent is its
``dependency``.
Below is a sketch of how to do that:
# The complete list of prerequisites to complete
class BlockDownloads(Enum):
receipts = auto()
bodies = auto()
block_import_tasks = OrderedTaskPreparation(
BlockDownloads,
# we use this method to extract an ID from the header:
lambda header: header.hash,
# we use this method to extract the ID of the dependency,
# so that we can guarantee that the parent block gets imported first
lambda header: header.parent_hash,
)
# We mark the genesis block as already imported, so header1 is ready
# as soon as its prerequisites are complete.
block_import_tasks.set_finished_dependency(header0)
# We register the tasks before completing any prerequisites
block_import_tasks.register_tasks((header1, header2, header3))
# Start download of bodies & receipts...
# They complete in random order
# we notify this class which prerequisites are complete:
block_import_tasks.finish_prereq(BlockDownloads.receipts, (header2, header3))
block_import_tasks.finish_prereq(BlockDownloads.bodies, (header1, header2))
# this await would hang, waiting on the receipt from header1:
# await block_import_tasks.ready_tasks()
block_import_tasks.finish_prereq(BlockDownloads.receipts, (header1, ))
# now we have all the necessary info to import blocks 1 and 2
headers_ready_to_import = await block_import_tasks.ready_tasks()
# these will always return in sequential order:
assert headers_ready_to_import == (header1, header2)
In a real implementation, you would have a loop waiting on
:meth:`ready_tasks` and import them, rather than interleaving them like
the above example.
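A minimal sketch of such a loop (``import_block`` is an illustrative coroutine,
not part of this class):
    while True:
        for header in await block_import_tasks.ready_tasks():
            await import_block(header)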
Note that this class does *not* track when the main tasks are
complete. It is assumed that the caller will complete the tasks in the
order they are returned by ready_tasks().
The memory needs of this class would naively be unbounded. Any newly
registered task might depend on any other task in history. To prevent
unbounded memory usage, old tasks are pruned after a configurable depth.
Pruning is triggered when `ready_tasks()` is called, starting from the
tail of the *previous* ready_tasks() result.
Vocab:
- prerequisites: all these must be completed for a task to be ready
(a necessary but not sufficient condition)
- ready: a task is ready after all its prereqs are completed, and the task it depends on is
also ready. The initial ready task is set with :meth:`set_finished_dependency`
"""
# methods to extract the id and dependency IDs out of a task
_id_of: StaticMethod[Callable[[TTask], TTaskID]]
_dependency_of: StaticMethod[Callable[[TTask], TTaskID]]
# by default, how long should the integrator wait before pruning?
_default_max_depth = 10 # not sure how to pick a good default here
_prereq_tracker: Type[BaseTaskPrerequisites[TTask, TPrerequisite]]
# track roots
_roots: RootTracker[TTaskID]
NoPrerequisites = NoPrerequisites
"""
This is a helper to identify that no prerequisites are required at all, only ordering of tasks
It can be used like so: `OrderedTaskPreparation(OrderedTaskPreparation.NoPrerequisites, ...)`
"""
def __init__(
self,
prerequisites: Type[TPrerequisite],
id_extractor: Callable[[TTask], TTaskID],
dependency_extractor: Callable[[TTask], TTaskID],
accept_dangling_tasks: bool = False,
max_depth: int = None) -> None:
self._prereq_tracker = BaseTaskPrerequisites.from_enum(prerequisites)
self._id_of = id_extractor
self._dependency_of = dependency_extractor
self._accept_dangling_tasks = accept_dangling_tasks
# how long to wait before pruning
if max_depth is None:
self._max_depth = self._default_max_depth
elif max_depth < 0:
raise ValidationError(f"The maximum depth must be at least 0, not {max_depth}")
else:
self._max_depth = max_depth
# all of the tasks that have been completed, and not pruned
self._tasks: Dict[TTaskID, BaseTaskPrerequisites[TTask, TPrerequisite]] = {}
# In self._dependencies, when the key becomes ready, the task ids in the
# value set *might* also become ready
# (they only become ready if their prerequisites are complete)
self._dependencies: Dict[TTaskID, Set[TTaskID]] = defaultdict(set)
# task ids are in this set if either:
# - one of their prerequisites is incomplete OR
# - their dependent task is not ready
self._unready: Set[TTaskID] = set()
# This is a queue of tasks that have become ready, in order.
# They wait in this Queue until being returned by ready_tasks().
self._ready_tasks: 'Queue[TTask]' = Queue()
# Declared finished with set_finished_dependency()
self._declared_finished: Set[TTaskID] = set()
self._roots = RootTracker()
self._last_yielded_tasks: Tuple[TTask, ...] = tuple()
def set_finished_dependency(self, finished_task: TTask) -> None:
"""
Mark this task as already finished. This is a bootstrapping method. In general,
tasks are marked as finished by :meth:`finish_prereq`. But how do we know which task is
first, and that its dependency is complete? We call `set_finished_dependency`.
Since a task can only become ready when its dependent
task is ready, the first result from ready_tasks will be dependent on
finished_task set in this method. (More precisely, it will be dependent on *one of*
the ``finished_task`` objects set with this method, since the method may be called
multiple times)
"""
completed = self._prereq_tracker(finished_task)
completed.set_complete()
task_id = self._id_of(finished_task)
if task_id in self._tasks:
raise DuplicateTasks(
f"Can't set a new finished dependency {finished_task} id:{task_id}, "
"it's already registered",
(finished_task, ),
)
self._tasks[task_id] = completed
self._declared_finished.add(task_id)
dependency_id = self._dependency_of(finished_task)
self._roots.add(task_id, dependency_id)
if dependency_id in self._tasks:
# set a finished dependency that has a parent already entered. Mark this as a dependency
self._dependencies[dependency_id].add(task_id)
# note that this task is intentionally *not* added to self._unready
def register_tasks(self, tasks: Tuple[TTask, ...], ignore_duplicates: bool = False) -> None:
"""
Initiate a task into tracking. By default, each task must be registered
*after* its dependency has been registered.
If you want to be able to register non-contiguous tasks, you can
initialize this instance with: ``accept_dangling_tasks=True``.
:param tasks: the tasks to register, in iteration order
:param ignore_duplicates: any tasks that have already been registered will be ignored,
whether ready or not
"""
identified_tasks = tuple((self._id_of(task), task) for task in tasks)
duplicates = tuple(task for task_id, task in identified_tasks if task_id in self._tasks)
if duplicates and not ignore_duplicates:
raise DuplicateTasks(
f"Cannot re-register tasks: {duplicates!r} for completion",
duplicates,
)
task_meta_info = tuple(
(self._prereq_tracker(task), task_id, self._dependency_of(task))
for task_id, task in identified_tasks
# when ignoring duplicates, must not try to re-add them
if task_id not in self._tasks
)
for prereq_tracker, task_id, dependency_id in task_meta_info:
if not self._accept_dangling_tasks and dependency_id not in self._tasks:
raise MissingDependency(
f"Cannot prepare task {prereq_tracker!r} with id {task_id} and "
f"dependency {dependency_id} before preparing its dependency "
f"among tasks {task_meta_info!r}, from the original registration: "
f"{tasks!r}."
)
else:
self._tasks[task_id] = prereq_tracker
self._unready.add(task_id)
self._dependencies[dependency_id].add(task_id)
self._roots.add(task_id, dependency_id)
if prereq_tracker.is_complete and self._is_ready(prereq_tracker.task):
# this is possible for tasks with 0 prerequisites (useful for pure ordering)
self._mark_complete(task_id)
def finish_prereq(self, prereq: TPrerequisite, tasks: Tuple[TTask, ...]) -> None:
"""For every task in tasks, mark the given prerequisite as completed"""
if len(self._tasks) == 0:
raise ValidationError("Cannot finish a task until set_last_completion() is called")
for task in tasks:
task_id = self._id_of(task)
if task_id not in self._tasks:
raise ValidationError(f"Cannot finish task {task_id!r} before preparing it")
elif task_id not in self._unready:
raise ValidationError(
f"Cannot finish prereq {prereq} of task {task} id:{task_id!r} that is complete"
)
task_completion = self._tasks[task_id]
task_completion.finish(prereq)
if task_completion.is_complete and self._is_ready(task):
self._mark_complete(task_id)
async def ready_tasks(self, max_results: int = None) -> Tuple[TTask, ...]:
"""
Return the next batch of tasks that are ready to process. If none are ready,
hang until at least one task becomes ready.
"""
for completed_task in self._last_yielded_tasks:
task_id = self._id_of(completed_task)
# Attempt pruning at least twice (to eventually catch up after forks)
# re-running is okay, because pruning limits the prune depth
self._prune_finished(task_id)
self._prune_finished(task_id)
self._last_yielded_tasks = await queue_get_batch(self._ready_tasks, max_results)
return self._last_yielded_tasks
def has_ready_tasks(self) -> bool:
return not self._ready_tasks.empty()
def _is_ready(self, task: TTask) -> bool:
dependency = self._dependency_of(task)
if dependency in self._declared_finished:
# Ready by declaration
return True
elif dependency in self._tasks and dependency not in self._unready:
# Ready by insertion and tracked completion
return True
else:
return False
def _mark_complete(self, task_id: TTaskID) -> None:
qualified_tasks = tuple([task_id])
while qualified_tasks:
qualified_tasks
lib in libs_found:
continue
if self.clib_compiler:
args = self.clib_compiler.find_library(lib[2:], self.env,
libpaths, libtype)
# If the project only uses a non-clib language such as D, Rust,
# C#, Python, etc, all we can do is limp along by adding the
# arguments as-is and then adding the libpaths at the end.
else:
args = None
if args is not None:
libs_found.add(lib)
# Replace -l arg with full path to library if available
# else, library is either to be ignored, or is provided by
# the compiler, can't be resolved, and should be used as-is
if args:
if not args[0].startswith('-l'):
lib = args[0]
else:
continue
else:
# Library wasn't found, maybe we're looking in the wrong
# places or the library will be provided with LDFLAGS or
# LIBRARY_PATH from the environment (on macOS), and many
# other edge cases that we can't account for.
#
# Add all -L paths and use it as -lfoo
if lib in libs_notfound:
continue
if self.static:
mlog.warning('Static library {!r} not found for dependency {!r}, may '
'not be statically linked'.format(lib[2:], self.name))
libs_notfound.append(lib)
elif lib.endswith(".la"):
shared_libname = self.extract_libtool_shlib(lib)
shared_lib = os.path.join(os.path.dirname(lib), shared_libname)
if not os.path.exists(shared_lib):
shared_lib = os.path.join(os.path.dirname(lib), ".libs", shared_libname)
if not os.path.exists(shared_lib):
raise DependencyException('Got a libtool specific "%s" dependency, '
                          'but we could not compute the actual shared '
                          'library path' % lib)
self.is_libtool = True
lib = shared_lib
if lib in link_args:
continue
link_args.append(lib)
# Add all -Lbar args if we have -lfoo args in link_args
if libs_notfound:
# Order of -L flags doesn't matter with ld, but it might with other
# linkers such as MSVC, so prepend them.
link_args = ['-L' + lp for lp in prefix_libpaths] + link_args
return link_args, raw_link_args
def _set_libs(self):
env = None
libcmd = [self.name, '--libs']
if self.static:
libcmd.append('--static')
# Force pkg-config to output -L fields even if they are system
# paths so we can do manual searching with cc.find_library() later.
env = os.environ.copy()
env['PKG_CONFIG_ALLOW_SYSTEM_LIBS'] = '1'
ret, out = self._call_pkgbin(libcmd, env=env)
if ret != 0:
raise DependencyException('Could not generate libs for %s:\n\n%s' %
(self.name, out))
# Also get the 'raw' output without -Lfoo system paths for adding -L
# args with -lfoo when a library can't be found, and also in
# gnome.generate_gir + gnome.gtkdoc which need -L -l arguments.
ret, out_raw = self._call_pkgbin(libcmd)
if ret != 0:
raise DependencyException('Could not generate libs for %s:\n\n%s' %
(self.name, out_raw))
self.link_args, self.raw_link_args = self._search_libs(out, out_raw)
def get_pkgconfig_variable(self, variable_name, kwargs):
options = ['--variable=' + variable_name, self.name]
if 'define_variable' in kwargs:
definition = kwargs.get('define_variable', [])
if not isinstance(definition, list):
raise MesonException('define_variable takes a list')
if len(definition) != 2 or not all(isinstance(i, str) for i in definition):
raise MesonException('define_variable must be made up of 2 strings for VARIABLENAME and VARIABLEVALUE')
options = ['--define-variable=' + '='.join(definition)] + options
ret, out = self._call_pkgbin(options)
variable = ''
if ret != 0:
if self.required:
raise DependencyException('dependency %s not found.' %
(self.name))
else:
variable = out.strip()
# pkg-config doesn't distinguish between empty and non-existent variables
# use the variable list to check for variable existence
if not variable:
ret, out = self._call_pkgbin(['--print-variables', self.name])
if not re.search(r'^' + variable_name + r'$', out, re.MULTILINE):
if 'default' in kwargs:
variable = kwargs['default']
else:
mlog.warning("pkgconfig variable '%s' not defined for dependency %s." % (variable_name, self.name))
mlog.debug('Got pkgconfig variable %s : %s' % (variable_name, variable))
return variable
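# Illustrative usage (variable names and returned values depend on the
# installed .pc file; they are not defined by this class):
#   dep.get_pkgconfig_variable('libdir', {})   -> e.g. '/usr/lib'
#   dep.get_pkgconfig_variable('girdir', {'define_variable': ['datadir', '/opt/share']})
#       -> girdir re-evaluated with the datadir variable overridden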
@staticmethod
def get_methods():
return [DependencyMethods.PKGCONFIG]
def check_pkgconfig(self):
evar = 'PKG_CONFIG'
if evar in os.environ:
pkgbin = os.environ[evar].strip()
else:
pkgbin = 'pkg-config'
pkgbin = ExternalProgram(pkgbin, silent=True)
if pkgbin.found():
try:
p, out = Popen_safe(pkgbin.get_command() + ['--version'])[0:2]
if p.returncode != 0:
mlog.warning('Found pkg-config {!r} but couldn\'t run it'
''.format(' '.join(pkgbin.get_command())))
# Set to False instead of None to signify that we've already
# searched for it and not found it
pkgbin = False
except (FileNotFoundError, PermissionError):
pkgbin = False
else:
pkgbin = False
if not self.silent:
if pkgbin:
mlog.log('Found pkg-config:', mlog.bold(pkgbin.get_path()),
'(%s)' % out.strip())
else:
mlog.log('Found pkg-config:', mlog.red('NO'))
return pkgbin
def extract_field(self, la_file, fieldname):
with open(la_file) as f:
for line in f:
arr = line.strip().split('=')
if arr[0] == fieldname:
return arr[1][1:-1]
return None
def extract_dlname_field(self, la_file):
return self.extract_field(la_file, 'dlname')
def extract_libdir_field(self, la_file):
return self.extract_field(la_file, 'libdir')
def extract_libtool_shlib(self, la_file):
'''
Returns the path to the shared library
corresponding to this .la file
'''
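# For reference, a libtool archive (.la) is a plain key=value text file; typical
# fields (illustrative values) look like:
#   dlname='libfoo.so.1'
#   libdir='/usr/local/lib'
# extract_field() above returns the value between the single quotes for a field.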
dlname = self.extract_dlname_field(la_file)
if dlname is None:
return None
# Darwin uses absolute paths where possible; since the libtool files never
# contain absolute paths, use the libdir field
if mesonlib.is_osx():
dlbasename = os.path.basename(dlname)
libdir = self.extract_libdir_field(la_file)
if libdir is None:
return dlbasename
return os.path.join(libdir, dlbasename)
# From the comments in extract_libtool(), older libtools had
# a path rather than the raw dlname
return os.path.basename(dlname)
def log_tried(self):
return self.type_name
class DubDependency(ExternalDependency):
class_dubbin = None
def __init__(self, name, environment, kwargs):
super().__init__('dub', environment, 'd', kwargs)
self.name = name
self.compiler = super().get_compiler()
self.module_path = None
if 'required' in kwargs:
self.required = kwargs.get('required')
if DubDependency.class_dubbin is None:
self.dubbin = self._check_dub()
DubDependency.class_dubbin = self.dubbin
else:
self.dubbin = DubDependency.class_dubbin
if not self.dubbin:
if self.required:
raise DependencyException('DUB not found.')
self.is_found = False
return
mlog.debug('Determining dependency {!r} with DUB executable '
'{!r}'.format(name, self.dubbin.get_path()))
# we need to know the target architecture
arch = self.compiler.arch
# Ask dub for the package
ret, res = self._call_dubbin(['describe', name, '--arch=' + arch])
if ret != 0:
self.is_found = False
return
comp = self.compiler.get_id().replace('llvm', 'ldc').replace('gcc', 'gdc')
packages = []
description = json.loads(res)
for package in description['packages']:
packages.append(package['name'])
if package['name'] == name:
self.is_found = True
not_lib = True
if 'targetType' in package:
if package['targetType'] == 'library':
not_lib = False
if not_lib:
mlog.error(mlog.bold(name), "found but it isn't a library")
self.is_found = False
return
self.module_path = self._find_right_lib_path(package['path'], comp, description, True, package['targetFileName'])
if not os.path.exists(self.module_path):
# check if the dependency was built for other archs
archs = [['x86_64'], ['x86'], ['x86', 'x86_mscoff']]
for a in archs:
description_a = copy.deepcopy(description)
description_a['architecture'] = a
arch_module_path = self._find_right_lib_path(package['path'], comp, description_a, True, package['targetFileName'])
if arch_module_path:
mlog.error(mlog.bold(name), "found but it wasn't compiled for", mlog.bold(arch))
self.is_found = False
return
mlog.error(mlog.bold(name), "found but it wasn't compiled with", mlog.bold(comp))
self.is_found = False
return
self.version = package['version']
self.pkg = package
if self.pkg['targetFileName'].endswith('.a'):
self.static = True
self.compile_args = []
for flag in self.pkg['dflags']:
self.compile_args.append(flag)
for path in self.pkg['importPaths']:
self.compile_args.append('-I' + os.path.join(self.pkg['path'], path))
self.link_args = self.raw_link_args = []
for flag in self.pkg['lflags']:
self.link_args.append(flag)
self.link_args.append(os.path.join(self.module_path, self.pkg['targetFileName']))
# Handle dependencies
libs = []
def add_lib_args(field_name, target):
if field_name in target['buildSettings']:
for lib in target['buildSettings'][field_name]:
if lib not in libs:
libs.append(lib)
if os.name != 'nt':
pkgdep = PkgConfigDependency(lib, environment, {'required': 'true', 'silent': 'true'})
for arg in pkgdep.get_compile_args():
self.compile_args.append(arg)
for arg in pkgdep.get_link_args():
self.link_args.append(arg)
for arg in pkgdep.get_link_args(raw=True):
self.raw_link_args.append(arg)
for target in description['targets']:
if target['rootPackage'] in packages:
add_lib_args('libs', target)
add_lib_args('libs-{}'.format(platform.machine()), target)
for file in target['buildSettings']['linkerFiles']:
lib_path = self._find_right_lib_path(file, comp, description)
if lib_path:
self.link_args.append(lib_path)
else:
self.is_found = False
def get_compiler(self):
return self.compiler
def _find_right_lib_path(self, default_path, comp, description, folder_only=False, file_name=''):
module_path = lib_file_name = ''
if folder_only:
module_path = default_path
lib_file_name = file_name
else:
module_path = os.path.dirname(default_path)
lib_file_name = os.path.basename(default_path)
module_build_path = os.path.join(module_path, '.dub', 'build')
# Get D version implemented in the compiler
# gdc doesn't support this
ret, res = self._call_dubbin(['--version'])
if ret != 0:
mlog.error('Failed to run', mlog.bold(comp))
return
d_ver = re.search(r'v[0-9]\.[0-9][0-9][0-9]\.[0-9]', res) # Ex.: v2.081.2
if d_ver is not None:
d_ver = d_ver.group().rsplit('.', 1)[0].replace('v', '').replace('.', '') # Fix structure. Ex.: 2081
else:
d_ver = '' # gdc
if not os.path.isdir(module_build_path):
return ''
# Ex.: library-debug-linux.posix-x86_64-ldc_2081-EF934983A3319F8F8FF2F0E107A363BA
build_name = 'library-{}-{}-{}-{}_{}'.format(description['buildType'], '.'.join(description['platform']), '.'.join(description['architecture']), comp, d_ver)
for entry in os.listdir(module_build_path):
if entry.startswith(build_name):
for file in os.listdir(os.path.join(module_build_path, entry)):
if file == lib_file_name:
if folder_only:
return os.path.join(module_build_path, entry)
else:
return os.path.join(module_build_path, entry, lib_file_name)
return ''
def _call_dubbin(self, args, env=None):
p, out = Popen_safe(self.dubbin.get_command() + args, env=env)[0:2]
return p.returncode, out.strip()
def _call_copmbin(self, args, env=None):
p, out = Popen_safe(self.compiler.get_exelist() + args, env=env)[0:2]
return p.returncode, out.strip()
def _check_dub(self):
dubbin = ExternalProgram('dub', silent=True)
if dubbin.found():
try:
p, out = Popen_safe(dubbin.get_command() + ['--version'])[0:2]
if p.returncode != 0:
| |
# Generated from RuleLexerPy.g4 by ANTLR 4.7.2
from antlr4 import *
from io import StringIO
from typing import List, TextIO
import sys
from antlr4.Token import CommonToken
from antlr4.RuleContext import RuleContext
from antlr4.Token import Token
from RuleParserPy import RuleParserPy
def serializedATN():
with StringIO() as buf:
buf.write("\3\u608b\ua72a\u8133\ub9ed\u417c\u3be7\u7786\u5964\2J")
buf.write("\u02cf\b\1\b\1\4\2\t\2\4\3\t\3\4\4\t\4\4\5\t\5\4\6\t\6")
buf.write("\4\7\t\7\4\b\t\b\4\t\t\t\4\n\t\n\4\13\t\13\4\f\t\f\4\r")
buf.write("\t\r\4\16\t\16\4\17\t\17\4\20\t\20\4\21\t\21\4\22\t\22")
buf.write("\4\23\t\23\4\24\t\24\4\25\t\25\4\26\t\26\4\27\t\27\4\30")
buf.write("\t\30\4\31\t\31\4\32\t\32\4\33\t\33\4\34\t\34\4\35\t\35")
buf.write("\4\36\t\36\4\37\t\37\4 \t \4!\t!\4\"\t\"\4#\t#\4$\t$\4")
buf.write("%\t%\4&\t&\4\'\t\'\4(\t(\4)\t)\4*\t*\4+\t+\4,\t,\4-\t")
buf.write("-\4.\t.\4/\t/\4\60\t\60\4\61\t\61\4\62\t\62\4\63\t\63")
buf.write("\4\64\t\64\4\65\t\65\4\66\t\66\4\67\t\67\48\t8\49\t9\4")
buf.write(":\t:\4;\t;\4<\t<\4=\t=\4>\t>\4?\t?\4@\t@\4A\tA\4B\tB\4")
buf.write("C\tC\4D\tD\4E\tE\4F\tF\4G\tG\4H\tH\4I\tI\4J\tJ\4K\tK\4")
buf.write("L\tL\4M\tM\4N\tN\4O\tO\4P\tP\4Q\tQ\4R\tR\4S\tS\4T\tT\4")
buf.write("U\tU\4V\tV\4W\tW\4X\tX\4Y\tY\4Z\tZ\4[\t[\4\\\t\\\4]\t")
buf.write("]\4^\t^\4_\t_\4`\t`\4a\ta\4b\tb\4c\tc\4d\td\4e\te\4f\t")
buf.write("f\3\2\3\2\5\2\u00d1\n\2\3\3\3\3\3\4\3\4\5\4\u00d7\n\4")
buf.write("\3\5\3\5\3\5\3\5\3\5\3\5\3\6\3\6\3\6\3\6\3\6\3\7\3\7\3")
buf.write("\7\3\7\3\7\3\b\3\b\3\b\3\b\3\b\3\t\3\t\3\t\3\t\3\n\3\n")
buf.write("\3\n\3\n\3\n\3\n\3\n\3\13\3\13\3\13\3\f\3\f\3\f\3\r\3")
buf.write("\r\3\r\3\16\3\16\3\16\3\16\3\16\3\17\3\17\3\17\3\17\3")
buf.write("\17\3\20\3\20\3\20\3\20\3\20\3\20\3\21\3\21\3\21\3\21")
buf.write("\3\22\3\22\3\22\3\23\3\23\3\23\3\23\3\24\3\24\3\24\3\24")
buf.write("\3\25\3\25\3\25\3\25\3\25\3\26\3\26\3\26\3\26\3\26\3\26")
buf.write("\3\27\3\27\3\27\3\27\3\27\3\27\3\27\3\27\3\27\3\30\3\30")
buf.write("\3\30\3\30\3\30\3\30\3\31\3\31\3\31\3\31\3\31\3\31\3\32")
buf.write("\3\32\3\32\5\32\u0144\n\32\3\32\3\32\5\32\u0148\n\32\3")
buf.write("\32\5\32\u014b\n\32\5\32\u014d\n\32\3\32\3\32\3\33\3\33")
buf.write("\7\33\u0153\n\33\f\33\16\33\u0156\13\33\3\34\3\34\3\34")
buf.write("\3\34\3\34\5\34\u015d\n\34\3\34\3\34\5\34\u0161\n\34\3")
buf.write("\35\3\35\3\35\3\35\3\35\5\35\u0168\n\35\3\35\3\35\5\35")
buf.write("\u016c\n\35\3\36\3\36\7\36\u0170\n\36\f\36\16\36\u0173")
buf.write("\13\36\3\36\6\36\u0176\n\36\r\36\16\36\u0177\5\36\u017a")
buf.write("\n\36\3\37\3\37\3\37\6\37\u017f\n\37\r\37\16\37\u0180")
buf.write("\3 \3 \3!\3!\3\"\3\"\3\"\3#\3#\3#\3$\3$\3%\3%\3&\3&\3")
buf.write("\'\3\'\3\'\3(\3(\3)\3)\3)\3*\3*\3*\3+\3+\3,\3,\3-\3-\3")
buf.write(".\3.\3.\3/\3/\3/\3\60\3\60\3\61\3\61\3\62\3\62\3\63\3")
buf.write("\63\3\64\3\64\3\65\3\65\3\65\3\66\3\66\3\66\3\67\3\67")
buf.write("\38\38\39\39\39\3:\3:\3:\3;\3;\3;\3<\3<\3<\3=\3=\3=\3")
buf.write(">\3>\3>\3?\3?\3?\3@\3@\3@\5@\u01d6\n@\3@\3@\3A\3A\3B\3")
buf.write("B\3B\7B\u01df\nB\fB\16B\u01e2\13B\3B\3B\3B\3B\7B\u01e8")
buf.write("\nB\fB\16B\u01eb\13B\3B\5B\u01ee\nB\3C\3C\3C\3C\3C\7C")
buf.write("\u01f5\nC\fC\16C\u01f8\13C\3C\3C\3C\3C\3C\3C\3C\3C\7C")
buf.write("\u0202\nC\fC\16C\u0205\13C\3C\3C\3C\5C\u020a\nC\3D\3D")
buf.write("\5D\u020e\nD\3E\3E\3F\3F\3F\3F\5F\u0216\nF\3G\3G\3H\3")
buf.write("H\3I\3I\3J\3J\3K\3K\3L\5L\u0223\nL\3L\3L\3L\3L\5L\u0229")
buf.write("\nL\3M\6M\u022c\nM\rM\16M\u022d\3N\3N\6N\u0232\nN\rN\16")
buf.write("N\u0233\3O\3O\3O\7O\u0239\nO\fO\16O\u023c\13O\3O\3O\3")
buf.write("O\3O\7O\u0242\nO\fO\16O\u0245\13O\3O\5O\u0248\nO\3P\3")
buf.write("P\3P\3P\3P\7P\u024f\nP\fP\16P\u0252\13P\3P\3P\3P\3P\3")
buf.write("P\3P\3P\3P\7P\u025c\nP\fP\16P\u025f\13P\3P\3P\3P\5P\u0264")
buf.write("\nP\3Q\3Q\5Q\u0268\nQ\3R\5R\u026b\nR\3S\5S\u026e\nS\3")
buf.write("T\5T\u0271\nT\3U\3U\3U\3V\6V\u0277\nV\rV\16V\u0278\3W")
buf.write("\3W\7W\u027d\nW\fW\16W\u0280\13W\3X\3X\5X\u0284\nX\3X")
buf.write("\5X\u0287\nX\3X\3X\5X\u028b\nX\3Y\5Y\u028e\nY\3Z\3Z\5")
buf.write("Z\u0292\nZ\3[\3[\3[\3[\3[\3\\\3\\\3\\\3\\\3]\3]\3]\3]")
buf.write("\3]\3^\3^\3_\3_\3`\3`\7`\u02a8\n`\f`\16`\u02ab\13`\3`")
buf.write("\3`\3`\7`\u02b0\n`\f`\16`\u02b3\13`\3`\5`\u02b6\n`\3a")
buf.write("\3a\7a\u02ba\na\fa\16a\u02bd\13a\3b\3b\3b\3b\3c\3c\3d")
buf.write("\3d\3e\3e\3e\3e\5e\u02cb\ne\3f\5f\u02ce\nf\6\u01f6\u0203")
buf.write("\u0250\u025d\2g\4\3\6\4\b\5\n\6\f\7\16\b\20\t\22\n\24")
buf.write("\13\26\f\30\r\32\16\34\17\36\20 \21\"\22$\23&\24(\25*")
buf.write("\26,\27.\30\60\31\62\32\64\33\66\348\35:\36<\37> @!B\"")
buf.write("D#F$H%J&L\'N(P)R*T+V,X-Z.\\/^\60`\61b\62d\63f\64h\65j")
buf.write("\66l\67n8p9r:t;v<x=z>|?~@\u0080A\u0082B\u0084\2\u0086")
buf.write("\2\u0088\2\u008a\2\u008c\2\u008e\2\u0090\2\u0092\2\u0094")
buf.write("\2\u0096\2\u0098\2\u009a\2\u009c\2\u009e\2\u00a0\2\u00a2")
buf.write("\2\u00a4\2\u00a6\2\u00a8\2\u00aa\2\u00ac\2\u00ae\2\u00b0")
buf.write("\2\u00b2\2\u00b4\2\u00b6C\u00b8D\u00baE\u00bcF\u00beG")
buf.write("\u00c0H\u00c2I\u00c4J\u00c6\2\u00c8\2\u00ca\2\u00cc\2")
buf.write("\4\2\3\35\b\2HHTTWWhhttww\4\2HHhh\4\2TTtt\4\2DDdd\4\2")
buf.write("ZZzz\6\2\f\f\16\17))^^\6\2\f\f\16\17$$^^\3\2^^\3\2\63")
buf.write(";\3\2\62;\3\2\629\5\2\62;CHch\3\2\62\63\7\2\2\13\r\16")
buf.write("\20(*]_\u0081\7\2\2\13\r\16\20#%]_\u0081\4\2\2]_\u0081")
buf.write("\3\2\2\u0081\4\2\13\13\"\"\4\2\f\f\16\17\u0129\2C\\aa")
buf.write("c|\u00ac\u00ac\u00b7\u00b7\u00bc\u00bc\u00c2\u00d8\u00da")
buf.write("\u00f8\u00fa\u0243\u0252\u02c3\u02c8\u02d3\u02e2\u02e6")
buf.write("\u02f0\u02f0\u037c\u037c\u0388\u0388\u038a\u038c\u038e")
buf.write("\u038e\u0390\u03a3\u03a5\u03d0\u03d2\u03f7\u03f9\u0483")
buf.write("\u048c\u04d0\u04d2\u04fb\u0502\u0511\u0533\u0558\u055b")
buf.write("\u055b\u0563\u0589\u05d2\u05ec\u05f2\u05f4\u0623\u063c")
buf.write("\u0642\u064c\u0670\u0671\u0673\u06d5\u06d7\u06d7\u06e7")
buf.write("\u06e8\u06f0\u06f1\u06fc\u06fe\u0701\u0701\u0712\u0712")
buf.write("\u0714\u0731\u074f\u076f\u0782\u07a7\u07b3\u07b3\u0906")
buf.write("\u093b\u093f\u093f\u0952\u0952\u095a\u0963\u097f\u097f")
buf.write("\u0987\u098e\u0991\u0992\u0995\u09aa\u09ac\u09b2\u09b4")
buf.write("\u09b4\u09b8\u09bb\u09bf\u09bf\u09d0\u09d0\u09de\u09df")
buf.write("\u09e1\u09e3\u09f2\u09f3\u0a07\u0a0c\u0a11\u0a12\u0a15")
buf.write("\u0a2a\u0a2c\u0a32\u0a34\u0a35\u0a37\u0a38\u0a3a\u0a3b")
buf.write("\u0a5b\u0a5e\u0a60\u0a60\u0a74\u0a76\u0a87\u0a8f\u0a91")
buf.write("\u0a93\u0a95\u0aaa\u0aac\u0ab2\u0ab4\u0ab5\u0ab7\u0abb")
buf.write("\u0abf\u0abf\u0ad2\u0ad2\u0ae2\u0ae3\u0b07\u0b0e\u0b11")
buf.write("\u0b12\u0b15\u0b2a\u0b2c\u0b32\u0b34\u0b35\u0b37\u0b3b")
buf.write("\u0b3f\u0b3f\u0b5e\u0b5f\u0b61\u0b63\u0b73\u0b73\u0b85")
buf.write("\u0b85\u0b87\u0b8c\u0b90\u0b92\u0b94\u0b97\u0b9b\u0b9c")
buf.write("\u0b9e\u0b9e\u0ba0\u0ba1\u0ba5\u0ba6\u0baa\u0bac\u0bb0")
buf.write("\u0bbb\u0c07\u0c0e\u0c10\u0c12\u0c14\u0c2a\u0c2c\u0c35")
buf.write("\u0c37\u0c3b\u0c62\u0c63\u0c87\u0c8e\u0c90\u0c92\u0c94")
buf.write("\u0caa\u0cac\u0cb5\u0cb7\u0cbb\u0cbf\u0cbf\u0ce0\u0ce0")
buf.write("\u0ce2\u0ce3\u0d07\u0d0e\u0d10\u0d12\u0d14\u0d2a\u0d2c")
buf.write("\u0d3b\u0d62\u0d63\u0d87\u0d98\u0d9c\u0db3\u0db5\u0dbd")
buf.write("\u0dbf\u0dbf\u0dc2\u0dc8\u0e03\u0e32\u0e34\u0e35\u0e42")
buf.write("\u0e48\u0e83\u0e84\u0e86\u0e86\u0e89\u0e8a\u0e8c\u0e8c")
buf.write("\u0e8f\u0e8f\u0e96\u0e99\u0e9b\u0ea1\u0ea3\u0ea5\u0ea7")
buf.write("\u0ea7\u0ea9\u0ea9\u0eac\u0ead\u0eaf\u0eb2\u0eb4\u0eb5")
buf.write("\u0ebf\u0ebf\u0ec2\u0ec6\u0ec8\u0ec8\u0ede\u0edf\u0f02")
buf.write("\u0f02\u0f42\u0f49\u0f4b\u0f6c\u0f8a\u0f8d\u1002\u1023")
buf.write("\u1025\u1029\u102b\u102c\u1052\u1057\u10a2\u10c7\u10d2")
buf.write("\u10fc\u10fe\u10fe\u1102\u115b\u1161\u11a4\u11aa\u11fb")
buf.write("\u1202\u124a\u124c\u124f\u1252\u1258\u125a\u125a\u125c")
buf.write("\u125f\u1262\u128a\u128c\u128f\u1292\u12b2\u12b4\u12b7")
buf.write("\u12ba\u12c0\u12c2\u12c2\u12c4\u12c7\u12ca\u12d8\u12da")
buf.write("\u1312\u1314\u1317\u131a\u135c\u1382\u1391\u13a2\u13f6")
buf.write("\u1403\u166e\u1671\u1678\u1683\u169c\u16a2\u16ec\u16f0")
buf.write("\u16f2\u1702\u170e\u1710\u1713\u1722\u1733\u1742\u1753")
buf.write("\u1762\u176e\u1770\u1772\u1782\u17b5\u17d9\u17d9\u17de")
buf.write("\u17de\u1822\u1879\u1882\u18aa\u1902\u191e\u1952\u196f")
buf.write("\u1972\u1976\u1982\u19ab\u19c3\u19c9\u1a02\u1a18\u1d02")
buf.write("\u1dc1\u1e02\u1e9d\u1ea2\u1efb\u1f02\u1f17\u1f1a\u1f1f")
buf.write("\u1f22\u1f47\u1f4a\u1f4f\u1f52\u1f59\u1f5b\u1f5b\u1f5d")
buf.write("\u1f5d\u1f5f\u1f5f\u1f61\u1f7f\u1f82\u1fb6\u1fb8\u1fbe")
buf.write("\u1fc0\u1fc0\u1fc4\u1fc6\u1fc8\u1fce\u1fd2\u1fd5\u1fd8")
buf.write("\u1fdd\u1fe2\u1fee\u1ff4\u1ff6\u1ff8\u1ffe\u2073\u2073")
buf.write("\u2081\u2081\u2092\u2096\u2104\u2104\u2109\u2109\u210c")
buf.write("\u2115\u2117\u2117\u211a\u211f\u2126\u2126\u2128\u2128")
buf.write("\u212a\u212a\u212c\u2133\u2135\u213b\u213e\u2141\u2147")
buf.write("\u214b\u2162\u2185\u2c02\u2c30\u2c32\u2c60\u2c82\u2ce6")
buf.write("\u2d02\u2d27\u2d32\u2d67\u2d71\u2d71\u2d82\u2d98\u2da2")
buf.write("\u2da8\u2daa\u2db0\u2db2\u2db8\u2dba\u2dc0\u2dc2\u2dc8")
buf.write("\u2dca\u2dd0\u2dd2\u2dd8\u2dda\u2de0\u3007\u3009\u3023")
buf.write("\u302b\u3033\u3037\u303a\u303e\u3043\u3098\u309d\u30a1")
buf.write("\u30a3\u30fc\u30fe\u3101\u3107\u312e\u3133\u3190\u31a2")
buf.write("\u31b9\u31f2\u3201\u3402\u4db7\u4e02\u9fbd\ua002\ua48e")
buf.write("\ua802\ua803\ua805\ua807\ua809\ua80c\ua80e\ua824\uac02")
buf.write("\ud7a5\uf902\ufa2f\ufa32\ufa6c\ufa72\ufadb\ufb02\ufb08")
buf.write("\ufb15\ufb19\ufb1f\ufb1f\ufb21\ufb2a\ufb2c\ufb38\ufb3a")
buf.write("\ufb3e\ufb40\ufb40\ufb42\ufb43\ufb45\ufb46\ufb48\ufbb3")
buf.write("\ufbd5\ufd3f\ufd52\ufd91\ufd94\ufdc9\ufdf2\ufdfd\ufe72")
buf.write("\ufe76\ufe78\ufefe\uff23\uff3c\uff43\uff5c\uff68\uffc0")
buf.write("\uffc4\uffc9\uffcc\uffd1\uffd4\uffd9\uffdc\uffde\u0096")
buf.write("\2\62;\u0302\u0371\u0485\u0488\u0593\u05bb\u05bd\u05bf")
buf.write("\u05c1\u05c1\u05c3\u05c4\u05c6\u05c7\u05c9\u05c9\u0612")
buf.write("\u0617\u064d\u0660\u0662\u066b\u0672\u0672\u06d8\u06de")
buf.write("\u06e1\u06e6\u06e9\u06ea\u06ec\u06ef\u06f2\u06fb\u0713")
buf.write("\u0713\u0732\u074c\u07a8\u07b2\u0903\u0905\u093e\u093e")
buf.write("\u0940\u094f\u0953\u0956\u0964\u0965\u0968\u0971\u0983")
buf.write("\u0985\u09be\u09be\u09c0\u09c6\u09c9\u09ca\u09cd\u09cf")
buf.write("\u09d9\u09d9\u09e4\u09e5\u09e8\u09f1\u0a03\u0a05\u0a3e")
buf.write("\u0a3e\u0a40\u0a44\u0a49\u0a4a\u0a4d\u0a4f\u0a68\u0a73")
buf.write("\u0a83\u0a85\u0abe\u0abe\u0ac0\u0ac7\u0ac9\u0acb\u0acd")
buf.write("\u0acf\u0ae4\u0ae5\u0ae8\u0af1\u0b03\u0b05\u0b3e\u0b3e")
buf.write("\u0b40\u0b45\u0b49\u0b4a\u0b4d\u0b4f\u0b58\u0b59\u0b68")
buf.write("\u0b71\u0b84\u0b84\u0bc0\u0bc4\u0bc8\u0bca\u0bcc\u0bcf")
buf.write("\u0bd9\u0bd9\u0be8\u0bf1\u0c03\u0c05\u0c40\u0c46\u0c48")
buf.write("\u0c4a\u0c4c\u0c4f\u0c57\u0c58\u0c68\u0c71\u0c84\u0c85")
buf.write("\u0cbe\u0cbe\u0cc0\u0cc6\u0cc8\u0cca\u0ccc\u0ccf\u0cd7")
buf.write("\u0cd8\u0ce8\u0cf1\u0d04\u0d05\u0d40\u0d45\u0d48\u0d4a")
buf.write("\u0d4c\u0d4f\u0d59\u0d59\u0d68\u0d71\u0d84\u0d85\u0dcc")
buf.write("\u0dcc\u0dd1\u0dd6\u0dd8\u0dd8\u0dda\u0de1\u0df4\u0df5")
buf.write("\u0e33\u0e33\u0e36\u0e3c\u0e49\u0e50\u0e52\u0e5b\u0eb3")
buf.write("\u0eb3\u0eb6\u0ebb\u0ebd\u0ebe\u0eca\u0ecf\u0ed2\u0edb")
buf.write("\u0f1a\u0f1b\u0f22\u0f2b\u0f37\u0f37\u0f39\u0f39\u0f3b")
buf.write("\u0f3b\u0f40\u0f41\u0f73\u0f86\u0f88\u0f89\u0f92\u0f99")
buf.write("\u0f9b\u0fbe\u0fc8\u0fc8\u102e\u1034\u1038\u103b\u1042")
buf.write("\u104b\u1058\u105b\u1361\u1361\u136b\u1373\u1714\u1716")
buf.write("\u1734\u1736\u1754\u1755\u1774\u1775\u17b8\u17d5\u17df")
buf.write("\u17df\u17e2\u17eb\u180d\u180f\u1812\u181b\u18ab\u18ab")
buf.write("\u1922\u192d\u1932\u193d\u1948\u1951\u19b2\u19c2\u19ca")
buf.write("\u19cb\u19d2\u19db\u1a19\u1a1d\u1dc2\u1dc5\u2041\u2042")
buf.write("\u2056\u2056\u20d2\u20de\u20e3\u20e3\u20e7\u20ed\u302c")
buf.write("\u3031\u309b\u309c\ua804\ua804\ua808\ua808\ua80d\ua80d")
buf.write("\ua825\ua829\ufb20\ufb20\ufe02\ufe11\ufe22\ufe25\ufe35")
buf.write("\ufe36\ufe4f\ufe51\uff12\uff1b\uff41\uff41\4\2$$>>\4\2")
buf.write("))>>\5\2\13\f\17\17\"\"\4\2/\60aa\5\2\u00b9\u00b9\u0302")
buf.write("\u0371\u2041\u2042\n\2<<C\\c|\u2072\u2191\u2c02\u2ff1")
buf.write("\u3003\ud801\uf902\ufdd1\ufdf2\uffff\2\u02e8\2\4\3\2\2")
buf.write("\2\2\6\3\2\2\2\2\b\3\2\2\2\2\n\3\2\2\2\2\f\3\2\2\2\2\16")
buf.write("\3\2\2\2\2\20\3\2\2\2\2\22\3\2\2\2\2\24\3\2\2\2\2\26\3")
buf.write("\2\2\2\2\30\3\2\2\2\2\32\3\2\2\2\2\34\3\2\2\2\2\36\3\2")
buf.write("\2\2\2 \3\2\2\2\2\"\3\2\2\2\2$\3\2\2\2\2&\3\2\2\2\2(\3")
buf.write("\2\2\2\2*\3\2\2\2\2,\3\2\2\2\2.\3\2\2\2\2\60\3\2\2\2\2")
buf.write("\62\3\2\2\2\2\64\3\2\2\2\2\66\3\2\2\2\28\3\2\2\2\2:\3")
buf.write("\2\2\2\2<\3\2\2\2\2>\3\2\2\2\2@\3\2\2\2\2B\3\2\2\2\2D")
buf.write("\3\2\2\2\2F\3\2\2\2\2H\3\2\2\2\2J\3\2\2\2\2L\3\2\2\2\2")
buf.write("N\3\2\2\2\2P\3\2\2\2\2R\3\2\2\2\2T\3\2\2\2\2V\3\2\2\2")
buf.write("\2X\3\2\2\2\2Z\3\2\2\2\2\\\3\2\2\2\2^\3\2\2\2\2`\3\2\2")
buf.write("\2\2b\3\2\2\2\2d\3\2\2\2\2f\3\2\2\2\2h\3\2\2\2\2j\3\2")
buf.write("\2\2\2l\3\2\2\2\2n\3\2\2\2\2p\3\2\2\2\2r\3\2\2\2\2t\3")
buf.write("\2\2\2\2v\3\2\2\2\2x\3\2\2\2\2z\3\2\2\2\2|\3\2\2\2\2~")
buf.write("\3\2\2\2\2\u0080\3\2\2\2\2\u0082\3\2\2\2\2\u00b6\3\2\2")
buf.write("\2\3\u00b8\3\2\2\2\3\u00ba\3\2\2\2\3\u00bc\3\2\2\2\3\u00be")
buf.write("\3\2\2\2\3\u00c0\3\2\2\2\3\u00c2\3\2\2\2\3\u00c4\3\2\2")
buf.write("\2\4\u00d0\3\2\2\2\6\u00d2\3\2\2\2\b\u00d6\3\2\2\2\n\u00d8")
buf.write("\3\2\2\2\f\u00de\3\2\2\2\16\u00e3\3\2\2\2\20\u00e8\3\2")
buf.write("\2\2\22\u00ed\3\2\2\2\24\u00f1\3\2\2\2\26\u00f8\3\2\2")
buf.write("\2\30\u00fb\3\2\2\2\32\u00fe\3\2\2\2\34\u0101\3\2\2\2")
buf.write("\36\u0106\3\2\2\2 \u010b\3\2\2\2\"\u0111\3\2\2\2$\u0115")
buf.write("\3\2\2\2&\u0118\3\2\2\2(\u011c\3\2\2\2*\u0120\3\2\2\2")
buf.write(",\u0125\3\2\2\2.\u012b\3\2\2\2\60\u0134\3\2\2\2\62\u013a")
buf.write("\3\2\2\2\64\u014c\3\2\2\2\66\u0150\3\2\2\28\u015c\3\2")
buf.write("\2\2:\u0167\3\2\2\2<\u0179\3\2\2\2>\u017b\3\2\2\2@\u0182")
buf.write("\3\2\2\2B\u0184\3\2\2\2D\u0186\3\2\2\2F\u0189\3\2\2\2")
buf.write("H\u018c\3\2\2\2J\u018e\3\2\2\2L\u0190\3\2\2\2N\u0192\3")
buf.write("\2\2\2P\u0195\3\2\2\2R\u0197\3\2\2\2T\u019a\3\2\2\2V\u019d")
buf.write("\3\2\2\2X\u019f\3\2\2\2Z\u01a1\3\2\2\2\\\u01a3\3\2\2\2")
buf.write("^\u01a6\3\2\2\2`\u01a9\3\2\2\2b\u01ab\3\2\2\2d\u01ad\3")
buf.write("\2\2\2f\u01af\3\2\2\2h\u01b1\3\2\2\2j\u01b3\3\2\2\2l\u01b6")
buf.write("\3\2\2\2n\u01b9\3\2\2\2p\u01bb\3\2\2\2r\u01bd\3\2\2\2")
buf.write("t\u01c0\3\2\2\2v\u01c3\3\2\2\2x\u01c6\3\2\2\2z\u01c9\3")
buf.write("\2\2\2|\u01cc\3\2\2\2~\u01cf\3\2\2\2\u0080\u01d5\3\2\2")
buf.write("\2\u0082\u01d9\3\2\2\2\u0084\u01ed\3\2\2\2\u0086\u0209")
buf.write("\3\2\2\2\u0088\u020d\3\2\2\2\u008a\u020f\3\2\2\2\u008c")
buf.write("\u0215\3\2\2\2\u008e\u0217\3\2\2\2\u0090\u0219\3\2\2\2")
buf.write("\u0092\u021b\3\2\2\2\u0094\u021d\3\2\2\2\u0096\u021f\3")
buf.write("\2\2\2\u0098\u0228\3\2\2\2\u009a\u022b\3\2\2\2\u009c\u022f")
buf.write("\3\2\2\2\u009e\u0247\3\2\2\2\u00a0\u0263\3\2\2\2\u00a2")
buf.write("\u0267\3\2\2\2\u00a4\u026a\3\2\2\2\u00a6\u026d\3\2\2\2")
buf.write("\u00a8\u0270\3\2\2\2\u00aa\u0272\3\2\2\2\u00ac\u0276\3")
buf.write("\2\2\2\u00ae\u027a\3\2\2\2\u00b0\u0281\3\2\2\2\u00b2\u028d")
buf.write("\3\2\2\2\u00b4\u0291\3\2\2\2\u00b6\u0293\3\2\2\2\u00b8")
buf.write("\u0298\3\2\2\2\u00ba\u029c\3\2\2\2\u00bc\u02a1\3\2\2\2")
buf.write("\u00be\u02a3\3\2\2\2\u00c0\u02b5\3\2\2\2\u00c2\u02b7\3")
buf.write("\2\2\2\u00c4\u02be\3\2\2\2\u00c6\u02c2\3\2\2\2\u00c8\u02c4")
buf.write("\3\2\2\2\u00ca\u02ca\3\2\2\2\u00cc\u02cd\3\2\2\2\u00ce")
buf.write("\u00d1\58\34\2\u00cf\u00d1\5:\35\2\u00d0\u00ce\3\2\2\2")
buf.write("\u00d0\u00cf\3\2\2\2\u00d1\5\3\2\2\2\u00d2\u00d3\5\b\4")
buf.write("\2\u00d3\7\3\2\2\2\u00d4\u00d7\5<\36\2\u00d5\u00d7\5>")
buf.write("\37\2\u00d6\u00d4\3\2\2\2\u00d6\u00d5\3\2\2\2\u00d7\t")
buf.write("\3\2\2\2\u00d8\u00d9\7h\2\2\u00d9\u00da\7k\2\2\u00da\u00db")
buf.write("\7t\2\2\u00db\u00dc\7u\2\2\u00dc\u00dd\7v\2\2\u00dd\13")
buf.write("\3\2\2\2\u00de\u00df\7n\2\2\u00df\u00e0\7c\2\2\u00e0\u00e1")
buf.write("\7u\2\2\u00e1\u00e2\7v\2\2\u00e2\r\3\2\2\2\u00e3\u00e4")
buf.write("\7t\2\2\u00e4\u00e5\7w\2\2\u00e5\u00e6\7n\2\2\u00e6\u00e7")
buf.write("\7g\2\2\u00e7\17\3\2\2\2\u00e8\u00e9\7d\2\2\u00e9\u00ea")
buf.write("\7c\2\2\u00ea\u00eb\7u\2\2\u00eb\u00ec\7g\2\2\u00ec\21")
buf.write("\3\2\2\2\u00ed\u00ee\7f\2\2\u00ee\u00ef\7g\2\2\u00ef\u00f0")
buf.write("\7h\2\2\u00f0\23\3\2\2\2\u00f1\u00f2\7t\2\2\u00f2\u00f3")
buf.write("\7g\2\2\u00f3\u00f4\7v\2\2\u00f4\u00f5\7w\2\2\u00f5\u00f6")
buf.write("\7t\2\2\u00f6\u00f7\7p\2\2\u00f7\25\3\2\2\2\u00f8\u00f9")
buf.write("\7c\2\2\u00f9\u00fa\7u\2\2\u00fa\27\3\2\2\2\u00fb\u00fc")
buf.write("\7k\2\2\u00fc\u00fd\7h\2\2\u00fd\31\3\2\2\2\u00fe\u00ff")
buf.write("\7k\2\2\u00ff\u0100\7p\2\2\u0100\33\3\2\2\2\u0101\u0102")
buf.write("\7g\2\2\u0102\u0103\7n\2\2\u0103\u0104\7k\2\2\u0104\u0105")
buf.write("\7h\2\2\u0105\35\3\2\2\2\u0106\u0107\7g\2\2\u0107\u0108")
buf.write("\7n\2\2\u0108\u0109\7u\2\2\u0109\u010a\7g\2\2\u010a\37")
buf.write("\3\2\2\2\u010b\u010c\7y\2\2\u010c\u010d\7j\2\2\u010d\u010e")
buf.write("\7k\2\2\u010e\u010f\7n\2\2\u010f\u0110\7g\2\2\u0110!\3")
buf.write("\2\2\2\u0111\u0112\7h\2\2\u0112\u0113\7q\2\2\u0113\u0114")
buf.write("\7t\2\2\u0114#\3\2\2\2\u0115\u0116\7q\2\2\u0116\u0117")
buf.write("\7t\2\2\u0117%\3\2\2\2\u0118\u0119\7c\2\2\u0119\u011a")
buf.write("\7p\2\2\u011a\u011b\7f\2\2\u011b\'\3\2\2\2\u011c\u011d")
buf.write("\7p\2\2\u011d\u011e\7q\2\2\u011e\u011f\7v\2\2\u011f)\3")
buf.write("\2\2\2\u0120\u0121\7V\2\2\u0121\u0122\7t\2\2\u0122\u0123")
buf.write("\7w\2\2\u0123\u0124\7g\2\2\u0124+\3\2\2\2\u0125\u0126")
buf.write("\7H\2\2\u0126\u0127\7c\2\2\u0127\u0128\7n\2\2\u0128\u0129")
buf.write("\7u\2\2\u0129\u012a\7g\2\2\u012a-\3\2\2\2\u012b\u012c")
buf.write("\7e\2\2\u012c\u012d\7q\2\2\u012d\u012e\7p\2\2\u012e\u012f")
buf.write("\7v\2\2\u012f\u0130\7k\2\2\u0130\u0131\7p\2\2\u0131\u0132")
buf.write("\7w\2\2\u0132\u0133\7g\2\2\u0133/\3\2\2\2\u0134\u0135")
buf.write("\7d\2\2\u0135\u0136\7t\2\2\u0136\u0137\7g\2\2\u0137\u0138")
buf.write("\7c\2\2\u0138\u0139\7m\2\2\u0139\61\3\2\2\2\u013a\u013b")
buf.write("\7d\2\2\u013b\u013c\7n\2\2\u013c\u013d\7q\2\2\u013d\u013e")
buf.write("\7e\2\2\u013e\u013f\7m\2\2\u013f\63\3\2\2\2\u0140\u0141")
buf.write("\6\32\2\2\u0141\u014d\5\u00acV\2\u0142\u0144\7\17\2\2")
buf.write("\u0143\u0142\3\2\2\2\u0143\u0144\3\2\2\2\u0144\u0145\3")
buf.write("\2\2\2\u0145\u0148\7\f\2\2\u0146\u0148\4\16\17\2\u0147")
buf.write("\u0143\3\2\2\2\u0147\u0146\3\2\2\2\u0148\u014a\3\2\2\2")
buf.write("\u0149\u014b\5\u00acV\2\u014a\u0149\3\2\2\2\u014a\u014b")
buf.write("\3\2\2\2\u014b\u014d\3\2\2\2\u014c\u0140\3\2\2\2\u014c")
buf.write("\u0147\3\2\2\2\u014d\u014e\3\2\2\2\u014e\u014f\b\32\2")
buf.write("\2\u014f\65\3\2\2\2\u0150\u0154\5\u00b2Y\2\u0151\u0153")
buf.write("\5\u00b4Z\2\u0152\u0151\3\2\2\2\u0153\u0156\3\2\2\2\u0154")
buf.write("\u0152\3\2\2\2\u0154\u0155\3\2\2\2\u0155\67\3\2\2\2\u0156")
buf.write("\u0154\3\2\2\2\u0157\u015d\t\2\2\2\u0158\u0159\t\3\2\2")
buf.write("\u0159\u015d\t\4\2\2\u015a\u015b\t\4\2\2\u015b\u015d\t")
buf.write("\3\2\2\u015c\u0157\3\2\2\2\u015c\u0158\3\2\2\2\u015c\u015a")
buf.write("\3\2\2\2\u015c\u015d\3\2\2\2\u015d\u0160\3\2\2\2\u015e")
buf.write("\u0161\5\u0084B\2\u015f\u0161\5\u0086C\2\u0160\u015e\3")
buf.write("\2\2\2\u0160\u015f\3\2\2\2\u01619\3\2\2\2\u0162\u0168")
buf.write("\t\5\2\2\u0163\u0164\t\5\2\2\u0164\u0168\t\4\2\2\u0165")
buf.write("\u0166\t\4\2\2\u0166\u0168\t\5\2\2\u0167\u0162\3\2\2\2")
buf.write("\u0167\u0163\3\2\2\2\u0167\u0165\3\2\2\2\u0168\u016b\3")
buf.write("\2\2\2\u0169\u016c\5\u009eO\2\u016a\u016c\5\u00a0P\2\u016b")
buf.write("\u0169\3\2\2\2\u016b\u016a\3\2\2\2\u016c;\3\2\2\2\u016d")
buf.write("\u0171\5\u008eG\2\u016e\u0170\5\u0090H\2\u016f\u016e\3")
buf.write("\2\2\2\u0170\u0173\3\2\2\2\u0171\u016f\3\2\2\2\u0171\u0172")
buf.write("\3\2\2\2\u0172\u017a\3\2\2\2\u0173\u0171\3\2\2\2\u0174")
buf.write("\u0176\7\62\2\2\u0175\u0174\3\2\2\2\u0176\u0177\3\2\2")
buf.write("\2\u0177\u0175\3\2\2\2\u0177\u0178\3\2\2\2\u0178\u017a")
buf.write("\3\2\2\2\u0179\u016d\3\2\2\2\u0179\u0175\3\2\2\2\u017a")
buf.write("=\3\2\2\2\u017b\u017c\7\62\2\2\u017c\u017e\t\6\2\2\u017d")
buf.write("\u017f\5\u0094J\2\u017e\u017d\3\2\2\2\u017f\u0180\3\2")
buf.write("\2\2\u0180\u017e\3\2\2\2\u0180\u0181\3\2\2\2\u0181?\3")
buf.write("\2\2\2\u0182\u0183\7\60\2\2\u0183A\3\2\2\2\u0184\u0185")
buf.write("\7,\2\2\u0185C\3\2\2\2\u0186\u0187\7*\2\2\u0187\u0188")
buf.write("\b\"\3\2\u0188E\3\2\2\2\u0189\u018a\7+\2\2\u018a\u018b")
buf.write("\b#\4\2\u018bG\3\2\2\2\u018c\u018d\7.\2\2\u018dI\3\2\2")
buf.write("\2\u018e\u018f\7<\2\2\u018fK\3\2\2\2\u0190\u0191\7=\2")
buf.write("\2\u0191M\3\2\2\2\u0192\u0193\7,\2\2\u0193\u0194\7,\2")
buf.write("\2\u0194O\3\2\2\2\u0195\u0196\7?\2\2\u0196Q\3\2\2\2\u0197")
buf.write("\u0198\7]\2\2\u0198\u0199\b)\5\2\u0199S\3\2\2\2\u019a")
buf.write("\u019b\7_\2\2\u019b\u019c\b*\6\2\u019cU\3\2\2\2\u019d")
buf.write("\u019e\7~\2\2\u019eW\3\2\2\2\u019f\u01a0\7`\2\2\u01a0")
buf.write("Y\3\2\2\2\u01a1\u01a2\7(\2\2\u01a2[\3\2\2\2\u01a3\u01a4")
buf.write("\7>\2\2\u01a4\u01a5\7>\2\2\u01a5]\3\2\2\2\u01a6\u01a7")
buf.write("\7@\2\2\u01a7\u01a8\7@\2\2\u01a8_\3\2\2\2\u01a9\u01aa")
buf.write("\7-\2\2\u01aaa\3\2\2\2\u01ab\u01ac\7/\2\2\u01acc\3\2\2")
buf.write("\2\u01ad\u01ae\7\61\2\2\u01aee\3\2\2\2\u01af\u01b0\7\'")
buf.write("\2\2\u01b0g\3\2\2\2\u01b1\u01b2\7\u0080\2\2\u01b2i\3\2")
buf.write("\2\2\u01b3\u01b4\7}\2\2\u01b4\u01b5\b\65\7\2\u01b5k\3")
buf.write("\2\2\2\u01b6\u01b7\7\177\2\2\u01b7\u01b8\b\66\b\2\u01b8")
buf.write("m\3\2\2\2\u01b9\u01ba\7>\2\2\u01bao\3\2\2\2\u01bb\u01bc")
buf.write("\7@\2\2\u01bcq\3\2\2\2\u01bd\u01be\7?\2\2\u01be\u01bf")
buf.write("\7?\2\2\u01bfs\3\2\2\2\u01c0\u01c1\7@\2\2\u01c1\u01c2")
buf.write("\7?\2\2\u01c2u\3\2\2\2\u01c3\u01c4\7>\2\2\u01c4\u01c5")
buf.write("\7?\2\2\u01c5w\3\2\2\2\u01c6\u01c7\7>\2\2\u01c7\u01c8")
buf.write("\7@\2\2\u01c8y\3\2\2\2\u01c9\u01ca\7#\2\2\u01ca\u01cb")
buf.write("\7?\2\2\u01cb{\3\2\2\2\u01cc\u01cd\7-\2\2\u01cd\u01ce")
buf.write("\7?\2\2\u01ce}\3\2\2\2\u01cf\u01d0\7/\2\2\u01d0\u01d1")
buf.write("\7?\2\2\u01d1\177\3\2\2\2\u01d2\u01d6\5\u00acV\2\u01d3")
buf.write("\u01d6\5\u00aeW\2\u01d4\u01d6\5\u00b0X\2\u01d5\u01d2\3")
buf.write("\2\2\2\u01d5\u01d3\3\2\2\2\u01d5\u01d4\3\2\2\2\u01d6\u01d7")
buf.write("\3\2\2\2\u01d7\u01d8\b@\t\2\u01d8\u0081\3\2\2\2\u01d9")
buf.write("\u01da\13\2\2\2\u01da\u0083\3\2\2\2\u01db\u01e0\7)\2\2")
buf.write("\u01dc\u01df\5\u008cF\2\u01dd\u01df\n\7\2\2\u01de\u01dc")
buf.write("\3\2\2\2\u01de\u01dd\3\2\2\2\u01df\u01e2\3\2\2\2\u01e0")
buf.write("\u01de\3\2\2\2\u01e0\u01e1\3\2\2\2\u01e1\u01e3\3\2\2\2")
buf.write("\u01e2\u01e0\3\2\2\2\u01e3\u01ee\7)\2\2\u01e4\u01e9\7")
buf.write("$\2\2\u01e5\u01e8\5\u008cF\2\u01e6\u01e8\n\b\2\2\u01e7")
buf.write("\u01e5\3\2\2\2\u01e7\u01e6\3\2\2\2\u01e8\u01eb\3\2\2\2")
buf.write("\u01e9\u01e7\3\2\2\2\u01e9\u01ea\3\2\2\2\u01ea\u01ec\3")
buf.write("\2\2\2\u01eb\u01e9\3\2\2\2\u01ec\u01ee\7$\2\2\u01ed\u01db")
buf.write("\3\2\2\2\u01ed\u01e4\3\2\2\2\u01ee\u0085\3\2\2\2\u01ef")
buf.write("\u01f0\7)\2\2\u01f0\u01f1\7)\2\2\u01f1\u01f2\7)\2\2\u01f2")
buf.write("\u01f6\3\2\2\2\u01f3\u01f5\5\u0088D\2\u01f4\u01f3\3\2")
buf.write("\2\2\u01f5\u01f8\3\2\2\2\u01f6\u01f7\3\2\2\2\u01f6\u01f4")
buf.write("\3\2\2\2\u01f7\u01f9\3\2\2\2\u01f8\u01f6\3\2\2\2\u01f9")
buf.write("\u01fa\7)\2\2\u01fa\u01fb\7)\2\2\u01fb\u020a\7)\2\2\u01fc")
buf.write("\u01fd\7$\2\2\u01fd\u01fe\7$\2\2\u01fe\u01ff\7$\2\2\u01ff")
buf.write("\u0203\3\2\2\2\u0200\u0202\5\u0088D\2\u0201\u0200\3\2")
buf.write("\2\2\u0202\u0205\3\2\2\2\u0203\u0204\3\2\2\2\u0203\u0201")
buf.write("\3\2\2\2\u0204\u0206\3\2\2\2\u0205\u0203\3\2\2\2\u0206")
buf.write("\u0207\7$\2\2\u0207\u0208\7$\2\2\u0208\u020a\7$\2\2\u0209")
buf.write("\u01ef\3\2\2\2\u0209\u01fc\3\2\2\2\u020a\u0087\3\2\2\2")
buf.write("\u020b\u020e\5\u008aE\2\u020c\u020e\5\u008cF\2\u020d\u020b")
buf.write("\3\2\2\2\u020d\u020c\3\2\2\2\u020e\u0089\3\2\2\2\u020f")
buf.write("\u0210\n\t\2\2\u0210\u008b\3\2\2\2\u0211\u0212\7^\2\2")
buf.write("\u0212\u0216\13\2\2\2\u0213\u0214\7^\2\2\u0214\u0216\5")
buf.write("\64\32\2\u0215\u0211\3\2\2\2\u0215\u0213\3\2\2\2\u0216")
buf.write("\u008d\3\2\2\2\u0217\u0218\t\n\2\2\u0218\u008f\3\2\2\2")
buf.write("\u0219\u021a\t\13\2\2\u021a\u0091\3\2\2\2\u021b\u021c")
buf.write("\t\f\2\2\u021c\u0093\3\2\2\2\u021d\u021e\t\r\2\2\u021e")
buf.write("\u0095\3\2\2\2\u021f\u0220\t\16\2\2\u0220\u0097\3\2\2")
buf.write("\2\u0221\u0223\5\u009aM\2\u0222\u0221\3\2\2\2\u0222\u0223")
buf.write("\3\2\2\2\u0223\u0224\3\2\2\2\u0224\u0229\5\u009cN\2\u0225")
buf.write("\u0226\5\u009aM\2\u0226\u0227\7\60\2\2\u0227\u0229\3\2")
buf.write("\2\2\u0228\u0222\3\2\2\2\u0228\u0225\3\2\2\2\u0229\u0099")
buf.write("\3\2\2\2\u022a\u022c\5\u0090H\2\u022b\u022a\3\2\2\2\u022c")
buf.write("\u022d\3\2\2\2\u022d\u022b\3\2\2\2\u022d\u022e\3\2\2\2")
buf.write("\u022e\u009b\3\2\2\2\u022f\u0231\7\60\2\2\u0230\u0232")
buf.write("\5\u0090H\2\u0231\u0230\3\2\2\2\u0232\u0233\3\2\2\2\u0233")
buf.write("\u0231\3\2\2\2\u0233\u0234\3\2\2\2\u0234\u009d\3\2\2\2")
buf.write("\u0235\u023a\7)\2\2\u0236\u0239\5\u00a4R\2\u0237\u0239")
buf.write("\5\u00aaU\2\u0238\u0236\3\2\2\2\u0238\u0237\3\2\2\2\u0239")
buf.write("\u023c\3\2\2\2\u023a\u0238\3\2\2\2\u023a\u023b\3\2\2\2")
buf.write("\u023b\u023d\3\2\2\2\u023c\u023a\3\2\2\2\u023d\u0248\7")
buf.write(")\2\2\u023e\u0243\7$\2\2\u023f\u0242\5\u00a6S\2\u0240")
buf.write("\u0242\5\u00aaU\2\u0241\u023f\3\2\2\2\u0241\u0240\3\2")
buf.write("\2\2\u0242\u0245\3\2\2\2\u0243\u0241\3\2\2\2\u0243\u0244")
buf.write("\3\2\2\2\u0244\u0246\3\2\2\2\u0245\u0243\3\2\2\2\u0246")
buf.write("\u0248\7$\2\2\u0247\u0235\3\2\2\2\u0247\u023e\3\2\2\2")
buf.write("\u0248\u009f\3\2\2\2\u0249\u024a\7)\2\2\u024a\u024b\7")
buf.write(")\2\2\u024b\u024c\7)\2\2\u024c\u0250\3\2\2\2\u024d\u024f")
buf.write("\5\u00a2Q\2\u024e\u024d\3\2\2\2\u024f\u0252\3\2\2\2\u0250")
buf.write("\u0251\3\2\2\2\u0250\u024e\3\2\2\2\u0251\u0253\3\2\2\2")
buf.write("\u0252\u0250\3\2\2\2\u0253\u0254\7)\2\2\u0254\u0255\7")
buf.write(")\2\2\u0255\u0264\7)\2\2\u0256\u0257\7$\2\2\u0257\u0258")
buf.write("\7$\2\2\u0258\u0259\7$\2\2\u0259\u025d\3\2\2\2\u025a\u025c")
buf.write("\5\u00a2Q\2\u025b\u025a\3\2\2\2\u025c\u025f\3\2\2\2\u025d")
buf.write("\u025e\3\2\2\2\u025d\u025b\3\2\2\2\u025e\u0260\3\2\2\2")
buf.write("\u025f\u025d\3\2\2\2\u0260\u0261\7$\2\2\u0261\u0262\7")
buf.write("$\2\2\u0262\u0264\7$\2\2\u0263\u0249\3\2\2\2\u0263\u0256")
buf.write("\3\2\2\2\u0264\u00a1\3\2\2\2\u0265\u0268\5\u00a8T\2\u0266")
buf.write("\u0268\5\u00aaU\2\u0267\u0265\3\2\2\2\u0267\u0266\3\2")
buf.write("\2\2\u0268\u00a3\3\2\2\2\u0269\u026b\t\17\2\2\u026a\u0269")
buf.write("\3\2\2\2\u026b\u00a5\3\2\2\2\u026c\u026e\t\20\2\2\u026d")
buf.write("\u026c\3\2\2\2\u026e\u00a7\3\2\2\2\u026f\u0271\t\21\2")
buf.write("\2\u0270\u026f\3\2\2\2\u0271\u00a9\3\2\2\2\u0272\u0273")
buf.write("\7^\2\2\u0273\u0274\t\22\2\2\u0274\u00ab\3\2\2\2\u0275")
buf.write("\u0277\t\23\2\2\u0276\u0275\3\2\2\2\u0277\u0278\3\2\2")
buf.write("\2\u0278\u0276\3\2\2\2\u0278\u0279\3\2\2\2\u0279\u00ad")
buf.write("\3\2\2\2\u027a\u027e\7%\2\2\u027b\u027d\n\24\2\2\u027c")
buf.write("\u027b\3\2\2\2\u027d\u0280\3\2\2\2\u027e\u027c\3\2\2\2")
buf.write("\u027e\u027f\3\2\2\2\u027f\u00af\3\2\2\2\u0280\u027e\3")
buf.write("\2\2\2\u0281\u0283\7^\2\2\u0282\u0284\5\u00acV\2\u0283")
buf.write("\u0282\3\2\2\2\u0283\u0284\3\2\2\2\u0284\u028a\3\2\2\2")
buf.write("\u0285\u0287\7\17\2\2\u0286\u0285\3\2\2\2\u0286\u0287")
buf.write("\3\2\2\2\u0287\u0288\3\2\2\2\u0288\u028b\7\f\2\2\u0289")
buf.write("\u028b\4\16\17\2\u028a\u0286\3\2\2\2\u028a\u0289\3\2\2")
buf.write("\2\u028b\u00b1\3\2\2\2\u028c\u028e\t\25\2\2\u028d\u028c")
buf.write("\3\2\2\2\u028e\u00b3\3\2\2\2\u028f\u0292\5\u00b2Y\2\u0290")
buf.write("\u0292\t\26\2\2\u0291\u028f\3\2\2\2\u0291\u0290\3\2\2")
buf.write("\2\u0292\u00b5\3\2\2\2\u0293\u0294\7>\2\2\u0294\u0295")
buf.write("\5\66\33\2\u0295\u0296\3\2\2\2\u0296\u0297\b[\n\2\u0297")
buf.write("\u00b7\3\2\2\2\u0298\u0299\7@\2\2\u0299\u029a\3\2\2\2")
buf.write("\u029a\u029b\b\\\13\2\u029b\u00b9\3\2\2\2\u029c\u029d")
buf.write("\7\61\2\2\u029d\u029e\7@\2\2\u029e\u029f\3\2\2\2\u029f")
buf.write("\u02a0\b]\13\2\u02a0\u00bb\3\2\2\2\u02a1\u02a2\7\61\2")
buf.write("\2\u02a2\u00bd\3\2\2\2\u02a3\u02a4\7?\2\2\u02a4\u00bf")
buf.write("\3\2\2\2\u02a5\u02a9\7$\2\2\u02a6\u02a8\n\27\2\2\u02a7")
buf.write("\u02a6\3\2\2\2\u02a8\u02ab\3\2\2\2\u02a9\u02a7\3\2\2\2")
buf.write("\u02a9\u02aa\3\2\2\2\u02aa\u02ac\3\2\2\2\u02ab\u02a9\3")
buf.write("\2\2\2\u02ac\u02b6\7$\2\2\u02ad\u02b1\7)\2\2\u02ae\u02b0")
buf.write("\n\30\2\2\u02af\u02ae\3\2\2\2\u02b0\u02b3\3\2\2\2\u02b1")
buf.write("\u02af\3\2\2\2\u02b1\u02b2\3\2\2\2\u02b2\u02b4\3\2\2\2")
buf.write("\u02b3\u02b1\3\2\2\2\u02b4\u02b6\7)\2\2\u02b5\u02a5\3")
buf.write("\2\2\2\u02b5\u02ad\3\2\2\2\u02b6\u00c1\3\2\2\2\u02b7\u02bb")
buf.write("\5\u00ccf\2\u02b8\u02ba\5\u00cae\2\u02b9\u02b8\3\2\2\2")
buf.write("\u02ba\u02bd\3\2\2\2\u02bb\u02b9\3\2\2\2\u02bb\u02bc\3")
buf.write("\2\2\2\u02bc\u00c3\3\2\2\2\u02bd\u02bb\3\2\2\2\u02be\u02bf")
buf.write("\t\31\2\2\u02bf\u02c0\3\2\2\2\u02c0\u02c1\bb\t\2\u02c1")
buf.write("\u00c5\3\2\2\2\u02c2\u02c3\t\r\2\2\u02c3\u00c7\3\2\2\2")
buf.write("\u02c4\u02c5\t\13\2\2\u02c5\u00c9\3\2\2\2\u02c6\u02cb")
buf.write("\5\u00ccf\2\u02c7\u02cb\t\32\2\2\u02c8\u02cb\5\u0090H")
buf.write("\2\u02c9\u02cb\t\33\2\2\u02ca\u02c6\3\2\2\2\u02ca\u02c7")
buf.write("\3\2\2\2\u02ca\u02c8\3\2\2\2\u02ca\u02c9\3\2\2\2\u02cb")
buf.write("\u00cb\3\2\2\2\u02cc\u02ce\t\34\2\2\u02cd\u02cc\3\2\2")
buf.write("\2\u02ce\u00cd\3\2\2\2;\2\3\u00d0\u00d6\u0143\u0147\u014a")
buf.write("\u014c\u0154\u015c\u0160\u0167\u016b\u0171\u0177\u0179")
buf.write("\u0180\u01d5\u01de\u01e0\u01e7\u01e9\u01ed\u01f6\u0203")
buf.write("\u0209\u020d\u0215\u0222\u0228\u022d\u0233\u0238\u023a")
buf.write("\u0241\u0243\u0247\u0250\u025d\u0263\u0267\u026a\u026d")
buf.write("\u0270\u0278\u027e\u0283\u0286\u028a\u028d\u0291\u02a9")
buf.write("\u02b1\u02b5\u02bb\u02ca\u02cd\f\3\32\2\3\"\3\3#\4\3)")
buf.write("\5\3*\6\3\65\7\3\66\b\b\2\2\7\3\2\6\2\2")
return buf.getvalue()
class RuleLexerPy(Lexer):
atn = ATNDeserializer().deserialize(serializedATN())
decisionsToDFA = [ DFA(ds, i) for i, ds in enumerate(atn.decisionToState) ]
INSIDE = 1
STRING = 1
NUMBER = 2
INTEGER = 3
FIRST = 4
LAST = 5
RULE = 6
BASE = 7
DEF = 8
RETURN = 9
AS = 10
IF = 11
IN = 12
ELIF = 13
ELSE = 14
WHILE = 15
FOR = 16
OR = 17
AND = 18
NOT = 19
TRUE = 20
FALSE = 21
CONTINUE = 22
BREAK = 23
BLOCK = 24
NEWLINE = 25
NAME = 26
STRING_LITERAL = 27
BYTES_LITERAL = 28
DECIMAL_INTEGER = 29
HEX_INTEGER = 30
DOT = 31
STAR = 32
OPEN_PAREN = 33
CLOSE_PAREN = 34
COMMA = 35
COLON = 36
SEMI_COLON = 37
POWER = 38
ASSIGN = 39
OPEN_BRACK = 40
CLOSE_BRACK = 41
OR_OP = 42
XOR = 43
AND_OP = 44
LEFT_SHIFT = 45
RIGHT_SHIFT = 46
ADD = 47
MINUS = 48
DIV = 49
MOD = 50
NOT_OP = 51
OPEN_BRACE = 52
CLOSE_BRACE = 53
LESS_THAN = 54
GREATER_THAN = 55
EQUALS = 56
GT_EQ = 57
LT_EQ = 58
NOT_EQ_1 = 59
NOT_EQ_2 = 60
ADD_ASSIGN = 61
SUB_ASSIGN = 62
SKIP_ = 63
UNKNOWN_CHAR = 64
OPEN = 65
CLOSE = 66
SLASH_CLOSE = 67
SLASH = 68
XML_EQUALS = 69
XML_STRING = 70
Name = 71
S = 72
channelNames = [ u"DEFAULT_TOKEN_CHANNEL", u"HIDDEN" ]
modeNames = [ "DEFAULT_MODE", "INSIDE" ]
literalNames = [ "<INVALID>",
"'first'", "'last'", "'rule'", "'base'", "'def'", "'return'",
"'as'", "'if'", "'in'", "'elif'", "'else'", "'while'", "'for'",
"'or'", "'and'", "'not'", "'True'", "'False'", "'continue'",
"'break'", "'block'", "'.'", "'*'", "'('", "')'", "','", "':'",
"';'", "'**'", "'['", "']'", "'|'", "'^'", "'&'", "'<<'", "'>>'",
"'+'", "'-'", "'%'", "'~'", "'{'", "'}'", "'<'", "'=='", "'>='",
"'<='", "'<>'", "'!='", "'+='", "'-='", "'/>'" ]
symbolicNames = [ "<INVALID>",
"STRING", "NUMBER", "INTEGER", "FIRST", "LAST", "RULE", "BASE",
"DEF", "RETURN", "AS", "IF", "IN", "ELIF", "ELSE", "WHILE",
"FOR", "OR", "AND", "NOT", "TRUE", "FALSE", "CONTINUE", "BREAK",
"BLOCK", "NEWLINE", "NAME", "STRING_LITERAL", "BYTES_LITERAL",
"DECIMAL_INTEGER", "HEX_INTEGER", "DOT", "STAR", "OPEN_PAREN",
"CLOSE_PAREN", "COMMA", "COLON", "SEMI_COLON", "POWER", "ASSIGN",
"OPEN_BRACK", "CLOSE_BRACK", "OR_OP", "XOR", "AND_OP", "LEFT_SHIFT",
"RIGHT_SHIFT", "ADD", "MINUS", "DIV", "MOD", "NOT_OP", "OPEN_BRACE",
"CLOSE_BRACE", "LESS_THAN", "GREATER_THAN", "EQUALS", "GT_EQ",
"LT_EQ", "NOT_EQ_1", "NOT_EQ_2", "ADD_ASSIGN", "SUB_ASSIGN",
"SKIP_", "UNKNOWN_CHAR", "OPEN", "CLOSE", "SLASH_CLOSE", "SLASH",
"XML_EQUALS", "XML_STRING", "Name", "S" ]
ruleNames = [ "STRING", "NUMBER", "INTEGER", "FIRST", "LAST", "RULE",
"BASE", "DEF", "RETURN", "AS", "IF", "IN", "ELIF", "ELSE",
"WHILE", "FOR", "OR", "AND", "NOT", "TRUE", "FALSE", "CONTINUE",
"BREAK", "BLOCK", "NEWLINE", "NAME", "STRING_LITERAL",
"BYTES_LITERAL", "DECIMAL_INTEGER", "HEX_INTEGER", "DOT",
"STAR", "OPEN_PAREN", "CLOSE_PAREN", "COMMA", "COLON",
"SEMI_COLON", "POWER", "ASSIGN", "OPEN_BRACK", "CLOSE_BRACK",
"OR_OP", "XOR", "AND_OP", "LEFT_SHIFT", "RIGHT_SHIFT",
"ADD", "MINUS", "DIV", "MOD", "NOT_OP", "OPEN_BRACE",
"CLOSE_BRACE", "LESS_THAN", "GREATER_THAN", "EQUALS",
"GT_EQ", "LT_EQ", "NOT_EQ_1", "NOT_EQ_2", "ADD_ASSIGN",
"SUB_ASSIGN", "SKIP_", "UNKNOWN_CHAR", "SHORT_STRING",
"LONG_STRING", "LONG_STRING_ITEM", "LONG_STRING_CHAR",
"STRING_ESCAPE_SEQ", "NON_ZERO_DIGIT", "DIGIT", "OCT_DIGIT",
"HEX_DIGIT", "BIN_DIGIT", "POINT_FLOAT", "INT_PART", "FRACTION",
"SHORT_BYTES", "LONG_BYTES", "LONG_BYTES_ITEM", "SHORT_BYTES_CHAR_NO_SINGLE_QUOTE",
"SHORT_BYTES_CHAR_NO_DOUBLE_QUOTE", "LONG_BYTES_CHAR",
"BYTES_ESCAPE_SEQ", "SPACES", "COMMENT", "LINE_JOINING",
"ID_START", "ID_CONTINUE", "OPEN", "CLOSE", "SLASH_CLOSE",
"SLASH", "XML_EQUALS", "XML_STRING", "Name", "S", "HEXDIGIT",
"XML_DIGIT", "NameChar", "NameStartChar" ]
grammarFileName = "RuleLexerPy.g4"
def __init__(self, input=None, output:TextIO = sys.stdout):
super().__init__(input, output)
self.checkVersion("4.7.2")
self._interp = LexerATNSimulator(self, self.atn, self.decisionsToDFA, PredictionContextCache())
self._actions = None
self._predicates = None
# A queue where extra tokens are pushed on (see the NEWLINE lexer rule).
tokens: List[Token] = []
# The stack that keeps track of the indentation level.
indents: List[int] = []
# The amount of opened braces, brackets and parenthesis.
opened:int = 0
# The most recently produced token.
lastToken:Token
def emitToken(self, t:Token):
super().emitToken(t)
self.tokens.append(t)
def nextToken(self):
#Check if the end-of-file is ahead and there are still some DEDENTS expected.
if self._input.LA(1) == Token.EOF and self.indents:
# Remove any trailing EOF tokens from our buffer
while self.tokens and self.tokens[-1].type == Token.EOF:
    self.tokens.pop()
# First emit an extra line break that serves as the end of the statement.
self.emitToken(self.commonToken(RuleParserPy.NEWLINE, "\n"))
# Now emitToken as much DEDENT tokens as needed.
while self.indents:
self.emitToken(self.createDedent())
self.indents.pop()
# Put the EOF back on the token stream.
self.emitToken(self.commonToken(RuleParserPy.EOF, "<EOF>"))
next:Token = super().nextToken()
if next.channel == Token.DEFAULT_CHANNEL:
# Keep track of the last token on the default channel.
self.lastToken = next
if not self.tokens:
return next
else:
res: Token = self.tokens.pop(0)
return res
def createDedent(self):
self.dedent:CommonToken = self.commonToken(RuleParserPy.DEDENT, "")
self.dedent.line = self.lastToken.line
self.dedent.text = " " * self.indents[0]
return self.dedent
def commonToken(self, type:int, text:str):
stop:int = self.getCharIndex() - 1
start:int = 0
if text:
start = stop - len(text) + 1
ct = CommonToken(self._tokenFactorySourcePair, type, self.DEFAULT_TOKEN_CHANNEL, start, stop)
return ct
# Calculates the indentation of the provided spaces, taking the
# following rules into account:
#
# "Tabs are replaced (from left to right) by one to eight spaces
# such that the total number of characters up to and including the
# replacement is a multiple of eight."
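# A sketch of that calculation (the helper name getIndentationCount and its
# placement are assumptions here; the generated grammar normally defines an
# equivalent helper right after this comment):
@staticmethod
def getIndentationCount(spaces: str) -> int:
    count = 0
    for ch in spaces:
        if ch == '\t':
            # jump to the next multiple of eight
            count += 8 - (count % 8)
        else:
            count += 1
    return count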
from __future__ import annotations
import textwrap
from typing import Any, Callable, Dict, Hashable, Iterable, List, Mapping, Tuple, Union
import anytree
from xarray import DataArray, Dataset, merge
from xarray.core import dtypes, utils
from xarray.core.variable import Variable
from .mapping import TreeIsomorphismError, check_isomorphic, map_over_subtree
from .ops import (
DataTreeArithmeticMixin,
MappedDatasetMethodsMixin,
MappedDataWithCoords,
)
from .treenode import PathType, TreeNode
# """
# DEVELOPERS' NOTE
# ----------------
# The idea of this module is to create a `DataTree` class which inherits the tree structure from TreeNode, and also copies
# the entire API of `xarray.Dataset`, but with certain methods decorated to instead map the dataset function over every
# node in the tree. As this API is copied without directly subclassing `xarray.Dataset` we instead create various Mixin
# classes (in ops.py) which each define part of `xarray.Dataset`'s extensive API.
# Some of these methods must be wrapped to map over all nodes in the subtree. Others are fine to inherit unaltered
# (normally because they (a) only call dataset properties and (b) don't return a dataset that should be nested into a new
# tree) and some will get overridden by the class definition of DataTree.
# """
class DataTree(
TreeNode,
MappedDatasetMethodsMixin,
MappedDataWithCoords,
DataTreeArithmeticMixin,
):
"""
A tree-like hierarchical collection of xarray objects.
Attempts to present an API like that of xarray.Dataset, but methods are wrapped to also update all the tree's child nodes.
"""
# TODO should this instead be a subclass of Dataset?
# TODO attribute-like access for both vars and child nodes (by inheriting from xarray.core.common.AttrsAccessMixin?)
# TODO ipython autocomplete for child nodes
# TODO Some way of sorting children by depth
# TODO Consistency in copying vs updating objects
# TODO do we need a watch out for if methods intended only for root nodes are called on non-root nodes?
# TODO currently allows self.ds = None, should we instead always store at least an empty Dataset?
# TODO dataset methods which should not or cannot act over the whole tree, such as .to_array
# TODO del and delitem methods
# TODO .loc, __contains__, __iter__, __array__, __len__
def __init__(
self,
name: Hashable = "root",
data: Union[Dataset, DataArray] = None,
parent: TreeNode = None,
children: List[TreeNode] = None,
):
"""
Create a single node of a DataTree, which optionally contains data in the form of an xarray.Dataset.
Parameters
----------
name : Hashable
Name for the root node of the tree. Default is "root"
data : Dataset, DataArray, Variable or None, optional
Data to store under the .ds attribute of this node. DataArrays and Variables will be promoted to Datasets.
Default is None.
parent : TreeNode, optional
Parent node to this node. Default is None.
children : Sequence[TreeNode], optional
Any child nodes of this node. Default is None.
Returns
-------
node : DataTree
See Also
--------
DataTree.from_dict
"""
super().__init__(name, parent=parent, children=children)
self.ds = data
@property
def ds(self) -> Dataset:
return self._ds
@ds.setter
def ds(self, data: Union[Dataset, DataArray] = None):
if not isinstance(data, (Dataset, DataArray)) and data is not None:
raise TypeError(
f"{type(data)} object is not an xarray Dataset, DataArray, or None"
)
if isinstance(data, DataArray):
data = data.to_dataset()
elif data is None:
data = Dataset()
for var in list(data.variables):
if var in list(c.name for c in self.children):
raise KeyError(
f"Cannot add variable named {var}: node already has a child named {var}"
)
self._ds = data
@property
def has_data(self) -> bool:
return len(self.ds.variables) > 0
@classmethod
def from_dict(
cls,
data_objects: Dict[PathType, Union[Dataset, DataArray, None]] = None,
name: Hashable = "root",
):
"""
Create a datatree from a dictionary of data objects, labelled by paths into the tree.
Parameters
----------
data_objects : dict-like, optional
A mapping from path names to xarray.Dataset, xarray.DataArray, or DataTree objects.
Path names can be given as unix-like paths, or as tuples of strings (where each string
is known as a single "tag"). If path names containing more than one tag are given, new
tree nodes will be constructed as necessary.
To assign data to the root node of the tree use {name} as the path.
name : Hashable, optional
Name for the root node of the tree. Default is "root"
Returns
-------
DataTree
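        Examples
        --------
        Purely illustrative sketch (paths, names and data are made up, and
        ``xr`` is assumed to be ``import xarray as xr``)::

            dt = DataTree.from_dict({
                "root": xr.Dataset({"a": 0}),
                "root/child": xr.Dataset({"b": 1}),
            })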
"""
# First create the root node
if data_objects:
root_data = data_objects.pop(name, None)
else:
root_data = None
obj = cls(name=name, data=root_data, parent=None, children=None)
if data_objects:
# Populate tree with children determined from data_objects mapping
for path, data in data_objects.items():
# Determine name of new node
path = obj._tuple_or_path_to_path(path)
if obj.separator in path:
node_path, node_name = path.rsplit(obj.separator, maxsplit=1)
else:
node_path, node_name = "/", path
relative_path = node_path.replace(obj.name, "")
# Create and set new node
new_node = cls(name=node_name, data=data)
obj.set_node(
relative_path,
new_node,
allow_overwrite=False,
new_nodes_along_path=True,
)
return obj
def _pre_attach(self, parent: TreeNode) -> None:
"""
Method which superclass calls before setting parent, here used to prevent having two
children with duplicate names (or a data variable with the same name as a child).
"""
super()._pre_attach(parent)
if parent.has_data and self.name in list(parent.ds.variables):
raise KeyError(
f"parent {parent.name} already contains a data variable named {self.name}"
)
def add_child(self, child: TreeNode) -> None:
"""
Add a single child node below this node, without replacement.
Will raise a KeyError if either a child or data variable already exists with this name.
"""
if child.name in list(c.name for c in self.children):
raise KeyError(f"Node already has a child named {child.name}")
elif self.has_data and child.name in list(self.ds.variables):
raise KeyError(f"Node already contains a data variable named {child.name}")
else:
child.parent = self
def __str__(self):
"""A printable representation of the structure of this entire subtree."""
renderer = anytree.RenderTree(self)
lines = []
for pre, fill, node in renderer:
node_repr = node._single_node_repr()
node_line = f"{pre}{node_repr.splitlines()[0]}"
lines.append(node_line)
if node.has_data:
ds_repr = node_repr.splitlines()[2:]
for line in ds_repr:
if len(node.children) > 0:
lines.append(f"{fill}{renderer.style.vertical}{line}")
else:
lines.append(f"{fill}{line}")
return "\n".join(lines)
def _single_node_repr(self):
"""Information about this node, not including its relationships to other nodes."""
node_info = f"DataTree('{self.name}')"
if self.has_data:
ds_info = "\n" + repr(self.ds)
else:
ds_info = ""
return node_info + ds_info
def __repr__(self):
"""Information about this node, including its relationships to other nodes."""
# TODO redo this to look like the Dataset repr, but just with child and parent info
parent = self.parent.name if self.parent is not None else "None"
node_str = f"DataTree(name='{self.name}', parent='{parent}', children={[c.name for c in self.children]},"
if self.has_data:
ds_repr_lines = self.ds.__repr__().splitlines()
ds_repr = (
ds_repr_lines[0]
+ "\n"
+ textwrap.indent("\n".join(ds_repr_lines[1:]), " ")
)
data_str = f"\ndata={ds_repr}\n)"
else:
data_str = "data=None)"
return node_str + data_str
def __getitem__(
self, key: Union[PathType, Hashable, Mapping, Any]
) -> Union[TreeNode, Dataset, DataArray]:
"""
Access either child nodes, variables, or coordinates stored in this tree.
Variables or coordinates of the contained dataset will be returned as a :py:class:`~xarray.DataArray`.
Indexing with a list of names will return a new ``Dataset`` object.
Like Dataset.__getitem__ this method also accepts dict-like indexing, and selection of multiple data variables
(from the same Dataset node) via list.
Parameters
----------
key :
Paths to nodes or to data variables in nodes can be given as unix-like paths, or as tuples of strings
(where each string is known as a single "tag").
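        Examples
        --------
        Illustrative sketch of the accepted key forms (node and variable names
        are hypothetical)::

            dt["child_node"]            # a child DataTree node
            dt["child_node/variable"]   # a DataArray from that node's dataset
            dt[["var_a", "var_b"]]      # a Dataset holding both variables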
"""
# Either:
if utils.is_dict_like(key):
# dict-like selection on dataset variables
return self.ds[key]
elif utils.hashable(key):
# path-like: a path to a node possibly with a variable name at the end
return self._get_item_from_path(key)
elif utils.is_list_like(key) and all(k in self.ds for k in key):
# iterable of variable names
return self.ds[key]
elif utils.is_list_like(key) and all("/" not in tag for tag in key):
# iterable of child tags
return self._get_item_from_path(key)
else:
raise ValueError("Invalid format for key")
def _get_item_from_path(
self, path: PathType
) -> Union[TreeNode, Dataset, DataArray]:
"""Get item given a path. Two valid cases: either all parts of path are nodes or last part is a variable."""
# TODO this currently raises a ChildResolverError if it can't find a data variable in the ds - that's inconsistent with xarray.Dataset.__getitem__
path = self._tuple_or_path_to_path(path)
tags = [
tag for tag in path.split(self.separator) if tag not in [self.separator, ""]
]
*leading_tags, last_tag = tags
if leading_tags is not None:
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# MinIO Python Library for Amazon S3 Compatible Cloud Storage,
# (C) 2015, 2016, 2017, 2018 MinIO, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# pylint: disable=too-many-lines
"""Functional tests of minio-py."""
from __future__ import absolute_import, division
import hashlib
import io
import json
import math
import os
import random
import shutil
import sys
import tempfile
import time
import traceback
from datetime import datetime, timedelta
from threading import Thread
from uuid import uuid4
import certifi
import urllib3
from minio import CopyConditions, Minio, PostPolicy
from minio.error import (APINotImplemented, InvalidBucketError,
NoSuchBucketPolicy, PreconditionFailed, ResponseError)
from minio.fold_case_dict import FoldCaseDict
from minio.select.helpers import calculate_crc
from minio.select.options import (CSVInput, CSVOutput, InputSerialization,
OutputSerialization, RequestProgress,
SelectObjectOptions)
from minio.sse import SseCustomerKey
if sys.version_info[0] == 2:
from datetime import tzinfo # pylint: disable=ungrouped-imports
class UTC(tzinfo):
"""UTC"""
def utcoffset(self, dt):
return timedelta(0)
def tzname(self, dt):
return "UTC"
def dst(self, dt):
return timedelta(0)
utc = UTC()
from inspect import getargspec
else:
from datetime import timezone # pylint: disable=ungrouped-imports
utc = timezone.utc
from inspect import getfullargspec # pylint: disable=ungrouped-imports
getargspec = getfullargspec
_CLIENT = None # initialized in main().
_TEST_FILE = None # initialized in main().
_LARGE_FILE = None # initialized in main().
_IS_AWS = None # initialized in main().
KB = 1024
MB = 1024 * KB
http = urllib3.PoolManager(
cert_reqs='CERT_REQUIRED',
ca_certs=os.environ.get('SSL_CERT_FILE') or certifi.where()
)
def _gen_bucket_name():
"""Generate random bucket name."""
return "minio-py-test-{0}".format(uuid4())
def _get_sha256sum(filename):
"""Get SHA-256 checksum of given file."""
with open(filename, 'rb') as file:
contents = file.read()
return hashlib.sha256(contents).hexdigest()
def _get_random_string(size):
"""Get random string of given size."""
if not size:
return ""
chars = "abcdefghijklmnopqrstuvwxyz"
chars *= int(math.ceil(size / len(chars)))
chars = list(chars[:size])
random.shuffle(chars)
return "".join(chars)
class LimitedRandomReader: # pylint: disable=too-few-public-methods
"""Random data reader of specified size."""
def __init__(self, limit):
self._limit = limit
def read(self, size=64*KB):
"""Read random data of specified size."""
if size < 0 or size > self._limit:
size = self._limit
data = _get_random_string(size)
self._limit -= size
return data.encode()
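# Illustrative use of LimitedRandomReader (mirrors how the put/copy tests below
# stream generated data instead of reading a local file):
#   reader = LimitedRandomReader(1 * MB)
#   _CLIENT.put_object(bucket_name, object_name, reader, 1 * MB)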
def _call(log_entry, func, *args, **kwargs):
"""Execute given function."""
log_entry["method"] = func
return func(*args, **kwargs)
class TestFailed(Exception):
"""Indicate test failed error."""
def _call_test(func, *args, **kwargs):
"""Execute given test function."""
log_entry = {
"name": func.__name__,
"status": "PASS",
}
start_time = time.time()
try:
func(log_entry, *args, **kwargs)
except APINotImplemented:
log_entry["alert"] = "Not Implemented"
log_entry["status"] = "NA"
except Exception as exc: # pylint: disable=broad-except
log_entry["message"] = "{0}".format(exc)
log_entry["error"] = traceback.format_exc()
log_entry["status"] = "FAIL"
if log_entry.get("method"):
log_entry["function"] = "{0}({1})".format(
log_entry["method"].__name__,
# pylint: disable=deprecated-method
', '.join(getargspec(log_entry["method"]).args[1:]))
log_entry["args"] = {
k: v for k, v in log_entry.get("args", {}).items() if v
}
log_entry["duration"] = int(
round((time.time() - start_time) * 1000))
log_entry["name"] = 'minio-py:' + log_entry["name"]
log_entry["method"] = None
print(json.dumps({k: v for k, v in log_entry.items() if v}))
if log_entry["status"] == "FAIL":
raise TestFailed()
def test_make_bucket_default_region(log_entry):
"""Test make_bucket() with default region."""
# Get a unique bucket_name
bucket_name = _gen_bucket_name()
log_entry["args"] = {
"bucket_name": bucket_name,
"location": "default value ('us-east-1')", # Default location
}
# Create a bucket with default bucket location
_call(log_entry, _CLIENT.make_bucket, bucket_name)
# Check if bucket was created properly
_call(log_entry, _CLIENT.bucket_exists, bucket_name)
# Remove bucket
_call(log_entry, _CLIENT.remove_bucket, bucket_name)
# Test passes
log_entry["method"] = _CLIENT.make_bucket
def test_make_bucket_with_region(log_entry):
"""Test make_bucket() with region."""
# Only test make bucket with region against AWS S3
if not _IS_AWS:
return
# Get a unique bucket_name
bucket_name = _gen_bucket_name()
# A non-default location
location = 'us-west-1'
log_entry["args"] = {
"bucket_name": bucket_name,
"location": location,
}
# Create a bucket with default bucket location
_call(log_entry, _CLIENT.make_bucket, bucket_name, location)
# Check if bucket was created properly
_call(log_entry, _CLIENT.bucket_exists, bucket_name)
# Remove bucket
_call(log_entry, _CLIENT.remove_bucket, bucket_name)
# Test passes
log_entry["method"] = _CLIENT.make_bucket
def test_negative_make_bucket_invalid_name( # pylint: disable=invalid-name
log_entry):
"""Test make_bucket() with invalid bucket name."""
# Get a unique bucket_name
bucket_name = _gen_bucket_name()
# Default location
log_entry["args"] = {
"location": "default value ('us-east-1')",
}
# Create an array of invalid bucket names to test
invalid_bucket_name_list = [
bucket_name + '.',
'.' + bucket_name,
bucket_name + '...abcd'
]
for name in invalid_bucket_name_list:
log_entry["args"]["bucket_name"] = name
try:
# Create a bucket with default bucket location
            _call(log_entry, _CLIENT.make_bucket, name)
            # Check if bucket was created properly
            _call(log_entry, _CLIENT.bucket_exists, name)
            # Remove bucket
            _call(log_entry, _CLIENT.remove_bucket, name)
except InvalidBucketError:
pass
# Test passes
log_entry["method"] = _CLIENT.make_bucket
log_entry["args"]['bucket_name'] = invalid_bucket_name_list
def test_list_buckets(log_entry):
"""Test list_buckets()."""
# Get a unique bucket_name
bucket_name = _gen_bucket_name()
# Create a bucket with default bucket location
_call(log_entry, _CLIENT.make_bucket, bucket_name)
try:
buckets = _CLIENT.list_buckets()
for bucket in buckets:
# bucket object should be of a valid value.
if bucket.name and bucket.creation_date:
continue
raise ValueError('list_bucket api failure')
finally:
# Remove bucket
_call(log_entry, _CLIENT.remove_bucket, bucket_name)
def test_select_object_content(log_entry):
"""Test select_object_content()."""
# Get a unique bucket_name and object_name
bucket_name = _gen_bucket_name()
csvfile = 'test.csv'
log_entry["args"] = {
"bucket_name": bucket_name,
"object_name": csvfile,
}
try:
_CLIENT.make_bucket(bucket_name)
content = io.BytesIO(b"col1,col2,col3\none,two,three\nX,Y,Z\n")
_CLIENT.put_object(bucket_name, csvfile, content,
len(content.getvalue()))
options = SelectObjectOptions(
expression="select * from s3object",
input_serialization=InputSerialization(
compression_type="NONE",
csv=CSVInput(file_header_info="NONE",
record_delimiter="\n",
field_delimiter=",",
quote_character='"',
quote_escape_character='"',
comments="#",
allow_quoted_record_delimiter=False),
),
output_serialization=OutputSerialization(
csv=CSVOutput(quote_fields="ASNEEDED",
record_delimiter="\n",
field_delimiter=",",
quote_character='"',
quote_escape_character='"')
),
request_progress=RequestProgress(enabled=False)
)
data = _CLIENT.select_object_content(bucket_name, csvfile, options)
# Get the records
records = io.BytesIO()
for data_bytes in data.stream(10*KB):
records.write(data_bytes.encode('utf-8'))
expected_crc = calculate_crc(content.getvalue())
generated_crc = calculate_crc(records.getvalue())
if expected_crc != generated_crc:
raise ValueError(
'Data mismatch Expected : '
'"col1,col2,col3\none,two,three\nX,Y,Z\n"',
'Received {}', records)
finally:
_CLIENT.remove_object(bucket_name, csvfile)
_CLIENT.remove_bucket(bucket_name)
def _test_fput_object(bucket_name, object_name, filename, metadata, sse):
"""Test fput_object()."""
try:
_CLIENT.make_bucket(bucket_name)
if _IS_AWS:
_CLIENT.fput_object(bucket_name, object_name, filename,
metadata=metadata, sse=sse)
else:
_CLIENT.fput_object(bucket_name, object_name, filename, sse=sse)
_CLIENT.stat_object(bucket_name, object_name, sse=sse)
finally:
_CLIENT.remove_object(bucket_name, object_name)
_CLIENT.remove_bucket(bucket_name)
def test_fput_object_small_file(log_entry, sse=None):
"""Test fput_object() with small file."""
if sse:
log_entry["name"] += "_with_SSE-C"
# Get a unique bucket_name and object_name
bucket_name = _gen_bucket_name()
object_name = "{0}-f".format(uuid4())
metadata = {'x-amz-storage-class': 'STANDARD_IA'}
log_entry["args"] = {
"bucket_name": bucket_name,
"object_name": object_name,
"file_path": _TEST_FILE,
"metadata": metadata,
}
_test_fput_object(bucket_name, object_name, _TEST_FILE, metadata, sse)
def test_fput_object_large_file(log_entry, sse=None):
"""Test fput_object() with large file."""
if sse:
log_entry["name"] += "_with_SSE-C"
# Get a unique bucket_name and object_name
bucket_name = _gen_bucket_name()
object_name = "{0}-large".format(uuid4())
metadata = {'x-amz-storage-class': 'STANDARD_IA'}
log_entry["args"] = {
"bucket_name": bucket_name,
"object_name": object_name,
"file_path": _LARGE_FILE,
"metadata": metadata,
}
# upload local large file through multipart.
_test_fput_object(bucket_name, object_name, _LARGE_FILE, metadata, sse)
def test_fput_object_with_content_type( # pylint: disable=invalid-name
log_entry):
"""Test fput_object() with content-type."""
# Get a unique bucket_name and object_name
bucket_name = _gen_bucket_name()
object_name = "{0}-f".format(uuid4())
metadata = {'x-amz-storage-class': 'STANDARD_IA'}
content_type = 'application/octet-stream'
log_entry["args"] = {
"bucket_name": bucket_name,
"object_name": object_name,
"file_path": _TEST_FILE,
"metadata": metadata,
"content_type": content_type,
}
_test_fput_object(bucket_name, object_name, _TEST_FILE, metadata, None)
def _validate_stat(st_obj, expected_size, expected_meta):
"""Validate stat information."""
received_modification_time = st_obj.last_modified
received_etag = st_obj.etag
received_metadata = FoldCaseDict(st_obj.metadata)
received_content_type = st_obj.content_type
received_size = st_obj.size
received_is_dir = st_obj.is_dir
if not isinstance(received_modification_time, time.struct_time):
raise ValueError('Incorrect last_modified time type'
', received type: ', type(received_modification_time))
if not received_etag:
raise ValueError('No Etag value is returned.')
# content_type by default can be either application/octet-stream or
# binary/octet-stream
if received_content_type not in [
'application/octet-stream', 'binary/octet-stream']:
raise ValueError('Incorrect content type. Expected: ',
"'application/octet-stream' or 'binary/octet-stream',"
" received: ", received_content_type)
if received_size != expected_size:
        raise ValueError('Incorrect file size. Expected: ', expected_size,
                         ', received: ', received_size)
if received_is_dir:
raise ValueError('Incorrect file type. Expected: is_dir=False',
', received: is_dir=', received_is_dir)
if not all(i in received_metadata.items() for i in expected_meta.items()):
raise ValueError("Metadata key 'x-amz-meta-testing' not found")
def test_copy_object_no_copy_condition( # pylint: disable=invalid-name
log_entry, ssec_copy=None, ssec=None):
"""Test copy_object() with no conditiions."""
if ssec_copy or ssec:
log_entry["name"] += "_SSEC"
# Get a unique bucket_name and object_name
bucket_name = _gen_bucket_name()
object_name = "{0}".format(uuid4())
object_source = object_name + "-source"
object_copy = object_name + "-copy"
log_entry["args"] = {
"bucket_name": bucket_name,
"object_source": object_source,
"object_name": object_copy,
}
try:
_CLIENT.make_bucket(bucket_name)
# Upload a streaming object of 1 KiB
size = 1 * KB
reader = LimitedRandomReader(size)
_CLIENT.put_object(bucket_name, object_source, reader, size, sse=ssec)
_CLIENT.copy_object(bucket_name, object_copy,
'/' + bucket_name + '/' + object_source,
source_sse=ssec_copy, sse=ssec)
st_obj = _CLIENT.stat_object(bucket_name, object_copy, sse=ssec)
_validate_stat(st_obj, size, {})
finally:
_CLIENT.remove_object(bucket_name, object_source)
_CLIENT.remove_object(bucket_name, object_copy)
_CLIENT.remove_bucket(bucket_name)
def test_copy_object_with_metadata(log_entry):
"""Test copy_object() with metadata."""
# Get a unique bucket_name and object_name
bucket_name = _gen_bucket_name()
object_name = "{0}".format(uuid4())
object_source = object_name + "-source"
    object_copy = object_name + "-copy"
# -*- coding: utf-8 -*-
from collections import OrderedDict
from gluon import current
from gluon.storage import Storage
def config(settings):
"""
Template for WA-COP + CAD Cloud Integration
"""
T = current.T
# =========================================================================
# System Settings
#
settings.base.system_name = T("Sahana: Washington Common Operating Picture (WA-COP)")
settings.base.system_name_short = T("Sahana")
# Prepop default
settings.base.prepopulate += ("WACOP", "default/users", "WACOP/Demo")
# Theme (folder to use for views/layout.html)
settings.base.theme = "WACOP"
settings.ui.social_buttons = True
# -------------------------------------------------------------------------
# Self-Registration and User Profile
#
# Users can self-register
settings.security.self_registration = False
# Users need to verify their email
settings.auth.registration_requires_verification = True
# Users need to be approved
settings.auth.registration_requires_approval = True
settings.auth.registration_requests_organisation = True
settings.auth.registration_organisation_required = True
# Approval emails get sent to all admins
settings.mail.approver = "ADMIN"
settings.auth.registration_link_user_to = {"staff": T("Staff")}
settings.auth.registration_link_user_to_default = ["staff"]
settings.auth.registration_roles = {"organisation_id": ["USER"],
}
settings.auth.show_utc_offset = False
settings.auth.show_link = False
# -------------------------------------------------------------------------
# Security Policy
#
settings.security.policy = 7 # Apply Controller, Function and Table ACLs
settings.security.map = True
# -------------------------------------------------------------------------
# L10n (Localization) settings
#
settings.L10n.languages = OrderedDict([
("en", "English"),
("es", "Español"),
])
# Default Language
settings.L10n.default_language = "en"
# Default timezone for users
settings.L10n.utc_offset = "-0800"
# Unsortable 'pretty' date format
settings.L10n.date_format = "%b %d %Y"
# Number formats (defaults to ISO 31-0)
# Decimal separator for numbers (defaults to ,)
settings.L10n.decimal_separator = "."
# Thousands separator for numbers (defaults to space)
settings.L10n.thousands_separator = ","
# Default Country Code for telephone numbers
settings.L10n.default_country_code = 1
# Enable this to change the label for 'Mobile Phone'
settings.ui.label_mobile_phone = "Cell Phone"
# Enable this to change the label for 'Postcode'
settings.ui.label_postcode = "ZIP Code"
settings.msg.require_international_phone_numbers = False
# PDF to Letter
settings.base.paper_size = T("Letter")
# Uncomment this to Translate CMS Series Names
# - we want this on when running s3translate but off in normal usage as we use the English names to lookup icons in render_posts
#settings.L10n.translate_cms_series = True
# Uncomment this to Translate Location Names
#settings.L10n.translate_gis_location = True
# Has scalability issues, but should be OK with our number of records
settings.search.dates_auto_range = True
# -------------------------------------------------------------------------
# GIS settings
#
# Restrict the Location Selector to just certain countries
settings.gis.countries = ("US",)
# Levels for the LocationSelector
levels = ("L1", "L2", "L3")
# Uncomment to pass Addresses imported from CSV to a Geocoder to try and automate Lat/Lon
#settings.gis.geocode_imported_addresses = "google"
# Until we add support to S3LocationSelector to set dropdowns from LatLons
settings.gis.check_within_parent_boundaries = False
# GeoNames username
settings.gis.geonames_username = "mcop"
# Uncomment to hide Layer Properties tool
#settings.gis.layer_properties = False
# Uncomment to display the Map Legend as a floating DIV
settings.gis.legend = "float"
# Uncomment to prevent showing LatLon in Location Represents
settings.gis.location_represent_address_only = "icon"
# Resources which can be directly added to the main map
settings.gis.poi_create_resources = None
# -------------------------------------------------------------------------
# Modules
#
settings.modules = OrderedDict([
# Core modules which shouldn't be disabled
("default", Storage(
name_nice = "Home",
restricted = False, # Use ACLs to control access to this module
access = None, # All Users (inc Anonymous) can see this module in the default menu & access the controller
module_type = None # This item is not shown in the menu
)),
("admin", Storage(
name_nice = "Administration",
#description = "Site Administration",
restricted = True,
access = "|1|", # Only Administrators can see this module in the default menu & access the controller
module_type = None # This item is handled separately for the menu
)),
("appadmin", Storage(
name_nice = "Administration",
#description = "Site Administration",
restricted = True,
module_type = None # No Menu
)),
# ("errors", Storage(
# name_nice = "Ticket Viewer",
# #description = "Needed for Breadcrumbs",
# restricted = False,
# module_type = None # No Menu
# )),
("sync", Storage(
name_nice = "Synchronization",
#description = "Synchronization",
restricted = True,
access = "|1|", # Only Administrators can see this module in the default menu & access the controller
module_type = None # This item is handled separately for the menu
)),
#("translate", Storage(
# name_nice = "Translation Functionality",
# #description = "Selective translation of strings based on module.",
# module_type = None,
#)),
("gis", Storage(
name_nice = "Map",
#description = "Situation Awareness & Geospatial Analysis",
restricted = True,
module_type = 1, # 1st item in the menu
)),
("pr", Storage(
name_nice = "Persons",
description = "Central point to record details on People",
restricted = True,
access = "|1|", # Only Administrators can see this module in the default menu (access to controller is possible to all still)
module_type = None
)),
("org", Storage(
name_nice = "Organizations",
#description = 'Lists "who is doing what & where". Allows relief agencies to coordinate their activities',
restricted = True,
module_type = 10
)),
# All modules below here should be possible to disable safely
("hrm", Storage(
name_nice = "Contacts",
#description = "Human Resources Management",
restricted = True,
module_type = None,
)),
("cms", Storage(
name_nice = "Content Management",
restricted = True,
module_type = 10,
)),
("event", Storage(
name_nice = "Events",
restricted = True,
module_type = 2,
)),
("fire", Storage(
name_nice = "Fire",
restricted = True,
module_type = None,
)),
("police", Storage(
name_nice = "Police",
restricted = True,
module_type = None,
)),
("project", Storage(
name_nice = "Tasks",
restricted = True,
module_type = None,
)),
("doc", Storage(
name_nice = "Documents",
#description = "A library of digital resources, such as photos, documents and reports",
restricted = True,
module_type = None,
)),
("stats", Storage(
name_nice = "Statistics",
restricted = True,
module_type = None
)),
])
# -------------------------------------------------------------------------
# CMS Content Management
#
settings.cms.bookmarks = True
settings.cms.richtext = True
settings.cms.show_tags = True
# -------------------------------------------------------------------------
def cms_post_onaccept(form):
"""
Handle Tags in Create / Update forms
"""
post_id = form.vars.id
db = current.db
s3db = current.s3db
ttable = s3db.cms_tag
ltable = s3db.cms_tag_post
# Delete all existing tags for this post
db(ltable.post_id == post_id).delete()
# Add these tags
tags = current.request.post_vars.get("tags")
if not tags:
return
tags = tags.split(",")
tag_ids = db(ttable.name.belongs(tags)).select(ttable.id,
ttable.name).as_dict(key="name")
for tag in tags:
            row = tag_ids.get(tag)
if row:
tag_id = row.get("id")
else:
tag_id = ttable.insert(name=tag)
ltable.insert(post_id = post_id,
tag_id = tag_id,
)
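    # Illustrative behaviour (tag names are hypothetical): submitting a post with
    # post_vars tags="flood,road-closure" relinks the post to the cms_tag rows for
    # those names, creating any tag that does not exist yet.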
# -------------------------------------------------------------------------
def customise_cms_post_resource(r, tablename):
db = current.db
s3db = current.s3db
table = s3db.cms_post
table.priority.readable = table.priority.writable = True
table.series_id.readable = table.series_id.writable = True
table.status_id.readable = table.status_id.writable = True
method = r.method
if method in ("create", "update"):
# Custom Form
from s3 import S3SQLCustomForm, S3SQLInlineComponent
crud_fields = [(T("Type"), "series_id"),
(T("Priority"), "priority"),
(T("Status"), "status_id"),
(T("Title"), "title"),
(T("Text"), "body"),
(T("Location"), "location_id"),
# Tags are added client-side
S3SQLInlineComponent("document",
name = "file",
label = T("Files"),
fields = [("", "file"),
#"comments",
],
),
]
if r.tablename != "event_incident":
if r.tablename == "event_event":
from gluon import IS_EMPTY_OR
from s3 import IS_ONE_OF
itable = s3db.event_incident
query = (itable.event_id == r.id) & \
(itable.closed == False) & \
(itable.deleted == False)
set = db(query)
f = s3db.event_post.incident_id
f.requires = IS_EMPTY_OR(
IS_ONE_OF(set, "event_incident.id",
f.represent,
orderby="event_incident.name",
sort=True))
crud_fields.insert(0, S3SQLInlineComponent("incident_post",
fields = [("", "incident_id")],
label = T("Incident"),
multiple = False,
))
        crud_form = S3SQLCustomForm(*crud_fields)
# Client support for Tags
appname = r.application
s3 = current.response.s3
scripts_append = s3.scripts.append
if s3.debug:
scripts_append("/%s/static/scripts/tag-it.js" % appname)
else:
scripts_append("/%s/static/scripts/tag-it.min.js" % appname)
scripts_append("/%s/static/themes/WACOP/js/update_tags.js" % appname)
if method == "create":
s3.jquery_ready.append('''wacop_update_tags("")''')
elif method == "update":
ttable = s3db.cms_tag
ltable = s3db.cms_tag_post
if r.tablename == "cms_post":
post_id = r.id
else:
post_id = r.component.id
query = (ltable.post_id == post_id) & \
(ltable.tag_id == ttable.id)
tags = db(query).select(ttable.name)
tags = [tag.name for tag in tags]
tags = ",".join(tags)
s3.jquery_ready.append('''wacop_update_tags("%s")''' % tags)
# Processing Tags
default = s3db.get_config(tablename, "onaccept")
if isinstance(default, list):
onaccept = default
onaccept.append(cms_post_onaccept)
else:
onaccept = [default, cms_post_onaccept]
s3db.configure(tablename,
crud_form = crud_form,
onaccept = onaccept,
)
elif method in ("custom", "datalist", "filter"):
# dataList configuration
from templates.WACOP.controllers import cms_post_list_layout
s3 = current.response.s3
s3.dl_no_header = True
s3db.configure(tablename,
list_fields = ["series_id",
"priority",
"status_id",
"date",
"title",
"body",
"created_by",
"tag.name",
"document.file",
"comment.id",
#"comment.body", # Extra fields come in unsorted, so can't match up to records
#!/usr/bin/env python
# coding=utf-8
'''
Author: ZZ_Guo
Email: <EMAIL>
Date: 2020-09-11 23:03:00
LastEditor: ZZ_Guo
LastEditTime: 2021-05-06 17:04:38
Description:
Environment:
'''
from pysc2.lib import actions, features, units
from pysc2.env import sc2_env, run_loop, available_actions_printer
from pysc2 import maps
import sc2
from sc2 import run_game, maps, Race, Difficulty, position
from sc2.player import Bot, Computer, Human
from sc2.constants import NEXUS, PROBE, PYLON, ASSIMILATOR, GATEWAY, \
CYBERNETICSCORE, STALKER, STARGATE, VOIDRAY, OBSERVER, ROBOTICSFACILITY
# from terran_agent import TerranAgent
from pathlib import Path
from absl import app, logging, flags
import random
import math
import os, sys
sys.path.append(os.path.dirname(os.path.dirname(os.path.realpath(__file__))))
import numpy as np
seed = 500
np.random.seed(seed)
_MOVE_RAND = 1000
_MOVE_MIDDLE = 2000
_BACKGROUND = 0
_AI_SELF = 1
_AI_ALLIES = 2
_AI_NEUTRAL = 3
_AI_HOSTILE = 4
_SELECT_ALL = [0]
_NOT_QUEUED = [0]
_QUEUED = [1]
EPS_START = 0.9
EPS_END = 0.025
EPS_DECAY = 2500
_PLAYER_RELATIVE = features.SCREEN_FEATURES.player_relative.index
_PLAYER_ID = features.SCREEN_FEATURES.player_id.index
_PLAYER_SELF = 1
_PLAYER_HOSTILE = 4
_AI_RELATIVE = features.SCREEN_FEATURES.player_relative.index
_AI_SELECTED = features.SCREEN_FEATURES.selected.index
_UNIT_TYPE = features.SCREEN_FEATURES.unit_type.index
_NO_OP = actions.FUNCTIONS.no_op.id
_MOVE = actions.FUNCTIONS.Scan_Move_screen.id
_MOVE_SCREEN = actions.FUNCTIONS.Attack_screen.id
# _MOVE_SCREEN = actions.FUNCTIONS.Attack_unit.id
_SELECT_ARMY = actions.FUNCTIONS.select_army.id
# _ATTACK_MINIMAP = actions.FUNCTIONS.Attack_Attack_minimap.id
_ATTACK_MINIMAP = actions.FUNCTIONS.Attack_minimap.id
army_selected = False
army_rallied = False
_SELECT_IDLE_WORKER = actions.FUNCTIONS.select_idle_worker.id
_TRAIN_SCV = actions.FUNCTIONS.Train_SCV_quick.id
_SELECT_POINT = actions.FUNCTIONS.select_point.id
_BUILD_SUPPLY_DEPOT = actions.FUNCTIONS.Build_SupplyDepot_screen.id
_BUILD_BARRACKS = actions.FUNCTIONS.Build_Barracks_screen.id
_TRAIN_MARINE = actions.FUNCTIONS.Train_Marine_quick.id
# _COLLECT_RESOURCES = actions.FUNCTIONS.Harvest_Gather_SCV_screen.id
_COLLECT_RESOURCES = actions.FUNCTIONS.Harvest_Gather_screen.id
_BUILD_MISSLE_TURRENT = actions.FUNCTIONS.Build_MissileTurret_screen.id
_BUILD_ENG_BAY = actions.FUNCTIONS.Build_EngineeringBay_screen.id
_TERRAN_COMMANDCENTER = 18
_TERRAN_SCV = 45
_TERRAN_MARINE = 48
_TERRAN_SUPPLY_DEPOT = 19
_TERRAN_BARRACKS = 21
_TERRAN_ENGINEERINGBAY = 22
_MissileTurret = 23
_NEUTRAL_BATTLESTATIONMINERALFIELD = 886
_NEUTRAL_BATTLESTATIONMINERALFIELD750 = 887
ACTION_DO_NOTHING = 'donothing'
ACTION_SELECT_SCV = 'selectscv'
ACTION_BUILD_SUPPLY_DEPOT = 'buildsupplydepot'
ACTION_BUILD_BARRACKS = 'buildbarracks'
ACTION_SELECT_BARRACKS = 'selectbarracks'
ACTION_TRAIN_MARINE = 'buildmarine'
ACTION_SELECT_ARMY = 'selectarmy'
ACTION_ATTACK = 'attack'
ACTION_COLLECT_RESOUCES = 'collect'
ACTION_BUILD_ENGBAY = 'buildengbay'
ACTION_BUILD_MISSLE_TURRENT = 'buildmissleturrent'
ACTION_TRAIN_SCV = 'trainscv'
categorical_actions = [
ACTION_SELECT_SCV,
ACTION_BUILD_SUPPLY_DEPOT,
ACTION_BUILD_BARRACKS,
ACTION_SELECT_BARRACKS,
ACTION_TRAIN_MARINE,
ACTION_BUILD_ENGBAY,
ACTION_BUILD_MISSLE_TURRENT,
ACTION_SELECT_ARMY,
ACTION_ATTACK,
ACTION_TRAIN_SCV,
ACTION_DO_NOTHING,
ACTION_COLLECT_RESOUCES
]
categorical_actions_id = [
_SELECT_IDLE_WORKER,
_BUILD_SUPPLY_DEPOT,
_BUILD_BARRACKS,
_SELECT_POINT,
_TRAIN_MARINE,
_SELECT_ARMY,
_NO_OP,
]
# spatial_actions = [ACTION_ATTACK]
spatial_actions = [_MOVE_SCREEN]
id_from_actions = {}
action_from_id = {}
# for ix, k in enumerate(spatial_actions):
# id_from_actions[k] = ix
# action_from_id[ix] = k
# for ix, k in enumerate(categorical_actions):
# id_from_actions[k] = ix+len(spatial_actions)
# action_from_id[ix+len(spatial_actions)] = k
for ix, k in enumerate(categorical_actions):
id_from_actions[k] = ix
action_from_id[ix] = k
FLAGS = flags.FLAGS
FLAGS(['run_sc2'])
# def get_action_v3(state):
#
# pass
def get_state(obs):
return [np.array(obs.observation['feature_screen']).reshape(1, 27, 64, 64),
np.array(obs.observation['feature_minimap']).reshape(1, 11, 64, 64),
np.array(obs.observation['player']).reshape(1, 11)
]
def to_yx(point):
"""transform a scalar from [0;4095] to a (y,x) coordinate in [0:63,0:63]"""
    return point % 64, (point - (point % 64)) // 64
def transformLocation(obs, x, y):
player_y, player_x = (obs.observation['feature_minimap'][_PLAYER_RELATIVE] == _PLAYER_SELF).nonzero()
base_top_left = 1 if player_y.any() and player_y.mean() <= 31 else 0
if not base_top_left:
return [64 - x, 64 - y]
else:
return [x, y]
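# Illustrative (coordinates are made up): when our base is bottom-right
# (base_top_left == 0), a point specified for a top-left layout such as
# transformLocation(obs, 12, 20) is mirrored to [52, 44], so it still refers to
# the corresponding spot on our side of the map.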
def get_action_v3(id_action, point, obs, num_dict=None):
# obs = obs[0]
unit_type = obs.observation['feature_screen'][_UNIT_TYPE]
depot_y, depot_x = (unit_type == _TERRAN_SUPPLY_DEPOT).nonzero()
    supply_depot_count = 1 if depot_y.any() else 0
barracks_y, barracks_x = (unit_type == _TERRAN_BARRACKS).nonzero()
barracks_count = 1 if barracks_y.any() else 0
supply_limit = obs.observation['player'][4]
army_supply = obs.observation['player'][5]
food_workers = obs.observation['player'][6]
idle_workers_cnt = obs.observation['player'][7]
army_cnt = obs.observation['player'][8]
killed_unit_score = obs.observation['score_cumulative'][5]
killed_building_score = obs.observation['score_cumulative'][6]
current_state = np.zeros(20)
current_state[0] = supply_depot_count
current_state[1] = barracks_count
current_state[2] = supply_limit
current_state[3] = army_supply
hot_squares = np.zeros(16)
army_selected = False
army_rallied = False
    enemy_y, enemy_x = (obs.observation['feature_minimap'][_PLAYER_RELATIVE] == _PLAYER_HOSTILE).nonzero()
    if enemy_y.any():
for i in range(0, len(enemy_y)):
y = int(math.ceil((enemy_y[i] + 1) / 16))
x = int(math.ceil((enemy_x[i] + 1) / 16))
hot_squares[((y - 1) * 4) + (x - 1)] = 1
for i in range(0, 16):
current_state[i + 4] = hot_squares[i]
smart_action = id_action
# if '_' in smart_action:
# smart_action, x, y = smart_action.split('_')
if smart_action == ACTION_SELECT_SCV:
unit_type = obs.observation['feature_screen'][_UNIT_TYPE]
unit_y, unit_x = (unit_type == _TERRAN_SCV).nonzero()
if unit_y.any():
i = random.randint(0, len(unit_y) - 1)
target = [unit_x[i], unit_y[i]]
if _SELECT_IDLE_WORKER in obs.observation["available_actions"]:
func = actions.FunctionCall(_SELECT_IDLE_WORKER, [_NOT_QUEUED])
else:
func = actions.FunctionCall(_SELECT_POINT, [_NOT_QUEUED, target])
elif smart_action == ACTION_TRAIN_SCV:
# worker_cnt = num_dict["workers"]
# if _TRAIN_SCV in obs.observation['available_actions'] and worker_cnt < 16:
if _TRAIN_SCV in obs.observation['available_actions']:
func = actions.FunctionCall(_TRAIN_SCV, [_QUEUED])
# num_dict["workers"] += 1
elif smart_action == ACTION_COLLECT_RESOUCES:
        # TODO: warning about "the target must be a resource"
unit_type = obs.observation['feature_screen'][_UNIT_TYPE]
scv_y, scv_x = (unit_type == units.Terran.SCV).nonzero()
mineral_y, mineral_x = (unit_type == units.Neutral.MineralField).nonzero()
# mineral_y, mineral_x = (unit_type == _NEUTRAL_BATTLESTATIONMINERALFIELD).nonzero()
if _COLLECT_RESOURCES in obs.observation['available_actions'] and idle_workers_cnt > 0:
if mineral_y.any():
i = random.randint(0, len(scv_y) - 1)
# target = (mineral_y[i], mineral_y[i])
# target = (mineral_y.mean(), mineral_y.mean())
# target = (scv_y.mean(), scv_x.mean())
target = (scv_y[i], scv_x[i])
# target = (11, 16)
func = actions.FunctionCall(_COLLECT_RESOURCES, [_NOT_QUEUED, target])
elif smart_action == ACTION_BUILD_SUPPLY_DEPOT:
deports_cnt = num_dict["supply_deports"]
if _BUILD_SUPPLY_DEPOT in obs.observation['available_actions'] and deports_cnt < 4:
unit_type = obs.observation['feature_screen'][_UNIT_TYPE]
unit_y, unit_x = (unit_type == _TERRAN_COMMANDCENTER).nonzero()
if unit_y.any():
if num_dict["supply_deports"] == 0:
target = (31, 8)
elif num_dict["supply_deports"] == 1:
target = (26, 8)
elif num_dict["supply_deports"] == 2:
target = (21, 8)
elif num_dict["supply_deports"] == 3:
target = (16, 8)
else:
target = to_yx(point)
func = actions.FunctionCall(_BUILD_SUPPLY_DEPOT, [_NOT_QUEUED, target])
try:
num_dict["supply_deports"] += 1
return func, smart_action, num_dict
except UnboundLocalError:
num_dict["supply_deports"] -= 1
print(str(smart_action) + " " + str(point) + " is not an available action")
return get_action_v3(action_from_id[0], point, obs, num_dict)
elif smart_action == ACTION_BUILD_BARRACKS:
if _BUILD_BARRACKS in obs.observation['available_actions']:
unit_type = obs.observation['feature_screen'][_UNIT_TYPE]
unit_y, unit_x = (unit_type == _TERRAN_COMMANDCENTER).nonzero()
if unit_y.any() and num_dict['barracks'] < 3:
# target = to_yx(point)
if num_dict["barracks"] == 0:
target = (56, 18)
elif num_dict["barracks"] == 1:
target = (56, 28)
elif num_dict["barracks"] == 2:
target = (56, 38)
else:
target = to_yx(point)
func = actions.FunctionCall(_BUILD_BARRACKS, [_NOT_QUEUED, target])
try:
num_dict["barracks"] += 1
return func, smart_action, num_dict
except UnboundLocalError:
num_dict["barracks"] -= 1
print(str(smart_action) + " " + str(point) + " is not an available action")
if num_dict['supply_deports'] == 0:
return get_action_v3(action_from_id[1], point, obs, num_dict)
else:
return get_action_v3(action_from_id[0], point, obs, num_dict)
elif smart_action == ACTION_SELECT_BARRACKS:
unit_type = obs.observation['feature_screen'][_UNIT_TYPE]
unit_y, unit_x = (unit_type == _TERRAN_BARRACKS).nonzero()
if unit_y.any():
# target = [int(unit_x.mean()), int(unit_y.mean())]
# target = (np.random.([(unit_x[i], unit_y[i]) for i in range(len(unit_x))]))
a_list = [(unit_x[i], unit_y[i]) for i in range(len(unit_x))]
target = list(map(lambda x: random.choice(a_list), range(1)))[0]
func = actions.FunctionCall(_SELECT_POINT, [_NOT_QUEUED, target])
try:
return func, smart_action, num_dict
except UnboundLocalError:
print(str(smart_action) + " " + str(point) + " is not an available action")
return get_action_v3(action_from_id[2], point, obs, num_dict)
elif smart_action == ACTION_TRAIN_MARINE:
unit_type = obs.observation['feature_screen'][_UNIT_TYPE]
unit_y, unit_x = (unit_type == _TERRAN_BARRACKS).nonzero()
if _TRAIN_MARINE in obs.observation['available_actions'] and unit_y.any():
func = actions.FunctionCall(_TRAIN_MARINE, [_QUEUED])
try:
# num_dict["marines"] += 1
return func, smart_action, num_dict
except UnboundLocalError:
# num_dict["marines"] -= 1
print(str(smart_action) + " " + str(point) + " is not an available action")
return get_action_v3(action_from_id[3], point, obs, num_dict)
elif smart_action == ACTION_SELECT_ARMY:
if _SELECT_ARMY in obs.observation['available_actions']:
func = actions.FunctionCall(_SELECT_ARMY, [_NOT_QUEUED])
try:
return func, smart_action, num_dict
except UnboundLocalError:
print(str(smart_action) + " " + str(point) + " is not an available action")
return get_action_v3(action_from_id[4], point, obs, num_dict)
elif smart_action == ACTION_ATTACK:
enemy_y, enemy_x = (obs.observation['feature_minimap'][_PLAYER_RELATIVE] == _PLAYER_HOSTILE).nonzero()
        if enemy_y.any():  # attack known enemies
# for i in range(0, len(enemy_y)):
# marines_cnt = num_dict["marines"]
if len(obs.observation['multi_select']) and army_cnt > 12 and num_dict['attack_cnt'] < 2:
# if obs.observation['multi_select'][0][0] != _TERRAN_SCV and _ATTACK_MINIMAP in obs.observation["available_actions"]:
if _ATTACK_MINIMAP in obs.observation["available_actions"]:
# if _ATTACK_MINIMAP in obs.observation["available_actions"]:
if enemy_y.any():
target = [int(np.random.choice(enemy_x)), int(np.random.choice(enemy_y))]
# target = to_yx(point) # TODO:
func = actions.FunctionCall(_ATTACK_MINIMAP, [_NOT_QUEUED, target])
# num_dict['marines'] = 0
num_dict['attack_cnt'] += 1
elif num_dict['attack_cnt'] >= 2 and len(obs.observation['multi_select']) and army_cnt >= 3:
# if obs.observation['multi_select'][0][0] != _TERRAN_SCV and _ATTACK_MINIMAP in obs.observation["available_actions"]:
if _ATTACK_MINIMAP in obs.observation["available_actions"]:
# if _ATTACK_MINIMAP in obs.observation["available_actions"]:
if enemy_y.any():
target = [int(np.random.choice(enemy_x)), int(np.random.choice(enemy_y))]
# target = to_yx(point) # TODO:
func = actions.FunctionCall(_ATTACK_MINIMAP, [_NOT_QUEUED, target])
# num_dict['marines'] = 0
num_dict['attack_cnt'] += 1
# else:
# if len(obs.observation['multi_select']):
# # if obs.observation['multi_select'][0][0] != _TERRAN_SCV and _ATTACK_MINIMAP in obs.observation["available_actions"]:
# if _ATTACK_MINIMAP in obs.observation["available_actions"]:
# # if _ATTACK_MINIMAP in obs.observation["available_actions"]:
# if enemy_y.any():
# target = [int(np.random.choice(enemy_x)), int(np.random.choice(enemy_y))]
# # target = to_yx(point) # TODO:
# func = actions.FunctionCall(_ATTACK_MINIMAP, [_NOT_QUEUED, target])
# # num_dict['marines'] = 0
# num_dict['attack_cnt'] += 1
        else:  # attack an arbitrary location (no enemies found; acts like a patrol)
if len(obs.observation['multi_select']):
# if obs.observation['multi_select'][0][0] != _TERRAN_SCV and _ATTACK_MINIMAP in obs.observation["available_actions"]:
if _ATTACK_MINIMAP in obs.observation["available_actions"]:
target = to_yx(point)
func = actions.FunctionCall(_ATTACK_MINIMAP, [_NOT_QUEUED, target])
try:
return func, smart_action, num_dict
except UnboundLocalError:
num_dict['attack_cnt'] -= 1
print(str(smart_action) + " " + str(point) + " is not an available action")
return get_action_v3(action_from_id[4], point, obs, num_dict)
elif smart_action == ACTION_BUILD_ENGBAY:
engbays_cnt = num_dict["engbays"]
if _BUILD_ENG_BAY in obs.observation['available_actions'] and engbays_cnt == 0:
unit_type = obs.observation['feature_screen'][_UNIT_TYPE]
unit_y, unit_x = (unit_type == _TERRAN_COMMANDCENTER).nonzero()
if unit_y.any():
# target = to_yx(point)
target = (38, 44)
func = actions.FunctionCall(_BUILD_ENG_BAY, [_NOT_QUEUED, target])
try:
num_dict["engbays"] += 1
return func, smart_action, num_dict
except UnboundLocalError:
            num_dict["engbays"] -= 1
# UI/ui_DatabaseEdit.py
# -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'databaseEdit.ui'
#
# Created: Tue Mar 12 10:35:03 2013
# by: pyside-uic 0.2.14 running on PySide 1.1.2
#
# WARNING! All changes made in this file will be lost!
from PySide import QtCore, QtGui
class Ui_DatabaseDialog(object):
def setupUi(self, DatabaseDialog):
DatabaseDialog.setObjectName("DatabaseDialog")
DatabaseDialog.resize(1002, 405)
self.groupBoxDbEdit = QtGui.QGroupBox(DatabaseDialog)
self.groupBoxDbEdit.setGeometry(QtCore.QRect(10, 0, 981, 361))
self.groupBoxDbEdit.setObjectName("groupBoxDbEdit")
self.DatabaseEditTab = QtGui.QTabWidget(self.groupBoxDbEdit)
self.DatabaseEditTab.setGeometry(QtCore.QRect(10, 20, 961, 331))
self.DatabaseEditTab.setMinimumSize(QtCore.QSize(20, 75))
self.DatabaseEditTab.setObjectName("DatabaseEditTab")
self.effectTab = QtGui.QWidget()
self.effectTab.setAutoFillBackground(True)
self.effectTab.setObjectName("effectTab")
self.LabelAlias = QtGui.QLabel(self.effectTab)
self.LabelAlias.setGeometry(QtCore.QRect(10, 100, 75, 20))
self.LabelAlias.setMinimumSize(QtCore.QSize(75, 20))
self.LabelAlias.setObjectName("LabelAlias")
self.LabelType = QtGui.QLabel(self.effectTab)
self.LabelType.setGeometry(QtCore.QRect(10, 20, 75, 20))
self.LabelType.setMinimumSize(QtCore.QSize(75, 20))
self.LabelType.setObjectName("LabelType")
self.LabelName = QtGui.QLabel(self.effectTab)
self.LabelName.setGeometry(QtCore.QRect(10, 60, 75, 20))
self.LabelName.setMinimumSize(QtCore.QSize(75, 20))
self.LabelName.setObjectName("LabelName")
self.descriptLabel = QtGui.QLabel(self.effectTab)
self.descriptLabel.setGeometry(QtCore.QRect(10, 150, 75, 20))
self.descriptLabel.setMinimumSize(QtCore.QSize(75, 20))
self.descriptLabel.setObjectName("descriptLabel")
self.sizeLabel = QtGui.QLabel(self.effectTab)
self.sizeLabel.setGeometry(QtCore.QRect(10, 210, 75, 20))
self.sizeLabel.setMinimumSize(QtCore.QSize(75, 20))
self.sizeLabel.setObjectName("sizeLabel")
self.StockLabel = QtGui.QLabel(self.effectTab)
self.StockLabel.setGeometry(QtCore.QRect(10, 270, 75, 20))
self.StockLabel.setMinimumSize(QtCore.QSize(75, 20))
self.StockLabel.setObjectName("StockLabel")
self.lineEditName = QtGui.QLineEdit(self.effectTab)
self.lineEditName.setGeometry(QtCore.QRect(80, 60, 113, 20))
self.lineEditName.setObjectName("lineEditName")
self.lineEditAlias = QtGui.QLineEdit(self.effectTab)
self.lineEditAlias.setGeometry(QtCore.QRect(80, 100, 113, 20))
self.lineEditAlias.setObjectName("lineEditAlias")
self.lineEdit_descript = QtGui.QLineEdit(self.effectTab)
self.lineEdit_descript.setGeometry(QtCore.QRect(80, 150, 191, 21))
self.lineEdit_descript.setObjectName("lineEdit_descript")
self.lineEdit_size = QtGui.QLineEdit(self.effectTab)
self.lineEdit_size.setGeometry(QtCore.QRect(80, 210, 70, 20))
self.lineEdit_size.setObjectName("lineEdit_size")
doubleVal = QtGui.QDoubleValidator()
self.lineEdit_size.setValidator(doubleVal)
self.lineEdit_stock = QtGui.QLineEdit(self.effectTab)
self.lineEdit_stock.setGeometry(QtCore.QRect(80, 270, 70, 20))
self.lineEdit_stock.setObjectName("lineEdit_stock")
intVal = QtGui.QIntValidator()
self.lineEdit_stock.setValidator(intVal)
self.line = QtGui.QFrame(self.effectTab)
self.line.setGeometry(QtCore.QRect(410, 10, 16, 281))
self.line.setFrameShape(QtGui.QFrame.VLine)
self.line.setFrameShadow(QtGui.QFrame.Sunken)
self.line.setObjectName("line")
self.mmLabel = QtGui.QLabel(self.effectTab)
self.mmLabel.setGeometry(QtCore.QRect(150, 210, 25, 20))
self.mmLabel.setObjectName("mmLabel")
self.lineEdit_usedEffect = QtGui.QLineEdit(self.effectTab)
self.lineEdit_usedEffect.setGeometry(QtCore.QRect(280, 210, 100, 20))
self.lineEdit_usedEffect.setObjectName("lineEdit_usedEffect")
intVal = QtGui.QIntValidator()
self.lineEdit_usedEffect.setValidator(intVal)
self.label_effectUsed = QtGui.QLabel(self.effectTab)
self.label_effectUsed.setGeometry(QtCore.QRect(200, 210, 75, 20))
self.label_effectUsed.setMinimumSize(QtCore.QSize(75, 20))
self.label_effectUsed.setObjectName("label_effectUsed")
self.line_2 = QtGui.QFrame(self.effectTab)
self.line_2.setGeometry(QtCore.QRect(670, 10, 16, 281))
self.line_2.setFrameShape(QtGui.QFrame.VLine)
self.line_2.setFrameShadow(QtGui.QFrame.Sunken)
self.line_2.setObjectName("line_2")
self.label_effectPrice = QtGui.QLabel(self.effectTab)
self.label_effectPrice.setGeometry(QtCore.QRect(380, 210, 30, 20))
self.label_effectPrice.setObjectName("label_effectPrice")
self.label_min = QtGui.QLabel(self.effectTab)
self.label_min.setGeometry(QtCore.QRect(200, 240, 75, 20))
self.label_min.setMinimumSize(QtCore.QSize(75, 20))
self.label_min.setObjectName("label_min")
self.label_Best = QtGui.QLabel(self.effectTab)
self.label_Best.setGeometry(QtCore.QRect(200, 270, 75, 20))
self.label_Best.setMinimumSize(QtCore.QSize(75, 20))
self.label_Best.setObjectName("label_Best")
self.lineEdit_Min = QtGui.QLineEdit(self.effectTab)
self.lineEdit_Min.setGeometry(QtCore.QRect(280, 240, 100, 20))
self.lineEdit_Min.setObjectName("lineEdit_Min")
intVal = QtGui.QIntValidator()
self.lineEdit_Min.setValidator(intVal)
self.lineEdit_Best = QtGui.QLineEdit(self.effectTab)
self.lineEdit_Best.setGeometry(QtCore.QRect(280, 270, 100, 20))
self.lineEdit_Best.setObjectName("lineEdit_Best")
intVal = QtGui.QIntValidator()
self.lineEdit_Best.setValidator(intVal)
self.label_minPrice = QtGui.QLabel(self.effectTab)
self.label_minPrice.setGeometry(QtCore.QRect(380, 240, 30, 20))
self.label_minPrice.setObjectName("label_minPrice")
self.label_bestPrice = QtGui.QLabel(self.effectTab)
self.label_bestPrice.setGeometry(QtCore.QRect(380, 270, 30, 20))
self.label_bestPrice.setObjectName("label_bestPrice")
self.label_riseTime = QtGui.QLabel(self.effectTab)
self.label_riseTime.setGeometry(QtCore.QRect(440, 20, 70, 20))
self.label_riseTime.setMinimumSize(QtCore.QSize(70, 20))
self.label_riseTime.setObjectName("label_riseTime")
self.lineEdit_riseTime = QtGui.QLineEdit(self.effectTab)
self.lineEdit_riseTime.setGeometry(QtCore.QRect(510, 20, 80, 20))
self.lineEdit_riseTime.setObjectName("lineEdit_riseTime")
doubleVal = QtGui.QDoubleValidator()
self.lineEdit_riseTime.setValidator(doubleVal)
self.lineEdit_effect1 = QtGui.QLineEdit(self.effectTab)
self.lineEdit_effect1.setGeometry(QtCore.QRect(510, 70, 120, 20))
self.lineEdit_effect1.setObjectName("lineEdit_effect1")
self.lineEdit_durationTime1 = QtGui.QLineEdit(self.effectTab)
self.lineEdit_durationTime1.setGeometry(QtCore.QRect(510, 150, 80, 20))
self.lineEdit_durationTime1.setObjectName("lineEdit_durationTime1")
doubleVal = QtGui.QDoubleValidator()
self.lineEdit_durationTime1.setValidator(doubleVal)
self.lineEdit_effect2 = QtGui.QLineEdit(self.effectTab)
self.lineEdit_effect2.setGeometry(QtCore.QRect(510, 190, 120, 20))
self.lineEdit_effect2.setObjectName("lineEdit_effect2")
self.lineEdit_durationTime2 = QtGui.QLineEdit(self.effectTab)
self.lineEdit_durationTime2.setGeometry(QtCore.QRect(510, 270, 80, 20))
self.lineEdit_durationTime2.setObjectName("lineEdit_durationTime2")
doubleVal = QtGui.QDoubleValidator()
self.lineEdit_durationTime2.setValidator(doubleVal)
self.label_risTimeSec = QtGui.QLabel(self.effectTab)
self.label_risTimeSec.setGeometry(QtCore.QRect(590, 20, 30, 20))
self.label_risTimeSec.setObjectName("label_risTimeSec")
self.label_effect1 = QtGui.QLabel(self.effectTab)
self.label_effect1.setGeometry(QtCore.QRect(440, 70, 70, 20))
self.label_effect1.setMinimumSize(QtCore.QSize(70, 20))
self.label_effect1.setObjectName("label_effect1")
self.label_color1 = QtGui.QLabel(self.effectTab)
self.label_color1.setGeometry(QtCore.QRect(440, 110, 70, 20))
self.label_color1.setMinimumSize(QtCore.QSize(70, 20))
self.label_color1.setObjectName("label_color1")
self.label_duration1 = QtGui.QLabel(self.effectTab)
self.label_duration1.setGeometry(QtCore.QRect(440, 150, 70, 20))
self.label_duration1.setMinimumSize(QtCore.QSize(70, 20))
self.label_duration1.setObjectName("label_duration1")
self.label_duration2 = QtGui.QLabel(self.effectTab)
self.label_duration2.setGeometry(QtCore.QRect(440, 270, 70, 20))
self.label_duration2.setMinimumSize(QtCore.QSize(70, 20))
self.label_duration2.setObjectName("label_duration2")
self.label_effect2 = QtGui.QLabel(self.effectTab)
self.label_effect2.setGeometry(QtCore.QRect(440, 190, 70, 20))
self.label_effect2.setMinimumSize(QtCore.QSize(70, 20))
self.label_effect2.setObjectName("label_effect2")
self.label_color2 = QtGui.QLabel(self.effectTab)
self.label_color2.setGeometry(QtCore.QRect(440, 230, 70, 20))
self.label_color2.setMinimumSize(QtCore.QSize(70, 20))
self.label_color2.setObjectName("label_color2")
self.label_druationSeconds = QtGui.QLabel(self.effectTab)
self.label_druationSeconds.setGeometry(QtCore.QRect(590, 270, 30, 20))
self.label_druationSeconds.setFrameShape(QtGui.QFrame.NoFrame)
self.label_druationSeconds.setFrameShadow(QtGui.QFrame.Plain)
self.label_druationSeconds.setObjectName("label_druationSeconds")
self.label_durationSeconds = QtGui.QLabel(self.effectTab)
self.label_durationSeconds.setGeometry(QtCore.QRect(590, 150, 30, 20))
self.label_durationSeconds.setObjectName("label_durationSeconds")
self.label_effect3 = QtGui.QLabel(self.effectTab)
self.label_effect3.setGeometry(QtCore.QRect(710, 40, 80, 20))
self.label_effect3.setMinimumSize(QtCore.QSize(80, 20))
self.label_effect3.setObjectName("label_effect3")
self.lineEdit_effect3 = QtGui.QLineEdit(self.effectTab)
self.lineEdit_effect3.setGeometry(QtCore.QRect(790, 40, 120, 20))
self.lineEdit_effect3.setObjectName("lineEdit_effect3")
self.label_color3 = QtGui.QLabel(self.effectTab)
self.label_color3.setGeometry(QtCore.QRect(710, 80, 80, 20))
self.label_color3.setMinimumSize(QtCore.QSize(80, 20))
self.label_color3.setObjectName("label_color3")
self.label_duration3 = QtGui.QLabel(self.effectTab)
self.label_duration3.setGeometry(QtCore.QRect(710, 120, 80, 20))
self.label_duration3.setMinimumSize(QtCore.QSize(80, 20))
self.label_duration3.setObjectName("label_duration3")
self.lineEdit_durationTime3 = QtGui.QLineEdit(self.effectTab)
self.lineEdit_durationTime3.setGeometry(QtCore.QRect(790, 120, 80, 20))
self.lineEdit_durationTime3.setObjectName("lineEdit_durationTime3")
doubleVal = QtGui.QDoubleValidator()
self.lineEdit_durationTime3.setValidator(doubleVal)
self.label_durationSeconds3 = QtGui.QLabel(self.effectTab)
self.label_durationSeconds3.setGeometry(QtCore.QRect(870, 120, 30, 20))
self.label_durationSeconds3.setObjectName("label_durationSeconds3")
self.label_shots = QtGui.QLabel(self.effectTab)
self.label_shots.setGeometry(QtCore.QRect(710, 160, 80, 20))
self.label_shots.setMinimumSize(QtCore.QSize(80, 20))
self.label_shots.setObjectName("label_shots")
self.lineEdit_shots = QtGui.QLineEdit(self.effectTab)
self.lineEdit_shots.setGeometry(QtCore.QRect(790, 160, 80, 20))
self.lineEdit_shots.setObjectName("lineEdit_shots")
intVal = QtGui.QIntValidator()
self.lineEdit_shots.setValidator(intVal)
self.label_application = QtGui.QLabel(self.effectTab)
self.label_application.setGeometry(QtCore.QRect(710, 210, 80, 20))
self.label_application.setMinimumSize(QtCore.QSize(80, 20))
self.label_application.setObjectName("label_application")
self.radioButton_indoor = QtGui.QRadioButton(self.effectTab)
self.radioButton_indoor.setGeometry(QtCore.QRect(790, 210, 70, 20))
self.radioButton_indoor.setObjectName("radioButton_indoor")
self.radioButton_outdoor = QtGui.QRadioButton(self.effectTab)
self.radioButton_outdoor.setGeometry(QtCore.QRect(870, 210, 70, 20))
self.radioButton_outdoor.setChecked(True)
self.radioButton_outdoor.setObjectName("radioButton_outdoor")
self.label_riseHeight = QtGui.QLabel(self.effectTab)
self.label_riseHeight.setGeometry(QtCore.QRect(710, 240, 80, 20))
self.label_riseHeight.setMinimumSize(QtCore.QSize(80, 20))
self.label_riseHeight.setObjectName("label_riseHeight")
self.label_diameter = QtGui.QLabel(self.effectTab)
self.label_diameter.setGeometry(QtCore.QRect(710, 270, 80, 20))
self.label_diameter.setMinimumSize(QtCore.QSize(80, 20))
self.label_diameter.setObjectName("label_diameter")
self.lineEdit_riseHeight = QtGui.QLineEdit(self.effectTab)
self.lineEdit_riseHeight.setGeometry(QtCore.QRect(790, 240, 80, 20))
self.lineEdit_riseHeight.setObjectName("lineEdit_riseHeight")
intVal = QtGui.QIntValidator()
self.lineEdit_riseHeight.setValidator(intVal)
self.lineEdit_diameter = QtGui.QLineEdit(self.effectTab)
self.lineEdit_diameter.setGeometry(QtCore.QRect(790, 270, 80, 20))
self.lineEdit_diameter.setObjectName("lineEdit_diameter")
intVal = QtGui.QIntValidator()
self.lineEdit_diameter.setValidator(intVal)
self.label_50 = QtGui.QLabel(self.effectTab)
self.label_50.setGeometry(QtCore.QRect(610, 230, 30, 20))
self.label_50.setMouseTracking(False)
self.label_50.setAcceptDrops(False)
self.label_50.setAutoFillBackground(False)
self.label_50.setFrameShadow(QtGui.QFrame.Sunken)
self.label_50.setText("")
self.label_50.setObjectName("label_50")
self.IconLabel_color2 = QtGui.QLabel(self.effectTab)
self.IconLabel_color2.setGeometry(QtCore.QRect(590, 230, 20, 20))
self.IconLabel_color2.setMinimumSize(QtCore.QSize(20, 20))
self.IconLabel_color2.setFrameShape(QtGui.QFrame.Box)
self.IconLabel_color2.setFrameShadow(QtGui.QFrame.Sunken)
self.IconLabel_color2.setText("")
self.IconLabel_color2.setObjectName("IconLabel_color2")
self.IconLabel_color1 = QtGui.QLabel(self.effectTab)
self.IconLabel_color1.setGeometry(QtCore.QRect(590, 110, 20, 20))
self.IconLabel_color1.setMinimumSize(QtCore.QSize(20, 20))
self.IconLabel_color1.setFrameShape(QtGui.QFrame.Box)
self.IconLabel_color1.setFrameShadow(QtGui.QFrame.Sunken)
self.IconLabel_color1.setText("")
self.IconLabel_color1.setObjectName("IconLabel_color1")
self.IconLabel_color3 = QtGui.QLabel(self.effectTab)
self.IconLabel_color3.setGeometry(QtCore.QRect(870, 80, 20, 20))
self.IconLabel_color3.setMinimumSize(QtCore.QSize(20, 20))
self.IconLabel_color3.setFrameShape(QtGui.QFrame.Box)
self.IconLabel_color3.setFrameShadow(QtGui.QFrame.Sunken)
self.IconLabel_color3.setText("")
self.IconLabel_color3.setObjectName("IconLabel_color3")
self.effectIconLabel = QtGui.QLabel(self.effectTab)
self.effectIconLabel.setGeometry(QtCore.QRect(240, 10, 140, 130))
self.effectIconLabel.setMinimumSize(QtCore.QSize(20, 20))
self.effectIconLabel.setFrameShape(QtGui.QFrame.Box)
self.effectIconLabel.setFrameShadow(QtGui.QFrame.Sunken)
self.effectIconLabel.setObjectName("effectIconLabel")
self.comboBoxColor1 = QtGui.QComboBox(self.effectTab)
self.comboBoxColor1.setGeometry(QtCore.QRect(510, 110, 70, 20))
self.comboBoxColor1.setObjectName("comboBoxColor1")
self.comboBoxColor1.addItem("")
self.comboBoxColor1.setItemText(0, "")
self.comboBoxColor1.addItem("")
self.comboBoxColor1.addItem("")
self.comboBoxColor1.addItem("")
self.comboBoxColor1.addItem("")
self.comboBoxColor1.addItem("")
self.comboBoxColor1.addItem("")
self.comboBoxColor1.addItem("")
self.comboBoxColor1.addItem("")
self.comboBoxColor1.addItem("")
self.comboBoxColor2 = QtGui.QComboBox(self.effectTab)
self.comboBoxColor2.setGeometry(QtCore.QRect(510, 230, 70, 20))
self.comboBoxColor2.setObjectName("comboBoxColor2")
self.comboBoxColor2.addItem("")
self.comboBoxColor2.setItemText(0, "")
self.comboBoxColor2.addItem("")
self.comboBoxColor2.addItem("")
self.comboBoxColor2.addItem("")
self.comboBoxColor2.addItem("")
self.comboBoxColor2.addItem("")
self.comboBoxColor2.addItem("")
self.comboBoxColor2.addItem("")
self.comboBoxColor2.addItem("")
self.comboBoxColor2.addItem("")
self.comboBoxColor3 = QtGui.QComboBox(self.effectTab)
self.comboBoxColor3.setGeometry(QtCore.QRect(790, 80, 70, 20))
self.comboBoxColor3.setObjectName("comboBoxColor3")
self.comboBoxColor3.addItem("")
self.comboBoxColor3.setItemText(0, "")
self.comboBoxColor3.addItem("")
self.comboBoxColor3.addItem("")
self.comboBoxColor3.addItem("")
self.comboBoxColor3.addItem("")
self.comboBoxColor3.addItem("")
self.comboBoxColor3.addItem("")
self.comboBoxColor3.addItem("")
self.comboBoxColor3.addItem("")
self.comboBoxColor3.addItem("")
self.label_risingHeight_m = QtGui.QLabel(self.effectTab)
self.label_risingHeight_m.setGeometry(QtCore.QRect(880, 240, 20, 20))
self.label_risingHeight_m.setMinimumSize(QtCore.QSize(20, 20))
self.label_risingHeight_m.setObjectName("label_risingHeight_m")
self.label_diameter_m = QtGui.QLabel(self.effectTab)
self.label_diameter_m.setGeometry(QtCore.QRect(880, 270, 20, 20))
self.label_diameter_m.setMinimumSize(QtCore.QSize(20, 20))
self.label_diameter_m.setObjectName("label_diameter_m")
self.label_stockPrice = QtGui.QLabel(self.effectTab)
self.label_stockPrice.setGeometry(QtCore.QRect(150, 270, 30, 20))
self.label_stockPrice.setObjectName("label_stockPrice")
self.comboBoxType = QtGui.QComboBox(self.effectTab)
self.comboBoxType.setGeometry(QtCore.QRect(80, 20, 111, 20))
self.comboBoxType.setObjectName("comboBoxType")
self.comboBoxType.addItem("")
self.comboBoxType.addItem("")
self.comboBoxType.addItem("")
self.comboBoxType.addItem("")
self.DatabaseEditTab.addTab(self.effectTab, "")
self.miscellaneTab = QtGui.QWidget()
self.miscellaneTab.setAutoFillBackground(True)
self.miscellaneTab.setObjectName("miscellaneTab")
self.lineEdit_class = QtGui.QLineEdit(self.miscellaneTab)
self.lineEdit_class.setGeometry(QtCore.QRect(100, 20, 113, 20))
self.lineEdit_class.setObjectName("lineEdit_class")
self.label_UNnumber = QtGui.QLabel(self.miscellaneTab)
self.label_UNnumber.setGeometry(QtCore.QRect(20, 140, 75, 20))
self.label_UNnumber.setMinimumSize(QtCore.QSize(75, 20))
self.label_UNnumber.setObjectName("label_UNnumber")
self.label_class = QtGui.QLabel(self.miscellaneTab)
self.label_class.setGeometry(QtCore.QRect(20, 20, 75, 20))
self.label_class.setMinimumSize(QtCore.QSize(75, 20))
self.label_class.setObjectName("label_class")
self.label_chiper = QtGui.QLabel(self.miscellaneTab)
self.label_chiper.setGeometry(QtCore.QRect(20, 180, 75, 20))
self.label_chiper.setMinimumSize(QtCore.QSize(75, 20))
self.label_chiper.setObjectName("label_chiper")
self.label_BAM_Number = QtGui.QLabel(self.miscellaneTab)
self.label_BAM_Number.setGeometry(QtCore.QRect(20, 60, 75, 20))
self.label_BAM_Number.setMinimumSize(QtCore.QSize(75, 20))
self.label_BAM_Number.setObjectName("label_BAM_Number")
self.label_weightNet = QtGui.QLabel(self.miscellaneTab)
self.label_weightNet.setGeometry(QtCore.QRect(20, 230, 75, 20))
self.label_weightNet.setMinimumSize(QtCore.QSize(75, 20))
self.label_weightNet.setObjectName("label_weightNet")
self.label_weightGross = QtGui.QLabel(self.miscellaneTab)
self.label_weightGross.setGeometry(QtCore.QRect(20, 270, 75, 20))
self.label_weightGross.setMinimumSize(QtCore.QSize(75, 20))
self.label_weightGross.setObjectName("label_weightGross")
self.label_ADRclass = QtGui.QLabel(self.miscellaneTab)
self.label_ADRclass.setGeometry(QtCore.QRect(20, 100, 75, 20))
self.label_ADRclass.setMinimumSize(QtCore.QSize(75, 20))
self.label_ADRclass.setObjectName("label_ADRclass")
self.lineEdit_BAMnumber = QtGui.QLineEdit(self.miscellaneTab)
self.lineEdit_BAMnumber.setGeometry(QtCore.QRect(100, 60, 113, 20))
self.lineEdit_BAMnumber.setObjectName("lineEdit_BAMnumber")
self.lineEdit_ADRclass = QtGui.QLineEdit(self.miscellaneTab)
self.lineEdit_ADRclass.setGeometry(QtCore.QRect(100, 100, 113, 20))
self.lineEdit_ADRclass.setObjectName("lineEdit_ADRclass")
self.lineEdit_UNnumber = QtGui.QLineEdit(self.miscellaneTab)
self.lineEdit_UNnumber.setGeometry(QtCore.QRect(100, 140, 113, 20))
self.lineEdit_UNnumber.setObjectName("lineEdit_UNnumber")
self.lineEdit_chiper = QtGui.QLineEdit(self.miscellaneTab)
self.lineEdit_chiper.setGeometry(QtCore.QRect(100, 180, 113, 20))
self.lineEdit_chiper.setObjectName("lineEdit_chiper")
intVal = QtGui.QIntValidator()
self.lineEdit_chiper.setValidator(intVal)
self.lineEdit_weightNet = QtGui.QLineEdit(self.miscellaneTab)
self.lineEdit_weightNet.setGeometry(QtCore.QRect(100, 230, 113, 20))
self.lineEdit_weightNet.setObjectName("lineEdit_weightNet")
doubleVal = QtGui.QDoubleValidator()
self.lineEdit_weightNet.setValidator(doubleVal)
self.lineEdit_weightGross = QtGui.QLineEdit(self.miscellaneTab)
self.lineEdit_weightGross.setGeometry(QtCore.QRect(100, 270, 113, 20))
self.lineEdit_weightGross.setObjectName("lineEdit_weightGross")
doubleVal = QtGui.QDoubleValidator()
self.lineEdit_weightGross.setValidator(doubleVal)
self.label_weightNetKg = QtGui.QLabel(self.miscellaneTab)
self.label_weightNetKg.setGeometry(QtCore.QRect(220, 230, 30, 20))
self.label_weightNetKg.setObjectName("label_weightNetKg")
self.label_weightGrossKg = QtGui.QLabel(self.miscellaneTab)
self.label_weightGrossKg.setGeometry(QtCore.QRect(220, 270, 30, 20))
self.label_weightGrossKg.setObjectName("label_weightGrossKg")
self.line_3 = QtGui.QFrame(self.miscellaneTab)
self.line_3.setGeometry(QtCore.QRect(280, 20, 16, 271))
self.line_3.setFrameShape(QtGui.QFrame.VLine)
self.line_3.setFrameShadow(QtGui.QFrame.Sunken)
self.line_3.setObjectName("line_3")
self.line_4 = QtGui.QFrame(self.miscellaneTab)
self.line_4.setGeometry(QtCore.QRect(630, 20, 20, 271))
self.line_4.setFrameShape(QtGui.QFrame.VLine)
self.line_4.setFrameShadow(QtGui.QFrame.Sunken)
self.line_4.setObjectName("line_4")
self.label_safeDistance = QtGui.QLabel(self.miscellaneTab)
self.label_safeDistance.setGeometry(QtCore.QRect(330, 20, 90, 20))
self.label_safeDistance.setMinimumSize(QtCore.QSize(90, 20))
self.label_safeDistance.setObjectName("label_safeDistance")
self.label_HorizSafeDis = QtGui.QLabel(self.miscellaneTab)
self.label_HorizSafeDis.setGeometry(QtCore.QRect(330, 50, 105, 20))
self.label_HorizSafeDis.setMinimumSize(QtCore.QSize(105, 20))
self.label_HorizSafeDis.setObjectName("label_HorizSafeDis")
self.label_VertSafeDistance = QtGui.QLabel(self.miscellaneTab)
self.label_VertSafeDistance.setGeometry(QtCore.QRect(330, 80, 105, 20))
self.label_VertSafeDistance.setMinimumSize(QtCore.QSize(105, 20))
self.label_VertSafeDistance.setObjectName("label_VertSafeDistance")
self.label_ShowSimEffectID = QtGui.QLabel(self.miscellaneTab)
self.label_ShowSimEffectID.setGeometry(QtCore.QRect(330, 110, 105, 20))
self.label_ShowSimEffectID.setMinimumSize(QtCore.QSize(105, 20))
self.label_ShowSimEffectID.setObjectName("label_ShowSimEffectID")
self.label_Rating = QtGui.QLabel(self.miscellaneTab)
self.label_Rating.setGeometry(QtCore.QRect(330, 140, 105, 20))
self.label_Rating.setMinimumSize(QtCore.QSize(105, 20))
self.label_Rating.setObjectName("label_Rating")
self.label_information = QtGui.QLabel(self.miscellaneTab)
self.label_information.setGeometry(QtCore.QRect(330, 170, 105, 20))
self.label_information.setMinimumSize(QtCore.QSize(105, 20))
self.label_information.setObjectName("label_information")
self.lineEdit_HorizSafeDistance = QtGui.QLineEdit(self.miscellaneTab)
self.lineEdit_HorizSafeDistance.setGeometry(QtCore.QRect(440, 50, 80, 20))
self.lineEdit_HorizSafeDistance.setObjectName("lineEdit_HorizSafeDistance")
intVal = QtGui.QIntValidator()
self.lineEdit_HorizSafeDistance.setValidator(intVal)
self.lineEdit_VertSafeDistance = QtGui.QLineEdit(self.miscellaneTab)
self.lineEdit_VertSafeDistance.setGeometry(QtCore.QRect(440, 80, 80, 20))
self.lineEdit_VertSafeDistance.setObjectName("lineEdit_VertSafeDistance")
intVal = QtGui.QIntValidator()
self.lineEdit_VertSafeDistance.setValidator(intVal)
self.label_H_S_D_m = QtGui.QLabel(self.miscellaneTab)
self.label_H_S_D_m.setGeometry(QtCore.QRect(520, 50, 20, 20))
self.label_H_S_D_m.setMinimumSize(QtCore.QSize(20, 20))
self.label_H_S_D_m.setObjectName("label_H_S_D_m")
self.label_V_S_D_m = QtGui.QLabel(self.miscellaneTab)
self.label_V_S_D_m.setGeometry(QtCore.QRect(520, 80, 20, 20))
self.label_V_S_D_m.setMinimumSize(QtCore.QSize(20, 20))
self.label_V_S_D_m.setObjectName("label_V_S_D_m")
self.lineEdit_SimEffectID = QtGui.QLineEdit(self.miscellaneTab)
self.lineEdit_SimEffectID.setGeometry(QtCore.QRect(440, 110, 130, 20))
self.lineEdit_SimEffectID.setObjectName("lineEdit_SimEffectID")
self.lineEdit_Rating = QtGui.QLineEdit(self.miscellaneTab)
self.lineEdit_Rating.setGeometry(QtCore.QRect(440, 140, 130, 20))
self.lineEdit_Rating.setObjectName("lineEdit_Rating")
self.lineEdit_information = QtGui.QLineEdit(self.miscellaneTab)
self.lineEdit_information.setGeometry(QtCore.QRect(440, 170, 130, 20))
self.lineEdit_information.setObjectName("lineEdit_information")
self.label_Supplier = QtGui.QLabel(self.miscellaneTab)
self.label_Supplier.setGeometry(QtCore.QRect(680, 20, 75, 20))
self.label_Supplier.setMinimumSize(QtCore.QSize(75, 20))
self.label_Supplier.setObjectName("label_Supplier")
self.label_producer = QtGui.QLabel(self.miscellaneTab)
self.label_producer.setGeometry(QtCore.QRect(680, 70, 75, 20))
self.label_producer.setMinimumSize(QtCore.QSize(75, 20))
self.label_producer.setObjectName("label_producer")
self.label_itemNo = QtGui.QLabel(self.miscellaneTab)
self.label_itemNo.setGeometry(QtCore.QRect(680, 120, 75, 20))
self.label_itemNo.setMinimumSize(QtCore.QSize(75, 20))
self.label_itemNo.setObjectName("label_itemNo")
self.label_stockPlace = QtGui.QLabel(self.miscellaneTab)
self.label_stockPlace.setGeometry(QtCore.QRect(680, 170, 75, 20))
self.label_stockPlace.setMinimumSize(QtCore.QSize(75, 20))
self.label_stockPlace.setObjectName("label_stockPlace")
self.label_Price = QtGui.QLabel(self.miscellaneTab)
self.label_Price.setGeometry(QtCore.QRect(680, 220, 75, 20))
self.label_Price.setMinimumSize(QtCore.QSize(75, 20))
self.label_Price.setObjectName("label_Price")
self.label_calcFactor = QtGui.QLabel(self.miscellaneTab)
self.label_calcFactor.setGeometry(QtCore.QRect(680, 270, 75, 20))
self.label_calcFactor.setMinimumSize(QtCore.QSize(75, 20))
self.label_calcFactor.setObjectName("label_calcFactor")
self.lineEdit_supplier = QtGui.QLineEdit(self.miscellaneTab)
self.lineEdit_supplier.setGeometry(QtCore.QRect(760, 20, 140, 20))
self.lineEdit_supplier.setObjectName("lineEdit_supplier")
self.lineEdit_producer = QtGui.QLineEdit(self.miscellaneTab)
self.lineEdit_producer.setGeometry(QtCore.QRect(760, 70, 140, 20))
self.lineEdit_producer.setObjectName("lineEdit_producer")
self.lineEdit_itemNo = QtGui.QLineEdit(self.miscellaneTab)
self.lineEdit_itemNo.setGeometry(QtCore.QRect(760, 120, 140, 20))
self.lineEdit_itemNo.setObjectName("lineEdit_itemNo")
self.lineEdit_StockPlace = QtGui.QLineEdit(self.miscellaneTab)
self.lineEdit_StockPlace.setGeometry(QtCore.QRect(760, 170, 140, 20))
self.lineEdit_StockPlace.setObjectName("lineEdit_StockPlace")
self.lineEdit_Price = QtGui.QLineEdit(self.miscellaneTab)
self.lineEdit_Price.setGeometry(QtCore.QRect(760, 220, 80, 20))
self.lineEdit_Price.setObjectName("lineEdit_Price")
doubleVal = QtGui.QDoubleValidator()
self.lineEdit_Price.setValidator(doubleVal)
self.lineEdit_calcFactor = QtGui.QLineEdit(self.miscellaneTab)
self.lineEdit_calcFactor.setGeometry(QtCore.QRect(760, 270, 80, 20))
self.lineEdit_calcFactor.setObjectName("lineEdit_calcFactor")
doubleVal = QtGui.QDoubleValidator()
self.lineEdit_calcFactor.setValidator(doubleVal)
self.label_price_Euro = QtGui.QLabel(self.miscellaneTab)
self.label_price_Euro.setGeometry(QtCore.QRect(840, 220, 30, 20))
self.label_price_Euro.setObjectName("label_price_Euro")
self.label_calcFactorE_G = QtGui.QLabel(self.miscellaneTab)
self.label_calcFactorE_G.setGeometry(QtCore.QRect(840, 270, 60, 20))
self.label_calcFactorE_G.setObjectName("label_calcFactorE_G")
self.labelNotes = QtGui.QLabel(self.miscellaneTab)
self.labelNotes.setGeometry(QtCore.QRect(330, 210, 105, 20))
self.labelNotes.setMinimumSize(QtCore.QSize(105, 20))
self.labelNotes.setObjectName("labelNotes")
self.textEditNotes = QtGui.QTextEdit(self.miscellaneTab)
self.textEditNotes.setGeometry(QtCore.QRect(440, 200, 180, 90))
self.textEditNotes.setObjectName("textEditNotes")
self.DatabaseEditTab.addTab(self.miscellaneTab, "")
self.pushButtonSave = QtGui.QPushButton(DatabaseDialog)
self.pushButtonSave.setGeometry(QtCore.QRect(290, 370, 100, 30))
self.pushButtonSave.setMinimumSize(QtCore.QSize(100, 30))
self.pushButtonSave.setMaximumSize(QtCore.QSize(100, 30))
self.pushButtonSave.setObjectName("pushButtonSave")
self.pushButtonCancel = QtGui.QPushButton(DatabaseDialog)
self.pushButtonCancel.setGeometry(QtCore.QRect(620, 370, 100, 30))
self.pushButtonCancel.setMinimumSize(QtCore.QSize(100, 30))
self.pushButtonCancel.setMaximumSize(QtCore.QSize(100, 30))
self.pushButtonCancel.setObjectName("pushButtonCancel")
self.retranslateUi(DatabaseDialog)
self.DatabaseEditTab.setCurrentIndex(0)
QtCore.QMetaObject.connectSlotsByName(DatabaseDialog)
def
+= len(bytes_read)
return bytestream
def send(self, bytestream: bytes) -> None:
"""Try and send the data in `bytestream` to the remote.
*Events Emitted*
- None
- Evt17: Transport connection closed.
Parameters
----------
bytestream : bytes
The data to send to the remote.
"""
self.socket = cast(socket.socket, self.socket)
total_sent = 0
length_data = len(bytestream)
try:
while total_sent < length_data:
# Returns the number of bytes sent
nr_sent = self.socket.send(bytestream[total_sent:])
total_sent += nr_sent
evt.trigger(self.assoc, evt.EVT_DATA_SENT, {"data": bytestream})
except (socket.error, socket.timeout):
# Evt17: Transport connection closed
self.event_queue.put("Evt17")
def __str__(self) -> str:
"""Return the string output for ``socket``."""
return self.socket.__str__()
@property
def tls_args(self) -> Optional[Tuple["ssl.SSLContext", str]]:
"""Get or set the TLS context and hostname.
Parameters
----------
tls_args : Tuple[ssl.SSLContext, str] or None
If the socket should be wrapped by TLS then this is
``(context, hostname)``, where *context* is a
:class:`ssl.SSLContext` that will be used to wrap the socket and
*hostname* is the value to use for the *server_hostname* keyword
argument for :meth:`SSLContext.wrap_socket()
<ssl.SSLContext.wrap_socket>`.
Returns
-------
Optional[Tuple[ssl.SSLContext, str]]
"""
return self._tls_args
@tls_args.setter
def tls_args(self, tls_args: Optional[Tuple["ssl.SSLContext", str]]) -> None:
"""Set the TLS arguments for the socket."""
if not _HAS_SSL:
raise RuntimeError("Your Python installation lacks support for SSL")
self._tls_args = tls_args
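# --- Illustrative sketch, not part of the original source ---
# Assuming `sock` is an AssociationSocket that has not connected yet, TLS could be
# enabled roughly like this (the certificate paths and hostname are placeholders):
#
#     import ssl
#     ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
#     ctx.load_verify_locations("ca.pem")
#     ctx.load_cert_chain("client.pem", "client.key")
#     sock.tls_args = (ctx, "pacs.example.com")
#
# The context and hostname are then used to wrap the TCP socket when the
# connection is made.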
class RequestHandler(BaseRequestHandler):
"""Connection request handler for the ``AssociationServer``.
.. versionadded:: 1.2
Attributes
----------
client_address : 2-tuple
The ``(host, port)`` of the remote.
request : socket.socket
The (unaccepted) client socket.
server : transport.AssociationServer or transport.ThreadedAssociationServer
The server that received the connection request.
"""
server: "AssociationServer"
@property
def ae(self) -> "ApplicationEntity":
"""Return the server's parent AE."""
return self.server.ae
def handle(self) -> None:
"""Handle an association request.
* Creates a new Association acceptor instance and configures it.
* Sets the Association's socket to the request's socket.
* Starts the Association reactor.
"""
assoc = self._create_association()
# Trigger must be after binding the events
evt.trigger(assoc, evt.EVT_CONN_OPEN, {"address": self.client_address})
assoc.start()
@property
def local(self) -> Tuple[str, int]:
"""Return a 2-tuple of the local server's ``(host, port)`` address."""
return self.server.server_address
@property
def remote(self) -> Tuple[str, int]:
"""Return a 2-tuple of the remote client's ``(host, port)`` address."""
return cast(Tuple[str, int], self.client_address)
def _create_association(self) -> "Association":
"""Create an :class:`Association` object for the current request.
.. versionadded:: 1.5
"""
from pynetdicom.association import Association
assoc = Association(self.ae, MODE_ACCEPTOR)
assoc._server = self.server
# Set the thread name
timestamp = datetime.strftime(datetime.now(), "%Y%m%d%H%M%S")
assoc.name = f"AcceptorThread@{timestamp}"
sock = AssociationSocket(assoc, client_socket=self.request)
assoc.set_socket(sock)
# Association Acceptor object -> local AE
assoc.acceptor.maximum_length = self.ae.maximum_pdu_size
assoc.acceptor.ae_title = self.server.ae_title
assoc.acceptor.address = self.local[0]
assoc.acceptor.port = self.local[1]
assoc.acceptor.implementation_class_uid = self.ae.implementation_class_uid
assoc.acceptor.implementation_version_name = self.ae.implementation_version_name
assoc.acceptor.supported_contexts = deepcopy(self.server.contexts)
# Association Requestor object -> remote AE
assoc.requestor.address = self.remote[0]
assoc.requestor.port = self.remote[1]
# Bind events to handlers
for event in self.server._handlers:
# Intervention events
if event.is_intervention and self.server._handlers[event]:
assoc.bind(event, *self.server._handlers[event])
elif isinstance(event, evt.NotificationEvent):
# List[Tuple[Callable, Optional[List[Any]]]]
for handler in self.server._handlers[event]:
handler = cast(evt._HandlerBase, handler)
assoc.bind(event, handler[0], handler[1])
return assoc
class AssociationServer(TCPServer):
"""An Association server implementation.
.. versionadded:: 1.2
.. versionchanged:: 1.5
Added `request_handler` keyword parameter.
Any attempts to connect will be assumed to be from association requestors.
The server should be started with
:meth:`serve_forever(poll_interval)<AssociationServer.serve_forever>`,
where *poll_interval* is the timeout (in seconds) that the
:func:`select.select` call will block for (default ``0.5``). A value of
``0`` specifies a poll and never blocks. A value of ``None`` blocks until
a connection is ready.
Attributes
----------
ae : ae.ApplicationEntity
The parent AE that is running the server.
request_queue_size : int
Default ``5``.
server_address : Tuple[str, int]
The ``(host: str, port: int)`` that the server is running on.
"""
def __init__(
self,
ae: "ApplicationEntity",
address: Tuple[str, int],
ae_title: str,
contexts: List[PresentationContext],
ssl_context: Optional["ssl.SSLContext"] = None,
evt_handlers: Optional[List[evt.EventHandlerType]] = None,
request_handler: Optional[Callable[..., BaseRequestHandler]] = None,
) -> None:
"""Create a new :class:`AssociationServer`, bind a socket and start
listening.
Parameters
----------
ae : ae.ApplicationEntity
The parent AE that's running the server.
address : Tuple[str, int]
The ``(host: str, port: int)`` that the server should run on.
ae_title : str
The AE title of the SCP.
contexts : list of presentation.PresentationContext
The SCPs supported presentation contexts.
ssl_context : ssl.SSLContext, optional
If TLS is to be used then this should be the
:class:`ssl.SSLContext` used to wrap the client sockets, otherwise
if ``None`` then no TLS will be used (default).
evt_handlers : list of 2- or 3-tuple, optional
A list of ``(event, callable)`` or ``(event, callable, args)``,
the *callable* function to run when *event* occurs and the
optional extra *args* to pass to the callable.
request_handler : type
The request handler class; an instance of this class
is created for each request. Should be a subclass of
:class:`~socketserver.BaseRequestHandler`.
"""
self.ae = ae
self.ae_title = ae_title
self.contexts = contexts
self.ssl_context = ssl_context
self.allow_reuse_address = True
self.server_address: Tuple[str, int] = address
self.socket: Optional[socket.socket] = None # type: ignore[assignment]
request_handler = request_handler or RequestHandler
super().__init__(address, request_handler, bind_and_activate=True)
self.timeout = 60
# Stores all currently bound event handlers so future
# Associations can be bound
self._handlers: Dict[
evt.EventType,
Union[
List[Tuple[Callable, Optional[List[Any]]]],
Tuple[Callable, Optional[List[Any]]],
],
] = {}
self._bind_defaults()
# Bind the functions to their events
for evt_hh_args in evt_handlers or ():
self.bind(*evt_hh_args)
self._gc = [0, 59]
def bind(
self, event: evt.EventType, handler: Callable, args: Optional[List[Any]] = None
) -> None:
"""Bind a callable `handler` to an `event`.
.. versionadded:: 1.3
.. versionchanged:: 1.5
Added `args` keyword parameter.
Parameters
----------
event : namedtuple
The event to bind the function to.
handler : callable
The function that will be called if the event occurs.
args : list, optional
Optional extra arguments to be passed to the handler (default:
no extra arguments passed to the handler).
"""
evt._add_handler(event, self._handlers, (handler, args))
# Bind our child Association events
for assoc in self.active_associations:
assoc.bind(event, handler, args)
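# --- Illustrative sketch, not part of the original source ---
# Binding an extra handler (with an optional argument) to a running server; the
# handler name and the logger argument are placeholders:
#
#     def handle_echo(event, logger):
#         logger.info("C-ECHO received from %s", event.assoc.requestor.address)
#         return 0x0000
#
#     server.bind(evt.EVT_C_ECHO, handle_echo, [my_logger])
#
# Because bind() also iterates active_associations, the handler is attached to
# associations that have already been accepted.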
def _bind_defaults(self) -> None:
"""Bind the default event handlers."""
# Intervention event handlers
for event in evt._INTERVENTION_EVENTS:
handler = evt.get_default_handler(event)
self.bind(event, handler)
# Notification event handlers
if _config.LOG_HANDLER_LEVEL == "standard":
self.bind(evt.EVT_DIMSE_RECV, standard_dimse_recv_handler)
self.bind(evt.EVT_DIMSE_SENT, standard_dimse_sent_handler)
self.bind(evt.EVT_PDU_RECV, standard_pdu_recv_handler)
self.bind(evt.EVT_PDU_SENT, standard_pdu_sent_handler)
@property
def active_associations(self) -> List["Association"]:
"""Return the server's running
:class:`~pynetdicom.association.Association` acceptor instances
"""
# Find all AcceptorThreads with `_server` as self
threads = cast(
List["Association"],
[tt for tt in threading.enumerate() if "AcceptorThread" in tt.name],
)
return [tt for tt in threads if tt._server is self]
def get_events(self) -> List[evt.EventType]:
"""Return a list of currently bound events.
.. versionadded:: 1.3
"""
return sorted(self._handlers.keys(), key=lambda x: x.name)
def get_handlers(self, event: evt.EventType) -> evt.HandlerArgType:
"""Return handlers bound to a specific `event`.
.. versionadded:: 1.3
.. versionchanged:: 1.5
Returns a 2-tuple of (callable, args) or list of 2-tuple.
Parameters
----------
event : namedtuple
The event bound to the handlers.
Returns
-------
2-tuple of (callable, args), list of 2-tuple
If the event is a notification event then returns a list of
2-tuples containing the callable functions bound to `event` and
the arguments passed to the callable as ``(callable, args)``. If
the event is an intervention event then returns either a 2-tuple of
(callable, args) if a handler is bound to the event or
``(None, None)`` if no handler has been bound.
"""
if event not in self._handlers:
return []
return self._handlers[event]
def get_request(self) -> Tuple[socket.socket, Tuple[str, int]]:
"""Handle a connection request.
If :attr:`~AssociationServer.ssl_context` is set then the client socket
will be wrapped using
:meth:`SSLContext.wrap_socket()<ssl.SSLContext.wrap_socket>`.
Returns
-------
client_socket : socket.socket
The connection request.
address : 2-tuple
The client's address as ``(host, port)``.
"""
self.socket = cast(socket.socket, self.socket)
client_socket, address = self.socket.accept()
if self.ssl_context:
client_socket = self.ssl_context.wrap_socket(
client_socket, server_side=True
)
return client_socket, address
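# --- Illustrative sketch, not part of the original source ---
# A server-side ssl.SSLContext that could be supplied as `ssl_context`; the
# certificate and CA paths are placeholders:
#
#     import ssl
#     ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_SERVER)
#     ctx.load_cert_chain("server.pem", "server.key")
#     ctx.verify_mode = ssl.CERT_REQUIRED
#     ctx.load_verify_locations("ca.pem")
#
# Each accepted client socket is then wrapped with wrap_socket(..., server_side=True)
# as shown in get_request() above.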
def process_request(
self,
request: Union[socket.socket, Tuple[bytes, socket.socket]],
client_address: Union[Tuple[str, int], str],
) -> None:
"""Process a connection request"""
# Calls request_handler(request, client_address, self)
self.finish_request(request, client_address)
def server_bind(self) -> None:
"""Bind the socket and set the socket options.
- ``socket.SO_REUSEADDR`` is set to ``1``
- ``socket.settimeout`` is set to
:attr:`AE.network_timeout
<pynetdicom.ae.ApplicationEntity.network_timeout>` unless the
value is ``None`` in which case it will be left unset.
"""
self.socket = cast(socket.socket, self.socket)
# SO_REUSEADDR: reuse the socket in TIME_WAIT state without
# waiting
force is False).
Returns a list of CrossShardTransactionDeposit from block.
Additionally, returns a map of reward token balances for this block.
Raises on any error.
"""
start_time = time.time()
start_ms = time_ms()
if skip_if_too_old:
if (
self.header_tip.height - block.header.height
> self.shard_config.max_stale_minor_block_height_diff
):
Logger.info(
"[{}] drop old block {} << {}".format(
self.branch.to_str(),
block.header.height,
self.header_tip.height,
)
)
raise ValueError(
"block is too old {} << {}".format(
block.header.height, self.header_tip.height
)
)
block_hash = block.header.get_hash()
if not force and self.db.contain_minor_block_by_hash(block_hash):
return None, None
x_shard_receive_tx_list = []
# Throw exception if fail to run
self.validate_block(
block,
gas_limit=gas_limit,
xshard_gas_limit=xshard_gas_limit,
validate_time=validate_time,
)
evm_state = self.run_block(
block, x_shard_receive_tx_list=x_shard_receive_tx_list
)
# ------------------------ Validate ending result of the block --------------------
if evm_state.xshard_tx_cursor_info != block.meta.xshard_tx_cursor_info:
raise ValueError("Cross-shard transaction cursor info mismatches!")
if block.meta.hash_evm_state_root != evm_state.trie.root_hash:
raise ValueError(
"state root mismatch: header %s computed %s"
% (block.meta.hash_evm_state_root.hex(), evm_state.trie.root_hash.hex())
)
receipts = evm_state.receipts[:] + evm_state.xshard_deposit_receipts[:]
receipt_root = mk_receipt_sha(receipts, evm_state.db)
if block.meta.hash_evm_receipt_root != receipt_root:
raise ValueError(
"receipt root mismatch: header {} computed {}".format(
block.meta.hash_evm_receipt_root.hex(), receipt_root.hex()
)
)
if evm_state.gas_used != block.meta.evm_gas_used:
raise ValueError(
"gas used mismatch: header %d computed %d"
% (block.meta.evm_gas_used, evm_state.gas_used)
)
if (
evm_state.xshard_receive_gas_used
!= block.meta.evm_cross_shard_receive_gas_used
):
raise ValueError(
"x-shard gas used mismatch: header %d computed %d"
% (
block.meta.evm_cross_shard_receive_gas_used,
evm_state.xshard_receive_gas_used,
)
)
coinbase_amount_map = self.get_coinbase_amount_map(block.header.height)
# add block reward
coinbase_amount_map.add(evm_state.block_fee_tokens)
if (
coinbase_amount_map.balance_map
!= block.header.coinbase_amount_map.balance_map
):
raise ValueError("coinbase reward incorrect")
if evm_state.get_bloom() != block.header.bloom:
raise ValueError("bloom mismatch")
if write_db:
self.db.put_minor_block(block, x_shard_receive_tx_list)
else:
# Return immediately if it is not put into db
return evm_state.xshard_list, coinbase_amount_map
# Update tip if a block is appended or a fork is longer (with the same ancestor confirmed by root block tip)
# or they are equal length but the root height confirmed by the block is longer
update_tip = False
prev_root_header = self.db.get_root_block_header_by_hash(
block.header.hash_prev_root_block
)
if not prev_root_header:
raise ValueError("missing prev root block")
tip_prev_root_header = self.db.get_root_block_header_by_hash(
self.header_tip.hash_prev_root_block
)
if not self.__is_same_root_chain(self.root_tip, prev_root_header):
# Don't update tip if the block depends on a root block that is not root_tip or root_tip's ancestor
update_tip = False
elif block.header.hash_prev_minor_block == self.header_tip.get_hash():
update_tip = True
elif self.__is_minor_block_linked_to_root_tip(block):
if block.header.height > self.header_tip.height:
update_tip = True
elif block.header.height == self.header_tip.height:
update_tip = prev_root_header.height > tip_prev_root_header.height
if update_tip:
tip_prev_root_header = prev_root_header
evm_state.sender_disallow_map = self._get_sender_disallow_map(block.header)
self.__update_tip(block, evm_state)
check(self.__is_same_root_chain(self.root_tip, tip_prev_root_header))
Logger.debug(
"Add block took {} seconds for {} tx".format(
time.time() - start_time, len(block.tx_list)
)
)
tracking_data_str = block.tracking_data.decode("utf-8")
if tracking_data_str != "":
tracking_data = json.loads(tracking_data_str)
sample = {
"time": time_ms() // 1000,
"shard": str(block.header.branch.get_full_shard_id()),
"network": self.env.cluster_config.MONITORING.NETWORK_NAME,
"cluster": self.env.cluster_config.MONITORING.CLUSTER_ID,
"hash": block_hash.hex(),
"height": block.header.height,
"original_cluster": tracking_data["cluster"],
"inception": tracking_data["inception"],
"creation_latency_ms": tracking_data["creation_ms"],
"add_block_latency_ms": time_ms() - start_ms,
"mined": tracking_data.get("mined", 0),
"propagation_latency_ms": start_ms - tracking_data.get("mined", 0),
"num_tx": len(block.tx_list),
}
asyncio.ensure_future(
self.env.cluster_config.kafka_logger.log_kafka_sample_async(
self.env.cluster_config.MONITORING.PROPAGATION_TOPIC, sample
)
)
return evm_state.xshard_list, coinbase_amount_map
def get_coinbase_amount_map(self, height) -> TokenBalanceMap:
coinbase_amount = (
self.__decay_by_epoch(
self.env.quark_chain_config.shards[self.full_shard_id].COINBASE_AMOUNT,
height,
)
* self.local_fee_rate.numerator
// self.local_fee_rate.denominator
)
# shard coinbase only in genesis_token
return TokenBalanceMap(
{self.env.quark_chain_config.genesis_token: coinbase_amount}
)
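# Illustrative numbers, not from the original source: with a (decayed) shard
# COINBASE_AMOUNT of 5 * 10**18 in the genesis token's smallest unit and a
# local_fee_rate of 1/2, the shard coinbase is 5 * 10**18 * 1 // 2
# == 2_500_000_000_000_000_000; the integer division truncates any remainder.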
def get_tip(self) -> MinorBlock:
return self.db.get_minor_block_by_hash(self.header_tip.get_hash())
def finalize_and_add_block(
self, block, gas_limit=None, xshard_gas_limit=None, validate_time=True
):
"""Finalize the block by filling post-tx data including tx fee collected
gas_limit and xshard_gas_limit are used to verify customized gas limits; they are for testing purposes only
"""
evm_state = self.run_block(block)
coinbase_amount_map = self.get_coinbase_amount_map(block.header.height)
coinbase_amount_map.add(evm_state.block_fee_tokens)
block.finalize(evm_state=evm_state, coinbase_amount_map=coinbase_amount_map)
self.add_block(
block,
gas_limit=gas_limit,
xshard_gas_limit=xshard_gas_limit,
validate_time=validate_time,
)
def get_token_balance(
self, recipient: bytes, token_id: int, height: Optional[int] = None
) -> int:
evm_state = self._get_evm_state_from_height(height)
if not evm_state:
return 0
return evm_state.get_balance(recipient, token_id=token_id)
def get_balances(self, recipient: bytes, height: Optional[int] = None) -> dict:
evm_state = self._get_evm_state_from_height(height)
if not evm_state:
return {}
return evm_state.get_balances(recipient)
def get_transaction_count(
self, recipient: bytes, height: Optional[int] = None
) -> int:
evm_state = self._get_evm_state_from_height(height)
if not evm_state:
return 0
return evm_state.get_nonce(recipient)
def get_code(self, recipient: bytes, height: Optional[int] = None) -> bytes:
evm_state = self._get_evm_state_from_height(height)
if not evm_state:
return b""
return evm_state.get_code(recipient)
def get_storage_at(
self, recipient: bytes, key: int, height: Optional[int] = None
) -> bytes:
evm_state = self._get_evm_state_from_height(height)
if not evm_state:
return b""
int_result = evm_state.get_storage_data(recipient, key) # type: int
return int_result.to_bytes(32, byteorder="big")
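# For example (illustrative), a stored slot value of 255 is returned as
# b"\x00" * 31 + b"\xff", i.e. a fixed 32-byte big-endian word, matching the
# raw EVM storage encoding expected by eth_getStorageAt-style callers.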
def execute_tx(
self, tx: TypedTransaction, from_address=None, height: Optional[int] = None
) -> Optional[bytes]:
"""Execute the tx using a copy of state"""
evm_state = self._get_evm_state_from_height(height)
if not evm_state:
return None
state = evm_state.ephemeral_clone()
state.gas_used = 0
# Use the maximum gas allowed if gas is 0
evm_tx = tx.tx.to_evm_tx()
gas = evm_tx.startgas if evm_tx.startgas else state.gas_limit
try:
evm_tx = self.__validate_tx(tx, state, from_address, gas)
success, output = apply_transaction(
state, evm_tx, tx_wrapper_hash=bytes(32)
)
return output if success else None
except Exception as e:
Logger.warning_every_sec("Failed to apply transaction: {}".format(e), 1)
return None
def get_mining_info(self, recipient: bytes, token_balance: Dict[bytes, int]):
if self._posw_enabled(self.header_tip):
coinbase_address = Address(recipient, self.full_shard_id)
next_block = MinorBlock(
self.header_tip, MinorBlockMeta(), [], b""
).create_block_to_append(address=coinbase_address)
stakes = token_balance.get(self.env.quark_chain_config.genesis_token, 0)
posw_info = self._posw_info(next_block, stakes)
if posw_info:
return posw_info.posw_mined_blocks - 1, posw_info.posw_mineable_blocks
block_cnt = self._get_posw_coinbase_blockcnt(self.header_tip.get_hash())
return block_cnt.get(recipient, 0), 0
def get_next_block_difficulty(self, create_time=None):
if not create_time:
create_time = max(int(time.time()), self.header_tip.create_time + 1)
return self.diff_calc.calculate_diff_with_parent(self.header_tip, create_time)
def get_next_block_coinbase_amount(self):
# TODO: add block reward
# TODO: the current calculation is bogus and just serves as a placeholder.
coinbase = 0
for tx_wrapper in self.tx_queue.peek():
tx = tx_wrapper.tx.tx.to_evm_tx()
coinbase += tx.gasprice * tx.startgas
# TODO: add x-shard tx
return coinbase
def __get_all_unconfirmed_header_list(self) -> List[MinorBlockHeader]:
"""height in ascending order"""
header_list = []
header = self.header_tip
start_height = (
self.confirmed_header_tip.height if self.confirmed_header_tip else -1
)
for i in range(header.height - start_height):
header_list.append(header)
header = self.db.get_minor_block_header_by_hash(
header.hash_prev_minor_block
)
check(header == self.confirmed_header_tip)
header_list.reverse()
return header_list
def get_unconfirmed_header_list(self) -> List[MinorBlockHeader]:
headers = self.__get_all_unconfirmed_header_list()
max_blocks = self.__get_max_blocks_in_one_root_block()
return headers[0:max_blocks]
def get_unconfirmed_headers_coinbase_amount(self) -> int:
"""only returns genesis token coinbase amount
TODO remove coinbase_amount_map from minor header, this is the ONLY place that requires it
"""
amount = 0
headers = self.get_unconfirmed_header_list()
for header in headers:
amount += header.coinbase_amount_map.balance_map.get(
self.env.quark_chain_config.genesis_token, 0
)
return amount
def __get_max_blocks_in_one_root_block(self) -> int:
return self.shard_config.max_blocks_per_shard_in_one_root_block
def __add_transactions_to_block(self, block: MinorBlock, evm_state: EvmState):
"""Fill up the block tx list with tx from the tx queue"""
popped_txs = []
while evm_state.gas_used < evm_state.gas_limit:
tx = self.tx_queue.pop_transaction(
req_nonce_getter=evm_state.get_nonce,
max_gas=evm_state.gas_limit - evm_state.gas_used,
)
if tx is None: # tx_queue is exhausted
break
evm_tx = tx.tx.to_evm_tx()
evm_tx.set_quark_chain_config(self.env.quark_chain_config)
default_gasprice = convert_to_default_chain_token_gasprice(
evm_state, evm_tx.gas_token_id, evm_tx.gasprice
)
# simply ignore tx with lower gas price than specified
if default_gasprice < self.env.quark_chain_config.MIN_MINING_GAS_PRICE:
continue
# check if TX is disabled
if (
self.env.quark_chain_config.ENABLE_TX_TIMESTAMP is not None
and block.header.create_time
< self.env.quark_chain_config.ENABLE_TX_TIMESTAMP
):
if (
evm_tx.sender
not in self.env.quark_chain_config.tx_whitelist_senders
):
continue
# Check if EVM is disabled
if (
self.env.quark_chain_config.ENABLE_EVM_TIMESTAMP is not None
and block.header.create_time
< self.env.quark_chain_config.ENABLE_EVM_TIMESTAMP
):
if evm_tx.to == b"" or evm_tx.data != b"":
# Drop contract-creation txs and any tx carrying data from tx_queue
continue
# Check if EIP155 Signer is disabled
if (
self.env.quark_chain_config.ENABLE_EIP155_SIGNER_TIMESTAMP is not None
and block.header.create_time
< self.env.quark_chain_config.ENABLE_EIP155_SIGNER_TIMESTAMP
):
if evm_tx.version == 2:
# Drop the tx with incompatible signature
continue
try:
apply_transaction(evm_state, evm_tx, tx.get_hash())
block.add_tx(tx)
popped_txs.append(tx)
except Exception as e:
Logger.warning_every_sec(
"Failed to include transaction: {}".format(e), 1
)
# We don't want to drop the transactions if the mined block failed to be appended
for tx in popped_txs:
self.tx_queue.add_transaction(tx)
def create_block_to_mine(
self,
create_time=None,
address=None,
gas_limit=None,
xshard_gas_limit=None,
include_tx=True,
):
"""Create a block to append and include TXs to maximize rewards"""
start_time = time.time()
tracking_data = {
"inception": time_ms(),
"cluster": self.env.cluster_config.MONITORING.CLUSTER_ID,
}
if not create_time:
create_time = max(int(time.time()), self.header_tip.create_time + 1)
difficulty = self.get_next_block_difficulty(create_time)
prev_block = self.get_tip()
block = prev_block.create_block_to_append(
create_time=create_time, address=address, difficulty=difficulty
)
# Add corrected gas limit
# Set gas_limit. Since gas limit is fixed between blocks, this is for test purpose only.
gas_limit, xshard_gas_limit = self.get_gas_limit_all(
gas_limit, xshard_gas_limit
)
block.header.evm_gas_limit = gas_limit
block.meta.evm_xshard_gas_limit = xshard_gas_limit
evm_state = self._get_evm_state_for_new_block(block)
# Cross-shard receive must be handled before including tx from tx_queue
# This is part of consensus.
block.header.hash_prev_root_block = self.root_tip.get_hash()
# Advance the referenced root block by at most 100 to keep the x-shard tx cursor from running out
prev_block_rblock = self.db.get_root_block_header_by_hash(prev_block.header.hash_prev_root_block)
assert (self.root_tip.height >= prev_block_rblock.height) # may further assert they are chained?
if (self.root_tip.height - prev_block_rblock.height > 100):
block.header.hash_prev_root_block = self.db.get_root_block_header_by_height(
block.header.hash_prev_root_block,
prev_block_rblock.height +
#!/usr/bin/python
# A simple database class to handle tagging files.
#TODO: return values?
#TODO: a fix for os.path.commonprefix
import sqlite3
import os
import datetime
import tarfile
import csv
import logging
logger = logging.getLogger(__name__)
SEARCH_EXCLUSIVE = 'e'
SEARCH_INCLUSIVE = 'i'
# CSV dialect used for exporting.
# using Unit and record separator chars as delimiters
# -> no collisions
class ExportDialect(csv.Dialect):
delimiter = "\u001F"
doublequote = False
lineterminator = "\u001E"
quoting = csv.QUOTE_NONE
skipinitialspace = True
strict = True
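# --- Illustrative sketch, not part of the original source ---
# The dialect plugs into the standard csv module; the file name and row values
# are placeholders:
#
#     with open("export.dat", "w", newline="") as fh:
#         writer = csv.writer(fh, dialect=ExportDialect)
#         writer.writerow(["photo.jpg", "/home/user/pics", "2024-01-01", "cats,holiday"])
#
# Since U+001F (unit separator) and U+001E (record separator) never appear in
# ordinary file names or tags, no quoting or escaping is required.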
class FileDatabase(object):
def __init__(self, dbname):
"""Connects to a database with the name dbname.
dbname name of the database file.
If creating a new file call initialize afterwards.
"""
self.connection = sqlite3.connect(dbname)
self.connection.row_factory = sqlite3.Row
cursor = self.connection.cursor()
cursor.execute("PRAGMA foreign_keys=ON")
self.connection.commit()
def initialize(self):
"""Creates the tables used into the database file."""
cursor = self.connection.cursor()
cursor.execute("BEGIN")
cursor.execute("""CREATE TABLE paths(id INTEGER PRIMARY KEY,
name TEXT NOT NULL UNIQUE ON CONFLICT IGNORE)""")
cursor.execute("""CREATE TABLE files(id INTEGER PRIMARY KEY,
name TEXT NOT NULL, path INTEGER NOT NULL, date INTEGER NOT NULL,
FOREIGN KEY(path) REFERENCES paths(id)
ON UPDATE CASCADE ON DELETE CASCADE)""")
# If a tag is added with a name already in the database
# -> IGNORE since the tag is already present.
cursor.execute("""CREATE TABLE tags(id INTEGER PRIMARY KEY,
name TEXT NOT NULL UNIQUE ON CONFLICT IGNORE)""")
cursor.execute("""CREATE TABLE file_tags(
file_id INTEGER NOT NULL, tag_id INTEGER NOT NULL,
FOREIGN KEY(file_id) REFERENCES files(id)
ON DELETE CASCADE ON UPDATE CASCADE,
FOREIGN KEY(tag_id) REFERENCES tags(id)
ON DELETE CASCADE ON UPDATE CASCADE)""")
cursor.execute("""CREATE TABLE collections(
id INTEGER PRIMARY KEY,
name TEXT NOT NULL UNIQUE ON CONFLICT IGNORE)""")
cursor.execute("""CREATE TABLE collection_tags(
collection_id INTEGER NOT NULL, tag_id INTEGER NOT NULL,
FOREIGN KEY(collection_id) REFERENCES collections(id)
ON DELETE CASCADE ON UPDATE CASCADE,
FOREIGN KEY(tag_id) REFERENCES tags(id)
ON DELETE CASCADE ON UPDATE CASCADE)""")
self.connection.commit()
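# --- Illustrative sketch, not part of the original source ---
# Typical first use; the database path, file name and tags are placeholders:
#
#     db = FileDatabase("tags.db")
#     db.initialize()   # only needed when the file is newly created
#     db.add_files([{"name": "/home/user/pics/photo.jpg", "tags": ["cats", "holiday"]}])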
def get_file_ids(self, filenames):
"""Returns a dictionary of file ids to be passed to other methods.
filenames An iterable of absolute paths to the files."""
cursor = self.connection.cursor()
ids = dict()
for filename in filenames:
cursor.execute("""SELECT files.id FROM files, paths
WHERE paths.id = files.path AND
paths.name = ? AND files.name = ?""",
os.path.split(filename))
row = cursor.fetchone()
if row is not None:
ids[filename] = row[0]
else:
ids[filename] = None
return ids
def get_tag_ids(self, tags):
"""Returns a dictionary of tag ids to be passed to other methods.
tags An iterable of tag names."""
cursor = self.connection.cursor()
ids = dict()
for t in tags:
cursor.execute("select id from tags where name = ?", (t, ))
ids[t] = cursor.fetchone()[0]
return ids
def get_full_paths(self, files):
"""Returns a list of full paths built by joining each file row's path and name."""
ret = list()
for f in files:
ret.append(os.path.join(f["path"], f["name"]))
return ret
def get_file_tags(self, fileids):
"""Returns a list of {"id": file_id, "tags": "tag1,tag2,..."} dicts for the given file ids."""
cursor = self.connection.cursor()
cursor.execute("CREATE TEMPORARY TABLE tmp_file_ids(INTEGER)")
cursor.executemany("INSERT INTO tmp_file_ids VALUES (?)", ((i, ) for i in fileids))
cursor.execute("""SELECT file_tags.file_id AS id, group_concat(tags.name) AS tags
FROM file_tags, tags
WHERE file_tags.tag_id = tags.id AND
file_tags.file_id IN tmp_file_ids
GROUP BY file_tags.file_id""")
res = [dict(row) for row in cursor]
cursor.execute("DROP TABLE tmp_file_ids")
return res
def create_tags(self, tags):
"""Creates new tag entries into the database.
Called automatically by all tag adding methods.
tags an iterable of tag names."""
cursor = self.connection.cursor()
cursor.executemany("INSERT INTO tags(name) VALUES (?)", ((t,) for t in tags))
def add_files(self, fileinfos):
"""Adds new files to the database.
fileinfos a dictionary with the keys name and tags.
Tags should be a list or a tuple."""
cursor = self.connection.cursor()
cursor.execute("BEGIN")
file_tuples = list()
tags = set()
for fileinfo in fileinfos:
name = os.path.basename(fileinfo["name"])
path = os.path.dirname(fileinfo["name"])
path = os.path.abspath(path)
cursor.execute("INSERT INTO paths(name) VALUES(?)", (path,))
cursor.execute("SELECT id FROM paths WHERE name = ?", (path,))
path = cursor.fetchone()[0]
i = self.get_file_ids([fileinfo["name"]])
if i[fileinfo["name"]] is None:
add_time = datetime.date.today()
file_tuples.append((name, path, add_time, fileinfo["tags"]))
tags.update(fileinfo["tags"])
cursor.execute("BEGIN")
cursor.executemany("INSERT INTO files(name, path, date) VALUES(?, ?, ?)",
[f[:-1] for f in file_tuples])
self.create_tags(tags)
tag_ids = self.get_tag_ids(tags)
file_id_list = list()
for f in file_tuples:
cursor.execute("SELECT id FROM files WHERE name = ? AND path = ? AND date = ?", f[:-1])
file_id_list.append((cursor.fetchone()[0], f[-1]))
def gen_id_pairs():
for f in file_id_list:
for t in f[-1]:
yield (f[0], tag_ids[t])
cursor.executemany("INSERT INTO file_tags(file_id, tag_id) VALUES(?, ?)", gen_id_pairs())
self.connection.commit()
def remove_files(self, idict):
"""Removes files from the database.
Does not remove files from disk.
idict a dictionary as returned by get_file_ids"""
cursor = self.connection.cursor()
cursor.execute("BEGIN")
cursor.executemany("DELETE FROM files WHERE id = ?", [(i,) for i in idict.values()])
self.connection.commit()
def add_tags_to_files(self, items, tags):
"""Adds tags to the files identified by item.
tags iterable of the tags.
items ids of the file from get_file_ids."""
cursor = self.connection.cursor()
self.create_tags(tags)
tag_ids = self.get_tag_ids(tags)
cursor.execute("BEGIN")
for item in items:
cursor.executemany("INSERT INTO file_tags(file_id, tag_id) VALUES(?, ?)",
[(item, tag_ids[key]) for key in tag_ids.keys()])
self.connection.commit()
def remove_tags_from_files(self, items, tags):
"""Removes tags from a file.
items ids of the item
tags an iterable of tag names"""
cursor = self.connection.cursor()
tag_ids = self.get_tag_ids(tags)
cursor.execute("BEGIN")
for item in items:
cursor.executemany("DELETE FROM file_tags WHERE file_id = ? AND tag_id = ?",
((item, tag_ids[key]) for key in tag_ids.keys()))
self.connection.commit()
def search_by_name(self, search_string):
"""Returns a list of dictionaries of all files that match search_string.
search_string a string with glob-style wildcards (matched with GLOB)"""
cursor = self.connection.cursor()
cursor.execute("""SELECT files.id AS id, files.name AS name, paths.name AS path, files.date,
group_concat(tags.name) AS tags
FROM files, paths, file_tags, tags
WHERE paths.id = files.path AND file_tags.file_id = files.id AND
file_tags.tag_id = tags.id
AND files.name GLOB :ss
GROUP BY files.id""",
{ "ss":search_string })
res = list()
for row in cursor:
res.append(dict(row))
res[-1]['tags'] = res[-1]['tags'].split(',')
return res
def search_by_tags(self, tags, search_type = SEARCH_EXCLUSIVE):
"""Returns a list of all files that have any of the tags given.
tags an iterable with the tag names searched."""
cursor = self.connection.cursor()
query = """SELECT files.id AS id, files.name AS name, paths.name AS path,
files.date AS date, group_concat(tags.name) AS tags,
count(tags.id) AS tags_matched
FROM files, file_tags, tags, paths
WHERE tags.id = file_tags.tag_id AND
files.path = paths.id AND
file_tags.file_id = files.id AND
tags.name IN ({})
GROUP BY files.id
""".format(",".join(["?"] * len(tags)))
cursor.execute(query, tags)
res = list()
for row in cursor:
if search_type == SEARCH_INCLUSIVE or row["tags_matched"] == len(tags):
res.append(dict(row))
res[-1]['tags'] = res[-1]['tags'].split(',')
return res
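# --- Illustrative sketch, not part of the original source ---
# SEARCH_EXCLUSIVE (the default) requires every listed tag to match, while
# SEARCH_INCLUSIVE accepts files carrying any of them:
#
#     all_of = db.search_by_tags(["cats", "holiday"])
#     any_of = db.search_by_tags(["cats", "holiday"], SEARCH_INCLUSIVE)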
def add_tags_to_collection(self, name, tags):
cursor = self.connection.cursor()
cursor.execute("SELECT id FROM collections WHERE name = ?", (name,))
row = cursor.fetchone()
if row is None:
return
item = row[0]
self.create_tags(tags)
tag_ids = self.get_tag_ids(tags)
cursor.executemany("INSERT INTO collection_tags(collection_id, tag_id) VALUES(?, ?)",
((item, tag_ids[key]) for key in tag_ids.keys()))
self.connection.commit()
def add_collection(self, name, tags):
cursor = self.connection.cursor()
cursor.execute("INSERT INTO collections(name) values(?)", (name,))
self.create_tags(tags)
self.connection.commit()
self.add_tags_to_collection(name, tags)
def add_files_to_collection(self, collection, files):
tags = self.get_collection_tags(collection)
tag_ids = self.get_tag_ids(tags)
cursor = self.connection.cursor()
data = []
for f in files:
for tid in tag_ids.values():
data.append((f['id'], tid))
cursor.executemany("INSERT INTO file_tags(file_id, tag_id) VALUES(?, ?)", data)
self.connection.commit()
def remove_collection(self, name):
cursor = self.connection.cursor()
cursor.execute("SELECT COUNT(id) FROM collections WHERE name = ?", (name,))
exists = cursor.fetchone()[0] > 0
if exists:
cursor.execute("DELETE FROM collections WHERE name = ?", (name, ))
self.connection.commit()
else:
return
def _list_names(self, table):
"""Returns a list of names in a table."""
cursor = self.connection.cursor()
# table name can not contain a ;
if ';' in table:
return []
cursor.execute("SELECT name FROM " + table)
res = list()
for t in cursor:
res.append(t[0])
return res
def list_tags(self):
"""Returns a list of all tags in the database."""
return self._list_names("tags")
def list_collections(self):
"""Returns a list of all collection data in the database."""
cursor = self.connection.cursor()
cursor.execute("""SELECT collections.name AS name, group_concat(tags.name) AS tags
FROM collections, tags, collection_tags
WHERE collections.id = collection_tags.collection_id AND
tags.id = collection_tags.tag_id""")
ret = list()
for row in cursor:
if row[0] is not None:
ret.append({'name':row[0], 'tags':row[1]})
return ret
def list_files_in_collection(self, collection):
tags = self.get_collection_tags(collection)
return self.search_by_tags(tags, SEARCH_EXCLUSIVE)
def get_collection_tags(self, collection):
cursor = self.connection.cursor()
cursor.execute("""SELECT tags.name as name
FROM collections, collection_tags, tags
WHERE collections.id = collection_tags.collection_id AND
collection_tags.tag_id = tags.id AND
collections.name = ?""", (collection, ))
tags = list()
for row in cursor:
tags.append(row[0])
return tags
def list_all_files(self):
"""Returns a list of all files in the database."""
cursor = self.connection.cursor()
cursor.execute("""SELECT files.id AS id, files.name AS name, paths.name AS path,
files.date AS date, group_concat(tags.name) AS tags
FROM files, file_tags, tags, paths
WHERE tags.id =
# uamqp/client.py
#-------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#--------------------------------------------------------------------------
import logging
import uuid
import queue
try:
from urllib import unquote_plus
except ImportError:
from urllib.parse import unquote_plus
import uamqp
from uamqp import authentication
from uamqp import constants
from uamqp import sender
from uamqp import receiver
from uamqp import address
from uamqp import errors
from uamqp import c_uamqp
from uamqp import Connection
from uamqp import Session
_logger = logging.getLogger(__name__)
class AMQPClient:
"""An AMQP client.
:param remote_address: The AMQP endpoint to connect to. This could be a send target
or a receive source.
:type remote_address: str, bytes or ~uamqp.address.Address
:param auth: Authentication for the connection. If none is provided, SASL Anonymous
authentication will be used.
:type auth: ~uamqp.authentication.AMQPAuth
:param client_name: The name for the client, also known as the Container ID.
If no name is provided, a random GUID will be used.
:type client_name: str or bytes
:param debug: Whether to turn on network trace logs. If `True`, trace logs
will be logged at INFO level. Default is `False`.
:type debug: bool
:param max_frame_size: Maximum AMQP frame size. Default is 63488 bytes.
:type max_frame_size: int
:param channel_max: Maximum number of Session channels in the Connection.
:type channel_max: int
:param idle_timeout: Timeout in milliseconds after which the Connection will close
if there is no further activity.
:type idle_timeout: int
:param properties: Connection properties.
:type properties: dict
:param remote_idle_timeout_empty_frame_send_ratio: Ratio of empty frames to
idle time for Connections with no activity. Value must be between
0.0 and 1.0 inclusive. Default is 0.5.
:type remote_idle_timeout_empty_frame_send_ratio: float
:param incoming_window: The size of the allowed window for incoming messages.
:type incoming_window: int
:param outgoing_window: The size of the allowed window for outgoing messages.
:type outgoing_window: int
:param handle_max: The maximum number of concurrent link handles.
:type handle_max: int
:param encoding: The encoding to use for parameters supplied as strings.
Default is 'UTF-8'
:type encoding: str
"""
def __init__(self, remote_address, auth=None, client_name=None, debug=False, **kwargs):
self._remote_address = remote_address if isinstance(remote_address, address.Address) \
else address.Address(remote_address)
self._hostname = self._remote_address.parsed_address.hostname
if not auth:
username = self._remote_address.parsed_address.username
password = self._remote_address.parsed_address.password
if username and password:
username = unquote_plus(username)
password = unquote_plus(password)
auth = authentication.SASLPlain(self._hostname, username, password)
self._auth = auth if auth else authentication.SASLAnonymous(self._hostname)
self._name = client_name if client_name else str(uuid.uuid4())
self._debug_trace = debug
self._counter = c_uamqp.TickCounter()
self._shutdown = False
self._connection = None
self._ext_connection = False
self._session = None
self._encoding = kwargs.pop('encoding', None) or 'UTF-8'
# Connection settings
self._max_frame_size = kwargs.pop('max_frame_size', None) or constants.MAX_FRAME_SIZE_BYTES
self._channel_max = kwargs.pop('channel_max', None)
self._idle_timeout = kwargs.pop('idle_timeout', None)
self._properties = kwargs.pop('properties', None)
self._remote_idle_timeout_empty_frame_send_ratio = kwargs.pop(
'remote_idle_timeout_empty_frame_send_ratio', None)
# Session settings
self._outgoing_window = kwargs.pop('outgoing_window', None) or constants.MAX_FRAME_SIZE_BYTES
self._incoming_window = kwargs.pop('incoming_window', None) or constants.MAX_FRAME_SIZE_BYTES
self._handle_max = kwargs.pop('handle_max', None)
# AMQP object settings
self.connection_type = Connection
self.session_type = Session
if kwargs:
raise ValueError("Received unrecognized kwargs: {}".format(", ".join(kwargs.keys())))
def __enter__(self):
"""Run Client in a context manager."""
self.open()
return self
def __exit__(self, *args):
"""Close and destroy Client on exiting a context manager."""
self.close()
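# --- Illustrative sketch, not part of the original source ---
# The context-manager protocol opens the connection on entry and tears it down on
# exit; the endpoint URI and `message` are placeholders:
#
#     with AMQPClient("amqps://user:key@myhub.example.com/target") as client:
#         client.mgmt_request(message, b"READ", op_type=b"entity-type")
#
# Sending and receiving are typically done through the SendClient/ReceiveClient
# subclasses rather than this base class.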
def _client_ready(self): # pylint: disable=no-self-use
"""Determine whether the client is ready to start sending and/or
receiving messages. To be ready, the connection must be open and
authentication complete.
:returns: bool
"""
return True
def _client_run(self):
"""Perform a single Connection iteration."""
self._connection.work()
def open(self, connection=None):
"""Open the client. The client can create a new Connection
or an existing Connection can be passed in. This existing Connection
may have an existing CBS authentication Session, which will be
used for this client as well. Otherwise a new Session will be
created.
:param connection: An existing Connection that may be shared between
multiple clients.
        :type connection: ~uamqp.Connection
"""
# pylint: disable=protected-access
if self._session:
return # already open.
_logger.debug("Opening client connection.")
if connection:
_logger.debug("Using existing connection.")
self._auth = connection.auth
self._ext_connection = True
self._connection = connection or self.connection_type(
self._hostname,
self._auth,
container_id=self._name,
max_frame_size=self._max_frame_size,
channel_max=self._channel_max,
idle_timeout=self._idle_timeout,
properties=self._properties,
remote_idle_timeout_empty_frame_send_ratio=self._remote_idle_timeout_empty_frame_send_ratio,
debug=self._debug_trace,
encoding=self._encoding)
if not self._connection.cbs and isinstance(self._auth, authentication.CBSAuthMixin):
self._connection.cbs = self._auth.create_authenticator(
self._connection,
debug=self._debug_trace)
self._session = self._auth._session
elif self._connection.cbs:
self._session = self._auth._session
else:
self._session = self.session_type(
self._connection,
incoming_window=self._incoming_window,
outgoing_window=self._outgoing_window,
handle_max=self._handle_max)
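    # Connection-sharing sketch (annotation, not part of the original source; the
    # endpoint URIs and container id are hypothetical placeholders):
    #
    #     conn = Connection("amqp.example.com", auth, container_id="shared-conn")
    #     client_a = AMQPClient("amqps://amqp.example.com/entity-a", auth=auth)
    #     client_b = AMQPClient("amqps://amqp.example.com/entity-b", auth=auth)
    #     client_a.open(connection=conn)
    #     client_b.open(connection=conn)  # both clients reuse the same Connection
    #     client_a.close()
    #     client_b.close()  # an externally supplied Connection is left intact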
def close(self):
"""Close the client. This includes closing the Session
and CBS authentication layer as well as the Connection.
If the client was opened using an external Connection,
this will be left intact.
"""
if not self._session:
return # already closed.
else:
if self._connection.cbs and not self._ext_connection:
_logger.debug("Closing CBS session.")
self._auth.close_authenticator()
self._connection.cbs = None
elif not self._connection.cbs:
_logger.debug("Closing non-CBS session.")
self._session.destroy()
else:
_logger.debug("Not closing CBS session.")
self._session = None
if not self._ext_connection:
_logger.debug("Closing unshared connection.")
self._connection.destroy()
else:
_logger.debug("Shared connection remaining open.")
self._connection = None
def mgmt_request(self, message, operation, op_type=None, node=None, **kwargs):
"""Run a request/response operation. These are frequently used for management
        tasks against a $management node; however, any node name can be specified
and the available options will depend on the target service.
:param message: The message to send in the management request.
:type message: ~uamqp.Message
:param operation: The type of operation to be performed. This value will
            be service-specific, but common values include READ, CREATE and UPDATE.
This value will be added as an application property on the message.
:type operation: bytes
:param op_type: The type on which to carry out the operation. This will
be specific to the entities of the service. This value will be added as
an application property on the message.
:type op_type: bytes
:param node: The target node. Default is `b"$management"`.
:type node: bytes
:param timeout: Provide an optional timeout in milliseconds within which a response
to the management request must be received.
:type timeout: int
:param status_code_field: Provide an alternate name for the status code in the
response body which can vary between services due to the spec still being in draft.
The default is `b"statusCode"`.
:type status_code_field: bytes
:param description_fields: Provide an alternate name for the description in the
response body which can vary between services due to the spec still being in draft.
The default is `b"statusDescription"`.
:type description_fields: bytes
:returns: ~uamqp.Message
"""
timeout = False
auth_in_progress = False
while True:
if self._connection.cbs:
timeout, auth_in_progress = self._auth.handle_token()
if timeout:
raise TimeoutError("Authorization timeout.")
elif auth_in_progress:
self._connection.work()
else:
break
if not self._session:
raise ValueError("Session not yet open")
response = self._session.mgmt_request(
message,
operation,
op_type=op_type,
node=node,
encoding=self._encoding,
debug=self._debug_trace,
**kwargs)
return response
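    # Illustrative call sketch (annotation, not part of the original source); the
    # operation, op_type and node values are hypothetical and service-specific:
    #
    #     response = client.mgmt_request(
    #         request_message,            # a ~uamqp.Message instance
    #         operation=b"READ",
    #         op_type=b"com.example:entity",
    #         node=b"$management",
    #         timeout=5000)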
def do_work(self):
"""Run a single connection iteration.
This will return `True` if the connection is still open
and ready to be used for further work, or `False` if it needs
to be shut down.
:returns: bool
:raises: TimeoutError if CBS authentication timeout reached.
"""
timeout = False
auth_in_progress = False
if self._connection.cbs:
timeout, auth_in_progress = self._auth.handle_token()
if self._shutdown:
return False
if timeout:
raise TimeoutError("Authorization timeout.")
elif auth_in_progress:
self._connection.work()
return True
elif not self._client_ready():
self._connection.work()
return True
else:
result = self._client_run()
return result
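# Usage sketch (annotation, not part of the original source; the endpoint URI is a
# hypothetical placeholder). __enter__/__exit__ map onto open()/close(), and
# do_work() returns False once the client has been shut down:
#
#     with AMQPClient("amqps://amqp.example.com/entity", debug=True) as client:
#         while client.do_work():
#             pass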
class SendClient(AMQPClient):
"""An AMQP client for sending messages.
:param target: The target AMQP service endpoint. This can either be the URI as
a string or a ~uamqp.Target object.
:type target: str, bytes or ~uamqp.Target
    :param auth: Authentication for the connection. If none is provided, SASL Anonymous
authentication will be used.
:type auth: ~uamqp.authentication.AMQPAuth
:param client_name: The name for the client, also known as the Container ID.
If no name is provided, a random GUID will be used.
:type client_name: str or bytes
:param debug: Whether to turn on network trace logs. If `True`, trace logs
will be logged at INFO level. Default is `False`.
:type debug: bool
:param msg_timeout: A timeout in seconds for messages from when they have been
added to the send queue to when the message is actually sent. This prevents potentially
expired data from being sent. If set to 0, messages will not expire. Default is 0.
:type msg_timeout: int
:param send_settle_mode: The mode by which to settle message send
operations. If set to `Unsettled`, the client will wait for a confirmation
     from the service that the message was successfully sent. If set to `Settled`,
     the client will not wait for confirmation and will assume success.
:type send_settle_mode: ~uamqp.constants.SenderSettleMode
| |
: ( 8040, 8120 ),
"MH6bCd32" : ( 8120, 8139 ),
"MH6bAnother26" : ( 8139, 8140 ),
"MH6bSpecify33" : ( 8140, 8220 ),
"MH6bCd33" : ( 8220, 8239 ),
"MH6bAnother27" : ( 8239, 8240 ),
"MH6bSpecify34" : ( 8240, 8320 ),
"MH6bCd34" : ( 8320, 8339 ),
"MH6bAnother28" : ( 8339, 8340 ),
"MH6bSpecify35" : ( 8340, 8420 ),
"MH6bCd35" : ( 8420, 8439 ),
"MH6b8" : ( 8439, 8440 ),
"MH6bSpecify36" : ( 8440, 8520 ),
"MH6bCd36" : ( 8520, 8539 ),
"MH6bAnother29" : ( 8539, 8540 ),
"MH6bSpecify37" : ( 8540, 8620 ),
"MH6bCd37" : ( 8620, 8639 ),
"MH6bAnother30" : ( 8639, 8640 ),
"MH6bSpecify38" : ( 8640, 8720 ),
"MH6bCd38" : ( 8720, 8739 ),
"MH6bAnother31" : ( 8739, 8740 ),
"MH6bSpecify39" : ( 8740, 8820 ),
"MH6bCd39" : ( 8820, 8839 ),
"MH6bAnother32" : ( 8839, 8840 ),
"MH6bSpecify40" : ( 8840, 8920 ),
"MH6bCd40" : ( 8920, 8939 ),
"MH8" : ( 8939, 8940 ),
"MH8a1" : ( 8940, 8941 ),
"MH8a2" : ( 8941, 8942 ),
"MH8a3" : ( 8942, 8943 ),
"MH8a4" : ( 8943, 8944 ),
"MH8a5" : ( 8944, 8945 ),
"MH8a6" : ( 8945, 8946 ),
"MH8a7" : ( 8946, 8947 ),
"MH8a8" : ( 8947, 8948 ),
"MH8a8SPecify" : ( 8948, 9028 ),
"MH9" : ( 9028, 9030 ),
"MH9a" : ( 9030, 9032 ),
"MH9b" : ( 9032, 9034 ),
"MH10" : ( 9034, 9035 ),
"MH10a" : ( 9035, 9036 ),
"TB1c1" : ( 9036, 9037 ),
"TB1c_ao1" : ( 9037, 9039 ),
"TB1c_o1" : ( 9039, 9040 ),
"TB1c_ar1" : ( 9040, 9042 ),
"TB1c_r1" : ( 9042, 9043 ),
"TB1c2" : ( 9043, 9044 ),
"TB1c_ao2" : ( 9044, 9046 ),
"TB1c_o2" : ( 9046, 9047 ),
"TB1c_ar2" : ( 9047, 9049 ),
"TB1c_r2" : ( 9049, 9050 ),
"TB1c3" : ( 9050, 9051 ),
"TB1c_ao3" : ( 9051, 9053 ),
"TB1c_o3" : ( 9053, 9054 ),
"TB1c_ar3" : ( 9054, 9056 ),
"TB1c_r3" : ( 9056, 9057 ),
"TB1c4" : ( 9057, 9058 ),
"TB1c_ao4" : ( 9058, 9060 ),
"TB1c_o4" : ( 9060, 9061 ),
"TB1c_ar4" : ( 9061, 9063 ),
"TB1c_r4" : ( 9063, 9064 ),
"TB1d" : ( 9064, 9065 ),
"TBd_ao1" : ( 9065, 9067 ),
"TBd_o1" : ( 9067, 9068 ),
"TB3" : ( 9068, 9069 ),
"TB3_1" : ( 9069, 9070 ),
"TB3a" : ( 9070, 9073 ),
"TB4a" : ( 9073, 9074 ),
"TB4a1" : ( 9074, 9075 ),
"TB4b" : ( 9075, 9078 ),
"TB4b1" : ( 9078, 9079 ),
"TB4cNum" : ( 9079, 9081 ),
"TB4cUnit" : ( 9081, 9082 ),
"TB_ao4" : ( 9082, 9085 ),
"TB_o4" : ( 9085, 9086 ),
"TB_ar4" : ( 9086, 9089 ),
"TB_r4" : ( 9089, 9090 ),
"TB5x" : ( 9090, 9092 ),
"TB5xDK" : ( 9092, 9093 ),
"TB5" : ( 9093, 9096 ),
"TB5a" : ( 9096, 9097 ),
"TB6" : ( 9097, 9098 ),
"TB7" : ( 9098, 9099 ),
"TB8" : ( 9099, 9100 ),
"TB9" : ( 9100, 9101 ),
"TB10" : ( 9101, 9102 ),
"TB10a" : ( 9102, 9104 ),
"TB10a1" : ( 9104, 9105 ),
"TB10b" : ( 9105, 9107 ),
"TB10b1" : ( 9107, 9108 ),
"TB10cNum" : ( 9108, 9110 ),
"TB10cUNIT" : ( 9110, 9111 ),
"TB10c1" : ( 9111, 9112 ),
"TB11" : ( 9112, 9113 ),
"TB12" : ( 9113, 9114 ),
"TB12a" : ( 9114, 9115 ),
"TB13" : ( 9115, 9116 ),
"TB13A" : ( 9116, 9117 ),
"TB13B" : ( 9117, 9118 ),
"TB14" : ( 9118, 9119 ),
"TB14a" : ( 9119, 9120 ),
"TB14b" : ( 9120, 9123 ),
"TB14b1" : ( 9123, 9124 ),
"TB14C" : ( 9124, 9125 ),
"TB14c1" : ( 9125, 9126 ),
"TB14d" : ( 9126, 9127 ),
"TB15NUM" : ( 9127, 9129 ),
"TB15UNIT" : ( 9129, 9130 ),
"TB15a" : ( 9130, 9131 ),
"TB15b" : ( 9131, 9132 ),
"TB15c" : ( 9132, 9133 ),
"TB15d" : ( 9133, 9134 ),
"TB15Specify" : ( 9134, 9359 ),
"TB_ao15" : ( 9359, 9361 ),
"TB_o15" : ( 9361, 9362 ),
"TB_ar15" : ( 9362, 9364 ),
"TB_r15" : ( 9364, 9365 ),
"TB16_1" : ( 9365, 9366 ),
"TB16_2" : ( 9366, 9367 ),
"TB16_3" : ( 9367, 9368 ),
"TB16_4" : ( 9368, 9369 ),
"TB16_5" : ( 9369, 9370 ),
"TB16_6" : ( 9370, 9371 ),
"TB16_7" : ( 9371, 9372 ),
"TB16_8" : ( 9372, 9373 ),
"TB16_9" : ( 9373, 9374 ),
"TB16c" : ( 9374, 9375 ),
"TB16d" : ( 9375, 9376 ),
"TB17" : ( 9376, 9377 ),
"TB17a" : ( 9377, 9378 ),
"TB17Specify" : ( 9378, 9603 ),
"TB17b" : ( 9603, 9604 ),
"TB18" : ( 9604, 9605 ),
"TB18SPecify" : ( 9605, 9685 ),
"TB18Code" : ( 9685, 9704 ),
"TB18a" : ( 9704, 9705 ),
"TB19" : ( 9705, 9706 ),
"TB19Specify" : ( 9706, 9786 ),
"TB19Code" : ( 9786, 9805 ),
"TB20a" : ( 9805, 9806 ),
"TB20b" : ( 9806, 9807 ),
"TB20c" : ( 9807, 9808 ),
"TB20d" : ( 9808, 9809 ),
"TBqSx" : ( 9809, 9810 ),
"TBqSx2" : ( 9810, 9811 ),
"TBqSx3" : ( 9811, 9812 ),
"TBqSx4" : ( 9812, 9813 ),
"TBqSx5" : ( 9813, 9814 ),
"TBqSx6" : ( 9814, 9815 ),
"TBqSx7" : ( 9815, 9816 ),
"TBqSx8" : ( 9816, 9817 ),
"TBqSx9" : ( 9817, 9818 ),
"TBqSx10" : ( 9818, 9819 ),
"TBqSx11" : ( 9819, 9820 ),
"TBqSx12" : ( 9820, 9821 ),
"TBqSx13" : ( 9821, 9822 ),
"TBqSx14" : ( 9822, 9823 ),
"TBYrCl" : ( 9823, 9825 ),
"TB_ao21" : ( 9825, 9827 ),
"TB_ar21" : ( 9827, 9829 ),
"TBCrit1" : ( 9829, 9848 ),
"TBCrit2" : ( 9848, 9867 ),
"TBCrit3" : ( 9867, 9886 ),
"TBCrit4" : ( 9886, 9905 ),
"TBCrit5" : ( 9905, 9924 ),
"TBCrit6" : ( 9924, 9943 ),
"TBCrit7" : ( 9943, 9962 ),
"TBSxNum01" : ( 9962, 9981 ),
"TBSxNum02" : ( 9981, 10000 ),
"TBSxNum03" : ( 10000, 10019 ),
"TBSxNum04" : ( 10019, 10038 ),
"TBSxNum05" : ( 10038, 10057 ),
"TBSxNum06" : ( 10057, 10076 ),
"TBSxNum07" : ( 10076, 10095 ),
"TBSxNum08" : ( 10095, 10114 ),
"TBSxNum09" : ( 10114, 10133 ),
"TBSxNum10" : ( 10133, 10152 ),
"TBSxNum11" : ( 10152, 10171 ),
"TBSxNum12" : ( 10171, 10190 ),
"TBSxNum13" : ( 10190, 10209 ),
"TBSxNum14" : ( 10209, 10228 ),
"TBSxList01" : ( 10228, 10483 ),
"TBSxList02" : ( 10483, 10738 ),
"TBSxList03" : ( 10738, 10993 ),
"TBSxList04" : ( 10993, 11248 ),
"TBSxList05" : ( 11248, 11503 ),
"TBSxList06" : ( 11503, 11758 ),
"TBSxList07" : ( 11758, 12013 ),
"TBSxList08" : ( 12013, 12268 ),
"TBSxList09" : ( 12268, 12523 ),
"TBSxList10" : ( 12523, 12778 ),
"TBSxList11" : ( 12778, 13033 ),
"TBSxList12" : ( 13033, 13288 ),
"TBSxList13" : ( 13288, 13543 ),
"TBSxList14" : ( 13543, 13798 ),
"TBSxPastList01" : ( 13798, 14053 ),
"TBSxPastList02" : ( 14053, 14308 ),
"TBSxPastList03" : ( 14308, 14563 ),
"TBSxPastList04" : ( 14563, 14818 ),
"TBSxPastList05" : ( 14818, 15073 ),
"TBSxPastList06" : ( 15073, 15328 ),
"TBSxPastList07" : ( 15328, 15583 ),
"TBSxPastList08" : ( 15583, 15838 ),
"TBSxPastList09" : ( 15838, 16093 ),
"TBSxPastList10" : ( 16093, 16348 ),
"TBSxPastList11" : ( 16348, 16603 ),
"TBSxPastList12" : ( 16603, 16858 ),
"TBSxPastList13" : ( 16858, 17113 ),
"TBSxPastList14" : ( 17113, 17368 ),
"Varname01" : ( 17368, 17623 ),
"Varname02" : ( 17623, 17878 ),
"Varname03" : ( 17878, 18133 ),
"Varname04" : ( 18133, 18388 ),
"Varname05" : ( | |
<filename>src/transformers/models/wav2vec2_conformer/modeling_wav2vec2_conformer.py
# coding=utf-8
# Copyright 2022 The Fairseq Authors and the HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" PyTorch Wav2Vec2-Conformer model."""
import math
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import numpy as np
import torch
import torch.utils.checkpoint
from torch import nn
from torch.nn import CrossEntropyLoss
from ...activations import ACT2FN
from ...deepspeed import is_deepspeed_zero3_enabled
from ...modeling_outputs import (
BaseModelOutput,
CausalLMOutput,
SequenceClassifierOutput,
TokenClassifierOutput,
Wav2Vec2BaseModelOutput,
XVectorOutput,
)
from ...modeling_utils import PreTrainedModel
from ...pytorch_utils import torch_int_div
from ...utils import (
ModelOutput,
add_code_sample_docstrings,
add_start_docstrings,
add_start_docstrings_to_model_forward,
logging,
replace_return_docstrings,
)
from .configuration_wav2vec2_conformer import Wav2Vec2ConformerConfig
logger = logging.get_logger(__name__)
_HIDDEN_STATES_START_POSITION = 2
# General docstring
_CONFIG_FOR_DOC = "Wav2Vec2ConformerConfig"
_PROCESSOR_FOR_DOC = "Wav2Vec2Processor"
# Base docstring
_CHECKPOINT_FOR_DOC = "facebook/wav2vec2-conformer-rope-large-960h-ft"
_EXPECTED_OUTPUT_SHAPE = [1, 292, 1024]
# CTC docstring
_CTC_EXPECTED_OUTPUT = "'MISTER QUILTER IS THE APOSTLE OF THE MIDDLE CLASSES AND WE ARE GLAD TO WELCOME HIS GOSPEL'"
_CTC_EXPECTED_LOSS = 64.21
# Audio class docstring
_FEAT_EXTRACTOR_FOR_DOC = "Wav2Vec2FeatureExtractor"
_SEQ_CLASS_CHECKPOINT = "hf-internal-testing/wav2vec2-conformer-seq-class"
_SEQ_CLASS_EXPECTED_OUTPUT = "'LABEL_0'"
_SEQ_CLASS_EXPECTED_LOSS = 0.68
# Frame class docstring
_FRAME_CLASS_CHECKPOINT = "hf-internal-testing/wav2vec2-conformer-frame-class"
_FRAME_EXPECTED_OUTPUT = [1, 0]
# Speaker Verification docstring
_XVECTOR_CHECKPOINT = "hf-internal-testing/wav2vec2-conformer-xvector"
_XVECTOR_EXPECTED_OUTPUT = 1.0
WAV2VEC2_CONFORMER_PRETRAINED_MODEL_ARCHIVE_LIST = [
"facebook/wav2vec2-conformer-large-rel-pos",
# See all Wav2Vec2Conformer models at https://huggingface.co/models?filter=wav2vec2-conformer
]
@dataclass
# Copied from transformers.models.wav2vec2.modeling_wav2vec2.Wav2Vec2ForPreTrainingOutput with Wav2Vec2->Wav2Vec2Conformer
class Wav2Vec2ConformerForPreTrainingOutput(ModelOutput):
"""
Output type of [`Wav2Vec2ConformerForPreTraining`], with potential hidden states and attentions.
Args:
loss (*optional*, returned when `sample_negative_indices` are passed, `torch.FloatTensor` of shape `(1,)`):
Total loss as the sum of the contrastive loss (L_m) and the diversity loss (L_d) as stated in the [official
            paper](https://arxiv.org/pdf/2006.11477.pdf).
projected_states (`torch.FloatTensor` of shape `(batch_size, sequence_length, config.proj_codevector_dim)`):
Hidden-states of the model projected to *config.proj_codevector_dim* that can be used to predict the masked
projected quantized states.
projected_quantized_states (`torch.FloatTensor` of shape `(batch_size, sequence_length, config.proj_codevector_dim)`):
Quantized extracted feature vectors projected to *config.proj_codevector_dim* representing the positive
target vectors for contrastive loss.
hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer) of
shape `(batch_size, sequence_length, hidden_size)`.
Hidden-states of the model at the output of each layer plus the initial embedding outputs.
attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
sequence_length)`.
Attentions weights after the attention softmax, used to compute the weighted average in the self-attention
heads.
contrastive_loss (*optional*, returned when `sample_negative_indices` are passed, `torch.FloatTensor` of shape `(1,)`):
            The contrastive loss (L_m) as stated in the [official paper](https://arxiv.org/pdf/2006.11477.pdf).
diversity_loss (*optional*, returned when `sample_negative_indices` are passed, `torch.FloatTensor` of shape `(1,)`):
            The diversity loss (L_d) as stated in the [official paper](https://arxiv.org/pdf/2006.11477.pdf).
"""
loss: Optional[torch.FloatTensor] = None
projected_states: torch.FloatTensor = None
projected_quantized_states: torch.FloatTensor = None
codevector_perplexity: torch.FloatTensor = None
hidden_states: Optional[Tuple[torch.FloatTensor]] = None
attentions: Optional[Tuple[torch.FloatTensor]] = None
contrastive_loss: Optional[torch.FloatTensor] = None
diversity_loss: Optional[torch.FloatTensor] = None
# Copied from transformers.models.wav2vec2.modeling_wav2vec2._compute_mask_indices
def _compute_mask_indices(
shape: Tuple[int, int],
mask_prob: float,
mask_length: int,
attention_mask: Optional[torch.LongTensor] = None,
min_masks: int = 0,
) -> np.ndarray:
"""
Computes random mask spans for a given shape. Used to implement [SpecAugment: A Simple Data Augmentation Method for
ASR](https://arxiv.org/abs/1904.08779). Note that this method is not optimized to run on TPU and should be run on
CPU as part of the preprocessing during training.
Args:
        shape: The shape for which to compute masks. This should be a tuple of size 2 where
the first element is the batch size and the second element is the length of the axis to span.
mask_prob: The percentage of the whole axis (between 0 and 1) which will be masked. The number of
independently generated mask spans of length `mask_length` is computed by
`mask_prob*shape[1]/mask_length`. Note that due to overlaps, `mask_prob` is an upper bound and the
actual percentage will be smaller.
mask_length: size of the mask
min_masks: minimum number of masked spans
attention_mask: A (right-padded) attention mask which independently shortens the feature axis of
each batch dimension.
"""
batch_size, sequence_length = shape
if mask_length < 1:
raise ValueError("`mask_length` has to be bigger than 0.")
if mask_length > sequence_length:
raise ValueError(
f"`mask_length` has to be smaller than `sequence_length`, but got `mask_length`: {mask_length}"
f" and `sequence_length`: {sequence_length}`"
)
# epsilon is used for probabilistic rounding
epsilon = np.random.rand(1).item()
def compute_num_masked_span(input_length):
"""Given input length, compute how many spans should be masked"""
num_masked_span = int(mask_prob * input_length / mask_length + epsilon)
num_masked_span = max(num_masked_span, min_masks)
# make sure num masked span <= sequence_length
if num_masked_span * mask_length > sequence_length:
num_masked_span = sequence_length // mask_length
# make sure num_masked span is also <= input_length - (mask_length - 1)
if input_length - (mask_length - 1) < num_masked_span:
num_masked_span = max(input_length - (mask_length - 1), 0)
return num_masked_span
# compute number of masked spans in batch
input_lengths = (
attention_mask.sum(-1).detach().tolist()
if attention_mask is not None
else [sequence_length for _ in range(batch_size)]
)
# SpecAugment mask to fill
spec_aug_mask = np.zeros((batch_size, sequence_length), dtype=np.bool)
spec_aug_mask_idxs = []
max_num_masked_span = compute_num_masked_span(sequence_length)
if max_num_masked_span == 0:
return spec_aug_mask
for input_length in input_lengths:
# compute num of masked spans for this input
num_masked_span = compute_num_masked_span(input_length)
# get random indices to mask
spec_aug_mask_idx = np.random.choice(
np.arange(input_length - (mask_length - 1)), num_masked_span, replace=False
)
# pick first sampled index that will serve as a dummy index to pad vector
# to ensure same dimension for all batches due to probabilistic rounding
# Picking first sample just pads those vectors twice.
if len(spec_aug_mask_idx) == 0:
            # this case can only happen if `input_length` is strictly smaller than
# `sequence_length` in which case the last token has to be a padding
# token which we can use as a dummy mask id
dummy_mask_idx = sequence_length - 1
else:
dummy_mask_idx = spec_aug_mask_idx[0]
spec_aug_mask_idx = np.concatenate(
[spec_aug_mask_idx, np.ones(max_num_masked_span - num_masked_span, dtype=np.int32) * dummy_mask_idx]
)
spec_aug_mask_idxs.append(spec_aug_mask_idx)
spec_aug_mask_idxs = np.array(spec_aug_mask_idxs)
# expand masked indices to masked spans
spec_aug_mask_idxs = np.broadcast_to(
spec_aug_mask_idxs[:, :, None], (batch_size, max_num_masked_span, mask_length)
)
spec_aug_mask_idxs = spec_aug_mask_idxs.reshape(batch_size, max_num_masked_span * mask_length)
    # add offset to the starting indexes so that the indexes now create a span
offsets = np.arange(mask_length)[None, None, :]
offsets = np.broadcast_to(offsets, (batch_size, max_num_masked_span, mask_length)).reshape(
batch_size, max_num_masked_span * mask_length
)
spec_aug_mask_idxs = spec_aug_mask_idxs + offsets
# ensure that we cannot have indices larger than sequence_length
if spec_aug_mask_idxs.max() > sequence_length - 1:
spec_aug_mask_idxs[spec_aug_mask_idxs > sequence_length - 1] = sequence_length - 1
# scatter indices to mask
np.put_along_axis(spec_aug_mask, spec_aug_mask_idxs, 1, -1)
return spec_aug_mask
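# Worked example (annotation, not part of the original source): with shape=(1, 100),
# mask_prob=0.5 and mask_length=10, compute_num_masked_span gives
# int(0.5 * 100 / 10 + epsilon) = 5 spans (epsilon < 1), i.e. up to 5 * 10 = 50 masked
# positions; overlapping spans can reduce the actual count, which is why mask_prob is
# only an upper bound.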
# Copied from transformers.models.wav2vec2.modeling_wav2vec2._sample_negative_indices
def _sample_negative_indices(
features_shape: Tuple, num_negatives: int, mask_time_indices: Optional[np.ndarray] = None
):
"""
Sample `num_negatives` vectors from feature vectors.
"""
batch_size, sequence_length = features_shape
# generate indices of the positive vectors themselves, repeat them `num_negatives` times
sequence_length_range = np.arange(sequence_length)
# get `num_negatives` random vector indices from the same utterance
sampled_negative_indices = np.zeros(shape=(batch_size, sequence_length, num_negatives), dtype=np.int32)
mask_time_indices = (
mask_time_indices.astype(np.bool) if mask_time_indices is not None else np.ones(features_shape, dtype=np.bool)
)
for batch_idx in range(batch_size):
high = mask_time_indices[batch_idx].sum() - 1
mapped_masked_indices = sequence_length_range[mask_time_indices[batch_idx]]
feature_indices = np.broadcast_to(np.arange(high + 1)[:, None], (high + 1, num_negatives))
sampled_indices = np.random.randint(0, high, size=(high + 1, num_negatives))
# avoid sampling the same positive vector, but keep the distribution uniform
sampled_indices[sampled_indices >= feature_indices] += 1
# remap to actual indices
sampled_negative_indices[batch_idx][mask_time_indices[batch_idx]] = mapped_masked_indices[sampled_indices]
# correct for batch size
sampled_negative_indices[batch_idx] += batch_idx * sequence_length
return sampled_negative_indices
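# Shape note (annotation, not part of the original source): for features_shape=(2, 4)
# and num_negatives=3 the result has shape (2, 4, 3); each entry indexes into the
# flattened (batch * sequence) feature matrix because of the
# `batch_idx * sequence_length` offset applied above.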
# Copied from transformers.models.wav2vec2.modeling_wav2vec2.Wav2Vec2NoLayerNormConvLayer with Wav2Vec2->Wav2Vec2Conformer
class Wav2Vec2ConformerNoLayerNormConvLayer(nn.Module):
def __init__(self, config, layer_id=0):
super().__init__()
self.in_conv_dim = config.conv_dim[layer_id - 1] if layer_id > 0 else 1
self.out_conv_dim = config.conv_dim[layer_id]
self.conv = nn.Conv1d(
self.in_conv_dim,
self.out_conv_dim,
kernel_size=config.conv_kernel[layer_id],
stride=config.conv_stride[layer_id],
bias=config.conv_bias,
)
self.activation = ACT2FN[config.feat_extract_activation]
def forward(self, hidden_states):
hidden_states = self.conv(hidden_states)
hidden_states = self.activation(hidden_states)
return hidden_states
# Copied from transformers.models.wav2vec2.modeling_wav2vec2.Wav2Vec2LayerNormConvLayer with Wav2Vec2->Wav2Vec2Conformer
class Wav2Vec2ConformerLayerNormConvLayer(nn.Module):
def __init__(self, config, layer_id=0):
super().__init__()
self.in_conv_dim = config.conv_dim[layer_id - 1] if layer_id > 0 else 1
self.out_conv_dim = config.conv_dim[layer_id]
self.conv = nn.Conv1d(
self.in_conv_dim,
self.out_conv_dim,
kernel_size=config.conv_kernel[layer_id],
stride=config.conv_stride[layer_id],
bias=config.conv_bias,
| |
import sys
import os
import errno
import inspect
from collections import namedtuple
from enum import Enum
from pathlib import Path
def DEBUG(*x):
if 'DEBUG' in os.environ:
print('@DEBUG', *x)
# ParsingExpression
class ParsingExpression(object):
def __iter__(self): pass
def __len__(self): return 0
# operator overloading
def __and__(self, y): return Seq2(self, y)
def __rand__(self, y): return Seq2(self, y)
def __or__(self, y): return Ore2(self, y)
def __truediv__(self, y): return Ore2(self, y)
def __invert__(self): return Not(self)
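# Example of the operator overloading above (annotation, not part of the original
# source): since `&` binds tighter than `|` in Python,
#
#     Char('a') & Char('b') | Char('c')
#
# builds Ore2(Seq2(Char('a'), Char('b')), Char('c')), and ~Char('a') builds
# Not(Char('a')).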
class Char(ParsingExpression):
__slots__ = ['text']
def __init__(self, text):
self.text = text
class Range(ParsingExpression):
__slots__ = ['chars', 'ranges']
def __init__(self, chars, ranges):
self.chars = chars
self.ranges = ranges
class Any(ParsingExpression):
pass
class Ref(ParsingExpression):
def __init__(self, name, peg):
self.name = name
self.peg = peg
def uname(self):
return self.name if self.name[0].isdigit() else self.peg.gid + self.name
def deref(self):
return self.peg[self.name]
def get(self, key, value):
return getattr(self, key) if hasattr(self, key) else value
def set(self, key, value):
setattr(self, key, value)
class Tuple(ParsingExpression):
__slots__ = ['es']
def __init__(self, *es):
self.es = list(es)
def __iter__(self):
return iter(self.es)
def __len__(self):
return len(self.es)
class Seq2(Tuple):
@classmethod
def new(cls, *es):
ls = [es[0]]
for e in es[1:]:
if e == EMPTY: continue
if isinstance(e, Char) and isinstance(ls[-1], Char):
ls[-1] = Char(ls[-1].text+e.text)
continue
ls.append(e)
return ls[0] if len(ls) == 1 else Seq2(*ls)
class Alt2(Tuple):
pass
class Ore2(Tuple):
@classmethod
def expand(cls, e):
choice = []
_expand(e, choice)
return choice[0] if len(choice)==1 else Ore2(*choice)
def _expand(e, choice=[]):
s = e
while isinstance(e, Ref):
e = e.deref()
if isinstance(e, Ore2):
for x in e:
_expand(x, choice)
else:
choice.append(s)
class Unary(ParsingExpression):
__slot__ = ['e']
def __init__(self, e):
self.e = e
def __iter__(self):
yield self.e
def __len__(self):
return 1
class And(Unary):
pass
class Not(Unary):
pass
class Many(Unary):
pass
class Many1(Unary):
pass
class Option(Unary):
pass
class Node(Unary):
__slot__ = ['e', 'tag']
def __init__(self, e, tag=''):
self.e = e
self.tag = tag
class Edge2(Unary):
__slot__ = ['e', 'edge']
def __init__(self, e, edge=''):
self.e = e
self.edge = edge
class Fold2(Unary):
__slot__ = ['e', 'edge', 'tag']
def __init__(self, e, edge='', tag=''):
self.e = e
self.edge = edge
self.tag = tag
class Abs(Unary):
__slot__ = ['e']
def __init__(self, e):
self.e = e
# Action
class Action(Unary):
__slots__ = ['e', 'func', 'params']
def __init__(self, e, func, params, pos4=None):
self.e = e
self.func = func
self.params = params
# Action = namedtuple('Action', 'inner func params pos4')
# CONSTANT
EMPTY = Char('')
ANY = Any()
FAIL = Not(EMPTY)
def setup():
def grouping(e, f):
return '(' + repr(e) + ')' if f(e) else repr(e)
def inUnary(e):
return isinstance(e, Ore2) \
or isinstance(e, Seq2) or isinstance(e, Alt2) \
or (isinstance(e, Edge2))or isinstance(e, Fold2)
CharTBL = str.maketrans(
{'\n': '\\n', '\t': '\\t', '\r': '\\r', '\\': '\\\\', "'": "\\'"})
RangeTBL = str.maketrans(
{'\n': '\\n', '\t': '\\t', '\r': '\\r', '\\': '\\\\', ']': '\\]', '-': '\\-'})
def rs(ranges):
ss = tuple(map(lambda x: x[0].translate(
RangeTBL) + '-' + x[1].translate(RangeTBL), ranges))
return ''.join(ss)
Char.__repr__ = lambda p: "'" + p.text.translate(CharTBL) + "'"
Range.__repr__ = lambda p: "[" + \
rs(p.ranges) + p.chars.translate(RangeTBL) + "]"
Any.__repr__ = lambda p: '.'
def ss(e): return grouping(e, lambda e: isinstance(
e, Ore2) or isinstance(e, Alt2))
Seq2.__repr__ = lambda p: ' '.join(map(ss, p))
Ore2.__repr__ = lambda p: ' / '.join(map(repr, p))
# grouping(
# p.left, inUnary) + '?' if p.right == EMPTY else repr(p.left) + ' / ' + repr(p.right)
Alt2.__repr__ = lambda p: ' | '.join(map(repr, p))
#repr(p.left) + ' | ' + repr(p.right)
And.__repr__ = lambda p: '&'+grouping(p.e, inUnary)
Not.__repr__ = lambda p: '!'+grouping(p.e, inUnary)
Many.__repr__ = lambda p: grouping(p.e, inUnary)+'*'
Many1.__repr__ = lambda p: grouping(p.e, inUnary)+'+'
Option.__repr__ = lambda p: grouping(p.e, inUnary)+'?'
Ref.__repr__ = lambda p: p.name
Node.__repr__ = lambda p: '{' + str(p.e) + ' #' + p.tag + '}'
Edge2.__repr__ = lambda p: (
'$' if p.edge == '' else p.edge + ': ') + grouping(p.e, inUnary)
Fold2.__repr__ = lambda p: (
'' if p.edge == '' else p.edge + ':') + '^ {' + str(p.e) + ' #' + p.tag + '}'
Abs.__repr__ = lambda p: f'@abs({p.e})'
Action.__repr__ = lambda p: f'@{p.func}{p.params}'
setup()
# # Grammar
GrammarId = 0
class Grammar(dict):
def __init__(self):
global GrammarId
self.gid = str(GrammarId)
GrammarId += 1
self.N = []
def __repr__(self):
ss = []
for rule in self.N:
ss.append(rule)
ss.append('=')
ss.append(repr(self[rule]))
ss.append('\n')
return ''.join(ss)
def add(self, key, item):
if not key in self:
self.N.append(key)
self[key] = item
def newRef(self, name):
key = '@' + name
if not key in self:
super().__setitem__(key, Ref(name, self))
return self[key]
def start(self):
if len(self.N) == 0:
self['EMPTY'] = EMPTY
return self.N[0]
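# Assembly sketch (annotation, not part of the original source): rules are registered
# by name and may refer to each other through forward references, e.g.
#
#     g = Grammar()
#     A = g.newRef('A')                  # forward reference, resolved later via deref()
#     g.add('A', Char('a') & Option(A))  # A = 'a' A?
#     g.start()                          # -> 'A', the first rule that was added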
##
# # TPEG Grammar Definition
def TPEG(g):
def Xe(p):
if isinstance(p, str):
return Char(p)
if isinstance(p, dict):
for key in p:
return Edge2(Xe(p[key]), key)
return EMPTY
return p
def seq(*ps):
if len(ps) == 0: return EMPTY
if len(ps) == 1: return Xe(ps[0])
return Seq2(*list(map(Xe, ps)))
e = seq
def choice(*ps):
return Ore2(*list(map(Xe, ps)))
def many(*ps): return Many(seq(*ps))
def many1(*ps): return Many1(seq(*ps))
def option(*ps): return Option(seq(*ps))
def TreeAs(node, *ps): return Node(seq(*ps), node)
def ListAs(*ps): return Node(seq(*ps), '')
def FoldAs(edge, node, *ps): return Fold2(seq(*ps), edge, node)
def c(*ps):
chars = []
ranges = []
for x in ps:
if isinstance(x, str):
chars.append(x)
else:
ranges.append(tuple(x))
return Range(''.join(chars), ranges)
#
def ref(p): return g.newRef(p)
def rule(g, name, *ps): g.add(name,seq(*ps))
__ = ref('__')
_ = ref('_')
EOS = ref('EOS')
EOL = ref('EOL')
S = ref('S')
COMMENT = ref('COMMENT')
Expression = ref('Expression')
Identifier = ref('Identifier')
Empty = ref('Empty')
rule(g, 'Start', __, ref('Source'), ref('EOF'))
rule(g, '__', many(choice(c(' \t\r\n'),COMMENT)))
rule(g, '_', many(choice(c(' \t'),COMMENT)))
rule(g, 'EOF', Not(ANY))
rule(g, 'COMMENT', choice(
e('/*', many(Not(e('*/')), ANY),'*/'),
e('//', many(Not(EOL), ANY))))
rule(g, 'EOL', choice('\n', '\r\n', ref('EOF')))
rule(g, 'S', c(' \t'))
rule(g, 'Source', TreeAs('Source', many({'': ref('Statement')})))
rule(g, 'EOS', _, many(choice(e(';', _), e(EOL,choice(S,COMMENT),_), EOL)))
rule(g, 'Statement', choice(ref('Import'),ref('Example'),ref('Rule')))
rule(g, 'Rule', TreeAs('Rule', {'name': Identifier}, __, '=', __, option(
c('/|'), __), {'inner': Expression}, EOS))
NAME = c(('A', 'Z'), ('a', 'z'), '@_') & many(
c(('A', 'Z'), ('a', 'z'), ('0', '9'), '_.'))
rule(g, 'Identifier', TreeAs('Name', NAME | e(
'"', many(e(r'\"') | Not(c('\\"\n')) & ANY), '"')))
# import
FROM = option(_, 'import', S, _, {'names': ref('Names')})
rule(g, 'Import', TreeAs('Import', 'from', S, _, {
'name': Identifier / ref('Char')}, FROM) & EOS)
rule(g,'Example', TreeAs('Example', 'example', S, _, {
'names': ref('Names')}, {'doc': ref('Doc')}) & EOS)
rule(g, 'Names', ListAs({'': Identifier}, _, many(
c(',&'), _, {'': Identifier}, _)))
DELIM = Xe("'''")
DOC1 = TreeAs("Doc", many(Not(e(DELIM, EOL)), ANY))
DOC2 = TreeAs("Doc", many(Not(c('\r\n')), ANY))
rule(g,'Doc', e(DELIM, many(S), EOL, DOC1, DELIM) | DOC2)
rule(g, 'Expression', ref('Choice'), option(
FoldAs('left', 'Alt', many1(__, '|', _, {'right': ref('Choice')}))))
rule(g, 'Choice', ref('Sequence'), option(
FoldAs('left', 'Ore', many1(__, '/', _, {'right': ref('Sequence')}))))
SS = choice(e(S, _, ~EOL), e(many1(_, EOL), S, _))
rule(g, 'Sequence', ref('Predicate'), option(
FoldAs('left', 'Seq', many1(SS, {'right': ref('Predicate')})) ))
rule(g, 'Predicate', choice(ref('Not'),ref('And'),ref('Suffix')))
rule(g, 'Not', TreeAs('Not', '!', {'inner': ref('Predicate')}))
rule(g,'And', TreeAs('And', '&', {'inner': ref('Predicate')}))
#g['Append'] = TreeAs('Append', '$', {'inner': ref('Term')})
rule(g, 'Suffix', ref('Term'), choice(
FoldAs('inner', 'Many', '*'),
FoldAs('inner', 'Many1', '+'),
FoldAs('inner', 'Option', '?'), EMPTY))
rule(g, 'Term', choice(ref('Group'),ref('Char'),ref('Class'),ref('Any'),ref('Node'),
ref('Fold'),ref('EdgeFold'),ref('Edge'),ref('Func'),ref('Identifier')))
rule(g, 'Group', '(', __, choice(Expression,Empty), __, ')')
rule(g, 'Empty', TreeAs('Empty', EMPTY))
rule(g, 'Any', TreeAs('Any', '.'))
rule(g, 'Char', "'", TreeAs('Char', many(
e('\\', ANY) | Not(c("'\n")) & ANY)), "'")
rule(g, 'Class',
'[', TreeAs('Class', many(e('\\', ANY) | e(Not(e("]")),ANY))), ']')
Tag = e('{', __, option('#', {'node': ref('Identifier')}), __)
ETag = e(option('#', {'node': ref('Identifier')}), __, '}')
rule(g, 'Node', TreeAs('Node', Tag, {'inner': choice(Expression,Empty)}, __, ETag))
rule(g, 'Fold', '^', _, TreeAs(
'Fold', Tag, {'inner': choice(Expression,Empty)}, __, ETag))
rule(g, 'Edge', TreeAs('Edge', {'edge': ref('EdgeName')}, ':', _, {
'inner': ref('Term')}))
rule(g, 'EdgeFold', TreeAs('Fold', {'edge': ref('EdgeName')}, ':', _, '^', _, Tag, {
'inner': choice(Expression,Empty)}, __, ETag))
rule(g, 'EdgeName', TreeAs('', c(('a', 'z'), '$'), many(
c(('A', 'Z'), ('a', 'z'), ('0', '9'), '_'))))
rule(g, 'Func', TreeAs('Func', '@', {'name': Identifier}, '(', __, {
'params': ref('Params')}, ')'))
rule(g, 'Params', ListAs({'': Expression}, many(
_, ',', __, {'': Expression}), __))
# rule(g, 'Ref', TreeAs('Ref', ref('REF')))
# rule(g, 'REF', e('"', many(Xe('\\"') | e(Not(c('\\"\n')), ANY)), '"') | many1(
# Not(c(' \t\r\n(,){};<>[|/*+?=^\'`#')) & ANY))
#g.N = ['Start', 'Sequence']
return g
TPEGGrammar = TPEG(Grammar())
#print(TPEGGrammar)
######################################################################
# ast.env
def bytestr(b):
return b.decode('utf-8') if isinstance(b, bytes) else b
#####################################
class ParseRange(object):
__slots__ = ['urn', 'inputs', 'spos', 'epos']
def __init__(self, urn, inputs, spos, epos):
self.urn = urn
self.inputs = inputs
self.spos = spos
self.epos = epos
@classmethod
def expand(cls, urn, inputs, spos):
inputs | |
read binary mat
"""
fd = open_or_fd(file_or_fd)
try:
if not read_binary:
binary = fd.read(2)
if binary == b"\0B":
mat = _read_mat_binary(fd)
else:
assert binary == b" ["
mat = _read_mat_ascii(fd)
else:
mat = _read_mat_binary(fd)
finally:
if fd is not file_or_fd:
fd.close()
return mat
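# Binary layout parsed by the helpers below (annotation, not part of the original
# source): after the optional b"\0B" binary flag comes a 3-byte type tag (b"FM ",
# b"DM ", b"FV " or b"DV "), then for each dimension a 1-byte size marker (b"\4")
# followed by a little-endian int32, and finally the row-major sample data
# (4 bytes per float32 value, 8 per float64).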
def _read_mat_binary(fd):
# Data type
    type = fd.read(3)
    sample_size = 0 # unknown header type, rejected by the assert below
    if type == b"FM " or type == b"FV ":
        sample_size = 4 # floats
    elif type == b"DM " or type == b"DV ":
        sample_size = 8 # doubles
read_vector = False
if type == b"FV " or type == b"DV ":
read_vector = True
assert sample_size > 0
# Dimensions
fd.read(1)
rows = struct.unpack("<i", fd.read(4))[0]
if not read_vector:
fd.read(1)
cols = struct.unpack("<i", fd.read(4))[0]
else:
cols = 1
# Read whole matrix
buf = fd.read(rows * cols * sample_size)
if sample_size == 4:
vec = np.frombuffer(buf, dtype="float32")
elif sample_size == 8:
vec = np.frombuffer(buf, dtype="float64")
else:
raise ValueError("BadSampleSize")
mat = np.reshape(vec, (rows, cols))
return mat
def _read_mat_ascii(fd):
rows = []
while 1:
line = fd.readline()
if len(line) == 0:
raise ValueError("BadInputFormat") # eof, should not happen!
if len(line.strip()) == 0:
continue # skip empty line
arr = line.strip().split()
if arr[-1] != b"]":
rows.append(np.array(arr, dtype="float32")) # not last line
else:
rows.append(np.array(arr[:-1], dtype="float32")) # last line
mat = np.vstack(rows)
return mat
# Writing,
def write_vec(file_or_fd, v, key=b""):
"""write_vec(f, v, key='')
Write a binary kaldi vector to filename or stream. Supports 32bit and 64bit
floats.
Parameters
----------
file_or_fd: obj
        filename or opened file descriptor for writing,
v: numpy.ndarray
the vector or matrix to be stored, for matrix it will be reshaped to vector in C order,
key : str, optional
used for writing ark-file, the utterance-id gets written before the
vector.
Example of writing single vector:
kaldi_io.write_vec(filename, vec)
Example of writing arkfile:
with open(ark_file,'w') as f:
for key,vec in dict.iteritems():
kaldi_io.write_vec(f, vec, key=key)
Raises
------
ValueError
Unsupported data-type of the input file.
"""
fd = open_or_fd(file_or_fd, mode="wb")
try:
if key != b"":
fd.write(key + b" ") # ark-files have keys (utterance-id),
fd.write(b"\0B") # we write binary!
# Data-type,
if v.dtype == "float32":
fd.write(b"FV ")
elif v.dtype == "float64":
fd.write(b"DV ")
else:
raise ValueError("MatrixDataTypeError")
# Dims,
dim = v.size
fd.write(b"\04")
fd.write(struct.pack("I", dim)) # vector length
# Data,
# m.tofile(fd, sep=b"") # binary
fd.write(v.tobytes())
finally:
if fd is not file_or_fd:
fd.close()
def write_mat(file_or_fd, m, key=b""):
"""write_mat(f, m, key='')
Write a binary kaldi matrix to filename or stream. Supports 32bit and 64bit
floats.
Parameters
----------
file_or_fd: obj
        filename or opened file descriptor for writing,
m: numpy.ndarray
the matrix to be stored,
key : str, optional
used for writing ark-file, the utterance-id gets written before the
matrix.
Example of writing single matrix:
kaldi_io.write_mat(filename, mat)
Example of writing arkfile:
with open(ark_file,'w') as f:
for key,mat in dict.iteritems():
kaldi_io.write_mat(f, mat, key=key)
Raises
------
ValueError
Unsupported data-type of the input file.
"""
fd = open_or_fd(file_or_fd, mode="wb")
try:
if key != b"":
fd.write(key + b" ") # ark-files have keys (utterance-id),
fd.write(b"\0B") # we write binary!
# Data-type,
if m.dtype == "float32":
fd.write(b"FM ")
elif m.dtype == "float64":
fd.write(b"DM ")
else:
raise ValueError("MatrixDataTypeError")
# Dims,
fd.write(b"\04")
fd.write(struct.pack("I", m.shape[0])) # rows
fd.write(b"\04")
fd.write(struct.pack("I", m.shape[1])) # cols
# Data,
# m.tofile(fd, sep=b"") # binary
fd.write(m.tobytes())
finally:
if fd is not file_or_fd:
fd.close()
#################################################
# 'Posterior' kaldi type (posteriors, confusion network, nnet1 training targets, ...)
# Corresponds to: vector<vector<tuple<int,float> > >
# - outer vector: time axis
# - inner vector: records at the time
# - tuple: int = index, float = value
#
def read_cnet_ark(file_or_fd):
"""Alias of function 'read_post_ark()', 'cnet' = confusion network
Parameters
----------
file_or_fd : obj
An ark, gzipped ark, pipe or opened file descriptor.
"""
return read_post_ark(file_or_fd)
def read_post_ark(file_or_fd):
"""generator(key,vec<vec<int,float>>) = read_post_ark(file)
Returns generator of (key,posterior) tuples, read from ark file.
file_or_fd : ark, gzipped ark, pipe or opened file descriptor.
Iterate the ark
---------------
for key,post in kaldi_io.read_post_ark(file):
...
Read ark to a 'dictionary':
d = { key:post for key,post in kaldi_io.read_post_ark(file) }
Parameters
----------
file_or_fd : obj
An ark, gzipped ark, pipe or opened file descriptor.
"""
fd = open_or_fd(file_or_fd)
try:
key = read_key(fd)
while key:
post = read_post(fd)
yield key, post
key = read_key(fd)
finally:
if fd is not file_or_fd:
fd.close()
def read_post(file_or_fd):
"""[post] = read_post(file_or_fd)
Reads single kaldi 'Posterior' in binary format.
The 'Posterior' is C++ type 'vector<vector<tuple<int,float> > >',
the outer-vector is usually time axis, inner-vector are the records
at given time, and the tuple is composed of an 'index' (integer)
and a 'float-value'. The 'float-value' can represent a probability
or any other numeric value.
Returns vector of vectors of tuples.
Parameters
----------
file_or_fd : obj
An ark, gzipped ark, pipe or opened file descriptor.
"""
fd = open_or_fd(file_or_fd)
ans = []
binary = fd.read(2)
assert binary == b"\0B" # binary flag
assert fd.read(1) == b"\4" # int-size
outer_vec_size = struct.unpack("<i", fd.read(4))[0] # number of frames (or bins)
# Loop over 'outer-vector',
for i in range(outer_vec_size):
assert fd.read(1) == b"\4" # int-size
# number of records for frame (or bin)
inner_vec_size = struct.unpack("<i", fd.read(4))[0]
id = np.zeros(inner_vec_size, dtype=int) # buffer for integer id's
post = np.zeros(inner_vec_size, dtype=float) # buffer for posteriors
# Loop over 'inner-vector',
for j in range(inner_vec_size):
assert fd.read(1) == b"\4" # int-size
id[j] = struct.unpack("<i", fd.read(4))[0] # id
assert fd.read(1) == b"\4" # float-size
post[j] = struct.unpack("<f", fd.read(4))[0] # post
# Append the 'inner-vector' of tuples into the 'outer-vector'
ans.append(zip(id, post))
if fd is not file_or_fd:
fd.close()
return ans
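# Iteration sketch (annotation, not part of the original source): each element of the
# returned list is a zip of (index, value) pairs for one frame, so a posterior can be
# consumed as
#
#     for frame in post:
#         for idx, val in frame:  # note: zip objects can only be iterated once
#             ...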
#################################################
# Kaldi Confusion Network bin begin/end times,
# (kaldi stores CNs time info separately from the Posterior).
#
def read_cntime_ark(file_or_fd):
"""generator(key,vec<tuple<float,float>>) = read_cntime_ark(file_or_fd)
Returns generator of (key,cntime) tuples, read from ark file.
file_or_fd : file, gzipped file, pipe or opened file descriptor.
Iterate the ark
---------------
for key,time in kaldi_io.read_cntime_ark(file):
...
Read ark to a 'dictionary':
d = { key:time for key,time in kaldi_io.read_post_ark(file) }
Parameters
----------
file_or_fd : obj
An ark, gzipped ark, pipe or opened file descriptor.
"""
fd = open_or_fd(file_or_fd)
try:
key = read_key(fd)
while key:
cntime = read_cntime(fd)
yield key, cntime
key = read_key(fd)
finally:
if fd is not file_or_fd:
fd.close()
def read_cntime(file_or_fd):
"""[cntime] = read_cntime(file_or_fd)
Reads single kaldi 'Confusion Network time info', in binary format:
C++ type: vector<tuple<float,float> >.
(begin/end times of bins at the confusion network).
Binary layout is '<num-bins> <beg1> <end1> <beg2> <end2> ...'
file_or_fd : file, gzipped file, pipe or opened file descriptor.
Returns vector of tuples.
Parameters
----------
file_or_fd : obj
An ark, gzipped ark, pipe or opened file descriptor.
"""
fd = open_or_fd(file_or_fd)
binary = fd.read(2)
assert binary == b"\0B" # assuming it's binary
assert fd.read(1) == b"\4" # int-size
# Get number of bins,
vec_size = struct.unpack("<i", fd.read(4))[0] # number of frames (or bins)
t_beg = np.zeros(vec_size, dtype=float)
t_end = np.zeros(vec_size, dtype=float)
# Loop over number of bins,
for i in range(vec_size):
assert fd.read(1) == b"\4" # float-size
t_beg[i] = struct.unpack("<f", fd.read(4))[0] # begin-time of bin
assert fd.read(1) == b"\4" # float-size
t_end[i] = struct.unpack("<f", fd.read(4))[0] # end-time of bin
# Return vector of tuples,
ans = zip(t_beg, t_end)
if fd is not file_or_fd:
fd.close()
return ans
def write_wav(fp, data, rate, bitdepth=16):
""" Write a wav file.
Parameters
----------
fp : obj
A file pointer.
data: numpy.ndarray
Data samples
rate: int
Sample rate.
bitdepth: int, optional
A bit depth of samples.
"""
num_chan = 1
num_samp = data.size
# logger.debug("... write_wav. num_samp: %s", num_samp)
assert bitdepth == 8 or bitdepth == 16 or bitdepth == 32
if bitdepth == 8:
bytes_per_samp = 1
elif bitdepth == 16:
bytes_per_samp = 2
elif bitdepth == 32:
bytes_per_samp = 4
subchunk2size = num_chan * num_samp * bytes_per_samp
chunk_size = 36 + subchunk2size
fp.write(b"RIFF")
fp.write(np.array(chunk_size, dtype="int32"))
fp.write(b"WAVE")
fp.write(b"fmt ")
fp.write(np.array(16, dtype="int32"))
# wFormatTag, 1 for PCM, 3 for IEEE_FLOAT
# Kaldi will only read PCM wav files
fp.write(np.array(1, dtype="int16"))
fp.write(np.array(num_chan, dtype="int16"))
# logger.debug("... write_wav",)
fp.write(np.array(rate, dtype="int32"))
fp.write(np.array(rate * num_chan * bytes_per_samp, dtype="int32"))
fp.write(np.array(num_chan * bytes_per_samp, dtype="int16"))
fp.write(np.array(8 * bytes_per_samp, dtype="int16"))
fp.write(b"data")
fp.write(np.array(subchunk2size, dtype="int32"))
# | |
5395, 5396, 5402, 5401)
model.createElement(3760, 5636, 5637, 5643, 5642, 5396, 5397, 5403, 5402)
model.createElement(3761, 5637, 5638, 5644, 5643, 5397, 5398, 5404, 5403)
model.createElement(3762, 5638, 3083, 3084, 5644, 5398, 3043, 3044, 5404)
model.createElement(3763, 2397, 5639, 5645, 2396, 2357, 5399, 5405, 2356)
model.createElement(3764, 5639, 5640, 5646, 5645, 5399, 5400, 5406, 5405)
model.createElement(3765, 5640, 5641, 5647, 5646, 5400, 5401, 5407, 5406)
model.createElement(3766, 5641, 5642, 5648, 5647, 5401, 5402, 5408, 5407)
model.createElement(3767, 5642, 5643, 5649, 5648, 5402, 5403, 5409, 5408)
model.createElement(3768, 5643, 5644, 5650, 5649, 5403, 5404, 5410, 5409)
model.createElement(3769, 5644, 3084, 3085, 5650, 5404, 3044, 3045, 5410)
model.createElement(3770, 2396, 5645, 5651, 2395, 2356, 5405, 5411, 2355)
model.createElement(3771, 5645, 5646, 5652, 5651, 5405, 5406, 5412, 5411)
model.createElement(3772, 5646, 5647, 5653, 5652, 5406, 5407, 5413, 5412)
model.createElement(3773, 5647, 5648, 5654, 5653, 5407, 5408, 5414, 5413)
model.createElement(3774, 5648, 5649, 5655, 5654, 5408, 5409, 5415, 5414)
model.createElement(3775, 5649, 5650, 5656, 5655, 5409, 5410, 5416, 5415)
model.createElement(3776, 5650, 3085, 3086, 5656, 5410, 3045, 3046, 5416)
model.createElement(3777, 2395, 5651, 5657, 2394, 2355, 5411, 5417, 2354)
model.createElement(3778, 5651, 5652, 5658, 5657, 5411, 5412, 5418, 5417)
model.createElement(3779, 5652, 5653, 5659, 5658, 5412, 5413, 5419, 5418)
model.createElement(3780, 5653, 5654, 5660, 5659, 5413, 5414, 5420, 5419)
model.createElement(3781, 5654, 5655, 5661, 5660, 5414, 5415, 5421, 5420)
model.createElement(3782, 5655, 5656, 5662, 5661, 5415, 5416, 5422, 5421)
model.createElement(3783, 5656, 3086, 3087, 5662, 5416, 3046, 3047, 5422)
model.createElement(3784, 2394, 5657, 5663, 2393, 2354, 5417, 5423, 2353)
model.createElement(3785, 5657, 5658, 5664, 5663, 5417, 5418, 5424, 5423)
model.createElement(3786, 5658, 5659, 5665, 5664, 5418, 5419, 5425, 5424)
model.createElement(3787, 5659, 5660, 5666, 5665, 5419, 5420, 5426, 5425)
model.createElement(3788, 5660, 5661, 5667, 5666, 5420, 5421, 5427, 5426)
model.createElement(3789, 5661, 5662, 5668, 5667, 5421, 5422, 5428, 5427)
model.createElement(3790, 5662, 3087, 3088, 5668, 5422, 3047, 3048, 5428)
model.createElement(3791, 2393, 5663, 5669, 2392, 2353, 5423, 5429, 2352)
model.createElement(3792, 5663, 5664, 5670, 5669, 5423, 5424, 5430, 5429)
model.createElement(3793, 5664, 5665, 5671, 5670, 5424, 5425, 5431, 5430)
model.createElement(3794, 5665, 5666, 5672, 5671, 5425, 5426, 5432, 5431)
model.createElement(3795, 5666, 5667, 5673, 5672, 5426, 5427, 5433, 5432)
model.createElement(3796, 5667, 5668, 5674, 5673, 5427, 5428, 5434, 5433)
model.createElement(3797, 5668, 3088, 3089, 5674, 5428, 3048, 3049, 5434)
model.createElement(3798, 2392, 5669, 5675, 2391, 2352, 5429, 5435, 2351)
model.createElement(3799, 5669, 5670, 5676, 5675, 5429, 5430, 5436, 5435)
model.createElement(3800, 5670, 5671, 5677, 5676, 5430, 5431, 5437, 5436)
model.createElement(3801, 5671, 5672, 5678, 5677, 5431, 5432, 5438, 5437)
model.createElement(3802, 5672, 5673, 5679, 5678, 5432, 5433, 5439, 5438)
model.createElement(3803, 5673, 5674, 5680, 5679, 5433, 5434, 5440, 5439)
model.createElement(3804, 5674, 3089, 3090, 5680, 5434, 3049, 3050, 5440)
model.createElement(3805, 2391, 5675, 5681, 2390, 2351, 5435, 5441, 2350)
model.createElement(3806, 5675, 5676, 5682, 5681, 5435, 5436, 5442, 5441)
model.createElement(3807, 5676, 5677, 5683, 5682, 5436, 5437, 5443, 5442)
model.createElement(3808, 5677, 5678, 5684, 5683, 5437, 5438, 5444, 5443)
model.createElement(3809, 5678, 5679, 5685, 5684, 5438, 5439, 5445, 5444)
model.createElement(3810, 5679, 5680, 5686, 5685, 5439, 5440, 5446, 5445)
model.createElement(3811, 5680, 3090, 3091, 5686, 5440, 3050, 3051, 5446)
model.createElement(3812, 2390, 5681, 5687, 2389, 2350, 5441, 5447, 2349)
model.createElement(3813, 5681, 5682, 5688, 5687, 5441, 5442, 5448, 5447)
model.createElement(3814, 5682, 5683, 5689, 5688, 5442, 5443, 5449, 5448)
model.createElement(3815, 5683, 5684, 5690, 5689, 5443, 5444, 5450, 5449)
model.createElement(3816, 5684, 5685, 5691, 5690, 5444, 5445, 5451, 5450)
model.createElement(3817, 5685, 5686, 5692, 5691, 5445, 5446, 5452, 5451)
model.createElement(3818, 5686, 3091, 3092, 5692, 5446, 3051, 3052, 5452)
model.createElement(3819, 2389, 5687, 5693, 2388, 2349, 5447, 5453, 2348)
model.createElement(3820, 5687, 5688, 5694, 5693, 5447, 5448, 5454, 5453)
model.createElement(3821, 5688, 5689, 5695, 5694, 5448, 5449, 5455, 5454)
model.createElement(3822, 5689, 5690, 5696, 5695, 5449, 5450, 5456, 5455)
model.createElement(3823, 5690, 5691, 5697, 5696, 5450, 5451, 5457, 5456)
model.createElement(3824, 5691, 5692, 5698, 5697, 5451, 5452, 5458, 5457)
model.createElement(3825, 5692, 3092, 3093, 5698, 5452, 3052, 3053, 5458)
model.createElement(3826, 2388, 5693, 5699, 2387, 2348, 5453, 5459, 2347)
model.createElement(3827, 5693, 5694, 5700, 5699, 5453, 5454, 5460, 5459)
model.createElement(3828, 5694, 5695, 5701, 5700, 5454, 5455, 5461, 5460)
model.createElement(3829, 5695, 5696, 5702, 5701, 5455, 5456, 5462, 5461)
model.createElement(3830, 5696, 5697, 5703, 5702, 5456, 5457, 5463, 5462)
model.createElement(3831, 5697, 5698, 5704, 5703, 5457, 5458, 5464, 5463)
model.createElement(3832, 5698, 3093, 3094, 5704, 5458, 3053, 3054, 5464)
model.createElement(3833, 2387, 5699, 5705, 2386, 2347, 5459, 5465, 2346)
model.createElement(3834, 5699, 5700, 5706, 5705, 5459, 5460, 5466, 5465)
model.createElement(3835, 5700, 5701, 5707, 5706, 5460, 5461, 5467, 5466)
model.createElement(3836, 5701, 5702, 5708, 5707, 5461, 5462, 5468, 5467)
model.createElement(3837, 5702, 5703, 5709, 5708, 5462, 5463, 5469, 5468)
model.createElement(3838, 5703, 5704, 5710, 5709, 5463, 5464, 5470, 5469)
model.createElement(3839, 5704, 3094, 3095, 5710, 5464, 3054, 3055, 5470)
model.createElement(3840, 2386, 5705, 5711, 2385, 2346, 5465, 5471, 2345)
model.createElement(3841, 5705, 5706, 5712, 5711, 5465, 5466, 5472, 5471)
model.createElement(3842, 5706, 5707, 5713, 5712, 5466, 5467, 5473, 5472)
model.createElement(3843, 5707, 5708, 5714, 5713, 5467, 5468, 5474, 5473)
model.createElement(3844, 5708, 5709, 5715, 5714, 5468, 5469, 5475, 5474)
model.createElement(3845, 5709, 5710, 5716, 5715, 5469, 5470, 5476, 5475)
model.createElement(3846, 5710, 3095, 3096, 5716, 5470, 3055, 3056, 5476)
model.createElement(3847, 2385, 5711, 5717, 2384, 2345, 5471, 5477, 2344)
model.createElement(3848, 5711, 5712, 5718, 5717, 5471, 5472, 5478, 5477)
model.createElement(3849, 5712, 5713, 5719, 5718, 5472, 5473, 5479, 5478)
model.createElement(3850, 5713, 5714, 5720, 5719, 5473, 5474, 5480, 5479)
model.createElement(3851, 5714, 5715, 5721, 5720, 5474, 5475, 5481, 5480)
model.createElement(3852, 5715, 5716, 5722, 5721, 5475, 5476, 5482, 5481)
model.createElement(3853, 5716, 3096, 3097, 5722, 5476, 3056, 3057, 5482)
model.createElement(3854, 2384, 5717, 5723, 2383, 2344, 5477, 5483, 2343)
model.createElement(3855, 5717, 5718, 5724, 5723, 5477, 5478, 5484, 5483)
model.createElement(3856, 5718, 5719, 5725, 5724, 5478, 5479, 5485, 5484)
model.createElement(3857, 5719, 5720, 5726, 5725, 5479, 5480, 5486, 5485)
model.createElement(3858, 5720, 5721, 5727, 5726, 5480, 5481, 5487, 5486)
model.createElement(3859, 5721, 5722, 5728, 5727, 5481, 5482, 5488, 5487)
model.createElement(3860, 5722, 3097, 3098, 5728, 5482, 3057, 3058, 5488)
model.createElement(3861, 2383, 5723, 5729, 2382, 2343, 5483, 5489, 2342)
model.createElement(3862, 5723, 5724, 5730, 5729, 5483, 5484, 5490, 5489)
model.createElement(3863, 5724, 5725, 5731, 5730, 5484, 5485, 5491, 5490)
model.createElement(3864, 5725, 5726, 5732, 5731, 5485, 5486, 5492, 5491)
model.createElement(3865, 5726, 5727, 5733, 5732, 5486, 5487, 5493, 5492)
model.createElement(3866, 5727, 5728, 5734, 5733, 5487, 5488, 5494, 5493)
model.createElement(3867, 5728, 3098, 3099, 5734, 5488, 3058, 3059, 5494)
model.createElement(3868, 2382, 5729, 5735, 2381, 2342, 5489, 5495, 2341)
model.createElement(3869, 5729, 5730, 5736, 5735, 5489, 5490, 5496, 5495)
model.createElement(3870, 5730, 5731, 5737, 5736, 5490, 5491, 5497, 5496)
model.createElement(3871, 5731, 5732, 5738, 5737, 5491, 5492, 5498, 5497)
model.createElement(3872, 5732, 5733, 5739, 5738, 5492, 5493, 5499, 5498)
model.createElement(3873, 5733, 5734, 5740, 5739, 5493, 5494, 5500, 5499)
model.createElement(3874, 5734, 3099, 3100, 5740, 5494, 3059, 3060, 5500)
model.createElement(3875, 2381, 5735, 1118, 41, 2341, 5495, 1112, 40)
model.createElement(3876, 5735, 5736, 1119, 1118, 5495, 5496, 1113, 1112)
model.createElement(3877, 5736, 5737, 1120, 1119, 5496, 5497, 1114, 1113)
model.createElement(3878, 5737, 5738, 1121, 1120, 5497, 5498, 1115, 1114)
model.createElement(3879, 5738, 5739, 1122, 1121, 5498, 5499, 1116, 1115)
model.createElement(3880, 5739, 5740, 1123, 1122, 5499, 5500, 1117, 1116)
model.createElement(3881, 5740, 3100, 221, 1123, 5500, 3060, 222, 1117)
model.createElement(3882, 18, 582, 2661, 481, 521, 3125, 5501, 2420)
model.createElement(3883, 582, 581, 2662, 2661, 3125, 3126, 5502, 5501)
model.createElement(3884, 581, 580, 2663, 2662, 3126, 3127, 5503, 5502)
model.createElement(3885, 580, 579, 2664, 2663, 3127, 3128, 5504, 5503)
model.createElement(3886, 579, 578, 2665, 2664, 3128, 3129, 5505, 5504)
model.createElement(3887, 578, 577, 2666, 2665, 3129, 3130, 5506, 5505)
model.createElement(3888, 577, 20, 622, 2666, 3130, 627, 3061, 5506)
model.createElement(3889, 481, 2661, 2667, 482, 2420, 5501, 5507, 2419)
model.createElement(3890, 2661, 2662, 2668, 2667, 5501, 5502, 5508, 5507)
model.createElement(3891, 2662, 2663, 2669, 2668, 5502, 5503, 5509, 5508)
model.createElement(3892, 2663, 2664, 2670, 2669, 5503, 5504, 5510, 5509)
model.createElement(3893, 2664, 2665, 2671, 2670, 5504, 5505, 5511, 5510)
model.createElement(3894, 2665, 2666, 2672, 2671, 5505, 5506, 5512, 5511)
model.createElement(3895, 2666, 622, 621, 2672, 5506, 3061, 3062, 5512)
model.createElement(3896, 482, 2667, 2673, 483, 2419, 5507, 5513, 2418)
model.createElement(3897, 2667, 2668, 2674, 2673, 5507, 5508, 5514, 5513)
model.createElement(3898, 2668, 2669, 2675, 2674, 5508, 5509, 5515, 5514)
model.createElement(3899, 2669, 2670, 2676, 2675, 5509, 5510, 5516, 5515)
model.createElement(3900, 2670, 2671, 2677, 2676, 5510, 5511, 5517, 5516)
model.createElement(3901, 2671, 2672, 2678, 2677, 5511, 5512, 5518, 5517)
model.createElement(3902, 2672, 621, 620, 2678, 5512, 3062, 3063, 5518)
model.createElement(3903, 483, 2673, 2679, 484, 2418, 5513, 5519, 2417)
model.createElement(3904, 2673, 2674, 2680, 2679, 5513, 5514, 5520, 5519)
model.createElement(3905, 2674, 2675, 2681, 2680, 5514, 5515, 5521, 5520)
model.createElement(3906, 2675, 2676, 2682, 2681, 5515, 5516, 5522, 5521)
model.createElement(3907, 2676, 2677, 2683, 2682, 5516, 5517, 5523, 5522)
model.createElement(3908, 2677, 2678, 2684, 2683, 5517, 5518, 5524, 5523)
model.createElement(3909, 2678, 620, 619, 2684, 5518, 3063, 3064, 5524)
model.createElement(3910, 484, 2679, 2685, 485, 2417, 5519, 5525, 2416)
model.createElement(3911, 2679, 2680, 2686, 2685, 5519, 5520, 5526, 5525)
model.createElement(3912, 2680, 2681, 2687, 2686, 5520, 5521, 5527, 5526)
model.createElement(3913, 2681, 2682, 2688, 2687, 5521, 5522, 5528, 5527)
model.createElement(3914, 2682, 2683, 2689, 2688, 5522, 5523, 5529, 5528)
model.createElement(3915, 2683, 2684, 2690, 2689, 5523, 5524, 5530, 5529)
model.createElement(3916, 2684, 619, 618, 2690, 5524, 3064, 3065, 5530)
model.createElement(3917, 485, 2685, 2691, 486, 2416, 5525, 5531, 2415)
model.createElement(3918, 2685, 2686, 2692, 2691, 5525, 5526, 5532, 5531)
model.createElement(3919, 2686, 2687, 2693, 2692, | |
import numpy as np
import torch
import torch.nn as nn
from utils.nn import RoundStraightThrough
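# The classes below implement Integer Discrete Flows (IDFs): additive coupling
# layers whose shifts are rounded with a straight-through estimator, so the
# transformation maps integers to integers and remains exactly invertible.
# IDF splits the input into 2 chunks and shifts one of them; IDF2, IDF4 and
# IDF8 couple 2, 4 and 8 chunks in an autoregressive fashion.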
class IDF(nn.Module):
def __init__(self, nett, num_flows, D=2):
super(IDF, self).__init__()
print('IDF by JT.')
self.t = torch.nn.ModuleList([nett() for _ in range(num_flows)])
self.num_flows = num_flows
self.round = RoundStraightThrough.apply
self.p = nn.Parameter(torch.zeros(1, D))
self.mu = nn.Parameter(torch.ones(1, D) * 0.5)
def coupling(self, x, index, forward=True):
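# Additive coupling: split x in half and shift the second half by a rounded
# transformation of the first. Rounding keeps the values integer-valued, and
# the straight-through estimator lets gradients pass through during training.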
(xa, xb) = torch.chunk(x, 2, 1)
t = self.t[index](xa)
if forward:
yb = xb + self.round(t)
else:
yb = xb - self.round(t)
return torch.cat((xa, yb), 1)
def permute(self, x):
return x.flip(1)
def f(self, x):
z = x
for i in range(self.num_flows):
z = self.coupling(z, i, forward=True)
z = self.permute(z)
return z
def f_inv(self, z):
x = z
for i in reversed(range(self.num_flows)):
x = self.permute(x)
x = self.coupling(x, i, forward=False)
return x
def forward(self, x):
z = self.f(x)
return self.log_prior(z)
def sample(self, batchSize, D=2, intMax=100):
# sample z:
z = self.prior_sample(batchSize=batchSize, D=D, intMax=intMax)
# x = f^-1(z)
x = self.f_inv(z)
return x.view(batchSize, 1, D)
def log_integer_probability(self, x, p, mu):
# Chakraborty & Chakravarty, "A new discrete probability distribution with integer support on (−∞, ∞)",
# Communications in Statistics - Theory and Methods, 45:2, 492-505, DOI: 10.1080/03610926.2013.830743
log_p = torch.log(1. - p) + (x - mu) * torch.log(p) \
- torch.log(1. + torch.exp((x - mu) * torch.log(p))) \
- torch.log(1. + torch.exp((x - mu + 1.) * torch.log(p)))
return log_p
def log_prior(self, x):
p = torch.sigmoid(self.p)
log_p = self.log_integer_probability(x, p, self.mu)
return log_p.sum(1)
def prior_sample(self, batchSize, D=2, intMax=100):
ints = np.expand_dims(np.arange(-intMax, intMax + 1), 0)
for d in range(D):
p = torch.sigmoid(self.p[:, [d]])
mu = self.mu[:, d]
log_p = self.log_integer_probability(torch.from_numpy(ints), p, mu)
if d == 0:
z = torch.from_numpy(np.random.choice(ints[0], (batchSize, 1),
p=torch.exp(log_p[0]).detach().numpy()).astype(np.float32))
else:
z_new = torch.from_numpy(np.random.choice(ints[0], (batchSize, 1),
p=torch.exp(log_p[0]).detach().numpy()).astype(np.float32))
z = torch.cat((z, z_new), 1)
return z
class IDF2(nn.Module):
def __init__(self, nett_a, nett_b, num_flows, D=2):
super(IDF2, self).__init__()
print('IDF by JT.')
self.t_a = torch.nn.ModuleList([nett_a() for _ in range(num_flows)])
self.t_b = torch.nn.ModuleList([nett_b() for _ in range(num_flows)])
self.num_flows = num_flows
self.round = RoundStraightThrough.apply
self.p = nn.Parameter(torch.zeros(1, D))
self.mu = nn.Parameter(torch.ones(1, D) * 0.5)
def coupling(self, x, index, forward=True):
(xa, xb) = torch.chunk(x, 2, 1)
if forward:
ya = xa + self.round(self.t_a[index](xb))
yb = xb + self.round(self.t_b[index](ya))
else:
yb = xb - self.round(self.t_b[index](xa))
ya = xa - self.round(self.t_a[index](yb))
return torch.cat((ya, yb), 1)
def permute(self, x):
return x.flip(1)
def f(self, x):
z = x
for i in range(self.num_flows):
z = self.coupling(z, i, forward=True)
z = self.permute(z)
return z
def f_inv(self, z):
x = z
for i in reversed(range(self.num_flows)):
x = self.permute(x)
x = self.coupling(x, i, forward=False)
return x
def forward(self, x):
z = self.f(x)
return self.log_prior(z)
def sample(self, batchSize, D=2, intMax=100):
# sample z:
z = self.prior_sample(batchSize=batchSize, D=D, intMax=intMax)
# x = f^-1(z)
x = self.f_inv(z)
return x.view(batchSize, 1, D)
def log_integer_probability(self, x, p, mu):
# Chakraborty & Chakravarty, "A new discrete probability distribution with integer support on (−∞, ∞)",
# Communications in Statistics - Theory and Methods, 45:2, 492-505, DOI: 10.1080/03610926.2013.830743
log_p = torch.log(1. - p) + (x - mu) * torch.log(p) \
- torch.log(1. + torch.exp((x - mu) * torch.log(p))) \
- torch.log(1. + torch.exp((x - mu + 1.) * torch.log(p)))
return log_p
def log_prior(self, x):
p = torch.sigmoid(self.p)
log_p = self.log_integer_probability(x, p, self.mu)
return log_p.sum()
def prior_sample(self, batchSize, D=2, intMax=100):
ints = np.expand_dims(np.arange(-intMax, intMax + 1), 0)
for d in range(D):
p = torch.sigmoid(self.p[:, [d]])
mu = self.mu[:, d]
log_p = self.log_integer_probability(torch.from_numpy(ints), p, mu)
if d == 0:
z = torch.from_numpy(np.random.choice(ints[0], (batchSize, 1),
p=torch.exp(log_p[0]).detach().numpy()).astype(np.float32))
else:
z_new = torch.from_numpy(np.random.choice(ints[0], (batchSize, 1),
p=torch.exp(log_p[0]).detach().numpy()).astype(np.float32))
z = torch.cat((z, z_new), 1)
return z
class IDF4(nn.Module):
def __init__(self, nett_a, nett_b, nett_c, nett_d, num_flows, D=2):
super(IDF4, self).__init__()
print('IDF by JT.')
self.t_a = torch.nn.ModuleList([nett_a() for _ in range(num_flows)])
self.t_b = torch.nn.ModuleList([nett_b() for _ in range(num_flows)])
self.t_c = torch.nn.ModuleList([nett_c() for _ in range(num_flows)])
self.t_d = torch.nn.ModuleList([nett_d() for _ in range(num_flows)])
self.num_flows = num_flows
self.round = RoundStraightThrough.apply
self.p = nn.Parameter(torch.zeros(1, D))
self.mu = nn.Parameter(torch.ones(1, D) * 0.5)
def coupling(self, x, index, forward=True):
(xa, xb, xc, xd) = torch.chunk(x, 4, 1)
if forward:
ya = xa + self.round(self.t_a[index](torch.cat((xb, xc, xd), 1)))
yb = xb + self.round(self.t_b[index](torch.cat((ya, xc, xd), 1)))
yc = xc + self.round(self.t_c[index](torch.cat((ya, yb, xd), 1)))
yd = xd + self.round(self.t_d[index](torch.cat((ya, yb, yc), 1)))
else:
yd = xd - self.round(self.t_d[index](torch.cat((xa, xb, xc), 1)))
yc = xc - self.round(self.t_c[index](torch.cat((xa, xb, yd), 1)))
yb = xb - self.round(self.t_b[index](torch.cat((xa, yc, yd), 1)))
ya = xa - self.round(self.t_a[index](torch.cat((yb, yc, yd), 1)))
return torch.cat((ya, yb, yc, yd), 1)
def permute(self, x):
return x.flip(1)
def f(self, x):
z = x
for i in range(self.num_flows):
z = self.coupling(z, i, forward=True)
z = self.permute(z)
return z
def f_inv(self, z):
x = z
for i in reversed(range(self.num_flows)):
x = self.permute(x)
x = self.coupling(x, i, forward=False)
return x
def forward(self, x):
z = self.f(x)
return self.log_prior(z)
def sample(self, batchSize, D=2, intMax=100):
# sample z:
z = self.prior_sample(batchSize=batchSize, D=D, intMax=intMax)
# x = f^-1(z)
x = self.f_inv(z)
return x.view(batchSize, 1, D)
def log_integer_probability(self, x, p, mu):
# Chakraborty & Chakravarty, "A new discrete probability distribution with integer support on (−∞, ∞)",
# Communications in Statistics - Theory and Methods, 45:2, 492-505, DOI: 10.1080/03610926.2013.830743
log_p = torch.log(1. - p) + (x - mu) * torch.log(p) \
- torch.log(1. + torch.exp((x - mu) * torch.log(p))) \
- torch.log(1. + torch.exp((x - mu + 1.) * torch.log(p)))
return log_p
def log_prior(self, x):
p = torch.sigmoid(self.p)
log_p = self.log_integer_probability(x, p, self.mu)
return log_p.sum()
def prior_sample(self, batchSize, D=2, intMax=100):
ints = np.expand_dims(np.arange(-intMax, intMax + 1), 0)
for d in range(D):
p = torch.sigmoid(self.p[:, [d]])
mu = self.mu[:, d]
log_p = self.log_integer_probability(torch.from_numpy(ints), p, mu)
if d == 0:
z = torch.from_numpy(np.random.choice(ints[0], (batchSize, 1),
p=torch.exp(log_p[0]).detach().numpy()).astype(np.float32))
else:
z_new = torch.from_numpy(np.random.choice(ints[0], (batchSize, 1),
p=torch.exp(log_p[0]).detach().numpy()).astype(np.float32))
z = torch.cat((z, z_new), 1)
return z
class IDF8(nn.Module):
def __init__(self, nett_a, nett_b, nett_c, nett_d, nett_e, nett_f, nett_g, nett_h, num_flows, D=2):
super(IDF8, self).__init__()
print('IDF by JT.')
self.t_a = torch.nn.ModuleList([nett_a() for _ in range(num_flows)])
self.t_b = torch.nn.ModuleList([nett_b() for _ in range(num_flows)])
self.t_c = torch.nn.ModuleList([nett_c() for _ in range(num_flows)])
self.t_d = torch.nn.ModuleList([nett_d() for _ in range(num_flows)])
self.t_e = torch.nn.ModuleList([nett_e() for _ in range(num_flows)])
self.t_f = torch.nn.ModuleList([nett_f() for _ in range(num_flows)])
self.t_g = torch.nn.ModuleList([nett_g() for _ in range(num_flows)])
self.t_h = torch.nn.ModuleList([nett_h() for _ in range(num_flows)])
self.num_flows = num_flows
self.round = RoundStraightThrough.apply
self.p = nn.Parameter(torch.zeros(1, D))
self.mu = nn.Parameter(torch.ones(1, D) * 0.5)
def coupling(self, x, index, forward=True):
(xa, xb, xc, xd, xe, xf, xg, xh) = torch.chunk(x, 8, 1)
if forward:
ya = xa + self.round(self.t_a[index](torch.cat((xb, xc, xd, xe, xf, xg, xh), 1)))
yb = xb + self.round(self.t_b[index](torch.cat((ya, xc, xd, xe, xf, xg, xh), 1)))
yc = xc + self.round(self.t_c[index](torch.cat((ya, yb, xd, xe, xf, xg, xh), 1)))
yd = xd + self.round(self.t_d[index](torch.cat((ya, yb, yc, xe, xf, xg, xh), 1)))
ye = xe + self.round(self.t_e[index](torch.cat((ya, yb, yc, yd, xf, xg, xh), 1)))
yf = xf + self.round(self.t_f[index](torch.cat((ya, yb, yc, yd, ye, xg, xh), 1)))
yg = xg + self.round(self.t_g[index](torch.cat((ya, yb, yc, yd, ye, yf, xh), 1)))
yh = xh + self.round(self.t_h[index](torch.cat((ya, yb, yc, yd, ye, yf, yg), 1)))
else:
yh = xh - self.round(self.t_h[index](torch.cat((xa, xb, xc, xd, xe, xf, xg), 1)))
yg = xg - self.round(self.t_g[index](torch.cat((xa, xb, xc, xd, xe, xf, yh), 1)))
yf = xf - self.round(self.t_f[index](torch.cat((xa, xb, xc, xd, xe, yg, yh), 1)))
ye = xe - self.round(self.t_e[index](torch.cat((xa, xb, xc, xd, yf, yg, yh), 1)))
yd = xd - self.round(self.t_d[index](torch.cat((xa, xb, xc, ye, yf, yg, yh), 1)))
yc = xc - self.round(self.t_c[index](torch.cat((xa, xb, yd, ye, yf, yg, yh), 1)))
yb = xb - self.round(self.t_b[index](torch.cat((xa, yc, yd, ye, yf, yg, yh), 1)))
ya = xa - self.round(self.t_a[index](torch.cat((yb, yc, yd, ye, yf, yg, yh), 1)))
return torch.cat((ya, yb, yc, yd, ye, yf, yg, yh), 1)
def permute(self, x):
return x.flip(1)
def f(self, x):
z = x
for i in range(self.num_flows):
z = self.coupling(z, i, forward=True)
z = self.permute(z)
return z
def f_inv(self, z):
x = z
for i in reversed(range(self.num_flows)):
x = self.permute(x)
x = self.coupling(x, i, forward=False)
return x
: panose = r'{\*\panose %s}' % font.Panose
if font.Alternate : alternate = r'{\*\falt %s}' % font.Alternate.Name
self._write( r'{\f%s\f%s%s\fcharset%s%s %s%s;}',
offset,
font.Family,
pitch,
font.CharacterSet,
panose,
font.Name,
alternate )
self._font_map[ font ] = offset
offset += 1
self._write( "}\n" )
def _WriteStyleSheet( self ) :
self._write( r"{\stylesheet" )
# TO DO: character styles, does anybody actually use them?
offset_map = {}
for idx, style in enumerate( self._doc.StyleSheet.ParagraphStyles ) :
offset_map[ style ] = idx
# paragraph styles
self.paragraph_style_map = {}
for idx, style in enumerate( self._doc.StyleSheet.ParagraphStyles ) :
if idx == 0 :
default = style
else :
self._write( '\n' )
settings = Settings()
# paragraph properties
self._RendParagraphPropertySet( style.ParagraphPropertySet, settings )
self._RendFramePropertySet ( style.FramePropertySet, settings )
self._RendShadingPropertySet ( style.ShadingPropertySet, settings )
# text properties
self._RendTextPropertySet ( style.TextStyle.TextPropertySet, settings )
self._RendShadingPropertySet( style.TextStyle.ShadingPropertySet, settings )
# have to take
based_on = '\\sbasedon%s' % offset_map.get( style.BasedOn, 0 )
next = '\\snext%s' % offset_map.get( style.Next, 0 )
inln = '\\s%s%s' % ( idx, settings )
self._write( "{%s%s%s %s;}", inln, based_on, next, style.Name )
self.paragraph_style_map[ style ] = inln
# if no style is specified for the first paragraph to be written, this one
# will be used
self._CurrentStyle = self.paragraph_style_map[ default ]
self._write( "}\n" )
def _WriteSection( self, section, is_first, add_header ) :
def WriteHF( hf, rtfword ) :
#if not hf : return
# if we don't have anything in the header/footer then include
# a blank paragraph, this stops it from picking up the header/footer
# from the previous section
# if not hf : hf = [ Paragraph( '' ) ]
if not hf : hf = []
self._write( '{\\%s' % rtfword )
self._WriteElements( hf )
self._write( '}\n' )
settings = Settings()
if not is_first :
# we need to finish off the preceding section
# and reset all of our defaults back to standard
settings.append( 'sect' )
# reset to our defaults
settings.append( 'sectd' )
if add_header :
settings.append( SectionBreakTypeMap[ section.BreakType ] )
self._RendPageProperties( section, settings, in_section=True )
settings.append( section.HeaderY, 'headery%s' )
settings.append( section.FooterY, 'footery%s' )
# write all of these out now as we need to do a write elements in the
# next section
self._write( repr( settings ) )
# finally after all that has settled down we can do the
# headers and footers
if section.FirstHeader or section.FirstFooter :
# include the titlepg flag if the first page has a special format
self._write( r'\titlepg' )
WriteHF( section.FirstHeader, 'headerf' )
WriteHF( section.FirstFooter, 'footerf' )
WriteHF( section.Header, 'header' )
WriteHF( section.Footer, 'footer' )
# and at last the contents of the section that actually appear on the page
self._WriteElements( section )
def _WriteElements( self, elements ) :
new_line = ''
for element in elements :
self._write( new_line )
new_line = '\n'
clss = element.__class__
if clss == Paragraph :
self.WriteParagraphElement( element )
elif clss == Table :
self.WriteTableElement( element )
elif isinstance( element, StringType ) :
self.WriteParagraphElement( Paragraph( element ) )
elif clss in [ RawCode, Image ] :
self.WriteRawCode( element )
#elif clss == List :
# self._HandleListElement( element )
elif self.WriteCustomElement :
self.WriteCustomElement( self, element )
else :
raise Exception( "Don't know how to handle elements of type %s" % clss )
def WriteParagraphElement( self, paragraph_elem, tag_prefix='', tag_suffix=r'\par', opening='{', closing='}' ) :
# the tag_prefix and the tag_suffix take care of paragraphs in tables. A
# paragraph in a table requires an extra tag at the front (intbl) and we
# don't want the ending tag every time. We want it for all paragraphs but
# the last.
overrides = Settings()
self._RendParagraphPropertySet( paragraph_elem.Properties, overrides )
self._RendFramePropertySet ( paragraph_elem.Frame, overrides )
self._RendShadingPropertySet ( paragraph_elem.Shading, overrides )
# when writing the RTF the style is carried from the previous paragraph to the next,
# so if the currently written paragraph has a style then make it the current one,
# otherwise leave it as it was
self._CurrentStyle = self.paragraph_style_map.get( paragraph_elem.Style, self._CurrentStyle )
self._write( r'%s\pard\plain%s %s%s ' % ( opening, tag_prefix, self._CurrentStyle, overrides ) )
for element in paragraph_elem :
if isinstance( element, StringType ) :
self._write( element )
elif isinstance( element, RawCode ) :
self._write( element.Data )
elif isinstance( element, Text ) :
self.WriteTextElement( element )
elif isinstance( element, Inline ) :
self.WriteInlineElement( element )
elif element == TAB :
self._write( r'\tab ' )
elif element == LINE :
self._write( r'\line ' )
elif self.WriteCustomElement :
self.WriteCustomElement( self, element )
else :
raise Exception( 'Don\'t know how to handle %s' % element )
self._write( tag_suffix + closing )
def WriteRawCode( self, raw_elem ) :
self._write( raw_elem.Data )
def WriteTextElement( self, text_elem ) :
overrides = Settings()
self._RendTextPropertySet ( text_elem.Properties, overrides )
self._RendShadingPropertySet( text_elem.Shading, overrides, 'ch' )
# write the wrapper and then let the custom handler have a go
if overrides : self._write( '{%s ' % repr( overrides ) )
# if the data is just a string then we can now write it
if isinstance( text_elem.Data, StringType ) :
self._write( text_elem.Data or '' )
elif text_elem.Data == TAB :
self._write( r'\tab ' )
else :
self.WriteCustomElement( self, text_elem.Data )
if overrides : self._write( '}' )
def WriteInlineElement( self, inline_elem ) :
overrides = Settings()
self._RendTextPropertySet ( inline_elem.Properties, overrides )
self._RendShadingPropertySet( inline_elem.Shading, overrides, 'ch' )
# write the wrapper and then let the custom handler have a go
if overrides : self._write( '{%s ' % repr( overrides ) )
for element in inline_elem :
# if the data is just a string then we can now write it
if isinstance( element, StringType ) :
self._write( element )
elif isinstance( element, RawCode ) :
self._write( element.Data )
elif element == TAB :
self._write( r'\tab ' )
elif element == LINE :
self._write( r'\line ' )
else :
self.WriteCustomElement( self, element )
if overrides : self._write( '}' )
def WriteText( self, text ) :
self._write( text or '' )
def WriteTableElement( self, table_elem ) :
vmerge = [ False ] * table_elem.ColumnCount
for height, cells in table_elem.Rows :
# calculate the right hand edge of the cells taking into account the spans
offset = table_elem.LeftOffset or 0
cellx = []
cell_idx = 0
for cell in cells :
cellx.append( offset + sum( table_elem.ColumnWidths[ : cell_idx + cell.Span ] ) )
cell_idx += cell.Span
self._write( r'{\trowd' )
settings = Settings()
# the spec says that this value is mandatory and I think that 108 is the default value
# so I'll take care of it here
settings.append( table_elem.GapBetweenCells or 108, 'trgaph%s' )
settings.append( TableAlignmentMap[ table_elem.Alignment ] )
settings.append( height, 'trrh%s' )
settings.append( table_elem.LeftOffset, 'trleft%s' )
width = table_elem.LeftOffset or 0
for idx, cell in enumerate( cells ) :
self._RendFramePropertySet ( cell.Frame, settings, 'cl' )
# cells don't have margins so I don't know why I was doing this
# I think it might have an effect in some versions of some WPs.
#self._RendMarginsPropertySet( cell.Margins, settings, 'cl' )
# if we are starting to merge or if this one is the first in what is
# probably a series of merges then start the vertical merging
if cell.StartVerticalMerge or (cell.VerticalMerge and not vmerge[ idx ]) :
settings.append( 'clvmgf' )
vmerge[ idx ] = True
elif cell.VerticalMerge :
#..continuing a merge
settings.append( 'clvmrg' )
else :
#..no merging going on so make sure that it is off
vmerge[ idx ] = False
# for any cell in the next row that is covered by this span we
# need to turn off the vertical merging as we don't want them
# merging up into this spanned cell
for vmerge_idx in range( idx + 1, idx + cell.Span ) :
vmerge[ vmerge_idx ] = False
settings.append( CellAlignmentMap[ cell.Alignment ] )
settings.append( CellFlowMap[ cell.Flow ] )
# this terminates the definition of a cell and represents the right most edge of the cell from the left margin
settings.append( cellx[ | |
import matplotlib.pyplot as plt
import numpy as np
import heapq
import os
import copy
from itertools import cycle
from itertools import combinations
#%% Functions header
#------------------------------------
#----------- Functions --------------
#------------------------------------
#%% get_indexes_max_n_values
# Get indexes for n maximum values
def get_indexes_max_n_values(my_list, count):
max_values = heapq.nlargest(count, my_list)
max_values = list(set(max_values)) #Use unique values only
indexes = []
for i in range(0, len(max_values)):
for j in range(0, len(my_list)):
if max_values[i] == my_list[j]:
indexes.append(j)
return indexes
#%% limit_down
# Limit number down to minimum value
def limit_down(num, min_value):
return max(num, min_value)
#%% limit_up
# Limit number up to maximum value
def limit_up(num, max_value):
return min(num, max_value)
#%% limit_up_down
# Limit number to between minimum and maximum values
def limit_up_down(num, min_value, max_value):
return (min(max(num, min_value), max_value))
#%% get_input
# Input helper - returns default (typed) value upon no input
def get_input(my_text, default, my_type=None):
if default != '':
my_text = my_text + '[' + str(default) + ']: '
else:
my_text = my_text + '[?]: '
the_input = input(my_text)
if the_input == '' and default != None:
the_input = default
if my_type == 'int':
the_input = int(the_input)
elif my_type == 'float':
the_input = np.float64(the_input)
else:
the_input = str(the_input)
return the_input
#%% get_distribution_text
def get_distribution_text(value):
if value == 0:
return 'none'
elif value == 1:
return 'uniform'
elif value == 2:
return 'normal'
elif value == 3:
return 'poisson'
#%% moving_average
def moving_average(values, window_size):
window = min(int(window_size), len(values))
y_avg = np.convolve(values, np.ones(window)/window)
return y_avg[:len(values)]
#%% calc_geometric_mean
# Geometric mean (overflow resistant) [https://stackoverflow.com/posts/43099751/revisions]
def calc_geometric_mean(values):
try:
n = len(values)
except:
print('Error: "def calc_geometric_mean(values)": One dimensional list or array required.')
return 0.0
geometric_mean = 0.0
for j in range(0, n):
geometric_mean += np.log(values[j])
try:
geometric_mean = np.exp(geometric_mean/n)
except:
print('Error: "def calc_geometric_mean(values)": Division by zero, zero length array/list provided.')
return geometric_mean
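# Illustrative check: calc_geometric_mean([1.0, 100.0]) returns 10.0, since
# exp((ln(1) + ln(100)) / 2) = exp(ln(10)) = 10.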
#%% Classes header
#------------------------------------
#----------- Classes ----------------
#------------------------------------
#%% Class: ERROR
# Base class for other exceptions
class ERROR(Exception):
pass
#%% Class: COUNTER
class COUNTER:
def __init__(self, initial_value=0, increment=1):
self.new = np.int64(initial_value)
self.old = np.int64(initial_value)
self.initial_value = np.int64(initial_value)
self.increment = np.int64(increment)
def reset(self):
self.new = self.initial_value
self.old = self.initial_value
def incr(self):
self.old = self.new
self.new += self.increment
return self.old
def decr(self):
self.old = self.new
self.new -= self.increment
return self.old
def val(self):
return self.old
#%% Class: DIFFICULTY_LWMA_00
#Linear Weighted Moving Average - Basic
class DIFFICULTY_LWMA_00:
def __init__(self, difficulty_window):
self.difficulty_window = abs(difficulty_window)
print(' ----- Using difficulty algorithm: LWMA Basic -----')
def adjust_difficulty(self, difficulties, acc_difficulties, solve_times, target_time, current_time, previous_time_stamp):
#Not used: current_time, previous_time_stamp
n = self.difficulty_window if len(solve_times) > self.difficulty_window else len(solve_times)
avg_diff = np.mean(difficulties[len(difficulties) - n:])
_sum = 0
denom = 0
for i in range(len(solve_times) - n, len(solve_times)):
_sum += (solve_times[i] * (i + 1))
denom += (i + 1)
return np.float64(avg_diff * (target_time / (_sum / denom)))
#%% Class: DIFFICULTY_LWMA_01_20171206
#Linear Weighted Moving Average - Bitcoin & Zcash Clones - 2017-12-06
#(https://github.com/zawy12/difficulty-algorithms/issues/3#issue-279773112)
class DIFFICULTY_LWMA_01_20171206:
def __init__(self, difficulty_window):
self.difficulty_window = abs(difficulty_window)
print(' ----- Using difficulty algorithm: LWMA-1 version 2017-12-06 -----')
def adjust_difficulty(self, difficulties, acc_difficulties, solve_times, target_time, current_time, previous_time_stamp):
#Not used: current_time, previous_time_stamp
N = self.difficulty_window if len(solve_times) > self.difficulty_window else len(solve_times)
L = 0
T = target_time
for i in range(1, N + 1):
j = len(solve_times) - N + (i - 1) #only the last portion of the list must be indexed
L += i * min(6*T, solve_times[j])
if L < N*N*T/20:
L = N*N*T/20
if N > 1:
avg_D = limit_down((acc_difficulties[-1] - acc_difficulties[len(acc_difficulties) - N]) / N, \
acc_difficulties[0])
else:
avg_D = acc_difficulties[0]
#The original algo uses an if statement here that resolves to the same answer
next_D = (avg_D / (200 * L)) * (N * (N + 1) * T * 99)
return np.float64(next_D)
#%% Class: DIFFICULTY_LWMA_01_20181127
#Linear Weighted Moving Average - Bitcoin & Zcash Clones - 2018-11-27
#( https://github.com/zawy12/difficulty-algorithms/issues/3#issuecomment-442129791 )
#( https://github.com/tari-project/tari/blob/development/base_layer/core/src/proof_of_work/lwma_diff.rs )
class DIFFICULTY_LWMA_01_20181127:
def __init__(self, difficulty_window):
self.difficulty_window = abs(difficulty_window)
print(' ----- Using difficulty algorithm: LWMA-1 version 2018-11-27 -----')
def adjust_difficulty(self, difficulties, acc_difficulties, solve_times, target_time, current_time, previous_time_stamp):
#Not used: current_time, previous_time_stamp
n = self.difficulty_window if len(solve_times) > self.difficulty_window else len(solve_times)
k = np.float64(n * (n + 1) * target_time / 2)
weighted_times = np.float64(0)
j = np.float64(0)
for i in range(len(solve_times) - n, len(solve_times)):
solve_time = np.float64(min(6 * target_time, solve_times[i]))
j += 1
weighted_times += solve_time * j
if n > 1:
ave_difficulty = limit_down((acc_difficulties[-1] -
acc_difficulties[len(acc_difficulties) - n]) / n, acc_difficulties[0])
else:
ave_difficulty = acc_difficulties[0]
target = ave_difficulty * k / weighted_times
return np.float64(target)
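# Illustrative use (placeholder values, not from a real chain): with a
# 90-block window and a 120 s block target,
# lwma = DIFFICULTY_LWMA_01_20181127(90)
# next_D = lwma.adjust_difficulty(difficulties, acc_difficulties, solve_times,
# 120, current_time, previous_time_stamp)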
#%% Class: DIFFICULTY_TSA_20181108
#TSA, Time Stamp Adjustment to Difficulty, Copyright (c) 2018 Zawy, MIT License.
#( https://github.com/zawy12/difficulty-algorithms/issues/36 )
class DIFFICULTY_TSA_20181108:
def __init__(self, difficulty_window):
self.difficulty_window = abs(difficulty_window)
self.k = np.float64(1E3)
self.M = np.float64(6.5) # M can range from 3 (aggressive) to 5 (conservative) to 10 (slow)
self.lwma = DIFFICULTY_LWMA_01_20181127(self.difficulty_window)
print(' with')
print(' ----- Using difficulty algorithm: TSA version 2018-11-08 [(c) 2018 Zawy]-----')
def adjust_difficulty(self, difficulties, acc_difficulties, solve_times, target_time, current_time, previous_time_stamp):
TSA_D = self.lwma.adjust_difficulty(difficulties, acc_difficulties, solve_times, target_time, current_time,
previous_time_stamp)
TM = target_time*self.M
exk = self.k
current_solve_time_estimate = max(1, max(solve_times[-1], current_time - previous_time_stamp))
solve_time = np.float64(min(current_solve_time_estimate, 6*target_time))
for i in range(1, np.int64(np.ceil(solve_time / TM))):
exk = (exk * np.float64(2.718 * self.k)) / self.k
f = solve_time % np.ceil(TM)
exk = (exk * (self.k + (f * (self.k + (f * (self.k + (f * self.k) / (3 * TM))) / (2 * TM))) / (TM))) / self.k
TSA_D = np.float64(max(10.0, TSA_D * ((1000.0 * (self.k * solve_time)) /
(self.k * target_time + (solve_time - target_time) * exk)) / 1000.0))
j = np.float64(1000000000.0)
while j > 1:
if TSA_D > j * 100.0:
TSA_D = ((TSA_D + j / 2.0) / j) * j
break
else:
j /= 10.0
if self.M <= 1.0:
TSA_D = (TSA_D * 85.0) / 100.0
elif self.M <= 2.0:
TSA_D = (TSA_D * 95.0) / 100.0
elif self.M <= 3.0:
TSA_D = (TSA_D * 99.0) / 100.0
return np.float64(TSA_D)
#%% Class: RANDOM_FUNC
class RANDOM_FUNC:
def __init__(self, randomness, distribution, name, owner):
self.randomness = limit_up_down(randomness, 0, 0.9)
self.rand_down = (1 - self.randomness)
self.rand_up = (1 + self.randomness)
if str(distribution) == 'none':
self.distribution = 'none'
print(' ----- %s: Randomness %s => none\n' % (name, owner))
else:
if str(distribution) == 'poisson':
self.distribution = 'poisson'
print(' ----- %s: Randomness %s => poisson distribution\n' % (name, owner))
elif self.randomness > 0:
if str(distribution) == 'normal': #'uniform' or 'normal' or 'poisson'
self.distribution = 'normal'
print(' ----- %s: Randomness %s => normal distribution, at %s\n' % (name, owner, self.randomness * 100))
elif str(distribution) == 'uniform':
self.distribution = 'uniform'
print(' ----- %s: Randomness %s => uniform distribution, at +/- %s\n' % (name, owner, self.randomness * 100))
else:
self.distribution = 'none'
print(' ----- %s: Randomness %s => none\n' % (name, owner))
else:
self.distribution = 'none'
print(' ----- %s: Randomness %s => none\n' % (name, owner))
def get_value(self, value):
if self.distribution == 'normal':
value = np.random.normal(value, value*self.randomness, 1)
elif self.distribution == 'poisson':
value = np.random.poisson(value, 1)
elif self.distribution == 'uniform':
value = np.random.uniform(value*self.rand_down, value*self.rand_up)
return np.float64(value)
#%% Class: HASH_RATE
class HASH_RATE:
def __init__(self, initial_hash_rate, profile, randomness, dist, name):
# 'initial_hash_rate': initial hash rate assigned to algo, also the default hash rate
# 'profile' : [ [[start_block_number_1, end_block_number_1], [start_ratio_1, end_ratio_1]], \
# [[start_block_number_2, end_block_number_2], [start_ratio_2, end_ratio_2]] ]
# ratio is a factor of the initial hash rate
# block count starts after init phase ends
# 'randomness' : randomness factor with which the given hash rate can change
# 'dist : randomness distribution type
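# Example profile (illustrative only): ramp from 1x to 2x of the initial
# hash rate over blocks 0-100, then hold 2x until block 200:
# profile = [ [[0, 100], [1.0, 2.0]], [[100, 200], [2.0, 2.0]] ]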
self.rand = RANDOM_FUNC(randomness, dist, name, 'hash_rate')
self.initial_hash_rate = abs(initial_hash_rate) #Only positive values
self.min_randomness = 1
self.profile = copy.deepcopy(profile)
self.values = [self.initial_hash_rate for i in range(0, self.profile[-1][0][1]+1)]
self.init = True
self.n = 0
#Hash rate from profile
for i in range(0, len(self.profile)):
for j in range(self.profile[i][0][0], self.profile[i][0][1]):
delta = (self.profile[i][1][1] - self.profile[i][1][0]) / (self.profile[i][0][1] - self.profile[i][0][0])
self.values[j] = self.initial_hash_rate * self.profile[i][1][0] + \
self.initial_hash_rate * (j - self.profile[i][0][0] + 1) * delta
self.values[-1] = self.initial_hash_rate * self.profile[-1][1][1]
def get_hash_rate(self, block_number, init):
#Transition from init to not init
if init == False and self.init == True:
self.init = False
self.n = block_number
#Init phase
if self.init == True:
return self.initial_hash_rate
#Run phase
else:
n = block_number - self.n
#Get hash rate + bounds check
if len(self.values) - 1 > n:
hash_rate = self.values[n]
else:
hash_rate = self.values[-1]
#Apply randomness
hash_rate | |
import atexit
import inspect
import os.path
import shutil
import tempfile
from dectate import Action
from webassets import Bundle, Environment
class Asset:
"""Represents a registered asset which points to one or more files or
child-assets.
"""
__slots__ = ("name", "assets", "filters")
def __init__(self, name, assets, filters):
self.name = name
self.assets = assets
self.filters = filters
def __eq__(self, other):
return (
self.name == other.name
and self.assets == other.assets
and self.filters == other.filters
)
@property
def is_pure(self):
"""Returns True if this asset is "pure".
Pure assets are assets which consist of a single file or a set of
files which share one common extension.
"""
if self.is_single_file:
return True
extensions = {a.split(".")[-1] for a in self.assets}
extensions |= {None for a in self.assets if "." not in a}
return len(extensions) == 1 and None not in extensions
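# For example (illustrative values): Asset("jquery", ("jquery.js",), None) is
# pure and single-file, while an asset grouping "common.js" and "common.css"
# is not pure because the file extensions differ.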
@property
def is_single_file(self):
""" Returns True if this repesents a single file asset. """
return len(self.assets) == 1 and "." in self.assets[0]
@property
def path(self):
""" Returns the path to the single file asset if possible. """
assert self.is_single_file
return self.assets[0]
@property
def extension(self):
""" Returns the extension of this asset if it's a pure asset. """
if self.is_pure:
return self.assets[0].split(".")[-1]
class WebassetRegistry:
""" A registry managing webasset bundles registered through directives. """
def __init__(self):
#: A list of all paths which should be searched for files (in order)
self.paths = []
#: The default filters for extensions. Each extension has a webassets
#: filter string associated with it. (e.g. {'js': 'rjsmin'})
self.filters = {}
#: The extension the filter at self.filters[key] produces
self.filter_product = {}
#: :class:`Asset` objects keyed by their name
self.assets = {}
#: The output path for all bundles (a temporary directory by default)
self.output_path = temporary_directory = tempfile.mkdtemp()
atexit.register(shutil.rmtree, temporary_directory)
#: A cache of created bundles
self.cached_bundles = {}
#: The url passed to the webasset environment
self.url = "assets"
#: more.webasset only publishes js/css files - other file extensions
#: need to be compiled into either and mapped accordingly
self.mapping = {
"coffee": "js",
"dust": "js",
"jst": "js",
"jsx": "js",
"less": "css",
"sass": "css",
"scss": "css",
"ts": "js",
}
def register_path(self, path):
"""Registers the given path as a path to be searched for files.
The paths are prepended, so each new path has higher precedence than
all the already registered paths.
"""
assert os.path.isabs(path), "absolute paths only"
self.paths.insert(0, os.path.normpath(path))
def register_filter(self, name, filter, produces=None):
"""Registers a filter, overriding any existing filter of the same
name.
"""
self.filters[name] = filter
self.filter_product[name] = produces or name
def register_asset(self, name, assets, filters=None):
"""Registers a new asset."""
assert "." not in name, f"asset names may not contain dots ({name})"
# keep track of asset bundles
self.assets[name] = Asset(name=name, assets=assets, filters=filters)
# and have one additional asset for each file
for asset in assets:
basename = os.path.basename(asset)
# files are entries with an extension
if "." in basename:
path = os.path.normpath(self.find_file(asset))
self.assets[basename] = Asset(
name=basename, assets=(path,), filters=filters
)
else:
assert asset in self.assets, f"unknown asset {asset}"
def find_file(self, name):
""" Searches for the given file by name using the current paths. """
if os.path.isabs(name):
return name
searched = set()
for path in self.paths:
if path in searched:
continue
target = os.path.join(path, name)
if os.path.isfile(target):
return target
searched.add(path)
raise LookupError(f"Could not find {name} in paths")
def merge_filters(self, *filters):
"""Takes a list of filters and merges them.
The last filter has the highest precedence.
"""
result = {}
for filter in filters:
if filter:
result.update(filter)
return result
def get_bundles(self, name, filters=None):
""" Yields all the bundles for the given name (an asset). """
assert name in self.assets, f"unknown asset {name}"
assert self.output_path, "no webasset_output path set"
asset = self.assets[name]
overriding_filters = self.merge_filters(asset.filters, filters)
all_filters = self.merge_filters(self.filters, asset.filters, filters)
if asset.is_pure:
if asset.is_single_file:
files = (asset.path,)
else:
files = (a.path for a in (self.assets[a] for a in asset.assets))
extension = self.mapping.get(asset.extension, asset.extension)
assert extension in ("js", "css")
yield Bundle(
*files,
filters=self.get_asset_filters(asset, all_filters),
output=f"{name}.bundle.{extension}",
)
else:
for sub in (self.assets[a] for a in asset.assets):
yield from self.get_bundles(sub.name, overriding_filters)
def get_asset_filters(self, asset, filters):
""" Returns the filters used for the given asset. """
if not asset.is_pure:
return None
def append_filter(item):
str_classes = ("".__class__, b"".__class__, "".__class__)
if isinstance(item, str_classes):
bundle_filters.append(item)
else:
bundle_filters.extend(item)
bundle_filters = []
if filters.get(asset.extension) is not None:
append_filter(filters[asset.extension])
# include the filters for the resulting file to produce a chain
# of filters (for example React JSX -> Javascript -> Minified)
product = self.filter_product.get(asset.extension)
if product and product != asset.extension and product in filters:
append_filter(filters[product])
return bundle_filters
def get_environment(self):
""" Returns the webassets environment, registering all the bundles. """
debug = os.environ.get("MORE_WEBASSETS_DEBUG", "").lower().strip() in (
"true",
"1",
)
env = Environment(
directory=self.output_path,
load_path=self.paths,
url=self.url,
debug=debug,
)
for asset in self.assets:
bundles = tuple(self.get_bundles(asset))
js = tuple(b for b in bundles if b.output.endswith(".js"))
css = tuple(b for b in bundles if b.output.endswith(".css"))
if js:
js_bundle = (
len(js) == 1 and js[0] or Bundle(*js, output=f"{asset}.bundle.js")
)
else:
js_bundle = None
if css:
css_bundle = (
len(css) == 1
and css[0]
or Bundle(*css, output=f"{asset}.bundle.css")
)
else:
css_bundle = None
if js_bundle and css_bundle:
js_bundle.next_bundle = asset + "_1"
env.register(asset, js_bundle)
env.register(asset + "_1", css_bundle)
elif js_bundle:
env.register(asset, js_bundle)
else:
env.register(asset, css_bundle)
return env
class PathMixin:
def absolute_path(self, path):
if os.path.isabs(path):
return path
else:
return os.path.join(os.path.dirname(self.code_info.path), path)
class WebassetPath(Action, PathMixin):
"""Registers a path with more.webassets.
Registered paths are searched for assets registered::
@App.webasset_path()
def get_asset_path():
return 'assets/js' # relative to the directory of the code file
@App.webasset('jquery.js')
def get_jquery_asset():
yield 'jquery.js' # expected to be at assets/js/jquery.js
Registered paths accumulate: you cannot override existing paths, but each
newly added path takes precedence over the ones registered before it
(think ``PATH=/new/path:$PATH``).
Therefore paths registered first are searched last and paths registered
by a parent class are searched after paths registered by the child class.
"""
config = {"webasset_registry": WebassetRegistry}
def identifier(self, webasset_registry):
return object()
def absolute_path(self, path):
if os.path.isabs(path):
return path
else:
return os.path.abspath(
os.path.join(os.path.dirname(self.code_info.path), path)
)
def perform(self, obj, webasset_registry):
path = self.absolute_path(obj())
assert os.path.isdir(path), f"'{path}' does not exist"
webasset_registry.register_path(self.absolute_path(obj()))
class WebassetOutput(Action, PathMixin):
"""Sets the output path for all bundles.
For example::
@App.webasset_output()
def get_output_path():
return 'assets/bundles'
"""
group_class = WebassetPath
def identifier(self, webasset_registry):
return self.__class__
def perform(self, obj, webasset_registry):
webasset_registry.output_path = self.absolute_path(obj())
class WebassetFilter(Action):
"""Registers a default filter for an extension.
Filters are strings interpreted by `webasset`::
@App.webasset_filter('js')
def get_js_filter():
return 'rjsmin'
@App.webasset_filter('scss', produces='css')
def get_scss_filter():
return 'pyscss'
For a list of available filters see
`<http://webassets.readthedocs.org/en/latest/builtin_filters.html>`_.
The ``produces`` argument indicates that a given filter produces a new
extension. This will be used to push the file resulting from the filter
into whatever filter is registered for the resulting extension. This can
be used to chain filters (i.e. Coffeescript -> Javascript -> Minified).
"""
group_class = WebassetPath
def __init__(self, name, produces=None):
self.name = name
self.produces = produces
def identifier(self, webasset_registry):
return self.name
def perform(self, obj, webasset_registry):
webasset_registry.register_filter(self.name, obj(), self.produces)
class WebassetMapping(Action):
"""Maps an extension to either css or js.
You usually don't have to use this, as more.webassets comes with default
values. If you do, please open an issue so your mapping may be added
to more.webassets.
Example::
@App.webasset_mapping('jsx')
def get_jsx_mapping():
return 'js'
@App.webasset_mapping('less')
def get_jsx_mapping():
return 'css'
"""
group_class = WebassetPath
def __init__(self, name):
self.name = name
def identifier(self, webasset_registry):
return self.name
def perform(self, obj, webasset_registry):
webasset_registry.mapping[self.name] = obj()
class WebassetUrl(Action):
"""Defines the url under which the bundles should be served.
Passed to the webasset environment, this is basically a url path prefix::
@App.webasset_url()
def get_webasset_url():
return 'my-assets'
Defaults to 'assets'.
"""
group_class = WebassetPath
def identifier(self, webasset_registry):
return self.__class__
def perform(self, obj, webasset_registry):
webasset_registry.url = obj()
class Webasset(Action):
"""Registers an asset which may then be included in the page.
For example::
@App.webasset('tooltip')
def get_tooltip_asset():
yield 'tooltip.js'
yield 'tooltip.css'
Assets may be included by using
:meth:`more.webassets.core.IncludeRequest.include`::
@App.view(model=Model)
def view_model(self, request):
request.include('tooltip')
Asset functions must be generators. They may include a mixed set of
assets. So | |
# src/cosmicats/popgen.py
"""A collection of methods to generating astrophysical populations"""
import numpy as np
import pandas as pd
import astropy.coordinates as coord
from astropy.coordinates import SkyCoord
from astropy.table import Table
from astropy import units as u
from cosmic.evolve import Evolve
from cosmicats import utils
__all__ = ['get_star_3d_positions', 'get_sfh_stars', 'metallicity_dependent_binary_fraction',
'metallicity_dependent_single_fraction', 'match_metallicities', 'get_simulated_data_stats',
'get_formation_efficiency', 'get_simulated_matches', 'filter_sim_set',
'get_evolved_systesm', 'sample_stars', 'connect_simulations_to_stars']
def get_star_3d_positions(stars):
"""Uses astropy to get cartesian Galactocentric coordinates and
distances to stars of interest
Parameters
----------
stars : `DataFrame`
Dataset containing radii
Returns
-------
stars : `DataFrame`
Input dataset with added columns for cartesian Galactocentric
coordinates and distance
"""
phi = np.random.uniform(0, 2 * np.pi, len(stars))
theta = np.pi - np.arccos(np.random.uniform(-1, 1, len(stars)))
stars['X'] = stars['R'] * np.cos(phi) * np.sin(theta)
stars['Y'] = stars['R'] * np.sin(phi) * np.sin(theta)
stars['Z'] = stars['R'] * np.cos(theta)
c = SkyCoord(x=np.array(stars.X) * u.kpc,
y=np.array(stars.Y) * u.kpc,
z=np.array(stars.Z) * u.kpc,
frame=coord.Galactocentric)
stars['dist'] = c.transform_to(coord.ICRS).distance.to(u.kpc)
return stars
def get_sfh_stars(sfh_model):
"""Generates a dataset of stars with positions, ages, and metallicities
according to your model of choice
Parameters
----------
sfh_model : `str`
model assumed for stellar ages and positions as a function of metallicity
current models include:
'Frankel19' : positions and ages from Frankel+2019
Returns
-------
star_sample : `DataFrame`
dataset of stars with positions, ages, and metallicities
according to specified model
"""
if sfh_model == 'Frankel19':
sfh_read = './2021-02-16_Breivik_mockmw.fits'
star_sample = Table.read(sfh_read).to_pandas()
star_sample = get_star_3d_positions(stars=star_sample)
star_sample['met_stars'] = utils.get_Z_from_FeH(star_sample['FeH'], Z_sun=0.014)
else:
raise ValueError("We only support sfh_model='Frankel19' at this time. Sorry bout it!")
return star_sample
def metallicity_dependent_binary_fraction(met):
"""Computes the binary, and single by way of only having
single stars and binary systems, fraction of a population
with Z=met following Moe+2019
Parameters
----------
met : `float`
metallicity of the population
Returns
-------
f_b : `float`
binary fraction
"""
Fe_H = utils.get_FeH_from_Z(met, Z_sun=0.014)
if type(met) == float:
if Fe_H <= -1.0:
f_b = -0.0648 * Fe_H + 0.3356
else:
f_b = -0.1977 * Fe_H + 0.2025
else:
f_b = np.zeros(len(Fe_H))
ind_lo, = np.where(Fe_H <= -1.0)
ind_hi, = np.where(Fe_H > -1.0)
if len(ind_lo) > 0:
f_b[ind_lo] = -0.0648 * Fe_H[ind_lo] + 0.3356
if len(ind_hi) > 0:
f_b[ind_hi] = -0.1977 * Fe_H[ind_hi] + 0.2025
f_s = 1 - f_b
return f_b
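# Worked example (follows directly from the fit above): at solar metallicity,
# Fe/H = 0 > -1, so f_b = -0.1977 * 0 + 0.2025 = 0.2025, i.e. a binary
# fraction of roughly 20% under this fit.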
def metallicity_dependent_single_fraction(met):
"""Computes the binary, and single by way of only having
single stars and binary systems, fraction of a population
with Z=met following Moe+2019
Parameters
----------
met : `float`
metallicity of the population
Returns
-------
f_s : `float`
single fraction
"""
Fe_H = utils.get_FeH_from_Z(met, Z_sun=0.014)
if type(met) == float:
if Fe_H <= -1.0:
f_b = -0.0648 * Fe_H + 0.3356
else:
f_b = -0.1977 * Fe_H + 0.2025
else:
f_b = np.zeros(len(Fe_H))
ind_lo, = np.where(Fe_H <= -1.0)
ind_hi, = np.where(Fe_H > -1.0)
if len(ind_lo) > 0:
f_b[ind_lo] = -0.0648 * Fe_H[ind_lo] + 0.3356
if len(ind_hi) > 0:
f_b[ind_hi] = -0.1977 * Fe_H[ind_hi] + 0.2025
f_s = 1 - f_b
return f_s
def match_metallicities(met_list, met_stars):
"""Matches the metallicities of Neige's star samples to
the metallicity bins of Katie's simulated binary populations
such that every stellar metallicity is assigned one-to-one
to the closest metallicity bin
Parameters
----------
met_list : list
List of metallicity bins for simulated binary population
met_stars : array
Array of metallicities from stars sampled from Frankel disk model
Returns
-------
inds : array
Array giving the index of met_list for each of met_stars to make a
one-to-one match between the two.
"""
diff = []
for met in met_list:
diff.append(np.abs(np.array(met_stars) - met))
diff = np.vstack(diff)
inds = np.argmin(diff, axis=0)
return inds
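# Illustrative example: with met_list = [0.0002, 0.02] and
# met_stars = [0.019, 0.0003], the returned indices are [1, 0], i.e. each
# star is matched to the nearest metallicity bin of the simulation grid.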
def get_simulated_data_stats(path, metallicity, var):
"""Gathers the number of simulated systems and the total simulated ZAMS
mass, including companions if appropriate, to compute the formation number
of a given stellar system type per unit mass
Parameters
----------
path : `str`
path to where the data is stored
metallicity : `float`
metallicity of simulated systems along metallicity grid
var : `str`
specifies the secondary mass variation
Returns
-------
xi : `float`
formation number per unit ZAMS mass of the stellar system
"""
if 'single' in path:
filename = 'singles.h5'
bcm = pd.read_hdf(path + str(metallicity) + '/' + filename, key='bcm')
n_sys = np.max(pd.read_hdf(path + str(metallicity) + '/' + filename, key='n_sim'))[0]
elif 'binary' in path:
filename = 'binaries_{}.h5'.format(var)
bcm = pd.read_hdf(path + str(metallicity) + '/' + filename, key='bcm')
n_sys = np.max(pd.read_hdf(path + str(metallicity) + '/' + filename, key='n_sim'))[0]
elif 'BH' in path:
### NOTE ### : BH_LCs are assumed to have a binary fraction of 70%
############ and the n_stars contains all of the stars, from single
############ stars and binary systems, to produce the BH-LC population
filename = 'dat_kstar1_14_kstar2_0_9_SFstart_13700.0_SFduration_0.0_metallicity_' + str(metallicity) + '.h5'
bpp = pd.read_hdf(path + '/' + var + '/' + filename, key='bcm')
AIC_bin_nums = bpp.loc[bpp.kstar_1 == 13].bin_num.unique()
bcm = pd.read_hdf(path + '/' + var + '/' + filename, key='bcm')
# bcm = bcm.loc[~bcm.bin_num.isin(AIC_bin_nums)]
n_sys = np.max(pd.read_hdf(path + '/' + var + '/' + filename, key='n_stars'))[0]
# xi is the number of unique systems divided by the total
# number of simulated stars
xi = len(bcm.bin_num.unique()) / n_sys
return xi
def get_formation_efficiency(mets, path, var, sys_type, f_b=None):
"""Reads in saved data containing relative formation number
per number of samples form initial conditions for each metallicity
it met grid and system type: sys_type
if not available, reads in cosmic data to get this statistic and
saves that info for future use to cut on computation time
Parameters
----------
mets : `list of lists`
list of metallicities in simulation grid for passed system type
path : `list`
list containing path to simulated data for passed system type
var : `str`
specifies the model parameter variation
sys_type : `int`
singles = 0; binaries = 1; bh binaries = 2;
f_b : `float/list of lists`
binary fraction to weight single stars against binary stars
Returns
-------
xi : `ndarray`
relative formation number per total population size for each
metallicity in simulation grid for each system type
"""
# get relative formation number per unit solar mass for each metallicity
xi = []
if sys_type == 0:
if f_b == None:
weights = metallicity_dependent_single_fraction(mets)
else:
f_s = 1 - f_b
weights = len(mets) * [f_s]
if sys_type == 1:
if f_b == None:
weights = metallicity_dependent_binary_fraction(mets)
else:
weights = len(mets) * [f_b]
if sys_type == 2:
# This is already taken care of in the simulation which
# assumes close massive binaries have a binary fraction of 0.7
weights = np.ones_like(mets)
for met, weight in zip(mets, weights):
xi.append(weight * get_simulated_data_stats(path=path, metallicity=met, var=var))
xi = np.array(xi, dtype=object)
return xi
def get_simulated_matches(path, met, sample_to_match, pop_var):
"""Selects initial conditions from cosmic data to match to star sample
Parameters
----------
path : `str`
path to cosmic data
met : `float`
metallicity of cosmic data file
sample_to_match : `DataFrame`
A dataframe containing a population of stars with
metallicities, ages, and positions
pop_var : `int or str`
Can be supplied for populations where sys_type is the same but the
population is varied in some way, like if qmin is different. If no
variants, pop_var = 0
Returns
-------
initC_dat_sample : `DataFrame`
cosmic initial conditions with assigned ages, positions, and metallicities
"""
# read in the simulated binary data that has metallicities which
# are matched to sub_sample_sys_met
sim_dat, initC_dat = utils.sim_data_read(path=path, metallicity=met, var=pop_var)
initC_dat['acc_lim'] = -1
initC_dat['don_lim'] = -1
# sample the same number of systems from sim_dat as sub_sample_sys_met
initC_dat_sample = initC_dat.sample(len(sample_to_match), replace=True)
initC_dat_sample = pd.concat([initC_dat_sample.reset_index(drop=True), sample_to_match.reset_index(drop=True)],
axis=1)
initC_dat_sample['assigned_age'] = np.array(sample_to_match['AGE'].values) * 1000
return initC_dat_sample
def filter_sim_set(sim_set, lifetime_interp):
"""Filter out systems based on star types and coarse single star lifetime
assumptions to reduce the data size of stellar systems to evolve to the present
Parameters
----------
sim_set : `DataFrame`
Dataframe containing initial conditions of cosmic data with ages
lifetime_interp : `scipy.interpolate.interp1d`
interpolation for single star lifetime as a function of mass
for the population metallicity
Returns
-------
| |
# tutorials/autotvm/tune_simple_template.py
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""
Writing tunable template and Using auto-tuner
=============================================
**Author**: `<NAME> <https://github.com/merrymercy>`_
This is an introduction tutorial to the auto-tuning module in TVM.
There are two steps in auto-tuning.
The first step is defining a search space.
The second step is running a search algorithm to explore through this space.
In this tutorial, you can learn how to perform these two steps in TVM.
The whole workflow is illustrated by a matrix multiplication example.
"""
######################################################################
# Install dependencies
# --------------------
# To use autotvm package in TVM, we need to install some extra dependencies.
# (change "3" to "2" if you use python2):
#
# .. code-block:: bash
#
# pip3 install --user psutil xgboost
#
# To make TVM run faster in tuning, it is recommended to use cython
# as FFI of TVM. In the root directory of TVM, execute
# (change "3" to "2" if you use python2):
#
# .. code-block:: bash
#
# pip3 install --user cython
# sudo make cython3
#
# Now return to python code. Import packages.
import logging
import sys
import numpy as np
import tvm
# the module is called `autotvm`
from tvm import autotvm
######################################################################
# Step 1: Define the search space
# --------------------------------
# In this section, we will rewrite a deterministic TVM schedule code to a
# tunable schedule template. You can regard the process of search space definition
# as the parameterization of our existing schedule code.
#
# To begin with, here is how we implement a blocked matrix multiplication in TVM.
# Matmul V0: Constant tiling factor
def matmul_v0(N, L, M, dtype):
A = tvm.placeholder((N, L), name='A', dtype=dtype)
B = tvm.placeholder((L, M), name='B', dtype=dtype)
k = tvm.reduce_axis((0, L), name='k')
C = tvm.compute((N, M), lambda i, j: tvm.sum(A[i, k] * B[k, j], axis=k), name='C')
s = tvm.create_schedule(C.op)
# schedule
y, x = s[C].op.axis
k = s[C].op.reduce_axis[0]
yo, yi = s[C].split(y, 8)
xo, xi = s[C].split(x, 8)
s[C].reorder(yo, xo, k, yi, xi)
return s, [A, B, C]
#####################################################################
# Parametrize the schedule
# ^^^^^^^^^^^^^^^^^^^^^^^^
# In the previous schedule code, we use a constant "8" as tiling factor.
# However, it might not be the best one because the best tiling factor depends
# on real hardware environment and input shape.
#
# If you want the schedule code to be portable across a wider range of input shapes
# and target hardware, it is better to define a set of candidate values and
# pick the best one according to the measurement results on target hardware.
#
# In autotvm, we can define a tunable parameter, or a "knob" for such kind of value.
# Matmul V1: List candidate values
@autotvm.template # 1. use a decorator
def matmul_v1(N, L, M, dtype):
A = tvm.placeholder((N, L), name='A', dtype=dtype)
B = tvm.placeholder((L, M), name='B', dtype=dtype)
k = tvm.reduce_axis((0, L), name='k')
C = tvm.compute((N, M), lambda i, j: tvm.sum(A[i, k] * B[k, j], axis=k), name='C')
s = tvm.create_schedule(C.op)
# schedule
y, x = s[C].op.axis
k = s[C].op.reduce_axis[0]
# 2. get the config object
cfg = autotvm.get_config()
# 3. define search space
cfg.define_knob("tile_y", [1, 2, 4, 8, 16])
cfg.define_knob("tile_x", [1, 2, 4, 8, 16])
# 4. schedule according to config
yo, yi = s[C].split(y, cfg['tile_y'].val)
xo, xi = s[C].split(x, cfg['tile_x'].val)
s[C].reorder(yo, xo, k, yi, xi)
return s, [A, B, C]
###############################################################################
# Here we make four modifications to the previous schedule code and get
# a tunable "template". We can explain the modifications one by one.
#
# 1. Use a decorator to mark this function as a simple template
# 2. Get a config object:
# You can regard this :code:`cfg` as an argument of this function but
# we obtain it in a different way. With this argument, this function is no longer
# a deterministic schedule code. Instead, we can pass different configurations to
# this function and get different schedules, so this function is a "template".
#
# To make the template function more compact, we do two things in a single function.
# (1) define a search space and (2) schedule according to an entity in this space.
# To achieve this, we make :code:`cfg` be either
# a :any:`ConfigSpace` or a :any:`ConfigEntity` object.
#
# When it is a :any:`ConfigSpace`, it will collect all tunable knobs in this function and
# build the search space.
# When it is a :any:`ConfigEntity`, it will ignore all space definition API
# (namely, :code:`cfg.define_XXXXX(...)`). Instead, it stores deterministic values for
# all tunable knobs, and we schedule according to these values.
#
# During auto-tuning, we will first call this template with a :any:`ConfigSpace`
# object to build the search space. Then we call this template with different :any:`ConfigEntity`
# in the built space to get different schedules. Finally we will measure the code generated by
# different schedules and pick the best one.
#
# 3. Define two tunable knobs. The first one is :code:`tile_y` with
# 5 possible values. The second one is :code:`tile_x` with a same
# list of possible values. These two knobs are independent, so they
# span a search space with size = 5x5 = 25
# 4. Schedule according to the deterministic values in :code:`cfg`
#
#####################################################################
# Use better space definition API
# ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
# In the previous template, we manually list all possible values for a knob.
# This is the lowest level API to define the space.
# However, we also provide another set of API to make the space definition
# easier and smarter. It is recommended to use this set of high level API.
#
# In the following example, we use :any:`ConfigSpace.define_split` to define a split
# knob. It will enumerate all the possible ways to split an axis and construct
# the space.
#
# We also have :any:`ConfigSpace.define_reorder` for reorder knobs and
# :any:`ConfigSpace.define_annotate` for annotations like unrolling, vectorization,
# and thread binding.
# When the high-level API cannot meet your requirements, you can always fall
# back to the low-level API.
@autotvm.template
def matmul(N, L, M, dtype):
A = tvm.placeholder((N, L), name='A', dtype=dtype)
B = tvm.placeholder((L, M), name='B', dtype=dtype)
k = tvm.reduce_axis((0, L), name='k')
C = tvm.compute((N, M), lambda i, j: tvm.sum(A[i, k] * B[k, j], axis=k), name='C')
s = tvm.create_schedule(C.op)
# schedule
y, x = s[C].op.axis
k = s[C].op.reduce_axis[0]
##### define space begin #####
cfg = autotvm.get_config()
cfg.define_split("tile_y", y, num_outputs=2)
cfg.define_split("tile_x", x, num_outputs=2)
##### define space end #####
# schedule according to config
yo, yi = cfg["tile_y"].apply(s, C, y)
xo, xi = cfg["tile_x"].apply(s, C, x)
s[C].reorder(yo, xo, k, yi, xi)
return s, [A, B, C]
######################################################################
# .. note:: More Explanation on :code:`cfg.define_split`
#
# In this template, :code:`cfg.define_split("tile_y", y, num_outputs=2)` will enumerate
#  all possible ways to split axis y into two axes whose lengths are factors of the length of y.
# For example, if the length of y is 32 and we want to split it into two axes
# using factors of 32, then there are 6 possible values for
# (length of outer axis, length of inner axis) pair, namely
# (32, 1), (16, 2), (8, 4), (4, 8), (2, 16) or (1, 32).
# They are just the 6 possible values of `tile_y`.
#
# During schedule, :code:`cfg["tile_y"]` is a :code:`SplitEntity` object.
#  It stores the lengths of the outer and inner axes in :code:`cfg['tile_y'].size`
# (a tuple with two elements).
# In this template, we apply it by using :code:`yo, yi = cfg['tile_y'].apply(s, C, y)`.
# Actually, this is equivalent to
# :code:`yo, yi = s[C].split(y, cfg["tile_y"].size[1])`
#  or :code:`yo, yi = s[C].split(y, nparts=cfg['tile_y'].size[0])`
#
# The advantage of using cfg.apply API is that it makes multi-level split
# (when num_outputs >= 3) easier.
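#
#  As a hedged sketch (these two lines are not part of the original template), a
#  three-level tiling of ``y`` could be written as::
#
#      cfg.define_split("tile_y", y, num_outputs=3)  # enumerate (outer, middle, inner) factorizations
#      yo, ym, yi = cfg["tile_y"].apply(s, C, y)     # a single apply() performs both split levels
#
#  whereas with the low-level :code:`s[C].split` API you would have to chain two
#  split calls and track the factors yourself.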
######################################################################
# Step 2: Search through the space
# ---------------------------------
# In step 1, we build the search space by extending our old schedule | |
b, e) for (t, b, e) in sglQuotedString.scanString(escapedQuoteTest)
]
print(sglStrings)
self.assertTrue(
len(sglStrings) == 1
and (sglStrings[0][1] == 17 and sglStrings[0][2] == 66),
"single quoted string escaped quote failure (%s)" % str(sglStrings[0]),
)
dblStrings = [
(t[0], b, e) for (t, b, e) in dblQuotedString.scanString(escapedQuoteTest)
]
print(dblStrings)
self.assertTrue(
len(dblStrings) == 1
and (dblStrings[0][1] == 83 and dblStrings[0][2] == 132),
"double quoted string escaped quote failure (%s)" % str(dblStrings[0]),
)
allStrings = [
(t[0], b, e) for (t, b, e) in quotedString.scanString(escapedQuoteTest)
]
print(allStrings)
self.assertTrue(
len(allStrings) == 2
and (
allStrings[0][1] == 17
and allStrings[0][2] == 66
and allStrings[1][1] == 83
and allStrings[1][2] == 132
),
"quoted string escaped quote failure (%s)"
% ([str(s[0]) for s in allStrings]),
)
dblQuoteTest = r"""
'This string has an doubled ('') quote character'
"This string has an doubled ("") quote character"
"""
sglStrings = [
(t[0], b, e) for (t, b, e) in sglQuotedString.scanString(dblQuoteTest)
]
print(sglStrings)
self.assertTrue(
len(sglStrings) == 1
and (sglStrings[0][1] == 17 and sglStrings[0][2] == 66),
"single quoted string escaped quote failure (%s)" % str(sglStrings[0]),
)
dblStrings = [
(t[0], b, e) for (t, b, e) in dblQuotedString.scanString(dblQuoteTest)
]
print(dblStrings)
self.assertTrue(
len(dblStrings) == 1
and (dblStrings[0][1] == 83 and dblStrings[0][2] == 132),
"double quoted string escaped quote failure (%s)" % str(dblStrings[0]),
)
allStrings = [
(t[0], b, e) for (t, b, e) in quotedString.scanString(dblQuoteTest)
]
print(allStrings)
self.assertTrue(
len(allStrings) == 2
and (
allStrings[0][1] == 17
and allStrings[0][2] == 66
and allStrings[1][1] == 83
and allStrings[1][2] == 132
),
"quoted string escaped quote failure (%s)"
% ([str(s[0]) for s in allStrings]),
)
print(
"testing catastrophic RE backtracking in implementation of dblQuotedString"
)
for expr, test_string in [
(dblQuotedString, '"' + "\\xff" * 500),
(sglQuotedString, "'" + "\\xff" * 500),
(quotedString, '"' + "\\xff" * 500),
(quotedString, "'" + "\\xff" * 500),
(QuotedString('"'), '"' + "\\xff" * 500),
(QuotedString("'"), "'" + "\\xff" * 500),
]:
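            # First verify that the properly terminated quoted string (closing quote char
            # appended) parses, then check that the unterminated string fails promptly with
            # some exception instead of hanging in catastrophic regex backtracking.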
expr.parseString(test_string + test_string[0])
try:
expr.parseString(test_string)
except Exception:
continue
# test invalid endQuoteChar
with self.assertRaises(
SyntaxError, msg="issue raising error for invalid endQuoteChar"
):
expr = pp.QuotedString('"', endQuoteChar=" ")
def testCaselessOneOf(self):
caseless1 = pp.oneOf("d a b c aA B A C", caseless=True)
caseless1str = str(caseless1)
print(caseless1str)
caseless2 = pp.oneOf("d a b c Aa B A C", caseless=True)
caseless2str = str(caseless2)
print(caseless2str)
self.assertEqual(
caseless1str.upper(),
caseless2str.upper(),
"oneOf not handling caseless option properly",
)
self.assertNotEqual(
caseless1str, caseless2str, "Caseless option properly sorted"
)
res = caseless1[...].parseString("AAaaAaaA")
print(res)
self.assertEqual(4, len(res), "caseless1 oneOf failed")
self.assertEqual(
"aA" * 4, "".join(res), "caseless1 CaselessLiteral return failed"
)
res = caseless2[...].parseString("AAaaAaaA")
print(res)
self.assertEqual(4, len(res), "caseless2 oneOf failed")
self.assertEqual(
"Aa" * 4, "".join(res), "caseless1 CaselessLiteral return failed"
)
def testCommentParser(self):
print("verify processing of C and HTML comments")
testdata = """
/* */
/** **/
/**/
/***/
/****/
/* /*/
/** /*/
/*** /*/
/*
ablsjdflj
*/
"""
foundLines = [
pp.lineno(s, testdata) for t, s, e in pp.cStyleComment.scanString(testdata)
]
self.assertEqual(
list(range(11))[2:],
foundLines,
"only found C comments on lines " + str(foundLines),
)
testdata = """
<!-- -->
<!--- --->
<!---->
<!----->
<!------>
<!-- /-->
<!--- /-->
<!---- /-->
<!---- /- ->
<!---- / -- >
<!--
ablsjdflj
-->
"""
foundLines = [
pp.lineno(s, testdata) for t, s, e in pp.htmlComment.scanString(testdata)
]
self.assertEqual(
list(range(11))[2:],
foundLines,
"only found HTML comments on lines " + str(foundLines),
)
# test C++ single line comments that have line terminated with '\' (should continue comment to following line)
testSource = r"""
// comment1
// comment2 \
still comment 2
// comment 3
"""
self.assertEqual(
41,
len(pp.cppStyleComment.searchString(testSource)[1][0]),
r"failed to match single-line comment with '\' at EOL",
)
def testParseExpressionResults(self):
from pyparsing import Word, alphas, OneOrMore, Optional, Group
a = Word("a", alphas).setName("A")
b = Word("b", alphas).setName("B")
c = Word("c", alphas).setName("C")
ab = (a + b).setName("AB")
abc = (ab + c).setName("ABC")
word = Word(alphas).setName("word")
words = Group(OneOrMore(~a + word)).setName("words")
phrase = (
words("Head") + Group(a + Optional(b + Optional(c)))("ABC") + words("Tail")
)
results = phrase.parseString("xavier yeti alpha beta charlie will beaver")
print(results, results.Head, results.ABC, results.Tail)
for key, ln in [("Head", 2), ("ABC", 3), ("Tail", 2)]:
self.assertEqual(
ln,
len(results[key]),
"expected %d elements in %s, found %s" % (ln, key, str(results[key])),
)
def testParseKeyword(self):
kw = pp.Keyword("if")
lit = pp.Literal("if")
def test(s, litShouldPass, kwShouldPass):
print("Test", s)
print("Match Literal", end=" ")
try:
print(lit.parseString(s))
except Exception:
print("failed")
if litShouldPass:
self.fail("Literal failed to match %s, should have" % s)
else:
if not litShouldPass:
self.fail("Literal matched %s, should not have" % s)
print("Match Keyword", end=" ")
try:
print(kw.parseString(s))
except Exception:
print("failed")
if kwShouldPass:
self.fail("Keyword failed to match %s, should have" % s)
else:
if not kwShouldPass:
self.fail("Keyword matched %s, should not have" % s)
test("ifOnlyIfOnly", True, False)
test("if(OnlyIfOnly)", True, True)
test("if (OnlyIf Only)", True, True)
kw = pp.Keyword("if", caseless=True)
test("IFOnlyIfOnly", False, False)
test("If(OnlyIfOnly)", False, True)
test("iF (OnlyIf Only)", False, True)
with self.assertWarns(
SyntaxWarning, msg="failed to warn empty string passed to Keyword"
):
kw = pp.Keyword("")
def testParseExpressionResultsAccumulate(self):
from pyparsing import Word, delimitedList, Combine, alphas, nums
num = Word(nums).setName("num")("base10*")
hexnum = Combine("0x" + Word(nums)).setName("hexnum")("hex*")
name = Word(alphas).setName("word")("word*")
list_of_num = delimitedList(hexnum | num | name, ",")
tokens = list_of_num.parseString("1, 0x2, 3, 0x4, aaa")
print(tokens.dump())
self.assertParseResultsEquals(
tokens,
expected_list=["1", "0x2", "3", "0x4", "aaa"],
expected_dict={
"base10": ["1", "3"],
"hex": ["0x2", "0x4"],
"word": ["aaa"],
},
)
from pyparsing import (
Literal,
Word,
nums,
Group,
Dict,
alphas,
quotedString,
oneOf,
delimitedList,
removeQuotes,
alphanums,
)
lbrack = Literal("(").suppress()
rbrack = Literal(")").suppress()
integer = Word(nums).setName("int")
variable = Word(alphas, max=1).setName("variable")
relation_body_item = (
variable | integer | quotedString.copy().setParseAction(removeQuotes)
)
relation_name = Word(alphas + "_", alphanums + "_")
relation_body = lbrack + Group(delimitedList(relation_body_item)) + rbrack
Goal = Dict(Group(relation_name + relation_body))
Comparison_Predicate = Group(variable + oneOf("< >") + integer)("pred*")
Query = Goal("head") + ":-" + delimitedList(Goal | Comparison_Predicate)
test = """Q(x,y,z):-Bloo(x,"Mitsis",y),Foo(y,z,1243),y>28,x<12,x>3"""
queryRes = Query.parseString(test)
print(queryRes.dump())
self.assertParseResultsEquals(
queryRes.pred,
expected_list=[["y", ">", "28"], ["x", "<", "12"], ["x", ">", "3"]],
msg="Incorrect list for attribute pred, %s" % str(queryRes.pred.asList()),
)
def testReStringRange(self):
testCases = (
(r"[A-Z]"),
(r"[A-A]"),
(r"[A-Za-z]"),
(r"[A-z]"),
(r"[\ -\~]"),
(r"[\0x20-0]"),
(r"[\0x21-\0x7E]"),
(r"[\0xa1-\0xfe]"),
(r"[\040-0]"),
(r"[A-Za-z0-9]"),
(r"[A-Za-z0-9_]"),
(r"[A-Za-z0-9_$]"),
(r"[A-Za-z0-9_$\-]"),
(r"[^0-9\\]"),
(r"[a-zA-Z]"),
(r"[/\^~]"),
(r"[=\+\-!]"),
(r"[A-]"),
(r"[-A]"),
(r"[\x21]"),
(r"[а-яА-ЯёЁA-Z$_\041α-ω]"),
)
expectedResults = (
"ABCDEFGHIJKLMNOPQRSTUVWXYZ",
"A",
"ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz",
"ABCDEFGHIJKLMNOPQRSTUVWXYZ[\\]^_`abcdefghijklmnopqrstuvwxyz",
" !\"#$%&'()*+,-./0123456789:;<=>?@ABCDEFGHIJKLMNOPQRSTUVWXYZ[\\]^_`abcdefghijklmnopqrstuvwxyz{|}~",
" !\"#$%&'()*+,-./0",
"!\"#$%&'()*+,-./0123456789:;<=>?@ABCDEFGHIJKLMNOPQRSTUVWXYZ[\\]^_`abcdefghijklmnopqrstuvwxyz{|}~",
"¡¢£¤¥¦§¨©ª«¬®¯°±²³´µ¶·¸¹º»¼½¾¿ÀÁÂÃÄÅÆÇÈÉÊËÌÍÎÏÐÑÒÓÔÕÖרÙÚÛÜÝÞßàáâãäåæçèéêëìíîïðñòóôõö÷øùúûüýþ",
" !\"#$%&'()*+,-./0",
"ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789",
"ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789_",
"ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789_$",
"ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789_$-",
"0123456789\\",
"abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ",
"/^~",
"=+-!",
"A-",
"-A",
"!",
"абвгдежзийклмнопрстуфхцчшщъыьэюяАБВГДЕЖЗИЙКЛМНОПРСТУФХЦЧШЩЪЫЬЭЮЯёЁABCDEFGHIJKLMNOPQRSTUVWXYZ$_!αβγδεζηθικλμνξοπρςστυφχψω",
)
for test in zip(testCases, expectedResults):
t, exp = test
res = pp.srange(t)
# print(t, "->", res)
self.assertEqual(
exp,
res,
"srange error, srange({!r})->'{!r}', expected '{!r}'".format(
t, res, exp
),
)
def testSkipToParserTests(self):
from pyparsing import Literal, SkipTo, cStyleComment
thingToFind = Literal("working")
testExpr = (
SkipTo(Literal(";"), include=True, ignore=cStyleComment) + thingToFind
)
def test_parse(someText):
print(testExpr.parseString(someText))
# This first test works, as the SkipTo expression is immediately following the ignore expression (cStyleComment)
test_parse("some text /* comment with ; in */; working")
# This second test previously failed, as there is text following the ignore expression, and before the SkipTo expression.
test_parse("some text /* comment with ; in */some other stuff; working")
# tests for optional failOn argument
testExpr = (
SkipTo(Literal(";"), include=True, ignore=cStyleComment, failOn="other")
+ thingToFind
)
test_parse("some text /* comment with ; in */; working")
with self.assertRaisesParseException():
test_parse("some text /* comment with ; in */some other stuff; working")
# test that we correctly create named results
text = "prefixDATAsuffix"
data = Literal("DATA")
suffix = Literal("suffix")
expr = SkipTo(data + suffix)("prefix") + data + suffix
result = expr.parseString(text)
self.assertTrue(
isinstance(result.prefix, str),
"SkipTo created with wrong saveAsList attribute",
)
from pyparsing import Literal, And, Word, alphas, nums
alpha_word = (~Literal("end") + Word(alphas, asKeyword=True)).setName("alpha")
num_word = Word(nums, asKeyword=True).setName("int")
def test(expr, test_string, expected_list, expected_dict):
if (expected_list, expected_dict) == (None, None):
with self.assertRaises(
Exception, msg="{} failed to parse {!r}".format(expr, test_string)
):
expr.parseString(test_string)
else:
result = expr.parseString(test_string)
self.assertParseResultsEquals(
result, expected_list=expected_list, expected_dict=expected_dict
)
# ellipses for SkipTo
e = ... + Literal("end")
test(e, "start 123 end", ["start 123 ", "end"], {"_skipped": ["start 123 "]})
e = Literal("start") + ... + Literal("end")
test(e, "start 123 end", ["start", "123 ", "end"], {"_skipped": ["123 "]})
e = Literal("start") + ...
test(e, "start 123 | |
from collections import namedtuple
from PySide2 import QtCore, QtGui, QtWidgets
from guis.g_need import GNeed
from guis.g_panel import GPanel
from guis.g_price_bar_chart import GPriceBarChart
from my_qt.buttons import CheckableMenuButton
from my_qt.charts import PriceBarChart
from my_qt.combo_boxes import SearchCombo
from my_qt.group_boxes import RelatedPanelQuotationGroup
from my_qt.spin_boxes import NoWheelSpinBox, NoWheelDoubleSpinBox
from resources import url_back, url_save, url_plus, url_minus
from utilities.various import color_gray, color_red
class GPanelNeed(GPriceBarChart, GPanel, GNeed):
def __init__(self, window):
super(GPriceBarChart, self).__init__()
super(GPanel, self).__init__(window)
self.setup_gui()
# Edited: SearchCombo, CheckableMenuButton, NoWheelSpinBox, NoWheelDoubleSpinBox, RelatedPanelQuotationGroup(+delete below), PriceBarChart(+delete below)
def setup_gui(self):
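        # Builds the panel's widget tree in code: a header row (back button, title label,
        # "priorities" checkbox), then a scroll area holding group boxes such as "General"
        # and "Panel", where each input widget sits next to a matching "Prio*" priority
        # combo box in the column to its right.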
self.w.centralWidget = QtWidgets.QWidget(self.w)
self.w.centralWidget.setObjectName('centralWidget')
self.w.verticalLayout = QtWidgets.QVBoxLayout(self.w.centralWidget)
self.w.verticalLayout.setSpacing(16)
self.w.verticalLayout.setObjectName('verticalLayout')
self.w.horizontalLayout_3 = QtWidgets.QHBoxLayout()
self.w.horizontalLayout_3.setSpacing(0)
self.w.horizontalLayout_3.setObjectName('horizontalLayout_3')
self.w.buttonBack = QtWidgets.QPushButton(self.w.centralWidget)
self.w.buttonBack.setMinimumSize(QtCore.QSize(50, 40))
self.w.buttonBack.setMaximumSize(QtCore.QSize(50, 40))
font = QtGui.QFont()
font.setPointSize(13)
self.w.buttonBack.setFont(font)
self.w.buttonBack.setText('')
self.w.buttonBack.setObjectName('buttonBack')
self.w.horizontalLayout_3.addWidget(self.w.buttonBack)
spacerItem = QtWidgets.QSpacerItem(120, 20, QtWidgets.QSizePolicy.Fixed, QtWidgets.QSizePolicy.Minimum)
self.w.horizontalLayout_3.addItem(spacerItem)
self.w.labelTitle = QtWidgets.QLabel(self.w.centralWidget)
font = QtGui.QFont()
font.setPointSize(25)
self.w.labelTitle.setFont(font)
self.w.labelTitle.setAlignment(QtCore.Qt.AlignCenter)
self.w.labelTitle.setObjectName('labelTitle')
self.w.horizontalLayout_3.addWidget(self.w.labelTitle)
self.w.checkPriorities = QtWidgets.QCheckBox(self.w.centralWidget)
self.w.checkPriorities.setMinimumSize(QtCore.QSize(170, 40))
self.w.checkPriorities.setMaximumSize(QtCore.QSize(170, 40))
font = QtGui.QFont()
font.setPointSize(13)
self.w.checkPriorities.setFont(font)
self.w.checkPriorities.setTristate(False)
self.w.checkPriorities.setObjectName('checkPriorities')
self.w.horizontalLayout_3.addWidget(self.w.checkPriorities)
self.w.verticalLayout.addLayout(self.w.horizontalLayout_3)
self.w.scrollArea = QtWidgets.QScrollArea(self.w.centralWidget)
self.w.scrollArea.setWidgetResizable(True)
self.w.scrollArea.setObjectName('scrollArea')
self.w.scrollAreaWidgetContents = QtWidgets.QWidget()
self.w.scrollAreaWidgetContents.setGeometry(QtCore.QRect(0, 0, 1225, 861))
self.w.scrollAreaWidgetContents.setObjectName('scrollAreaWidgetContents')
self.w.verticalLayout_9 = QtWidgets.QVBoxLayout(self.w.scrollAreaWidgetContents)
self.w.verticalLayout_9.setSpacing(20)
self.w.verticalLayout_9.setObjectName('verticalLayout_9')
self.w.horizontalLayout_4 = QtWidgets.QHBoxLayout()
self.w.horizontalLayout_4.setSpacing(50)
self.w.horizontalLayout_4.setObjectName('horizontalLayout_4')
spacerItem1 = QtWidgets.QSpacerItem(29, 50, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Minimum)
self.w.horizontalLayout_4.addItem(spacerItem1)
self.w.verticalLayout_2 = QtWidgets.QVBoxLayout()
self.w.verticalLayout_2.setObjectName('verticalLayout_2')
self.w.groupGeneral = QtWidgets.QGroupBox(self.w.scrollAreaWidgetContents)
self.w.groupGeneral.setMinimumSize(QtCore.QSize(250, 0))
font = QtGui.QFont()
font.setPointSize(12)
font.setWeight(75)
font.setBold(True)
self.w.groupGeneral.setFont(font)
self.w.groupGeneral.setAlignment(QtCore.Qt.AlignCenter)
self.w.groupGeneral.setObjectName('groupGeneral')
self.w.verticalLayout_3 = QtWidgets.QVBoxLayout(self.w.groupGeneral)
self.w.verticalLayout_3.setSpacing(20)
self.w.verticalLayout_3.setObjectName('verticalLayout_3')
self.w.horizontalLayout_5 = QtWidgets.QHBoxLayout()
self.w.horizontalLayout_5.setSpacing(10)
self.w.horizontalLayout_5.setObjectName('horizontalLayout_5')
self.w.verticalLayout_12 = QtWidgets.QVBoxLayout()
self.w.verticalLayout_12.setSpacing(2)
self.w.verticalLayout_12.setObjectName('verticalLayout_12')
self.w.labelCompany = QtWidgets.QLabel(self.w.groupGeneral)
font = QtGui.QFont()
font.setPointSize(11)
font.setWeight(50)
font.setBold(False)
self.w.labelCompany.setFont(font)
self.w.labelCompany.setAlignment(QtCore.Qt.AlignLeading | QtCore.Qt.AlignLeft | QtCore.Qt.AlignVCenter)
self.w.labelCompany.setObjectName('labelCompany')
self.w.verticalLayout_12.addWidget(self.w.labelCompany)
self.w.horizontalLayout_6 = QtWidgets.QHBoxLayout()
self.w.horizontalLayout_6.setObjectName('horizontalLayout_6')
self.w.comboCompany = SearchCombo(self.w.groupGeneral)
self.w.comboCompany.setMinimumSize(QtCore.QSize(0, 30))
font = QtGui.QFont()
font.setWeight(50)
font.setBold(False)
self.w.comboCompany.setFont(font)
self.w.comboCompany.setObjectName('comboCompany')
self.w.horizontalLayout_6.addWidget(self.w.comboCompany)
self.w.buttonAddCompany = QtWidgets.QPushButton(self.w.groupGeneral)
self.w.buttonAddCompany.setMinimumSize(QtCore.QSize(30, 30))
self.w.buttonAddCompany.setMaximumSize(QtCore.QSize(30, 30))
font = QtGui.QFont()
font.setPointSize(12)
font.setWeight(50)
font.setBold(False)
self.w.buttonAddCompany.setFont(font)
self.w.buttonAddCompany.setText('')
self.w.buttonAddCompany.setObjectName('buttonAddCompany')
self.w.horizontalLayout_6.addWidget(self.w.buttonAddCompany)
self.w.verticalLayout_12.addLayout(self.w.horizontalLayout_6)
self.w.horizontalLayout_5.addLayout(self.w.verticalLayout_12)
self.w.verticalLayout_31 = QtWidgets.QVBoxLayout()
self.w.verticalLayout_31.setSpacing(2)
self.w.verticalLayout_31.setObjectName('verticalLayout_31')
self.w.labelPrioCompany = QtWidgets.QLabel(self.w.groupGeneral)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.Minimum)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.w.labelPrioCompany.sizePolicy().hasHeightForWidth())
self.w.labelPrioCompany.setSizePolicy(sizePolicy)
font = QtGui.QFont()
font.setPointSize(11)
font.setWeight(50)
font.setBold(False)
self.w.labelPrioCompany.setFont(font)
self.w.labelPrioCompany.setAlignment(QtCore.Qt.AlignCenter)
self.w.labelPrioCompany.setObjectName('labelPrioCompany')
self.w.verticalLayout_31.addWidget(self.w.labelPrioCompany)
self.w.comboPrioCompany = QtWidgets.QComboBox(self.w.groupGeneral)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.Minimum)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.w.comboPrioCompany.sizePolicy().hasHeightForWidth())
self.w.comboPrioCompany.setSizePolicy(sizePolicy)
self.w.comboPrioCompany.setMinimumSize(QtCore.QSize(0, 30))
font = QtGui.QFont()
font.setWeight(50)
font.setBold(False)
self.w.comboPrioCompany.setFont(font)
self.w.comboPrioCompany.setObjectName('comboPrioCompany')
self.w.verticalLayout_31.addWidget(self.w.comboPrioCompany)
self.w.horizontalLayout_5.addLayout(self.w.verticalLayout_31)
self.w.horizontalLayout_5.setStretch(0, 1)
self.w.verticalLayout_3.addLayout(self.w.horizontalLayout_5)
self.w.horizontalLayout_7 = QtWidgets.QHBoxLayout()
self.w.horizontalLayout_7.setSpacing(10)
self.w.horizontalLayout_7.setObjectName('horizontalLayout_7')
self.w.verticalLayout_28 = QtWidgets.QVBoxLayout()
self.w.verticalLayout_28.setSpacing(2)
self.w.verticalLayout_28.setObjectName('verticalLayout_28')
self.w.labelMark = QtWidgets.QLabel(self.w.groupGeneral)
font = QtGui.QFont()
font.setPointSize(11)
font.setWeight(50)
font.setBold(False)
self.w.labelMark.setFont(font)
self.w.labelMark.setAlignment(QtCore.Qt.AlignLeading | QtCore.Qt.AlignLeft | QtCore.Qt.AlignVCenter)
self.w.labelMark.setObjectName('labelMark')
self.w.verticalLayout_28.addWidget(self.w.labelMark)
self.w.comboMark = SearchCombo(self.w.groupGeneral)
self.w.comboMark.setMinimumSize(QtCore.QSize(0, 30))
font = QtGui.QFont()
font.setWeight(50)
font.setBold(False)
self.w.comboMark.setFont(font)
self.w.comboMark.setObjectName('comboMark')
self.w.verticalLayout_28.addWidget(self.w.comboMark)
self.w.horizontalLayout_7.addLayout(self.w.verticalLayout_28)
self.w.verticalLayout_79 = QtWidgets.QVBoxLayout()
self.w.verticalLayout_79.setSpacing(2)
self.w.verticalLayout_79.setObjectName('verticalLayout_79')
self.w.labelPrioMark = QtWidgets.QLabel(self.w.groupGeneral)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.Minimum)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.w.labelPrioMark.sizePolicy().hasHeightForWidth())
self.w.labelPrioMark.setSizePolicy(sizePolicy)
font = QtGui.QFont()
font.setPointSize(11)
font.setWeight(50)
font.setBold(False)
self.w.labelPrioMark.setFont(font)
self.w.labelPrioMark.setAlignment(QtCore.Qt.AlignCenter)
self.w.labelPrioMark.setObjectName('labelPrioMark')
self.w.verticalLayout_79.addWidget(self.w.labelPrioMark)
self.w.comboPrioMark = QtWidgets.QComboBox(self.w.groupGeneral)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.Minimum)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.w.comboPrioMark.sizePolicy().hasHeightForWidth())
self.w.comboPrioMark.setSizePolicy(sizePolicy)
self.w.comboPrioMark.setMinimumSize(QtCore.QSize(0, 30))
font = QtGui.QFont()
font.setWeight(50)
font.setBold(False)
self.w.comboPrioMark.setFont(font)
self.w.comboPrioMark.setObjectName('comboPrioMark')
self.w.verticalLayout_79.addWidget(self.w.comboPrioMark)
self.w.horizontalLayout_7.addLayout(self.w.verticalLayout_79)
self.w.horizontalLayout_7.setStretch(0, 1)
self.w.verticalLayout_3.addLayout(self.w.horizontalLayout_7)
self.w.horizontalLayout_9 = QtWidgets.QHBoxLayout()
self.w.horizontalLayout_9.setSpacing(10)
self.w.horizontalLayout_9.setObjectName('horizontalLayout_9')
self.w.verticalLayout_13 = QtWidgets.QVBoxLayout()
self.w.verticalLayout_13.setSpacing(2)
self.w.verticalLayout_13.setObjectName('verticalLayout_13')
self.w.labelTotalPower = QtWidgets.QLabel(self.w.groupGeneral)
font = QtGui.QFont()
font.setPointSize(11)
font.setWeight(50)
font.setBold(False)
self.w.labelTotalPower.setFont(font)
self.w.labelTotalPower.setAlignment(QtCore.Qt.AlignLeading | QtCore.Qt.AlignLeft | QtCore.Qt.AlignVCenter)
self.w.labelTotalPower.setObjectName('labelTotalPower')
self.w.verticalLayout_13.addWidget(self.w.labelTotalPower)
self.w.spinTotalPower = NoWheelDoubleSpinBox(self.w.groupGeneral)
self.w.spinTotalPower.setMinimumSize(QtCore.QSize(0, 30))
font = QtGui.QFont()
font.setPointSize(12)
font.setWeight(50)
font.setBold(False)
self.w.spinTotalPower.setFont(font)
self.w.spinTotalPower.setFocusPolicy(QtCore.Qt.StrongFocus)
self.w.spinTotalPower.setDecimals(3)
self.w.spinTotalPower.setMaximum(9999999999.0)
self.w.spinTotalPower.setObjectName('spinTotalPower')
self.w.verticalLayout_13.addWidget(self.w.spinTotalPower)
self.w.horizontalLayout_9.addLayout(self.w.verticalLayout_13)
self.w.verticalLayout_80 = QtWidgets.QVBoxLayout()
self.w.verticalLayout_80.setSpacing(2)
self.w.verticalLayout_80.setObjectName('verticalLayout_80')
self.w.labelPrioTotalPower = QtWidgets.QLabel(self.w.groupGeneral)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.Minimum)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.w.labelPrioTotalPower.sizePolicy().hasHeightForWidth())
self.w.labelPrioTotalPower.setSizePolicy(sizePolicy)
font = QtGui.QFont()
font.setPointSize(11)
font.setWeight(50)
font.setBold(False)
self.w.labelPrioTotalPower.setFont(font)
self.w.labelPrioTotalPower.setAlignment(QtCore.Qt.AlignCenter)
self.w.labelPrioTotalPower.setObjectName('labelPrioTotalPower')
self.w.verticalLayout_80.addWidget(self.w.labelPrioTotalPower)
self.w.comboPrioTotalPower = QtWidgets.QComboBox(self.w.groupGeneral)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.Minimum)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.w.comboPrioTotalPower.sizePolicy().hasHeightForWidth())
self.w.comboPrioTotalPower.setSizePolicy(sizePolicy)
self.w.comboPrioTotalPower.setMinimumSize(QtCore.QSize(0, 30))
font = QtGui.QFont()
font.setWeight(50)
font.setBold(False)
self.w.comboPrioTotalPower.setFont(font)
self.w.comboPrioTotalPower.setObjectName('comboPrioTotalPower')
self.w.verticalLayout_80.addWidget(self.w.comboPrioTotalPower)
self.w.horizontalLayout_9.addLayout(self.w.verticalLayout_80)
self.w.horizontalLayout_9.setStretch(0, 1)
self.w.verticalLayout_3.addLayout(self.w.horizontalLayout_9)
self.w.horizontalLayout_15 = QtWidgets.QHBoxLayout()
self.w.horizontalLayout_15.setSpacing(10)
self.w.horizontalLayout_15.setObjectName('horizontalLayout_15')
self.w.verticalLayout_14 = QtWidgets.QVBoxLayout()
self.w.verticalLayout_14.setSpacing(2)
self.w.verticalLayout_14.setObjectName('verticalLayout_14')
self.w.labelPrice = QtWidgets.QLabel(self.w.groupGeneral)
font = QtGui.QFont()
font.setPointSize(11)
font.setWeight(50)
font.setBold(False)
self.w.labelPrice.setFont(font)
self.w.labelPrice.setAlignment(QtCore.Qt.AlignLeading | QtCore.Qt.AlignLeft | QtCore.Qt.AlignVCenter)
self.w.labelPrice.setObjectName('labelPrice')
self.w.verticalLayout_14.addWidget(self.w.labelPrice)
self.w.spinPrice = NoWheelDoubleSpinBox(self.w.groupGeneral)
self.w.spinPrice.setMinimumSize(QtCore.QSize(0, 30))
font = QtGui.QFont()
font.setPointSize(12)
font.setWeight(50)
font.setBold(False)
self.w.spinPrice.setFont(font)
self.w.spinPrice.setFocusPolicy(QtCore.Qt.StrongFocus)
self.w.spinPrice.setDecimals(4)
self.w.spinPrice.setMaximum(9999999999.0)
self.w.spinPrice.setObjectName('spinPrice')
self.w.verticalLayout_14.addWidget(self.w.spinPrice)
self.w.horizontalLayout_15.addLayout(self.w.verticalLayout_14)
self.w.verticalLayout_87 = QtWidgets.QVBoxLayout()
self.w.verticalLayout_87.setSpacing(2)
self.w.verticalLayout_87.setObjectName('verticalLayout_87')
self.w.labelPrioPrice = QtWidgets.QLabel(self.w.groupGeneral)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.Minimum)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.w.labelPrioPrice.sizePolicy().hasHeightForWidth())
self.w.labelPrioPrice.setSizePolicy(sizePolicy)
font = QtGui.QFont()
font.setPointSize(11)
font.setWeight(50)
font.setBold(False)
self.w.labelPrioPrice.setFont(font)
self.w.labelPrioPrice.setAlignment(QtCore.Qt.AlignCenter)
self.w.labelPrioPrice.setObjectName('labelPrioPrice')
self.w.verticalLayout_87.addWidget(self.w.labelPrioPrice)
self.w.comboPrioPrice = QtWidgets.QComboBox(self.w.groupGeneral)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.Minimum)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.w.comboPrioPrice.sizePolicy().hasHeightForWidth())
self.w.comboPrioPrice.setSizePolicy(sizePolicy)
self.w.comboPrioPrice.setMinimumSize(QtCore.QSize(0, 30))
font = QtGui.QFont()
font.setWeight(50)
font.setBold(False)
self.w.comboPrioPrice.setFont(font)
self.w.comboPrioPrice.setObjectName('comboPrioPrice')
self.w.verticalLayout_87.addWidget(self.w.comboPrioPrice)
self.w.horizontalLayout_15.addLayout(self.w.verticalLayout_87)
self.w.horizontalLayout_15.setStretch(0, 1)
self.w.verticalLayout_3.addLayout(self.w.horizontalLayout_15)
self.w.verticalLayout_2.addWidget(self.w.groupGeneral)
spacerItem2 = QtWidgets.QSpacerItem(20, 40, QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.Expanding)
self.w.verticalLayout_2.addItem(spacerItem2)
self.w.horizontalLayout_4.addLayout(self.w.verticalLayout_2)
spacerItem3 = QtWidgets.QSpacerItem(50, 50, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Minimum)
self.w.horizontalLayout_4.addItem(spacerItem3)
self.w.verticalLayout_4 = QtWidgets.QVBoxLayout()
self.w.verticalLayout_4.setObjectName('verticalLayout_4')
self.w.groupPanel = QtWidgets.QGroupBox(self.w.scrollAreaWidgetContents)
self.w.groupPanel.setMinimumSize(QtCore.QSize(250, 0))
font = QtGui.QFont()
font.setPointSize(12)
font.setWeight(75)
font.setBold(True)
self.w.groupPanel.setFont(font)
self.w.groupPanel.setAlignment(QtCore.Qt.AlignCenter)
self.w.groupPanel.setObjectName('groupPanel')
self.w.verticalLayout_6 = QtWidgets.QVBoxLayout(self.w.groupPanel)
self.w.verticalLayout_6.setSpacing(20)
self.w.verticalLayout_6.setObjectName('verticalLayout_6')
self.w.horizontalLayout_21 = QtWidgets.QHBoxLayout()
self.w.horizontalLayout_21.setSpacing(10)
self.w.horizontalLayout_21.setObjectName('horizontalLayout_21')
self.w.verticalLayout_19 = QtWidgets.QVBoxLayout()
self.w.verticalLayout_19.setSpacing(2)
self.w.verticalLayout_19.setObjectName('verticalLayout_19')
self.w.labelPanelType = QtWidgets.QLabel(self.w.groupPanel)
font = QtGui.QFont()
font.setPointSize(11)
font.setWeight(50)
font.setBold(False)
self.w.labelPanelType.setFont(font)
self.w.labelPanelType.setAlignment(QtCore.Qt.AlignLeading | QtCore.Qt.AlignLeft | QtCore.Qt.AlignVCenter)
self.w.labelPanelType.setObjectName('labelPanelType')
self.w.verticalLayout_19.addWidget(self.w.labelPanelType)
self.w.buttonPanelTypes = CheckableMenuButton(self.w.groupPanel)
self.w.buttonPanelTypes.setMinimumSize(QtCore.QSize(0, 30))
font = QtGui.QFont()
font.setWeight(50)
font.setBold(False)
self.w.buttonPanelTypes.setFont(font)
self.w.buttonPanelTypes.setText('')
self.w.buttonPanelTypes.setObjectName('buttonPanelTypes')
self.w.verticalLayout_19.addWidget(self.w.buttonPanelTypes)
self.w.horizontalLayout_21.addLayout(self.w.verticalLayout_19)
self.w.verticalLayout_81 = QtWidgets.QVBoxLayout()
self.w.verticalLayout_81.setSpacing(2)
self.w.verticalLayout_81.setObjectName('verticalLayout_81')
self.w.labelPrioPanelType = QtWidgets.QLabel(self.w.groupPanel)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.Minimum)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.w.labelPrioPanelType.sizePolicy().hasHeightForWidth())
self.w.labelPrioPanelType.setSizePolicy(sizePolicy)
font = QtGui.QFont()
font.setPointSize(11)
font.setWeight(50)
font.setBold(False)
self.w.labelPrioPanelType.setFont(font)
self.w.labelPrioPanelType.setAlignment(QtCore.Qt.AlignCenter)
self.w.labelPrioPanelType.setObjectName('labelPrioPanelType')
self.w.verticalLayout_81.addWidget(self.w.labelPrioPanelType)
self.w.comboPrioPanelType = QtWidgets.QComboBox(self.w.groupPanel)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.Minimum)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.w.comboPrioPanelType.sizePolicy().hasHeightForWidth())
self.w.comboPrioPanelType.setSizePolicy(sizePolicy)
self.w.comboPrioPanelType.setMinimumSize(QtCore.QSize(0, 30))
font = QtGui.QFont()
font.setWeight(50)
font.setBold(False)
self.w.comboPrioPanelType.setFont(font)
self.w.comboPrioPanelType.setObjectName('comboPrioPanelType')
self.w.verticalLayout_81.addWidget(self.w.comboPrioPanelType)
self.w.horizontalLayout_21.addLayout(self.w.verticalLayout_81)
self.w.horizontalLayout_21.setStretch(0, 1)
self.w.verticalLayout_6.addLayout(self.w.horizontalLayout_21)
self.w.horizontalLayout_22 = QtWidgets.QHBoxLayout()
self.w.horizontalLayout_22.setSpacing(10)
self.w.horizontalLayout_22.setObjectName('horizontalLayout_22')
self.w.verticalLayout_20 = QtWidgets.QVBoxLayout()
self.w.verticalLayout_20.setSpacing(2)
self.w.verticalLayout_20.setObjectName('verticalLayout_20')
self.w.labelCells = QtWidgets.QLabel(self.w.groupPanel)
font = QtGui.QFont()
font.setPointSize(11)
font.setWeight(50)
font.setBold(False)
self.w.labelCells.setFont(font)
self.w.labelCells.setAlignment(QtCore.Qt.AlignLeading | QtCore.Qt.AlignLeft | QtCore.Qt.AlignVCenter)
self.w.labelCells.setObjectName('labelCells')
self.w.verticalLayout_20.addWidget(self.w.labelCells)
self.w.buttonCells = CheckableMenuButton(self.w.groupPanel)
self.w.buttonCells.setMinimumSize(QtCore.QSize(0, 30))
font = QtGui.QFont()
font.setWeight(50)
font.setBold(False)
self.w.buttonCells.setFont(font)
self.w.buttonCells.setText('')
self.w.buttonCells.setObjectName('buttonCells')
self.w.verticalLayout_20.addWidget(self.w.buttonCells)
self.w.horizontalLayout_22.addLayout(self.w.verticalLayout_20)
self.w.verticalLayout_82 = QtWidgets.QVBoxLayout()
self.w.verticalLayout_82.setSpacing(2)
self.w.verticalLayout_82.setObjectName('verticalLayout_82')
self.w.labelPrioCells = QtWidgets.QLabel(self.w.groupPanel)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.Minimum)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.w.labelPrioCells.sizePolicy().hasHeightForWidth())
self.w.labelPrioCells.setSizePolicy(sizePolicy)
font = QtGui.QFont()
font.setPointSize(11)
font.setWeight(50)
font.setBold(False)
self.w.labelPrioCells.setFont(font)
self.w.labelPrioCells.setAlignment(QtCore.Qt.AlignCenter)
self.w.labelPrioCells.setObjectName('labelPrioCells')
self.w.verticalLayout_82.addWidget(self.w.labelPrioCells)
self.w.comboPrioCells = QtWidgets.QComboBox(self.w.groupPanel)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.Minimum)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.w.comboPrioCells.sizePolicy().hasHeightForWidth())
self.w.comboPrioCells.setSizePolicy(sizePolicy)
self.w.comboPrioCells.setMinimumSize(QtCore.QSize(0, 30))
font = QtGui.QFont()
font.setWeight(50)
font.setBold(False)
self.w.comboPrioCells.setFont(font)
self.w.comboPrioCells.setObjectName('comboPrioCells')
self.w.verticalLayout_82.addWidget(self.w.comboPrioCells)
self.w.horizontalLayout_22.addLayout(self.w.verticalLayout_82)
self.w.horizontalLayout_22.setStretch(0, 1)
self.w.verticalLayout_6.addLayout(self.w.horizontalLayout_22)
self.w.horizontalLayout_31 = QtWidgets.QHBoxLayout()
self.w.horizontalLayout_31.setSpacing(10)
self.w.horizontalLayout_31.setObjectName('horizontalLayout_31')
self.w.verticalLayout_21 = QtWidgets.QVBoxLayout()
self.w.verticalLayout_21.setSpacing(2)
self.w.verticalLayout_21.setObjectName('verticalLayout_21')
self.w.labelPanelPower = QtWidgets.QLabel(self.w.groupPanel)
font = QtGui.QFont()
font.setPointSize(11)
font.setWeight(50)
font.setBold(False)
self.w.labelPanelPower.setFont(font)
self.w.labelPanelPower.setAlignment(QtCore.Qt.AlignLeading | QtCore.Qt.AlignLeft | QtCore.Qt.AlignVCenter)
self.w.labelPanelPower.setObjectName('labelPanelPower')
self.w.verticalLayout_21.addWidget(self.w.labelPanelPower)
self.w.spinPanelPower = NoWheelSpinBox(self.w.groupPanel)
self.w.spinPanelPower.setMinimumSize(QtCore.QSize(0, 30))
font = QtGui.QFont()
font.setPointSize(12)
font.setWeight(50)
font.setBold(False)
self.w.spinPanelPower.setFont(font)
self.w.spinPanelPower.setFocusPolicy(QtCore.Qt.StrongFocus)
self.w.spinPanelPower.setMaximum(999999999)
self.w.spinPanelPower.setObjectName('spinPanelPower')
self.w.verticalLayout_21.addWidget(self.w.spinPanelPower)
self.w.horizontalLayout_31.addLayout(self.w.verticalLayout_21)
self.w.verticalLayout_84 = QtWidgets.QVBoxLayout()
self.w.verticalLayout_84.setSpacing(2)
self.w.verticalLayout_84.setObjectName('verticalLayout_84')
self.w.labelPrioPanelPower = QtWidgets.QLabel(self.w.groupPanel)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.Minimum)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.w.labelPrioPanelPower.sizePolicy().hasHeightForWidth())
self.w.labelPrioPanelPower.setSizePolicy(sizePolicy)
font = QtGui.QFont()
font.setPointSize(11)
font.setWeight(50)
font.setBold(False)
self.w.labelPrioPanelPower.setFont(font)
self.w.labelPrioPanelPower.setAlignment(QtCore.Qt.AlignCenter)
self.w.labelPrioPanelPower.setObjectName('labelPrioPanelPower')
self.w.verticalLayout_84.addWidget(self.w.labelPrioPanelPower)
self.w.comboPrioPanelPower = QtWidgets.QComboBox(self.w.groupPanel)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.Minimum)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.w.comboPrioPanelPower.sizePolicy().hasHeightForWidth())
self.w.comboPrioPanelPower.setSizePolicy(sizePolicy)
self.w.comboPrioPanelPower.setMinimumSize(QtCore.QSize(0, 30))
font = QtGui.QFont()
font.setWeight(50)
font.setBold(False)
self.w.comboPrioPanelPower.setFont(font)
self.w.comboPrioPanelPower.setObjectName('comboPrioPanelPower')
self.w.verticalLayout_84.addWidget(self.w.comboPrioPanelPower)
self.w.horizontalLayout_31.addLayout(self.w.verticalLayout_84)
self.w.horizontalLayout_31.setStretch(0, 1)
self.w.verticalLayout_6.addLayout(self.w.horizontalLayout_31)
self.w.horizontalLayout_32 = QtWidgets.QHBoxLayout()
self.w.horizontalLayout_32.setSpacing(10)
self.w.horizontalLayout_32.setObjectName('horizontalLayout_32')
self.w.verticalLayout_29 = QtWidgets.QVBoxLayout()
self.w.verticalLayout_29.setSpacing(2)
self.w.verticalLayout_29.setObjectName('verticalLayout_29')
self.w.labelEfficiency = QtWidgets.QLabel(self.w.groupPanel)
font = QtGui.QFont()
font.setPointSize(11)
font.setWeight(50)
font.setBold(False)
self.w.labelEfficiency.setFont(font)
self.w.labelEfficiency.setAlignment(QtCore.Qt.AlignLeading | QtCore.Qt.AlignLeft | QtCore.Qt.AlignVCenter)
self.w.labelEfficiency.setObjectName('labelEfficiency')
self.w.verticalLayout_29.addWidget(self.w.labelEfficiency)
self.w.spinEfficiency = NoWheelDoubleSpinBox(self.w.groupPanel)
self.w.spinEfficiency.setMinimumSize(QtCore.QSize(0, 30))
font = QtGui.QFont()
font.setPointSize(12)
font.setWeight(50)
font.setBold(False)
self.w.spinEfficiency.setFont(font)
self.w.spinEfficiency.setFocusPolicy(QtCore.Qt.StrongFocus)
self.w.spinEfficiency.setDecimals(2)
self.w.spinEfficiency.setMaximum(100.0)
self.w.spinEfficiency.setObjectName('spinEfficiency')
self.w.verticalLayout_29.addWidget(self.w.spinEfficiency)
self.w.horizontalLayout_32.addLayout(self.w.verticalLayout_29)
self.w.verticalLayout_85 = QtWidgets.QVBoxLayout()
self.w.verticalLayout_85.setSpacing(2)
self.w.verticalLayout_85.setObjectName('verticalLayout_85')
self.w.labelPrioEfficiency = QtWidgets.QLabel(self.w.groupPanel)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.Minimum)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.w.labelPrioEfficiency.sizePolicy().hasHeightForWidth())
self.w.labelPrioEfficiency.setSizePolicy(sizePolicy)
font = QtGui.QFont()
font.setPointSize(11)
font.setWeight(50)
font.setBold(False)
self.w.labelPrioEfficiency.setFont(font)
self.w.labelPrioEfficiency.setAlignment(QtCore.Qt.AlignCenter)
self.w.labelPrioEfficiency.setObjectName('labelPrioEfficiency')
self.w.verticalLayout_85.addWidget(self.w.labelPrioEfficiency)
self.w.comboPrioEfficiency = QtWidgets.QComboBox(self.w.groupPanel)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.Minimum)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.w.comboPrioEfficiency.sizePolicy().hasHeightForWidth())
self.w.comboPrioEfficiency.setSizePolicy(sizePolicy)
self.w.comboPrioEfficiency.setMinimumSize(QtCore.QSize(0, 30))
font = QtGui.QFont()
font.setWeight(50)
font.setBold(False)
self.w.comboPrioEfficiency.setFont(font)
self.w.comboPrioEfficiency.setObjectName('comboPrioEfficiency')
self.w.verticalLayout_85.addWidget(self.w.comboPrioEfficiency)
self.w.horizontalLayout_32.addLayout(self.w.verticalLayout_85)
self.w.horizontalLayout_32.setStretch(0, 1)
self.w.verticalLayout_6.addLayout(self.w.horizontalLayout_32)
self.w.horizontalLayout_33 = QtWidgets.QHBoxLayout()
self.w.horizontalLayout_33.setSpacing(10)
self.w.horizontalLayout_33.setObjectName('horizontalLayout_33')
self.w.groupTolerance = QtWidgets.QGroupBox(self.w.groupPanel)
font = QtGui.QFont()
font.setPointSize(11)
font.setWeight(50)
font.setBold(False)
self.w.groupTolerance.setFont(font)
self.w.groupTolerance.setObjectName('groupTolerance')
self.w.verticalLayout_8 = QtWidgets.QVBoxLayout(self.w.groupTolerance)
self.w.verticalLayout_8.setObjectName('verticalLayout_8')
self.w.horizontalLayout_8 = QtWidgets.QHBoxLayout()
self.w.horizontalLayout_8.setObjectName('horizontalLayout_8')
self.w.radioPositiveTolerance = QtWidgets.QRadioButton(self.w.groupTolerance)
font = QtGui.QFont()
font.setPointSize(12)
font.setWeight(50)
font.setBold(False)
self.w.radioPositiveTolerance.setFont(font)
self.w.radioPositiveTolerance.setChecked(True)
self.w.radioPositiveTolerance.setObjectName('radioPositiveTolerance')
self.w.buttonGroup = QtWidgets.QButtonGroup(self.w.centralWidget)
self.w.buttonGroup.setObjectName('buttonGroup')
self.w.buttonGroup.addButton(self.w.radioPositiveTolerance)
self.w.horizontalLayout_8.addWidget(self.w.radioPositiveTolerance)
self.w.radioNegativeTolerance = QtWidgets.QRadioButton(self.w.groupTolerance)
font = QtGui.QFont()
font.setPointSize(12)
font.setWeight(50)
font.setBold(False)
self.w.radioNegativeTolerance.setFont(font)
self.w.radioNegativeTolerance.setObjectName('radioNegativeTolerance')
self.w.buttonGroup.addButton(self.w.radioNegativeTolerance)
self.w.horizontalLayout_8.addWidget(self.w.radioNegativeTolerance)
self.w.verticalLayout_8.addLayout(self.w.horizontalLayout_8)
self.w.horizontalLayout_33.addWidget(self.w.groupTolerance)
self.w.verticalLayout_86 = QtWidgets.QVBoxLayout()
self.w.verticalLayout_86.setSpacing(2)
self.w.verticalLayout_86.setObjectName('verticalLayout_86')
self.w.labelPrioTolerance = QtWidgets.QLabel(self.w.groupPanel)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.Minimum)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.w.labelPrioTolerance.sizePolicy().hasHeightForWidth())
self.w.labelPrioTolerance.setSizePolicy(sizePolicy)
font = QtGui.QFont()
font.setPointSize(11)
font.setWeight(50)
font.setBold(False)
self.w.labelPrioTolerance.setFont(font)
self.w.labelPrioTolerance.setAlignment(QtCore.Qt.AlignCenter)
self.w.labelPrioTolerance.setObjectName('labelPrioTolerance')
self.w.verticalLayout_86.addWidget(self.w.labelPrioTolerance)
self.w.comboPrioTolerance = QtWidgets.QComboBox(self.w.groupPanel)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.Minimum)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.w.comboPrioTolerance.sizePolicy().hasHeightForWidth())
self.w.comboPrioTolerance.setSizePolicy(sizePolicy)
self.w.comboPrioTolerance.setMinimumSize(QtCore.QSize(0, 30))
font = QtGui.QFont()
font.setWeight(50)
font.setBold(False)
self.w.comboPrioTolerance.setFont(font)
self.w.comboPrioTolerance.setObjectName('comboPrioTolerance')
self.w.verticalLayout_86.addWidget(self.w.comboPrioTolerance)
spacerItem4 = QtWidgets.QSpacerItem(0, 40, QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.Expanding)
self.w.verticalLayout_86.addItem(spacerItem4)
self.w.horizontalLayout_33.addLayout(self.w.verticalLayout_86)
self.w.horizontalLayout_33.setStretch(0, 1)
self.w.verticalLayout_6.addLayout(self.w.horizontalLayout_33)
self.w.horizontalLayout_34 = QtWidgets.QHBoxLayout()
self.w.horizontalLayout_34.setSpacing(10)
self.w.horizontalLayout_34.setObjectName('horizontalLayout_34')
self.w.verticalLayout_32 = QtWidgets.QVBoxLayout()
self.w.verticalLayout_32.setSpacing(2)
self.w.verticalLayout_32.setObjectName('verticalLayout_32')
self.w.labelWarrantyProduct = QtWidgets.QLabel(self.w.groupPanel)
font = QtGui.QFont()
font.setPointSize(11)
font.setWeight(50)
font.setBold(False)
self.w.labelWarrantyProduct.setFont(font)
self.w.labelWarrantyProduct.setAlignment(QtCore.Qt.AlignLeading | QtCore.Qt.AlignLeft | QtCore.Qt.AlignVCenter)
self.w.labelWarrantyProduct.setObjectName('labelWarrantyProduct')
self.w.verticalLayout_32.addWidget(self.w.labelWarrantyProduct)
self.w.spinWarrantyProduct = NoWheelSpinBox(self.w.groupPanel)
self.w.spinWarrantyProduct.setMinimumSize(QtCore.QSize(0, 30))
font = QtGui.QFont()
font.setPointSize(12)
font.setWeight(50)
font.setBold(False)
self.w.spinWarrantyProduct.setFont(font)
self.w.spinWarrantyProduct.setFocusPolicy(QtCore.Qt.StrongFocus)
self.w.spinWarrantyProduct.setMaximum(9999)
self.w.spinWarrantyProduct.setObjectName('spinWarrantyProduct')
self.w.verticalLayout_32.addWidget(self.w.spinWarrantyProduct)
self.w.horizontalLayout_34.addLayout(self.w.verticalLayout_32)
self.w.verticalLayout_88 = QtWidgets.QVBoxLayout()
self.w.verticalLayout_88.setSpacing(2)
self.w.verticalLayout_88.setObjectName('verticalLayout_88')
self.w.labelPrioWarrantyProduct = QtWidgets.QLabel(self.w.groupPanel)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.Minimum)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.w.labelPrioWarrantyProduct.sizePolicy().hasHeightForWidth())
self.w.labelPrioWarrantyProduct.setSizePolicy(sizePolicy)
font = QtGui.QFont()
font.setPointSize(11)
font.setWeight(50)
font.setBold(False)
self.w.labelPrioWarrantyProduct.setFont(font)
self.w.labelPrioWarrantyProduct.setAlignment(QtCore.Qt.AlignCenter)
self.w.labelPrioWarrantyProduct.setObjectName('labelPrioWarrantyProduct')
self.w.verticalLayout_88.addWidget(self.w.labelPrioWarrantyProduct)
self.w.comboPrioWarrantyProduct = QtWidgets.QComboBox(self.w.groupPanel)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.Minimum)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.w.comboPrioWarrantyProduct.sizePolicy().hasHeightForWidth())
self.w.comboPrioWarrantyProduct.setSizePolicy(sizePolicy)
self.w.comboPrioWarrantyProduct.setMinimumSize(QtCore.QSize(0, 30))
font = QtGui.QFont()
font.setWeight(50)
font.setBold(False)
self.w.comboPrioWarrantyProduct.setFont(font)
self.w.comboPrioWarrantyProduct.setObjectName('comboPrioWarrantyProduct')
self.w.verticalLayout_88.addWidget(self.w.comboPrioWarrantyProduct)
self.w.horizontalLayout_34.addLayout(self.w.verticalLayout_88)
self.w.horizontalLayout_34.setStretch(0, 1)
self.w.verticalLayout_6.addLayout(self.w.horizontalLayout_34)
self.w.horizontalLayout_35 = QtWidgets.QHBoxLayout()
self.w.horizontalLayout_35.setSpacing(10)
self.w.horizontalLayout_35.setObjectName('horizontalLayout_35')
self.w.verticalLayout_33 = QtWidgets.QVBoxLayout()
self.w.verticalLayout_33.setSpacing(2)
self.w.verticalLayout_33.setObjectName('verticalLayout_33')
self.w.labelWarrantyPerformance = QtWidgets.QLabel(self.w.groupPanel)
font = QtGui.QFont()
font.setPointSize(11)
font.setWeight(50)
font.setBold(False)
self.w.labelWarrantyPerformance.setFont(font)
self.w.labelWarrantyPerformance.setAlignment(
QtCore.Qt.AlignLeading | QtCore.Qt.AlignLeft | QtCore.Qt.AlignVCenter)
self.w.labelWarrantyPerformance.setObjectName('labelWarrantyPerformance')
self.w.verticalLayout_33.addWidget(self.w.labelWarrantyPerformance)
self.w.spinWarrantyPerformance = NoWheelSpinBox(self.w.groupPanel)
self.w.spinWarrantyPerformance.setMinimumSize(QtCore.QSize(0, 30))
font = QtGui.QFont()
font.setPointSize(12)
font.setWeight(50)
font.setBold(False)
self.w.spinWarrantyPerformance.setFont(font)
self.w.spinWarrantyPerformance.setFocusPolicy(QtCore.Qt.StrongFocus)
self.w.spinWarrantyPerformance.setMaximum(9999)
self.w.spinWarrantyPerformance.setObjectName('spinWarrantyPerformance')
self.w.verticalLayout_33.addWidget(self.w.spinWarrantyPerformance)
self.w.horizontalLayout_35.addLayout(self.w.verticalLayout_33)
self.w.verticalLayout_89 = QtWidgets.QVBoxLayout()
self.w.verticalLayout_89.setSpacing(2)
self.w.verticalLayout_89.setObjectName('verticalLayout_89')
self.w.labelPrioWarrantyPerformance = QtWidgets.QLabel(self.w.groupPanel)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.Minimum)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.w.labelPrioWarrantyPerformance.sizePolicy().hasHeightForWidth())
self.w.labelPrioWarrantyPerformance.setSizePolicy(sizePolicy)
font = QtGui.QFont()
font.setPointSize(11)
font.setWeight(50)
font.setBold(False)
self.w.labelPrioWarrantyPerformance.setFont(font)
self.w.labelPrioWarrantyPerformance.setAlignment(QtCore.Qt.AlignCenter)
self.w.labelPrioWarrantyPerformance.setObjectName('labelPrioWarrantyPerformance')
self.w.verticalLayout_89.addWidget(self.w.labelPrioWarrantyPerformance)
self.w.comboPrioWarrantyPerformance = | |
== '4,5,6,7':
gem5_cluster = 'bigCluster'
elif core_mask == '0,1,2,3':
gem5_cluster = 'littleCluster'
else:
raise ValueError("Unrecognised core mask!")
'''
gem5_per_cluster = make_gem5_cols_per_cluster(temp_df, gem5_cluster)
temp_df = temp_df[[x for x in temp_df.columns.values if x.find('gem5 stat') == -1]]
temp_df = pd.concat([temp_df, gem5_per_cluster], axis=1)
temp_df = temp_df._get_numeric_data().fillna(0)
temp_df = temp_df.fillna(0)
temp_df = temp_df.loc[:, (temp_df != 0).any(axis=0)] # remove 0 col
#temp_df = temp_df.loc[:, (temp_df != temp_df.ix[0]).any()]
temp_df = temp_df[[x for x in temp_df.columns.values.tolist() if not 0 in temp_df[x].tolist() ]]
temp_df['const'] = 1
# get var names:
var_names = var_select_func(temp_df)
model_inputs = ['const']
#model_inputs.append('const')
models = []
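    # Greedy forward selection: in each of the num_inputs rounds, try adding each candidate
    # event to the current model and keep the variable that gives the highest R^2.
    # models[i] is therefore the best OLS fit using (i + 1) events plus the constant term.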
for i in range(0, num_inputs):
best_r2 = 0
best_var = ''
best_model_res = 0
var_names = [x for x in var_names if x in temp_df.columns.values.tolist() and x != y_col]
for var in var_names:
dep_vars = model_inputs + [var]
logger.info("Trying with these vars: "+str(dep_vars))
#formula = ''+y_col+' ~ '+' + '.join(["Q('"+x+"')" for x in dep_vars])+' '
#print(formula)
#mod = smf.ols(formula=formula,data=temp_df)
X = temp_df[dep_vars]
y = temp_df[y_col]
#X = sm.add_constant(X) # use const
try:
res = sm.OLS(y,X).fit()
            except Exception:
logger.info("Failed when adding var: "+var)
continue
r2 = res.rsquared
#print res.summary()
if r2 > best_r2:
best_r2 = r2
best_var = var
best_model_res = res
model_inputs.append(best_var)
models.append(best_model_res)
    model_summary_df = pd.DataFrame(columns=['number_of_events', 'R2', 'adjR2', 'WAPE', 'SER'])
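    # One summary row per model size: R^2 / adjusted R^2 from statsmodels, WAPE from the
    # wape() helper (presumably weighted absolute percentage error, defined elsewhere in
    # this module), and SER, the standard error of the regression, i.e. sqrt(model.scale).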
for i in range(0, len(models)):
model = models[i]
'''
print "\nMODEL"
print ("r2: "+str(model.rsquared))
print ("params: "+(str(model.params)))
print ("Predict:")
print model.predict()
print ("MAPE: "+str(mape(temp_df[y_col],model.predict()).mean()))
print ("MAPE: ")
print mape(temp_df[y_col], model.predict())
print ("Actual: ")
print temp_df[y_col]
print "Predicted:"
print model.predict()
print "WAPE:"
print wape(temp_df[y_col],model.predict())
print model.summary()
'''
logger.info("Creating model summary DF for model "+str(i))
model_summary_df = model_summary_df.append({
'number_of_events' : i,
'R2' : model.rsquared,
'adjR2' : model.rsquared_adj,
'WAPE' : wape(temp_df[y_col],model.predict()),
'SER' : math.sqrt(model.scale)
},ignore_index=True)
#params_df = pd.concat([model.params, model.pvalues], axis=1)
params_df = pd.DataFrame(columns=['Name', 'Value', 'p-Value'])
params_df['Name'] = model.params.index
params_df['Value'] = model.params.tolist()
params_df['p-Value'] = model.pvalues.tolist()
params_df['pretty name'] = params_df['Name'].apply(lambda x: pmcs_and_gem5_stats.get_lovely_pmc_name(x,'a15')+' (total)' if x.find('total diff') > -1 else pmcs_and_gem5_stats.get_lovely_pmc_name(x,'a15')+' (rate)')
params_df.to_csv(filepath_prefix+'-model-'+str(i)+'.csv',sep='\t')
if i == len(models) -1:
params_df.to_csv(filepath_prefix+'-model-final.csv',sep='\t')
#model.params.append(model.pvalues).to_csv(filepath_prefix+'-model-'+str(i)+'.csv',sep='\t')
model_summary_df.to_csv(filepath_prefix+'-model-summary'+'.csv',sep='\t')
if __name__=='__main__':
import argparse
import os
import sys
parser = argparse.ArgumentParser()
parser.add_argument('--clean', dest='clean', required=False, action='store_true')
parser.set_defaults(clean=False)
parser.add_argument('-i', '--input', dest='input_file_path', required=True, \
help="The stats df on which to apply the formulae")
parser.add_argument('-m', '--core-mask', dest='core_mask', required=True, \
help="Selects the correct cluster (e.g. A7 or A15) from the core "+ \
"mask. E.g.: '4,5,6,7'. For multiple core masks, divide " + \
"list with '#'. E.g. '0,1,2,3#4,5,6,7'")
parser.add_argument('-c', '--cluster_labels', dest='cluster_labels', required=True, \
help="Labels the core masks. E.g. 'a7' or 'a7#a15'")
parser.add_argument('-g', '--gem5-cluster_labels', dest='gem5_cluster_labels', required=True, \
help="The gem5 cluster name labels E.g. 'littleCluster' or 'littleCluster#bigCluster'")
    parser.add_argument('--focus-core-mask', dest='workload_cluster_core_mask', required=False, \
default='4,5,6,7', \
help="Specifies the core mask to use for workload clustering")
parser.add_argument('--focus-freq', dest='workload_cluster_freq', required=False, \
default=1000.0, \
help="Specifies the CPU frequency (MHz) to use for workload clustering")
parser.add_argument('--focus-cluster-label', dest='workload_cluster_cluster_label', required=False, \
default='a15', \
help="Specifies the cluster (e.g. a15) to use for workload clustering")
args=parser.parse_args()
# always clean!
logger.info("Cleaning...")
clean_dir = os.path.dirname(args.input_file_path)
# check if input file is valid
if not os.path.isfile(args.input_file_path):
raise ValueError("The supplied input file ("+args.input_file_path+") does not exist!")
input_filename = (os.path.basename(args.input_file_path))
logger.info("Removing all analysis files from "+clean_dir+" except "+input_filename)
files_to_delete = [x for x in os.listdir(clean_dir) if x != input_filename]
logger.info("Not deleting: "+str([x for x in os.listdir(clean_dir) if x not in files_to_delete]))
#logger.info("DELETING: "+str(files_to_delete))
for f in files_to_delete:
del_path = os.path.join(clean_dir,f)
logger.info("Deleting: "+del_path)
os.remove(del_path)
logger.info("Finished clean")
if args.clean:
sys.exit(0)
df = pd.read_csv(args.input_file_path,sep='\t')
convert_old_names_to_new(df)
core_masks = args.core_mask.split('#')
cluster_labels = args.cluster_labels.split('#')
gem5_cluster_labels = args.gem5_cluster_labels.split('#')
if len(core_masks) != len(cluster_labels):
raise ValueError("The number of core masks and cluster labels must be the same!")
if len(gem5_cluster_labels) != len(cluster_labels):
raise ValueError("The number of gem5 and hw cluster labels must be the same!")
clusters_and_labels_string = 'Clusters and labels: '
for i in range(0, len(core_masks)):
clusters_and_labels_string += cluster_labels[i] + ':'+core_masks[i]+':'+gem5_cluster_labels[i]+', '
#print(clusters_and_labels_string)
old_cols = df.columns.values.tolist()
logger.info("Creating HW cluster averages")
for i in range(0, len(core_masks)):
create_hw_cluster_average(df,core_masks[i],cluster_labels[i])
logger.info("Applying gem5 formulae (gem5-stats.equations)")
apply_formulae(df,'gem5-stats.equations',ignore_fails=True)
# find the most common number of appearances of workloads
most_common_workload_appearance = df['hw stat workload name'].value_counts().tolist()
workload_appearance_mode = max(set(most_common_workload_appearance),key=most_common_workload_appearance.count)
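# Illustrative example (values made up): if value_counts() gives [3, 3, 2],
# the mode of the appearance counts is 3, and any workload that appears a
# different number of times is filtered out below.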
logger.info("Workload appearance mode: "+str(workload_appearance_mode))
workloads_to_remove = [x for x in df['hw stat workload name'].unique() if df['hw stat workload name'].value_counts()[x] != workload_appearance_mode]
logger.info("Workloads to remove:"+str(workloads_to_remove))
df = df[~df['hw stat workload name'].isin(workloads_to_remove)]
# now remove workloads that are unsuitable for performance evaluation (e.g. 'rl-')
df = df[df['hw stat workload name'].str.contains('rl-') == False]
rename_workloads(df)
#logger.info("The following workloads are in the data: "+str(df['hw stat workload name'].tolist()))
logger.info("There are "+str(len(df['hw stat workload name'].tolist()))+" workloads in the data")
logger.info("Saving the '-applied_formulae.csv' file...")
df.to_csv(args.input_file_path+'-applied_formulae.csv',sep='\t')
'''
clusters_and_labels_string = 'Clusters and labels: '
for i in range(0, len(core_masks)):
clusters_and_labels_string += cluster_labels[i] + ':'+core_masks[i]+', '
print(clusters_and_labels_string)
old_cols = df.columns.values.tolist()
for i in range(0, len(core_masks)):
create_hw_cluster_average(df,core_masks[i],cluster_labels[i])
apply_formulae(df,'gem5-stats.equations',ignore_fails=True)
# find the most common number of appearances of workloads
most_common_workload_appearance = df['hw stat workload name'].value_counts().tolist()
workload_appearance_mode = max(set(most_common_workload_appearance),key=most_common_workload_appearance.count)
print("Workload appearance mode: "+str(workload_appearance_mode))
workloads_to_remove = [x for x in df['hw stat workload name'].unique() if df['hw stat workload name'].value_counts()[x] != workload_appearance_mode]
print ("Workloads to remove:"+str(workloads_to_remove))
df = df[~df['hw stat workload name'].isin(workloads_to_remove)]
# now remove workloads that are unsuitable for performance evaluation (e.g. 'rl-')
df = df[df['hw stat workload name'].str.contains('rl-') == False]
rename_workloads(df)
print("The following workloads are in the data: ")
print(df['hw stat workload name'].tolist())
df.to_csv(args.input_file_path+'-applied_formulae.csv',sep='\t')
'''
new_cols_only = [x for x in df.columns.values if x not in old_cols]
condensed_df = df[important_cols + new_cols_only]
#print df[important_cols + new_cols_only]
condensed_df.to_csv(args.input_file_path+'-applied_formulae.csv'+'-condensed.csv',sep='\t')
# do workload clustering
logger.info("Cluserting workloads")
workload_clustering_df = cluster_workloads(df[(df['hw stat core mask'] == args.workload_cluster_core_mask) & (df['hw stat Freq (MHz) C'+args.workload_cluster_core_mask.split(',')[0]+''] == args.workload_cluster_freq)] , args.workload_cluster_cluster_label, args.input_file_path+'-graph' )
#print workload_clustering_df
wl_cluster_name = 'cluster 1' # which cluster to analyse
logger.info("Creating output file for clustered workload MPE plots")
workload_clustering_df = create_exec_time_err_and_wl_cluster_plots(workload_clustering_df, wl_cluster_name, args.input_file_path)
# for adding the clusters numbers to the main df (added for the power modelling)
logger.info("Applying clusters to full dataframe")
df['workload clusters'] = df['hw stat workload name'].apply(lambda x: workload_clustering_df[workload_clustering_df['wl name'] == x][wl_cluster_name].iloc[0])
#print df
df.to_csv(args.input_file_path+'-with-formulae-and-clusters.csv',sep='\t')
logger.info("Creating file of basic settings")
# create simple df with focus settings
settings_df = pd.DataFrame(columns=[
'input filename',
'number of workloads',
'coremask-freq settings',
'focus frequency (MHz)' ,
'focus core mask',
'focus cluster label'
])
freq_cols = [x for x in df.columns.values if x.find('hw stat Freq (MHz)') > -1]
cols_to_unique = ['hw stat core mask'] + freq_cols
#print (cols_to_unique)
#df['coremask freq'] = df.apply(lambda row: '-'.join([x+'_'+str(row[x]) for x in cols_to_unique]), axis=1)
df['coremask freq'] = df.apply(lambda row: '-'.join([str(row[x]) for x in cols_to_unique]), axis=1)
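# Illustrative result (values made up): a row with core mask '4,5,6,7' and
# 1000.0 MHz on each core becomes a 'coremask freq' string along the lines of
# '4,5,6,7-1000.0-1000.0-1000.0-1000.0'.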
unique_coremask_freq = ' ## '.join(df['coremask freq'].unique().tolist())
settings_df = settings_df.append({
'input filename' : args.input_file_path,
'number of workloads' : len(df['hw stat workload name'].unique().tolist()),
'coremask-freq settings' : str(unique_coremask_freq),
'focus frequency (MHz)' : str(args.workload_cluster_freq),
'focus core mask' : str(args.workload_cluster_core_mask),
'focus cluster label' : str(args.workload_cluster_cluster_label)
},ignore_index=True)
settings_df.to_csv(args.input_file_path+'-settings.csv', sep='\t')
logger.info("Processing each core mask at a time...")
for i in range(0, len(core_masks)):
logger.info("Processing core mask: "+core_masks[i])
cur_core_mask = core_masks[i]
cur_cluster_label = cluster_labels[i]
cur_gem5_cluster_label = gem5_cluster_labels[i]
cur_first_core = core_masks[i].split(',')[0]
run_validate_on_cluster(df[df['hw stat core mask'] == cur_core_mask],cur_core_mask,cur_cluster_label,cur_first_core,cur_gem5_cluster_label,args.input_file_path+'-'+cur_cluster_label+'-')
logger.info("Complete")
sys.exit()
#find_stats_per_group(df)
#print df[important_cols + ['gem5new clock tick diff A15'] + ['gem5new A15 cycle count diff total'] + ['gem5new A15 active cycles per cycle'] + ['xu3gem5 A15 cycle count total signed err'] + ['xu3gemt A15 cycle count no idle total signed err']]
# remove roy from all!!!
#df = df[[x for x in df['xu3 stat workload name'] if x.find('rl-') == -1 ]]
# print average abs and signed errors:
workloads_to_error = [x for x in df['hw stat workload name'].unique().tolist() if x.find('rl-') == -1]
err_df = df[df['hw stat workload name'].isin(workloads_to_error)]
print(err_df['hw stat workload name'].tolist())
print("No. unique workloads: "+str(len(err_df['hw stat workload name'].unique())))
parsec_wls = [x for x in err_df['hw stat workload name'].unique() if x.find('parsec') > -1]
print("Parsec workloads: "+str(parsec_wls))
print("ALL PARSEC abs: "+str(err_df[err_df['hw stat workload name'].isin(parsec_wls)]['hwgem5 duration pc err'].mean()))
print("ALL abs: "+str(err_df['hwgem5 duration pc err'].mean()))
print("ALL signed: "+str(err_df['hwgem5 duration signed err'].mean()))
print("A15 abs: "+str(err_df[err_df['hw stat core mask'] == '4,5,6,7']['hwgem5 duration pc err'].mean()))
print("A15 signed: "+str(err_df[err_df['hw stat core mask'] == '4,5,6,7']['hwgem5 duration signed err'].mean()))
print("A7 abs: "+str(err_df[err_df['hw stat core mask'] == '0,1,2,3']['hwgem5 duration pc err'].mean()))
print("A7 signed: "+str(err_df[err_df['hw stat core mask'] == '0,1,2,3']['hwgem5 duration signed err'].mean()))
# Source repository: Robbybp/IDAES-CLC
"""
Base for IDAES process model objects.
"""
from __future__ import division # No integer division
from __future__ import print_function # Python 3 style print
from pyomo.environ import *
from pyomo.dae import *
from pyomo.core.base.block import SimpleBlock, IndexedBlock, _BlockData
from pyomo.core.base.external import AMPLExternalFunction
from pyomo.core.base.connector import ConnectorExpander
# import copy
import os
import sys
from datetime import datetime
import networkx as nx
import weakref
import itertools
import pyomo.environ as pe
from six import itervalues, iteritems, string_types
from bunch import Bunch
from .util.oa import apply_OA, add_oa_constraints
from .util.solve import SolverError, get_solvers
from .util.idjson import save_json, load_json
from .util.var import Wrapper
#from pyomo.core.base.indexed_component import _IndexedComponent_slice
from .util import idjson2 as j2
# Some more information about this module
__author__ = "<NAME> <<EMAIL>>, "\
"<NAME> <<EMAIL>>"
__version__ = "1.0.0"
try:
import matplotlib.pyplot as plt
except:
plt = None
__all__ = ['ProcessBase', 'ProcessBlock']
# Reserved keywords that go to the Block constructor. 'name' is also a block
# keyword, but I'm stealing it, so it's not on the list.
_block_kwds = ('rule', 'options', 'concrete', 'ctype', 'noruleinit', 'doc')
def ProcBlock(name):
def ProcBlockDec(cls):
c = type(name, (ProcessBlock,), {"_data_class":cls, "__module__":cls.__module__})
setattr(sys.modules[cls.__module__], name, c)
return cls
return ProcBlockDec
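# Illustrative (hypothetical) use of the ProcBlock decorator -- the class names
# below are made up for the example and are not part of this module:
#
#   @ProcBlock("Heater")
#   class _Heater(ProcessBase):
#       def build(self):
#           ...
#
# This registers a new ProcessBlock subclass named ``Heater`` in the module that
# defines _Heater, so models can be written as ``m.h1 = Heater()`` while the
# equations live in the _Heater data class.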
class _InitMethod():
"""Class contains a function and documentation string for an intialization
method.
"""
def __init__(self, func, doc=""):
"""
Args:
            func: a function to perform a model initialization routine
doc: a description of the initialization routine
"""
self.func = func
self.doc = doc
self.long_doc = ""
class CallableDict(dict):
"""
A dictionary that returns itself when called. Behaves like a weakref would.
The reason I have this is so I can treat a dictionary containing Pyomo vars
the same as a weakref to an indexed Pyomo var. It helps with dealing with
links to Pyomo variable slices
"""
def __call__(self):
return self
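# Minimal sketch of why CallableDict can stand in for a weakref (illustrative):
#   d = CallableDict()
#   d['inlet'] = some_var        # hypothetical Pyomo Var
#   d() is d                     # True, so code written for ``ref()`` also works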
class ExternalFunction(AMPLExternalFunction):
"""
Workaround for a bug in Pyomo. DAE transformation complains of missing
    dim() method for ExternalFunction objects; the -1 return value makes it
    dodge some stuff in the DAE transformation code. I'm assuming dim isn't
going to cause problems elsewhere. DAE was the only thing that missed it.
"""
# TODO: remove when Pyomo is fixed. <JCE>
def dim(self):
return -1
def _pop_nonblock(dct):
"""
Split a kwargs dict into kwargs meant for block and args meant for the thing
that inherits block data.
"""
da = {}
pop_list = [key for key in dct if key not in _block_kwds]
for key in pop_list:
da[key] = dct.pop(key, {})
return da
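# Illustrative example: given
#   kwargs = {'doc': 'a unit', 'comps': ['H2O'], 'delay_construct': True}
# _pop_nonblock removes and returns {'comps': ['H2O'], 'delay_construct': True},
# leaving only the Block-constructor keywords (here just 'doc') in kwargs.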
class _IndexedProcessBlockMeta(type):
"""
This is a metaclass used to create an indexed model class
"""
def __new__(meta, name, bases, dct):
def __init__(self, *args, **kwargs):
kwargs.pop("data_class", None)
_pop_nonblock(kwargs)
Block.__init__(self, *args, **kwargs)
dct["__init__"] = __init__
return type.__new__(meta, name, bases, dct)
class _SimpleProcessBlockMeta(type):
"""
This is a metaclass used to create a simple model class
"""
def __new__(meta, name, bases, dct):
def __init__(self, *args, **kwargs):
kwargs.pop("data_class", None)
da = _pop_nonblock(kwargs)
bases[0].__init__(self, self, idx=None, **da)
kwargs["concrete"] = True
Block.__init__(self, *args, **kwargs)
self.__init2__()
self._data[None] = self
dct["__init__"] = __init__
return type.__new__(meta, name, bases, dct)
class ProcessBlock(Block):
"""
"""
def __new__(cls, *args, **kwds):
if cls.__name__.startswith('_Indexed') or \
cls.__name__.startswith('_Simple'):
return super(Block, cls).__new__(cls)
_data_args = _pop_nonblock(kwds)
if args == ():
bname = "_Simple{}".format(cls.__name__)
n = _SimpleProcessBlockMeta(bname, (cls._data_class, cls), {})
return n.__new__(n)
else:
bname = "_Indexed{}".format(cls.__name__)
n = _IndexedProcessBlockMeta(bname, (cls,), {})
o = n.__new__(n)
o._data_class = cls._data_class
o._data_args = _data_args
return o
def _default(self, idx):
return self._data.setdefault(
idx, self._data_class(self, idx=idx, **self._data_args))
def save_json(self, *args, **kwargs):
"""
Save Pyomo object state to JSON. See .util.idjson import save_json
and load_json for more details.
"""
return j2.save_json(self, *args, **kwargs)
def load_json(self, *args, **kwargs):
"""
Loads Pyomo object state from json. See .util.idjson import save_json
and load_json for more details.
"""
j2.load_json(self, *args, **kwargs)
class ProcessBase(_BlockData):
"""
This is the IDAES model base class for process model objects like
unit models, property models, reaction models, ...
These model objects construct and manage Pyomo Blocks or Indexed
Blocks
Attributes:
bstore (TYPE): Description
con_graph (TYPE): Description
constraint_types (dict): Dictionary of constraint types. Contains two
elements: 'linear' and 'nonlinear'.
delay_construct (Boolean): flag for whether construction of the class
should be delayed.
fluid_connector_elements (dict): Description
graph (TYPE): Description
link_map (dict): Dictionary storing link information between sub-units
contained within this class.
links (Block): Pyomo Block containing linking constraints between
sub-units.
model_type (str): Description
process_graph (TYPE): Description
pyomo_results (TYPE): Description
pyomo_solver (TYPE): Description
solve_time (TYPE): Description
solvers (dict): dictionary of solvers for various subproblems
units (dict): Dictionary of process units contained in this class.
"""
def __init__(self, *args, **kwargs):
"""
Initialize the object. Anything inheriting from process base
        should implement a build() function that creates the Pyomo
        variables, constraints, and whatever else is needed.
Args:
------
delay_construct: A model that inherits this can call the build
function at the end of the constructor (or not). The
delay_construct argument provides a standard way to override
the automatic call to build and manually do it later. The
purpose of delaying the call to build is to do some more
model setup before constructing the Pyomo problem. False by
default
            solver: the solver to be used in solving this Pyomo Block;
                ipopt by default. The solver and solver settings can
                be changed at any time.
"""
# Pop arguments that are used here and not to be passed on
# to the Pyomo Block constructor.
kwargs.setdefault('name', 'Unnamed_Unit_Model')
self.delay_construct = kwargs.pop("delay_construct", False)
self.unit_type = kwargs.pop('type', ['General'])
self.unit_name = kwargs.pop('name')
self.parent_unit = Wrapper(kwargs.pop('parent'))
self._idx = kwargs.pop('idx', None)
self._comps = set(kwargs.pop('comps', []))
self._init_methods = {}
self.link_map = {}
self.units = Bunch()
self.graph = nx.DiGraph()
self._built = False
_BlockData.__init__(self, *args)
#super(ProcessBase, self).__init__(self, *args)
if self.parent_component().__class__.__name__.startswith("_Indexed"):
            #if it's a simple block this gets called from Simple* init. Need
#Block init in between __init__ and __init2__
self._suppress_ctypes = self.parent_component()._suppress_ctypes
self.__init2__()
self._reg_initialize(
'default',
func=self.default_init,
doc='Do nothing')
def __init2__(self):
"""
Second part of init to add Pyomo objects, which can't be done until after
block init
"""
if isinstance(self.unit_type, string_types):
self.unit_type = [self.unit_type]
if self.parent_unit._obj is None:
self.parent_unit = None # do not wrap None
self.solvers = get_solvers()
self.links = pe.Block()
self.fetch_limits()
if not self._comps:
try:
self._comps = self.parent_unit._comps
except:
pass
self.comps = pe.Set(initialize=self._comps)
if not self.delay_construct:
self.build()
def add_weakref_links(self, **kwargs):
"""
This adds a weakref object pointing to a variable contained in another
block.
"""
for key in kwargs:
#expand slices
o = kwargs[key]
if isinstance(o, _IndexedComponent_slicer):
indexes = []
indexes2 = []
objs = []
for o2 in o:
indexes.append(o2.index())
objs.append(o2)
mask = [False]*len(indexes[0])
for j in range(len(indexes[0])):
for i in indexes:
if indexes[0][j] != i[j]:
mask[j] = True
break
d = CallableDict()
setattr(self, key, d)
for j, i in enumerate(indexes):
i = tuple(itertools.compress(i, mask))
if len(i) == 1:
i = i[0]
d[i] = objs[j]
else:
setattr(self, key, weakref.ref(o))
def var_or_expr(self, *args, **kwargs):
"""
        Creates either just an expression, or an expression plus a variable and
        constraints. This allows the problem to be constructed either with more
        variables and simpler constraints, or with fewer variables and more
        complex constraints. If using an expression only, and you want to find
        the value of some quantity after solving a model, the expression can be
        evaluated. If using variables, the expression can be evaluated in an
        initialization procedure.
        The arguments are the same as Var except:
        Args
            name: Name of variable or expression to create; if a variable, an
                Expression object (expr_name) and a constraint object (eq_name)
                will also be created.
        expr or rule must also be specified; they are the same as the Expression
        expr and rule arguments
"""
if "domain" not in kwargs:
kwargs["domain"] = Reals
name = kwargs.pop("name")
expr_only = kwargs.pop("expr_only", self.expr_only)
eq_scale = kwargs.pop("eq_scale", 1.0)
if "rule" in kwargs:
expr = Expression(*args, rule=kwargs.pop("rule"))
elif "expr" in kwargs:
expr = Expression(*args, expr=kwargs.pop("expr"))
else:
raise Exception("expr or rule required")
if expr_only:
setattr(self, name, expr)
else:
setattr(self, "expr_"+name, expr)
if "initialize" not in kwargs and not args:
kwargs["initialize"] = value(expr)
v = Var(*args, **kwargs)
setattr(self, name, v)
if "initialize" not in kwargs and args:
for i in v:
v[i].value = value(expr[i])
if not args:
setattr(self, "eq_"+name,
Constraint(expr=eq_scale*v==expr*eq_scale))
else:
def eqrule(blk, *index):
return eq_scale*v[tuple(index)]==eq_scale*expr[tuple(index)]
setattr(self, "eq_"+name, Constraint(*args, rule=eqrule))
def default_init(self):
"""
        A model initialization function that does nothing.
"""
pass
# Source repository: kat-mulberries/cjaas-sdk
# coding: utf-8
"""
Azure Functions OpenAPI Extension
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) # noqa: E501
OpenAPI spec version: 1.0.0
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import re # noqa: F401
# python 2 and python 3 compatibility library
import six
from swagger_client.api_client import ApiClient
class AccountApi(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
Ref: https://github.com/swagger-api/swagger-codegen
"""
def __init__(self, api_client=None):
if api_client is None:
api_client = ApiClient()
self.api_client = api_client
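    # Illustrative usage sketch (the token below is a placeholder, not a real
    # credential):
    #   api = AccountApi(ApiClient())
    #   apps = api.list_apps('Bearer <token>')
    #   thread = api.list_apps('Bearer <token>', async_req=True)  # async variant
    #   apps = thread.get()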
def create_app(self, authorization, **kwargs): # noqa: E501
"""Create App # noqa: E501
Create an application for a particular organization. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.create_app(authorization, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str authorization: Common Identity Bearer Token Prefix token with 'Bearer ' (required)
:return: HttpGenericObjectResponseCreateApp
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.create_app_with_http_info(authorization, **kwargs) # noqa: E501
else:
(data) = self.create_app_with_http_info(authorization, **kwargs) # noqa: E501
return data
def create_app_with_http_info(self, authorization, **kwargs): # noqa: E501
"""Create App # noqa: E501
Create an application for a particular organization. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.create_app_with_http_info(authorization, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str authorization: Common Identity Bearer Token Prefix token with 'Bearer ' (required)
:return: HttpGenericObjectResponseCreateApp
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['authorization'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method create_app" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'authorization' is set
if ('authorization' not in params or
params['authorization'] is None):
raise ValueError("Missing the required parameter `authorization` when calling `create_app`") # noqa: E501
collection_formats = {}
path_params = {}
query_params = []
header_params = {}
if 'authorization' in params:
header_params['Authorization'] = params['authorization'] # noqa: E501
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# Authentication setting
auth_settings = [] # noqa: E501
return self.api_client.call_api(
'/v1/organizations/apps', 'POST',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='HttpGenericObjectResponseCreateApp', # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def delete_app(self, authorization, **kwargs): # noqa: E501
"""Delete App # noqa: E501
Delete an application from a particular organization. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.delete_app(authorization, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str authorization: Cisco CI Bearer Token Prefix token with 'Bearer ' (required)
:return: HttpSimpleMessageObjectResponse
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.delete_app_with_http_info(authorization, **kwargs) # noqa: E501
else:
(data) = self.delete_app_with_http_info(authorization, **kwargs) # noqa: E501
return data
def delete_app_with_http_info(self, authorization, **kwargs): # noqa: E501
"""Delete App # noqa: E501
Delete an application from a particular organization. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.delete_app_with_http_info(authorization, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str authorization: Cisco CI Bearer Token Prefix token with 'Bearer ' (required)
:return: HttpSimpleMessageObjectResponse
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['authorization'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method delete_app" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'authorization' is set
if ('authorization' not in params or
params['authorization'] is None):
raise ValueError("Missing the required parameter `authorization` when calling `delete_app`") # noqa: E501
collection_formats = {}
path_params = {}
query_params = []
header_params = {}
if 'authorization' in params:
header_params['Authorization'] = params['authorization'] # noqa: E501
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# Authentication setting
auth_settings = [] # noqa: E501
return self.api_client.call_api(
'/v1/organizations/apps/{appname}', 'DELETE',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='HttpSimpleMessageObjectResponse', # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def list_apps(self, authorization, **kwargs): # noqa: E501
"""List Apps # noqa: E501
Lists Apps for a particular organization. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.list_apps(authorization, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str authorization: Cisco CI Bearer Token Prefix token with 'Bearer ' (required)
:return: HttpGenericListObjectResponseAppsDocumentSwagger
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.list_apps_with_http_info(authorization, **kwargs) # noqa: E501
else:
(data) = self.list_apps_with_http_info(authorization, **kwargs) # noqa: E501
return data
def list_apps_with_http_info(self, authorization, **kwargs): # noqa: E501
"""List Apps # noqa: E501
Lists Apps for a particular organization. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.list_apps_with_http_info(authorization, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str authorization: Cisco CI Bearer Token Prefix token with 'Bearer ' (required)
:return: HttpGenericListObjectResponseAppsDocumentSwagger
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['authorization'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method list_apps" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'authorization' is set
if ('authorization' not in params or
params['authorization'] is None):
raise ValueError("Missing the required parameter `authorization` when calling `list_apps`") # noqa: E501
collection_formats = {}
path_params = {}
query_params = []
header_params = {}
if 'authorization' in params:
header_params['Authorization'] = params['authorization'] # noqa: E501
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# Authentication setting
auth_settings = [] # noqa: E501
return self.api_client.call_api(
'/v1/organizations/apps', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='HttpGenericListObjectResponseAppsDocumentSwagger', # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def rotate_app_key(self, authorization, **kwargs): # noqa: E501
"""Rotate App Key # noqa: E501
Rotates a secret key for a particular application. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.rotate_app_key(authorization, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str authorization: Cisco CI Bearer Token Prefix token with 'Bearer ' (required)
:return: HttpGenericObjectResponseCreateApp
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.rotate_app_key_with_http_info(authorization, **kwargs) # noqa: E501
else:
(data) = self.rotate_app_key_with_http_info(authorization, **kwargs) # noqa: E501
return data
def rotate_app_key_with_http_info(self, authorization, **kwargs): # noqa: E501
"""Rotate App Key # noqa: E501
Rotates a secret key for a particular application. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.rotate_app_key_with_http_info(authorization, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str authorization: Cisco CI Bearer Token Prefix token with 'Bearer ' (required)
:return: HttpGenericObjectResponseCreateApp
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['authorization'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method rotate_app_key" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'authorization' is set
if ('authorization' not in params or
                params['authorization'] is None):
            raise ValueError("Missing the required parameter `authorization` when calling `rotate_app_key`")  # noqa: E501
{neighbor} routes',
]
def cli(self, neighbor, address_family='', vrf='', output=None):
if output is None:
# Build command
if address_family and neighbor and vrf:
cmd = self.cli_command[0].format(address_family=address_family,
neighbor=neighbor,
vrf=vrf)
elif address_family and neighbor:
cmd = self.cli_command[1].format(address_family=address_family,
neighbor=neighbor)
elif neighbor:
cmd = self.cli_command[2].format(neighbor=neighbor)
# Execute command
show_output = self.device.execute(cmd)
else:
show_output = output
# Call super
return super().cli(output=show_output, neighbor=neighbor,
address_family=address_family, vrf=vrf)
# ==================================================================
# Parser for:
# * 'show ip bgp all neighbors {neighbor} routes'
# * 'show ip bgp {address_family} all neighbors {neighbor} routes'
# ==================================================================
class ShowIpBgpAllNeighborsRoutes(ShowBgpAllNeighborsRoutesSuperParser, ShowBgpAllNeighborsRoutesSchema):
''' Parser for:
* 'show ip bgp all neighbors {neighbor} routes'
* 'show ip bgp {address_family} all neighbors {neighbor} routes'
'''
cli_command = ['show ip bgp {address_family} all neighbors {neighbor} routes',
'show ip bgp all neighbors {neighbor} routes',
]
def cli(self, neighbor, address_family='', output=None):
if output is None:
# Build command
if address_family and neighbor:
cmd = self.cli_command[0].format(address_family=address_family,
neighbor=neighbor)
else:
cmd = self.cli_command[1].format(neighbor=neighbor)
# Execute command
show_output = self.device.execute(cmd)
else:
show_output = output
# Call super
return super().cli(output=show_output, neighbor=neighbor,
address_family=address_family)
# ==============================================================
# Parser for:
# * 'show ip bgp neighbors {neighbor} routes'
# * 'show ip bgp {address_family} neighbors {neighbor} routes'
# * 'show ip bgp {address_family} vrf {vrf} neighbors {neighbor} routes'
# ==============================================================
class ShowIpBgpNeighborsRoutes(ShowBgpAllNeighborsRoutesSuperParser, ShowBgpAllNeighborsRoutesSchema):
''' Parser for:
* 'show ip bgp neighbors {neighbor} routes'
* 'show ip bgp {address_family} neighbors {neighbor} routes'
* 'show ip bgp {address_family} vrf {vrf} neighbors {neighbor} routes'
'''
cli_command = ['show ip bgp {address_family} vrf {vrf} neighbors {neighbor} routes',
'show ip bgp {address_family} neighbors {neighbor} routes',
'show ip bgp neighbors {neighbor} routes',
]
def cli(self, neighbor, address_family='', vrf='', output=None):
if output is None:
# Build command
if address_family and vrf:
cmd = self.cli_command[0].format(neighbor=neighbor,
address_family=address_family,
vrf=vrf)
elif address_family:
cmd = self.cli_command[1].format(neighbor=neighbor,
address_family=address_family)
else:
cmd = self.cli_command[2].format(neighbor=neighbor)
# Execute command
show_output = self.device.execute(cmd)
else:
show_output = output
# Call super
return super().cli(output=show_output, neighbor=neighbor,
address_family=address_family, vrf=vrf)
#-------------------------------------------------------------------------------
# ==============================
# Schema for:
# * 'show bgp all cluster-ids'
# ==============================
class ShowBgpAllClusterIdsSchema(MetaParser):
''' Schema for "show bgp all cluster-ids" '''
schema = {
'vrf':
{Any():
{Optional('cluster_id'): str,
Optional('configured_id'): str,
Optional('reflection_all_configured'): str,
Optional('reflection_intra_cluster_configured'): str,
Optional('reflection_intra_cluster_used'): str,
Optional('list_of_cluster_ids'):
{Any():
{Optional('num_neighbors'): int,
Optional('client_to_client_reflection_configured'): str,
Optional('client_to_client_reflection_used'): str,
},
},
},
},
}
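    # Illustrative shape of the parsed output (values are made up):
    #   {'vrf': {'default': {'cluster_id': '10.64.4.4',
    #                        'configured_id': '0.0.0.0',
    #                        'reflection_all_configured': 'enabled',
    #                        'list_of_cluster_ids':
    #                            {'192.168.1.1': {'num_neighbors': 2,
    #                                             'client_to_client_reflection_configured': 'disabled',
    #                                             'client_to_client_reflection_used': 'disabled'}}}}}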
# ==============================
# Parser for:
# * 'show bgp all cluster-ids'
# ==============================
class ShowBgpAllClusterIds(ShowBgpAllClusterIdsSchema):
''' Parser for "show bgp all cluster-ids" '''
cli_command = 'show bgp all cluster-ids'
def cli(self, output=None):
# find vrf names
# show vrf detail | inc \(VRF
        cmd_vrfs = r'show vrf detail | inc \(VRF'
out_vrf = self.device.execute(cmd_vrfs)
vrf_dict = {'0':'default'}
p = re.compile(r'^\s*VRF +(?P<vrf_name>[0-9a-zA-Z]+)'
' +\(+VRF +Id += +(?P<vrf_id>[0-9]+)+\)+;'
' +default +(?P<other_data>.+)$')
p1 = re.compile(r'^\s*Global +cluster-id: +(?P<cluster_id>[0-9\.]+)'
' +\(+configured: +(?P<configured>[0-9\.]+)+\)$')
p3 = re.compile(r'^\s*all +\(+inter-cluster +and +intra-cluster+\):'
' +(?P<all_configured>[a-zA-Z]+)$')
p4 = re.compile(r'^\s*intra-cluster:\s+(?P<intra_cluster_configured>[a-zA-Z]+)'
' +(?P<intra_cluster_used>[a-zA-Z]+)$')
p5 = re.compile(r'^\s*(?P<cluster_ids>[0-9\.]+)'
' +(?P<num_neighbors>[0-9]+)'
' +(?P<client_to_client_ref_configured>[a-zA-Z]+)'
' +(?P<client_to_client_ref_used>[a-zA-Z]+)$')
for line in out_vrf.splitlines():
if not line:
continue
else:
line = line.rstrip()
# VRF VRF1 (VRF Id = 1); default RD 300:1; default VPNID <not set>
m = p.match(line)
if m:
# Save variables for use later
vrf_name = str(m.groupdict()['vrf_name'])
vrf_id = str(m.groupdict()['vrf_id'])
vrf_dict[vrf_id] = vrf_name
continue
# show bgp all cluster-ids
cmd = self.cli_command
out = self.device.execute(cmd)
# Init vars
sum_dict = {}
cluster_id = None
list_of_cluster_ids = dict()
configured_id = ""
reflection_all_configured = ""
reflection_intra_cluster_configured = ""
reflection_intra_cluster_used = ""
for line in out.splitlines():
if line.strip():
line = line.rstrip()
else:
continue
# Global cluster-id: 10.64.4.4 (configured: 0.0.0.0)
m = p1.match(line)
if m:
# Save variables for use later
cluster_id = str(m.groupdict()['cluster_id'])
configured_id = str(m.groupdict()['configured'])
if 'vrf' not in sum_dict:
sum_dict['vrf'] = {}
continue
# all (inter-cluster and intra-cluster): ENABLED
m = p3.match(line)
if m:
reflection_all_configured = m.groupdict()['all_configured'].lower()
continue
# intra-cluster: ENABLED ENABLED
m = p4.match(line)
if m:
reflection_intra_cluster_configured = m.groupdict()['intra_cluster_configured'].lower()
reflection_intra_cluster_used = m.groupdict()['intra_cluster_used'].lower()
continue
# List of cluster-ids
# Cluster-id #-neighbors C2C-rfl-CFG C2C-rfl-USE
# 192.168.1.1 2 DISABLED DISABLED
m = p5.match(line)
if m:
cluster_ids = m.groupdict()['cluster_ids']
list_of_cluster_ids[cluster_ids] = cluster_ids
list_of_cluster_ids[cluster_ids] = {}
list_of_cluster_ids[cluster_ids]['num_neighbors'] = int(m.groupdict()['num_neighbors'])
list_of_cluster_ids[cluster_ids]['client_to_client_reflection_configured'] = \
m.groupdict()['client_to_client_ref_configured'].lower()
list_of_cluster_ids[cluster_ids]['client_to_client_reflection_used'] = \
m.groupdict()['client_to_client_ref_used'].lower()
continue
for vrf_id, vrf_name in vrf_dict.items():
if 'vrf' not in sum_dict:
sum_dict['vrf'] = {}
if vrf_name not in sum_dict['vrf']:
sum_dict['vrf'][vrf_name] = {}
if 'cluster_id' not in sum_dict['vrf'][vrf_name]:
if not cluster_id:
del sum_dict['vrf']
if cluster_id:
sum_dict['vrf'][vrf_name]['cluster_id'] = cluster_id
if configured_id:
sum_dict['vrf'][vrf_name]['configured_id'] = configured_id
if reflection_all_configured:
sum_dict['vrf'][vrf_name]['reflection_all_configured'] = \
reflection_all_configured
if reflection_intra_cluster_configured:
sum_dict['vrf'][vrf_name]['reflection_intra_cluster_configured'] = \
reflection_intra_cluster_configured
if reflection_intra_cluster_used:
sum_dict['vrf'][vrf_name]['reflection_intra_cluster_used'] = \
reflection_intra_cluster_used
if list_of_cluster_ids:
sum_dict['vrf'][vrf_name]['list_of_cluster_ids'] = list_of_cluster_ids
return sum_dict
#-------------------------------------------------------------------------------
# ==============================================
# Schema for:
# * 'show bgp all neighbors {neighbor} policy'
# ==============================================
class ShowBgpAllNeighborsPolicySchema(MetaParser):
''' Schema for "show bgp all neighbors {neighbor} policy" '''
schema = {
'vrf':
{Any():
{'neighbor':
{Any():
{'address_family':
{Any():
{Optional('nbr_af_route_map_name_in'): str,
Optional('nbr_af_route_map_name_out'): str,
}
},
}
},
}
},
}
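    # Illustrative shape of the parsed output (values follow the sample lines
    # quoted in the parser comments below and may differ on a real device):
    #   {'vrf': {'VRF1': {'neighbor': {'10.4.6.6': {'address_family':
    #       {'vpnv4 unicast': {'nbr_af_route_map_name_in': 'test',
    #                          'nbr_af_route_map_name_out': 'test'}}}}}}}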
# ==============================================
# Parser for:
# * 'show bgp all neighbors {neighbor} policy'
# ==============================================
class ShowBgpAllNeighborsPolicy(ShowBgpAllNeighborsPolicySchema):
''' Parser for "show bgp all neighbors {neighbor} policy" '''
cli_command = 'show bgp all neighbors {neighbor} policy'
def cli(self, neighbor, output=None):
if output is None:
out = self.device.execute(self.cli_command.format(neighbor=neighbor))
else:
out = output
p1 = re.compile(r'^\s*Neighbor: +(?P<neighbor>[a-zA-Z0-9\.\:]+),'
' +Address-Family: +(?P<address_family>[a-zA-Z0-9\s\-\_]+)'
'( +\((?P<vrf>[a-zA-Z0-9]+)\))?$')
p2 = re.compile(r'^\s*route-map +(?P<route_map_name>\S+)'
' +(?P<route_map_direction>[a-zA-Z]+)$')
# Init dictionary
policy_dict = {}
for line in out.splitlines():
line = line.rstrip()
# Neighbor: 10.4.6.6, Address-Family: VPNv4 Unicast (VRF1)
m = p1.match(line)
if m:
neighbor_id = str(m.groupdict()['neighbor'])
address_family = str(m.groupdict()['address_family']).lower()
if m.groupdict()['vrf']:
vrf = str(m.groupdict()['vrf'])
else:
vrf = 'default'
continue
# route-map test in
# route-map test out
m = p2.match(line)
if m:
route_map_name = str(m.groupdict()['route_map_name'])
route_map_direction = str(m.groupdict()['route_map_direction'])
# Init dict
if 'vrf' not in policy_dict:
policy_dict['vrf'] = {}
if vrf not in policy_dict['vrf']:
policy_dict['vrf'][vrf] = {}
if 'neighbor' not in policy_dict['vrf'][vrf]:
policy_dict['vrf'][vrf]['neighbor'] = {}
if neighbor_id not in policy_dict['vrf'][vrf]['neighbor']:
policy_dict['vrf'][vrf]['neighbor'][neighbor_id] = {}
if 'address_family' not in policy_dict['vrf'][vrf]['neighbor']\
[neighbor_id]:
policy_dict['vrf'][vrf]['neighbor'][neighbor_id]\
['address_family'] = {}
if address_family not in policy_dict['vrf'][vrf]['neighbor']\
[neighbor_id]['address_family']:
policy_dict['vrf'][vrf]['neighbor'][neighbor_id]\
['address_family'][address_family] = {}
if route_map_direction == 'in':
policy_dict['vrf'][vrf]['neighbor'][neighbor_id]\
['address_family'][address_family]['nbr_af_route_map_name_in'] = route_map_name
else:
policy_dict['vrf'][vrf]['neighbor'][neighbor_id]\
['address_family'][address_family]['nbr_af_route_map_name_out'] = route_map_name
continue
return policy_dict
#-------------------------------------------------------------------------------
# =======================================================
# Schema for:
# * 'show ip bgp template peer-session {template_name}'
# =======================================================
class ShowIpBgpTemplatePeerSessionSchema(MetaParser):
''' Schema "show ip bgp template peer-session {template_name}" '''
schema = {
'peer_session':
{Any():
{Optional('local_policies'): str ,
Optional('inherited_polices'): str ,
Optional('fall_over_bfd'): bool ,
Optional('suppress_four_byte_as_capability'): bool,
Optional('description'): str,
Optional('disable_connected_check'): bool,
Optional('ebgp_multihop_enable'): bool,
Optional('ebgp_multihop_max_hop'): int,
Optional('local_as_as_no'): int,
Optional('password_text'): str,
Optional('remote_as'): int,
Optional('shutdown'): bool,
Optional('keepalive_interval'): int,
Optional('holdtime'): int,
Optional('transport_connection_mode'): str,
Optional('update_source'): str,
Optional('index'): int,
Optional('inherited_session_commands'):
{Optional('fall_over_bfd'): bool,
Optional('suppress_four_byte_as_capability'): bool,
Optional('description'): str,
Optional('disable_connected_check'): bool,
Optional('ebgp_multihop_enable'): bool,
Optional('ebgp_multihop_max_hop'): int,
Optional('local_as_as_no'): int,
Optional('password_text'): str,
Optional('remote_as'): int,
Optional('shutdown'): bool,
Optional('keepalive_interval'): int,
Optional('holdtime'): int,
Optional('transport_connection_mode'): str,
Optional('update_source'): str,
},
},
},
}
# =======================================================
# Parser for:
# * 'show ip bgp template peer-session {template_name}'
# =======================================================
class ShowIpBgpTemplatePeerSession(ShowIpBgpTemplatePeerSessionSchema):
''' Parser for "show ip bgp template peer-session {template_name}" '''
cli_command = ['show ip bgp template peer-session {template_name}', 'show ip bgp template peer-session']
def cli(self, template_name="", output=None):
# show ip bgp template peer-session <WORD>
if output is None:
if template_name:
cmd = self.cli_command[0].format(template_name=template_name)
else:
cmd = self.cli_command[1]
out = self.device.execute(cmd)
else:
out = output
p1 = re.compile(r'^\s*Template:+(?P<template_id>[0-9\s\S\w]+),'
' +index:(?P<index>[0-9]+)$')
p2 = re.compile(r'^\s*Local +policies:+(?P<local_policies>0x[0-9A-F]+),'
' +Inherited +polices:+(?P<inherited_polices>0x[0-9A-F]+)$')
p3 = re.compile(r'^\s*Locally +configured +session +commands:$')
p4 = re.compile(r'^\s*remote-as +(?P<remote_as>[0-9]+)$')
p5 = re.compile(r'^\s*password +(?P<password_text>[\w\s]+)$')
p6 = re.compile(r'^\s*shutdown$')
p7 = re.compile(r'^\s*ebgp-multihop +(?P<ebgp_multihop_max_no>[0-9]+)$')
p8 = re.compile(r'^\s*update-source +(?P<update_source>[\d\w]+)$')
p9 = re.compile(r'^\s*transport +connection-mode +(?P<transport_connection_mode>[\s\w]+)$')
p10 = re.compile(r'^\s*description +(?P<desc>[\d\S\s\w]+)$')
p11 = re.compile(r'^\s*dont-capability-negotiate +four-octets-as$')
p12 = re.compile(r'^\s*timers +(?P<keepalive_interval>[\d]+)'
' +(?P<holdtime>[\d]+)$')
p13 = re.compile(r'^\s*local-as +(?P<local_as_as_no>[\d]+)$')
p14 = re.compile(r'^\s*disable-connected-check$')
p15 = re.compile(r'^\s*fall-over +bfd$')
p16 = re.compile(r'^\s*Inherited +session +commands:$')
# Init vars
parsed_dict = {}
for line in out.splitlines():
if line.strip():
line = line.rstrip()
else:
continue
# Template:PEER-SESSION, index:1
m = p1.match(line)
if m:
template_id = m.groupdict()['template_id']
index = int(m.groupdict()['index'])
if 'peer_session' not in parsed_dict:
parsed_dict['peer_session'] = {}
if template_id not in parsed_dict['peer_session']:
parsed_dict['peer_session'][template_id] = {}
parsed_dict['peer_session'][template_id]['index'] = index
continue
# Local policies:0x5025FD, Inherited polices:0x0
m = p2.match(line)
if m:
local_policy = m.groupdict()['local_policies']
inherited_policy = m.groupdict()['inherited_polices']
parsed_dict['peer_session'][template_id]['local_policies'] = local_policy
parsed_dict['peer_session'][template_id]['inherited_polices'] = inherited_policy
continue
# Locally configured session commands:
m = p3.match(line)
if m:
flag = False
continue
# remote-as 321
m = p4.match(line)
if m:
remote_as = int(m.groupdict()['remote_as'])
if flag:
parsed_dict['peer_session'][template_id]['inherited_session_commands']['remote_as'] = remote_as
else:
parsed_dict['peer_session'][template_id]['remote_as'] = remote_as
continue
            # password is
lines. Default is False.
bathymetry: bool, optional
Creates a bathymetry map of the seabed based on an input file. Default is False.
Returns
-------
fig : figure object
To hold the axes of the plot
ax: axis object
To hold the points and drawing of the plot
'''
hidebox = kwargs.get('hidebox' , False ) # toggles whether to show the axes or not
title = kwargs.get('title' , "" ) # optional title for the plot
time = kwargs.get("time" , 0 ) # the time in seconds of when you want to plot
linelabels = kwargs.get('linelabels' , False ) # toggle to include line number labels in the plot
pointlabels = kwargs.get('pointlabels' , False ) # toggle to include point number labels in the plot
endpoints = kwargs.get('endpoints' , False ) # toggle to include the line end points in the plot
bathymetry = kwargs.get("bathymetry" , False ) # toggle (and string) to include bathymetry or not. Can do full map based on text file, or simple squares
water = kwargs.get("water" , 0 ) # option to plot water surface (if > 0)
cmap_bath = kwargs.get("cmap" , 'ocean' ) # matplotlib colormap specification
alpha = kwargs.get("opacity" , 1.0 ) # the transparency of the bathymetry plot_surface
draw_body = kwargs.get("draw_body" , True ) # toggle to draw the Bodies or not
shadow = kwargs.get("shadow" , True ) # toggle to draw the mooring line shadows or not
rang = kwargs.get('rang' , 'hold' ) # colorbar range: if range not used, set it as a placeholder, it will get adjusted later
cbar_bath = kwargs.get('cbar_bath' , False ) # toggle to include a colorbar for a plot or not
cbar_bath_size = kwargs.get('colorbar_size' , 1.0 ) # the scale of the colorbar. Not the same as aspect. Aspect adjusts proportions
colortension = kwargs.get("colortension" , False ) # toggle to draw the mooring lines in colors based on node tensions
cmap_tension = kwargs.get('cmap_tension' , 'rainbow' ) # the type of color spectrum desired for colortensions
cbar_tension = kwargs.get('cbar_tension' , False ) # toggle to include a colorbar of the tensions when colortension=True
# sort out bounds
xs = []
ys = []
zs = [0, -self.depth]
for point in self.pointList:
xs.append(point.r[0])
ys.append(point.r[1])
zs.append(point.r[2])
# if axes not passed in, make a new figure
if ax == None:
fig = plt.figure()
#fig = plt.figure(figsize=(20/2.54,12/2.54), dpi=300)
ax = plt.axes(projection='3d')
else:
fig = ax.get_figure()
# set bounds
if rbound==0:
rbound = max([max(xs), max(ys), -min(xs), -min(ys)]) # this is the most extreme coordinate
if bounds=='default':
ax.set_zlim([-self.depth, 0])
elif bounds=='rbound':
ax.set_xlim([-rbound,rbound])
ax.set_ylim([-rbound,rbound])
ax.set_zlim([-rbound, rbound])
elif bounds=='mooring':
ax.set_xlim([-rbound,0])
ax.set_ylim([-rbound/2,rbound/2])
ax.set_zlim([-self.depth, 0])
# draw things
if draw_body:
for body in self.bodyList:
body.draw(ax)
j = 0
for line in self.lineList:
j = j + 1
if color==None and isinstance(line.type, str):
if 'chain' in line.type:
line.drawLine(time, ax, color=[.1, 0, 0], endpoints=endpoints, shadow=shadow, colortension=colortension, cmap_tension=cmap_tension)
elif 'rope' in line.type or 'polyester' in line.type:
line.drawLine(time, ax, color=[.3,.5,.5], endpoints=endpoints, shadow=shadow, colortension=colortension, cmap_tension=cmap_tension)
else:
line.drawLine(time, ax, color=[0.2,0.2,0.2], endpoints=endpoints, shadow=shadow, colortension=colortension, cmap_tension=cmap_tension)
else:
line.drawLine(time, ax, color=color, endpoints=endpoints, shadow=shadow, colortension=colortension, cmap_tension=cmap_tension)
# Add line labels
if linelabels == True:
ax.text((line.rA[0]+line.rB[0])/2, (line.rA[1]+line.rB[1])/2, (line.rA[2]+line.rB[2])/2, j)
if cbar_tension:
maxten = max([max(line.getLineTens()) for line in self.lineList]) # find the max tension in the System
minten = min([min(line.getLineTens()) for line in self.lineList]) # find the min tension in the System
bounds = range(int(minten),int(maxten), int((maxten-minten)/256))
norm = mpl.colors.BoundaryNorm(bounds, 256) # set the bounds in a norm object, with 256 being the length of all colorbar strings
fig.colorbar(cm.ScalarMappable(norm=norm, cmap=cmap_tension), label='Tension (N)') # add the colorbar
fig.tight_layout()
# Add point labels
i = 0
for point in self.pointList:
points = []
i = i + 1
if pointlabels == True:
ax.text(point.r[0], point.r[1], point.r[2], i, c = 'r')
if bathymetry==True: # if bathymetry is true, then make squares at each anchor point
if point.attachedEndB[0] == 0 and point.r[2] < -400:
points.append([point.r[0]+250, point.r[1]+250, point.r[2]])
points.append([point.r[0]+250, point.r[1]-250, point.r[2]])
points.append([point.r[0]-250, point.r[1]-250, point.r[2]])
points.append([point.r[0]-250, point.r[1]+250, point.r[2]])
Z = np.array(points)
verts = [[Z[0],Z[1],Z[2],Z[3]]]
ax.add_collection3d(Poly3DCollection(verts, facecolors='limegreen', linewidths=1, edgecolors='g', alpha=alpha))
if isinstance(bathymetry, str): # or, if it's a string, load in the bathymetry file
# parse through the MoorDyn bathymetry file
bathGrid_Xs, bathGrid_Ys, bathGrid = self.readBathymetryFile(bathymetry)
if rang=='hold':
rang = (np.min(-bathGrid), np.max(-bathGrid))
'''
# First method: plot nice 2D squares using Poly3DCollection
nX = len(bathGrid_Xs)
nY = len(bathGrid_Ys)
# store a list of points in the grid
Z = [[bathGrid_Xs[j],bathGrid_Ys[i],-bathGrid[i,j]] for i in range(nY) for j in range(nX)]
# plot every square in the grid (e.g. 16 point grid yields 9 squares)
verts = []
for i in range(nY-1):
for j in range(nX-1):
verts.append([Z[j+nX*i],Z[(j+1)+nX*i],Z[(j+1)+nX*(i+1)],Z[j+nX*(i+1)]])
ax.add_collection3d(Poly3DCollection(verts, facecolors='limegreen', linewidths=1, edgecolors='g', alpha=0.5))
verts = []
'''
# Second method: plot a 3D surface, plot_surface
X, Y = np.meshgrid(bathGrid_Xs, bathGrid_Ys)
bath = ax.plot_surface(X,Y,-bathGrid, cmap=cmap_bath, vmin=rang[0], vmax=rang[1], alpha=alpha)
if cbar_bath_size!=1.0: # make sure the colorbar is turned on just in case it isn't when the other colorbar inputs are used
cbar_bath=True
if cbar_bath:
fig.colorbar(bath, shrink=cbar_bath_size, label='depth (m)')
# draw water surface if requested
#if water > 0:
fig.suptitle(title)
set_axes_equal(ax)
ax.set_zticks([-self.depth, 0]) # set z ticks to just 0 and seabed
if hidebox:
ax.axis('off')
return fig, ax # return the figure and axis object in case it will be used later to update the plot
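    # Illustrative usage sketch (assumes this method is exposed as ``plot`` on an
    # already-initialized System ``ms``, and that 'bathymetry.txt' exists -- all
    # of these names are placeholders):
    #   fig, ax = ms.plot(bathymetry='bathymetry.txt', cbar_bath=True,
    #                     title='Mooring layout')
    #   fig.savefig('mooring.png', dpi=200)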
def plot2d(self, Xuvec=[1,0,0], Yuvec=[0,0,1], ax=None, color=None, **kwargs):
'''Makes a 2D plot of the mooring system objects in their current positions
Parameters
----------
Xuvec : list, optional
plane at which the x-axis is desired. The default is [1,0,0].
    Yuvec : list, optional
plane at which the y-axis is desired. The default is [0,0,1].
ax : axes, optional
Plot on an existing set of axes
color : string, optional
Some way to control the color of the plot ... TBD <<<
title : string, optional
A title of the plot. The default is "".
Returns
-------
fig : figure object
To hold the axes of the plot
ax: axis object
To hold the points and drawing of the plot
'''
title = kwargs.get('title' , "" ) # optional title for the plot
time = kwargs.get("time" , 0 ) # the time in seconds of when you want to plot
linelabels = kwargs.get('linelabels' , False ) # toggle to include line number labels in the plot
pointlabels = kwargs.get('pointlabels' , False ) # toggle to include point number labels in the plot
bathymetry = kwargs.get("bathymetry" , False ) # toggle (and string) to include bathymetry contours or not based on text file
draw_body = kwargs.get("draw_body" , False ) # toggle to draw the Bodies or not
cmap_bath = kwargs.get("cmap_bath" , 'ocean' ) # matplotlib colormap specification
alpha = kwargs.get("opacity" , 1.0 ) # the transparency of the bathymetry plot_surface
levels = kwargs.get("levels" , 7 ) # the number (or array) of levels in the contour plot
rang = kwargs.get('rang' , 'hold' ) # colorbar range: if range not used, set it as a placeholder, it will get adjusted later
cbar_bath = kwargs.get('colorbar' , False ) # toggle to include a colorbar for a plot or not
cbar_bath_aspect = kwargs.get('cbar_bath_aspect', 20 ) # the proportion of the colorbar. Default is 20 height x 1 width
cbar_bath_ticks = kwargs.get('cbar_bath_ticks' , None ) # the desired tick labels on the colorbar (can be an array)
colortension = kwargs.get("colortension" , False ) # toggle to draw the mooring lines in colors based on node tensions
cmap_tension = kwargs.get('cmap_tension' , 'rainbow' ) # the type of color spectrum desired for colortensions
cbar_tension = kwargs.get('cbar_tension' , False ) # toggle to include a colorbar of the tensions when colortension=True
# if axes not passed in, make a new figure
"**Error** `I do not have 'Kick Users' Permission.`")
return
except discord.HTTPException:
await ctx.send_message(message.channel, "**Error* `Kicking failed.`")
return
async def ban(ctx, message):
"""
Ban a member
:param ctx:
:param message:
:return:
"""
if not message.mentions:
await ctx.send_message(message.channel, "**Error** `Need to mention a user.`")
return
try:
await ctx.ban(message.mentions[0])
await ctx.send_message(message.channel, "**Banned**: `{}` from `{}` :thumbsup:"
.format(message.mentions[0].name, message.server.name))
return
except discord.Forbidden:
await ctx.send_message(message.channel, "**Error** `I do not have 'Ban Users' Permission.`")
return
except discord.HTTPException:
await ctx.send_message(message.channel, "**Error** `Banning failed.`")
return
async def unban(ctx, message):
"""
Unban a member
:param ctx:
:param message:
:return:
"""
try:
ban_list = await ctx.get_bans(message.server)
for m in ban_list:
if m.display_name == message.content[7:]:
await ctx.unban(message.server, m)
await ctx.send_message(message.channel, "**Unbanned**: `{}` from `{}` :thumbsup:"
.format(str(m.name), str(message.server.name)))
return
await ctx.send_message(message.channel, "**Error** `User not found`. Make sure the name is "
"correct. Don't use @ while typing the users name.")
return
except discord.Forbidden:
await ctx.send_message(message.channel, "**Error** `I do not have 'Ban Users' Permission.`")
return
except discord.HTTPException:
await ctx.send_message(message.channel, "**Error** `Banning failed.`")
return
async def levels(ctx, message):
"""
Enable Member Rankings and XP
:param ctx:
:param message:
:return:
"""
cont = message.content[8:]
if cont.lower() == "on":
key = str(message.server.id) + "lvl_track"
red.set(key, "Member Levels: ON")
await ctx.send_message(message.channel, ":loudspeaker: Member Levels are `ON` for `{}`."
.format(str(message.server.name)))
return
elif cont.lower() == "off":
key = message.server.id + "lvl_track"
if red.exists(key):
red.delete(key)
await ctx.send_message(message.channel, "Member Levels are `OFF` for `{}`."
.format(str(message.server.name)))
else:
await ctx.send_message(message.channel, "**Error** `'Member Rankings and Levels' are "
"already turned 'OFF' for your server.`")
else:
await ctx.send_message(message.channel, "Error: Usage `?levels on` or `?levels off`")
return
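# Redis key convention used by the level-tracking commands (illustrative): the
# mere existence of "<server_id>lvl_track" means member levels are ON for that
# server, and "<server_id>lvl_track_an" means level-up announcements are ON;
# the stored value is only a marker string.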
async def set_levels_an(ctx, message):
"""
Enable/Disable level up announcements
:param ctx:
:param message:
:return:
"""
cont = message.content[15:]
if cont.lower() == "on":
key = str(message.server.id) + "lvl_track_an"
red.set(key, "Member Levels: ON")
await ctx.send_message(message.channel, ":loudspeaker: Member Levels Announcements are `ON` "
"for `{}`.".format(message.server.name))
elif cont.lower() == "off":
key = message.server.id + "lvl_track_an"
if red.exists(key):
red.delete(key)
await ctx.send_message(message.channel, "Member Levels Announcements are `OFF` for `{}`."
.format(message.server.name))
else:
await ctx.send_message(message.channel, "**Error** `'Member Levels Announcements' are "
"already turned 'OFF' for your server.`")
else:
await ctx.send_message(message.channel, "Error: Usage `?set_levels_an on` or "
"`?set_levels_an off`")
async def set_xp_roles(ctx, message):
"""
For osu! Discord Fleet only. Enable xp and leaderboard commands
for specific roles
:param ctx:
:param message:
:return:
"""
cont = message.content[14:]
if cont.lower() == "off":
key = message.server.id + "xp_roles"
red.delete(key)
await ctx.send_message(message.channel, "**Success** `Anyone can use XP and Leaderboards now.`"
.format(message.server.name))
else:
key = message.server.id + "xp_roles"
r_c = discord.utils.get(message.server.roles, name=cont)
if not r_c:
await ctx.send_message(message.channel, "**Error**: `Role does not exist`")
return
red.lpush(key, str(r_c.id))
await ctx.send_message(message.channel, "**Success**: `Role: {} has been added to XP "
"white-list.`".format(r_c.name))
return
async def add_twitch(ctx, message):
"""
Add a new twitch stream
:param ctx:
:param message:
:return:
"""
command = message.content[12:]
switch = command.split(";")
if not len(switch) == 2:
        await ctx.send_message(message.channel, "**Error**: Bad request."
                                                "\nUsage: `?add_twitch #channel; name`"
                                                "\nExample: `?add_twitch #streams; monstercat`")
return
usr_name = switch[1]
clean_pre = usr_name.lstrip()
clean_post = clean_pre.rstrip()
chan = message.channel_mentions
alert_chan_id = chan[0].id
endpoint = "https://api.twitch.tv/kraken/channels/{}".format(str(clean_post))
headers = {"Client-ID": "gdo7uqrj9fvv2yvdg4w4ln6bmvke1kk",
"Accept": "application/vnd.twitchtv.v3+json"}
    async with aiohttp.ClientSession() as session:
async with session.get(url=endpoint, headers=headers) as resp:
data = await resp.read()
r = json.loads(data.decode("utf-8"))
try:
chan_id = str(r['_id'])
except KeyError:
await ctx.send_message(message.channel, ":scream: **Error** `Invalid Channel...`")
return
chan_name = r['name']
chan_url = r['url']
try:
cursor_n = db.stream_notifications.find({"serv_id": str(message.server.id)})
cursor_t = db.twitch_streams.find({"chan_id": chan_id})
# TODO add a cleanup service that removes unreferenced channel every 24 hours
# --- Add data to the Central Twitch Database --- #
if cursor_t.count() == 0:
new_record = {"chan_id": chan_id,
"chan_name": chan_name,
"latest_stream_timestamp": "",
"latest_stream_id": "",
"game": "",
"status_text": "",
"url": chan_url,
"last_updated": datetime.datetime.utcnow()}
db.twitch_streams.insert_one(new_record)
# --- Add data to the Server Specific Feeds Database --- #
if cursor_n.count() == 0:
new_record = {"serv_id": str(message.server.id),
"twitch_streams": [{"chan_id": chan_id,
"chan_name": chan_name,
"last_stream_timestamp": "",
"last_stream_id": "",
"alert_chan_id": alert_chan_id}],
"last_updated": datetime.datetime.utcnow()}
db.stream_notifications.insert_one(new_record)
            await ctx.send_message(message.channel, ":tv: **Success** `Stream: '{}' was added successfully.`"
                                                    .format(chan_name))
return
else:
for c in cursor_n:
for j in c["twitch_streams"]:
if j['chan_id'] == chan_id:
await ctx.send_message(message.channel, "**Error** `This stream already exists.`")
return
field = {"serv_id": str(message.server.id)}
update = {"$push":
{"twitch_streams": {"chan_id": chan_id,
"chan_name": chan_name,
"last_stream_timestamp": "",
"last_stream_id": "",
"alert_chan_id": alert_chan_id}},
"$currentDate":
{"last_updated": {"$type": "date"}}
}
db.stream_notifications.update_one(field, update)
            await ctx.send_message(message.channel, ":tv: **Success** `Stream: '{}' was added successfully.`"
                                                    .format(chan_name))
return
except AttributeError:
pass
# TODO: fix this entire function.
# The entire function has a really messed up approach
# to validate feeds. It should be corrected ASAP.
async def add_feed(ctx, message):
"""
Add a rss feed
:param ctx:
:param message:
:return:
"""
command = message.content[10:]
switch = str(command).split(";")
if not len(switch) == 2:
        await ctx.send_message(message.channel, "**Error**: Bad request."
                                                "\nUsage: `?add_feed #channel; URL`"
                                                "\nExample: `?add_feed #feed; "
                                                "http://lapoozza.me/feed/atom`")
return
url_feed = str(switch[1])
clean_pre = url_feed.lstrip()
clean_post = clean_pre.rstrip()
chan = message.channel_mentions
chan_id = chan[0].id
d = feedparser.parse(clean_post)
if "links" not in d.feed:
return
links = d.feed.links
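    # Each entry of d.feed.links is typically a dict-like object with "rel",
    # "type" and "href" keys, e.g. {"type": "application/rss+xml", "href": "..."}.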
feed_url = ""
last_post_ids = []
latest_publish = None
    # FIXME when no links are found in d.feed, there should be an error message
    # FIXME http://feeds.thescoreesports.com/lol.rss <<-- doesn't have a links field but is perfectly valid
for l in links:
if l['type'] == "application/atom+xml":
try:
a_sort = []
last_post_ids.clear()
for p in d.entries:
last_post_ids.append(p.id)
a_sort.append(datetime.datetime.fromtimestamp(time.mktime(p.updated_parsed)))
b_sort = sorted(a_sort, reverse=True)
latest_publish = b_sort[0]
feed_url = l['href']
except AttributeError:
break
elif l['type'] == "application/rss+xml":
try:
a_sort = []
last_post_ids.clear()
for p in d.entries:
last_post_ids.append(p.id)
a_sort.append(datetime.datetime.fromtimestamp(time.mktime(p.updated_parsed)))
b_sort = sorted(a_sort, reverse=True)
latest_publish = b_sort[0]
feed_url = l['href']
except AttributeError:
break
else:
try:
a_sort = []
last_post_ids.clear()
for p in d.entries:
last_post_ids.append(p.id)
a_sort.append(datetime.datetime.fromtimestamp(time.mktime(p.updated_parsed)))
b_sort = sorted(a_sort, reverse=True)
latest_publish = b_sort[0]
feed_url = clean_post
except AttributeError:
continue
if feed_url == "":
await ctx.send_message(message.channel, "**Error** `Did not find any valid RSS/ATOM feeds "
"in the given link.`")
return
if not latest_publish:
        await ctx.send_message(message.channel, ":warning: **Error** `Bad link.`")
return
title = d.feed.title
try:
cursor = db.feeds.find({"serv_id": str(message.server.id)})
if cursor.count() == 0:
new_record = {"serv_id": str(message.server.id),
"feed_stat": "on",
"feeds": [{"feed_url": feed_url,
"channel_id": chan_id,
"title": title,
"last_post_ids": last_post_ids,
"latest_publish": latest_publish}],
"last_updated": datetime.datetime.utcnow()}
db.feeds.insert_one(new_record)
            await ctx.send_message(message.channel, ":mailbox: **Success** `Feed: '{}' was added "
                                                    "successfully.`".format(title))
return
else:
for c in cursor:
for j in c["feeds"]:
if j['feed_url'] == feed_url:
await ctx.send_message(message.channel, "**Error** `This feed already exists.`")
return
field = {"serv_id": message.server.id}
update = {
"$push": {
"feeds": {
"feed_url": feed_url,
"channel_id": chan_id,
"title": title,
"last_post_ids": last_post_ids,
"latest_publish": latest_publish
}
},
"$currentDate": {
"last_updated": {
"$type": "date"
}
}
}
db.feeds.update_one(field, update)
            await ctx.send_message(message.channel, ":mailbox: **Success** `Feed: '{}' was added "
                                                    "successfully.`".format(title))
return
except AttributeError:
pass
async def del_feed(ctx, message):
"""
Delete RSS feed
:param ctx:
:param message:
:return:
"""
url_feed = message.content[10:]
clean_pre = url_feed.lstrip()
clean_post = clean_pre.rstrip()
try:
        cursor = db.feeds.find({"serv_id": str(message.server.id)})
if cursor.count() == 0:
await ctx.send_message(message.channel, "**Error** `Currently 'no' feeds are active.`")
return
for c in cursor:
for j in c["feeds"]:
if j['feed_url'] == clean_post:
title = j['title']
field = {"serv_id": message.server.id}
update = {"$pull":
{"feeds": {"feed_url": clean_post}},
"$currentDate":
{"last_updated": {"$type": "date"}}
}
db.feeds.update_one(field, update)
                    await ctx.send_message(message.channel, ":mailbox: **Success**: `Feed: '{}' "
                                                            "removed successfully.`".format(title))
return
await ctx.send_message(message.channel, "**Error** `Feed URL not found. Use ?list_feed .`")
return
except AttributeError:
pass
async def list_feed(ctx, message):
"""
List the available RSS feeds
:param ctx:
:param message:
:return:
"""
try:
cursor = db.feeds.find({"serv_id": str(message.server.id)})
if cursor.count() == 0:
await ctx.send_message(message.channel, "**Error** `Currently 'no' feeds are active.`")
return
a = ""
for c in cursor:
for j in c["feeds"]:
feed_url = j['feed_url']
a += "{}\n".format(feed_url)
await ctx.send_message(message.channel, "```css\n{}```".format(a))
return
except AttributeError:
pass
async def xp_blacklist(ctx, message):
"""
Add/ Remove people from XP Blacklist
:param ctx:
:param message:
:return:
"""
allowed_ids = settings["XP_BLACKLIST_IDS"]
if message.author.id in allowed_ids:
switch = message.content[9:]
splits = str(switch).split(";", maxsplit=1)
user_id = splits[0]
action = splits[1]
if action == "add":
red.lpush("xp_black_list", user_id)
await ctx.send_message(message.channel, "**Added** : `{}` to the XP blacklist.".format(user_id))
elif action == "remove":
red.lrem("xp_black_list", 0, user_id)
await ctx.send_message(message.channel, "**Removed** : `{}` from the XP blacklist.".format(user_id))
else:
await ctx.send_message(message.channel, '`This command is reserved for Team Rero only`')
async def list_xp_blacklist(ctx, message):
"""
    List the members of the XP blacklist
:return:
"""
allowed_ids = settings["XP_BLACKLIST_IDS"]
if message.author.id in allowed_ids:
        banned_ids = red.lrange("xp_black_list", 0, -1)
        await ctx.send_message(message.channel, "`{}`".format(banned_ids))
# osc/osc.py
#!/usr/bin/env python3
import vpython as vp
import numpy as np
from vpython import *
from time import sleep
AMP = 10.0
RADIUS = 0.2
class Universe:
def __init__(self):
self.viewScene()
def viewScene(self):
""" Canvas Scene """
scene = vp.canvas(
title = "Oscillations",
x = 0, y = 0,
width = 1600, height = 900,
)
class Assets(Universe):
def __init__(self):
"""Physical objects"""
Universe.__init__(self)
self.point0c = sphere( pos = vector(0, 0, 0), color = color.red, radius = RADIUS )
self.point1l = sphere( pos = vector(-1, 0, 0), color = color.red, radius = RADIUS )
self.point2l = sphere( pos = vector(-2, 0, 0), color = color.red, radius = RADIUS )
self.point3l = sphere( pos = vector(-3, 0, 0), color = color.red, radius = RADIUS )
self.point4l = sphere( pos = vector(-4, 0, 0), color = color.red, radius = RADIUS )
self.point5l = sphere( pos = vector(-5, 0, 0), color = color.red, radius = RADIUS )
self.point6l = sphere( pos = vector(-6, 0, 0), color = color.orange, radius = RADIUS )
self.point7l = sphere( pos = vector(-7, 0, 0), color = color.orange, radius = RADIUS )
self.point8l = sphere( pos = vector(-8, 0, 0), color = color.orange, radius = RADIUS )
self.point9l = sphere( pos = vector(-9, 0, 0), color = color.orange, radius = RADIUS )
self.point10l = sphere( pos = vector(-10, 0, 0), color = color.orange, radius = RADIUS )
self.point11l = sphere( pos = vector(-11, 0, 0), color = color.yellow, radius = RADIUS )
self.point12l = sphere( pos = vector(-12, 0, 0), color = color.yellow, radius = RADIUS )
self.point13l = sphere( pos = vector(-13, 0, 0), color = color.yellow, radius = RADIUS )
self.point14l = sphere( pos = vector(-14, 0, 0), color = color.yellow, radius = RADIUS )
self.point15l = sphere( pos = vector(-15, 0, 0), color = color.yellow, radius = RADIUS )
self.point16l = sphere( pos = vector(-16, 0, 0), color = color.green, radius = RADIUS )
self.point17l = sphere( pos = vector(-17, 0, 0), color = color.green, radius = RADIUS )
self.point18l = sphere( pos = vector(-18, 0, 0), color = color.green, radius = RADIUS )
self.point19l = sphere( pos = vector(-19, 0, 0), color = color.green, radius = RADIUS )
self.point20l = sphere( pos = vector(-20, 0, 0), color = color.green, radius = RADIUS )
self.point21l = sphere( pos = vector(-21, 0, 0), color = color.cyan, radius = RADIUS )
self.point22l = sphere( pos = vector(-22, 0, 0), color = color.cyan, radius = RADIUS )
self.point23l = sphere( pos = vector(-23, 0, 0), color = color.cyan, radius = RADIUS )
self.point24l = sphere( pos = vector(-24, 0, 0), color = color.cyan, radius = RADIUS )
self.point25l = sphere( pos = vector(-25, 0, 0), color = color.cyan, radius = RADIUS )
self.point26l = sphere( pos = vector(-26, 0, 0), color = color.blue, radius = RADIUS )
self.point27l = sphere( pos = vector(-27, 0, 0), color = color.blue, radius = RADIUS )
self.point28l = sphere( pos = vector(-28, 0, 0), color = color.blue, radius = RADIUS )
self.point29l = sphere( pos = vector(-29, 0, 0), color = color.blue, radius = RADIUS )
self.point30l = sphere( pos = vector(-30, 0, 0), color = color.blue, radius = RADIUS )
self.point31l = sphere( pos = vector(-31, 0, 0), color = color.purple, radius = RADIUS )
self.point32l = sphere( pos = vector(-32, 0, 0), color = color.purple, radius = RADIUS )
self.point33l = sphere( pos = vector(-33, 0, 0), color = color.purple, radius = RADIUS )
self.point34l = sphere( pos = vector(-34, 0, 0), color = color.purple, radius = RADIUS )
self.point35l = sphere( pos = vector(-35, 0, 0), color = color.purple, radius = RADIUS )
self.point1r = sphere( pos = vector(1, 0, 0), color = color.red, radius = RADIUS )
self.point2r = sphere( pos = vector(2, 0, 0), color = color.red, radius = RADIUS )
self.point3r = sphere( pos = vector(3, 0, 0), color = color.red, radius = RADIUS )
self.point4r = sphere( pos = vector(4, 0, 0), color = color.red, radius = RADIUS )
self.point5r = sphere( pos = vector(5, 0, 0), color = color.red, radius = RADIUS )
self.point6r = sphere( pos = vector(6, 0, 0), color = color.orange, radius = RADIUS )
self.point7r = sphere( pos = vector(7, 0, 0), color = color.orange, radius = RADIUS )
self.point8r = sphere( pos = vector(8, 0, 0), color = color.orange, radius = RADIUS )
self.point9r = sphere( pos = vector(9, 0, 0), color = color.orange, radius = RADIUS )
self.point10r = sphere( pos = vector(10, 0, 0), color = color.orange, radius = RADIUS )
self.point11r = sphere( pos = vector(11, 0, 0), color = color.yellow, radius = RADIUS )
self.point12r = sphere( pos = vector(12, 0, 0), color = color.yellow, radius = RADIUS )
self.point13r = sphere( pos = vector(13, 0, 0), color = color.yellow, radius = RADIUS )
self.point14r = sphere( pos = vector(14, 0, 0), color = color.yellow, radius = RADIUS )
self.point15r = sphere( pos = vector(15, 0, 0), color = color.yellow, radius = RADIUS )
self.point16r = sphere( pos = vector(16, 0, 0), color = color.green, radius = RADIUS )
self.point17r = sphere( pos = vector(17, 0, 0), color = color.green, radius = RADIUS )
self.point18r = sphere( pos = vector(18, 0, 0), color = color.green, radius = RADIUS )
self.point19r = sphere( pos = vector(19, 0, 0), color = color.green, radius = RADIUS )
self.point20r = sphere( pos = vector(20, 0, 0), color = color.green, radius = RADIUS )
self.point21r = sphere( pos = vector(21, 0, 0), color = color.cyan, radius = RADIUS )
self.point22r = sphere( pos = vector(22, 0, 0), color = color.cyan, radius = RADIUS )
self.point23r = sphere( pos = vector(23, 0, 0), color = color.cyan, radius = RADIUS )
self.point24r = sphere( pos = vector(24, 0, 0), color = color.cyan, radius = RADIUS )
self.point25r = sphere( pos = vector(25, 0, 0), color = color.cyan, radius = RADIUS )
self.point26r = sphere( pos = vector(26, 0, 0), color = color.blue, radius = RADIUS )
self.point27r = sphere( pos = vector(27, 0, 0), color = color.blue, radius = RADIUS )
self.point28r = sphere( pos = vector(28, 0, 0), color = color.blue, radius = RADIUS )
self.point29r = sphere( pos = vector(29, 0, 0), color = color.blue, radius = RADIUS )
self.point30r = sphere( pos = vector(30, 0, 0), color = color.blue, radius = RADIUS )
self.point31r = sphere( pos = vector(31, 0, 0), color = color.purple, radius = RADIUS )
self.point32r = sphere( pos = vector(32, 0, 0), color = color.purple, radius = RADIUS )
self.point33r = sphere( pos = vector(33, 0, 0), color = color.purple, radius = RADIUS )
self.point34r = sphere( pos = vector(34, 0, 0), color = color.purple, radius = RADIUS )
self.point35r = sphere( pos = vector(35, 0, 0), color = color.purple, radius = RADIUS )
class InitialConditions(Assets):
def __init__(self):
""" Setting up the initial conditions"""
Assets.__init__(self)
self.t = 0
self.dt = 0.001
class Events(InitialConditions):
def __init__(self):
"""Physics events simulation"""
InitialConditions.__init__(self)
sleep(3)
while (True):
rate(100)
            # calculate the y position of every point in the chain
            for p in self.points:
                p.pos.y = AMP * np.sin(p.pos.x * self.t)
            self.t += self.dt
##############################################
# User Defined Functions for Phrase Learning
##############################################
import pandas as pd
import numpy as np
import re, nltk, time, gc, math
from azureml.logging import get_azureml_logger
run_logger = get_azureml_logger()
run_logger.log('amlrealworld.QnA-matching.phrase-learning','true')
def CleanAndSplitText(frame):
global EMPTY, SPACE, NLTK_PUNKT_EN, SENTENCE_BREAKER
EMPTY = ''
SPACE = ' '
nltk.download("punkt")
NLTK_PUNKT_EN = 'tokenizers/punkt/english.pickle'
SENTENCE_BREAKER = nltk.data.load(NLTK_PUNKT_EN)
textDataOut = []
# This regular expression is for punctuation that we wish to clean out
# We also will split sentences into smaller phrase like units using this expression
rePhraseBreaks = re.compile("[\"\!\?\)\]\}\,\:\;\*\-]*\s+\([0-9]+\)\s+[\(\[\{\"\*\-]*"
"|[\"\!\?\)\]\}\,\:\;\*\-]+\s+[\(\[\{\"\*\-]*"
"|\.\.+" # ..
"|\s*\-\-+\s*" # --
"|\s+\-\s+" # -
"|\:\:+" # ::
"|\s+[\/\(\[\{\"\-\*]+\s*"
"|[\,!\?\"\)\(\]\[\}\{\:\;\*](?=[a-zA-Z])"
"|[\"\!\?\)\]\}\,\:\;]+[\.]*$"
)
# Regex for underbars
regexUnderbar = re.compile('_|_+')
# Regex for space
regexSpace = re.compile(' +')
# Regex for sentence final period
regexPeriod = re.compile("\.$")
# Regex for parentheses
regexParentheses = re.compile("\(\$?")
# Regex for equal sign
regexEqual = re.compile("=")
# Iterate through each document and do:
# (1) Split documents into sections based on section headers and remove section headers
# (2) Split the sections into sentences using NLTK sentence tokenizer
# (3) Further split sentences into phrasal units based on punctuation and remove punctuation
    # (4) Remove sentence final periods when not part of an abbreviation
for i in range(0,len(frame)):
# Extract one document from frame
docID = frame.index.values[i]
docText = frame['Text'].iloc[i]
# Set counter for output line count for this document
lineIndex=0
sentences = SENTENCE_BREAKER.tokenize(docText)
for sentence in sentences:
# Split each sentence into phrase level chunks based on punctuation
textSegs = rePhraseBreaks.split(sentence)
numSegs = len(textSegs)
for j in range(0,numSegs):
if len(textSegs[j])>0:
# Convert underbars to spaces
# Underbars are reserved for building the compound word phrases
textSegs[j] = regexUnderbar.sub(" ",textSegs[j])
# Split out the words so we can specially handle the last word
words = regexSpace.split(textSegs[j])
# Remove parentheses and equal signs
words = [regexEqual.sub("", regexParentheses.sub("", w)) for w in words]
phraseOut = ""
last = len(words) -1
for i in range(0, last):
phraseOut += words[i] + " "
# If the last word ends in a period then remove the period
lastWord = regexPeriod.sub("", words[last])
                    # If the last word is an abbreviation like "U.S."
                    # then add the word-final period back on
                    if "." in lastWord:
                        lastWord += "."
phraseOut += lastWord
textDataOut.append([docID,lineIndex,phraseOut, phraseOut.lower()])
lineIndex += 1
# Convert to pandas frame
frameOut = pd.DataFrame(textDataOut, columns=['DocID','DocLine','CleanedText', 'LowercaseText'])
return frameOut
# count the number of occurrences of all 2-gram, 3-gram, and 4-gram word sequences.
def ComputeNgramStats(textData,functionwordHash,blacklistHash):
# Create an array to store the total count of all ngrams up to 4-grams
# Array element 0 is unused, element 1 is unigrams, element 2 is bigrams, etc.
ngramCounts = [0]*5;
# Create a list of structures to tabulate ngram count statistics
# Array element 0 is the array of total ngram counts,
# Array element 1 is a hash table of individual unigram counts
# Array element 2 is a hash table of individual bigram counts
# Array element 3 is a hash table of individual trigram counts
# Array element 4 is a hash table of individual 4-gram counts
ngramStats = [ngramCounts, {}, {}, {}, {}]
# Create a regular expression for assessing validity of words
# for phrase modeling. The expression says words in phrases
# must either:
# (1) contain an alphabetic character, or
    # (2) be the single character '&', or
# (3) be a one or two digit number
reWordIsValid = re.compile('[A-Za-z]|^&$|^\d\d?$')
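    # e.g. "market", "A1", "&", "7" and "42" count as valid words here,
    # while "$$$", "..." and "1234" do not.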
# Go through the text data line by line collecting count statistics
# for all valid n-grams that could appear in a potential phrase
numLines = len(textData)
for i in range(0, numLines):
# Split the text line into an array of words
wordArray = textData[i].split()
numWords = len(wordArray)
# Create an array marking each word as valid or invalid
validArray = [];
for word in wordArray:
validArray.append(reWordIsValid.match(word) != None)
# Tabulate total raw ngrams for this line into counts for each ngram bin
# The total ngrams counts include the counts of all ngrams including those
# that we won't consider as parts of phrases
for j in range(1,5):
if j<=numWords:
ngramCounts[j] += numWords - j + 1
# Collect counts for viable phrase ngrams and left context sub-phrases
for j in range(0,numWords):
word = wordArray[j]
# Only bother counting the ngrams that start with a valid content word
# i.e., valids words not in the function word list or the black list
if ( ( word not in functionwordHash ) and ( word not in blacklistHash ) and validArray[j] ):
# Initialize ngram string with first content word and add it to unigram counts
ngramSeq = word
if ngramSeq in ngramStats[1]:
ngramStats[1][ngramSeq] += 1
else:
ngramStats[1][ngramSeq] = 1
# Count valid ngrams from bigrams up to 4-grams
stop = 0
k = 1
while (k<4) and (j+k<numWords) and not stop:
n = k + 1
nextNgramWord = wordArray[j+k]
# Only count ngrams with valid words not in the blacklist
if ( validArray[j+k] and nextNgramWord not in blacklistHash ):
ngramSeq += " " + nextNgramWord
if ngramSeq in ngramStats[n]:
ngramStats[n][ngramSeq] += 1
else:
ngramStats[n][ngramSeq] = 1
k += 1
if nextNgramWord not in functionwordHash:
# Stop counting new ngrams after second content word in
# ngram is reached and ngram is a viable full phrase
stop = 1
else:
stop = 1
return ngramStats
# rank potential phrases by the Weighted Pointwise Mutual Information of their constituent words
def RankNgrams(ngramStats,functionwordHash,minCount):
# Create a hash table to store weighted pointwise mutual
# information scores for each viable phrase
ngramWPMIHash = {}
# Go through each of the ngram tables and compute the phrase scores
# for the viable phrases
for n in range(2,5):
i = n-1
for ngram in ngramStats[n].keys():
ngramCount = ngramStats[n][ngram]
if ngramCount >= minCount:
wordArray = ngram.split()
# If the final word in the ngram is not a function word then
# the ngram is a valid phrase candidate we want to score
if wordArray[i] not in functionwordHash:
leftNgram = wordArray[0]
for j in range(1,i):
leftNgram += ' ' + wordArray[j]
rightWord = wordArray[i]
# Compute the weighted pointwise mutual information (WPMI) for the phrase
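                        # i.e. WPMI(phrase) = P(phrase) * log( P(phrase) / (P(left sub-phrase) * P(final word)) ),
                        # where each probability is an ngram count divided by the total count for that ngram order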
probNgram = float(ngramStats[n][ngram])/float(ngramStats[0][n])
probLeftNgram = float(ngramStats[n-1][leftNgram])/float(ngramStats[0][n-1])
probRightWord = float(ngramStats[1][rightWord])/float(ngramStats[0][1])
WPMI = probNgram * math.log(probNgram/(probLeftNgram*probRightWord));
# Add the phrase into the list of scored phrases only if WMPI is positive
if WPMI > 0:
ngramWPMIHash[ngram] = WPMI
# Create a sorted list of the phrase candidates
rankedNgrams = sorted(ngramWPMIHash, key=ngramWPMIHash.__getitem__, reverse=True)
# Force a memory clean-up
ngramWPMIHash = None
gc.collect()
return rankedNgrams
# apply the phrase rewrites to training data.
def ApplyPhraseRewrites(rankedNgrams,textData,learnedPhrases,
maxPhrasesToAdd,maxPhraseLength,verbose):
if len(rankedNgrams) == 0:
return
# This function will consider at most maxRewrite
# new phrases to be added into the learned phrase
    # list as specified by the calling function
maxRewrite=maxPhrasesToAdd
# If the remaining number of proposed ngram phrases is less
# than the max allowed, then reset maxRewrite to the size of
# the proposed ngram phrases list
numNgrams = len(rankedNgrams)
if numNgrams < maxRewrite:
maxRewrite = numNgrams
# Create empty hash tables to keep track of phrase overlap conflicts
leftConflictHash = {}
rightConflictHash = {}
# Create an empty hash table collecting the set of rewrite rules
# to be applied during this iteration of phrase learning
ngramRewriteHash = {}
# Precompile the regex for finding spaces in ngram phrases
regexSpace = re.compile(' ')
# Initialize some bookkeeping variables
numLines = len(textData)
numPhrasesAdded = 0
numConsidered = 0
lastSkippedNgram = ""
lastAddedNgram = ""
# Collect list up to maxRewrite ngram phrase rewrites
stop | |
res["record"], res["data"]
# index is the row number of the scan in the set of scans
index = scan - 1 # FITS scans are 1-based
if scan in self.scans:
pass
else:
self.scans.append(scan)
if record == 1:
# these data are constant for the scan
self.source = self.info["point"]["current_source"]
if self.source:
self.bintabHDU.data[index]['OBJECT'] = self.source['name']
self.RA = self.source['ra']*24/math.pi # J2000 from radians
self.dec = self.source['dec']*180/math.pi # J2000 from radians
else:
                self.logger.warning("add_data_to_FITS: no source selected")
self.bintabHDU.data[index]['OBJECT'] = "no source"
self.RA = 0.0
self.dec = 0.0
self.bintabHDU.data[index]['VELOCITY'] = 0
self.bintabHDU.data[index]['EXPOSURE'] = self.record_int_time
self.bintabHDU.data[index]['BANDWIDT'] = self.bandwidth
self.bintabHDU.data[index]['SCAN'] = scan # int
self.bintabHDU.data[index]['CYCLE'] = 1 # int (0 means bad row)
self.bintabHDU.data[index]['OBSMODE'] = self.obsmode
# self.bintabHDU.data[index]['SIG']
# self.bintabHDU.data[index]['CAL']
# self.bintabHDU.data[index]['TCAL']
self.bintabHDU.data[index]['RESTFREQ'] = self.restfreq
self.bintabHDU.data[index]['OBSFREQ'] = self.obsfreq
self.bintabHDU.data[index]['VELDEF'] = veldef
if self.source:
if "velocity" in self.source["info"]:
self.logger.debug("add_data_to_FITS: source velocity: %s",
self.source["info"]["velocity"])
self.logger.debug("add_data_to_FITS: source velocity type: %s",
type(self.source["info"]["velocity"]))
self.bintabHDU.data[index]['VELOCITY'] = \
self.source["info"]["velocity"]
else:
self.logger.warning("add_data_to_FITS: %s has no velocity",
self.source['name'])
self.bintabHDU.data[index]['VELOCITY'] = 0
else:
# velocity already set to 0 above
pass
self.bintabHDU.data[index]['EQUINOX'] = equinox
# data axis specifications
# frequency
self.bintabHDU.data[index]['CRVAL1'] = \
self.bintabHDU.data[index]['OBSFREQ']
self.bintabHDU.data[index]['CDELT1'] = \
self.bintabHDU.data[index]['BANDWIDT']/self.num_chan
self.bintabHDU.data[index]['CRPIX1'] = 0
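            # For an upper-sideband downconverter the frequency axis increases
            # with channel number (positive CDELT1); for lower sideband it
            # decreases (negative CDELT1).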
if self.SB[0] == 'U': # assume all downconverters the same
self.bintabHDU.data[index]['SIDEBAND'] = +1
self.bintabHDU.data[index]['CDELT1'] = \
self.bintabHDU.data[index]['BANDWIDT']/self.num_chan
elif self.SB[0] == 'L':
self.bintabHDU.data[index]['SIDEBAND'] = -1
self.bintabHDU.data[index]['CDELT1'] = \
-self.bintabHDU.data[index]['BANDWIDT']/self.num_chan
else:
self.logger.error("IF mode %s is not supported", self.SB[0])
# second and third data axes (coordinates)
RAstr = str(self.RA)
decstr = str(self.dec)
coords = ' '.join((RAstr, decstr))
c = astropy.coordinates.SkyCoord(coords, unit=(u.hourangle,u.deg))
self.bintabHDU.data[index]['CRVAL2'] = c.ra.hourangle
self.bintabHDU.data[index]['CRVAL3'] = c.dec.deg
# fourth data axis (polarization)
refval, delta = self.backend.polcodes()
self.bintabHDU.data[index]['CRVAL4'] = refval
self.bintabHDU.data[index]['CDELT4'] = delta
# these data change for every record
# current time
now = time.gmtime() # struct_time tuple
date_obs = time.strftime("%Y/%m/%d", now) # str
self.bintabHDU.data[index]['DATE-OBS'] = date_obs
midnight = time.mktime(dateutil.parser.parse(date_obs).timetuple())
UNIXtime = calendar.timegm(now) # int
# for convenience in Python; not FITS standard
self.bintabHDU.data[index]['UNIXtime'][0,record,0,0,0,0] = UNIXtime
self.bintabHDU.data[index]['TIME'] = UNIXtime - midnight
# sidereal time
#astrotime = astropy.time.Time(UNIXtime, format='unix', scale='utc',
# location=self.location)
#astrotime.delta_ut1_utc = 0 # forget about fraction of second and IERS
astrotime = DT.UnixTime_to_datetime(UNIXtime)
self.bintabHDU.data[index]['LST'][0,record,0,0,0,0] = (
A.greenwich_sidereal_time(
now.tm_year, now.tm_yday + (now.tm_hour+now.tm_min/60.)/24.
)
- self.location.lon.hour
) % 24
self.bintabHDU.data[index]['VFRAME'] = \
Ared.V_LSR(self.RA, self.dec, self.telescope.number, astrotime)
self.bintabHDU.data[index]['RVSYS'] = \
self.bintabHDU.data[index]['VELOCITY'] \
- self.bintabHDU.data[index]['VFRAME']
# the following could be changed to one per scan
        self.bintabHDU.data[index]['TAMBIENT'][0,record,0,0,0,0] = self.temperature
        self.bintabHDU.data[index]['PRESSURE'][0,record,0,0,0,0] = self.pressure
        self.bintabHDU.data[index]['HUMIDITY'][0,record,0,0,0,0] = self.humidity
        self.bintabHDU.data[index]['WINDSPEE'][0,record,0,0,0,0] = self.windspeed
        self.bintabHDU.data[index]['WINDDIRE'][0,record,0,0,0,0] = self.winddirection
        self.bintabHDU.data[index]['BEAMXOFF'][0,record,0,0,0,0] = self.xel_offset
        self.bintabHDU.data[index]['BEAMEOFF'][0,record,0,0,0,0] = self.el_offset
# more columns
# get the system temperatures
tsys = self.tsys
self.logger.debug("add_data_to_FITS: TSYS: %s", tsys)
# data array has shape (32768,5)
#self.data_array = np.array(data)
self.logger.debug("add_data_to_FITS: data_array shape is %s",
self.data_array.shape)
#data_only = self.data_array[:,1:] # the first column is frequency
self.logger.debug("add_data_to_FITS: data_only shape is %s",
self.data_array.shape)
for ridx in range(4):
roach = self.roachnames[ridx]
pol = self.pols[ridx]
beam = self.beams[ridx]
data = self.data_array[:,ridx]
self.bintabHDU.data[index]['data'][beam,record,pol,0,0,:] = data
self.bintabHDU.data[index]['TSYS'][beam,record,pol,0,0,0] = tsys[ridx]
self.logger.debug(
"add_data_to_FITS: stored sc %d, rec %d, pol %d, bm %d for %s at row %d",
scan, record, pol, beam, roach, index)
if self.spectra_left == 0:
# save scan
self.save_FITS()
@Pyro5.api.oneway
def two_beam_nod(self,
cycles=1,
scan_time=60.0,
integration_time=None):
"""
"""
default_integration_time = {
"Antenna": 2.0,
"FrontEnd": 2.0,
"Receiver": 2.0,
"Backend": 1.0
}
if integration_time is None:
integration_time = default_integration_time
default_integration_time.update(integration_time)
integration_time = default_integration_time.copy()
self.logger.info(
"two_beam_nod: cycles: {}, scan_time: {}".format(
cycles, scan_time))
# for equip in ["Antenna", "Receiver", "FrontEnd"]:
#for equip in ["Antenna", "Receiver"]:
# if hasattr(self.equipment[equip], "daemon"):
# if not self.equipment[equip].is_alive():
# self.equipment[equip].daemon = True
#
# self.equipment[equip].start_recording(
# interval=integration_time[equip]
# )
# self.logger.debug("two_beam_nod: {} recording".format(equip))
for cycle in range(cycles):
for feed in range(2):
self.two_beam_nod.cb({"done": False,
"cycle": cycle,
"feed": feed})
self.single_scan(
feed,
scan_time=scan_time,
integration_time=integration_time["Backend"]
)
#for equip in ["Antenna", "Receiver", "FrontEnd"]:
# self.equipment[equip].stop_recording()
# self.logger.debug(
# "two_beam_nod: {} recording stopped".format(equip))
self.two_beam_nod.cb({"done": True})
@Pyro5.api.oneway
def single_beam_nodding(self,
cycles=1,
time_per_scan=60.0,
integration_time=5.0,
power_meter_monitor_interval=2.0,
antenna_monitor_interval=2.0):
raise NotImplementedError()
# ---------------------------- Miscellaneous Methods --------------------------
@async_method
def set_obsmode(self, new_mode):
"""
called by client to set observing mode
"""
self.obsmode = new_mode
self.logger.debug("set_obsmode(%s): mode is now %s",
logtime(), self.obsmode)
def set_rest_freq(self, new_freq):
self.restfreq = new_freq
self.logger.info("set_rest_freq (%s): rest frequency is now %f",
logtime(), self.restfreq)
def server_time(self, *args, **kwargs):
self.logger.debug("args: %s", args)
self.logger.debug("keyword args: %s", kwargs)
return datetime.datetime.utcnow().strftime("%Y-%j-%Hh%Mm%Ss")
def get_configs(self):
"""
Gets a dict with context names and paths to their configurations
"""
from MonitorControl.Configurations import configs
return configs
def help(self):
return """
Attributes:
observatory (MonitorControl.Observatory): Observatory instance
equipment (dict): dictionary describing antenna/station hardware
boresight_manager (BoresightManager): post processing manager object
for retrieving old boresight results.
info (dict): dictionary containing information about current status
of different long running calibration/observation methods, as
well as sources, and verifiers.
Methods:
Start-up and shut-down methods:
set_info(path, val)
get_info(path=None)
save_info()
load_info()
close()
Hardware control:
configure(import_path, *args, **kwargs)
hdwr(hdwr, method_name, *args, **kwargs)
list_hdwr()
Source management:
load_sources(loader="json")
get_sources(source_names=None, when=None, filter_fn=None, formatter=None)
report_source_info(name_or_dict, units="degrees")
is_within(name_or_dict, bounds, axis="el")
Observatory details:
_get_observer_info_dict()
Antenna control:
point(name_or_dict)
    Data acquisition:
get_tsys(timestamp=False): return list of Tsys obtained from HP power meters
single_scan(feed, scan_time=60.0, integration_time=5.0): returns a single
spectrum
two_beam_nod(cycles=1, scan_time=60.0, integration_time=None): starts a
sequence of spectral scans in beam and position switching mode
single_beam_nodding(cycles=1, time_per_scan=60.0, integration_time=5.0,
power_meter_monitor_interval=2.0, antenna_monitor_interval=2.0): starts
a position switching sequence
Calibration:
scanning_boresight(el_previous_offset, xel_previous_offset,
limit=99.0, sample_rate=0.3, rate=3.0, settle_time=10.0,
src_name_or_dict=None,two_direction=True, additional_offsets=None,
channel=0, attrs=None)
stepping_boresight(el_previous_offset, xel_previous_offset,
n_points=9, integration_time=2, settle_time=10.0, two_direction=True,
src_name_or_dict=None, additional_offsets=None, channel=0, attrs=None)
get_boresight_analyzer_object(file_path)
get_most_recent_boresight_analyzer_object()
process_minical_calib(cal_data, Tlna=25, Tf=1, Fghz=20, TcorrNDcoupling=0)
tsys_calibration(settle_time=10.0, pm_integration_time=5.0)
stop_tsys_calibration()
tip()
File management:
_create_calibration_file_path(...):
_create_calibration_file_obj(...):
get_boresight_file_paths(...):
Miscellaneous:
server_time(): returns current time
"""
@async_method
def get_projects(self):
"""
get a list of all the projects
"""
projects = []
for project in projcfg.get_projects():
projects.append(project)
projects.sort()
self.get_projects.cb(projects)
return projects
@async_method
def get_activities(self):
"""
get a list of all the activities of the current project
"""
project = self.info['project']['name']
activities = projcfg.get_activity()[project]
self.logger.debug("get_activities: activities: %s", activities)
activities.sort()
self.get_activities.cb(activities)
return activities
@async_method
def get_equipment(self):
"""
get a list of all the devices in the current configuration
"""
devices = {}
for device,obj in list(self.equipment.items()):
if obj != None:
devices[device] = str(obj)
self.get_equipment.cb(devices)
return devices
@async_method
def change_project(self, project, activity=None, context=None):
"""
select new project
"""
self.info['sources'] = {}
self.info['verifiers'] = {}
self.info['project']['name'] = project
self.info['project']["source_dir"] = projects_dir+project+"/Observations"
self.project = project
if activity:
self.activity = activity
else:
self.activity = self.get_default_activity(self.project)
self.logger.debug("change_project: activity is %s", self.activity)
if context:
if context in list(self.get_configs().keys()):
observatory, equipment = MCcfg.station_configuration(context)
else:
context = configs[self.project][self.activity]
self.logger.debug("change_project: context is %s", context)
if context in list(self.get_configs().keys()):
observatory, equipment = MCcfg.station_configuration(context)
else:
# assume a form AAAADDB
activity = context[:4]
dss = int(context[4:6])
band = context[6]
now = time.gmtime()
timestr = "%02d%02d" % (now.tm_hour, now.tm_min)
observatory, equipment = std_configuration(None, self.activity, dss,
now.tm_year, now.tm_yday, timestr, band)
@async_method
def get_activitys_project(self, activity):
"""
get the project associated with the current activity
This is just for an information request by the client. It does not change
        the current project.
"""
self.logger.debug("get_activitys_project: called for %s", activity)
project = projcfg.activity_project(activity)
self.logger.debug("get_activitys_project: project is %s", project)
self.get_activitys_project.cb(project)
return project
@async_method
def get_default_activity(self, project):
"""
This assumes a specific format for the project and activity names.
This is just for an information request by the client. It does not change
        the current project.
"""
self.logger.debug("get_default_activity: called for %s", project)
activity = get_auto_project(project).split("_")[1]+'0'
self.logger.debug("get_default_activity: activity is %s", activity)
self.get_default_activity.cb(activity)
return activity
@async_method
def get_project_activities(self, project):
"""
Get the activities associated with a project.
This assumes a specific format for the project and activity names. It
should probably go in the Automation module.
"""
self.logger.debug("get_project_activities: called for %s", project)
activity_root = get_auto_project(project).split("_")[1]
proj_activities = []
for activity in self.get_activities():
if activity_root in activity:
proj_activities.append(activity)
proj_activities.sort()
self.logger.debug("get_project_activities: activities are %s",
proj_activities)
self.get_project_activities.cb(proj_activities)
return proj_activities
# ================================= Program ===================================
if __name__ == "__main__":
def create_arg_parser():
"""
create an argument parser and define arguments
"""
import argparse
parser = argparse.ArgumentParser(description="Fire up DSS control server.")
parser.add_argument("--verbose", "-v",
dest="verbose", required=False,
action="store_true", default=True,
help="In verbose mode, the log level is DEBUG; default: False")
parser.add_argument("--simulated", "-s",
dest="simulated", required=False,
action="store_true", default=True,
help="In simulated mode, DSS Server won't attempt to "
+"connect to hardware servers. Default: True")
parser.add_argument("--flask", "-f",
dest="flask", required=False,
action="store_true", default=False,
help="Run server as | |
@classmethod
def language_version(cls, version_flags=None, **kwargs):
r"""Determine the version of this language.
Args:
**kwargs: Keyword arguments are passed to cls.run_executable.
Returns:
str: Version of compiler/interpreter for this language.
"""
if version_flags is None:
version_flags = cls.version_flags
return cls.run_executable(version_flags, **kwargs).splitlines()[0].strip()
@classmethod
def is_installed(cls):
r"""Determine if this model driver is installed on the current
machine.
Returns:
bool: Truth of if this model driver can be run on the current
machine.
"""
return (cls.is_language_installed()
and cls.are_base_languages_installed()
and cls.are_dependencies_installed()
and cls.is_interface_installed() and cls.is_comm_installed()
and cls.is_configured() and (not cls.is_disabled()))
@classmethod
def are_base_languages_installed(cls, missing=None):
r"""Determine if the base languages are installed.
Args:
missing (list, optional): A pre-existing list that
missing base languages should be appended to.
Returns:
            bool: True if the base languages are installed. False otherwise.
"""
out = True
for x in cls.base_languages:
if (not out) and (not isinstance(missing, list)): # pragma: no cover
break
out = import_component('model', x).is_installed()
if isinstance(missing, list) and (not out):
missing.append(x)
if missing:
out = False
return out
@classmethod
def are_dependencies_installed(cls):
r"""Determine if the dependencies are installed for the interface (not
including dependencies needed by a particular communication type).
Returns:
bool: True if the dependencies are installed. False otherwise.
"""
out = (cls.language is not None)
for x in cls.interface_dependencies:
if not out: # pragma: no cover
break
out = cls.is_library_installed(x)
return out
@classmethod
def is_interface_installed(cls):
r"""Determine if the interface library for the associated programming
language is installed.
Returns:
bool: True if the interface library is installed.
"""
out = (cls.language is not None)
if out and (cls.interface_library is not None):
out = cls.is_library_installed(cls.interface_library)
return out
@classmethod
def is_language_installed(cls):
r"""Determine if the interpreter/compiler for the associated programming
language is installed.
Returns:
bool: True if the language interpreter/compiler is installed.
"""
out = False
if cls.language is not None:
try:
out = (shutil.which(cls.language_executable()) is not None)
except NotImplementedError: # pragma: debug
out = False
return out
@classmethod
def identify_source_files(cls, args=None, working_dir=None, **kwargs):
r"""Determine the source file based on model arguments.
Args:
args (list, optional): Arguments provided.
working_dir (str, optional): Working directory.
**kwargs: Additional keyword arguments are ignored.
Returns:
list: Source files.
"""
out = []
if args:
src = args[0]
if (((not cls.is_source_file(src))
and (cls.language_ext is not None)
and (os.path.splitext(src)[-1]
not in cls.get_all_language_ext()))):
src = os.path.splitext(src)[0] + cls.language_ext[0]
if working_dir and (not os.path.isabs(src)):
src = os.path.normpath(os.path.join(working_dir, src))
if os.path.isfile(src):
out.append(src)
return out
@classmethod
def is_source_file(cls, fname):
r"""Determine if the provided file name points to a source files for
the associated programming language by checking the extension.
Args:
fname (str): Path to file.
Returns:
bool: True if the provided file is a source file, False otherwise.
"""
out = False
model_ext = os.path.splitext(fname)[-1]
if len(model_ext) > 0:
out = (model_ext in cls.get_language_ext())
return out
@classmethod
def is_library_installed(cls, lib, **kwargs):
r"""Determine if a dependency is installed.
Args:
lib (str): Name of the library that should be checked.
**kwargs: Additional keyword arguments are ignored.
Returns:
bool: True if the library is installed, False otherwise.
"""
raise NotImplementedError("Method is_library_installed missing for '%s'"
% cls.language)
@classmethod
def is_disabled(cls):
return (cls.cfg.get(cls.language, 'disable', 'false').lower() == 'true')
@classmethod
def is_configured(cls):
r"""Determine if the appropriate configuration has been performed (e.g.
installation of supporting libraries etc.)
Returns:
bool: True if the language has been configured.
"""
        # Check for section & disable
disable_flag = cls.is_disabled()
out = (cls.cfg.has_section(cls.language) and (not disable_flag))
# Check for commtypes
if out and (len(cls.base_languages) == 0):
out = (cls.cfg.get(cls.language, 'commtypes', None) is not None)
# Check for config keys
for k in cls._config_keys:
if not out: # pragma: no cover
break
out = (cls.cfg.get(cls.language, k, None) is not None)
return out
@classmethod
def is_comm_installed(cls, commtype=None, skip_config=False, **kwargs):
r"""Determine if a comm is installed for the associated programming
language.
Args:
commtype (str, optional): If provided, this method will only test
for installation of the specified communication type. Defaults
to None and will check for any installed comm.
skip_config (bool, optional): If True, the config list of comms
installed for this language will not be used to determine if
the comm is installed and the class attribute
supported_comm_options will be processed. Defaults to False and
config options are used in order to improve performance after
initial configuration.
platforms (list, optional): Platforms on which the comm can be
installed. Defaults to None and is ignored unless there is a
value for the commtype in supported_comm_options. This
keyword argument is ignored if skip_config is False.
libraries (list, optional): External libraries that are required
by the specified commtype. Defaults to None and is ignored
unless there is a value for the commtype in supported_comm_options.
This keyword argument is ignored if skip_config is False.
**kwargs: Additional keyword arguments are passed to either
is_comm_installed for the base languages, supported languages,
or is_library_installed as appropriate.
Returns:
bool: True if a comm is installed for this language.
"""
# If there are base_languages for this language, use that language's
# driver to check for comm installation.
if len(cls.base_languages) > 0:
out = True
for x in cls.base_languages:
if not out: # pragma: no cover
break
out = import_component('model', x).is_comm_installed(
commtype=commtype, skip_config=skip_config, **kwargs)
return out
if cls.comms_implicit:
if commtype is None:
return True
return (commtype in tools.get_supported_comm())
# Check for installation based on config option
if not skip_config:
installed_comms = cls.cfg.get(cls.language, 'commtypes', [])
if commtype is None:
return (len(installed_comms) > 0)
else:
return (commtype in installed_comms)
# Check for any comm
if commtype is None:
for c in cls.supported_comms:
if cls.is_comm_installed(commtype=c, skip_config=skip_config,
**kwargs):
return True
# Check that comm is explicitly supported
if commtype not in cls.supported_comms:
return False
# Set & pop keywords
for k, v in cls.supported_comm_options.get(commtype, {}).items():
if kwargs.get(k, None) is None:
kwargs[k] = v
platforms = kwargs.pop('platforms', None)
libraries = kwargs.pop('libraries', [])
# Check platforms
if (platforms is not None) and (platform._platform not in platforms):
return False # pragma: windows
# Check libraries
if (libraries is not None):
for lib in libraries:
if not cls.is_library_installed(lib, **kwargs):
return False
# Check for server on RabbitMQ
if commtype in ['rmq', 'rmq_async']:
from yggdrasil.communication.RMQComm import check_rmq_server
if not check_rmq_server():
return False
return True
@classmethod
def configure(cls, cfg):
r"""Add configuration options for this language.
Args:
            cfg (YggConfigParser): Config class that options should be set for.
Returns:
list: Section, option, description tuples for options that could not
be set.
"""
out = []
# Section and executable
if (cls.language is not None) and (not cfg.has_section(cls.language)):
cfg.add_section(cls.language)
# Executable type configuration
out += cls.configure_executable_type(cfg)
# Locate executable
if (((not cls.is_language_installed())
and (cls.executable_type is not None))): # pragma: debug
try:
exec_file = cls.language_executable()
if exec_file is not None:
fpath = tools.locate_file(
exec_file, directory_list=cls._executable_search_dirs)
if fpath:
cfg.set(cls.language, cls.executable_type, fpath)
except NotImplementedError:
pass
# Configure libraries
out += cls.configure_libraries(cfg)
# Only do additional configuration if no base languages
if not cls.base_languages:
# Installed comms
comms = []
for c in cls.supported_comms:
if cls.is_comm_installed(commtype=c, cfg=cfg, skip_config=True):
comms.append(c)
cfg.set(cls.language, 'commtypes', comms)
cls.after_registration(cls, cfg=cfg, second_pass=True)
return out
@classmethod
def configure_executable_type(cls, cfg):
r"""Add configuration options specific in the executable type
before the libraries are configured.
Args:
cfg (YggConfigParser): Config class that options should be set for.
Returns:
list: Section, option, description tuples for options that could not
be set.
"""
return []
@classmethod
def configure_libraries(cls, cfg):
r"""Add configuration options for external libraries in this language.
Args:
cfg (YggConfigParser): Config class that options should be set for.
Returns:
list: Section, option, description tuples for options that could not
be set.
"""
return []
def get_io_env(self, input_drivers=None, output_drivers=None):
r"""Get environment variables set by the input/output drivers.
Args:
input_drivers (list, optional): Input drivers. Defaults to the
yaml entry if not provided.
output_drivers (list, optional): Output drivers. Defaults to the
yaml entry if not provided.
Returns:
dict: Environment variables.
"""
if input_drivers is None:
input_drivers = self.yml.get('input_drivers', [])
if output_drivers is None:
output_drivers = self.yml.get('output_drivers', [])
out = {}
if self.copies > 1:
from | |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
import zulip
import requests
import re
import pprint
import os
from database import VotingTopics
import parsley
class VotingBot():
"""bot takes a zulip username and api key, a word or phrase to respond to,
a search string for giphy, an optional caption or list of captions, and
a list of the zulip streams it should be active in. It then posts a
caption and a randomly selected gif in response to zulip messages.
"""
def __init__(self, zulip_username, zulip_api_key, key_word,
subscribed_streams=[]):
self.username = zulip_username
self.api_key = zulip_api_key
self.key_word = key_word.lower().strip()
self.subscribed_streams = subscribed_streams
self.client = zulip.Client(zulip_username, zulip_api_key)
self.subscriptions = self.subscribe_to_streams()
self.voting_topics = VotingTopics()
@property
def streams(self):
''' Standardizes a list of streams in the form [{'name': stream}]
'''
if not self.subscribed_streams:
streams = [{'name': stream['name']}
for stream in self.get_all_zulip_streams()]
return streams
else:
streams = [{'name': stream} for stream in self.subscribed_streams]
return streams
def get_all_zulip_streams(self):
''' Call Zulip API to get a list of all streams
'''
response = requests.get('https://api.zulip.com/v1/streams',
auth=(self.username, self.api_key))
if response.status_code == 200:
return response.json()['streams']
elif response.status_code == 401:
raise RuntimeError('check yo auth')
else:
raise RuntimeError(':( we failed to GET streams.\n(%s)' % response)
def subscribe_to_streams(self):
''' Subscribes to zulip streams
'''
self.client.add_subscriptions(self.streams)
def respond(self, msg):
''' checks msg against key_word. If key_word is in msg, gets a gif url,
picks a caption, and calls send_message()
'''
# decode if necessary
if type(msg["content"]) == unicode:
content = msg["content"]
else:
content = msg["content"].decode("utf-8", "replace")
first_word = content.split()[0].lower().strip()
# check if it's a relevant message fo the bot
if self.key_word == first_word and msg["sender_email"] != self.username:
self.parse_public_message(msg, content)
elif msg["type"] == "private" and msg["sender_email"] != self.username:
self.parse_private_message(msg, content)
def send_message(self, msg):
''' Sends a message to zulip stream
'''
if msg["type"] == "stream":
msg["to"] = msg['display_recipient']
elif msg["type"] == "private":
msg["to"] = msg["sender_email"]
self.client.send_message(msg)
def parse_public_message(self, msg, content):
'''Parse public message given to the bot.
The resulting actions can be:
-send_results
-send_help
-add_voting_option
-add_vote
-new_voting_topic
-post_error
'''
action, title, arg = self._parse_public_message(content)
if action == "results":
self.send_results(msg, title)
elif action == "help":
msg["type"] = "private"
self.send_help(msg)
elif action == "option":
self.add_voting_option(msg, title.lower(), arg)
elif action == "vote":
self.add_vote(msg, title.lower(), int(arg))
elif action == "topic":
self.new_voting_topic(msg, title, arg)
else:
self.post_error(msg)
@classmethod
def _parse_public_message(cls, content):
# remove key word
len_key_word = len(content.split()[0])
user_content = content[len_key_word:]
user_cont_lines = user_content.split("\n")
        # convert multi-line content into a one-liner
if len(user_cont_lines) == 2:
user_content = user_content.replace("\n", ": ")
elif len(user_cont_lines) > 2:
options = ",".join(user_cont_lines[1:])
user_content = user_cont_lines[0] + ": " + options
# fix colon ":" omission in the message
elif len(user_cont_lines) == 1 and ":" not in user_content:
if "add" in user_content.lower():
i = user_content.lower().index("add")
user_content = user_content[:i - 1] + ":" + \
user_content[i - 1:]
elif "results" in user_content.lower():
i = user_content.lower().index("results")
user_content = user_content[:i - 1] + ":" + \
user_content[i - 1:]
elif user_content.split()[-1].isdigit():
words = user_content.split()
words[-2] += ":"
user_content = " ".join(words)
elif "help" in user_content.lower():
pass
elif "," in user_content:
index = user_content.index(",")
i = user_content.rfind(" ", 0, index)
user_content = user_content[:i] + ":" + \
user_content[i:]
elif len(user_content.split()) > 1:
i = user_content.rfind(" ")
user_content = user_content[:i] + ":" + \
user_content[i:]
else:
raise Exception("one liner with no colon and no fix!" + user_content)
# PEG for one liner
grammar = parsley.makeGrammar("""
not_colon = anything:x ?(':' not in x)
title = <not_colon+>:t ':' -> t.strip()
results = 'results' -> ("results", None)
option = 'add' ':'? ws <anything+>:arg -> ("option", arg.capitalize())
vote = <digit+>:arg -> ("vote", int(arg))
topic = <anything+>:arg -> ("topic", [i.strip() for i in arg.split(",")])
vote_act = results | option | vote | topic
help = ':'? ws 'help' -> ("help", None, None)
voting_msg = title:t ws vote_act:va -> (va[0], t, va[1])
expr = voting_msg | help
""", {})
try:
# if "help" in user_content:
# print "help", user_content
# print user_content
RV = grammar(user_content.lower()).expr()
# print RV
except:
# print user_content
RV = (None, None, None)
return RV
def parse_private_message(self, msg, content):
'''Parse private message given to the bot.
The resulting actions can be:
-add_vote
-send_voting_help
-post_error
-send_partial_results
'''
msg_content = content.lower()
title = msg_content.split("\n")[0]
if content.lower().strip() == "help":
self.send_help(msg)
elif title.strip() in self.voting_topics.keys():
split_msg = msg_content.split("\n")
if len(split_msg) == 2:
regex = re.compile("[0-9]+")
option_number = split_msg[1].split(" ")[0]
if regex.match(option_number):
option_number = int(option_number)
                    self.add_vote(msg, title.lower(), option_number)
elif split_msg[1].split(" ")[0].strip() == "results":
self.send_partial_results(
title.lower(), msg["sender_email"])
else:
print "regex did not match"
self.send_voting_help(msg)
else:
self.post_error(msg)
else:
print "title not in keys" + title
pprint.pprint(self.voting_topics)
self.send_voting_help(msg)
def send_no_voting_topic(self, msg, title):
pass
def post_error(self, msg):
pass
def send_partial_results(self, title, owner_email):
if title in self.voting_topics and \
owner_email == self.voting_topics[title]["owner_email"]:
results = self._get_topic_results(title)
msg = {"type": "private",
"content": results,
"sender_email": owner_email}
self.send_message(msg)
def new_voting_topic(self, msg, title, options):
'''Create a new voting topic.'''
print "Voting topic", title, "already?:", title.lower() in self.voting_topics
if title.lower() in self.voting_topics:
self.send_repeated_voting(msg)
elif title:
msg["content"] = title
options_dict = {}
for x in range(len(options)):
options_dict[x] = [options[x], 0]
msg["content"] += "\n " + unicode(x) + ". " + options[x]
self.voting_topics[title.lower()] = {"title": title,
"options": options_dict,
"people_who_have_voted": {},
"owner_email": msg["sender_email"]}
self.send_message(msg)
else:
self.send_help(msg)
def add_voting_option(self, msg, title, new_voting_option):
'''Add a new voting option to an existing voting topic.'''
if title.lower().strip() in self.voting_topics:
vote = self.voting_topics[title.lower().strip()]
options = vote["options"]
if self._not_already_there(options, new_voting_option):
options_num = options.keys()
new_option_num = len(options_num)
options[new_option_num] = [new_voting_option, 0]
msg["content"] = "There is a new option in topic: " + title
for x in range(len(options)):
msg["content"] += "\n " + unicode(x) + ". " + options[x][0]
self.send_message(msg)
else:
msg["content"] = new_voting_option + \
" is already an option in topic: " + title + \
"\nDo not attempt to repeat options!"
self.send_message(msg)
self.voting_topics[title.lower().strip()] = vote
def _not_already_there(self, vote_options, new_voting_option):
options = [opt[0] for opt in vote_options.values()]
return new_voting_option not in options
def add_vote(self, msg, title, option_number):
'''Add a vote to an existing voting topic.'''
vote = self.voting_topics[title]
print vote
if option_number in vote["options"].keys():
if msg["sender_email"] not in vote["people_who_have_voted"]:
vote["options"][option_number][1] += 1
vote["people_who_have_voted"][
(msg["sender_email"])] = option_number
msg["content"] = self._get_add_vote_msg(msg, vote,
option_number, False,
title)
else:
old_vote_option = vote[
"people_who_have_voted"][msg["sender_email"]]
vote["options"][old_vote_option][1] += -1
vote["options"][option_number][1] += 1
vote["people_who_have_voted"][
(msg["sender_email"])] = option_number
msg["content"] = self._get_add_vote_msg(msg, vote,
option_number,
True, title)
else:
# print "option in range", type(option_number),
# vote["options"].keys()
msg["content"] = " ".join(["That option is not in the range of the",
"voting options. Here are your options:",
" \n"])
options_list = []
for i in xrange(len(vote["options"])):
new_option = unicode(i) + ". " + vote["options"][i][0]
options_list.append(new_option)
msg["content"] += "\n".join(options_list)
msg["type"] = "private"
self.send_message(msg)
print vote
self.voting_topics[title.strip()] = vote
def _get_add_vote_msg(self, msg, vote, option_number, changed_vote, title):
'''Creates a different msg if the vote was private or public.'''
option_desc = vote["options"][option_number][0]
if changed_vote:
msg_content = "You have changed your vote. \n"
else:
msg_content = ""
if msg["type"] == "private":
msg_content += "One vote in this topic: " + vote["title"] + \
" for this option: " + option_desc
else:
msg_content += "".join(["You just voted for '",
option_desc, "' in ", title])
return msg_content
def send_help(self, msg):
with open("messages/complete_help.md") as f:
msg["content"] = f.read()
self.send_message(msg)
def send_repeated_voting(self, msg):
msg["content"] = "This topic already exists! Choose another name."
self.send_message(msg)
def send_voting_help(self, msg):
with open("messages/voting_help.md") as f:
msg["content"] = f.read()
self.send_message(msg)
def post_error(self, msg):
return
def send_results(self, msg, title):
'''Publicly send results of voting in the thread that was used.'''
if title.lower() in self.voting_topics:
msg["content"] = self._get_topic_results(title)
del self.voting_topics[title.lower()]
self.send_message(msg)
def _get_topic_results(self, title):
vote = self.voting_topics[title.lower().strip()]
results = "The results are in!!!! \nTopic: " + vote["title"]
for option in vote["options"].values():
results += "\n{0} has {1} votes.".format(
option[0], unicode(option[1]))
return results
def delete_voting_topic(self, voting_title):
print "deleting", voting_title
dict_params = {self.voting_topics.KEY_FIELD: unicode(voting_title)}
with self.voting_topics.db as db:
db[self.voting_topics.TABLE].delete(**dict_params)
print voting_title, "deleted from voting_bot.py!"
def main(self):
''' Blocking call that runs forever. Calls self.respond() on every
message received.
'''
self.client.call_on_each_message(lambda msg: self.respond(msg))
# print msg
def main():
zulip_username = '<EMAIL>'
zulip_api_key = os.environ['ZULIP_API_KEY']
key_word = '<PASSWORD>'
subscribed_streams = []
    new_bot = VotingBot(zulip_username, zulip_api_key,
# ui/file_manager/base/tools/modules.py
#!/usr/bin/env python
#
# Copyright (c) 2020 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import os
import re
import sys
import subprocess
def get_file_lines(file_path):
    '''Returns the list of lines from a file, with the trailing '\n' removed
    from each line.'''
with open(file_path, 'r') as f:
file_lines = f.readlines()
return [l.rstrip() for l in file_lines]
def save_file_lines(file_path, file_lines):
    '''Saves file_lines to the file at `file_path`, adding the trailing '\n'
    back to each line.'''
with open(file_path, 'w') as f:
for line in file_lines:
f.write(line + '\n')
def get_relative_dependency(path, dir_path):
'''Given a file path, returns formatted BUILD.gn dependency.
Parameters:
path: file path to format as relative dependency.
dir_path: directory from which the relative dependency is calculated.
Returns:
Formatted dependency.
The 3 cases illustrated below are handled:
- ":file_type.m" if returned if file_type.js is in dir_path.
- "metadata:metadata_model.m" is returned if metadata/metadata_model.js
is in dir_path.
- "//ui/file_manager/file_manager/externs:volume_manager.m" is returned if
ui/file_manager/file_manager/externs is not included in dir_path.
'''
split_path = path.split('/')
file_name = split_path.pop().replace('.js', '.m')
split_dir_path = dir_path.split('/')
while (len(split_path) > 0 and len(split_dir_path) > 0
and split_path[0] == split_dir_path[0]):
del split_path[0]
del split_dir_path[0]
if len(split_dir_path) == 0:
# The dependency is within dir_path.
return '/'.join(split_path) + ':' + file_name
else:
return '//' + re.sub(r"\/[a-zA-Z0-9_]+\.js", ":", path) + file_name
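# Illustrative calls (assumed paths, mirroring the docstring cases above):
#   get_relative_dependency('ui/file_manager/file_manager/foreground/js/file_type.js',
#                           'ui/file_manager/file_manager/foreground/js')
#       -> ':file_type.m'
#   get_relative_dependency('ui/file_manager/file_manager/foreground/js/metadata/metadata_model.js',
#                           'ui/file_manager/file_manager/foreground/js')
#       -> 'metadata:metadata_model.m'
#   get_relative_dependency('ui/file_manager/file_manager/externs/volume_manager.js',
#                           'ui/file_manager/file_manager/foreground/js')
#       -> '//ui/file_manager/file_manager/externs:volume_manager.m'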
def get_index_substr(file_lines, substr):
    '''Finds the first occurrence of `substr` and returns its index in
`file_lines`.'''
for i, line in enumerate(file_lines):
if substr in line:
return i
return -1
def get_end_of_copyright(file_lines):
'''Get index of last line of copyright (after checking that the section
exists).'''
index = get_index_substr(file_lines, 'Copyright 20')
if index < 0:
return -1
while index < len(file_lines) and file_lines[index].startswith('// '):
index += 1
return index - 1
def get_end_of_file_overview(file_lines):
'''Get index of last line of file_overview (after checking that the section
exists).'''
index = get_index_substr(file_lines, '@fileoverview')
if index < 0:
return -1
while index < len(file_lines) and file_lines[index] != ' */':
index += 1
return index
def get_last_index_of_line_starting_with(file_lines, substr):
'''Get last line starting with the given `substr`.'''
last_index = -1
for i, line in enumerate(file_lines):
if line.startswith(substr):
last_index = i
return last_index
def get_end_of_build_imports(file_lines):
'''In BUILD.gn, gets index of last 'import' line at the beginning of the
file.'''
index = get_last_index_of_line_starting_with(file_lines, 'import("//')
if index < 0:
return get_end_of_copyright(file_lines)
return index
def add_js_library(file_lines, file_name, dir_path):
'''Adds js_library target in BUILD.gn.'''
    # Check that the library does exist and hasn't already been converted.
    # Note: list.index() raises ValueError if the entry is missing, so test
    # membership first.
    library_line = 'js_library("%s") {' % (file_name)
    if library_line not in file_lines:
        print 'Unable to find js_library for {}'.format(file_name)
        return False
    i = file_lines.index(library_line)
if 'js_library("%s.m") {' % (file_name) in file_lines:
print 'js_library for {}.m already added'.format(file_name)
return False
# Find the end of the library definition.
while i < len(file_lines) and file_lines[i] != '}':
i += 1
i += 1
if i == len(file_lines):
print 'reached end of file'
return False
new_lines = '''
js_library("%s.m") {
sources = [ "$root_gen_dir/%s/%s.m.js" ]
extra_deps = [ ":modulize" ]
}''' % (file_name, dir_path, file_name)
file_lines[i:i] = new_lines.split('\n')
return True
def add_import_line(file_lines, variable, relative_path, is_unittest):
'''Adds import line (import {...} from '...') in JS file.'''
# Construct import line.
import_line = 'import ' if is_unittest else '// #import '
# Check: existing relative path.
i = get_index_substr(file_lines, relative_path)
if i >= 0 and '}' in file_lines[i]:
if variable + '}' in file_lines[i] or variable + ',' in file_lines[i]:
return
split_line = file_lines[i].split('}')
file_lines[i] = '%s, %s}%s' % (split_line[0], variable, split_line[1])
return
import_line += "{%s} from '%s';" % (variable, relative_path)
if import_line in file_lines:
return
# Add clang-format off/on if necessary.
index = 0
if '// clang-format off' in file_lines:
index = file_lines.index('// clang-format off')
else:
# Go to the end of copyright and fileoverview.
index = get_end_of_file_overview(file_lines)
if index < 0:
index = get_end_of_copyright(file_lines)
if (index < 0):
index = 0
index += 1
if len(import_line) > 80:
index += 1
file_lines.insert(index, '// clang-format off')
# Go to the last existing import line.
last_import_line_index = get_last_index_of_line_starting_with(
file_lines, 'import ')
if last_import_line_index >= 0:
index = last_import_line_index
else:
file_lines.insert(index + 1, '')
file_lines.insert(index + 1, '// clang-format on')
elif 'import ' not in file_lines[index + 1]:
file_lines.insert(index, '')
# Add import line.
file_lines.insert(index + 1, import_line)
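# Illustrative outcome (the variable and path below are assumed values): for
# variable 'VolumeManager' and relative_path '../../externs/volume_manager.m.js',
# a unittest file would get
#   import {VolumeManager} from '../../externs/volume_manager.m.js';
# while a non-unittest file gets the commented form, presumably rewritten later
# by the js_modulizer step:
#   // #import {VolumeManager} from '../../externs/volume_manager.m.js';
# If an import from the same relative_path already exists, the variable is merged
# into its existing {...} list instead of adding a new line.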
def add_namespace_rewrite(build_gn_path):
build_file_lines = get_file_lines(build_gn_path)
# Add import("//ui/webui/resources/js/cr.gni").
cr_gni = 'import("//ui/webui/resources/js/cr.gni")'
modulizer_gni = 'import("//ui/webui/resources/tools/js_modulizer.gni")'
if not cr_gni in build_file_lines:
if not modulizer_gni in build_file_lines:
raise ValueError('"js_modulizer.gni" not found')
index = build_file_lines.index(modulizer_gni)
build_file_lines.insert(index, cr_gni)
# Add namespace_rewrites = cr_namespace_rewrites.
namespace_rewrite = ' namespace_rewrites = cr_namespace_rewrites'
if not namespace_rewrite in build_file_lines:
index = get_index_substr(build_file_lines,
'js_modulizer("modulize") {')
if index < 0:
print 'No modulize rule found'
return
while index < len(build_file_lines) and build_file_lines[index] != '':
index += 1
index -= 1
build_file_lines.insert(index, '')
build_file_lines.insert(index + 1, namespace_rewrite)
save_file_lines(build_gn_path, build_file_lines)
def add_hide_third_party(build_gn_path):
build_file_lines = get_file_lines(build_gn_path)
hide_third_party = ' "hide_warnings_for=third_party/",'
prefix_replacement_line = 'browser_resolver_prefix_replacements='
if not hide_third_party in build_file_lines:
index = get_index_substr(build_file_lines, prefix_replacement_line)
if index < 0:
print 'prefix replacement not found in "js_test_gen_html_modules"'
return
build_file_lines.insert(index + 1, hide_third_party)
save_file_lines(build_gn_path, build_file_lines)
def add_dependency(file_lines, rule_first_line, list_name, dependency_line):
'''
Add dependency in BUILD.gn.
Parameters:
file_lines: lines of BUILD.gn file.
rule_first_line: opening line of target to update.
      list_name: name of the dependency list: 'deps', 'input_files', etc.
dependency_line: line to add to the dependency list to update.
'''
# Find a line that starts with deps.
if not rule_first_line in file_lines:
print 'Unable to find ' + rule_first_line
return False
# Find index of `list_name`. Get index of 'sources = [' in case `list_name`
# is not defined.
rule_index = file_lines.index(rule_first_line)
sources_index = -1
insertion_index = -1
single_line_dependency_list = False
for i in range(rule_index, len(file_lines)):
if 'sources = [' in file_lines[i]:
# Jump to the end of the 'sources' list.
while not ']' in file_lines[i]:
i += 1
sources_index = i
if ' {} = '.format(list_name) in file_lines[i]:
# Dependency line found.
if file_lines[i].endswith(']'):
single_line_dependency_list = True
# Go to the end of the dependency list.
while not ']' in file_lines[i]:
if dependency_line == file_lines[i]:
# Dependency already found.
return False
i += 1
insertion_index = i
break
if file_lines[i] == '}':
# End of build rule.
break
if insertion_index == -1:
# Add dependency line, after sources if possible.
index = sources_index + 1 if sources_index > 0 else rule_index + 1
# Define new list over 2 lines: 'list_name = [\n]'
file_lines.insert(index, ' {} = ['.format(list_name))
file_lines.insert(index + 1, ' ]')
insertion_index = index + 1
if single_line_dependency_list:
# Use regex to find characters between [].
        result = re.search(r'\[(.*)\]', file_lines[insertion_index])
existing_dependency = result.group(1).strip()
new_lines = '''\
{} = [
{},
]'''.format(list_name, existing_dependency)
# Rewrite single-line dependency list.
file_lines[insertion_index:insertion_index + 1] = new_lines.split('\n')
insertion_index += 1
# Check for already imported dependency after reformatting.
if file_lines[insertion_index] == dependency_line:
return False
# If there was no existing dependency, remove appropriate line.
if existing_dependency == '':
del file_lines[insertion_index]
# Insert dependency.
file_lines.insert(insertion_index, dependency_line)
return True
def update_build_gn_dependencies(dir_path, file_name, build_gn_path):
print 'Updating BUILD.gn dependencies for ' + file_name
# Get file contents.
file_lines = get_file_lines(build_gn_path)
# Edit file with modules-related targets.
import_gni = 'import("//ui/webui/resources/tools/js_modulizer.gni")'
if not import_gni in file_lines:
index = get_end_of_build_imports(file_lines) + 1
file_lines.insert(index, import_gni)
new_lines = '''
js_modulizer("modulize") {
input_files = [
]
}'''
file_lines.extend(new_lines.split('\n'))
if not add_dependency(file_lines, 'group("closure_compile") {', 'deps',
' ":closure_compile_jsmodules",'):
return
# Add closure_compile_jsmodules rule.
index = get_index_substr(file_lines,
'js_type_check("closure_compile_module") {')
if index < 0:
print 'js_type_check("closure_compile_module") not found'
return
new_lines = '''\
js_type_check("closure_compile_jsmodules") {
deps = [
]
}
'''
file_lines[index:index] = new_lines.split('\n')
if not add_js_library(file_lines, file_name, dir_path):
return
# Add closure dependency.
if not add_dependency(
file_lines, 'js_type_check("closure_compile_jsmodules") {', 'deps',
' ":{}.m",'.format(file_name)):
return
# Add 'modulize' dependency.
if not add_dependency(file_lines, 'js_modulizer("modulize") {',
'input_files', ' "{}.js",'.format(file_name)):
return
# Save file contents.
save_file_lines(build_gn_path, file_lines)
def update_buid_gn_unittest_dependencies(build_gn_path, js_file_path,
file_name):
'''
Rename _unittest.js to _unittest.m.js.
Update test URL in file_manager_jstest.cc.
Update BUILD.gn rules.
It is assumed that if we're converting [file]_unittest.js, [file].js has
been converted already.
Parameters:
      build_gn_path: Path of BUILD.gn file used by the file to be converted.
js_file_path: Path of the file to be converted
(ui/file_manager/.../..._unittest.m.js)
      file_name: Name
uirevision=None,
usrc=None,
v=None,
visible=None,
vsrc=None,
w=None,
wsrc=None,
x=None,
xsrc=None,
y=None,
ysrc=None,
z=None,
zsrc=None,
row=None,
col=None,
**kwargs
):
"""
Add a new Cone trace
Use cone traces to visualize vector fields. Specify a vector
field using 6 1D arrays, 3 position arrays `x`, `y` and `z` and
3 vector component arrays `u`, `v`, `w`. The cones are drawn
exactly at the positions given by `x`, `y` and `z`.
Parameters
----------
anchor
Sets the cones' anchor with respect to their x/y/z
positions. Note that "cm" denote the cone's center of
mass which corresponds to 1/4 from the tail to tip.
autocolorscale
Determines whether the colorscale is a default palette
(`autocolorscale: true`) or the palette determined by
`colorscale`. In case `colorscale` is unspecified or
`autocolorscale` is true, the default palette will be
chosen according to whether numbers in the `color`
array are all positive, all negative or mixed.
cauto
Determines whether or not the color domain is computed
with respect to the input data (here u/v/w norm) or the
bounds set in `cmin` and `cmax` Defaults to `false`
when `cmin` and `cmax` are set by the user.
cmax
Sets the upper bound of the color domain. Value should
have the same units as u/v/w norm and if set, `cmin`
must be set as well.
cmid
Sets the mid-point of the color domain by scaling
`cmin` and/or `cmax` to be equidistant to this point.
Value should have the same units as u/v/w norm. Has no
effect when `cauto` is `false`.
cmin
Sets the lower bound of the color domain. Value should
have the same units as u/v/w norm and if set, `cmax`
must be set as well.
coloraxis
Sets a reference to a shared color axis. References to
these shared color axes are "coloraxis", "coloraxis2",
"coloraxis3", etc. Settings for these shared color axes
are set in the layout, under `layout.coloraxis`,
`layout.coloraxis2`, etc. Note that multiple color
scales can be linked to the same color axis.
colorbar
:class:`plotly.graph_objects.cone.ColorBar` instance or
dict with compatible properties
colorscale
Sets the colorscale. The colorscale must be an array
containing arrays mapping a normalized value to an rgb,
rgba, hex, hsl, hsv, or named color string. At minimum,
a mapping for the lowest (0) and highest (1) values are
required. For example, `[[0, 'rgb(0,0,255)'], [1,
'rgb(255,0,0)']]`. To control the bounds of the
colorscale in color space, use`cmin` and `cmax`.
Alternatively, `colorscale` may be a palette name
string of the following list: Greys,YlGnBu,Greens,YlOrR
d,Bluered,RdBu,Reds,Blues,Picnic,Rainbow,Portland,Jet,H
ot,Blackbody,Earth,Electric,Viridis,Cividis.
customdata
            Assigns extra data to each datum. This may be useful when
            listening to hover, click and selection events. Note
            that "scatter" traces also append customdata items in
            the markers DOM elements.
customdatasrc
Sets the source reference on Chart Studio Cloud for
customdata .
hoverinfo
Determines which trace information appear on hover. If
`none` or `skip` are set, no information is displayed
upon hovering. But, if `none` is set, click and hover
events are still fired.
hoverinfosrc
Sets the source reference on Chart Studio Cloud for
hoverinfo .
hoverlabel
:class:`plotly.graph_objects.cone.Hoverlabel` instance
or dict with compatible properties
hovertemplate
Template string used for rendering the information that
appear on hover box. Note that this will override
`hoverinfo`. Variables are inserted using %{variable},
for example "y: %{y}". Numbers are formatted using
d3-format's syntax %{variable:d3-format}, for example
"Price: %{y:$.2f}". https://github.com/d3/d3-3.x-api-
reference/blob/master/Formatting.md#d3_format for
details on the formatting syntax. Dates are formatted
using d3-time-format's syntax %{variable|d3-time-
format}, for example "Day: %{2019-01-01|%A}".
https://github.com/d3/d3-time-format#locale_format for
details on the date formatting syntax. The variables
available in `hovertemplate` are the ones emitted as
event data described at this link
https://plotly.com/javascript/plotlyjs-events/#event-
data. Additionally, every attributes that can be
specified per-point (the ones that are `arrayOk: true`)
are available. variable `norm` Anything contained in
tag `<extra>` is displayed in the secondary box, for
example "<extra>{fullData.name}</extra>". To hide the
secondary box completely, use an empty tag
`<extra></extra>`.
hovertemplatesrc
Sets the source reference on Chart Studio Cloud for
hovertemplate .
hovertext
Same as `text`.
hovertextsrc
Sets the source reference on Chart Studio Cloud for
hovertext .
ids
            Assigns id labels to each datum. These ids are used for
            object constancy of data points during animation. Should be an
array of strings, not numbers or any other type.
idssrc
Sets the source reference on Chart Studio Cloud for
ids .
legendgroup
Sets the legend group for this trace. Traces part of
the same legend group hide/show at the same time when
toggling legend items.
lighting
:class:`plotly.graph_objects.cone.Lighting` instance or
dict with compatible properties
lightposition
:class:`plotly.graph_objects.cone.Lightposition`
instance or dict with compatible properties
meta
Assigns extra meta information associated with this
trace that can be used in various text attributes.
Attributes such as trace `name`, graph, axis and
colorbar `title.text`, annotation `text`
`rangeselector`, `updatemenues` and `sliders` `label`
text all support `meta`. To access the trace `meta`
values in an attribute in the same trace, simply use
`%{meta[i]}` where `i` is the index or key of the
`meta` item in question. To access trace `meta` in
layout attributes, use `%{data[n[.meta[i]}` where `i`
is the index or key of the `meta` and `n` is the trace
index.
metasrc
Sets the source reference on Chart Studio Cloud for
meta .
name
Sets the trace name. The trace name appear as the
legend item and on hover.
opacity
Sets the opacity of the surface. Please note that in
the case of using high `opacity` values for example a
value greater than or equal to 0.5 on two surfaces (and
0.25 with four surfaces), an overlay of multiple
transparent surfaces may not perfectly be sorted in
depth by the webgl API. This behavior may be improved
in the near future and is subject to change.
reversescale
Reverses the color mapping if true. If true, `cmin`
will correspond to the last color in the array and
`cmax` will correspond to the first color.
scene
Sets a reference between this trace's 3D coordinate
system and a 3D scene. If "scene" (the default value),
the (x,y,z) coordinates refer to `layout.scene`. If
"scene2", the (x,y,z) coordinates refer to
`layout.scene2`, and so on.
showlegend
Determines whether or not an item corresponding to this
trace is shown in the legend.
showscale
Determines whether or not a colorbar is displayed for
this trace.
sizemode
Determines whether `sizeref` is set as a "scaled" (i.e
unitless) scalar (normalized by the max u/v/w norm in
the vector field) or as "absolute" value (in the same
units as the vector field).
sizeref
Adjusts the cone size scaling. The size of the cones is
            determined by their u/v/w norm multiplied by a factor and
`sizeref`. This factor (computed internally)
corresponds to the minimum "time" to travel across two
successive x/y/z positions at the average velocity of
those two successive positions. All cones in a given
trace use the same factor. With `sizemode` set to
"scaled", `sizeref` is unitless, its default value is
            0.5. With `sizemode` set to "absolute", `sizeref` has
            the same units as the u/v/w vector field, and its
            default value is half the sample's maximum vector norm.
stream
:class:`plotly.graph_objects.cone.Stream` instance or
dict with compatible properties
text
Sets the text elements associated with the cones. If
trace `hoverinfo` contains a "text" flag and
"hovertext" is not set, these elements will be seen in
the hover labels.
textsrc
Sets the source reference on Chart Studio Cloud for
text .
u
Sets the x components of the vector field.
uid
            Assign an id to this trace. Use this to provide object
constancy between traces during animations and
transitions.
uirevision
Controls persistence of some user-driven changes to the
trace: `constraintrange` in `parcoords` traces, as well
as some `editable: true` modifications such as `name`
and `colorbar.title`. Defaults to `layout.uirevision`.
Note that other user-driven trace attribute changes are
controlled by `layout` attributes: `trace.visible` is
controlled by `layout.legend.uirevision`,
`selectedpoints` is controlled by
`layout.selectionrevision`, and `colorbar.(x|y)`
            (accessible with `config: {editable:
import xarray as xr
import numpy as np
import pandas as pd
def distrib_run_build_beam_pointing_vector(dat: list):
"""
Convenience function for mapping build_beam_pointing_vectors across cluster. Assumes that you are mapping this
function with a list of data.
distrib functions also return a processing status array, here a beamwise array = 2, which states that all
processed beams are at the 'beamvector' status level
Parameters
----------
dat
[hdng, bpa, tiltangle, tx_vecs, rx_vecs, tstmp, tx_reversed, rx_reversed]
Returns
-------
list
[relative azimuth, beam pointing angle, processing_status]
"""
ans = build_beam_pointing_vectors(dat[0], dat[1], dat[2], dat[3][0], dat[3][1], dat[4], dat[5])
# return processing status = 2 for all affected soundings
processing_status = xr.DataArray(np.full_like(dat[1], 2, dtype=np.uint8),
coords={'time': dat[1].coords['time'], 'beam': dat[1].coords['beam']},
dims=['time', 'beam'])
ans.append(processing_status)
return ans
def build_beam_pointing_vectors(hdng: xr.DataArray, bpa: xr.DataArray, tiltangle: xr.DataArray, tx_vecs: xr.DataArray,
rx_vecs: xr.DataArray, tx_reversed: bool = False, rx_reversed: bool = False):
"""
Beam pointing vector is the beam specific vector that arises from the intersection of the tx ping and rx cone
of sensitivity. Points at that area. Is in the geographic coordinate system, built using the tx/rx at time of
ping/receive.
Two components are returned. Relative azimuth, the angle relative to vessel heading that points at the beam
endpoint. Beam pointing angle, the roll corrected angle relative to the horizontal that points down at the beam
endpoint.
Parameters
----------
hdng
2d (time, beam) heading in degrees at ping time for each beam
bpa
2d (time, beam) receiver beam pointing angle
tiltangle
2d (time, beam) transmitter tiltangle on ping
tx_vecs
2 dim (time, xyz) representing tx 3d orientation in space across time
rx_vecs
3 dim (time, beam, xyz) representing rx 3d orientation in space across time/beam
tx_reversed
if true, the transmitter was installed 180° offset in yaw (i.e. backwards)
rx_reversed
if true, the receiver was installed 180° offset in yaw (i.e. backwards)
Returns
-------
xr.DataArray
2dim (time, beam), beam-wise beam azimuth values relative to vessel heading at time of ping
xr.DataArray
2 dim (time, beam) values for beampointingangle at each beam
"""
# main vec (primary head) is accessed using the primary system selection
rx_angle = np.deg2rad(bpa)
tx_angle = np.deg2rad(tiltangle)
if tx_reversed:
tx_angle = -tx_angle
if rx_reversed:
rx_angle = -rx_angle
beamvecs = construct_array_relative_beamvector(tx_vecs, rx_vecs, tx_angle, rx_angle)
rotgeo = return_array_geographic_rotation(tx_vecs, rx_vecs)
bv_geo = build_geographic_beam_vectors(rotgeo, beamvecs)
rel_azimuth = compute_relative_azimuth(bv_geo, hdng)
new_pointing_angle = compute_geo_beam_pointing_angle(bv_geo, rx_angle)
return [rel_azimuth, new_pointing_angle]
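# A minimal usage sketch (illustrative; shapes follow the docstring above):
#   hdng, bpa, tiltangle : 2d (time, beam) DataArrays in degrees
#   tx_vecs              : (time, xyz) transmit orientation vectors
#   rx_vecs              : (time, beam, xyz) receive orientation vectors
#   rel_az, angle = build_beam_pointing_vectors(hdng, bpa, tiltangle, tx_vecs, rx_vecs)
# rel_az is the beam azimuth relative to the vessel heading and angle is the
# roll-corrected beam pointing angle, both in radians per the deg2rad
# conversions in the helper functions below.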
def construct_array_relative_beamvector(maintx: xr.DataArray, mainrx: xr.DataArray, tx_angle: xr.DataArray,
rx_angle: xr.DataArray):
"""
Given the orientation vectors representing the transmitter/receiver at time of ping/receive (maintx, mainrx) and
the TX/RX steering angles (tx_angle, rx_angle), determine new 3d beam vector components at the midpoint between
the TX and RX. This would be the 'actual' array relative beam vector.
This is a simplification of the actual scenario, adding error in the xyz due to the difference in path length/
direction of the actual ray from tx-seafloor and seafloor-rx and this co-located assumption (tx-seafloor and
rx-seafloor are the same is the assumption)
x = +FORWARD, y=+STARBOARD, z=+DOWN
Returns:
3d beam vector in co-located array ref frame. Of shape (xyz, time, beam), with 10 times and 200 beams,
beamvecs shape would be (3, 10, 200)
| <xarray.DataArray 'tiltangle' (xyz: 3, time: 10, beam: 200)>
| dask.array<concatenate, shape=(3, 10, 200), dtype=float64, chunksize=(1, 10, 200), chunktype=numpy.ndarray>
| Coordinates:
| * time (time) float64 1.496e+09 1.496e+09 ...
| * beam (beam) int32 0 1 2 3 4 5 6 7 8 ... 194 195 196 197 198 199 200
| * xyz (xyz) object 'x' 'y' 'z'
Parameters
----------
maintx
orientation vector for transmitter at time of transmit, 2dim of shape (time, xyz)
mainrx
orientation vector for receiver at time of receive, 2dim of shape (time, xyz)
tx_angle
transmitter tiltangle for each ping time
rx_angle
receiver beam pointing angle for each ping time
Returns
-------
xr.DataArray
3d beam vector in co-located array ref frame
"""
# delta - alignment angle between tx/rx vecs
delt = np.arccos(xr.dot(maintx, mainrx, dims=['xyz'])) - np.pi / 2
ysub1 = -np.sin(rx_angle)
# solve for components of 3d beam vector
ysub1 = ysub1 / np.cos(delt)
ysub2 = np.sin(tx_angle) * np.tan(delt)
radial = np.sqrt((ysub1 + ysub2) ** 2 + np.sin(tx_angle) ** 2)
x = np.sin(tx_angle)
y = ysub1 + ysub2
z = np.sqrt(1 - radial ** 2)
# generate new dataarray object for beam vectors
newx, _ = xr.broadcast(x, y) # broadcast to duplicate x along beam dimension
beamvecs = xr.concat([newx, y, z], pd.Index(list('xyz'), name='xyz'))
return beamvecs
def return_array_geographic_rotation(maintx: xr.DataArray, mainrx: xr.DataArray):
"""
Use the transmitter/receiver array orientations to build a rotation matrix between the geographic/array rel
reference frame.
Returns rotation matrices at each time/beam, of shape (beam, rot_i, time, xyz)
| <xarray.DataArray 'getitem-82dd48467b1f4e8b4f56bbe5e841cc9f' (beam: 182, rot_i: 3, time: 2, xyz: 3)>
| dask.array<transpose, shape=(182, 3, 2, 3), dtype=float64, chunksize=(182, 3, 2, 1), chunktype=numpy.ndarray>
| Coordinates:
| * rot_i (rot_i) int32 0 1 2
| * time (time) float64 1.496e+09 1.496e+09
| * beam (beam) int32 0 1 2 3 4 5 6 7 8 ... 174 175 176 177 178 179 180 181
| * xyz (xyz) <U1 'x' 'y' 'z'
Parameters
----------
maintx
orientation vector for transmitter at time of transmit, 2dim of shape (time, xyz)
mainrx
orientation vector for receiver at time of receive, 2dim of shape (time, xyz)
Returns
-------
xr.DataArray
rotation matrices at each time/beam, of shape (beam, rot_i, time, xyz)
"""
# build rotation matrix for going from locally level to geographic coord sys
x_prime = maintx
z_prime = cross(x_prime, mainrx, 'xyz')
y_prime = cross(z_prime, x_prime, 'xyz')
rotgeo = xr.concat([x_prime, y_prime, z_prime], pd.Index([0, 1, 2], name='rot_j')).T
# to do the dot product correctly, you need to align the right dimension in both matrices by giving
# them the same name (xyz for rotgeo and bv_geo in this case)
rotgeo = rotgeo.rename({'xyz': 'rot_i'})
rotgeo.coords['rot_i'] = [0, 1, 2]
rotgeo = rotgeo.rename({'rot_j': 'xyz'})
rotgeo.coords['xyz'] = ['x', 'y', 'z']
return rotgeo
def cross(a: xr.DataArray, b: xr.DataArray, spatial_dim: str, output_dtype: np.dtype = None):
"""
Xarray-compatible cross product. Compatible with dask, parallelization uses a.dtype as output_dtype
Parameters
----------
a
xarray DataArray object with a spatial_dim
b
xarray DataArray object with a spatial_dim
spatial_dim
dimension name to be mulitplied through
output_dtype
dtype of output
Returns
-------
xr.DataArray
cross product of a and b along spatial_dim
"""
for d in (a, b):
if spatial_dim not in d.dims:
raise ValueError('dimension {} not in {}'.format(spatial_dim, d))
if d.sizes[spatial_dim] != 3:
raise ValueError('dimension {} has not length 3 in {}'.format(spatial_dim, d))
if output_dtype is None:
output_dtype = a.dtype
c = xr.apply_ufunc(np.cross, a, b,
input_core_dims=[[spatial_dim], [spatial_dim]],
output_core_dims=[[spatial_dim]],
dask='parallelized', output_dtypes=[output_dtype]
)
return c
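# Example (illustrative): for two DataArrays a and b that each carry an 'xyz'
# dimension of length 3, e.g. dims ('time', 'xyz'):
#   n = cross(a, b, 'xyz')
# n has the same dims as the broadcast inputs and holds the elementwise cross
# products; with dask-backed inputs the computation stays lazy.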
def build_geographic_beam_vectors(rotgeo: xr.DataArray, beamvecs: xr.DataArray):
"""
Apply rotation matrix to bring transducer rel. beam vectors to geographic ref frame
Parameters
----------
rotgeo
rotation matrices at each time/beam, of shape (beam, rot_i, time, xyz), see return_array_geographic_rotation
beamvecs
3d beam vector in co-located array ref frame (xyz, time, beam), see construct_array_relative_beamvector
Returns
-------
xr.DataArray
beam vectors in geographic ref frame, of shape (time, beam, bv_xyz)
"""
bv_geo = xr.dot(rotgeo, beamvecs, dims='xyz')
bv_geo = bv_geo.rename({'rot_i': 'bv_xyz'})
bv_geo.coords['bv_xyz'] = ['x', 'y', 'z']
bv_geo = bv_geo.transpose('time', 'beam', 'bv_xyz')
return bv_geo
def compute_relative_azimuth(bv_geo: xr.DataArray, heading: xr.DataArray):
"""
Compute the relative azimuth from array to end of beam vector in geographic ref frame
Parameters
----------
bv_geo
beam vectors in geographic ref frame, of shape (time, beam, bv_xyz), see build_geographic_beam_vectors
heading
1 dim array of heading values, coords=time
Returns
-------
xr.DataArray
2dim (time, beam), beam-wise beam azimuth values relative to vessel heading at time of ping
"""
# derive azimuth/angle from the newly created geographic beam vectors
bv_azimuth = np.rad2deg(np.arctan2(bv_geo.sel(bv_xyz='y'), bv_geo.sel(bv_xyz='x')))
rel_azimuth = np.deg2rad((bv_azimuth - heading + 360) % 360)
return rel_azimuth
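# Worked example (illustrative): a beam vector at geographic azimuth 30 degrees
# (arctan2(y, x)) with a vessel heading of 90 degrees gives
#   (30 - 90 + 360) % 360 = 300 degrees, returned as np.deg2rad(300) ~= 5.236 rad.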
def compute_geo_beam_pointing_angle(bv_geo: xr.DataArray, rx_angle: xr.DataArray):
"""
Build new beam pointing angle (rel to the vertical) and with the correct sign (+ to starboard) in the geographic
ref frame.
Parameters
----------
bv_geo
beam vectors in geographic ref frame, of shape (time, beam, bv_xyz), see build_geographic_beam_vectors
rx_angle
receiver beam pointing angle for | |
# x22x22/python-ceph-cfg
# Import Python Libs
from __future__ import absolute_import
# Python imports
import os
import os.path
import platform
import logging
import shlex
import tempfile
try:
import ConfigParser
except:
import configparser as ConfigParser
# local modules
from . import constants
from . import utils
from . import util_which
log = logging.getLogger(__name__)
class Error(Exception):
"""
Error
"""
def __str__(self):
doc = self.__doc__.strip()
return ': '.join([doc] + [str(a) for a in self.args])
def _retrive_osd_details_from_dir(directory):
osd_required_files = set(["ceph_fsid", "fsid", "magic"])
osd_details = {}
dir_content = os.listdir(directory)
if not osd_required_files.issubset(dir_content):
log.debug("osd details not found in:%s" % (directory))
return None
log.debug("Reading osd details from '%s'" % (directory))
with open('%s/ceph_fsid' % (directory), 'r') as infile:
osd_details["ceph_fsid"] = infile.read().strip()
with open('%s/fsid' % (directory), 'r') as infile:
osd_details["fsid"] = infile.read().strip()
with open('%s/magic' % (directory), 'r') as infile:
osd_details["magic"] = infile.read().strip()
    # Journal uuid may not exist when the partition is reused.
path_journal_uuid = '%s/journal_uuid' % (directory)
if os.path.isfile(path_journal_uuid):
log.debug("Reading '%s'" % (path_journal_uuid))
with open('%s/journal_uuid' % (directory), 'r') as infile:
osd_details["journal_uuid"] = infile.read().strip()
# whoami may not exist when OSD has never been activated.
path_whoami = '%s/whoami' % (directory)
if os.path.isfile(path_whoami):
log.debug("Reading '%s'" % (path_whoami))
with open('%s/whoami' % (directory), 'r') as infile:
osd_details["whoami"] = infile.read().strip()
path_link = '%s/journal' % (directory)
if os.path.islink(path_link):
log.debug("Reading '%s'" % (path_link))
osd_details["dev_journal"] = os.path.realpath(path_link)
return osd_details
def retrive_osd_details(device_name):
osd_details = {}
if device_name is None:
return None
try:
tmpd = tempfile.mkdtemp()
log.info("Create temp directory %s" %(tmpd))
try:
out_mnt = utils.execute_local_command(['mount',device_name,tmpd])
if out_mnt['retcode'] == 0:
osd_details = _retrive_osd_details_from_dir(tmpd)
finally:
utils.execute_local_command(['umount',tmpd])
finally:
log.info("Destroy temp directory %s" %(tmpd))
os.rmdir(tmpd)
return osd_details
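# Illustrative return value (all field values are examples, not real data):
#   {'ceph_fsid': '11111111-2222-...', 'fsid': '33333333-4444-...',
#    'magic': 'ceph osd volume v026', 'journal_uuid': '55555555-6666-...',
#    'whoami': '0', 'dev_journal': '/dev/sdb2'}
# 'journal_uuid', 'whoami' and 'dev_journal' only appear when the corresponding
# file or symlink exists in the mounted OSD directory.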
class model_updater():
"""
    Basic model updater: retrieves data and adds it to the model
"""
def __init__(self, model):
self.model = model
def defaults_hostname(self):
if self.model.hostname is None:
self.model.hostname = platform.node().split('.')[0]
def defaults_refresh(self):
# Default cluster name / uuid values
if self.model.cluster_name is None and self.model.cluster_uuid is None:
log.info("Defaulting cluster name to 'ceph'")
self.model.cluster_name = "ceph"
if self.model.cluster_name is not None and self.model.cluster_uuid is None:
self.model.cluster_uuid = utils._get_cluster_uuid_from_name(self.model.cluster_name)
log.info("From cluster name '%s' got cluster uuid '%s'" % (self.model.cluster_name, self.model.cluster_uuid))
if self.model.cluster_name is None and self.model.cluster_uuid is not None:
self.model.cluster_name = utils._get_cluster_name_from_uuid(self.model.cluster_uuid)
log.info("From cluster uuid '%s' got cluster name '%s'" % (self.model.cluster_uuid, self.model.cluster_name))
def symlinks_refresh(self):
'''
List all symlinks under /dev/disk/
'''
interesting_dirs = set(["by-path","by-id","by-uuid","by-partuuid"])
paths = {}
for root, dirs, files in os.walk("/dev/disk/"):
path_head, path_tail = os.path.split(root)
if not path_tail in interesting_dirs:
continue
for file_name in files:
file_path = os.path.join(root,file_name)
if not os.path.islink(file_path):
continue
real_path = os.path.realpath(file_path)
if not real_path in paths.keys():
paths[real_path] = []
paths[real_path].append(file_path)
self.model.symlinks = paths
def lsblk_version_refresh(self):
"""
Get lsblk version as this is older on RHEL 7.2
"""
arguments = [ util_which.which_lsblk.path, "--version" ]
output = utils.execute_local_command(arguments)
if output["retcode"] != 0:
raise Error("Failed executing '%s' Error rc=%s, stdout=%s stderr=%s" % (
" ".join(arguments),
output["retcode"],
output["stdout"],
output["stderr"]
))
version_str = output["stdout"].strip()
version_list = shlex.split(version_str)
if len(version_list) < 4:
raise Error("Failed processing lsblk version string '%s'" % (version_str))
version_split = version_list[3].split(".")
self.model.lsblk_version.major = int(version_split[0])
if len(version_split) > 1:
self.model.lsblk_version.minor = int(version_split[1])
if len(version_split) > 2:
self.model.lsblk_version.revision = int(version_split[2])
else:
self.model.lsblk_version.revision = 0
def _lsblk_arguements(self):
"""
Utility function for lsblk
"""
if self.model.lsblk_version.major is None:
self.lsblk_version_refresh()
if self.model.lsblk_version.major < 2:
raise Error("lsblk version too old '%s'" % (self.model.lsblk_version))
if self.model.lsblk_version.major == 2 and self.model.lsblk_version.minor < 23:
raise Error("lsblk version maybe too old '%s'" % (self.model.lsblk_version))
# RHEL 7.2 uses version 2.23.2
if self.model.lsblk_version.major == 2 and self.model.lsblk_version.minor < 25:
            # Note: this older lsblk does not support "PARTTYPE".
            log.warning("lsblk version is old, results may be incomplete.")
return [
"--ascii",
"--output",
"NAME,FSTYPE,MOUNTPOINT,PARTLABEL,PARTUUID,PKNAME,ROTA,RQ-SIZE,SCHED,SIZE,TYPE,UUID,VENDOR",
"--pairs",
"--paths",
"--bytes"
]
return [
"--ascii",
"--output-all",
"--pairs",
"--paths",
"--bytes"
]
def partitions_all_refresh_lsblk(self):
'''
List all partition details using lsblk
'''
part_map = {}
cmd = [ util_which.which_lsblk.path ] + self._lsblk_arguements()
output = utils.execute_local_command(cmd)
if output['retcode'] != 0:
raise Error("Failed running: lsblk --ascii --output-all")
all_parts = {}
for line in output['stdout'].split('\n'):
partition = {}
for token in shlex.split(line):
token_split = token.split("=")
if len(token_split) == 1:
continue
key = token_split[0]
value = "=".join(token_split[1:])
if len(value) == 0:
continue
partition[key] = value
part_name = partition.get("NAME")
if part_name is None:
continue
part_type = partition.get("TYPE")
if part_type == "disk":
all_parts[part_name] = partition
continue
disk_name = partition.get("PKNAME")
if not disk_name in all_parts:
continue
part_map[part_name] = disk_name
            if all_parts[disk_name].get("PARTITION") is None:
all_parts[disk_name]["PARTITION"] = {}
all_parts[disk_name]["PARTITION"][part_name] = partition
self.model.lsblk = all_parts
self.model.part_pairent = part_map
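    # Illustrative `lsblk --pairs --paths` line consumed by the parser above
    # (field values are examples):
    #   NAME="/dev/sdb1" FSTYPE="xfs" MOUNTPOINT="/var/lib/ceph/osd/ceph-0"
    #   PARTLABEL="ceph data" PKNAME="/dev/sdb" TYPE="part" SIZE="107372085248"
    # Rows with TYPE="disk" become top-level keys of self.model.lsblk; partition
    # rows are attached under their parent disk's "PARTITION" dict and mapped to
    # the parent in self.model.part_pairent.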
def partitions_all_refresh_parted(self):
'''
List all partition details using parted
'''
arguments = [
util_which.which_parted.path,
'-s',
'-m',
'-l',
'print'
]
output = utils.execute_local_command(arguments)
if output["retcode"] != 0:
raise Error("Failed executing '%s' Error rc=%s, stdout=%s stderr=%s" % (
" ".join(arguments),
output["retcode"],
output["stdout"],
output["stderr"]
))
lines = output["stdout"].split('\n')
lines_num = len(lines)
if lines_num == 0:
return
chunk_lines = []
chunk_end = int(lines_num - 1)
for line_num in list(reversed(range(lines_num))):
if lines[line_num] == 'BYT;':
chunk_lines.append((int(line_num), chunk_end))
chunk_end = int(line_num)
parted_dict = {}
for chunk_start,chunk_end in chunk_lines:
chunk_list = lines[chunk_start:chunk_end]
disk_line_split = chunk_list[1].split(':')
parted_dict_disk = {
'disk' : disk_line_split[0],
'size' : disk_line_split[1],
'driver' : disk_line_split[2],
'sector_size_logical' : disk_line_split[3],
'sector_size_physical' : disk_line_split[4],
'table' : disk_line_split[5],
'vendor' : disk_line_split[6],
'partition' : {}
}
for chunk_line in range(2,len(chunk_list)):
part_line = chunk_list[chunk_line]
if len(part_line) == 0:
continue
part_line_split = part_line.split(':')
part_path = disk_line_split[0] + part_line_split[0]
part_line_dict = {
'Path' : part_path,
'Number' : part_line_split[0],
'Start' : part_line_split[1],
'End' : part_line_split[2],
'Size' : part_line_split[3],
'File system' : part_line_split[4],
                'Flags' : part_line_split[6].split(',') if len(part_line_split) > 6 else []  # flags are the 7th colon-separated field
}
parted_dict_disk['partition'][part_path] = part_line_dict
parted_dict[disk_line_split[0]] = parted_dict_disk
self.model.parted = parted_dict
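    # Illustrative `parted -s -m -l print` machine-readable chunk handled above
    # (values are examples):
    #   BYT;
    #   /dev/sdb:1000GB:scsi:512:512:gpt:ATA SAMSUNG SSD;
    #   1:1049kB:107GB:107GB:xfs:ceph data:;
    # The line after 'BYT;' describes the disk; each following colon-separated
    # line describes one partition on that disk.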
def partitions_all_refresh(self):
'''
List all partition details
'''
self.partitions_all_refresh_lsblk()
self.partitions_all_refresh_parted()
def discover_partitions_refresh(self):
'''
List all OSD and journal partitions
'''
osd_all = set()
journal_all = set()
osd_details = {}
for diskname in self.model.lsblk.keys():
disk = self.model.lsblk.get(diskname)
if disk is None:
continue
part_struct = disk.get("PARTITION")
if part_struct is None:
continue
for partname in part_struct.keys():
part_details = part_struct.get(partname)
if part_details is None:
continue
mount_point = part_details.get("MOUNTPOINT")
if mount_point == '[SWAP]':
continue
if mount_point is not None:
osd_md = _retrive_osd_details_from_dir(mount_point)
if osd_md is not None:
osd_details[partname] = osd_md
part_type = part_details.get("PARTTYPE")
if part_type == constants.OSD_UUID:
osd_all.add(partname)
if mount_point is not None:
continue
osd_md = retrive_osd_details(partname)
if osd_md is not None:
osd_details[partname] = osd_md
continue
if part_type == constants.JOURNAL_UUID:
journal_all.add(partname)
continue
if mount_point is not None:
continue
fs_type = part_details.get("FSTYPE")
if fs_type is None:
continue
if not fs_type in ['xfs', 'btrfs', 'ext4']:
continue
osd_md = retrive_osd_details(partname)
if osd_md is not None:
osd_details[partname] = osd_md
# Now we combine our data to find incorrectly labeled OSD's
# and build osd data structure discovered_osd
discovered_osd = {}
for osd_dev_data in osd_details.keys():
            # Aggregate data into osd_all.
osd_all.add(osd_dev_data)
osd_md = osd_details.get(osd_dev_data)
if osd_md is None:
continue
            # Aggregate data into journal_all.
osd_dev_journel_raw = osd_md.get("dev_journal")
if osd_dev_journel_raw is not None:
journal_all.add(osd_dev_journel_raw)
osd_md["dev"] = osd_dev_data
disk_name = self.model.part_pairent.get(osd_dev_data)
if disk_name is not None:
osd_md["dev_parent"] = disk_name
ceph_fsid = osd_md.get("ceph_fsid")
if not ceph_fsid in discovered_osd.keys():
discovered_osd[ceph_fsid] = []
discovered_osd[ceph_fsid].append(osd_md)
self.model.partitions_osd = osd_all
self.model.partitions_journal = journal_all
self.model.discovered_osd = discovered_osd
def load_confg(self, cluster_name):
configfile = "/etc/ceph/%s.conf" % (cluster_name)
if not os.path.isfile(configfile):
raise Error("Cluster confg file does not exist:'%s'" % configfile)
self.model.ceph_conf.read(configfile)
def mon_members_refresh(self):
try:
mon_initial_members_name_raw = self.model.ceph_conf.get("global","mon_initial_members")
except ConfigParser.NoSectionError:
raise Error("Cluster config file does not have a [global] section")
except ConfigParser.NoOptionError:
raise Error("Cluster config file does not set mon_initial_members")
mon_initial_members_name_cleaned = []
for mon_split in mon_initial_members_name_raw.split(","):
mon_initial_members_name_cleaned.append(mon_split.strip())
try:
mon_initial_members_addr_raw = self.model.ceph_conf.get("global","mon_host")
except ConfigParser.NoOptionError:
raise Error("Cluster config file does not set mon_host")
mon_initial_members_addr_cleaned = []
for mon_split in mon_initial_members_addr_raw.split(","):
mon_initial_members_addr_cleaned.append(mon_split.strip())
if len(mon_initial_members_name_cleaned) != len(mon_initial_members_addr_cleaned):
raise Error("config has different numbers of mon 'names' and ip addresses")
output = []
mon_initial_members_name_len = len(mon_initial_members_name_cleaned)
for idx in range(0,mon_initial_members_name_len):
output.append((
mon_initial_members_name_cleaned[idx],
mon_initial_members_addr_cleaned[idx]
))
self.model.mon_members = output
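    # Illustrative config and result (assumed values):
    #   [global]
    #   mon_initial_members = mon0, mon1, mon2
    #   mon_host = 192.168.0.10, 192.168.0.11, 192.168.0.12
    # would set self.model.mon_members to
    #   [('mon0', '192.168.0.10'), ('mon1', '192.168.0.11'), ('mon2', '192.168.0.12')]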
def ceph_version_refresh(self):
arguments = [
"ceph",
"--version"
]
output = utils.execute_local_command(arguments)
if output["retcode"] != 0:
raise Error("Failed executing '%s' Error rc=%s, stdout=%s stderr=%s" % (
" ".join(arguments),
output["retcode"],
output["stdout"],
output["stderr"]))
version_raw = output["stdout"].strip()
version_raw_split = shlex.split(version_raw)
        if len(version_raw_split) != 4 and len(version_raw_split) != 6:
raise Error("ceph returned an invalid version:'%s' " % (version_raw))
if version_raw_split[0] != "ceph":
raise Error("ceph returned an invalid version first value is not ceph:'%s' " % (version_raw))
if version_raw_split[1] != "version":
raise Error("ceph returned an invalid version second value is | |
x == t[i+0][0] and y == t[i+2][0]) \
or (cmd == "wdand2tagbfr" and x == t[i-2][1] and y == t[i+0][0]) \
or (cmd == "wdand2tagaft" and x == t[i+0][0] and y == t[i+2][1]) \
or (cmd == "lbigram" and x == t[i-1][0] and y == t[i+0][0]) \
or (cmd == "rbigram" and x == t[i+0][0] and y == t[i+1][0]) \
or (cmd == "prevbigram" and x == t[i-2][1] and y == t[i-1][1]) \
or (cmd == "nextbigram" and x == t[i+1][1] and y == t[i+2][1]):
t[i] = [t[i][0], r[1]]
return t[len(o):-len(o)]
def insert(self, i, tag1, tag2, cmd="prevtag", x=None, y=None):
""" Inserts a new rule that updates words with tag1 to tag2,
given constraints x and y, e.g., Context.append("TO < NN", "VB")
"""
if " < " in tag1 and not x and not y:
tag1, x = tag1.split(" < "); cmd="prevtag"
if " > " in tag1 and not x and not y:
x, tag1 = tag1.split(" > "); cmd="nexttag"
lazylist.insert(self, i, [tag1, tag2, cmd, x or "", y or ""])
def append(self, *args, **kwargs):
self.insert(len(self)-1, *args, **kwargs)
def extend(self, rules=[]):
for r in rules:
self.append(*r)
#--- NAMED ENTITY RECOGNIZER -----------------------------------------------------------------------
RE_ENTITY1 = re.compile(r"^http://") # http://www.domain.com/path
RE_ENTITY2 = re.compile(r"^www\..*?\.[com|org|net|edu|de|uk]$") # www.domain.com
RE_ENTITY3 = re.compile(r"^[\w\-\.\+]+@(\w[\w\-]+\.)+[\w\-]+$") # <EMAIL>
class Entities(lazydict, Rules):
def __init__(self, lexicon={}, path="", tag="NNP"):
""" A dictionary of named entities and their labels.
For domain names and e-mail adresses, regular expressions are used.
"""
cmd = (
"pers", # Persons: George/NNP-PERS
"loc", # Locations: Washington/NNP-LOC
"org", # Organizations: Google/NNP-ORG
)
Rules.__init__(self, lexicon, cmd)
self._path = path
self.tag = tag
@property
def path(self):
return self._path
def load(self):
# ["Alexander", "the", "Great", "PERS"]
# {"alexander": [["alexander", "the", "great", "pers"], ...]}
for x in _read(self.path):
x = [x.lower() for x in x.split()]
dict.setdefault(self, x[0], []).append(x)
def apply(self, tokens):
""" Applies the named entity recognizer to the given list of tokens,
where each token is a [word, tag] list.
"""
# Note: we could also scan for patterns, e.g.,
# "my|his|her name is|was *" => NNP-PERS.
i = 0
while i < len(tokens):
w = tokens[i][0].lower()
if RE_ENTITY1.match(w) \
or RE_ENTITY2.match(w) \
or RE_ENTITY3.match(w):
tokens[i][1] = self.tag
if w in self:
for e in self[w]:
# Look ahead to see if successive words match the named entity.
e, tag = (e[:-1], "-"+e[-1].upper()) if e[-1] in self.cmd else (e, "")
b = True
for j, e in enumerate(e):
if i + j >= len(tokens) or tokens[i+j][0].lower() != e:
b = False; break
if b:
for token in tokens[i:i+j+1]:
token[1] = (token[1] == "NNPS" and token[1] or self.tag) + tag
i += j
break
i += 1
return tokens
def append(self, entity, name="pers"):
""" Appends a named entity to the lexicon,
e.g., Entities.append("Hooloovoo", "PERS")
"""
e = [s.lower() for s in entity.split(" ") + [name]]
self.setdefault(e[0], []).append(e)
def extend(self, entities):
for entity, name in entities:
self.append(entity, name)
### SENTIMENT POLARITY LEXICON #####################################################################
# A sentiment lexicon can be used to discern objective facts from subjective opinions in text.
# Each word in the lexicon has scores for:
# 1) polarity: negative vs. positive (-1.0 => +1.0)
# 2) subjectivity: objective vs. subjective (+0.0 => +1.0)
# 3) intensity: modifies next word? (x0.5 => x2.0)
# For English, adverbs are used as modifiers (e.g., "very good").
# For Dutch, adverbial adjectives are used as modifiers
# ("hopeloos voorspelbaar", "ontzettend spannend", "verschrikkelijk goed").
# Negation words (e.g., "not") reverse the polarity of the following word.
# Sentiment()(txt) returns an averaged (polarity, subjectivity)-tuple.
# Sentiment().assessments(txt) returns a list of (chunk, polarity, subjectivity, label)-tuples.
# Semantic labels are useful for fine-grained analysis, e.g.,
# negative words + positive emoticons could indicate cynicism.
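# A minimal usage sketch (illustrative; the lexicon path and chunking shown are
# assumptions, not values defined in this file):
#   s = Sentiment(path="en-sentiment.xml")
#   polarity, subjectivity = s("not very good")
#   s.assessments("not very good")   # [(chunk_words, polarity, subjectivity, label), ...]
# Per the notes above, a modifier such as "very" scales the next word's score by
# its intensity, and a negation such as "not" reverses the following polarity.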
# Semantic labels:
MOOD = "mood" # emoticons, emojis
IRONY = "irony" # sarcasm mark (!)
NOUN, VERB, ADJECTIVE, ADVERB = \
"NN", "VB", "JJ", "RB"
RE_SYNSET = re.compile(r"^[acdnrv][-_][0-9]+$")
def avg(list):
return sum(list) / float(len(list) or 1)
class Score(tuple):
def __new__(self, polarity, subjectivity, assessments=[]):
""" A (polarity, subjectivity)-tuple with an assessments property.
"""
return tuple.__new__(self, [polarity, subjectivity])
def __init__(self, polarity, subjectivity, assessments=[]):
self.assessments = assessments
class Sentiment(lazydict):
def __init__(self, path="", language=None, synset=None, confidence=None, **kwargs):
""" A dictionary of words (adjectives) and polarity scores (positive/negative).
The value for each word is a dictionary of part-of-speech tags.
The value for each word POS-tag is a tuple with values for
polarity (-1.0-1.0), subjectivity (0.0-1.0) and intensity (0.5-2.0).
"""
self._path = path # XML file path.
self._language = None # XML language attribute ("en", "fr", ...)
self._confidence = None # XML confidence attribute threshold (>=).
self._synset = synset # XML synset attribute ("wordnet_id", "cornetto_id", ...)
self._synsets = {} # {"a-01123879": (1.0, 1.0, 1.0)}
self.labeler = {} # {"dammit": "profanity"}
self.tokenizer = kwargs.get("tokenizer", find_tokens)
self.negations = kwargs.get("negations", ("no", "not", "n't", "never"))
self.modifiers = kwargs.get("modifiers", ("RB",))
self.modifier = kwargs.get("modifier" , lambda w: w.endswith("ly"))
@property
def path(self):
return self._path
@property
def language(self):
return self._language
@property
def confidence(self):
return self._confidence
def load(self, path=None):
""" Loads the XML-file (with sentiment annotations) from the given path.
By default, Sentiment.path is lazily loaded.
"""
# <word form="great" wordnet_id="a-01123879" pos="JJ" polarity="1.0" subjectivity="1.0" intensity="1.0" />
# <word form="damnmit" polarity="-0.75" subjectivity="1.0" label="profanity" />
if not path:
path = self._path
if not os.path.exists(path):
return
words, synsets, labels = {}, {}, {}
xml = cElementTree.parse(path)
xml = xml.getroot()
for w in xml.findall("word"):
if self._confidence is None \
or self._confidence <= float(w.attrib.get("confidence", 0.0)):
w, pos, p, s, i, label, synset = (
w.attrib.get("form"),
w.attrib.get("pos"),
w.attrib.get("polarity", 0.0),
w.attrib.get("subjectivity", 0.0),
w.attrib.get("intensity", 1.0),
w.attrib.get("label"),
w.attrib.get(self._synset) # wordnet_id, cornetto_id, ...
)
psi = (float(p), float(s), float(i))
if w:
words.setdefault(w, {}).setdefault(pos, []).append(psi)
if w and label:
labels[w] = label
if synset:
synsets.setdefault(synset, []).append(psi)
self._language = xml.attrib.get("language", self._language)
# Average scores of all word senses per part-of-speech tag.
for w in words:
words[w] = dict((pos, [avg(each) for each in zip(*psi)]) for pos, psi in words[w].items())
# Average scores of all part-of-speech tags.
for w, pos in list(words.items()):
words[w][None] = [avg(each) for each in zip(*pos.values())]
# Average scores of all synonyms per synset.
for id, psi in synsets.items():
synsets[id] = [avg(each) for each in zip(*psi)]
dict.update(self, words)
dict.update(self.labeler, labels)
dict.update(self._synsets, synsets)
def synset(self, id, pos=ADJECTIVE):
""" Returns a (polarity, subjectivity)-tuple for the given synset id.
For example, the adjective "horrible" has id 193480 in WordNet:
Sentiment.synset(193480, pos="JJ") => (-0.6, 1.0, 1.0).
"""
id = str(id).zfill(8)
if not id.startswith(("n-", "v-", "a-", "r-")):
if pos == NOUN:
id = "n-" + id
if pos == VERB:
id = "v-" + id
if pos == ADJECTIVE:
id = "a-" + id
if pos == ADVERB:
id = "r-" + id
if dict.__len__(self) == 0:
self.load()
return tuple(self._synsets.get(id, (0.0, 0.0))[:2])
def __call__(self, s, negation=True, **kwargs):
""" Returns a (polarity, subjectivity)-tuple for the given sentence,
with polarity between -1.0 and 1.0 and subjectivity between 0.0 and 1.0.
The sentence can be a string, Synset, Text, Sentence, Chunk, Word, Document, Vector.
An optional weight parameter can be given,
as a function that takes a list of words and returns a weight.
"""
def avg(assessments, weighted=lambda w: 1):
s, n = 0, 0
for words, score in assessments:
w = weighted(words)
s += w * score
n += w
return s / float(n or 1)
# A pattern.en.wordnet.Synset.
# Sentiment(synsets("horrible", "JJ")[0]) => (-0.6, 1.0)
if hasattr(s, "gloss"):
a = [(s.synonyms[0],) + self.synset(s.id, pos=s.pos) + (None,)]
# A synset id.
# Sentiment("a-00193480") => horrible => (-0.6, 1.0) (English WordNet)
# Sentiment("c_267") => verschrikkelijk => (-0.9, 1.0) (Dutch Cornetto)
elif isinstance(s, basestring) and RE_SYNSET.match(s):
a = [(s.synonyms[0],) + self.synset(s.id, pos=s.pos) + (None,)]
# A string of words.
# Sentiment("a horrible movie") => (-0.6, 1.0)
elif isinstance(s, basestring):
a = self.assessments(((w.lower(), None) for w in " ".join(self.tokenizer(s)).split()), negation)
# A pattern.en.Text.
elif hasattr(s, "sentences"):
a = self.assessments(((w.lemma or w.string.lower(), w.pos[:2]) for w in chain(*s)), negation)
# A pattern.en.Sentence or pattern.en.Chunk.
elif hasattr(s, "lemmata"):
a = self.assessments(((w.lemma or w.string.lower(), w.pos[:2]) for w in s.words), negation)
# A pattern.en.Word.
elif hasattr(s, "lemma"):
a = self.assessments(((s.lemma or s.string.lower(), s.pos[:2]),), negation)
        # A
one!'
])
def handle_bid_for_job(cmd_object, dlg_context, service_registry, **kwargs):
if not cmd_object.job_tag:
return 'Bid for a job by texting the job tag, space, and "bid".'
if not is_valid_job_tag(cmd_object.job_tag):
return REPLY_INVALID_TAG_TPL % cmd_object.job_tag
db_svc = service_registry.lookup('postgres')
with db_svc.txn_scope() as session:
# make sure the job is open
if not job_is_available(cmd_object.job_tag, session, db_svc):
return ' '.join(['The job with tag:',
cmd_object.job_tag,
'is not in the pool of available jobs.',
'Text "opn" for a list of open jobs.'
])
if not courier_is_on_duty(dlg_context.courier.id, session, db_svc):
# automatically place this courier on the duty roster
payload = {
'id': dlg_context.courier.id,
'status': 1 # 1 means on-duty
}
transform_status = update_courier_status_func(payload, service_registry, **kwargs)
if not transform_status.ok:
print(transform_status)
return 'There was an error attempting to auto-update your duty status. Please contact your administrator.'
# only one bid per user (TODO: pluggable bidding policy)
if courier_has_bid(dlg_context.courier.id, cmd_object.job_tag, session, db_svc):
return ' '.join([
'You have already bid on the job:',
cmd_object.job_tag,
"Once the bid window closes, we'll text you if you get the assignment.",
"Good luck!"
])
bidding_window = lookup_open_bidding_window_by_job_tag(cmd_object.job_tag, session, db_svc)
if not bidding_window:
return ' '.join([
"Sorry, the bidding window for job:",
cmd_object.job_tag,
"has closed."
])
# job exists and is available, so bid for it
kwargs['job_tag'] = cmd_object.job_tag
bid = ObjectFactory.create_job_bid(db_svc,
bidding_window_id=bidding_window.id,
courier_id=dlg_context.courier.id,
job_tag=cmd_object.job_tag)
session.add(bid)
return ' '.join([
"Thank you! You've made a bid to accept job:",
cmd_object.job_tag,
"If you get the assignment, we'll text you when the bidding window closes."
])
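# Illustrative note (not part of the original handler): based on the checks above,
# a courier texts "<job_tag> bid" and receives one of the replies returned above
# (invalid tag, job not open, already bid, bidding window closed, or the confirmation).
# Hypothetical exchange, assuming an open job with the made-up tag "k7f2":
#   courier -> "k7f2 bid"
#   system  -> "Thank you! You've made a bid to accept job: k7f2 If you get the
#               assignment, we'll text you when the bidding window closes."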
def handle_accept_job(cmd_object, dlg_context, service_registry, **kwargs):
# TODO: verify (non-stale) assignment, update status table
current_time = datetime.datetime.now()
job_tag = cmd_object.job_tag
if not job_tag:
return 'To accept a job assignment, text the job tag, a space, and "acc".'
db_svc = service_registry.lookup('postgres')
with db_svc.txn_scope() as session:
try:
# first, does this user even own this job?
job_bid = lookup_user_job_bid(job_tag, dlg_context.courier.id, session, db_svc)
if not job_bid:
return 'Sorry, it appears the job with tag %s is either expired or not yours to accept.' % job_tag
if lookup_open_bidding_window_by_job_tag(job_tag, session, db_svc):
return 'Sorry -- the bidding window for this job is still open.'
jobstat = lookup_current_job_status(job_tag, session, db_svc)
if jobstat.status == JOB_STATUS_ACCEPTED:
return 'You have already accepted this job.'
else:
# expire this job status and create a new one
jobstat.expired_ts = current_time
session.add(jobstat)
new_status = ObjectFactory.create_job_status(db_svc,
job_tag=job_tag,
status=JOB_STATUS_ACCEPTED,
write_ts=current_time)
session.add(new_status)
jobdata = lookup_job_data_by_tag(job_tag, session, db_svc)
if not jobdata:
raise Exception('No job data entry found for job tag %s.' % job_tag)
new_assignment = ObjectFactory.create_job_assignment(db_svc,
courier_id=dlg_context.courier.id,
job_id=jobdata.id,
job_tag=job_tag)
session.add(new_assignment)
session.flush()
return ' '.join([
'You have accepted job %s.' % job_tag,
'This job will show up in your personal job queue,',
'which you can review by texting "my".',
'You can get the details of this job by texting',
'the job tag, space, and "dt".',
'Godspeed!'
])
except Exception as err:
print(err)
traceback.print_exc(file=sys.stdout)
session.rollback()
return 'There was an error while attempting to accept this job. Please contact your administrator.'
def handle_help(cmd_object, dlg_context, service_registry, **kwargs):
return compile_help_string()
def handle_job_details(cmd_object, dlg_context, service_registry, **kwargs):
if cmd_object.job_tag is None:
return 'To receive details on a job, text the job tag, a space, and "dt".'
db_svc = service_registry.lookup('postgres')
with db_svc.txn_scope() as session:
job = lookup_job_data_by_tag(cmd_object.job_tag, session, db_svc)
if not job:
return 'The job with tag "%s" is either not in the system, or has already been scheduled.' % cmd_object.job_tag
lines = []
lines.append('pickup address: %s' % job.pickup_address)
lines.append('pickup borough: %s' % job.pickup_borough)
lines.append('pickup neighborhood: %s' % job.pickup_neighborhood)
lines.append('pickup zipcode: %s' % job.pickup_zip)
lines.append('delivery address: %s' % job.delivery_address)
lines.append('delivery borough: %s' % job.delivery_borough)
lines.append('delivery zipcode: %s' % job.delivery_zip)
lines.append('items: %s' % job.items)
return '\n\n'.join(lines)
def handle_en_route(cmd_object, dlg_context, service_registry, **kwargs):
current_time = datetime.datetime.now()
db_svc = service_registry.lookup('postgres')
with db_svc.txn_scope() as session:
try:
jobs = list_accepted_jobs(dlg_context.courier.id, session, db_svc)
if not len(jobs):
return 'There are no jobs in your queue.'
if not cmd_object.job_tag:
if len(jobs) == 1:
# if there is only one job in the queue, that's the one
job_tag = jobs[0].job_tag
else:
return ' '.join([
"To notify the network that you're en route,",
"please text the tag of the job you're starting,",
'space, then "%s".' % cmd_object.cmdspec.command
])
else:
job_tag = cmd_object.job_tag
if not job_belongs_to_courier(job_tag, dlg_context.courier.id, session, db_svc):
return 'Job with tag %s does not appear to be one of yours.' % job_tag
print('### performing en-route status update for job %s...' % job_tag)
current_job_status = lookup_current_job_status(job_tag,
session,
db_svc)
if current_job_status.status == JOB_STATUS_IN_PROGRESS:
return 'You have already reported en-route status for this job.'
else:
current_job_status.expired_ts = current_time
session.add(current_job_status)
new_job_status = ObjectFactory.create_job_status(db_svc,
job_tag=job_tag,
status=JOB_STATUS_IN_PROGRESS,
write_ts=current_time)
session.add(new_job_status)
session.flush()
return ' '.join([
"You have reported that you're en route for job:",
'%s. Godspeed!' % job_tag
])
except Exception as err:
session.rollback()
print(err)
traceback.print_exc(file=sys.stdout)
return 'There was an error updating the status of this job. Please contact your administrator.'
def handle_cancel_job(cmd_object, dlg_context, service_registry, **kwargs):
if not cmd_object.job_tag:
return 'To cancel a job, text the job tag, a space, and "can".'
job_tag = cmd_object.job_tag
db_svc = service_registry.lookup('postgres')
with db_svc.txn_scope() as session:
if not job_belongs_to_courier(job_tag, dlg_context.courier.id, session, db_svc):
return 'Job with tag %s does not appear to be one of yours.' % job_tag
jstat = lookup_current_job_status(job_tag, session, db_svc)
if jstat.status not in [JOB_STATUS_ACCEPTED, JOB_STATUS_IN_PROGRESS]:
return "You cannot cancel a job unless it's either accepted or in progress."
# update the status table and rebroadcast the job
update_job_status(job_tag, JOB_STATUS_BROADCAST, session, db_svc)
jobdata = lookup_job_data_by_tag(job_tag, session, db_svc)
return "Recording job cancellation for job tag: %s" % cmd_object.job_tag
def handle_job_finished(cmd_object, dlg_context, service_registry, **kwargs):
current_time = datetime.datetime.now()
db_svc = service_registry.lookup('postgres')
with db_svc.txn_scope() as session:
try:
job_tag = None
jobs = list_accepted_jobs(dlg_context.courier.id, session, db_svc)
if not len(jobs):
return 'There are no jobs in your queue.'
if not cmd_object.job_tag:
if len(jobs) == 1:
# only one job in your queue; this must be it
job_tag = jobs[0].job_tag
elif len(jobs) > 1:
return ' '.join([
'To notify the system that you have completed a job,',
'text the job tag, space, and "%s".' % cmd_object.cmdspec.command
])
else:
job_tag = cmd_object.job_tag
if not job_belongs_to_courier(job_tag, dlg_context.courier.id, session, db_svc):
return 'Job with tag %s does not appear to be one of yours.' % job_tag
print('### performing ->complete status update for job %s...' % job_tag)
current_job_status = lookup_current_job_status(job_tag,
session,
db_svc)
if current_job_status.status == JOB_STATUS_COMPLETED:
return 'You have already reported that this job is complete. Thanks again!'
else:
current_job_status.expired_ts = current_time
session.add(current_job_status)
new_job_status = ObjectFactory.create_job_status(db_svc,
job_tag=job_tag,
status=JOB_STATUS_COMPLETED,
write_ts=current_time)
session.add(new_job_status)
session.flush()
return ' '.join([
'Recording job completion for job tag:',
'%s. Thank you!' % job_tag
])
except Exception as err:
session.rollback()
print(err)
traceback.print_exc(file=sys.stdout)
return 'There was an error updating the status of this job. Please contact your administrator.'
def handle_emergency(cmd_object, dlg_context, service_registry, **kwargs):
# TODO: add broadcast logic
courier_name = '%s %s' % (dlg_context.courier.first_name, dlg_context.courier.last_name)
return ' '.join([
"Reporting an emergency for courier %s" % courier_name,
"with mobile phone number %s." % dlg_context.source_number,
"To report a crime or a life-threatening emergency, ",
"please IMMEDIATELY call 911."
])
def handle_bidding_status_for_job(cmd_object, dlg_context, service_registry, **kwargs):
# TODO: return actual bidding status (is bidding open? closed? Has job been awarded? Accepted?)
if not cmd_object.job_tag:
return 'To see the bidding status of a job, text the job tag, space, and "bst".'
return "Placeholder for reporting bidding status of a job"
def generate_list_my_awarded_jobs(cmd_object, dlg_engine, dlg_context, service_registry, **kwargs):
'''List jobs which have been awarded to this courier in the bidding process
(but not yet accepted).
'''
db_svc = service_registry.lookup('postgres')
with db_svc.txn_scope() as session:
job_records = list_awarded_jobs(dlg_context.courier.id, session, db_svc)
if not len(job_records):
return 'Either you have accepted all your awarded jobs, or you have not been awarded any.'
responder = ListOutputResponder(cmd_object.cmdspec,
parse_sms_message_body,
single_item_noun='awarded job',
plural_item_noun='awarded jobs')
raw_jobs = [j.job_tag for j in job_records]
return responder.generate(command_object=cmd_object,
record_list=raw_jobs,
render_callback=render_job_line,
filter_callback=filter_job_tag,
dialog_context=dlg_context,
dialog_engine=dlg_engine,
service_registry=service_registry)
def generate_list_my_accepted_jobs(cmd_object, dlg_engine, dlg_context, service_registry, **kwargs):
job_records = []
db_svc = service_registry.lookup('postgres')
with db_svc.txn_scope() as session:
JobAssignment = db_svc.Base.classes.job_assignments
JobStatus = db_svc.Base.classes.job_status
# resultset = session.query(JobAssignment).filter(JobAssignment.courier_id == dlg_context.courier.id).all()
for ja, stat in session.query(JobAssignment,
JobStatus).filter(and_(JobStatus.job_tag == JobAssignment.job_tag,
JobStatus.status == JOB_STATUS_ACCEPTED,
JobStatus.expired_ts == None,
JobAssignment.courier_id == dlg_context.courier.id)).all():
job_records.append(stat)
if not len(job_records):
its associated value indicating the destination
:type node: dict, None
:param node_id: The ID of the node in which the message will be published
:type node_id: str, None
:param node_url: The URL of the node in which the message will be published
.. note:: This argument is necessary in the absence of the ``node`` and ``node_id`` arguments.
:type node_url: str, None
:param canonical_url: The search engine-friendly URL to the message
:type canonical_url: str, None
:param context_id: Metadata on a message to identify the message with an external identifier of your choosing
:type context_id: str, None
:param context_url: Metadata on a message representing a URL to associate with the message (external identifier)
:type context_url: str, None
:param is_answer: Designates the message as an answer on a Q&A board
:type is_answer: bool, None
:param is_draft: Indicates whether or not the message is still a draft (i.e. unpublished)
:type is_draft: bool, None
:param read_only: Indicates whether or not the message should be read-only or have replies/comments blocked
:type read_only: bool, None
:param seo_title: The title of the message used for SEO purposes
:type seo_title: str, None
:param seo_description: A description of the message used for SEO purposes
:type seo_description: str, None
:param teaser: The message teaser (used with blog articles)
:type teaser: str, None
:param tags: The query to retrieve tags applied to the message
:type tags: dict, None
:param cover_image: The cover image set for the message
:type cover_image: dict, None
:param images: The query to retrieve images uploaded to the message
:type images: dict, None
:param labels: The query to retrieve labels applied to the message
:type labels: dict, None
:param product_category: The product category (i.e. container for ``products``) associated with the message
:type product_category: dict, None
:param products: The product in a product catalog associated with the message
:type products: dict, None
:param topic: The root message of the conversation in which the message appears
:type topic: dict, None
:param videos: The query to retrieve videos uploaded to the message
:type videos: dict, None
:param parent: The parent of the message
:type parent: str, None
:param status: The message status for messages where conversation.style is ``idea`` or ``contest``
.. caution:: This property is not returned if the message has the default ``Unspecified`` status
assigned. It will only be returned for ideas with a status of ``Completed`` or with a
custom status created in Community Admin.
:type status: dict, None
:param moderation_status: The moderation status of the message
.. note:: Acceptable values are ``unmoderated``, ``approved``, ``rejected``,
``marked_undecided``, ``marked_approved`` and ``marked_rejected``.
:type moderation_status: str, None
:param attachments_to_add: The full path(s) to one or more attachments (e.g. ``path/to/file1.pdf``) to be
added to the message
:type attachments_to_add: str, tuple, list, set, None
:param attachments_to_remove: One or more attachments to remove from the message
.. note:: Each attachment should specify the attachment id of the attachment to
remove, which begins with ``m#_``. (e.g. ``m283_file1.pdf``)
:type attachments_to_remove: str, tuple, list, set, None
:param overwrite_tags: Determines if tags should overwrite any existing tags (where applicable) or if the tags
should be appended to the existing tags (default)
:type overwrite_tags: bool
:param ignore_non_string_tags: Determines if non-strings (excluding iterables) should be ignored rather than
converted to strings (``False`` by default)
:type ignore_non_string_tags: bool
:param msg_id: Message ID of an existing message so that its existing tags can be retrieved (optional)
:type msg_id: str, int, None
:param khoros_object: The core :py:class:`khoros.Khoros` object
.. note:: The core object is only necessary when providing a Message ID as it will be
needed to retrieve the existing tags from the message.
:type khoros_object: class[khoros.Khoros], None
:param action: Defines if the payload will be used to ``create`` (default) or ``update`` a message
:type action: str
:returns: The properly formatted JSON payload
:raises: :py:exc:`TypeError`, :py:exc:`ValueError`, :py:exc:`khoros.errors.exceptions.MissingRequiredDataError`,
:py:exc:`khoros.errors.exceptions.DataMismatchError`
"""
# Define the default payload structure
payload = {
"data": {
"type": "message"
}
}
# Ensure the required fields are defined if creating a message
if action == 'create':
_verify_required_fields(node, node_id, node_url, subject)
# Define the destination
if action == 'create' or any((node, node_id, node_url)):
if not node:
if node_id:
node = {"id": f"{node_id}"}
else:
node = {"id": f"{nodes.get_node_id(url=node_url)}"}
payload['data']['board'] = node
# Add supplied data where appropriate if string or Boolean
supplied_data = {
'body': (body, str),
'subject': (subject, str),
'canonical_url': (canonical_url, str),
'context_id': (context_id, str),
'context_url': (context_url, str),
'is_answer': (is_answer, bool),
'is_draft': (is_draft, bool),
'read_only': (read_only, bool),
'seo_title': (seo_title, str),
'seo_description': (seo_description, str),
'teaser': (teaser, str)
}
for field_name, field_value in supplied_data.items():
if field_value[0]:
if field_value[1] == str:
payload['data'][field_name] = f"{field_value[0]}"
elif field_value[1] == bool:
bool_value = bool(field_value[0]) if isinstance(field_value[0], str) else field_value[0]
payload['data'][field_name] = bool_value
# Add moderation status to payload when applicable
payload = _add_moderation_status_to_payload(payload, moderation_status)
# Add tags to payload when applicable
if tags:
payload = _add_tags_to_payload(payload, tags, _khoros_object=khoros_object, _msg_id=msg_id,
_overwrite_tags=overwrite_tags, _ignore_non_strings=ignore_non_string_tags)
# TODO: Add functionality for remaining non-string and non-Boolean arguments
return payload
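# Illustrative sketch (not part of the original source): with action='create',
# subject="Hello", body="<p>Hi</p>" and node_id="my-board" (a hypothetical board ID),
# and assuming _verify_required_fields() passes and no tags or moderation status
# are supplied, the logic above produces roughly:
#   {
#       "data": {
#           "type": "message",
#           "board": {"id": "my-board"},
#           "body": "<p>Hi</p>",
#           "subject": "Hello"
#       }
#   }
# Key order is not significant; "board" is simply set before the other supplied
# fields because the destination is resolved first.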
def update(khoros_object, msg_id=None, msg_url=None, subject=None, body=None, node=None, node_id=None, node_url=None,
canonical_url=None, context_id=None, context_url=None, cover_image=None, is_draft=None, labels=None,
moderation_status=None, parent=None, product_category=None, products=None, read_only=None, topic=None,
status=None, seo_title=None, seo_description=None, tags=None, overwrite_tags=False,
ignore_non_string_tags=False, teaser=None, attachments_to_add=None, attachments_to_remove=None,
full_response=None, return_id=None, return_url=None, return_api_url=None, return_http_code=None,
return_status=None, return_error_messages=None, split_errors=False, proxy_user_object=None):
"""This function updates one or more elements of an existing message.
.. versionchanged:: 4.4.0
Introduced the ``proxy_user_object`` parameter to allow messages to be updated on behalf of other users.
.. versionadded:: 2.8.0
:param khoros_object: The core :py:class:`khoros.Khoros` object
:type khoros_object: class[khoros.Khoros]
:param msg_id: The ID of the existing message
:type msg_id: str, int, None
:param msg_url: The URL of the existing message
:type msg_url: str, None
:param subject: The title or subject of the message
:type subject: str, None
:param body: The body of the message in HTML format
:type body: str, None
:param node: A dictionary containing the ``id`` key and its associated value indicating the destination
:type node: dict, None
:param node_id: The ID of the node in which the message will be published
:type node_id: str, None
:param node_url: The URL of the node in which the message will be published
.. note:: This argument is necessary in the absence of the ``node`` and ``node_id`` arguments.
:type node_url: str, None
:param canonical_url: The search engine-friendly URL to the message
:type canonical_url: str, None
:param context_id: Metadata on a message to identify the message with an external identifier of your choosing
:type context_id: str, None
:param context_url: Metadata on a message representing a URL to associate with the message (external identifier)
:type context_url: str, None
:param cover_image: The cover image set for the message
:type cover_image: dict, None
:param is_draft: Indicates whether or not the message is still a draft (i.e. unpublished)
:type is_draft: bool, None
:param labels: The query to retrieve labels applied to the message
:type labels: dict, None
:param moderation_status: The moderation status of the message
.. note:: Acceptable values are ``unmoderated``, ``approved``, ``rejected``,
``marked_undecided``, ``marked_approved`` and ``marked_rejected``.
:type moderation_status: str, None
:param parent: The parent of the message
:type parent: str, None
:param product_category: The product category (i.e. container for ``products``) associated with the message
:type product_category: dict, None
:param products: The product in a product catalog associated with the message
:type products: dict, None
:param read_only: Indicates whether or not the message should be read-only or have replies/comments blocked
:type read_only: bool, None
:param topic: The root message of the conversation in which the message appears
:type topic: dict, None
:param status: The message status for messages where conversation.style is ``idea`` or ``contest``
.. caution:: This property is not returned if the message has the default ``Unspecified`` status
assigned. It will only be returned for ideas with a status of Completed or with a
custom status created in Community Admin.
:type status: dict, None
:param seo_title: The title of the message used for SEO purposes
:type seo_title: str, None
:param seo_description: A description of the message used for SEO purposes
-4, -5): None,
(0, 34, -4, -4): None,
(0, 34, -4, -3): None,
(0, 34, -4, -2): None,
(0, 34, -4, -1): None,
(0, 34, -4, 0): None,
(0, 34, -4, 1): None,
(0, 34, -4, 2): None,
(0, 34, -4, 3): None,
(0, 34, -4, 4): None,
(0, 34, -4, 5): None,
(0, 34, -3, -5): None,
(0, 34, -3, -4): None,
(0, 34, -3, -3): None,
(0, 34, -3, -2): None,
(0, 34, -3, -1): None,
(0, 34, -3, 0): None,
(0, 34, -3, 1): None,
(0, 34, -3, 2): None,
(0, 34, -3, 3): None,
(0, 34, -3, 4): None,
(0, 34, -3, 5): None,
(0, 34, -2, -5): None,
(0, 34, -2, -4): None,
(0, 34, -2, -3): None,
(0, 34, -2, -2): None,
(0, 34, -2, -1): None,
(0, 34, -2, 0): None,
(0, 34, -2, 1): None,
(0, 34, -2, 2): None,
(0, 34, -2, 3): None,
(0, 34, -2, 4): None,
(0, 34, -2, 5): None,
(0, 34, -1, -5): None,
(0, 34, -1, -4): None,
(0, 34, -1, -3): None,
(0, 34, -1, -2): None,
(0, 34, -1, -1): None,
(0, 34, -1, 0): None,
(0, 34, -1, 1): None,
(0, 34, -1, 2): None,
(0, 34, -1, 3): None,
(0, 34, -1, 4): None,
(0, 34, -1, 5): None,
(0, 34, 0, -5): None,
(0, 34, 0, -4): None,
(0, 34, 0, -3): None,
(0, 34, 0, -2): None,
(0, 34, 0, -1): None,
(0, 34, 0, 0): None,
(0, 34, 0, 1): None,
(0, 34, 0, 2): None,
(0, 34, 0, 3): None,
(0, 34, 0, 4): None,
(0, 34, 0, 5): None,
(0, 34, 1, -5): None,
(0, 34, 1, -4): None,
(0, 34, 1, -3): None,
(0, 34, 1, -2): None,
(0, 34, 1, -1): None,
(0, 34, 1, 0): None,
(0, 34, 1, 1): None,
(0, 34, 1, 2): None,
(0, 34, 1, 3): None,
(0, 34, 1, 4): None,
(0, 34, 1, 5): None,
(0, 34, 2, -5): None,
(0, 34, 2, -4): None,
(0, 34, 2, -3): None,
(0, 34, 2, -2): None,
(0, 34, 2, -1): None,
(0, 34, 2, 0): None,
(0, 34, 2, 1): None,
(0, 34, 2, 2): None,
(0, 34, 2, 3): None,
(0, 34, 2, 4): None,
(0, 34, 2, 5): None,
(0, 34, 3, -5): None,
(0, 34, 3, -4): None,
(0, 34, 3, -3): None,
(0, 34, 3, -2): None,
(0, 34, 3, -1): None,
(0, 34, 3, 0): None,
(0, 34, 3, 1): None,
(0, 34, 3, 2): None,
(0, 34, 3, 3): None,
(0, 34, 3, 4): None,
(0, 34, 3, 5): None,
(0, 34, 4, -5): None,
(0, 34, 4, -4): None,
(0, 34, 4, -3): None,
(0, 34, 4, -2): None,
(0, 34, 4, -1): None,
(0, 34, 4, 0): None,
(0, 34, 4, 1): None,
(0, 34, 4, 2): None,
(0, 34, 4, 3): None,
(0, 34, 4, 4): None,
(0, 34, 4, 5): None,
(0, 34, 5, -5): None,
(0, 34, 5, -4): None,
(0, 34, 5, -3): None,
(0, 34, 5, -2): None,
(0, 34, 5, -1): None,
(0, 34, 5, 0): None,
(0, 34, 5, 1): None,
(0, 34, 5, 2): None,
(0, 34, 5, 3): None,
(0, 34, 5, 4): None,
(0, 34, 5, 5): None,
(0, 35, -5, -5): None,
(0, 35, -5, -4): None,
(0, 35, -5, -3): None,
(0, 35, -5, -2): None,
(0, 35, -5, -1): None,
(0, 35, -5, 0): None,
(0, 35, -5, 1): None,
(0, 35, -5, 2): None,
(0, 35, -5, 3): None,
(0, 35, -5, 4): None,
(0, 35, -5, 5): None,
(0, 35, -4, -5): None,
(0, 35, -4, -4): None,
(0, 35, -4, -3): None,
(0, 35, -4, -2): None,
(0, 35, -4, -1): None,
(0, 35, -4, 0): None,
(0, 35, -4, 1): None,
(0, 35, -4, 2): None,
(0, 35, -4, 3): None,
(0, 35, -4, 4): None,
(0, 35, -4, 5): None,
(0, 35, -3, -5): None,
(0, 35, -3, -4): None,
(0, 35, -3, -3): None,
(0, 35, -3, -2): None,
(0, 35, -3, -1): None,
(0, 35, -3, 0): None,
(0, 35, -3, 1): None,
(0, 35, -3, 2): None,
(0, 35, -3, 3): None,
(0, 35, -3, 4): None,
(0, 35, -3, 5): None,
(0, 35, -2, -5): None,
(0, 35, -2, -4): None,
(0, 35, -2, -3): None,
(0, 35, -2, -2): None,
(0, 35, -2, -1): None,
(0, 35, -2, 0): None,
(0, 35, -2, 1): None,
(0, 35, -2, 2): None,
(0, 35, -2, 3): None,
(0, 35, -2, 4): None,
(0, 35, -2, 5): None,
(0, 35, -1, -5): None,
(0, 35, -1, -4): None,
(0, 35, -1, -3): None,
(0, 35, -1, -2): None,
(0, 35, -1, -1): None,
(0, 35, -1, 0): None,
(0, 35, -1, 1): None,
(0, 35, -1, 2): None,
(0, 35, -1, 3): None,
(0, 35, -1, 4): None,
(0, 35, -1, 5): None,
(0, 35, 0, -5): None,
(0, 35, 0, -4): None,
(0, 35, 0, -3): None,
(0, 35, 0, -2): None,
(0, 35, 0, -1): None,
(0, 35, 0, 0): None,
(0, 35, 0, 1): None,
(0, 35, 0, 2): None,
(0, 35, 0, 3): None,
(0, 35, 0, 4): None,
(0, 35, 0, 5): None,
(0, 35, 1, -5): None,
(0, 35, 1, -4): None,
(0, 35, 1, -3): None,
(0, 35, 1, -2): None,
(0, 35, 1, -1): None,
(0, 35, 1, 0): None,
(0, 35, 1, 1): None,
(0, 35, 1, 2): None,
(0, 35, 1, 3): None,
(0, 35, 1, 4): None,
(0, 35, 1, 5): None,
(0, 35, 2, -5): None,
(0, 35, 2, -4): None,
(0, 35, 2, -3): None,
(0, 35, 2, -2): None,
(0, 35, 2, -1): None,
(0, 35, 2, 0): None,
(0, 35, 2, 1): None,
(0, 35, 2, 2): None,
(0, 35, 2, 3): None,
(0, 35, 2, 4): None,
(0, 35, 2, 5): None,
(0, 35, 3, -5): None,
(0, 35, 3, -4): None,
(0, 35, 3, -3): None,
(0, 35, 3, -2): None,
(0, 35, 3, -1): None,
(0, 35, 3, 0): None,
(0, 35, 3, 1): None,
(0, 35, 3, 2): None,
(0, 35, 3, 3): None,
(0, 35, 3, 4): None,
(0, 35, 3, 5): None,
(0, 35, 4, -5): None,
(0, 35, 4, -4): None,
(0, 35, 4, -3): None,
(0, 35, 4, -2): None,
(0, 35, 4, -1): None,
(0, 35, 4, 0): None,
(0, 35, 4, 1): None,
(0, 35, 4, 2): None,
(0, 35, 4, 3): None,
(0, 35, 4, 4): None,
(0, 35, 4, 5): None,
(0, 35, 5, -5): None,
(0, 35, 5, -4): None,
(0, 35, 5, -3): None,
(0, 35, 5, -2): None,
(0, 35, 5, -1): None,
(0, 35, 5, 0): None,
(0, 35, 5, 1): None,
(0, 35, 5, 2): None,
(0, 35, 5, 3): None,
(0, 35, 5, 4): None,
(0, 35, 5, 5): None,
(1, 32, -5, -5): None,
(1, 32, -5, -4): None,
(1, 32, -5, -3): None,
(1, 32, -5, -2): None,
(1, 32, -5, -1): None,
(1, 32, -5, 0): None,
(1, 32, -5, 1): None,
(1, 32, -5, 2): None,
(1, 32, -5, 3): None,
(1, 32, -5, 4): None,
(1, 32, -5, 5): None,
(1, 32, -4, -5): None,
(1, 32, -4, -4): None,
(1, 32, -4, -3): None,
(1, 32, -4, -2): None,
(1, 32, -4, -1): None,
(1, 32, -4, 0): None,
(1, 32, -4, 1): None,
(1, 32, -4, 2): None,
(1, 32, -4, 3): None,
(1, 32, -4, 4): None,
(1, 32, -4, 5): None,
(1, 32, -3, -5): None,
(1, 32, -3, -4): None,
(1, 32, -3, -3): None,
(1, 32,
from numpy.testing import assert_array_equal
import numpy as np
import tempfile
import secrets
import h5py
from hdf5zarr import HDF5Zarr
import pytest
import itertools
class HDF5ZarrBase(object):
##########################################
# basic tests #
##########################################
@pytest.mark.parametrize('visit_type', [None], ids=[""])
def test_consolidate_metadata(self):
zgroup = self.hdf5zarr.consolidate_metadata()
def test_groups(self):
""" test if group exists """
def _test_groups(name, hobj_info):
if hobj_info.type == h5py.h5o.TYPE_GROUP:
self.zgroup[name]
self.zgroup[name.decode('utf-8')]
h5py.h5o.visit(self.hfile.id, _test_groups, info=True)
def test_dsets(self):
""" test if dataset exists """
def _test_dsets(name, hobj_info):
if hobj_info.type == h5py.h5o.TYPE_DATASET:
self.zgroup[name]
self.zgroup[name.decode('utf-8')]
h5py.h5o.visit(self.hfile.id, _test_dsets, info=True)
##########################################
# dataset properties tests #
##########################################
def test_dset_properties_dtype(self):
""" test if dataset dtypes are equal """
def _test_dtype(zobj, hobj, hobj_info):
if hobj_info.type == h5py.h5o.TYPE_DATASET:
assert zobj.dtype == hobj.dtype
self._visit_item(_test_dtype)
def test_dset_properties_shape(self):
""" test if dataset shapes are equal """
def _test_shape(zobj, hobj, hobj_info):
if hobj_info.type == h5py.h5o.TYPE_DATASET:
assert zobj.shape == hobj.shape
self._visit_item(_test_shape)
def test_dset_properties_chunks(self):
""" test if datasets properties are equal """
def _test_chunks(zobj, hobj, hobj_info):
if hobj_info.type == h5py.h5o.TYPE_DATASET:
if hobj.chunks is None:
chunks = tuple(s if s != 0 else 1 for s in hobj.shape)
else:
chunks = hobj.chunks
assert zobj.chunks == chunks
self._visit_item(_test_chunks)
def test_dset_properties_fillvalue(self):
""" test if datasets properties are equal """
def _test_fillvalue(zobj, hobj, hobj_info):
if hobj_info.type == h5py.h5o.TYPE_DATASET:
assert_array_equal(zobj.fill_value, hobj.fillvalue)
self._visit_item(_test_fillvalue)
##########################################
# dataset read tests #
##########################################
def test_zarray_read(self):
""" test if zarr arrays are read """
def _test_dsets_read(zobj, hobj, hobj_info):
if hobj_info.type == h5py.h5o.TYPE_DATASET:
zval = zobj[()]
self._visit_item(_test_dsets_read)
def test_dset_val(self):
""" test if zarr arrays and datasets are equal """
def _test_dset_val(zobj, hobj, hobj_info):
if hobj_info.type == h5py.h5o.TYPE_DATASET:
hval = hobj[()]
zval = zobj[()]
assert_array_equal(hval, zval)
self._visit_item(_test_dset_val)
def test_attrs(self):
""" test if attributes exist """
def _test_attr(zobj, hobj, hobj_info):
for name in hobj.attrs:
zattr = zobj.attrs[name]
self._visit_item(_test_attr)
def test_read_attrs(self):
""" test if attributes are equal """
def _test_read_attrs(zobj, hobj, hobj_info):
for name in hobj.attrs:
hattr = hobj.attrs[name]
zattr = zobj.attrs[name]
assert_array_equal(zattr, hattr)
self._visit_item(_test_read_attrs)
@pytest.fixture(autouse=True)
def visit_files(self, request):
# file number
fnum = request.node.callspec.params['fnum']
self.hfile, self.hdf5zarr = self.file_list[fnum], self.hdf5zarr_list[fnum]
self.zgroup = self.hdf5zarr.zgroup
# visit hdf5 items
# visit types, objects or links
@pytest.fixture(autouse=True, params=["objects_only", "links_only"])
def visit_type(self, request):
self.visittype = request.param
# collect flag
self.fkeep = request.config.getoption("fkeep")
self._ex = []
# visit objects
if self.visittype == "objects_only":
self._visit_item = self.visit_obj_func
elif self.visittype == "links_only":
self._visit_item = self.visit_link_func
elif self.visittype is None:
pass
else:
raise Exception("Invalid visit_type parameter")
yield
if request.node.rep_setup.passed:
if request.node.rep_call.failed:
if self.fkeep and len(self._ex) > 0:
ex, name = self._ex[0]
hobj = self.hfile[name]
else:
hobj = self.hobj
print(f"""HDF5Zarr args: (
filename = '{self.hdf5zarr.filename}',
store = {self.hdf5zarr.store},
store_mode = {self.hdf5zarr.store_mode},
max_chunksize = {self.hdf5zarr.max_chunksize},
)""")
print("executing test failed for", request.node.name, hobj.file.filename)
if hobj.file.filename != self._testfilename and isinstance(hobj, h5py.Dataset):
print(f"""hdf5 Dataset: (
name = '{hobj.name}',
shape = {hobj.shape},
dtype = {hobj.dtype},
chunks = {hobj.chunks},
maxshape = {hobj.maxshape},
track_times = None,
track_order = None,
fillvalue = {hobj.fillvalue},
data = {hobj[()]},
)""")
if self.fkeep and len(self._ex) > 1:
ex_m, name_m = self._ex[np.argmin([self.hfile[name].size for ex, name in self._ex])]
if name_m != name:
hobj = self.hfile[name_m]
print("executing test failed for", request.node.name, hobj.file.filename)
if hobj.file.filename != self._testfilename and isinstance(hobj, h5py.Dataset):
print(f"""(
name = '{hobj.name}',
shape = {hobj.shape},
dtype = {hobj.dtype},
chunks = {hobj.chunks},
maxshape = {hobj.maxshape},
track_times = None,
track_order = None,
fillvalue = {hobj.fillvalue},
data = {hobj[()]},
)""")
def visit_obj_func(self, assert_func):
# visit objects
_ex = []
def _test_obj(name, hobj_info):
nonlocal _ex
self.hobj = self.hfile[name]
self.zobj = self.zgroup[name.decode('utf-8')]
if not self.fkeep:
assert_func(self.zobj, self.hobj, hobj_info)
else:
try:
assert_func(self.zobj, self.hobj, hobj_info)
except AssertionError as ex:
_ex.append([ex, name])
h5py.h5o.visit(self.hfile.id, _test_obj, info=True)
self._ex = _ex
# raise only one exception in case of fkeep == True
if self.fkeep and len(self._ex) > 0:
raise self._ex[0][0]
def visit_link_func(self, assert_func):
# visit links
_ex = []
def _test_obj(name, hlink_info):
nonlocal _ex
self.hobj = self.hfile[name]
self.zobj = self.zgroup[name.decode('utf-8')]
hobj_info = h5py.h5g.get_objinfo(self.hobj.id)
if hlink_info.type == h5py.h5l.TYPE_SOFT:
if not self.fkeep:
assert_func(self.zobj, self.hobj, hobj_info)
else:
try:
assert_func(self.zobj, self.hobj, hobj_info)
except AssertionError as ex:
_ex.append([ex, name])
else:
# TO DO
pass
self.hfile.id.links.visit(_test_obj, info=True)
self._ex = _ex
# raise only one exception in case of fkeep == True
if self.fkeep and len(self._ex) > 0:
raise self._ex[0][0]
class TestHDF5Zarr(HDF5ZarrBase):
""" Comparing HDF5Zarr read with h5py """
@classmethod
def setup_class(cls):
# list of numpy dtypes up to 8 bytes
cls.attribute_dtypes = list(set(np.typeDict.values()) -
set([np.void, np.str_, np.bytes_, np.object_, np.timedelta64,
np.complex64, np.complex256, np.float128, np.complex128,
np.datetime64]))
cls.dset_dtypes = cls.attribute_dtypes
cls.depth = 4 # nested groups depth
# all n_* are per group or per object
cls.n_dsets = 4 # number of regular (or scalar) datasets without object references or struct array dtypes in each group
cls.n_groups = 3 # number of groups in each group
cls.n_groupsoftlink = 1 # number of soft links to another group in each group
cls.n_grouphardlink = 1 # TO DO number of hard links to another group in each group
cls.n_dsetsoftlink = 1 # number of soft links to another dataset in each group
cls.n_dsethardlink = 1 # TO DO number of hard links to another dataset in each group
cls.n_objectrefdset = 1 # TO DO number of object reference datasets in each group
cls.n_structarraywithobjectrefdset = 1 # TO DO number of struct array datasets containing object ref dtype in each group
cls.n_structarrayobjectrefdtype = 1 # TO DO number of object ref dtypes if used in a struct array
cls.n_structarrayregulardset = 1 # TO DO number of struct array datasets without object refernce dtype in each group
cls.n_structarraydtypelen = 4 # TO DO length of struct array dtypes in datasets
cls.n_attributes_min = 5 # min number of attributes for each object
cls.n_nulldsets_infile = 1 # TO DO number of null datasets in file
cls.srand = secrets.SystemRandom()
if cls.hdf5files_option:
cls.file_list = [h5py.File(i, 'r') for i in cls.hdf5file_names]
else:
cls.file_list = [cls._create_file(i) for i in cls.hdf5file_names]
cls.hdf5zarr_list = [HDF5Zarr(f.filename, max_chunksize=None) for f in cls.file_list]
# prepend _testfile if hdf5files are not specified
if not cls.hdf5files_option:
cls.file_list.insert(0, cls._testfile())
cls.hdf5zarr_list.insert(0, HDF5Zarr(cls.file_list[0].filename, max_chunksize=None))
# track which temporary files are already saved.
# if hdf5files_option is passed, mark them as already saved
num_files = len(cls.file_list)
cls.fnum_keep = {i: cls.hdf5files_option for i in range(1, num_files)}
# do not save "_testfile"
cls.fnum_keep[0] = True
if not cls.disable_max_chunksize:
cls.file_list = cls.file_list*3
cls.hdf5zarr_list += [None]*num_files*2
for i in range(num_files, num_files*2):
print(i)
cls.hdf5zarr_list[i] = HDF5Zarr(cls.file_list[i].filename, max_chunksize=1000)
for i in range(num_files*2, num_files*3):
cls.hdf5zarr_list[i] = HDF5Zarr(cls.file_list[i].filename, max_chunksize=2**cls.srand.randint(10, 20))
@classmethod
def teardown_class(cls):
for f in cls.file_list:
f.delete = True
f.close()
@classmethod
def _create_file(cls, name):
""" create test hdf5 file """
srand = cls.srand
# create hdf5 file
cls.temp_file = tempfile.NamedTemporaryFile(suffix=".hdf5", prefix=name, delete=False)
cls.temp_file.close()
hfile = h5py.File(cls.temp_file.name, 'w')
# create nested groups
groupnames_prefix = [chr(65+i)for i in range(cls.n_groups)] # e.g. ['A', 'B', 'C']
group_list = [hfile] # list containing all groups
def _create_groups(obj, d):
nonlocal group_list
for c in groupnames_prefix:
g_name = c + str(cls.depth - d)
g = obj.create_group(g_name)
group_list.append(g)
if d > 0:
_create_groups(obj[g_name], d-1)
_create_groups(hfile, cls.depth)
# create softlinks to groups
for g in group_list:
for i in range(cls.n_groupsoftlink):
# do not use rand_rng.choice
target_str = srand.choice(group_list).name
g[f"SoftLg{i}"] = h5py.SoftLink(target_str)
# create datasets
# TO DO, external dsets
# TO DO, compression
srand.shuffle(cls.dset_dtypes)
iter_dtypes = itertools.cycle(cls.dset_dtypes) # shuffle dtypes to cycle over when creating dsets
iter_chunks = itertools.cycle([True, None]) # True or False cycle for auto chunking
iter_track_times = itertools.cycle([False, True]) # True or False cycle for track_times
iter_track_order = itertools.cycle([False, False, True, True]) # True or False cycle for track_order
iter_fillvalue = itertools.cycle([None, True, True, None]) # True or False cycle for track_order
rand_rng = np.random.default_rng()
dset_list = []
for g in group_list:
# TO DO, add test with datasets with zero in dimensions
for i in range(cls.n_dsets):
shape = srand.choices(range(1, 90//(i or 1)), k=i) # dset i has i dimensions
size = np.prod(shape)
dtype = next(iter_dtypes)
if dtype == np.bool_:
data = np.frombuffer(rand_rng.bytes(size*8), dtype=np.int64) > 0
elif dtype == np.datetime64:
data = np.datetime64('1970-01-01T00:00:00', 'ns') + np.frombuffer(rand_rng.bytes(size*8), dtype=np.uint64)
dtype = h5py.opaque_dtype(data.dtype)
data = data.astype(dtype)
else:
data = np.frombuffer(rand_rng.bytes(size*np.dtype(dtype).itemsize), dtype=dtype)
# create_dataset options comptability
if len(shape) > 0:
chunks = next(iter_chunks)
else:
chunks =
#! /usr/bin/env python
"""Generate C code from an ASDL description."""
import os, sys
import asdl
TABSIZE = 4
MAX_COL = 80
def get_c_type(name):
"""Return a string for the C name of the type.
This function special cases the default types provided by asdl.
"""
if name in asdl.builtin_types:
return name
else:
return "%s_ty" % name
def reflow_lines(s, depth):
"""Reflow the line s indented depth tabs.
Return a sequence of lines where no line extends beyond MAX_COL
when properly indented. The first line is properly indented based
exclusively on depth * TABSIZE. All following lines -- these are
the reflowed lines generated by this function -- start at the same
column as the first character beyond the opening { in the first
line.
"""
size = MAX_COL - depth * TABSIZE
if len(s) < size:
return [s]
lines = []
cur = s
padding = ""
while len(cur) > size:
i = cur.rfind(' ', 0, size)
# XXX this should be fixed for real
if i == -1 and 'GeneratorExp' in cur:
i = size + 3
assert i != -1, "Impossible line %d to reflow: %r" % (size, s)
lines.append(padding + cur[:i])
if len(lines) == 1:
# find new size based on brace
j = cur.find('{', 0, i)
if j >= 0:
j += 2 # account for the brace and the space after it
size -= j
padding = " " * j
else:
j = cur.find('(', 0, i)
if j >= 0:
j += 1 # account for the paren (no space after it)
size -= j
padding = " " * j
cur = cur[i+1:]
else:
lines.append(padding + cur)
return lines
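# Hedged example of the reflow behaviour described above: a line that already fits
# within MAX_COL minus the indentation is returned unchanged,
#   reflow_lines("static PyObject* ast2obj_object(void*, PyObject*);", 0)
#   -> ["static PyObject* ast2obj_object(void*, PyObject*);"]
# while a longer line is split at spaces, with continuation lines padded to the
# column just past the first '{' or '(' found in the first emitted line.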
def is_simple(sum):
"""Return True if a sum is a simple.
A sum is simple if its types have no fields, e.g.
unaryop = Invert | Not | UAdd | USub
"""
for t in sum.types:
if t.fields:
return False
return True
class EmitVisitor(asdl.VisitorBase):
"""Visit that emits lines"""
def __init__(self, file):
self.file = file
self.identifiers = set()
super(EmitVisitor, self).__init__()
def emit_identifier(self, name):
name = str(name)
if name in self.identifiers:
return
self.emit("_Py_IDENTIFIER(%s);" % name, 0)
self.identifiers.add(name)
def emit(self, s, depth, reflow=True):
# XXX reflow long lines?
if reflow:
lines = reflow_lines(s, depth)
else:
lines = [s]
for line in lines:
if line:
line = (" " * TABSIZE * depth) + line
self.file.write(line + "\n")
class TypeDefVisitor(EmitVisitor):
def visitModule(self, mod):
for dfn in mod.dfns:
self.visit(dfn)
def visitType(self, type, depth=0):
self.visit(type.value, type.name, depth)
def visitSum(self, sum, name, depth):
if is_simple(sum):
self.simple_sum(sum, name, depth)
else:
self.sum_with_constructors(sum, name, depth)
def simple_sum(self, sum, name, depth):
enum = []
for i in range(len(sum.types)):
type = sum.types[i]
enum.append("%s=%d" % (type.name, i + 1))
enums = ", ".join(enum)
ctype = get_c_type(name)
s = "typedef enum _%s { %s } %s;" % (name, enums, ctype)
self.emit(s, depth)
self.emit("", depth)
def sum_with_constructors(self, sum, name, depth):
ctype = get_c_type(name)
s = "typedef struct _%(name)s *%(ctype)s;" % locals()
self.emit(s, depth)
self.emit("", depth)
def visitProduct(self, product, name, depth):
ctype = get_c_type(name)
s = "typedef struct _%(name)s *%(ctype)s;" % locals()
self.emit(s, depth)
self.emit("", depth)
class StructVisitor(EmitVisitor):
"""Visitor to generate typedefs for AST."""
def visitModule(self, mod):
for dfn in mod.dfns:
self.visit(dfn)
def visitType(self, type, depth=0):
self.visit(type.value, type.name, depth)
def visitSum(self, sum, name, depth):
if not is_simple(sum):
self.sum_with_constructors(sum, name, depth)
def sum_with_constructors(self, sum, name, depth):
def emit(s, depth=depth):
self.emit(s % sys._getframe(1).f_locals, depth)
enum = []
for i in range(len(sum.types)):
type = sum.types[i]
enum.append("%s_kind=%d" % (type.name, i + 1))
emit("enum _%(name)s_kind {" + ", ".join(enum) + "};")
emit("struct _%(name)s {")
emit("enum _%(name)s_kind kind;", depth + 1)
emit("union {", depth + 1)
for t in sum.types:
self.visit(t, depth + 2)
emit("} v;", depth + 1)
for field in sum.attributes:
# rudimentary attribute handling
type = str(field.type)
assert type in asdl.builtin_types, type
emit("%s %s;" % (type, field.name), depth + 1);
emit("};")
emit("")
def visitConstructor(self, cons, depth):
if cons.fields:
self.emit("struct {", depth)
for f in cons.fields:
self.visit(f, depth + 1)
self.emit("} %s;" % cons.name, depth)
self.emit("", depth)
def visitField(self, field, depth):
# XXX need to lookup field.type, because it might be something
# like a builtin...
ctype = get_c_type(field.type)
name = field.name
if field.seq:
if field.type == 'cmpop':
self.emit("asdl_int_seq *%(name)s;" % locals(), depth)
else:
self.emit("asdl_seq *%(name)s;" % locals(), depth)
else:
self.emit("%(ctype)s %(name)s;" % locals(), depth)
def visitProduct(self, product, name, depth):
self.emit("struct _%(name)s {" % locals(), depth)
for f in product.fields:
self.visit(f, depth + 1)
for field in product.attributes:
# rudimentary attribute handling
type = str(field.type)
assert type in asdl.builtin_types, type
self.emit("%s %s;" % (type, field.name), depth + 1);
self.emit("};", depth)
self.emit("", depth)
class PrototypeVisitor(EmitVisitor):
"""Generate function prototypes for the .h file"""
def visitModule(self, mod):
for dfn in mod.dfns:
self.visit(dfn)
def visitType(self, type):
self.visit(type.value, type.name)
def visitSum(self, sum, name):
if is_simple(sum):
pass # XXX
else:
for t in sum.types:
self.visit(t, name, sum.attributes)
def get_args(self, fields):
"""Return list of C argument into, one for each field.
Argument info is 3-tuple of a C type, variable name, and flag
that is true if type can be NULL.
"""
args = []
unnamed = {}
for f in fields:
if f.name is None:
name = f.type
c = unnamed[name] = unnamed.get(name, 0) + 1
if c > 1:
name = "name%d" % (c - 1)
else:
name = f.name
# XXX should extend get_c_type() to handle this
if f.seq:
if f.type == 'cmpop':
ctype = "asdl_int_seq *"
else:
ctype = "asdl_seq *"
else:
ctype = get_c_type(f.type)
args.append((ctype, name, f.opt or f.seq))
return args
def visitConstructor(self, cons, type, attrs):
args = self.get_args(cons.fields)
attrs = self.get_args(attrs)
ctype = get_c_type(type)
self.emit_function(cons.name, ctype, args, attrs)
def emit_function(self, name, ctype, args, attrs, union=True):
args = args + attrs
if args:
argstr = ", ".join(["%s %s" % (atype, aname)
for atype, aname, opt in args])
argstr += ", PyArena *arena"
else:
argstr = "PyArena *arena"
margs = "a0"
for i in range(1, len(args)+1):
margs += ", a%d" % i
self.emit("#define %s(%s) _Py_%s(%s)" % (name, margs, name, margs), 0,
reflow=False)
self.emit("%s _Py_%s(%s);" % (ctype, name, argstr), False)
def visitProduct(self, prod, name):
self.emit_function(name, get_c_type(name),
self.get_args(prod.fields),
self.get_args(prod.attributes),
union=False)
class FunctionVisitor(PrototypeVisitor):
"""Visitor to generate constructor functions for AST."""
def emit_function(self, name, ctype, args, attrs, union=True):
def emit(s, depth=0, reflow=True):
self.emit(s, depth, reflow)
argstr = ", ".join(["%s %s" % (atype, aname)
for atype, aname, opt in args + attrs])
if argstr:
argstr += ", PyArena *arena"
else:
argstr = "PyArena *arena"
self.emit("%s" % ctype, 0)
emit("%s(%s)" % (name, argstr))
emit("{")
emit("%s p;" % ctype, 1)
for argtype, argname, opt in args:
if not opt and argtype != "int":
emit("if (!%s) {" % argname, 1)
emit("PyErr_SetString(PyExc_ValueError,", 2)
msg = "field %s is required for %s" % (argname, name)
emit(' "%s");' % msg,
2, reflow=False)
emit('return NULL;', 2)
emit('}', 1)
emit("p = (%s)PyArena_Malloc(arena, sizeof(*p));" % ctype, 1);
emit("if (!p)", 1)
emit("return NULL;", 2)
if union:
self.emit_body_union(name, args, attrs)
else:
self.emit_body_struct(name, args, attrs)
emit("return p;", 1)
emit("}")
emit("")
def emit_body_union(self, name, args, attrs):
def emit(s, depth=0, reflow=True):
self.emit(s, depth, reflow)
emit("p->kind = %s_kind;" % name, 1)
for argtype, argname, opt in args:
emit("p->v.%s.%s = %s;" % (name, argname, argname), 1)
for argtype, argname, opt in attrs:
emit("p->%s = %s;" % (argname, argname), 1)
def emit_body_struct(self, name, args, attrs):
def emit(s, depth=0, reflow=True):
self.emit(s, depth, reflow)
for argtype, argname, opt in args:
emit("p->%s = %s;" % (argname, argname), 1)
for argtype, argname, opt in attrs:
emit("p->%s = %s;" % (argname, argname), 1)
class PickleVisitor(EmitVisitor):
def visitModule(self, mod):
for dfn in mod.dfns:
self.visit(dfn)
def visitType(self, type):
self.visit(type.value, type.name)
def visitSum(self, sum, name):
pass
def visitProduct(self, sum, name):
pass
def visitConstructor(self, cons, name):
pass
def visitField(self, sum):
pass
class Obj2ModPrototypeVisitor(PickleVisitor):
def visitProduct(self, prod, name):
code = "static int obj2ast_%s(PyObject* obj, %s* out, PyArena* arena);"
self.emit(code % (name, get_c_type(name)), 0)
visitSum = visitProduct
class Obj2ModVisitor(PickleVisitor):
def funcHeader(self, name):
ctype = get_c_type(name)
self.emit("int", 0)
self.emit("obj2ast_%s(PyObject* obj, %s* out, PyArena* arena)" % (name, ctype), 0)
self.emit("{", 0)
self.emit("int isinstance;", 1)
self.emit("", 0)
def sumTrailer(self, name, add_label=False):
self.emit("", | |
# filename: selfdrive/controls/lib/adaptivecruise.py
import math
import numpy as np
from common.numpy_fast import clip, interp
import selfdrive.messaging as messaging
# lookup tables VS speed to determine min and max accels in cruise
_A_CRUISE_MIN_V = [-1.0, -.8, -.67, -.5, -.30]
_A_CRUISE_MIN_BP = [ 0., 5., 10., 20., 40.]
# need fast accel at very low speed for stop and go
_A_CRUISE_MAX_V = [1., 1., .8, .5, .30]
_A_CRUISE_MAX_BP = [0., 5., 10., 20., 40.]
def calc_cruise_accel_limits(v_ego):
a_cruise_min = interp(v_ego, _A_CRUISE_MIN_BP, _A_CRUISE_MIN_V)
a_cruise_max = interp(v_ego, _A_CRUISE_MAX_BP, _A_CRUISE_MAX_V)
return np.vstack([a_cruise_min, a_cruise_max])
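# Worked example (values read straight from the breakpoint tables above): at
# v_ego = 5. m/s the interpolation lands exactly on the second breakpoint,
# giving a_cruise_min = -0.8 m/s^2 and a_cruise_max = 1.0 m/s^2.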
_A_TOTAL_MAX_V = [1.5, 1.9, 3.2]
_A_TOTAL_MAX_BP = [0., 20., 40.]
def limit_accel_in_turns(v_ego, angle_steers, a_target, a_pcm, CP):
#*** this function returns the limited longitudinal acceleration allowed, depending on the existing lateral acceleration
# this should avoid accelerating when losing the target in turns
deg_to_rad = np.pi / 180. # from can reading to rad
a_total_max = interp(v_ego, _A_TOTAL_MAX_BP, _A_TOTAL_MAX_V)
a_y = v_ego**2 * angle_steers * deg_to_rad / (CP.steerRatio * CP.wheelBase)
a_x_allowed = math.sqrt(max(a_total_max**2 - a_y**2, 0.))
a_target[1] = min(a_target[1], a_x_allowed)
a_pcm = min(a_pcm, a_x_allowed)
return a_target, a_pcm
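# Hedged numeric illustration of the lateral/longitudinal trade-off above: at
# v_ego = 20 m/s the total budget is a_total_max = 1.9 m/s^2; if the estimated
# lateral acceleration a_y is 1.0 m/s^2, the remaining longitudinal allowance is
# sqrt(1.9**2 - 1.0**2) ~= 1.62 m/s^2, which caps both a_target[1] and a_pcm.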
def process_a_lead(a_lead):
# soft threshold of 0.5m/s^2 applied to a_lead to reject noise; positive a_lead is also not considered
a_lead_threshold = 0.5
a_lead = min(a_lead + a_lead_threshold, 0)
return a_lead
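# Examples of the soft threshold above:
#   process_a_lead(-1.2) -> -0.7   (mild lead decel is partially discounted)
#   process_a_lead(0.3)  ->  0.0   (positive lead accel is clipped to zero)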
def calc_desired_distance(v_lead):
#*** compute desired distance ***
t_gap = 1.7 # good to be far away
d_offset = 4 # distance when at zero speed
return d_offset + v_lead * t_gap
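# Worked example: for a lead travelling at 20 m/s the desired following
# distance is 4 + 20 * 1.7 = 38 m.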
#linear slope
_L_SLOPE_V = [0.40, 0.10]
_L_SLOPE_BP = [0., 40]
# parabola slope
_P_SLOPE_V = [1.0, 0.25]
_P_SLOPE_BP = [0., 40]
def calc_desired_speed(d_lead, d_des, v_lead, a_lead):
#*** compute desired speed ***
# the desired speed curve is divided in 4 portions:
# 1-constant
# 2-linear to regain distance
# 3-linear to shorten distance
# 4-parabolic (constant decel)
max_runaway_speed = -2. # no slower than 2m/s over the lead
# interpolate the lookups to find the slopes for a given lead speed
l_slope = interp(v_lead, _L_SLOPE_BP, _L_SLOPE_V)
p_slope = interp(v_lead, _P_SLOPE_BP, _P_SLOPE_V)
# this is where parabola and linear curves are tangents
x_linear_to_parabola = p_slope / l_slope**2
# parabola offset to have the parabola being tangent to the linear curve
x_parabola_offset = p_slope / (2 * l_slope**2)
if d_lead < d_des:
# calculate v_rel_des on the line that connects 0m at max_runaway_speed to d_des
v_rel_des_1 = (- max_runaway_speed) / d_des * (d_lead - d_des)
# calculate v_rel_des on one third of the linear slope
v_rel_des_2 = (d_lead - d_des) * l_slope / 3.
# take the min of the 2 above
v_rel_des = min(v_rel_des_1, v_rel_des_2)
v_rel_des = max(v_rel_des, max_runaway_speed)
elif d_lead < d_des + x_linear_to_parabola:
v_rel_des = (d_lead - d_des) * l_slope
v_rel_des = max(v_rel_des, max_runaway_speed)
else:
v_rel_des = math.sqrt(2 * (d_lead - d_des - x_parabola_offset) * p_slope)
# compute desired speed
v_target = v_rel_des + v_lead
# compute v_coast: above this speed we want to coast
t_lookahead = 1. # how far in time we consider a_lead to anticipate the coast region
v_coast_shift = max(a_lead * t_lookahead, - v_lead) # don't consider projections that would make v_lead<0
v_coast = (v_lead + v_target)/2 + v_coast_shift # no accel allowed above this line
v_coast = min(v_coast, v_target)
return v_target, v_coast
def calc_critical_decel(d_lead, v_rel, d_offset, v_offset):
# this function computes the required decel to avoid crashing, given safety offsets
a_critical = - max(0., v_rel + v_offset)**2/max(2*(d_lead - d_offset), 0.5)
return a_critical
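# Worked example with the offsets used later in this file (d_offset = 1 m,
# v_offset = 1 m/s): closing at v_rel = 5 m/s with d_lead = 20 m gives
#   a_critical = -(5 + 1)**2 / (2 * (20 - 1)) = -36 / 38 ~= -0.95 m/s^2.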
# maximum acceleration adjustment
_A_CORR_BY_SPEED_V = [0.4, 0.4, 0]
# speeds
_A_CORR_BY_SPEED_BP = [0., 5., 20.]
def calc_positive_accel_limit(d_lead, d_des, v_ego, v_rel, v_ref, v_rel_ref, v_coast, v_target, a_lead_contr, a_max):
a_coast_min = -1.0 # never coast faster then -1m/s^2
# coasting behavior above v_coast. Forcing a_max to be negative will force the pid_speed to decrease,
# regardless v_target
if v_ref > min(v_coast, v_target):
# for smooth coast we can be aggressive and target a point where the car would actually crash
v_offset_coast = 0.
d_offset_coast = d_des/2. - 4.
# acceleration value to smoothly coast until we hit v_target
if d_lead > d_offset_coast + 0.1:
a_coast = calc_critical_decel(d_lead, v_rel_ref, d_offset_coast, v_offset_coast)
# if lead is decelerating, then offset the coast decel
a_coast += a_lead_contr
a_max = max(a_coast, a_coast_min)
else:
a_max = a_coast_min
else:
# same as cruise accel, but add a small correction based on lead acceleration at low speeds
# when lead car accelerates faster, we can do the same, and vice versa
a_max = a_max + interp(v_ego, _A_CORR_BY_SPEED_BP, _A_CORR_BY_SPEED_V) \
* clip(-v_rel / 4., -.5, 1)
return a_max
# arbitrary limits to avoid too high accel being computed
_A_SAT = [-10., 5.]
# do not consider a_lead at 0m/s, fully consider it at 10m/s
_A_LEAD_LOW_SPEED_V = [0., 1.]
# speed break points
_A_LEAD_LOW_SPEED_BP = [0., 10.]
# add a small offset to the desired decel, just for safety margin
_DECEL_OFFSET_V = [-0.3, -0.5, -0.5, -0.4, -0.3]
# speed bp: different offset based on the likelihood that lead decels abruptly
_DECEL_OFFSET_BP = [0., 4., 15., 30, 40.]
def calc_acc_accel_limits(d_lead, d_des, v_ego, v_pid, v_lead, v_rel, a_lead,
v_target, v_coast, a_target, a_pcm):
#*** compute max accel ***
# v_rel is now your velocity in lead car frame
v_rel = -v_rel # this simplifies things when thinking in the d_rel-v_rel diagram
v_rel_pid = v_pid - v_lead
# this is how much lead accel we consider in assigning the desired decel
a_lead_contr = a_lead * interp(v_lead, _A_LEAD_LOW_SPEED_BP,
_A_LEAD_LOW_SPEED_V) * 0.8
# first call of calc_positive_accel_limit is used to shape v_pid
a_target[1] = calc_positive_accel_limit(d_lead, d_des, v_ego, v_rel, v_pid,
v_rel_pid, v_coast, v_target,
a_lead_contr, a_target[1])
# second call of calc_positive_accel_limit is used to limit the pcm throttle
# control (only useful when we don't control throttle directly)
a_pcm = calc_positive_accel_limit(d_lead, d_des, v_ego, v_rel, v_ego,
v_rel, v_coast, v_target,
a_lead_contr, a_pcm)
#*** compute max decel ***
v_offset = 1. # assume the car is 1m/s slower
d_offset = 1. # assume the distance is 1m lower
if v_target - v_ego > 0.5:
pass # acc target speed is above vehicle speed, so we can use the cruise limits
elif d_lead > d_offset + 0.01: # add small value to avoid division by zero
# compute needed accel to get to 1m distance with -1m/s rel speed
decel_offset = interp(v_lead, _DECEL_OFFSET_BP, _DECEL_OFFSET_V)
critical_decel = calc_critical_decel(d_lead, v_rel, d_offset, v_offset)
a_target[0] = min(decel_offset + critical_decel + a_lead_contr,
a_target[0])
else:
a_target[0] = _A_SAT[0]
# a_min can't be higher than a_max
a_target[0] = min(a_target[0], a_target[1])
# final check on limits
a_target = np.clip(a_target, _A_SAT[0], _A_SAT[1])
a_target = a_target.tolist()
return a_target, a_pcm
def calc_jerk_factor(d_lead, v_rel):
# we don't have an explicit jerk limit, so this function calculates a factor
# that is used by the PID controller to scale the gains. Not the cleanest solution
# but we need this for the demo.
# TODO: Calculate Kp and Ki directly in this function.
# the higher is the decel required to avoid a crash, the higher is the PI factor scaling
d_offset = 0.5
v_offset = 2.
a_offset = 1.
jerk_factor_max = 1.0 # can't increase Kp and Ki more than double.
if d_lead < d_offset + 0.1: # add small value to avoid division by zero
jerk_factor = jerk_factor_max
else:
a_critical = - calc_critical_decel(d_lead, -v_rel, d_offset, v_offset)
# increase Kp and Ki by 20% for every 1m/s2 of decel required above 1m/s2
jerk_factor = max(a_critical - a_offset, 0.)/5.
jerk_factor = min(jerk_factor, jerk_factor_max)
return jerk_factor
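# Hedged illustration: if the critical decel computed above works out to about
# 3.4 m/s^2, the factor becomes (3.4 - 1.0) / 5 = 0.48, i.e. the PI gains are
# scaled up by ~48%; the factor saturates at jerk_factor_max = 1.0 once the
# required decel exceeds 6 m/s^2 (1 + 5 * 1.0).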
MAX_SPEED_POSSIBLE = 55.
def compute_speed_with_leads(v_ego, angle_steers, v_pid, l1, l2, CP):
# drive limits
# TODO: Make lims function of speed (more aggressive at low speed).
a_lim = [-3., 1.5]
#*** set target speed pretty high, as lead hasn't been considered yet
v_target_lead = MAX_SPEED_POSSIBLE
#*** set accel limits as cruise accel/decel limits ***
a_target = calc_cruise_accel_limits(v_ego)
# Always 1 for now.
a_pcm = 1
#*** limit max accel in sharp turns
a_target, a_pcm = limit_accel_in_turns(v_ego, angle_steers, a_target, a_pcm, CP)
jerk_factor = 0.
if l1 is not None and l1.status:
#*** process noisy a_lead signal from radar processing ***
a_lead_p = process_a_lead(l1.aLeadK)
#*** compute desired distance ***
d_des = calc_desired_distance(l1.vLead)
#*** compute desired speed ***
v_target_lead, v_coast = calc_desired_speed(l1.dRel, d_des, l1.vLead, a_lead_p)
if l2 is not None
import os, sys
import numpy as np
from numpy.linalg import inv
import random
#from scipy.optimize import least_squares
from scipy import optimize
from scipy.optimize import least_squares
import helperFunctions
from helperFunctions import genEulerZXZMatrix, genEulerAngles, minimizeReprojection, points3DwrtT0, minimizeReprojectionBA, updateGlobalFeatures, fastDetector, LKTpointsT2, checkDisparity, RansacDetector
import cv2
if __name__ == "__main__":
startFrame = 0
endFrame = 1
NumOfCamChange = endFrame - startFrame
datapath = '/home/inna/SLAM_projects/KITTI_dataset/dataset/sequences/00/'  # '{0:02d}'.format(sequence) pads single-digit sequence numbers with a leading zero
calibFileName = datapath + '/calib.txt' # calibration data has to be in the same folder, where each specific sequence is
calibFile = open(calibFileName, 'r').readlines()
P0Vals = calibFile[0].split()
ProjL = np.zeros((3,4))
for row in range(3):
for column in range(4):
ProjL[row, column] = float(P0Vals[row*4 + column + 1])
P1Vals = calibFile[1].split()
ProjR = np.zeros((3,4))
for row in range(3):
for column in range(4):
ProjR[row, column] = float(P1Vals[row*4 + column + 1])
leftImagePath = datapath + '/image_0/'
rightImagePath = datapath + '/image_1/'
fpPoseOut = open('svoPoseOut_Clique.txt', 'w')
outtxt = ''
poseFile = datapath + '00.txt'
fpPoseFile = open(poseFile, 'r')
groundTruthTraj = fpPoseFile.readlines()
globalFeatures = []
PmatrSeq = np.zeros((NumOfCamChange,4,4))
camPos6DVector = np.zeros((NumOfCamChange,6))
#np.fill_diagonal(PmatrSeq[0,:,:],1) ==> not needed # initialization of the camera position array
#within a sequence
canvasH = 1200
canvasW = 1200
traj = np.zeros((canvasH,canvasW,3), dtype=np.uint8) # create a black image
for frm in range(NumOfCamChange):
imgPath = leftImagePath + '{0:06d}'.format(startFrame + frm) + '.png';
ImT1_L = cv2.imread(imgPath, 0) #0 flag returns a grayscale image
imgPath = rightImagePath + '{0:06d}'.format(startFrame + frm) + '.png';
ImT1_R = cv2.imread(imgPath, 0)
imgPath = leftImagePath + '{0:06d}'.format(startFrame + frm + 1) + '.png';
ImT2_L = cv2.imread(imgPath, 0)
imgPath = rightImagePath + '{0:06d}'.format(startFrame + frm + 1) + '.png';
ImT2_R = cv2.imread(imgPath, 0)
block = 11
# empirical values for P1, P2 as suggested in the OpenCV documentation
P1 = block * block * 8
P2 = block * block * 32
disparityEngine = cv2.StereoSGBM_create(minDisparity=0,numDisparities=32, blockSize=block, P1=P1, P2=P2)
ImT1_disparity = disparityEngine.compute(ImT1_L, ImT1_R).astype(np.float32)
#cv2.imwrite('disparity.png', ImT1_disparity)
ImT1_disparityA = np.divide(ImT1_disparity, 16.0)
ImT2_disparity = disparityEngine.compute(ImT2_L, ImT2_R).astype(np.float32)
ImT2_disparityA = np.divide(ImT2_disparity, 16.0)
# FAST corner detector: detect corner features in image T1.
# Using KLT optical flow, those points will be tracked into T2.
trackPoints1 = fastDetector(ImT1_L) # using "featureEngine.detect(ImT1_L, None)" instead would detect more keypoints
# Lucas-Kanade optical flow:
trackPoints1_KLT_L, trackPoints2_KLT_L = LKTpointsT2(trackPoints1, ImT1_L, ImT2_L)
# saveDebugImg(ImT1_L, frm, 'trackedPt', trackPoints1_KLT, color=(0,255,255), postTag='0')
# saveDebugImg(ImT2_L, frm, 'trackedPt', trackPoints2_KLT, color=(0,255,0), postTag='1')
trackPoints1_KLT_L_3d, trackPoints1_KLT_R_3d, trackPoints2_KLT_L_3d = checkDisparity (trackPoints1_KLT_L,
trackPoints2_KLT_L, ImT1_disparityA, ImT2_disparityA)
# 3D point cloud triangulation
numPoints = trackPoints1_KLT_L_3d.shape[0]
d3dPointsT1 = helperFunctions.generate3DPoints(
trackPoints1_KLT_L_3d, trackPoints1_KLT_R_3d, ProjL, ProjR)
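# Minimal sketch of the stereo geometry behind generate3DPoints (the real helper lives in
# helperFunctions and may instead triangulate linearly from both projection matrices).
# For rectified KITTI pairs with disparity d = xL - xR and baseline B (ProjR[0,3] = -fx*B):
#   Z = fx * B / d,   X = (xL - cx) * Z / fx,   Y = (yL - cy) * Z / fx
def example_triangulate_rectified(pts_left, pts_right, ProjL, ProjR):
    fx, cx, cy = ProjL[0, 0], ProjL[0, 2], ProjL[1, 2]
    baseline = -(ProjR[0, 3] - ProjL[0, 3]) / fx  # stereo baseline in metres
    disparity = pts_left[:, 0] - pts_right[:, 0]
    Z = fx * baseline / disparity
    X = (pts_left[:, 0] - cx) * Z / fx
    Y = (pts_left[:, 1] - cy) * Z / fx
    return np.column_stack((X, Y, Z))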
# NOTE:
# we do not use information from 3d points in T2
# d3dPointsT2 = helperFunctions.generate3DPoints(
# trackPoints2_KLT_L_3d, trackPoints2_KLT_R_3d, ProjL, ProjR)
# RANSAC:
rotation, translation, optimalInliers = RansacDetector(numPoints, trackPoints2_KLT_L_3d,
d3dPointsT1, ProjL)
# form camera pose matrix w.r.t. T-1
RotAndTrans = np.hstack((rotation, translation))
tempCamPos = np.vstack((RotAndTrans, [0,0,0,1]))
if frm == 0:
tracked3D = points3DwrtT0(d3dPointsT1, np.identity(4)) # the reference frame is frame #0,
# so the identity matrix is used: converting feature coordinates from T0 into the T0
# reference frame is a no-op.
Pcur = tempCamPos
else:
tracked3D = points3DwrtT0(d3dPointsT1, PmatrSeq[frm-1,:,:])# return 3D coords, NO "1"
Pcur = np.matmul(tempCamPos, PmatrSeq[frm-1,:,:])
PmatrSeq[frm,:,:] = Pcur# camera pose matrices for each frame w.r.t. frame T0
camPos6DVector[frm,:] = np.hstack( (genEulerAngles(Pcur[0:3,0:3]), Pcur[0:3,3]) ) # 6D w.r.t. T0
# track features from frame to frame
# OUTPUT:
globalFeatures = updateGlobalFeatures(globalFeatures,
trackPoints1_KLT_L_3d[optimalInliers],
trackPoints2_KLT_L_3d[optimalInliers],
tracked3D[optimalInliers]) # "tracked3D" - 3D coords only, without "1"
# FAST features as an output of RANSAC algorithm:
ftDebug = ImT1_L
for i in optimalInliers:
cv2.circle(ftDebug, (trackPoints2_KLT_L_3d[i,0],
trackPoints2_KLT_L_3d[i,1]), radius = 1, color = (255,0,0))
cv2.imwrite('ftDebug.png', ftDebug)
matList = RotAndTrans.tolist() # convert matrix to list
outtxt = '' # reset per frame so only this frame's pose row is written (otherwise every previous row would be re-written each frame)
for val in matList:
for v in val:
outtxt = outtxt + '{0:06e}'.format(v) + ' '
outtxt = outtxt.rstrip()
outtxt = outtxt + '\n'
fpPoseOut.write(outtxt)
# Plot camera positions:
canvasWCorr = 290
canvasHCorr = 200
grndPose = groundTruthTraj[frm].strip().split()
scaleFactor = 100
grndX = int(scaleFactor * float(grndPose[3])) + canvasWCorr
grndY = int(scaleFactor * float(grndPose[11])) + canvasHCorr
print('grndx=',grndX, 'grndy=',grndY)
cv2.circle(traj, (grndX,grndY), 1, (0,0,255), 2)
print('frm=', frm)
if frm == 0:
RANSACCameraX = 0.
RANSACCameraY = 0.
RANSACCameraZ = 0.
else:
invPose = np.linalg.inv(PmatrSeq[frm-1])
RANSACCameraX = invPose[0,3]
RANSACCameraY = invPose[1,3]
RANSACCameraZ = invPose[2,3]
drawRans_x = int(scaleFactor * RANSACCameraX) + canvasWCorr
drawRans_y = int(scaleFactor * RANSACCameraZ) + canvasHCorr
text = "Coordinates for frame %d: x=%2fm y=%2fm z=%2fm" \
%(frm, RANSACCameraX, RANSACCameraY, RANSACCameraZ)
cv2.putText(traj, text, (20, 40+20*frm), cv2.FONT_HERSHEY_PLAIN, 1,
(255,255,255), 1, 8)
cv2.circle(traj, (drawRans_x, drawRans_y), 1, (255, 0, 0), 2)
print('The pose matrices after RANSAC:\n', PmatrSeq)
# Extract data from "tracked3D" for BA:
GlobalFeaturesSeq = np.zeros((1, NumOfCamChange))
for j in range(NumOfCamChange):
GlobalFeaturesSeq[0,j] = len(globalFeatures[j])
TotnumFeatures = int(np.sum(GlobalFeaturesSeq))# sum features over all frames
points2DSeq = np.zeros((TotnumFeatures,4)) # reserve 1 column for IDs
points2DSeq[:, 2] = 1 # assign '1' to a third coordinate
#ID2DSeq = np.zeros((TotnumFeatures)) #store IDs of all feaures that will be used in BA
# array of unique features:
points3DUnique = np.zeros((globalFeatures[-1][-1][-1], 4)) # 3coord + 4th column is ID number
# extract 2D, 3D coordinates:
counter = 0
counterUniq = 0
for j in range(NumOfCamChange):
GlobalFeaturesSeq[0,j] = len(globalFeatures[j])
if j == 0:
for t in range(len(globalFeatures[j])):
points2DSeq[counter,0:2] = globalFeatures[j][t][0]# 2D coords
points2DSeq[counter,3] = globalFeatures[j][t][2] # fill in ID
points3DUnique[counterUniq, 0:3] = globalFeatures[j][t][1]
points3DUnique[counterUniq, 3] = globalFeatures[j][t][2] # add ID of unique 3D coordinate
counterUniq += 1
counter += 1
else:
for t in range(len(globalFeatures[j])):
points2DSeq[counter,0:2] = globalFeatures[j][t][0]# 2D coords
points2DSeq[counter,3] = globalFeatures[j][t][2]# ID
counter += 1
if not np.isin(float(globalFeatures[j][t][2]), points3DUnique[:,3]): # IDs are stored in column 3;
# only add this feature's 3D point if its ID has not been recorded yet
points3DUnique[counterUniq, 0:3] = globalFeatures[j][t][1]
points3DUnique[counterUniq, 3] = globalFeatures[j][t][2] # add ID of unique 3D coordinate
counterUniq += 1
# Evaluate COST
# NEXT:
# BEFORE computing 'reprError' func:
# take out only part : 3*4, 4th column - 4-6th values of the vector;
# 2. translate camera rotation matrix into Euler angles
# -> create func
# 3. Stack everything into a 1D vector
# 4. In 'reprError' func:
# a. transform 6D array into Pmatrix
# b. add 4th coordinate " 1" into coordinates matrix
# c.
# 5. create sparsity matrix
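# Sketch of the Jacobian sparsity matrix mentioned in step 5 (not wired into the
# least_squares call below). Each 2D observation depends only on its camera's 6 pose
# parameters and its own point's 3 coordinates, following the standard scipy recipe;
# cam_indices / point_indices are hypothetical integer arrays giving, per observation,
# which camera and which unique 3D point it belongs to.
from scipy.sparse import lil_matrix
def example_ba_sparsity(n_cams, n_points, cam_indices, point_indices):
    m = 2 * cam_indices.size                  # two residuals per observation
    n = n_cams * 6 + n_points * 3             # all optimized parameters
    A = lil_matrix((m, n), dtype=int)
    obs = np.arange(cam_indices.size)
    for s in range(6):
        A[2 * obs, cam_indices * 6 + s] = 1
        A[2 * obs + 1, cam_indices * 6 + s] = 1
    for s in range(3):
        A[2 * obs, n_cams * 6 + point_indices * 3 + s] = 1
        A[2 * obs + 1, n_cams * 6 + point_indices * 3 + s] = 1
    return A
# Such a matrix could be passed to least_squares via jac_sparsity=... to speed up the 'trf' solver.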
# convert vectors of camera positions in each frame and features 3D coordinates into 1D array:
Cam6Dvecs = np.reshape(camPos6DVector, NumOfCamChange*6) # we stack row by row in a vector
point3Dto1Dvect = np.reshape(points3DUnique[:,0:3], points3DUnique.shape[0]*3)# 3D only
# Merge 2 1D vectors into one:
PosCamandPoint1DArray = np.hstack((Cam6Dvecs, point3Dto1Dvect))# merge coordinates of camera positions and
# 3D coordinates of features
guess = PosCamandPoint1DArray
# use features' IDs as well:
reprError = minimizeReprojectionBA(PosCamandPoint1DArray, points3DUnique[:, 3], # column index 3 -> IDs
GlobalFeaturesSeq,
ProjL, points2DSeq)
print("tot_error_Ransac=", np.sum(np.square(reprError)))
opt_Cam3DpointPos = least_squares(minimizeReprojectionBA, guess, #x_scale ='jac',
method='trf', max_nfev=2, verbose = 0,
args=(points3DUnique[:, 3], GlobalFeaturesSeq,
ProjL, points2DSeq))
reprErrorBA = minimizeReprojectionBA(opt_Cam3DpointPos.x, points3DUnique[:, 3],
GlobalFeaturesSeq,
ProjL, points2DSeq)
print("tot_error_BA=", np.sum(np.square(reprErrorBA)))
# Note: cost function can be retrieved using
# opt_Cam3DpointPos.cost, but that value is half as large,
# because scipy defines the cost function with a 1/2 pre-factor.
# define this as a function and use it in the reprojection error evaluation
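# Sketch of the helper the note above asks for: scipy's least_squares reports
# cost = 0.5 * sum(residuals**2), so the totals printed above should equal
# roughly 2 * opt_Cam3DpointPos.cost.
def total_squared_error(residuals):
    return np.sum(np.square(residuals))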
# From 1D array to 3D array of coordinates and 6D vect of camera pos:
point3DPosBA = opt_Cam3DpointPos.x[-points3DUnique.shape[0]*3:] # take out data, related to
# 3D positions of feature points
point3DPosBA = point3DPosBA.reshape(points3DUnique.shape[0],3) # convert from 1D to 2D array
# of 3Dcoords
CamPos6dvectBA = opt_Cam3DpointPos.x[0:NumOfCamChange*6] # extract the camera pose parameters
# (6 per frame) for all NumOfCamChange frames
CamPos6dvectBA = CamPos6dvectBA.reshape(NumOfCamChange,6)
# from 6D cam pose vector to cam Pos matrix:
CamPos3dBA = np.zeros((NumOfCamChange,4,4))
CamPos3dBA[:,0:3, 3] = CamPos6dvectBA[:, 3:6] # store translation data
for k in range(NumOfCamChange):
    # transform Euler angles back into a rotation matrix (assumes genEulerZXZMatrix accepts the three angles produced by genEulerAngles above)
    CamPos3dBA[k, 0:3, 0:3] = genEulerZXZMatrix(CamPos6dvectBA[k,0], CamPos6dvectBA[k,1], CamPos6dvectBA[k,2])
CamPos3dBA[:, 3, 3] = 1.0 # homogeneous bottom-right entry
# data on camera positions, after BA:
| |
= '> '
debug = False
echo = False
editor = os.environ.get('EDITOR')
if not editor:
if sys.platform[:3] == 'win':
editor = 'notepad'
else:
# Favor command-line editors first so we don't leave the terminal to edit
for editor in ['vim', 'vi', 'emacs', 'nano', 'pico', 'gedit', 'kate', 'subl', 'geany', 'atom']:
if utils.which(editor):
break
feedback_to_output = False # Do not include nonessentials in >, | output by default (things like timing)
locals_in_py = False
quiet = False # Do not suppress nonessential output
timing = False # Prints elapsed time for each command
# To make an attribute settable with the "do_set" command, add it to this ...
# This starts out as a dictionary but gets converted to an OrderedDict sorted alphabetically by key
settable = {'colors': 'Colorized output (*nix only)',
'continuation_prompt': 'On 2nd+ line of input',
'debug': 'Show full error stack on error',
'echo': 'Echo command issued into output',
'editor': 'Program used by ``edit``',
'feedback_to_output': 'Include nonessentials in `|`, `>` results',
'locals_in_py': 'Allow access to your application in py via self',
'prompt': 'The prompt issued to solicit input',
'quiet': "Don't print nonessential feedback",
'timing': 'Report execution times'}
def __init__(self, completekey: str='tab', stdin=None, stdout=None, persistent_history_file: str='',
persistent_history_length: int=1000, startup_script: Optional[str]=None, use_ipython: bool=False,
transcript_files: Optional[List[str]]=None) -> None:
"""An easy but powerful framework for writing line-oriented command interpreters, extends Python's cmd package.
:param completekey: (optional) readline name of a completion key, default to Tab
:param stdin: (optional) alternate input file object, if not specified, sys.stdin is used
:param stdout: (optional) alternate output file object, if not specified, sys.stdout is used
:param persistent_history_file: (optional) file path to load a persistent readline history from
:param persistent_history_length: (optional) max number of lines which will be written to the history file
:param startup_script: (optional) file path to a script to load and execute at startup
:param use_ipython: (optional) should the "ipy" command be included for an embedded IPython shell
:param transcript_files: (optional) allows running transcript tests when allow_cli_args is False
"""
# If use_ipython is False, make sure the do_ipy() method doesn't exit
if not use_ipython:
try:
del Cmd.do_ipy
except AttributeError:
pass
# If persistent readline history is enabled, then read history from file and register to write to file at exit
if persistent_history_file and rl_type != RlType.NONE:
persistent_history_file = os.path.expanduser(persistent_history_file)
try:
readline.read_history_file(persistent_history_file)
# default history len is -1 (infinite), which may grow unruly
readline.set_history_length(persistent_history_length)
except FileNotFoundError:
pass
import atexit
atexit.register(readline.write_history_file, persistent_history_file)
# Call super class constructor
super().__init__(completekey=completekey, stdin=stdin, stdout=stdout)
# Commands to exclude from the help menu and tab completion
self.hidden_commands = ['eof', 'eos', '_relative_load']
# Commands to exclude from the history command
self.exclude_from_history = '''history edit eof eos'''.split()
self._finalize_app_parameters()
self.initial_stdout = sys.stdout
self.history = History()
self.pystate = {}
self.py_history = []
self.pyscript_name = 'app'
self.keywords = self.reserved_words + [fname[3:] for fname in dir(self) if fname.startswith('do_')]
self.statement_parser = StatementParser(
allow_redirection=self.allow_redirection,
terminators=self.terminators,
multiline_commands=self.multiline_commands,
aliases=self.aliases,
shortcuts=self.shortcuts,
)
self._transcript_files = transcript_files
# Used to enable the ability for a Python script to quit the application
self._should_quit = False
# True if running inside a Python script or interactive console, False otherwise
self._in_py = False
# Stores results from the last command run to enable usage of results in a Python script or interactive console
# Built-in commands don't make use of this. It is purely there for user-defined commands and convenience.
self._last_result = None
# Used to save state during a redirection
self.kept_state = None
self.kept_sys = None
# Codes used for exit conditions
self._STOP_AND_EXIT = True # cmd convention
self._colorcodes = {'bold': {True: '\x1b[1m', False: '\x1b[22m'},
'cyan': {True: '\x1b[36m', False: '\x1b[39m'},
'blue': {True: '\x1b[34m', False: '\x1b[39m'},
'red': {True: '\x1b[31m', False: '\x1b[39m'},
'magenta': {True: '\x1b[35m', False: '\x1b[39m'},
'green': {True: '\x1b[32m', False: '\x1b[39m'},
'underline': {True: '\x1b[4m', False: '\x1b[24m'},
'yellow': {True: '\x1b[33m', False: '\x1b[39m'}}
# Used by the load command to store the current script dir as a LIFO queue to support the _relative_load command
self._script_dir = []
# Used when piping command output to a shell command
self.pipe_proc = None
# Used by complete() for readline tab completion
self.completion_matches = []
# Used to keep track of whether we are redirecting or piping output
self.redirecting = False
# If this string is non-empty, then this warning message will print if a broken pipe error occurs while printing
self.broken_pipe_warning = ''
# If a startup script is provided, then add it in the queue to load
if startup_script is not None:
startup_script = os.path.expanduser(startup_script)
if os.path.exists(startup_script) and os.path.getsize(startup_script) > 0:
self.cmdqueue.append("load '{}'".format(startup_script))
############################################################################################################
# The following variables are used by tab-completion functions. They are reset each time complete() is run
# in reset_completion_defaults() and it is up to completer functions to set them before returning results.
############################################################################################################
# If true and a single match is returned to complete(), then a space will be appended
# if the match appears at the end of the line
self.allow_appended_space = True
# If true and a single match is returned to complete(), then a closing quote
# will be added if there is an unmatched opening quote
self.allow_closing_quote = True
# An optional header that prints above the tab-completion suggestions
self.completion_header = ''
# Use this list if you are completing strings that contain a common delimiter and you only want to
# display the final portion of the matches as the tab-completion suggestions. The full matches
# still must be returned from your completer function. For an example, look at path_complete()
# which uses this to show only the basename of paths as the suggestions. delimiter_complete() also
# populates this list.
self.display_matches = []
# Used by functions like path_complete() and delimiter_complete() to properly
# quote matches that are completed in a delimited fashion
self.matches_delimited = False
# Set the pager(s) for use with the ppaged() method for displaying output using a pager
if sys.platform.startswith('win'):
self.pager = self.pager_chop = 'more'
else:
# Here is the meaning of the various flags we are using with the less command:
# -S causes lines longer than the screen width to be chopped (truncated) rather than wrapped
# -R causes ANSI "color" escape sequences to be output in raw form (i.e. colors are displayed)
# -X disables sending the termcap initialization and deinitialization strings to the terminal
# -F causes less to automatically exit if the entire file can be displayed on the first screen
self.pager = 'less -RXF'
self.pager_chop = 'less -SRXF'
# This boolean flag determines whether or not the cmd2 application can interact with the clipboard
self.can_clip = can_clip
# ----- Methods related to presenting output to the user -----
@property
def visible_prompt(self) -> str:
"""Read-only property to get the visible prompt with any ANSI escape codes stripped.
Used by transcript testing to make it easier and more reliable when users are doing things like coloring the
prompt using ANSI color codes.
:return: prompt stripped of any ANSI escape codes
"""
return utils.strip_ansi(self.prompt)
def _finalize_app_parameters(self) -> None:
"""Finalize the shortcuts and settable parameters."""
# noinspection PyUnresolvedReferences
self.shortcuts = sorted(self.shortcuts.items(), reverse=True)
# Make sure settable parameters are sorted alphabetically by key
self.settable = collections.OrderedDict(sorted(self.settable.items(), key=lambda t: t[0]))
def poutput(self, msg: str, end: str='\n') -> None:
"""Convenient shortcut for self.stdout.write(); by default adds newline to end if not already present.
Also handles BrokenPipeError exceptions for when a command's output has been piped to another process and
that process terminates before the cmd2 command is finished executing.
:param msg: message to print to current stdout - anything convertible to a str with '{}'.format() is OK
:param end: string appended after the end of the message if not already present, default a newline
"""
if msg is not None and msg != '':
try:
msg_str = '{}'.format(msg)
self.stdout.write(msg_str)
if not msg_str.endswith(end):
self.stdout.write(end)
except BrokenPipeError:
# This occurs if a command's output is being piped to another process and that process | |
minVal = -Globals.Gameplay.ToonVelMax['turning']
maxVal = Globals.Gameplay.ToonVelMax['turning']
if not leftPressed and not rightPressed or self.controlVelocity[0] > maxVal or self.controlVelocity[0] < minVal:
x = self.dampenVelocityVal(self.controlVelocity[0], 'turning', 'turning', minVal, maxVal, dt)
self.controlVelocity[0] = x
minVal = -Globals.Gameplay.ToonVelMax['backward']
maxVal = Globals.Gameplay.ToonVelMax['forward']
if not upPressed and not downPressed or self.controlVelocity[1] > maxVal or self.controlVelocity[1] < minVal:
y = self.dampenVelocityVal(self.controlVelocity[1], 'backward', 'forward', minVal, maxVal, dt)
self.controlVelocity[1] = y
if self.isFuelLeft():
minVal = -Globals.Gameplay.ToonVelMax['fall']
else:
minVal = -Globals.Gameplay.ToonVelMax['fallNoFuel']
maxVal = Globals.Gameplay.ToonVelMax['boost']
if self.controlVelocity[2] > minVal:
if (not self._inputMgr.arrowKeys.jumpPressed() or not self.isFuelLeft()) and not self.isToonOnFloor:
self.controlVelocity[2] -= Globals.Gameplay.ToonAcceleration['fall'] * dt
if self.controlVelocity[2] < 0.0 and self.isToonOnFloor:
self.controlVelocity[2] = 0.0
minVal = -Globals.Gameplay.ToonVelMax['turning']
maxVal = Globals.Gameplay.ToonVelMax['turning']
self.controlVelocity[0] = clamp(self.controlVelocity[0], minVal, maxVal)
minVal = -Globals.Gameplay.ToonVelMax['backward']
maxVal = Globals.Gameplay.ToonVelMax['forward']
self.controlVelocity[1] = clamp(self.controlVelocity[1], minVal, maxVal)
if self.isFuelLeft():
minVal = -Globals.Gameplay.ToonVelMax['fall']
else:
minVal = -Globals.Gameplay.ToonVelMax['fallNoFuel']
maxVal = Globals.Gameplay.ToonVelMax['boost']
self.controlVelocity[2] = clamp(self.controlVelocity[2], minVal, maxVal)
def updateFanVelocity(self, dt):
fanHeight = Globals.Gameplay.FanCollisionTubeHeight
minPower = Globals.Gameplay.FanMinPower
maxPower = Globals.Gameplay.FanMaxPower
powerRange = maxPower - minPower
for fan in self.activeFans:
blowVec = fan.getBlowDirection()
blowVec *= Globals.Gameplay.ToonAcceleration['fan'] * dt
if Globals.Gameplay.UseVariableFanPower:
distance = fan.model.getDistance(self.toon)
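# power falls off linearly along the fan's collision tube: maximum right at the fan
# (distance 0), minimum at the top of the tube (distance == fanHeight), clamped below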
power = math.fabs(distance / fanHeight - 1.0) * powerRange + minPower
power = clamp(power, minPower, maxPower)
blowVec *= power
fanVelocity = self.fanIndex2ToonVelocity[fan.index]
fanVelocity += blowVec
removeList = []
for fan in self.fansStillHavingEffect:
if fan not in self.activeFans:
blowVec = fan.getBlowDirection()
blowVec *= Globals.Gameplay.ToonDeceleration['fan'] * dt
fanVelocity = Vec3(self.fanIndex2ToonVelocity[fan.index])
lastLen = fanVelocity.length()
fanVelocity -= blowVec
if fanVelocity.length() > lastLen:
removeList.append(fan)
else:
self.fanIndex2ToonVelocity[fan.index] = fanVelocity
for fan in removeList:
self.fansStillHavingEffect.remove(fan)
del self.fanIndex2ToonVelocity[fan.index]
self.fanVelocity = Vec3(0.0, 0.0, 0.0)
for fan in self.fansStillHavingEffect:
self.fanVelocity += self.fanIndex2ToonVelocity[fan.index]
minVal = -Globals.Gameplay.ToonVelMax['fan']
maxVal = Globals.Gameplay.ToonVelMax['fan']
self.fanVelocity[0] = clamp(self.fanVelocity[0], minVal, maxVal)
self.fanVelocity[1] = clamp(self.fanVelocity[1], minVal, maxVal)
self.fanVelocity[2] = clamp(self.fanVelocity[2], minVal, maxVal)
def dampenVelocityVal(self, velocityVal, typeNeg, typePos, minVal, maxVal, dt):
if velocityVal > 0.0:
velocityVal -= Globals.Gameplay.ToonDeceleration[typePos] * dt
velocityVal = clamp(velocityVal, 0.0, maxVal)
elif velocityVal < 0.0:
velocityVal += Globals.Gameplay.ToonDeceleration[typeNeg] * dt
velocityVal = clamp(velocityVal, minVal, 0.0)
return velocityVal
def allowFuelDeath(self):
if Globals.Gameplay.DoesToonDieWithFuel:
return True
else:
return not self.isFuelLeft()
def updateToonPos(self, dt):
toonWorldY = self.toon.getY(render)
if not self.hasPickedUpFirstPropeller:
if toonWorldY > -7.6:
self.toon.setY(-7.6)
elif toonWorldY < -35.0:
self.toon.setY(-35.0)
return
self.velocity = self.controlVelocity + self.fanVelocity
vel = self.velocity * dt
self.toon.setPos(self.toon, vel[0], vel[1], vel[2])
toonPos = self.toon.getPos()
if Globals.Dev.DisableDeath:
pass
elif toonPos[2] < 0.0 and self.state in ['FreeFly', 'FlyingUp'] and self.allowFuelDeath():
self.postSpawnState = 'Running'
self.game.distGame.b_toonDied(self.toon.doId)
if toonPos[2] > self._levelBounds[2][1]:
self.controlVelocity[2] = 0.0
self.fanVelocity[2] = 0.0
toonPos = Vec3(clamp(toonPos[0], self._levelBounds[0][0], self._levelBounds[0][1]), clamp(toonPos[1], self._levelBounds[1][0], self._levelBounds[1][1]), clamp(toonPos[2], self._levelBounds[2][0], self._levelBounds[2][1]))
if self.isHeadInCeiling and toonPos[2] > self.surfacePoint[2]:
toonPos[2] = self.surfacePoint[2]
self.toon.setPos(toonPos)
if self.toon.getY(render) < -10:
self.toon.setY(-10.0)
def printFanInfo(self, string):
if len(self.fanIndex2ToonVelocity) > 0:
self.notify.info('==AFTER %s==' % string)
self.notify.info('Fan velocity:%s' % self.fanVelocity)
if len(self.activeFans) > 0:
self.notify.info('%s' % self.activeFans)
if len(self.fanIndex2ToonVelocity) > 0:
self.notify.info('%s' % self.fanIndex2ToonVelocity)
if len(self.fansStillHavingEffect) > 0:
self.notify.info('%s' % self.fansStillHavingEffect)
def resetFuel(self):
self.setFuel(Globals.Gameplay.FuelNormalAmt)
def isFuelLeft(self):
return self.fuel > 0.0
def setFuel(self, fuel):
self.fuel = fuel
self._guiMgr.setFuel(fuel)
if self.fuel <= 0.0:
fuelState = Globals.Gameplay.FuelStates.FuelEmpty
elif self.fuel < Globals.Gameplay.FuelVeryLowAmt:
fuelState = Globals.Gameplay.FuelStates.FuelVeryLow
elif self.fuel < Globals.Gameplay.FuelLowAmt:
fuelState = Globals.Gameplay.FuelStates.FuelLow
else:
fuelState = Globals.Gameplay.FuelStates.FuelNormal
if fuelState > self.fuelState:
self.game.distGame.b_toonSetBlades(self.toon.doId, fuelState)
if fuelState < self.fuelState:
if self.state in ['FlyingUp', 'FreeFly', 'Running']:
self.game.distGame.b_toonBladeLost(self.toon.doId)
def resetBlades(self):
CogdoFlyingPlayer.resetBlades(self)
self._guiMgr.resetBlades()
def setBlades(self, fuelState):
CogdoFlyingPlayer.setBlades(self, fuelState)
self._guiMgr.setBlades(fuelState)
def bladeLost(self):
CogdoFlyingPlayer.bladeLost(self)
self._bladeBreakSfx.play(volume=0.35)
self._guiMgr.bladeLost()
def updateFuel(self, dt):
if Globals.Dev.InfiniteFuel:
self.setFuel(Globals.Gameplay.FuelNormalAmt)
elif self.state in Globals.Gameplay.DepleteFuelStates and self.fuel > 0.0:
self.setFuel(self.fuel - Globals.Gameplay.FuelBurnRate * dt)
elif self.fuel < 0.0:
self.setFuel(0.0)
def update(self, dt = 0.0):
self.instantaneousVelocity = (self.toon.getPos() - self.oldPos) / dt
self.oldPos = self.toon.getPos()
self.updateFuel(dt)
if self.isFlying():
self.updateToonFlyingState(dt)
if self.state in ['FreeFly', 'FlyingUp', 'Death']:
self.updateControlVelocity(dt)
self.updateFanVelocity(dt)
self.updateToonPos(dt)
self._cameraMgr.update(dt)
def isFlying(self):
if self.state in ['FreeFly', 'FlyingUp']:
return True
else:
return False
def pressedControlWhileRunning(self):
if self.isFuelLeft() and self.state == 'Running':
self.notify.debug('Pressed Control and have fuel')
self.request('FlyingUp')
else:
self.ignore('control')
self.ignore('lcontrol')
self.acceptOnce('control', self.pressedControlWhileRunning)
self.acceptOnce('lcontrol', self.pressedControlWhileRunning)
def setPropellerState(self, propState):
if not self.hasPickedUpFirstPropeller:
propState = CogdoFlyingLocalPlayer.PropStates.Off
if self.propState != propState:
oldState = self.propState
self.propState = propState
if self.propState == CogdoFlyingLocalPlayer.PropStates.Normal:
if not self.propellerSpinLerp.isPlaying():
self.propellerSpinLerp.loop()
self.setPropellerSpinRate(Globals.Gameplay.NormalPropSpeed)
self._guiMgr.setPropellerSpinRate(Globals.Gameplay.NormalPropSpeed)
self._loopPropellerSfx(playRate=0.7, volume=0.8)
elif self.propState == CogdoFlyingLocalPlayer.PropStates.Overdrive:
if not self.propellerSpinLerp.isPlaying():
self.propellerSpinLerp.loop()
self.setPropellerSpinRate(Globals.Gameplay.OverdrivePropSpeed)
self._guiMgr.setPropellerSpinRate(Globals.Gameplay.OverdrivePropSpeed)
self._loopPropellerSfx(playRate=1.1)
elif self.propState == CogdoFlyingLocalPlayer.PropStates.Off:
self.propellerSpinLerp.pause()
self._propellerSfx.stop()
def enterInactive(self):
CogdoFlyingLocalPlayer.notify.info("enter%s: '%s' -> '%s'" % (self.newState, self.oldState, self.newState))
self._inputMgr.disable()
self.setPropellerState(CogdoFlyingLocalPlayer.PropStates.Off)
self.shutdownFlyingBroadcast()
def filterInactive(self, request, args):
if request == self.state:
return None
else:
return self.defaultFilter(request, args)
return None
def exitInactive(self):
CogdoFlyingLocalPlayer.notify.debug("exit%s: '%s' -> '%s'" % (self.oldState, self.oldState, self.newState))
self._inputMgr.enable()
self.activateFlyingBroadcast()
def enterSpawn(self):
CogdoFlyingLocalPlayer.notify.info("enter%s: '%s' -> '%s'" % (self.newState, self.oldState, self.newState))
self.toon.b_setAnimState('Happy', 1.0)
self.setPropellerState(CogdoFlyingLocalPlayer.PropStates.Normal)
self.spawnInterval.start()
def filterSpawn(self, request, args):
if request == self.state:
return None
else:
return self.defaultFilter(request, args)
return None
def exitSpawn(self):
CogdoFlyingLocalPlayer.notify.debug("exit%s: '%s' -> '%s'" % (self.oldState, self.oldState, self.newState))
def enterFreeFly(self):
CogdoFlyingLocalPlayer.notify.info("enter%s: '%s' -> '%s'" % (self.newState, self.oldState, self.newState))
self.setPropellerState(CogdoFlyingLocalPlayer.PropStates.Normal)
if self.oldState in ['Running', 'HitWhileRunning']:
self.toon.jumpStart()
self.toon.setHpr(render, 0, 0, 0)
def filterFreeFly(self, request, args):
if request == self.state:
return None
else:
return self.defaultFilter(request, args)
return None
def exitFreeFly(self):
CogdoFlyingLocalPlayer.notify.debug("exit%s: '%s' -> '%s'" % (self.oldState, self.oldState, self.newState))
def enterFlyingUp(self):
CogdoFlyingLocalPlayer.notify.info("enter%s: '%s' -> '%s'" % (self.newState, self.oldState, self.newState))
self.setPropellerState(CogdoFlyingLocalPlayer.PropStates.Overdrive)
if self.oldState in ['Running']:
self.toon.jumpStart()
self.toon.setHpr(render, 0, 0, 0)
def filterFlyingUp(self, request, args):
if request == self.state:
return None
else:
return self.defaultFilter(request, args)
return None
def exitFlyingUp(self):
CogdoFlyingLocalPlayer.notify.debug("exit%s: '%s' -> '%s'" % (self.oldState, self.oldState, self.newState))
def enterHitWhileFlying(self, elapsedTime = 0.0):
CogdoFlyingLocalPlayer.notify.info("enter%s: '%s' -> '%s'" % (self.newState, self.oldState, self.newState))
self.setEnemyHitting(True)
self._toonHitSfx.play()
self.startHitFlyingToonInterval()
self.setPropellerState(CogdoFlyingLocalPlayer.PropStates.Normal)
def filterHitWhileFlying(self, request, args):
if request == self.state:
return None
else:
return self.defaultFilter(request, args)
return None
def exitHitWhileFlying(self):
CogdoFlyingLocalPlayer.notify.debug("exit%s: '%s' -> '%s'" % (self.oldState, self.oldState, self.newState))
self.enemyHitIval.clearToInitial()
self.coolDownAfterHitInterval.clearToInitial()
self.coolDownAfterHitInterval.start()
def enterInWhirlwind(self, elapsedTime = 0.0):
CogdoFlyingLocalPlayer.notify.info("enter%s: '%s' -> '%s'" % (self.newState, self.oldState, self.newState))
self._hitByWhirlwindSfx.play()
self.startHitByWhirlwindInterval()
self.setPropellerState(CogdoFlyingLocalPlayer.PropStates.Normal)
def filterInWhirlwind(self, request, args):
if request == self.state:
return None
else:
return self.defaultFilter(request, args)
return None
def exitInWhirlwind(self):
CogdoFlyingLocalPlayer.notify.debug("exit%s: '%s' -> '%s'" % (self.oldState, self.oldState, self.newState))
self.eventIval.clearToInitial()
def enterHitWhileRunning(self, elapsedTime = 0.0):
CogdoFlyingLocalPlayer.notify.info("enter%s: '%s' -> '%s'" % (self.newState, self.oldState, self.newState))
self.setEnemyHitting(True)
self._toonHitSfx.play()
self.toon.b_setAnimState('FallDown')
self.startHitRunningToonInterval()
self.setPropellerState(CogdoFlyingLocalPlayer.PropStates.Normal)
def filterHitWhileRunning(self, request, args):
if request == self.state:
return None
else:
return self.defaultFilter(request, args)
return None
def exitHitWhileRunning(self):
CogdoFlyingLocalPlayer.notify.debug("exit%s: '%s' -> '%s'" % (self.oldState, self.oldState, self.newState))
self.enemyHitIval.clearToInitial()
self.coolDownAfterHitInterval.clearToInitial()
self.coolDownAfterHitInterval.start()
def enterRunning(self):
CogdoFlyingLocalPlayer.notify.info("enter%s: '%s' -> '%s'" % (self.newState, self.oldState, self.newState))
self.toon.b_setAnimState('Happy', 1.0)
if self.oldState not in ['Spawn', 'HitWhileRunning', 'Inactive']:
self.toon.jumpHardLand()
self._collideSfx.play()
self.orthoWalk.start()
self.setPropellerState(CogdoFlyingLocalPlayer.PropStates.Normal)
self.ignore('control')
self.ignore('lcontrol')
self.acceptOnce('control', self.pressedControlWhileRunning)
self.acceptOnce('lcontrol', self.pressedControlWhileRunning)
def filterRunning(self, request, args):
if request == self.state:
return None
else:
return self.defaultFilter(request, args)
return None
def exitRunning(self):
CogdoFlyingLocalPlayer.notify.debug("exit%s: '%s' -> '%s'" % (self.oldState, self.oldState, self.newState))
self.orthoWalk.stop()
self.ignore('control')
self.ignore('lcontrol')
def enterOutOfTime(self):
CogdoFlyingLocalPlayer.notify.info("enter%s: '%s' -> '%s'" % (self.newState, self.oldState, self.newState))
if self.spawnInterval.isPlaying():
self.spawnInterval.clearToInitial()
self.ignoreAll()
self.introGuiSeq.clearToInitial()
self.setPropellerState(CogdoFlyingLocalPlayer.PropStates.Off)
if not Globals.Dev.NoLegalEagleAttacks:
for eagle in self.legalEaglesTargeting:
messenger.send(CogdoFlyingLegalEagle.RequestRemoveTargetEventName, [eagle.index])
taskMgr.remove('delayedLandOnPlatform')
taskMgr.remove('delayedLandOnWinPlatform')
self.outOfTimeInterval.start()
def filterOutOfTime(self, request, args):
if request == self.state:
return None
else:
return self.defaultFilter(request, args)
return None
def exitOutOfTime(self):
CogdoFlyingLocalPlayer.notify.debug("exit%s: '%s' -> '%s'" % (self.oldState, self.oldState, self.newState))
def enterDeath(self):
CogdoFlyingLocalPlayer.notify.info("enter%s: '%s' -> '%s'" % (self.newState, self.oldState, self.newState))
self.propellerSmoke.stop()
self.deathInterval.start()
self.toon.b_setAnimState('jumpAirborne', 1.0)
self.setPropellerState(CogdoFlyingLocalPlayer.PropStates.Off)
if not Globals.Dev.NoLegalEagleAttacks:
for eagle in self.legalEaglesTargeting:
messenger.send(CogdoFlyingLegalEagle.RequestRemoveTargetEventName, [eagle.index])
def filterDeath(self, request, args):
if request == self.state:
return None
else:
return self.defaultFilter(request, args)
return None
def exitDeath(self):
CogdoFlyingLocalPlayer.notify.debug("exit%s: '%s' -> '%s'" % (self.oldState, self.oldState, self.newState))
self.deathInterval.clearToInitial()
def enterWaitingForWin(self):
CogdoFlyingLocalPlayer.notify.info("enter%s: '%s' -> '%s'" % (self.newState, self.oldState, self.newState))
self.resetFuel()
self._guiMgr.hideRefuelGui()
self.waitingForWinSeq.start()
self.setPropellerState(CogdoFlyingLocalPlayer.PropStates.Normal)
if not Globals.Dev.NoLegalEagleAttacks:
self.game.forceClearLegalEagleInterestInToon(self.toon.doId)
def filterWaitingForWin(self, request, args):
if request == self.state:
return None
else:
return self.defaultFilter(request, args)
return None
def exitWaitingForWin(self):
CogdoFlyingLocalPlayer.notify.debug("exit%s: '%s' -> '%s'" % (self.oldState, self.oldState, self.newState))
self.waitingForWinSeq.finish()
self.waitingForWinInterval.clearToInitial()
def enterWin(self):
CogdoFlyingLocalPlayer.notify.info("enter%s: '%s' -> '%s'" % (self.newState, self.oldState, self.newState))
self._guiMgr.stopTimer()
self.winInterval.start()
self.setPropellerState(CogdoFlyingLocalPlayer.PropStates.Normal)
def filterWin(self, request, args):
if request == self.state:
return None
else:
return self.defaultFilter(request, args)
return None
def exitWin(self):
CogdoFlyingLocalPlayer.notify.debug("exit%s: '%s' -> '%s'" % (self.oldState, self.oldState, self.newState))
def _destroyEventIval(self):
if hasattr(self, 'eventIval'):
self.eventIval.clearToInitial()
del self.eventIval
def startEventIval(self, ival):
self._destroyEventIval()
self.eventIval = ival
self.eventIval.start()
def _destroyEnemyHitIval(self):
if hasattr(self, 'enemyHitIval'):
self.enemyHitIval.clearToInitial()
del self.enemyHitIval
def startEnemyHitIval(self, ival):
self._destroyEnemyHitIval()
self.enemyHitIval = ival
self.enemyHitIval.start()
def isEnemyHitting(self):
return self.legalEagleHitting
def setEnemyHitting(self, value):
self.legalEagleHitting = value
def shouldLegalEagleBeInFrame(self):
if not self.isLegalEagleTarget():
return False
else:
index = len(self.legalEaglesTargeting) - 1
eagle = self.legalEaglesTargeting[index]
return eagle.shouldBeInFrame()
def startHitRunningToonInterval(self):
| |
specific elements of XML files.
# Please note that this is for READ/WRITE/EXTERNALLY UPDATED access to
# XML files. If all you need is read-only access, see xmlcontroller in paramdb2.py
class synced(object):
# This class represents a paramdb2 entry that is sync'd with an element within one or more xmldoc's
# it functions as a controller for paramdb2
doclist=None # A list; elements are (xmldoc,xmlpath,ETxmlpath,logfunc)
# where xmldoc is the class xmldoc, once fully set up
# xmlpath is The path of the element within xmldoc
mergekwargs=None
# controller members
controlparam=None
id=None
state=None # see CONTROLLER_STATE defines in definition of param class
numpending=None
in_synchronize=None # are we in a synchronize right now
def __init__(self,controlparam,**kwargs):
self.controlparam=controlparam
self.id=id(self)
self.state=controlparam.CONTROLLER_STATE_QUIESCENT
self.numpending=0
self.doclist=[]
self.mergekwargs=kwargs
self.in_synchronize=False
loadgobject() # gobject needed for requestval
pass
def find_a_context_href(self,paramset=None):
# find a suitable context href for xml synchronization
# paramset is a (xmldocu,xmlpath,ETxmlpath,logfunc) tuple
doclist=copy.copy(self.doclist)
if paramset is not None:
doclist.append(paramset)
pass
# First look for anything with a filename set
for (xmldocu,xmlpath,ETxmlpath,logfunc) in doclist:
if xmldocu is not None and xmldocu.filehref is not None:
#sys.stderr.write("find_a_context_href(): %s\n" % (xmldocu.getcontexthref().absurl()))
return xmldocu.getcontexthref()
pass
# Now look for anything
for (xmldocu,xmlpath,ETxmlpath,logfunc) in doclist:
if xmldocu is not None and xmldocu.contexthref is not None:
#sys.stderr.write("find_a_context_href(): %s\n" % (xmldocu.getcontexthref().absurl()))
return xmldocu.getcontexthref()
pass
# import pdb as pythondb
# pythondb.set_trace()
# worst-case fallthrough
sys.stderr.write("xmldoc.find_a_context_href(): Falling through to \"./\"\n")
return dc_value.hrefvalue("./") # current directory!
# adddoc: Add a document that will have a synchronized element.
# xmlpath is the xpath to the element (which should already exist)
# logfunc is a function or method that will be called to
# log changes. It takes the log message as a mandatory parameter,
# then "item", "action", and "value" as optional parameters
# if xmlpath is None then ETxmlpath can be an ETXPath to locate the
# element instead.
# autocreate_parentxpath if not None indicates that we should autocreate a blank element, and gives the xpath of the parent element
# autocreate_tagname gives the tag to create if necessary, and autocreate_insertpos gives where to insert the new element
# (autocreate_insertpos=-1 means append at the end; otherwise it gives the position within the element)
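# Hypothetical usage sketch (the element path appears elsewhere in this module; the
# instance and log-function names are made up for illustration):
# syncedentry.adddoc(xmldocobj, "dc:summary/dc:dest",
#                    logfunc=mylogfunc,
#                    autocreate_parentxpath="dc:summary",
#                    autocreate_tagname="dc:dest")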
def adddoc(self,xmldocobj,xmlpath,ETxmlpath=None,logfunc=None,autocreate_parentxpath=None,autocreate_tagname=None,autocreate_insertpos=-1):
try :
retry=True
while retry:
retry=False
#if xmlpath=="dc:summary/dc:expnotes":
# import pdb as pythondb
# pythondb.set_trace()
xmldocobj.lock_rw()
try :
if autocreate_parentxpath is not None:
if ETxmlpath is not None:
ETXobj=etree.ETXPath(ETxmlpath)
xmlellist=ETXobj(xmldocobj.doc)
pass
else:
xmlellist=xmldocobj.xpath(xmlpath)
pass
if len(xmlellist)==0:
# need to autocreate
autocreate_parentlist=xmldocobj.xpath(autocreate_parentxpath)
if len(autocreate_parentlist) < 1:
raise ValueError("Could not find parent path %s to autocreate element" % (autocreate_parentxpath))
xmldocobj.insertelement(autocreate_parentlist[0],autocreate_insertpos,autocreate_tagname)
pass
pass
# sys.stderr.write("%s %s: %s\n" % (xmldocobj._filename,self.controlparam.xmlname,str(self.controlparam.dcvalue)))
self.xmlresync(xmldocobj,xmlpath,ETxmlpath,logfunc=logfunc,initialload=True)
pass
except:
(exctype,value)=sys.exc_info()[:2]
tback=traceback.format_exc()
result=xmldocobj.postexcdialog(exctype,value,tback,initialload=True,cantretry=False)
if result=="retry":
retry=True
continue
else :
raise
pass
finally:
xmldocobj.unlock_rw()
pass
pass
self.doclist.append((xmldocobj,xmlpath,ETxmlpath,logfunc))
xmldocobj.addresyncnotify(self.xmlresync,xmlpath,ETxmlpath,logfunc=logfunc)
pass
except:
# some kind of exception. Do precautionary remdoc and then raise
self.remdoc(xmldocobj,xmlpath,ETxmlpath,logfunc,precautionary=True)
raise
return (xmldocobj,xmlpath,ETxmlpath,logfunc)
def remdoc(self,xmldocobj,xmlpath,ETxmlpath=None,logfunc=None,precautionary=False):
entry=None
for doc in self.doclist:
if doc[0] is xmldocobj and doc[1]==xmlpath and doc[2]==ETxmlpath and doc[3]==logfunc:
entry=doc
break
pass
if entry is None and not precautionary:
raise ValueError("synced: Attempt to remove unknown document and path %s and %s" % (str(xmldocobj),str(xmlpath)))
if entry is not None:
self.doclist.remove(entry)
xmldocobj.remresyncnotify(self.xmlresync,precautionary,entry[1],entry[2],logfunc=logfunc)
pass
pass
#def valueobjfromxml(self,xmldocobj,xmlel):
# # this is a separate method so it can be overridden by derived
# # class for implementing expanding date class
# return self.controlparam.paramtype.fromxml(xmldocobj,xmlel,self.controlparam.defunits)
#def createvalueobj(self,newvalue):
# # this is a separate method so it can be overridden by derived
# # class for implementing expanding date class
# return self.controlparam.paramtype(newvalue,defunits=self.controlparam.defunits)
#def isconsistent(self,newval,oldval):
# # this is a separate method so it can be overridden by derived
# # class for implementing expanding date class
# return newval == self.controlparam.dcvalue
def manualmergedialog(self,humanpath,paramtype,parent,parentsource,descendentlist,descendentsourcelist,contexthref,kwargs):
# Something else must have made sure gtk is loaded!
dialog=gtk.Dialog(title="Manual merge: %s" % (humanpath),buttons=("Cancel and raise error",0,
"Apply",1))
box=dialog.get_content_area()
ScrolledWindow=gtk.ScrolledWindow()
if "gi" in sys.modules: # gtk3
ScrolledWindow.set_policy(gtk.PolicyType.NEVER,gtk.PolicyType.ALWAYS)
pass
else:
ScrolledWindow.set_policy(gtk.POLICY_NEVER,gtk.POLICY_ALWAYS)
pass
box.pack_start(ScrolledWindow,True,True,0)
#Viewport=gtk.Viewport()
#ScrolledWindow.add(Viewport)
VBox=gtk.VBox()
#Viewport.add(VBox)
ScrolledWindow.add_with_viewport(VBox)
#box.add(VBox)
#import pdb as pythondb
#pythondb.set_trace()
if parent is not None:
ParentFrame=gtk.Frame()
ParentFrame.set_label("Parent: from %s" % (str(parentsource)))
ParentTextView=gtk.TextView()
ParentTextBuffer=gtk.TextBuffer()
parentdoc=xmldoc.fromstring("<parent/>",contexthref=contexthref)
parent.xmlrepr(parentdoc,parentdoc.getroot())
ParentTextBuffer.set_text(parentdoc.tostring(pretty_print=True))
ParentTextView.set_buffer(ParentTextBuffer)
if "gi" in sys.modules: # gtk3
ParentTextView.set_wrap_mode(gtk.WrapMode.WORD_CHAR)
pass
else:
ParentTextView.set_wrap_mode(gtk.WRAP_WORD_CHAR)
pass
ParentTextView.set_property('editable',False)
ParentFrame.add(ParentTextView)
VBox.add(ParentFrame)
pass
for deccnt in range(len(descendentlist)):
descendent=descendentlist[deccnt]
desc_src=descendentsourcelist[deccnt]
DescendentFrame=gtk.Frame()
DescendentFrame.set_label("Descendent %d: from %s" % (deccnt+1,str(desc_src)))
DescendentTextView=gtk.TextView()
DescendentTextBuffer=gtk.TextBuffer()
descendentdoc=xmldoc.fromstring("<descendent/>",contexthref=contexthref)
descendent.xmlrepr(descendentdoc,descendentdoc.getroot())
DescendentTextBuffer.set_text(descendentdoc.tostring(pretty_print=True))
DescendentTextView.set_buffer(DescendentTextBuffer)
if "gi" in sys.modules: # gtk3
DescendentTextView.set_wrap_mode(gtk.WrapMode.WORD_CHAR)
pass
else:
DescendentTextView.set_wrap_mode(gtk.WRAP_WORD_CHAR)
pass
DescendentTextView.set_property('editable',False)
DescendentFrame.add(DescendentTextView)
VBox.add(DescendentFrame)
pass
MergedFrame=gtk.Frame()
MergedFrame.set_label("Merged")
MergedTextView=gtk.TextView()
MergedTextBuffer=gtk.TextBuffer()
MergedTextBuffer.set_text("")
MergedTextView.set_buffer(MergedTextBuffer)
if "gi" in sys.modules: # gtk3
MergedTextView.set_wrap_mode(gtk.WrapMode.WORD_CHAR)
pass
else:
MergedTextView.set_wrap_mode(gtk.WRAP_WORD_CHAR)
pass
MergedFrame.add(MergedTextView)
VBox.add(MergedFrame)
box.show_all()
ScrolledWindow.show_all()
#Viewport.show_all()
VBox.show_all()
dialog.show_all()
dialogval=dialog.run()
if dialogval==1:
mergeddoc=xmldoc.fromstring(MergedTextBuffer.get_text(MergedTextBuffer.get_start_iter(),MergedTextBuffer.get_end_iter(),False),contexthref=contexthref)
# ***!!!! Bug: If we have to merge an XMLTreevalue,
# the resulting tree's root is a <parent> or <descendent>
# tag, not what it should be, and therefore
# mergedvalue ends up wrong!
#
# Workaround: User manually puts in correct tag from window title
# Suggested fix: Use correct tag in parent and descendents
mergedvalue=paramtype.fromxml(mergeddoc,mergeddoc.getroot())
dialog.destroy()
return mergedvalue
dialog.destroy()
return None
def domerge(self,humanpath,parent,parentsource,descendentlist,descendentsourcelist,contexthref=None,manualmerge=True,**kwargs):
# this is a separate method so it can be overridden by derived
# class for implementing expanding date class
#print self.controlparam.paramtype
#print parent
#import pdb
#try :
# sys.stderr.write("%s\n" % (type(parent)))
# if parent is not None and "xmltreevalue" in str(type(parent)):
# sys.stderr.write("parent=%s\n\n\ndescendentlist[0]=%s\n\n\ndescendentlist[1]=%s\n\n\n" % (etree.tostring(parent._xmltreevalue__xmldoc.doc,pretty_print=True),etree.tostring(descendentlist[0]._xmltreevalue__xmldoc.doc,pretty_print=True),etree.tostring(descendentlist[1]._xmltreevalue__xmldoc.doc,pretty_print=True)))
# pass
#import pdb as pythondb
#try:
# if self.controlparam.paramtype is dc_value.hrefvalue:
# sys.stderr.write("domerge: contexthref=%s\n" % (contexthref.absurl()))
# pass
try :
result=self.controlparam.paramtype.merge(parent,descendentlist,contexthref=contexthref,**kwargs)
pass
except:
(exctype,value)=sys.exc_info()[:2]
if manualmerge: ###***!!!
if not "gtk" in sys.modules and not ("gi" in sys.modules and hasattr(sys.modules["gi"],"repository") and hasattr(sys.modules["gi"].repository,"Gtk")):
# if nothing else has loaded gtk2 or gtk3
# Just raise it
raise
else:
loadgtk()
result=self.manualmergedialog(humanpath,self.controlparam.paramtype,parent,parentsource,descendentlist,descendentsourcelist,contexthref,kwargs)
if result is None:
raise
pass
pass
else:
raise
pass
#except:
# pythondb.set_trace()
# if parent is not None and "xmltreevalue" in str(type(parent)) and "fubar" in etree.tostring(result._xmltreevalue__xmldoc.doc):
# sys.stderr.write("\nFOUNDIT\n")
return result
#except:
# pdb.post_mortem()
# pass
#pass
def _get_dcvalue_from_file(self,xmldocobj,xmlpath,ETxmlpath):
# xmldocobj must be locked during this process
#sys.stderr.write("get_dcvalue_from_file: filename=%s xmlpath=%s ETxmlpath=%s\n" % (xmldocobj.filename,xmlpath,ETxmlpath))
if xmlpath is not None:
xmlellist=xmldocobj.doc.xpath(xmlpath,namespaces=xmldocobj.namespaces,extensions=xmldocobj.extensions)
if len(xmlellist) > 1:
raise NameError("XPath query %s returned %d elements" % (xmlpath,len(xmlellist)))
if len(xmlellist)==0:
# No element -- append one to the parent
(parent,separator,elname)=xmlpath.rpartition("/")
if parent=="":
parent="." # empty parent means root element, which is obtained through doc.find(".")
pass
xmlel=xmldocobj.addelement(parent,elname)
pass
else :
xmlel=xmlellist[0]
pass
pass
else:
# ETxpath provided, not regular xpath
ETXobj=etree.ETXPath(ETxmlpath)
xmlellist=ETXobj(xmldocobj.doc)
# sys.stderr.write("parent=%s\n" % (parent))
if len(xmlellist) > 1:
raise NameError("ETXPath query %s returned %d elements" % (ETxmlpath,len(xmlellist)))
if len(xmlellist)==0:
# No element -- append one to the parent
splitpath=canonical_etxpath_split(ETxmlpath)
# the parent path is everything except the last component of the ETXPath
if etxpath_isabs(ETxmlpath):
parent=canonical_etxpath_absjoin(*splitpath[:-1])
pass
else:
parent=canonical_etxpath_join(*splitpath[:-1])
pass
ParentETXobj=etree.ETXPath(parent)
ParentElements=ParentETXobj(xmldocobj.doc)
if len(ParentElements) != 1:
raise NameError("ETXPath parent query %s returned %d elements" % (parent,len(ParentElements)))
elname=splitpath[-1]
if '[' in elname: # if there was a constraint in the last portion of the etxpath...
elname=elname[:elname.index('[')] # cut it off.
pass
ChildElement=etree.Element(elname,nsmap=ParentElements[0].nsmap)
ParentElements[0].append(ChildElement)
xmlel=ChildElement
xmldocobj.modified=True
pass
else :
xmlel=xmlellist[0]
pass
pass
# newvalue=xmlel.text # raw new value
# newval=None # new value as calculated from dc_value class
#newval=self.controlparam.paramtype.fromxml(xmldocobj,xmlel,self.controlparam.defunits,xml_attribute=self.controlparam.xml_attribute,contextdir=".")
newval=self.controlparam.paramtype.fromxml(xmldocobj,xmlel,self.controlparam.defunits)
#newval=self.valueobjfromxml(xmldocobj,xmlel)
return newval
# xmlresync loads in new data that has already been read in from the xml file
def xmlresync(self,xmldocobj,xmlpath,ETxmlpath,logfunc=None,initialload=False):
# NOTE: xmldocobj MUST be locked (or must be in the process of being locked)!!!
#if xmlpath=="dc:summary/dc:dest":
# import pdb as pythondb
# pythondb.set_trace()
# pass
#sys.stderr.write("xmlresync: %s doc=%s xp=%s etxp=%s in_synchronize=%s\n" % (xmldocobj.filename,str(xmldocobj.doc),xmlpath,ETxmlpath,str(self.in_synchronize)))
# make sure xmldocobj is in our list
# print "this document: ", xmldocobj,xmlpath,logfunc
# print self.doclist
if not initialload:
assert(any([doc[0] is xmldocobj and doc[1]==xmlpath and doc[2]==ETxmlpath and doc[3]==logfunc for doc in self.doclist]))
pass
if self.in_synchronize:
| |
preferences.
:param 'TransportPreferencesResponseArgs' transport_preferences: Preferences related to the shipment logistics of the order.
"""
if notification_preferences is not None:
pulumi.set(__self__, "notification_preferences", notification_preferences)
if transport_preferences is not None:
pulumi.set(__self__, "transport_preferences", transport_preferences)
@property
@pulumi.getter(name="notificationPreferences")
def notification_preferences(self) -> Optional[Sequence['outputs.NotificationPreferenceResponse']]:
"""
Notification preferences.
"""
return pulumi.get(self, "notification_preferences")
@property
@pulumi.getter(name="transportPreferences")
def transport_preferences(self) -> Optional['outputs.TransportPreferencesResponse']:
"""
Preferences related to the shipment logistics of the order.
"""
return pulumi.get(self, "transport_preferences")
def _translate_property(self, prop):
return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
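# e.g. a generated key such as "notificationPreferences" translates to
# "notification_preferences"; keys missing from the table pass through unchanged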
@pulumi.output_type
class ProductDetailsResponse(dict):
"""
Represents product details
"""
def __init__(__self__, *,
device_details: Sequence['outputs.DeviceDetailsResponse'],
hierarchy_information: 'outputs.HierarchyInformationResponse',
count: Optional[int] = None):
"""
Represents product details
:param Sequence['DeviceDetailsResponseArgs'] device_details: list of device details
:param 'HierarchyInformationResponseArgs' hierarchy_information: Hierarchy of the product which uniquely identifies the product
:param int count: Quantity of the product
"""
pulumi.set(__self__, "device_details", device_details)
pulumi.set(__self__, "hierarchy_information", hierarchy_information)
if count is not None:
pulumi.set(__self__, "count", count)
@property
@pulumi.getter(name="deviceDetails")
def device_details(self) -> Sequence['outputs.DeviceDetailsResponse']:
"""
list of device details
"""
return pulumi.get(self, "device_details")
@property
@pulumi.getter(name="hierarchyInformation")
def hierarchy_information(self) -> 'outputs.HierarchyInformationResponse':
"""
Hierarchy of the product which uniquely identifies the product
"""
return pulumi.get(self, "hierarchy_information")
@property
@pulumi.getter
def count(self) -> Optional[int]:
"""
Quantity of the product
"""
return pulumi.get(self, "count")
def _translate_property(self, prop):
return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
@pulumi.output_type
class ProductFamilyResponseResult(dict):
"""
Product Family
"""
def __init__(__self__, *,
availability_information: 'outputs.AvailabilityInformationResponseResult',
cost_information: 'outputs.CostInformationResponseResult',
description: 'outputs.DescriptionResponseResult',
display_name: str,
filterable_properties: Sequence['outputs.FilterablePropertyResponseResult'],
hierarchy_information: 'outputs.HierarchyInformationResponse',
image_information: Sequence['outputs.ImageInformationResponseResult'],
product_lines: Sequence['outputs.ProductLineResponseResult']):
"""
Product Family
:param 'AvailabilityInformationResponseArgs' availability_information: Availability information of the product system.
:param 'CostInformationResponseArgs' cost_information: Cost information for the product system.
:param 'DescriptionResponseArgs' description: Description related to the product system.
:param str display_name: Display Name for the product system.
:param Sequence['FilterablePropertyResponseArgs'] filterable_properties: list of filters supported for a product
:param 'HierarchyInformationResponseArgs' hierarchy_information: Hierarchy information of the product system.
:param Sequence['ImageInformationResponseArgs'] image_information: Image information for the product system.
:param Sequence['ProductLineResponseArgs'] product_lines: List of product lines supported in the product family
"""
pulumi.set(__self__, "availability_information", availability_information)
pulumi.set(__self__, "cost_information", cost_information)
pulumi.set(__self__, "description", description)
pulumi.set(__self__, "display_name", display_name)
pulumi.set(__self__, "filterable_properties", filterable_properties)
pulumi.set(__self__, "hierarchy_information", hierarchy_information)
pulumi.set(__self__, "image_information", image_information)
pulumi.set(__self__, "product_lines", product_lines)
@property
@pulumi.getter(name="availabilityInformation")
def availability_information(self) -> 'outputs.AvailabilityInformationResponseResult':
"""
Availability information of the product system.
"""
return pulumi.get(self, "availability_information")
@property
@pulumi.getter(name="costInformation")
def cost_information(self) -> 'outputs.CostInformationResponseResult':
"""
Cost information for the product system.
"""
return pulumi.get(self, "cost_information")
@property
@pulumi.getter
def description(self) -> 'outputs.DescriptionResponseResult':
"""
Description related to the product system.
"""
return pulumi.get(self, "description")
@property
@pulumi.getter(name="displayName")
def display_name(self) -> str:
"""
Display Name for the product system.
"""
return pulumi.get(self, "display_name")
@property
@pulumi.getter(name="filterableProperties")
def filterable_properties(self) -> Sequence['outputs.FilterablePropertyResponseResult']:
"""
list of filters supported for a product
"""
return pulumi.get(self, "filterable_properties")
@property
@pulumi.getter(name="hierarchyInformation")
def hierarchy_information(self) -> 'outputs.HierarchyInformationResponse':
"""
Hierarchy information of the product system.
"""
return pulumi.get(self, "hierarchy_information")
@property
@pulumi.getter(name="imageInformation")
def image_information(self) -> Sequence['outputs.ImageInformationResponseResult']:
"""
Image information for the product system.
"""
return pulumi.get(self, "image_information")
@property
@pulumi.getter(name="productLines")
def product_lines(self) -> Sequence['outputs.ProductLineResponseResult']:
"""
List of product lines supported in the product family
"""
return pulumi.get(self, "product_lines")
@pulumi.output_type
class ProductLineResponseResult(dict):
"""
Product line
"""
def __init__(__self__, *,
availability_information: 'outputs.AvailabilityInformationResponseResult',
cost_information: 'outputs.CostInformationResponseResult',
description: 'outputs.DescriptionResponseResult',
display_name: str,
filterable_properties: Sequence['outputs.FilterablePropertyResponseResult'],
hierarchy_information: 'outputs.HierarchyInformationResponse',
image_information: Sequence['outputs.ImageInformationResponseResult'],
products: Sequence['outputs.ProductResponseResult']):
"""
Product line
:param 'AvailabilityInformationResponseArgs' availability_information: Availability information of the product system.
:param 'CostInformationResponseArgs' cost_information: Cost information for the product system.
:param 'DescriptionResponseArgs' description: Description related to the product system.
:param str display_name: Display Name for the product system.
:param Sequence['FilterablePropertyResponseArgs'] filterable_properties: list of filters supported for a product
:param 'HierarchyInformationResponseArgs' hierarchy_information: Hierarchy information of the product system.
:param Sequence['ImageInformationResponseArgs'] image_information: Image information for the product system.
:param Sequence['ProductResponseArgs'] products: List of products in the product line
"""
pulumi.set(__self__, "availability_information", availability_information)
pulumi.set(__self__, "cost_information", cost_information)
pulumi.set(__self__, "description", description)
pulumi.set(__self__, "display_name", display_name)
pulumi.set(__self__, "filterable_properties", filterable_properties)
pulumi.set(__self__, "hierarchy_information", hierarchy_information)
pulumi.set(__self__, "image_information", image_information)
pulumi.set(__self__, "products", products)
@property
@pulumi.getter(name="availabilityInformation")
def availability_information(self) -> 'outputs.AvailabilityInformationResponseResult':
"""
Availability information of the product system.
"""
return pulumi.get(self, "availability_information")
@property
@pulumi.getter(name="costInformation")
def cost_information(self) -> 'outputs.CostInformationResponseResult':
"""
Cost information for the product system.
"""
return pulumi.get(self, "cost_information")
@property
@pulumi.getter
def description(self) -> 'outputs.DescriptionResponseResult':
"""
Description related to the product system.
"""
return pulumi.get(self, "description")
@property
@pulumi.getter(name="displayName")
def display_name(self) -> str:
"""
Display Name for the product system.
"""
return pulumi.get(self, "display_name")
@property
@pulumi.getter(name="filterableProperties")
def filterable_properties(self) -> Sequence['outputs.FilterablePropertyResponseResult']:
"""
list of filters supported for a product
"""
return pulumi.get(self, "filterable_properties")
@property
@pulumi.getter(name="hierarchyInformation")
def hierarchy_information(self) -> 'outputs.HierarchyInformationResponse':
"""
Hierarchy information of the product system.
"""
return pulumi.get(self, "hierarchy_information")
@property
@pulumi.getter(name="imageInformation")
def image_information(self) -> Sequence['outputs.ImageInformationResponseResult']:
"""
Image information for the product system.
"""
return pulumi.get(self, "image_information")
@property
@pulumi.getter
def products(self) -> Sequence['outputs.ProductResponseResult']:
"""
List of products in the product line
"""
return pulumi.get(self, "products")
@pulumi.output_type
class ProductResponseResult(dict):
"""
List of Products
"""
def __init__(__self__, *,
availability_information: 'outputs.AvailabilityInformationResponseResult',
configurations: Sequence['outputs.ConfigurationResponseResult'],
cost_information: 'outputs.CostInformationResponseResult',
description: 'outputs.DescriptionResponseResult',
display_name: str,
filterable_properties: Sequence['outputs.FilterablePropertyResponseResult'],
hierarchy_information: 'outputs.HierarchyInformationResponse',
image_information: Sequence['outputs.ImageInformationResponseResult']):
"""
List of Products
:param 'AvailabilityInformationResponseArgs' availability_information: Availability information of the product system.
:param Sequence['ConfigurationResponseArgs'] configurations: List of configurations for the product
:param 'CostInformationResponseArgs' cost_information: Cost information for the product system.
:param 'DescriptionResponseArgs' description: Description related to the product system.
:param str display_name: Display Name for the product system.
:param Sequence['FilterablePropertyResponseArgs'] filterable_properties: list of filters supported for a product
:param 'HierarchyInformationResponseArgs' hierarchy_information: Hierarchy information of the product system.
:param Sequence['ImageInformationResponseArgs'] image_information: Image information for the product system.
"""
pulumi.set(__self__, "availability_information", availability_information)
pulumi.set(__self__, "configurations", configurations)
pulumi.set(__self__, "cost_information", cost_information)
pulumi.set(__self__, "description", description)
pulumi.set(__self__, "display_name", display_name)
pulumi.set(__self__, "filterable_properties", filterable_properties)
pulumi.set(__self__, "hierarchy_information", hierarchy_information)
pulumi.set(__self__, "image_information", image_information)
@property
@pulumi.getter(name="availabilityInformation")
def availability_information(self) -> 'outputs.AvailabilityInformationResponseResult':
"""
Availability information of the product system.
"""
return pulumi.get(self, "availability_information")
@property
@pulumi.getter
def configurations(self) -> Sequence['outputs.ConfigurationResponseResult']:
"""
List of configurations for the product
"""
return pulumi.get(self, "configurations")
@property
@pulumi.getter(name="costInformation")
def cost_information(self) -> 'outputs.CostInformationResponseResult':
"""
Cost information for the product system.
"""
return pulumi.get(self, "cost_information")
@property
@pulumi.getter
def description(self) -> 'outputs.DescriptionResponseResult':
"""
Description related to the product system.
"""
return pulumi.get(self, "description")
@property
@pulumi.getter(name="displayName")
def display_name(self) -> str:
"""
Display Name for the product system.
"""
return pulumi.get(self, "display_name")
@property
@pulumi.getter(name="filterableProperties")
def filterable_properties(self) -> Sequence['outputs.FilterablePropertyResponseResult']:
"""
list of filters supported for a product
"""
return pulumi.get(self, "filterable_properties")
@property
@pulumi.getter(name="hierarchyInformation")
def hierarchy_information(self) -> 'outputs.HierarchyInformationResponse':
"""
Hierarchy information of the product system.
"""
return pulumi.get(self, "hierarchy_information")
@property
@pulumi.getter(name="imageInformation")
def image_information(self) -> Sequence['outputs.ImageInformationResponseResult']:
"""
Image information for the product system.
"""
return pulumi.get(self, "image_information")
@pulumi.output_type
class ShippingAddressResponse(dict):
"""
Shipping address where customer wishes to receive the device.
"""
def __init__(__self__, *,
country: str,
street_address1: str,
address_type: Optional[str] = None,
city: Optional[str] = None,
company_name: Optional[str] = None,
postal_code: Optional[str] = None,
state_or_province: Optional[str] = None,
street_address2: Optional[str] = None,
street_address3: Optional[str] = None,
zip_extended_code: Optional[str] = None):
"""
Shipping address where customer wishes to receive the device.
:param str country: Name of the Country.
:param str street_address1: Street Address line 1.
:param str address_type: Type of address.
:param str city: Name of the City.
:param str company_name: Name of the company.
:param str postal_code: Postal code.
:param str state_or_province: Name of the State or Province.
:param str street_address2: Street Address line 2.
:param str street_address3: Street Address line 3.
:param str zip_extended_code: Extended Zip Code.
"""
pulumi.set(__self__, "country", country)
pulumi.set(__self__, "street_address1", street_address1)
if address_type is not None:
pulumi.set(__self__, "address_type", address_type)
if city is not None:
pulumi.set(__self__, "city", city)
if company_name is not None:
pulumi.set(__self__, "company_name", company_name)
if postal_code is not None:
pulumi.set(__self__, "postal_code", postal_code)
if state_or_province is not None:
pulumi.set(__self__, "state_or_province", state_or_province)
if street_address2 is not None:
pulumi.set(__self__, "street_address2", street_address2)
if street_address3 is not None:
pulumi.set(__self__, "street_address3", street_address3)
if zip_extended_code is not None:
pulumi.set(__self__, "zip_extended_code", zip_extended_code)
@property
@pulumi.getter
def country(self) -> str:
"""
Name of the Country.
"""
return pulumi.get(self, "country")
@property
@pulumi.getter(name="streetAddress1")
def street_address1(self) -> str:
"""
Street Address line 1.
"""
return pulumi.get(self, "street_address1")
@property
@pulumi.getter(name="addressType")
def address_type(self) -> Optional[str]:
"""
Type of address.
"""
return pulumi.get(self, "address_type")
@property
@pulumi.getter
def city(self) -> Optional[str]:
"""
Name of the City.
"""
return pulumi.get(self, "city")
@property
@pulumi.getter(name="companyName")
def company_name(self) -> Optional[str]:
"""
Name of the company.
"""
return pulumi.get(self, "company_name")
@property
@pulumi.getter(name="postalCode")
def postal_code(self) -> Optional[str]:
"""
Postal code.
"""
return pulumi.get(self, "postal_code")
@property
@pulumi.getter(name="stateOrProvince")
def state_or_province(self) -> Optional[str]:
"""
        Name of the State or Province.
        """
        return pulumi.get(self, "state_or_province")
import sys
from os import path
from PyQt5 import QtCore, QtGui, QtWidgets, uic
from compound import Compound
import re
import csv
from time import strftime
from time import time
#from profilehooks import profile
#function for auto-py-to-exe
def resource_path(relative_path):
""" Get absolute path to resource, works for dev and for PyInstaller """
try:
# PyInstaller creates a temp folder and stores path in _MEIPASS
base_path = sys._MEIPASS
except Exception:
base_path = path.abspath(".")
return path.join(base_path, relative_path)
class MainWindow(QtWidgets.QMainWindow):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
#load UI from main.ui
uic.loadUi(resource_path("view/main.ui"), self)
#default header items
self.header_items = [HeaderItem('name'),
HeaderItem('compound'),
HeaderItem('neutral', charge = 0),
HeaderItem('[M-H]-', charge= -1),
HeaderItem('[M+H]+', charge= 1),
HeaderItem('[M+Na]+', adduct = 'Na', charge= 1),
HeaderItem('[M+K]+', adduct = 'K', charge= 1)]
#default mass precision
self.mass_precision = 4
#search matches
self.search_term = ''
self.matches = []
#default elimination product
self.elimination_product = 'H2O'
#path to csv file
self.save_path = ''
#set UI Graphics
self.init_UI()
#set UI Actions
self.set_actions()
#undo and redo list will be empty at start
self.undo_list = []
self.redo_list = []
#check why table contents necessary!
self.table_content = [[self.t1.item(r,c).text() for c in range(self.t1.columnCount())] for r in range(self.t1.rowCount())]
#counter for undos
self.undo_index = 0
#adding one empty undo at start is necessary
self.add_undo('')
#undo and paste Buttons in Menu are disabled at start
self.actionUndo.setDisabled(True)
self.actionPaste.setDisabled(True)
#temp cells to put in copy and cut cells before paste
self.temp_cells = []
#current state of the table
self.saved = [[self.t1.item(r,c).text() for c in range(self.t1.columnCount())] for r in range(self.t1.rowCount())]
#set a list of table items changed (only compound section)
self.compounds_changed = [0,1]
#self.inputBuilderCalculation.setText('2+1')
#for i in range(0,100):
# self.add_complex_compound()
#initially set up the UI
def init_UI(self):
#Application Title
self.setWindowTitle('Exact Mass Calculator')
#Application icon
self.setWindowIcon(QtGui.QIcon(resource_path('view/atom.png')))
#blue buttons
for btn in (self.btnClearSearch, self.btnClearBuilder, self.btnClearColumn):
btn.setStyleSheet('background-color:#7289DA; border-radius:4px; color:#FFFFFF')
#green buttons
for btn in (self.btnAddBuilder, self.btnAddColumn):
btn.setStyleSheet('background-color:#43B581; border-radius:4px; color:#FFFFFF')
#red buttons
self.btnCalculate.setStyleSheet('background-color:#43B581; border-radius:4px; color:#FFFFFF')#a95fde
#yellow buttons
self.btnFindSearch.setStyleSheet('background-color:#ffbf00; border-radius:4px; color:#FFFFFF')
#white boxes behind the menu items
self.lnMenu.setStyleSheet('border-radius:4px; background-color:#FFFFFF;')
#input field blue border, white background, dark grey text
for field in (self.inputNewColumnName, self.inputNewColumnModify, self.inputNewColumnCharge, self.inputNewColumnAdduct, self.inputBuilderName, self.inputBuilderCalculation, self.inputSearch):
field.setStyleSheet('border-radius:4px; color:#555555; border:2px solid #FFFFFF;')
#dark grey text color in lblNewColumn and lblBuilder lblFind
for label in (self.lblFind, self.lblNewColumn, self.lblBuilder):
label.setStyleSheet('color:#555555;')
#set up and style Headers
header = self.t1.horizontalHeader()
header.setStyleSheet('border-radius:5px;')
header.setSectionResizeMode(QtWidgets.QHeaderView.ResizeToContents)
header.setMinimumSectionSize(120)
header.setSectionsMovable(True)
#font and background of table
self.t1.setFont(QtGui.QFont ("Consolas", 12))
self.t1.setStyleSheet('selection-background-color: #7289DA')
#update table and header
self.update_header()
self.update_table()
#Progress Bar
self.progressBar.hide()
#connect buttons with functions
def set_actions(self):
self.btnCalculate.clicked.connect(self.calculate)
self.btnClearBuilder.clicked.connect(self.clear_builder)
self.btnClearColumn.clicked.connect(self.clear_add_column)
self.btnAddColumn.clicked.connect(self.add_column)
self.btnAddBuilder.clicked.connect(self.add_complex_compound)
self.btnFindSearch.clicked.connect(self.find)
self.btnClearSearch.clicked.connect(self.clear_find)
self.t1.minimumSizeHint()
self.actionUndo.triggered.connect(self.undo)
self.actionRedo.triggered.connect(self.redo)
self.actionCopy.triggered.connect(self.copy_cells)
self.actionCut.triggered.connect(self.cut_cells)
self.actionPaste.triggered.connect(self.paste_cells)
self.actionOpen.triggered.connect(self.open_csv)
self.actionSave.triggered.connect(self.save_csv)
self.actionSave_as.triggered.connect(self.save_as_csv)
self.actionExit.triggered.connect(self.exit)
self.actionDelete.triggered.connect(self.delete)
self.actionAdd_Row.triggered.connect(self.add_row)
self.actionDelete_Last_Row.triggered.connect(self.delete_last_row)
self.actionMass_Precision.triggered.connect(self.get_mass_precision)
self.actionElimination_Product.triggered.connect(self.get_elimination_product)
self.actionHelp.triggered.connect(self.display_help)
self.actionAbout_Mass_Calculator.triggered.connect(self.about)
self.resizeEvent = self.resize_table
self.WindowStateChange = self.resize_table
self.actionRedo.setDisabled(True)
self.actionUndo.setDisabled(True)
self.actionSave.setDisabled(True)
self.t1.itemChanged.connect(self.table_changed)
self.t1.doubleClicked.connect(self.table_double_clicked)
self.inputSearch.returnPressed.connect(self.find)
#exit the app and check if save necessary
def exit(self):
if self.saved == [[self.t1.item(r,c).text() for c in range(self.t1.columnCount())] for r in range(self.t1.rowCount())]:
            sys.exit()
else:
self.exit_save()
#catch close event
def closeEvent(self, event):
if self.saved == [[self.t1.item(r,c).text() for c in range(self.t1.columnCount())] for r in range(self.t1.rowCount())]:
event.accept()
            sys.exit()
else:
event.ignore()
self.exit_save()
#ask for save if table changed before exit
def exit_save(self):
msgBox = QtWidgets.QMessageBox()
        msgBox.setWindowIcon(QtGui.QIcon(resource_path('view/atom.png')))
msgBox.setWindowTitle('Save?')
msgBox.setText('The table has been modified.')
msgBox.setInformativeText('Do you want to save your changes?')
msgBox.setStandardButtons(QtWidgets.QMessageBox.Save | QtWidgets.QMessageBox.Discard | QtWidgets.QMessageBox.Cancel)
msgBox.setDefaultButton(QtWidgets.QMessageBox.Save)
result = msgBox.exec_()
if result == QtWidgets.QMessageBox.Save:
self.save_csv()
print('saved')
            sys.exit()
elif result == QtWidgets.QMessageBox.Cancel:
pass
else:
            sys.exit()
#Undo/Redo Functionality
def add_undo(self, btn_text):
self.actionUndo.setEnabled(True)
self.table_content = [[self.t1.item(r,c).text() for c in range(self.t1.columnCount())] for r in range(self.t1.rowCount())]
self.redo_list = []
header_items = self.header_items[:]
self.undo_list.append((self.table_content, header_items, btn_text, self.undo_index))
if len(self.undo_list) > 10:
self.undo_list = self.undo_list[-10::]
self.undo_index += 1
self.actionRedo.setDisabled(True)
self.actionSave.setEnabled(True)
#restore Table back to last undo
def undo(self):
if len(self.undo_list) > 1:
self.t1.blockSignals(True)
redo = self.undo_list.pop()
self.redo_list.append(redo)
undo = self.undo_list[-1]
self.t1.clear()
new_table = undo[0]
self.header_items = undo[1]
self.update_header()
self.t1.setRowCount(len(new_table))
self.t1.setColumnCount(len(undo[1]))
self.t1.blockSignals(True)
for r in range(len(new_table)):
for c in range(len(new_table[0])):
item = QtWidgets.QTableWidgetItem()
item.setText(new_table[r][c])
if c > 1:
item.setTextAlignment(QtCore.Qt.AlignRight)
self.t1.setItem(r,c,item)
self.t1.blockSignals(False)
self.update_table()
self.undo_done = True
self.actionRedo.setEnabled(True)
self.t1.blockSignals(False)
if len(self.undo_list) == 1:
self.actionUndo.setDisabled(True)
for i in range(0, self.t1.rowCount()):
if self.t1.item(i,1).text() != '' and self.t1.item(i,2).text() == '':
self.compounds_changed.append(i)
#restore table back to last redo
def redo(self):
if len(self.redo_list) > 0 and self.undo_done:
self.t1.blockSignals(True)
redo = self.redo_list.pop()
self.undo_list.append(redo)
self.t1.clear()
new_table = redo[0]
self.t1.setRowCount(len(new_table))
self.t1.setColumnCount(len(new_table[0]))
for r in range(len(new_table)):
for c in range(len(new_table[0])):
item = QtWidgets.QTableWidgetItem()
item.setText(new_table[r][c])
if c > 1:
item.setTextAlignment(QtCore.Qt.AlignRight)
self.t1.setItem(r,c,item)
self.header_items = redo[1]
self.update_header()
self.update_table()
self.undo_done = True
self.t1.blockSignals(False)
self.actionUndo.setEnabled(True)
if len(self.redo_list) == 0:
self.actionRedo.setDisabled(True)
#Table related Functions
#if something in table changed check if data is unsaved
def table_changed(self, item):
column = item.column()
row = item.row() +1
row_count = self.t1.rowCount()
if row == row_count:
self.t1.insertRow(row)
if column == 1:
self.compounds_changed.append(row-1)
self.update_table()
if self.saved == [[self.t1.item(r,c).text() for c in range(self.t1.columnCount())] for r in range(self.t1.rowCount())]:
self.setWindowTitle('Exact Mass Calculator - ' + self.save_path.split('/')[-1])
elif self.save_path == '':
self.setWindowTitle('Exact Mass Calculator - *')
else:
self.setWindowTitle('Exact Mass Calculator - ' + self.save_path.split('/')[-1]+'*')
self.actionSave.setEnabled(True)
self.add_undo('Edit Table')
    #don't want empty cells in the table, so put an empty item into each cell
def update_table(self):
self.t1.blockSignals(True)
for i in range(self.t1.rowCount()):
for j in range(len(self.header_items)):
if not self.t1.item(i,j):
item = QtWidgets.QTableWidgetItem()
if j > 2:
item.setTextAlignment(QtCore.Qt.AlignRight)
self.t1.setItem(i, j, item)
self.t1.blockSignals(False)
#update each header item, font, style
def update_header(self):
self.t1.blockSignals(True)
fnt = QtGui.QFont()
fnt.setPointSize(14)
fnt.setBold(True)
fnt.setFamily("Consolas")
header_count = len(self.header_items)
self.t1.setColumnCount(header_count)
for i in range(0, len(self.header_items)):
item = QtWidgets.QTableWidgetItem(self.header_items[i].name)
item.setFont(fnt)
item.setForeground(QtGui.QBrush(QtGui.QColor(85, 85, 85)))
self.t1.setHorizontalHeaderItem(i, item)
self.update_table()
self.t1.blockSignals(False)
#change the size of table after window size has been changed
def resize_table(self, event):
width = self.size().width()
height = self.size().height()
self.t1.setGeometry(190,10, width - 200, height - 50)
    #take the inputs from the new column section and add a new header item accordingly
def add_column(self):
self.inputNewColumnModify.setStyleSheet('border-radius:4px; color:#555555; border:2px solid #FFFFFF')
self.inputNewColumnAdduct.setStyleSheet('border-radius:4px; color:#555555; border:2px solid #FFFFFF')
self.inputNewColumnCharge.setStyleSheet('border-radius:4px; color:#555555; border:2px solid #FFFFFF')
if self.inputNewColumnModify.text() != '' or self.inputNewColumnCharge.text() != '' or self.inputNewColumnAdduct.text() != '':
try:
add = ''
delete = ''
adduct = ''
if self.inputNewColumnModify.text() != '':
find_plus = re.findall(r'\+[A-Z0-9]+', self.inputNewColumnModify.text())
find_minus = re.findall(r'\-[A-Z0-9]+', self.inputNewColumnModify.text())
add = ''.join([a[1:] for a in find_plus])
delete = ''.join([d[1:] for d in find_minus])
if sorted(''.join(find_plus)+''.join(find_minus)) != sorted(self.inputNewColumnModify.text()):
self.inputNewColumnModify.setStyleSheet('border-radius:4px; color:#f04747; border:2px solid #FFFFFF')
return
if self.inputNewColumnCharge.text() == '0' or self.inputNewColumnCharge.text() == '':
charge = 0
else:
charge_amount = re.search(r'\d+', self.inputNewColumnCharge.text())
charge_polarity = re.search(r'(\-|\+)', self.inputNewColumnCharge.text())
                    if charge_amount is None and charge_polarity.group() == '+':
charge = 1
                    elif charge_amount is None and charge_polarity.group() == '-':
charge = -1
else:
charge = int(charge_polarity.group()+charge_amount.group())
if self.inputNewColumnAdduct.text() != '' and self.inputNewColumnAdduct.text() in ['K', 'Na', 'Li'] and charge > 0:
adduct = self.inputNewColumnAdduct.text()
elif self.inputNewColumnAdduct.text() not in ['K', 'Na', 'Li','']:
self.inputNewColumnAdduct.setStyleSheet('border-radius:4px; color:#f04747; border:2px solid #FFFFFF')
return
elif self.inputNewColumnAdduct.text() in ['K', 'Na', 'Li'] and charge <= 0:
self.inputNewColumnAdduct.setStyleSheet('border-radius:4px; color:#f04747; border:2px solid #FFFFFF')
self.inputNewColumnCharge.setStyleSheet('border-radius:4px; color:#f04747; border:2px solid #FFFFFF')
return
if self.inputNewColumnName.text() == '':
if charge == 1:
charge_display = ''
elif charge == -1:
charge_display = '-'
else:
charge_display = charge
if len(add) > 0:
name_add = '+'+add
else:
name_add = ''
if len(delete) > 0:
name_delete = '-'+delete
else:
name_delete = ''
if adduct != '':
name = f'[M{name_add}{name_delete}+{charge_display}{adduct}]{charge_display}+'
elif charge == 0:
name = f'[M{name_add}{name_delete}]'
elif charge >= 1:
polarity = charge_polarity.group()
name = f'[M{name_add}{name_delete}{polarity}{charge_display}H]{charge_display}{polarity}'
elif charge <= -1:
polarity = charge_polarity.group()
name = f'[M{name_add}{name_delete}{charge_display}H]{str(charge_display)[1:]}{polarity}'
else:
name = self.inputNewColumnName.text()
self.header_items.append(HeaderItem(name, add = add, delete = delete, adduct = adduct, charge = charge))
self.update_header()
for i in range(0, self.t1.rowCount()):
self.compounds_changed.append(i)
self.add_undo('Add Column')
except:
self.inputNewColumnCharge.setStyleSheet('border-radius:4px; color:#f04747; border:2px solid #FFFFFF')
elif self.inputNewColumnName.text() != '':
print('New retention time will be added')
self.header_items.append(HeaderItem(name=self.inputNewColumnName.text(), rt = 'yes'))
self.update_header()
for i in range(0, self.t1.rowCount()):
item = QtWidgets.QTableWidgetItem()
item.setText('')
item.setTextAlignment(QtCore.Qt.AlignRight)
self.t1.setItem(i, self.t1.columnCount()-1, item)
#clears the input fields in add column
def clear_add_column(self):
self.inputNewColumnName.setText('')
self.inputNewColumnModify.setText('')
self.inputNewColumnCharge.setText('')
self.inputNewColumnAdduct.setText('')
self.inputNewColumnModify.setStyleSheet('border-radius:4px; color:#555555; border:2px solid #FFFFFF')
self.inputNewColumnAdduct.setStyleSheet('border-radius:4px; color:#555555; border:2px solid #FFFFFF')
self.inputNewColumnCharge.setStyleSheet('border-radius:4px; color:#555555; border:2px solid #FFFFFF')
    #add a row to the table, by default at the end, or below the selected row if one is selected
def add_row(self):
self.t1.blockSignals(True)
indexes = self.t1.selectionModel().selectedRows()
if indexes:
row_pos = indexes[-1].row() + 1
else:
            row_pos = self.t1.rowCount()
<reponame>Huang-Shijie-SDUWH/Deep_MVS_Tutorial<filename>PatchmatchNet/models/patchmatch.py
"""
PatchmatchNet uses the following main steps:
1. Initialization: generate random hypotheses;
2. Propagation: propagate hypotheses to neighbors;
3. Evaluation: compute the matching costs for all the hypotheses and choose best solutions.
"""
from typing import List, Tuple
import torch
import torch.nn as nn
import torch.nn.functional as F
from .module import ConvBnReLU3D, differentiable_warping, is_empty
class DepthInitialization(nn.Module):
"""Initialization Stage Class"""
def __init__(self, patchmatch_num_sample: int = 1) -> None:
"""Initialize method
Args:
patchmatch_num_sample: number of samples used in patchmatch process
"""
super(DepthInitialization, self).__init__()
self.patchmatch_num_sample = patchmatch_num_sample
def forward(
self,
min_depth: torch.Tensor,
max_depth: torch.Tensor,
height: int,
width: int,
depth_interval_scale: float,
device: torch.device,
depth: torch.Tensor = torch.empty(0),
) -> torch.Tensor:
"""Forward function for depth initialization
Args:
min_depth: minimum virtual depth, (B, )
max_depth: maximum virtual depth, (B, )
height: height of depth map
width: width of depth map
depth_interval_scale: depth interval scale
device: device on which to place tensor
depth: current depth (B, 1, H, W)
Returns:
depth_sample: initialized sample depth map by randomization or local perturbation (B, Ndepth, H, W)
"""
batch_size = min_depth.size()[0]
inverse_min_depth = 1.0 / min_depth
inverse_max_depth = 1.0 / max_depth
if is_empty(depth):
# first iteration of Patchmatch on stage 3, sample in the inverse depth range
# divide the range into several intervals and sample in each of them
patchmatch_num_sample = 48
# [B,Ndepth,H,W]
depth_sample = torch.rand(
size=(batch_size, patchmatch_num_sample, height, width), device=device
) + torch.arange(start=0, end=patchmatch_num_sample, step=1, device=device).view(
1, patchmatch_num_sample, 1, 1
)
depth_sample = inverse_max_depth.view(batch_size, 1, 1, 1) + depth_sample / patchmatch_num_sample * (
inverse_min_depth.view(batch_size, 1, 1, 1) - inverse_max_depth.view(batch_size, 1, 1, 1)
)
return 1.0 / depth_sample
elif self.patchmatch_num_sample == 1:
return depth.detach()
else:
            # later Patchmatch iterations: local perturbation is performed based on the previous result
            # uniform samples in an inverse depth range
depth_sample = (
torch.arange(-self.patchmatch_num_sample // 2, self.patchmatch_num_sample // 2, 1, device=device)
.view(1, self.patchmatch_num_sample, 1, 1).repeat(batch_size, 1, height, width).float()
)
inverse_depth_interval = (inverse_min_depth - inverse_max_depth) * depth_interval_scale
inverse_depth_interval = inverse_depth_interval.view(batch_size, 1, 1, 1)
depth_sample = 1.0 / depth.detach() + inverse_depth_interval * depth_sample
depth_clamped = []
del depth
for k in range(batch_size):
depth_clamped.append(
torch.clamp(depth_sample[k], min=inverse_max_depth[k], max=inverse_min_depth[k]).unsqueeze(0)
)
return 1.0 / torch.cat(depth_clamped, dim=0)
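def _depth_initialization_example() -> torch.Tensor:
    """Illustrative sketch only (toy shapes and values, not part of the PatchmatchNet pipeline):
    how step 1 draws random inverse-depth hypotheses for a single 32 x 40 depth map."""
    device = torch.device("cpu")
    min_depth = torch.tensor([0.5], device=device)   # (B,) minimum virtual depth
    max_depth = torch.tensor([10.0], device=device)  # (B,) maximum virtual depth
    init = DepthInitialization(patchmatch_num_sample=16)
    # No previous depth is passed, so 48 samples are drawn uniformly over the inverse depth range
    return init(min_depth, max_depth, height=32, width=40,
                depth_interval_scale=0.025, device=device)  # (1, 48, 32, 40)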
class Propagation(nn.Module):
""" Propagation module implementation"""
def __init__(self) -> None:
"""Initialize method"""
super(Propagation, self).__init__()
def forward(self, depth_sample: torch.Tensor, grid: torch.Tensor) -> torch.Tensor:
        """Forward method of adaptive propagation
        Args:
            depth_sample: sample depth map, in shape of [batch, num_depth, height, width]
            grid: 2D grid for bilinear sampling, in shape of [batch, neighbors*H, W, 2]
        Returns:
            propagated depth: sorted propagated depth map [batch, num_depth+num_neighbors, height, width]
"""
batch, num_depth, height, width = depth_sample.size()
num_neighbors = grid.size()[1] // height
propagate_depth_sample = F.grid_sample(
depth_sample[:, num_depth // 2, :, :].unsqueeze(1),
grid,
mode="bilinear",
padding_mode="border",
align_corners=False
).view(batch, num_neighbors, height, width)
return torch.sort(torch.cat((depth_sample, propagate_depth_sample), dim=1), dim=1)[0]
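def _propagation_example() -> torch.Tensor:
    """Illustrative sketch only (toy tensors): step 2 gathers the current best depth at
    neighbouring pixels via grid_sample and merges it into the hypothesis set. A trivial
    identity-like grid is used here; the real model predicts adaptive offsets with a
    convolution (see PatchMatch.propa_conv below)."""
    batch, num_depth, height, width, neighbors = 1, 5, 8, 10, 4
    depth_sample = torch.rand(batch, num_depth, height, width) + 0.5
    ys = torch.linspace(-1.0, 1.0, height).view(height, 1).expand(height, width)
    xs = torch.linspace(-1.0, 1.0, width).view(1, width).expand(height, width)
    grid = torch.stack((xs, ys), dim=-1).repeat(neighbors, 1, 1).unsqueeze(0)  # (1, neighbors*H, W, 2)
    return Propagation()(depth_sample, grid)  # (1, num_depth + neighbors, H, W), sorted along dim 1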
class Evaluation(nn.Module):
"""Evaluation module for adaptive evaluation step in Learning-based Patchmatch
Used to compute the matching costs for all the hypotheses and choose best solutions.
"""
def __init__(self, G: int = 8) -> None:
"""Initialize method`
Args:
G: the feature channels of input will be divided evenly into G groups
"""
super(Evaluation, self).__init__()
self.G = G
self.pixel_wise_net = PixelwiseNet(self.G)
self.softmax = nn.LogSoftmax(dim=1)
self.similarity_net = SimilarityNet(self.G)
def forward(
self,
ref_feature: torch.Tensor,
src_features: List[torch.Tensor],
ref_proj: torch.Tensor,
src_projs: List[torch.Tensor],
depth_sample: torch.Tensor,
grid: torch.Tensor,
weight: torch.Tensor,
view_weights: torch.Tensor = torch.empty(0),
is_inverse: bool = False
) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]:
"""Forward method for adaptive evaluation
Args:
ref_feature: feature from reference view, (B, C, H, W)
src_features: features from (Nview-1) source views, (Nview-1) * (B, C, H, W), where Nview is the number of
input images (or views) of PatchmatchNet
ref_proj: projection matrix of reference view, (B, 4, 4)
src_projs: source matrices of source views, (Nview-1) * (B, 4, 4), where Nview is the number of input
images (or views) of PatchmatchNet
depth_sample: sample depth map, (B,Ndepth,H,W)
grid: grid, (B, evaluate_neighbors*H, W, 2)
weight: weight, (B,Ndepth,1,H,W)
view_weights: Tensor to store weights of source views, in shape of (B,Nview-1,H,W),
Nview-1 represents the number of source views
is_inverse: Flag for inverse depth regression
Returns:
depth_sample: expectation of depth sample, (B,H,W)
score: probability map, (B,Ndepth,H,W)
view_weights: optional, Tensor to store weights of source views, in shape of (B,Nview-1,H,W),
Nview-1 represents the number of source views
"""
batch, feature_channel, height, width = ref_feature.size()
device = ref_feature.device
num_depth = depth_sample.size()[1]
assert (
len(src_features) == len(src_projs)
), "Patchmatch Evaluation: Different number of images and projection matrices"
if not is_empty(view_weights):
assert (
len(src_features) == view_weights.size()[1]
), "Patchmatch Evaluation: Different number of images and view weights"
        # Small constant tensor (1e-5) to avoid division by zero when normalising by the view-weight sum
pixel_wise_weight_sum = 1e-5 * torch.ones((batch, 1, 1, height, width), dtype=torch.float32, device=device)
ref_feature = ref_feature.view(batch, self.G, feature_channel // self.G, 1, height, width)
similarity_sum = torch.zeros((batch, self.G, num_depth, height, width), dtype=torch.float32, device=device)
i = 0
view_weights_list = []
for src_feature, src_proj in zip(src_features, src_projs):
warped_feature = differentiable_warping(
src_feature, src_proj, ref_proj, depth_sample
).view(batch, self.G, feature_channel // self.G, num_depth, height, width)
# group-wise correlation
similarity = (warped_feature * ref_feature).mean(2)
# pixel-wise view weight
if is_empty(view_weights):
view_weight = self.pixel_wise_net(similarity)
view_weights_list.append(view_weight)
else:
# reuse the pixel-wise view weight from first iteration of Patchmatch on stage 3
view_weight = view_weights[:, i].unsqueeze(1) # [B,1,H,W]
i = i + 1
similarity_sum += similarity * view_weight.unsqueeze(1)
pixel_wise_weight_sum += view_weight.unsqueeze(1)
# aggregated matching cost across all the source views
similarity = similarity_sum.div_(pixel_wise_weight_sum) # [B, G, Ndepth, H, W]
# adaptive spatial cost aggregation
score = self.similarity_net(similarity, grid, weight) # [B, G, Ndepth, H, W]
# apply softmax to get probability
score = torch.exp(self.softmax(score))
if is_empty(view_weights):
            view_weights = torch.cat(view_weights_list, dim=1)  # [B, Nview-1, H, W], one weight map per source view
if is_inverse:
# depth regression: inverse depth regression
depth_index = torch.arange(0, num_depth, 1, device=device).view(1, num_depth, 1, 1)
depth_index = torch.sum(depth_index * score, dim=1)
inverse_min_depth = 1.0 / depth_sample[:, -1, :, :]
inverse_max_depth = 1.0 / depth_sample[:, 0, :, :]
depth_sample = inverse_max_depth + depth_index / (num_depth - 1) * (inverse_min_depth - inverse_max_depth)
depth_sample = 1.0 / depth_sample
else:
# depth regression: expectation
depth_sample = torch.sum(depth_sample * score, dim=1)
return depth_sample, score, view_weights.detach()
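def _groupwise_correlation_example() -> torch.Tensor:
    """Illustrative sketch only (random tensors): the group-wise correlation used above as the
    matching cost. Channels are split into G groups and reference/warped features are compared
    per group, giving a (B, G, Ndepth, H, W) similarity volume."""
    batch, channels, num_depth, height, width, groups = 1, 16, 4, 8, 10, 8
    ref_feature = torch.rand(batch, groups, channels // groups, 1, height, width)
    warped_feature = torch.rand(batch, groups, channels // groups, num_depth, height, width)
    return (warped_feature * ref_feature).mean(2)  # broadcasts over the depth dimension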
class PatchMatch(nn.Module):
"""Patchmatch module"""
def __init__(
self,
propagation_out_range: int = 2,
patchmatch_iteration: int = 2,
patchmatch_num_sample: int = 16,
patchmatch_interval_scale: float = 0.025,
num_feature: int = 64,
G: int = 8,
propagate_neighbors: int = 16,
evaluate_neighbors: int = 9,
stage: int = 3,
) -> None:
"""Initialize method
Args:
            propagation_out_range: dilation used by the adaptive propagation/evaluation convolutions,
            patchmatch_iteration: number of patchmatch iterations,
            patchmatch_num_sample: number of depth samples per patchmatch iteration,
            patchmatch_interval_scale: depth interval scale,
            num_feature: number of feature channels,
            G: the feature channels of the input will be divided evenly into G groups,
            propagate_neighbors: number of neighbors to be sampled in propagation,
            evaluate_neighbors: number of neighbors to be sampled in evaluation,
            stage: stage number,
"""
super(PatchMatch, self).__init__()
self.patchmatch_iteration = patchmatch_iteration
self.patchmatch_interval_scale = patchmatch_interval_scale
self.propa_num_feature = num_feature
# group wise correlation
self.G = G
self.stage = stage
self.dilation = propagation_out_range
self.propagate_neighbors = propagate_neighbors
self.evaluate_neighbors = evaluate_neighbors
# Using dictionary instead of Enum since TorchScript cannot recognize and export it correctly
self.grid_type = {"propagation": 1, "evaluation": 2}
self.depth_initialization = DepthInitialization(patchmatch_num_sample)
self.propagation = Propagation()
self.evaluation = Evaluation(self.G)
# adaptive propagation: last iteration on stage 1 does not have propagation,
# but we still define this for TorchScript export compatibility
self.propa_conv = nn.Conv2d(
in_channels=self.propa_num_feature,
out_channels=max(2 * self.propagate_neighbors, 1),
kernel_size=3,
stride=1,
padding=self.dilation,
dilation=self.dilation,
bias=True,
)
nn.init.constant_(self.propa_conv.weight, 0.0)
nn.init.constant_(self.propa_conv.bias, 0.0)
# adaptive spatial cost aggregation (adaptive evaluation)
self.eval_conv = nn.Conv2d(
in_channels=self.propa_num_feature,
out_channels=2 * self.evaluate_neighbors,
kernel_size=3,
stride=1,
padding=self.dilation,
dilation=self.dilation,
bias=True,
)
nn.init.constant_(self.eval_conv.weight, 0.0)
nn.init.constant_(self.eval_conv.bias, 0.0)
self.feature_weight_net = FeatureWeightNet(self.evaluate_neighbors, self.G)
def get_grid(
self, grid_type: int, batch: int, height: int, width: int, offset: torch.Tensor, device: torch.device
) -> torch.Tensor:
"""Compute the offset for adaptive propagation or spatial cost aggregation in adaptive evaluation
Args:
grid_type: type of grid - propagation (1) or evaluation (2)
batch: batch size
height: grid height
width: grid width
offset: grid offset
device: device on which to place tensor
Returns:
generated grid: in the shape of [batch, propagate_neighbors*H, W, 2]
"""
if grid_type == self.grid_type["propagation"]:
if self.propagate_neighbors == 4: # if 4 | |
<filename>skills_taxonomy_v2/analysis/skills_taxonomy_application/Application - Geography.py
# -*- coding: utf-8 -*-
# ---
# jupyter:
# jupytext:
# cell_metadata_filter: -all
# comment_magics: true
# text_representation:
# extension: .py
# format_name: percent
# format_version: '1.3'
# jupytext_version: 1.11.4
# kernelspec:
# display_name: Python 3 (ipykernel)
# language: python
# name: python3
# ---
# %% [markdown]
# # The geographical application
#
# - We have 232394 skill sentences
# - We have 18893 skills
# - We have skills identified from 107434 unique job adverts
# - Of these job adverts 104801 have location information
# - We don't have all skills from each job advert
#
#
# %%
# cd ../../..
# %%
from geopandas import GeoDataFrame
from shapely.geometry import Point
from bokeh.palettes import Turbo256
from skills_taxonomy_v2.getters.s3_data import load_s3_data
# %%
from collections import Counter, defaultdict
import random
from tqdm import tqdm
import json
import plotly.express as px
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import boto3
import geopandas as gpd
# %%
bucket_name = "skills-taxonomy-v2"
s3 = boto3.resource("s3")
# %% [markdown]
# ### Load data
# %%
skill_hierarchy_file = "outputs/skills_hierarchy/2021.09.06_skills_hierarchy.json"
skill_hierarchy = load_s3_data(s3, bucket_name, skill_hierarchy_file)
# %%
sentence_data = load_s3_data(
s3,
bucket_name,
"outputs/skills_extraction/extracted_skills/2021.08.31_sentences_data.json",
)
# %%
sentence_data = pd.DataFrame(sentence_data)
sentence_data = sentence_data[sentence_data["Cluster number"] != -1]
# %%
sentence_data["Cluster number"].nunique()
# %%
# Manual level A names
with open("skills_taxonomy_v2/utils/2021.09.06_level_a_rename_dict.json", "r") as f:
level_a_rename_dict = json.load(f)
# %%
# Add hierarchy information to this df
sentence_data["Hierarchy level A name"] = sentence_data["Cluster number"].apply(
lambda x: level_a_rename_dict[str(skill_hierarchy[str(x)]["Hierarchy level A"])]
)
sentence_data["Hierarchy level B name"] = sentence_data["Cluster number"].apply(
lambda x: skill_hierarchy[str(x)]["Hierarchy level B name"]
)
sentence_data["Hierarchy level C name"] = sentence_data["Cluster number"].apply(
lambda x: skill_hierarchy[str(x)]["Hierarchy level C name"]
)
# %%
sentence_data.head(1)
# %%
job_ids = set(sentence_data["job id"].tolist())
# %%
len(job_ids)
# %%
sentence_data["Hierarchy level B name"].nunique()
# %%
sentence_data["Hierarchy level C name"].nunique()
# %% [markdown]
# ## Import locations for jobs in this data
# This was created in `skills_taxonomy_v2/analysis/skills_taxonomy_application/locations_to_nuts2.py`
#
# There are 107434 job IDs but not all have locations.
# The loaded mapping should contain about 104801 entries.
# %%
jobs_with_nuts_codes = load_s3_data(
s3,
bucket_name,
"outputs/tk_data_analysis/metadata_location/2021.09.14_jobs_with_nuts_codes.json",
)
# %%
len(jobs_with_nuts_codes)
# %%
nuts_uniq = list()
for k, v in jobs_with_nuts_codes.items():
if len(v) == 6:
nuts_uniq.append(v[5])
# %%
Counter(nuts_uniq)
# %% [markdown]
# ## Merge the metadata with the sentence data
# %%
job_id_loc_dict = jobs_with_nuts_codes
# %%
sentence_data_with_meta = sentence_data.copy()[
sentence_data["job id"].isin(job_id_loc_dict)
]
print(len(sentence_data_with_meta))
sentence_data_with_meta["long-lat"] = sentence_data_with_meta["job id"].apply(
lambda x: job_id_loc_dict.get(x)[1]
)
sentence_data_with_meta["latitude"] = sentence_data_with_meta["job id"].apply(
lambda x: float(job_id_loc_dict.get(x)[1].split(",")[0])
if job_id_loc_dict.get(x)[1]
else None
)
sentence_data_with_meta["longitude"] = sentence_data_with_meta["job id"].apply(
lambda x: float(job_id_loc_dict.get(x)[1].split(",")[1])
if job_id_loc_dict.get(x)[1]
else None
)
sentence_data_with_meta["location_name"] = sentence_data_with_meta["job id"].apply(
lambda x: job_id_loc_dict.get(x)[0]
)
sentence_data_with_meta["region"] = sentence_data_with_meta["job id"].apply(
lambda x: job_id_loc_dict.get(x)[2]
)
sentence_data_with_meta["subregion"] = sentence_data_with_meta["job id"].apply(
lambda x: job_id_loc_dict.get(x)[3]
)
sentence_data_with_meta["NUTs region"] = sentence_data_with_meta["job id"].apply(
lambda x: job_id_loc_dict.get(x)[5] if len(job_id_loc_dict.get(x)) == 6 else None
)
# %%
sentence_data_with_meta.head(2)
# %%
nesta_orange = [255 / 255, 90 / 255, 0 / 255]
# %%
levela_cols = []
for i in range(0, 7):
levela_cols.append(Turbo256[i * round(len(Turbo256) / 7)])
levela_cols = levela_cols[0:6]
# %% [markdown]
# ## Number of data points per location
# %%
sentence_data_with_meta["NUTs region"].value_counts().plot.bar(
xlabel="NUTs region",
ylabel="Number of data points (skill sentences)",
title="Number of data points by NUTs region",
color=nesta_orange,
)
# %%
sentence_data_with_meta["NUTs region"].value_counts()
# %% [markdown]
# ## Plot location proportions of hier A
# %%
prop_level_a_region = sentence_data_with_meta.groupby("region")[
"Hierarchy level A name"
].apply(lambda x: x.value_counts() / len(x))
prop_level_a_region.unstack().plot.barh(
stacked=True,
title="Proportion of skill types in each region",
ylabel="",
xlabel="",
color=levela_cols,
)
plt.legend(bbox_to_anchor=(1.05, 1))
plt.savefig(
"outputs/skills_taxonomy_application/region_application/region_levela_props.pdf",
bbox_inches="tight",
)
# %%
prop_level_a_nuts = sentence_data_with_meta.groupby("NUTs region")[
"Hierarchy level A name"
].apply(lambda x: x.value_counts() / len(x))
prop_level_a_nuts.unstack().plot.barh(
stacked=True,
title="Proportion of skill types in each NUTs region",
ylabel="",
xlabel="",
color=levela_cols,
)
plt.legend(bbox_to_anchor=(1.05, 1))
plt.savefig(
"outputs/skills_taxonomy_application/region_application/nuts_levela_props.pdf",
bbox_inches="tight",
)
# %%
# Another view type
prop_level_a_region.unstack().plot.bar(
stacked=False,
title="Proportion of skill types in each region",
ylabel="",
xlabel="",
figsize=(10, 4),
color=levela_cols,
)
plt.legend(bbox_to_anchor=(1.05, 1))
plt.savefig(
"outputs/skills_taxonomy_application/region_application/region_levela_props_t.pdf",
bbox_inches="tight",
)
# %%
prop_level_a_nuts.reset_index().groupby(["level_1", "NUTs region"]).apply(
lambda x: x["Hierarchy level A name"].iloc[0]
).unstack().plot.bar(
stacked=False, title="Proportion of skill types in each region", figsize=(10, 4)
)
plt.legend(bbox_to_anchor=(1.05, 1))
plt.savefig(
"outputs/skills_taxonomy_application/region_application/nuts_levela_props_t.pdf",
bbox_inches="tight",
)
# %% [markdown]
# ## Proportion of each level A in a box plot
# %%
props = dict(boxes="orange", whiskers="black", medians="black", caps="black")
axes = prop_level_a_nuts.reset_index().boxplot(
by=["level_1"],
column=["Hierarchy level A name"],
vert=False,
figsize=(10, 4),
color=props,
patch_artist=True,
)
axes.set_title("Spread of proportions of skill types over all NUTs regions")
axes.set_xlabel("")
plt.suptitle("")
plt.savefig(
"outputs/skills_taxonomy_application/region_application/nuts_levela_props_box.pdf",
bbox_inches="tight",
)
# %%
name2num_dict = {v: int(k) for k, v in level_a_rename_dict.items()}
# %%
# Only the ones with the biggest spread:
level_a_spread = [
"Information technology and languages",
"Teaching and care",
"Safety, finance, maintenance and service",
"Business administration and management",
]
fig, axs = plt.subplots(1, 4, figsize=(20, 3))
coli = 0
rowi = 0
for i, level_a_name in enumerate(level_a_spread):
color = levela_cols[name2num_dict[level_a_name]]
df = prop_level_a_nuts.reset_index()
sort_props = (
df[df["level_1"] == level_a_name]
.set_index("NUTs region")
.sort_values(by="Hierarchy level A name")
)
sort_props.plot.bar(ax=axs[i], title=level_a_name, legend=False, color=color)
coli += 1
fig.subplots_adjust(hspace=1.3)
plt.savefig(
"outputs/skills_taxonomy_application/region_application/nuts_levela_props_separate_top.pdf",
bbox_inches="tight",
)
# %%
fig, axs = plt.subplots(2, 3, figsize=(20, 10))
coli = 0
rowi = 0
for i, level_a_name in enumerate(
sentence_data_with_meta["Hierarchy level A name"].unique()
):
color = levela_cols[name2num_dict[level_a_name]]
if i != 0 and i % 3 == 0:
rowi += 1
coli = 0
df = prop_level_a_nuts.reset_index()
sort_props = (
df[df["level_1"] == level_a_name]
.set_index("NUTs region")
.sort_values(by="Hierarchy level A name")
)
sort_props.plot.bar(
ax=axs[rowi, coli], title=level_a_name, legend=False, color=color
)
coli += 1
fig.subplots_adjust(hspace=1.3)
plt.savefig(
"outputs/skills_taxonomy_application/region_application/nuts_levela_props_separate_all.pdf",
bbox_inches="tight",
)
# %% [markdown]
# ## Plot circles on map for
# - How many job adverts
# - How many from the 4 most diverging level A codes
# %%
df = sentence_data_with_meta.groupby("NUTs region")[["latitude", "longitude"]].mean()
# Add the number of unique job ids
df = pd.concat(
[df, sentence_data_with_meta.groupby(["NUTs region"])["job id"].nunique()], axis=1
)
# Add the proportions of each level A
df = pd.concat(
[
df,
prop_level_a_nuts.reset_index().pivot(
index="NUTs region", columns="level_1", values="Hierarchy level A name"
),
],
axis=1,
)
# %%
# Normalise the proportions, otherwise they are all v similar on plot
for col_name in sentence_data_with_meta["Hierarchy level A name"].unique():
df[f"{col_name} - normalised"] = (df[col_name] - df[col_name].min()) / (
df[col_name].max() - df[col_name].min()
)
# %%
geometry = [Point(xy) for xy in zip(df["longitude"], df["latitude"])]
gdf = GeoDataFrame(df, geometry=geometry)
# %%
world = gpd.read_file(gpd.datasets.get_path("naturalearth_lowres"))
fig, ax_map = plt.subplots(2, 3, figsize=(20, 10))
coli = 0
rowi = 0
for i, col_name in enumerate(
sentence_data_with_meta["Hierarchy level A name"].unique()
):
# color = levela_cols[name2num_dict[col_name]]
# if name2num_dict[col_name] == 0:
# color = "white"
color = [
[1, 1 - c, 0]
for c in gdf[f"{col_name} - normalised"] / max(gdf[f"{col_name} - normalised"])
]
if i != 0 and i % 3 == 0:
rowi += 1
coli = 0
world[world.name == "United Kingdom"].plot(ax=ax_map[rowi, coli], color="black")
gdf.plot(
ax=ax_map[rowi, coli],
marker="o",
color=color,
markersize=200, # gdf[f"{col_name} - normalised"] * 500,
alpha=1,
)
ax_map[rowi, coli].set_title(f"{col_name}")
ax_map[rowi, coli].set_axis_off()
coli += 1
plt.savefig(
"outputs/skills_taxonomy_application/region_application/nuts_levela_props_maps.pdf",
bbox_inches="tight",
)
# %%
world = gpd.read_file(gpd.datasets.get_path("naturalearth_lowres"))
fig, ax_map = plt.subplots(1, 2, figsize=(15, 5))
world[world.name == "United Kingdom"].plot(ax=ax_map[0], color="black")
gdf.plot(
ax=ax_map[0],
marker="o",
c=[[1, 1 - c, 0] for c in gdf["job id"] / max(gdf["job id"])],
markersize=200, # gdf["job id"] / 10,
alpha=1,
)
ax_map[0].set_title("Number of job adverts in sample by region")
ax_map[0].set_axis_off()
# sentence_data_with_meta["NUTs region"].value_counts().plot.barh(
# xlabel="", ylabel="", title="",
# color=nesta_orange,
# ax=ax_map[1]
# )
nuts_num_jobs = (
sentence_data_with_meta.groupby(["NUTs region"])["job id"].nunique().sort_values()
)
nuts_num_jobs.plot.barh(
xlabel="",
ylabel="",
title="",
color=[[1, 1 - c, 0] for c in nuts_num_jobs / max(nuts_num_jobs)],
ax=ax_map[1],
)
plt.savefig(
"outputs/skills_taxonomy_application/region_application/nuts_numbers_maps.pdf",
bbox_inches="tight",
)
# %% [markdown]
# ## London vs the rest for level B
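#
# The ratio computed below is effectively a location quotient: for each level B skill group $g$,
# $LQ_g = p_g^{\mathrm{London}} / p_g^{\mathrm{rest}}$, where $p_g$ is the share of a region's
# skill sentences falling in group $g$. Values above 1 mean the group is over-represented in
# Greater London relative to the rest of the UK.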
# %%
sum(sentence_data_with_meta["NUTs region"].notna())
# %%
sum(sentence_data_with_meta["subregion"].notna())
# %%
sentence_data_rest = sentence_data_with_meta[
sentence_data_with_meta["NUTs region"] != "Greater London"
]
level_b_prop_rest = sentence_data_rest["Hierarchy level B name"].value_counts() / len(
sentence_data_rest
)
sentence_data_with_meta_filter = sentence_data_with_meta[
sentence_data_with_meta["subregion"] == "Greater London"
]
level_b_prop_london = sentence_data_with_meta_filter[
"Hierarchy level B name"
].value_counts() / len(sentence_data_with_meta_filter)
london_quotient = level_b_prop_london / level_b_prop_rest
london_quotient = london_quotient[pd.notnull(london_quotient)].sort_values(
ascending=True
)
# Get the level A names for each of these (in same order)
level_a_names_mapped = [
    sentence_data[sentence_data["Hierarchy level B name"] == i]["Hierarchy level A name"].unique()[0]
    for i in london_quotient.index
]
level_a_cols_mapped = [levela_cols[name2num_dict[level_a_name]] for level_a_name in level_a_names_mapped]
london_quotient.plot.barh(
figsize=(8, 15),
ylabel="London quotient",
xlabel="Level B hierarchy",
title="London quotient",
color=level_a_cols_mapped,
)
plt.axvline(1, color="black")
color_dict = {k: levela_cols[v] for k, v in name2num_dict.items()}
markers = [
plt.Line2D([0, 0], [0, 0], color=color, marker="o", linestyle="")
for color in color_dict.values()
]
plt.legend(
markers,
color_dict.keys(),
numpoints=1,
title="Level A skill group",
loc="lower right",
)
plt.savefig(
"outputs/skills_taxonomy_application/region_application/london_quotient_levb.pdf",
bbox_inches="tight",
)
# %%
sentence_data_rest = sentence_data_with_meta[
sentence_data_with_meta["NUTs region"] != "Greater London"
]
level_a_prop_rest = sentence_data_rest["Hierarchy level A name"].value_counts() / len(
sentence_data_rest
)
sentence_data_with_meta_filter = sentence_data_with_meta[
sentence_data_with_meta["subregion"] == "Greater London"
]
level_a_prop_london = sentence_data_with_meta_filter[
"Hierarchy level A name"
].value_counts() / len(sentence_data_with_meta_filter)
london_quotient = level_a_prop_london / level_a_prop_rest
london_quotient = london_quotient[pd.notnull(london_quotient)].sort_values(
ascending=True
)
london_quotient.plot.barh(
figsize=(8, 4),
ylabel="London quotient",
xlabel="Level A hierarchy",
title="Greater London quotient",
color=[levela_cols[name2num_dict[i]] for i in london_quotient.keys()],
)
plt.axvline(1, color="black")
plt.savefig(
"outputs/skills_taxonomy_application/region_application/london_quotient_leva.pdf",
bbox_inches="tight",
)
# %% [markdown]
# ## Other outliers
# %%
# The North East has a much higher demand for “Teaching and care”.
region = "North East (England)"
sentence_data_region = sentence_data_with_meta[
sentence_data_with_meta["NUTs region"] == region
]
level_b_prop_region = sentence_data_region[
"Hierarchy level B name"
].value_counts() / len(sentence_data_region)
sentence_data_rest = sentence_data_with_meta[
sentence_data_with_meta["NUTs region"] != region
]
level_b_prop_rest = sentence_data_rest["Hierarchy level B name"].value_counts() / len(
sentence_data_rest
)
region_quotient = level_b_prop_region / level_b_prop_rest
region_quotient = region_quotient[pd.notnull(region_quotient)].sort_values(
ascending=True
)
region_quotient
# %%
sentence_data[sentence_data["Hierarchy level B name"] == "clinical-patients-nursing"][
"Hierarchy level C name"
].value_counts()
# %%
# Wales has a particular low demand for “Customer service and marketing” skills.
region = "Wales"
sentence_data_region = sentence_data_with_meta[
sentence_data_with_meta["NUTs region"] == region
]
level_b_prop_region = sentence_data_region[
"Hierarchy level B name"
].value_counts() / len(sentence_data_region)
sentence_data_rest = sentence_data_with_meta[
sentence_data_with_meta["NUTs region"] != region
]
level_b_prop_rest = sentence_data_rest["Hierarchy level B name"].value_counts() / len(
sentence_data_rest
)
region_quotient = level_b_prop_region / level_b_prop_rest
region_quotient = region_quotient[pd.notnull(region_quotient)].sort_values(
ascending=True
)
region_quotient
# %%
region = "Northern Ireland"
sentence_data_region = sentence_data_with_meta[
sentence_data_with_meta["NUTs region"] == region
]
level_b_prop_region = sentence_data_region[
"Hierarchy level B name"
].value_counts() / | |
):
listener.exitVariable(self)
def variable(self):
localctx = GraphQLParser.VariableContext(self, self._ctx, self.state)
self.enterRule(localctx, 52, self.RULE_variable)
try:
self.enterOuterAlt(localctx, 1)
self.state = 289
self.match(GraphQLParser.T__16)
self.state = 290
self.name()
except RecognitionException as re:
localctx.exception = re
self._errHandler.reportError(self, re)
self._errHandler.recover(self, re)
finally:
self.exitRule()
return localctx
class VariableDefinitionsContext(ParserRuleContext):
def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
super().__init__(parent, invokingState)
self.parser = parser
def variableDefinition(self, i:int=None):
if i is None:
return self.getTypedRuleContexts(GraphQLParser.VariableDefinitionContext)
else:
return self.getTypedRuleContext(GraphQLParser.VariableDefinitionContext,i)
def getRuleIndex(self):
return GraphQLParser.RULE_variableDefinitions
def enterRule(self, listener:ParseTreeListener):
if hasattr( listener, "enterVariableDefinitions" ):
listener.enterVariableDefinitions(self)
def exitRule(self, listener:ParseTreeListener):
if hasattr( listener, "exitVariableDefinitions" ):
listener.exitVariableDefinitions(self)
def variableDefinitions(self):
localctx = GraphQLParser.VariableDefinitionsContext(self, self._ctx, self.state)
self.enterRule(localctx, 54, self.RULE_variableDefinitions)
self._la = 0 # Token type
try:
self.enterOuterAlt(localctx, 1)
self.state = 292
self.match(GraphQLParser.T__5)
self.state = 294
self._errHandler.sync(self)
_la = self._input.LA(1)
while True:
self.state = 293
self.variableDefinition()
self.state = 296
self._errHandler.sync(self)
_la = self._input.LA(1)
if not (_la==GraphQLParser.T__16):
break
self.state = 298
self.match(GraphQLParser.T__6)
except RecognitionException as re:
localctx.exception = re
self._errHandler.reportError(self, re)
self._errHandler.recover(self, re)
finally:
self.exitRule()
return localctx
class VariableDefinitionContext(ParserRuleContext):
def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
super().__init__(parent, invokingState)
self.parser = parser
def variable(self):
return self.getTypedRuleContext(GraphQLParser.VariableContext,0)
def type_(self):
return self.getTypedRuleContext(GraphQLParser.Type_Context,0)
def defaultValue(self):
return self.getTypedRuleContext(GraphQLParser.DefaultValueContext,0)
def getRuleIndex(self):
return GraphQLParser.RULE_variableDefinition
def enterRule(self, listener:ParseTreeListener):
if hasattr( listener, "enterVariableDefinition" ):
listener.enterVariableDefinition(self)
def exitRule(self, listener:ParseTreeListener):
if hasattr( listener, "exitVariableDefinition" ):
listener.exitVariableDefinition(self)
def variableDefinition(self):
localctx = GraphQLParser.VariableDefinitionContext(self, self._ctx, self.state)
self.enterRule(localctx, 56, self.RULE_variableDefinition)
self._la = 0 # Token type
try:
self.enterOuterAlt(localctx, 1)
self.state = 300
self.variable()
self.state = 301
self.match(GraphQLParser.T__7)
self.state = 302
self.type_()
self.state = 304
self._errHandler.sync(self)
_la = self._input.LA(1)
if _la==GraphQLParser.T__17:
self.state = 303
self.defaultValue()
except RecognitionException as re:
localctx.exception = re
self._errHandler.reportError(self, re)
self._errHandler.recover(self, re)
finally:
self.exitRule()
return localctx
class DefaultValueContext(ParserRuleContext):
def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
super().__init__(parent, invokingState)
self.parser = parser
def value(self):
return self.getTypedRuleContext(GraphQLParser.ValueContext,0)
def getRuleIndex(self):
return GraphQLParser.RULE_defaultValue
def enterRule(self, listener:ParseTreeListener):
if hasattr( listener, "enterDefaultValue" ):
listener.enterDefaultValue(self)
def exitRule(self, listener:ParseTreeListener):
if hasattr( listener, "exitDefaultValue" ):
listener.exitDefaultValue(self)
def defaultValue(self):
localctx = GraphQLParser.DefaultValueContext(self, self._ctx, self.state)
self.enterRule(localctx, 58, self.RULE_defaultValue)
try:
self.enterOuterAlt(localctx, 1)
self.state = 306
self.match(GraphQLParser.T__17)
self.state = 307
self.value()
except RecognitionException as re:
localctx.exception = re
self._errHandler.reportError(self, re)
self._errHandler.recover(self, re)
finally:
self.exitRule()
return localctx
class Type_Context(ParserRuleContext):
def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
super().__init__(parent, invokingState)
self.parser = parser
def namedType(self):
return self.getTypedRuleContext(GraphQLParser.NamedTypeContext,0)
def listType(self):
return self.getTypedRuleContext(GraphQLParser.ListTypeContext,0)
def getRuleIndex(self):
return GraphQLParser.RULE_type_
def enterRule(self, listener:ParseTreeListener):
if hasattr( listener, "enterType_" ):
listener.enterType_(self)
def exitRule(self, listener:ParseTreeListener):
if hasattr( listener, "exitType_" ):
listener.exitType_(self)
def type_(self):
localctx = GraphQLParser.Type_Context(self, self._ctx, self.state)
self.enterRule(localctx, 60, self.RULE_type_)
self._la = 0 # Token type
try:
self.state = 317
self._errHandler.sync(self)
token = self._input.LA(1)
if token in [GraphQLParser.NAME]:
self.enterOuterAlt(localctx, 1)
self.state = 309
self.namedType()
self.state = 311
self._errHandler.sync(self)
_la = self._input.LA(1)
if _la==GraphQLParser.T__18:
self.state = 310
self.match(GraphQLParser.T__18)
pass
elif token in [GraphQLParser.T__14]:
self.enterOuterAlt(localctx, 2)
self.state = 313
self.listType()
self.state = 315
self._errHandler.sync(self)
_la = self._input.LA(1)
if _la==GraphQLParser.T__18:
self.state = 314
self.match(GraphQLParser.T__18)
pass
else:
raise NoViableAltException(self)
except RecognitionException as re:
localctx.exception = re
self._errHandler.reportError(self, re)
self._errHandler.recover(self, re)
finally:
self.exitRule()
return localctx
class NamedTypeContext(ParserRuleContext):
def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
super().__init__(parent, invokingState)
self.parser = parser
def name(self):
return self.getTypedRuleContext(GraphQLParser.NameContext,0)
def getRuleIndex(self):
return GraphQLParser.RULE_namedType
def enterRule(self, listener:ParseTreeListener):
if hasattr( listener, "enterNamedType" ):
listener.enterNamedType(self)
def exitRule(self, listener:ParseTreeListener):
if hasattr( listener, "exitNamedType" ):
listener.exitNamedType(self)
def namedType(self):
localctx = GraphQLParser.NamedTypeContext(self, self._ctx, self.state)
self.enterRule(localctx, 62, self.RULE_namedType)
try:
self.enterOuterAlt(localctx, 1)
self.state = 319
self.name()
except RecognitionException as re:
localctx.exception = re
self._errHandler.reportError(self, re)
self._errHandler.recover(self, re)
finally:
self.exitRule()
return localctx
class ListTypeContext(ParserRuleContext):
def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
super().__init__(parent, invokingState)
self.parser = parser
def type_(self):
return self.getTypedRuleContext(GraphQLParser.Type_Context,0)
def getRuleIndex(self):
return GraphQLParser.RULE_listType
def enterRule(self, listener:ParseTreeListener):
if hasattr( listener, "enterListType" ):
listener.enterListType(self)
def exitRule(self, listener:ParseTreeListener):
if hasattr( listener, "exitListType" ):
listener.exitListType(self)
def listType(self):
localctx = GraphQLParser.ListTypeContext(self, self._ctx, self.state)
self.enterRule(localctx, 64, self.RULE_listType)
try:
self.enterOuterAlt(localctx, 1)
self.state = 321
self.match(GraphQLParser.T__14)
self.state = 322
self.type_()
self.state = 323
self.match(GraphQLParser.T__15)
except RecognitionException as re:
localctx.exception = re
self._errHandler.reportError(self, re)
self._errHandler.recover(self, re)
finally:
self.exitRule()
return localctx
class DirectivesContext(ParserRuleContext):
def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
super().__init__(parent, invokingState)
self.parser = parser
def directive(self, i:int=None):
if i is None:
return self.getTypedRuleContexts(GraphQLParser.DirectiveContext)
else:
return self.getTypedRuleContext(GraphQLParser.DirectiveContext,i)
def getRuleIndex(self):
return GraphQLParser.RULE_directives
def enterRule(self, listener:ParseTreeListener):
if hasattr( listener, "enterDirectives" ):
listener.enterDirectives(self)
def exitRule(self, listener:ParseTreeListener):
if hasattr( listener, "exitDirectives" ):
listener.exitDirectives(self)
def directives(self):
localctx = GraphQLParser.DirectivesContext(self, self._ctx, self.state)
self.enterRule(localctx, 66, self.RULE_directives)
self._la = 0 # Token type
try:
self.enterOuterAlt(localctx, 1)
self.state = 326
self._errHandler.sync(self)
_la = self._input.LA(1)
while True:
self.state = 325
self.directive()
self.state = 328
self._errHandler.sync(self)
_la = self._input.LA(1)
if not (_la==GraphQLParser.T__19):
break
except RecognitionException as re:
localctx.exception = re
self._errHandler.reportError(self, re)
self._errHandler.recover(self, re)
finally:
self.exitRule()
return localctx
class DirectiveContext(ParserRuleContext):
def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
super().__init__(parent, invokingState)
self.parser = parser
def name(self):
return self.getTypedRuleContext(GraphQLParser.NameContext,0)
def arguments(self):
return self.getTypedRuleContext(GraphQLParser.ArgumentsContext,0)
def getRuleIndex(self):
return GraphQLParser.RULE_directive
def enterRule(self, listener:ParseTreeListener):
if hasattr( listener, "enterDirective" ):
listener.enterDirective(self)
def exitRule(self, listener:ParseTreeListener):
if hasattr( listener, "exitDirective" ):
listener.exitDirective(self)
def directive(self):
localctx = GraphQLParser.DirectiveContext(self, self._ctx, self.state)
self.enterRule(localctx, 68, self.RULE_directive)
self._la = 0 # Token type
try:
self.enterOuterAlt(localctx, 1)
self.state = 330
self.match(GraphQLParser.T__19)
self.state = 331
self.name()
self.state = 333
self._errHandler.sync(self)
_la = self._input.LA(1)
if _la==GraphQLParser.T__5:
self.state = 332
self.arguments()
except RecognitionException as re:
localctx.exception = re
self._errHandler.reportError(self, re)
self._errHandler.recover(self, re)
finally:
self.exitRule()
return localctx
class TypeSystemDefinitionContext(ParserRuleContext):
def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
super().__init__(parent, invokingState)
self.parser = parser
def schemaDefinition(self):
return self.getTypedRuleContext(GraphQLParser.SchemaDefinitionContext,0)
def typeDefinition(self):
return self.getTypedRuleContext(GraphQLParser.TypeDefinitionContext,0)
def directiveDefinition(self):
return self.getTypedRuleContext(GraphQLParser.DirectiveDefinitionContext,0)
def getRuleIndex(self):
return GraphQLParser.RULE_typeSystemDefinition
def enterRule(self, listener:ParseTreeListener):
if hasattr( listener, "enterTypeSystemDefinition" ):
listener.enterTypeSystemDefinition(self)
def exitRule(self, listener:ParseTreeListener):
if hasattr( listener, "exitTypeSystemDefinition" ):
listener.exitTypeSystemDefinition(self)
def typeSystemDefinition(self):
localctx = GraphQLParser.TypeSystemDefinitionContext(self, self._ctx, self.state)
self.enterRule(localctx, 70, self.RULE_typeSystemDefinition)
try:
self.state = 338
self._errHandler.sync(self)
la_ = self._interp.adaptivePredict(self._input,29,self._ctx)
if la_ == 1:
self.enterOuterAlt(localctx, 1)
self.state = 335
self.schemaDefinition()
pass
elif la_ == 2:
self.enterOuterAlt(localctx, 2)
self.state = 336
self.typeDefinition()
pass
elif la_ == 3:
self.enterOuterAlt(localctx, 3)
self.state = 337
self.directiveDefinition()
pass
except RecognitionException as re:
localctx.exception = re
self._errHandler.reportError(self, re)
self._errHandler.recover(self, re)
finally:
self.exitRule()
return localctx
class TypeSystemExtensionContext(ParserRuleContext):
def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
super().__init__(parent, invokingState)
self.parser = parser
def schemaExtension(self):
return self.getTypedRuleContext(GraphQLParser.SchemaExtensionContext,0)
def typeExtension(self):
return self.getTypedRuleContext(GraphQLParser.TypeExtensionContext,0)
def getRuleIndex(self):
return GraphQLParser.RULE_typeSystemExtension
def enterRule(self, listener:ParseTreeListener):
if hasattr( listener, "enterTypeSystemExtension" ):
listener.enterTypeSystemExtension(self)
def exitRule(self, listener:ParseTreeListener):
if hasattr( listener, "exitTypeSystemExtension" ):
listener.exitTypeSystemExtension(self)
def typeSystemExtension(self):
localctx = GraphQLParser.TypeSystemExtensionContext(self, self._ctx, self.state)
self.enterRule(localctx, 72, self.RULE_typeSystemExtension)
try:
self.state = 342
self._errHandler.sync(self)
la_ = self._interp.adaptivePredict(self._input,30,self._ctx)
if la_ == 1:
self.enterOuterAlt(localctx, 1)
self.state = 340
self.schemaExtension()
pass
elif la_ == 2:
self.enterOuterAlt(localctx, 2)
self.state = 341
self.typeExtension()
pass
except RecognitionException as re:
localctx.exception = re
self._errHandler.reportError(self, re)
self._errHandler.recover(self, re)
finally:
self.exitRule()
return localctx
class SchemaDefinitionContext(ParserRuleContext):
def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
super().__init__(parent, invokingState)
self.parser = parser
self._rootOperationTypeDefinition = None # RootOperationTypeDefinitionContext
self.fields = list() # of RootOperationTypeDefinitionContexts
def directives(self):
return self.getTypedRuleContext(GraphQLParser.DirectivesContext,0)
def rootOperationTypeDefinition(self, i:int=None):
if i is None:
return self.getTypedRuleContexts(GraphQLParser.RootOperationTypeDefinitionContext)
else:
return self.getTypedRuleContext(GraphQLParser.RootOperationTypeDefinitionContext,i)
def getRuleIndex(self):
return GraphQLParser.RULE_schemaDefinition
def enterRule(self, listener:ParseTreeListener):
if hasattr( listener, "enterSchemaDefinition" ):
listener.enterSchemaDefinition(self)
def exitRule(self, listener:ParseTreeListener):
if hasattr( listener, "exitSchemaDefinition" ):
listener.exitSchemaDefinition(self)
def schemaDefinition(self):
localctx = GraphQLParser.SchemaDefinitionContext(self, self._ctx, self.state)
self.enterRule(localctx, 74, self.RULE_schemaDefinition)
self._la = 0 # Token type
try:
self.enterOuterAlt(localctx, 1)
self.state = 344
self.match(GraphQLParser.T__20)
self.state = 346
self._errHandler.sync(self)
_la = self._input.LA(1)
if _la==GraphQLParser.T__19:
self.state = 345
self.directives()
self.state = 348
self.match(GraphQLParser.T__3)
self.state = 350
self._errHandler.sync(self)
_la = self._input.LA(1)
while True:
self.state = 349
localctx._rootOperationTypeDefinition = self.rootOperationTypeDefinition()
localctx.fields.append(localctx._rootOperationTypeDefinition)
self.state = 352
self._errHandler.sync(self)
_la = self._input.LA(1)
if not ((((_la) & ~0x3f) == 0 and ((1 << _la) & ((1 << GraphQLParser.T__0) | (1 << GraphQLParser.T__1) | (1 << GraphQLParser.T__2))) != 0)):
break
self.state = 354
self.match(GraphQLParser.T__4)
except RecognitionException as re:
localctx.exception = re
self._errHandler.reportError(self, re)
self._errHandler.recover(self, re)
finally:
self.exitRule()
return localctx
class RootOperationTypeDefinitionContext(ParserRuleContext):
def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
super().__init__(parent, invokingState)
self.parser = parser
def operationType(self):
return self.getTypedRuleContext(GraphQLParser.OperationTypeContext,0)
def namedType(self):
return self.getTypedRuleContext(GraphQLParser.NamedTypeContext,0)
def getRuleIndex(self):
return GraphQLParser.RULE_rootOperationTypeDefinition
def enterRule(self, listener:ParseTreeListener):
if hasattr( listener, "enterRootOperationTypeDefinition" ):
listener.enterRootOperationTypeDefinition(self)
def exitRule(self, listener:ParseTreeListener):
if hasattr( listener, "exitRootOperationTypeDefinition" ):
listener.exitRootOperationTypeDefinition(self)
def rootOperationTypeDefinition(self):
localctx = GraphQLParser.RootOperationTypeDefinitionContext(self, self._ctx, self.state)
self.enterRule(localctx, 76, self.RULE_rootOperationTypeDefinition)
try:
self.enterOuterAlt(localctx, 1)
self.state = 356
self.operationType()
self.state = 357
self.match(GraphQLParser.T__7)
self.state = 358
self.namedType()
except RecognitionException as re:
localctx.exception = re
self._errHandler.reportError(self, re)
self._errHandler.recover(self, re)
finally:
self.exitRule()
return localctx
class SchemaExtensionContext(ParserRuleContext):
def __init__(self,
# Train a GANformer model (pytorch version)
import os
import re
import json
import tempfile
# import torch
from training import misc
import dnnlib
from dnnlib import EasyDict
import argparse
import glob
import sys
import loader
# Conditional set: if property is not None, then assign d[name] := prop
# for every d in a set of dictionaries
def cset(dicts, name, prop):
if not isinstance(dicts, list):
dicts = [dicts]
if prop is not None:
for d in dicts:
d[name] = prop
# Conditional set: if dict[name] is not populated from the command line, then assign dict[name] := prop
def nset(args, name, prop):
flag = f"--{name.replace('_', '-')}"
if flag not in sys.argv:
args[name] = prop
def set_net(name, subnets, lr, reg_interval):
net_config = EasyDict(class_name = f"training.networks.{name}", reg_interval = reg_interval)
net_config.opt_args = EasyDict(class_name = "torch.optim.Adam", lr = lr, betas = [0, 0.99], eps = 1e-8)
for subnet in subnets:
net_config[f"{subnet}_kwargs"] = EasyDict()
return net_config
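# Illustrative usage sketch (not part of the original script): nset() fills a value only
# when the matching "--flag" was not passed on the command line, while cset() writes into
# one dict (or a list of dicts) whenever the given value is not None.
def _demo_cset_nset():
    opts = EasyDict(gamma = None)
    nset(opts, "gamma", 10)       # sets opts["gamma"] = 10 unless "--gamma" is in sys.argv
    d1, d2 = EasyDict(), EasyDict()
    cset([d1, d2], "lr", 0.002)   # both dicts receive lr = 0.002
    cset(d1, "decay", None)       # None values are ignored, d1 gets no "decay" key
    return opts, d1, d2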
# Setup configuration based on command line
def setup_config(run_dir, **args):
args = EasyDict(args) # command-line options
train = EasyDict(run_dir = run_dir) # training loop options
vis = EasyDict(run_dir = run_dir) # visualization loop options
if args.reload:
config_fn = os.path.join(run_dir, "training_options.json")
if os.path.exists(config_fn):
# Load config from the experiment's existing file (and thus ignore command-line arguments)
with open(config_fn, "rt") as f:
config = json.load(f)
return config
misc.log(f"Warning: --reload is set for a new experiment {args.expname}," +
f" but configuration file to reload from {config_fn} doesn't exist.", "red")
# GANformer and baselines default settings
# ----------------------------------------------------------------------------
if args.ganformer_default:
task = args.dataset
nset(args, "mirror_augment", task in ["cityscapes", "ffhq"])
nset(args, "transformer", True)
nset(args, "components_num", {"clevr": 8}.get(task, 16))
nset(args, "latent_size", {"clevr": 128}.get(task, 512))
nset(args, "normalize", "layer")
nset(args, "integration", "mul")
nset(args, "kmeans", True)
nset(args, "use_pos", True)
nset(args, "mapping_ltnt2ltnt", task != "clevr")
nset(args, "style", task != "clevr")
nset(args, "g_arch", "resnet")
nset(args, "mapping_resnet", True)
gammas = {
"ffhq": 10,
"cityscapes": 20,
"clevr": 40,
"bedrooms": 100
}
nset(args, "gamma", gammas.get(task, 10))
if args.baseline == "GAN":
nset(args, "style", False)
nset(args, "latent_stem", True)
## k-GAN and SAGAN are not currently supported in the pytorch version.
## See the TF version for implementation of these baselines!
# if args.baseline == "SAGAN":
# nset(args, "style", False)
# nset(args, "latent_stem", True)
# nset(args, "g_img2img", 5)
# if args.baseline == "kGAN":
# nset(args, "kgan", True)
# nset(args, "merge_layer", 5)
# nset(args, "merge_type", "softmax")
# nset(args, "components_num", 8)
# General setup
# ----------------------------------------------------------------------------
# If the flag is specified without arguments (--arg), set to True
for arg in ["cuda_bench", "allow_tf32", "keep_samples", "style", "local_noise"]:
if args[arg] is None:
args[arg] = True
if not any([args.train, args.eval, args.vis]):
misc.log("Warning: None of --train, --eval or --vis are provided. Therefore, we only print network shapes", "red")
for arg in ["train", "eval", "vis", "last_snapshots"]:
cset(train, arg, args[arg])
if args.gpus != "":
num_gpus = len(args.gpus.split(","))
os.environ["CUDA_VISIBLE_DEVICES"] = args.gpus
if not (num_gpus >= 1 and num_gpus & (num_gpus - 1) == 0):
misc.error("Number of GPUs must be a power of two")
args.num_gpus = num_gpus
# CUDA settings
for arg in ["batch_size", "batch_gpu", "allow_tf32"]:
cset(train, arg, args[arg])
cset(train, "cudnn_benchmark", args.cuda_bench)
# Data setup
# ----------------------------------------------------------------------------
# For bedrooms, we choose the most common ratio in the
# dataset and crop the other images into that ratio.
ratios = {
"clevr": 0.75,
"bedrooms": 188/256,
"cityscapes": 0.5,
"ffhq": 1.0
}
args.ratio = args.ratio or ratios.get(args.dataset, 1.0)
args.crop_ratio = 0.5 if args.resolution > 256 and args.ratio < 0.5 else None
args.printname = args.expname
for arg in ["total_kimg", "printname"]:
cset(train, arg, args[arg])
dataset_args = EasyDict(
class_name = "training.dataset.ImageFolderDataset",
path = f"{args.data_dir}/{args.dataset}",
max_items = args.train_images_num,
resolution = args.resolution,
ratio = args.ratio,
mirror_augment = args.mirror_augment
)
dataset_args.loader_args = EasyDict(
num_workers = args.num_threads,
pin_memory = True,
prefetch_factor = 2
)
# Optimization setup
# ----------------------------------------------------------------------------
cG = set_net("Generator", ["mapping", "synthesis"], args.g_lr, 4)
cD = set_net("Discriminator", ["mapping", "block", "epilogue"], args.d_lr, 16)
cset([cG, cD], "crop_ratio", args.crop_ratio)
mbstd = min(args.batch_gpu, 4) # other hyperparams behave more predictably if mbstd group size remains fixed
cset(cD.epilogue_kwargs, "mbstd_group_size", mbstd)
# Automatic tuning
if args.autotune:
batch_size = max(min(args.num_gpus * min(4096 // args.resolution, 32), 64), args.num_gpus) # keep gpu memory consumption at bay
batch_gpu = args.batch_size // args.num_gpus
nset(args, "batch_size", batch_size)
nset(args, "batch_gpu", batch_gpu)
fmap_decay = 1 if args.resolution >= 512 else 0.5 # use wider feature maps at higher resolutions
lr = 0.002 if args.resolution >= 1024 else 0.0025
gamma = 0.0002 * (args.resolution ** 2) / args.batch_size # heuristic formula
cset([cG.synthesis_kwargs, cD], "dim_base", int(fmap_decay * 32768))
nset(args, "g_lr", lr); cset(cG.opt_args, "lr", args.g_lr)
nset(args, "d_lr", lr); cset(cD.opt_args, "lr", args.d_lr)
nset(args, "gamma", gamma)
train.ema_rampup = 0.05
train.ema_kimg = batch_size * 10 / 32
if args.batch_size % (args.batch_gpu * args.num_gpus) != 0:
misc.error("--batch-size should be divided by --batch-gpu * 'num_gpus'")
# Loss and regularization settings
loss_args = EasyDict(class_name = "training.loss.StyleGAN2Loss",
g_loss = args.g_loss, d_loss = args.d_loss,
r1_gamma = args.gamma, pl_weight = args.pl_weight
)
# if args.fp16:
# cset([cG.synthesis_kwargs, cD], "num_fp16_layers", 4) # enable mixed-precision training
# cset([cG.synthesis_kwargs, cD], "conv_clamp", 256) # clamp activations to avoid float16 overflow
# cset([cG.synthesis_kwargs, cD.block_args], "fp16_channels_last", args.nhwc)
# Evaluation and visualization
# ----------------------------------------------------------------------------
from metrics import metric_main
for metric in args.metrics:
if not metric_main.is_valid_metric(metric):
misc.error(f"Unknown metric: {metric}. The valid metrics are: {metric_main.list_valid_metrics()}")
for arg in ["num_gpus", "metrics", "eval_images_num", "truncation_psi"]:
cset(train, arg, args[arg])
for arg in ["keep_samples", "num_heads"]:
cset(vis, arg, args[arg])
args.vis_imgs = args.vis_images
args.vis_ltnts = args.vis_latents
vis_types = ["imgs", "ltnts", "maps", "layer_maps", "interpolations", "noise_var", "style_mix"]
# Set of all the visualization types that were enabled
vis.vis_types = list({arg for arg in vis_types if args[f"vis_{arg}"]})
vis_args = {
"attention": "transformer",
"grid": "vis_grid",
"num": "vis_num",
"rich_num": "vis_rich_num",
"section_size": "vis_section_size",
"intrp_density": "interpolation_density",
# "intrp_per_component": "interpolation_per_component",
"alpha": "blending_alpha"
}
for arg, cmd_arg in vis_args.items():
cset(vis, arg, args[cmd_arg])
# Networks setup
# ----------------------------------------------------------------------------
# Networks architecture
cset(cG.synthesis_kwargs, "architecture", args.g_arch)
cset(cD, "architecture", args.d_arch)
# Latent sizes
if args.components_num > 0:
if not args.transformer: # or args.kgan):
misc.error("--components-num > 0 but the model is not using components. " +
"Add --transformer for GANformer (which uses latent components).")
if args.latent_size % args.components_num != 0:
misc.error(f"--latent-size ({args.latent_size}) should be divisible by --components-num (k={k})")
args.latent_size = int(args.latent_size / args.components_num)
cG.z_dim = cG.w_dim = args.latent_size
cset([cG, vis], "k", args.components_num + 1) # We add a component to modulate features globally
# Mapping network
args.mapping_layer_dim = args.mapping_dim
for arg in ["num_layers", "layer_dim", "resnet", "shared", "ltnt2ltnt"]:
field = f"mapping_{arg}"
cset(cG.mapping_kwargs, arg, args[field])
# StyleGAN settings
for arg in ["style", "latent_stem", "local_noise"]:
cset(cG.synthesis_kwargs, arg, args[arg])
# GANformer
cset([cG.synthesis_kwargs, cG.mapping_kwargs], "transformer", args.transformer)
# Attention related settings
for arg in ["use_pos", "num_heads", "ltnt_gate", "attention_dropout"]:
cset([cG.mapping_kwargs, cG.synthesis_kwargs], arg, args[arg])
# Attention types and layers
for arg in ["start_res", "end_res"]: # , "local_attention" , "ltnt2ltnt", "img2img", "img2ltnt"
cset(cG.synthesis_kwargs, arg, args[f"g_{arg}"])
# Mixing and dropout
for arg in ["style_mixing", "component_mixing"]:
cset(loss_args, arg, args[arg])
cset(cG, "component_dropout", args["component_dropout"])
# Extra transformer options
args.norm = args.normalize
for arg in ["norm", "integration", "img_gate", "iterative", "kmeans", "kmeans_iters"]:
cset(cG.synthesis_kwargs, arg, args[arg])
# Positional encoding
# args.pos_dim = args.pos_dim or args.latent_size
for arg in ["dim", "type", "init", "directions_num"]:
field = f"pos_{arg}"
cset(cG.synthesis_kwargs, field, args[field])
# k-GAN
# for arg in ["layer", "type", "same"]:
# field = "merge_{}".format(arg)
# cset(cG.args, field, args[field])
# cset(cG.synthesis_kwargs, "merge", args.kgan)
# if args.kgan and args.transformer:
# misc.error("Either have --transformer for GANformer or --kgan for k-GAN, not both")
config = EasyDict(train)
config.update(cG = cG, cD = cD, loss_args = loss_args, dataset_args = dataset_args, vis_args = vis)
# Save config file
with open(os.path.join(run_dir, "training_options.json"), "wt") as f:
json.dump(config, f, indent = 2)
return config
# Setup and launching
# ----------------------------------------------------------------------------
##### Experiments management:
# Whenever we start a new experiment we store its result in a directory named 'args.expname:000'.
# When we rerun a training or evaluation command it restores the model from that directory by default.
# If we wish to restart the model training, we can set --restart and then we will store data in a new
# directory: 'args.expname:001' after the first restart, then 'args.expname:002' after the second, etc.
def setup_working_space(args):
# Find the
# Save checkpoint and zero timer and loss.
checkpoint_path = os.path.join(FLAGS.model_dir, "equilid.ckpt")
model.saver.save(sess, checkpoint_path, global_step=model.global_step)
step_time, loss = 0.0, 0.0
# Run evals on development set and print their perplexity.
for bucket_id in xrange(len(_buckets)):
if len(dev_set[bucket_id]) == 0:
print(" eval: empty bucket %d" % (bucket_id))
continue
encoder_inputs, decoder_inputs, target_weights = model.get_batch(
dev_set, bucket_id)
_, eval_loss, _ = model.step(sess, encoder_inputs, decoder_inputs,
target_weights, bucket_id, True)
eval_ppx = math.exp(float(eval_loss)) if eval_loss < 300 else float(
"inf")
print(" eval: bucket %d perplexity %.2f" % (bucket_id, eval_ppx))
sys.stdout.flush()
def repair(tokens, predictions):
"""
Repairs the language prediction sequence if the number of predictions did not
match the input number of tokens and double-checks that punctuation in the
input is aligned with the prediction's. This function is necessary because
of stochasticity in the LSTM output length and only has much effect
for very short inputs or very long inputs.
"""
# If we made a prediction for each token, return that. NOTE: we could
# probably do some double-checking here to make sure
# punctuation/hashtag/mention predictions are where they should be
if len(tokens) == len(predictions):
return predictions
# If we only have words (no punctuation), then truncate to the right number
# of tokens
if len(set(predictions)) == 1:
return predictions[:len(tokens)]
# See how many languages we estimated
langs = set([x for x in predictions if len(x) == 3])
# If we only ever saw one language (easy case), then we've just screwed up
# the number of tokens so iterate over the tokens and fill in the blanks
if len(langs) == 1:
lang = list(langs)[0]
# This is the output set of assignments, based on realignment
repaired = []
# Figure out where we have punctuation in the input
for i, token in enumerate(tokens):
if re.fullmatch(r"\p{P}+", token):
repaired.append('Punct')
elif re.fullmatch(r"#([\w_]+)", token):
repaired.append('#Hashtag')
elif re.fullmatch(r"@([\w_]+)", token):
repaired.append('@Mention')
elif (token.startswith('http') and ':' in token) \
or token.startswith('pic.twitter'):
repaired.append('URL')
else:
repaired.append(lang)
# print('%s\n%s\n' % (predictions, repaired))
return repaired
else:
# NOTE: the most rigorous thing to do would be a sequence alignment with
# something like Smith-Waterman and then fill in the gaps, but this is
# still likely overkill for the kinds of repair operations we expect
# This is the output set of assignments, based on realignment
repaired = []
n = len(predictions) - 1
# Figure out where we have non-text stuff in the input as anchor points
last_anchor = -1
anchors = []
rep_anchor_counts = []
pred_anchor_counts = []
for pred in predictions:
prev = 0
if len(pred_anchor_counts) > 0:
prev = pred_anchor_counts[-1]
if len(pred) != 3:
pred_anchor_counts.append(1 + prev)
else:
pred_anchor_counts.append(prev)
for i, token in enumerate(tokens):
if re.fullmatch(r"\p{P}+", token):
repaired.append('Punct')
elif re.fullmatch(r"#([\w_]+)", token):
repaired.append('#Hashtag')
elif re.fullmatch(r"@([\w_]+)", token):
repaired.append('@Mention')
elif (token.startswith('http') and ':' in token) \
or token.startswith('pic.twitter'):
repaired.append('URL')
else:
repaired.append(None)
for rep in repaired:
prev = 0
if len(rep_anchor_counts) > 0:
prev = rep_anchor_counts[-1]
if rep is not None:
rep_anchor_counts.append(1 + prev)
else:
rep_anchor_counts.append(prev)
for i in range(len(repaired)):
if repaired[i] is not None:
continue
try:
p = pred_anchor_counts[min(i, len(pred_anchor_counts)-1)]
r = rep_anchor_counts[i]
except IndexError as xcept:
print(repr(xcept))
print(i, len(pred_anchor_counts)-1, min(i, len(pred_anchor_counts)-1))
continue
nearest_lang = 'UNK'
if p < r:
# The prediction has fewer anchors than the repair at this
# point, which means it added too many things, so skip ahead
for j in range(i+1, len(predictions)):
if pred_anchor_counts[min(j, len(pred_anchor_counts)-1)] >= p:
if len(predictions[j]) == 3:
nearest_lang = predictions[j]
break
elif p > r:
# The prediction skipped some input tokens, so rewind until we
# have the same number of anchors
for j in range(min(n, i-1), -1, -1):
if pred_anchor_counts[min(j, n)] <= p:
if len(predictions[min(j, n)]) == 3:
nearest_lang = predictions[min(j, n)]
break
else:
# Just search backwards for a language
for j in range(min(i, n), -1, -1):
if len(predictions[j]) == 3:
nearest_lang = predictions[j]
break
# For early tokens that didn't get an assignment from a backwards
# search, search forward in a limited manner
if nearest_lang is None:
for j in range(i+1+anchors[i], min(n+1, i+5+anchors[i])):
if len(predictions[j]) == 3:
nearest_lang = predictions[j]
repaired[i] = nearest_lang
#print('%s\n%s\n' % (predictions, repaired))
return repaired
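# Illustrative sketch (not part of the original module; assumes the `regex` module backs
# the `re` name used above so that \p{P} is a valid pattern): when the model emits fewer
# labels than tokens but only one language, repair() realigns the labels around anchors
# such as punctuation, hashtags, mentions and URLs.
def _repair_example():
    tokens = [u"hola", u"mundo", u"!"]
    predictions = [u"spa", u"Punct"]          # one label short of the token count
    return repair(tokens, predictions)        # expected: [u"spa", u"spa", u"Punct"]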
cjk_ranges = [
{"from": ord(u"\u3300"), "to": ord(u"\u33ff")},
{"from": ord(u"\ufe30"), "to": ord(u"\ufe4f")},
{"from": ord(u"\uf900"), "to": ord(u"\ufaff")},
{"from": ord(u"\u30a0"), "to": ord(u"\u30ff")},
{"from": ord(u"\u2e80"), "to": ord(u"\u2eff")},
{"from": ord(u"\u4e00"), "to": ord(u"\u9fff")},
{"from": ord(u"\u3400"), "to": ord(u"\u4dbf")},
]
try:
cjk_ranges.extend([
{"from": ord(u"\U00020000"), "to": ord(u"\U0002a6df")},
{"from": ord(u"\U0002a700"), "to": ord(u"\U0002b73f")},
{"from": ord(u"\U0002b740"), "to": ord(u"\U0002b81f")},
{"from": ord(u"\U0002b820"), "to": ord(u"\U0002ceaf")},
{"from": ord(u"\u0002f800"), "to": ord(u"\u0002fa1f")},
])
except TypeError as e:
print('Unable to load extended unicode ranges for CJK character set, ' +
'some CJK language identification results may be unreliable.')
hangul_ranges = [
{"from": ord(u"\uAC00"), "to": ord(u"\uD7AF")},
]
def is_cjk(char):
return any([range["from"] <= ord(char) <= range["to"] for range in cjk_ranges])
def is_hangul(char):
return any([range["from"] <= ord(char) <= range["to"] for range in hangul_ranges])
CJK_PROXY = str(ord(u"\u4E00"))
HANGUL_PROXY = str(ord(u"\uAC00"))
def to_token_ids(text, char_to_id):
"""
Converts input text into its IDs based on a defined vocabularly.
"""
ids = []
for c in text:
# The CJK and Hangul_Syllable unicode blocks are each collapsed into
# single proxy characters since they are primarily used with a single
# language and, because these blocks are huge, this saves significant
# space in the model's lookup table.
if is_cjk(c):
c = CJK_PROXY
elif is_hangul(c):
c = HANGUL_PROXY
else:
c = str(ord(c))
ids.append(char_to_id.get(c, UNK_ID))
return ids
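# Illustrative sketch (assumes char_to_id comes from initialize_vocabulary() below and maps
# str(codepoint) -> integer id): CJK and Hangul characters collapse onto the single proxy
# codepoints defined above before the vocabulary lookup.
def _token_id_example(char_to_id):
    mixed = u"a\u4e2d\uac00"                   # Latin 'a', a CJK ideograph, a Hangul syllable
    return to_token_ids(mixed, char_to_id)     # ids for '97', CJK_PROXY and HANGUL_PROXY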
def initialize_vocabulary(vocabulary_path):
"""Initialize vocabulary from file."""
# NOTE: the data-to-int conversion uses a +4 offset for indexing due to
# the starting vocabulary, so we prepend _START_VOCAB to rev_vocab here to
# account for that offset
rev_vocab = list(_START_VOCAB)
with open(vocabulary_path, "rb") as f:
for line in f:
rev_vocab.append(line.split("\t")[0].strip())
vocab = dict([(x, y) for (y, x) in enumerate(rev_vocab)])
return vocab, rev_vocab
def get_langs(text):
token_langs = classify(text)
langs = set([x for x in token_langs if len(x) == 3])
return langs
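# Hedged demo (assumes a trained model is available under FLAGS.model_dir and the Python 2
# runtime used by the rest of this module): classify() returns one label per token, and
# get_langs() keeps only the 3-letter language codes.
def _get_langs_demo():
    return get_langs(u"gracias for the follow")   # typically set(["spa", "eng"])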
# The lazily-loaded classifier, which is a tuple of the model
classifier = None
def classify(text):
"""
"""
global classifier
# Ensure the text is always treated as unicode
text = unicode(text)
if classifier is None:
# Prediction uses a small batch size
FLAGS.batch_size = 1
load_model()
# Unpack the classifier into the things we need
sess, model, char_vocab, rev_char_vocab, lang_vocab, rev_lang_vocab = classifier
# Convert the input into character IDs
token_ids = to_token_ids(text, char_vocab)
# print(token_ids)
# Which bucket does it belong to?
possible_buckets = [b for b in xrange(len(_buckets))
if _buckets[b][0] > len(token_ids)]
if len(possible_buckets) == 0:
# Stick it in the last bucket anyway, even if it's too long.
# Gotta predict something! #YOLO. It might be worth logging
# to the user here if we want to be super paranoid though
possible_buckets.append(len(_buckets)-1)
bucket_id = min(possible_buckets)
# Get a 1-element batch to feed the sentence to the model.
#
# NB: Could we speed things up by pushing in multiple instances
# to a single batch?
encoder_inputs, decoder_inputs, target_weights = model.get_batch(
{bucket_id: [(token_ids, [])]}, bucket_id)
# Get output logits for the sentence.
_, _, output_logits = model.step(sess, encoder_inputs, decoder_inputs,
target_weights, bucket_id, True)
# This is a greedy decoder - outputs are just argmaxes of output_logits.
outputs = [int(np.argmax(logit, axis=1)) for logit in output_logits]
# If there is an EOS symbol in outputs, cut them at that point.
if EOS_ID in outputs:
outputs = outputs[:outputs.index(EOS_ID)]
predicted_labels = []
try:
predicted_labels = [tf.compat.as_str(rev_lang_vocab[output]) for output in outputs]
except BaseException as e:
print(repr(e))
# Ensure we have predictions for each token
predictions = repair(text.split(), predicted_labels)
return predictions
def load_model():
global classifier
sess = tf.Session(config=tf.ConfigProto(allow_soft_placement=True, log_device_placement=False))
# Create model and load parameters.
model = create_model(sess, True)
print("Loading vocabs")
# Load vocabularies.
char_vocab_path = FLAGS.model_dir + '/vocab.src'
lang_vocab_path = FLAGS.model_dir + '/vocab.tgt'
char_vocab, rev_char_vocab = initialize_vocabulary(char_vocab_path)
lang_vocab, rev_lang_vocab = initialize_vocabulary(lang_vocab_path)
classifier = (sess, model, char_vocab, rev_char_vocab, lang_vocab, rev_lang_vocab)
def predict():
# NB: is there a safer way to do this with a using statement if the file
# is
= T("Role deleted"),
msg_list_empty = T("No Roles currently defined"))
s3mgr.configure(tablename, main="role")
return s3_rest_controller(prefix, resourcename)
# -----------------------------------------------------------------------------
@auth.s3_requires_membership(1)
def membership():
"""
RESTful CRUD controller
@deprecated
"""
prefix = "auth"
resourcename = "membership"
tablename = "auth_membership"
table = db[tablename]
# Model options
table.group_id.represent = s3_role_represent
table.user_id.represent = s3_user_represent
# CRUD Strings
ADD_MEMBERSHIP = T("Add Membership")
LIST_MEMBERSHIPS = T("List Memberships")
s3.crud_strings[tablename] = Storage(
title_create = ADD_MEMBERSHIP,
title_display = T("Membership Details"),
title_list = LIST_MEMBERSHIPS,
title_update = T("Edit Membership"),
title_search = T("Search Memberships"),
subtitle_create = T("Add New Membership"),
subtitle_list = T("Memberships"),
label_list_button = LIST_MEMBERSHIPS,
label_create_button = ADD_MEMBERSHIP,
msg_record_created = T("Membership added"),
msg_record_modified = T("Membership updated"),
msg_record_deleted = T("Membership deleted"),
msg_list_empty = T("No Memberships currently defined"))
s3mgr.configure(tablename, main="user_id")
return s3_rest_controller(prefix, resourcename)
# -----------------------------------------------------------------------------
@auth.s3_requires_membership(1)
def usergroup():
"""
User update form with groups
- NB This is currently unused & has no custom view
"""
user = request.vars.user
# redirect to the user list if user id is not given
if user is None:
redirect(URL(f="user"))
return
# Shortcut:
users = db.auth_user
# gather common variables
data = {}
data["user_id"] = user
data["username"] = "%s %s" % (users[user].first_name,
users[user].last_name)
data["role"] = db.auth_group[user].role
# display the standard user details
record = db(users.id == user).select().first()
users.id.readable = False
# Let admin view and modify the registration key
users.registration_key.writable = True
users.registration_key.readable = True
users.registration_key.label = T("Disabled?")
users.registration_key.requires = IS_NULL_OR(IS_IN_SET(["disabled",
"pending"]))
form = SQLFORM(users, record, deletable=True)
# find all groups user belongs to
query = (db.auth_membership.user_id == user)
allgroups = db().select(db.auth_group.ALL)
user_membership = db(query).select(db.auth_membership.ALL)
# db.auth_group[row.group_id].role
#records = SQLTABLE(db(query).select(db.auth_membership.ALL))
# handle the M to M of user to group membership for display
records = []
for group in allgroups:
user_group_count = 0
for row in user_membership:
if (row.group_id == group.id):
records.append([group.role, "on", group.id])
user_group_count += 1
if (user_group_count == 0):
# if the group does not exist currently and is enabled
#if request.has_key(group.id):
if (group.id == 6):
db.auth_membership.insert(user_id = user, group_id = group.id)
records.append([group.role, "on", group.id])
data["heehe"] = "yes %d" % group.id
records.append([group.role, "", group.id])
# Update records for user details
if form.accepts(request.vars): \
response.flash="User %s Updated" % data["username"]
elif form.errors: \
response.flash="There were errors in the form"
# Update records for group membership details
for key in request.vars.keys():
data["m_%s" % key] = request.vars[key]
return dict(data=data, records=records, form=form)
# -----------------------------------------------------------------------------
@auth.s3_requires_membership(1)
def users():
"""
List/amend which users are in a Group
@deprecated
"""
try:
group = int(request.args(0))
except (TypeError, ValueError):
session.error = T("Need to specify a role!")
redirect(URL(f="group"))
table = db.auth_membership
query = table.group_id == group
title = "%s: %s" % (T("Role"), db.auth_group[group].role)
description = db.auth_group[group].description
# Start building the Return
output = dict(title=title, description=description, group=group)
if auth.settings.username:
username = "username"
else:
username = "email"
# Audit
crud.settings.create_onaccept = lambda form: s3_audit("create", module,
"membership",
form=form,
representation="html")
crud.settings.create_onvalidation = lambda form: group_dupes(form,
"users",
[group])
# Many<>Many selection (Deletable, no Quantity)
item_list = []
sqlrows = db(query).select()
even = True
for row in sqlrows:
if even:
theclass = "even"
even = False
else:
theclass = "odd"
even = True
id = row.user_id
_user = db.auth_user[id]
item_first = _user.first_name
item_second = _user.last_name
item_description = _user[username]
id_link = A(id, _href=URL(f="user", args=[id, "read"]))
checkbox = INPUT(_type="checkbox", _value="on", _name=id,
_class="remove_item")
item_list.append(TR(TD(id_link),
TD(item_first),
TD(item_second),
TD(item_description),
TD(checkbox),
_class=theclass))
if auth.settings.username:
username_label = T("Username")
else:
username_label = T("Email")
table_header = THEAD(TR(TH("ID"),
TH(T("First Name")),
TH(T("Last Name")),
TH(username_label),
TH(T("Remove"))))
table_footer = TFOOT(TR(TD(_colspan=4),
TD(INPUT(_id="submit_delete_button",
_type="submit",
_value=T("Remove")))))
items = DIV(FORM(TABLE(table_header,
TBODY(item_list),
table_footer, _id="list", _class="dataTable display"),
_name="custom", _method="post",
_enctype="multipart/form-data",
_action=URL(f="group_remove_users",
args=[group])))
subtitle = T("Users")
crud.messages.submit_button = T("Add")
crud.messages.record_created = T("Role Updated")
form = crud.create(table, next=URL(args=[group]))
addtitle = T("Add New User to Role")
output.update(dict(subtitle=subtitle, items=items, addtitle=addtitle,
form=form))
return output
# -----------------------------------------------------------------------------
def group_dupes(form, page, arg):
"""
Onvalidation check for duplicate user roles
@deprecated
"""
user = form.latest["user_id"]
group = form.latest["group_id"]
query = (form.table.user_id == user) & (form.table.group_id == group)
items = db(query).select()
if items:
session.error = T("User already has this role")
redirect(URL(page, args=arg))
# -----------------------------------------------------------------------------
@auth.s3_requires_membership(1)
def group_remove_users():
"""
Remove users from a group
@deprecated
"""
if len(request.args) == 0:
session.error = T("Need to specify a group!")
redirect(URL(f="group"))
group = request.args(0)
table = db.auth_membership
for var in request.vars:
if str(var).isdigit():
user = var
query = (table.group_id == group) & (table.user_id == user)
db(query).delete()
# Audit
#crud.settings.update_onaccept = lambda form: shn_audit_update(form, "membership", "html")
session.flash = T("Users removed")
redirect(URL(f="users", args=[group]))
# -----------------------------------------------------------------------------
@auth.s3_requires_membership(1)
def groups():
"""
List/amend which groups a User is in
@deprecated
"""
try:
user = int(request.args(0))
except (TypeError, ValueError):
session.error = T("Need to specify a user!")
redirect(URL(f="user"))
table = db.auth_membership
query = table.user_id == user
title = "%s %s" % (db.auth_user[user].first_name,
db.auth_user[user].last_name)
description = db.auth_user[user].email
# Start building the Return
output = dict(title=title, description=description, user=user)
# Audit
crud.settings.create_onaccept = lambda form: s3_audit("create", module,
"membership",
form=form,
representation="html")
crud.settings.create_onvalidation = lambda form: group_dupes(form,
"groups",
[user])
# Many<>Many selection (Deletable, no Quantity)
item_list = []
sqlrows = db(query).select()
even = True
for row in sqlrows:
if even:
theclass = "even"
even = False
else:
theclass = "odd"
even = True
id = row.group_id
_group = db.auth_group[id]
item_first = _group.role
item_description = _group.description
id_link = A(id, _href=URL(f="group", args=[id, "read"]))
checkbox = INPUT(_type="checkbox", _value="on", _name=id,
_class="remove_item")
item_list.append(TR(TD(id_link),
TD(item_first),
TD(item_description),
TD(checkbox),
_class=theclass))
table_header = THEAD(TR(TH("ID"),
TH(T("Role")),
TH(T("Description")),
TH(T("Remove"))))
table_footer = TFOOT(TR(TD(_colspan=3),
TD(INPUT(_id="submit_delete_button",
_type="submit",
_value=T("Remove")))))
items = DIV(FORM(TABLE(table_header,
TBODY(item_list),
table_footer,
_id="table-container"),
_name="custom", _method="post",
_enctype="multipart/form-data",
_action=URL(f="user_remove_groups",
args=[user])))
subtitle = T("Roles")
crud.messages.submit_button = T("Add")
crud.messages.record_created = T("User Updated")
form = crud.create(table, next=URL(args=[user]))
addtitle = T("Add New Role to User")
output.update(dict(subtitle=subtitle, items=items, addtitle=addtitle,
form=form))
return output
# -----------------------------------------------------------------------------
@auth.s3_requires_membership(1)
def user_remove_groups():
"""
Remove groups from a user
@deprecated
"""
if len(request.args) == 0:
session.error = T("Need to specify a user!")
redirect(URL(f="user"))
user = request.args(0)
table = db.auth_membership
for var in request.vars:
if str(var).isdigit():
group = var
query = (table.group_id == group) & (table.user_id == user)
db(query).delete()
# Audit
#crud.settings.update_onaccept = lambda form: shn_audit_update(form, "membership", "html")
session.flash = T("Groups removed")
redirect(URL(f="groups", args=[user]))
# =============================================================================
@auth.s3_requires_membership(1)
def setting():
"""
RESTful CRUD controller
- just used to set the Theme
@ToDo: Deprecate this - move to deployment_settings
"""
tablename = "s3_%s" % resourcename
table = db[tablename]
#table.admin_name.label = T("Admin Name")
#table.admin_email.label = T("Admin Email")
#table.admin_tel.label = T("Admin Tel")
table.theme.label = T("Theme")
table.theme.comment = DIV(A(T("Add Theme"), _class="colorbox",
_href=URL(c="admin", f="theme",
args="create",
vars=dict(format="popup")),
_target="top", _title=T("Add Theme"))),
s3.crud_strings[tablename] = Storage(
title_update = T("Edit Settings"),
msg_record_modified = T("Settings updated"),
label_list_button = None)
s3mgr.configure(tablename,
deletable=False,
listadd=False,
#onvalidation=theme_check,
#update_next = URL(args=[1, "update"])
onaccept=theme_apply)
output = s3_rest_controller("s3", resourcename, list_btn=None)
return output
# =============================================================================
@auth.s3_requires_membership(1)
def theme():
"""
RESTful CRUD controller
- deprecated
"""
tablename = "%s_theme" % module
table = db[tablename]
# Model options
table.name.label = T("Name")
#table.logo.label = T("Logo")
#table.logo.comment = A(SPAN("[Help]"), _class="tooltip", _title=T("Logo") + "|" + T("Name of the file (& optional sub-path) located in static which should be used for the top-left image."))
#table.header_background.label = T("Header Background")
#table.header_background.comment = A(SPAN("[Help]"), _class="tooltip", _title=T("Header Background") + "|" + T("Name of the file (& optional sub-path) located in static which should be used for the background of the header."))
#table.footer.label = T("Footer")
#table.footer.comment = A(SPAN("[Help]"), _class="tooltip", _title=T("Footer") + "|" + T("Name of the file (& optional sub-path) located in views which should be used for footer."))
table.col_background.label = T("Background Color")
table.col_txt.label = T("Text Color for Text blocks")
table.col_txt_background.label = T("Background Color for Text blocks")
table.col_txt_border.label = T("Border Color for Text blocks")
table.col_txt_underline.label = T("Color for Underline of Subheadings")
table.col_menu.label = T("Color of dropdown menus")
table.col_highlight.label = T("Color of selected menu items")
table.col_input.label = T("Color of selected Input fields")
table.col_border_btn_out.label = T("Color of bottom of Buttons when not pressed")
table.col_border_btn_in.label = T("Color of bottom of Buttons when pressed")
table.col_btn_hover.label = T("Color of Buttons when hovering")
# CRUD Strings
ADD_THEME = T("Add Theme")
LIST_THEMES = T("List Themes")
s3.crud_strings[resourcename] = Storage(
title_create = ADD_THEME,
title_display = T("Theme Details"),
title_list = LIST_THEMES,
title_update = T("Edit Theme"),
title_search = T("Search Themes"),
subtitle_create = T("Add New Theme"),
subtitle_list = T("Themes"),
label_list_button = LIST_THEMES,
label_create_button = ADD_THEME,
msg_record_created = T("Theme added"),
msg_record_modified = T("Theme updated"),
msg_record_deleted = T("Theme deleted"),
msg_list_empty = T("No Themes currently defined"))
s3mgr.configure(tablename,
#onvalidation=theme_check,
#list_fields=["id", "name", "logo", "footer", "col_background"],
list_fields=["id",
"name",
"col_background"
])
return s3_rest_controller(module, resourcename)
import os
import numpy as np
from scipy import ndimage
from scipy.signal import fftconvolve, convolve2d
from astropy.modeling import models, fitting
def positional_shift(R,T):
Rc = R[10:-10,10:-10]
Tc = T[10:-10,10:-10]
c = fftconvolve(Rc, Tc[::-1, ::-1])
cind = np.where(c == np.max(c))
print cind
csmall = c[cind[0][0]-10:cind[0][0]+10,cind[1][0]-10:cind[1][0]+10]
csmall = c[cind[0][0]-6:cind[0][0]+7,cind[1][0]-6:cind[1][0]+7]
X,Y = np.indices(csmall.shape)
total =csmall.sum()
dx = (X*csmall).sum()/total - 6 + cind[0][0] - c.shape[0]/2.0
dy = (Y*csmall).sum()/total - 6 + cind[1][0] - c.shape[1]/2.0
return dx, dy
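# Illustrative sketch (not part of the original module): positional_shift() locates the peak
# of the cross-correlation between two frames and returns the centroid offset of that peak.
def _positional_shift_demo():
    ref = np.zeros((64, 64)); ref[30, 30] = 1.0
    tgt = np.zeros((64, 64)); tgt[33, 28] = 1.0    # same point source, shifted by (+3, -2)
    # The returned (dx, dy) reflect the injected shift up to the routine's sign and
    # half-pixel centring conventions.
    return positional_shift(ref, tgt)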
def register(R,T,params):
#Tc = T.data*T.mask
#Rc = R.data*R.mask
Tc_data = T.data.copy()
Rc_data = R.data.copy()
if params.register_using_masks:
Tc_data[T.mask==0] = np.median(Tc_data[T.mask==1])
Rc_data[R.mask==0] = np.median(Rc_data[R.mask==1])
if isinstance(params.fwhm_section,np.ndarray):
w = params.fwhm_section
Tc = Tc_data[w[2]:w[3],w[0]:w[1]].copy()
Rc = Rc_data[w[2]:w[3],w[0]:w[1]].copy()
else:
Tc = Tc_data
Rc = Rc_data
#nx, ny = R.shape
#z = np.arange(-3,4)
#saturated_pixels = np.where(R.mask==0)
#for k in range(saturated_pixels[0].size):
# p = np.array([z+saturated_pixels[0][k],z+saturated_pixels[1][k]])
# px, py = np.meshgrid(p[0],p[1])
# q = np.where((px>=0) & (px<R.data.shape[0]) & (py>=0) & (py<R.data.shape[1]))
# Rc[saturated_pixels[0][k],saturated_pixels[1][k]]= np.median(R.data[px[q],py[q]])
#saturated_pixels = np.where(T.mask==0)
#for k in range(saturated_pixels[0].size):
# p = np.array([z+saturated_pixels[0][k],z+saturated_pixels[1][k]])
# px, py = np.meshgrid(p[0],p[1])
# q = np.where((px>=0) & (px<R.data.shape[0]) & (py>=0) & (py<R.data.shape[1]))
# Tc[saturated_pixels[0][k],saturated_pixels[1][k]]= np.median(T.data[px[q],py[q]])
Rcm = Rc - np.median(Rc)
Tcm = Tc - np.median(Tc)
c = fftconvolve(Rcm, Tcm[::-1, ::-1])
print c.shape
kernel = np.ones((3,3))
c = convolve2d(c,kernel,mode='same')
print c.shape
cind = np.where(c == np.max(c))
print np.max(c)
print cind
print Rc.shape
try:
xshift = cind[0][0]-Rc.shape[0]+1
except IndexError:
print 'Warning:',T.fullname, 'failed to register.'
return None, None, None
yshift = cind[1][0]-Rc.shape[1]+1
imint = max(0,-xshift)
imaxt = min(R.shape[0],R.shape[0]-xshift)
jmint = max(0,-yshift)
jmaxt = min(R.shape[1],R.shape[1]-yshift)
iminr = max(0,xshift)
imaxr = min(R.shape[0],R.shape[0]+xshift)
jminr = max(0,yshift)
jmaxr = min(R.shape[1],R.shape[1]+yshift)
RT = np.zeros(R.shape)
RT[iminr:imaxr,jminr:jmaxr] = T.data[imint:imaxt,jmint:jmaxt]
inv_variance = np.ones(R.shape)
inv_variance[iminr:imaxr,jminr:jmaxr] = T.inv_variance[imint:imaxt,jmint:jmaxt]
#mask = np.ones(R.shape,dtype=bool)
#mask[iminr:imaxr,jminr:jmaxr] = 0
#inv_variance = 1.0/(RT/params.gain +
# (params.readnoise/params.gain)**2) + mask*1.0
RM = np.zeros(R.shape,dtype=bool)
RM[iminr:imaxr,jminr:jmaxr] = T.mask[imint:imaxt,jmint:jmaxt]
return RT, RM, inv_variance
def compute_bleed_mask(d,radius,params):
print 'Computing bleed mask'
kernel = np.array([[-1,-1,-1,-1,-1,-1,-1,-1,-1,-1],
[2,2,2,2,2,2,2,2,2,2],
[-1,-1,-1,-1,-1,-1,-1,-1,-1,-1]])
rad2 = radius*radius
mask = np.ones_like(d,dtype=bool)
dc = convolve2d(d,kernel.T,mode='same')
rad = int(np.ceil(radius))
z = np.arange(2*rad+1)-rad
x,y = np.meshgrid(z,z)
p = np.array(np.where(x**2 + y**2 < rad2))
bad_pixels = np.where(np.abs(dc)>1.1*params.pixel_max)
zp0 = z[p[0]]
zp1 = z[p[1]]
sp0 = bad_pixels[0][:,np.newaxis]
sp1 = bad_pixels[1][:,np.newaxis]
q0 = zp0 + sp0
q1 = zp1 + sp1
q0 = q0.flatten()
q1 = q1.flatten()
s = np.asarray(np.where((q0>=0) & (q0<d.shape[0]) &
(q1>=0) & (q1<d.shape[1])))[0]
mask[q0[s],q1[s]] = 0
for i in range(mask.shape[1]):
if np.sum(mask[:,i]) < 0.85*mask.shape[0]:
mask[:,i] = 0
return mask
def compute_bleed_mask2(d,params):
mask = np.ones_like(d,dtype=bool)
if (params.bleed_mask_multiplier_above == 0) and (params.bleed_mask_multiplier_below == 0):
return mask
for kernel_len in [2,3,5,7,10,15,23,34,51,77]:
lkernel = np.vstack((np.zeros(kernel_len),np.ones(kernel_len),np.zeros(kernel_len)))
lkernel /= np.sum(lkernel)
dl = convolve2d(d,lkernel.T,mode='same')
sy, sx = np.where(dl>params.pixel_max)
for q in range(len(sx)):
ymin = max(0,int(sy[q]-params.bleed_mask_multiplier_below*kernel_len))
ymax = min(d.shape[1],int(sy[q]+params.bleed_mask_multiplier_above*kernel_len))
mask[ymin:ymax,sx[q]] = 0
return mask
def compute_saturated_pixel_mask(im,params):
radius = params.mask_radius
rad2 = radius*radius
rad = int(np.ceil(radius))
z = np.arange(2*rad+1)-rad
x,y = np.meshgrid(z,z)
p = np.array(np.where(x**2 + y**2 < rad2))
mask = np.ones(im.shape,dtype=bool)
saturated_pixels = np.where((im > params.pixel_max) |
(im <= params.pixel_min))
zp0 = z[p[0]]
zp1 = z[p[1]]
sp0 = saturated_pixels[0][:,np.newaxis]
sp1 = saturated_pixels[1][:,np.newaxis]
q0 = zp0 + sp0
q1 = zp1 + sp1
q0 = q0.flatten()
q1 = q1.flatten()
# q = np.array([[],[]])
# for k in range(saturated_pixels[0].size):
# q = np.column_stack([q,np.array([zp0+saturated_pixels[0][k],
# zp1+saturated_pixels[1][k]])])
# q1.append([r for r in zp0+saturated_pixels[0][k]])
# q2.append([r for r in zp1+saturated_pixels[1][k]])
# q = np.array([np.array(q1).flatten(),np.array(q2).flatten()])
s = np.asarray(np.where((q0>=0) & (q0<im.shape[0]) &
(q1>=0) & (q1<im.shape[1])))[0]
mask[q0[s],q1[s]] = 0
return mask
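# Illustrative sketch (assumes params.pixel_min < 100.0 < params.pixel_max and a positive
# params.mask_radius): a single saturated pixel masks out a disk of radius
# params.mask_radius around it in the returned boolean mask.
def _saturated_mask_demo(params):
    frame = np.full((32, 32), 100.0)
    frame[16, 16] = params.pixel_max + 1.0
    return compute_saturated_pixel_mask(frame, params)   # False inside the masked disk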
def compute_saturated_pixel_mask_2(im1,im2,radius,params):
rad2 = radius*radius
rad = int(np.ceil(radius))
z = np.arange(2*rad+1)-rad
x,y = np.meshgrid(z,z)
p = np.array(np.where(x**2 + y**2 < rad2))
mask = np.ones(im1.shape,dtype=bool)
saturated_pixels = np.where((im1 > params.pixel_max) |
(im1 <= params.pixel_min) |
(im2 > params.pixel_max) |
(im2 <= params.pixel_min))
for k in range(saturated_pixels[0].size):
q = np.array([z[p[0]]+saturated_pixels[0][k],z[p[1]]+saturated_pixels[1][k]])
s = np.asarray(np.where((q[0]>=0) & (q[0]<im1.shape[0]) &
(q[1]>=0) & (q[1]<im1.shape[1])))[0]
mask[q[0,s],q[1,s]] = 0
return mask
def compute_kernel_saturation_mask(image,params):
cimage = convolve2d(image,params.pixel_saturation_kernel,mode='same')
rad2 = params.mask_radius**2
rad = int(np.ceil(params.mask_radius))
z = np.arange(2*rad+1)-rad
x,y = np.meshgrid(z,z)
p = np.array(np.where(x**2 + y**2 < rad2))
mask = np.ones(image.shape,dtype=bool)
saturated_pixels = np.where(cimage > params.pixel_saturation_kernel_max)
for k in range(saturated_pixels[0].size):
q = np.array([z[p[0]]+saturated_pixels[0][k],z[p[1]]+saturated_pixels[1][k]])
s = np.asarray(np.where((q[0]>=0) & (q[0]<image.shape[0]) &
(q[1]>=0) & (q[1]<image.shape[1])))[0]
mask[q[0,s],q[1,s]] = 0
return mask
def cosmic_ray_clean(data,params):
import cosmics
c = cosmics.cosmicsimage(data, gain=params.gain,
readnoise=params.readnoise, sigclip=20,
sigfrac=0.6, objlim=10)
c.run(maxiter = 3)
return c.cleanarray
def kappa_clip(mask,norm,threshold):
not_finished = True
bmask = np.ones(norm.shape,dtype=bool)
count = 0
while not_finished and count < 10:
nm = bmask*mask*norm
p = np.where(np.abs(nm)>0.0001)
sp = np.std(norm[p])
t = np.where(np.abs(norm) > threshold*sp)
if t:
print 'Rejecting',t[0].shape[0],'pixels'
bmask[t] = 0
count += 1
else:
not_finished = False
return bmask
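# Illustrative sketch: kappa_clip() iteratively rejects pixels whose value exceeds
# threshold times the standard deviation of the currently accepted pixels.
def _kappa_clip_demo():
    rng = np.random.RandomState(0)
    norm = rng.normal(0.0, 1.0, (32, 32))
    norm[5, 5] = 50.0                                # one strong outlier
    mask = np.ones(norm.shape, dtype=bool)
    return kappa_clip(mask, norm, 5.0)               # returned mask is 0 at the outlier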
def boxcar_blur(im):
d = np.zeros(im.shape)
m1 = im.shape[0] - 2
m2 = im.shape[1] - 2
for i in range(3):
for j in range(3):
d[1:m1+1,1:m2+1] += im[i:i+m1,j:j+m2]
d /= 9.0
return d
def convolve_undersample(im):
from scipy.ndimage.filters import convolve
x = np.arange(3)-1
xx,yy = np.meshgrid(x,x)
kernel = 0.25*(np.ones([3,3])-abs(xx*0.5))*(np.ones([3,3])-abs(yy*0.5))
c = convolve(im,kernel)
return c
def convolve_gauss(im,fwhm):
from scipy.ndimage.filters import convolve
sigma = fwhm/(2*np.sqrt(2*np.log(2.0)))
nk = 1 + 2*int(4*sigma)
x = np.arange(nk)-nk/2
xx,yy = np.meshgrid(x,x)
kernel = np.exp(-(xx**2 + yy**2)/(2*sigma**2))
kernel /= np.sum(kernel)
c = convolve(im,kernel)
return c
def convolve_disk(im,radius):
from scipy.ndimage.filters import convolve
radius = int(radius)
diameter = 2*radius + 1
x = np.arange(diameter)-radius
xx,yy = np.meshgrid(x,x)
kernel = np.zeros((diameter,diameter))
kernel[xx**2+yy**2<=radius**2] = 1.0
kernel /= np.sum(kernel)
fp_im = im*1.0
c = convolve(fp_im,kernel)
return c
def apply_photometric_scale(d,c,pdeg):
p = np.zeros(d.shape)
(m,n) = d.shape
eta = (range(n)-0.5*(n-1)*np.ones(n))/(n-1)
xi = (range(m)-0.5*(m-1)*np.ones(m))/(m-1)
x,y = np.meshgrid(eta,xi)
i = 0
for l in range(pdeg+1):
for m in range(pdeg-l+1):
t = (x**l)*(y**m)
p += c[i]*t
i += 1
q = d/p
return q
def undo_photometric_scale(d,c,pdeg,size=None,position=(0,0)):
md,nd = d.shape
if size:
(m,n) = size
else:
(m,n) = d.shape
p = np.zeros([md,nd])
eta = (range(n)-0.5*(n-1)*np.ones(n))/(n-1)
xi = (range(m)-0.5*(m-1)*np.ones(m))/(m-1)
x0,y0 = np.meshgrid(eta,xi)
x = x0[position[0]:position[0]+md,position[1]:position[1]+nd]
y = y0[position[0]:position[0]+md,position[1]:position[1]+nd]
i = 0
for l in range(pdeg+1):
for m in range(pdeg-l+1):
t = (x**l)*(y**m)
p += c[i]*t
i += 1
q = d*p
return q
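# Illustrative consistency check: undo_photometric_scale() multiplies by the same polynomial
# surface that apply_photometric_scale() divides by, so applying both with the same
# coefficients should reproduce the input frame to rounding error.
def _photometric_scale_roundtrip(d, c, pdeg):
    scaled = apply_photometric_scale(d, c, pdeg)
    return undo_photometric_scale(scaled, c, pdeg)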
def compute_fwhm(f,params,width=20,seeing_file='seeing',image_name=False):
g_width = None
if image_name:
fname = f
else:
fname = f.name
if os.path.exists(params.loc_output+os.path.sep+seeing_file):
for line in open(params.loc_output+os.path.sep+seeing_file,'r'):
sline = line.split()
if sline[0] == fname:
g_width = float(sline[1])
g_roundness = float(sline[2])
bgnd = float(sline[3])
signal = float(sline[4])
break
if g_width is None:
if isinstance(params.fwhm_section,np.ndarray):
w = params.fwhm_section
image = f.data[w[2]:w[3],w[0]:w[1]].copy()
mask = f.mask[w[2]:w[3],w[0]:w[1]].copy()
else:
image = f.data.copy()
mask = f.mask.copy()
print image.shape
print mask.shape
bgnd = np.percentile(image[mask==1],30)
image[mask==0] = bgnd
image -= bgnd
signal = image.sum()/image.size
c = fftconvolve(image, image[::-1, ::-1])
xcen = c.shape[0]/2
ycen = c.shape[1]/2
c_small = c[xcen-20:xcen+20,ycen-20:ycen+20]
c_small -= np.min(c_small)
xsize, ysize = c_small.shape
xcen = c_small.shape[0]/2
ycen = c_small.shape[1]/2
y, x = np.mgrid[:xsize, :ysize]
g_init = models.Gaussian2D(amplitude=c_small[xcen,ycen],x_stddev=1,y_stddev=1,x_mean=xcen,y_mean=ycen)
fit_g = fitting.LevMarLSQFitter()
g=fit_g(g_init,x,y,c_small)
gx = g.x_stddev.value
gy = g.y_stddev.value
g_width = np.mean((gx,gy))/np.sqrt(2.0)
g_roundness = np.max((gx,gy))/np.min((gx,gy))
#x1 = int(round(c.shape[0]*0.5))
#x2 = int(round(c.shape[0]*0.5+width))
#y1 = int(round(c.shape[1]*0.5))
#xx = np.arange(x2-x1+1)
#xnew = np.linspace(0,x2-x1,1000)
#fint = interp1d(xx,c[x1:x2+1,y1]-np.min(c[x1:x2+1,y1]),kind='cubic')
#ynew = fint(xnew)
#ymax = max(ynew)
#for i,y in enumerate(ynew):
# if y<ymax/2:
# fw = i*(xnew[1]-xnew[0])
# break
#if not(fw):
# fw = 6.0
p = open(seeing_file,'a')
p.write(f.name+' '+str(g_width)+' '+str(g_roundness)+' '+str(bgnd)+' '+str(signal)+'\n')
p.close()
return g_width, g_roundness, bgnd, signal
def subtract_sky(image,params):
from scipy.linalg import lu_solve, lu_factor, LinAlgError
print 'subtracting sky'
if params.sky_subtract_mode == 'percent':
image2 = image.copy()
if params.pixel_min > 0.1:
p = np.where(image2 > params.pixel_min)
const = np.percentile(image2[p],params.sky_subtract_percent)
else:
const = np.percentile(image2,params.sky_subtract_percent)
image2 -= const
print 'subtracting sky, constant =',const
return image2
else:
degree = params.sky_degree
(ni,mi) = image.shape
sxlen = image.shape[0]/5.0
sylen = image.shape[1]/5.0
x = np.zeros(25)
y = np.zeros(25)
z = np.zeros(25)
k = 0
for i in range(5):
for j in range(5):
section = image[int(i*sxlen):int((i+1)*sxlen),
int(j*sylen):int((j+1)*sylen)].ravel()
z[k] = np.min(section[section>params.pixel_min])
x[k] = ((i+0.5)*sxlen-0.5*(ni-1))/(ni-1)
y[k] = ((j+0.5)*sylen-0.5*(mi-1))/(mi-1)
print x[k],y[k],z[k]
k += 1
ncoeffs = (degree+1)*(degree+2)/2
bf = np.zeros([ncoeffs,k])
m = 0
for i in range(degree+1):
for j in range(degree+1-i):
bf[m,:] = (x[:k]**i) * (y[:k]**j)
m += 1
alpha = np.zeros([ncoeffs,ncoeffs])
beta = np.zeros(ncoeffs)
for i in range(ncoeffs):
for j in range(ncoeffs):
alpha[i,j] = np.sum(bf[i,:]*bf[j,:])
beta[i] = np.sum(z[:k]*bf[i,:])
try:
lu, piv = lu_factor(alpha)
except LinAlgError:
print 'LU decomposition failed in subtract_sky'
return image
c = lu_solve((lu,piv),beta).astype(np.float32).copy()
x = (range(ni)-0.5*(ni-1)*np.ones(ni))/(ni-1)
y = (range(mi)-0.5*(mi-1)*np.ones(mi))/(mi-1)
xx, yy = np.meshgrid(y,x)
m = 0
sky_image = np.zeros_like(image)
print 'coeffs = ',c
import os
import numpy as np
import tensorflow as tf
from tensorflow.python.training import queue_runner
def load_dataset(model, num_gpus, batch_size, output_dims, input_dims, seq_length, size, base_data_path, dataset, istraining, clip_length, video_offset, clip_offset, num_clips, clip_stride, video_step, preproc_debugging=0, shuffle_seed=0, verbose=True):
"""
Function to load the dataset, set up the input queue, and read data into the queue
Args:
:model: tf-activity-recognition framework model object
:num_gpus: Number of gpus to use when training
:batch_size: Number of clips to load into the model each step.
:input_dims: Number of frames used in input
:output_dims: Integer number of classes in current dataset
:seq_length: Length of output sequence expected from LSTM
:size: List detailing height and width of frame
:dataset: Name of dataset being processed
:base_data_path: Full path to root directory containing datasets
:istraining: Boolean variable indicating training/testing phase
:clip_length: Length of clips to cut video into, -1 indicates using the entire video as one clip
:clip_offset: "none" or "random" indicating where to begin selecting video clips
:num_clips: Number of clips to break video into
:clip_stride: Number of frames that overlap between clips, 0 indicates no overlap and negative values indicate a gap of frames between clips
Return:
Input data tensor, label tensor and name of loaded data (video/image)
"""
# Get a list of tfrecords file names from which to pull videos
filenames = []
number_of_tfrecords = 0
for f in os.listdir(base_data_path):
filenames.append(os.path.join(base_data_path,f))
number_of_tfrecords += 1
# END FOR
if verbose:
print "Number of records available: ", number_of_tfrecords
# END IF
# Create Queue which will read in videos num_gpus at a time (Queue seeded for repeatability of experiments)
tfrecord_file_queue = tf.train.string_input_producer(filenames, shuffle=istraining, name='file_q', seed=shuffle_seed)
# Errors occurring in a model's preprocessing function are not properly traced back when using 'clip_q'.
# If an error occurs stating that "fifo_queue has insufficient elements", then set '--preprocDebugging 1'
# For debugging, a batch_size other than 1 will cause instability
if preproc_debugging:
input_data_tensor, labels_tensor, names_tensor, video_step_tensor, alpha_tensor = _load_video(model, output_dims, input_dims, seq_length, size, base_data_path, dataset, istraining, clip_length, video_offset, clip_offset, num_clips, clip_stride, tfrecord_file_queue, video_step)
else:
tf.set_random_seed(0) # To ensure the numbers are generated for temporal offset consistently
# Number of threads to be used
thread_count = 1
# Initialize queue that will contain multiple clips of the format [[clip_frame_count, height, width, channels], [labels_copied_seqLength], [name_of_video]]
clip_q = tf.FIFOQueue(num_gpus*batch_size*thread_count, dtypes=[tf.float32, tf.int32, tf.string, tf.float32, tf.float32], shapes=[[input_dims, size[0], size[1], 3],[seq_length],[],[],[]])
# Attempts to load num_gpus*batch_size number of clips into queue, if there exist too many clips in a video then this function blocks until the clips are dequeued
enqueue_op = clip_q.enqueue_many(_load_video(model, output_dims, input_dims, seq_length, size, base_data_path, dataset, istraining, clip_length, video_offset, clip_offset, num_clips, clip_stride, tfrecord_file_queue, video_step))
# Initialize the queuerunner and add it to the collection, this becomes initialized in train_test_TFRecords_multigpu_model.py after the Session is begun
qr = tf.train.QueueRunner(clip_q, [enqueue_op]*num_gpus*thread_count)
queue_runner.add_queue_runner(qr)
# Dequeue the required number of clips so that each gpu contains batch_size clips
input_data_tensor, labels_tensor, names_tensor, video_step_tensor, alpha_tensor = clip_q.dequeue_many(num_gpus*batch_size)
# END IF
    # Track scalar value defined in a model's preprocessing function in a class variable called 'store_alpha'
if hasattr(model, 'store_alpha'):
model.store_alpha = alpha_tensor
model.add_track_variables('Parameterization_Variables', model.store_alpha)
# END IF
return input_data_tensor, labels_tensor, names_tensor
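# Hedged usage sketch (an assumption, not part of this module): the tensors
# returned by load_dataset are fed by the QueueRunner registered above, so a
# consumer would normally start the queue runners before evaluating them:
#
#     sess = tf.Session()
#     sess.run(tf.global_variables_initializer())
#     coord = tf.train.Coordinator()
#     threads = tf.train.start_queue_runners(coord=coord, sess=sess)
#     data, labels, names = sess.run([input_data_tensor, labels_tensor, names_tensor])
#     coord.request_stop()
#     coord.join(threads)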
def _load_video(model, output_dims, input_dims, seq_length, size, base_data_path, dataset, istraining, clip_length, video_offset, clip_offset, num_clips, clip_stride, tfrecord_file_queue, video_step):
"""
    Function to load a single video and preprocess its frames
Args:
:model: tf-activity-recognition framework model object
:input_dims: Number of frames used in input
:output_dims: Integer number of classes in current dataset
:seq_length: Length of output sequence expected from LSTM
:size: List detailing height and width of frame
:dataset: Name of dataset being processed
:base_data_path: Full path to root directory containing datasets
:istraining: Boolean variable indicating training/testing phase
        :clip_length:      Length of clips to cut video into, -1 indicates using the entire video as one clip
:clip_offset: "none" or "random" indicating where to begin selecting video clips
:num_clips: Number of clips to break video into
:clip_stride: Number of frames that overlap between clips, 0 indicates no overlap and -1 indicates clips are randomly selected and not sequential
:tfrecord_file_queue: A queue containing remaining videos to be loaded for the current epoch
Return:
Input data tensor, label tensor and name of loaded data (video/image)
"""
# Dequeue video data from queue and convert it from TFRecord format (int64 or bytes)
features = _read_tfrecords(tfrecord_file_queue)
frames = tf.cast(features['Frames'], tf.int32)
height = tf.cast(features['Height'], tf.int32)
width = tf.cast(features['Width'], tf.int32)
channel = tf.cast(features['Channels'], tf.int32)
label = tf.cast(features['Label'], tf.int32)
name = features['Name']
# Shape [frames, height, width, channels]
input_data_tensor = tf.reshape(tf.decode_raw(features['Data'], tf.uint8), tf.stack([frames,height,width,channel]))
# BGR to RGB
input_data_tensor = input_data_tensor[...,::-1]
# Reduction in fps to 25 for HMDB51 dataset
if ('HMDB51' in dataset) or ('MIT' in dataset):
input_data_tensor, frames, indices = _reduce_fps(input_data_tensor, frames)
# END IF
# If clip_length == -1 then the entire video is to be used as a single clip
if clip_length <= 0:
clips = [input_data_tensor]
clips = tf.to_int32(clips) # Usually occurs within _extract_clips
else:
clips = _extract_clips(input_data_tensor, frames, num_clips, clip_offset, clip_length, video_offset, clip_stride, height, width, channel)
# END IF
""" Reference of shapes:
clips shape: [num_clips, clip_length or frames, height, width, channels]
model.preprocess_tfrecords input shape: [clip_length or frames, height, width, channels]
"""
# Call preprocessing function related to model chosen that preprocesses each clip as an individual video
if hasattr(model, 'store_alpha'):
clips_tensor = tf.map_fn(lambda clip: model.preprocess_tfrecords(clip[0], tf.shape(clip[0])[0], height, width,channel, input_dims, output_dims, seq_length, size, label, istraining, video_step),
(clips, np.array([clips.get_shape()[0].value]*clips.get_shape()[0].value)), dtype=(tf.float32, tf.float32))
alpha_tensor = clips_tensor[1]
clips_tensor = clips_tensor[0]
else:
clips_tensor = tf.map_fn(lambda clip: model.preprocess_tfrecords(clip, tf.shape(clip)[0], height, width,channel, input_dims, output_dims, seq_length, size, label, istraining, video_step),
clips, dtype=tf.float32)
alpha_tensor = np.array([1.0]*clips.get_shape()[0].value)
# END IF
num_clips = tf.shape(clips_tensor)[0]
video_step = tf.assign_add(video_step, 1)
labels_tensor = tf.tile( [label], [seq_length])
names_tensor = tf.tile( [name], [num_clips])
video_step_tensor = tf.tile([video_step], [num_clips])
""" Reference of shape:
clips_tensor shape: [num_clips, input_dims, size[0], size[1], channels]
"""
return [clips_tensor, tf.tile([labels_tensor], [num_clips,1]), names_tensor, video_step_tensor, alpha_tensor]
def _read_tfrecords(filename_queue):
"""
Function that reads and returns the tfrecords of a selected dataset one at a time
Args:
:filename_queue: A queue of all filenames within a dataset
Return:
Dictionary containing features of a single sample
"""
feature_dict = {}
reader = tf.TFRecordReader()
_, serialized_example = reader.read(filename_queue)
feature_dict['Label'] = tf.FixedLenFeature([], tf.int64)
feature_dict['Data'] = tf.FixedLenFeature([], tf.string)
feature_dict['Frames'] = tf.FixedLenFeature([], tf.int64)
feature_dict['Height'] = tf.FixedLenFeature([], tf.int64)
feature_dict['Width'] = tf.FixedLenFeature([], tf.int64)
feature_dict['Channels'] = tf.FixedLenFeature([], tf.int64)
feature_dict['Name'] = tf.FixedLenFeature([], tf.string)
features = tf.parse_single_example(serialized_example, features=feature_dict)
return features
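# Hedged sketch of the writer side (a hypothetical helper, not part of the
# original code): it shows one way a record matching the schema parsed by
# _read_tfrecords could be serialized. The path and name arguments are assumed
# to be byte strings and video_uint8 a numpy uint8 array of shape
# [frames, height, width, channels].
def _example_write_tfrecord(path, video_uint8, label, name):
    def _int64(v):
        return tf.train.Feature(int64_list=tf.train.Int64List(value=[int(v)]))
    def _bytes(v):
        return tf.train.Feature(bytes_list=tf.train.BytesList(value=[v]))
    frames, height, width, channels = video_uint8.shape
    example = tf.train.Example(features=tf.train.Features(feature={
        'Label':    _int64(label),
        'Data':     _bytes(video_uint8.tobytes()),
        'Frames':   _int64(frames),
        'Height':   _int64(height),
        'Width':    _int64(width),
        'Channels': _int64(channels),
        'Name':     _bytes(name),
    }))
    writer = tf.python_io.TFRecordWriter(path)
    writer.write(example.SerializeToString())
    writer.close()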
def _extract_clips(video, frames, num_clips, clip_offset, clip_length, video_offset, clip_stride, height, width, channel):
"""
Function that extracts clips from a video based off of clip specifications
Args:
:video: The video tensor that needs to be split into clips
:frames: The number of frames of the video
:num_clips: Number of clips to break video into
:clip_offset: "none" or "random" indicating where to begin selecting video clips
        :clip_length:      Length of clips to cut video into, -1 indicates using the entire video as one clip
:clip_stride: Number of frames that overlap between clips, 0 indicates no overlap and negative values indicate a gap of frames between clips
Return:
A tensor containing the clip(s) extracted from the video (shape [clip_number, clip_frames, height, width, channel])
"""
if video_offset == 'random':
video_start = tf.random_uniform([], maxval=frames-1, dtype=tf.int32)
else:
video_start = 0
if clip_offset == 'random':
video = tf.cond(tf.greater(clip_length, frames),
lambda: _loop_video_with_offset(video, video, 0, frames, height, width, channel, clip_length),
lambda: video)
clip_begin = tf.random_uniform([num_clips], minval=0, maxval=tf.shape(video)[0]-clip_length+1, dtype=tf.int32)
rs = tf.reshape(clip_begin, [num_clips,1,1,1])
video = tf.to_int32(video)
clips = tf.map_fn(lambda clip_start: video[clip_start[0][0][0]:clip_start[0][0][0]+clip_length], rs)
else:
if num_clips > 0:
frames_needed = clip_length + (clip_length-clip_stride) * (num_clips-1)
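            # e.g. (illustrative numbers) clip_length=16, clip_stride=8
            # (8 overlapping frames) and num_clips=3 give
            # frames_needed = 16 + (16-8)*2 = 32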
video = tf.cond(tf.greater(frames_needed, frames-video_start),
lambda: _loop_video_with_offset(video[video_start:,:,:,:], video, frames-video_start, frames, height, width, channel, frames_needed),
lambda: video[video_start:,:,:,:])
clip_begin = tf.range(0, frames_needed, delta = clip_length-clip_stride)[:num_clips]
rs = tf.reshape(clip_begin, [num_clips,1,1,1])
video = tf.to_int32(video)
clips = tf.map_fn(lambda clip_start: video[clip_start[0][0][0]:clip_start[0][0][0]+clip_length], rs)
else:
# Get total number of clips possible given clip_length stride and offset
# Need minimum one clip: loop video until at least have clip_length frames
video = tf.cond(tf.greater(clip_length, frames-video_start),
lambda: _loop_video_with_offset(video[video_start:,:,:,:], video, frames-video_start, frames, height, width, channel, clip_length+video_start),
lambda: video[video_start:,:,:,:])
number_of_clips = tf.cond(tf.greater(clip_length, frames-video_start),
lambda: 1,
lambda: (frames-video_start-clip_length) / (clip_length - clip_stride) + 1)
            clip_begin =
import os
import abc
import struct
import logging
import time
from collections import OrderedDict, namedtuple
from datetime import datetime
from typing import List, Dict, Optional, Any, Type, TypeVar
from .audit_event import AUDIT_EVENT, get_audit_events
from .bsm_h import *
from .bsm_errors import BSM_ERRORS
from .audit_record import *
from .bsm_token_type import *
logger = logging.getLogger(__name__)
#https://github.com/openbsm/openbsm/blob/master/libbsm/bsm_io.c
AUDIT_HEADER_SIZE = 18
AUDIT_TRAILER_SIZE = 7
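# These sizes are consistent with the token layouts defined below: a Header32
# record is a 1-byte token id + 4 (size) + 1 (version) + 2 (event type)
# + 2 (modifier) + 4 (seconds) + 4 (milliseconds) = 18 bytes, and a Trailer is
# a 1-byte token id + 2 (magic) + 4 (record byte count) = 7 bytes.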
Token = TypeVar("Token", bound="BaseToken")
Rec = TypeVar("Rec", bound="Record")
class NotImplementedToken(Exception):
pass
class UnknownHeader(Exception):
pass
class Header(BSMStruct):
__fields__ = [
("size", UInt32("size")),
("version", UInt8("version")),
("event_type", EventType("event_type")),
("modifier", UInt16("modifier")),
]
class Header32(BSMStruct):
"""
record byte count 4 bytes
version # 1 byte [2]
event type 2 bytes
event modifier 2 bytes
seconds of time 4 bytes/8 bytes (32-bit/64-bit value)
milliseconds of time 4 bytes/8 bytes (32-bit/64-bit value)
Example:
record = Record()
record.generate(AUT_HEADER32)
"""
token_id = AUT_HEADER32
identifier = "header"
__fields__ = [
("_", Header()),
("time", DateTime("time")),
("msec", MSec("msec")),
]
class Header32_Ex(BSMStruct):
"""
* The Solaris specifications for AUE_HEADER32_EX seem to differ a bit
* depending on the bit of the specifications found. The OpenSolaris source
* code uses a 4-byte address length, followed by some number of bytes of
* address data. This contrasts with the Solaris audit.log.5 man page, which
* specifies a 1-byte length field. We use the Solaris 10 definition so that
* we can parse audit trails from that system.
*
* record byte count 4 bytes
* version # 1 byte [2]
* event type 2 bytes
* event modifier 2 bytes
* address type/length 4 bytes
* [ Solaris man page: address type/length 1 byte]
* machine address 4 bytes/16 bytes (IPv4/IPv6 address)
* seconds of time 4 bytes/8 bytes (32/64-bits)
* nanoseconds of time 4 bytes/8 bytes (32/64-bits)
"""
token_id = AUT_HEADER32_EX
identifier = "header"
__fields__ = [
("_", Header()),
("address", IPAddress("address")),
("time", DateTime("time")),
("msec", MSec("msec"))
]
class Trailer(BSMStruct):
"""
trailer magic 2 bytes show=False
record size 4 bytes
"""
token_id = AUT_TRAILER
identifier = "trailer"
__fields__ = [
("magic", UInt16("magic")),
("count", UInt32("count")),
]
class Argument(BSMStruct):
"""
* argument # 1 byte
* argument value 4 bytes/8 bytes (32-bit/64-bit value)
* text length 2 bytes
* text N bytes + 1 terminating NULL byte
"""
identifier = "argument"
__fields__ = [
("no", UInt8("no")),
("val", UInt32("val")),# Hex
("text", String()),
]
class Arg32(BSMStruct):
token_id = AUT_ARG32
identifier = "argument"
__fields__ = [
("_", Argument()),
]
class Arg64(BSMStruct):
"""
"no": ">b",
"val": ">Q",
"text_size": ">H",
"text": ">{text_size}s",
"""
token_id = AUT_ARG64
identifier = "argument"
__fields__ = [
("no", UInt8("no")),
("val", UInt64("val")),# Hex
("text", String()),
]
class Text(BSMStruct):
token_id = AUT_TEXT
identifier = "text"
__fields__ = [
("data", String()),
]
class Path(BSMStruct):
token_id = AUT_PATH
identifier = "path"
__fields__ = [
("data", String()),
]
class Return(BSMStruct):
identifier = "return"
__fields__ = [
("errno", ReturnString("errno")),
]
class Return32(BSMStruct):
"""
"errno": ">B",
"value": ">I",
"""
token_id = AUT_RETURN32
identifier = "return"
__fields__ = [
("_", Return()),
("value", UInt32("value")),
]
class Return64(BSMStruct):
token_id = AUT_RETURN64
identifier = "return"
__fields__ = [
("_", Return()),
("value", UInt64("value")),
]
class ReturnUuid(BSMStruct):
""" TODO:
{
struct openbsm_uuid uuid_be;
int err = 0;
READ_TOKEN_U_CHAR(buf, len, tok->tt.ret_uuid.no, tok->len, err);
if (err)
return (-1);
READ_TOKEN_BYTES(buf, len, &uuid_be, sizeof(uuid_be), tok->len, err);
if (err)
return (-1);
openbsm_uuid_dec_be(&uuid_be,
(struct openbsm_uuid *)&tok->tt.ret_uuid.uuid);
READ_TOKEN_U_INT16(buf, len, tok->tt.ret_uuid.len, tok->len, err);
if (err)
return (-1);
SET_PTR((char*)buf, len, tok->tt.ret_uuid.text, tok->tt.ret_uuid.len,
tok->len, err);
if (err)
return (-1);
return (0);
}
"""
token_id = AUT_RETURN_UUID
identifier = "ret_uuid"
__fields__ = [
("_", Return()),
("size_of_uuid", UInt16("size_of_uuid")),
("uuid", String()),
]
class Uuid(BSMStruct):
"""
"""
    token_id = AUT_ARG_UUID
identifier = "uuid"
__fields__ = [
("no", UInt8()),
("uuid_be", "set_uuid_be"),
("uuid", ByteString(length_fmt="H"))
]
    def set_uuid_be(self, rec: Rec):
uuid_fields = [
("time_low", UInt32()),
("time_mid", UInt16()),
("time_hi_and_version", UInt16()),
("clock_seq_hi_and_reserved", UInt8()),
("clock_seq_low", UInt8()),
]
uuid_struct_fmt = ">"
size = 0
for name, field in uuid_fields:
uuid_struct_fmt += field.fmt
size += field.size
if size > self.no:
return None
uuid_struct_fmt += f"{self.no-size}s"
uuid_fields.append(("node", None))
uuid = struct.unpack(uuid_struct_fmt, rec.read(self.no))
return OrderedDict(zip([name for name, _ in uuid_fields], uuid))
class Identity(BSMStruct):
"""
"signer_type": ">I",
"signing_size": ">H",
"signing_id": ">{signing_size}s",
"signing_id_truncated": ">B",
"team_id_length": ">H",
"team_id": ">{team_id_length}s",
"team_id_truncated": ">B",
"cdhash_size": ">H",
"cdhash": ">{cdhash_size}s"
"""
token_id = AUT_IDENTITY
identifier = "identity"
__fields__ = [
("signer_type", UInt32("signer_type")),
("signing_id", String()),
("signing_id_truncated", CompleteString("signing_id_truncated")),
("team_id", String()),
("team_id_truncated", CompleteString("team_id_truncated")),
("cbhash", ByteString()),
]
class Subject(BSMStruct):
identifier = "subject"
__fields__ = [
("auid", User("auid")),
("euid", User("euid")),
("egid", Group("egid")),
("ruid", User("ruid")),
("rgid", Group("rgid")),
("pid", Process("pid")),
("sid", UInt32("sid")),
]
class Subject32(BSMStruct):
"""
audit ID 4 bytes
effective user ID 4 bytes
effective group ID 4 bytes
real user ID 4 bytes
real group ID 4 bytes
process ID 4 bytes
session ID 4 bytes
terminal ID
port ID 4 bytes/8 bytes (32-bit/64-bit value)
machine address 4 bytes
"""
token_id = AUT_SUBJECT32
identifier = "subject"
__fields__ = [
("_", Subject()),
("tid_port", UInt32("tid_port")),
("tid_address", IPv4Address("tid_address")),
]
class Subject32_Ex(BSMStruct):
"""
* audit ID 4 bytes
* euid 4 bytes
* egid 4 bytes
* ruid 4 bytes
* rgid 4 bytes
* pid 4 bytes
* sessid 4 bytes
* terminal ID
* portid 4 bytes
* type 4 bytes
* machine id 16 bytes
"""
token_id = AUT_SUBJECT32_EX
identifier = "subject_ex"
__fields__ = [
("_", Subject()),
("tid_port", UInt32("tid_port")),
("tid_address", IPAddress("tid_address")),
]
#TODO: Complete Subject64
class Subject64(BSMStruct):
token_id = AUT_SUBJECT64
identifier = "subject"
__fields__ = [
("_", Subject()),
]
#TODO: Complete Subject64Ex
class Subject64Ex(BSMStruct):
token_id = AUT_SUBJECT64_EX
identifier = "subject_ex"
__fields__ = [
("_", Subject()),
]
class Attr(BSMStruct):
"""
* file access mode 4 bytes
* owner user ID 4 bytes
* owner group ID 4 bytes
* file system ID 4 bytes
* node ID 8 bytes
* device 4 bytes/8 bytes (32-bit/64-bit)
"""
token_id = AUT_ATTR
identifier = "attribute"
__fields__ = [
("mode", UInt32("mode")),
("uid", User("uid")),
("gid", Group("gid")),
("fsid", UInt32("fsid")),
("nodeid", UInt64("nodeid")),
("device", UInt32("device")),
]
class Attr32(BSMStruct):
token_id = AUT_ATTR32
identifier = "attribute"
__fields__ = [
("_", Attr()),
]
class Attr64(BSMStruct):
token_id = AUT_ATTR64
identifier = "attribute"
__fields__ = [
("_", Attr()),
]
class Opaque(BSMStruct):
token_id = AUT_OPAQUE
identifier = "opaque"
__fields__ = [
("data", ByteString())
]
class Exit(BSMStruct):
"""
* status 4 bytes
* return value 4 bytes
"""
token_id = AUT_EXIT
identifier = "exit"
__fields__ = [
("errval", UInt32("errval")),
("retval", UInt32("retval")),
]
class ExecArgs(BSMStruct):
"""
* count 4 bytes
* text count null-terminated string(s)
"""
token_id = AUT_EXEC_ARGS
identifier = "exec arg"
__fields__ = [
("count", UInt32()),
("args", "set_args"),
]
def set_args(self, rec: Rec):
"""TODO: Check AUDIT_MAX_ARGS
for (i = 0; i < tok->tt.execarg.count; i++) {
bptr = buf + tok->len;
if (i < AUDIT_MAX_ARGS)
tok->tt.execarg.text[i] = (char*)bptr;
/* Look for a null terminated string. */
        while (bptr && (*bptr != '\0')) {
if (++tok->len >= (u_int32_t)len)
return (-1);
bptr = buf + tok->len;
}
if (!bptr)
return (-1);
tok->len++; /* \0 character */
}
if (tok->tt.execarg.count > AUDIT_MAX_ARGS)
tok->tt.execarg.count = AUDIT_MAX_ARGS;
"""
return [String(length_fmt="", until_null=True)(rec) for i in range(self.count.value)]
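        # Illustrative example (hedged): a token with count=2 followed by the
        # raw bytes b"ls\x00" b"-la\x00" would decode to the two argument
        # strings "ls" and "-la".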
class ExecEnv(BSMStruct):
""" TODO:
"""
token_id = AUT_EXEC_ENV
identifier = "exec env"
__fields__ = [
("args", Texts("args")),
]
class OtherFile(BSMStruct):
"""
* seconds of time 4 bytes
* milliseconds of time 4 bytes
* file name len 2 bytes
* file pathname N bytes + 1 terminating NULL byte
"""
token_id = AUT_OTHER_FILE32
identifier = "file"
__fields__ = [
("time", UInt32("time")),
("msec", MSec("msec")),
("pathname", String(with_null=True)),
]
class NewGroups(BSMStruct):
"""
* number groups 2 bytes
* group list count * 4 bytes
"""
token_id = AUT_NEWGROUPS
identifier = "group"
__fields__ = [
("num", UInt16()),
("groups", "set_groups"),
]
def set_groups(self, rec: Rec):
"""
for (i = 0; i<tok->tt.grps.no; i++) {
READ_TOKEN_U_INT32(buf, len, tok->tt.grps.list[i], tok->len,
err);
if (err)
return (-1);
}
"""
        return [UInt32.unpack(rec) for i in range(self.num.value)]
class InAddr(BSMStruct):
"""
* Internet addr 4 bytes
"""
token_id = AUT_IN_ADDR
identifier = "ip addr"
__fields__ = [
("addr", IPv4Address("addr")),
]
class InAddrEx(BSMStruct):
"""
type 4 bytes
address 16 bytes
"""
token_id = AUT_IN_ADDR_EX
identifier = "ip addr ex"
__fields__ = [
("address", IPAddress("address")),
]
class Ip(BSMStruct):
""" TODO:
ip header 20 bytes
"""
token_id = AUT_IP
identifier = "ip"
__fields__ = [
("version",UInt32("version")),
("tos",UInt32("tos")),
("len",UInt32("len")),
("id",UInt32("id")),
("offset",UInt32("offset")),
("ttl",UInt32("ttl")),
("prot",UInt32("prot")),
("chksm",UInt32("chksm")),
("src",UInt32("src")),
("dest",UInt32("dest")),
]
class Ipc(BSMStruct):
""" TODO:
* object ID type 1 byte
    """
x2_type_ids], dim=0).unsqueeze(0).expand(bsz, -1)
indices = x1_indices + [x_len]
x_forwards = []
x_backwards = []
for i in range(len(indices)):
if i == 0:
j = 0
k = indices[0]
sd = th.zeros((bsz, k), dtype=th.long, device=x.device)
td = th.ones((bsz, k), dtype=th.long, device=x.device)
else:
if i == 1:
j = 0
else:
j = indices[i - 2]
k = indices[i]
sd = sent_ids[:, j:k]
td = type_ids[:, j:k] - type_ids[:, j:(j+1)] # zero-one
inp = x[:, j:k]
mk = mask[:, j:k]
# pd = th.cumsum(mk, dim=1).masked_fill(th.logical_not(mk), 0)
pd = None
if stance_logit is None or i == 0:
sl = None
else:
dummy_sl = th.zeros(
(bsz, indices[i-1]-j, stance_logit.size(-1)),
device=stance_logit.device,
dtype=stance_logit.dtype
)
dummy_sl[:, :, 0].fill_(INF)
sl = th.cat([dummy_sl, stance_logit[:, indices[i-1]:k]], dim=1)
# sl = stance_logit[:, j:k]
if disco_logit is None or i == 0:
dl = None
else:
dummy_dl = th.zeros(
(bsz, indices[i-1]-j, disco_logit.size(-1)),
device=disco_logit.device,
dtype=disco_logit.dtype
)
dummy_dl[:, :, 0].fill_(INF)
dl = th.cat([dummy_dl, disco_logit[:, indices[i-1]:k]], dim=1)
# dl = disco_logit[:, j:k]
feat = encoder.forward(
inp,
mask=mk,
sent_ids=sd,
type_ids=td,
pos_ids=pd,
stance_logit=sl,
disco_logit=dl
)[0]
if i == 0:
x_forwards.append(feat)
else:
x_backwards.append(feat[:, :indices[i-1]-j])
x_forwards.append(feat[:, indices[i-1]-j:])
if i == len(indices) - 1: # text
j = indices[i-1] if i > 0 else 0
k = indices[i]
inp = x[:, j:k]
mk = mask[:, j:k]
sd = sent_ids[:, j:k]
td = type_ids[:, j:k] - type_ids[:, j:(j+1)] + 2
# pd = th.cumsum(mk, dim=1).masked_fill(th.logical_not(mk), 0)
pd = None
if stance_logit is None:
sl = None
else:
dummy_sl = th.zeros(
(bsz, indices[i-1]-j, stance_logit.size(-1)),
device=stance_logit.device,
dtype=stance_logit.dtype
)
dummy_sl[:, :, 0].fill_(INF)
sl = th.cat([dummy_sl, stance_logit[:, indices[i-1]:k]], dim=1)
# sl = stance_logit[:, j:k]
if disco_logit is None:
dl = None
else:
dummy_dl = th.zeros(
(bsz, indices[i-1]-j, disco_logit.size(-1)),
device=disco_logit.device,
dtype=disco_logit.dtype
)
dummy_dl[:, :, 0].fill_(INF)
dl = th.cat([dummy_dl, disco_logit[:, indices[i-1]:k]], dim=1)
# dl = disco_logit[:, j:k]
feat = encoder.forward(
inp,
mask=mk,
sent_ids=sd,
type_ids=td,
pos_ids=pd,
stance_logit=sl,
disco_logit=dl
)[0]
x_backwards.append(feat)
xs = []
if isinstance(cell, AttnCell):
for i in range(len(indices)):
j = indices[i-1] if i > 0 else 0
k = indices[i]
xs.append(cell(x_forwards[i], x_backwards[i], mask[:, j:k]))
else:
for i in range(len(indices)):
xs.append(cell(x_forwards[i], x_backwards[i]))
x1_split_sizes = [x1_indices[0]] + [x1_indices[i] - x1_indices[i-1] for i in range(1, len(x1_indices))]
x2_split_sizes = [x2_indices[0]] + [x2_indices[i] - x2_indices[i-1] for i in range(1, len(x2_indices))]
xs = tuple(xs[:-1]) + th.split(xs[-1], x2_split_sizes, dim=1)
masks = th.split(x1_mask, x1_split_sizes, dim=1) + th.split(x2_mask, x2_split_sizes, dim=1)
return xs, masks
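    # Worked example (illustrative values): with x1_indices = [5, 12] and
    # x2_indices = [4, 9] (so x1_len = 12, x2_len = 9), indices = [5, 12, 21],
    # x1_split_sizes = [5, 7] and x2_split_sizes = [4, 5]; xs and masks then
    # each hold one chunk per context sentence followed by one chunk per text
    # sentence, i.e. four (bsz, segment_len, ...) pieces in total.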
# def encode(
# self,
# x1,
# x2,
# x1_mask=None,
# x2_mask=None,
# x1_sent_ids=None,
# x2_sent_ids=None,
# stance_logit=None,
# disco_logit=None
# ):
# if isinstance(self, nn.DataParallel):
# encoder = self.module.encoder
# cell = self.module.cell
# else:
# encoder = self.encoder
# cell = self.cell
# bsz, x1_len = x1.size()
# x2_len = x2.size(1)
# x_len = x1_len + x2_len
# if x1_mask is None:
# x1_mask = th.ones((bsz, x1_len), dtype=th.bool, device=x1.device)
# if x2_mask is None:
# x2_mask = th.ones((bsz, x2_len), dtype=th.bool, device=x2.device)
# if x1_sent_ids is not None:
# x1_indices = process_indices(x1_sent_ids)
# else:
# x1_sent_ids = th.zeros_like(x1)
# x1_indices = th.tensor([x1_len], dtype=th.long, device=x1.device)
# if x2_sent_ids is not None:
# x2_indices = process_indices(x2_sent_ids)
# else:
# x2_sent_ids = th.zeros_like(x2)
# x2_indices = th.tensor([x2_len], dtype=th.long, device=x2.device)
# x1_type_ids, x2_type_ids = process_type_ids(x1_indices, x2_indices, method="segmented")
# num_context = th.max(x1_type_ids, dim=0, keepdim=True)[0] + 1
# dummy_type_ids = self.max_num_context - num_context
# x1_type_ids = x1_type_ids + dummy_type_ids
# x2_type_ids = x2_type_ids + dummy_type_ids
# x1_indices, x2_indices = x1_indices.tolist(), x2_indices.tolist()
# x = th.cat([x1, x2], dim=1)
# mask = th.cat([x1_mask, x2_mask], dim=1)
# # pos_ids = th.cumsum(mask, dim=1).masked_fill(th.logical_not(mask), 0)
# pos_ids = None
# sent_ids = th.cat([x1_sent_ids, x2_sent_ids+x1_sent_ids[:, -1:]+1], dim=1)
# type_ids = th.cat([x1_type_ids, x2_type_ids], dim=0).unsqueeze(0).expand(bsz, -1)
# indices = x1_indices + [x_len]
# dummy_ids = th.ones_like(sent_ids)
# clamped_ids = th.clamp(sent_ids, max=len(x1_indices)) # regard all x2 as a whole
# even_sent_ids = th.bitwise_or(clamped_ids, dummy_ids)
# even_mask = even_sent_ids.unsqueeze(1) == even_sent_ids.unsqueeze(2)
# odd_sent_ids = th.bitwise_or(clamped_ids + 1, dummy_ids)
# odd_mask = odd_sent_ids.unsqueeze(1) == odd_sent_ids.unsqueeze(2)
# even_mask.masked_fill_((mask == 0).unsqueeze(-1), 0)
# odd_mask.masked_fill_((mask == 0).unsqueeze(-1), 0)
# if len(indices) % 2 == 1:
# even_type_ids = th.cat(
# [
# type_ids[:, :x1_len] % 2,
# type_ids[:, x1_len:] - type_ids[:, x1_len:(x1_len+1)] + 2
# ],
# dim=1
# )
# odd_type_ids = (type_ids + 1) % 2
# else:
# even_type_ids = type_ids % 2
# odd_type_ids = th.cat(
# [
# (type_ids[:, :x1_len] + 1) % 2,
# type_ids[:, x1_len:] - type_ids[:, x1_len:(x1_len+1)] + 2
# ],
# dim=1
# )
# even_x = encoder.forward(
# x,
# mask=even_mask,
# sent_ids=sent_ids,
# type_ids=even_type_ids,
# pos_ids=pos_ids,
# stance_logit=stance_logit,
# disco_logit=disco_logit
# )[0]
# odd_x = encoder.forward(
# x,
# mask=odd_mask,
# sent_ids=sent_ids,
# type_ids=odd_type_ids,
# pos_ids=pos_ids,
# stance_logit=stance_logit,
# disco_logit=disco_logit
# )[0]
# x1_split_sizes = [x1_indices[0]] + [x1_indices[i] - x1_indices[i-1] for i in range(1, len(x1_indices))]
# x2_split_sizes = [x2_indices[0]] + [x2_indices[i] - x2_indices[i-1] for i in range(1, len(x2_indices))]
# even_xs = th.split(
# even_x,
# x1_split_sizes + x2_split_sizes,
# dim=1
# )
# odd_xs = th.split(
# odd_x,
# x1_split_sizes + x2_split_sizes,
# dim=1
# )
# masks = th.split(x1_mask, x1_split_sizes, dim=1) + th.split(x2_mask, x2_split_sizes, dim=1)
# xs = []
# if isinstance(cell, AttnCell):
# for i in range(len(even_xs)):
# if i % 2 == 0:
# xs.append(cell(odd_xs[i], even_xs[i], masks[i]))
# else:
# xs.append(cell(even_xs[i], odd_xs[i]))
# else:
# for i in range(len(even_xs)):
# if i % 2 == 0:
# xs.append(cell(odd_xs[i], even_xs[i]))
# else:
# xs.append(cell(even_xs[i], odd_xs[i]))
# return xs, masks
class HAN(nn.Module):
def __init__(self, **kw):
super(HAN, self).__init__()
max_num_text = kw.get("max_num_text", 1)
max_num_context = kw.get("max_num_context", 1)
encoder = kw.get("encoder", "roberta")
hidden_dim = kw.get("hidden_dim", 128)
num_perspectives = kw.get("num_perspectives", 8)
num_labels = kw.get("num_labels", 3)
dropout = kw.get("dropout", 0.0)
self.max_num_context = max_num_context
self.max_num_text = max_num_text
self.drop = nn.Dropout(dropout, inplace=False)
if encoder == "bert":
self.encoder = BertEncoder(num_segments=max_num_text+max_num_context+2, **kw)
dim = self.encoder.get_output_dim()
self.word_linear = nn.Linear(dim, dim)
self.word_attn_vec = nn.Parameter(th.Tensor(dim))
self.sent_encoder = TransformerLayer(
input_dim=dim,
hidden_dim=hidden_dim,
num_heads=num_perspectives,
add_residual=True,
add_gate=False,
pre_lnorm=True,
post_lnorm=False,
dropout=0.0
)
self.sent_linear = nn.Linear(dim, dim)
self.sent_attn_vec = nn.Parameter(th.Tensor(dim))
elif encoder == "albert":
self.encoder = AlbertEncoder(num_segments=max_num_text+max_num_context+2, **kw)
dim = self.encoder.get_output_dim()
self.word_linear = nn.Linear(dim, dim)
self.word_attn_vec = nn.Parameter(th.Tensor(dim))
self.sent_encoder = TransformerLayer(
input_dim=dim,
hidden_dim=hidden_dim,
num_heads=num_perspectives,
add_residual=True,
add_gate=False,
pre_lnorm=True,
post_lnorm=False,
dropout=0.0
)
self.sent_linear = nn.Linear(dim, dim)
self.sent_attn_vec = nn.Parameter(th.Tensor(dim))
elif encoder == "roberta":
self.encoder = RobertaEncoder(num_segments=max_num_text+max_num_context+2, **kw)
dim = self.encoder.get_output_dim()
self.word_linear = nn.Linear(dim, dim)
self.word_attn_vec = nn.Parameter(th.Tensor(dim))
self.sent_encoder = TransformerLayer(
input_dim=dim,
hidden_dim=hidden_dim,
num_heads=num_perspectives,
add_residual=True,
add_gate=False,
pre_lnorm=True,
post_lnorm=False,
dropout=0.0
)
self.sent_linear = nn.Linear(dim, dim)
self.sent_attn_vec = nn.Parameter(th.Tensor(dim))
elif encoder == "xlnet":
self.encoder = XLNetEncoder(num_segments=max_num_text+max_num_context+2, **kw)
dim = self.encoder.get_output_dim()
self.word_linear = nn.Linear(dim, dim)
self.word_attn_vec = nn.Parameter(th.Tensor(dim))
self.sent_encoder = TransformerLayer(
input_dim=dim,
hidden_dim=hidden_dim,
num_heads=num_perspectives,
add_residual=True,
add_gate=False,
pre_lnorm=True,
post_lnorm=False,
dropout=0.0
)
self.sent_linear = nn.Linear(dim, dim)
self.sent_attn_vec = nn.Parameter(th.Tensor(dim))
elif encoder == "lstm":
self.encoder = LSTMEncoder(num_segments=max_num_text+max_num_context+2, **kw)
dim = self.encoder.get_output_dim()
self.word_linear = nn.Linear(dim, dim)
self.word_attn_vec = nn.Parameter(th.Tensor(dim))
self.sent_encoder = nn.LSTM(
input_size=dim,
hidden_size=dim//2,
num_layers=1,
bidirectional=True,
batch_first=True
)
self.sent_linear = nn.Linear(dim, dim)
self.sent_attn_vec = nn.Parameter(th.Tensor(dim))
else:
raise NotImplementedError("Error: encoder=%s is not supported now." % (encoder))
self.fc_layer = MLP(
input_dim=dim,
hidden_dim=hidden_dim,
output_dim=num_labels,
num_mlp_layers=2,
activation="none",
norm_layer="batch_norm"
)
self.drop = nn.Dropout(dropout)
# init
init_weight(self.word_attn_vec, init="uniform")
init_weight(self.word_linear, init="uniform")
init_weight(self.sent_attn_vec, init="uniform")
init_weight(self.sent_linear, init="uniform")
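    # Hedged sketch (an assumption; the forward pass below is truncated before
    # the pooling step): word_linear / word_attn_vec are the usual ingredients
    # of additive attention pooling over token features, e.g.
    #     scores = th.matmul(th.tanh(word_linear(feat)), word_attn_vec)
    #     scores = scores.masked_fill(th.logical_not(mask), float("-inf"))
    #     sent_vec = (th.softmax(scores, dim=-1).unsqueeze(-1) * feat).sum(1)
    # with the same pattern applied at the sentence level via sent_encoder,
    # sent_linear and sent_attn_vec before fc_layer produces the logits.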
def set_finetune(self, finetune):
assert finetune in ["full", "layers", "last", "type", "none"]
for param in self.parameters():
param.requires_grad = True
self.encoder.set_finetune(finetune)
def forward(
self,
x1,
x2,
x1_mask=None,
x2_mask=None,
x1_sent_ids=None,
x2_sent_ids=None,
stance_logit=None,
disco_logit=None
):
if isinstance(self, nn.DataParallel):
            encoder = self.module.encoder
word_attn_vec = self.module.word_attn_vec
word_linear = self.module.word_linear
sent_encoder = self.module.sent_encoder
sent_attn_vec = self.module.sent_attn_vec
sent_linear = self.module.sent_linear
fc_layer = self.module.fc_layer
drop = self.module.drop
else:
            encoder = self.encoder
word_attn_vec = self.word_attn_vec
word_linear = self.word_linear
sent_encoder = self.sent_encoder
sent_attn_vec = self.sent_attn_vec
sent_linear = self.sent_linear
fc_layer = self.fc_layer
drop = self.drop
bsz, x1_len = x1.size()
x2_len = x2.size(1)
x_len = x1_len + x2_len
if x1_mask is None:
x1_mask = th.ones((bsz, x1_len), dtype=th.bool, device=x1.device)
if x2_mask is None:
x2_mask = th.ones((bsz, x2_len), dtype=th.bool, device=x2.device)
if x1_sent_ids is not None:
x1_indices = process_indices(x1_sent_ids)
else:
x1_sent_ids = th.zeros_like(x1)
x1_indices = th.tensor([x1_len], dtype=th.long, device=x1.device)
if x2_sent_ids is not None:
x2_indices = process_indices(x2_sent_ids)
else:
            x2_sent_ids = th.zeros_like(x2)
null=True)),
('history_type', models.CharField(choices=[('+', 'Created'), ('~', 'Changed'), ('-', 'Deleted')], max_length=1)),
('history_user', models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='+', to=settings.AUTH_USER_MODEL)),
('verification_metadata', models.ForeignKey(blank=True, db_constraint=False, null=True, on_delete=django.db.models.deletion.DO_NOTHING, related_name='+', to='main.verificationmetadata')),
],
options={
'verbose_name': 'historical verification metadata software',
'ordering': ('-history_date', '-history_id'),
'get_latest_by': 'history_date',
},
bases=(simple_history.models.HistoricalChanges, models.Model),
),
migrations.CreateModel(
name='HistoricalVerificationMetadataBadge',
fields=[
('id', models.IntegerField(auto_created=True, blank=True, db_index=True, verbose_name='ID')),
('history_change_list', models.TextField(default='')),
('name', models.CharField(blank=True, default='', max_length=200, null=True, verbose_name='Name')),
('badge_type', models.CharField(blank=True, default='', max_length=200, null=True, verbose_name='Type')),
('version', models.CharField(blank=True, default='', max_length=200, null=True, verbose_name='Version')),
('definition_url', models.URLField(blank=True, default='', null=True, verbose_name='Definition URL')),
('logo_url', models.URLField(blank=True, default='', null=True, verbose_name='Logo URL')),
('issuing_org', models.CharField(blank=True, default='', max_length=200, null=True, verbose_name='Issuing Organization')),
('issuing_date', models.DateField(blank=True, null=True, verbose_name='Issuing Date')),
('history_id', models.AutoField(primary_key=True, serialize=False)),
('history_date', models.DateTimeField()),
('history_change_reason', models.CharField(max_length=100, null=True)),
('history_type', models.CharField(choices=[('+', 'Created'), ('~', 'Changed'), ('-', 'Deleted')], max_length=1)),
('history_user', models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='+', to=settings.AUTH_USER_MODEL)),
('verification_metadata', models.ForeignKey(blank=True, db_constraint=False, null=True, on_delete=django.db.models.deletion.DO_NOTHING, related_name='+', to='main.verificationmetadata')),
],
options={
'verbose_name': 'historical verification metadata badge',
'ordering': ('-history_date', '-history_id'),
'get_latest_by': 'history_date',
},
bases=(simple_history.models.HistoricalChanges, models.Model),
),
migrations.CreateModel(
name='HistoricalVerificationMetadataAudit',
fields=[
('id', models.IntegerField(auto_created=True, blank=True, db_index=True, verbose_name='ID')),
('history_change_list', models.TextField(default='')),
('name', models.CharField(blank=True, default='', max_length=200, null=True, verbose_name='Name')),
('version', models.CharField(blank=True, default='', max_length=200, null=True, verbose_name='Version')),
('url', models.URLField(blank=True, default='', null=True, verbose_name='URL')),
('organization', models.CharField(blank=True, default='', max_length=200, null=True, verbose_name='Organization')),
('verified_results', models.CharField(blank=True, default='', max_length=200, null=True, verbose_name='Verified Results')),
('exceptions', models.CharField(blank=True, default='', max_length=200, null=True, verbose_name='Exceptions')),
('exception_reason', models.CharField(blank=True, default='', max_length=200, null=True, verbose_name='Exception Reason')),
('history_id', models.AutoField(primary_key=True, serialize=False)),
('history_date', models.DateTimeField()),
('history_change_reason', models.CharField(max_length=100, null=True)),
('history_type', models.CharField(choices=[('+', 'Created'), ('~', 'Changed'), ('-', 'Deleted')], max_length=1)),
('history_user', models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='+', to=settings.AUTH_USER_MODEL)),
('verification_metadata', models.ForeignKey(blank=True, db_constraint=False, null=True, on_delete=django.db.models.deletion.DO_NOTHING, related_name='+', to='main.verificationmetadata')),
],
options={
'verbose_name': 'historical verification metadata audit',
'ordering': ('-history_date', '-history_id'),
'get_latest_by': 'history_date',
},
bases=(simple_history.models.HistoricalChanges, models.Model),
),
migrations.CreateModel(
name='HistoricalVerificationMetadata',
fields=[
('id', models.IntegerField(auto_created=True, blank=True, db_index=True, verbose_name='ID')),
('created_at', models.DateTimeField(blank=True, editable=False, help_text='Date model was created', verbose_name='created at')),
('updated_at', models.DateTimeField(blank=True, editable=False, help_text='Date model was last updated', verbose_name='updated at')),
('history_change_list', models.TextField(default='')),
('operating_system', models.CharField(default='', max_length=200, verbose_name='Operating System')),
('machine_type', models.CharField(blank=True, default='', max_length=200, null=True, verbose_name='Machine Type')),
('scheduler', models.CharField(blank=True, default='', max_length=200, null=True, verbose_name='Scheduler Module')),
('platform', models.CharField(blank=True, default='', max_length=200, null=True, verbose_name='Platform')),
('processor_reqs', models.CharField(blank=True, default='', max_length=200, null=True, verbose_name='Processor Requirements')),
('host_url', models.URLField(blank=True, default='', null=True, verbose_name='Hosting Institution URL')),
                ('memory_reqs', models.CharField(blank=True, default='', max_length=200, null=True, verbose_name='Memory Requirements')),
('packages_info', models.TextField(default='', help_text='Please provide the list of your packages and their versions.', verbose_name='Packages Info')),
('history_id', models.AutoField(primary_key=True, serialize=False)),
('history_date', models.DateTimeField()),
('history_change_reason', models.CharField(max_length=100, null=True)),
('history_type', models.CharField(choices=[('+', 'Created'), ('~', 'Changed'), ('-', 'Deleted')], max_length=1)),
('creator', models.ForeignKey(blank=True, db_constraint=False, null=True, on_delete=django.db.models.deletion.DO_NOTHING, related_name='+', to=settings.AUTH_USER_MODEL, verbose_name='Creator User')),
('history_user', models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='+', to=settings.AUTH_USER_MODEL)),
('last_editor', models.ForeignKey(blank=True, db_constraint=False, null=True, on_delete=django.db.models.deletion.DO_NOTHING, related_name='+', to=settings.AUTH_USER_MODEL, verbose_name='Last Updating User')),
('submission', models.ForeignKey(blank=True, db_constraint=False, null=True, on_delete=django.db.models.deletion.DO_NOTHING, related_name='+', to='main.submission')),
],
options={
'verbose_name': 'historical verification metadata',
'ordering': ('-history_date', '-history_id'),
'get_latest_by': 'history_date',
},
bases=(simple_history.models.HistoricalChanges, models.Model),
),
migrations.CreateModel(
name='HistoricalVerification',
fields=[
('id', models.IntegerField(auto_created=True, blank=True, db_index=True, verbose_name='ID')),
('created_at', models.DateTimeField(blank=True, editable=False, help_text='Date model was created', verbose_name='created at')),
('updated_at', models.DateTimeField(blank=True, editable=False, help_text='Date model was last updated', verbose_name='updated at')),
('history_change_list', models.TextField(default='')),
('_status', django_fsm.FSMField(choices=[('new', 'New'), ('not_attempted', 'Not Attempted'), ('minor_issues', 'Minor Issues'), ('major_issues', 'Major Issues'), ('success_w_mod', 'Success W Mod'), ('success', 'Success')], default='new', help_text='Was the submission able to be verified', max_length=15, verbose_name='Verification Status')),
('report', models.TextField(default='', verbose_name='Report')),
('code_executability', models.CharField(default='', max_length=2000, verbose_name='Code Executability')),
('history_id', models.AutoField(primary_key=True, serialize=False)),
('history_date', models.DateTimeField()),
('history_change_reason', models.CharField(max_length=100, null=True)),
('history_type', models.CharField(choices=[('+', 'Created'), ('~', 'Changed'), ('-', 'Deleted')], max_length=1)),
('creator', models.ForeignKey(blank=True, db_constraint=False, null=True, on_delete=django.db.models.deletion.DO_NOTHING, related_name='+', to=settings.AUTH_USER_MODEL, verbose_name='Creator User')),
('history_user', models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='+', to=settings.AUTH_USER_MODEL)),
('last_editor', models.ForeignKey(blank=True, db_constraint=False, null=True, on_delete=django.db.models.deletion.DO_NOTHING, related_name='+', to=settings.AUTH_USER_MODEL, verbose_name='Last Updating User')),
('manuscript', models.ForeignKey(blank=True, db_constraint=False, null=True, on_delete=django.db.models.deletion.DO_NOTHING, related_name='+', to='main.manuscript')),
('submission', models.ForeignKey(blank=True, db_constraint=False, null=True, on_delete=django.db.models.deletion.DO_NOTHING, related_name='+', to='main.submission')),
],
options={
'verbose_name': 'historical verification',
'ordering': ('-history_date', '-history_id'),
'get_latest_by': 'history_date',
},
bases=(simple_history.models.HistoricalChanges, models.Model),
),
migrations.CreateModel(
name='HistoricalUser',
fields=[
('id', models.IntegerField(auto_created=True, blank=True, db_index=True, verbose_name='ID')),
('password', models.CharField(max_length=128, verbose_name='password')),
('last_login', models.DateTimeField(blank=True, null=True, verbose_name='last login')),
('is_superuser', models.BooleanField(default=False, help_text='Designates that this user has all permissions without explicitly assigning them.', verbose_name='superuser status')),
('username', models.CharField(db_index=True, error_messages={'unique': 'A user with that username already exists.'}, help_text='Required. 150 characters or fewer. Letters, digits and @/./+/-/_ only.', max_length=150, validators=[django.contrib.auth.validators.UnicodeUsernameValidator()], verbose_name='username')),
('first_name', models.CharField(blank=True, max_length=150, verbose_name='first name')),
                ('last_name', models.CharField(blank=True, max_length=150, verbose_name='last name')),
('is_staff', models.BooleanField(default=False, help_text='Designates whether the user can log into this admin site.', verbose_name='staff status')),
('is_active', models.BooleanField(default=True, help_text='Designates whether this user should be treated as active. Unselect this instead of deleting accounts.', verbose_name='active')),
('date_joined', models.DateTimeField(default=django.utils.timezone.now, verbose_name='date joined')),
('history_change_list', models.TextField(default='')),
('invite_key', models.CharField(blank=True, max_length=64)),
('email', models.EmailField(db_index=True, max_length=254)),
('history_id', models.AutoField(primary_key=True, serialize=False)),
('history_date', models.DateTimeField()),
('history_change_reason', models.CharField(max_length=100, null=True)),
('history_type', models.CharField(choices=[('+', 'Created'), ('~', 'Changed'), ('-', 'Deleted')], max_length=1)),
('history_user', models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='+', to=settings.AUTH_USER_MODEL)),
('invited_by', models.ForeignKey(blank=True, db_constraint=False, null=True, on_delete=django.db.models.deletion.DO_NOTHING, related_name='+', to=settings.AUTH_USER_MODEL)),
],
options={
'verbose_name': 'historical user',
'ordering': ('-history_date', '-history_id'),
'get_latest_by': 'history_date',
},
bases=(simple_history.models.HistoricalChanges, models.Model),
),
migrations.CreateModel(
name='HistoricalSubmission',
fields=[
('id', models.IntegerField(auto_created=True, blank=True, db_index=True, verbose_name='ID')),
('created_at', models.DateTimeField(blank=True, editable=False, help_text='Date model was created', verbose_name='created at')),
('updated_at', models.DateTimeField(blank=True, editable=False, help_text='Date model was last updated', verbose_name='updated at')),
('history_change_list', models.TextField(default='')),
('_status', django_fsm.FSMField(choices=[('new', 'New'), ('in_progress_edition', 'In Progress Edition'), ('in_progress_curation', 'In Progress Curation'), ('in_progress_verification', 'In Progress Verification'), ('reviewed_awaiting_report', 'Reviewed Awaiting Report'), ('reviewed_awaiting_approve', 'Reviewed Report Awaiting Approval'), ('returned', 'Returned')], default='new', help_text='The status of the submission in the review process', max_length=25, verbose_name='Submission review status')),
('version_id', models.IntegerField(verbose_name='Version number')),
('high_performance', models.BooleanField(default=False, verbose_name='Does this submission require a high-performance compute environment?')),
('contents_gis', models.BooleanField(default=False, verbose_name='Does this submission contain GIS data and mapping?')),
('contents_proprietary', models.BooleanField(default=False, verbose_name='Does this submission contain restricted or proprietary data?')),
('contents_proprietary_sharing', models.BooleanField(default=False, verbose_name='Are you restricted from sharing this data with Odum for verification only?')),
('history_id', models.AutoField(primary_key=True, serialize=False)),
('history_date', models.DateTimeField()),
('history_change_reason', models.CharField(max_length=100, null=True)),
('history_type', models.CharField(choices=[('+', 'Created'), ('~', 'Changed'), ('-', 'Deleted')], max_length=1)),
('creator', models.ForeignKey(blank=True, db_constraint=False, null=True, on_delete=django.db.models.deletion.DO_NOTHING, related_name='+', to=settings.AUTH_USER_MODEL, verbose_name='Creator User')),
('history_user', models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='+', to=settings.AUTH_USER_MODEL)),
('last_editor', models.ForeignKey(blank=True, db_constraint=False, null=True, on_delete=django.db.models.deletion.DO_NOTHING, related_name='+', to=settings.AUTH_USER_MODEL, verbose_name='Last Updating User')),
('manuscript', models.ForeignKey(blank=True, db_constraint=False, null=True, on_delete=django.db.models.deletion.DO_NOTHING, related_name='+', to='main.manuscript')),
],
options={
'verbose_name': 'historical submission',
'ordering': ('-history_date', '-history_id'),
'get_latest_by': 'history_date',
},
bases=(simple_history.models.HistoricalChanges, models.Model),
),
migrations.CreateModel(
name='HistoricalNote',
fields=[
('id', models.IntegerField(auto_created=True, blank=True, db_index=True, verbose_name='ID')),
('created_at', models.DateTimeField(blank=True, editable=False, help_text='Date model was created', verbose_name='created at')),
('updated_at', models.DateTimeField(blank=True, editable=False, help_text='Date model was last updated', verbose_name='updated at')),
('history_change_list', models.TextField(default='')),
('text', models.TextField(blank=True, default='', verbose_name='Note Text')),
('ref_file_type', models.CharField(blank=True, choices=[('code', 'Code'), ('data', 'Data'), ('doc_readme', 'Documentation - Readme'), ('doc_codebook', 'Documentation - Codebook'), ('doc_other', 'Documentation - Other')], max_length=14, verbose_name='file type')),
('ref_cycle', models.CharField(choices=[('submission', 'Submission'), ('edition', 'Edition'), ('curation', 'Curation'), ('verification', 'Verification')], max_length=12)),
('history_id', models.AutoField(primary_key=True, serialize=False)),
('history_date', models.DateTimeField()),
('history_change_reason', models.CharField(max_length=100, null=True)),
('history_type', models.CharField(choices=[('+', 'Created'), ('~', 'Changed'), ('-', 'Deleted')], max_length=1)),
('creator', models.ForeignKey(blank=True, db_constraint=False, null=True, on_delete=django.db.models.deletion.DO_NOTHING, related_name='+', to=settings.AUTH_USER_MODEL, verbose_name='Creator User')),
('history_user', models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='+', to=settings.AUTH_USER_MODEL)),
('last_editor', models.ForeignKey(blank=True, db_constraint=False, null=True, on_delete=django.db.models.deletion.DO_NOTHING, related_name='+', to=settings.AUTH_USER_MODEL, verbose_name='Last Updating User')),
('manuscript', models.ForeignKey(blank=True, db_constraint=False, null=True, on_delete=django.db.models.deletion.DO_NOTHING, related_name='+', to='main.manuscript')),
('note_replied_to', models.ForeignKey(blank=True, db_constraint=False, null=True, on_delete=django.db.models.deletion.DO_NOTHING, related_name='+', to='main.note')),
('parent_submission', models.ForeignKey(blank=True, db_constraint=False, null=True, on_delete=django.db.models.deletion.DO_NOTHING, related_name='+', to='main.submission')),
('ref_file', models.ForeignKey(blank=True, db_constraint=False, null=True, on_delete=django.db.models.deletion.DO_NOTHING, related_name='+', to='main.gitfile')),
],
options={
'verbose_name': 'historical note',
'ordering': ('-history_date', '-history_id'),
'get_latest_by': 'history_date',
},
bases=(simple_history.models.HistoricalChanges, models.Model),
),
migrations.CreateModel(
name='HistoricalManuscript',
fields=[
('id', models.IntegerField(auto_created=True, blank=True, db_index=True, verbose_name='ID')),
('created_at', models.DateTimeField(blank=True, editable=False, help_text='Date model was created', verbose_name='created at')),
('updated_at', models.DateTimeField(blank=True, editable=False, help_text='Date model was last updated', verbose_name='updated at')),
('history_change_list', models.TextField(default='')),
('title', models.CharField(default='', help_text='Title of the manuscript', max_length=200, verbose_name='Manuscript Title')),
('pub_id', models.CharField(blank=True, db_index=True, default='', help_text='The internal ID from the publication', max_length=200, null=True, verbose_name='Publication ID')),
('qual_analysis', models.BooleanField(blank=True, default=False, help_text='Whether this manuscript needs qualitative analysis', null=True, verbose_name='Qualitative Analysis')),
('qdr_review', models.BooleanField(blank=True, default=False, help_text='Was this manuscript reviewed by the Qualitative Data Repository?', null=True, verbose_name='QDR Review')),
('contact_first_name', models.CharField(blank=True, help_text='First name of the publication contact that will be stored in Dataverse', max_length=150, verbose_name='Contact First Name')),
('contact_last_name', models.CharField(blank=True, help_text='Last name of the publication contact that will be stored in Dataverse', max_length=150, verbose_name='Contact Last Name')),
('contact_email', models.EmailField(blank=True, help_text='Email address of the publication contact that will be stored in Dataverse', max_length=254, null=True, verbose_name='Contact Email Address')),
('dataverse_doi', models.CharField(blank=True, help_text='DOI of the publication in Dataverse', max_length=150, verbose_name='Dataverse DOI')),
('description', models.CharField(blank=True, default='', help_text='Additional info about the manuscript', max_length=1024, null=True, verbose_name='Description')),
('subject', models.CharField(blank=True, choices=[('agricultural', 'Agricultural Sciences'), ('arts', 'Arts and Humanities'), ('astronomy', 'Astronomy and Astrophysics'), ('business', 'Business and Management'), ('chemistry', 'Chemistry'), ('computer', 'Computer and Information Science'), ('environmental', 'Earth and Environmental Sciences'), ('engineering', 'Engineering'), ('law', 'Law'), ('mathematics', 'Mathematical Sciences'), ('health', 'Medicine, Health and Life Sciences'), ('physics', 'Physics'), ('social', 'Social Sciences'), ('other', 'Other')], max_length=14, null=True, verbose_name='Subject')),
('_status', django_fsm.FSMField(choices=[('new', 'New'), ('awaiting_init', 'Awaiting Initial Submission'), ('awaiting_resub', 'Awaiting Resubmission'), ('reviewing', 'Reviewing Submission'), ('processing', 'Processing Submission'), ('completed', 'Completed')], default='new', help_text='The overall status of the manuscript in the review process', max_length=15, verbose_name='Manuscript Status')),
('uuid', models.UUIDField(db_index=True, default=uuid.uuid4, editable=False)),
('history_id', models.AutoField(primary_key=True, serialize=False)),
('history_date', models.DateTimeField()),
('history_change_reason', models.CharField(max_length=100, null=True)),
('history_type', models.CharField(choices=[('+', 'Created'), ('~', 'Changed'), ('-', 'Deleted')], max_length=1)),
('creator', models.ForeignKey(blank=True, db_constraint=False, null=True, on_delete=django.db.models.deletion.DO_NOTHING, related_name='+', to=settings.AUTH_USER_MODEL, verbose_name='Creator User')),
('history_user', models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='+', to=settings.AUTH_USER_MODEL)),
('last_editor', models.ForeignKey(blank=True, db_constraint=False, null=True, on_delete=django.db.models.deletion.DO_NOTHING, related_name='+', to=settings.AUTH_USER_MODEL, verbose_name='Last Updating User')),
],
options={
'verbose_name': 'historical manuscript',
'ordering': ('-history_date', '-history_id'),
= A()
self.assertEqual(emails, [])
self.worker.add(a)
self.assertTrue(emails[0].find("Luigi: %s failed scheduling" % (a,)) != -1)
self.worker.run()
self.assertTrue(emails[0].find("Luigi: %s failed scheduling" % (a,)) != -1)
self.assertFalse(a.has_run)
@with_config({'batch_email': {'email_interval': '0'}, 'worker': {'send_failure_email': 'False'}})
@email_patch
def test_complete_return_value_email_batch(self, emails):
class A(DummyTask):
def complete(self):
pass # no return value should be an error
scheduler = Scheduler(batch_emails=True)
worker = Worker(scheduler)
a = A()
self.assertEqual(emails, [])
worker.add(a)
self.assertEqual(emails, [])
        worker.run()
self.assertEqual(emails, [])
self.assertFalse(a.has_run)
scheduler.prune()
self.assertTrue("1 scheduling failure" in emails[0])
@email_patch
def test_run_error(self, emails):
class A(luigi.Task):
def run(self):
raise Exception("b0rk")
a = A()
luigi.build([a], workers=1, local_scheduler=True)
self.assertEqual(1, len(emails))
self.assertTrue(emails[0].find("Luigi: %s FAILED" % (a,)) != -1)
@with_config({'batch_email': {'email_interval': '0'}, 'worker': {'send_failure_email': 'False'}})
@email_patch
def test_run_error_email_batch(self, emails):
class A(luigi.Task):
owner_email = ['<EMAIL>', '<EMAIL>']
def run(self):
raise Exception("b0rk")
scheduler = Scheduler(batch_emails=True)
worker = Worker(scheduler)
worker.add(A())
worker.run()
scheduler.prune()
self.assertEqual(3, len(emails))
self.assertTrue(any('<EMAIL>' in email for email in emails))
self.assertTrue(any('<EMAIL>' in email for email in emails))
@with_config({'batch_email': {'email_interval': '0'}, 'worker': {'send_failure_email': 'False'}})
@email_patch
def test_run_error_batch_email_string(self, emails):
class A(luigi.Task):
owner_email = '<EMAIL>'
def run(self):
raise Exception("b0rk")
scheduler = Scheduler(batch_emails=True)
worker = Worker(scheduler)
worker.add(A())
worker.run()
scheduler.prune()
self.assertEqual(2, len(emails))
self.assertTrue(any('<EMAIL>' in email for email in emails))
@with_config({'worker': {'send_failure_email': 'False'}})
@email_patch
def test_run_error_no_email(self, emails):
class A(luigi.Task):
def run(self):
raise Exception("b0rk")
luigi.build([A()], workers=1, local_scheduler=True)
self.assertFalse(emails)
@email_patch
def test_task_process_dies_with_email(self, emails):
a = SendSignalTask(signal.SIGKILL)
luigi.build([a], workers=2, local_scheduler=True)
self.assertEqual(1, len(emails))
self.assertTrue(emails[0].find("Luigi: %s FAILED" % (a,)) != -1)
self.assertTrue(emails[0].find("died unexpectedly with exit code -9") != -1)
@with_config({'worker': {'send_failure_email': 'False'}})
@email_patch
def test_task_process_dies_no_email(self, emails):
luigi.build([SendSignalTask(signal.SIGKILL)], workers=2, local_scheduler=True)
self.assertEqual([], emails)
@email_patch
def test_task_times_out(self, emails):
class A(luigi.Task):
worker_timeout = 0.0001
def run(self):
time.sleep(5)
a = A()
luigi.build([a], workers=2, local_scheduler=True)
self.assertEqual(1, len(emails))
self.assertTrue(emails[0].find("Luigi: %s FAILED" % (a,)) != -1)
self.assertTrue(emails[0].find("timed out after 0.0001 seconds and was terminated.") != -1)
@with_config({'worker': {'send_failure_email': 'False'}})
@email_patch
def test_task_times_out_no_email(self, emails):
class A(luigi.Task):
worker_timeout = 0.0001
def run(self):
time.sleep(5)
luigi.build([A()], workers=2, local_scheduler=True)
self.assertEqual([], emails)
@with_config(dict(worker=dict(retry_external_tasks='true')))
@email_patch
def test_external_task_retries(self, emails):
"""
Test that we do not send error emails on the failures of external tasks
"""
class A(luigi.ExternalTask):
pass
a = A()
luigi.build([a], workers=2, local_scheduler=True)
self.assertEqual(emails, [])
@email_patch
def test_no_error(self, emails):
class A(DummyTask):
pass
a = A()
self.assertEqual(emails, [])
self.worker.add(a)
self.assertEqual(emails, [])
self.worker.run()
self.assertEqual(emails, [])
self.assertTrue(a.complete())
@custom_email_patch({"core": {"error-email": "not-a-real-email-address-for-test-only", 'email-type': 'none'}})
def test_disable_emails(self, emails):
class A(luigi.Task):
def complete(self):
raise Exception("b0rk")
self.worker.add(A())
self.assertEqual(emails, [])
class RaiseSystemExit(luigi.Task):
def run(self):
raise SystemExit("System exit!!")
class SendSignalTask(luigi.Task):
signal = luigi.IntParameter()
def run(self):
os.kill(os.getpid(), self.signal)
class HangTheWorkerTask(luigi.Task):
worker_timeout = luigi.IntParameter(default=None)
def run(self):
while True:
pass
def complete(self):
return False
class MultipleWorkersTest(unittest.TestCase):
@unittest.skip('Always skip. There are many intermittent failures')
# This passes under python3 when run as `nosetests test/worker_test.py`
# but not as `nosetests test`. Probably a side effect of previous tests.
@unittest.skipIf(six.PY3, 'This test fails on python3 when run with tox.')
def test_multiple_workers(self):
# Test using multiple workers
# Also test generating classes dynamically since this may reflect issues with
# various platforms and how multiprocessing is implemented. If it's using os.fork
# under the hood it should be fine, but dynamic classes can't be pickled, so
# other implementations of multiprocessing (using spawn etc.) may fail
class MyDynamicTask(luigi.Task):
x = luigi.Parameter()
def run(self):
time.sleep(0.1)
t0 = time.time()
luigi.build([MyDynamicTask(i) for i in range(100)], workers=100, local_scheduler=True)
self.assertTrue(time.time() < t0 + 5.0) # should ideally take exactly 0.1s, but definitely less than 10.0
def test_zero_workers(self):
d = DummyTask()
luigi.build([d], workers=0, local_scheduler=True)
self.assertFalse(d.complete())
def test_system_exit(self):
# This would hang indefinitely before this fix:
# https://github.com/spotify/luigi/pull/439
luigi.build([RaiseSystemExit()], workers=2, local_scheduler=True)
def test_term_worker(self):
luigi.build([SendSignalTask(signal.SIGTERM)], workers=2, local_scheduler=True)
def test_kill_worker(self):
luigi.build([SendSignalTask(signal.SIGKILL)], workers=2, local_scheduler=True)
def test_purge_multiple_workers(self):
w = Worker(worker_processes=2, wait_interval=0.01)
t1 = SendSignalTask(signal.SIGTERM)
t2 = SendSignalTask(signal.SIGKILL)
w.add(t1)
w.add(t2)
w._run_task(t1.task_id)
w._run_task(t2.task_id)
time.sleep(1.0)
w._handle_next_task()
w._handle_next_task()
w._handle_next_task()
def test_stop_worker_kills_subprocesses(self):
with Worker(worker_processes=2) as w:
hung_task = HangTheWorkerTask()
w.add(hung_task)
w._run_task(hung_task.task_id)
pids = [p.pid for p in w._running_tasks.values()]
self.assertEqual(1, len(pids))
pid = pids[0]
def is_running():
return pid in {p.pid for p in psutil.Process().children()}
self.assertTrue(is_running())
self.assertFalse(is_running())
def test_time_out_hung_worker(self):
luigi.build([HangTheWorkerTask(0.1)], workers=2, local_scheduler=True)
def test_time_out_hung_single_worker(self):
luigi.build([HangTheWorkerTask(0.1)], workers=1, local_scheduler=True)
@skipOnTravis('https://travis-ci.org/spotify/luigi/jobs/72953986')
@mock.patch('luigi.worker.time')
def test_purge_hung_worker_default_timeout_time(self, mock_time):
w = Worker(worker_processes=2, wait_interval=0.01, timeout=5)
mock_time.time.return_value = 0
task = HangTheWorkerTask()
w.add(task)
w._run_task(task.task_id)
mock_time.time.return_value = 5
w._handle_next_task()
self.assertEqual(1, len(w._running_tasks))
mock_time.time.return_value = 6
w._handle_next_task()
self.assertEqual(0, len(w._running_tasks))
@skipOnTravis('https://travis-ci.org/spotify/luigi/jobs/76645264')
@mock.patch('luigi.worker.time')
def test_purge_hung_worker_override_timeout_time(self, mock_time):
w = Worker(worker_processes=2, wait_interval=0.01, timeout=5)
mock_time.time.return_value = 0
task = HangTheWorkerTask(worker_timeout=10)
w.add(task)
w._run_task(task.task_id)
mock_time.time.return_value = 10
w._handle_next_task()
self.assertEqual(1, len(w._running_tasks))
mock_time.time.return_value = 11
w._handle_next_task()
self.assertEqual(0, len(w._running_tasks))
class Dummy2Task(Task):
p = luigi.Parameter()
def output(self):
return MockTarget(self.p)
def run(self):
f = self.output().open('w')
f.write('test')
f.close()
class AssistantTest(unittest.TestCase):
def run(self, result=None):
self.sch = Scheduler(retry_delay=100, remove_delay=1000, worker_disconnect_delay=10)
self.assistant = Worker(scheduler=self.sch, worker_id='Y', assistant=True)
with Worker(scheduler=self.sch, worker_id='X') as w:
self.w = w
super(AssistantTest, self).run(result)
def test_get_work(self):
d = Dummy2Task('123')
self.w.add(d)
self.assertFalse(d.complete())
self.assistant.run()
self.assertTrue(d.complete())
def test_bad_job_type(self):
class Dummy3Task(Dummy2Task):
task_family = 'UnknownTaskFamily'
d = Dummy3Task('123')
self.w.add(d)
self.assertFalse(d.complete())
self.assertFalse(self.assistant.run())
self.assertFalse(d.complete())
self.assertEqual(list(self.sch.task_list('FAILED', '').keys()), [d.task_id])
def test_unimported_job_type(self):
MODULE_CONTENTS = b'''
import luigi
class UnimportedTask(luigi.Task):
def complete(self):
return False
'''
reg = luigi.task_register.Register._get_reg()
class UnimportedTask(luigi.Task):
task_module = None # Set it here, so it's generally settable
luigi.task_register.Register._set_reg(reg)
task = UnimportedTask()
# verify that it can't run the task without the module info necessary to import it
self.w.add(task)
self.assertFalse(self.assistant.run())
self.assertEqual(list(self.sch.task_list('FAILED', '').keys()), [task.task_id])
# check that it can import with the right module
with temporary_unloaded_module(MODULE_CONTENTS) as task.task_module:
self.w.add(task)
self.assertTrue(self.assistant.run())
self.assertEqual(list(self.sch.task_list('DONE', '').keys()), [task.task_id])
def test_unimported_job_sends_failure_message(self):
class NotInAssistantTask(luigi.Task):
task_family = 'Unknown'
task_module = None
task = NotInAssistantTask()
self.w.add(task)
self.assertFalse(self.assistant.run())
self.assertEqual(list(self.sch.task_list('FAILED', '').keys()), [task.task_id])
self.assertTrue(self.sch.fetch_error(task.task_id)['error'])
class ForkBombTask(luigi.Task):
depth = luigi.IntParameter()
breadth = luigi.IntParameter()
p = luigi.Parameter(default=(0, )) # ehm for some weird reason [0] becomes a tuple...?
def output(self):
return MockTarget('.'.join(map(str, self.p)))
def run(self):
with self.output().open('w') as f:
f.write('Done!')
def requires(self):
if len(self.p) < self.depth:
for i in range(self.breadth):
yield ForkBombTask(self.depth, self.breadth, self.p + (i, ))
class TaskLimitTest(unittest.TestCase):
def tearDown(self):
MockFileSystem().remove('')
@with_config({'core': {'worker-task-limit': '6'}})
def test_task_limit_exceeded(self):
w = Worker()
t = ForkBombTask(3, 2)
w.add(t)
w.run()
self.assertFalse(t.complete())
leaf_tasks = [ForkBombTask(3, 2, branch) for branch in [(0, 0, 0), (0, 0, 1), (0, 1, 0), (0, 1, 1)]]
self.assertEqual(3, sum(t.complete() for t in leaf_tasks),
"should have gracefully completed as much as possible even though the single last leaf didn't get scheduled")
@with_config({'core': {'worker-task-limit': '7'}})
def test_task_limit_not_exceeded(self):
w = Worker()
t = ForkBombTask(3, 2)
w.add(t)
w.run()
self.assertTrue(t.complete())
def test_no_task_limit(self):
w = Worker()
t = ForkBombTask(4, 2)
w.add(t)
w.run()
self.assertTrue(t.complete())
class WorkerConfigurationTest(unittest.TestCase):
def test_asserts_for_worker(self):
"""
Test that Worker() asserts that it's sanely configured
"""
Worker(wait_interval=1) # This shouldn't raise
self.assertRaises(AssertionError, Worker, wait_interval=0)
class WorkerWaitJitterTest(unittest.TestCase):
@with_config({'worker': {'wait_jitter': '10.0'}})
@mock.patch("random.uniform")
@mock.patch("time.sleep")
def test_wait_jitter(self, mock_sleep, mock_random):
""" verify configured jitter amount """
mock_random.return_value = 1.0
w = Worker()
x = w._sleeper()
six.next(x)
mock_random.assert_called_with(0, 10.0)
mock_sleep.assert_called_with(2.0)
mock_random.return_value = 2.0
six.next(x)
mock_random.assert_called_with(0, 10.0)
mock_sleep.assert_called_with(3.0)
@mock.patch("random.uniform")
@mock.patch("time.sleep")
def test_wait_jitter_default(self, mock_sleep, mock_random):
""" verify default jitter is as expected """
mock_random.return_value = 1.0
w = Worker()
x = w._sleeper()
six.next(x)
mock_random.assert_called_with(0, 5.0)
mock_sleep.assert_called_with(2.0)
mock_random.return_value = 3.3
six.next(x)
mock_random.assert_called_with(0, 5.0)
mock_sleep.assert_called_with(4.3)
class KeyboardInterruptBehaviorTest(LuigiTestCase):
def test_propagation_when_executing(self):
"""
Ensure that a keyboard interrupt causes luigi to quit while you are
executing tasks.
TODO: Add a test that tests the multiprocessing (--worker >1) case
"""
class KeyboardInterruptTask(luigi.Task):
def run(self):
raise KeyboardInterrupt()
cmd = 'KeyboardInterruptTask --local-scheduler --no-lock'.split(' ')
self.assertRaises(KeyboardInterrupt, luigi_run, cmd)
def test_propagation_when_scheduling(self):
"""
Test that KeyboardInterrupt causes luigi to quit while scheduling.
"""
class KeyboardInterruptTask(luigi.Task):
def complete(self):
raise KeyboardInterrupt()
class ExternalKeyboardInterruptTask(luigi.ExternalTask):
def complete(self):
raise KeyboardInterrupt()
self.assertRaises(KeyboardInterrupt, luigi_run,
['KeyboardInterruptTask', '--local-scheduler', '--no-lock'])
self.assertRaises(KeyboardInterrupt, luigi_run,
['ExternalKeyboardInterruptTask', '--local-scheduler', '--no-lock'])
class WorkerPurgeEventHandlerTest(unittest.TestCase):
@mock.patch('luigi.worker.TaskProcess')
def test_process_killed_handler(self, task_proc):
result = []
@HangTheWorkerTask.event_handler(Event.PROCESS_FAILURE)
def store_task(t, error_msg):
self.assertTrue(error_msg)
result.append(t)
w = Worker()
task = HangTheWorkerTask()
task_process = mock.MagicMock(is_alive=lambda: False, exitcode=-14, task=task)
task_proc.return_value = task_process
w.add(task)
w._run_task(task.task_id)
w._handle_next_task()
self.assertEqual(result, [task])
@mock.patch('luigi.worker.time')
def test_timeout_handler(self, mock_time):
result = []
@HangTheWorkerTask.event_handler(Event.TIMEOUT)
def store_task(t, error_msg):
self.assertTrue(error_msg)
result.append(t)
w = Worker(worker_processes=2, wait_interval=0.01, timeout=5)
mock_time.time.return_value = 0
task = HangTheWorkerTask(worker_timeout=1)
w.add(task)
w._run_task(task.task_id)
mock_time.time.return_value = 3
w._handle_next_task()
self.assertEqual(result, [task])
class PerTaskRetryPolicyBehaviorTest(LuigiTestCase):
def setUp(self):
super(PerTaskRetryPolicyBehaviorTest, self).setUp()
self.per_task_retry_count = 3
self.default_retry_count = 1
self.sch = Scheduler(retry_delay=0.1, retry_count=self.default_retry_count, prune_on_get_work=True)
def test_with_all_disabled_with_single_worker(self):
"""
This test covers the case where a task (TestWrapperTask) requires two other tasks (TestErrorTask1, TestErrorTask2)
that both fail.
Task TestErrorTask1 uses the default retry_count of 1, while Task TestErrorTask2 sets retry_count to 2 at the task level.
This test runs on a single worker.
"""
class TestErrorTask1(DummyErrorTask):
pass
e1 = TestErrorTask1()
class TestErrorTask2(DummyErrorTask):
retry_count | |
anim[0] in HeadAnimDict[key]:
if (base.localAvatar.style.head == key):
base.localAvatar.unloadAnims([anim[0]], "head", None)
def compileGlobalAnimList():
"""
Munge the anim names and file paths into big dictionaries for the
leg, torso, and head parts. These are used for loading the anims.
"""
# Nowadays we want all anims in the list. It would save work
# in loadphaseAnims if we split this into phases. Optimize later.
phaseList = [Phase3AnimList,Phase3_5AnimList,Phase4AnimList,Phase5AnimList,Phase5_5AnimList,Phase6AnimList,Phase9AnimList,Phase10AnimList, Phase12AnimList]
phaseStrList = ["phase_3", "phase_3.5", "phase_4", "phase_5", "phase_5.5", "phase_6", "phase_9", "phase_10", "phase_12"]
for animList in phaseList:
phaseStr = phaseStrList[phaseList.index(animList)]
for key in list(LegDict.keys()):
LegsAnimDict.setdefault(key, {})
for anim in animList:
file = phaseStr + LegDict[key] + anim[1]
LegsAnimDict[key][anim[0]] = file
for key in list(TorsoDict.keys()):
TorsoAnimDict.setdefault(key, {})
for anim in animList:
file = phaseStr + TorsoDict[key] + anim[1]
TorsoAnimDict[key][anim[0]] = file
for key in list(HeadDict.keys()):
# only load anims for dog heads
if (key.find('d') >= 0):
HeadAnimDict.setdefault(key, {})
for anim in animList:
file = phaseStr + HeadDict[key] + anim[1]
HeadAnimDict[key][anim[0]] = file
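# Illustration of the resulting structure (keys and file names below are examples
# only, not taken from the actual anim lists):
#   LegsAnimDict[legStyle][animName] -> phaseStr + LegDict[legStyle] + animFileSuffix
# i.e. each dictionary maps a part style, then an anim name, to the model path that
# the anim-loading code hands to the Actor.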
def loadDialog():
"""
Load the dialogue audio samples
"""
loadPath = "phase_3.5/audio/dial/"
# load the dog dialogue
DogDialogueFiles = ( "AV_dog_short",
"AV_dog_med",
"AV_dog_long",
"AV_dog_question",
"AV_dog_exclaim",
"AV_dog_howl"
)
# load the audio files and store into the dialogue array
for file in DogDialogueFiles:
DogDialogueArray.append(loader.loadSfx(loadPath + file + ".ogg"))
# load the cat dialogue
catDialogueFiles = ( "AV_cat_short",
"AV_cat_med",
"AV_cat_long",
"AV_cat_question",
"AV_cat_exclaim",
"AV_cat_howl"
)
# load the audio files and store into the dialogue array
for file in catDialogueFiles:
CatDialogueArray.append(loader.loadSfx(loadPath + file + ".ogg"))
# load the horse dialogue
horseDialogueFiles = ( "AV_horse_short",
"AV_horse_med",
"AV_horse_long",
"AV_horse_question",
"AV_horse_exclaim",
"AV_horse_howl"
)
# load the audio files and store into the dialogue array
for file in horseDialogueFiles:
HorseDialogueArray.append(loader.loadSfx(loadPath + file + ".ogg"))
# load the rabbit dialogue
rabbitDialogueFiles = ( "AV_rabbit_short",
"AV_rabbit_med",
"AV_rabbit_long",
"AV_rabbit_question",
"AV_rabbit_exclaim",
"AV_rabbit_howl"
)
# load the audio files and store into the dialogue array
for file in rabbitDialogueFiles:
RabbitDialogueArray.append(loader.loadSfx(loadPath + file + ".ogg"))
# load the mouse dialogue
mouseDialogueFiles = ( "AV_mouse_short",
"AV_mouse_med",
"AV_mouse_long",
"AV_mouse_question",
"AV_mouse_exclaim",
"AV_mouse_howl"
)
# load the audio files and store into the dialogue array
for file in mouseDialogueFiles:
MouseDialogueArray.append(loader.loadSfx(loadPath + file + ".ogg"))
# load the duck dialogue array
duckDialogueFiles = ( "AV_duck_short",
"AV_duck_med",
"AV_duck_long",
"AV_duck_question",
"AV_duck_exclaim",
"AV_duck_howl"
)
# load the audio files and store into the dialogue array
for file in duckDialogueFiles:
DuckDialogueArray.append(loader.loadSfx(loadPath + file + ".ogg"))
# load the monkey dialogue array
monkeyDialogueFiles = ( "AV_monkey_short",
"AV_monkey_med",
"AV_monkey_long",
"AV_monkey_question",
"AV_monkey_exclaim",
"AV_monkey_howl"
)
# load the audio files and store into the dialogue array
for file in monkeyDialogueFiles:
MonkeyDialogueArray.append(loader.loadSfx(loadPath + file + ".ogg"))
# load the bear dialogue array
bearDialogueFiles = ( "AV_bear_short",
"AV_bear_med",
"AV_bear_long",
"AV_bear_question",
"AV_bear_exclaim",
"AV_bear_howl"
)
# load the audio files and store into the dialogue array
for file in bearDialogueFiles:
BearDialogueArray.append(loader.loadSfx(loadPath + file + ".ogg"))
# load the pig dialogue array
pigDialogueFiles = ( "AV_pig_short",
"AV_pig_med",
"AV_pig_long",
"AV_pig_question",
"AV_pig_exclaim",
"AV_pig_howl"
)
# load the audio files and store into the dialogue array
for file in pigDialogueFiles:
PigDialogueArray.append(loader.loadSfx(loadPath + file + ".ogg"))
def unloadDialog():
global DogDialogueArray
global CatDialogueArray
global HorseDialogueArray
global RabbitDialogueArray
global MouseDialogueArray
global DuckDialogueArray
global MonkeyDialogueArray
global BearDialogueArray
global PigDialogueArray
DogDialogueArray = []
CatDialogueArray = []
HorseDialogueArray = []
RabbitDialogueArray = []
MouseDialogueArray = []
DuckDialogueArray = []
MonkeyDialogueArray = []
BearDialogueArray = []
PigDialogueArray = []
class Toon(Avatar.Avatar, ToonHead):
"""Toon class:"""
notify = DirectNotifyGlobal.directNotify.newCategory("Toon")
afkTimeout = base.config.GetInt('afk-timeout', 600)
# This is the tuple of allowed animations that can be set by using toon.setAnimState().
# If you add an animation that you want to use with setAnimState(), please add
# it to this list.
setAnimStateAllowedList = (
'off',
'neutral',
'victory',
'Happy',
'Sad',
'Catching',
'CatchEating',
'Sleep',
'walk',
'jumpSquat',
'jump',
'jumpAirborne',
'jumpLand',
'run',
'swim',
'swimhold',
'dive',
'cringe',
'OpenBook',
'ReadBook',
'CloseBook',
'TeleportOut',
'Died',
'TeleportIn',
'Emote',
'SitStart',
'Sit',
'Push',
'Squish',
'FallDown',
'GolfPuttLoop',
'GolfRotateLeft',
'GolfRotateRight',
'GolfPuttSwing',
'GolfGoodPutt',
'GolfBadPutt',
'Flattened',
'CogThiefRunning',
'ScientistJealous',
'ScientistEmcee',
'ScientistWork',
'ScientistLessWork',
'ScientistPlay'
)
def __init__(self):
try:
self.Toon_initialized
return
except:
self.Toon_initialized = 1
Avatar.Avatar.__init__(self)
ToonHead.__init__(self)
self.forwardSpeed = 0.0
self.rotateSpeed = 0.0
# Set Avatar Type (Toon, Teen, or Pirate)
self.avatarType = "toon"
# These members are only used to track the current actual
# animation and play rate in effect when motion.standWalkRunReverse
# is not None.
self.motion = Motion.Motion(self)
self.standWalkRunReverse = None
self.playingAnim = None
self.soundTeleport = None
self.cheesyEffect = ToontownGlobals.CENormal
self.effectTrack = None
self.emoteTrack = None
self.emote = None
self.stunTrack = None
self.__bookActors = []
self.__holeActors = []
self.holeClipPath = None
self.wake = None
self.lastWakeTime = 0
self.numPies = 0
self.pieType = 0
self.pieModel = None
self.__pieModelType = None
# Stunned if recently hit by stomper
self.isStunned = 0
# are we disguised as a suit?
self.isDisguised = 0
self.defaultColorScale = None
self.jar = None
self.setTag('pieCode', str(ToontownGlobals.PieCodeToon))
# Define Toon's Font
# fancy nametag point 1
self.setFont(ToontownGlobals.getToonFont())
# chat balloon sound
self.soundChatBubble = loader.loadSfx("phase_3/audio/sfx/GUI_balloon_popup.ogg")
# The animFSM doesn't really have any restrictions on
# transitions between states--we don't care which anim
# state might follow from the current one; we only want to
# ensure everything gets cleaned up properly.
self.animFSM = ClassicFSM(
'Toon',
[State('off', self.enterOff, self.exitOff),
State('neutral', self.enterNeutral, self.exitNeutral),
State('victory', self.enterVictory, self.exitVictory),
State('Happy', self.enterHappy, self.exitHappy),
State('Sad', self.enterSad, self.exitSad),
State('Catching', self.enterCatching, self.exitCatching),
State('CatchEating', self.enterCatchEating, self.exitCatchEating),
State('Sleep', self.enterSleep, self.exitSleep),
State('walk', self.enterWalk, self.exitWalk),
State('jumpSquat', self.enterJumpSquat, self.exitJumpSquat),
State('jump', self.enterJump, self.exitJump),
State('jumpAirborne', self.enterJumpAirborne, self.exitJumpAirborne),
State('jumpLand', self.enterJumpLand, self.exitJumpLand),
State('run', self.enterRun, self.exitRun),
State('swim', self.enterSwim, self.exitSwim),
State('swimhold', self.enterSwimHold, self.exitSwimHold),
State('dive', self.enterDive, self.exitDive),
State('cringe', self.enterCringe, self.exitCringe),
State('OpenBook', self.enterOpenBook, self.exitOpenBook, ['ReadBook','CloseBook']),
State('ReadBook', self.enterReadBook, self.exitReadBook),
State('CloseBook', self.enterCloseBook, self.exitCloseBook),
State('TeleportOut', self.enterTeleportOut, self.exitTeleportOut),
State('Died', self.enterDied, self.exitDied),
State('TeleportedOut', self.enterTeleportedOut, self.exitTeleportedOut),
State('TeleportIn', self.enterTeleportIn, self.exitTeleportIn),
State('Emote', self.enterEmote, self.exitEmote),
State('SitStart', self.enterSitStart, self.exitSitStart),
State('Sit', self.enterSit, self.exitSit),
State('Push', self.enterPush, self.exitPush),
State('Squish', self.enterSquish, self.exitSquish),
State('FallDown', self.enterFallDown, self.exitFallDown),
State('GolfPuttLoop', self.enterGolfPuttLoop, self.exitGolfPuttLoop),
State('GolfRotateLeft', self.enterGolfRotateLeft, self.exitGolfRotateLeft),
State('GolfRotateRight', self.enterGolfRotateRight, self.exitGolfRotateRight),
State('GolfPuttSwing', self.enterGolfPuttSwing, self.exitGolfPuttSwing),
State('GolfGoodPutt', self.enterGolfGoodPutt, self.exitGolfGoodPutt),
State('GolfBadPutt', self.enterGolfBadPutt, self.exitGolfBadPutt),
State('Flattened', self.enterFlattened, self.exitFlattened),
State('CogThiefRunning', self.enterCogThiefRunning, self.exitCogThiefRunning),
State('ScientistJealous', self.enterScientistJealous, self.exitScientistJealous),
State('ScientistEmcee', self.enterScientistEmcee, self.exitScientistEmcee),
State('ScientistWork', self.enterScientistWork, self.exitScientistWork),
State('ScientistLessWork', self.enterScientistLessWork, self.exitScientistLessWork),
State('ScientistPlay', self.enterScientistPlay, self.exitScientistPlay),
],
# Initial State
'off',
# Final State
'off',
)
self.animFSM.enterInitialState()
# Note: When you add an animation to this animFSM list also add it to
# setAnimStateAllowedList if you want to use setAnimState to change to that animation.
def stopAnimations(self):
assert self.notify.debugStateCall(self, "animFsm")
if not self.animFSM.isInternalStateInFlux():
self.animFSM.request('off')
else:
self.notify.warning('animFSM in flux, state=%s, not requesting off' %
self.animFSM.getCurrentState().getName())
if self.effectTrack != None:
self.effectTrack.finish()
self.effectTrack = None
if self.emoteTrack != None:
self.emoteTrack.finish()
self.emoteTrack = None
if self.stunTrack != None:
self.stunTrack.finish()
self.stunTrack = None
if self.wake:
self.wake.stop()
self.wake.destroy()
self.wake = None
self.cleanupPieModel()
def delete(self):
assert self.notify.debugStateCall(self, "animFsm")
try:
self.Toon_deleted
except:
self.Toon_deleted = 1
self.stopAnimations()
self.rightHands = None
self.rightHand = None
self.leftHands = None
self.leftHand = None
self.headParts = None
self.torsoParts = None
self.hipsParts = None
self.legsParts = None
del self.animFSM
for bookActor in self.__bookActors:
bookActor.cleanup()
del self.__bookActors
for holeActor in self.__holeActors:
holeActor.cleanup()
del self.__holeActors
self.soundTeleport = None
self.motion.delete()
self.motion = None
Avatar.Avatar.delete(self)
ToonHead.delete(self)
# toon methods
def updateToonDNA(self, newDNA, fForce = 0):
"""
update the toon's appearance based on new DNA
"""
assert self.notify.debugStateCall(self, "animFsm")
# Make sure gender is updated (for RobotToons)
self.style.gender = newDNA.getGender()
# test and only update the new parts
oldDNA = self.style
if fForce or (newDNA.head != oldDNA.head):
self.swapToonHead(newDNA.head)
if fForce or (newDNA.torso != oldDNA.torso):
self.swapToonTorso(newDNA.torso, genClothes = 0)
self.loop('neutral')
if fForce or (newDNA.legs != oldDNA.legs):
self.swapToonLegs(newDNA.legs)
# easier just to redo the color and clothes swaps than to check each part
self.swapToonColor(newDNA)
self.__swapToonClothes(newDNA)
def setDNAString(self, dnaString):
assert self.notify.debugStateCall(self, "animFsm")
newDNA = ToonDNA.ToonDNA()
newDNA.makeFromNetString(dnaString)
self.setDNA(newDNA)
def setDNA(self, dna):
assert self.notify.debugStateCall(self, "animFsm")
# if we are disguised, don't mess up our custom geom
if hasattr(self, "isDisguised"):
if self.isDisguised:
return
if self.style:
self.updateToonDNA(dna)
else:
# store the DNA
self.style = dna
self.generateToon()
# this no longer works in the Avatar init!
# I moved it here for lack of a better place
# make the drop shadow
self.initializeDropShadow()
self.initializeNametag3d()
def parentToonParts(self):
"""
attach the toon's parts - recurse over all LODs
"""
#import pdb; pdb.set_trace()
assert | |
xoff = log_grid(xpsf, xmax) if xlog_spacing else lin_grid(xpsf, -xmax, xmax)
else:
xoff = xoff_vals
if yoff_vals is None:
ymax = np.abs([yoff_min,yoff_max]).max()
yoff = log_grid(ypsf, ymax) if ylog_spacing else lin_grid(ypsf, -ymax, ymax)
else:
yoff = yoff_vals
# Mask Offset grid positions in arcsec
xgrid_off, ygrid_off = np.meshgrid(xoff, yoff)
xgrid_off, ygrid_off = xgrid_off.flatten(), ygrid_off.flatten()
# Offsets relative to center of mask
xoff_asec, yoff_asec = xy_rot(-1*xgrid_off, -1*ygrid_off, -1*field_rot)
xtel, ytel = siaf_ap.convert(xoff_asec, yoff_asec, 'idl', 'tel')
# Convert from aperture used to create mask into sci pixels for observe aperture
xsci, ysci = self.siaf_ap.convert(xtel, ytel, 'tel', 'sci')
return xsci, ysci
def _calc_psfs_grid(self, sp=None, wfe_drift=0, osamp=1, npsf_per_full_fov=15,
xsci_vals=None, ysci_vals=None, return_coords=None,
use_coeff=True, **kwargs):
"""Create a grid of PSFs across an instrument FoV
Create a grid of PSFs across the instrument aperture FoV. By default,
imaging observations will cover the full detector FoV with a regularly
spaced grid. Coronagraphic observations will cover the nominal
coronagraphic mask region (usually tens of arcsec) and will have
logarithmically spaced values.
Keyword Args
============
sp : :mod:`pysynphot.spectrum`
If not specified, the default is flat in phot lam (equal number of photons
per wavelength bin). The default is normalized to produce 1 count/sec within
that bandpass, assuming the telescope collecting area and instrument bandpass.
Coronagraphic PSFs will further decrease this due to the smaller pupil
size and suppression of coronagraphic mask.
If set, then the resulting PSF image will be scaled to generate the total
observed number of photons from the spectrum (i.e., not scaled by unit response).
wfe_drift : float
Desired WFE drift value relative to default OPD.
osamp : int
Sampling of output PSF relative to detector sampling.
npsf_per_full_fov : int
Number of PSFs across one dimension of the instrument's field of
view. For a coronagraphic observation, this refers to the nominal
coronagraphic field of view.
xsci_vals: None or ndarray
Option to pass custom grid values along the x-axis in 'sci' coords.
For a coronagraph, this instead corresponds to the coronagraphic mask axis in arcsec,
which has a slight rotation relative to the detector axis in MIRI.
ysci_vals: None or ndarray
Option to pass custom grid values along the y-axis in 'sci' coords.
For a coronagraph, this instead corresponds to the coronagraphic mask axis in arcsec,
which has a slight rotation relative to the detector axis in MIRI.
return_coords : None or str
Option to also return coordinate values in desired frame
('det', 'sci', 'tel', 'idl').
Output is then xvals, yvals, hdul_psfs.
use_coeff : bool
If True, uses `calc_psf_from_coeff`; otherwise uses WebbPSF's built-in `calc_psf`.
"""
# Observation aperture
siaf_ap_obs = self.siaf_ap
# Produce grid of PSF locations across the field of view
if self.is_coron:
xsci_psf, ysci_psf = coron_grid(self, npsf_per_full_fov,
xoff_vals=xsci_vals, yoff_vals=ysci_vals)
else:
# No need to go beyond detector pixels
# Number of sci pixels in FoV
# Generate grid borders
xvert, yvert = siaf_ap_obs.closed_polygon_points('sci', rederive=False)
xsci_min, xsci_max = int(np.min(xvert)), int(np.max(xvert))
ysci_min, ysci_max = int(np.min(yvert)), int(np.max(yvert))
nx_pix = int(xsci_max - xsci_min)
ny_pix = int(ysci_max - ysci_min)
# Ensure at least 5 PSFs across FoV for imaging
if np.size(npsf_per_full_fov)==1:
xpsf_full = ypsf_full = npsf_per_full_fov
else:
xpsf_full, ypsf_full = npsf_per_full_fov
xpsf = np.max([int(xpsf_full * nx_pix / siaf_ap_obs.XDetSize), 5])
ypsf = np.max([int(ypsf_full * ny_pix / siaf_ap_obs.YDetSize), 5])
# Cut in half for NIRCam SW (4 detectors per FoV)
if self.name.lower()=='nircam' and self.channel.lower()=='short':
xpsf = np.max([int(xpsf / 2), 5])
ypsf = np.max([int(ypsf / 2), 5])
# Create linear set of grid points along x and y axes
if xsci_vals is None:
xsci_vals = np.linspace(xsci_min, xsci_max, xpsf)
if ysci_vals is None:
ysci_vals = np.linspace(ysci_min, ysci_max, ypsf)
# Full set of grid points to generate PSFs
xsci_psf, ysci_psf = np.meshgrid(xsci_vals, ysci_vals)
xsci_psf = xsci_psf.flatten()
ysci_psf = ysci_psf.flatten()
# Convert everything to tel for good measure to store in header
xtel_psf, ytel_psf = siaf_ap_obs.convert(xsci_psf, ysci_psf, 'sci', 'tel')
if use_coeff:
hdul_psfs = self.calc_psf_from_coeff(sp=sp, coord_vals=(xtel_psf, ytel_psf), coord_frame='tel',
wfe_drift=wfe_drift, return_oversample=True, **kwargs)
else:
hdul_psfs = fits.HDUList()
npos = len(xtel_psf)
for xoff, yoff in tqdm(zip(xtel_psf, ytel_psf), total=npos):
res = self.calc_psf(sp=sp, coord_vals=(xoff,yoff), coord_frame='tel',
return_oversample=True)
# If add_distortion take index 2, otherwise index 0
hdu = res[2] if len(res)==4 else res[0]
hdul_psfs.append(hdu)
# Resample if necessary
scale = osamp / self.oversample #hdu.header['OSAMP']
if scale != 1:
for hdu in hdul_psfs:
hdu.data = frebin(hdu.data, scale=scale)
hdu.header['PIXELSCL'] = hdu.header['PIXELSCL'] / scale
hdu.header['OSAMP'] = osamp
if return_coords is None:
return hdul_psfs
elif return_coords=='sci':
xvals, yvals = xsci_psf, ysci_psf
elif return_coords=='tel':
xvals, yvals = xtel_psf, ytel_psf
else:
xvals, yvals = siaf_ap_obs.convert(xsci_psf, ysci_psf, 'sci', return_coords)
return xvals, yvals, hdul_psfs
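# Minimal usage sketch (comment only, since this is an instance method). Assuming
# `inst` is an instrument object from this package that exposes the method above,
# a call such as
#   xs, ys, hdul_psfs = inst._calc_psfs_grid(osamp=2, npsf_per_full_fov=9,
#                                            return_coords='sci')
# would return the grid coordinates in 'sci' pixels along with an HDUList of
# oversampled PSFs; omit `return_coords` to get just the HDUList. The name `inst`
# and the argument values are illustrative, not taken from the original source.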
def _calc_psfs_sgd(self, xoff_asec, yoff_asec, use_coeff=True, return_oversample=True, **kwargs):
"""Calculate small grid dithers PSFs"""
if self.is_coron==False:
_log.warn("`calc_sgd` only valid for coronagraphic observations (set `image_mask` attribute).")
return
if use_coeff:
result = self.calc_psf_from_coeff(coord_frame='idl', coord_vals=(xoff_asec,yoff_asec),
return_oversample=return_oversample, siaf_ap=self.siaf_ap, **kwargs)
else:
log_prev = conf.logging_level
setup_logging('WARN', verbose=False)
npos = len(xoff_asec)
# Return HDUList or array of images?
if kwargs.get('return_hdul',True):
result = fits.HDUList()
for xoff, yoff in tqdm(zip(xoff_asec, yoff_asec), total=npos):
res = self.calc_psf(coord_frame='idl', coord_vals=(xoff,yoff),
return_oversample=return_oversample, **kwargs)
if len(res)==4:
hdu = res[2] if return_oversample else res[3]
else:
hdu = res[0] if return_oversample else res[1]
result.append(hdu)
else:
result = []
for xoff, yoff in tqdm(zip(xoff_asec, yoff_asec), total=npos):
res = self.calc_psf(coord_frame='idl', coord_vals=(xoff,yoff),
return_oversample=return_oversample, **kwargs)
result.append(res)
result = np.asarray(result)
setup_logging(log_prev, verbose=False)
return result
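# Minimal usage sketch (comment only): a 5-point small-grid-dither pattern in arcsec.
# The offsets and the `inst` name are illustrative, not official SGD positions.
#   xoff_asec = [0.0,  0.015, -0.015, -0.015,  0.015]
#   yoff_asec = [0.0,  0.015,  0.015, -0.015, -0.015]
#   hdul_sgd = inst._calc_psfs_sgd(xoff_asec, yoff_asec, use_coeff=True)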
def nrc_mask_trans(image_mask, x, y):
""" Compute the amplitude transmission of a NIRCam band-limited coronagraph (BLC)
occulting mask at the given (x, y) positions.
Based on the Krist et al. SPIE paper on NIRCam coronagraph design.
*Note*: To get the intensity transmission, these values should be squared.
"""
import scipy
if not isinstance(x, np.ndarray):
x = np.asarray([x]).flatten()
y = np.asarray([y]).flatten()
if image_mask[-1]=='R':
r = poppy.accel_math._r(x, y)
if image_mask == 'MASK210R':
sigma = 5.253
elif image_mask == 'MASK335R':
sigma = 3.2927866
elif image_mask == 'MASK430R':
sigma = 2.58832
sigmar = sigma * r
# clip sigma: The minimum is to avoid divide by zero
# the maximum truncates after the first sidelobe to match the hardware
bessel_j1_zero2 = scipy.special.jn_zeros(1, 2)[1]
sigmar.clip(np.finfo(sigmar.dtype).tiny, bessel_j1_zero2, out=sigmar) # avoid divide by zero -> NaNs
transmission = (1 - (2 * scipy.special.j1(sigmar) / sigmar) ** 2)
transmission[r == 0] = 0 # special case center point (value based on L'Hopital's rule)
if image_mask[-1]=='B':
# This is hard-coded to the wedge-plus-flat-regions shape for NIRCAM
# the scale fact should depend on X coord in arcsec, scaling across a 20 arcsec FOV.
# map flat regions to 2.5 arcsec each
# map -7.5 to 2, +7.5 to 6. slope is 4/15, offset is +9.5
wedgesign = 1 if image_mask == 'MASKSWB' else -1 # wide ends opposite for SW and LW
scalefact = (2 + (x * wedgesign + 7.5) * 4 / 15).clip(2, 6)
# Working out the sigma parameter vs. wavelength to get that wedge pattern is non trivial
# This is NOT a linear relationship. See calc_blc_wedge helper fn below.
if image_mask == 'MASKSWB':
polyfitcoeffs = np.array([2.01210737e-04, -7.18758337e-03, 1.12381516e-01,
-1.00877701e+00, 5.72538509e+00, -2.12943497e+01,
5.18745152e+01, -7.97815606e+01, 7.02728734e+01])
elif image_mask == 'MASKLWB':
polyfitcoeffs = np.array([9.16195583e-05, -3.27354831e-03, 5.11960734e-02,
-4.59674047e-01, 2.60963397e+00, -9.70881273e+00,
2.36585911e+01, -3.63978587e+01, 3.20703511e+01])
else:
raise NotImplementedError(f"{image_mask} not a valid name for NIRCam wedge occulter")
sigmas = scipy.poly1d(polyfitcoeffs)(scalefact)
sigmar = sigmas * np.abs(y)
# clip sigma: The minimum is to avoid divide by zero
# the maximum truncates after the first sidelobe to match the hardware
sigmar.clip(min=np.finfo(sigmar.dtype).tiny, max=2 * np.pi, out=sigmar)
transmission = (1 - (np.sin(sigmar) / sigmar) ** 2)
transmission[y == 0] = 0
transmission[np.abs(x) > 10] = 1.0
# Amplitude transmission (square to get intensity transmission; ie., photon throughput)
return transmission
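# Minimal usage sketch for nrc_mask_trans. The offsets are assumed to be in arcsec
# (consistent with the sigma values above); square the returned amplitude to get the
# intensity transmission. Values are illustrative.
def _example_nrc_mask_trans():
    x = np.array([0.0, 0.5, 2.0])
    y = np.zeros_like(x)
    amp = nrc_mask_trans('MASK335R', x, y)
    return amp**2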
def _transmission_map(self, coord_vals, coord_frame, siaf_ap=None):
if not self.is_coron:
return None
apname_mask = self._psf_coeff_mod['si_mask_apname']
if apname_mask is None:
apname_mask = self.aperturename
siaf_ap_mask = self.siaf[apname_mask]
# Assume cframe corresponds to siaf_ap input
siaf_ap = siaf_ap_mask if siaf_ap is None else siaf_ap
coord_frame = coord_frame.lower()
# Convert to common 'tel' coordinates
cx, cy = np.asarray(coord_vals)
if (siaf_ap.AperName != siaf_ap_mask.AperName):
cx_tel, cy_tel | |
If smaller than necessary, data elements will be dropped from the
output matrix.
Returns:
mat_out : BCOO array with sorted indices and no duplicate indices.
"""
data, indices = _bcoo_sum_duplicates(mat.data, mat.indices, spinfo=mat._info, nse=nse)
return BCOO((data, indices), shape=mat.shape)
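# Minimal usage sketch for bcoo_sum_duplicates (assumes the BCOO class and jnp from
# this module; values are illustrative). Duplicate indices are summed and the result
# is padded or truncated to the requested nse.
def _example_bcoo_sum_duplicates():
    data = jnp.array([1., 2., 3.])
    indices = jnp.array([[0], [0], [2]])      # index 0 appears twice
    mat = BCOO((data, indices), shape=(4,))
    out = bcoo_sum_duplicates(mat, nse=2)     # data -> [3., 3.], indices -> [[0], [2]]
    return out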
def _bcoo_sum_duplicates(data, indices, *, spinfo, nse):
if nse is not None:
nse = core.concrete_or_error(operator.index, nse, "nse argument of bcoo_sum_duplicates.")
return bcoo_sum_duplicates_p.bind(data, indices, spinfo=spinfo, nse=nse)
@bcoo_sum_duplicates_p.def_impl
def _bcoo_sum_duplicates_impl(data, indices, *, spinfo, nse):
props = _validate_bcoo(data, indices, spinfo.shape)
f = functools.partial(_bcoo_sum_duplicates_unbatched, shape=spinfo.shape[props.n_batch:])
for _ in range(props.n_batch):
f = vmap(f)
indices_out, mapping, nse_batched = f(indices)
if nse is None:
nse = 1 if props.n_sparse == 0 else nse_batched.max()
indices_out = _adjust_indices_nse(indices_out, nse=nse, shape=spinfo.shape)
if props.n_sparse == 0:
data = data.sum(props.n_batch, keepdims=True)
data_out = jnp.empty((*map(max, indices.shape[:props.n_batch], data.shape[:props.n_batch]),
nse, *data.shape[props.n_batch + 1:]), dtype=data.dtype)
permute = lambda d_out, m, d: d_out.at[m].add(d, mode='drop')
for _ in range(props.n_batch):
permute = broadcasting_vmap(permute)
data_out = permute(data_out, mapping, data)
return data_out, indices_out
def _adjust_indices_nse(indices, *, nse, shape):
props = _validate_bcoo_indices(indices, shape)
if nse <= props.nse:
indices = indices[..., :nse, :]
else:
fill = lax.broadcast_in_dim(
operand=jnp.array(shape[props.n_batch:props.n_batch + props.n_sparse], dtype=indices.dtype),
shape=(*indices.shape[:-2], nse - props.nse, indices.shape[-1]),
broadcast_dimensions=(indices.ndim - 1,)
)
indices = lax.concatenate([indices, fill], dimension=indices.ndim - 2)
return indices
def _bcoo_sum_duplicates_unbatched(indices, *, shape):
props = _validate_bcoo_indices(indices, shape)
if props.n_sparse == 0:
nse = 1
mapping = jnp.zeros(nse, dtype='int32')
indices_out = jnp.zeros_like(indices, shape=(nse, props.n_sparse))
return indices_out, mapping, nse
fill_value = jnp.expand_dims(jnp.array(shape[:props.n_sparse], dtype=indices.dtype), (0,))
out_of_bounds = (indices >= fill_value).any(-1, keepdims=True)
indices = jnp.where(out_of_bounds, fill_value, indices)
indices_unique, inv_idx, nse = _unique(
indices, axis=0, return_inverse=True, return_true_size=True,
size=props.nse, fill_value=fill_value)
nse = nse - (indices == fill_value).any()
return indices_unique, inv_idx, nse
@bcoo_sum_duplicates_p.def_abstract_eval
def _bcoo_sum_duplicates_abstract_eval(data, indices, *, spinfo, nse):
if nse is None:
raise ValueError("bcoo_sum_duplicates: nse must be specified when using the function within "
"jit, vmap, and other transformations requiring abstract evaluation.")
props = _validate_bcoo(data, indices, spinfo.shape)
indices_out = core.ShapedArray((*indices.shape[:props.n_batch], nse, props.n_sparse),
dtype=indices.dtype, weak_type=indices.weak_type)
data_out = core.ShapedArray(
(*map(max, indices.shape[:props.n_batch], data.shape[:props.n_batch]),
nse, *data.shape[props.n_batch + 1:]), data.dtype, weak_type=data.weak_type)
return data_out, indices_out
def _bcoo_sum_duplicates_batching_rule(batched_args, batch_dims, *, spinfo, nse):
data, indices = batched_args
if any(b not in [0, None] for b in batch_dims):
raise NotImplementedError(f"batch_dims={batch_dims}. Only 0 and None are supported.")
if batch_dims[0] is None:
data = data[None, ...]
if batch_dims[1] is None:
indices = indices[None, ...]
new_spinfo = BCOOInfo(shape=(max(data.shape[0], indices.shape[0]), *spinfo.shape))
data_out, indices_out = bcoo_sum_duplicates_p.bind(data, indices, spinfo=new_spinfo, nse=nse)
out_axes = (0, 0)
# Note: if data is unbatched on input, it will be batched on output.
# However, if indices are unbatched on input, they will be unbatched on output.
if batch_dims[1] is None:
indices_out = indices_out[0]
out_axes = (0, None)
return (data_out, indices_out), tuple(out_axes)
def _bcoo_sum_duplicates_jvp(primals, tangents, *, spinfo, nse):
props = _validate_bcoo(*primals, spinfo.shape)
data, indices = primals
data_dot, _ = tangents
f = functools.partial(_bcoo_sum_duplicates_unbatched, shape=spinfo.shape[props.n_batch:])
for _ in range(props.n_batch):
f = broadcasting_vmap(f)
indices_out, mapping, nse_batched = f(indices)
if nse is None:
nse = jnp.sum(nse_batched)
try:
nse = core.concrete_or_error(operator.index, nse, "nse argument of bcoo_sum_duplicates.")
except core.ConcretizationTypeError:
raise ValueError("bcoo_sum_duplicates: nse must be specified when using the function within "
"jit, vmap, and other transformations requiring abstract evaluation.")
indices_out = _adjust_indices_nse(indices_out, nse=nse, shape=spinfo.shape)
if props.n_sparse == 0:
data = data.sum(props.n_batch, keepdims=True)
data_dot = data_dot.sum(props.n_batch, keepdims=True)
data_out = jnp.empty((*map(max, indices.shape[:props.n_batch], data.shape[:props.n_batch]),
nse, *data.shape[props.n_batch + 1:]), dtype=data.dtype)
data_dot_out = data_out
permute = lambda d_out, m, d: d_out.at[m].add(d, mode='drop')
for _ in range(props.n_batch):
permute = broadcasting_vmap(permute)
data_out = permute(data_out, mapping, data)
indices_dot_out = ad.Zero.from_value(indices_out)
data_dot_out = ad.Zero.from_value(data_out) if type(data_dot) is ad.Zero else permute(data_dot_out, mapping, data_dot)
return (data_out, indices_out), (data_dot_out, indices_dot_out)
_bcoo_sum_duplicates_mhlo = mlir.lower_fun(
_bcoo_sum_duplicates_impl, multiple_results=True)
ad.primitive_jvps[bcoo_sum_duplicates_p] = _bcoo_sum_duplicates_jvp
batching.primitive_batchers[bcoo_sum_duplicates_p] = _bcoo_sum_duplicates_batching_rule
mlir.register_lowering(bcoo_sum_duplicates_p, _bcoo_sum_duplicates_mhlo)
#----------------------------------------------------------------------
# BCOO functions that maybe should be primitives?
def bcoo_add_batch_dim(M):
"""Convert a sparse dimension to a batch dimension
Please note that this function may result in a far less efficient storage scheme
for the matrix (the number of stored elements grows to `M.shape[0] * M.nse`).
This utility is provided for convenience, e.g. to allow vmapping over non-batched
matrices.
Args:
M: BCOO matrix
Returns:
M2: BCOO matrix with n_batch = M.n_batch + 1 and n_sparse = M.n_sparse - 1
"""
# TODO(jakevdp): allow user-specified nse?
if M.n_sparse == 0:
raise ValueError("Cannot add a batch dimension to a matrix with n_sparse=0")
f = _add_batch_dim
for _ in range(M.n_batch):
f = vmap(f)
return f(M)
def _add_batch_dim(M):
assert M.n_batch == 0
assert M.n_sparse > 0
data = jnp.zeros_like(M.data, shape=(M.shape[0], *M.data.shape))
data = data.at[M.indices[:, 0], jnp.arange(M.nse)].set(M.data)
indices_shape = (M.shape[0], M.nse, M.n_sparse - 1)
if M.n_sparse > 1:
fill_value = jnp.array(M.shape[M.n_batch + 1: M.n_batch + M.n_sparse])
indices = jnp.full_like(M.indices, shape=indices_shape, fill_value=fill_value)
indices = indices.at[M.indices[:, 0], jnp.arange(M.nse)].set(M.indices[:, 1:])
else:
indices = jnp.empty_like(M.indices, shape=indices_shape)
return BCOO((data, indices), shape=M.shape)
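# Minimal usage sketch for bcoo_add_batch_dim (assumes BCOO and jnp from this module;
# the identity matrix is just an illustrative input).
def _example_bcoo_add_batch_dim():
    M = BCOO.fromdense(jnp.eye(3))   # n_batch=0, n_sparse=2
    M2 = bcoo_add_batch_dim(M)       # n_batch=1, n_sparse=1, same dense values
    return M2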
def bcoo_broadcast_in_dim(mat, *, shape, broadcast_dimensions):
"""Expand the size and rank of a BCOO array by duplicating the data.
A BCOO equivalent of jax.lax.broadcast_in_dim.
Args:
mat: A BCOO-format array.
shape: The shape of the target array.
broadcast_dimensions: The dimension in the shape of the target array which
each dimension of the operand (``mat``) shape corresponds to.
Returns:
A BCOO-format array containing the target array.
"""
return BCOO(_bcoo_broadcast_in_dim(mat.data, mat.indices, spinfo=mat._info,
shape=shape,
broadcast_dimensions=broadcast_dimensions),
shape=shape)
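# Minimal usage sketch for bcoo_broadcast_in_dim (assumes BCOO and jnp from this
# module). Note that, per the NotImplementedError below, new sparse dimensions can
# only be added with length 1; batch and dense dimensions broadcast as usual.
def _example_bcoo_broadcast_in_dim():
    v = BCOO.fromdense(jnp.array([0., 1., 2.]))                         # shape (3,)
    vb = bcoo_broadcast_in_dim(v, shape=(1, 3), broadcast_dimensions=(1,))
    return vb                                                           # shape (1, 3)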
def _bcoo_broadcast_in_dim(data, indices, *, spinfo, shape, broadcast_dimensions):
"""BCOO equivalent of lax.broadcast_in_dim"""
if len(spinfo.shape) != len(broadcast_dimensions):
raise ValueError(f"spinfo.shape={spinfo.shape} and broadcast_dimensions={broadcast_dimensions} must have the same length")
props = _validate_bcoo(data, indices, spinfo.shape)
batch_dims, sparse_dims, dense_dims = split_list(broadcast_dimensions, [props.n_batch, props.n_sparse])
if max(batch_dims, default=0) > min(sparse_dims, default=len(shape)):
raise ValueError("Cannot mix batch and sparse dimensions during broadcast_in_dim")
if max(sparse_dims, default=0) > min(dense_dims, default=len(shape)):
raise ValueError("Cannot mix sparse and dense dimensions during broadcast_in_dim")
new_n_batch = props.n_batch and 1 + max(broadcast_dimensions[:props.n_batch])
new_n_dense = props.n_dense and len(shape) - min(broadcast_dimensions[-props.n_dense:])
new_n_sparse = len(shape) - new_n_batch - new_n_dense
if np.prod(spinfo.shape[props.n_batch: props.n_batch + props.n_sparse]) != np.prod(shape[new_n_batch:new_n_batch + new_n_sparse]):
raise NotImplementedError("Adding sparse dimensions with lengths != 1")
nse = props.nse
# batch & dense dimensions
new_data = lax.broadcast_in_dim(data,
shape=(*shape[:new_n_batch], nse, *shape[new_n_batch + new_n_sparse:]),
broadcast_dimensions=(*batch_dims, new_n_batch, *(b + 1 - new_n_sparse for b in dense_dims)))
new_indices = lax.broadcast_in_dim(indices,
shape=(*shape[:new_n_batch], nse, props.n_sparse),
broadcast_dimensions=(*batch_dims, new_n_batch, new_n_batch + 1))
# sparse dimensions
new_indices = (jnp.zeros_like(new_indices, shape=(*shape[:new_n_batch], nse, new_n_sparse))
.at[..., jnp.array(sparse_dims, int) - new_n_batch].set(new_indices))
return new_data, new_indices
def _tuple_replace(tup, ind, val):
return tuple(val if i == ind else t for i, t in enumerate(tup))
def bcoo_reduce_sum(mat, *, axes):
"""Sum array element over given axes.
Args:
mat: A BCOO-format array.
axes: A tuple, list, or ndarray containing the axes of ``mat`` over which
the sum is performed.
Returns:
A BCOO-format array containing the result.
"""
out_data, out_indices, out_shape = _bcoo_reduce_sum(
mat.data, mat.indices, spinfo=mat._info, axes=axes)
return BCOO((out_data, out_indices), shape=out_shape)
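# Minimal usage sketch for bcoo_reduce_sum (assumes BCOO and jnp from this module).
# Summing over a sparse axis drops that index; the sum over any resulting duplicate
# indices is implicit until the result is densified.
def _example_bcoo_reduce_sum():
    M = BCOO.fromdense(jnp.arange(6.).reshape(2, 3))
    col_sums = bcoo_reduce_sum(M, axes=(0,))   # BCOO with shape (3,)
    return col_sums.todense()                  # -> [3., 5., 7.]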
def _bcoo_reduce_sum(data, indices, *, spinfo, axes):
shape = spinfo.shape
assert all(0 <= a < len(shape) for a in axes)
n_batch, n_sparse, _, nse = _validate_bcoo(data, indices, shape)
axes = sorted(set(axes))
# Sum over dense dimensions -> sum over data
dense_axes = tuple(ax - n_sparse + 1 for ax in axes if ax >= n_batch + n_sparse)
data = data.sum(dense_axes)
if n_sparse:
# zero-out data corresponding to invalid indices.
fill_value = jnp.expand_dims(
jnp.array(shape[n_batch: n_batch + n_sparse]), range(indices.ndim - 1))
mask = jnp.all(indices < fill_value, -1)
if data.ndim > mask.ndim:
mask = lax.expand_dims(mask, tuple(range(mask.ndim, data.ndim)))
data = jnp.where(mask, data, 0)
# Sum over sparse dimensions -> drop index; sum is implicit
sparse_idx = [i for i in range(n_sparse) if i + n_batch not in axes]
if not sparse_idx:
indices = jnp.zeros(_tuple_replace(indices.shape, n_batch + 1, 0), indices.dtype)
else:
indices = indices[..., np.array(sparse_idx)]
# Sum over batch dimensions -> reshape into nse
batch_axes = {ax for ax in axes if ax < n_batch}
# First handle broadcasted batch dimensions
for ax in batch_axes:
if data.shape[ax] == 1:
if indices.shape[ax] == 1:
data = data * shape[ax]
else:
data = lax.broadcast_in_dim(data, _tuple_replace(data.shape, ax, shape[ax]), tuple(range(data.ndim)))
else:
if indices.shape[ax] == 1:
data = data.sum(ax)
assert data.shape[ax] == indices.shape[ax]
new_batch_dims = tuple(sorted(set(range(n_batch)) - batch_axes))
new_batch_shape = tuple(data.shape[i] for i in new_batch_dims)
new_nse = int(nse * np.prod([data.shape[i] for i in batch_axes]))
data = lax.reshape(data,
(*new_batch_shape, new_nse, *data.shape[n_batch + 1:]),
(*new_batch_dims, *batch_axes, *range(n_batch, data.ndim)))
indices = lax.reshape(indices,
(*new_batch_shape, new_nse, *indices.shape[n_batch + 1:]),
(*new_batch_dims, *batch_axes, *range(n_batch, indices.ndim)))
out_shape = tuple(shape[i] | |
from django.contrib.auth.models import User
from django.db.models import Q
from rest_framework import serializers, ISO_8601
from rest_framework.validators import UniqueValidator, UniqueTogetherValidator
from driver.models import Driver, DriverAppUser, GPSLogNew, OTP, GPSDevice, GPSDeviceLog, TracknovateGPSDevice, \
TracknovateGPSDeviceLog, WaytrackerGPSDevice, WaytrackerGPSDeviceLog, TempoGoGPSDevice, TempoGoGPSDeviceLog, \
SecuGPSDevice, SecuGPSDeviceLog, MahindraGPSDevice, MahindraGPSDeviceLog, BharatGPSTrackerLog, GPSDeviceProvider
from fms.models import Document
from owner.models import Vehicle
from restapi.helper_api import DATE_FORMAT, DATETIME_FORMAT
from restapi.serializers.authentication import BankSerializer
from restapi.service.validators import PAN, MOBILE_NUMBER_REGEX, validate_mobile_number, validate_vehicle_number
from utils.models import TaxationID, IDDetails, Address, Bank
class TrackVehicleSerializer(serializers.Serializer):
vehicle_number = serializers.CharField(allow_null=True, max_length=40, read_only=True)
vehicle_status = serializers.ChoiceField(allow_null=True, choices=(
('unloaded', 'unloaded'), ('loading', 'loading'), ('loaded', 'loaded'), ('unloading', 'unloading')),
required=False, read_only=True)
updated_on = serializers.DateTimeField(read_only=True)
driver_name = serializers.CharField(allow_null=True, max_length=50, required=False, read_only=True)
driver_number = serializers.CharField(allow_null=True, max_length=20, required=False, read_only=True)
vehicle_type = serializers.CharField(allow_null=True, max_length=40, required=False, read_only=True)
device_id = serializers.CharField(max_length=50, read_only=True)
source = serializers.CharField(max_length=50, read_only=True)
def create(self, validated_data):
pass
def update(self, instance, validated_data):
pass
class DriverSerializer(serializers.Serializer):
id = serializers.IntegerField(label='ID', read_only=True)
name = serializers.CharField(max_length=35, required=True)
phone = serializers.RegexField(regex=MOBILE_NUMBER_REGEX,
validators=[UniqueValidator(queryset=Driver.objects.all())])
alt_phone = serializers.RegexField(regex=MOBILE_NUMBER_REGEX, allow_blank=True, allow_null=True, min_length=10,
max_length=10, required=False)
alt_phone2 = serializers.RegexField(regex=MOBILE_NUMBER_REGEX, allow_blank=True, allow_null=True, min_length=10,
max_length=10, required=False)
pan = serializers.RegexField(regex=PAN, allow_blank=True, allow_null=True, max_length=11, required=False)
driving_licence_number = serializers.CharField(allow_null=True, max_length=50, required=False)
driving_licence_location = serializers.CharField(allow_null=True, max_length=50, required=False)
driving_licence_validity = serializers.DateField(allow_null=True, required=False, format=DATE_FORMAT,
input_formats=[DATE_FORMAT, ISO_8601])
smartphone_available = serializers.BooleanField(required=False)
route = serializers.CharField(allow_null=True, allow_blank=True, max_length=255, required=False)
priority_level = serializers.CharField(allow_null=True, max_length=255, required=False)
created_on = serializers.DateTimeField(read_only=True)
updated_on = serializers.DateTimeField(read_only=True)
deleted = serializers.BooleanField(required=False)
deleted_on = serializers.DateTimeField(allow_null=True, required=False)
address = serializers.PrimaryKeyRelatedField(allow_null=True, queryset=Address.objects.all(), required=False,
validators=[UniqueValidator(queryset=Driver.objects.all())])
id_proof = serializers.PrimaryKeyRelatedField(allow_null=True, queryset=IDDetails.objects.all(), required=False,
validators=[UniqueValidator(queryset=Driver.objects.all())])
account_details = serializers.PrimaryKeyRelatedField(allow_null=True, queryset=Bank.objects.all(), required=False,
validators=[UniqueValidator(queryset=Driver.objects.all())])
taxation_id = serializers.PrimaryKeyRelatedField(allow_null=True, queryset=TaxationID.objects.all(), required=False,
validators=[UniqueValidator(queryset=Driver.objects.all())])
driving_licence = serializers.PrimaryKeyRelatedField(allow_null=True, queryset=Document.objects.all(),
required=False)
created_by = serializers.SlugRelatedField(queryset=User.objects.all(), required=False, slug_field="username")
changed_by = serializers.SlugRelatedField(queryset=User.objects.all(), slug_field="username")
def create(self, validated_data):
instance = Driver.objects.create(**validated_data)
return instance
def update(self, instance, validated_data):
Driver.objects.filter(id=instance.id).update(**validated_data)
return Driver.objects.get(id=instance.id)
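# Minimal usage sketch (comment only; the field values and the 'admin' username are
# illustrative): validate incoming data and create a Driver through the serializer.
#   serializer = DriverSerializer(data={'name': 'Some Driver', 'phone': '9999999999',
#                                       'changed_by': 'admin'})
#   if serializer.is_valid(raise_exception=True):
#       driver = serializer.save()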
class FMSDriverSerializer(serializers.Serializer):
id = serializers.IntegerField(label='ID', read_only=True)
name = serializers.CharField(read_only=True)
phone = serializers.CharField(read_only=True)
driving_licence_number = serializers.CharField(read_only=True)
driving_licence_location = serializers.CharField(read_only=True)
pan = serializers.CharField(read_only=True)
driving_licence_validity = serializers.DateField(read_only=True, format=DATE_FORMAT,
input_formats=[DATE_FORMAT, ISO_8601])
docs = serializers.SerializerMethodField()
account_details = serializers.PrimaryKeyRelatedField(allow_null=True, queryset=Bank.objects.all(), required=False,
validators=[UniqueValidator(queryset=Driver.objects.all())])
def to_representation(self, instance):
self.fields['account_details'] = BankSerializer(read_only=True)
return super().to_representation(instance=instance)
def get_docs(self, instance):
return [
{'id': doc.id, 'url': doc.s3_upload.public_url(), 'document_category': doc.document_category,
'document_category_display': doc.get_document_category_display(),
'thumb_url': doc.s3_upload.public_url(),
'bucket': doc.s3_upload.bucket,
'folder': doc.s3_upload.folder,
'uuid': doc.s3_upload.uuid,
'filename': doc.s3_upload.filename,
'validity': None,
} for doc in
instance.driver_files.filter(document_category__in=['DL', 'PAN']).exclude(
Q(s3_upload=None) | Q(deleted=True))
]
@classmethod
def many_init(cls, *args, **kwargs):
kwargs['child'] = cls()
excluded_fields = [
'driving_licence_number', 'driving_licence_location', 'driving_licence_validity', 'docs'
]
for field in excluded_fields:
kwargs['child'].fields.pop(field)
return serializers.ListSerializer(*args, **kwargs)
def create(self, validated_data):
pass
def update(self, instance, validated_data):
pass
class DriverAppUserSerializer(serializers.Serializer):
id = serializers.IntegerField(label='ID', read_only=True)
created_on = serializers.DateTimeField(read_only=True)
updated_on = serializers.DateTimeField(read_only=True)
deleted = serializers.BooleanField(required=False)
deleted_on = serializers.DateTimeField(allow_null=True, required=False)
device_id = serializers.CharField(max_length=50, validators=[UniqueValidator(queryset=DriverAppUser.objects.all())])
auth_token = serializers.CharField(max_length=40,
validators=[UniqueValidator(queryset=DriverAppUser.objects.all())])
driver_name = serializers.CharField(allow_null=True, max_length=50, required=False)
driver_number = serializers.CharField(allow_null=True, max_length=20, required=False)
number_verified = serializers.BooleanField(required=False)
driving_licence_number = serializers.CharField(allow_null=True, max_length=20, required=False)
vehicle_number = serializers.CharField(allow_null=True, max_length=40, required=False)
vehicle_type = serializers.CharField(allow_null=True, max_length=40, required=False)
vehicle_status = serializers.ChoiceField(
choices=(('unloaded', 'unloaded'), ('loading', 'loading'), ('loaded', 'loaded'), ('unloading', 'unloading')),
required=False)
location_time = serializers.DateTimeField(allow_null=True, required=False)
latitude = serializers.DecimalField(allow_null=True, decimal_places=10, max_digits=20, required=False)
longitude = serializers.DecimalField(allow_null=True, decimal_places=10, max_digits=20, required=False)
is_active = serializers.BooleanField(required=False)
inactive_sms_sent_at = serializers.DateTimeField(allow_null=True, required=False)
driver = serializers.PrimaryKeyRelatedField(allow_null=True, queryset=Driver.objects.all(), required=False)
created_by = serializers.SlugRelatedField(queryset=User.objects.all(), required=False, slug_field="username")
changed_by = serializers.SlugRelatedField(queryset=User.objects.all(), slug_field="username")
def to_representation(self, instance):
self.fields['driver'] = DriverSerializer(read_only=True)
return super().to_representation(instance=instance)
def create(self, validated_data):
instance = DriverAppUser.objects.create(**validated_data)
return instance
def update(self, instance, validated_data):
DriverAppUser.objects.filter(id=instance.id).update(**validated_data)
return DriverAppUser.objects.get(id=instance.id)
class GPSLogNewSerializer(serializers.Serializer):
id = serializers.IntegerField(label='ID', read_only=True)
created_on = serializers.DateTimeField(read_only=True)
updated_on = serializers.DateTimeField(read_only=True)
deleted = serializers.BooleanField(required=False)
deleted_on = serializers.DateTimeField(allow_null=True, required=False)
datetime = serializers.DateTimeField(help_text='log time', required=True)
device_id = serializers.CharField(help_text='imei or uuid generated on phone', max_length=50, required=True)
latitude = serializers.DecimalField(allow_null=True, decimal_places=10, max_digits=20, required=False)
longitude = serializers.DecimalField(allow_null=True, decimal_places=10, max_digits=20, required=False)
altitude = serializers.FloatField(allow_null=True, required=False)
speed = serializers.FloatField(allow_null=True, required=False)
course = serializers.FloatField(allow_null=True, required=False)
accuracy = serializers.FloatField(allow_null=True, required=False)
provider = serializers.CharField(allow_null=True, max_length=20, required=False)
battery = serializers.FloatField(allow_null=True, required=False)
total_memory = serializers.FloatField(allow_null=True, required=False)
available_memory = serializers.FloatField(allow_null=True, required=False)
threshold = serializers.FloatField(allow_null=True, required=False)
low_memory = serializers.BooleanField(required=False)
android_release = serializers.CharField(allow_null=True, max_length=20, required=False)
android_sdk_int = serializers.IntegerField(allow_null=True, max_value=2147483647, min_value=-2147483648,
required=False)
version_name = serializers.CharField(allow_null=True, max_length=20, required=False)
version_code = serializers.IntegerField(allow_null=True, max_value=2147483647, min_value=-2147483648,
required=False)
brand = serializers.CharField(allow_null=True, max_length=30, required=False)
manufacturer = serializers.CharField(allow_null=True, max_length=30, required=False)
product = serializers.CharField(allow_null=True, max_length=30, required=False)
device = serializers.CharField(allow_null=True, max_length=30, required=False)
model = serializers.CharField(allow_null=True, max_length=30, required=False)
driver_name = serializers.CharField(allow_null=True, max_length=50, required=False)
driver_number = serializers.CharField(allow_null=True, max_length=20, required=False)
driving_licence_number = serializers.CharField(allow_null=True, max_length=20, required=False)
vehicle_number = serializers.CharField(allow_null=True, max_length=40, required=False)
vehicle_type = serializers.CharField(allow_null=True, max_length=40, required=False)
vehicle_status = serializers.ChoiceField(allow_null=True, choices=(
('unloaded', 'unloaded'), ('loading', 'loading'), ('loaded', 'loaded'), ('unloading', 'unloading')),
required=False)
driver = serializers.PrimaryKeyRelatedField(allow_null=True, queryset=DriverAppUser.objects.all(), required=False)
created_by = serializers.SlugRelatedField(queryset=User.objects.all(), required=False, slug_field="username")
changed_by = serializers.SlugRelatedField(queryset=User.objects.all(), slug_field="username")
def to_representation(self, instance):
self.fields['driver'] = DriverAppUserSerializer(read_only=True)
return super().to_representation(instance=instance)
class Meta:
validators = [UniqueTogetherValidator(queryset=GPSLogNew.objects.all(), fields=('device_id', 'datetime'))]
def create(self, validated_data):
instance = GPSLogNew.objects.create(**validated_data)
return instance
def update(self, instance, validated_data):
GPSLogNew.objects.filter(id=instance.id).update(**validated_data)
return GPSLogNew.objects.get(id=instance.id)
class OTPSerializer(serializers.Serializer):
id = serializers.IntegerField(label='ID', read_only=True)
phone = serializers.CharField(max_length=20, validators=[UniqueValidator(queryset=OTP.objects.all())])
expires_at = serializers.DateTimeField()
otp = serializers.CharField(max_length=8)
created_on = serializers.DateTimeField(read_only=True)
updated_on = serializers.DateTimeField(read_only=True)
deleted = serializers.BooleanField(required=False)
deleted_on = serializers.DateTimeField(allow_null=True, required=False)
created_by = serializers.SlugRelatedField(queryset=User.objects.all(), required=False, slug_field="username")
changed_by = serializers.SlugRelatedField(queryset=User.objects.all(), slug_field="username")
def create(self, validated_data):
instance = OTP.objects.create(**validated_data)
return instance
def update(self, instance, validated_data):
OTP.objects.filter(id=instance.id).update(**validated_data)
return OTP.objects.get(id=instance.id)
class GPSDeviceProviderSerializer(serializers.Serializer):
id = serializers.IntegerField(label='ID', read_only=True)
name = serializers.CharField(allow_blank=True, allow_null=True, max_length=50, required=False)
def create(self, validated_data):
pass
def update(self, instance, validated_data):
pass
class GPSDeviceSerializer(serializers.Serializer):
id = serializers.IntegerField(label='ID', read_only=True)
created_on = serializers.DateTimeField(read_only=True)
updated_on = serializers.DateTimeField(read_only=True)
deleted = serializers.BooleanField(required=False)
deleted_on = serializers.DateTimeField(allow_null=True, required=False)
device_id = serializers.CharField(allow_null=True, max_length=50, required=False)
imei = serializers.CharField(allow_null=True, max_length=40, required=False)
address = serializers.CharField(allow_blank=True, allow_null=True, max_length=500, required=False)
driver_name = serializers.CharField(allow_null=True, max_length=50, required=False)
    driver_number = serializers.CharField(allow_null=True, min_length=10, max_length=10, required=False)
driving_licence_number = serializers.CharField(allow_null=True, max_length=20, required=False)
vehicle_number = serializers.CharField(allow_null=True, max_length=40, required=False)
vehicle_type = serializers.CharField(allow_blank=True, allow_null=True, max_length=40, required=False)
vehicle_status = serializers.ChoiceField(
choices=(('unloaded', 'unloaded'), ('loading', 'loading'), ('loaded', 'loaded'), ('unloading', 'unloading')),
required=False)
location_time = serializers.DateTimeField(allow_null=True, required=False, format=DATETIME_FORMAT)
latitude = serializers.DecimalField(allow_null=True, decimal_places=10, max_digits=20, required=False)
longitude = serializers.DecimalField(allow_null=True, decimal_places=10, max_digits=20, required=False)
is_active = serializers.BooleanField(required=False)
created_by = serializers.SlugRelatedField(queryset=User.objects.all(), required=False, slug_field="username")
changed_by = serializers.SlugRelatedField(queryset=User.objects.all(), slug_field="username")
device_provider = serializers.PrimaryKeyRelatedField(allow_null=True, queryset=GPSDeviceProvider.objects.all(),
required=False)
vehicle = serializers.PrimaryKeyRelatedField(allow_null=True, queryset=Vehicle.objects.all(), required=False)
driver = serializers.PrimaryKeyRelatedField(allow_null=True, queryset=Driver.objects.all(), required=False)
device_provider_data = serializers.SerializerMethodField()
class Meta:
validators = [
UniqueTogetherValidator(queryset=GPSDevice.objects.all(), fields=('device_id', 'device_provider'))]
def get_device_provider_data(self, instance):
if isinstance(instance.device_provider, GPSDeviceProvider):
return {'id': instance.device_provider.id, 'name': instance.device_provider.name}
return {'id': -1, 'name': None}
def to_representation(self, instance):
self.fields['driver'] = DriverSerializer(read_only=True)
return super().to_representation(instance=instance)
def create(self, validated_data):
instance = GPSDevice.objects.create(**validated_data)
return instance
def update(self, instance, validated_data):
GPSDevice.objects.filter(id=instance.id).update(**validated_data)
return GPSDevice.objects.get(id=instance.id)
    def validate_driver_number(self, value):
        if value and not validate_mobile_number(value):
            raise serializers.ValidationError("Not a valid mobile number")
        return value
    def validate_vehicle_number(self, value):
        if value and not validate_vehicle_number(value):
            raise serializers.ValidationError("Not a valid vehicle number")
        return value
class GPSDeviceLogSerializer(serializers.Serializer):
id = serializers.IntegerField(label='ID', read_only=True)
created_on = serializers.DateTimeField(read_only=True)
updated_on = serializers.DateTimeField(read_only=True)
deleted = serializers.BooleanField(required=False)
deleted_on = serializers.DateTimeField(allow_null=True, required=False)
location_id = serializers.CharField(max_length=40,
validators=[UniqueValidator(queryset=GPSDeviceLog.objects.all())])
datetime = serializers.DateTimeField(help_text='log time')
vehicle_id = serializers.CharField(help_text='imei or uuid generated on phone', max_length=50)
latitude = serializers.DecimalField(allow_null=True, decimal_places=10, max_digits=20, required=False)
longitude = serializers.DecimalField(allow_null=True, decimal_places=10, max_digits=20, required=False)
altitude = serializers.FloatField(allow_null=True, required=False)
speed = serializers.FloatField(allow_null=True, required=False)
course = serializers.FloatField(allow_null=True, required=False)
accuracy = serializers.FloatField(allow_null=True, required=False)
engine_on = serializers.BooleanField(required=False)
driver_name = serializers.CharField(allow_null=True, max_length=50, required=False)
driver_number = serializers.CharField(allow_null=True, max_length=20, required=False)
driving_licence_number = serializers.CharField(allow_null=True, max_length=20, required=False)
vehicle_number = serializers.CharField(allow_null=True, max_length=40, required=False)
vehicle_type = serializers.CharField(allow_null=True, max_length=40, required=False)
vehicle_status = serializers.ChoiceField(allow_null=True, choices=(
('unloaded', 'unloaded'), ('loading', 'loading'), ('loaded', 'loaded'), ('unloading', 'unloading')),
required=False)
device = serializers.PrimaryKeyRelatedField(allow_null=True, queryset=GPSDevice.objects.all(), required=False)
def to_representation(self, instance):
self.fields['device'] = GPSDeviceSerializer(read_only=True)
return super().to_representation(instance=instance)
def create(self, validated_data):
instance = GPSDeviceLog.objects.create(**validated_data)
return instance
def update(self, instance, validated_data):
GPSDeviceLog.objects.filter(id=instance.id).update(**validated_data)
return GPSDeviceLog.objects.get(id=instance.id)
class TracknovateGPSDeviceSerializer(serializers.Serializer):
id = serializers.IntegerField(label='ID', read_only=True)
created_on = serializers.DateTimeField(read_only=True)
updated_on = serializers.DateTimeField(read_only=True)
deleted = serializers.BooleanField(required=False)
deleted_on = serializers.DateTimeField(allow_null=True, required=False)
phone = serializers.CharField(max_length=20)
sim_number = serializers.CharField(max_length=20)
vehicle_id = serializers.CharField(max_length=40,
validators=[UniqueValidator(queryset=TracknovateGPSDevice.objects.all())])
driver_name = serializers.CharField(allow_null=True, max_length=50, required=False)
driver_number = serializers.CharField(allow_null=True, max_length=20, required=False)
number_verified = serializers.BooleanField(required=False)
current_duration = serializers.CharField(allow_null=True, required=False, style={'base_template': 'textarea.html'})
current_vstatus = serializers.CharField(allow_null=True, required=False, style={'base_template': 'textarea.html'})
driving_licence_number = serializers.CharField(allow_null=True, max_length=20, required=False)
vehicle_number = serializers.CharField(allow_null=True, max_length=40, required=False)
vehicle_type = serializers.CharField(allow_null=True, max_length=40, required=False)
vehicle_status = serializers.ChoiceField(
choices=(('unloaded', 'unloaded'), ('loading', 'loading'), ('loaded', 'loaded'), ('unloading', 'unloading')),
required=False)
location_time = serializers.DateTimeField(allow_null=True, required=False)
is_active = serializers.BooleanField(required=False)
latitude = serializers.DecimalField(allow_null=True, decimal_places=10, max_digits=20, required=False)
longitude = serializers.DecimalField(allow_null=True, decimal_places=10, max_digits=20, required=False)
inactive_sms_sent_at = serializers.DateTimeField(allow_null=True, required=False)
driver = serializers.PrimaryKeyRelatedField(allow_null=True, queryset=Driver.objects.all(),
required=False)
created_by = serializers.SlugRelatedField(queryset=User.objects.all(), required=False, slug_field="username")
changed_by = serializers.SlugRelatedField(queryset=User.objects.all(), slug_field="username")
def to_representation(self, instance):
self.fields['driver'] = DriverSerializer(read_only=True)
return super().to_representation(instance=instance)
def create(self, validated_data):
instance = TracknovateGPSDevice.objects.create(**validated_data)
return instance
def update(self, instance, validated_data):
TracknovateGPSDevice.objects.filter(id=instance.id).update(**validated_data)
return TracknovateGPSDevice.objects.get(id=instance.id)
class TracknovateGPSDeviceLogSerializer(serializers.Serializer):
id = serializers.IntegerField(label='ID', read_only=True)
created_on = serializers.DateTimeField(read_only=True)
updated_on = serializers.DateTimeField(read_only=True)
deleted = serializers.BooleanField(required=False)
deleted_on = serializers.DateTimeField(allow_null=True, required=False)
datetime = serializers.DateTimeField(help_text='log time')
vehicle_id = serializers.CharField(max_length=50)
latitude = serializers.DecimalField(allow_null=True, decimal_places=10, max_digits=20, required=False)
    longitude = serializers.DecimalField(allow_null=True, decimal_places=10, max_digits=20, required=False)
color=(0, 255, 0) if phase == 'final' else (0, 0, 125), thickness=2)
cv2.imwrite(join(save_dir, in_ + '_t22_4.png'), res_4)
np.save(join(line_save_dir, in_ + '_line_4.npy'), np.array( lines_1 + lines_4))
## Modify t22_3:
## for all the cuts, only take the first.
    ###################################### t22_5 #######################################
lines_5 = []
if add_t22_5:
im_5= np.copy(res_1)
DEBUG = False
for i in range(N2):
if DEBUG:
print("\n-------------------------------------------------------- \n line", i)
im_x = img
#bi_point_indexes = np.nonzero(line2Line[i, :] > 0)[0]
bi_point_indexes = np.nonzero(mutual_T12[i, :] > -1)[0]
single_point_indexes = np.nonzero(back_T12[i, :] > -1)[0]
online_point_indexes = np.nonzero(within_T12[i, :] > -1)[0]
bx, by = line_2_mix[i, 0], line_2_mix[i, 1]
px, py = line_2_mix[i, 2], line_2_mix[i, 3]
k = line_2_map_junction[i]
if distance((px, py), (bx, by)) < 0.05 * max_side_length:
lines_5.append((px, py, bx, by))
im_5 = addLines(im_5, [(px, py, bx, by)], display_ = False, rand_color=False, color=(40, 255, 0), thickness=2)
if DEBUG:
showIm(img=im_x, name='Line i')
showIm(img=im_5, name='Line 3')
continue
if len(bi_point_indexes) == 0 and len(online_point_indexes) == 0:
im_x = addPoints(im_x, junctions[k:k+1], display_=False, thickness=3, color=(255, 0, 0))
im_x = addLines(im_x, line_2_mix[i:i+1], display_ = False, rand_color=False, color=(0, 255, 0), thickness=2)
ratio, max_loc, _ = pixelRatio((px, py), (bx, by), m3)
if ratio < 0.05:
continue
bx, by = max_loc
intersects = intersections(np.array([px, py, bx, by], dtype=np.float32), np.array(lines_1, dtype=np.float32))
if DEBUG:
im_5 = addPoints(im_5, intersects, display_=False, thickness=3, color=(0, 0, 255))
im_x = addPoints(im_x, intersects, display_=False, thickness=3, color=(0, 0, 255))
if DEBUG:
print("Intersects.", len(intersects))
start_list =[(px, py)]
end_list = []
for int_ in intersects:
start_list.append(int_)
end_list.append(int_)
end_list.append((bx, by))
valid_interval_idx = []
next_start = None
previous_end = None
first_flag = True
for idx, (start, end) in enumerate(zip(start_list, end_list)):
if not first_flag:
break
if next_start is None:
next_start = start
line_ratio, max_pos_idx, max_pos_loc = pixelRatio(start, end, m3)
if DEBUG:
print(" distance of start, end ", distance(start, end), " ratio: ", line_ratio)
adding_line = False
valid_cond = line_ratio > 0.6 and max_pos_loc > 0.75
if idx == 0:
if distance(start, end) < max_side_length/20. or valid_cond:
adding_line = True
elif valid_cond:
adding_line = True
if adding_line:
#if DEBUG:
# print "distance of start, end ", distance(start, end), " ratio: ", line_ratio
previous_end = end
im_x = addLines(im_x, [(start[0], start[1], end[0], end[1])], display_ = False, rand_color=False, color=(255, 0, 255), thickness=2)
im_5 = addLines(im_5, [(start[0], start[1], end[0], end[1])], display_ = False, rand_color=False, color=(40, 255, 0), thickness=2)
lines_5.append((start[0], start[1], end[0], end[1]))
if DEBUG:
showIm(img=im_x, name='Line i')
showIm(img=im_5, name='Line 3')
else:
#if previous_end is not None:
# lines_3.append((next_start[0], next_start[1], previous_end[0], previous_end[1]))
if idx == 0:
first_flag = False
next_start = None
previous_end = None
elif len(bi_point_indexes) == 0 and len(online_point_indexes) > 0:
im_x = addPoints(im_x, junctions[k:k+1], display_=False, thickness=3, color=(255, 0, 0))
im_x = addLines(im_x, line_2_mix[i:i+1], display_ = False, rand_color=False, color=(0, 255, 0), thickness=2)
#perps = [perp_points_12[p_idx, i] for p_idx in online_point_indexes]
im_x = addPoints(im_x, junctions[online_point_indexes], display_=False, thickness=3, color=(0, 0, 255))
online_point_indexes = online_point_indexes.tolist()
online_point_indexes.sort(key=lambda x:distance(perp_points_12[x, i], (px, py)))
ratio, max_loc, _ = pixelRatio((px, py), (bx, by), m3)
if ratio < 0.05:
continue
bx, by = max_loc
start_list =[(px, py)]
end_list = []
if DEBUG:
print("online points: ", len(online_point_indexes))
for on_idx in online_point_indexes:
start_list.append(perp_points_12[on_idx, i])
end_list.append(perp_points_12[on_idx, i])
end_list.append((bx, by))
next_start = None
previous_end = None
for idx, (start, end) in enumerate(zip(start_list, end_list)):
if next_start is None:
next_start = start
line_ratio, max_pos_idx, max_pos_loc = pixelRatio(start, end, m3)
if DEBUG:
print(" distance of start, end ", distance(start, end), " ratio: ", line_ratio)
adding_line = False
valid_cond = line_ratio > 0.6 and max_pos_loc > 0.75
if idx == 0:
if distance(start, end) < max_side_length/20. or valid_cond:
#lines_3.append((start[0], start[1], end[0], end[1]))
adding_line = True
elif valid_cond:
#lines_3.append((start[0], start[1], end[0], end[1]))
adding_line = True
if adding_line:
previous_end = end
#if DEBUG:
# print "distance of start, end ", distance(start, end), " ratio: ", line_ratio
im_x = addLines(im_x, [(start[0], start[1], end[0], end[1])], display_ = False, rand_color=False, color=(255, 0, 255), thickness=2)
im_5 = addLines(im_5, [(start[0], start[1], end[0], end[1])], display_ = False, rand_color=False, color=(40, 255, 0), thickness=2)
lines_5.append((start[0], start[1], end[0], end[1]))
if DEBUG:
showIm(img=im_x, name='Line i')
showIm(img=im_5, name='Line 3')
else:
#if previous_end is not None:
# lines_3.append((next_start[0], next_start[1], previous_end[0], previous_end[1]))
next_start = None
previous_end = None
if DEBUG:
showIm(img=im_x, name='Line i')
if save_t22_5:
print("lines_5: ", len(lines_5))
res_5 = addLines(res_1, lines_5, display_ = False, rand_color=False, color=(0, 255, 0) if phase == 'final' else (122, 0, 125), thickness=2)
cv2.imwrite(join(save_dir, in_ + '_t22_5.png'), res_5)
np.save(join(line_save_dir, in_ + '_line_5.npy'), np.array( lines_1 + lines_5))
def intersections(line, lines_all):
p = line[:2]
intersects = []
for la in lines_all:
intersect = intersectionOfTwoLines(line, la)
if intersect is None :
continue
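        # Keep only intersections lying strictly inside both segments: the two inner
        # products below are negative exactly when the intersection point falls between
        # the endpoints of each line.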
ip1 = np.inner(line[:2] - intersect, line[2:] - intersect)
ip2 = np.inner(la[:2] - intersect, la[2:] - intersect)
if ip1 >= 0 or ip2 >= 0:
continue
intersects.append(intersect)
intersects.sort(key=lambda x:((x[0] - p[0])**2 + (x[1] - p[1])**2))
nodup = []
for i, it_ in enumerate(intersects):
dup_flag = False
dup_idx = None
for j, nd_ in enumerate(nodup):
if distance(it_, nd_) < 3.:
dup_flag = True
dup_idx = j
break
if not dup_flag:
nodup.append(it_)
return nodup
def pixelRatio(p1, p2, M):
    # Fraction of "on" pixels of map M along the segment p1 -> p2, plus the location of
    # the farthest "on" pixel and how far along the segment it sits (see ratioSeq below).
    coords = linespace(p1, p2, M.shape)
    if len(coords[0]) == 0:
        return 0., p1, 0.
    map_value = M[coords]
    ratio, max_idx_loc, max_idx = ratioSeq(map_value)
    max_idx = int(max_idx)
    locx = coords[1][max_idx]
    locy = coords[0][max_idx]
    return ratio, (locx, locy), max_idx_loc
def ratioSeq(seq):
num = len(seq)
nz_indexes = np.nonzero(seq)[0]
if len(nz_indexes) == 0:
return 0., 0., 0.
max_idx = nz_indexes.max()
#print "num of numzeros ", len(nz_indexes), " max_idx: ", max_idx
return float(len(nz_indexes)) / float(max_idx + 1), float(max_idx + 1)/ float(num), max_idx
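# Worked example for ratioSeq: for seq = [1, 1, 0, 1, 0, 0] the nonzero entries sit at
# indexes {0, 1, 3}, so it returns (3/4, 4/6, 3): the fill ratio up to and including the
# last nonzero entry, how far along the sequence that entry sits, and its index.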
def linespace(p1, p2, shape):
x1, y1 = p1
x2, y2 = p2
h, w = shape
x1, y1 = int(x1), int(y1)
x2, y2 = int(x2), int(y2)
x1 = min(max(x1, 0), w - 1)
x2 = min(max(x2, 0), w - 1)
y1 = min(max(y1, 0), h - 1)
y2 = min(max(y2, 0), h - 1)
#xlist = np.arange(x1, x2, 1)
#ylist = np.arange(y1, y2, 1)
num_x = max(x1, x2) - min(x1, x2) + 1
num_y = max(y1, y2) - min(y1, y2) + 1
#print w, h, x1, x2, y1, y2
if num_x < num_y:
xlist = np.linspace(x1, x2, num=num_y)
ylist = np.linspace(y1, y2, num=num_y)
else:
xlist = np.linspace(x1, x2, num=num_x)
ylist = np.linspace(y1, y2, num=num_x)
xlist = xlist.astype(np.int32)
ylist = ylist.astype(np.int32)
ylist[ylist > (h -1)] = h -1
xlist[xlist > (w - 1)] = w - 1
coords = np.vstack((ylist, xlist))
return tuple(coords)
def test_linespace():
    # Smoke test only; the image shape argument is an arbitrary placeholder.
    linespace((1, 100), (60, 70), (128, 128))
def twoLinesIntersectInMiddle(A, B):
intersect = intersectionOfTwoLines(A, B)
if intersect is None :
return False
ip1 = np.inner(A[:2] - intersect, A[2:] - intersect)
ip2 = np.inner(B[:2] - intersect, B[2:] - intersect)
if ip1 <=0 and ip2 <=0:
return True
else:
return False
def intersectionWithIntervals(in_, intervals):
    # True if the interval in_ = (xs, xe) overlaps any of the given intervals.
    xs, xe = in_
    def getoverlap(b):
        return max(0, min(xe, b[1]) - max(xs, b[0]))
    for it in intervals:
        if getoverlap(it) > 0:
            return True
    return False
def angleBetweenLines(l1, l2):
x = l1[:2] - l1[2:4]
y = l2[:2] - l2[2:4]
theta = np.arccos( np.clip(innerProduct(x, y)/(np.linalg.norm(x) * np.linalg.norm(y) + eps), -1., 1.)) * 180./ pi
return np.minimum(theta, 180. - theta)
def angleBetweenLinesMatrix(L1, L2):
M, N = L1.shape[0], L2.shape[0]
x = L1[:, :2] - L1[:, 2:4]
y = L2[:, :2] - L2[:, 2:4]
x = x[:, np.newaxis, :]
y = y[np.newaxis, :, :]
theta = np.arccos( np.clip(innerProduct(x, y)/(np.linalg.norm(x, axis=-1) * np.linalg.norm(y, axis=-1) + eps), -1., 1.)) * 180./ pi
return np.minimum(theta, 180. - theta)
def cutLines(connection_matrix, within_matrix, lines, points, img=None, DEBUG=False):
M = lines.shape[0]
N = points.shape[0]
assert connection_matrix.shape == (M, N)
cut_lines = np.copy(lines)
for i in range(M):
x1, y1, x2, y2 = lines[i].tolist()
point_indexes = within_matrix[i, :]
point_indexes = np.nonzero(point_indexes==True)[0]
if len(point_indexes) <= 1:
continue
else:
lengths = [(k, (x1 - points[k][0])**2 + (y1 - points[k][1])**2) for k in point_indexes]
lengths.sort(key=lambda x:x[1])
#endpoint_idx = point_indexes[max_idx]
#endpoint_idx = lengths[0 if len(lengths) == 1 else 1][0]
#endpoint_idx = lengths[len(lengths)/2][0]
endpoint_idx = lengths[0][0]
im = addLines(img, lines[i:i+1], display_ = False, color=(0, 0, 255))
im = addPoints(im, [points[k] for k in point_indexes], display_=False if | |
#!/bin/python3
import os
import subprocess
import types
import copy
import re
import json
import datetime
import asyncio
import discord
def findmatchlist(pattern, string):
matches = []
results = re.finditer(pattern, string)
for result in results:
matches.append(result)
return matches
def deleteallmatches(pattern, string):
pattern = pattern if pattern is not None else r'(<[@#][!&]?[0-9]+>|@[A-Za-z0-9]+)'
results = re.finditer(pattern, string)
index = 0
newstring = ''
for result in results:
newstring += string[index:result.start()]
index = result.end()
newstring += string[index:]
return newstring
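# Example: with the default pattern, deleteallmatches(None, 'hi <@123> there') strips the
# mention token and returns 'hi  there' (only the token is removed, surrounding spaces stay).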
class HimBotException(Exception):
'''Base class for Exceptions raised by HimBotClient module.'''
pass
class NameException(HimBotException):
'''Added subcommand to target command where command name key already exists.'''
def __init__(self, supercommandname, subcommandname):
super().__init__('Key ' + subcommandname + ' exists in ' + supercommandname + '.')
class PermissionException(HimBotException):
'''User invoked command with no applicable permission.'''
def __init__(self, commandname):
super().__init__('You do not have permission for command: `' + commandname + '`')
class ParameterException(HimBotException):
'''User invoked command with missing parameter.'''
def __init__(self, commandname):
super().__init__('Missing command after: `' + commandname + '`\nType `@HimBot help [command]` for more information.')
class UnknownException(HimBotException):
'''User invoked command with unknown parameter or extra parameter.'''
def __init__(self, commandname):
super().__init__('Unknown command: `' + commandname + '`\nType `@HimBot help [command]` for more information.')
class PermissionItem:
    '''Data container for a permission.
    All id fields are integers.
    A None field represents 'Any'.
    Thus, PermissionItem() grants access to anyone in any channel in any server.
    '''
def __init__(self, guildid = None, channelid = None, *, userid = None, roleid = None):
self.guildid = guildid
self.channelid = channelid
self.userid = userid
self.roleid = roleid
def check(self, textchannel, member):
        '''Checks if the permission is valid in the given channel for the given user.
        textchannel must be of type discord.TextChannel and member must be of type discord.Member.'''
assert type(textchannel) is discord.TextChannel and type(member) is discord.Member
if self.guildid is None or self.guildid == textchannel.guild.id:
if self.channelid is None or self.channelid == textchannel.id:
if self.userid is None or self.userid == member.id:
if self.roleid is None:
return True
for role in member.roles:
if self.roleid == role.id:
return True
return False
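# Illustrative sketch only: how permission scopes can be built. The integer ids below are
# hypothetical placeholders, not real guild/channel/role ids.
def _example_permission_scopes():
    anyone = PermissionItem()  # no restriction: any user, any channel, any guild
    one_guild = PermissionItem(111111111111111111)  # any channel/user within one guild
    role_in_channel = PermissionItem(111111111111111111, 222222222222222222,
                                     roleid=333333333333333333)  # one role in one channel
    # .check(textchannel, member) later decides whether a message satisfies a scope
    return anyone, one_guild, role_in_channel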
class CommandItem:
    '''Represents a command at a single level.
    name must be a single word with no whitespace.
    aliases should be a list/tuple/set of strings or None.
    description should be a string or None.
    perm describes the command permission and can be one of the following types:
        None to use the parent permission,
        True to grant all access,
        False to deny all access,
        a PermissionItem or a list/tuple/set of PermissionItem for permission specs.
    func should be a function or instance method, which can be a coroutine, or None.
    accepts describes the function's parameter behavior and can be one of the following types:
        False to indicate no additional parameters (except those of the subcommand),
        True to indicate any additional parameters,
        or an integer to indicate the number of additional parameters.
        This has no effect if func is None.
    supercommand should be a CommandItem or None.
    subcommands should be a list/tuple/set of CommandItem or None.
    '''
def __init__(self, name, *, aliases = None, description = None, perm = None, func = None, accepts = False, subcommands = None):
self.name = name
self.aliases = aliases
self.description = description
self.perm = perm
self.func = func
self.accepts = accepts
self.fullname = self.name
self.supercommand = None
self.subcommands = {}
self.subcommandaliases = {}
if subcommands is not None:
self.add(*subcommands)
def __str__(self):
return self.fullname
def __len__(self):
return len(self.subcommands)
def __contains__(self, subcommandname):
return subcommandname in self.subcommands or subcommandname in self.subcommandaliases
def __repr__(self):
return self.fullname
def __copy__(self):
item = CommandItem(self.name, aliases = self.aliases, description = self.description, perm = self.perm, func = self.func, accepts = self.accepts)
item.subcommands = self.subcommands
item.subcommandaliases = self.subcommandaliases
return item
def __deepcopy__(self, memo = None):
pass
def __deepcopy1__(self, memo = None):
memo[id(self)] = self
def setSuper(self, supercommand, memo = None):
        '''Sets the supercommand and updates this command's and all subcommands' fullname.
        This command must already be a subcommand of supercommand.'''
#if memo is None:
# memo = set()
#assert self not in memo
#memo.add(self)
self.supercommand = supercommand
assert self.supercommand.has(self.name)
self.fullname = self.name if self.supercommand is None else supercommand.fullname + ' ' + self.name
for command in self.subcommands.values():
command.setSuper(self, memo)
def add(self, *subcommands):
'''Adds subcommand and alias entry.
subcommand's supercommand field is also updated.'''
for subcommand in subcommands:
if self.has(subcommand.name):
raise NameException(self.name, subcommand.name)
else:
self.subcommands[subcommand.name] = subcommand
subcommand.setSuper(self)
if subcommand.aliases is not None:
for alias in subcommand.aliases:
if self.has(alias):
raise NameException(self.name, alias)
else:
self.subcommandaliases[alias] = subcommand.name
def get(self, subcommandname):
'''Get CommandItem that matches subcommandname'''
return self.subcommands[subcommandname] if subcommandname in self.subcommands else self.subcommands[self.subcommandaliases[subcommandname]]
def has(self, subcommandname):
        '''Check if subcommandname is a valid subcommand key of this command'''
return subcommandname in self.subcommands or subcommandname in self.subcommandaliases
def check(self, message):
        '''Check if the given message's information meets the permission spec.
        message must come from a guild, i.e. message.channel is a discord.TextChannel and message.author is a discord.Member.'''
if self.perm is None:
return False if self.supercommand is None else self.supercommand.check(message)
elif type(self.perm) is bool:
return self.perm
elif type(self.perm) is PermissionItem:
return self.perm.check(message.channel, message.author)
elif type(self.perm) is dict:
for permname in self.perm:
                if self.perm[permname].check(message.channel, message.author):
return True
return False
elif type(self.perm) in (list, set, tuple):
for perm in self.perm:
if perm.check(message.channel, message.author):
return True
return False
else:
return False
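# Illustrative sketch only: a minimal two-level command tree. The names and the lambda are
# placeholders; the real tree is built inside HimBotClient.__init__ below.
def _example_command_tree():
    root = CommandItem('demo', perm=True, subcommands=[
        CommandItem('ping', aliases=['p'], description='Reply with pong.',
                    func=lambda *_, **__: 'pong')])
    assert root.has('ping') and root.has('p')     # lookup works by name or by alias
    assert root.get('p').fullname == 'demo ping'  # fullname chains the supercommand names
    return root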
def getSubprocessSSHCommand(*remotecommand):
commands = ['ssh', '-i', '~/.ssh/id_rsa_nopasswd', '192.168.1.31']
for item in remotecommand:
commands.append(item)
return commands
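# The argv returned above is presumably passed to subprocess, e.g.
# subprocess.run(getSubprocessSSHCommand('"SetServerProcessPriorityNormal"')); the key path
# and host are deployment-specific values.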
class HimBotClient(discord.Client):
    '''HimBotClient serves user commands to automate remote command tasks.
    commandlist is a reference to the root CommandItem that functions as the start point for the tree search. The root item is expected to have no functionality of its own.
    All command sets should be defined as child CommandItems of commandlist.
    iddata contains the integer ids for guild, channel, user, and role. It is useful to have this information beforehand, as no connection is established yet when the object is constructed.
    permdata contains integer pairs of guild, channel, user, and role. It is used to construct PermissionItems.
    rootoverride is a boolean indicating that root, that is the application owner, overrides the whole permission structure.
    owneroverride is a boolean indicating that the guild owner overrides all permissions in that guild.
    adminoverride is a boolean indicating that admins override all permissions in that guild.
'''
def __init__(self, iddata=None, permdata=None):
super().__init__()
self.appinfo = None
self.iddata = iddata
self.rootoverride = True
self.owneroverride = False
self.adminoverride = False
self.memberperm = PermissionItem(self.iddata['guildid'], self.iddata['primarychannelid'], roleid = self.iddata['userroleid'])
self.subcommandgroup = {
'minecraft': [
CommandItem('spigot', aliases = ['plugin'], func = lambda *_, **__: '"StartMinecraftServerSpigot"'),
CommandItem('vanilla', aliases = ['original'], func = lambda *_, **__: '"StartMinecraftServerVanilla"') ]
}
self.commandlist = CommandItem('@HimBot', description = 'Type `@HimBot help [command]` for more information.', perm = True, subcommands = [
CommandItem('help', description = 'Get information about command.', aliases = [None], perm = True, accepts = True, func = self.cmdHelp),
CommandItem('status', description = 'Get HimBot status.', aliases = ['info', 'version'], perm = True, func = self.cmdStatus),
CommandItem('link', description = 'Get server invitation link.', perm = True, func = self.cmdLink, subcommands = [
CommandItem('raw', description = 'Format invitation as raw text.', func = lambda *_, **__: True) ]),
CommandItem('server', description = 'Controls server computer.', perm = self.memberperm, subcommands = [
CommandItem('wakeup', func = self.cmdServer31Wakeup),
CommandItem('shutdown', perm = False, func = self.cmdServer31Shutdown),
CommandItem('priority', func = self.cmdServer31Priority, subcommands = [
CommandItem('normal', aliases = [None], func = lambda *_, **__: '"SetServerProcessPriorityNormal"'),
CommandItem('minecraft', func = lambda *_, **__: '"SetServerProcessPriorityMinecraft"') ]),
CommandItem('status', func = self.cmdServer31Status) ]),
CommandItem('minecraftserver', description = 'Controls minecraft server.', aliases = ['mcserver', 'minecraft'], perm = self.memberperm, subcommands = [
CommandItem('start', func = self.cmdMinecraftServer31Start, subcommands = copy.copy(self.subcommandgroup['minecraft'])),
CommandItem('stop', func = self.cmdMinecraftServer31Stop, subcommands = copy.copy(self.subcommandgroup['minecraft'])),
CommandItem('priority', func = lambda cmd, res: self.cmdMinecraftServer31Priority(cmd, '"SetServerProcessPriorityMinecraft"')),
CommandItem('status', func = self.cmdMinecraftServer31Status) ])
])
self.versionstring = 'HimBot-DiscordClient v1.2.0'
self.starttime = datetime.datetime.now()
def checkRootPerm(self, user):
return self.rootoverride and user.id == self.appinfo.owner.id
def checkOwnerPerm(self, user):
return self.owneroverride and type(user) is discord.Member and user.guild.owner.id == user.id
def checkAdminPerm(self, user):
if self.adminoverride and type(user) is discord.Member:
            if user.guild_permissions.administrator:
return True
for role in user.roles:
if role.permissions.administrator:
return True
return False
def checkClientMentionString(self, string):
#return self.user.mention == string
return re.fullmatch('^<@!?' + str(self.user.id) + '>$', string)
async def on_ready(self):
        self.commandlist.aliases = [self.user.mention]
self.appinfo = await self.application_info()
print('Logged on as', self.user)
async def on_disconnect(self):
print('Disconnected!')
async def on_resumed(self):
print('Resumed!')
async def on_message(self, message):
'''Determines if sent message is command to HimBot.
If the message is command for HimBot, further analyze command.
If the command chain returns string or list of strings, it will be sent to the same text channel.
If the command chain raises exception, the exception message will be sent as the reply, mentioning the author.
'''
#print(message)
if message.author == self.user \
or type(message.channel) is not discord.TextChannel or type(message.author) is not discord.Member:
return
commands = message.content.lower().split()
print(commands)
if len(commands) > 0 and self.checkClientMentionString(commands[0]) \
and len(message.mentions) > 0 and self.user in message.mentions:
try:
# run down through command chain
result = await self.runCommand(message, commands, self.commandlist)
if type(result) in (list, tuple, set):
for item in result:
await message.channel.send(item)
else:
await message.channel.send(result)
#await message.reply(result)
print(' * Successful')
except HimBotException as exception:
await message.reply(str(exception))
print(' * Failed with ', type(exception))
except Exception as exception:
await message.reply('Internal error occurred.')
raise exception
async def runCommand(self, message, commands, commandlist):
'''Recursively analyze and run the given command list.
message must be from TextChannel with Member as author.
commands is list of string that contains the commands. This list cannot be empty, and each string is expected to be a single word.
commandlist is a reference to current root node to look up next subcommand.
'''
#print('\t', commands)
assert len(commands) > 0
if commandlist.check(message) \
or self.checkAdminPerm(message.author) or self.checkOwnerPerm(message.author) or self.checkRootPerm(message.author):
# a given parameter
if len(commands) > 1:
# a subcommand list
if len(commandlist) > 0:
# subcommand match
if commandlist.has(commands[1]):
result = await self.runCommand(message, commands[1:], commandlist.get(commands[1]))
# subcommand mismatch
else:
raise UnknownException(commands[1])
# no subcommand list
else:
# no function or no additional parameter
if commandlist.func is None or not commandlist.accepts:
raise UnknownException(commands[1])
# expected function | |
% self.layer_number)
print("Num Trans: ", num_trans, ", Num Replay Goals: ", num_replay_goals)
# For each selected transition, update the goal dimension of the selected transition and all prior transitions by using the next state of the selected transition as the new goal. Given new goal, update the reward and finished boolean as well.
for index in range(num_trans):
for i in range(num_replay_goals):
if i == num_replay_goals -1:
future_index = num_trans-1
else:
future_index = np.random.randint(index, num_trans)
new_global_goal = torch.clone(self.temp_goal_replay_storage[future_index][6])
trans_copy = [None if item is None else torch.clone(item) for item in self.temp_goal_replay_storage[index]]
# Update goal to new goal
if self.last_layer and self.FLAGS.vpn:
trans_copy[8] = torch.stack([trans_copy[8], env.pos_image(new_global_goal, trans_copy[8])], dim=0)
if self.relative_subgoals:
state_pos = project_state(env, self.FLAGS, self.layer_number, trans_copy[0])
trans_copy[4] = (new_global_goal - state_pos)
else:
trans_copy[4] = new_global_goal
# Update reward
trans_copy[2] = self.get_reward(new_global_goal, trans_copy[6], goal_thresholds)
# Update finished boolean based on reward
if trans_copy[2] == 0:
trans_copy[5] = True
else:
trans_copy[5] = False
# Add finished transition to replay buffer
if self.FLAGS.all_trans or self.FLAGS.HER:
print("\nNew Goal: ", new_global_goal)
print("Upd Trans %d: " % index, trans_copy)
self.replay_buffer.add(trans_copy)
# Clear storage for preliminary goal replay transitions at end of goal replay
self.temp_goal_replay_storage = []
    # Create a transition penalizing the subgoal if necessary. The target Q-value when this transition is used will ignore the next state, as the finished boolean = True. Change the finished boolean to False if you would like the subgoal penalty to depend on the next state.
def penalize_subgoal(self, subgoal, next_state, high_level_goal_achieved, action_label):
transition = [self.current_state, subgoal, self.subgoal_penalty, next_state, self.goal, True, None, action_label, self.current_goal_image]
if self.FLAGS.all_trans or self.FLAGS.penalty:
print("Level %d Penalty Trans: " % self.layer_number, transition)
self.replay_buffer.add(self.copy_transition(transition))
# Determine whether layer is finished training
def return_to_higher_level(self, max_lay_achieved, agent, env, attempts_made):
        # Return to the higher level if (i) a higher level goal has been reached, (ii) the episode's time steps are maxed out (env.max_actions), (iii) not testing and the layer is out of attempts, or (iv) testing, the layer is not the highest level, and the layer is out of attempts. NOTE: during testing, the highest level will continue to output subgoals until either (i) the maximum number of episode time steps or (ii) the end goal has been achieved.
        # Return to the previous level when any higher level goal is achieved. NOTE: if not testing and the agent achieves the end goal, training will continue until out of time (i.e., out of time steps or the highest level runs out of attempts). This allows the agent to experience being around the end goal.
if max_lay_achieved is not None and max_lay_achieved >= self.layer_number:
return True
if not env.healthy:
return True
# Return when out of time
elif agent.steps_taken >= env.max_actions:
return True
# Return when layer has maxed out attempts
elif not agent.FLAGS.test and attempts_made >= self.time_limit:
return True
# NOTE: During testing, agent will have env.max_action attempts to achieve goal
elif agent.FLAGS.test and self.layer_number < agent.FLAGS.layers-1 and attempts_made >= self.time_limit:
return True
else:
return False
def transform_path(self, path, offset):
for node in path:
node[0] -= offset
node[1] -= offset
return path
# Learn to achieve goals with actions belonging to appropriate time scale. "goal_array" contains the goal states for the current layer and all higher layers
def train(self, agent, env, metrics, subgoal_test = False, episode_num = None):
# print("\nTraining Layer %d" % self.layer_number)
# Set layer's current state and new goal state
self.goal = agent.goal_array[self.layer_number].clone()
self.current_state = agent.current_state
if self.last_layer and self.FLAGS.vpn:
self.current_image = self.to_torch(env.take_snapshot())
self.current_goal_image = torch.stack([self.current_image, env.pos_image(self.goal, self.current_image)], dim=0)
# Reset flag indicating whether layer has ran out of attempts. This will be used for subgoal testing.
self.maxed_out = False
# Display all subgoals if visualizing training and current layer is bottom layer
if self.layer_number == 0 and (agent.FLAGS.show or agent.FLAGS.save_video) and agent.FLAGS.layers > 1:
env.display_subgoals([arr.cpu().numpy() for arr in agent.goal_array], agent.FLAGS)
        # Current layer has self.time_limit attempts to reach its goal state.
self.attempts_made = 0
while True:
# Select action to achieve goal state using epsilon-greedy policy or greedy policy if in test mode
action, next_subgoal_test = self.choose_action(agent, env, subgoal_test)
if self.layer_number > 0 and not (self.FLAGS.humanoid or self.FLAGS.inject_her_policy):
subgoal_distance = torch.norm(action[:2] if self.relative_subgoals else (action[:2] - self.current_state[:2])).item()
self.agg_metrics['subgoal_distances'].append(subgoal_distance)
goal_subgoal_distance = torch.norm((self.goal[:2] - self.current_state[:2] - action[:2]) if self.relative_subgoals else (self.goal[:2] - action[:2])).item()
self.agg_metrics['goal_subgoal_distance'].append(goal_subgoal_distance)
lower_layer = agent.layers[self.layer_number-1]
lower_action = lower_layer.actor.get_action(self.current_state.unsqueeze(0), action.unsqueeze(0), None)[0]
lower_Q = lower_layer.critic.get_Q_value(self.current_state.unsqueeze(0), action.unsqueeze(0), lower_action.unsqueeze(0), None).item()
self.agg_metrics['lower_Q_val'].append(lower_Q)
# If next layer is not bottom level, propose subgoal for next layer to achieve and determine whether that subgoal should be tested
if self.layer_number > 0:
action_copy = action.clone()
if self.FLAGS.relative_subgoals and self.last_layer:
action_copy -= project_state(env, self.FLAGS, self.layer_number, self.current_state)
agent.goal_array[self.layer_number - 1] = action_copy
goal_status, max_lay_achieved = agent.layers[self.layer_number - 1].train(agent, env, metrics, next_subgoal_test, episode_num)
if self.last_layer and self.FLAGS.learn_sigma and not agent.FLAGS.test:
self.actor.bandit.store_reward(agent.layers[self.layer_number - 1].attempts_made)
# If layer is bottom level, execute low-level action
else:
next_state = self.to_torch(env.execute_action(action.cpu().numpy()))
if self.FLAGS.save_video:
real_image = render_image_for_video(env, self.FLAGS, agent, next_state)
agent.image_path.append(real_image)
# Increment steps taken
agent.steps_taken += 1
if not self.FLAGS.test:
agent.total_steps_taken += 1
if agent.steps_taken >= env.max_actions:
print("Out of actions (Steps: %d)" % agent.steps_taken)
agent.current_state = next_state
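                # With relative subgoals, stored goals are expressed relative to the agent's
                # position, so after the step each goal is shifted by (old position - new
                # position) to keep its absolute target unchanged.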
if self.FLAGS.relative_subgoals:
for i_layer in range(self.FLAGS.layers-1):
old_pos = project_state(env, self.FLAGS, i_layer, self.current_state)
new_pos = project_state(env, self.FLAGS, i_layer, agent.current_state)
agent.goal_array[i_layer] = agent.goal_array[i_layer] + old_pos - new_pos
# Determine whether any of the goals from any layer was achieved and, if applicable, the highest layer whose goal was achieved
goal_status, max_lay_achieved = agent.check_goals(env)
self.attempts_made += 1
# Perform hindsight learning using action actually executed (low-level action or hindsight subgoal)
if self.layer_number == 0:
hindsight_action = action
else:
# If subgoal action was achieved by layer below, use this as hindsight action
if goal_status[self.layer_number-1]:
hindsight_action = action
# Otherwise, use subgoal that was achieved in hindsight
else:
if self.relative_subgoals:
hindsight_action = torch.clone(env.project_state_to_subgoal(env.sim, agent.current_state) - env.project_state_to_subgoal(env.sim, self.current_state))
else:
hindsight_action = torch.clone(env.project_state_to_subgoal(env.sim, agent.current_state))
# Next, create hindsight transitions if not testing and env still healthy
if not agent.FLAGS.test and env.healthy:
if self.sl_oracle:
transition = [self.current_state, action, 0, agent.current_state, self.goal, True, None, oracle_action(self.FLAGS, self.current_state, self.goal, env)]
self.replay_buffer.add(self.copy_transition(transition))
else:
# Create action replay transition by evaluating hindsight action given current goal
self.perform_action_replay(hindsight_action, agent.current_state, goal_status, oracle_action(self.FLAGS, self.current_state, self.goal, env) if self.semi_oracle else None)
# Create preliminary goal replay transitions. The goal and reward in these transitions will be finalized when this layer has run out of attempts or the goal has been achieved.
self.create_prelim_goal_replay_trans(hindsight_action, agent.current_state, env, agent.FLAGS.layers)
# Penalize subgoals if subgoal testing and subgoal was missed by lower layers after maximum number of attempts
if self.layer_number > 0 and next_subgoal_test and agent.layers[self.layer_number-1].maxed_out:
self.penalize_subgoal(action, agent.current_state, goal_status[self.layer_number], oracle_action(self.FLAGS, self.current_state, self.goal, env) if self.semi_oracle else None)
# Penalize subgoals for the highest level if always penalization on and the lower layers ran out of attempts.
elif self.last_layer and self.FLAGS.always_penalize and agent.layers[self.layer_number-1].maxed_out:
self.penalize_subgoal(action, agent.current_state, goal_status[self.layer_number], oracle_action(self.FLAGS, self.current_state, self.goal, env) if self.semi_oracle else None)
# Penalize subgoals if the lower level think the goal is reachable, but it couldn't reach it. Probably a wall.
elif self.last_layer and self.FLAGS.Q_penalize:
lower_layer = agent.layers[self.layer_number-1]
action_copy = action.clone()
if self.FLAGS.relative_subgoals:
action_copy -= project_state(env, self.FLAGS, self.layer_number, self.current_state)
lower_action,_ = lower_layer.actor.get_target_action(self.current_state.unsqueeze(0), action_copy.unsqueeze(0), None)
lower_Q_val = lower_layer.critic.get_target_Q_value(self.current_state.unsqueeze(0), action_copy.unsqueeze(0), lower_action, None).item()
if lower_Q_val >= -self.FLAGS.time_scale+2 and agent.layers[self.layer_number-1].maxed_out:
self.penalize_subgoal(action, agent.current_state, goal_status[self.layer_number], oracle_action(self.FLAGS, self.current_state, self.goal, env) if self.semi_oracle else None)
elif not agent.FLAGS.test and not env.healthy and self.layer_number == 0:
self.penalize_subgoal(action, agent.current_state, goal_status[self.layer_number], oracle_action(self.FLAGS, self.current_state, self.goal, env) if self.semi_oracle else None)
# Print summary of transition
if agent.FLAGS.verbose:
print("\nEpisode %d, Level %d, Attempt %d" % (episode_num, self.layer_number, self.attempts_made))
# print("Goal Array: ", agent.goal_array, "Max Lay Achieved: ", max_lay_achieved)
print("Old State: ", self.current_state)
print("Hindsight Action: ", hindsight_action)
print("Original Action: ", action)
print("Next State: ", agent.current_state)
print("Goal: ", self.goal)
if self.layer_number == agent.FLAGS.layers - 1:
print("Hindsight Goal: ", env.project_state_to_end_goal(env.sim, agent.current_state))
else:
print("Hindsight Goal: ", env.project_state_to_subgoal(env.sim, agent.current_state))
print("Goal Status: ", goal_status, "\n")
print("All Goals: ", agent.goal_array)
# Update state of current layer
self.current_state = agent.current_state
if self.relative_subgoals:
self.goal = agent.goal_array[self.layer_number].clone()
if self.layer_number == 0 and (agent.FLAGS.show or agent.FLAGS.save_video) and agent.FLAGS.layers > 1:
env.display_subgoals([arr.cpu().numpy() for arr | |
#
# This file just serves to give a better intuition of how we conducted batch experiments
# It is not strictly a part of the DBH infrastructure, just an automation layer one level above it
#
import os
import copy
import dbh
shared = {
'csv': 'dataset.csv',
'label': 'BUG',
'clean': False,
'seed': 1337,
'output': os.path.abspath('output'),
'device': '/device:CPU:0',
'log_device': False,
'calc_completeness': True
}
data_steps = [
# preprocess method
# {
# 'preprocess': [['labels', 'binarize']],
# },
# {
# 'preprocess': [['features', 'normalize'], ['labels', 'binarize']],
# },
{
'preprocess': [['features', 'standardize'], ['labels', 'binarize']],
'resample': 'down',
'resample_amount': 100
},
# resample method/amount
# {
# 'preprocess': [['features', 'standardize'], ['labels', 'binarize']],
# 'resample': 'up',
# 'resample_amount': 100
# },
# {
# 'preprocess': [['features', 'standardize'], ['labels', 'binarize']],
# 'resample': 'up',
# 'resample_amount': 75
# },
# {
# 'preprocess': [['features', 'standardize'], ['labels', 'binarize']],
# 'resample': 'up',
# 'resample_amount': 50
# },
# {
# 'preprocess': [['features', 'standardize'], ['labels', 'binarize']],
# 'resample': 'up',
# 'resample_amount': 25
# },
# {
# 'preprocess': [['features', 'standardize'], ['labels', 'binarize']],
# 'resample': 'down',
# 'resample_amount': 100
# },
# {
# 'preprocess': [['features', 'standardize'], ['labels', 'binarize']],
# 'resample': 'down',
# 'resample_amount': 75
# },
# {
# 'preprocess': [['features', 'standardize'], ['labels', 'binarize']],
# 'resample': 'down',
# 'resample_amount': 50
# },
# {
# 'preprocess': [['features', 'standardize'], ['labels', 'binarize']],
# 'resample': 'down',
# 'resample_amount': 25
# },
]
basic_strategy = [
# ['knn', ''],
# ['bayes', ''],
# ['logistic', '--penalty l2 --solver saga --C 0.2 --tol 0.0001']
# ['svm', ''],
# ['forest', ''],
# ['tree', ''],
# ['tree', '--max-depth 5']
['linear', ''],
# ['logistic', ''],
# ['zeror', ''],
# ['sdnnc', '--layers 3 --neurons 100 --batch 100 --epochs 5 --lr 0.1']
# ['forest', '--max-depth 10 --criterion entropy --n-estimators 5']
]
chosen_prep = {
'preprocess': [['features', 'standardize'], ['labels', 'binarize']],
'resample': 'up',
'resample_amount': 50
}
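# Illustrative sketch (assumption): downstream, each run presumably combines the shared
# options with one data step and one [classifier, args] pair before calling into dbh.
# build_run_config and the 'strategy'/'strategy_args' keys are hypothetical names used
# only to show the dict merge; they are not part of dbh's actual interface.
def build_run_config(data_step, strategy):
    config = copy.deepcopy(shared)
    config.update(copy.deepcopy(data_step))
    config['strategy'] = strategy[0]        # e.g. 'linear'
    config['strategy_args'] = strategy[1]   # e.g. '--penalty l2 --solver saga'
    return config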
strategy_steps = [
# ['sdnnc', '--layers 2 --neurons 100 --batch 100 --epochs 5 --lr 0.1'],
# ['sdnnc', '--layers 3 --neurons 100 --batch 100 --epochs 5 --lr 0.1'],
# ['sdnnc', '--layers 4 --neurons 100 --batch 100 --epochs 5 --lr 0.1'],
# ['sdnnc', '--layers 5 --neurons 100 --batch 100 --epochs 5 --lr 0.1'],
# ['sdnnc', '--layers 5 --neurons 150 --batch 100 --epochs 5 --lr 0.1'],
# ['sdnnc', '--layers 5 --neurons 200 --batch 100 --epochs 5 --lr 0.1'],
# ['sdnnc', '--layers 5 --neurons 200 --batch 100 --epochs 2 --lr 0.1'],
# ['sdnnc', '--layers 5 --neurons 200 --batch 100 --epochs 10 --lr 0.1'],
# ['sdnnc', '--layers 5 --neurons 200 --batch 100 --epochs 20 --lr 0.1'],
# ['sdnnc', '--layers 5 --neurons 200 --batch 100 --epochs 10 --lr 0.025'],
# ['sdnnc', '--layers 5 --neurons 200 --batch 100 --epochs 10 --lr 0.05'],
# ['sdnnc', '--layers 5 --neurons 200 --batch 100 --epochs 10 --lr 0.1'],
# ['sdnnc', '--layers 5 --neurons 200 --batch 100 --epochs 10 --lr 0.2'],
# ['sdnnc', '--layers 5 --neurons 200 --batch 100 --epochs 10 --lr 0.3'],
# ['sdnnc', '--layers 5 --neurons 200 --batch 100 --epochs 10 --lr 0.4'],
# ['sdnnc', '--layers 5 --neurons 200 --batch 100 --epochs 10 --lr 0.5'],
# ['cdnnc', '--layers 5 --neurons 200 --batch 100 --lr 0.025'],
# ['cdnnc', '--layers 5 --neurons 200 --batch 100 --lr 0.05'],
# ['cdnnc', '--layers 5 --neurons 200 --batch 100 --lr 0.1'],
# ['cdnnc', '--layers 5 --neurons 200 --batch 100 --lr 0.2'],
# ['cdnnc', '--layers 5 --neurons 200 --batch 100 --lr 0.3'],
# ['cdnnc', '--layers 5 --neurons 200 --batch 100 --lr 0.4'],
# ['cdnnc', '--layers 5 --neurons 200 --batch 100 --lr 0.5'],
# ['cdnnc', '--layers 5 --neurons 200 --batch 100 --lr 0.5 --beta 0.0005'],
# ['cdnnc', '--layers 5 --neurons 200 --batch 100 --lr 0.5 --beta 0.001'],
# ['cdnnc', '--layers 5 --neurons 200 --batch 100 --lr 0.5 --beta 0.002'],
# ['cdnnc', '--layers 5 --neurons 200 --batch 100 --lr 0.5 --beta 0.005'],
# ['cdnnc', '--layers 5 --neurons 200 --batch 100 --lr 0.5 --beta 0.02'],
# ['cdnnc', '--layers 4 --neurons 200 --batch 100 --lr 0.1'],
# ['cdnnc', '--layers 5 --neurons 200 --batch 100 --lr 0.1'],
# ['cdnnc', '--layers 6 --neurons 200 --batch 100 --lr 0.1'],
# ['cdnnc', '--layers 5 --neurons 150 --batch 100 --lr 0.1'],
# ['cdnnc', '--layers 5 --neurons 200 --batch 100 --lr 0.1'],
# ['cdnnc', '--layers 5 --neurons 250 --batch 100 --lr 0.1'],
# ['cdnnc', '--layers 5 --neurons 200 --batch 50 --lr 0.1'],
# ['cdnnc', '--layers 5 --neurons 200 --batch 100 --lr 0.1'],
# ['cdnnc', '--layers 5 --neurons 200 --batch 150 --lr 0.1'],
# ['cdnnc', '--layers 5 --neurons 300 --batch 100 --lr 0.1'],
# ['cdnnc', '--layers 5 --neurons 300 --batch 100 --lr 0.2'],
# ['cdnnc', '--layers 5 --neurons 300 --batch 100 --lr 0.3'],
# ['cdnnc', '--layers 6 --neurons 300 --batch 100 --lr 0.1'],
# ['cdnnc', '--layers 6 --neurons 300 --batch 100 --lr 0.2'],
# ['cdnnc', '--layers 6 --neurons 300 --batch 100 --lr 0.3'],
# ['cdnnc', '--layers 6 --neurons 350 --batch 100 --lr 0.1'],
# ['cdnnc', '--layers 6 --neurons 350 --batch 100 --lr 0.2'],
# ['cdnnc', '--layers 6 --neurons 350 --batch 100 --lr 0.3'],
# ['cdnnc', '--layers 7 --neurons 350 --batch 100 --lr 0.1'],
# ['cdnnc', '--layers 7 --neurons 350 --batch 100 --lr 0.2'],
# ['cdnnc', '--layers 7 --neurons 350 --batch 100 --lr 0.3'],
# later tuning of sklearn algs, too
# ['knn', '--n_neighbors 22'],
# ['knn', '--n_neighbors 24'],
# ['knn', '--n_neighbors 26'],
# ['knn', '--n_neighbors 28'],
# ['knn', '--n_neighbors 30'],
# ['knn', '--n_neighbors 32'],
# ['knn', '--n_neighbors 34'],
# ['knn', '--n_neighbors 36'],
# ['knn', '--n_neighbors 38'],
# ['knn', '--n_neighbors 40'],
# ['knn', '--n_neighbors 42'],
# ['knn', '--n_neighbors 44'],
# ['knn', '--n_neighbors 6 --weights distance'],
# ['knn', '--n_neighbors 8 --weights distance'],
# ['knn', '--n_neighbors 10 --weights distance'],
# ['knn', '--n_neighbors 12 --weights distance'],
# ['knn', '--n_neighbors 14 --weights distance'],
# ['knn', '--n_neighbors 16 --weights distance'],
# ['knn', '--n_neighbors 18 --weights distance'],
# ['knn', '--n_neighbors 20 --weights distance'],
# ['tree', '--max-depth 5'],
# ['tree', '--max-depth 10'],
# ['tree', '--max-depth 20'],
# ['tree', '--max-depth 50'],
# ['tree', '--max-depth 100'],
# ['tree', '--criterion entropy --max-depth 5'],
# ['tree', '--criterion entropy --max-depth 10'],
# ['tree', '--criterion entropy --max-depth 20'],
# ['tree', '--criterion entropy --max-depth 50'],
# ['tree', '--criterion entropy --max-depth 100'],
# Gauss is not parametric, so done :)
# ['svm', '--kernel linear --C 0.1'],
# ['svm', '--kernel linear --C 0.5'],
# ['svm', '--kernel linear --C 1.0'],
# ['svm', '--kernel linear --C 1.5'],
# ['svm', '--kernel linear --C 2.0'],
# ['svm', '--kernel poly --degree 2 --C 0.1'],
# ['svm', '--kernel poly --degree 2 --C 0.5'],
# ['svm', '--kernel poly --degree 2 --C 1.0'],
# ['svm', '--kernel poly --degree 2 --C 1.5'],
# ['svm', '--kernel poly --degree 2 --C 2.0'],
# ['svm', '--kernel poly --degree 3 --C 0.1'],
# ['svm', '--kernel poly --degree 3 --C 0.5'],
# ['svm', '--kernel poly --degree 3 --C 1.0'],
# ['svm', '--kernel poly --degree 3 --C 1.5'],
# ['svm', '--kernel poly --degree 3 --C 2.0'],
# ['svm', '--kernel rbf --C 0.1'],
# ['svm', '--kernel rbf --C 0.5'],
# ['svm', '--kernel rbf --C 1.0'],
# ['svm', '--kernel rbf --C 1.5'],
# ['svm', '--kernel rbf --C 2.0'],
# ['svm', '--kernel rbf --C 2.2'],
# ['svm', '--kernel rbf --C 2.4'],
# ['svm', '--kernel rbf --C 2.6'],
# ['svm', '--kernel rbf --C 2.8'],
# ['svm', '--kernel rbf --C 3.0'],
# ['svm', '--kernel rbf --C 4.0'],
# ['svm', '--kernel rbf --C 5.0'],
# ['svm', '--kernel sigmoid --C 0.1'],
# ['svm', '--kernel sigmoid --C 0.5'],
# ['svm', '--kernel sigmoid --C 1.0'],
# ['svm', '--kernel sigmoid --C 1.5'],
# ['svm', '--kernel sigmoid --C 2.0'],
# ['svm', '--kernel rbf --C 2.6 --gamma 0.005'],
# ['svm', '--kernel rbf --C 2.6 --gamma 0.01'],
# ['svm', '--kernel rbf --C 2.6 --gamma 0.02'],
# ['svm', '--kernel rbf --C 2.6 --gamma 0.05'],
# ['forest', '--max-depth 10 --n-estimators 5'],
# ['forest', '--max-depth 10 --n-estimators 10'],
# ['forest', '--max-depth 10 --n-estimators 20'],
# ['forest', '--max-depth 10 --n-estimators 50'],
# ['forest', '--max-depth 10 --n-estimators 100'],
# ['forest', '--max-depth 10 --criterion entropy --n-estimators 5'],
# ['forest', '--max-depth 10 --criterion entropy --n-estimators 10'],
# ['forest', '--max-depth | |
from sklearn.cluster import DBSCAN
import pandas as pd
from pandas.core.indexes.base import InvalidIndexError
import numpy as np
import logging
import os
from clubcpg.ParseBam import BamFileReadParser
from clubcpg.OutputComparisonResults import OutputIndividualMatrixData
from clubcpg.Imputation import Imputation
import datetime
from multiprocessing import Pool
import time
import tempfile
from sklearn.utils import shuffle
class ClusterReads:
"""
This class is used to take a dataframe or matrix of reads and cluster them
:Example:
>>> from clubcpg.ClusterReads import ClusterReads
>>> cluster = ClusterReads(bam_a="/path/to/file.bam", bam_b="/path/to/file.bam", bins_file="/path/to/file.csv", suffix="chr19")
>>> cluster.execute()
"""
def __init__(self, bam_a: str, bam_b=None, bin_size=100, bins_file=None, output_directory=None, num_processors=1,
cluster_member_min=4, read_depth_req=10, remove_noise=True, mbias_read1_5=None,
mbias_read1_3=None, mbias_read2_5=None, mbias_read2_3=None, suffix="", no_overlap=True, permute_labels=False):
self.bam_a = bam_a
self.bam_b = bam_b
self.bin_size = int(bin_size)
self.bins_file = bins_file
self.output_directory = output_directory
self.num_processors = num_processors
self.cluster_member_min = cluster_member_min
self.read_depth_req = read_depth_req
self.remove_noise = remove_noise
self.mbias_read1_5 = mbias_read1_5
self.mbias_read1_3 = mbias_read1_3
self.mbias_read2_5 = mbias_read2_5
self.mbias_read2_3 = mbias_read2_3
self.suffix = suffix
self.no_overlap = no_overlap
self.permute_labels = permute_labels
if bam_b:
self.single_file_mode = False
else:
self.single_file_mode = True
# Remove clusters with less than n members
def filter_data_frame(self, matrix: pd.DataFrame):
"""
Takes a dataframe of clusters and removes any groups with less than self.cluster_member_min members
:param matrix: dataframe of clustered reads
:type matrix: pd.DataFrame
:return: input matrix with some clusters removed
"""
output = matrix.copy()
# duplicate indexes exist from concatenation; reset the index to prevent dropping unintended rows
output.reset_index(drop=True, inplace=True)
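# Illustrative note (added comment, not in the original source): with cluster_member_min=4,
# a 'class' column of [0, 0, 0, 0, 1, 1] keeps the four class-0 rows and drops the
# two class-1 rows in the loop below.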
for cluster in output['class'].unique():
df = output[output['class'] == cluster]
if len(df) < self.cluster_member_min:
indexes = df.index
output.drop(indexes, inplace=True)
return output
# Get only the matrices made up of reads from A OR B
@staticmethod
def get_unique_matrices(filtered_matrix):
unique_dfs = []
for label in filtered_matrix['class'].unique():
df = filtered_matrix[filtered_matrix['class'] == label]
if len(df['input'].unique()) == 1: # A or B
unique_dfs.append(df)
return unique_dfs
# Get matrices with reads made up of A AND B
@staticmethod
def get_common_matrices(filtered_matrix):
shared_dfs = []
for label in filtered_matrix['class'].unique():
df = filtered_matrix[filtered_matrix['class'] == label]
if len(df['input'].unique()) > 1: # A and B
shared_dfs.append(df)
return shared_dfs
# Get the means for all unique matrices
def get_unique_means(self, filtered_matrix):
output = []
for matrix in self.get_unique_matrices(filtered_matrix):
input_file = matrix['input'].unique()[0]
matrix_mean = np.array(matrix)[:, :-2].mean()
output.append((input_file, matrix_mean))
return output
# Get the means for all common matrices
def get_common_means(self, filtered_matrix):
output = []
for matrix in self.get_common_matrices(filtered_matrix):
matrix_mean = np.array(matrix)[:, :-2].mean()
output.append(matrix_mean)
return output
# Generate a string label for each bin
@staticmethod
def make_bin_label(chromosome, stop_loc):
return "_".join([chromosome, str(stop_loc)])
@staticmethod
def get_input_counts(df):
output = {}
for input_label in df['input'].unique():
cts = len(df[df['input'] == input_label])
output[input_label] = cts
return output
# Takes the output of process_bins() and converts it into a list of lines of data for output
def generate_individual_matrix_data(self, filtered_matrix, chromosome, bin_loc):
"""
Takes the output of process_bins() and converts it into a list of lines of text data for output
:param filtered_matrix: dataframe returned by :meth:`.ClusterReads.filter_data_frame`
:param chromosome: chromosome as "Chr5"
:param bin_loc: location representing the bin given as the end coordinate, ie 590000
:type filtered_matrix: pd.DataFrame
:type chromosome: string
:type bin_loc: string
:return: comma separated lines extracted from the filtered matrix, containing chromosome and bin info
:rtype: list
"""
# Individual comparisons data
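# Illustrative example of one output line (values are hypothetical, following the
# out_line construction below):
#   chr19_590000,A,0.8125,0,12,4,1;0;1;1,A=12
# i.e. bin_label,input_label,mean,class_label,read_number,num_cpgs,cpg_pattern,input_counts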
lines = []
unique_groups = self.get_unique_matrices(filtered_matrix)
common_groups = self.get_common_matrices(filtered_matrix)
bin_label = self.make_bin_label(chromosome, bin_loc)
for matrix in unique_groups:
cpg_matrix = np.array(matrix.drop(['class', 'input'], axis=1))
# get a semi-colon separated string of 1s and 0s representing the CpG pattern
cpg_pattern = ";".join([str(int(x)) for x in list(cpg_matrix[0])])
m_mean = cpg_matrix.mean()
num_cpgs = cpg_matrix.shape[1]
read_number = len(matrix)
input_label = matrix['input'].unique()[0]
class_label = matrix['class'].unique()[0]
input_counts = self.get_input_counts(matrix)
split_n_cpgs = ';'.join(["{}={}".format(x[0], x[1]) for x in input_counts.items()])
out_line = ",".join([bin_label, input_label, str(m_mean), str(class_label), str(read_number),
str(num_cpgs), cpg_pattern, split_n_cpgs])
lines.append(out_line)
for matrix in common_groups:
cpg_matrix = np.array(matrix.drop(['class', 'input'], axis=1))
# get a semi-colon separated string of 1s and 0s representing the CpG pattern
cpg_pattern = ";".join([str(int(x)) for x in list(cpg_matrix[0])])
m_mean = cpg_matrix.mean()
num_cpgs = cpg_matrix.shape[1]
read_number = len(matrix)
input_label = "".join(list(matrix['input'].unique()))
class_label = matrix['class'].unique()[0]
input_counts = self.get_input_counts(matrix)
split_n_cpgs = ';'.join(["{}={}".format(x[0], x[1]) for x in input_counts.items()])
out_line = ",".join([bin_label, input_label, str(m_mean), str(class_label), str(read_number),
str(num_cpgs), cpg_pattern, split_n_cpgs])
lines.append(out_line)
return lines
@staticmethod
def attempt_cpg_position_correction(reads, parser: BamFileReadParser):
"""
Take the reads and a parser object, attempt CpG position correction, and return the corrected reads
:param reads: parsed reads from BamFileReadParser
:param parser: an instance of the BamFileReadParser object
:return: reads with CpG positions corrected
"""
corrected_reads = parser.correct_cpg_positions(reads)
return corrected_reads
# MAIN METHOD
def process_bins(self, bin):
"""
This is the main method and should be called using Pool.map. It takes one bin location and uses the other helper
functions to get the reads, form the matrix, cluster it with DBSCAN, and output the cluster data as text lines
ready for writing to a file.
:param bin: string in this format: "chr19_55555"
:return: a list of lines representing the cluster data from that bin
"""
try:
chromosome, bin_loc = bin.split("_")
except ValueError:
return None
bin_loc = int(bin_loc)
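# Illustrative note (added comment): a bin string such as "chr19_590000" with the
# default bin_size of 100 yields chromosome "chr19" and the read window 589900-590000 below.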
# Create bam parser and parse reads
bam_parser_A = BamFileReadParser(self.bam_a, 20, read1_5=self.mbias_read1_5, read1_3=self.mbias_read1_3,
read2_5=self.mbias_read2_5, read2_3=self.mbias_read2_3, no_overlap=self.no_overlap)
reads_A = bam_parser_A.parse_reads(chromosome, bin_loc - self.bin_size, bin_loc)
if not self.single_file_mode:
bam_parser_B = BamFileReadParser(self.bam_b, 20, read1_5=self.mbias_read1_5, read1_3=self.mbias_read1_3,
read2_5=self.mbias_read2_5, read2_3=self.mbias_read2_3, no_overlap=self.no_overlap)
reads_B = bam_parser_B.parse_reads(chromosome, bin_loc - self.bin_size, bin_loc)
# This try/except block returns None for a bin if any discrepancies in the data format of the bins are detected.
# The Nones are filtered out during the output of the data
try:
# create matrix drop NA
# This matrix is actually a pandas dataframe
matrix_A = bam_parser_A.create_matrix(reads_A).dropna()
# Attempt to correct CpG Position if necessary
if len(matrix_A) == 0:
reads_A = self.attempt_cpg_position_correction(reads_A, bam_parser_A)
matrix_A = bam_parser_A.create_matrix(reads_A).dropna()
if not self.single_file_mode:
matrix_B = bam_parser_B.create_matrix(reads_B).dropna()
# attempt to correct CpG position in B if necessary
if len(matrix_B) == 0:
reads_B = self.attempt_cpg_position_correction(reads_B, bam_parser_B)
matrix_B = bam_parser_B.create_matrix(reads_B).dropna()
except ValueError as e:
logging.error("ValueError when creating matrix at bin {}. Stack trace will be below if log level=DEBUG".format(bin))
logging.debug(str(e))
return None
except InvalidIndexError as e:
logging.error("Invalid Index error when creating matrices at bin {}".format(bin))
logging.debug(str(e))
return None
# if read depths are still not a minimum, skip
if matrix_A.shape[0] < self.read_depth_req:
return None
if not self.single_file_mode:
if matrix_B.shape[0] < self.read_depth_req:
return None
# create labels and add to dataframe
# If two files label each A and B, otherwise use file_name as label
if not self.single_file_mode:
try:
labels_A = ['A'] * len(matrix_A)
matrix_A['input'] = labels_A
labels_B = ['B'] * len(matrix_B)
matrix_B['input'] = labels_B
except TypeError:
logging.debug("TypeError when adding labels at bin {}".format(bin))
return None
else:
labels_A = [os.path.basename(self.bam_a)] * len(matrix_A)
matrix_A['input'] = labels_A
if not self.single_file_mode:
try:
# ensure they have the same CpG positions
matrix_B.columns = matrix_A.columns
full_matrix = pd.concat([matrix_A, matrix_B], sort=False)
except ValueError as e:
logging.error("Matrix concat error in bin {}".format(bin))
# logging.debug(str(e))
return None
else:
full_matrix = matrix_A
if self.permute_labels:
# Randomly permute the input labels, has no effect in single file mode
full_matrix['input'] = shuffle(full_matrix['input'].values)
full_matrix = full_matrix.sort_values(by='input')
# Get data without labels for clustering
data_to_cluster = np.array(full_matrix)[:, :-1]
# Create DBSCAN classifier, cluster the reads, and add the cluster classes to the dataframe
clf = DBSCAN(min_samples=2)
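# Note (added comment): min_samples=2 with scikit-learn's default eps of 0.5 means that,
# on these 0/1 methylation vectors, only reads with identical CpG patterns fall within eps
# of each other, so clusters are groups of identical patterns; reads without an identical
# partner are labelled -1 (noise) and can be dropped via self.remove_noise further below.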
try:
labels = clf.fit_predict(data_to_cluster)
except ValueError as e:
# log error
logging.error("ValueError when trying to cluster bin {}".format(bin))
logging.debug(str(e))
return None
full_matrix['class'] = labels
# Filter out any clusters with less than a minimum
filtered_matrix = self.filter_data_frame(full_matrix)
if self.remove_noise:
filtered_matrix = filtered_matrix[filtered_matrix['class'] != -1]
# return generate_output_data(filtered_matrix, chromosome, bin_loc)
return self.generate_individual_matrix_data(filtered_matrix, chromosome, bin_loc)
def execute(self, return_only=False):
"""
This method will start multiprocessing execution of this class.
:param return_only: Whether to return the results as a variable (True) or write them to a file (False)
:type return_only: bool
:return: list of lists if ``return_only`` is True, otherwise None
:rtype: list or None
"""
start_time = datetime.datetime.now().strftime("%y-%m-%d")
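# Note (added comment): track_progress below relies on private multiprocessing.Pool
# result attributes (_number_left, _chunksize), so the remaining-task count is only an
# approximation and may break on other Python versions.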
def track_progress(job, update_interval=60):
while job._number_left > 0:
logging.info("Tasks remaining = {0}".format(
job._number_left * job._chunksize
))
time.sleep(update_interval)
bins = []
with open(self.bins_file, "r") as f:
for line in f:
data = line.strip().split(",")
bins.append(data[0])
pool = Pool(processes=self.num_processors)
results = pool.map_async(self.process_bins, bins)
track_progress(results)
results = results.get()
if return_only:
return results
else:
output = OutputIndividualMatrixData(results)
output.write_to_output(self.output_directory, "Clustering.{}{}.{}".format(os.path.basename(self.bam_a), self.suffix, start_time))
class ClusterReadsWithImputation(ClusterReads):
"""
This class is used to perform
return {'copy_file_path': fq_path}
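# Note (added comment): the @patch.object decorators below swap DataFileUtil's
# download_staging_file for the mock above, so staging-area 'downloads' in these tests
# resolve to local files instead of hitting the real staging service.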
@patch.object(DataFileUtil, "download_staging_file",
side_effect=mock_download_staging_file)
def test_upload_fail_bad_fastq_file_staging(self, download_staging_file):
self.fail_upload_reads(
{'sequencing_tech': 'tech',
'wsname': self.ws_info[1],
'fwd_staging_file_name': 'Sample1_invalid.fastq',
'name': 'bar'
},
'Invalid FASTQ file - Path: /kb/module/work/tmp/Sample1_invalid.fastq. ' +
'Input Staging : Sample1_invalid.fastq.')
@patch.object(DataFileUtil, "download_staging_file",
side_effect=mock_download_staging_file)
def test_upload_fail_invalid_paired_fastq_file_staging(self, download_staging_file):
self.fail_upload_reads_regex(
{'sequencing_tech': 'tech',
'wsname': self.ws_info[1],
'fwd_staging_file_name': 'Sample1_invalid.fastq',
'rev_staging_file_name': 'Sample1_invalid.fastq',
'name': 'bar'
},
'Invalid FASTQ file - Path: /kb/module/work/tmp/(.*).inter.fastq. ' +
'Input Staging files - FWD Staging file : Sample1_invalid.fastq, ' +
'REV Staging file : Sample1_invalid.fastq. ' +
'FWD Path : /kb/module/work/tmp/Sample1_invalid.fastq. ' +
'REV Path : /kb/module/work/tmp/Sample1_invalid.fastq.')
def test_upload_fail_bad_paired_end_reads_web(self):
url_prefix = 'https://anl.box.com/shared/static/'
self.fail_upload_reads_regex(
{'sequencing_tech': 'tech',
'wsname': self.ws_info[1],
'fwd_file_url': url_prefix + 'lph9l0ye6yqetnbk04cx33mqgrj4b85j.fq',
'rev_file_url': url_prefix + 'k0y8lkkpt1bxr04h6necwm7vewsvgm28.fastq',
'download_type': 'Direct Download',
'name': 'bar',
'interleaved': 0
},
'Interleave failed - reads files do not have ' +
'an equal number of records. ' +
'Forward Path /kb/module/work/tmp/(.*)/small.forward.fq, '
'Reverse Path /kb/module/work/tmp/(.*)/Sample5_noninterleaved.1.fastq.'
'Forward File URL https://anl.box.com/shared/static/' +
'lph9l0ye6yqetnbk04cx33mqgrj4b85j.fq, ' +
'Reverse File URL https://anl.box.com/shared/static/' +
'k0y8lkkpt1bxr04h6necwm7vewsvgm28.fastq.'
)
def test_upload_fail_bad_fastq_file_web(self):
self.fail_upload_reads_regex(
{'sequencing_tech': 'tech',
'wsname': self.ws_info[1],
'fwd_file_url': 'https://www.dropbox.com/s/0qndz66qopp5kyt/Sample1_invalid.fastq',
'download_type': 'DropBox',
'name': 'bar'
},
'Invalid FASTQ file - Path: /kb/module/work/tmp/(.*)/Sample1_invalid.fastq. ' +
'Input URL : https://www.dropbox.com/s/0qndz66qopp5kyt/Sample1_invalid.fastq.')
def test_upload_fail_bad_paired_fastq_file_web(self):
self.fail_upload_reads_regex(
{'sequencing_tech': 'tech',
'wsname': self.ws_info[1],
'fwd_file_url': 'https://www.dropbox.com/s/0qndz66qopp5kyt/Sample1_invalid.fastq',
'rev_file_url': 'https://www.dropbox.com/s/whw8ho6ipwv3gpl/Sample_rev.fq',
'download_type': 'DropBox',
'name': 'bar'
},
'Invalid FASTQ file - Path: /kb/module/work/tmp/(.*).inter.fastq. ' +
'Input URLs - ' +
'FWD URL : https://www.dropbox.com/s/0qndz66qopp5kyt/Sample1_invalid.fastq, ' +
'REV URL : https://www.dropbox.com/s/whw8ho6ipwv3gpl/Sample_rev.fq. ' +
'FWD Path : /kb/module/work/tmp/(.*)/Sample1_invalid.fastq. ' +
'REV Path : /kb/module/work/tmp/(.*)/Sample_rev.fq.')
def test_upload_fail_paired_bad_fastq_file(self):
print('*** upload_fail_bad_fastq_file***')
self.fail_upload_reads_regex(
{'sequencing_tech': 'tech',
'wsname': self.ws_info[1],
'fwd_file': 'data/Sample1_invalid.fastq',
'rev_file': 'data/Sample_rev.fq',
'name': 'bar'
},
'Invalid FASTQ file - Path: /kb/module/work/tmp/(.*).inter.fastq. ' +
'Input Files Paths - FWD Path : /kb/module/test/data/Sample1_invalid.fastq, ' +
'REV Path : /kb/module/test/data/Sample_rev.fq.')
def test_upload_fail_paired_bad_fastq(self):
print('*** upload_fail_bad_fastq ***')
ret1 = self.upload_file_to_shock('data/Sample1_invalid.fastq')
ret2 = self.upload_file_to_shock('data/Sample_rev.fq')
self.fail_upload_reads_regex(
{'sequencing_tech': 'tech',
'wsname': self.ws_info[1],
'fwd_id': ret1['shock_id'],
'rev_id': ret2['shock_id'],
'name': 'bar'
},
('Invalid FASTQ file - Path: /kb/module/work/tmp/(.*).inter.fastq. ' +
'Input Shock IDs - FWD Shock ID : {}, ' +
'REV Shock ID : {}. ' +
'FWD File Name : Sample1_invalid.fastq. ' +
'REV File Name : Sample_rev.fq. ' +
'FWD Path : /kb/module/work/tmp/fwd/Sample1_invalid.fastq. ' +
'REV Path : /kb/module/work/tmp/rev/Sample_rev.fq.').format(
ret1['shock_id'],
ret2['shock_id']))
self.delete_shock_node(ret1['shock_id'])
self.delete_shock_node(ret2['shock_id'])
def test_upload_fail_interleaved_for_single(self):
ret = self.upload_file_to_shock('data/Sample5_interleaved.fastq')
self.fail_upload_reads(
{'sequencing_tech': 'tech',
'wsname': self.ws_info[1],
'fwd_id': ret['shock_id'],
'name': 'bar'
},
'Invalid FASTQ file - Path: /kb/module/work/tmp/fwd/Sample5_interleaved.fastq. ' +
'Input Shock ID : ' + ret['shock_id'] +
'. File Name : Sample5_interleaved.fastq.')
self.delete_shock_node(ret['shock_id'])
def test_bad_paired_end_reads(self):
ret1 = self.upload_file_to_shock('data/small.forward.fq')
ret2 = self.upload_file_to_shock('data/Sample5_noninterleaved.1.fastq')
self.fail_upload_reads({'fwd_id': ret1['shock_id'],
'rev_id': ret2['shock_id'],
'sequencing_tech': 'seqtech-pr1',
'wsname': self.ws_info[1],
'name': 'pairedreads1',
'interleaved': 0},
'Interleave failed - reads files do not have ' +
'an equal number of records. forward Shock node ' +
ret1['shock_id'] +
', filename small.forward.fq, reverse Shock node ' +
ret2['shock_id'] +
', filename Sample5_noninterleaved.1.fastq',
do_startswith=True)
self.delete_shock_node(ret1['shock_id'])
self.delete_shock_node(ret2['shock_id'])
def test_missing_line_paired_end_reads(self):
ret1 = self.upload_file_to_shock(
'data/Sample5_noninterleaved.1.missing_line.fastq')
ret2 = self.upload_file_to_shock('data/Sample5_noninterleaved.1.fastq')
self.fail_upload_reads({'fwd_id': ret1['shock_id'],
'rev_id': ret2['shock_id'],
'sequencing_tech': 'seqtech-pr1',
'wsname': self.ws_info[1],
'name': 'pairedreads1',
'interleaved': 0},
'Reading FASTQ record failed - non-blank lines are not a ' +
'multiple of four. ' +
'Shock node ' + ret1['shock_id'] +
', Shock filename ' +
'Sample5_noninterleaved.1.missing_line.fastq')
self.delete_shock_node(ret1['shock_id'])
self.delete_shock_node(ret2['shock_id'])
def test_bad_paired_end_reads_file(self):
fwdtf = 'small.forward.fq'
revtf = 'Sample5_noninterleaved.1.fastq'
fwdtarget = os.path.join(self.scratch, fwdtf)
revtarget = os.path.join(self.scratch, revtf)
shutil.copy('data/' + fwdtf, fwdtarget)
shutil.copy('data/' + revtf, revtarget)
self.fail_upload_reads({'fwd_file': fwdtarget,
'rev_file': revtarget,
'sequencing_tech': 'seqtech-pr1',
'wsname': self.ws_info[1],
'name': 'pairedreads1',
'interleaved': 0},
'Interleave failed - reads files do not have ' +
'an equal number of records. Forward Path ' +
'/kb/module/work/tmp/small.forward.fq, ' +
'Reverse Path /kb/module/work/tmp/Sample5_noninterleaved.1.fastq.')
@patch.object(DataFileUtil, "download_staging_file",
side_effect=mock_download_staging_file)
def test_bad_paired_end_staging_reads_file(self, download_staging_file):
fwdtf = 'small.forward.fq'
revtf = 'Sample5_noninterleaved.1.fastq'
self.fail_upload_reads(
{'fwd_staging_file_name': fwdtf,
'rev_staging_file_name': revtf,
'sequencing_tech': 'seqtech-pr1',
'wsname': self.ws_info[1],
'name': 'pairedreads1',
'interleaved': 0},
'Interleave failed - reads files do not have ' +
'an equal number of records. Forward Path ' +
'/kb/module/work/tmp/small.forward.fq, ' +
'Reverse Path /kb/module/work/tmp/Sample5_noninterleaved.1.fastq.' +
'Forward Staging file name small.forward.fq, ' +
'Reverse Staging file name Sample5_noninterleaved.1.fastq.')
def test_missing_line_paired_end_reads_file(self):
fwdtf = 'Sample5_noninterleaved.1.missing_line.fastq'
revtf = 'Sample5_noninterleaved.1.fastq'
fwdtarget = os.path.join(self.scratch, fwdtf)
revtarget = os.path.join(self.scratch, revtf)
shutil.copy('data/' + fwdtf, fwdtarget)
shutil.copy('data/' + revtf, revtarget)
self.fail_upload_reads({'fwd_file': fwdtarget,
'rev_file': revtarget,
'sequencing_tech': 'seqtech-pr1',
'wsname': self.ws_info[1],
'name': 'pairedreads1',
'interleaved': 0},
'Reading FASTQ record failed - non-blank lines are not a ' +
'multiple of four.',
do_startswith=True
)
def test_missing_line_paired_end_reads_file_web(self):
fwd_file_url = 'https://www.dropbox.com/s/tgyutgfwn3qndxc/'
fwd_file_url += 'Sample5_noninterleaved.1.fastq?dl=0'
rev_file_url = 'https://www.dropbox.com/s/f8r3olh6hqpuzkh/'
rev_file_url += 'Sample5_noninterleaved.1.missing_line.fastq'
self.fail_upload_reads(
{'sequencing_tech': 'tech',
'wsname': self.ws_info[1],
'fwd_file_url': fwd_file_url,
'rev_file_url': rev_file_url,
'download_type': 'DropBox',
'name': 'bar'
},
'Reading FASTQ record failed - non-blank lines are not a ' +
'multiple of four. ' +
'File URL https://www.dropbox.com/s/f8r3olh6hqpuzkh/' +
'Sample5_noninterleaved.1.missing_line.fastq, ' +
'Shock node None, Shock filename None')
@patch.object(DataFileUtil, "download_staging_file",
side_effect=mock_download_staging_file)
def test_upload_fail_bad_paired_fastq_file_staging(self, download_staging_file):
self.fail_upload_reads(
{'sequencing_tech': 'tech',
'wsname': self.ws_info[1],
'fwd_staging_file_name': 'Sample5_noninterleaved.1.missing_line.fastq',
'rev_staging_file_name': 'Sample5_noninterleaved.1.missing_line.fastq',
'name': 'bar'
},
'Reading FASTQ record failed - non-blank lines are not a ' +
'multiple of four. ' +
'Staging file name Sample5_noninterleaved.1.missing_line.fastq, ' +
'Shock node None, Shock filename None')
# Download tests ########################################################
def test_download_one(self):
self.download_success(
{'frbasic': {
'md5': {'fwd': self.MD5_SM_F, 'rev': self.MD5_SM_R},
'fileext': {'fwd': 'fwd', 'rev': 'rev'},
'obj': dictmerge(
self.STD_OBJ_KBF_P,
{'files': {'type': 'paired',
'otype': 'paired',
'fwd_name': 'small.forward.fq',
'rev_name': 'small.reverse.fq'
},
'ref': self.staged['frbasic']['ref']
})
}
}
)
def test_download_with_no_handle_filename(self):
self.download_success(
{'no_filename': {
'md5': {'fwd': self.MD5_SM_F, 'rev': self.MD5_SM_R},
'fileext': {'fwd': 'fwd', 'rev': 'rev'},
'obj': dictmerge(
self.STD_OBJ_KBF_P,
{'files': {'type': 'paired',
'otype': 'paired',
'fwd_name': 'small.forward.fq',
'rev_name': 'small.reverse.fq'
},
'ref': self.staged['no_filename']['ref']
})
}
}
)
def test_multiple(self):
self.download_success(
{'frbasic': {
'md5': {'fwd': self.MD5_SM_F, 'rev': self.MD5_SM_R},
'fileext': {'fwd': 'fwd', 'rev': 'rev'},
'obj': dictmerge(
self.STD_OBJ_KBF_P,
{'files': {'type': 'paired',
'otype': 'paired',
'fwd_name': 'small.forward.fq',
'rev_name': 'small.reverse.fq'
},
'ref': self.staged['frbasic']['ref']
})
},
'intbasic': {
'md5': {'fwd': self.MD5_SM_I},
'fileext': {'fwd': 'inter'},
'obj': dictmerge(
self.STD_OBJ_KBF_P,
{'files': {'type': 'interleaved',
'otype': 'interleaved',
'fwd_name': 'interleaved.fq',
'rev_name': None,
'rev': None
},
'ref': self.staged['intbasic']['ref']
})
}
}
)
def test_single_end(self):
self.download_success(
{'single_end': {
'md5': {'fwd': self.MD5_SM_F},
'fileext': {'fwd': 'single'},
'obj': dictmerge(
self.STD_OBJ_KBF_S,
{'files': {'type': 'single',
'otype': 'single',
'fwd_name': 'small.forward.fq',
'rev_name': None,
'rev': None
},
'ref': self.staged['single_end']['ref']
})
},
'single_end_kbassy': {
'md5': {'fwd': self.MD5_SM_R},
'fileext': {'fwd': 'single'},
'obj': dictmerge(
self.STD_OBJ_KBA,
{'files': {'type': 'single',
'otype': 'single',
'fwd_name': 'small.reverse.fq',
'rev_name': None,
'rev': None
},
'ref': self.staged['single_end_kbassy']['ref']
})
},
'single_end_gz': {
'md5': {'fwd': self.MD5_SM_F},
'fileext': {'fwd': 'single'},
'obj': dictmerge(
self.STD_OBJ_KBF_S,
{'files': {'type': 'single',
'otype': 'single',
'fwd_name': 'small.forward.fq.gz',
'rev_name': None,
'rev': None
},
'ref': self.staged['single_end_gz']['ref']
})
},
'single_end_kbassy_gz': {
'md5': {'fwd': self.MD5_SM_R},
'fileext': {'fwd': 'single'},
'obj': dictmerge(
self.STD_OBJ_KBA,
{'files': {'type': 'single',
'otype': 'single',
'fwd_name': 'small.reverse.fq.gz',
'rev_name': None,
'rev': None
},
'ref': self.staged['single_end_kbassy_gz']['ref']
})
}
}
)
def test_paired(self):
self.download_success(
{'frbasic': {
'md5': {'fwd': self.MD5_SM_F, 'rev': self.MD5_SM_R},
'fileext': {'fwd': 'fwd', 'rev': 'rev'},
'obj': dictmerge(
self.STD_OBJ_KBF_P,
{'files': {'type': 'paired',
'otype': 'paired',
'fwd_name': 'small.forward.fq',
'rev_name': 'small.reverse.fq'
},
'ref': self.staged['frbasic']['ref']
})
},
'frbasic_kbassy': {
'md5': {'fwd': self.MD5_SM_F, 'rev': self.MD5_SM_R},
'fileext': {'fwd': 'fwd', 'rev': 'rev'},
'obj': dictmerge(
self.STD_OBJ_KBA,
{'files': {'type': 'paired',
'otype': 'paired',
'fwd_name': 'small.forward.fq',
'rev_name': 'small.reverse.fq'
},
'ref': self.staged['frbasic_kbassy']['ref']
})
},
'frbasic_gz': {
'md5': {'fwd': self.MD5_SM_F, 'rev': self.MD5_SM_R},
'fileext': {'fwd': 'fwd', 'rev': 'rev'},
'obj': dictmerge(
self.STD_OBJ_KBF_P,
{'files': {'type': 'paired',
'otype': 'paired',
'fwd_name': 'small.forward.fq.gz',
'rev_name': 'small.reverse.fq'
},
'ref': self.staged['frbasic_gz']['ref']
})
},
'frbasic_kbassy_gz': {
'md5': {'fwd': self.MD5_SM_F, 'rev': self.MD5_SM_R},
'fileext': {'fwd': 'fwd', 'rev': 'rev'},
'obj': dictmerge(
self.STD_OBJ_KBA,
{'files': {'type': 'paired',
'otype': 'paired',
'fwd_name': 'small.forward.fq',
'rev_name': 'small.reverse.fq.gz'
},
'ref': self.staged['frbasic_kbassy_gz']['ref']
})
}
}
)
def test_interleaved(self):
self.download_success(
{'intbasic': {
'md5': {'fwd': self.MD5_SM_I},
'fileext': {'fwd': 'inter'},
'obj': dictmerge(
self.STD_OBJ_KBF_P,
{'files': {'type': 'interleaved',
'otype': 'interleaved',
'fwd_name': 'interleaved.fq',
'rev_name': None,
'rev': None
},
'ref': self.staged['intbasic']['ref']
})
},
'intbasic_kbassy': {
'md5': {'fwd': self.MD5_SM_I},
'fileext': {'fwd': 'inter'},
'obj': dictmerge(
self.STD_OBJ_KBA,
{'files': {'type': 'interleaved',
'otype': 'interleaved',
'fwd_name': 'interleaved.fq',
'rev_name': None,
'rev': None
},
'ref': self.staged['intbasic_kbassy']['ref']
})
},
'intbasic_gz': {
'md5': {'fwd': self.MD5_SM_I},
'fileext': {'fwd': 'inter'},
'obj': dictmerge(
self.STD_OBJ_KBF_P,
{'files': {'type': 'interleaved',
'otype': 'interleaved',
'fwd_name': 'interleaved.fq.gz',
'rev_name': None,
'rev': None
},
'ref': self.staged['intbasic_gz']['ref']
})
},
'intbasic_kbassy_gz': {
'md5': {'fwd': self.MD5_SM_I},
'fileext': {'fwd': 'inter'},
'obj': dictmerge(
self.STD_OBJ_KBA,
{'files': {'type': 'interleaved',
'otype': 'interleaved',
'fwd_name': 'interleaved.fq.gz',
'rev_name': None,
'rev': None
},
'ref': self.staged['intbasic_kbassy_gz']['ref']
})
}
}, interleave='none'
)
# test some compressed, some uncompressed
def test_fr_to_interleave(self):
fn = 'Sample5_noninterleaved.1.blank_lines.fastq'
self.download_success(
{'frbasic': {
'md5': {'fwd': self.MD5_FR_TO_I},
'fileext': {'fwd': 'inter'},
'obj': dictmerge(
self.STD_OBJ_KBF_P,
{'files': {'type': 'interleaved',
'otype': 'paired',
0,
"iopsvalue": "(0/101)",
"throughputvalue": "(0/404)",
"iops": "101",
"iopscontrol": "true",
"throughput": "404",
"tpcontrol": "true",
"blocksize": "4k",
"latency": "15",
"graceallowed": true,
"type": "1",
"revisionnumber": 2,
"managedstate": "Available",
"configurationstate": "sync",
"status": "Online",
"standardproviops": 0,
"operatingblocksize": 0,
"operatingcachehit": 0,
"operatingiops": 0,
"standardoperatingiops": 0
}]
}}"""
# A fake list iSCSI auth user response of cloudbyte's elasticenter
FAKE_LIST_ISCSI_AUTH_USER_RESPONSE = """{ "listiSCSIAuthUsersResponse" : {
"count":1 ,
"authuser" : [{
"id": "53d00164-a974-31b8-a854-bd346a8ea937",
"accountid": "12d41531-c41a-4ab7-abe2-ce0db2570119",
"authgroupid": "537744eb-c594-3145-85c0-9<PASSWORD>",
"chapusername": "fakeauthgroupchapuser",
"chappassword": "<PASSWORD>",
"mutualchapusername": "fakeauthgroupmutualchapuser",
"mutualchappassword": "<PASSWORD>"
}]
}}"""
# A fake list iSCSI auth group response of cloudbyte's elasticenter
FAKE_LIST_ISCSI_AUTH_GROUP_RESPONSE = """{ "listiSCSIAuthGroupResponse" : {
"count":2 ,
"authgroup" : [{
"id": "32d935ee-a60f-3681-b792-d8ccfe7e8e7f",
"name": "None",
"comment": "None"
}, {
"id": "537744eb-c594-3145-85c0-96079922b894",
"name": "fakeauthgroup",
"comment": "Fake Auth Group For Openstack "
}]
}}"""
# This dict maps the HTTP commands of elasticenter
# to their respective fake responses
MAP_COMMAND_TO_FAKE_RESPONSE = {}
MAP_COMMAND_TO_FAKE_RESPONSE['deleteFileSystem'] = (
json.loads(FAKE_DELETE_FILE_SYSTEM_RESPONSE))
MAP_COMMAND_TO_FAKE_RESPONSE["listFileSystem"] = (
json.loads(FAKE_LIST_FILE_SYSTEM_RESPONSE))
MAP_COMMAND_TO_FAKE_RESPONSE["deleteSnapshot"] = (
json.loads(FAKE_DELETE_STORAGE_SNAPSHOT_RESPONSE))
MAP_COMMAND_TO_FAKE_RESPONSE["updateVolumeiSCSIService"] = (
json.loads(FAKE_UPDATE_VOLUME_ISCSI_SERVICE_RESPONSE))
MAP_COMMAND_TO_FAKE_RESPONSE["createStorageSnapshot"] = (
json.loads(FAKE_CREATE_STORAGE_SNAPSHOT_RESPONSE))
MAP_COMMAND_TO_FAKE_RESPONSE["listAccount"] = (
json.loads(FAKE_LIST_ACCOUNT_RESPONSE))
MAP_COMMAND_TO_FAKE_RESPONSE["listTsm"] = (
json.loads(FAKE_LIST_TSM_RESPONSE))
MAP_COMMAND_TO_FAKE_RESPONSE["addQosGroup"] = (
json.loads(FAKE_ADD_QOS_GROUP_RESPONSE))
MAP_COMMAND_TO_FAKE_RESPONSE["queryAsyncJobResult"] = (
json.loads(FAKE_QUERY_ASYNC_JOB_RESULT_RESPONSE))
MAP_COMMAND_TO_FAKE_RESPONSE["createVolume"] = (
json.loads(FAKE_CREATE_VOLUME_RESPONSE))
MAP_COMMAND_TO_FAKE_RESPONSE["listVolumeiSCSIService"] = (
json.loads(FAKE_LIST_VOLUME_ISCSI_SERVICE_RESPONSE))
MAP_COMMAND_TO_FAKE_RESPONSE["listiSCSIInitiator"] = (
json.loads(FAKE_LIST_ISCSI_INITIATOR_RESPONSE))
MAP_COMMAND_TO_FAKE_RESPONSE['cloneDatasetSnapshot'] = (
json.loads(FAKE_CLONE_DATASET_SNAPSHOT_RESPONSE))
MAP_COMMAND_TO_FAKE_RESPONSE['updateFileSystem'] = (
json.loads(FAKE_UPDATE_FILE_SYSTEM_RESPONSE))
MAP_COMMAND_TO_FAKE_RESPONSE['updateQosGroup'] = (
json.loads(FAKE_UPDATE_QOS_GROUP_RESPONSE))
MAP_COMMAND_TO_FAKE_RESPONSE['listStorageSnapshots'] = (
json.loads(FAKE_LIST_STORAGE_SNAPSHOTS_RESPONSE))
MAP_COMMAND_TO_FAKE_RESPONSE['listiSCSIAuthUser'] = (
json.loads(FAKE_LIST_ISCSI_AUTH_USER_RESPONSE))
MAP_COMMAND_TO_FAKE_RESPONSE['listiSCSIAuthGroup'] = (
json.loads(FAKE_LIST_ISCSI_AUTH_GROUP_RESPONSE))
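# The canned responses above are returned by the _side_effect_* helpers further down,
# which the tests install as mock side effects in place of
# CloudByteISCSIDriver._api_request_for_cloudbyte.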
class CloudByteISCSIDriverTestCase(testtools.TestCase):
def setUp(self):
super(CloudByteISCSIDriverTestCase, self).setUp()
self._configure_driver()
def _configure_driver(self):
configuration = conf.Configuration(None, None)
# initialize the elasticenter iscsi driver
self.driver = cloudbyte.CloudByteISCSIDriver(
configuration=configuration)
# override some parts of driver configuration
self.driver.configuration.cb_tsm_name = 'openstack'
self.driver.configuration.cb_account_name = 'CustomerA'
self.driver.configuration.cb_auth_group = 'fakeauthgroup'
self.driver.configuration.cb_apikey = '<KEY>'
self.driver.configuration.san_ip = '172.16.51.30'
def _side_effect_api_req(self, cmd, params, version='1.0'):
"""This is a side effect function.
The return value is determined based on cmd argument.
The signature matches exactly with the method it tries
to mock.
"""
return MAP_COMMAND_TO_FAKE_RESPONSE[cmd]
def _side_effect_api_req_to_create_vol(self, cmd, params, version='1.0'):
"""This is a side effect function."""
if cmd == 'createVolume':
return {}
return MAP_COMMAND_TO_FAKE_RESPONSE[cmd]
def _side_effect_api_req_to_delete_file_system(
self, cmd, params, version='1.0'):
"""This is a side effect function."""
if cmd == 'deleteFileSystem':
return {}
return MAP_COMMAND_TO_FAKE_RESPONSE[cmd]
def _side_effect_api_req_to_query_asyncjob_response(
self, cmd, params, version='1.0'):
"""This is a side effect function."""
if cmd == 'queryAsyncJobResult':
return {}
return MAP_COMMAND_TO_FAKE_RESPONSE[cmd]
def _side_effect_api_req_to_query_asyncjob(
self, cmd, params, version='1.0'):
"""This is a side effect function."""
if cmd == 'queryAsyncJobResult':
return {'queryasyncjobresultresponse': {'jobstatus': 0}}
return MAP_COMMAND_TO_FAKE_RESPONSE[cmd]
def _side_effect_api_req_to_list_tsm(self, cmd, params, version='1.0'):
"""This is a side effect function."""
if cmd == 'listTsm':
return {}
return MAP_COMMAND_TO_FAKE_RESPONSE[cmd]
def _none_response_to_list_tsm(self, cmd, params, version='1.0'):
"""This is a side effect function."""
if cmd == 'listTsm':
return {"listTsmResponse": {}}
return MAP_COMMAND_TO_FAKE_RESPONSE[cmd]
def _side_effect_api_req_to_list_iscsi_auth_group(self, cmd, params,
version='1.0'):
"""This is a side effect function."""
if cmd == 'listiSCSIAuthGroup':
return {}
return MAP_COMMAND_TO_FAKE_RESPONSE[cmd]
def _side_effect_api_req_to_list_iscsi_auth_user(self, cmd, params,
version='1.0'):
"""This is a side effect function."""
if cmd == 'listiSCSIAuthUser':
return {}
return MAP_COMMAND_TO_FAKE_RESPONSE[cmd]
def _side_effect_enable_chap(self):
"""This is a side effect function."""
self.driver.cb_use_chap = True
def _side_effect_disable_chap(self):
"""This is a side effect function."""
self.driver.cb_use_chap = False
def _side_effect_api_req_to_list_filesystem(
self, cmd, params, version='1.0'):
"""This is a side effect function."""
if cmd == 'listFileSystem':
return {}
return MAP_COMMAND_TO_FAKE_RESPONSE[cmd]
def _side_effect_api_req_to_list_vol_iscsi_service(
self, cmd, params, version='1.0'):
"""This is a side effect function."""
if cmd == 'listVolumeiSCSIService':
return {}
return MAP_COMMAND_TO_FAKE_RESPONSE[cmd]
def _side_effect_api_req_to_list_iscsi_initiator(
self, cmd, params, version='1.0'):
"""This is a side effect function."""
if cmd == 'listiSCSIInitiator':
return {}
return MAP_COMMAND_TO_FAKE_RESPONSE[cmd]
def _side_effect_create_vol_from_snap(self, cloned_volume, snapshot):
"""This is a side effect function."""
return {}
def _side_effect_create_snapshot(self, snapshot):
"""This is a side effect function."""
model_update = {}
model_update['provider_id'] = "devpool1/acc1openstacktsm/DS1@DS1Snap1"
return model_update
def _side_effect_get_connection(self, host, url):
"""This is a side effect function."""
return_obj = {}
return_obj['http_status'] = 200
# mock the response data
return_obj['data'] = MAP_COMMAND_TO_FAKE_RESPONSE['listTsm']
return_obj['error'] = None
return return_obj
def _side_effect_get_err_connection(self, host, url):
"""This is a side effect function."""
return_obj = {}
return_obj['http_status'] = 500
# mock the response data
return_obj['data'] = None
return_obj['error'] = ("Http status: 500, Error: Elasticenter "
"is not available.")
return return_obj
def _side_effect_get_err_connection2(self, host, url):
"""This is a side effect function."""
msg = ("Error executing CloudByte API %(cmd)s , Error: %(err)s" %
{'cmd': 'MockTest', 'err': 'Error'})
raise exception.VolumeBackendAPIException(msg)
def _get_fake_volume_id(self):
# Get the filesystems
fs_list = MAP_COMMAND_TO_FAKE_RESPONSE['listFileSystem']
filesystems = fs_list['listFilesystemResponse']['filesystem']
# Get the volume id from the first filesystem
volume_id = filesystems[0]['id']
return volume_id
@mock.patch.object(cloudbyte.CloudByteISCSIDriver,
'_execute_and_get_response_details')
def test_api_request_for_cloudbyte(self, mock_conn):
# Test - I
# configure the mocks with respective side-effects
mock_conn.side_effect = self._side_effect_get_connection
# run the test
data = self.driver._api_request_for_cloudbyte('listTsm', {})
# assert the data attributes
self.assertEqual(1, data['listTsmResponse']['count'])
# Test - II
# configure the mocks with side-effects
mock_conn.reset_mock()
mock_conn.side_effect = self._side_effect_get_err_connection
# run the test
with testtools.ExpectedException(
exception.VolumeBackendAPIException,
'Bad or unexpected response from the storage volume '
'backend API: Failed to execute CloudByte API'):
self.driver._api_request_for_cloudbyte('listTsm', {})
# Test - III
# configure the mocks with side-effects
mock_conn.reset_mock()
mock_conn.side_effect = self._side_effect_get_err_connection2
# run the test
with testtools.ExpectedException(
exception.VolumeBackendAPIException,
'Error executing CloudByte API'):
self.driver._api_request_for_cloudbyte('listTsm', {})
@mock.patch.object(cloudbyte.CloudByteISCSIDriver,
'_api_request_for_cloudbyte')
def test_delete_volume(self, mock_api_req):
# prepare the dependencies
fake_volume_id = self._get_fake_volume_id()
volume = {'id': fake_volume_id, 'provider_id': fake_volume_id}
# Test-I
mock_api_req.side_effect = self._side_effect_api_req
# run the test
self.driver.delete_volume(volume)
# assert that 7 api calls were invoked
self.assertEqual(7, mock_api_req.call_count)
# Test-II
# reset & re-configure mock
volume['provider_id'] = None
mock_api_req.reset_mock()
mock_api_req.side_effect = self._side_effect_api_req
# run the test
self.driver.delete_volume(volume)
# assert that no api calls were invoked
self.assertEqual(0, mock_api_req.call_count)
# Test-III
# re-configure the dependencies
volume['provider_id'] = fake_volume_id
# reset & re-configure mock
mock_api_req.reset_mock()
# configure or re-configure the mocks
mock_api_req.side_effect = (
self._side_effect_api_req_to_delete_file_system)
# Now run the test & assert the exception
self.assertRaises(exception.VolumeBackendAPIException,
self.driver.delete_volume,
volume)
# assert that 6 api calls were invoked
self.assertEqual(6, mock_api_req.call_count)
# Test - IV
# reset the mocks
mock_api_req.reset_mock()
# configure or re-configure the mocks
mock_api_req.side_effect = (
self._side_effect_api_req_to_query_asyncjob_response)
# Now run the test & assert the exception
self.assertRaises(exception.VolumeBackendAPIException,
self.driver.delete_volume,
volume)
# assert that 7 api calls were invoked
self.assertEqual(7, mock_api_req.call_count)
@mock.patch.object(cloudbyte.CloudByteISCSIDriver,
'_api_request_for_cloudbyte')
def test_delete_snapshot(self, mock_api_req):
snapshot = {
'id': 'SomeID',
'provider_id': 'devpool1/acc1openstacktsm/DS1@DS1Snap1',
'display_name': 'DS1Snap1',
'volume_id': 'SomeVol',
'volume': {
'display_name': 'DS1'
}
}
# Test - I
# now run the test
self.driver.delete_snapshot(snapshot)
# assert that 1 api call was invoked
self.assertEqual(1, mock_api_req.call_count)
# Test - II
# reconfigure the dependencies
snapshot['provider_id'] = None
# reset & reconfigure the mock
mock_api_req.reset_mock()
mock_api_req.side_effect = self._side_effect_api_req
# now run the test
self.driver.delete_snapshot(snapshot)
# assert that no api calls were invoked
self.assertEqual(0, mock_api_req.call_count)
@mock.patch.object(cloudbyte.CloudByteISCSIDriver,
'_api_request_for_cloudbyte')
def test_create_snapshot(self, mock_api_req):
# prepare the dependencies
fake_volume_id = self._get_fake_volume_id()
snapshot = {
'id': 'c60890b1-f236-46f2-9e6d-51e6e592cee6',
'display_name': 'DS1Snap1',
'volume_id': 'SomeVol',
'volume': {
'display_name': 'DS1',
'provider_id': fake_volume_id
}
}
# Test - I
# configure the mocks with respective side-effects
mock_api_req.side_effect = self._side_effect_api_req
# now run the test
model_update = self.driver.create_snapshot(snapshot)
# assert that 2 api calls were invoked
self.assertEqual(2, mock_api_req.call_count)
self.assertEqual('DS1@snap_c60890b1f23646f29e6d51e6e592cee6',
model_update['provider_id'])
# Test - II
# reconfigure the dependencies
snapshot['volume']['provider_id'] = None
# reset & reconfigure the mock
mock_api_req.reset_mock()
mock_api_req.side_effect = self._side_effect_api_req
# now run the test & assert the exception
with testtools.ExpectedException(
exception.VolumeBackendAPIException,
'Bad or unexpected response from the storage volume '
'backend API: Failed to create snapshot'):
self.driver.create_snapshot(snapshot)
# assert that no api calls were invoked
self.assertEqual(0, mock_api_req.call_count)
@mock.patch.object(cloudbyte.CloudByteISCSIDriver,
'_api_request_for_cloudbyte')
def test_create_volume(self, mock_api_req):
# prepare the dependencies
fake_volume_id = self._get_fake_volume_id()
volume = {
'id': fake_volume_id,
'size': 22
}
# Test - I
# enable CHAP
self._side_effect_enable_chap()
# configure the mocks with respective side-effects
mock_api_req.side_effect = self._side_effect_api_req
# now run the test
provider_details = self.driver.create_volume(volume)
# assert equality checks for certain configuration attributes
self.assertEqual(
'openstack', self.driver.configuration.cb_tsm_name)
self.assertEqual(
'CustomerA', self.driver.configuration.cb_account_name)
self.assertEqual(
'fakeauthgroup', self.driver.configuration.cb_auth_group)
# assert the result
self.assertEqual(
'CHAP fakeauthgroupchapuser fakeauthgroupchapsecret',
provider_details['provider_auth'])
self.assertThat(
provider_details['provider_location'],
matchers.Contains('172.16.50.35:3260'))
# assert the invoked api calls to CloudByte Storage
self.assertEqual(11, mock_api_req.call_count)
# Test - II
# reset the mock
mock_api_req.reset_mock()
# disable CHAP
self._side_effect_disable_chap()
# configure the mocks with respective side-effects
mock_api_req.side_effect = self._side_effect_api_req
# now run the test
provider_details = self.driver.create_volume(volume)
# assert equality checks for certain configuration attributes
self.assertEqual(
'openstack', self.driver.configuration.cb_tsm_name)
self.assertEqual(
'CustomerA', self.driver.configuration.cb_account_name)
# assert the result
self.assertEqual(
None,
provider_details['provider_auth'])
self.assertThat(
provider_details['provider_location'],
matchers.Contains('172.16.50.35:3260'))
# assert the invoked api calls to CloudByte Storage
self.assertEqual(9, mock_api_req.call_count)
# Test - III
# reconfigure the dependencies
volume['id'] = 'NotExists'
del volume['size']
# reset & reconfigure the mock
not sum(1 for tid in scc if not in_flight[tid].dcommit):
errlog('\n!!! Database wedged !!!')
errlog('Transactions: %s\n!!! !!!\n', ' '.join(map(str, sorted(scc))))
raise DBWedged
tid_watched = set(tid_watch or [0])
def tx_create(pid):
if not (pid % 300):
fail_if_wedged()
if not tid_watched:
yield from sys_exit(0)
in_flight[pid] = t = Transaction(pid)
t.begin = yield from sys_now()
tracker.on_start(pid)
def finish(pid):
then,now = t.begin, (yield from sys_now())
histo_add(resp_histo, then-now)
def i_commit(t):
Q = q and t.tid not in tid_watch
Q or e('\tI-commit %d', t.tid)
assert not t.deps
assert not t.war
t.icommit = get_next_stamp()
# clear dependencies
commits = set()
for rid,v in t.footprint.items():
R = Q and rid not in rid_watch
if v.t is t:
# I created this version; delete its predecessor
R or e('\tFinalize version %d of rid=%d', t.tid, rid)
assert v.prev.clobber is t
if v.prev.r:
Q or e('\t--> unlinking previous version pid=%d with readers: %s',
v.prev.t.tid, ' '.join(map(str, (x.tid for x in v.prev.r))))
#bad assertion: v.prev.clobber can D-commit with readers
#assert not v.prev.r
#v.prev = None
#v.t = zerotx
x = v.clobber
if x and x.deps:
X = Q and x.tid not in tid_watch
X or e('\tpid %d I-commit no longer blocked on %d', x.tid, t.tid)
x.deps.discard(t)
if not x.deps and not x.war and x.dcommit and not x.icommit:
assert not x.icommit
assert x not in commits
commits.add(x)
X or e('\t=> remaining deps={%s} war={%s}',
' '.join(map(str, (d.tid for d in (x.deps or ())))),
' '.join(map(str, (d.tid for d in x.war))))
for x in v.r:
X = Q and x.tid not in tid_watch
# if x accessed a version I created, I ended up in
# x.deps. If x then committed, I also ended up in
# x.war; I may also be in x.war due to it
# clobbering some version I read, but there's no
# harm removing myself now (the read will be
# removed soon enough).
X or e('\tpid %d I-commit no longer blocked on %d', x.tid, t.tid)
if x.deps:
x.deps.discard(t)
if not x.deps and not x.war and x.dcommit and not x.icommit:
assert not x.icommit
assert x not in commits
commits.add(x)
X or e('\t=> remaining deps={%s} war={%s}',
' '.join(map(str, (d.tid for d in (x.deps or ())))),
' '.join(map(str, (d.tid for d in x.war))))
else:
# remove myself from the version's read set
Q or e('\tRemove %d from read set of rid=%d', t.tid, rid)
v.r.remove(t)
x = v.clobber
if x and x.war:
X = Q and x.tid not in tid_watch
# my read no longer prevents x from I-committing its clobber of v
x.war.discard(t)
X or e('\tpid %d I-commit no longer blocked on WAR %d', x.tid, t.tid)
X or e('\t=> remaining WAR deps: %s',
' '.join(map(str, (d.tid for d in x.war))))
if not x.war and not x.deps and x.dcommit and not x.icommit:
assert not x.icommit
#bad assertion: new versions could arrive after I D-commit
#assert v is db[rid].prev
assert x not in commits
commits.add(x)
X or e('\trid=%s pid=%d dcommit=%s WAR={%s} deps={%s}',
rid, x.tid, x.dcommit,
' '.join(map(str, (d.tid for d in x.war))),
' '.join(map(str, (d.tid for d in x.deps))))
del in_flight[t.tid]
for x in commits:
i_commit(x)
if t.durable:
tracker.on_finalize(t.tid)
def check_for_cycles(pid, name, verbose=False):
deps = collections.defaultdict(dict)
for x in in_flight.values():
for rid,v in x.footprint.items():
if v.t is x:
deps[x.tid].setdefault(v.prev.t.tid, ('ww', rid))
for d in v.prev.r:
deps[x.tid].setdefault(d.tid, ('rw', rid))
else:
deps[x.tid].setdefault(v.t.tid, ('wr', rid))
dstring = lambda dcommit: dcommit and ('@%s' % dcommit) or ''
scc = tarjan_incycle(deps, pid)
if scc:
scc = set(scc)
if verbose and scc:
errlog('\t=> %s dependency cycle contains:', name)
edges = ['%s%s %s(%s) %s%s' % (tid,dstring(in_flight[tid].dcommit),
dtype,rid,dep,
dstring(in_flight[dep].dcommit))
for tid in scc
for dep,(dtype,rid) in deps[tid].items()
if dep in scc]
errlog('\t\t%s' % '\n\t\t'.join(sorted(edges)))
return scc
def tx_commit(pid):
Q = q and pid not in tid_watch
t = in_flight[pid]
yield from sys_busy(random.randint(ONE_TICK, 2*ONE_TICK), color='yellow')
Q or e('Commit %s', pid)
tid_watched.discard(pid)
# /// BEGIN CRITICAL SECTION ///
# check for Dangerous Skew
'''Dangerous Skew arises when transaction Ti imposes a WAR dependency
on Tj, and Tj committed before at least one of Ti's
dependencies.
'''
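# Illustrative note (added comment, not from the original source): the dangerous
# structure being guarded against is a chain T1 --rw--> T2 --rw--> T3 of
# anti-dependencies. The checks below abort the committing transaction when it plays
# the T2 role and its T3 has already committed, or plays the T3 role and its T2 has
# already committed.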
# construct WAR set (as of D-commit); we'll install it at all
# clobbered versions after the commit succeeds.
assert not t.war
for v in t.footprint.values():
if v.t is t:
if v.prev.clobber:
# raced with another writer and lost
raise UncommittedClobber
if not v.prev.t.icommit:
# still not I-committed
t.deps.add(v.prev.t)
t.war |= v.prev.r
else:
assert t in v.r
if not v.t.icommit:
t.deps.add(v.t)
# check for Dangerous Structures
if danger_check >= 1 and t.war:
q or e('\t%d WAR dependencies --> check for Dangerous Structures', len(t.war))
# T2 in some Dangerous Structure with T3 already committed?
t1 = next(iter(t.war))
for v in t.footprint.values():
t3 = v.clobber
if t3 and t3.dcommit:
q or e('\tAbort: T2 in a Dangerous Structure: %s --RW--> %s --RW--> %s',
t1.tid, pid, t3.tid)
raise DangerousStruct2
# T3 in some Dangerous Structure with T2 already committed?
for t2 in t.war:
if t2.war and t2.dcommit:
t1 = next(iter(t2.war))
q or e('\tAbort: T3 in a Dangerous Structure: %s --RW--> %s --RW--> %s',
t1.tid, t2.tid, pid)
raise DangerousStruct3
# install clobbered versions
for rid,v in t.clobbers.items():
v.prev.clobber = t
db[rid] = v
t.dcommit = get_next_stamp() # writes now visible to non-dependents
# /// END CRITICAL SECTION ///
# save space: clobbered versions no longer need read set
#for rid,v in t.clobbers.items():
# v.prev.r = None
for x in t.war:
assert not x.icommit
assert x.tid in in_flight
for x in t.deps:
assert not x.icommit
assert x.tid in in_flight
if not t.war and not t.deps:
i_commit(t)
else:
Q or e('\tI-commit blocked on war={%s} deps={%s}',
' '.join(map(str, (d.tid for d in t.war))),
' '.join(map(str, (d.tid for d in t.deps))))
tracker.on_finish(pid, True)
if ww_blocks:
for rid in t.clobbers:
c,w = clobbering.pop(rid)
assert c is t
if w:
X = Q and w.tid not in tid_watch
X or e('\tUnblocking pid=%s from rid=%s', w.tid, rid)
clobbering[rid] = (w,None)
w.blocked = False
yield from sys_unpark(w.tid)
else:
Q or e('\tNobody waiting on rid=%d', rid)
yield from sys_sleep(random.randint(5*ONE_TICK, 10*ONE_TICK))
yield from sys_busy(random.randint(ONE_TICK, 2*ONE_TICK), color='orange')
t.durable = True
if t.icommit:
tracker.on_finalize(pid)
def tx_abort(pid):
Q = q and pid not in tid_watch
t = in_flight[pid]
Q or e('Abort %d', pid)
commits = set()
for rid,v in t.footprint.items():
R = Q and rid not in rid_watch
if v.t is t:
# I created this version, delete it
R or e('\tRoll back update of rid=%d', rid)
# nobody else can see this version
assert not v.clobber and not v.r
assert t not in v.r
continue
if v.r:
R or e('\tRemove %d from read set of rid=%d', t.tid, rid)
v.r.remove(t)
else:
q or e('\tUh-oh! rid=%d was neither read nor written by me (v.r=%s)', rid, v.r)
assert not 'reachable'
x = v.clobber
if x and x.war:
X = Q and x.tid not in tid_watch
x.war.discard(t)
X or e('\tpid %d I-commit no longer blocked on WAR %d', x.tid, pid)
X or e('\t=> remaining WAR deps: %s',
' '.join(map(str, (d.tid for d in x.war))))
if not x.war and not x.deps and x.dcommit and not x.icommit:
assert not x.icommit
assert x not in commits
commits.add(x)
else:
q or e('\trid=%s still has readers waiting to I-commit: %s',
rid, ' '.join(map(str, (d.tid for d in x.war))))
elif x:
q or e('\tskipping pid=%d with empty WAR', x.tid)
t.dcommit = t.icommit = t.durable = get_next_stamp()
del in_flight[t.tid]
for x in commits:
i_commit(x)
if ww_blocks:
if t.last_write is not None:
t.clobbers[t.last_write] = None
for rid in t.clobbers:
c,w = clobbering.pop(rid, no_clobber)
if c is t:
if w:
X = Q and w.tid not in tid_watch
X or e('\tUnblocking pid=%s', w.tid)
clobbering[rid] = (w,None)
w.blocked = False
yield from sys_unpark(w.tid)
else:
Q or e('\tReleasing clobber slot on rid=%d', rid)
else:
# happens if we aborted due to
32.926, 15.639, 34.545,
VERTEX, 32.941, 15.667, 34.572,
VERTEX, 33.067, 15.780, 34.542,
VERTEX, 33.193, 15.892, 34.513,
VERTEX, 33.319, 16.005, 34.485,
VERTEX, 33.387, 16.055, 34.446,
VERTEX, 33.535, 16.164, 34.363,
VERTEX, 33.683, 16.273, 34.280,
VERTEX, 33.831, 16.382, 34.199,
VERTEX, 33.979, 16.492, 34.119,
VERTEX, 34.127, 16.603, 34.040,
VERTEX, 34.275, 16.714, 33.962,
VERTEX, 34.310, 16.740, 33.944,
VERTEX, 34.440, 16.800, 33.797,
VERTEX, 34.572, 16.862, 33.652,
VERTEX, 34.705, 16.926, 33.508,
VERTEX, 34.838, 16.991, 33.366,
VERTEX, 34.973, 17.058, 33.225,
VERTEX, 35.108, 17.127, 33.086,
VERTEX, 35.245, 17.196, 32.947,
VERTEX, 35.381, 17.267, 32.810,
VERTEX, 35.519, 17.339, 32.673,
VERTEX, 35.656, 17.411, 32.537,
VERTEX, 35.636, 17.302, 32.357,
VERTEX, 35.600, 17.177, 32.174,
VERTEX, 35.550, 17.043, 32.001,
VERTEX, 35.508, 16.948, 31.895,
END,
BEGIN, LINE_LOOP,
VERTEX, 34.367, 14.074, 31.104,
VERTEX, 34.207, 14.100, 31.233,
VERTEX, 34.047, 14.125, 31.361,
VERTEX, 33.887, 14.150, 31.489,
VERTEX, 33.727, 14.175, 31.617,
VERTEX, 33.566, 14.199, 31.744,
VERTEX, 33.405, 14.223, 31.872,
VERTEX, 33.245, 14.246, 32.000,
VERTEX, 33.084, 14.269, 32.127,
VERTEX, 32.922, 14.291, 32.254,
VERTEX, 32.761, 14.313, 32.382,
VERTEX, 32.622, 14.331, 32.491,
VERTEX, 32.767, 14.462, 32.460,
VERTEX, 32.911, 14.593, 32.430,
VERTEX, 33.055, 14.724, 32.400,
VERTEX, 33.200, 14.854, 32.370,
VERTEX, 33.344, 14.985, 32.340,
VERTEX, 33.488, 15.116, 32.310,
VERTEX, 33.633, 15.246, 32.280,
VERTEX, 33.777, 15.377, 32.250,
VERTEX, 33.921, 15.508, 32.220,
VERTEX, 34.065, 15.639, 32.190,
VERTEX, 34.210, 15.770, 32.160,
VERTEX, 34.354, 15.901, 32.131,
VERTEX, 34.498, 16.032, 32.101,
VERTEX, 34.643, 16.162, 32.071,
VERTEX, 34.787, 16.293, 32.042,
VERTEX, 34.931, 16.424, 32.012,
VERTEX, 35.075, 16.555, 31.983,
VERTEX, 35.219, 16.686, 31.953,
VERTEX, 35.364, 16.817, 31.924,
VERTEX, 35.508, 16.948, 31.895,
VERTEX, 35.544, 16.883, 31.833,
VERTEX, 35.604, 16.738, 31.709,
VERTEX, 35.650, 16.584, 31.591,
VERTEX, 35.680, 16.421, 31.478,
VERTEX, 35.695, 16.252, 31.373,
VERTEX, 35.694, 16.077, 31.276,
VERTEX, 35.678, 15.898, 31.188,
VERTEX, 35.646, 15.717, 31.110,
VERTEX, 35.600, 15.535, 31.041,
VERTEX, 35.538, 15.354, 30.984,
VERTEX, 35.463, 15.174, 30.938,
VERTEX, 35.374, 14.999, 30.904,
VERTEX, 35.271, 14.828, 30.881,
VERTEX, 35.157, 14.664, 30.871,
VERTEX, 35.032, 14.508, 30.873,
VERTEX, 34.897, 14.362, 30.887,
VERTEX, 34.754, 14.225, 30.914,
VERTEX, 34.602, 14.101, 30.952,
VERTEX, 34.527, 14.047, 30.976,
END,
BEGIN, LINE_LOOP,
VERTEX, 32.589, 14.318, 32.492,
VERTEX, 32.595, 14.329, 32.542,
VERTEX, 32.608, 14.330, 32.517,
VERTEX, 32.622, 14.331, 32.491,
END,
BEGIN, LINE_LOOP,
VERTEX, 33.475, 13.481, 30.812,
VERTEX, 33.287, 13.436, 30.860,
VERTEX, 33.101, 13.406, 30.928,
VERTEX, 32.922, 13.392, 31.015,
VERTEX, 32.751, 13.394, 31.119,
VERTEX, 32.592, 13.412, 31.239,
VERTEX, 32.455, 13.445, 31.365,
VERTEX, 32.409, 13.550, 31.537,
VERTEX, 32.361, 13.654, 31.709,
VERTEX, 32.357, 13.663, 31.724,
VERTEX, 32.404, 13.798, 31.881,
VERTEX, 32.451, 13.932, 32.039,
VERTEX, 32.499, 14.067, 32.197,
VERTEX, 32.547, 14.201, 32.354,
VERTEX, 32.589, 14.318, 32.492,
VERTEX, 32.622, 14.331, 32.491,
VERTEX, 32.762, 14.313, 32.381,
VERTEX, 32.923, 14.291, 32.254,
VERTEX, 33.084, 14.269, 32.126,
VERTEX, 33.245, 14.246, 31.999,
VERTEX, 33.406, 14.223, 31.872,
VERTEX, 33.567, 14.199, 31.744,
VERTEX, 33.727, 14.175, 31.616,
VERTEX, 33.887, 14.150, 31.488,
VERTEX, 34.048, 14.125, 31.360,
VERTEX, 34.208, 14.100, 31.232,
VERTEX, 34.367, 14.074, 31.104,
VERTEX, 34.527, 14.047, 30.976,
VERTEX, 34.443, 13.973, 30.923,
VERTEX, 34.292, 13.861, 30.856,
VERTEX, 34.128, 13.757, 30.809,
VERTEX, 33.953, 13.664, 30.783,
VERTEX, 33.771, 13.582, 30.778,
VERTEX, 33.583, 13.514, 30.793,
END,
BEGIN, LINE_LOOP,
VERTEX, 33.697, 17.268, 35.620,
VERTEX, 33.643, 17.162, 35.591,
VERTEX, 33.555, 16.993, 35.546,
VERTEX, 33.467, 16.824, 35.501,
VERTEX, 33.378, 16.655, 35.457,
VERTEX, 33.289, 16.486, 35.413,
VERTEX, 33.199, 16.317, 35.370,
VERTEX, 33.108, 16.148, 35.327,
VERTEX, 33.018, 15.979, 35.285,
VERTEX, 32.959, 15.871, 35.258,
VERTEX, 32.851, 15.955, 35.408,
VERTEX, 32.742, 16.037, 35.559,
VERTEX, 32.633, 16.119, 35.709,
VERTEX, 32.522, 16.198, 35.859,
VERTEX, 32.410, 16.277, 36.010,
VERTEX, 32.297, 16.354, 36.160,
VERTEX, 32.182, 16.430, 36.310,
VERTEX, 32.067, 16.504, 36.460,
VERTEX, 32.221, 16.592, 36.392,
VERTEX, 32.384, 16.684, 36.319,
VERTEX, 32.546, 16.773, 36.245,
VERTEX, 32.709, 16.861, 36.169,
VERTEX, 32.872, 16.945, 36.091,
VERTEX, 33.035, 17.026, 36.010,
VERTEX, 33.198, 17.104, 35.926,
VERTEX, 33.362, 17.178, 35.840,
VERTEX, 33.520, 17.246, 35.754,
END,
BEGIN, LINE_LOOP,
VERTEX, 30.355, 17.788, 37.197,
VERTEX, 30.534, 17.753, 37.124,
VERTEX, 30.712, 17.719, 37.051,
VERTEX, 30.891, 17.684, 36.977,
VERTEX, 31.069, 17.650, 36.903,
VERTEX, 31.248, 17.616, 36.829,
VERTEX, 31.426, 17.583, 36.754,
VERTEX, 31.603, 17.550, 36.679,
VERTEX, 31.781, 17.517, 36.603,
VERTEX, 31.958, 17.485, 36.526,
VERTEX, 32.135, 17.454, 36.448,
VERTEX, 32.311, 17.423, 36.368,
VERTEX, 32.486, 17.394, 36.286,
VERTEX, 32.660, 17.366, 36.203,
VERTEX, 32.834, 17.339, 36.117,
VERTEX, 33.007, 17.314, 36.030,
VERTEX, 33.179, 17.290, 35.940,
VERTEX, 33.350, 17.268, 35.848,
VERTEX, 33.520, 17.246, 35.754,
VERTEX, 33.487, 17.232, 35.773,
VERTEX, 33.325, 17.161, 35.860,
VERTEX, 33.163, 17.087, 35.944,
VERTEX, 33.002, 17.010, 36.026,
VERTEX, 32.841, 16.929, 36.106,
VERTEX, 32.680, 16.845, 36.182,
VERTEX, 32.519, 16.759, 36.257,
VERTEX, 32.359, 16.670, 36.330,
VERTEX, 32.199, 16.579, 36.402,
VERTEX, 32.067, 16.504, 36.460,
VERTEX, 31.905, 16.574, 36.536,
VERTEX, 31.742, 16.643, 36.611,
VERTEX, 31.578, 16.711, 36.685,
VERTEX, 31.413, 16.778, 36.758,
VERTEX, 31.247, 16.845, 36.830,
VERTEX, 31.081, 16.910, 36.901,
VERTEX, 30.994, 16.943, 36.938,
VERTEX, 30.859, 17.087, 36.996,
VERTEX, 30.724, 17.230, 37.052,
VERTEX, 30.587, 17.373, 37.108,
VERTEX, 30.451, 17.516, 37.163,
VERTEX, 30.314, 17.660, 37.217,
VERTEX, 30.177, 17.803, 37.270,
VERTEX, 30.176, 17.822, 37.270,
END,
BEGIN, LINE_LOOP,
VERTEX, 34.301, 18.249, 34.375,
VERTEX, 34.249, 18.115, 34.529,
VERTEX, 34.197, 17.979, 34.682,
VERTEX, 34.147, 17.843, 34.834,
VERTEX, 34.098, 17.705, 34.985,
VERTEX, 34.051, 17.565, 35.136,
VERTEX, 34.005, 17.527, 35.200,
VERTEX, 33.903, 17.441, 35.340,
VERTEX, 33.800, 17.355, 35.481,
VERTEX, 33.697, 17.268, 35.620,
VERTEX, 33.675, 17.265, 35.637,
VERTEX, 33.521, 17.246, 35.754,
VERTEX, 33.442, 17.256, 35.798,
VERTEX, 33.266, 17.279, 35.893,
VERTEX, 33.090, 17.303, 35.987,
VERTEX, 32.912, 17.328, 36.078,
VERTEX, 32.734, 17.355, 36.167,
VERTEX, 32.554, 17.383, 36.254,
VERTEX, 32.374, 17.413, 36.339,
VERTEX, 32.193, 17.443, 36.421,
VERTEX, 32.011, 17.475, 36.502,
VERTEX, 31.829, 17.508, 36.582,
VERTEX, 31.647, 17.542, 36.661,
VERTEX, 31.464, 17.576, 36.738,
VERTEX, 31.280, 17.610, 36.815,
VERTEX, 31.097, 17.645, 36.892,
VERTEX, 30.913, 17.680, 36.968,
VERTEX, 30.729, 17.715, 37.044,
VERTEX, 30.545, 17.751, 37.119,
VERTEX, 30.361, 17.786, 37.194,
VERTEX, 30.177, 17.822, 37.269,
VERTEX, 30.120, 17.934, 37.210,
VERTEX, 30.028, 18.149, 37.084,
VERTEX, 29.955, 18.365, 36.946,
VERTEX, 29.916, 18.525, 36.834,
VERTEX, 29.936, 18.576, 36.780,
VERTEX, 30.031, 18.638, 36.674,
VERTEX, 30.155, 18.718, 36.537,
VERTEX, 30.278, 18.798, 36.399,
VERTEX, 30.401, 18.877, 36.262,
VERTEX, 30.524, 18.956, 36.124,
VERTEX, 30.648, 19.034, 35.986,
VERTEX, 30.772, 19.111, 35.848,
VERTEX, 30.896, 19.188, 35.710,
VERTEX, 30.952, 19.183, 35.681,
VERTEX, 31.131, 19.169, 35.589,
VERTEX, 31.309, 19.153, 35.499,
VERTEX, 31.488, 19.136, 35.409,
VERTEX, 31.666, 19.117, 35.321,
VERTEX, 31.845, 19.094, 35.234,
VERTEX, 32.023, 19.069, 35.148,
VERTEX, 32.202, 19.040, 35.065,
VERTEX, 32.380, 19.007, 34.983,
VERTEX, 32.559, 18.968, 34.905,
VERTEX, 32.737, 18.926, 34.829,
VERTEX, 32.916, 18.878, 34.755,
VERTEX, 33.094, 18.826, 34.683,
VERTEX, 33.273, 18.770, 34.614,
VERTEX, 33.451, 18.711, 34.546,
VERTEX, 33.630, 18.650, 34.480,
VERTEX, 33.808, 18.586, 34.415,
VERTEX, 33.987, 18.521, 34.350,
VERTEX, 34.165, 18.454, 34.287,
VERTEX, 34.344, 18.386, 34.224,
VERTEX, 34.354, 18.382, 34.220,
END,
BEGIN, LINE_LOOP,
VERTEX, 34.806, 19.188, 33.950,
VERTEX, 34.714, 19.028, 34.003,
VERTEX, 34.622, 18.867, 34.057,
VERTEX, 34.532, 18.706, 34.111,
VERTEX, 34.443, 18.545, 34.165,
VERTEX, 34.354, 18.383, 34.219,
VERTEX, 34.221, 18.434, 34.266,
VERTEX, 34.043, 18.501, 34.329,
VERTEX, 33.865, 18.566, 34.393,
VERTEX, 33.688, 18.630, 34.457,
VERTEX, 33.510, 18.693, 34.523,
VERTEX, 33.332, 18.752, 34.590,
VERTEX, 33.154, 18.809, 34.658,
VERTEX, 32.977, 18.862, 34.728,
VERTEX, 32.799, 18.911, 34.801,
VERTEX, 32.621, 18.955, 34.876,
VERTEX, 32.444, 18.995, 34.953,
VERTEX, 32.266, 19.030, 35.033,
VERTEX, 32.088, 19.060, 35.116,
VERTEX, 31.911, 19.087, 35.200,
VERTEX, 31.733, 19.110, 35.286,
VERTEX, 31.555, 19.130, 35.374,
VERTEX, 31.378, 19.148, 35.463,
VERTEX, 31.200, 19.164, 35.553,
VERTEX, 31.023, 19.179, 35.643,
VERTEX, 30.898, 19.188, 35.707,
VERTEX, 30.927, 19.403, 35.671,
VERTEX, 30.954, 19.618, 35.637,
VERTEX, 30.979, 19.834, 35.605,
VERTEX, 31.092, 19.793, 35.550,
VERTEX, 31.265, 19.732, 35.467,
VERTEX, 31.439, 19.674, 35.384,
VERTEX, 31.612, 19.617, 35.302,
VERTEX, 31.787, 19.564, 35.220,
VERTEX, 31.962, 19.514, 35.138,
VERTEX, 32.138, 19.469, 35.056,
VERTEX, 32.315, 19.429, 34.976,
VERTEX, 32.494, 19.395, 34.896,
VERTEX, 32.673, 19.367, 34.816,
VERTEX, 32.854, 19.346, 34.737,
VERTEX, 33.037, 19.330, 34.659,
VERTEX, 33.220, 19.319, 34.582,
VERTEX, 33.404, 19.313, 34.505,
VERTEX, 33.589, 19.311, 34.428,
VERTEX, 33.774, 19.311, 34.352,
VERTEX, 33.960, 19.313, 34.276,
VERTEX, 34.147, 19.318, 34.200,
VERTEX, 34.333, 19.324, 34.125,
VERTEX, 34.520, 19.331, 34.049,
VERTEX, 34.708, 19.340, 33.974,
VERTEX, 34.895, 19.349, 33.898,
VERTEX, 34.898, 19.349, 33.897,
END,
BEGIN, LINE_LOOP,
VERTEX, 30.001, 18.967, 36.701,
VERTEX, 30.089, 19.115, 36.600,
VERTEX, 30.178, 19.264, 36.500,
VERTEX, 30.267, 19.411, 36.399,
VERTEX, 30.357, 19.558, 36.297,
VERTEX, 30.447, 19.705, 36.195,
VERTEX, 30.537, 19.851, 36.092,
VERTEX, 30.628, 19.996, 35.989,
VERTEX, 30.658, 19.981, 35.956,
VERTEX, 30.792, 19.918, 35.809,
VERTEX, 30.926, 19.857, 35.663,
VERTEX, 30.979, 19.834, 35.605,
VERTEX, 30.954, 19.618, 35.637,
VERTEX, 30.927, 19.403, 35.671,
VERTEX, 30.898, 19.188, 35.707,
VERTEX, 30.882, 19.179, 35.725,
VERTEX, 30.760, 19.103, 35.861,
VERTEX, 30.638, 19.027, 35.997,
VERTEX, 30.516, 18.950, 36.133,
VERTEX, 30.394, 18.872, 36.269,
VERTEX, 30.273, 18.794, 36.405,
VERTEX, 30.151, 18.716, 36.540,
VERTEX, 30.030, 18.637, 36.675,
VERTEX, 29.936, 18.576, 36.780,
VERTEX, 29.908, 18.606, 36.811,
VERTEX, 29.907, 18.616, 36.811,
VERTEX, 29.913, 18.817, 36.801,
END,
BEGIN, LINE_LOOP,
VERTEX, 29.936, 18.576, 36.780,
VERTEX, 29.916, 18.524, 36.835,
VERTEX, 29.910, 18.570, 36.823,
VERTEX, 29.908, 18.605, 36.811,
END,
BEGIN, LINE_LOOP,
VERTEX, 34.898, 19.349, 33.897,
VERTEX, 34.711, 19.340, 33.972,
VERTEX, 34.522, 19.331, 34.048,
VERTEX, 34.334, 19.324, 34.125,
VERTEX, 34.146, 19.318, 34.201,
VERTEX, 33.958, 19.313, 34.277,
VERTEX, 33.770, 19.311, 34.354,
VERTEX, 33.583, 19.311, 34.431,
VERTEX, 33.397, 19.313, 34.508,
VERTEX, 33.211, 19.320, 34.586,
VERTEX, 33.026, 19.331, 34.664,
VERTEX, 32.843, 19.347, 34.742,
VERTEX, 32.660, 19.369, 34.822,
VERTEX, 32.479, 19.398, 34.902,
VERTEX, 32.300, 19.432, 34.983,
VERTEX, 32.121, 19.473, 35.064,
VERTEX, 31.944, 19.519, 35.146,
VERTEX, 31.767, 19.570, 35.229,
VERTEX, 31.591, 19.624, 35.312,
VERTEX, 31.416, 19.681, 35.395,
VERTEX, 31.242, 19.741, 35.479,
VERTEX, 31.067, 19.802, 35.562,
VERTEX, 30.979, 19.834, 35.605,
VERTEX, 31.011, 19.975, 35.478,
VERTEX, 31.042, 20.120, 35.355,
VERTEX, 31.071, 20.266, 35.234,
VERTEX, 31.098, 20.416, 35.115,
VERTEX, 31.106, 20.460, 35.082,
VERTEX, 31.228, 20.548, 34.942,
VERTEX, 31.349, 20.639, 34.804,
VERTEX, 31.470, 20.733, 34.668,
VERTEX, 31.590, 20.831, 34.533,
VERTEX, 31.710, 20.933, 34.401,
VERTEX, 31.830, 21.037, 34.270,
VERTEX, 31.949, 21.144, 34.140,
VERTEX, 32.068, 21.253, 34.012,
VERTEX, 32.186, 21.364, 33.885,
VERTEX, 32.304, 21.478, 33.760,
VERTEX, 32.422, 21.593, 33.635,
VERTEX, 32.483, 21.584, 33.608,
VERTEX, 32.701, 21.533, 33.523,
VERTEX, 32.920, 21.466, 33.451,
VERTEX, 33.137, 21.383, 33.393,
VERTEX, 33.350, 21.284, 33.347,
VERTEX, 33.559, 21.170, 33.316,
VERTEX, 33.761, 21.042, 33.299,
VERTEX, 33.955, 20.902, 33.296,
VERTEX, 34.140, 20.749, 33.307,
VERTEX, 34.313, 20.586, 33.333,
VERTEX, 34.474, 20.413, 33.372,
VERTEX, 34.622, 20.232, 33.426,
VERTEX, 34.755, 20.044, 33.492,
VERTEX, 34.873, 19.851, 33.572,
VERTEX, 34.974, 19.654, 33.664,
VERTEX, 35.038, 19.499, 33.743,
END,
BEGIN, LINE_LOOP,
VERTEX, 30.680, 20.135, 35.835,
VERTEX, 30.734, 20.273, 35.682,
VERTEX, 30.789, 20.412, 35.529,
VERTEX, 30.846, 20.549, 35.376,
VERTEX, 30.966, 20.508, 35.239,
VERTEX, 31.106, 20.460, 35.081,
VERTEX, 31.081, 20.321, 35.190,
VERTEX, 31.049, 20.155, 35.325,
VERTEX, 31.015, 19.993, 35.463,
VERTEX, 30.979, 19.834, 35.605,
VERTEX, 30.854, 19.890, 35.742,
VERTEX, 30.727, 19.948, 35.881,
VERTEX, 30.628, 19.996, 35.989,
END,
BEGIN, LINE_LOOP,
VERTEX, 32.546, 20.830, 32.041,
VERTEX, 32.439, 20.671, 32.070,
VERTEX, 32.331, 20.512, 32.100,
VERTEX, 32.222, 20.353, 32.129,
VERTEX, 32.114, 20.195, 32.158,
VERTEX, 32.005, 20.036, 32.187,
VERTEX, 31.895, 19.879, 32.216,
VERTEX, 31.785, 19.722, 32.245,
VERTEX, 31.673, 19.565, 32.274,
VERTEX, 31.561, 19.409, 32.303,
VERTEX, 31.448, 19.254, 32.332,
VERTEX, 31.334, 19.100, 32.360,
VERTEX, 31.218, 18.947, 32.388,
VERTEX, 31.221, 18.976, 32.441,
VERTEX, 31.228, 19.073, 32.617,
VERTEX, 31.234, 19.170, 32.793,
VERTEX, 31.238, 19.267, 32.969,
VERTEX, 31.239, 19.364, 33.145,
VERTEX, 31.237, 19.462, 33.320,
VERTEX, 31.232, 19.560, 33.495,
VERTEX, 31.225, 19.659, | |
# Repository: dgursoy/tomopy
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# #########################################################################
# Copyright (c) 2016-17, UChicago Argonne, LLC. All rights reserved. #
# #
# Copyright 2017-17. UChicago Argonne, LLC. This software was produced #
# under U.S. Government contract DE-AC02-06CH11357 for Argonne National #
# Laboratory (ANL), which is operated by UChicago Argonne, LLC for the #
# U.S. Department of Energy. The U.S. Government has rights to use, #
# reproduce, and distribute this software. NEITHER THE GOVERNMENT NOR #
# UChicago Argonne, LLC MAKES ANY WARRANTY, EXPRESS OR IMPLIED, OR #
# ASSUMES ANY LIABILITY FOR THE USE OF THIS SOFTWARE. If software is #
# modified to produce derivative works, such modified software should #
# be clearly marked, so as not to confuse it with the version available #
# from ANL. #
# #
# Additionally, redistribution and use in source and binary forms, with #
# or without modification, are permitted provided that the following #
# conditions are met: #
# #
# * Redistributions of source code must retain the above copyright #
# notice, this list of conditions and the following disclaimer. #
# #
# * Redistributions in binary form must reproduce the above copyright #
# notice, this list of conditions and the following disclaimer in #
# the documentation and/or other materials provided with the #
# distribution. #
# #
# * Neither the name of UChicago Argonne, LLC, Argonne National #
# Laboratory, ANL, the U.S. Government, nor the names of its #
# contributors may be used to endorse or promote products derived #
# from this software without specific prior written permission. #
# #
# THIS SOFTWARE IS PROVIDED BY UChicago Argonne, LLC AND CONTRIBUTORS #
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT #
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS #
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL UChicago #
# Argonne, LLC OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, #
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, #
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; #
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER #
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT #
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN #
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE #
# POSSIBILITY OF SUCH DAMAGE. #
# #########################################################################
import numpy as np
import logging
import warnings
from skimage import transform as tf
from skimage.feature import register_translation
from tomopy.recon.algorithm import recon
from tomopy.sim.project import project
import dxchange
logger = logging.getLogger(__name__)
__author__ = "<NAME>"
__copyright__ = "Copyright (c) 2016-17, UChicago Argonne, LLC."
__docformat__ = 'restructuredtext en'
__all__ = ['align_seq',
'align_joint',
'scale',
'tilt',
'add_jitter',
'add_noise',
'blur_edges',
'shift_images']
def align_seq(
prj, ang, fdir='.', iters=10, pad=(0, 0),
blur=True, center=None, algorithm='sirt',
upsample_factor=10, rin=0.5, rout=0.8,
save=False, debug=True):
"""
Aligns the projection image stack using the sequential
re-projection algorithm :cite:`Gursoy:17`.
Parameters
----------
prj : ndarray
3D stack of projection images. The first dimension
is projection axis, second and third dimensions are
the x- and y-axes of the projection image, respectively.
ang : ndarray
Projection angles in radians as an array.
iters : scalar, optional
Number of iterations of the algorithm.
pad : list-like, optional
Padding for projection images in x and y-axes.
blur : bool, optional
Blurs the edge of the image before registration.
center: array, optional
Location of rotation axis.
algorithm : {str, function}
One of the following string values.
'art'
Algebraic reconstruction technique :cite:`Kak:98`.
'gridrec'
Fourier grid reconstruction algorithm :cite:`Dowd:99`,
:cite:`Rivers:06`.
'mlem'
Maximum-likelihood expectation maximization algorithm
:cite:`Dempster:77`.
'sirt'
Simultaneous algebraic reconstruction technique.
'tv'
Total Variation reconstruction technique
:cite:`Chambolle:11`.
'grad'
Gradient descent method with a constant step size
upsample_factor : integer, optional
The upsampling factor. Registration accuracy is
        inversely proportional to upsample_factor.
rin : scalar, optional
        The inner radius of the blur function. Pixels inside
        rin are set to one.
rout : scalar, optional
        The outer radius of the blur function. Pixels outside
        rout are set to zero.
save : bool, optional
Saves projections and corresponding reconstruction
for each algorithm iteration.
    debug : bool, optional
Provides debugging info such as iterations and error.
Returns
-------
ndarray
        3D stack of aligned projection images.
ndarray
Error array for each iteration.
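    Examples
    --------
    A minimal sketch; the projection stack and angle grid below are
    illustrative assumptions rather than values tied to this module:
    >>> import numpy as np
    >>> prj = np.random.rand(180, 16, 64).astype('float32')
    >>> ang = np.linspace(0, np.pi, prj.shape[0], endpoint=False)
    >>> prj_ali, sx, sy, conv = align_seq(prj, ang, iters=2, debug=False)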
"""
# Needs scaling for skimage float operations.
prj, scl = scale(prj)
# Shift arrays
sx = np.zeros((prj.shape[0]))
sy = np.zeros((prj.shape[0]))
conv = np.zeros((iters))
# Pad images.
npad = ((0, 0), (pad[1], pad[1]), (pad[0], pad[0]))
prj = np.pad(prj, npad, mode='constant', constant_values=0)
# Register each image frame-by-frame.
for n in range(iters):
# Reconstruct image.
rec = recon(prj, ang, center=center, algorithm=algorithm)
# Re-project data and obtain simulated data.
sim = project(rec, ang, center=center, pad=False)
# Blur edges.
if blur:
_prj = blur_edges(prj, rin, rout)
_sim = blur_edges(sim, rin, rout)
else:
_prj = prj
_sim = sim
# Initialize error matrix per iteration.
err = np.zeros((prj.shape[0]))
# For each projection
for m in range(prj.shape[0]):
# Register current projection in sub-pixel precision
shift, error, diffphase = register_translation(
_prj[m], _sim[m], upsample_factor)
err[m] = np.sqrt(shift[0]*shift[0] + shift[1]*shift[1])
sx[m] += shift[0]
sy[m] += shift[1]
# Register current image with the simulated one
tform = tf.SimilarityTransform(translation=(shift[1], shift[0]))
prj[m] = tf.warp(prj[m], tform, order=5)
if debug:
print('iter=' + str(n) + ', err=' + str(np.linalg.norm(err)))
conv[n] = np.linalg.norm(err)
if save:
dxchange.write_tiff(prj, fdir + '/tmp/iters/prj/prj')
dxchange.write_tiff(sim, fdir + '/tmp/iters/sim/sim')
dxchange.write_tiff(rec, fdir + '/tmp/iters/rec/rec')
# Re-normalize data
prj *= scl
return prj, sx, sy, conv
def align_joint(
prj, ang, fdir='.', iters=10, pad=(0, 0),
blur=True, center=None, algorithm='sirt',
upsample_factor=10, rin=0.5, rout=0.8,
save=False, debug=True):
"""
Aligns the projection image stack using the joint
re-projection algorithm :cite:`Gursoy:17`.
Parameters
----------
prj : ndarray
3D stack of projection images. The first dimension
is projection axis, second and third dimensions are
the x- and y-axes of the projection image, respectively.
ang : ndarray
Projection angles in radians as an array.
iters : scalar, optional
Number of iterations of the algorithm.
pad : list-like, optional
Padding for projection images in x and y-axes.
blur : bool, optional
Blurs the edge of the image before registration.
center: array, optional
Location of rotation axis.
algorithm : {str, function}
One of the following string values.
'art'
Algebraic reconstruction technique :cite:`Kak:98`.
'gridrec'
Fourier grid reconstruction algorithm :cite:`Dowd:99`,
:cite:`Rivers:06`.
'mlem'
Maximum-likelihood expectation maximization algorithm
:cite:`Dempster:77`.
'sirt'
Simultaneous algebraic reconstruction technique.
'tv'
Total Variation reconstruction technique
:cite:`Chambolle:11`.
'grad'
Gradient descent method with a constant step size
upsample_factor : integer, optional
The upsampling factor. Registration accuracy is
        inversely proportional to upsample_factor.
rin : scalar, optional
        The inner radius of the blur function. Pixels inside
        rin are set to one.
rout : scalar, optional
        The outer radius of the blur function. Pixels outside
        rout are set to zero.
save : bool, optional
Saves projections and corresponding reconstruction
for each algorithm iteration.
    debug : bool, optional
Provides debugging info such as iterations and error.
Returns
-------
ndarray
        3D stack of aligned projection images.
ndarray
Error array for each iteration.
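    Examples
    --------
    Usage mirrors :func:`align_seq`; a sketch with assumed inputs:
    >>> prj_ali, sx, sy, conv = align_joint(prj, ang, iters=2, debug=False)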
"""
# Needs scaling for skimage float operations.
prj, scl = scale(prj)
# Shift arrays
sx = np.zeros((prj.shape[0]))
sy = np.zeros((prj.shape[0]))
conv = np.zeros((iters))
# Pad images.
npad = ((0, 0), (pad[1], pad[1]), (pad[0], pad[0]))
prj = np.pad(prj, npad, mode='constant', constant_values=0)
# Initialization of reconstruction.
rec = 1e-12 * np.ones((prj.shape[1], prj.shape[2], prj.shape[2]))
# Register each image frame-by-frame.
for n in range(iters):
if np.mod(n, 1) == 0:
_rec = rec
# Reconstruct image.
rec = recon(prj, ang, center=center, algorithm=algorithm,
num_iter=1, init_recon=_rec)
# Re-project data and obtain simulated data.
sim = project(rec, ang, center=center, pad=False)
# Blur edges.
if blur:
_prj = blur_edges(prj, rin, rout)
_sim = blur_edges(sim, rin, rout)
else:
_prj = prj
_sim = sim
# Initialize error matrix per iteration.
err = np.zeros((prj.shape[0]))
# For each projection
for m in range(prj.shape[0]):
# Register current projection in sub-pixel precision
shift, error, diffphase = register_translation(
_prj[m], _sim[m], upsample_factor)
err[m] = np.sqrt(shift[0]*shift[0] + shift[1]*shift[1])
sx[m] += shift[0]
sy[m] += shift[1]
# Register current image with the simulated one
tform = tf.SimilarityTransform(translation=(shift[1], shift[0]))
            prj[m] = tf.warp(prj[m], tform, order=5)
# -*- coding: utf-8 -*-
# @Date : 2022/4/30 15:52
# @Author : WangYihao
# @File : trainer.py
import os
import platform
import random
import time
from decimal import Decimal
import numpy as np
from loguru import logger
from prettytable import PrettyTable
from tqdm import tqdm
from fairscale.optim.oss import OSS
import torch
from torch import optim, nn, distributed
from torch.cuda.amp import GradScaler, autocast
from torch.backends import cudnn
from torch.nn.parallel import DistributedDataParallel as DDP
from torch.utils.data import DistributedSampler, DataLoader
from torch.utils.tensorboard import SummaryWriter
from torchvision import transforms as T
from my_utils import data
from my_utils.models import create_model
from my_utils.utils import AverageMeter, correct_rate
cudnn.benchmark = True
def seed_worker(worker_id):
# print(torch.initial_seed())
worker_seed = torch.initial_seed() % 2 ** 32
np.random.seed(worker_seed)
random.seed(worker_seed)
torch.manual_seed(worker_seed)
def _set_seed(seed, deterministic=False):
"""
seed manually to make runs reproducible
Args:
seed (int): Seed to be used.
deterministic (bool): Whether to set the deterministic option
for CUDNN backend
"""
random.seed(seed)
np.random.seed(seed)
torch.manual_seed(seed)
torch.cuda.manual_seed(seed)
torch.cuda.manual_seed_all(seed)
if deterministic:
cudnn.deterministic = True
cudnn.benchmark = False
class Trainer(object):
def __init__(self, cfg):
tic = time.time()
self.model_cfgs = cfg['model_configs']
self.train_cfgs = cfg['train_configs']
self.dataset_cfgs = cfg['dataset_configs']
self.loader_kwargs = cfg['loader_kwargs']
self.optim_kwargs = cfg['optim_kwargs']
self.schedule_cfgs = cfg['schedule_configs']
self.dist_cfgs = cfg['distributed_configs']
self.log_cfgs = cfg['log_configs']
if self.dist_cfgs['distributed']:
distributed.init_process_group(backend='nccl',
init_method='tcp://127.0.0.1:' + self.dist_cfgs['port'],
world_size=self.dist_cfgs['world_size'],
rank=self.dist_cfgs['local_rank'])
_set_seed(self.train_cfgs['seed'] + self.dist_cfgs['local_rank'], deterministic=True)
if torch.cuda.is_available():
self.device = torch.device(f'cuda:{self.dist_cfgs["local_rank"]}')
else:
self.device = torch.device("cpu")
self.dist_cfgs['device'] = self.device
save_time = time.strftime("%Y_%m_%d_%H_%M_%S", time.localtime())
run_dir = os.path.join(os.getcwd(),
f"{self.log_cfgs['log_dir']}/"
f"{self.model_cfgs['type']}/"
f"KS_{self.model_cfgs['kernel_size']}_"
f"ACT_{self.model_cfgs['act']}_"
f"Norm_{self.model_cfgs['norm']}")
self.log_dir = os.path.join(run_dir, save_time)
self.ckpt_dir = os.path.join(self.log_dir, 'checkpoints')
os.makedirs(self.ckpt_dir, exist_ok=True)
if self.dist_cfgs['local_rank'] == 0:
self.writer = SummaryWriter(log_dir=self.log_dir)
self.start_epoch = 0
self.steps = 0
self.epoch = 0
self.min_loss = float('inf')
self.val_best_acc_total = 0.0
self.val_metrics = {'current_acc': 0.0, 'best_acc': 0.0,
'best_epoch': 0}
self._build_model()
if self.train_cfgs['mode'] == 'train':
self.train_loader, self.train_sampler = self._load_dataset(phase='train')
self.val_loader, self.val_sampler = self._load_dataset(phase='val')
if self.train_cfgs['mode'] == 'test':
self.test_loader, self.test_sampler = self._load_dataset(phase='test')
self._load_optimizer()
if self.dist_cfgs['distributed']:
self.model = nn.SyncBatchNorm.convert_sync_batchnorm(self.model)
self.model = DDP(self.model,
device_ids=[self.dist_cfgs['local_rank']],
output_device=self.dist_cfgs['local_rank'],
find_unused_parameters=False)
if self.train_cfgs['resume']:
checkpoint_path = self.train_cfgs['resume_path']
assert os.path.exists(checkpoint_path)
self.load_checkpoint(checkpoint_path)
if self.dist_cfgs['local_rank'] == 0:
print(f"{time.time()-tic} sec are used to initialize a Trainer.")
def _build_model(self):
self.model = create_model(**self.model_cfgs)
self.model.to(self.device)
def _load_dataset(self, phase='train'):
dataset = data.make_dataset(
phase=phase,
dataset_dir=self.train_cfgs['dataset_dir'],
transform=T.Compose([
T.ToTensor(),
T.Resize((self.dataset_cfgs['fig_resize'],) * 2),
T.Normalize(self.dataset_cfgs['mean'], self.dataset_cfgs['std'])
])
)
sampler = DistributedSampler(dataset, shuffle=True) \
if self.dist_cfgs['distributed'] else None
data_loader = DataLoader(dataset,
sampler=sampler,
worker_init_fn=seed_worker,
shuffle=(sampler is None),
drop_last=(phase == 'train'),
**self.loader_kwargs)
return data_loader, sampler
def _load_optimizer(self):
base_optimizer = None
optim_type = self.optim_kwargs.pop('optim_type')
if optim_type == 'SGD':
base_optimizer = optim.SGD
self.optim_kwargs['momentum'] = 0.9
elif optim_type == 'Adam':
base_optimizer = optim.Adam
self.optim_kwargs['betas'] = (0.9, 0.999)
elif optim_type == 'AdamW':
base_optimizer = optim.AdamW
self.optim_kwargs['betas'] = (0.9, 0.999)
        else:
            raise ValueError(f"Optimizer type '{optim_type}' is not supported.")
if self.dist_cfgs['distributed']:
# Wrap a base optimizer into OSS
self.optimizer = OSS(
params=self.model.parameters(),
optim=base_optimizer,
**self.optim_kwargs,
)
else:
self.optimizer = base_optimizer(
params=self.model.parameters(),
**self.optim_kwargs,
)
if self.schedule_cfgs['schedule_type'] == 'cosine_warm':
self.schedule_cfgs['max_epoch'] = \
int((self.schedule_cfgs['cos_mul'] ** self.schedule_cfgs['cos_iters'] - 1) / \
(self.schedule_cfgs['cos_mul'] - 1) * self.schedule_cfgs['cos_T'])
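            # E.g. cos_T=15, cos_mul=2, cos_iters=3 (the values in the legacy config
            # commented out below) give max_epoch = (2**3 - 1) / (2 - 1) * 15 = 105.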
self.scheduler = \
optim.lr_scheduler.CosineAnnealingWarmRestarts(self.optimizer,
T_0=self.schedule_cfgs['cos_T'], T_mult=2)
if self.train_cfgs['amp']:
self.scaler = GradScaler()
def run(self):
for epoch in range(self.start_epoch, self.schedule_cfgs['max_epoch']):
if self.dist_cfgs['distributed']:
self.train_sampler.set_epoch(epoch)
train_loss, train_acc = self.train(epoch)
self.min_loss = min(self.min_loss, train_loss)
val_loss, val_acc = self.val(epoch)
self.epoch += 1
if self.dist_cfgs['local_rank'] == 0:
for i, param_group in enumerate(self.optimizer.param_groups):
self.writer.add_scalar(tag=f'optimizer/lr_group_{i}',
scalar_value=param_group['lr'],
global_step=epoch)
self.writer.add_scalars('Metric/acc', {'train': train_acc, 'val': val_acc}, epoch + 1)
self.writer.add_scalars('Metric/loss', {'train': train_loss, 'val': val_loss}, epoch + 1)
self.scheduler.step()
if ((epoch + 1) % self.log_cfgs['save_epoch_interval'] == 0) \
or (epoch + 1) == self.schedule_cfgs['max_epoch']:
checkpoint_path = os.path.join(self.ckpt_dir, f"epoch_{(epoch + 1)}.pth")
self.save_checkpoint(checkpoint_path)
if self.dist_cfgs['distributed']:
distributed.destroy_process_group()
def train(self, epoch):
self.model.train()
len_loader = len(self.train_loader)
iter_loader = iter(self.train_loader)
loss_recorder = AverageMeter()
acc_recorder = AverageMeter()
pbar = None
if self.dist_cfgs['local_rank'] == 0:
pbar = tqdm(total=len_loader,
dynamic_ncols=True,
                        ascii=(platform.system() == 'Windows'))
for step in range(len_loader):
try:
inputs, labels = next(iter_loader)
except Exception as e:
logger.critical(e)
continue
inputs = inputs.to(self.device)
labels = labels.to(self.device)
batch_size = inputs.size(0)
if self.train_cfgs['amp']:
with autocast():
loss, preds = self.model((inputs, labels))
self.scaler.scale(loss).backward()
self.scaler.step(self.optimizer)
self.scaler.update()
else:
loss, preds = self.model((inputs, labels))
loss.backward()
self.optimizer.step()
self.optimizer.zero_grad()
self.steps += 1
loss = loss.detach().clone()
acc_recorder.update(correct_rate(preds, labels), batch_size)
if self.dist_cfgs['distributed']:
distributed.reduce(loss, 0)
loss /= self.dist_cfgs['world_size']
loss_recorder.update(loss.item(), batch_size)
if self.dist_cfgs['local_rank'] == 0:
last_lr = [param_group['lr'] for param_group in self.optimizer.param_groups]
last_lr_string = "lr " + ' '.join(f"{Decimal(lr):.1E}" for lr in last_lr)
pbar.set_description(
f"train epoch {epoch + 1}/{self.schedule_cfgs['max_epoch']} "
f"Iter {self.steps}/{len_loader * self.schedule_cfgs['max_epoch']} "
f"{last_lr_string} "
f"---- "
f"loss {loss_recorder.avg:.4f} "
f"top1_acc {acc_recorder.avg:.2%}")
pbar.update()
if self.steps % self.log_cfgs['snapshot_interval'] == 0:
checkpoint_path = os.path.join(self.ckpt_dir, "latest.pth")
self.save_checkpoint(checkpoint_path)
if self.dist_cfgs['local_rank'] == 0:
pbar.close()
logger.info(
f"train epoch {epoch + 1}/{self.schedule_cfgs['max_epoch']} "
f"Iter {self.steps}/{len_loader * self.schedule_cfgs['max_epoch']} "
f"---- "
f"loss {loss_recorder.avg:.4f} "
f"top1_acc {acc_recorder.avg:.2%}")
return loss_recorder.avg, acc_recorder.avg
def val(self, epoch):
self.model.eval()
len_loader = len(self.val_loader)
iter_loader = iter(self.val_loader)
loss_recorder = AverageMeter()
acc_recorder = AverageMeter()
pbar = None
if self.dist_cfgs['local_rank'] == 0:
pbar = tqdm(total=len_loader,
dynamic_ncols=True,
                        ascii=(platform.system() == 'Windows'))
for step in range(len_loader):
try:
inputs, labels = next(iter_loader)
except Exception as e:
logger.critical(e)
continue
inputs = inputs.to(self.device)
labels = labels.to(self.device)
batch_size = inputs.size(0)
with torch.no_grad():
if self.train_cfgs['amp']:
with autocast():
loss, preds = self.model((inputs, labels))
else:
loss, preds = self.model((inputs, labels))
loss = loss.detach().clone()
acc_recorder.update(correct_rate(preds, labels), batch_size)
if self.dist_cfgs['distributed']:
distributed.reduce(loss, 0)
loss /= self.dist_cfgs['world_size']
loss_recorder.update(loss.item(), batch_size)
if self.dist_cfgs['local_rank'] == 0:
pbar.set_description(
f"val epoch {epoch + 1}/{self.schedule_cfgs['max_epoch']} "
f"Step {step}/{len_loader} "
f"------ "
f"loss {loss_recorder.avg:.4f} "
f"top1_acc {acc_recorder.avg:.2%}")
pbar.update()
if self.dist_cfgs['local_rank'] == 0:
pbar.close()
logger.info(
f"val epoch {epoch + 1}/{self.schedule_cfgs['max_epoch']} "
f"------ "
f"loss {loss_recorder.avg:.4f} "
f"top1_acc {acc_recorder.avg:.2%}")
self.val_metrics['current_acc'] = acc_recorder.avg
if acc_recorder.avg > self.val_metrics['best_acc']:
self.val_metrics['best_acc'] = acc_recorder.avg
self.val_metrics['best_epoch'] = epoch + 1
checkpoint_path = os.path.join(self.ckpt_dir, "best.pth")
self.save_checkpoint(checkpoint_path)
res_table = PrettyTable()
res_table.add_column('Phase', ['Current Acc', 'Best Acc', 'Best Epoch'])
res_table.add_column('Val', [f"{self.val_metrics['current_acc']:.2%}",
f"{self.val_metrics['best_acc']:.2%}",
self.val_metrics['best_epoch']])
logger.info(f'Performance on validation set at epoch: {epoch + 1}')
logger.info('\n' + res_table.get_string())
return loss_recorder.avg, acc_recorder.avg
def save_checkpoint(self, path):
# self.optimizer.consolidate_state_dict()
if not os.path.exists(os.path.split(path)[0]):
os.makedirs(os.path.split(path)[0])
if self.dist_cfgs['local_rank'] == 0:
save_dict = {
'model': self.model.state_dict(),
# 'optimizer': self.optimizer.state_dict(),
'epoch': self.epoch,
'iteration': self.steps,
'best_val_acc': self.val_metrics['best_acc'],
'best_epoch': self.val_metrics['best_epoch'],
'val_best_acc_total': self.val_best_acc_total,
}
torch.save(save_dict, path)
def load_checkpoint(self, path):
        # Every process loads the checkpoint and maps it onto its own device;
        # loading only on rank 0 would leave `ckpt` as None on the other ranks.
        ckpt = torch.load(path, map_location={'cuda:0': f'cuda:{self.dist_cfgs["local_rank"]}'})
        self.model.load_state_dict(ckpt['model'])
        # The optimizer state may be absent (its save is commented out above).
        if 'optimizer' in ckpt:
            self.optimizer.load_state_dict(ckpt['optimizer'])
self.start_epoch = ckpt['epoch']
self.steps = ckpt['iteration']
self.val_metrics['best_epoch'] = ckpt['best_epoch']
self.val_metrics['best_acc'] = ckpt['best_val_acc']
self.val_best_acc_total = ckpt['val_best_acc_total']
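def _example_trainer_config():
    """Return a minimal configuration sketch for ``Trainer``.
    The keys are inferred from ``Trainer.__init__`` above; the concrete values
    are illustrative assumptions (several mirror the legacy config commented
    out below) rather than validated project defaults.
    Typical use (not executed here): ``Trainer(_example_trainer_config()).run()``.
    """
    return {
        'model_configs': {'type': 'simple_conv', 'kernel_size': 3,
                          'depths': (1, 1, 1), 'dims': (4, 8, 16),
                          'act': 'relu', 'norm': 'BN'},
        'train_configs': {'mode': 'train', 'seed': 0, 'amp': False,
                          'resume': False, 'dataset_dir': '/path/to/dataset'},
        'dataset_configs': {'fig_resize': 64, 'mean': [0.1094], 'std': [0.3660]},
        'loader_kwargs': {'batch_size': 256, 'num_workers': 4,
                          'pin_memory': True, 'persistent_workers': False},
        'optim_kwargs': {'optim_type': 'AdamW', 'lr': 1e-4, 'weight_decay': 0.05},
        'schedule_configs': {'schedule_type': 'cosine_warm',
                             'cos_T': 15, 'cos_mul': 2, 'cos_iters': 3},
        'distributed_configs': {'distributed': False, 'local_rank': 0,
                                'world_size': 1, 'port': '29500'},
        'log_configs': {'log_dir': 'runs', 'save_epoch_interval': 10,
                        'snapshot_interval': 1000},
    }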
# def trainer(model, optimizer, scheduler, loss_fn, train_loader,
# check_fn, check_loaders, batch_step, save_dir, log_every=10, epochs=2, writer=None):
# """
#
# Args:
# batch_step (int):
# epochs (int):
# log_every (int): log info per log_every batches.
# writer :
#
# Returns:
# batch_step (int):
# """
# device = get_device(model)
# # batch_size = train_loader.batch_size
# check_loader_train = check_loaders['train']
# check_loader_val = check_loaders['val']
# iters = len(train_loader)
# max_val_acc = 0.75
#
# for epoch in range(1, epochs + 1):
# tic = time.time()
# for batch_idx, (X, Y) in enumerate(train_loader):
# batch_step += 1
# model.train()
# X = X.to(device, dtype=torch.float32)
# Y = Y.to(device, dtype=torch.int64)
# # print(X.device, model.device)
# scores = model(X)
# loss = loss_fn(scores, Y)
# if writer is not None:
# writer.add_scalar('Metric/loss', loss.item(), batch_step)
# writer.add_scalar('Hpara/lr', optimizer.param_groups[0]['lr'], batch_step)
#
# # back propagate
# optimizer.zero_grad()
# loss.backward()
# optimizer.step()
# scheduler.step(batch_step / iters)
#
# # check accuracy
# if batch_idx % log_every == 0:
# model.eval()
# train_acc = check_fn(model, check_loader_train, training=True)
# val_acc = check_fn(model, check_loader_val, training=True)
# if writer is not None:
# writer.add_scalars('Metric/acc', {'train': train_acc, 'val': val_acc}, batch_step)
# print(f'Epoch: {epoch} [{batch_idx}/{iters}]\tLoss: {loss:.4f}\t'
# f'Val acc: {100. * val_acc:.1f}%')
# if val_acc > max_val_acc:
# max_val_acc = val_acc
# save_model(model, optimizer, scheduler,
# save_dir=save_dir, acc=100 * val_acc)
#
# print(f'====> Epoch: {epoch}\tTime: {time.time() - tic}s')
#
# return batch_step
#
#
# def train_a_model(model_configs=None, train_configs=None, loader_kwargs=None):
# """
# Train a model from zero.
# """
# if train_configs is None:
# train_configs = {
# 'log_dir': 'finalruns',
# 'dataset_dir': '/home/wangyh/01-Projects/03-my/Datasets/polygons_unfilled_64_3',
# 'batch_size': 256,
# 'epochs': 50,
# 'device': 'cuda:7',
# 'optim': 'Adam',
# 'lr': 1e-4,
# 'schedule': 'cosine_warm',
# 'cos_T': 15,
# 'cos_mul': 2,
# 'cos_iters': 3,
# 'momentum': 0.9,
# 'weight_decay': 0.05,
# }
# # make dataset
# fig_resize = 64
# # mean, std = torch.tensor(0.2036), torch.tensor(0.4027) # polygons_unfilled_32_2
# mean, std = torch.tensor(0.1094), torch.tensor(0.3660) # polygons_unfilled_64_3
# T = transforms.Compose([
# transforms.ToTensor(),
# transforms.Resize((fig_resize, fig_resize)),
# transforms.Normalize(mean, std)
# ])
# if loader_kwargs is None:
# loader_kwargs = {
# 'batch_size': train_configs['batch_size'], # default:1
# 'shuffle': True, # default:False
# 'num_workers': 4, # default:0
# 'pin_memory': True, # default:False
# 'drop_last': True, # default:False
# 'prefetch_factor': 4, # default:2
# 'persistent_workers': False # default:False
# }
# train_loader, test_loader, check_loaders = data.make_dataset(
# dataset_dir=train_configs['dataset_dir'],
# loader_kwargs=loader_kwargs,
# transform=T
# )
#
# # create model
# if model_configs is None:
# model_configs = {
# 'type': 'simple_conv',
# 'kernel_size': 3,
# 'depths': (1, 1, 1),
# 'dims': (4, 8, 16)
# }
# model, optimizer, scheduler = [None] * 3
# # define model
# model = create_model(**model_configs)
# model = model.to(train_configs['device'])
#
# # define optimizer
# if train_configs['optim'] == 'Adam':
# optimizer = optim.Adam(params=[{'params': model.parameters(), 'initial_lr': train_configs['lr']}],
# lr=train_configs['lr'],
# weight_decay=train_configs['weight_decay'])
# elif | |
import functools
import numpy as np
import pandas as pd
import matplotlib as mpl
import matplotlib.transforms as transforms
import matplotlib.pyplot as plt
import scipy.interpolate as interp
import scipy.optimize as opt
from .stats import poisson_interval
__all__ = [
"cms_label", "legend_data_mc", "data_mc", "data", "mc", "heatmap",
"annotate_heatmap",
"process_names", "process_colours",
"impacts", "nllscan",
]
def cms_label(ax, label, lumi=35.9, energy=13):
ax.text(
0, 1, r'$\mathbf{CMS}\ \mathit{'+label+'}$',
ha='left', va='bottom', transform=ax.transAxes,
)
ax.text(
1, 1, r'${:.1f}\ \mathrm{{fb}}^{{-1}}$ ({:.0f} TeV)'.format(lumi, energy),
ha='right', va='bottom', transform=ax.transAxes,
)
def legend_data_mc(
ax, df_data, df_mc, label, add_ratios=True, offaxis=True, legend_kw={},
):
handles, labels = ax[0].get_legend_handles_labels()
if add_ratios:
# sort by process total
tdf_mc = pd.pivot_table(
df_mc, index=label, columns="parent",
values="sum_w", aggfunc=np.sum,
)
tdf_mc = tdf_mc[tdf_mc.sum(axis=0).sort_values().index]
data_idx = labels.index("Data")
data_label = labels.pop(data_idx)
labels = (labels+[data_label])[::-1]
data_handle = handles.pop(data_idx)
handles = (handles+[data_handle])[::-1]
df_data_sum = df_data.sum()
tdf_mc_sum = tdf_mc.sum()
fractions = [
df_data_sum["sum_w"]/tdf_mc_sum.sum(), 1.,
] + list((tdf_mc_sum / tdf_mc_sum.sum()).values[::-1])
fraction_labels = [
"{:.3f} {}".format(fractions[idx], labels[idx])
for idx in range(len(labels))
]
else:
handles = handles[::-1]
fraction_labels = labels[::-1]
kwargs = dict(legend_kw)
kwargs_noloc = dict(kwargs)
kwargs_noloc.pop("loc", None)
if offaxis:
box = ax[0].get_position()
ax[0].set_position([box.x0, box.y0, box.width*0.8, box.height])
ax[0].legend(
handles, fraction_labels, bbox_to_anchor=(1, 1), **kwargs_noloc
)
box = ax[1].get_position()
ax[1].set_position([box.x0, box.y0, box.width*0.8, box.height])
else:
ax[0].legend(handles, fraction_labels, **kwargs)
handles, labels = ax[1].get_legend_handles_labels()
if offaxis:
ax[1].legend(handles, labels, bbox_to_anchor=(1, 1), **kwargs_noloc)
else:
ax[1].legend(handles, labels, **kwargs)
def bin_lows_to_edges_cents(lows):
edges = np.array(list(lows)+[2*lows[-1]-lows[-2]])
cents = (edges[:-1] + edges[1:])/2.
return edges, cents
def data(ax, df, label, bins, data_kw={}):
bin_edges, bin_cents = bin_lows_to_edges_cents(bins)
# draw
kwargs = dict(fmt='o', lw=1, color='black', label='Data')
kwargs.update(data_kw)
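    # Effective number of entries: n_eff = (sum w)^2 / (sum w^2). The Poisson
    # interval is evaluated at n_eff and rescaled by sum_w / n_eff, which gives
    # weighted histograms approximately correct asymmetric error bars.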
mask = (df["sum_ww"]==0.)
neff = df["sum_w"]**2 / df["sum_ww"]
neff[mask] = 0.
scale = df["sum_w"]/neff
scale[mask] = 1.
down, up = poisson_interval(neff, scale=scale)
ax.errorbar(
bin_cents, df["sum_w"], yerr=[df["sum_w"]-down, up-df["sum_w"]],
**kwargs,
)
def poisson_interval_with_checks(x, variance):
down, up = poisson_interval(x**2/variance, scale=variance/x)
mask = (variance==0.)
down[mask] = 0.
up[mask] = np.inf
return down, up
def mc(
ax, df, label, bins, mcstat=False, mc_kw={}, mcstat_kw={}, proc_kw={},
zorder=0, interval_func=poisson_interval_with_checks
):
stacked = mc_kw.pop("stacked") if "stacked" in mc_kw else False
bin_edges, bin_cents = bin_lows_to_edges_cents(bins)
# preprocess mc
tdf = pd.pivot_table(
df, index=label, columns="parent",
values="sum_w", aggfunc=np.sum,
)
# sort by process total
tdf_procsum = tdf.sum(axis=0)
tdf = tdf[tdf_procsum.sort_values().index]
# mc
procs = tdf.columns.to_series()
cumsum = tdf.iloc[:,0].copy(deep=True)
cumsum.values[:] = 0.
for idx, proc in enumerate(tdf.columns):
if stacked:
prev_cumsum = cumsum.copy(deep=True)
cumsum += tdf[proc]
else:
cumsum = tdf[proc]
color = proc_kw.get("colours", {}).get(proc, "blue")
kwargs = {
"color": color, "ec": color,
"label": proc_kw.get("labels", {}).get(proc, proc),
}
kwargs.update(mc_kw)
kwargs["zorder"] = -idx
ax.hist(bin_cents, bins=bin_edges, weights=cumsum, **kwargs)
if mcstat:
tdf_ww_up = pd.pivot_table(
df, index=label, columns="parent",
values="sum_ww_up", aggfunc=np.sum,
)
_, up = interval_func(tdf.values[:,0], tdf_ww_up.values[:,0])
tdf_ww_down = pd.pivot_table(
df, index=label, columns="parent",
values="sum_ww_down", aggfunc=np.sum,
)
down, _ = interval_func(tdf.values[:,0], tdf_ww_down.values[:,0])
kwargs = dict(color='black', alpha=0.2)
kwargs.update(mcstat_kw)
ax.fill_between(
bin_edges, list(up)+[list(up)[-1]],
list(down)+[list(down)[-1]],
step='post', **kwargs
)
def data_mc(
ax, df_data, df_mc, label, bins,
sigs=[], blind=False, log=True, legend=True, ratio=True, sm_total=True,
mcstat_top=False, mcstat=True, add_ratios=True, show_zeros=False,
mc_kw={}, sig_kw={}, mcstat_kw={}, sm_kw={}, data_kw={}, proc_kw={},
legend_kw={}, cms_kw={}, interval_func=poisson_interval_with_checks,
):
_df_data = df_data.copy(deep=True)
_df_mc = df_mc.copy(deep=True)
if not show_zeros:
_df_data.loc[_df_data["sum_w"]==0.,"sum_w"] = np.nan
# only mc sum_ww can be asymmetric
if "sum_ww_up" not in _df_mc:
_df_mc["sum_ww_up"] = _df_mc["sum_ww"]
if "sum_ww_down" not in _df_mc:
_df_mc["sum_ww_down"] = _df_mc["sum_ww"]
# collect signals if set
sigs = sigs[::-1]
sig_mask = ~_df_mc.index.get_level_values("parent").isin(sigs)
df_sig = _df_mc.loc[~sig_mask].copy(deep=True)
df_mc_sm = _df_mc.loc[sig_mask].copy(deep=True)
# preprocessing
df_mc_sum = df_mc_sm.groupby(label).sum()
df_mc_sum.loc[:,"parent"] = "SMTotal"
df_mc_sum = df_mc_sum.groupby(["parent", label]).sum()
# draw
if log:
ax[0].set_yscale('log')
bin_edges, _ = bin_lows_to_edges_cents(bins)
ax[0].set_xlim(bin_edges.min(), bin_edges.max())
# signals - top panel
sig_kw_ = dict(histtype='step', zorder=1)
sig_kw_.update(sig_kw)
if len(sigs) > 0:
mc(
ax[0], df_sig, label, bins, mcstat=False, mc_kw=sig_kw_,
proc_kw=proc_kw, interval_func=interval_func,
)
# MC - top panel
mc_kw_ = dict(stacked=True)
mc_kw_.update(mc_kw)
mc(
ax[0], df_mc_sm, label, bins, mcstat=False,
mc_kw=mc_kw_, proc_kw=proc_kw, interval_func=interval_func,
)
# SM total - top panel
if sm_total:
mc_kw_ = dict(histtype='step')
mc_kw_.update(sm_kw)
mcstat_kw_ = dict(label="", color="black", alpha=0.2)
mcstat_kw_.update(mcstat_kw)
mc(
ax[0], df_mc_sum, label, bins, mcstat=mcstat_top, mc_kw=mc_kw_,
mcstat_kw=mcstat_kw_, proc_kw=proc_kw, interval_func=interval_func,
)
# Data - top panel
if not blind:
data(ax[0], _df_data, label, bins, data_kw=data_kw)
# CMS label - top panel
kwargs = dict(label="Preliminary", lumi=35.9, energy=13)
kwargs.update(cms_kw)
#cms_label(ax[0], **kwargs)
# SM total ratio - bottom panel
df_mc_sum_ratio = df_mc_sum.copy()
df_mc_sum_ratio.loc[:,"sum_w"] = 1.
df_mc_sum_ratio.loc[:,"sum_ww_up"] = (
df_mc_sum["sum_ww_up"]/df_mc_sum["sum_w"]**2
)
df_mc_sum_ratio.loc[:,"sum_ww_down"] = (
df_mc_sum["sum_ww_down"]/df_mc_sum["sum_w"]**2
)
if ratio:
mc_kw_ = dict(label="", histtype='step')
mc_kw_.update(sm_kw)
mcstat_kw_ = dict(label="MC stat. unc.", color="black", alpha=0.2)
mcstat_kw_.update(mcstat_kw)
mc(
ax[1], df_mc_sum_ratio, label, bins, mcstat=mcstat, mc_kw=mc_kw_,
mcstat_kw=mcstat_kw_, proc_kw=proc_kw, interval_func=interval_func,
)
# Data ratio - bottom panel
if not blind:
kwargs = dict(data_kw)
kwargs["label"] = ""
df_data_ratio = _df_data.copy()
df_data_ratio.loc[:,"sum_w"] = _df_data["sum_w"]/df_mc_sum["sum_w"].values
df_data_ratio.loc[:,"sum_ww"] = _df_data["sum_ww"]/df_mc_sum["sum_w"].values**2
data(ax[1], df_data_ratio, label, bins, data_kw=kwargs)
if legend:
offaxis = legend_kw.pop("offaxis", True)
kwargs = dict(labelspacing=0.05)
kwargs.update(legend_kw)
legend_data_mc(
ax, _df_data, _df_mc, label, add_ratios=add_ratios,
offaxis=offaxis, legend_kw=kwargs,
)
return ax
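# A minimal usage sketch for data_mc(); the DataFrame layout is inferred from
# the pivot tables above (one row per (parent, <label>) bin with 'sum_w' and
# 'sum_ww' columns) and the binning/label names below are illustrative
# assumptions:
#     fig, ax = plt.subplots(nrows=2, sharex=True,
#                            gridspec_kw={"height_ratios": (3, 1), "hspace": 0.1})
#     bins = np.arange(200., 1000., 50.)  # bin lower edges
#     data_mc(ax, df_data, df_mc, "met_bin", bins,
#             proc_kw={"colours": process_colours, "labels": process_names})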
def heatmap(
data, row_labels, col_labels, ax, cbar_kw=dict(fraction=0.046, pad=0.04),
cbarlabel="", grid_kw={}, tick_kw={}, **kwargs,
):
if not ax:
ax = plt.gca()
im = ax.imshow(data, **kwargs)
cbar = ax.figure.colorbar(im, ax=ax, **cbar_kw)
cbar.ax.set_ylabel(cbarlabel, rotation=-90, va="bottom")
ax.set_xticks(np.arange(data.shape[1]))
ax.set_yticks(np.arange(data.shape[0]))
ax.set_xticklabels(col_labels)
ax.set_yticklabels(row_labels)
ax.tick_params(**tick_kw)
# Rotate the tick labels and set their alignment.
plt.setp(
ax.get_xticklabels(), ha="right", #rotation=-30,
rotation_mode="anchor",
)
# Turn spines off and create white grid.
for edge, spine in ax.spines.items():
spine.set_visible(False)
ax.set_xticks(np.arange(data.shape[1]+1)-.5, minor=True)
ax.set_yticks(np.arange(data.shape[0]+1)-.5, minor=True)
gkw = dict(which="minor", color="w", linestyle='-', linewidth=2)
gkw.update(grid_kw)
ax.grid(**gkw)
ax.tick_params(
which="minor", bottom=False, left=False, top=False, right=False,
)
ax.tick_params(
which="major", bottom=False, left=False, top=False, right=False,
)
return im, cbar
def annotate_heatmap(
im, data=None, valfmt="{x:.2f}", textcolors=["black", "white"],
cthreshold=lambda z: True, vthreshold=lambda z: True, **textkw,
):
if not isinstance(data, (list, np.ndarray)):
data = im.get_array()
kw = dict(ha="center", va="center")
kw.update(textkw)
# Get the formatter in case a string is supplied
if isinstance(valfmt, str):
valfmt = mpl.ticker.StrMethodFormatter(valfmt)
# Loop over the data and create a `Text` for each "pixel".
# Change the text's color depending on the data.
texts = []
for i in range(data.shape[0]):
for j in range(data.shape[1]):
kw.update(color=textcolors[int(cthreshold(data[i, j]))])
if not vthreshold(data[i, j]):
continue
text = im.axes.text(j, i, valfmt(data[i, j], None), **kw)
texts.append(text)
return texts
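# A minimal usage sketch for heatmap()/annotate_heatmap(); the array contents
# and labels below are illustrative assumptions:
#     fig, ax = plt.subplots()
#     vals = np.array([[1.0, 0.3], [0.3, 1.0]])
#     im, cbar = heatmap(vals, ["a", "b"], ["a", "b"], ax,
#                        cbarlabel="correlation", cmap="viridis")
#     annotate_heatmap(im, valfmt="{x:.2f}")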
process_colours = {
"SMTotal": "black",
"MET": "black",
"SingleMuon": "black",
"SingleElectron": "black",
"ZJetsToNuNu": "#80b1d3",
"WJetsToLNu": "#b3de69",
"WJetsToENu": "#b2df8a",
"WJetsToMuNu": "#b3de69",
"WJetsToTauNu": "#8dd3c7",
"WJetsToTauLNu": "#8dd3c7",
"WJetsToTauHNu": "#8dd3c7",
"Diboson": "#fdb462",
"DYJetsToLL": "#ffed6f",
"DYJetsToEE": "#fff6b3",
"DYJetsToMuMu": "#ffed6f",
"DYJetsToTauTau": "#ffe41a",
"DYJetsToTauLTauL": "#ffe41a",
"DYJetsToTauHTauL": "#ffe41a",
"DYJetsToTauHTauH": "#ffe41a",
"EWKV2Jets": "#bebada",
"SingleTop": "#fccde5",
"TTJets": "#bc80bd",
"Top": "#bc80bd",
"QCD": "#fb8072",
"G1Jet": "#ccebc5",
"VGamma": "#ffffb3",
"Minor": "#d9d9d9",
"MinorBkgs": "#d9d9d9",
}
process_names = {
"SMTotal": "SM total",
"MET": "MET",
"SingleMuon": "Single Muon",
"SingleElectron": "Single Electron",
"ZJetsToNuNu": "$Z(\\rightarrow \\nu\\nu)+j$",
"WJetsToLNu": "$W(\\rightarrow l\\nu)+j$",
"WJetsToENu": "$W(\\rightarrow e\\nu)+j$",
"WJetsToMuNu": "$W(\\rightarrow \\mu\\nu)+j$",
"WJetsToTauNu": "$W(\\rightarrow \\tau\\nu)+j$",
"WJetsToTauLNu": "$W(\\rightarrow \\tau_{l}\\nu)+j$",
"WJetsToTauHNu": "$W(\\rightarrow \\tau_{h}\\nu)+j$",
"Diboson": "Diboson",
"DYJetsToLL": "$Z/\\gamma^{*}(\\rightarrow ll)+j$",
"DYJetsToEE": "$Z/\\gamma^{*}(\\rightarrow ee)+j$",
"DYJetsToMuMu": "$Z/\\gamma^{*}(\\rightarrow \\mu\\mu)+j$",
"DYJetsToTauTau": "$Z/\\gamma^{*}(\\rightarrow \\tau\\tau)+j$",
"DYJetsToTauLTauL": "$Z/\\gamma^{*}(\\rightarrow \\tau_{l}\\tau_{l})+j$",
"DYJetsToTauHTauL": "$Z/\\gamma^{*}(\\rightarrow \\tau_{l}\\tau_{h})+j$",
"DYJetsToTauHTauH": "$Z/\\gamma^{*}(\\rightarrow \\tau_{h}\\tau_{h})+j$",
"EWKV2Jets": "VBS",
"SingleTop": "Single Top",
"TTJets": "$t\\bar{t}+j$",
"QCD": "QCD multijet",
"G1Jet": "$\\gamma+j$",
"VGamma": "$V+\\gamma$",
"Minor": "Minor",
"MinorBkgs": "Minor",
}
nuisance_names = {
"d1kqcd": r'$\delta^{(1)}k_{\mathrm{QCD}}$',
"d2kqcd": r'$\delta^{(2)}k_{\mathrm{QCD}}$',
"d3kqcd": r'$\delta^{(3)}k_{\mathrm{QCD}}$',
"d1kew": r'$\delta^{(1)}k_{\mathrm{EW}}$',
"d2keww": r'$\delta^{(2)}k_{\mathrm{EW}}^{\mathrm{W}}$',
"d2kewz": r'$\delta^{(2)}k_{\mathrm{EW}}^{\mathrm{Z}}$',
"d3keww": r'$\delta^{(3)}k_{\mathrm{EW}}^{\mathrm{W}}$',
"d3kewz": r'$\delta^{(3)}k_{\mathrm{EW}}^{\mathrm{Z}}$',
"dkmix": r'$\delta k_{\mathrm{mix}}$',
"jesTotal": r'JES',
"jerSF": r'JER',
"unclust": r'Unclustered energy',
"lhePdfWeight": r'PDF',
"btagSF": r'$b$-tag veto',
"photonIdLoose": r'Photon id. veto',
"photonPixelSeedVeto": r'Photon pixel veto',
"tauIdTight": r'$\tau_h$-tag id. selection',
"tauIdVLoose": r'$\tau_h$-tag id. veto',
"muonIdLooseSyst": r'Muon id. veto (syst.)',
"muonIdLooseStat": r'Muon id. veto (stat.)',
"muonIsoLooseSyst": r'Muon iso. veto (syst.)',
"muonIsoLooseStat": r'Muon iso. veto (stat.)',
"muonIdTightSyst": r'Muon id. selection (syst.)',
"muonIdTightStat": r'Muon id. selection (stat.)',
"muonIsoTightSyst": r'Muon iso. selection (syst.)',
"muonIsoTightStat": r'Muon iso. selection (stat.)',
"eleIdIsoVeto": r'Electron id. veto',
"eleIdIsoTight": r'Electron id. selection',
"eleReco": r'Electron reconstruction',
"eleTrig": r'Electron trigger',
"prefiring": r'ECAL timing',
"pileup": r'Pileup',
"lumi": r'Luminosity',
"metTrig0MuSyst": r'$p_{\mathrm{T}}^{\mathrm{miss}}$ trigger ($0\mu$)',
"metTrig1MuSyst": r'$p_{\mathrm{T}}^{\mathrm{miss}}$ trigger ($1\mu$)',
"metTrig2MuSyst": r'$p_{\mathrm{T}}^{\mathrm{miss}}$ trigger ($2\mu$)',
"metTrigReferenceTriggerSyst": r'$p_{\mathrm{T}}^{\mathrm{miss}}$ trigger (ref.)',
"metTrigMonojetSyst": r'$p_{\mathrm{T}}^{\mathrm{miss}}$ trigger ($\p_{\mathrm{T}}^{\mathrm{miss}}+\mathrm{jets}$)',
"metTrigSingleMuonSyst": r'$p_{\mathrm{T}}^{\mathrm{miss}}$ trigger ($\mu+\mathrm{jets}$)',
"metTrigDoubleMuonSyst": r'$p_{\mathrm{T}}^{\mathrm{miss}}$ trigger ($\mu\mu+\mathrm{jets}$)',
"metTrigSingleTauSyst": r'$p_{\mathrm{T}}^{\mathrm{miss}}$ trigger ($\tau_h+\mathrm{jets}$)',
"metTrigSingleElectronSyst": r'$p_{\mathrm{T}}^{\mathrm{miss}}$ trigger ($e+\mathrm{jets}$)',
"metTrigDoubleElectronSyst": r'$p_{\mathrm{T}}^{\mathrm{miss}}$ trigger ($ee+\mathrm{jets}$)',
}
def impacts(data, ax=None, converter=nuisance_names):
if ax is None:
fig, ax = plt.subplots(
figsize=(4,4), dpi=150,
ncols=2, nrows=1,
sharex=False, sharey=True,
gridspec_kw={"hspace": 0., "wspace": 0.},
)
ax[0].minorticks_off()
ax[1].minorticks_off()
ax[1].set_yticklabels([])
y = data["poi_paramdown"].values
x = np.linspace(0., len(y), len(y)+1)
ax[1].hist(
x[:-1], bins=x, weights=y,
color='#1f78b4', alpha=0.8,
orientation='horizontal',
label=r'$-1\sigma$',
)
y = data["poi_paramup"].values
ax[1].hist(
x[:-1], bins=x, weights=y,
color='#e31a1c', alpha=0.8,
orientation='horizontal',
label=r'$+1\sigma$',
)
xmax = np.max(np.abs(ax[1].get_xlim()))
ax[1].set_xlim(-1.1*xmax, 1.1*xmax)
ax[1].set_ylim(0, len(y))
ax[1].axvline(0, lw=1, color='gray', alpha=0.8)
y = data["param_value"].values
yerr = (
-1*data["param_merrdown"].values,
data["param_merrup"].values,
)
ax[0].errorbar(
y, (x[:-1]+x[1:])/2., xerr=yerr,
fmt='o', color='black',
ms=4, capsize=4,
)
xmax = data.eval("param_value+param_merrup").max()
xmax = max(xmax, data.eval("-(param_value+param_merrdown)").max())
xmax = int(xmax)+1
ax[0].set_xlim(-xmax, xmax)
for pos in range(xmax):
ax[0].axvline(pos, lw=1, | |
variance
weights = [data_parser.sample_regression_weight(xi, yi,
p=model.sample_weight_scaling_factor)
for xi, yi in zip(seqs, outputs)]
if norm_factor is not None:
weights = [w / norm_factor for w in weights]
return weights
all_true = []
all_predictions = []
for seqs, outputs in test_ds:
sample_weight = determine_sample_weights(seqs, outputs,
norm_factor=test_weight_mean)
sample_weight = tf.constant(sample_weight)
y_true, y_pred = tf_test_step(seqs, outputs,
sample_weight=sample_weight)
if model.regression:
test_r2_score_metric(y_true, y_pred)
test_pearson_corr_metric(y_true, y_pred)
test_spearman_corr_metric(y_true, y_pred)
all_true += list(tf.reshape(y_true, [-1]).numpy())
all_predictions += list(tf.reshape(y_pred, [-1]).numpy())
# Check the ordering: y_test should be the same as all_true
# (y_test is a 2d array: [[value], [value], ..., [value]] so it must
# be flattened prior to comparison)
assert np.allclose(y_test.flatten(), all_true)
# See note in train_and_validate() for discrepancy between loss and
# weighted metric values.
print('TEST DONE')
print(' Test metrics:')
print(' Loss: {}'.format(test_loss_metric.result()))
if model.regression:
print(' MSE: {}'.format(test_mse_metric.result()))
print(' Weighted MSE: {}'.format(test_mse_weighted_metric.result()))
print(' MAE: {}'.format(test_mae_metric.result()))
print(' MAPE: {}'.format(test_mape_metric.result()))
print(' R^2 score: {}'.format(test_r2_score_metric.result()))
print(' r-Pearson: {}'.format(test_pearson_corr_metric.result()))
print(' r-Spearman: {}'.format(test_spearman_corr_metric.result()))
else:
print(' BCE: {}'.format(test_bce_metric.result()))
print(' Weighted BCE: {}'.format(test_bce_weighted_metric.result()))
print(' Accuracy: {}'.format(test_accuracy_metric.result()))
print(' AUC-ROC: {}'.format(test_auc_roc_metric.result()))
print(' AUC-PR: {}'.format(test_auc_pr_metric.result()))
test_loss = test_loss_metric.result()
if model.regression:
test_mse = test_mse_metric.result()
test_pearson_corr = test_pearson_corr_metric.result()
test_spearman_corr = test_spearman_corr_metric.result()
else:
test_bce = test_bce_metric.result()
test_bce_weighted = test_bce_weighted_metric.result()
test_auc_roc = test_auc_roc_metric.result()
test_auc_pr = test_auc_pr_metric.result()
test_loss_metric.reset_states()
test_mse_metric.reset_states()
test_mse_weighted_metric.reset_states()
test_mae_metric.reset_states()
test_mape_metric.reset_states()
test_r2_score_metric.reset_states()
test_pearson_corr_metric.reset_states()
test_spearman_corr_metric.reset_states()
test_bce_metric.reset_states()
test_bce_weighted_metric.reset_states()
test_accuracy_metric.reset_states()
test_auc_roc_metric.reset_states()
test_auc_pr_metric.reset_states()
if model.regression and y_train is not None:
# Print what the MSE would be if only predicting the mean of
# the training data
print(' MSE on test data if predicting mean of train data:',
np.mean(np.square(np.mean(y_train) - np.array(all_true))))
x_test_pos = [data_parser.pos_for_input(xi) for xi in x_test]
if write_test_tsv:
# Determine features for all input sequences
seq_feats = []
for i in range(len(x_test)):
seq_feats += [data_parser.seq_features_from_encoding(x_test[i])]
cols = ['target', 'target_without_context', 'guide',
'hamming_dist', 'cas13a_pfs', 'crrna_pos', 'true_activity',
'predicted_activity']
with gzip.open(write_test_tsv, 'wt') as fw:
def write_row(row):
fw.write('\t'.join(str(x) for x in row) + '\n')
# Write header
write_row(cols)
# Write row for each data point
for i in range(len(x_test)):
def val(k):
if k == 'true_activity':
return all_true[i]
elif k == 'predicted_activity':
return all_predictions[i]
elif k == 'crrna_pos':
if x_test_pos is None:
# Use -1 if position is unknown
return -1
else:
# x_test_pos[i] gives position of x_test[i]
return x_test_pos[i]
else:
return seq_feats[i][k]
write_row([val(k) for k in cols])
if plot_roc_curve:
from sklearn.metrics import roc_curve
import matplotlib.pyplot as plt
fpr, tpr, thresholds = roc_curve(all_true, all_predictions)
plt.figure(1)
plt.plot(fpr, tpr)
plt.xlabel('False positive rate')
plt.ylabel('True positive rate')
plt.title('ROC curve')
        plt.savefig(plot_roc_curve)
        plt.show()
if plot_predictions:
import matplotlib.pyplot as plt
plt.figure(1)
plt.scatter(all_true, all_predictions, c=x_test_pos)
plt.xlabel('True value')
plt.ylabel('Predicted value')
plt.title('True vs. predicted values')
        plt.savefig(plot_predictions)
        plt.show()
if model.regression:
test_metrics = {'loss': test_loss.numpy(), 'mse': test_mse.numpy(),
'r-pearson': test_pearson_corr,
'r-spearman': test_spearman_corr}
else:
test_metrics = {'loss': test_loss.numpy(), 'bce': test_bce.numpy(),
'weighted-bce': test_bce_weighted.numpy(),
'auc-pr': test_auc_pr.numpy(),
'auc-roc': test_auc_roc.numpy()}
return test_metrics
#####################################################################
#####################################################################
#####################################################################
#####################################################################
#####################################################################
#####################################################################
# Functions to train and test using Keras
#
# Compared to the custom functions above, this provides less
# flexibility but makes it simpler and possible to train across
# multiple GPUs.
#####################################################################
#####################################################################
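# For multi-GPU training, the model is typically constructed and compiled
# inside a tf.distribute strategy scope before calling train_with_keras();
# a sketch (the strategy choice and this wiring are assumptions about one
# reasonable workflow, not requirements of this module):
#     strategy = tf.distribute.MirroredStrategy()
#     with strategy.scope():
#         model = construct_model(params, x_train.shape, regression,
#                                 compile_for_keras=True, y_train=y_train)
#     train_with_keras(model, x_train, y_train, x_validate, y_validate)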
def train_with_keras(model, x_train, y_train, x_validate, y_validate,
max_num_epochs=50):
"""Fit a model using Keras.
The model must have already been compiled (e.g., with construct_model()
above).
Args:
model: compiled model, e.g., output by construct_model()
x_train/y_train: training data
x_validate/y_validate: validation data; also used for early stopping
max_num_epochs: maximum number of epochs to train for; note that
the number it is trained for should be less due to early stopping
"""
# Setup early stopping
# The validation data is only used for early stopping
es = tf.keras.callbacks.EarlyStopping(monitor='val_loss',
mode='min', patience=10, restore_best_weights=True)
# Fit the model
model.fit(x_train, y_train, validation_data=(x_validate, y_validate),
batch_size=model.batch_size, callbacks=[es],
class_weight=model.class_weight,
epochs=max_num_epochs,
verbose=2)
def test_with_keras(model, x_test, y_test, data_parser, write_test_tsv=None,
callback=None, regression=False):
"""Test a model.
This prints metrics.
Args:
model: model object
x_test, y_test: testing input and outputs (labels, if
classification)
data_parser: data parser object from parse_data
write_test_tsv: if set, path to TSV at which to write data on predictions
as well as the testing sequences (one row per data point)
callback: if set, a function to call that accepts the true and
predicted test values -- called like callback(y_true, f_pred)
regression: True iff this is testing a model for regression;
this is only used if model.regression is not available
Returns:
dict with test metrics at the end (keys are 'loss'
and ('bce' or 'mse') and ('auc-roc' or 'r-spearman'))
"""
# model may not have batch_size if it is loaded from a SavedModel
# serialization
# But the batch_size should not matter for testing, so just use 32
if hasattr(model, 'batch_size'):
batch_size = model.batch_size
else:
batch_size = 32
# Likewise, model may not have regression attribute if it is loaded
# from a SavedModel serialization
# Override the argument given to this function if it does; otherwise
# just use the argument
if hasattr(model, 'regression'):
regression = model.regression
# Evaluate on test data
test_metrics = model.evaluate(x_test, y_test,
batch_size=batch_size)
# Turn test_metrics from list into dict
test_metrics = dict(zip(model.metrics_names, test_metrics))
y_true = y_test
y_pred = model.predict(x_test, batch_size=batch_size)
if write_test_tsv:
x_test_pos = [data_parser.pos_for_input(xi) for xi in x_test]
# Determine features for all input sequences
seq_feats = []
for i in range(len(x_test)):
seq_feats += [data_parser.seq_features_from_encoding(x_test[i])]
cols = ['target', 'target_without_context', 'guide',
'hamming_dist', 'cas13a_pfs', 'crrna_pos', 'true_activity',
'predicted_activity']
with gzip.open(write_test_tsv, 'wt') as fw:
def write_row(row):
fw.write('\t'.join(str(x) for x in row) + '\n')
# Write header
write_row(cols)
# Write row for each data point
for i in range(len(x_test)):
def val(k):
if k == 'true_activity':
yt = y_true[i]
assert len(yt) == 1
return yt[0]
elif k == 'predicted_activity':
yp = y_pred[i]
assert len(yp) == 1
return yp[0]
elif k == 'crrna_pos':
if x_test_pos is None:
# Use -1 if position is unknown
return -1
else:
# x_test_pos[i] gives position of x_test[i]
return x_test_pos[i]
else:
return seq_feats[i][k]
write_row([val(k) for k in cols])
if regression:
mse_metric = tf.keras.metrics.MeanSquaredError()
mse_metric(y_true, y_pred)
mse = mse_metric.result().numpy()
pearson_corr_metric = Correlation('pearson_corr')
pearson_corr_metric(y_true, y_pred)
pearson_corr = pearson_corr_metric.result()
spearman_corr_metric = Correlation('spearman_corr')
spearman_corr_metric(y_true, y_pred)
spearman_corr = spearman_corr_metric.result()
test_metrics = {'loss': test_metrics['loss'],
'mse': mse,
'r-pearson': pearson_corr,
'r-spearman': spearman_corr}
else:
bce_metric = tf.keras.metrics.BinaryCrossentropy()
bce_metric(y_true, y_pred)
bce = bce_metric.result().numpy()
auc_pr_metric = tf.keras.metrics.AUC(num_thresholds=500, curve='PR')
auc_pr_metric(y_true, y_pred)
auc_pr = auc_pr_metric.result().numpy()
auc_roc_metric = tf.keras.metrics.AUC(num_thresholds=500, curve='ROC')
auc_roc_metric(y_true, y_pred)
auc_roc = auc_roc_metric.result().numpy()
test_metrics = {'loss': test_metrics['loss'],
'bce': bce,
'auc-pr': auc_pr,
'auc-roc': auc_roc}
print('TEST METRICS:', test_metrics)
if callback is not None:
callback(y_true, y_pred)
return test_metrics
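# A minimal usage sketch for test_with_keras(); the model/data names and the
# callback body are illustrative assumptions:
#     def _report(y_true, y_pred):
#         print(np.corrcoef(y_true.ravel(), y_pred.ravel())[0, 1])
#     test_with_keras(model, x_test, y_test, data_parser,
#                     write_test_tsv='predictions.tsv.gz', callback=_report)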
#####################################################################
#####################################################################
#####################################################################
#####################################################################
def main():
# Read arguments and data
args = parse_args()
if args.load_model:
# Read saved parameters and load them into the args namespace
print('Loading parameters for model..')
load_path_params = os.path.join(args.load_model,
'model.params.pkl')
with open(load_path_params, 'rb') as f:
saved_params = pickle.load(f)
params = vars(args)
for k, v in saved_params.items():
print("Setting argument '{}'={}".format(k, v))
params[k] = v
if args.test_split_frac:
train_and_validate_split_frac = 1.0 - args.test_split_frac
if not (args.load_model or args.load_model_as_tf_savedmodel):
            # Since this will be training a model, reserve 20% of the
            # non-test data (i.e. 25% of the final training split) for
            # early-stopping validation
validate_frac = 0.2*train_and_validate_split_frac
train_frac = train_and_validate_split_frac - validate_frac
else:
train_frac = train_and_validate_split_frac
validate_frac = 0.0
split_frac = (train_frac, validate_frac, args.test_split_frac)
else:
split_frac = None
# Set seed and read data
set_seed(args.seed)
data_parser = read_data(args, split_frac=split_frac)
x_train, y_train = data_parser.train_set()
x_validate, y_validate = data_parser.validate_set()
x_test, y_test = data_parser.test_set()
# Determine, based on the dataset, whether to do regression or
# classification
if args.dataset == 'cas13':
if args.cas13_classify:
regression = False
else:
regression = True
if regression and args.plot_roc_curve:
raise Exception(("Can only use --plot-roc-curve when doing "
"classification"))
if not regression and args.plot_predictions:
raise Exception(("Can only use --plot-predictions when doing "
"regression"))
if args.load_model and args.load_model_as_tf_savedmodel:
raise Exception(("Cannot set both --load-model and "
"--load-model-as-tf-savedmodel"))
if args.load_model:
# Load the model weights; the model architecture is specified
# by params
print('Loading model weights..')
model = load_model(args.load_model, params, x_train, y_train)
print('Done loading model.')
elif args.load_model_as_tf_savedmodel:
# Load a model saved with TensorFlow's SavedModel format
# This contains both model architecture and weights
model = tf.keras.models.load_model(
args.load_model_as_tf_savedmodel)
else:
# Construct model
params = vars(args)
model = construct_model(params, x_train.shape, regression,
compile_for_keras=True, y_train=y_train)
# Train the model, with validation
train_with_keras(model, x_train, y_train, x_validate, y_validate)
if args.filter_test_data_by_classification_score:
if not regression:
raise Exception(("Can only use --filter-test-data-by-classification-"
"score when testing regression"))
classification_test_tsv, score_threshold = args.filter_test_data_by_classification_score
        score_threshold =
# Copyright (C) 2019 <NAME>.L.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
#
import datetime
import enum
import itertools
import re
import textwrap
from dateutils import DateUtils
from phpreport import PHPReport
from phpreport import TaskFilter
class PeriodOfWork():
def __init__(self, start_date, num_days, task_filter=TaskFilter(), tasks=None):
self.start_date = start_date
self.num_days = num_days
self.users = set()
self.tasks = tasks
self.task_filter = task_filter.create_same_filter_with_different_dates(
self.start_date,
DateUtils.from_date_offset(self.start_date, num_days - 1))
def get_users(self):
return self.users
def set_tasks(self, tasks):
self.tasks = []
for task in tasks:
self.add_task(task)
def add_task(self, task):
self.tasks.append(task)
self.users.add(task.user)
def add_task_if_starts_in_period(self, task):
ending_date = DateUtils.from_date_offset(self.start_date, self.num_days)
if task.date < self.start_date:
return False
if task.date >= ending_date:
return False
self.add_task(task)
return True
def filter_tasks(self, date=None, day_offset=None,
user=None, only_onsite=False):
if date is None and day_offset is not None:
date = DateUtils.from_date_offset(self.start_date, day_offset)
def filter_task(task):
if user is not None and task.user != user:
return False
if date is not None and not DateUtils.same_date(task.date, date):
return False
if only_onsite and not task.onsite:
return False
return True
return list(filter(filter_task, self.tasks))
def get_all_dates(self):
return [DateUtils.from_date_offset(self.start_date, offset)
for offset in range(0, self.num_days)]
def time_worked(self, date=None, day_offset=None,
user=None, only_onsite=False):
return sum([task.length() for task in
self.filter_tasks(date, day_offset, user, only_onsite)],
datetime.timedelta())
@staticmethod
def fetch_tasks_for_all(periods):
filters = [period.task_filter for period in periods]
tasks = PHPReport.get_tasks_for_task_filters(filters)
for pair in zip(periods, tasks):
pair[0].set_tasks(pair[1])
class WeekOfWork(PeriodOfWork):
def __init__(self, year, week, task_filter=TaskFilter(), tasks=None):
self.week = week
self.year = year
date = DateUtils.from_week_number(year, week)
super(WeekOfWork, self).__init__(date, 7, task_filter, tasks)
def __str__(self):
return "Week %i of %i" % (self.week, self.year)
def short_string(self):
return "Week {}/{} ".format(self.week, self.year)
def wiki_string(self):
return "Week%i-%i" % (self.week, self.year)
@classmethod
def create_array_of_weeks_between_dates(cls, start, end, task_filter):
week_dates = DateUtils.get_weeks_in_date_range(start, end)
weeks = [cls(*DateUtils.year_and_week_number(week_date),
task_filter=task_filter, tasks=[]) for week_date in week_dates]
return weeks
@classmethod
def create_from_string(cls, string, task_filter):
dates = DateUtils.date_range_from_string(string)
weeks = cls.create_array_of_weeks_between_dates(dates[0], dates[1],
task_filter)
cls.fetch_tasks_for_all(weeks)
return weeks
@classmethod
def create_for_entire_project(cls, task_filter):
assert task_filter.project
tasks = PHPReport.get_tasks_for_task_filters([task_filter])
tasks = [item for sublist in tasks for item in sublist]
if not tasks:
return []
first_date = last_date = tasks[0].date
for task in tasks[1:]:
if task.date < first_date:
first_date = task.date
if task.date > last_date:
last_date = task.date
weeks = cls.create_array_of_weeks_between_dates(first_date, last_date,
task_filter)
def add_task_to_weeks(task):
for week in weeks:
if week.add_task_if_starts_in_period(task):
return
raise Exception("Could not assign task to week.")
for task in tasks:
add_task_to_weeks(task)
return weeks
class AggregateReport():
def __init__(self, time_periods, formatter, header, wiki_string):
self.header = header
self.wiki_string = wiki_string
self.time_periods = time_periods
self.formatter = formatter
self.parent = None
@staticmethod
def generate_report_for_period(period, table_contents):
amount = period.time_worked()
amount_onsite = period.time_worked(only_onsite=True)
hours_worked_string = DateUtils.format_delta_as_hours(amount)
if amount_onsite:
hours_worked_string += " (%s onsite)" % \
DateUtils.format_delta_as_hours(amount_onsite)
table_contents.append([period.short_string(), hours_worked_string])
return (amount, amount_onsite)
def generate_report(self):
self.formatter.generate_header(self.header)
table_contents = []
total = datetime.timedelta()
total_onsite = datetime.timedelta()
for period in self.time_periods:
(time, time_onsite) = AggregateReport.generate_report_for_period(period, table_contents)
total += time
total_onsite += time_onsite
self.formatter.generate_table(table_contents, has_headers=False)
self.formatter.generate_header(
"Total hours worked: %s" % DateUtils.format_delta_as_hours(total))
self.formatter.generate_header(
"Total onsite hours worked: %s" % DateUtils.format_delta_as_hours(total_onsite))
return self.formatter.flatten()
class DetailedReport():
def __init__(self, time_period, parent, formatter, include_story=True):
if parent:
header = "{0} for {1}".format(time_period, parent.header)
wiki_string = "{0}-{1}".format(parent.wiki_string, time_period.wiki_string())
else:
header = "{0} for {1}".format(time_period, str(time_period.task_filter))
wiki_string = time_period.wiki_string()
self.header = header
self.wiki_string = wiki_string
self.time_period = time_period
self.formatter = formatter
self.parent = parent
self.include_story = include_story
self.pieces = []
@staticmethod
def format_date(date):
return date.strftime("%d %b")
def time_worked(self, user=None, total=False):
if total:
return [DateUtils.format_delta_as_hours(self.time_period.time_worked(user=user))]
all_dates = self.time_period.get_all_dates()
return [DateUtils.format_delta_as_hours(self.time_period.time_worked(date=x, user=user)) for x in all_dates]
def generate_hours(self):
table = []
table.append([""] + list(map(DetailedReport.format_date, self.time_period.get_all_dates())) + ["Total"])
for user in sorted(self.time_period.get_users()):
table.append([user.login] +
self.time_worked(user=user) +
self.time_worked(user=user, total=True))
table.append(["everyone"] +
self.time_worked() +
self.time_worked(total=True))
self.formatter.generate_table(table)
onsite_time = self.time_period.time_worked(only_onsite=True)
if onsite_time > datetime.timedelta(0):
self.formatter.generate_large_text("Onsite hours worked: %s" % DateUtils.format_delta_as_hours(onsite_time))
def get_stories_for_day_and_user(self, user, date):
tasks_for_day = self.time_period.filter_tasks(date=date, user=user)
def get_story(task):
story = ""
if self.include_story:
story += self.formatter.format_story(task.story)
if story:
story += " "
return story
all_stories = [get_story(task) + task.text for task in tasks_for_day]
# Many times people add a lot of duplicate descriptions. Just output one of each.
all_stories = set(all_stories)
# Strip out duplicated whitespace
return re.compile(r'\s+').sub(' ', " ".join(all_stories)).strip()
def generate_stories_for_user(self, user):
self.formatter.generate_section_header("Stories for %s" % user.login)
all_dates = self.time_period.get_all_dates()
contents = [(date.strftime("%A"), self.get_stories_for_day_and_user(user, date)) for date in all_dates]
self.formatter.generate_aligned_list(contents)
def generate_report(self):
self.pieces = []
self.formatter.generate_header(self.header)
self.generate_hours()
for user in sorted(self.time_period.users):
self.generate_stories_for_user(user)
return self.formatter.flatten()
class TextFormatter():
def __init__(self):
self.pieces = []
def generate_table_row(self, columns, lengths, header=False):
format_string = ""
for length in lengths:
format_string += "%%-%i.%is " % (length, length)
self.pieces.append(format_string % tuple(columns))
self.pieces.append("\n")
@staticmethod
def generate_column_length_list(table):
lengths = [list(map(len, x)) for x in table] # Generate a table of lengths.
# Turn the table of lengths into a row of max lengths for each column.
return list(map(max, list(zip(*lengths))))
def generate_table(self, table, has_headers=True):
if not table:
return
lengths = TextFormatter.generate_column_length_list(table)
self.generate_table_row(table[0], lengths, header=has_headers)
for row in table[1:]:
self.generate_table_row(row, lengths)
def generate_aligned_list(self, contents):
first_column_size = max([len(content[0]) for content in contents])
format_string = "%%%i.%is: %%s\n" % (first_column_size, first_column_size)
indent = (first_column_size + 2) * ' ' # Enough to account for the day name offset.
width = 80 - len(indent)
for content in contents:
second_column = textwrap.fill(content[1],
break_long_words=False, # Don't break URLs.
width=width,
initial_indent=indent,
subsequent_indent=indent).strip()
self.pieces.append(format_string % (content[0], second_column))
def generate_header(self, header):
self.pieces.append("\n%s\n" % header)
def generate_section_header(self, header):
self.pieces.append("\n%s\n" % header)
def generate_large_text(self, text):
self.pieces.append("%s\n" % text)
@classmethod
def format_story(cls, story):
if story:
return "[{}]".format(story)
return ""
def flatten(self):
return "".join(self.pieces)
class TwikiFormatter(TextFormatter):
def generate_table_row(self, columns, lengths=None, header=False, highlight_first=True):
first = "| *%s* "
if not highlight_first:
first = "| %s"
if header:
format_string = first + (len(columns) - 2) * " | *%s*" + " | *%s* |"
else:
format_string = first + (len(columns) - 2) * " | %s" + " | %s |"
self.pieces.append(format_string % tuple(columns))
self.pieces.append("\n")
def generate_table(self, table, has_headers=True):
if len(table) < 10 or has_headers:
return super(TwikiFormatter, self).generate_table(table, has_headers)
def chunks_of_n(list_to_chunk, num_chunks):
for i in range(0, len(list_to_chunk), num_chunks):
yield list_to_chunk[i:i + num_chunks]
def transpose_table(table):
return list(map(list, itertools.zip_longest(*table, fillvalue=['', ''])))
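        # Illustrative layout (hypothetical sizes): a 25-row, two-column table
        # is split into chunks of 10 rows and the chunks laid side by side, so
        # the first printed row holds original rows 1, 11 and 21 (six cells),
        # with missing cells padded by zip_longest's blank fillvalue.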
table = transpose_table(chunks_of_n(table, 10))
for row in table:
row = sum(row, [])
self.generate_table_row(row, highlight_first=False)
def generate_header(self, header):
self.pieces.append("\n---++%s\n" % header)
def generate_section_header(self, header):
self.pieces.append("\n---++++%s\n" % header)
def generate_aligned_list(self, contents):
for content in contents:
self.pieces.append(" * *%s* - %s\n" % (content[0], content[1]))
class MarkdownFormatter(TextFormatter):
def generate_table(self, table, has_headers=True):
return ""
def generate_header(self, header):
self.pieces.append("\n# %s\n" % header)
def generate_section_header(self, header):
self.pieces.append("\n## %s\n" % header)
def generate_aligned_list(self, contents):
self.pieces.append("\n")
for content in contents:
self.pieces.append(" * **%s** %s\n" % (content[0], content[1]))
def format_story(self, story):
if story:
return "*{}*".format(story)
return ""
class ReportCreator():
class Mode(enum.Enum):
PROJECT, AGGREGATE, DETAIL = range(3)
def __init__(self, args):
self.args = args
self.task_filter = args.to_task_filter()
if not args.time:
self.time_periods = WeekOfWork.create_for_entire_project(self.task_filter)
self.mode = ReportCreator.Mode.PROJECT
elif args.time:
self.time_periods = WeekOfWork.create_from_string(args.time, self.task_filter)
if len(self.time_periods) > 1:
self.mode = ReportCreator.Mode.AGGREGATE
else:
self.mode = ReportCreator.Mode.DETAIL
self.parent_report = None
self.reports = []
def formatter(self):
if self.args.formatter == "twiki":
return TwikiFormatter()
if self.args.formatter == "markdown":
return MarkdownFormatter()
return TextFormatter()
def create_parent_report(self):
if self.mode == ReportCreator.Mode.PROJECT:
project = self.task_filter.project
return AggregateReport(self.time_periods, self.formatter(),
project.description,
re.sub(r'[^a-zA-Z0-9]', '', project.description) + "Report")
if self.mode == ReportCreator.Mode.AGGREGATE:
            return
# -*- coding: utf-8 -*-
from __future__ import absolute_import, division, print_function
from libtbx import easy_run
import iotbx.cif
from libtbx.test_utils import approx_equal, show_diff
from six.moves import cStringIO as StringIO
import iotbx.mtz
import os
sf_5r82 = """\
data_r5r82sf
#
_audit.revision_id 1_0
_audit.creation_date 2020-03-11
_audit.update_record "Initial release"
#
_cell.entry_id 5r82
_cell.length_a 112.665
_cell.length_b 52.848
_cell.length_c 44.468
_cell.angle_alpha 90.000
_cell.angle_beta 102.966
_cell.angle_gamma 90.000
#
_diffrn.id 1
_diffrn.details "data from final refinement with ligand, final.mtz"
_diffrn.crystal_id 1
#
_diffrn_radiation_wavelength.id 1
_diffrn_radiation_wavelength.wavelength 0.9126
#
_entry.id 5r82
#
_exptl_crystal.id 1
#
_reflns_scale.group_code 1
#
_symmetry.entry_id 5r82
_symmetry.space_group_name_H-M "C 1 2 1"
_symmetry.Int_Tables_number 5
#
loop_
_refln.crystal_id
_refln.wavelength_id
_refln.scale_group_code
_refln.index_h
_refln.index_k
_refln.index_l
_refln.status
_refln.F_meas_au
_refln.F_meas_sigma_au
_refln.F_calc_au
_refln.phase_calc
_refln.pdbx_FWT
_refln.pdbx_PHWT
_refln.pdbx_DELFWT
_refln.pdbx_DELPHWT
_refln.fom
1 1 1 -86 0 8 o ? ? 14.57 0.00 8.89 0.00 0.00 0.00 0.00
1 1 1 -85 1 3 o ? ? 74.69 159.82 45.89 159.75 0.00 0.00 0.00
1 1 1 -85 1 4 o ? ? 18.89 136.18 11.62 136.53 0.00 0.00 0.00
1 1 1 -85 1 5 o ? ? 40.54 44.81 24.89 44.97 0.00 0.00 0.00
1 1 1 -85 1 6 o ? ? 14.55 265.49 8.91 267.15 0.00 0.00 0.00
1 1 1 -85 1 7 o ? ? 38.21 15.80 23.59 15.42 0.00 0.00 0.00
1 1 1 -85 1 8 o ? ? 47.35 181.29 29.24 181.19 0.00 0.00 0.00
1 1 1 -85 1 9 o 56.21 26.85 38.50 89.75 41.26 89.77 17.50 89.77 0.58
1 1 1 -85 1 10 o 65.09 31.91 27.40 104.11 36.22 104.55 19.20 104.55 0.41
1 1 1 -85 1 11 o 54.28 26.34 24.33 27.77 29.23 27.86 14.13 27.86 0.41
1 1 1 -85 1 12 o 58.89 28.38 21.26 158.09 27.96 158.29 14.93 158.29 0.35
1 1 1 -85 3 4 o ? ? 16.70 93.70 10.28 93.61 0.00 0.00 0.00
1 1 1 -85 3 5 o ? ? 18.82 319.51 11.62 319.61 0.00 0.00 0.00
1 1 1 -85 3 6 o ? ? 9.64 143.57 6.02 143.97 0.00 0.00 0.00
1 1 1 -85 3 7 o ? ? 34.35 260.46 21.18 260.07 0.00 0.00 0.00
1 1 1 -85 3 8 o ? ? 30.57 8.43 18.77 8.70 0.00 0.00 0.00
1 1 1 -85 3 9 o 66.81 31.32 64.85 81.01 57.84 80.86 17.89 80.86 0.73
1 1 1 -85 3 10 o 59.74 28.33 64.04 132.52 49.17 132.36 9.69 132.36 0.74
1 1 1 -85 3 11 o 50.02 24.64 5.34 93.51 6.70 95.13 3.34 95.13 0.10
1 1 1 -85 5 5 o ? ? 29.69 339.80 18.22 340.06 0.00 0.00 0.00
1 1 1 -85 5 6 o ? ? 52.33 264.97 32.14 265.05 0.00 0.00 0.00
1 1 1 -85 5 7 o ? ? 63.54 232.41 39.02 232.26 0.00 0.00 0.00
1 1 1 -85 5 8 o ? ? 25.64 55.65 15.80 55.84 0.00 0.00 0.00
1 1 1 -85 5 9 o 44.64 22.72 27.71 201.58 23.51 202.12 6.57 202.12 0.45
1 1 1 -85 5 10 o ? ? 48.38 118.56 29.58 118.56 0.00 0.00 0.00
1 1 1 -84 0 1 o ? ? 3.94 180.00 2.34 180.00 0.00 0.00 0.00
1 1 1 -84 0 2 o ? ? 9.64 180.00 5.88 180.00 0.00 0.00 0.00
1 1 1 -84 0 3 o ? ? 57.42 180.00 35.50 180.00 0.00 0.00 0.00
1 1 1 -84 0 4 o ? ? 5.23 0.00 3.15 0.00 0.00 0.00 0.00
1 1 1 -84 0 5 o ? ? 3.86 180.00 2.39 180.00 0.00 0.00 0.00
1 1 1 -84 0 6 o ? ? 21.93 0.00 13.65 0.00 0.00 0.00 0.00
1 1 1 -84 0 7 o ? ? 7.45 0.00 4.61 0.00 0.00 0.00 0.00
1 1 1 -84 0 8 o 73.12 45.70 7.69 0.00 15.82 0.00 11.03 0.00 0.14
#
data_r5r82Asf
#
_cell.entry_id 5r82
_cell.length_a 112.665
_cell.length_b 52.848
_cell.length_c 44.468
_cell.angle_alpha 90.000
_cell.angle_beta 102.966
_cell.angle_gamma 90.000
#
_diffrn.id 1
_diffrn.details "data from original reflections, data.mtz"
_diffrn.crystal_id 1
#
_diffrn_radiation_wavelength.id 1
_diffrn_radiation_wavelength.wavelength 0.9126
#
_entry.id 5r82
#
_exptl_crystal.id 1
#
_reflns_scale.group_code 1
#
_symmetry.entry_id 5r82
_symmetry.space_group_name_H-M "C 1 2 1"
_symmetry.Int_Tables_number 5
#
loop_
_refln.crystal_id
_refln.wavelength_id
_refln.scale_group_code
_refln.index_h
_refln.index_k
_refln.index_l
_refln.status
_refln.F_meas_au
_refln.F_meas_sigma_au
1 1 1 -86 0 7 o ? ?
1 1 1 -86 0 8 o ? ?
1 1 1 -85 1 3 o ? ?
1 1 1 -85 1 4 o ? ?
1 1 1 -85 1 5 o ? ?
1 1 1 -85 1 6 o ? ?
1 1 1 -85 1 7 o ? ?
1 1 1 -85 1 8 o ? ?
1 1 1 -85 1 9 o 1.28 0.61
1 1 1 -85 1 10 o 1.47 0.72
1 1 1 -85 1 11 o 1.21 0.59
1 1 1 -85 1 12 o 1.30 0.63
1 1 1 -85 3 4 o ? ?
1 1 1 -85 3 5 o ? ?
1 1 1 -85 3 6 o ? ?
1 1 1 -85 3 7 o ? ?
1 1 1 -85 3 8 o ? ?
1 1 1 -85 3 9 o 1.53 0.72
1 1 1 -85 3 10 o 1.35 0.64
1 1 1 -85 3 11 o 1.12 0.55
1 1 1 -85 3 12 o ? ?
1 1 1 -85 5 5 o ? ?
1 1 1 -85 5 6 o ? ?
1 1 1 -85 5 7 o ? ?
1 1 1 -85 5 8 o ? ?
1 1 1 -85 5 9 o 1.02 0.52
1 1 1 -85 5 10 o ? ?
1 1 1 -84 0 1 o ? ?
1 1 1 -84 0 2 o ? ?
1 1 1 -84 0 3 o ? ?
1 1 1 -84 0 4 o ? ?
1 1 1 -84 0 5 o ? ?
1 1 1 -84 0 6 o ? ?
1 1 1 -84 0 7 o ? ?
1 1 1 -84 0 8 o 1.71 1.07
#
data_r5r82Bsf
#
_cell.entry_id 5r82
_cell.length_a 112.665
_cell.length_b 52.848
_cell.length_c 44.468
_cell.angle_alpha 90.000
_cell.angle_beta 102.970
_cell.angle_gamma 90.000
#
_diffrn.id 1
_diffrn.details "data for ligand evidence map (PanDDA event map), event_map_1.mtz"
_diffrn.crystal_id 1
#
_diffrn_radiation_wavelength.id 1
_diffrn_radiation_wavelength.wavelength 0.9126
#
_entry.id 5r82
#
_exptl_crystal.id 1
#
_reflns_scale.group_code 1
#
_symmetry.entry_id 5r82
_symmetry.space_group_name_H-M "P 1"
_symmetry.Int_Tables_number 1
#
loop_
_refln.crystal_id
_refln.wavelength_id
_refln.scale_group_code
_refln.index_h
_refln.index_k
_refln.index_l
_refln.status
_refln.F_meas_au
_refln.F_meas_sigma_au
_refln.pdbx_PHWT
1 1 1 -85 -6 7 o 183.35 1.00 42.48
1 1 1 -85 -6 8 o 204.28 1.00 118.73
1 1 1 -85 -5 5 o 408.51 1.00 26.47
1 1 1 -85 -5 6 o 368.34 1.00 -17.28
1 1 1 -85 -5 7 o 105.28 1.00 -108.00
1 1 1 -85 -5 8 o 447.04 1.00 49.71
1 1 1 -85 -5 9 o 151.27 1.00 76.05
1 1 1 -85 -5 10 o 177.73 1.00 -11.88
1 1 1 -85 -4 4 o 176.60 1.00 137.96
1 1 1 -85 -4 5 o 94.48 1.00 45.34
1 1 1 -85 -4 6 o 269.47 1.00 3.56
1 1 1 -85 -4 7 o 146.26 1.00 103.92
1 1 1 -85 -4 8 o 251.33 1.00 152.35
1 1 1 -85 -4 9 o 189.51 1.00 -158.44
1 1 1 -85 -4 10 o 340.08 1.00 50.25
1 1 1 -85 -4 11 o 85.03 1.00 42.71
1 1 1 -85 -3 4 o 458.44 1.00 -148.81
1 1 1 -85 -3 5 o 928.82 1.00 25.99
1 1 1 -85 -3 6 o 266.68 1.00 -145.70
1 1 1 -85 -3 7 o 524.04 1.00 20.59
1 1 1 -85 -3 8 o 824.38 1.00 126.20
1 1 1 -85 -3 9 o 334.69 1.00 -9.45
1 1 1 -85 -3 10 o 367.92 1.00 -34.78
1 1 1 -85 -3 11 o 622.58 1.00 74.86
1 1 1 -85 -2 3 o 197.98 1.00 -127.10
1 1 1 -85 -2 4 o 140.13 1.00 -111.56
#
#END OF REFLECTIONS
"""
sf_5r82_mtz_results = {'r5r82sf': """\
Title: phenix.cif_as_mtz
Space group symbol from file: C2
Space group number from file: 5
Space group from matrices: C 1 2 1 (No. 5)
Point group symbol from file: 2
Number of crystals: 2
Number of Miller indices: 33
Resolution range: 1.34113 1.30997
History:
Crystal 1:
Name: HKL_base
Project: HKL_base
Id: 0
Unit cell: (112.665, 52.848, 44.468, 90, 102.966, 90)
Number of datasets: 1
Dataset 1:
Name: HKL_base
Id: 0
Wavelength: 0
Number of columns: 0
Crystal 2:
Name: crystal_0
Project: project_0
Id: 2
Unit cell: (112.665, 52.848, 44.468, 90, 102.966, 90)
Number of datasets: 1
Dataset 1:
Name: dataset
Id: 1
Wavelength: 0.9126
Number of columns: 13
label #valid %valid min max type
H 33 100.00% -86.00 -84.00 H: index h,k,l
K 33 100.00% 0.00 5.00 H: index h,k,l
L 33 100.00% 1.00 12.00 H: index h,k,l
R-free-flags 33 100.00% 1.00 1.00 I: integer
FOBS 9 27.27% 44.64 73.12 F: amplitude
SIGFOBS 9 27.27% 22.72 45.70 Q: standard deviation
FC 33 100.00% 3.86 74.69 F: amplitude
None,
'CUNEIFORM SIGN EN OPPOSING EN': None,
'CUNEIFORM SIGN EN SQUARED': None,
'CUNEIFORM SIGN EN TIMES GAN2': None,
'CUNEIFORM SIGN EN TIMES GAN2 TENU': None,
'CUNEIFORM SIGN EN TIMES ME': None,
'CUNEIFORM SIGN EREN': None,
'CUNEIFORM SIGN ERIN2': None,
'CUNEIFORM SIGN ESH2': None,
'CUNEIFORM SIGN EZEN': None,
'CUNEIFORM SIGN EZEN TIMES A': None,
'CUNEIFORM SIGN EZEN TIMES A PLUS LAL': None,
'CUNEIFORM SIGN EZEN TIMES A PLUS LAL TIMES LAL': None,
'CUNEIFORM SIGN EZEN TIMES AN': None,
'CUNEIFORM SIGN EZEN TIMES BAD': None,
'CUNEIFORM SIGN EZEN TIMES DUN3 GUNU': None,
'CUNEIFORM SIGN EZEN TIMES DUN3 GUNU GUNU': None,
'CUNEIFORM SIGN EZEN TIMES HA': None,
'CUNEIFORM SIGN EZEN TIMES HA GUNU': None,
'CUNEIFORM SIGN EZEN TIMES IGI GUNU': None,
'CUNEIFORM SIGN EZEN TIMES KASKAL': None,
'CUNEIFORM SIGN EZEN TIMES KASKAL SQUARED': None,
'CUNEIFORM SIGN EZEN TIMES KU3': None,
'CUNEIFORM SIGN EZEN TIMES LA': None,
'CUNEIFORM SIGN EZEN TIMES LAL TIMES LAL': None,
'CUNEIFORM SIGN EZEN TIMES LI': None,
'CUNEIFORM SIGN EZEN TIMES LU': None,
'CUNEIFORM SIGN EZEN TIMES U2': None,
'CUNEIFORM SIGN EZEN TIMES UD': None,
'CUNEIFORM SIGN GA': None,
'CUNEIFORM SIGN GA GUNU': None,
'CUNEIFORM SIGN GA2': None,
'CUNEIFORM SIGN GA2 OVER GA2': None,
'CUNEIFORM SIGN GA2 TIMES A PLUS DA PLUS HA': None,
'CUNEIFORM SIGN GA2 TIMES A PLUS HA': None,
'CUNEIFORM SIGN GA2 TIMES A PLUS IGI': None,
'CUNEIFORM SIGN GA2 TIMES AB2 TENU PLUS TAB': None,
'CUNEIFORM SIGN GA2 TIMES AN': None,
'CUNEIFORM SIGN GA2 TIMES ASH': None,
'CUNEIFORM SIGN GA2 TIMES ASH2 PLUS GAL': None,
'CUNEIFORM SIGN GA2 TIMES BAD': None,
'CUNEIFORM SIGN GA2 TIMES BAR PLUS RA': None,
'CUNEIFORM SIGN GA2 TIMES BUR': None,
'CUNEIFORM SIGN GA2 TIMES BUR PLUS RA': None,
'CUNEIFORM SIGN GA2 TIMES DA': None,
'CUNEIFORM SIGN GA2 TIMES DI': None,
'CUNEIFORM SIGN GA2 TIMES DIM TIMES SHE': None,
'CUNEIFORM SIGN GA2 TIMES DUB': None,
'CUNEIFORM SIGN GA2 TIMES EL': None,
'CUNEIFORM SIGN GA2 TIMES EL PLUS LA': None,
'CUNEIFORM SIGN GA2 TIMES EN': None,
'CUNEIFORM SIGN GA2 TIMES EN TIMES GAN2 TENU': None,
'CUNEIFORM SIGN GA2 TIMES GAN2 TENU': None,
'CUNEIFORM SIGN GA2 TIMES GAR': None,
'CUNEIFORM SIGN GA2 TIMES GI': None,
'CUNEIFORM SIGN GA2 TIMES GI4': None,
'CUNEIFORM SIGN GA2 TIMES GI4 PLUS A': None,
'CUNEIFORM SIGN GA2 TIMES GIR2 PLUS SU': None,
'CUNEIFORM SIGN GA2 TIMES HA PLUS LU PLUS ESH2': None,
'CUNEIFORM SIGN GA2 TIMES HAL': None,
'CUNEIFORM SIGN GA2 TIMES HAL PLUS LA': None,
'CUNEIFORM SIGN GA2 TIMES HI PLUS LI': None,
'CUNEIFORM SIGN GA2 TIMES HUB2': None,
'CUNEIFORM SIGN GA2 TIMES IGI GUNU': None,
'CUNEIFORM SIGN GA2 TIMES ISH PLUS HU PLUS ASH': None,
'CUNEIFORM SIGN GA2 TIMES KAK': None,
'CUNEIFORM SIGN GA2 TIMES KASKAL': None,
'CUNEIFORM SIGN GA2 TIMES KID': None,
'CUNEIFORM SIGN GA2 TIMES KID PLUS LAL': None,
'CUNEIFORM SIGN GA2 TIMES KU3 PLUS AN': None,
'CUNEIFORM SIGN GA2 TIMES LA': None,
'CUNEIFORM SIGN GA2 TIMES ME PLUS EN': None,
'CUNEIFORM SIGN GA2 TIMES MI': None,
'CUNEIFORM SIGN GA2 TIMES NUN': None,
'CUNEIFORM SIGN GA2 TIMES NUN OVER NUN': None,
'CUNEIFORM SIGN GA2 TIMES PA': None,
'CUNEIFORM SIGN GA2 TIMES SAL': None,
'CUNEIFORM SIGN GA2 TIMES SAR': None,
'CUNEIFORM SIGN GA2 TIMES SHE': None,
'CUNEIFORM SIGN GA2 TIMES SHE PLUS TUR': None,
'CUNEIFORM SIGN GA2 TIMES SHID': None,
'CUNEIFORM SIGN GA2 TIMES SUM': None,
'CUNEIFORM SIGN GA2 TIMES TAK4': None,
'CUNEIFORM SIGN GA2 TIMES U': None,
'CUNEIFORM SIGN GA2 TIMES UD': None,
'CUNEIFORM SIGN GA2 TIMES UD PLUS DU': None,
'CUNEIFORM SIGN GABA': None,
'CUNEIFORM SIGN GABA CROSSING GABA': None,
'CUNEIFORM SIGN GAD': None,
'CUNEIFORM SIGN GAD OVER GAD GAR OVER GAR': None,
'CUNEIFORM SIGN GAL': None,
'CUNEIFORM SIGN GAL GAD OVER GAD GAR OVER GAR': None,
'CUNEIFORM SIGN GALAM': None,
'CUNEIFORM SIGN GAM': None,
'CUNEIFORM SIGN GAN': None,
'CUNEIFORM SIGN GAN2': None,
'CUNEIFORM SIGN GAN2 CROSSING GAN2': None,
'CUNEIFORM SIGN GAN2 OVER GAN2': None,
'CUNEIFORM SIGN GAN2 TENU': None,
'CUNEIFORM SIGN GAR': None,
'CUNEIFORM SIGN GAR3': None,
'CUNEIFORM SIGN GASHAN': None,
'CUNEIFORM SIGN GESHTIN': None,
'CUNEIFORM SIGN GESHTIN TIMES KUR': None,
'CUNEIFORM SIGN GI': None,
'CUNEIFORM SIGN GI CROSSING GI': None,
'CUNEIFORM SIGN GI TIMES E': None,
'CUNEIFORM SIGN GI TIMES U': None,
'CUNEIFORM SIGN GI4': None,
'CUNEIFORM SIGN GI4 CROSSING GI4': None,
'CUNEIFORM SIGN GI4 OVER GI4': None,
'CUNEIFORM SIGN GIDIM': None,
'CUNEIFORM SIGN GIR2': None,
'CUNEIFORM SIGN GIR2 GUNU': None,
'CUNEIFORM SIGN GIR3': None,
'CUNEIFORM SIGN GIR3 TIMES A PLUS IGI': None,
'CUNEIFORM SIGN GIR3 TIMES GAN2 TENU': None,
'CUNEIFORM SIGN GIR3 TIMES IGI': None,
'CUNEIFORM SIGN GIR3 TIMES LU PLUS IGI': None,
'CUNEIFORM SIGN GIR3 TIMES PA': None,
'CUNEIFORM SIGN GISAL': None,
'CUNEIFORM SIGN GISH': None,
'CUNEIFORM SIGN GISH CROSSING GISH': None,
'CUNEIFORM SIGN GISH TENU': None,
'CUNEIFORM SIGN GISH TIMES BAD': None,
'CUNEIFORM SIGN GISH TIMES TAK4': None,
'CUNEIFORM SIGN GU': None,
'CUNEIFORM SIGN GU CROSSING GU': None,
'CUNEIFORM SIGN GU2': None,
'CUNEIFORM SIGN GU2 GUNU': None,
'CUNEIFORM SIGN GU2 TIMES KAK': None,
'CUNEIFORM SIGN GU2 TIMES KAK TIMES IGI GUNU': None,
'CUNEIFORM SIGN GU2 TIMES NUN': None,
'CUNEIFORM SIGN GU2 TIMES SAL PLUS TUG2': None,
'CUNEIFORM SIGN GUD': None,
'CUNEIFORM SIGN GUD OVER GUD LUGAL': None,
'CUNEIFORM SIGN GUD TIMES A PLUS KUR': None,
'CUNEIFORM SIGN GUD TIMES KUR': None,
'CUNEIFORM SIGN GUL': None,
'CUNEIFORM SIGN GUM': None,
'CUNEIFORM SIGN GUM TIMES SHE': None,
'CUNEIFORM SIGN GUR': None,
'CUNEIFORM SIGN GUR7': None,
'CUNEIFORM SIGN GURUN': None,
'CUNEIFORM SIGN GURUSH': None,
'CUNEIFORM SIGN HA': None,
'CUNEIFORM SIGN HA GUNU': None,
'CUNEIFORM SIGN HA TENU': None,
'CUNEIFORM SIGN HAL': None,
'CUNEIFORM SIGN HI': None,
'CUNEIFORM SIGN HI TIMES ASH': None,
'CUNEIFORM SIGN HI TIMES ASH2': None,
'CUNEIFORM SIGN HI TIMES BAD': None,
'CUNEIFORM SIGN HI TIMES DISH': None,
'CUNEIFORM SIGN HI TIMES GAD': None,
'CUNEIFORM SIGN HI TIMES KIN': None,
'CUNEIFORM SIGN HI TIMES NUN': None,
'CUNEIFORM SIGN HI TIMES SHE': None,
'CUNEIFORM SIGN HI TIMES U': None,
'CUNEIFORM SIGN HU': None,
'CUNEIFORM SIGN HUB2': None,
'CUNEIFORM SIGN HUB2 TIMES AN': None,
'CUNEIFORM SIGN HUB2 TIMES HAL': None,
'CUNEIFORM SIGN HUB2 TIMES KASKAL': None,
'CUNEIFORM SIGN HUB2 TIMES LISH': None,
'CUNEIFORM SIGN HUB2 TIMES UD': None,
'CUNEIFORM SIGN HUL2': None,
'CUNEIFORM SIGN I': None,
'CUNEIFORM SIGN I A': None,
'CUNEIFORM SIGN IB': None,
'CUNEIFORM SIGN IDIM': None,
'CUNEIFORM SIGN IDIM OVER IDIM BUR': None,
'CUNEIFORM SIGN IDIM OVER IDIM SQUARED': None,
'CUNEIFORM SIGN IG': None,
'CUNEIFORM SIGN IGI': None,
'CUNEIFORM SIGN IGI DIB': None,
'CUNEIFORM SIGN IGI GUNU': None,
'CUNEIFORM SIGN IGI OVER IGI SHIR OVER SHIR UD OVER UD': None,
'CUNEIFORM SIGN IGI RI': None,
'CUNEIFORM SIGN IL': None,
'CUNEIFORM SIGN IL TIMES GAN2 TENU': None,
'CUNEIFORM SIGN IL2': None,
'CUNEIFORM SIGN IM': None,
'CUNEIFORM SIGN IM CROSSING IM': None,
'CUNEIFORM SIGN IM OPPOSING IM': None,
'CUNEIFORM SIGN IM SQUARED': None,
'CUNEIFORM SIGN IM TIMES TAK4': None,
'CUNEIFORM SIGN IMIN': None,
'CUNEIFORM SIGN IN': None,
'CUNEIFORM SIGN IR': None,
'CUNEIFORM SIGN ISH': None,
'CUNEIFORM SIGN KA': None,
'CUNEIFORM SIGN KA TIMES A': None,
'CUNEIFORM SIGN KA TIMES AD': None,
'CUNEIFORM SIGN KA TIMES AD PLUS KU3': None,
'CUNEIFORM SIGN KA TIMES ASH2': None,
'CUNEIFORM SIGN KA TIMES BAD': None,
'CUNEIFORM SIGN KA TIMES BALAG': None,
'CUNEIFORM SIGN KA TIMES BAR': None,
'CUNEIFORM SIGN KA TIMES BI': None,
'CUNEIFORM SIGN KA TIMES ERIN2': None,
'CUNEIFORM SIGN KA TIMES ESH2': None,
'CUNEIFORM SIGN KA TIMES GA': None,
'CUNEIFORM SIGN KA TIMES GAL': None,
'CUNEIFORM SIGN KA TIMES GAN2 TENU': None,
'CUNEIFORM SIGN KA TIMES GAR': None,
'CUNEIFORM SIGN KA TIMES GAR PLUS SHA3 PLUS A': None,
'CUNEIFORM SIGN KA TIMES GI': None,
'CUNEIFORM SIGN KA TIMES GIR2': None,
'CUNEIFORM SIGN KA TIMES GISH CROSSING GISH': None,
'CUNEIFORM SIGN KA TIMES GISH PLUS SAR': None,
'CUNEIFORM SIGN KA TIMES GU': None,
'CUNEIFORM SIGN KA TIMES GUR7': None,
'CUNEIFORM SIGN KA TIMES IGI': None,
'CUNEIFORM SIGN KA TIMES IM': None,
'CUNEIFORM SIGN KA TIMES KAK': None,
'CUNEIFORM SIGN KA TIMES KI': None,
'CUNEIFORM SIGN KA TIMES KID': None,
'CUNEIFORM SIGN KA TIMES LI': None,
'CUNEIFORM SIGN KA TIMES LU': None,
'CUNEIFORM SIGN KA TIMES ME': None,
'CUNEIFORM SIGN KA TIMES ME PLUS DU': None,
'CUNEIFORM SIGN KA TIMES ME PLUS GI': None,
'CUNEIFORM SIGN KA TIMES ME PLUS TE': None,
'CUNEIFORM SIGN KA TIMES MI': None,
'CUNEIFORM SIGN KA TIMES MI PLUS NUNUZ': None,
'CUNEIFORM SIGN KA TIMES NE': None,
'CUNEIFORM SIGN KA TIMES NUN': None,
'CUNEIFORM SIGN KA TIMES PI': None,
'CUNEIFORM SIGN KA TIMES RU': None,
'CUNEIFORM SIGN KA TIMES SA': None,
'CUNEIFORM SIGN KA TIMES SAR': None,
'CUNEIFORM SIGN KA TIMES SHA': None,
'CUNEIFORM SIGN KA TIMES SHE': None,
'CUNEIFORM SIGN KA TIMES SHID': None,
'CUNEIFORM SIGN KA TIMES SHU': None,
'CUNEIFORM SIGN KA TIMES SIG': None,
'CUNEIFORM SIGN KA TIMES SUHUR': None,
'CUNEIFORM SIGN KA TIMES TAR': None,
'CUNEIFORM SIGN KA TIMES U': None,
'CUNEIFORM SIGN KA TIMES U2': None,
'CUNEIFORM SIGN KA TIMES UD': None,
'CUNEIFORM SIGN KA TIMES UMUM TIMES PA': None,
'CUNEIFORM SIGN KA TIMES USH': None,
'CUNEIFORM SIGN KA TIMES ZI': None,
'CUNEIFORM SIGN KA2': None,
'CUNEIFORM SIGN KA2 CROSSING KA2': None,
'CUNEIFORM SIGN KAB': None,
'CUNEIFORM SIGN KAD2': None,
'CUNEIFORM SIGN KAD3': None,
'CUNEIFORM SIGN KAD4': None,
'CUNEIFORM SIGN KAD5': None,
'CUNEIFORM SIGN KAD5 OVER KAD5': None,
'CUNEIFORM SIGN KAK': None,
'CUNEIFORM SIGN KAK TIMES IGI GUNU': None,
'CUNEIFORM SIGN KAL': None,
'CUNEIFORM SIGN KAL CROSSING KAL': None,
'CUNEIFORM SIGN KAL TIMES BAD': None,
'CUNEIFORM SIGN KAM2': None,
'CUNEIFORM SIGN KAM4': None,
'CUNEIFORM SIGN
per non-poly CGNS zone/solution
2. Use the Zone Number for the blockID
Examples:
7 kLow bcwallviscous wing
4 jHigh bcsymmetryplane sym
5 khigh bcoutflowsubsonic turb_inlet BCDataSet_1 BCInFlowSubsonic Dirichlet PressureStagnation 1234.0 TemperatureStagnation 4556.0
"""
p_sub.add_argument(
"bcFile",
help="File containing additional bc info." + bcFile_txt,
)
p_sub.add_argument("outFile", nargs="?", default=None, help="Optional output file")
# ------------ Options for 'writebcinfo' mode --------------------
p_sub = subparsers.add_parser(
"writebcinfo",
help="Writes boundary condition information to a file.",
formatter_class=argparse.RawTextHelpFormatter,
)
p_sub.add_argument("gridFile", help="Name of input CGNS file")
p_sub.add_argument(
"bcOutFile",
default=None,
help="A file containing bc info." + bcFile_txt,
)
# ------------ Options for 'rebunch' mode --------------------
p_bunch = subparsers.add_parser("rebunch", help="Rebunch offwall spacing (experimental)")
p_bunch.add_argument("gridFile", help="Name of input CGNS file")
p_bunch.add_argument("spacing", help="The desired off-wall spacing", type=float)
p_bunch.add_argument("outFile", nargs="?", default=None, help="Optional output file")
p_bunch.add_argument(
"--extraCells",
help="Number of additional cells to use in re-bunching. *SHOULD BE A MG NUMBER*.",
type=int,
default=0,
)
p_bunch.add_argument("--nodes", help="Only rebunch the first 'nodes' in the offwall direction", type=int, default=1)
# ------------ Options for 'cgns2plot3d' mode --------------------
p3d = subparsers.add_parser("cgns2plot3d", help="Convert a cgns file to a plain plot3d file")
p3d.add_argument("gridFile", help="Name of input CGNS file")
p3d.add_argument("plot3dFile", help="Name of output plot3d file")
# ------------ Options for 'plot3dtocgns' mode --------------------
p3dtoc = subparsers.add_parser(
"plot3d2cgns",
help="""Convert a multiblock, unformatted fortran, big-endian, multiblock plot3d file to a plain
cgns file. This specific format is widely used at NASA and Boeing.""",
)
p3dtoc.add_argument("plot3dFile", help="Name of input plot3d file")
p3dtoc.add_argument("gridFile", help="Name of output CGNS file")
# ------------ Options for 'randomize' mode --------------------
p_ran = subparsers.add_parser("randomize", help="Randomize the block orientation and order. Useful for testing.")
p_ran.add_argument("gridFile", help="Name of input CGNS file")
p_ran.add_argument(
"seed",
type=int,
default=0,
help="Seed for random generator. Specifying a seed will make process deterministic.",
)
p_ran.add_argument("--keepRHS", help="Keep right hand coordinate system", action="store_true", default=False)
p_ran.add_argument("outFile", nargs="?", default=None, help="Optional output file")
# ------------ Options for 'reorder' mode --------------------
p_reorder = subparsers.add_parser(
"reorder",
help="""Sort blocks in an alpha-numerical order. It can also add extra digits
to the integers at the end of the block names to facilitate ordering.""",
)
p_reorder.add_argument("gridFile", help="Name of input CGNS file")
p_reorder.add_argument(
"intDigits",
type=int,
default=5,
help="""Number of digits used for the integers. When CGNSlib generates a CGNS file
(when converting from a plot3d file, for instance), it does not add extra digits to the integers
when naming zones. This becomes a problem when you have more than 10 zones because the ordering will be:
Zone1, Zone11, Zone12, ..., Zone19, Zone2, Zone21, ...
This method will add extra digits to the zone names to give the correct ordering.""",
)
p_reorder.add_argument("outFile", nargs="?", default=None, help="Optional output file")
# ------------ Options for 'symmZero' mode --------------------
p_sym = subparsers.add_parser("symmZero", help="Hard-zero any nodes on symmetry plane BCs.")
p_sym.add_argument("gridFile", help="Name of input CGNS file")
p_sym.add_argument("sym", help="Normal for possible symmetry plane.", choices=["x", "y", "z"])
p_sym.add_argument("outFile", nargs="?", default=None, help="Optional output file")
# ------------ Options for 'symmZeroNoBC' mode --------------------
p_symnobc = subparsers.add_parser(
"symmZeroNoBC",
help="Hard-zero any nodes within a given tolerance of the symmetry plane. BCs are not taken into account.",
)
p_symnobc.add_argument("gridFile", help="Name of input CGNS file")
p_symnobc.add_argument("sym", help="Normal for possible symmetry plane.", choices=["x", "y", "z"])
p_symnobc.add_argument(
"--tol",
help="Distance tolerance to bring nodes to symmetry plane (default: %(default)s)",
type=float,
default=1.0e-5,
)
p_symnobc.add_argument("outFile", nargs="?", default=None, help="Optional output file")
# ------------ Options for 'timeCombine' mode --------------------
p_tc = subparsers.add_parser(
"timeCombine", help="Combine cgns files from time accurate simulation into unsteady tecplot file."
)
p_tc.add_argument("baseName", help="baseName of the files. Use %%d to denote the counter.")
p_tc.add_argument("outFile", nargs="?", default=None, help="Output file name. If not given, unsteady.plt is used")
# ------------ Options for 'double2D' mode --------------------
p_dd = subparsers.add_parser("double2D", help="Take a 2d mesh one cell wide and make it two cells wide.")
p_dd.add_argument("gridFile", help="Name of input CGNS file")
p_dd.add_argument("outFile", nargs="?", default=None, help="Optional output file")
# ------------ Options for 'combine' mode --------------------
p_dd = subparsers.add_parser("combine", help="Take 2 or more cgns files and combine them into a single file.")
p_dd.add_argument("gridFiles", metavar="files", type=str, nargs="+", help="Name of CGNS files to combine")
p_dd.add_argument("outFile", type=str, help="Output CGNS file name")
# ------------ Options for 'removeBlocks' mode --------------------
p_rm = subparsers.add_parser(
"removeBlocks",
help="""Remove blocks from a cgns file. The user should ensure that the final mesh
is still valid in terms of boundary conditions and connectivities.""",
)
p_rm.add_argument("gridFile", help="Name of input CGNS file")
p_rm.add_argument(
"blockIDs",
metavar="files",
type=int,
nargs="+",
help="IDs (integers) of the blocks that will be removed. The integers should be 1-indexed",
)
p_rm.add_argument("outFile", type=str, help="Output CGNS file name")
# ------------ Options for 'explode' mode --------------------
p_exp = subparsers.add_parser(
"explode", help="Take one multiblock cgns file and explodes it into single-block cgns files."
)
p_exp.add_argument("gridFile", type=str, help="Name of input multiblock CGNS file")
p_exp.add_argument(
"outFile",
nargs="?",
default=None,
help="""Optional reference to name output files. An integer will be added to the end.
If none is given, the input filename will be used as reference.
All connectivity information between different blocks is lost in this step, only
internal connectivity remains.""",
)
# ------------ Options for 'explodeKmin' mode --------------------
p_expkmin = subparsers.add_parser(
"explodeKmin",
help="Take one multiblock cgns file and explodes it into single-block plot3d files that contains only the K=1 faces.",
)
p_expkmin.add_argument("gridFile", type=str, help="Name of input multiblock CGNS file")
p_expkmin.add_argument(
"outFile",
nargs="?",
default=None,
help="""Optional reference to name output files. An integer will be added to the end.
if none is given, the input filename will be used as reference.""",
)
# ------------ Options for 'explodeByZoneName' mode --------------------
p_expkmin = subparsers.add_parser(
"explodeByZoneName",
help="""Take one multiblock cgns file and explode it into multiple multiblock
cgns files based on the zone name from the blocks.""",
)
p_expkmin.add_argument("gridFile", type=str, help="Name of input multiblock CGNS file")
# ------------ Options for 'cartesian' mode --------------------
p_cart = subparsers.add_parser(
"cartesian", help="Generates a background cartesian mesh", formatter_class=argparse.RawTextHelpFormatter
)
p_cart.add_argument("gridFile", help="Name of input CGNS file")
p_cart.add_argument(
"cartFile",
help="""File containing background mesh info. The file must consist of
4 lines containing the following data:
<extensionXneg> <extensionYneg> <extensionZneg>
<extensionXpos> <extensionYpos> <extensionZpos>
<numNodesX> <numNodesY> <numNodesZ>
<weightGRX> <weightGRY> <weightGRZ>
where:
extension is the distance of the cartesian box
face to the corresponding bounding box face divided by the
bounding box length. We need 2 values of extension per
direction as we have two parallel faces for each one of them.
numNodes is the number of nodes that should be used along the
edges of the cartesian mesh. If you want one symmetry plane
in the z direction, for instance, you need to set one of the
extensionZ values to 0. If you want two symmetry planes in
the z direction, (e.g. to run a 2D case) you need to set both
extensionZ values to 0.
weightGR are values between 0.0 and 1.0 used to balance edge
growth ratio and cell volume resolution mismatch during the
optimization. If weightGR = 0, the optimizer will not care
about the growth ratios at the farfield and will just try
to match the bounding box resolution. If weightGR = 1, the
optimizer will not care about the bounding box resolution
and will just try to get a uniform growth ratio. This
results in a uniform mesh.
example:
10 10 0
10 10 10
65 65 65
0.1 0.1 0.1
""",
)
p_cart.add_argument(
"outFile",
help="""Name of output CGNS file. The output file contains only one cartesian block.
The input mesh is not included and BCs are applied.""",
)
# ------------ Options for 'simpleCart' mode --------------------
p_sub = subparsers.add_parser("simpleCart", help="Generates a background cartesian mesh")
p_sub.add_argument("gridFile", help="Name of input CGNS file")
p_sub.add_argument("dh", help="Uniform spacing size", type=float)
p_sub.add_argument("hExtra", help="Extension in each dimension", type=float)
p_sub.add_argument("nExtra", help="Number of nodes to use for extension", type=int)
p_sub.add_argument("sym", help="Normal for possible sym plane", type=str)
p_sub.add_argument("mgcycle", help="Minimum MG cycle to enforce", type=int)
p_sub.add_argument("outFile", help="Name of output CGNS file")
# ------------ Options for 'explicitCart' mode --------------------
p_sub = subparsers.add_parser("explicitCart", help="Generates a background cartesian mesh")
p_sub.add_argument("xmin", type=float, help="min x coordinate")
p_sub.add_argument("ymin", type=float, help="min y coordinate")
p_sub.add_argument("zmin", type=float, help="min z coordinate")
p_sub.add_argument("xmax", type=float, help="max x coordinate")
p_sub.add_argument("ymax", type=float, help="max y coordinate")
p_sub.add_argument("zmax", type=float, help="max z coordinate")
p_sub.add_argument("dh", help="Uniform spacing size", type=float)
p_sub.add_argument("hExtra", help="Extension in each dimension", type=float)
p_sub.add_argument("nExtra", help="Number of nodes to use for extension", type=int)
p_sub.add_argument("sym", | |
#!/usr/bin/env python
## EPlusInterface (EPI) - An interface for EnergyPlus
## Copyright (C) 2004 <NAME>
# =======================================================================
# Distributed under the MIT License.
# (See accompanying file LICENSE or copy at
# http://opensource.org/licenses/MIT)
# =======================================================================
"""legacy code from EPlusInterface"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from six import StringIO
from io import FileIO
from decorator import decorator
import eppy.EPlusInterfaceFunctions.mylib1 as mylib1
import eppy.EPlusInterfaceFunctions.mylib2 as mylib2
import eppy.EPlusInterfaceFunctions.iddgroups as iddgroups
import eppy.EPlusInterfaceFunctions.iddindex as iddindex
def nocomment(astr, com):
"""
just like the comment in python.
removes any text after the phrase 'com'
"""
alist = astr.splitlines()
for i in range(len(alist)):
element = alist[i]
pnt = element.find(com)
if pnt != -1:
alist[i] = element[:pnt]
return "\n".join(alist)
def get_nocom_vars(astr):
"""
input 'astr' which is the Energy+.idd file as a string
returns (st1, st2, lss)
st1 = with all the ! comments striped
st2 = strips all comments - both the '!' and '\\'
lss = nested list of all the variables in Energy+.idd file
"""
nocom = nocomment(astr, "!") # remove '!' comments
st1 = nocom
nocom1 = nocomment(st1, "\\") # remove '\' comments
st1 = nocom
st2 = nocom1
# alist = string.split(st2, ';')
alist = st2.split(";")
lss = []
# break the .idd file into a nested list
# =======================================
for element in alist:
# item = string.split(element, ',')
item = element.split(",")
lss.append(item)
for i in range(0, len(lss)):
for j in range(0, len(lss[i])):
lss[i][j] = lss[i][j].strip()
if len(lss) > 1:
lss.pop(-1)
# =======================================
# st1 has the '\' comments --- looks like I don't use this
# lss is the .idd file as a nested list
return (st1, st2, lss)
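# Rough worked example (hypothetical idd fragment): for the text
#   "Version,\n  \\unique-object\n  8.4;  !- Version Identifier\n"
# the '!' and '\' comments are stripped and the remainder split on ';' and ','
# so the returned lss is [['Version', '8.4']].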
def removeblanklines(astr):
"""remove the blank lines in astr"""
lines = astr.splitlines()
lines = [line for line in lines if line.strip() != ""]
return "\n".join(lines)
def _readfname(fname):
"""copied from extractidddata below.
It deals with all the types of fnames"""
try:
if isinstance(fname, (file, StringIO)):
astr = fname.read()
else:
astr = open(fname, "rb").read()
except NameError:
if isinstance(fname, (FileIO, StringIO)):
astr = fname.read()
else:
astr = mylib2.readfile(fname)
return astr
@decorator
def make_idd_index(extract_func, fname, debug):
"""generate the iddindex"""
astr = _readfname(fname)
# fname is exhausted by the above read
# reconstitute fname as a StringIO
fname = StringIO(astr)
# glist = iddgroups.iddtxt2grouplist(astr.decode('ISO-8859-2'))
blocklst, commlst, commdct = extract_func(fname)
name2refs = iddindex.makename2refdct(commdct)
ref2namesdct = iddindex.makeref2namesdct(name2refs)
idd_index = dict(name2refs=name2refs, ref2names=ref2namesdct)
commdct = iddindex.ref2names2commdct(ref2namesdct, commdct)
return blocklst, commlst, commdct, idd_index
@decorator
def embedgroupdata(extract_func, fname, debug):
"""insert group info into extracted idd"""
astr = _readfname(fname)
# fname is exhausted by the above read
# reconstitute fname as a StringIO
fname = StringIO(astr)
try:
astr = astr.decode("ISO-8859-2")
except Exception as e:
pass # for python 3
glist = iddgroups.iddtxt2grouplist(astr)
blocklst, commlst, commdct = extract_func(fname)
# add group information to commlst and commdct
# glist = getglist(fname)
commlst = iddgroups.group2commlst(commlst, glist)
commdct = iddgroups.group2commdct(commdct, glist)
return blocklst, commlst, commdct
@make_idd_index
@embedgroupdata
def extractidddata(fname, debug=False):
"""
extracts all the needed information out of the idd file
if debug is True, it generates a series of text files.
Each text file is incrementally different. You can do a diff
    to see what the change is
-
this code is from 2004.
it works.
I am trying not to change it (until I rewrite the whole thing)
to add functionality to it, I am using decorators
    So on its own it:
Does not integrate group data into the results (@embedgroupdata does it)
Does not integrate iddindex into the results (@make_idd_index does it)
"""
try:
if isinstance(fname, (file, StringIO)):
astr = fname.read()
try:
astr = astr.decode("ISO-8859-2")
except AttributeError:
pass
else:
astr = mylib2.readfile(fname)
# astr = astr.decode('ISO-8859-2') -> mylib1 does a decode
except NameError:
if isinstance(fname, (FileIO, StringIO)):
astr = fname.read()
try:
astr = astr.decode("ISO-8859-2")
except AttributeError:
pass
else:
astr = mylib2.readfile(fname)
# astr = astr.decode('ISO-8859-2') -> mylib2.readfile has decoded
(nocom, nocom1, blocklst) = get_nocom_vars(astr)
astr = nocom
st1 = removeblanklines(astr)
if debug:
mylib1.write_str2file("nocom2.txt", st1.encode("latin-1"))
# find the groups and the start object of the group
# find all the group strings
groupls = []
alist = st1.splitlines()
for element in alist:
lss = element.split()
if lss[0].upper() == "\\group".upper():
groupls.append(element)
# find the var just after each item in groupls
groupstart = []
for i in range(len(groupls)):
iindex = alist.index(groupls[i])
groupstart.append([alist[iindex], alist[iindex + 1]])
# remove the group commentline
for element in groupls:
alist.remove(element)
if debug:
st1 = "\n".join(alist)
mylib1.write_str2file("nocom3.txt", st1.encode("latin-1"))
# strip each line
for i in range(len(alist)):
alist[i] = alist[i].strip()
if debug:
st1 = "\n".join(alist)
mylib1.write_str2file("nocom4.txt", st1.encode("latin-1"))
# ensure that each line is a comment or variable
# find lines that don't start with a comment
# if this line has a comment in it
# then move the comment to a new line below
lss = []
for i in range(len(alist)):
# find lines that don't start with a comment
if alist[i][0] != "\\":
# if this line has a comment in it
pnt = alist[i].find("\\")
if pnt != -1:
# then move the comment to a new line below
lss.append(alist[i][:pnt].strip())
lss.append(alist[i][pnt:].strip())
else:
lss.append(alist[i])
else:
lss.append(alist[i])
alist = lss[:]
if debug:
st1 = "\n".join(alist)
mylib1.write_str2file("nocom5.txt", st1.encode("latin-1"))
# need to make sure that each line has only one variable - as in WindowGlassSpectralData,
lss = []
for element in alist:
# if the line is not a comment
if element[0] != "\\":
# test for more than one var
llist = element.split(",")
if llist[-1] == "":
tmp = llist.pop()
for elm in llist:
if elm[-1] == ";":
lss.append(elm.strip())
else:
lss.append((elm + ",").strip())
else:
lss.append(element)
ls_debug = alist[:] # needed for the next debug - 'nocom7.txt'
alist = lss[:]
if debug:
st1 = "\n".join(alist)
mylib1.write_str2file("nocom6.txt", st1.encode("latin-1"))
if debug:
# need to make sure that each line has only one variable - as in WindowGlassSpectralData,
# this is same as above.
# but the variables are put in without the ';' and ','
# so we can do a diff between 'nocom7.txt' and 'nocom8.txt'. Should be identical
lss_debug = []
for element in ls_debug:
# if the line is not a comment
if element[0] != "\\":
# test for more than one var
llist = element.split(",")
if llist[-1] == "":
tmp = llist.pop()
for elm in llist:
if elm[-1] == ";":
lss_debug.append(elm[:-1].strip())
else:
lss_debug.append((elm).strip())
else:
lss_debug.append(element)
ls_debug = lss_debug[:]
st1 = "\n".join(ls_debug)
mylib1.write_str2file("nocom7.txt", st1.encode("latin-1"))
# replace each var with '=====var======'
# join into a string,
# split using '=====var====='
for i in range(len(lss)):
# if the line is not a comment
if lss[i][0] != "\\":
lss[i] = "=====var====="
st2 = "\n".join(lss)
lss = st2.split("=====var=====\n")
lss.pop(0) # the above split generates an extra item at start
if debug:
fname = "nocom8.txt"
fhandle = open(fname, "wb")
k = 0
for i in range(len(blocklst)):
for j in range(len(blocklst[i])):
atxt = blocklst[i][j] + "\n"
                fhandle.write(atxt.encode("latin-1"))
atxt = lss[k]
fhandle.write(atxt.encode("latin-1"))
k = k + 1
fhandle.close()
# map the structure of the comments -(this is 'lss' now) to
# the structure of blocklst - blocklst is a nested list
# make lss a similar nested list
k = 0
lst = []
for i in range(len(blocklst)):
lst.append([])
for j in range(len(blocklst[i])):
lst[i].append(lss[k])
k = k + 1
if debug:
fname = "nocom9.txt"
fhandle = open(fname, "wb")
k = 0
for i in range(len(blocklst)):
for j in range(len(blocklst[i])):
atxt = blocklst[i][j] + "\n"
                fhandle.write(atxt.encode("latin-1"))
fhandle.write(lst[i][j].encode("latin-1"))
k = k + 1
fhandle.close()
# break up multiple line comment so that it is a list
for i in range(len(lst)):
for j in range(len(lst[i])):
lst[i][j] = lst[i][j].splitlines()
# remove the '\'
for k in range(len(lst[i][j])):
lst[i][j][k] = lst[i][j][k][1:]
commlst = lst
# copied with minor modifications from readidd2_2.py -- which has been erased ha !
clist = lst
lss = []
for i in range(0, len(clist)):
alist = []
for j in range(0, len(clist[i])):
itt = clist[i][j]
ddtt = {}
for element in itt:
if len(element.split()) == 0:
break
ddtt[element.split()[0].lower()] = []
for element in itt:
if len(element.split()) == 0:
break
# ddtt[element.split()[0].lower()].append(string.join(element.split()[1:]))
ddtt[element.split()[0].lower()].append(" ".join(element.split()[1:]))
alist.append(ddtt)
lss.append(alist)
commdct | |
be specified with four bytes:
gateway_bytes = 4
# Pattern build up holding lists:
dot_subnet = []
dot_gateway = []
# This list will be returned by the function
routes = []
# Parse the byte stream one byte at a time:
for byte in byte_stream:
# Convert the bytes to decimal
byte = int(byte, 16)
# If the subnet_mask is set then we're looking for the subnet and
# gateway bytes that must appear after
if subnet_mask:
if net_bytes:
dot_subnet.append(str(byte))
net_bytes -= 1
# Once the net_bytes counter is depleted we know the next
# bytes in the stream represent the gateway
else:
dot_gateway.append(str(byte))
gateway_bytes -= 1
# Once the gateway_bytes are taken from the stream a complete
# route is present and stored. There are potentially additional
# routes to process so it must reset the control logic variables
# for the next route to be determined.
if not gateway_bytes:
while len(dot_subnet) < 4:
dot_subnet.append('0')
subnet = '.'.join(dot_subnet)
gateway = '.'.join(dot_gateway)
routes.append((subnet, subnet_mask, gateway))
# Reset the following for the next bytes in the stream:
subnet_mask = 0
dot_subnet = []
dot_gateway = []
gateway_bytes = 4
else:
            # Subnet_mask is determined by bit position,
            # where it's always leading so as to determine
            # the byte positions for the subnet and gateway
            subnet_mask = byte
            # The number of destination octets that follow the mask byte
            # (the mask width rounded up to whole octets)
            net_bytes = (subnet_mask + 7) // 8
return routes
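# Worked example (hypothetical byte stream, assuming the RFC 3442 / DHCP
# option 121 layout described above): the hex bytes
#   ['18', 'c0', 'a8', '01', 'c0', 'a8', '00', '01']
# decode as mask 0x18 = 24, so three destination octets 192.168.1 (padded to
# 192.168.1.0) followed by the four-byte gateway 192.168.0.1, yielding the
# route ('192.168.1.0', 24, '192.168.0.1').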
def get_default_nic():
"""
Returns the NIC with the default route, which is usually where the DHCP
response packet can be found
"""
# by default, check en1 (Macbook Pro, etc.)
nic = 'en1'
route_table = get_route_table()
# check the route_table for a default route and parse the nic
for line in route_table.splitlines():
# assumes one default route exists:
if re.search('^default', line):
nic = line.split()[5]
return nic
def get_hardware_link_state(nic):
"""
Report the hardware link state of a specified nic
nic:
specify a device name, such as 'en1'
Returns the link state as a string, which was reported from networksetup
"""
# get the media state of the specific nic
cmd = 'networksetup -getmedia %s' % nic
try:
p = subprocess.Popen(cmd.split(), stdin=subprocess.PIPE,
stdout=subprocess.PIPE, stderr=subprocess.PIPE)
stdout, stderr = p.communicate()
except subprocess.CalledProcessError:
stdout = ''
state = ''
if stdout:
for line in stdout.splitlines():
if re.search('^Active: ', line):
state = line.split()[1].strip()
return state
def get_ip_addresses(interface):
"""
    Determine the IPv4 addresses for the specified interface
interface:
the macOS network interface name, such as 'en1' for /dev/en1
Returns a list of "ip address, netmask" tuples
"""
cmd = '/sbin/ifconfig %s inet' % interface
p = subprocess.Popen(cmd.split(), stdin=subprocess.PIPE,
stdout=subprocess.PIPE, stderr=subprocess.PIPE)
stdout, stderr = p.communicate()
addresses = []
for line in stdout.splitlines():
inet = re.search(r'inet ((?:[0-9]{1,3}\.){3}[0-9]{1,3}) '
r'netmask (0x[f0]{8}) '
r'broadcast ((?:[0-9]{1,3}\.){3}[0-9]{1,3})',
line)
if inet:
ip_address = inet.groups()[0]
mask = bin(int(inet.groups()[1], 0)).count('1')
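            # e.g. a netmask of 0xffffff00 has 24 one-bits, so mask == 24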
broadcast = inet.groups()[2]
addresses.append((ip_address, mask, broadcast))
return addresses
def get_ipv4_interfaces():
"""
Returns a list of system networking interfaces
"""
# show all the interfaces and ipv4 networking info
cmd = 'ifconfig -a inet'
p = subprocess.Popen(cmd.split(), stdin=subprocess.PIPE,
stdout=subprocess.PIPE, stderr=subprocess.PIPE)
stdout, stderr = p.communicate()
return stdout
def get_ipv4_routes(route_table):
"""
    The route table has several types of routes in it;
    this filters out all but the IPv4 routes and also
    drops the default route.
Returns a list of lists (line by line route output)
"""
only_ipv4_routes = []
for item in route_table:
if len(item) >= 6:
if re.match(r'\d+\.\d+\.\d+\.\d+', item[1]):
if 'default' not in item[0]:
only_ipv4_routes.append(item)
return only_ipv4_routes
def get_option(packet, option_code):
"""
Parses for the option_code's data from ipconfig output
packet:
the packet data from "ipconfig getpacket"
option_code:
the DHCP option code to parse, for example "option_121"
Returns a list populated with each line of packet data corresponding to
the DHCP option code.
"""
option = False
option_data = []
for line in packet.splitlines():
if option:
if line:
option_data.append(line)
else:
option = False
        # this check must come after the block above so the option header line itself is not collected
if option_code in line:
option = True
return option_data
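# Sketch of the expected behaviour (hypothetical packet text): if the packet
# contained the lines "option_121 (opaque):", "18 c0 a8 01 c0 a8 00 01" and
# then a blank line, get_option(packet, 'option_121') would return
# ['18 c0 a8 01 c0 a8 00 01'] -- the option header line itself is skipped and
# the blank line ends collection.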
def get_packet(interface):
"""
Retrieve the DHCP packet information to obtain the option data
See the uppermost docstring for information on DHCP option 121
not appearing in the responses due to the fact that they are not
requested in older client versions.
interface:
the macOS network interface name, such as 'en1' for /dev/en1
Returns the getpacket data for the interface as a list of strings
"""
cmd = '/usr/sbin/ipconfig getpacket %s' % interface
try:
p = subprocess.Popen(cmd.split(), stdin=subprocess.PIPE,
stdout=subprocess.PIPE, stderr=subprocess.PIPE)
stdout, stderr = p.communicate()
except subprocess.CalledProcessError:
stdout = ''
return stdout
def get_route_table():
"""
Returns a routing table
"""
# only show the ipv4 routing table without name resolution:
cmd = 'netstat -f inet -rn'
p = subprocess.Popen(cmd.split(), stdin=subprocess.PIPE,
stdout=subprocess.PIPE, stderr=subprocess.PIPE)
stdout, stderr = p.communicate()
return stdout
def get_route_table_with_masks():
"""
    Return a route table with the subnet routes zero-padded. Subnets without
    a bitmask are assigned a classful default mask (/8, /16 or /24).
"""
# Parse the netstat output into a split line by line list of lists
route_table = [item.split() for item in get_route_table().splitlines()]
# Build a list of only the IPv4 static routes
only_ipv4_routes = get_ipv4_routes(route_table)
# Build the final list in presentable view
routes = []
for route in only_ipv4_routes:
# Subnet, mask, gateway and interface
if '/' in route[0]:
target, mask = route[0].split('/')
else:
target = route[0]
mask = ''
# Pads out the route entries as an IP (target) within
# the subnet.
while target.count('.') < 3:
target = target + '.0'
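        # Infer a classful prefix length from the leading bits when the route
        # entry carried no explicit mask (0... -> /8, 10... -> /16, 110... -> /24).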
if not mask:
bit_subnet = ip_address_to_32bit(target)
if bit_subnet[:1] == '0':
mask = '8'
if bit_subnet[:2] == '10':
mask = '16'
            if bit_subnet[:3] == '110':
mask = '24'
# The new routing moves the old fields over except the padded address
# and adds in the bits field to preserve the subnet information that
# may have been removed from the old address in route[0]
routes.append([target, mask, route[1], route[5]])
return routes
def ip_address_to_32bit(address):
"""
Returns IP address converted to a "32bit" long binary string
127.0.0.1 will be returned as 01111111000000000000000000000001
"""
binary = bin(struct.unpack('!L', socket.inet_aton(address))[0])
# Remove the leading '0b' text:
binary = binary[2:]
# Pad the number's string until it is 32 characters wide
while len(binary) < 32:
binary = '0' + binary
return binary
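# Equivalent one-liner, noted here only for reference (behaviour above is unchanged):
#   format(struct.unpack('!L', socket.inet_aton(address))[0], '032b')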
def route_cmd(route, routeverb=''):
"""
    Adds (or modifies, per routeverb) the specified route with the UNIX route command
route - a tuple with three fields:
subnet - network address to route
mask - length of subnet mask in bits (24, etc.)
gateway - the next hop router for the packet
UNIX Errors:
"route: writing to routing socket: File exists"
This does not check if a current route exists, which will cause a UNIX
error to display. This can usually be ignored as the route was likely
set from the last time the interface was enabled.
"add net 192.168.1.0: gateway 192.168.0.1: File exists"
    If there is a conflicting route present, a similar error is displayed
    directly after the routing socket's "File exists" error
    for the same route add operation.
    Simply remove the conflicting static routes as needed per the desired
network topology, disable and enable the interface again (to run this
script) and the DHCP specified static routes will populate as expected.
Returns the stdout from the operation
"""
routeverb = routeverb or 'add'
subnet = route[0]
mask = route[1]
gateway = route[2]
cmd = 'route %s %s/%s %s' % (routeverb, subnet, mask, gateway)
p = subprocess.Popen(cmd.split(), stdin=subprocess.PIPE,
stdout=subprocess.PIPE, stderr=subprocess.PIPE)
stdout, stderr = p.communicate()
return stdout
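# Hypothetical usage sketch (addresses are illustrative, not from a real packet):
#   route_cmd(('192.168.10.0', '24', '192.168.0.1'))             # route add 192.168.10.0/24 192.168.0.1
#   route_cmd(('192.168.10.0', '24', '192.168.0.1'), 'delete')   # route delete 192.168.10.0/24 192.168.0.1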
def set_routes(routes, addresses, gatewaycheck, static_routes):
"""
Checks to see if the specified route should be added to the UNIX routing
table.
    Returns a list of the stdouts from each route attempted
"""
stdouts = []
current_routes = get_route_table_with_masks()
# Add in the forceroutes from the override file:
if static_routes:
for static in static_routes.split(';'):
subnet = static.split('/')[0].strip()
mask = (static.split('/')[1].strip()).split()[0]
gateway = (static.split('/')[1].strip()).split()[1]
routes.append([subnet, mask, gateway])
for route in routes:
# This is a simulator for Monopoly
import logging
import csv
from enum import IntEnum
from math import ceil
from random import randrange, shuffle
from monopoly_ai_sim.board import MonopolyBoardPosition, RentIdx
from monopoly_ai_sim.cards import MonopolyDeck, MonopolyCard
from monopoly_ai_sim.auction import MonopolyAuction
logger = logging.getLogger('monopoly_ai_simulator')
class JailState(IntEnum):
NOT_IN_JAIL = -1
JAIL_TURN_1 = 0
JAIL_TURN_2 = 1
JAIL_TURN_3 = 2
class MonopolyGame():
def __init__(self, players=None):
# Monopoly Game constants
self.STARTING_CASH = 1500
self.GO_INCOME = 200
self.DOUBLES_TO_JAIL = 3
self.LUXURY_TAX = 100
self.INCOME_TAX_OPTION = 200
self.POSITION_GO = 0
self.POSITION_JAIL = 10
self.ESCAPE_JAIL_COST = 50
self.INITIAL_HOUSE_COUNT = 32
self.INITIAL_HOTEL_COUNT = 12
# Board positions and cards are populated by CSV
self.house_count = self.INITIAL_HOUSE_COUNT
self.hotel_count = self.INITIAL_HOTEL_COUNT
self.board_positions = {}
self.group_id_to_position = {}
self.chance_deck = MonopolyDeck()
self.community_chest_deck = MonopolyDeck()
self.players = players
# Populate the board positional data
with open('monopoly_ai_sim/game_data.csv', 'r') as f:
reader = csv.reader(f)
next(reader, None) # Skip the header
for row in reader:
board_position = MonopolyBoardPosition(row)
if board_position.position in self.board_positions:
logger.debug("Error parsing CSV file, multiple entries map to the same position")
self.board_positions[board_position.position] = board_position
if board_position.property_group not in self.group_id_to_position:
self.group_id_to_position[board_position.property_group] = [board_position]
else:
self.group_id_to_position[board_position.property_group].append(board_position)
# Populate the chance cards
with open('monopoly_ai_sim/chance.csv', 'r') as f:
reader = csv.reader(f)
for row in reader:
chance_card = MonopolyCard(row, self)
self.chance_deck.cards.append(chance_card)
self.chance_deck.shuffle()
        # Populate the community chest cards
with open('monopoly_ai_sim/community_chest.csv', 'r') as f:
reader = csv.reader(f)
for row in reader:
community_chest_card = MonopolyCard(row, self)
self.community_chest_deck.cards.append(community_chest_card)
self.community_chest_deck.shuffle()
    # Returns the winner of the game if one exists, otherwise None
def get_winner(self):
if not self.players:
return None
winner = None
for player in self.players:
if not player.is_bankrupt:
if winner:
return None
else:
winner = player
return winner
# Player starts at go and gets 1500 to start as per rules
# IDEA: We should see how changing starting value of this game will affect the outcome
def init_player(self, player):
player.cash = self.STARTING_CASH
player.position = self.POSITION_GO
player.otherPlayers = [p for p in self.players if (player is not p)]
def play_jail(self, player):
logger.debug("Player " + str(player.id) + " plays turn " + str(player.jail_state) + " of jail")
if player.jail_state > JailState.JAIL_TURN_3:
player.jail_state = JailState.NOT_IN_JAIL
return
        if len(player.get_out_of_jail_free) > 0 and player.use_get_out_jail_free(self):
            logger.debug("Player " + str(player.id) + " uses get out of jail free card to escape jail")
            card = player.get_out_of_jail_free.pop()
            card.drawn = False
            player.jail_state = JailState.NOT_IN_JAIL
            return
        # NOTE: For now assume you cant manage properties in jail
        # This is not Shawshank Redemption
        if player.cash >= self.ESCAPE_JAIL_COST and player.pay_to_escape_jail(self):
            logger.debug("Player " + str(player.id) + " pays to leave jail")
            player.give_cash_to(self, None, self.ESCAPE_JAIL_COST)
            player.jail_state = JailState.NOT_IN_JAIL
            return
        # Still in jail: advance to the next jail turn
        player.jail_state += 1
        return
def send_to_nearest_utility(self, player):
while not self.board_positions[player.position].is_utility:
if player.position == 0:
player.cash += self.GO_INCOME
player.position = (player.position+1) % len(self.board_positions)
def send_to_nearest_railroad(self, player):
while not self.board_positions[player.position].is_railroad:
if player.position == 0:
player.cash += self.GO_INCOME
player.position = (player.position+1) % len(self.board_positions)
def check_property_group_and_update_player(self, board_position):
if not board_position:
return
# For railroads and utilities update the game based on the
if board_position.is_railroad or board_position.is_utility:
new_rent_idx = -1
if board_position.property_group in self.group_id_to_position:
property_group = self.group_id_to_position[board_position.property_group]
else:
raise ValueError("Property with invalid group_id = " + str(board_position.group_id) + " found")
for board_property in property_group:
if board_property.owner is board_position.owner:
new_rent_idx += 1
# Update the rent index on all properties of the same type the owner owns
for board_property in property_group:
if board_property.owner is board_position.owner:
                    board_property.rent_idx = RentIdx(new_rent_idx)
# Normal property
else:
player_owns_all_properties_in_group = True
owner = board_position.owner
if board_position.rent_idx > RentIdx.ONLY_DEED:
return
if board_position.property_group in self.group_id_to_position:
property_group = self.group_id_to_position[board_position.property_group]
for board_property in property_group:
if board_property.owner is not owner:
player_owns_all_properties_in_group = False
break
if player_owns_all_properties_in_group:
for board_property in property_group:
board_property.rent_idx = RentIdx.GROUP_COMPLETE_NO_HOUSES
else:
for board_property in property_group:
if board_property.owner is owner:
board_property.rent_idx = RentIdx.ONLY_DEED
"""
BUYING PROPERTY... Whenever you land on an unowned property you may buy that property from the Bank at its printed price. You receive the Title Deed card showing ownership; place it face up in
front of you.
If you do not wish to buy the property, the Banker sells it at auction
to the highest bidder. The buyer pays the Bank the amount of the bid
in cash and receives the Title Deed card for that property. Any player, including the one who declined the option to buy it at the printed
price, may bid. Bidding may start at any price.
"""
def process_property(self, player, current_position, dice):
if not current_position.is_property:
return
if current_position.owner is None:
# If the player can purchase the property and has enough cash to do so, let him
if player.should_purchase_property(self, current_position):
player.purchase_property(self, current_position, current_position.cost_to_buy)
else:
# Auction the property
auction = MonopolyAuction(current_position, self.players)
winner = auction.get_auction_winner()
if winner:
winner.purchase_property(self, current_position, auction.last_offer)
# If someone owns the property and it isn't mortgaged, pay up!
elif current_position.owner is not player and not current_position.is_mortgaged:
amount_owed = 0
# We need to pay the owner of the property
if current_position.is_railroad:
amount_owed = current_position.rents[current_position.rent_idx]
elif current_position.is_utility:
amount_owed = (dice[0] + dice[1]) * current_position.rents[current_position.rent_idx]
else:
amount_owed = current_position.rents[current_position.rent_idx]
logger.debug("Player " + str(player.id) + " owes $" + str(amount_owed) + " to Player " + str(
current_position.owner.id) + " for rent @ " + current_position.name)
player.give_cash_to(self, current_position.owner, amount_owed)
return
@staticmethod
def roll_dice():
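        # Note: random.randrange excludes the upper bound, so (1, 7) yields 1-6,
        # matching a standard six-sided die (randint(1, 6) would be equivalent).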
        d1 = randrange(1, 7)
        d2 = randrange(1, 7)
return d1, d2
    # Plays the turn for the player per the rules of the game
def play_turn(self, player):
# Bankrupt players cannot play!
if player.is_bankrupt:
return
if player.jail_state != JailState.NOT_IN_JAIL:
self.play_jail(player)
num_doubles_rolled = 0
doubles_rolled = True
while doubles_rolled and not player.is_bankrupt:
doubles_rolled = False
d1, d2 = self.roll_dice()
logger.debug("Player " + str(player.id) + " rolls " + str((d1, d2)))
# Rolling doubles will get you out of jail
# Rolling 3 doubles will get you into jail
if d1 == d2:
doubles_rolled = True
num_doubles_rolled += 1
if num_doubles_rolled == self.DOUBLES_TO_JAIL:
logger.debug("Player " + str(player.id) + " goes to jail by rolling " + str(self.DOUBLES_TO_JAIL) + " doubles")
num_doubles_rolled = 0
player.jail_state = JailState.JAIL_TURN_1
elif player.jail_state != JailState.NOT_IN_JAIL:
logger.debug("Player " + str(player.id) + " escapes jail by rolling doubles")
player.jail_state = JailState.NOT_IN_JAIL
# If we failed to roll doubles in jail, we need to skip this turn!
if player.jail_state != JailState.NOT_IN_JAIL:
return
# Update the position
player.position = (player.position + d1 + d2)
# If we passed or landed on go, collect the money
if player.position >= len(self.board_positions):
player.cash += self.GO_INCOME
player.position %= len(self.board_positions)
# Someone owns this position, we need to pay rent to them
while not player.is_bankrupt:
# Process what to do for this position
if player.position not in range(0, len(self.board_positions)):
raise ValueError("Player " + str(player.id) + " in invalid board position " + str(player.position))
current_position = self.board_positions[player.position]
if current_position.is_chance:
card = self.chance_deck.draw_and_perform(player)
position_changed = card.type == "set_spot"
                    # If our position changed, we need to reprocess
if position_changed:
continue
break
elif current_position.is_community_chest:
card = self.community_chest_deck.draw_and_perform(player)
position_changed = card.type == "set_spot"
                    # If our position changed, we need to reprocess
if position_changed:
continue
break
elif current_position.name == "Go to Jail":
player.jail_state = JailState.JAIL_TURN_1
player.position = self.POSITION_JAIL
break
elif current_position.name == "Luxury Tax":
player.give_cash_to(self, None, self.LUXURY_TAX)
break
elif current_position.name == "Income Tax":
amount_owed = min(self.INCOME_TAX_OPTION, int(ceil(player.get_asset_value() * .10)))
player.give_cash_to(self, None, amount_owed)
break
else:
self.process_property(player, current_position, (d1, d2))
break
# Allow any player to buy/un-mortgage properties
# The order in which this is done is random so that one player doesn't have
# an advantage over the limited number of house/hotel pieces
player_purchase_order = self.players[:]
shuffle(player_purchase_order)
for purchasing_player in player_purchase_order:
if not purchasing_player.is_bankrupt:
purchasing_player.unmortgage_properties()
purchasing_player.purchase_houses(self)
return
# Start a simulation with the provided players
# players - players in order, first player in this list will play first
def do_simulation(self, players=None):
if not self.players:
if not players:
return None
else:
self.players = players
logger.debug("Starting simulation")
for player in self.players:
self.init_player(player)
# Keep playing until there is a winner
turn_counter = 0
winner = None
while not winner:
for player in self.players:
self.play_turn(player)
winner = self.get_winner()
turn_counter += 1
logger.debug("-------------------------")
#!/usr/bin/env python3
# Copyright (c) 2019 Bitcoin Association
# Distributed under the Open BSV software license, see the accompanying file LICENSE.
'''
Check different scenarios on how reorg affects contents of mempool and journal.
# chain reorg as a set operation on the chains of blocks containing sets of transactions
# (set(old_tip)) + set(mempool)) - set(new_tip) = new_mempool
# 0 ( A + {} ) - {} = A
# 1 ( {} + B ) - {} = B
# 2 ( C1 + C2 ) - {} = C1+C2
# 3 ( D + {} ) - D = {}
# 4 ( {} + E ) - E = {}
# 5 ( F1 + F2 ) - F1 = F2
# 6 ( G1 + G2 ) - G1+G2 = {}
# 7 ( Hx + {} ) - Hy = {}
# 8 ( Ix1 + Ix2 ) - Iy = {}
Where:
- Each letter is a separate (valid) transaction chain
- suffixes `x` and `y` are doublespend variants chains starting at the same UTXO
- suffixes `1` and `2` are first and second part of the same transaction chain
Two mechanisms for forcing a reorg are tested:
- new_tip is made better(longer) than old_tip
- old_tip is invalidated and so the equally good new_tip is chosen.
'''
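# Illustrative sketch of the set identity described above, using arbitrary
# labels in place of real transaction chains (not part of the original test):
#   old_tip, mempool, new_tip = {"F1"}, {"F2"}, {"F1"}
#   assert (old_tip | mempool) - new_tip == {"F2"}   # case 5: F2 remains in mempool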
from time import sleep
import socket
import itertools
import heapq
from test_framework.blocktools import create_block, create_coinbase
from test_framework.cdefs import ONE_GIGABYTE
from test_framework.key import CECKey
from test_framework.mininode import CTransaction, msg_tx, CTxIn, COutPoint, CTxOut, msg_block
from test_framework.script import CScript, SignatureHashForkId, SIGHASH_ALL, SIGHASH_FORKID, OP_CHECKSIG
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import wait_until
_cntr = 1
def new_key():
k = CECKey()
global _cntr
k.set_secretbytes(_cntr.to_bytes(6, byteorder='big'))
_cntr += 1
return k
class UTXO:
def __init__(self, tx, ndx, key):
self.tx = tx
self.ndx = ndx
self.key = key
_cntr2 = 1
def get_tip(connection):
return connection.rpc.getblock(connection.rpc.getbestblockhash())
def get_block_hash(block):
if isinstance(block, dict):
return block["hash"]
else:
return block.hash
def knows_of_block(connection, block):
def predicate(*, _block_hash=get_block_hash(block)):
try:
tmp = connection.rpc.getblock(_block_hash)
print(f"node knows of block {tmp['hash']} by {_block_hash}")
assert tmp["hash"] == _block_hash
return True
except:
print(f"node knows noting about block {_block_hash}")
return False
return predicate
def block_is_tip(connection, block):
def predicate(*, _block_hash=get_block_hash(block)):
ret = connection.rpc.getbestblockhash() == _block_hash
if ret:
print(f"node tip is block {_block_hash}")
return ret
return predicate
def make_and_send_block_ex(connection, vtx, *, tip=None, wait_for_tip=True):
"Create and send block with coinbase, returns conbase (tx, key) tuple"
if tip is None:
tip = get_tip(connection)
else:
tip = connection.rpc.getblock(get_block_hash(tip))
coinbase_key = new_key()
coinbase_tx = create_coinbase(tip["height"] + 1, coinbase_key.get_pubkey())
coinbase_tx.rehash()
global _cntr2
_cntr2 += 1
block = create_block(int(tip["hash"], 16), coinbase_tx, tip["time"] + _cntr2)
if vtx:
block.vtx.extend(vtx)
block.hashMerkleRoot = block.calc_merkle_root()
block.calc_sha256()
block.solve()
msg = msg_block(block)
connection.send_message(msg)
if wait_for_tip:
wait_until(block_is_tip(connection, block), timeout=15)
else:
wait_until(knows_of_block(connection, block), timeout=15)
return UTXO(coinbase_tx, 0, coinbase_key), connection.rpc.getblock(get_block_hash(block))
def make_and_send_block(connection, vtx, *, tip=None, wait_for_tip=True):
    return make_and_send_block_ex(connection, vtx, tip=tip, wait_for_tip=wait_for_tip)[0]
def create_tx(utxos, n_outputs, fee_delta=0):
total_input = 0
tx = CTransaction()
for utxo in utxos:
tx.vin.append(CTxIn(COutPoint(utxo.tx.sha256, utxo.ndx), b"", 0xffffffff))
total_input += utxo.tx.vout[utxo.ndx].nValue
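    # Fee heuristic: roughly 300 per input and 200 per output, plus a base of
    # 100 and the caller-supplied fee_delta, is withheld from every output
    # (assumption: values chosen by the original author to stay above the relay fee).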
amount_per_output = total_input // n_outputs - len(utxos)*300 - n_outputs*200 - 100 - fee_delta
new_utxos = []
for i in range(n_outputs):
k = new_key()
new_utxos.append(UTXO(tx, i, k))
tx.vout.append(CTxOut(amount_per_output, CScript([k.get_pubkey(), OP_CHECKSIG])))
for input_ndx, (utxo, input) in enumerate(zip(utxos, tx.vin)):
sighash = SignatureHashForkId(utxo.tx.vout[utxo.ndx].scriptPubKey, tx, input_ndx, SIGHASH_ALL | SIGHASH_FORKID, utxo.tx.vout[utxo.ndx].nValue)
input.scriptSig = CScript([utxo.key.sign(sighash) + bytes(bytearray([SIGHASH_ALL | SIGHASH_FORKID]))])
tx.rehash()
return tx, new_utxos
def split(utxos, n_inputs, n_outputs, fee_delta=0):
new_utxos = []
transactions = []
for _ in split_iter(utxos, n_inputs, n_outputs, new_utxos, transactions, fee_delta):
pass
return transactions, new_utxos
def split_iter(utxos, n_inputs, n_outputs, new_utxos=None, transactions=None, fee_delta=0):
for ndx in range(0, len(utxos), n_inputs):
tx, xx = create_tx(utxos[ndx : ndx+n_inputs], n_outputs, fee_delta)
if new_utxos is not None:
new_utxos.extend(xx)
if transactions is not None:
transactions.append(tx)
yield tx, xx
def make_tx_chain(utxo, chain_length, fee_delta=0):
def gen():
utxos = [utxo]
for a in range(chain_length):
tx, utxos = create_tx(utxos, 1, fee_delta=fee_delta)
yield tx
return list(gen())
def chop(x, n=2):
"""Chop sequence into n approximately equal slices
>>> chop(range(10), n=3)
[[0, 1, 2], [3, 4, 5, 6], [7, 8, 9]]
>>> chop(range(3), n=10)
[[], [0], [], [], [1], [], [], [], [2], []]
"""
x = list(x)
if n < 2:
return [x]
def gen():
m = len(x) / n
i = 0
for _ in range(n-1):
yield x[round(i):round(i + m)]
i += m
yield x[round(i):]
return list(gen())
def splice(*iters):
"""
>>> print(*splice('abc', 'de', 'f'))
a d f b e c
"""
nothing = object()
return (x
for x in itertools.chain(
*itertools.zip_longest(
*iters,
fillvalue=nothing))
if x is not nothing)
def make_blocks_from(conn, root_block, nblocks, *txs_lists, wait_for_tip=True):
def gen(root_block, nblocks):
for i, txs in enumerate(chop(splice(*txs_lists), n=nblocks), start=1):
_, root_block = make_and_send_block_ex(conn, txs, tip=root_block, wait_for_tip=wait_for_tip)
yield root_block
return list(gen(root_block, nblocks))
def submit_to_mempool(conn, *txs_lists):
txs = list(splice(*txs_lists))
expected_mempool_size = conn.rpc.getmempoolinfo()["size"] + len(txs)
for tx in txs:
conn.send_message(msg_tx(tx))
# All planned transactions should be accepted into the mempool
wait_until(lambda: conn.rpc.getmempoolinfo()["size"] == expected_mempool_size)
class property_dict(dict):
def __getattr__(self, k): return self.__getitem__(k)
def __setattr__(self, k, v): return self.__setitem__(k, v)
def tx_ids(txs):
return [tx if isinstance(tx, str) else tx.hash for tx in txs]
class tx_set_context(dict):
def __init__(self, context={}, **subsets):
context = dict(context)
context.update(subsets)
super().__init__((k, tx_ids(v)) for k,v in context.items())
class tx_set(set):
def __init__(self, _members=(), *, _name=None):
if isinstance(_members, tx_set):
_name = _name if _name is not None else _members._name
self._name = _name if _name is not None else 'set'
super().__init__(tx_ids(_members))
def explain(self, other, *, context):
if not isinstance(other, tx_set):
other = tx_set(other)
if not isinstance(context, tx_set_context):
context = tx_set_context(context)
ret = ""
explained = set()
for n, v in context.items():
if not self.intersection(v):
continue
if other.intersection(v):
ret += self._explain_range(n, v, other)
ret += " "
else:
ret += f"no {n} "
explained.update(other.intersection(v))
missing = self.difference(explained)
if missing:
if ret:
ret += "and "
ret += f"missing from {self._name} are "
for n, v in context.items():
if not self.intersection(v):
continue
if missing.intersection(v):
ret += self._explain_range(n, v, missing)
ret += " "
missing.difference_update(v)
if missing:
ret += ", ".join(sorted(missing))
ret += " "
unexpected = other.difference(self)
if unexpected:
if ret:
ret += "and "
ret += f"unexpected "
for n, v in context.items():
if unexpected.intersection(v):
ret += self._explain_range(n, v, unexpected)
ret += " "
unexpected.difference_update(v)
if unexpected:
ret += ", ".join(sorted(unexpected))
ret += " "
return f"{other._name} is {ret}"
def _explain_range(self, n, v, elements):
def find_slices():
last = None
for i in sorted(map(v.index, elements.intersection(v))):
if last is None:
last = slice(i, i+1)
elif last.stop == i:
last = slice(last.start, i+1)
else:
yield last
last = None
if last is not None:
yield last
def show_slices(slices):
for s in slices:
start = str(s.start) if s.start > 0 else ""
stop = str(s.stop) if s.start > 0 or s.stop < len(v) else ""
yield f"{n}[{start}:{stop}]" if s.start+1 != s.stop else f"{n}[{s.start}]"
return " ".join(show_slices(find_slices()))
c = property_dict(A="abc", B="def", C="ghi", Z="xyz")
e = tx_set(c.A + c.B + c.C, _name="'e'")
a = tx_set("abcdegixyq", _name="'a'")
# assert e == a, e.explain(a, context=c)
class ReorgTests(BitcoinTestFramework):
def set_test_params(self):
self.setup_clean_chain = True
self.num_nodes = 1
def setup_network(self):
self.setup_nodes()
def setup_nodes(self):
self.add_nodes(self.num_nodes)
def run_test(self):
with self.run_node_with_connections("Xxxxxxxxxxxxx",
0,
["-checkmempool=1",
'-whitelist=127.0.0.1',
'-genesisactivationheight=1',
'-jbafillafternewblock=1'
],
number_of_connections=1) as (conn,):
self.log.info("making coinbase")
fee_delta = 10
utxo, _ = make_and_send_block_ex(conn, [])
conn.rpc.generate(110)
# we will mine two competing chains, old and new
# the old one will be one block longer than the new
# We will use several strategies to switch the chains
def reorg_by_invalidateblock(conn, old_chain, new_chain):
"""Invalidate last two blocks of the old chain to make the new chain longer"""
conn.rpc.invalidateblock(old_chain[-2]["hash"])
wait_until(lambda: conn.rpc.getbestblockhash() == new_chain[-1]["hash"], timeout=10)
return new_chain[-1]
def reorg_by_mining(conn, old_chain, new_chain):
"""Mine two more blocks on the new chain to make the new chain longer"""
more_chain = make_blocks_from(conn, new_chain[-1], 1, wait_for_tip=False)
more_chain = make_blocks_from(conn, more_chain[-1], 1, wait_for_tip=True)
return more_chain[-1]
reorg_strategies = [reorg_by_invalidateblock, reorg_by_mining]
txs, case_utxos = split([utxo], 1, len(reorg_strategies), fee_delta=fee_delta)
_, root_block_data = make_and_send_block_ex(conn, txs)
for strategy, case_utxo in zip(reorg_strategies, case_utxos):
self.check_reorg_cases(conn, root_block_data, strategy, case_utxo=case_utxo, fee_delta=fee_delta)
def check_reorg_cases(self, conn, root_block_data, instigate_reorg, case_utxo, fee_delta):
self.log.info("Check reorg cases with %s", instigate_reorg.__name__)
n_cases = 9
txs, new_utxos = split([case_utxo], 1, n_cases, fee_delta=fee_delta)
utxo, root_block_data = make_and_send_block_ex(conn, txs)
# stay below 25 chain limit as the whole thing may end up in the mempool
tx_chain_depth = 24
# see docstring above
self.log.info("preparing transactions")
#!/usr/bin/env python
from __future__ import absolute_import, division, print_function
import os
import re
import shlex
import subprocess
import signal
import csv
import logging
import json
import time
from datetime import datetime as dt
from requests.exceptions import RequestException
import glob
import traceback
import random
from badge import *
from badge_discoverer import BadgeDiscoverer, BeaconDiscoverer
from badge_manager_server import BadgeManagerServer
from beacon_manager_server import BeaconManagerServer
from badge_manager_standalone import BadgeManagerStandalone
from beacon_manager_standalone import BeaconManagerStandalone
import hub_manager
from settings import DATA_DIR, LOG_DIR
log_file_name = LOG_DIR + 'hub.log'
scans_file_name = DATA_DIR + 'scan.txt'
pending_file_prefix = DATA_DIR + 'pending_'
audio_archive_file_name = DATA_DIR + 'audio_archive.txt'
proximity_archive_file_name = DATA_DIR + 'proximity_archive.txt'
standalone_audio_file = DATA_DIR + 'audio_data.txt'
standalone_proximity_file = DATA_DIR + 'proximity_data.txt'
AUDIO = "audio"
PROXIMITY = "proximity"
SCAN_DURATION = 3 # seconds
#NOTE try to keep under 100MB or so due to memory constraints
MAX_PENDING_FILE_SIZE = 15000000 # in bytes, so 15MB
# create logger with 'badge_server'
logger = logging.getLogger('badge_server')
logger.setLevel(logging.DEBUG)
# create file handler which logs even debug messages
fh = logging.FileHandler(log_file_name)
fh.setLevel(logging.DEBUG)
# create console handler with a higher log level
ch = logging.StreamHandler()
ch.setLevel(logging.DEBUG)
# create formatter and add it to the handlers
# formatter = logging.Formatter('%(asctime)s - %(levelname)s - [%(mac)s] %(message)s')
formatter = logging.Formatter('%(asctime)s - %(levelname)s - %(message)s')
fh.setFormatter(formatter)
ch.setFormatter(formatter)
# add the handlers to the logger
logger.addHandler(fh)
logger.addHandler(ch)
def round_float_for_log(x):
return float("{0:.3f}".format(x))
def has_chunks(filename):
"""
Returns true if there is data in the file, and false otherwise
"""
return os.path.exists(filename) and os.path.getsize(filename) > 0
def offload_data():
"""
Send pending files to server and move pending to archive
Return True on success, False on failure
"""
#TODO test with standalone
#NOTE not currently doing anything with the True/False
# return values, might decide to do something later
pending_files = sorted(glob.glob(pending_file_prefix + "*"))
for pending_file_name in pending_files:
logger.debug("Sending {} to server".format(pending_file_name))
if not has_chunks(pending_file_name):
continue
chunks = []
with open(pending_file_name, "r") as pending_file:
for line in pending_file:
chunks.append(json.loads(line))
# real quick grab the data type from the first data entry
data_type = "audio" if "audio" in chunks[0]["type"] else "proximity"
# fire away!
try:
chunks_written = hub_manager.send_data_to_server(logger, data_type, chunks)
if chunks_written == len(chunks):
logger.debug("Successfully wrote {} data entries to server"
.format(len(chunks)))
else:
                # this seems unlikely to happen, but is good to keep track of
logger.error("Data mismatch: {} data entries were not written to server"
.format(len(chunks) - chunks_written))
logger.error("Error sending data from file {} to server!"
.format(pending_file_name))
return False
# write to archive and erase pending file
with open(get_archive_name(data_type), "a") as archive_file:
for chunk in chunks:
archive_file.write(json.dumps(chunk) + "\n")
os.remove(pending_file_name)
except RequestException as e:
s = traceback.format_exc()
logger.error("Error sending data from file {} to server!"
.format(pending_file_name))
logger.error("{},{}".format(e,s))
return False
return True
def get_archive_name(data_type):
"""
Return the name of the archive file for the passed data type
"""
if data_type == AUDIO:
return audio_archive_file_name
else:
return proximity_archive_file_name
def get_proximity_name(mode="server"):
"""
return the name of the existing pending proximity file,
or a new one if either one doesn't exist or if
the existing file is > MAX_PENDING_FILE_SIZE
"""
if mode == "server":
return _get_pending_file_name(PROXIMITY)
else:
return standalone_proximity_file
def get_audio_name(mode="server"):
if mode == "server":
return _get_pending_file_name(AUDIO)
else:
return standalone_audio_file
def _get_pending_file_name(data_type):
"""
If there are no current pending files < MAX_PENDING_FILE_SIZE in size,
return a new pending filename
Else, return an existing one.
"""
filenames = filter(
lambda x: os.path.getsize(x) < MAX_PENDING_FILE_SIZE,
glob.glob("{}*{}*".format(pending_file_prefix, data_type)))
if len(filenames) == 0:
return _create_pending_file_name(data_type)
else:
return filenames[0]
def _create_pending_file_name(data_type):
"""
Create a pending file name for the given data_type
Uses the current date/time to create a unique filename
"""
now = dt.now().strftime("%Y%m%d%H%M%S")
filename = "{}{}_{}.txt".format(pending_file_prefix, now, data_type)
if os.path.exists(filename):
# this seems unlikely to happen, but just in case :)
# get the number of pending files that match this time and add one
files = glob.glob("{}{}*{}*".format(pending_file_prefix, now, data_type))
now = '_'.join((now, str(len(files) + 1)))
filename = "{}{}_{}.txt".format(pending_file_prefix, now, data_type)
return filename
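# For reference, a generated pending file name looks like the following,
# where the timestamp portion is illustrative:
#   <DATA_DIR>pending_20190101120000_audio.txt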
def dialogue(bdg, activate_audio, activate_proximity, mode="server"):
"""
Attempts to read data from the device specified by the address. Reading is handled by gatttool.
:param bdg:
:return:
"""
ret = bdg.pull_data(activate_audio, activate_proximity)
addr = bdg.addr
if ret == 0:
logger.info("Successfully pulled data")
# if we were able to pull data, we saw the badge again
bdg.last_seen_ts = time.time()
else:
logger.info("Errors pulling data.")
if bdg.dlg.chunks:
logger.info("Chunks received: {}".format(len(bdg.dlg.chunks)))
logger.info("saving chunks to file")
# store in JSON file
with open(get_audio_name(mode), "a") as fout:
for chunk in bdg.dlg.chunks:
ts_with_ms = round_float_for_log(ts_and_fract_to_float(chunk.ts, chunk.fract))
log_line = {
'type': "audio received",
'log_timestamp': round_float_for_log(time.time()),
'log_index': -1, # need to find a good accumulator.
'data': {
'voltage': round_float_for_log(chunk.voltage),
'timestamp': ts_with_ms,
'sample_period': chunk.sampleDelay,
'num_samples': len(chunk.samples),
'samples': chunk.samples,
'badge_address': addr,
'member': bdg.key,
'member_id':bdg.badge_id
}
}
logger.debug("Chunk timestamp: {0:.3f}, Voltage: {1:.3f}, Delay: {2}, Samples in chunk: {3}".format(
ts_with_ms, chunk.voltage, chunk.sampleDelay, len(chunk.samples)))
#logger.debug(json.dumps(log_line))
json.dump(log_line, fout)
fout.write('\n')
logger.info("done writing")
# update badge object to hold latest timestamps
last_chunk = bdg.dlg.chunks[-1]
last_chunk_ts_pretty = dt.fromtimestamp(last_chunk.ts).strftime("%Y-%m-%d@%H:%M:%S UTC")
if bdg.is_newer_audio_ts(last_chunk.ts, last_chunk.fract):
logger.debug("Setting last badge audio timestamp to {} {} ({})".format(
last_chunk.ts, last_chunk.fract, last_chunk_ts_pretty))
bdg.set_audio_ts(last_chunk.ts, last_chunk.fract)
else:
logger.debug("Keeping existing timestamp ({}.{}) for {}. Last chunk timestamp was: {}.{} ({})"
.format(bdg.last_audio_ts_int,bdg.last_audio_ts_fract,bdg.addr,
last_chunk.ts, last_chunk.fract, last_chunk_pretty))
else:
logger.info("No mic data ready")
if bdg.dlg.scans:
logger.info("Proximity scans received: {}".format(len(bdg.dlg.scans)))
logger.info("saving proximity scans to file")
with open(get_proximity_name(mode), "a") as fout:
for scan in bdg.dlg.scans:
ts_with_ms = round_float_for_log(scan.ts)
log_line = {
'type': "proximity received",
'log_timestamp': round_float_for_log(time.time()),
'log_index': -1, # need to find a good accumulator.
'data': {
'voltage': round_float_for_log(scan.voltage),
'timestamp': ts_with_ms,
'badge_address': addr,
'rssi_distances':
{
device.ID: {'rssi': device.rssi, 'count': device.count} for device in scan.devices
},
'member': bdg.key,
'member_id': bdg.badge_id
}
}
logger.debug("SCAN: scan timestamp: {0:.3f}, voltage: {1:.3f}, Devices in scan: {2}".format(
ts_with_ms, scan.voltage, scan.numDevices))
#logger.info(json.dumps(log_line))
json.dump(log_line, fout)
fout.write('\n')
# update badge object to hold latest timestamps
last_scan = bdg.dlg.scans[-1]
last_scan_ts_pretty = dt.fromtimestamp(last_scan.ts).strftime("%Y-%m-%d@%H:%M:%S UTC")
logger.debug("Setting last badge proximity timestamp to {} ([])".format(
last_scan.ts, last_scan_ts_pretty))
bdg.last_proximity_ts = last_scan.ts
else:
logger.info("No proximity scans ready")
def scan_for_devices(devices_whitelist, show_all=False):
bd = BadgeDiscoverer(logger)
try:
all_devices = bd.discover(scan_duration=SCAN_DURATION)
except Exception as e: # catch *all* exceptions
logger.error("[Badges] Scan failed,{}".format(e))
all_devices = {}
scanned_devices = []
for addr,device_info in all_devices.iteritems():
if addr in devices_whitelist:
logger.debug("\033[1;7m\033[1;32mFound {}, added. Device info: {}\033[0m".format(addr, device_info))
scanned_devices.append({'mac':addr,'device_info':device_info})
else:
if show_all:
logger.debug("Found {}, but not on whitelist. Device info: {}".format(addr, device_info))
pass
    time.sleep(2)  # sometimes required to prevent the connection from failing
return scanned_devices
def scan_for_bc_devices(devices_whitelist, show_all=False):
bc = BeaconDiscoverer(logger)
try:
all_bc_devices = bc.discover(scan_duration=SCAN_DURATION)
except Exception as e: # catch *all* exceptions
logger.error("[Beacons] Scan failed,{}".format(e))
all_bc_devices = {}
scanned_bc_devices = []
for addr,device_info in all_bc_devices.iteritems():
if addr in devices_whitelist:
logger.debug("\033[1;7m\033[1;32mFound {}, added. Device info: {}\033[0m".format(addr, device_info))
scanned_bc_devices.append({'mac':addr,'device_info':device_info})
else:
if show_all:
logger.debug("Found {}, but not on whitelist. Device info: {}".format(addr, device_info))
pass
    time.sleep(2)  # sometimes required to prevent the connection from failing
return scanned_bc_devices
def create_badge_manager_instance(mode,timestamp):
if mode == "server":
mgr = BadgeManagerServer(logger=logger)
else:
mgr = BadgeManagerStandalone(logger=logger,timestamp=timestamp)
return mgr
def create_beacon_manager_instance(mode,timestamp):
if mode == "server":
mgrb = BeaconManagerServer(logger=logger)
else:
mgrb = BeaconManagerStandalone(logger=logger,timestamp=timestamp)
return mgrb
def reset():
'''
Resets and reconfigures Bluetooth parameters. The specific parameters affect connection speed negotiation. It's
not pretty, but safer to change the conn params this way
:return:
'''
# Resets BLE hci
logger.info("Resetting bluetooth")
reset_command = "hciconfig hci0 reset"
args = shlex.split(reset_command)
p = subprocess.Popen(args)
# israspberry pi?
logger.info("Setting bluetooth connection parameters")
if os.uname()[4][:3] == 'arm':
logger.info("Raspberry Pi detected, changing bluetooth connection parameters")
with open("/sys/kernel/debug/bluetooth/hci0/conn_min_interval", "w") as connparam:
connparam.write("16")
with open("/sys/kernel/debug/bluetooth/hci0/conn_max_interval", "w") as connparam:
connparam.write("17")
else:
logger.warn("Not a Raspberry Pi, Bluetooth connection parameters remain untouched (communication may be slower)")
time.sleep(2) # requires sleep after reset
logger.info("Done resetting bluetooth")
def kill_bluepy():
"""
Kill orphaned/leftover/defunct bluepy-helper processes
I'd like to move this to a separate utility file or something when
we refactor
"""
# get all the bluepy-helper processes
CMD="/bin/ps ax | grep bluepy-helper | grep -v grep | awk '{ print $1 }'"
p = subprocess.Popen(CMD, shell=True, stdout=subprocess.PIPE)
pidstr = p.communicate()[0]
pids = pidstr.split("\n")
pids = [int(pid) for pid in pids if pid.isdigit()]
mypid = os.getpid()
# dont wanna kill our process by accident :)
if mypid in pids:
pids.remove(mypid)
for pid in pids:
# KILL KILL KILL
try:
os.kill(int(pid), signal.SIGKILL)
# we waitpid to clean up defunct processes
os.waitpid(int(pid), 0)
logger.info("Process with PID {} killed".format(pid))
except OSError as err:
logger.error("Unable to kill process with pid {}".format(pid))
logger.error(err)
def pull_devices(mgr, mgrb, start_recording):
logger.info('Started pulling')
activate_audio = False
|