# Copyright (c) 2016 Presslabs SRL
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
from datetime import datetime, timedelta
from decimal import Decimal
from annoying.fields import JSONField
from django_fsm import FSMField, transition, TransitionNotAllowed, post_transition
from model_utils import Choices
from django.apps import apps
from django.db.models.signals import post_save
from django.dispatch import receiver
from django.conf import settings
from django.core.exceptions import ValidationError, NON_FIELD_ERRORS
from django.urls import reverse
from django.core.validators import MinValueValidator
from django.db import models
from django.db import transaction as db_transaction
from django.db.models import Max, ForeignKey, F
from django.template.loader import select_template
from django.utils import timezone
from django.utils.encoding import python_2_unicode_compatible, force_text
from django.utils.text import slugify
from django.utils.translation import ugettext_lazy as _
from django.utils.module_loading import import_string
from silver.currencies import CurrencyConverter, RateNotFound
from silver.models.billing_entities import Customer, Provider
from silver.models.documents.entries import DocumentEntry
from silver.models.documents.pdf import PDF
from silver.utils.international import currencies
_storage = getattr(settings, 'SILVER_DOCUMENT_STORAGE', None)
if _storage:
_storage_klass = import_string(_storage[0])
_storage = _storage_klass(*_storage[1], **_storage[2])
PAYMENT_DUE_DAYS = getattr(settings, 'SILVER_DEFAULT_DUE_DAYS', 5)
logger = logging.getLogger(__name__)
def documents_pdf_path(document, filename):
path = '{prefix}{company}/{doc_name}/{date}/{filename}'.format(
company=slugify(force_text(
document.provider.company or document.provider.name)),
date=document.issue_date.strftime('%Y/%m'),
doc_name=('%ss' % document.__class__.__name__).lower(),
prefix=getattr(settings, 'SILVER_DOCUMENT_PREFIX', ''),
filename=filename)
return path
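# Illustrative result (names are hypothetical): for an Invoice issued on
# 2016-05-10 by a provider whose company slugifies to 'acme', with
# SILVER_DOCUMENT_PREFIX='docs/', this yields
# 'docs/acme/invoices/2016/05/<filename>'.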
class BillingDocumentQuerySet(models.QuerySet):
def due_this_month(self):
return self.filter(
state=BillingDocumentBase.STATES.ISSUED,
due_date__gte=timezone.now().date().replace(day=1)
)
def due_today(self):
return self.filter(
state=BillingDocumentBase.STATES.ISSUED,
due_date__exact=timezone.now().date()
)
def overdue(self):
return self.filter(
state=BillingDocumentBase.STATES.ISSUED,
due_date__lt=timezone.now().date()
)
def overdue_since_last_month(self):
return self.filter(
state=BillingDocumentBase.STATES.ISSUED,
due_date__lt=timezone.now().date().replace(day=1)
)
class BillingDocumentManager(models.Manager):
def get_queryset(self):
return super(BillingDocumentManager, self).get_queryset() \
.select_related('customer', 'provider',
'related_document')
def get_billing_documents_kinds():
return ((subclass.__name__.lower(), subclass.__name__)
for subclass in BillingDocumentBase.__subclasses__())
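# With the Invoice and Proforma document subclasses referenced elsewhere in
# silver, this yields choice pairs such as ('invoice', 'Invoice') and
# ('proforma', 'Proforma').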
@python_2_unicode_compatible
class BillingDocumentBase(models.Model):
objects = BillingDocumentManager.from_queryset(BillingDocumentQuerySet)()
class STATES(object):
DRAFT = 'draft'
ISSUED = 'issued'
PAID = 'paid'
CANCELED = 'canceled'
STATE_CHOICES = Choices(
(STATES.DRAFT, _('Draft')),
(STATES.ISSUED, _('Issued')),
(STATES.PAID, _('Paid')),
(STATES.CANCELED, _('Canceled'))
)
kind = models.CharField(get_billing_documents_kinds, max_length=8, db_index=True)
related_document = models.ForeignKey('self', blank=True, null=True,
related_name='reverse_related_document', on_delete=models.CASCADE)
series = models.CharField(max_length=20, blank=True, null=True,
db_index=True)
number = models.IntegerField(blank=True, null=True, db_index=True)
customer = models.ForeignKey('Customer', on_delete=models.CASCADE)
provider = models.ForeignKey('Provider', on_delete=models.CASCADE)
archived_customer = JSONField(default=dict, null=True, blank=True)
archived_provider = JSONField(default=dict, null=True, blank=True)
due_date = models.DateField(null=True, blank=True)
issue_date = models.DateField(null=True, blank=True, db_index=True)
paid_date = models.DateField(null=True, blank=True)
cancel_date = models.DateField(null=True, blank=True)
sales_tax_percent = models.DecimalField(max_digits=4, decimal_places=2,
validators=[MinValueValidator(0.0)],
null=True, blank=True)
sales_tax_name = models.CharField(max_length=64, blank=True, null=True)
currency = models.CharField(
choices=currencies, max_length=4, default='USD',
help_text='The currency used for billing.'
)
transaction_currency = models.CharField(
choices=currencies, max_length=4,
help_text='The currency used when making a transaction.'
)
transaction_xe_rate = models.DecimalField(
max_digits=16, decimal_places=4, null=True, blank=True,
help_text='Currency exchange rate from document currency to '
'transaction_currency.'
)
transaction_xe_date = models.DateField(
null=True, blank=True,
help_text='Date of the transaction exchange rate.'
)
pdf = ForeignKey(PDF, null=True, on_delete=models.CASCADE)
state = FSMField(choices=STATE_CHOICES, max_length=10, default=STATES.DRAFT,
verbose_name="State",
help_text='The state the invoice is in.')
_total = models.DecimalField(max_digits=19, decimal_places=2,
null=True, blank=True)
_total_in_transaction_currency = models.DecimalField(max_digits=19,
decimal_places=2,
null=True, blank=True)
_last_state = None
_document_entries = None
class Meta:
unique_together = ('kind', 'provider', 'series', 'number')
ordering = ('-issue_date', 'series', '-number')
def __init__(self, *args, **kwargs):
super(BillingDocumentBase, self).__init__(*args, **kwargs)
if not self.kind:
self.kind = self.__class__.__name__.lower()
else:
for subclass in BillingDocumentBase.__subclasses__():
if subclass.__name__.lower() == self.kind:
self.__class__ = subclass
self._last_state = self.state
def _get_entries(self):
if not self._document_entries:
self._document_entries = getattr(self, self.kind + '_entries').all()
return self._document_entries
def compute_total_in_transaction_currency(self):
return sum([Decimal(entry.total_in_transaction_currency)
for entry in self._get_entries()])
def compute_total(self):
return sum([Decimal(entry.total)
for entry in self._get_entries()])
def mark_for_generation(self):
self.pdf.mark_as_dirty()
def _issue(self, issue_date=None, due_date=None):
if issue_date:
self.issue_date = datetime.strptime(issue_date, '%Y-%m-%d').date()
elif not self.issue_date and not issue_date:
self.issue_date = timezone.now().date()
if not self.transaction_xe_rate:
if not self.transaction_xe_date:
self.transaction_xe_date = self.issue_date
try:
xe_rate = CurrencyConverter.convert(1, self.currency,
self.transaction_currency,
self.transaction_xe_date)
except RateNotFound:
raise TransitionNotAllowed('Couldn\'t automatically obtain an '
'exchange rate.')
self.transaction_xe_rate = xe_rate
if due_date:
self.due_date = datetime.strptime(due_date, '%Y-%m-%d').date()
elif not self.due_date and not due_date:
delta = timedelta(days=PAYMENT_DUE_DAYS)
self.due_date = timezone.now().date() + delta
if not self.sales_tax_name:
self.sales_tax_name = self.customer.sales_tax_name
if not self.sales_tax_percent:
self.sales_tax_percent = self.customer.sales_tax_percent
if not self.number:
self.number = self._generate_number()
self.archived_customer = self.customer.get_archivable_field_values()
self._total = self.compute_total()
self._total_in_transaction_currency = self.compute_total_in_transaction_currency()
@transition(field=state, source=STATES.DRAFT, target=STATES.ISSUED)
def issue(self, issue_date=None, due_date=None):
self._issue(issue_date=issue_date, due_date=due_date)
def _pay(self, paid_date=None):
if paid_date:
self.paid_date = datetime.strptime(paid_date, '%Y-%m-%d').date()
if not self.paid_date and not paid_date:
self.paid_date = timezone.now().date()
@transition(field=state, source=STATES.ISSUED, target=STATES.PAID)
def pay(self, paid_date=None):
self._pay(paid_date=paid_date)
def _cancel(self, cancel_date=None):
if cancel_date:
self.cancel_date = datetime.strptime(cancel_date, '%Y-%m-%d').date()
if not self.cancel_date and not cancel_date:
self.cancel_date = timezone.now().date()
@transition(field=state, source=STATES.ISSUED, target=STATES.CANCELED)
def cancel(self, cancel_date=None):
self._cancel(cancel_date=cancel_date)
def sync_related_document_state(self):
if self.related_document and self.state != self.related_document.state:
state_transition_map = {
BillingDocumentBase.STATES.ISSUED: 'issue',
BillingDocumentBase.STATES.CANCELED: 'cancel',
BillingDocumentBase.STATES.PAID: 'pay'
}
transition_name = state_transition_map[self.state]
bound_transition_method = getattr(self.related_document, transition_name)
bound_transition_method()
def clone_into_draft(self):
copied_fields = {
'customer': self.customer,
'provider': self.provider,
'currency': self.currency,
'sales_tax_percent': self.sales_tax_percent,
'sales_tax_name': self.sales_tax_name
}
clone = self.__class__._default_manager.create(**copied_fields)
clone.state = self.STATES.DRAFT
# clone entries too
for entry in self._entries:
entry_clone = entry.clone()
document_type_name = self.__class__.__name__.lower()
setattr(entry_clone, document_type_name, clone)
entry_clone.save()
clone.save()
return clone
def clean(self):
super(BillingDocumentBase, self).clean()
# The only change that is allowed if the document is in issued state
# is the state change from issued to paid
# !! TODO: If _last_state == 'issued' and self.state == 'paid' || 'canceled'
# it should also be checked that the other fields are the same bc.
# right now a document can be in issued state and someone could
# send a request which contains the state = 'paid' and also send
# other changed fields and the request would be accepted bc. only
# the state is verified.
if self._last_state == self.STATES.ISSUED and\
self.state not in [self.STATES.PAID, self.STATES.CANCELED]:
msg = 'You cannot edit the document once it is in issued state.'
raise ValidationError({NON_FIELD_ERRORS: msg})
if self._last_state == self.STATES.CANCELED:
msg = 'You cannot edit the document once it is in canceled state.'
raise ValidationError({NON_FIELD_ERRORS: msg})
# If it's in paid state => don't allow any changes
if self._last_state == self.STATES.PAID:
msg = 'You cannot edit the document once it is in paid state.'
raise ValidationError({NON_FIELD_ERRORS: msg})
if self.transactions.exclude(currency=self.transaction_currency).exists():
message = 'There are unfinished transactions of this document that use a ' \
'different currency.'
raise ValidationError({'transaction_currency': message})
def save(self, *args, **kwargs):
if not self.transaction_currency:
self.transaction_currency = self.customer.currency or self.currency
if not self.series:
self.series = self.default_series
# Generate the number
if not self.number and self.state != BillingDocumentBase.STATES.DRAFT:
self.number = self._generate_number()
# Add tax info
if not self.sales_tax_name:
self.sales_tax_name = self.customer.sales_tax_name
if not self.sales_tax_percent:
self.sales_tax_percent = self.customer.sales_tax_percent
self._last_state = self.state
with db_transaction.atomic():
# Create pdf object
if not self.pdf and self.state != self.STATES.DRAFT:
self.pdf = PDF.objects.create(upload_path=self.get_pdf_upload_path(), dirty=1)
super(BillingDocumentBase, self).save(*args, **kwargs)
def _generate_number(self, default_starting_number=1):
"""Generates the number for a proforma/invoice."""
default_starting_number = max(default_starting_number, 1)
documents = self.__class__._default_manager.filter(
provider=self.provider, series=self.series
)
if not documents.exists():
# An invoice/proforma with this provider and series does not exist
if self.series == self.default_series:
return self._starting_number
else:
return default_starting_number
else:
# An invoice with this provider and series already exists
max_existing_number = documents.aggregate(
Max('number')
)['number__max']
if max_existing_number:
if self._starting_number and self.series == self.default_series:
return max(max_existing_number + 1, self._starting_number)
else:
return max_existing_number + 1
else:
return default_starting_number
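# Illustrative numbering (values are hypothetical): for the provider's default
# series with _starting_number = 1000, the first document gets 1000; once
# documents exist, the next number is max(existing) + 1, but never below
# _starting_number for the default series.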
def series_number(self):
if self.series:
if self.number:
return "%s-%d" % (self.series, self.number)
else:
return "%s-draft-id:%d" % (self.series, self.pk)
else:
return "draft-id:%d" % self.pk
series_number.short_description = 'Number'
series_number = property(series_number)
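# e.g. series='INV', number=17 -> 'INV-17'; a draft without a number falls
# back to 'INV-draft-id:<pk>' (or 'draft-id:<pk>' without a series).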
def __str__(self):
return '%s %s => %s [%.2f %s]' % (self.series_number,
self.provider.billing_name,
self.customer.billing_name,
self.total, self.currency)
@property
def updateable_fields(self):
return ['customer', 'provider', 'due_date', 'issue_date', 'paid_date',
'cancel_date', 'sales_tax_percent', 'sales_tax_name',
'currency']
@property
def admin_change_url(self):
url_base = 'admin:{app_label}_{klass}_change'.format(
app_label=self._meta.app_label,
klass=self.__class__.__name__.lower())
url = reverse(url_base, args=(self.pk,))
return '<a href="{url}">{display_series}</a>'.format(
url=url, display_series=self.series_number)
@property
def _entries(self):
# entries iterator which replaces the invoice/proforma from the DB with
# self. We need this in generate_pdf so that the data in PDF has the
# latest state for the document. Without this we get in the template:
#
# invoice.issue_date != entry.invoice.issue_date
#
# which is obviously false.
document_type_name = self.__class__.__name__ # Invoice or Proforma
kwargs = {document_type_name.lower(): self}
entries = DocumentEntry.objects.filter(**kwargs)
for entry in entries:
if document_type_name.lower() == 'invoice':
entry.invoice = self
if document_type_name.lower() == 'proforma':
entry.proforma = self
yield(entry)
def get_template_context(self, state=None):
customer = Customer(**self.archived_customer)
provider = Provider(**self.archived_provider)
if state is None:
state = self.state
return {
'document': self,
'provider': provider,
'customer': customer,
'entries': self._entries,
'state': state
}
def get_template(self,
The most general expression for the conductivity is given by the Kubo formula
.. math::
\\sigma(q,\\omega)=\\frac{ie^2\\omega}{q^2}\\chi^0(q,\\omega).
where :math:`\\chi^0` is the Polarizibility. If ``q`` is nonzero, this form is used.
However, it is common to use simpler limiting cases of this expression.
The local conductivity (used when ``q==0``) is the most familiar;
it relates the surface current linearly to the electric field
.. math::
\\mathbf K(\\omega)=\\sigma(\\omega)\\mathbf E
It can be found from the nonlocal conductivity by taking the limit :math:`\\lim_{q\\to 0}\\sigma(q,\\omega)=\\sigma(\\omega)`
and takes the form :math:`\\sigma(\\omega)=\\sigma_{intra}(\\omega)+\\sigma_{inter}(\\omega)`,
where the intraband and interband components are given by
.. math::
\\sigma_{intra}(\\omega) = \\frac{2ie^2k_BT}{\\pi\\hbar^2(\\omega+i\\gamma)}\\ln\\left [ 2 \\cosh \\frac{E_F}{2k_BT} \\right ]
and
.. math::
\\sigma_{inter}(\\omega) = \\frac{e^2}{4\\hbar}\\left [ H(\\hbar\\omega/2) + \\frac{4i}{\\pi}\\hbar ( \\omega +i \\gamma )\\int_0^\\infty \\frac{H( \\epsilon )-H(\\hbar\\omega /2)}{\\hbar^2(\\omega +i\\gamma )^2-4\\epsilon^2} d\\epsilon\\right ]
where
.. math::
H(\\epsilon) = f(-\\epsilon)-f(\\epsilon) = \\frac{\\sinh(\\epsilon/k_BT)}{\\cosh(E_F/k_BT) + \\cosh(\\epsilon/k_BT)}
For ``T=0`` these expressions reduce to
.. math::
\\sigma_{intra}(\\omega) = \\frac{ie^2E_F}{\\pi\\hbar^2(\\omega+i\\gamma)}
.. math::
\\sigma_{inter}(\\omega) = \\frac{e^2}{4\\hbar}\\left [ \\Theta(\\hbar\\omega - 2E_F) + \\frac{i}{\\pi} \\ln\\left [\\frac{2E_F-\\hbar\\omega}{2E_F+\\hbar\\omega} \\right ] \\right ]
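Examples
--------
A minimal illustrative call (assuming this docstring belongs to the
``ScalarOpticalConductivity`` function referenced elsewhere in this module,
taking ``(q, omega, gamma, FermiLevel, T)`` as its leading parameters):

>>> from graphenemodeling.graphene import monolayer as mlg
>>> from scipy.constants import elementary_charge, hbar
>>> eV = elementary_charge
>>> FermiLevel = 0.4 * eV
>>> gamma = 0.012 * eV / hbar
>>> omega = np.linspace(0.01, 3, num=200) * FermiLevel / hbar
>>> sigma = mlg.ScalarOpticalConductivity(0, omega, gamma, FermiLevel, T=0)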
References
----------
[1] <NAME>. (2017).
From Classical to Quantum Plasmonics in Three and Two Dimensions (Cham: Springer International Publishing).
http://link.springer.com/10.1007/978-3-319-48562-1
'''
# Local case
if np.all(q == 0):
if T!=0:
intra_pre = 4 * _c.sigma_0 * (2*1j*sc.k*T) / (sc.pi*sc.hbar)
inter_pre = _c.sigma_0
### Intraband Contribution ###
# Using np.logaddexp() avoids the large cosh in ln( cosh(1/T) )
x = FermiLevel / (2*sc.k*T)
intra = lambda w: intra_pre * ( 1 / (w + 1j*gamma) ) * np.logaddexp(x,-x)
### Interband Contribution ###
H = lambda energy: sd.FermiDirac(-energy-FermiLevel,T) - sd.FermiDirac(energy-FermiLevel,T)
integrand = lambda energy,w: ( H(energy) - H(sc.hbar*w/2) ) / (sc.hbar**2 * (w + 1j*gamma)**2 - 4 * energy**2)
def integral(w):
result = np.empty_like(w,dtype=complex)
for i, frequency in np.ndenumerate(w):
integrand_re = lambda e: np.real(integrand(e,frequency))
integrand_im = lambda e: np.imag(integrand(e,frequency))
result[i] =( integrate.quad(integrand_re,0,10*FermiLevel,points=(FermiLevel/sc.hbar,2*FermiLevel/sc.hbar))[0]
+ 1j*integrate.quad(integrand_im,0,10*FermiLevel,points=(FermiLevel/sc.hbar,2*FermiLevel/sc.hbar))[0] )
return result
inter = lambda w: inter_pre * ( H(sc.hbar * w / 2) +
(4*1j/sc.pi) * sc.hbar*(w + 1j*gamma)*integral(w) )
conductivity= intra(omega) + inter(omega)
if T==0:
intra = lambda w: 1j*_c.sigma_0 * 4*FermiLevel / (sc.pi*sc.hbar* (w + 1j*gamma))
inter = lambda w: _c.sigma_0 * ( np.heaviside(sc.hbar*w - 2*FermiLevel,0.5) +
(1j/sc.pi) * np.log(np.abs((2*FermiLevel-sc.hbar*w)/(2*FermiLevel+sc.hbar*w))))
conductivity = intra(omega) + inter(omega)
# Nonlocal case
else:
if T==0:
conductivity = 1j*sc.elementary_charge**2 * (omega / q**2) * Polarizibility(q,omega,gamma,FermiLevel,T)
else:
pass
return conductivity
def OpticalConductivityMatrix(q,omega,gamma, FermiLevel,T):
'''
Returns the conductivity matrix of monolayer graphene.
Parameters
----------
q: array-like, rad/m
Wavenumber
omega: array-like, rad/s
Angular frequency
FermiLevel: scalar, J
the Fermi energy
gamma: scalar, rad/s
scattering rate
T: scalar, K
Temperature
Returns
----------
sigma_matrix: 2x2 numpy array, conductivity of monolayer graphene
'''
conductivity_matrix = np.array([[ScalarOpticalConductivity(q,omega,gamma,FermiLevel,T),0],
[0,ScalarOpticalConductivity(q,omega,gamma,FermiLevel,T)]])
return conductivity_matrix
def Permittivity(q, omega,FermiLevel,T, gamma,epsR,model=None):
'''
Returns the in-plane permittivity of graphene.
Parameters
----------
q: array-like, rad/m
Wavenumber
omega: array-like, rad/s
Angular frequency
epsR: scalar, unitless
background relative permittivity
Notes
-----
Permittivity relates to the scalar optical conductivity through the expression
.. math::
\\epsilon(q, \\omega) = \\epsilon_0 + \\frac{i\\sigma(q,\\omega)}{\\omega}
At :math:`\\omega=0`, we can use the expression found in Ref. 2,
.. math::
\\epsilon(q) = \\kappa^* + \\frac{2\\pi e^2}{\\kappa q}\\Pi^+(q)
References
----------
[1] “Lumerical: Modeling Methodology.” n.d. Accessed April 1, 2019.
https://apps.lumerical.com/other_application_graphene_simulation_tips.html.
[2] <NAME>., and <NAME>. (2007).
Dielectric function, screening, and plasmons in two-dimensional graphene.
Phys. Rev. B 75, 205418. https://link.aps.org/doi/10.1103/PhysRevB.75.205418.
'''
if model=='Lumerical:Falkovsky':
'''
Use eqn 10 of Ref 1
'''
x1 = sc.elementary_charge
x2 = sc.hbar
x3 = FermiLevel
x4 = _c.vF
x5 = Mobility(T,mu0,mu0T) # mobility at the new temperature
x6 = epsR
x7 = _c.thickness # 3.4 angstroms by default
x8 = sc.epsilon_0
prefactor = x1**2*x3 / ( sc.pi * x2**2)
denominator = omega**2 + ( x1*x4**2 / (x5*x3) )**2
term1 = - prefactor*(omega*x8*x7)**(-1) * omega / denominator
term2 = 1j*prefactor * (x1*x4**2 / (omega*x5*x3*x8*x7)) / denominator
eps = x6 + term1 + term2
return sc.epsilon_0*eps
elif np.all(omega==0):
pass
else:
eps = 1 + 1j*ScalarOpticalConductivity(q,omega,gamma,FermiLevel,T,model)/(omega*sc.epsilon_0)
return eps
def FresnelReflection(q,omega,gamma,FermiLevel,T,eps1,eps2,polarization):
'''
The Fresnel Reflection coefficients of light incident from above (medium 1, eps1).
Equation 5.4 of Ref 1
Parameters
----------
q: array-like, rad/m
Wavenumber at which to evaluate FresnelReflection.
In-plane momentum of incident light.
omega: array-like, rad/s
Angular frequency of incident light.
eps1: scalar, unitless
Permittivity in upper half-space
eps2: scalar, unitless
Permittivity in lower half-space
polarization: string
's'/'TE' or 'p'/'TM' for s- or p-polarization.
Examples
--------
Plot the TM polarized Fresnel Reflection coefficient. This will highlight the plasmon.
Replicates Fig. 5.2 in Ref [1].
.. plot::
>>> import matplotlib.pyplot as plt
>>> import matplotlib.cm as cm
>>> from graphenemodeling.graphene import monolayer as mlg
>>> from scipy.constants import elementary_charge, hbar
>>> eV = elementary_charge
>>> FermiLevel = 0.4 * eV
>>> gamma = 0.012 * eV / hbar
>>> kF = mlg.FermiWavenumber(FermiLevel,model='LowEnergy')
>>> q = np.linspace(1e-2,3,num=200) * kF
>>> w = np.linspace(1e-2,3,num=200) * FermiLevel / hbar
>>> fresnelTM = mlg.FresnelReflection(q,w[:,np.newaxis],gamma,FermiLevel,T=0,
... eps1=1,eps2=1,
... polarization='TM')
>>> fig, ax = plt.subplots(figsize=(6,6))
>>> ax.imshow(-np.imag(fresnelTM),
... extent=(q[0]/kF,q[-1]/kF,hbar*w[0]/FermiLevel,hbar*w[-1]/FermiLevel),
... origin='lower',aspect='auto',cmap=cm.hot,vmin=-16,vmax=0)
>>> ax.set_xlabel('$q/k_F$')
>>> ax.set_ylabel('$\\hbar\\omega/E_F$')
>>> ax.set_ylim(0,3)
>>> ax.set_xlim(0,3)
>>> fig.suptitle('Fresnel Reflection Coefficient (TM)')
>>> plt.show()
References
----------
[1] <NAME>. (2017).
From Classical to Quantum Plasmonics in Three and Two Dimensions (Cham: Springer International Publishing).
http://link.springer.com/10.1007/978-3-319-48562-1.
'''
kperp1, kperp2 = np.sqrt(eps1*(omega/sc.speed_of_light)**2 - q**2 + 1e-9*1j), np.sqrt(eps2*(omega/sc.speed_of_light)**2 - q**2 + 1e-9*1j)
sigma = ScalarOpticalConductivity(q,omega,gamma,FermiLevel,T)
if polarization=='p' or polarization=='TM':
numerator = eps2*kperp1 - eps1*kperp2 + ( sigma*kperp1*kperp2 / (sc.epsilon_0*omega) )
denominator = eps2*kperp1 + eps1*kperp2 + ( sigma*kperp1*kperp2 / (sc.epsilon_0*omega) )
if polarization=='s' or polarization=='TE':
numerator = kperp1 - kperp2 - sc.mu_0*omega*sigma
denominator = kperp1 + kperp2 + sc.mu_0*omega*sigma
return numerator / denominator
def LocalDensityOfStates(d,omega,gamma,FermiLevel,T):
'''
The LDOS at a distance d above a plane of graphene.
Eqn 44 of the SM of Ref 1.
Assuming the plane is in vacuum for now.
References
-----------
[1] Miller et al. 2017
'''
k0 = (omega/sc.speed_of_light)
ldos0 = k0**2 / (2*sc.pi**2*sc.speed_of_light) # Free space LDOS
integral = np.empty_like(d)
for i, d0 in np.ndenumerate(d):
k0w = (omega/sc.speed_of_light)
Im_rp = lambda q: np.imag( FresnelReflection(q,omega,gamma,FermiLevel,T,1,1,'p') )
integrand = lambda q: (q**2/k0w**3)*Im_rp(q)*np.exp(-2*q*d0)
a,b = 1e-3, np.abs(_c.K) # Increasing this bound does not lead to more convergence
q_plasmon=InversePlasmonDispersion(omega,gamma,FermiLevel,1,1,T,model='nonlocal')
integral[i] = integrate.quad(integrand,a,b,
points=(q_plasmon),limit=100)[0]
return ldos0 * integral
#####################
# Phonon Properties #
#####################
def PhononSelfEnergy():
pass
##############
# Plasmonics #
##############
def PlasmonDispersion(q,gamma,FermiLevel,eps1,eps2,T,model):
'''
Returns the nonretarded plasmon dispersion relations E(q) for a surface
plasmon in an infinite sheet of graphene sandwiched between two
dielectrics eps1 and eps2.
All values returned are assumed to be at zero temperature with no loss (gamma).
Parameters
----------
q: array-like, rad/m
Wavenumber of the plasmon
eps1: scalar, unitless
Permittivity in upper half-space
eps2: scalar, unitless
Permittivity in lower half-space
model: string
'intra' for intraband dispersion,
'local' uses the intraband + interband contributions to the conductivity, and
'nonlocal' for fully nonlocal conductivity.
Returns
-------
omega: array-like
Frequency of the plasmon with wavenumber q
Notes
-----
``model=='intra'`` uses
.. math::
\\omega = \\frac{1}{\\hbar}\\sqrt{\\frac{e^2\\epsilon_F}{2\\pi\\epsilon_0\\bar\\epsilon}q}
``model=='local'`` uses
.. math::
1-\\frac{i\\text{Im}[\\sigma(q,\\omega)]}{2i\\epsilon_0\\bar\\epsilon\\omega}=0
and finally, ``model=='nonlocal'`` uses
.. math::
1 - \\frac{\\sigma(q,\\omega)q}{2i\\epsilon_0\\bar\\epsilon\\omega} = 0
Examples
--------
Plot the three expressions for the dispersion relation. Replicates Fig. 5.2 in Ref. [1].
.. plot::
>>> from graphenemodeling.graphene import monolayer as mlg
>>> from scipy.constants import elementary_charge, hbar
>>> eV = elementary_charge
>>> gamma=0.012 * eV / hbar
>>> eF = 0.4*eV
>>> kF = mlg.FermiWavenumber(eF,model='LowEnergy')
>>> q = np.linspace(1e-3,3,num=200) * kF
>>> disp_intra = mlg.PlasmonDispersion(q,gamma,eF,eps1=1,eps2=1,T=0,model='intra')
>>> disp_local = mlg.PlasmonDispersion(q,gamma,eF,eps1=1,eps2=1,T=0,model='local')
>>> disp_nonlocal = mlg.PlasmonDispersion(q,gamma,eF,eps1=1,eps2=1,T=0,model='nonlocal')
>>> fig, ax = plt.subplots(figsize=(6,6))
>>> ax.plot(q/kF,hbar*disp_intra/eF,'--',label='Intraband')
<...
>>> ax.plot(q/kF,hbar*disp_local/eF,'r-.',label='Local')
<...
>>> ax.plot(q[:190]/kF,hbar*disp_nonlocal[:190]/eF,'g',label='Nonlocal')
<...
>>> ax.plot(q/kF,q/kF,color='gray',linestyle='--')
<...
>>> ax.set_xlabel('$q/k_F$')
>>> ax.set_ylabel('$\\hbar\\omega/E_F$')
>>> ax.set_xlim(0,3)
>>> ax.set_ylim(0,3)
>>> plt.legend()
>>> plt.show()
Plot the dispersion relation with a lower half-space permittivity of :math:`\\epsilon=4` (an approximation for hexagonal boron nitride).
Replicates Fig. 1d in Ref. [2].
.. plot::
>>> from graphenemodeling.graphene import monolayer as mlg
>>> from scipy.constants import elementary_charge, hbar
>>> eV = elementary_charge
>>> gamma=0.012 * eV / hbar
>>> eF = 0.4*eV
>>> kF | |
input, its input regularizer is from dw1.
conv = self.GetConv('conv1')
gamma = self.GetGammaAbsValue('conv1')
# The effective size for dw can be computed from its gamma, and
# the loss may be computed as follows:
# gamma_dw = self.GetGammaAbsValue('dw1')
# expected_loss = _coeff(conv) * (
# gamma.sum() * (gamma_dw > 0.45).sum() + gamma_dw.sum() *
# (gamma > 0.45).sum())
# However, since dw cannot change shape because its input doesn't have a
# regularizer, the real loss we expect should be:
expected_loss = _coeff(conv) * (gamma.sum() * NUM_CHANNELS)
self.assertNear(expected_loss, self.loss([conv]), expected_loss * 1e-5)
# Dw2 depthwise convolution is connected to conv2 (grouped regularizer).
conv = self.GetConv('conv2')
gamma_conv = self.GetGammaAbsValue('conv2')
dw = self.GetConv('dw2')
gamma_dw = self.GetGammaAbsValue('dw2')
gamma = np.maximum(gamma_dw, gamma_conv).sum()
expected_loss = _coeff(conv) * (gamma * 3 + (gamma > 0.45).sum() * 0)
self.assertNear(expected_loss, self.loss([conv]), expected_loss * 1e-5)
expected_loss = _coeff(dw) * gamma * 2
self.assertNear(expected_loss, self.loss([dw]), expected_loss * 1e-5)
class GammaFlopLossWithDepthwiseConvNoBatchNormTest(
tf.test.TestCase, GammaFlopLossWithDepthwiseConvTestBase):
"""Test flop_regularizer for un-batchnormed depthwise convolutions.
This test is used to confirm that when depthwise convolution is not BNed, it
will not be considered towards the regularizer, but it will be counted towards
the cost.
This design choice is for backward compatibility for users who did not
regularize depthwise convolutions. However, the cost will be reported
regardless in order to be faithful to the real computation complexity.
"""
def setUp(self):
self._depthwise_use_batchnorm = False
super(GammaFlopLossWithDepthwiseConvNoBatchNormTest, self).setUp()
self.BuildWithBatchNorm()
with self.cached_session():
self.Init()
def GetSession(self):
return self.cached_session()
def testCost(self):
# Dw1 has NUM_CHANNELS inputs (from the image).
conv = self.GetConv('dw1')
self.assertEqual(_coeff(conv) * 3, self.cost([conv]))
# Conv1 has 7 gammas above 0.45, and 3 inputs (from dw1).
conv = self.GetConv('conv1')
self.assertEqual(_coeff(conv) * 7 * 3, self.cost([conv]))
# Conv2 has 11 active outputs and NUM_CHANNELS inputs (from the image).
conv = self.GetConv('conv2')
self.assertEqual(_coeff(conv) * 11 * NUM_CHANNELS, self.cost([conv]))
# Dw2 has 11 inputs (pass-through from the Conv2).
conv = self.GetConv('dw2')
self.assertEqual(_coeff(conv) * 11, self.cost([conv]))
# Conv3 has 10 gammas above 0.45, and 7 + 11 inputs from conv1 and dw2.
conv = self.GetConv('conv3')
self.assertEqual(_coeff(conv) * 10 * 18, self.cost([conv]))
def testRegularizer(self):
# Dw1 depthwise convolution is connected to the input (no regularizer).
conv = self.GetConv('dw1')
expected_loss = 0.0
self.assertNear(expected_loss, self.loss([conv]), expected_loss * 1e-5)
# Conv1 takes Dw1 as input, but it's not affected by dw1 because depthwise
# is not BNed.
conv = self.GetConv('conv1')
gamma = self.GetGammaAbsValue('conv1')
expected_loss = _coeff(conv) * (gamma.sum() * NUM_CHANNELS)
self.assertNear(expected_loss, self.loss([conv]), expected_loss * 1e-5)
# Dw2 depthwise convolution is connected to conv2 (pass through).
dw = self.GetConv('dw2')
gamma = self.GetGammaAbsValue('conv2')
expected_loss = _coeff(dw) * gamma.sum() * 2
self.assertNear(expected_loss, self.loss([dw]), expected_loss * 1e-5)
class GammaFlopResidualConnectionsLossTest(tf.test.TestCase):
"""Tests flop_regularizer for a network with residual connections."""
def setUp(self):
super(GammaFlopResidualConnectionsLossTest, self).setUp()
tf.set_random_seed(7)
self._threshold = 0.6
def BuildModel(self, resnet_fn, block_fn):
# We use this model as a test case because the slim.nets.resnet module is
# used in some production.
#
# The model looks as follows:
#
# Image --> unit_1/shortcut
# Image --> unit_1/conv1 --> unit_1/conv2 --> unit_1/conv3
#
# unit_1/shortcut + unit_1/conv3 --> unit_1 (residual connection)
#
# unit_1 --> unit_2/conv1 -> unit_2/conv2 --> unit_2/conv3
#
# unit_1 + unit_2/conv3 --> unit_2 (residual connection)
#
# In between, there are strided convolutions and pooling ops, but these
# should not affect the regularizer.
blocks = [
block_fn('block1', base_depth=7, num_units=2, stride=2),
]
image = tf.constant(0.0, shape=[1, 2, 2, NUM_CHANNELS])
net = resnet_fn(
image, blocks, include_root_block=False, is_training=False)[0]
net = tf.reduce_mean(net, axis=(1, 2))
return slim.layers.fully_connected(net, 23, scope='FC')
def BuildGraphWithBatchNorm(self, resnet_fn, block_fn):
params = {
'trainable': True,
'normalizer_fn': slim.batch_norm,
'normalizer_params': {
'scale': True
}
}
with slim.arg_scope([slim.layers.conv2d, slim.layers.separable_conv2d],
**params):
self.net = self.BuildModel(resnet_fn, block_fn)
def InitGamma(self):
assignments = []
gammas = {}
for v in tf.global_variables():
if v.op.name.endswith('/gamma'):
assignments.append(v.assign(tf.random_uniform(v.shape)))
gammas[v.op.name] = v
with self.cached_session() as s:
s.run(assignments)
self._gammas = s.run(gammas)
def GetGamma(self, short_name):
tokens = short_name.split('/')
name = ('resnet_v1/block1/' + tokens[0] + '/bottleneck_v1/' + tokens[1] +
'/BatchNorm/gamma')
return self._gammas[name]
def GetOp(self, short_name):
if short_name == 'FC':
return tf.get_default_graph().get_operation_by_name('FC/MatMul')
tokens = short_name.split('/')
name = ('resnet_v1/block1/' + tokens[0] + '/bottleneck_v1/' + tokens[1] +
'/Conv2D')
return tf.get_default_graph().get_operation_by_name(name)
def NumAlive(self, short_name):
return np.sum(self.GetGamma(short_name) > self._threshold)
def GetCoeff(self, short_name):
return _coeff(self.GetOp(short_name))
def testCost(self):
self.BuildGraphWithBatchNorm(resnet_v1.resnet_v1, resnet_v1.resnet_v1_block)
self.InitGamma()
res_alive = np.logical_or(
np.logical_or(
self.GetGamma('unit_1/shortcut') > self._threshold,
self.GetGamma('unit_1/conv3') > self._threshold),
self.GetGamma('unit_2/conv3') > self._threshold)
self.gamma_flop_reg = flop_regularizer.GammaFlopsRegularizer(
[self.net.op], self._threshold)
expected = {}
expected['unit_1/shortcut'] = (
self.GetCoeff('unit_1/shortcut') * np.sum(res_alive) * NUM_CHANNELS)
expected['unit_1/conv1'] = (
self.GetCoeff('unit_1/conv1') * self.NumAlive('unit_1/conv1') *
NUM_CHANNELS)
expected['unit_1/conv2'] = (
self.GetCoeff('unit_1/conv2') * self.NumAlive('unit_1/conv2') *
self.NumAlive('unit_1/conv1'))
expected['unit_1/conv3'] = (
self.GetCoeff('unit_1/conv3') * np.sum(res_alive) *
self.NumAlive('unit_1/conv2'))
expected['unit_2/conv1'] = (
self.GetCoeff('unit_2/conv1') * self.NumAlive('unit_2/conv1') *
np.sum(res_alive))
expected['unit_2/conv2'] = (
self.GetCoeff('unit_2/conv2') * self.NumAlive('unit_2/conv2') *
self.NumAlive('unit_2/conv1'))
expected['unit_2/conv3'] = (
self.GetCoeff('unit_2/conv3') * np.sum(res_alive) *
self.NumAlive('unit_2/conv2'))
expected['FC'] = 2.0 * np.sum(res_alive) * 23.0
# TODO(e1): Is there a way to use Parametrized Tests to make this more
# elegant?
with self.cached_session():
for short_name in expected:
cost = self.gamma_flop_reg.get_cost([self.GetOp(short_name)]).eval()
self.assertEqual(expected[short_name], cost)
self.assertEqual(
sum(expected.values()),
self.gamma_flop_reg.get_cost().eval())
class GroupLassoFlopRegTest(tf.test.TestCase):
def assertNearRelatively(self, expected, actual):
self.assertNear(expected, actual, expected * 1e-6)
def testFlopRegularizer(self):
tf.reset_default_graph()
tf.set_random_seed(7907)
with slim.arg_scope(
[slim.layers.conv2d, slim.layers.conv2d_transpose],
weights_initializer=tf.random_normal_initializer):
# Our test model is:
#
# -> conv1 --+
# / |--[concat]
# image --> conv2 --+
# \
# -> convt
#
# (the model has two "outputs", convt and concat).
#
image = tf.constant(0.0, shape=[1, 17, 19, NUM_CHANNELS])
conv1 = slim.layers.conv2d(
image, 13, [7, 5], padding='SAME', scope='conv1')
conv2 = slim.layers.conv2d(
image, 23, [1, 1], padding='SAME', scope='conv2')
self.concat = tf.concat([conv1, conv2], 3)
self.convt = slim.layers.conv2d_transpose(
image, 29, [7, 5], stride=3, padding='SAME', scope='convt')
self.name_to_var = {v.op.name: v for v in tf.global_variables()}
with self.cached_session():
tf.global_variables_initializer().run()
threshold = 1.0
flop_reg = flop_regularizer.GroupLassoFlopsRegularizer(
[self.concat.op, self.convt.op], threshold=threshold, l1_fraction=0)
with self.cached_session() as s:
evaluated_vars = s.run(self.name_to_var)
def group_norm(weights, axis=(0, 1, 2)): # pylint: disable=invalid-name
return np.sqrt(np.mean(weights**2, axis=axis))
reg_vectors = {
'conv1': group_norm(evaluated_vars['conv1/weights'], (0, 1, 2)),
'conv2': group_norm(evaluated_vars['conv2/weights'], (0, 1, 2)),
'convt': group_norm(evaluated_vars['convt/weights'], (0, 1, 3))
}
num_alive = {k: np.sum(r > threshold) for k, r in reg_vectors.items()}
total_outputs = (
reg_vectors['conv1'].shape[0] + reg_vectors['conv2'].shape[0])
total_alive_outputs = sum(num_alive.values())
assert total_alive_outputs > 0, (
'All outputs are dead - test is trivial. Decrease the threshold.')
assert total_alive_outputs < total_outputs, (
'All outputs are alive - test is trivial. Increase the threshold.')
coeff1 = _coeff(_get_op('conv1/Conv2D'))
coeff2 = _coeff(_get_op('conv2/Conv2D'))
coefft = _coeff(_get_op('convt/conv2d_transpose'))
expected_flop_cost = NUM_CHANNELS * (
coeff1 * num_alive['conv1'] + coeff2 * num_alive['conv2'] +
coefft * num_alive['convt'])
expected_reg_term = NUM_CHANNELS * (
coeff1 * np.sum(reg_vectors['conv1']) + coeff2 * np.sum(
reg_vectors['conv2']) + coefft * np.sum(reg_vectors['convt']))
with self.cached_session():
self.assertEqual(
round(expected_flop_cost), round(flop_reg.get_cost().eval()))
self.assertNearRelatively(expected_reg_term,
flop_reg.get_regularization_term().eval())
def testFlopRegularizerWithMatMul(self):
"""Test the MatMul op regularizer with FLOP network regularizer.
Set up a two layer fully connected network.
"""
tf.reset_default_graph()
tf.set_random_seed(1234)
# Create the variables, and corresponding values.
x = tf.constant(1.0, shape=[2, 6], name='x', dtype=tf.float32)
w = tf.get_variable('w', shape=(6, 4), dtype=tf.float32)
b = tf.get_variable('b', shape=(4), dtype=tf.float32)
w2 = tf.get_variable('w2', shape=(4, 1), dtype=tf.float32)
b2 = tf.get_variable('b2', shape=(1), dtype=tf.float32)
w_value = np.arange(24).reshape((6, 4)).astype('float32')
b_value = np.arange(4).reshape(4).astype('float32')
w2_value = np.arange(21, 25).reshape((4, 1)).astype('float32')
b2_value = np.arange(1).astype('float32')
# Build the test network model.
net = tf.nn.relu(tf.matmul(x, w, name='matmul1') + b)
output = tf.nn.relu(tf.matmul(net, w2, name='matmul2') + b2)
# Assign values to network parameters.
with self.cached_session() as session:
session.run([
w.assign(w_value),
b.assign(b_value),
w2.assign(w2_value),
b2.assign(b2_value)
])
# Create FLOPs network regularizer.
threshold = 32.0
flop_reg = flop_regularizer.GroupLassoFlopsRegularizer([output.op],
threshold, 0)
# Compute expected regularization vector and alive vector.
def group_norm(weights, axis=(0, 1, 2)): # pylint: disable=invalid-name
return np.sqrt(np.mean(weights**2, axis=axis))
expected_reg_vector1 = group_norm(w_value, axis=(0,))
expected_reg_vector2 = group_norm(w2_value, axis=(0,))
# Since the threshold is 32, and the L2 norm of columns in matrix w is
# (29.66479301, 31.71750259, 33.82307053, 35.97220993). Thus, the alive
# vector for w should be (0, 0, 1, 1). The alive vector is [1] since the L2
# norm for w2_value is 45.055521 > 32.
# Compute the expected FLOPs cost and expected regularization term.
matmul1_live_input = 6
matmul1_live_output = sum(expected_reg_vector1 > threshold)
matmul2_live_output = sum(expected_reg_vector2 > threshold)
expected_flop_cost = (
_coeff(_get_op('matmul1')) * matmul1_live_input * matmul1_live_output +
_coeff(_get_op('matmul2')) * matmul1_live_output * matmul2_live_output)
regularizer1 = np.sum(expected_reg_vector1)
regularizer2 = np.sum(expected_reg_vector2)
expected_reg_term =
import re
import os
import logging
from copy import deepcopy
from .exceptions import InterpolationError
from .backends import YAMLBackend, SopsYAMLBackend, JSONBackend
from .util import LookupDict, sym_lookup
function = re.compile(r'''%\{(scope|hiera|lookup|literal|alias)\(['"](?:::|)([^"']*)["']\)\}''')
interpolate = re.compile(r'''%\{(?:::|)([^\}]*)\}''')
rformat = re.compile(r'''%{(?:::|)([a-zA-Z_-|\d]+)}''')
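# Illustrative strings these patterns are intended to match (examples are not
# from the original source):
#   function:    "%{hiera('common::key')}", "%{alias('other_key')}", "%{literal('foo')}"
#   interpolate: "%{::fqdn}", "%{environment}"
#   rformat:     used to rewrite "%{environment}"-style tokens in hierarchy
#                paths and backend datadirs into Python "{environment}" format fields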
LOGGER = logging.getLogger(__name__)
class Merge(object):
def __init__(self, typ, deep=False):
self.typ = typ
self.deep = deep
if typ == dict:
self.value = LookupDict()
else:
self.value = typ()
def merge_value(self, value):
if isinstance(self.value, list):
self.value += list(value)
elif isinstance(self.value, set):
self.value = self.value | set(value)
elif isinstance(self.value, dict):
if self.deep:
self.value = self.deep_merge(self.value, value)
else:
for k, v in value.items():
if k not in self.value:
self.value[k] = v
elif isinstance(self.value, str):
self.value = value
else:
raise TypeError("Cannot handle merge_value of type %s" % type(self.value))
def deep_merge(self, a, b):
'''Recursively merges dicts. Not just a simple a['key'] = b['key']: if
both a and b have a key whose value is a dict, then deep_merge is called
on both values and the result is stored in the returned dictionary.'''
if not isinstance(b, dict):
return b
result = deepcopy(a)
for k, v in b.items():
if k in result and isinstance(result[k], dict):
result[k] = self.deep_merge(result[k], v)
elif k in result and isinstance(result[k], list):
if isinstance(v, list):
v = [_ for _ in v if _ not in result[k]]
result[k] += deepcopy(v)
else:
result[k].append(v)
else:
result[k] = deepcopy(v)
return result
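# Illustrative behaviour of deep_merge (example values are not from the
# original source):
#   a = {'x': {'y': 1}, 'l': [1]}
#   b = {'x': {'z': 2}, 'l': [1, 2]}
#   deep_merge(a, b) -> {'x': {'y': 1, 'z': 2}, 'l': [1, 2]}
# Nested dicts are merged key by key, while list items already present in
# the left-hand value are not duplicated.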
class ScopedHiera(object):
def __init__(self, hiera, context={}):
self.hiera = hiera
self.context = context
def has(self, key, **kwargs):
kwargs.update(self.context)
return self.hiera.has(key, **kwargs)
def get(self, key, default=None, merge=None, merge_deep=False, throw=False, context={}, **kwargs):
new_context = {}
new_context.update(self.context)
new_context.update(context)
new_context.update(kwargs)
return self.hiera.get(key, default, merge, merge_deep, throw, new_context)
def __getattr__(self, name):
if hasattr(self.hiera, name):
return getattr(self.hiera, name)
raise AttributeError
class Hiera(object):
"""
The Hiera object represents a first-class interaction between Python and
Hiera data. It takes a base-hiera config YAML file, and exposes methods
to retrieve and fully resolve Hiera data.
# XXX fix doc, this can be a dict
:param base_config: The Hiera base configuration: file path, file-like object, or dict
:param backends: A list of backends to use for loading, by default this is
YAMLBackend, SopsYAMLBackend and JSONBackend
:param context: Any dictionary of format/context variables to default for the
lifetime of this instance.
:param kwargs: Any additional kwargs will be added to the context
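Example (a minimal sketch; the config path and context keys below are
illustrative, not from the original source)::

    hiera = Hiera('/etc/puppet/hiera.yaml', environment='production')
    value = hiera.get('some::key', default=None)
    scoped = hiera.scoped(fqdn='node1.example.com')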
"""
def __init__(self, base_config, backends=None, base_path=None, context={}, **kwargs):
self.base_config = base_config
self.context = context
self.context.update(kwargs)
self.cache = {}
self.paths = []
self.load(backends or [YAMLBackend, SopsYAMLBackend, JSONBackend])
def load(self, backends, base_path=None):
"""
This function loads the base Hiera configuration, attempting to parse and
build state based on it. This will raise exceptions if the loading process
fails due to invalid configuration.
"""
# If we don't have a file-like object, attempt to open as a file path
if type(self.base_config) is dict:
self.base = self.base_config
if base_path is None:
self.base_path = os.getcwd()
else:
self.base_path = base_path
else:
if not hasattr(self.base_config, 'read'):
self.base_path = os.path.dirname(self.base_config)
self.base_config = open(self.base_config)
else:
self.base_path = os.getcwd()
# Load our base YAML configuration
self.base = YAMLBackend.load_ordered(self.base_config)
if not self.base:
raise Exception("Failed to parse base Hiera configuration")
# Load all backends
self.backends = {}
for backend in sym_lookup(self.base, 'backends'):
obj = [i for i in backends if i.NAME == backend]
if not len(obj):
raise Exception("Invalid Backend: `{}`".format(backend))
self.backends[backend] = obj[0](self, sym_lookup(self.base, backend))
# Make sure we have at least a single backend
if not len(self.backends):
raise Exception("No backends could be loaded")
hierarchy = sym_lookup(self.base, 'hierarchy')
if hierarchy is None:
raise Exception("Invalid Base Hiera Config: missing hierarchy key")
self.hierarchy = []
# Load our hierarchy
for path in hierarchy:
self.hierarchy.append(rformat.sub("{\g<1>}", path, count=0))
# Rewrite backend datadir templates into Python format strings
for backend in list(self.backends.values()):
backend.datadir = rformat.sub("{\g<1>}", backend.datadir, count=0)
# Now pre-load/cache a bunch of global stuff. If context vars where provided
# in the constructor, we'll also load those files into the cache.
self.get(None)
def load_directory(self, path, backend=None):
"""
Walks an entire directory and attempts to load all relevant data files
based on our backends. Optionally can only load for one backend.
"""
for root, dirs, files in os.walk(path):
for f in files:
backend = backend or self.backends.get(':{}'.format(os.path.splitext(f)[-1]))
if backend:
yield self.load_file(os.path.join(root, f), backend)
def load_file(self, path, backend, ignore_cache=False):
"""
Attempts to load a file for a specific backend, caching the result.
"""
if path not in self.cache or ignore_cache:
try:
self.cache[path] = backend.load(backend.read_file(path))
except Exception as e:
raise Exception("Failed to load file {}: `{}`".format(path, e))
return path
def can_resolve(self, s):
"""
Returns true if any resolving or interpolation can be done on the provided
string
"""
if isinstance(s, str) and (function.findall(s) or interpolate.findall(s)):
return True
return False
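# e.g. can_resolve("%{hiera('common::key')}") and can_resolve("%{::fqdn}")
# return True, while plain strings and non-string values return False.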
def resolve_function(self, s, paths, context, merge):
"""
Attempts to fully resolve a hiera function call within a value. This includes
interpolation for relevant calls.
"""
calls = function.findall(s)
# If this is an alias, just replace it (doesn't require interpolation)
if len(calls) == 1 and calls[0][0] == 'alias':
if function.sub("", s) != "":
raise Exception("Alias can not be used for string interpolation: `{}`".format(s))
try:
value = self.get_key(calls[0][1], paths, context, merge)
return value
except KeyError as e:
raise InterpolationError("Alias lookup failed: key '{}' does not exist".format(calls[0][1]))
# Iterate over all function calls and string interpolate their resolved values
for call, arg in calls:
if call == 'hiera' or call == 'lookup':
replace = self.get_key(arg, paths, context, merge)
elif call == 'scope':
replace = context.get(arg)
elif call == 'literal':
replace = arg
elif call == 'alias':
raise Exception("Invalid alias function call: `{}`".format(s))
if not replace:
raise Exception("Could not resolve value for function call: `{}`".format(s))
if isinstance(replace, str):
# Replace only the current function call with our resolved value
s = function.sub(replace, s, 1)
elif call == 'scope':
s = replace
else:
raise Exception("Resolved value is not a string for function call: `{}`".format(s))
return s
def resolve_interpolates(self, s, context):
"""
Attempts to resolve context-based string interpolation
"""
interps = interpolate.findall(s)
for i in interps:
# XXX - should this throw an error, interpolate to an empty string, or be configurable?
# what does ruby hiera do?
s = interpolate.sub((context.get(i) or ''), s, 1)
return s
def resolve(self, s, paths, context, merge):
"""
Fully resolves an object, including function and interpolation based resolving.
"""
if isinstance(s, dict):
return self.resolve_dict(s, paths, context, merge)
elif isinstance(s, list):
return list(self.resolve_list(s, paths, context, merge))
elif not self.can_resolve(s):
return s
base = self.resolve_function(s, paths, context, merge)
# If we can string interpolate the result, lets do that
if isinstance(base, str):
base = self.resolve_interpolates(base, context)
return base
def resolve_dict(self, obj, paths, context, merge):
"""
Recursively and completely resolves all Hiera interoplates/functions
within a dictionary.
"""
new_obj = LookupDict()
for k, v in obj.items():
new_obj[k] = self.resolve(v, paths, context, merge)
return new_obj
def resolve_list(self, obj, paths, context, merge):
for item in obj:
yield self.resolve(item, paths, context, merge)
def get_key(self, key, paths, context, merge):
"""
Get the value of a key within hiera, resolving if required
"""
merges = {}
for path in paths:
if self.cache[path] is not None and key is not None:
cache = None
try:
cache = self.cache[path].lookup(key)
except KeyError as e:
pass
if cache is not None:
if merge and not key in merges:
merges[key] = Merge(merge.typ, merge.deep)
value = self.resolve(cache, paths, context, (merges[key] if merge and merge.deep else merge))
if merge and merges[key]:
merges[key].merge_value(value)
else:
return value
if merge and key in merges and merges[key].value is not None:
return merges[key].value
else:
if key is not None and len(key.split('.')) > 1:
LOGGER.error("Lookup key: '{}' not found. Make sure you are providing this key in YAML configuration.".format(key))
raise KeyError(key)
def scoped(self, context={}, **kwargs):
context.update(kwargs)
return ScopedHiera(self, context)
def has(self, key, **kwargs):
"""
Returns true if the key exists in hiera, false otherwise
"""
try:
self.get(key, throw=True, **kwargs)
return True
except KeyError:
return False
def get(self, key, default=None, merge=None, merge_deep=False, throw=False, context={}, **kwargs):
"""
Attempts to retrieve a hiera variable by fully resolving its location.
:param key: They Hiera key to retrieve
:param default: If the Hiera key is not found, return this value
| |
import itertools
import hashlib
import pypipegraph as ppg
import numpy as np
import pandas as pd
from pandas import DataFrame
from mbf_genomics import DelayedDataFrame
from mbf_qualitycontrol import register_qc, qc_disabled
from mbf_genomics.util import parse_a_or_c_to_anno
from mbf_genomics.annotator import Annotator
from typing import List, Dict, Tuple, Any
from pypipegraph import Job
import dppd
import dppd_plotnine # noqa: F401
dp, X = dppd.dppd()
# import pypipegraph as ppg
class ComparisonAnnotator(Annotator):
def __init__(
self,
comparisons,
group_a,
group_b,
comparison_strategy,
laplace_offset=1 / 1e6,
other_groups_for_variance=[],
):
"""Create a comparison (a - b)"""
self.comparisons = comparisons
if hasattr(comparison_strategy, "__call__"):
self.comparison_strategy = comparison_strategy()
else:
self.comparison_strategy = comparison_strategy
if isinstance(
self.comparison_strategy.columns, str
): # pragma: no cover definsive
raise ValueError(
"ComparisonStrategy %s had a string as columns, must be a list"
% self.comparison_strategy
)
self.comp = (group_a, group_b)
self.other_groups_for_variance = other_groups_for_variance
self.columns = []
self.column_lookup = {}
for col in sorted(self.comparison_strategy.columns):
cn = self.name_column(col)
self.columns.append(cn)
self.column_lookup[col] = cn
self.laplace_offset = laplace_offset
self.result_dir = self.comparisons.result_dir / f"{group_a}_vs_{group_b}"
self.result_dir.mkdir(exist_ok=True, parents=True)
self._check_comparison_groups(group_a, group_b)
if len(self.columns[0]) >= 60:
self.cache_name = (
"Comp %s" % hashlib.md5(self.columns[0].encode("utf-8")).hexdigest()
)
try:
self.vid = self._build_vid()
except AttributeError: # the sample annotators don't have a vid
pass
def _build_vid(self):
a = set()
b = set()
all_columns = True
for s in self.comparisons.groups_to_samples[self.comp[0]]:
if s[0] is not None:
a.add(s[0].vid)
all_columns = False
for s in self.comparisons.groups_to_samples[self.comp[1]]:
if s[0] is not None:
b.add(s[0].vid)
all_columns = False
if a or b:
return sorted(a) + ["vs"] + sorted(b)
elif all_columns:
raise AttributeError("No vids - as expected")
def name_column(self, col):
if self.comparison_strategy.supports_other_samples:
supports_other_samples = ",Other=%s" % bool(self.other_groups_for_variance)
else:
supports_other_samples = ""
return f"Comp. {self.comp[0]} - {self.comp[1]} {col} ({self.comparison_strategy.name}{supports_other_samples})"
def __getitem__(self, itm):
"""look up the full column name from log2FC, p, FDR, etc"""
return self.column_lookup[itm]
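# e.g. for comp = ('treated', 'control'), self['log2FC'] resolves to the full
# column name produced by name_column(), such as
# "Comp. treated - control log2FC (<strategy name>)" (group and strategy
# names here are illustrative).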
def filter(self, filter_definition, new_name=None, sheet_name=None):
"""Turn a filter definition [(column, operator, threshold)...]
into a filtered genes object.
Example:
comp.filter(genes, '2x', [
('FDR', '<=', 0.05),  # a name from our comparison strategy - inspect column_lookup to list them
('log2FC', '|>', 1),  # absolute value
...
(anno, '>=', 50),
((anno, 1), '>=', 50),  # for the second column of the annotator
((anno, 'columnX'), '>=', 50),  # for the column named 'columnX' of the annotator
('annotator_columnX', '>=', 50),  # search for an annotator with that column. Use if exactly one, complain otherwise
]
"""
lookup = self.column_lookup.copy()
for c in self.columns:
lookup[c] = c
subset_relevant_columns = set(lookup.values())
subset_relevant_columns.update(self.sample_columns(self.comp[0]))
subset_relevant_columns.update(self.sample_columns(self.comp[1]))
for g in self.other_groups_for_variance:
subset_relevant_columns.update(self.sample_columns(g))
further_filters = []
add_direction = False
thresholds = {}
filter_str = []
for column, op, threshold in sorted(filter_definition):
if op == "==":
oop = "="
elif op == ">":
oop = ">"
elif op == "<":
oop = "<"
elif op == ">=":
oop = "≥"
elif op == "<=":
oop = "≤"
elif op == "|>":
oop = "|>"
elif op == "|<":
oop = "|<"
elif op == "|>=":
oop = "|≥"
elif op == "|<=":
oop = "|≤"
else:
oop = op
filter_str.append(f"{column}_{oop}_{threshold:.2f}")
subset_relevant_columns.add(lookup[column])
if column == "log2FC":
if "|" in op:
add_direction = True
thresholds[column] = threshold
if new_name is None:
filter_str = "__".join(filter_str)
new_name = f"Filtered_{self.comparison_strategy.name}_{self.comp[0]}-{self.comp[1]}_{filter_str}"
if "log2FC" in lookup:
further_filters.append(("logFC", lookup["log2FC"], 2, thresholds.get('log2FC', 0)))
if add_direction:
further_filters.append(("Direction", lookup["log2FC"], 1, 0))
for x in ["p", "FDR"]: # less than
if x in lookup:
further_filters.append((x, lookup[x], 5, thresholds.get(x, 1)))
for x in ["minExpression"]: # min of columns > x
if x in lookup:
further_filters.append((x, [lookup[x]], 4, thresholds.get(x, 0)))
# we need the filter func for the plotting, so we do it ourselves
filter_func, annos = self.comparisons.ddf.definition_to_function(
filter_definition, lookup
)
kwargs = {}
if hasattr(self, "vid"):
kwargs["vid"] = self.vid
res = self.comparisons.ddf.filter(
new_name,
filter_func,
annotators=annos,
column_lookup=lookup,
result_dir=self.result_dir / new_name,
sheet_name=sheet_name,
**kwargs,
)
if not qc_disabled():
if "p" in self.comparison_strategy.columns:
self.register_qc_volcano(self.comparisons.ddf, res, filter_func)
# self.register_qc_ma_plot(self.comparisons.ddf, res, filter_func)
res.plot_columns = self.samples()
res.venn_annotator = self
res.subset_relevant_columns = subset_relevant_columns
res.further_filter_columns = further_filters
return res
def calc(self, df):
columns_a = list(self.sample_columns(self.comp[0]))
columns_b = list(self.sample_columns(self.comp[1]))
columns_other = {}
for g in self.other_groups_for_variance:
columns_other[g] = self.sample_columns(g)
comp = self.comparison_strategy.compare(
df, columns_a, columns_b, columns_other, self.laplace_offset,
)
res = {}
for col in sorted(self.comparison_strategy.columns):
res[self.name_column(col)] = comp[col]
return pd.DataFrame(res)
def dep_annos(self):
"""Return other annotators"""
res = []
for generator in [self.samples(), self.other_samples()]:
for k in generator:
a = parse_a_or_c_to_anno(k)
if a is not None:
res.append(a)
return res
def deps(self, ddf):
from mbf_genomics.util import freeze
sample_info = []
for ac in self.samples():
group = self.comparisons.sample_column_to_group[ac[1]]
sample_info.append(
(group, ac[0].get_cache_name() if ac[0] is not None else "None", ac[1])
)
sample_info.sort()
parameters = freeze(
[
(
# self.comparison_strategy.__class__.__name__ , handled by column name
sample_info,
# self.comp, # his handled by column name
self.laplace_offset,
)
]
)
res = [ppg.ParameterInvariant(self.get_cache_name(), parameters)]
res.extend(getattr(self.comparison_strategy, "deps", lambda: [])())
return res
def samples(self):
"""Return anno, column for samples used"""
for x in list(self.comp) + self.other_groups_for_variance:
for s in self.comparisons.groups_to_samples[x]:
yield s
def other_samples(self):
"""Return anno, column for additional samples used for variance"""
for x in self.other_groups_for_variance:
for s in self.comparisons.groups_to_samples[x]:
yield s
def sample_columns(self, group):
for s in self.comparisons.groups_to_samples[group]:
yield s[1]
def _check_comparison_groups(self, *groups):
for x in groups:
if x not in self.comparisons.groups_to_samples:
raise ValueError(f"Comparison group {x} not found")
if (
len(self.comparisons.groups_to_samples[x])
< self.comparison_strategy.min_sample_count
):
raise ValueError(
"Too few samples in %s for %s" % (x, self.comparison_strategy.name)
)
def register_qc_volcano(self, genes, filtered=None, filter_func=None):
"""perform a volcano plot
"""
if filtered is None:
output_filename = genes.result_dir / "volcano.png"
else:
output_filename = filtered.result_dir / "volcano.png"
def plot(output_filename):
df = (
dp(genes.df)
.mutate(
significant=filter_func(genes.df)
if filter_func is not None
else "tbd."
)
.pd
)
no_sig_lower = (df["significant"] & (df[self["log2FC"]] < 0)).sum()
no_sig_higher = (df["significant"] & (df[self["log2FC"]] > 0)).sum()
(
dp(df)
.p9()
.scale_color_many_categories(name="regulated", shift=3)
.scale_y_continuous(
name="p",
trans=dp.reverse_transform("log10"),
labels=lambda xs: ["%.2g" % x for x in xs],
)
.add_vline(xintercept=1, _color="blue")
.add_vline(xintercept=-1, _color="blue")
.add_hline(yintercept=0.05, _color="blue")
.add_rect( # shade 'simply' significant regions
xmin="xmin",
xmax="xmax",
ymin="ymin",
ymax="ymax",
_fill="lightgrey",
data=pd.DataFrame(
{
"xmin": [-np.inf, 1],
"xmax": [-1, np.inf],
"ymin": [0, 0],
"ymax": [0.05, 0.05],
}
),
_alpha=0.8,
)
.add_scatter(self["log2FC"], self["p"], color="significant")
.title(f"# regulated down/ up: {no_sig_lower} / {no_sig_higher}")
# .coord_trans(x="reverse", y="reverse") #broken as of 2019-01-31
.render(output_filename, width=8, height=6, dpi=300)
)
return register_qc(
ppg.FileGeneratingJob(output_filename, plot).depends_on(
genes.add_annotator(self),
ppg.FunctionInvariant(
str(output_filename) + "_filter_func", filter_func
),
)
)
def register_qc_ma_plot(self, genes, filtered, filter_func):
"""perform an MA plot - not a straight annotator.register_qc function,
but called by .filter
"""
output_filename = filtered.result_dir / "ma_plot.png"
def plot(output_filename):
from statsmodels.nonparametric.smoothers_lowess import lowess
print(genes.df.columns)
print(list(self.sample_columns(self.comp[0])))
print(list(self.sample_columns(self.comp[1])))
df = genes.df[
list(self.sample_columns(self.comp[0]))
+ list(self.sample_columns(self.comp[1]))
]
df = df.assign(significant=filter_func(genes.df))
pdf = []
loes_pdfs = []
# TODO: how many times can you over-plot this?
for a, b in itertools.combinations(
[x for x in df.columns if not "significant" == x], 2
):
np_a = np.log2(df[a] + self.laplace_offset)
np_b = np.log2(df[b] + self.laplace_offset)
A = (np_a + np_b) / 2
M = np_a - np_b
local_pdf = pd.DataFrame(
{
"A": A,
"M": M,
"a": self.comparisons.get_plot_name(a),
"b": self.comparisons.get_plot_name(b),
"significant": df["significant"],
}
).sort_values("M")
chosen = np.zeros(len(local_pdf), bool)
chosen[:500] = True
chosen[-500:] = True
chosen[np.random.randint(0, len(chosen), 1000)] = True
pdf.append(local_pdf)
fitted = lowess(M, A, is_sorted=False)
loes_pdfs.append(
pd.DataFrame(
{
"a": self.comparisons.get_plot_name(a),
"b": self.comparisons.get_plot_name(b),
"A": fitted[:, 0],
"M": fitted[:, 1],
}
)
)
pdf = pd.concat(pdf)
pdf = pdf.assign(ab=[a + ":" + b for (a, b) in zip(pdf["a"], pdf["b"])])
loes_pdf = pd.concat(loes_pdfs)
loes_pdf = loes_pdf.assign(
ab=[a + ":" + b for (a, b) in zip(loes_pdf["a"], loes_pdf["b"])]
)
(
dp(pdf)
.p9()
.theme_bw(10)
.add_hline(yintercept=0, _color="lightblue")
.add_hline(yintercept=1, _color="lightblue")
.add_hline(yintercept=-1, _color="lightblue")
.scale_color_many_categories(name="significant", shift=3)
.add_point("A", "M", color="significant", _size=1, _alpha=0.3)
.add_line("A", "M", _color="blue", data=loes_pdf)
.facet_wrap(["ab"])
.title(f"MA {filtered.name}\n{self.comparisons.find_variable_name()}")
.render(output_filename, width=8, height=6)
)
return register_qc(
ppg.FileGeneratingJob(output_filename, plot)
.depends_on(genes.add_annotator(self))
.depends_on(self.comparisons.deps)
)
class ComparisonAnnotatorMulti(ComparisonAnnotator):
"""
Annotator for multi-factor comparisons.
Based on a main factor and a list of additional factors, this creates an
annotator that annotates DEG analysis results for a multi-factor design.
Interaction terms may be specified as a list of tuples, which may be empty.
If an empty interactions list is provided, the analysis just controls for
the different levels of other_factors and reports the main effect.
Parameters
----------
name : str
Annotator name, used for cache names and uniqueness checks.
comparisons : Comparisons
Comparisons instance containing the groups to be analyzed.
main_factor | |
(" + str(authenticators.messageData[1]) + ")", 400
return json.dumps(authenticators.messageData)
'''
PATH: /im/as/authenticator
N/A ACTIONS: POST, PUT, PATCH, DELETE
'''
def post_as_authenticator(self):
return "NOT AVAILABLE", 405
def put_as_authenticator(self):
return "NOT AVAILABLE", 405
def patch_as_authenticator(self):
return "NOT AVAILABLE", 405
def delete_as_authenticator(self):
return "NOT AVAILABLE", 405
'''
PATH: /im/as/authenticator/{authenticatorId}
ACTION: GET
DESCRIPTION: Return "True" if a required autheticator is
a available, or "False" if it is not.
ARGUMENT: --
RETURN: - 200 (HTTP) + Boolean [1]
- Integer error code (HTTP)
'''
def get_as_a_authenticatorId(self, authenticatorId):
request = IrModels.IrMessage().fromData(IrModels.IrManagement().fromData("AS", "get_as_a_authId", authenticatorId), "AS", "IM")
authenticators = self.__asIr.sendMessage(request)
if type(authenticators.messageData) == tuple:
return "ERROR CODE #3 (AS): IM/AS ERROR DURING AUTHENTICATOR INSTANCE OPERATION (" + str(authenticators.messageData[1]) + ")", 400
return json.dumps(authenticators.messageData)
'''
PATH: /im/as/authenticator/{authenticatorId}
N/A ACTIONS: POST, PUT, PATCH, DELETE
'''
def post_as_a_authenticatorId(self):
return "NOT AVAILABLE", 405
def put_as_a_authenticatorId(self):
return "NOT AVAILABLE", 405
def patch_as_a_authenticatorId(self):
return "NOT AVAILABLE", 405
def delete_as_a_authenticatorId(self):
return "NOT AVAILABLE", 405
'''
PATH: /im/as/running_authenticator
ACTION: GET
DESCRIPTION: Retrieve the currently running authenticator agent
in the access subsystem.
ARGUMENT: --
RETURN: - 200 (HTTP) + String [1]
- Integer error code (HTTP)
'''
def get_as_running_authenticator(self):
request = IrModels.IrMessage().fromData(IrModels.IrManagement().fromData("AS", "get_as_running_auth", None), "AS", "IM")
authenticators = self.__asIr.sendMessage(request)
if type(authenticators.messageData) == tuple:
return "ERROR CODE #3 (AS): IM/AS ERROR DURING AUTHENTICATOR INSTANCE OPERATION (" + str(authenticators.messageData[1]) + ")", 400
return json.dumps(authenticators.messageData)
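# Illustrative client-side call for the endpoint documented above (the host
# and port are hypothetical; the payload is whatever the access subsystem
# reports):
#
#   import requests
#   resp = requests.get("http://127.0.0.1:9000/im/as/running_authenticator")
#   if resp.status_code == 200:
#       print(resp.json())
#   else:
#       print("Request failed:", resp.status_code, resp.text)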
'''
PATH: /im/as/running_authenticator
N/A ACTIONS: POST, PUT, PATCH, DELETE
'''
def post_as_running_authenticator(self):
return "NOT AVAILABLE", 405
def put_as_running_authenticator(self):
return "NOT AVAILABLE", 405
def patch_as_running_authenticator(self):
return "NOT AVAILABLE", 405
def delete_as_running_authenticator(self):
return "NOT AVAILABLE", 405
'''
PATH: /im/as/running_authenticator/{authenticatorId}
ACTION: GET
DESCRIPTION: Return "True" if a required authenticator is
running, or "False" if it is not.
ARGUMENT: --
RETURN: - 200 (HTTP) + String [1]
- Integer error code (HTTP)
'''
def get_as_ra_authenticatorId(self, authenticatorId):
request = IrModels.IrMessage().fromData(IrModels.IrManagement().fromData("AS", "get_as_ra_authId", authenticatorId), "AS", "IM")
authenticators = self.__asIr.sendMessage(request)
if type(authenticators.messageData) == tuple:
return "ERROR CODE #3 (AS): IM/AS ERROR DURING AUTHENTICATOR INSTANCE OPERATION (" + str(authenticators.messageData[1]) + ")", 400
return json.dumps(authenticators.messageData)
'''
PATH: /im/as/running_authenticator/{authenticatorId}
ACTION: POST
DESCRIPTION: Retrieve the required authenticator and execute
it in the access subsystem.
ARGUMENT: --
RETURN: - 200 (HTTP) + String [1]
- Integer error code (HTTP)
'''
def post_as_ra_authenticatorId(self, authenticatorId):
request = IrModels.IrMessage().fromData(IrModels.IrManagement().fromData("AS", "post_as_ra_authId", authenticatorId), "AS", "IM")
authenticators = self.__asIr.sendMessage(request)
if type(authenticators.messageData) == tuple:
return "ERROR CODE #3 (AS): IM/AS ERROR DURING AUTHENTICATOR INSTANCE OPERATION (" + str(authenticators.messageData[1]) + ")", 400
return json.dumps(authenticators.messageData)
'''
PATH: /im/as/running_authenticator/{authenticatorId}
N/A ACTIONS: PUT, PATCH, DELETE
'''
def put_as_ra_authenticatorId(self):
return "NOT AVAILABLE", 405
def patch_as_ra_authenticatorId(self):
return "NOT AVAILABLE", 405
def delete_as_ra_authenticatorId(self):
return "NOT AVAILABLE", 405
'''
PATH: /im/as/user
ACTION: GET
DESCRIPTION: Retrieve all the available users in the
access subsystem.
ARGUMENT: --
RETURN: - 200 (HTTP) + VibUserInstance [0..N]
- Integer error code (HTTP)
'''
def get_as_user(self):
request = IrModels.IrMessage().fromData(IrModels.IrManagement().fromData("AS", "get_as_user", None), "AS", "IM")
users = self.__asIr.sendMessage(request)
if type(users.messageData) == tuple:
return "ERROR CODE #3 (AS): IM/AS ERROR DURING USER INSTANCE OPERATION (" + str(users.messageData[1]) + ")", 400
return json.dumps([u.toDictionary() for u in users.messageData])
'''
PATH: /im/as/user
ACTION: POST
DESCRIPTION: Send a new user to be saved in the access
subsystem.
ARGUMENT: --
RETURN: - 200 (HTTP) + VibUserInstance [1]
- Integer error code (HTTP)
'''
def post_as_user(self, vibUserInstance):
try:
vibUserInstance = VibModels.VibUserInstance().fromDictionary(json.loads(vibUserInstance))
except Exception as e:
return "ERROR CODE #0 (AS): INVALID USER INSTANCE PROVIDED (" + str(e) + ")", 400
request = IrModels.IrMessage().fromData(IrModels.IrManagement().fromData("AS", "post_as_user", vibUserInstance), "AS", "IM")
user = self.__asIr.sendMessage(request)
if type(user.messageData) == tuple:
return "ERROR CODE #3 (AS): IM/AS ERROR DURING USER INSTANCE OPERATION (" + str(user.messageData[1]) + ")", 400
return json.dumps(user.messageData.toDictionary())
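# Illustrative usage of the handler above ('agent' is a hypothetical instance
# of this class; only the 'userId' field, referenced elsewhere in this module,
# is assumed to exist on VibUserInstance):
#
#   new_user = json.dumps({"userId": "operator-01"})
#   result = agent.post_as_user(new_user)
#   # result is the serialized VibUserInstance on success,
#   # or an ("ERROR CODE ...", 400) tuple on failure.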
'''
PATH: /im/as/user
N/A ACTIONS: PUT, PATCH, DELETE
'''
def put_as_user(self):
return "NOT AVAILABLE", 405
def patch_as_user(self):
return "NOT AVAILABLE", 405
def delete_as_user(self):
return "NOT AVAILABLE", 405
'''
PATH: /im/as/user/{userId}
ACTION: GET
DESCRIPTION: Retrieve a particular user from the access
subsystem.
ARGUMENT: --
RETURN: - 200 (HTTP) + VibUserInstance [1]
- Integer error code (HTTP)
'''
def get_as_u_userId(self, userId):
request = IrModels.IrMessage().fromData(IrModels.IrManagement().fromData("AS", "get_as_u_userId", userId), "AS", "IM")
user = self.__asIr.sendMessage(request)
if type(user.messageData) == tuple:
return "ERROR CODE #3 (AS): IM/AS ERROR DURING USER INSTANCE OPERATION (" + str(user.messageData[1]) + ")", 400
return json.dumps(user.messageData.toDictionary())
'''
PATH: /im/as/user/{userId}
ACTION: PATCH
DESCRIPTION: Update a particular user from the access
subsystem.
ARGUMENT: --
RETURN: - 200 (HTTP) + VibUserInstance [1]
- Integer error code (HTTP)
'''
def patch_as_u_userId(self, userId, vibUserInstance):
try:
vibUserInstance = VibModels.VibUserInstance().fromDictionary(json.loads(vibUserInstance))
if userId != vibUserInstance.userId:
return "ERROR CODE #0 (AS): INVALID USER INSTANCE PROVIDED (" + str(userId) + " != " + str(vibUserInstance.userId) + ")", 400
except Exception as e:
return "ERROR CODE #0 (AS): INVALID USER INSTANCE PROVIDED (" + str(e) + ")", 400
request = IrModels.IrMessage().fromData(IrModels.IrManagement().fromData("AS", "patch_as_u_userId", vibUserInstance), "AS", "IM")
user = self.__asIr.sendMessage(request)
if type(user.messageData) == tuple:
return "ERROR CODE #3 (AS): IM/AS ERROR DURING USER INSTANCE OPERATION (" + str(user.messageData[1]) + ")", 400
return json.dumps(user.messageData.toDictionary())
'''
PATH: /im/as/user/{userId}
ACTION: DELETE
DESCRIPTION: Delete a particular user from the access
subsystem.
ARGUMENT: --
RETURN: - 200 (HTTP) + VibUserInstance [1]
- Integer error code (HTTP)
'''
def delete_as_u_userId(self, userId):
request = IrModels.IrMessage().fromData(IrModels.IrManagement().fromData("AS", "delete_as_u_userId", userId), "AS", "IM")
user = self.__asIr.sendMessage(request)
if type(user.messageData) == tuple:
return "ERROR CODE #3 (AS): IM/AS ERROR DURING USER INSTANCE OPERATION (" + str(user.messageData[1]) + ")", 400
return json.dumps(user.messageData.toDictionary())
'''
PATH: /im/as/user/{userId}
N/A ACTIONS: POST, PUT
'''
def post_as_u_userId(self):
return "NOT AVAILABLE", 405
def put_as_u_userId(self):
return "NOT AVAILABLE", 405
'''
PATH: /im/as/credential
ACTION: GET
DESCRIPTION: Retrieve all the available credentials in the
access subsystem.
ARGUMENT: --
RETURN: - 200 (HTTP) + VibCredentialInstance [0..N]
- Integer error code (HTTP)
'''
def get_as_credential(self):
request = IrModels.IrMessage().fromData(IrModels.IrManagement().fromData("AS", "get_as_credential", None), "AS", "IM")
credentials = self.__asIr.sendMessage(request)
if type(credentials.messageData) == tuple:
return "ERROR CODE #3 (AS): IM/AS ERROR DURING CREDENTIAL INSTANCE OPERATION (" + str(credentials.messageData[1]) + ")", 400
return json.dumps([c.toDictionary() for c in credentials.messageData])
'''
PATH: /im/as/credential
ACTION: POST
DESCRIPTION: Send a new credential to be saved in the access
subsystem.
ARGUMENT: --
RETURN: - 200 (HTTP) + VibCredentialInstance [1]
- Integer error code (HTTP)
'''
def post_as_credential(self, vibCredentialInstance):
try:
vibCredentialInstance = VibModels.VibCredentialInstance().fromDictionary(json.loads(vibCredentialInstance))
except:
return "ERROR CODE #0 (AS): INVALID CREDENTIAL INSTANCE PROVIDED", 400
request = IrModels.IrMessage().fromData(IrModels.IrManagement().fromData("AS", "post_as_credential", vibCredentialInstance), "AS", "IM")
credential = self.__asIr.sendMessage(request)
if type(credential.messageData) == tuple:
return "ERROR CODE #3 (AS): IM/AS ERROR DURING CREDENTIAL OPERATION (" + str(credential.messageData[1]) + ")", 400
return json.dumps(credential.messageData.toDictionary())
'''
PATH: /im/as/credential
N/A ACTIONS: PUT, PATCH, DELETE
'''
def put_as_credential(self):
return "NOT AVAILABLE", 405
def patch_as_credential(self):
return "NOT AVAILABLE", 405
def delete_as_credential(self):
return "NOT AVAILABLE", 405
'''
PATH: /im/as/credential/{userId}/{vnfId}
ACTION: GET
DESCRIPTION: Retrieve a particular credential from the access
subsystem.
ARGUMENT: --
RETURN: - 200 (HTTP) + VibCredentialInstance [1]
- Integer error code (HTTP)
'''
def get_as_c_credentialId(self, userId, vnfId):
request = IrModels.IrMessage().fromData(IrModels.IrManagement().fromData("AS", "get_as_c_credentialId", (userId, vnfId)), "AS", "IM")
credential = self.__asIr.sendMessage(request)
if type(credential.messageData) == tuple:
return "ERROR CODE #3 (AS): IM/AS ERROR DURING CREDENTIAL INSTANCE OPERATION (" + str(credential.messageData[1]) + ")", 400
return json.dumps(credential.messageData.toDictionary())
'''
PATH: /im/as/credential/{userId}/{vnfId}
ACTION: DELETE
DESCRIPTION: Delete a particular credential from the access
subsystem.
ARGUMENT: --
RETURN: - 200 (HTTP) + VibCredentialInstance [1]
- Integer error code (HTTP)
'''
def delete_as_c_credentialId(self, userId, vnfId):
request = IrModels.IrMessage().fromData(IrModels.IrManagement().fromData("AS", "delete_as_c_credentialId", (userId, vnfId)), "AS", "IM")
credential = self.__asIr.sendMessage(request)
if type(credential.messageData) == tuple:
return "ERROR CODE #3 (AS): IM/AS ERROR DURING MONITORING AGENT INSTANCE OPERATION (" + str(credential.messageData[1]) + ")", 400
return json.dumps(credential.messageData.toDictionary())
'''
PATH: /im/as/credential/{userId}/{vnfId}
N/A ACTIONS: POST, PUT, PATCH
'''
def post_as_c_credentialId(self):
return "NOT AVAILABLE", 405
def put_as_c_credentialId(self):
return "NOT AVAILABLE", 405
def patch_as_c_credentialId(self):
return "NOT AVAILABLE", 405
'''
PATH: /im/as/credential/user/{userId}
ACTION: GET
DESCRIPTION: Retrieve the credentials of a particular
user from the access subsystem.
ARGUMENT: --
RETURN: - 200 (HTTP) + VibCredentialInstance [0..N]
- Integer error code (HTTP)
'''
def get_as_c_userId(self, userId):
request = IrModels.IrMessage().fromData(IrModels.IrManagement().fromData("AS", "get_as_c_userId", userId), "AS", "IM")
credentials = self.__asIr.sendMessage(request)
if type(credentials.messageData) == tuple:
return "ERROR CODE #3 (AS): IM/AS ERROR DURING CREDENTIAL INSTANCE OPERATION (" + str(credentials.messageData[1]) + ")", 400
return json.dumps([c.toDictionary() for c in credentials.messageData])
'''
PATH: /im/as/credential/user/{userId}
N/A ACTIONS: POST, PUT, PATCH, DELETE
'''
def post_as_c_userId(self):
return "NOT AVAILABLE", 405
def put_as_c_userId(self):
return "NOT AVAILABLE", 405
def patch_as_c_userId(self):
return "NOT AVAILABLE", 405
def delete_as_c_userId(self):
return "NOT AVAILABLE", 405
'''
PATH: /im/as/credential/vnf/{vnfId}
ACTION: GET
DESCRIPTION: Retrieve the credentials of a particular
vnf from the access subsystem.
ARGUMENT: --
RETURN: - 200 (HTTP) + VibCredentialInstance [0..N]
- Integer error code (HTTP)
'''
def get_as_c_vnfId(self, vnfId):
request = IrModels.IrMessage().fromData(IrModels.IrManagement().fromData("AS", "get_as_c_vnfId", vnfId), "AS", "IM")
credentials = self.__asIr.sendMessage(request)
if type(credentials.messageData) == tuple:
return "ERROR CODE #3 (AS): IM/AS ERROR DURING CREDENTIAL INSTANCE OPERATION (" + str(credentials.messageData[1]) + ")", 400
return json.dumps([c.toDictionary() for c in credentials.messageData])
'''
PATH: /im/as/credential/vnf/{vnfId}
N/A ACTIONS: POST, PUT, PATCH, DELETE
'''
def post_as_c_vnfId(self):
return "NOT AVAILABLE", 405
def put_as_c_vnfId(self):
return "NOT AVAILABLE", 405
def patch_as_c_vnfId(self):
return "NOT AVAILABLE", 405
def delete_as_c_vnfId(self):
return "NOT AVAILABLE", 405
'''
PATH: /im/as/vnfm/running_vnfm
ACTION: GET
DESCRIPTION: Retrieve the currently running vnfm driver in
the access subsystem.
ARGUMENT: --
RETURN: - 200 (HTTP) + String [1]
- Integer error code (HTTP)
'''
def get_as_vnfm_running_vnfm(self):
request = IrModels.IrMessage().fromData(IrModels.IrManagement().fromData("AS", "get_as_vnfm_running_vnfm", None), "AS", "IM")
driver = self.__asIr.sendMessage(request)
if type(driver.messageData) == tuple:
return "ERROR CODE #3 (AS): IM/AS ERROR DURING VNFM DRIVER INSTANCE OPERATION (" + str(driver.messageData[1]) + ")", 400
return json.dumps(driver.messageData)
'''
PATH: /im/as/vnfm/running_vnfm
N/A ACTIONS: POST, PUT, PATCH, DELETE
'''
def post_as_vnfm_running_vnfm(self):
return "NOT AVAILABLE", 405
def put_as_vnfm_running_vnfm(self):
return "NOT AVAILABLE", 405
def patch_as_vnfm_running_vnfm(self):
return "NOT AVAILABLE", 405
def delete_as_vnfm_running_vnfm(self):
return "NOT AVAILABLE", 405
'''
PATH: /im/as/vnfm/running_vnfm/{vnfmId}
ACTION: GET
DESCRIPTION: Return "True" if a required VNF manager is
running, or "False" if it is not.
ARGUMENT: --
RETURN: - 200 (HTTP) + Boolean [1]
- Integer | |
import json
import re
from struct import unpack
BLEND_MODE = (
'normal',
'additive',
'multiply',
'screen'
)
ATTACHMENT_TYPE = (
'region',
'boundingbox',
'mesh',
'skinnedmesh'
)
scale = 1
class BinaryStream:
def __init__(self, base_stream):
self.base_stream = base_stream
def readBool(self):
return self.readByte() != 0
def readByte(self):
return ord(self.base_stream.read(1))
def readBytes(self, length):
return self.base_stream.read(length)
def readInt(self):
b = self.readByte()
result = b & 0x7f
if (b & 0x80):
b = self.readByte()
result |= (b & 0x7f) << 7
if (b & 0x80):
b = self.readByte()
result |= (b & 0x7f) << 14
if (b & 0x80):
b = self.readByte()
result |= (b & 0x7f) << 21
if (b & 0x80):
b = self.readByte()
result |= (b & 0x7f) << 28
result -= 0xffffffff + 1
return result
def readIntArray(self):
arr = [self.readInt() for i in range(self.readInt())]
return arr
def readShort(self):
return self.unpack('>H', 2)
def readShortArray(self):
arr = [self.readShort() for i in range(self.readInt())]
return arr
def readFloat(self):
return self.unpack('>f', 4)
def readFloatArray(self):
arr = [self.readFloat() * scale for i in range(self.readInt())]
return arr
def readString(self):
length = self.readInt()
if length == 0:
return None
if length == 1:
return ""
length -= 1
return self.unpack(str(length) + 's', length).decode()
def readHex(self, length):
return self.readBytes(length).hex()
def unpack(self, fmt, length=1):
return unpack(fmt, self.readBytes(length))[0]
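# Quick illustrative check of the variable-length int decoding above
# (7 data bits per byte; a set high bit means another byte follows):
#
#   import io
#   assert BinaryStream(io.BytesIO(b'\x7f')).readInt() == 127
#   assert BinaryStream(io.BytesIO(b'\x80\x01')).readInt() == 128
#
# Fixed-size values (readShort, readFloat) are read big-endian, per the '>'
# format specifiers passed to unpack.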
def readSkin():
if (slotCnt := stream.readInt()) == 0:
return None
skin = {}
for i in range(slotCnt):
slotIdx = stream.readInt()
slot = {}
for i in range(stream.readInt()):
name = stream.readString()
attachment = readAttachment(name)
slot[name] = attachment
skin[data['slots'][slotIdx]['name']] = slot
return skin
def readAttachment(attachmentName):
if (name := stream.readString()) is None:
name = attachmentName
at = ATTACHMENT_TYPE[stream.readInt()]
if at == 'region':
if (path := stream.readString()) is None:
path = name
region = {}
region['type'] = 'region'
region['name'] = name
region['path'] = path
region['x'] = stream.readFloat() * scale
region['y'] = stream.readFloat() * scale
region['scaleX'] = stream.readFloat()
region['scaleY'] = stream.readFloat()
region['rotation'] = stream.readFloat()
region['width'] = stream.readFloat() * scale
region['height'] = stream.readFloat() * scale
region['color'] = stream.readHex(4)
return region
if at == 'boundingbox':
box = {}
box['type'] = 'boundingbox'
box['name'] = name
box['vertices'] = stream.readFloatArray()
return box
if at == 'mesh':
if (path := stream.readString()) is None:
path = name
mesh = {}
mesh['type'] = 'mesh'
mesh['name'] = name
mesh['path'] = path
mesh['uvs'] = stream.readFloatArray()
mesh['triangles'] = stream.readShortArray()
mesh['vertices'] = stream.readFloatArray()
mesh['color'] = stream.readHex(4)
mesh['hull'] = stream.readInt()
if nonessential:
mesh['edges'] = stream.readIntArray()
mesh['width'] = stream.readFloat() * scale
mesh['height'] = stream.readFloat() * scale
return mesh
if at == 'skinnedmesh':
if (path := stream.readString()) is None:
path = name
skinnedmesh = {}
skinnedmesh['type'] = 'skinnedmesh'
skinnedmesh['name'] = name
skinnedmesh['path'] = path
skinnedmesh['uvs'] = stream.readFloatArray()
skinnedmesh['triangles'] = stream.readShortArray()
skinnedmesh['vertices'] = []
for i in range(stream.readInt()):
skinnedmesh['vertices'].append(stream.readFloat())
skinnedmesh['color'] = stream.readHex(4)
skinnedmesh['hull'] = stream.readInt()
if nonessential:
skinnedmesh['edges'] = stream.readIntArray()
skinnedmesh['width'] = stream.readFloat() * scale
skinnedmesh['height'] = stream.readFloat() * scale
return skinnedmesh
return None
def readAnimation():
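# Timeline type codes as handled below -- slots: 4 = color, 3 = attachment;
# bones: 1 = rotate, 0 = scale, 2 = translate, 5 = flipX, 6 = flipY.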
animation = {}
duration = 0
slots = {}
for i in range(stream.readInt()):
slotIdx = stream.readInt()
slotMap = {}
for j in range(stream.readInt()):
timelineType = stream.readByte()
frameCnt = stream.readInt()
if timelineType == 4:
timeline = []
for frameIdx in range(frameCnt):
timeline.append({})
timeline[frameIdx]['time'] = stream.readFloat()
timeline[frameIdx]['color'] = stream.readHex(4)
if frameIdx < frameCnt - 1:
readCurve(frameIdx, timeline)
slotMap['color'] = timeline
elif timelineType == 3:
timeline = []
for frameIdx in range(frameCnt):
timeline.append({})
timeline[frameIdx]['time'] = stream.readFloat()
timeline[frameIdx]['name'] = stream.readString()
slotMap['attachment'] = timeline
duration = max(duration, timeline[frameCnt-1]['time'])
slots[data['slots'][slotIdx]['name']] = slotMap
animation['slots'] = slots
bones = {}
for i in range(stream.readInt()):
boneIdx = stream.readInt()
boneMap = {}
for j in range(stream.readInt()):
timelineType = stream.readByte()
frameCnt = stream.readInt()
if timelineType == 1:
timeline = []
for frameIdx in range(frameCnt):
timeline.append({})
timeline[frameIdx]['time'] = stream.readFloat()
timeline[frameIdx]['angle'] = stream.readFloat()
if frameIdx < frameCnt - 1:
readCurve(frameIdx, timeline)
boneMap['rotate'] = timeline
elif timelineType == 2 or timelineType == 0:
timeline = []
timelineScale = 1
if timelineType == 2:
timelineScale = scale
for frameIdx in range(frameCnt):
timeline.append({})
timeline[frameIdx]['time'] = stream.readFloat()
timeline[frameIdx]['x'] = stream.readFloat()
timeline[frameIdx]['y'] = stream.readFloat()
if frameIdx < frameCnt - 1:
readCurve(frameIdx, timeline)
if timelineType == 0:
boneMap['scale'] = timeline
else:
boneMap['translate'] = timeline
elif timelineType == 5 or timelineType == 6:
timeline = []
for frameIdx in range(frameCnt):
timeline.append({})
timeline[frameIdx]['time'] = stream.readFloat()
if timelineType == 5:
timeline[frameIdx]['x'] = stream.readBool()
elif timelineType == 6:
timeline[frameIdx]['y'] = stream.readBool()
if timelineType == 5:
boneMap['flipX'] = timeline
else:
boneMap['flipY'] = timeline
duration = max(duration, timeline[frameCnt-1]['time'])
bones[data['bones'][boneIdx]['name']] = boneMap
animation['bones'] = bones
ik = {}
for i in range(stream.readInt()):
ikIdx = stream.readInt()
frameCnt = stream.readInt()
timeline = []
for frameIdx in range(frameCnt):
timeline.append({})
timeline[frameIdx]['time'] = stream.readFloat()
timeline[frameIdx]['mix'] = stream.readFloat()
timeline[frameIdx]['bendPositive'] = stream.readBool()
if frameIdx < frameCnt - 1:
readCurve(frameIdx, timeline)
ik[data['ik'][ikIdx]['name']] = timeline
animation['ik'] = ik
ffd = {}
for i in range(stream.readInt()):
skinIdx = stream.readInt()
slotMap = {}
for j in range(stream.readInt()):
slotIdx = stream.readInt()
meshMap = {}
for k in range(stream.readInt()):
meshName = stream.readString()
frameCnt = stream.readInt()
attachment = None
attachments = data['skins'][data['skinsName'][skinIdx]]\
[data['slots'][slotIdx]['name']]
for attachmentName in attachments:
if attachments[attachmentName]['name'] == meshName:
attachment = attachments[attachmentName]
if not attachment:
print("FFD attachment not found: " + meshName);
timeline = []
for frameIdx in range(frameCnt):
time = stream.readFloat()
if attachment['type'] == 'mesh':
vertexCnt = len(attachment['vertices'])
else:
vertexCnt = len(attachment['uvs']) * 3 * 3
# This may be wrong
vertices = [0.0] * vertexCnt
bugFixMultiplicator = 0.1
if (end := stream.readInt()) == 0:
if attachment['type'] == 'mesh':
for verticeIdx in range(vertexCnt):
vertices[verticeIdx] += attachment['vertices']\
[verticeIdx] * \
bugFixMultiplicator
else:
start = (v := stream.readInt())
end += start
while v < end:
vertices[v] = stream.readFloat() * scale
v += 1
if attachment['type'] == 'mesh':
meshVertices = attachment['vertices']
for v in range(len(vertices)):
vertices[v] += meshVertices[v] * \
bugFixMultiplicator
timeline.append({})
timeline[frameIdx]['time'] = time
timeline[frameIdx]['vertices'] = vertices
if frameIdx < frameCnt - 1:
readCurve(frameIdx, timeline)
meshMap[meshName] = timeline
duration = max(duration, timeline[frameCnt-1]['time'])
slotMap[data['slots'][slotIdx]['name']] = meshMap
ffd[data['skinsName'][skinIdx]] = slotMap
animation['ffd'] = ffd
if (drawOrderCnt := stream.readInt()):
drawOrders = []
for i in range(drawOrderCnt):
drawOrderMap = {}
offsets = []
for j in range(stream.readInt()):
offsetMap = {}
offsetMap['slot'] = data['slots'][stream.readInt()]['name']
offsetMap['offset'] = stream.readInt()
offsets.append(offsetMap)
drawOrderMap['offsets'] = offsets
drawOrderMap['time'] = stream.readFloat()
drawOrders.append(drawOrderMap)
duration = max(duration, drawOrders[drawOrderCnt-1]['time'])
animation['drawOrder'] = drawOrders
if (eventCnt := stream.readInt()):
events = []
for i in range(eventCnt):
events.append({})
time = stream.readFloat()
events[i]['name'] = (name := data['eventsName'][stream.readInt()])
events[i]['int'] = stream.readInt()
events[i]['float'] = stream.readFloat()
events[i]['string'] = stream.readString() if stream.readBool() \
else ""
events[i]['time'] = time
duration = max(duration, events[eventCnt-1]['time'])
animation['events'] = events
return animation
def readCurve(frameIdx, timeline):
if (curve := stream.readByte()) == 1:
timeline[frameIdx]['curve'] = 'stepped'
elif curve == 2:
timeline[frameIdx]['curve'] = [stream.readFloat(), stream.readFloat(),
stream.readFloat(), stream.readFloat()]
def repl(mo):
x = str(mo.group())
a, b = x.split('e-')
c, d = a.split('.')
return '0.' + '0' * (int(b)-1) + c + d
with open('Kalina.skel.txt', 'rb') as f:
stream = BinaryStream(f)
data = {}
# skeleton
data['skeleton'] = {}
data['skeleton']['hash'] = stream.readString()
data['skeleton']['spine'] = stream.readString()
data['skeleton']['width'] = stream.readFloat()
data['skeleton']['height'] = stream.readFloat()
if (nonessential := stream.readBool()):
data['skeleton']['images'] = stream.readString()
# Bones
data['bones'] = []
for i in range(stream.readInt()):
data['bones'].append({})
data['bones'][i]['name'] = stream.readString()
data['bones'][i]['parent'] = None if (parentIdx := stream.readInt()-1) == -1 \
else data['bones'][parentIdx]['name']
data['bones'][i]['x'] = stream.readFloat() * scale
data['bones'][i]['y'] = stream.readFloat() * scale
data['bones'][i]['scaleX'] = stream.readFloat()
data['bones'][i]['scaleY'] = stream.readFloat()
data['bones'][i]['rotation'] = stream.readFloat()
data['bones'][i]['length'] = stream.readFloat() * scale
data['bones'][i]['flipX'] = stream.readBool()
data['bones'][i]['flipY'] = stream.readBool()
data['bones'][i]['inheritScale'] = stream.readBool()
data['bones'][i]['inheritRotation'] = stream.readBool()
if nonessential:
data['bones'][i]['color'] = stream.readHex(4)
# Ik Constraints
if (ikCnt := stream.readInt()):
data['ik'] = []
for i in range(ikCnt):
data['ik'].append({})
data['ik'][i]['name'] = stream.readString()
data['ik'][i]['bones'] = []
for j in range(stream.readInt()):
data['ik'][i]['bones'].append(data['bones'][stream.readInt()]['name'])
data['ik'][i]['target'] = data['bones'][stream.readInt()]['name']
data['ik'][i]['mix'] = stream.readFloat()
data['ik'][i]['bendPositive'] = stream.readBool()
# Slots
data['slots'] = []
for i in range(stream.readInt()):
data['slots'].append({})
data['slots'][i]['name'] = stream.readString()
data['slots'][i]['bone'] = data['bones'][stream.readInt()]['name']
data['slots'][i]['color'] = stream.readHex(4)
data['slots'][i]['attachment'] = stream.readString()
data['slots'][i]['blend'] = BLEND_MODE[stream.readInt()]
# Default Skin
data['skins'] = {}
data['skinsName'] = []
skins = {}
if (defaultSkin := readSkin()) is not None:
data['skins']['default'] = defaultSkin
data['skinsName'].append('default')
# Skin
for i in range(stream.readInt()):
skinName = stream.readString()
skin = readSkin()
data['skins'][skinName] = skin
data['skinsName'].append(skinName)
# Events
data['events'] = {}
data['eventsName'] = []
| |
### Adds a list of strikes to the current working Strike List.([{id: 'b/b/v/f'}, {id: 'aa/f/h'}])
@staticmethod
def _strikeList_operations_add(self, strike):
"""
Adds a list of strikes to the current working Strike List.([{id: 'b/b/v/f'}, {id: 'aa/f/h'}])
:param strike (list): The list of strikes to add.
list of object with fields
id (java.lang.String): Strike path.
"""
appWrapper = self._wrapper
r = appWrapper.session.post(url='https://' + appWrapper.host + '/bps/api/v2/core/strikeList/operations/add', headers={'content-type': 'application/json'}, data=json.dumps({'strike': strike}), verify=False)
jsonContent = r.content is not None and (r.content.startswith(b'{') or r.content.startswith(b'['))
if(r.status_code == 200):
return json.loads(r.content) if jsonContent else r.content
else:
return {'status_code': r.status_code, 'content': json.loads(r.content) if jsonContent else r.content}
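### Illustrative invocation of the wrapper above ('bps' is a hypothetical
### object exposing these statics and carrying the session in self._wrapper;
### the strike paths are the placeholders from the docstring):
###
###   result = bps._strikeList_operations_add(bps, [{'id': 'b/b/v/f'}, {'id': 'aa/f/h'}])
###   print(result)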
### Adds a flow to the current working SuperFlow
@staticmethod
def _superflow_operations_addAction(self, flowid, name, actionid, source):
"""
Adds a flow to the current working SuperFlow
:param flowid (java.lang.Integer): The flow id.
:param name (java.lang.String): The name of the action definition.
:param actionid (java.lang.Integer): The new action id.
:param source (java.lang.String): The action source.
"""
appWrapper = self._wrapper
r = appWrapper.session.post(url='https://' + appWrapper.host + '/bps/api/v2/core/superflow/operations/addAction', headers={'content-type': 'application/json'}, data=json.dumps({'flowid': flowid, 'name': name, 'actionid': actionid, 'source': source}), verify=False)
jsonContent = r.content is not None and (r.content.startswith(b'{') or r.content.startswith(b'['))
if(r.status_code == 200):
return json.loads(r.content) if jsonContent else r.content
else:
return {'status_code': r.status_code, 'content': json.loads(r.content) if jsonContent else r.content}
### null
@staticmethod
def _network_operations_list(self, userid, clazz, sortorder, sort, limit, offset):
"""
:param userid (string):
:param clazz (string):
:param sortorder (string):
:param sort (string):
:param limit (integer):
:param offset (integer):
:return returnArg (list):
list of object with fields
name (string):
type (string):
author (string):
createdOn (string):
"""
appWrapper = self._wrapper
r = appWrapper.session.post(url='https://' + appWrapper.host + '/bps/api/v2/core/network/operations/list', headers={'content-type': 'application/json'}, data=json.dumps({'userid': userid, 'clazz': clazz, 'sortorder': sortorder, 'sort': sort, 'limit': limit, 'offset': offset}), verify=False)
jsonContent = r.content is not None and (r.content.startswith(b'{') or r.content.startswith(b'['))
if(r.status_code == 200):
return json.loads(r.content) if jsonContent else r.content
else:
return {'status_code': r.status_code, 'content': json.loads(r.content) if jsonContent else r.content}
### Removes a SuperFlow from the current working Application Profile.
@staticmethod
def _appProfile_operations_remove(self, superflow):
"""
Removes a SuperFlow from the current working Application Profile.
:param superflow (string): The name of the super flow.
"""
appWrapper = self._wrapper
r = appWrapper.session.post(url='https://' + appWrapper.host + '/bps/api/v2/core/appProfile/operations/remove', headers={'content-type': 'application/json'}, data=json.dumps({'superflow': superflow}), verify=False)
jsonContent = r.content is not None and (r.content.startswith(b'{') or r.content.startswith(b'['))
if(r.status_code == 200):
return json.loads(r.content) if jsonContent else r.content
else:
return {'status_code': r.status_code, 'content': json.loads(r.content) if jsonContent else r.content}
### Sets the card mode of a board
@staticmethod
def _topology_operations_setCardMode(self, board, mode):
"""
Sets the card mode of a board
:param board (java.lang.Integer):
:param mode (java.lang.Integer): the new mode: 10 for BPS-L23, 7 for BPS-L47, 3 for Non-BPS
"""
appWrapper = self._wrapper
r = appWrapper.session.post(url='https://' + appWrapper.host + '/bps/api/v2/core/topology/operations/setCardMode', headers={'content-type': 'application/json'}, data=json.dumps({'board': board, 'mode': mode}), verify=False)
jsonContent = r.content is not None and (r.content.startswith(b'{') or r.content.startswith(b'['))
if(r.status_code == 200):
return json.loads(r.content) if jsonContent else r.content
else:
return {'status_code': r.status_code, 'content': json.loads(r.content) if jsonContent else r.content}
### Sets the card speed of a board
@staticmethod
def _topology_operations_setCardSpeed(self, board, speed):
"""
Sets the card speed of a board
:param board (java.lang.Integer):
:param speed (java.lang.Integer):
"""
appWrapper = self._wrapper
r = appWrapper.session.post(url='https://' + appWrapper.host + '/bps/api/v2/core/topology/operations/setCardSpeed', headers={'content-type': 'application/json'}, data=json.dumps({'board': board, 'speed': speed}), verify=False)
jsonContent = r.content is not None and (r.content.startswith(b'{') or r.content.startswith(b'['))
if(r.status_code == 200):
return json.loads(r.content) if jsonContent else r.content
else:
return {'status_code': r.status_code, 'content': json.loads(r.content) if jsonContent else r.content}
### Sets the card fanout of a board
@staticmethod
def _topology_operations_setCardFanout(self, board, fanout):
"""
Sets the card fanout of a board
:param board (java.lang.Integer):
:param fanout (java.lang.Integer):
"""
appWrapper = self._wrapper
r = appWrapper.session.post(url='https://' + appWrapper.host + '/bps/api/v2/core/topology/operations/setCardFanout', headers={'content-type': 'application/json'}, data=json.dumps({'board': board, 'fanout': fanout}), verify=False)
jsonContent = r.content is not None and (r.content.startswith(b'{') or r.content.startswith(b'['))
if(r.status_code == 200):
return json.loads(r.content) if jsonContent else r.content
else:
return {'status_code': r.status_code, 'content': json.loads(r.content) if jsonContent else r.content}
### Sets the card fanout of a board
@staticmethod
def _topology_operations_setPerfAcc(self, board, perfacc):
"""
Sets the card fanout of a board
:param board (java.lang.Integer):
:param perfacc (java.lang.Boolean):
"""
appWrapper = self._wrapper
r = appWrapper.session.post(url='https://' + appWrapper.host + '/bps/api/v2/core/topology/operations/setPerfAcc', headers={'content-type': 'application/json'}, data=json.dumps({'board': board, 'perfacc': perfacc}), verify=False)
jsonContent = r.content is not None and (r.content.startswith(b'{') or r.content.startswith(b'['))
if(r.status_code == 200):
return json.loads(r.content) if jsonContent else r.content
else:
return {'status_code': r.status_code, 'content': json.loads(r.content) if jsonContent else r.content}
### null
@staticmethod
def _network_operations_saveAs(self, name, regenerateOldStyle=True, force=False):
"""
:param name (string): The new name given for the current working network config
:param regenerateOldStyle (boolean): Force to apply the changes made on the loaded network configuration. Force to generate a network from the old one.
:param force (boolean): Force to save the network config. It replaces a pre-existing config having the same name.
"""
appWrapper = self._wrapper
r = appWrapper.session.post(url='https://' + appWrapper.host + '/bps/api/v2/core/network/operations/saveAs', headers={'content-type': 'application/json'}, data=json.dumps({'name': name, 'regenerateOldStyle': regenerateOldStyle, 'force': force}), verify=False)
jsonContent = r.content is not None and (r.content.startswith(b'{') or r.content.startswith(b'['))
if(r.status_code == 200):
return json.loads(r.content) if jsonContent else r.content
else:
return {'status_code': r.status_code, 'content': json.loads(r.content) if jsonContent else r.content}
### null
@staticmethod
def _network_operations_save(self, name=None, regenerateOldStyle=True, force=True):
"""
:param name (string): The new name given for the current working network config
:param regenerateOldStyle (boolean): Force to apply the changes made on the loaded network configuration. Force to generate a network from the old one.
:param force (boolean): Force to save the network config. It replaces a pre-existing config having the same name.
"""
appWrapper = self._wrapper
r = appWrapper.session.post(url='https://' + appWrapper.host + '/bps/api/v2/core/network/operations/save', headers={'content-type': 'application/json'}, data=json.dumps({'name': name, 'regenerateOldStyle': regenerateOldStyle, 'force': force}), verify=False)
jsonContent = r.content is not None and (r.content.startswith(b'{') or r.content.startswith(b'['))
if(r.status_code == 200):
return json.loads(r.content) if jsonContent else r.content
else:
return {'status_code': r.status_code, 'content': json.loads(r.content) if jsonContent else r.content}
### null
@staticmethod
def _appProfile_operations_exportAppProfile(self, name, attachments, filepath):
"""
:param name (java.lang.String): The name of the test model to be exported.
:param attachments (java.lang.Boolean): True if object attachments are needed.
:param filepath (java.lang.String): The local path where to save the exported object.
"""
appWrapper = self._wrapper
r = appWrapper.session.post(url='https://' + appWrapper.host + '/bps/api/v2/core/appProfile/operations/exportAppProfile', headers={'content-type': 'application/json'}, data=json.dumps({'name': name, 'attachments': attachments, 'filepath': filepath}), verify=False)
jsonContent = r.content is not None and (r.content.startswith(b'{') or r.content.startswith(b'['))
if(r.status_code == 200):
with open(filepath, 'wb') as fd:
for chunk in r.iter_content(chunk_size=1024):
fd.write(chunk)
fd.close()
r.close()
return {'status_code': r.status_code, 'content': 'success'}
else:
return {'status_code': r.status_code, 'content': json.loads(r.content) if jsonContent else r.content}
### Imports a test model, given as a file.
@staticmethod
def _testmodel_operations_importModel(self, name, filename, force):
"""
Imports a test model, given as a file.
:param name (java.lang.String): The name of the object being imported
:param filename (java.lang.String): The file containing the object
:param force (java.lang.Boolean): Force to import the file and the object having the same name will be replaced.
"""
appWrapper = self._wrapper
files = {'file': (name, open(filename, 'rb'), 'application/xml')}
r = appWrapper.session.post(url='https://' + appWrapper.host + '/bps/api/v2/core/testmodel/operations/importModel', files=files, data={'fileInfo':str({'name': name, 'filename': filename, 'force': force})}, verify=False)
jsonContent = r.content is not None and (r.content.startswith(b'{') or r.content.startswith(b'['))
if(r.status_code == 200):
return json.loads(r.content) if jsonContent else r.content
else:
return {'status_code': r.status_code, 'content': json.loads(r.content) if jsonContent else r.content}
### Imports an application profile, given as a file.
@staticmethod
def _appProfile_operations_importAppProfile(self, name, filename, force):
"""
Imports an application profile, given as a file.
:param name (java.lang.String): The name of the object being imported
:param filename (java.lang.String): The file containing the object
:param force (java.lang.Boolean): Force to import the file and the object having the same name will be replaced.
"""
appWrapper = self._wrapper
files = {'file': (name, open(filename, 'rb'), 'application/xml')}
r = appWrapper.session.post(url='https://' + appWrapper.host + '/bps/api/v2/core/appProfile/operations/importAppProfile', files=files, data={'fileInfo':str({'name': name, 'filename': filename, 'force': force})}, verify=False)
jsonContent = r.content is not None and (r.content.startswith(b'{') or r.content.startswith(b'['))
if(r.status_code == 200):
return json.loads(r.content) if jsonContent else r.content
else:
return {'status_code': r.status_code, 'content': json.loads(r.content) if jsonContent else r.content}
### Imports a network neighborhood model, given as a file.
@staticmethod
def _network_operations_importNetwork(self, name, filename, force):
"""
Imports a network neighborhood model, given as a file.
:param name (java.lang.String): The name of the object being imported
:param filename (java.lang.String): The file containing the | |
# -*- coding: utf8 -*-
#
# Module PLOT
#
# Part of Nutils: open source numerical utilities for Python. Jointly developed
# by HvZ Computational Engineering, TU/e Multiscale Engineering Fluid Dynamics,
# and others. More info at http://nutils.org <<EMAIL>>. (c) 2014
"""
The plot module aims to provide a consistent interface to various plotting
backends. At this point `matplotlib <http://matplotlib.org/>`_ and `vtk
<http://vtk.org>`_ are supported.
"""
from __future__ import print_function, division
from . import numpy, log, core, cache, numeric, _
import os, warnings, sys, subprocess
class BasePlot( object ):
'base class for plotting objects'
def __init__ ( self, name=None, ndigits=0, index=None, outdir=None ):
'constructor'
self.path = outdir or core.getoutdir()
self.name = name
self.index = index
self.ndigits = ndigits
def getpath( self, name, index, ext ):
if name is None:
name = self.name
if index is None:
index = self.index
if self.ndigits and index is None:
index = _getnextindex( self.path, name, ext )
if index is not None:
name += str(index).rjust( self.ndigits, '0' )
name += '.' + ext
log.path( name )
return os.path.join( self.path, name )
def __enter__( self ):
'enter with block'
assert self.name, 'name must be set to use as with-context'
return self
def __exit__( self, exc_type, exc_value, exc_tb ):
'exit with block'
if not exc_type:
self.save( self.name, self.index )
try:
self.close()
except Exception as e:
log.error( 'failed to close:', e )
def __del__( self ):
try:
self.close()
except Exception as e:
log.error( 'failed to close:', e )
def save( self, name=None, index=None ):
pass
def close( self ):
pass
class PyPlot( BasePlot ):
'matplotlib figure'
def __init__( self, name=None, imgtype=None, ndigits=3, index=None, **kwargs ):
'constructor'
import matplotlib
matplotlib.use( 'Agg', warn=False )
from matplotlib import pyplot
BasePlot.__init__( self, name, ndigits=ndigits, index=index )
self.imgtype = imgtype or core.getprop( 'imagetype', 'png' )
self._fig = pyplot.figure( **kwargs )
self._pyplot = pyplot
def __enter__( self ):
'enter with block'
# make this figure active
self._pyplot.figure(self._fig.number)
return super( PyPlot, self ).__enter__()
def __getattr__( self, attr ):
pyplot = self.__dict__['_pyplot'] # avoid recursion
return getattr( pyplot, attr )
def close( self ):
'close figure'
if not self._fig:
return # already closed
try:
self._pyplot.close( self._fig )
except Exception as e:
log.warning( 'failed to close figure: {}'.format(e) )
self._fig = None
def save( self, name=None, index=None ):
'save images'
assert self._fig, 'figure is closed'
for ext in self.imgtype.split( ',' ):
self.savefig( self.getpath(name,index,ext) )
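# Typical usage sketch (placeholder name and data; assumes a working nutils
# environment):
#
#   with PyPlot( 'convergence', ndigits=3 ) as plt:
#     plt.loglog( nelems, errors, 'o-' )
#     plt.xlabel( 'elements' )
#     plt.ylabel( 'error' )
#
# Leaving the with-block calls save(), which writes one image per extension in
# self.imgtype, and then close(), which releases the matplotlib figure.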
def segments( self, points, color='black', **kwargs ):
'plot line'
segments = numpy.concatenate( [ numpy.array([xy[:-1],xy[1:]]).swapaxes(0,1) for xy in points ], axis=0 )
from matplotlib.collections import LineCollection
lc = LineCollection( segments, **kwargs )
ax = self.gca()
ax.add_collection( lc )
if isinstance( color, str ):
lc.set_color( color )
else:
array = numpy.concatenate( [ .5 * ( v[:-1] + v[1:] ) for v in color ], axis=0 )
lc.set_array( array )
self.sci( lc )
return lc
def mesh( self, points, values=None, edgecolors='k', edgewidth=.1, mergetol=0, setxylim=True, aspect='equal', tight=True, **kwargs ):
'plot elemtwise mesh'
kwargs.pop( 'triangulate', None ) # ignore deprecated argument
if not isinstance( points, numpy.ndarray ) and points[0].shape[1] == 1: # line plot
if values is not None:
self.segments( [ numpy.concatenate( [x,y[:,_]], axis=1 ) for x, y in zip( points, values ) ], values )
return
if isinstance( points, numpy.ndarray ): # bulk data
assert points.shape[-1] == 2
import matplotlib.tri
tri = matplotlib.tri.Triangulation( *points.reshape(-1,2).T )
edgecolors = 'none'
if values is not None:
values = values.ravel()
else: # mesh data
tri, edges = triangulate( points, mergetol )
if values is not None:
values = numpy.concatenate( values, axis=0 )
if values is not None:
self.tripcolor( tri, values, shading='gouraud', **kwargs )
if edgecolors != 'none':
self.segments( edges, linewidth=edgewidth )
if aspect:
( self.aspect if tight else self.axis )( aspect )
if setxylim:
self.autoscale( enable=True, axis='both', tight=True )
return tri
def aspect( self, *args, **kwargs ):
self.gca().set_aspect( *args, **kwargs )
def tripcolor( self, *args, **kwargs ):
import matplotlib.tri
assert len(args) >= 2
if isinstance( args[0], numpy.ndarray ) and isinstance( args[1], numpy.ndarray ):
# args = x, y[, triangles[, mask]], values
tri = matplotlib.tri.Triangulation( *args[:-1] )
values = args[-1]
else:
assert len(args) == 2
tri, values = args
if not isinstance( tri, matplotlib.tri.Triangulation ):
tri, edges = triangulate( tri, kwargs.pop( 'mergetol', 0 ) )
if not isinstance( values, numpy.ndarray ):
values = numpy.concatenate( values, axis=0 )
assert len(tri.x) == len(values)
mask = ~numpy.isfinite( values )
if mask.any():
tri = matplotlib.tri.Triangulation( tri.x, tri.y, tri.triangles, mask[tri.triangles].any(axis=1) )
return self._pyplot.tripcolor( tri, values, **kwargs )
def tricontour( self, tri, values, every=None, levels=None, mergetol=0, **kwargs ):
assert not every or levels is None, '"every" and "levels" arguments are mutually exclusive'
import matplotlib.tri
if not isinstance( tri, matplotlib.tri.Triangulation ):
tri, edges = triangulate( tri, mergetol )
if not isinstance( values, numpy.ndarray ):
values = numpy.concatenate( values, axis=0 )
assert len(tri.x) == len(values)
if every:
levels = numpy.arange( int(min(values)/every), int(max(values)/every)+1 ) * every
return self._pyplot.tricontour( tri, values, levels=levels, **kwargs )
def streamplot( self, tri, velo, spacing, bbox=None, mergetol=1e-5, linewidth=None, color=None, **kwargs ):
if isinstance( spacing, numpy.ndarray ):
# compatibility with original streamplot function definition
x = tri
y = velo
u = spacing
v = bbox
else:
import matplotlib.tri
if not isinstance( tri, matplotlib.tri.Triangulation ):
tri, edges = triangulate( tri, mergetol=mergetol )
if not isinstance( velo, numpy.ndarray ):
velo = numpy.concatenate( velo, axis=0 )
assert len(tri.x) == len(velo)
if bbox is None:
xlim = min(tri.x), max(tri.x)
ylim = min(tri.y), max(tri.y)
else:
xlim, ylim = bbox
nx = int( ( xlim[-1] - xlim[0] ) / spacing )
ny = int( ( ylim[-1] - ylim[0] ) / spacing )
assert nx > 0 and ny > 0
x = .5 * (xlim[0]+xlim[-1]) + ( numpy.arange(nx) - (nx-1)/2 ) * spacing
y = .5 * (ylim[0]+ylim[-1]) + ( numpy.arange(ny) - (ny-1)/2 ) * spacing
uv = interpolate( tri, numeric.meshgrid(x,y).T, velo, mergetol=mergetol )
u = uv[...,0]
v = uv[...,1]
assert isinstance( x, numpy.ndarray ) and x.ndim == 1
assert isinstance( y, numpy.ndarray ) and y.ndim == 1
assert isinstance( u, numpy.ndarray ) and u.shape == (len(y),len(x))
assert isinstance( v, numpy.ndarray ) and v.shape == (len(y),len(x))
if linewidth is not None and linewidth < 0: # convention: negative linewidth is scaled with velocity magnitude
linewidth = -linewidth * numpy.sqrt( u**2 + v**2 )
if color is None: # default: color mapped to velocity magnitude
color = numpy.sqrt( u**2 + v**2 )
return self._pyplot.streamplot( x, y, u, v, linewidth=linewidth, color=color, **kwargs )
def polycol( self, verts, facecolors='none', **kwargs ):
'add polycollection'
from matplotlib import collections
if facecolors != 'none':
assert isinstance(facecolors,numpy.ndarray) and facecolors.shape == (len(verts),)
array = facecolors
facecolors = None
polycol = collections.PolyCollection( verts, facecolors=facecolors, **kwargs )
if facecolors is None:
polycol.set_array( array )
self.gca().add_collection( polycol )
self.sci( polycol )
return polycol
def slope_marker( self, x, y, slope=None, width=.2, xoffset=0, yoffset=.2, color='0.5' ):
'slope marker'
ax = self.gca()
if slope is None:
x_, x = x[-2:]
y_, y = y[-2:]
slope = numpy.log(y/y_) / numpy.log(x/x_)
slope = numpy.round( slope * 100 ) / 100.
if float(slope) > 0:
width = -width
xscale = ax.get_xscale()
xmin, xmax = ax.get_xlim()
if xscale == 'linear':
W = ( xmax - xmin ) * width
x0 = x - W
xc = x - .5 * W
elif xscale == 'log':
W = numpy.log10( xmax / xmin ) * width
x0 = x * 10**-W
xc = x * 10**(-.5*W)
else:
raise Exception( 'unknown x-axis scale %r' % xscale )
yscale = ax.get_yscale()
H = W * float(slope)
if yscale == 'linear':
y0 = y - H
yc = y - .5 * H
elif yscale == 'log':
y0 = y * 10**-H
yc = y * 10**(-.5*H)
else:
raise Exception( 'unknown y-axis scale %r' % yscale )
from matplotlib import transforms
dpi = self.gcf().dpi_scale_trans
shifttrans = ax.transData + transforms.ScaledTranslation( xoffset, numpy.sign(H) * yoffset, dpi )
triangle = self.Polygon( [ (x0,y0), (x,y), (xc,y) ], closed=False, ec=color, fc='none', transform=shifttrans )
ax.add_patch( triangle )
self.text( xc, yc, str(slope), color=color,
horizontalalignment = 'right' if W > 0 | |
"""
Author: <NAME> based heavily upon <NAME>'s tracking code
from UCLA REU (camera.py).
"""
# Import NumPy Libraries
import numpy as np
from numpy import average as avg
from numpy import subtract as sub
# Import System Libraries
import math
# Import OpenCV Libraries
import cv2
def update(x):
""" Get track bar position and update its value. """
global VAL
global MIN_SIZE
global MAX_SIZE
global MIN_VAL
global MAX_VAL
global DARKNESS_THRESHOLD
VAL = cv2.getTrackbarPos('VAL', 'trackFrame')
MIN_SIZE = cv2.getTrackbarPos('MIN_SIZE', 'trackFrame')
MAX_SIZE = cv2.getTrackbarPos('MAX_SIZE', 'trackFrame')
MIN_VAL = cv2.getTrackbarPos('MIN_VAL', 'trackFrame')
MAX_VAL = cv2.getTrackbarPos('MAX_VAL', 'trackFrame')
DARKNESS_THRESHOLD = cv2.getTrackbarPos('DARKNESS_THRESHOLD', 'trackFrame')
""" Create window and track bars. """
cv2.namedWindow('trackFrame', cv2.WINDOW_NORMAL)
cv2.createTrackbar('VAL', 'trackFrame', VAL, 200, update)
cv2.createTrackbar('MIN_SIZE', 'trackFrame', MIN_SIZE, 2000, update)
cv2.createTrackbar('MAX_SIZE', 'trackFrame', MAX_SIZE, 4000, update)
cv2.createTrackbar('MIN_VAL', 'trackFrame', MIN_VAL, 200, update)
cv2.createTrackbar('MAX_VAL', 'trackFrame', MAX_VAL, 2000, update)
cv2.createTrackbar('DARKNESS_THRESHOLD', 'trackFrame',
DARKNESS_THRESHOLD, 100, update)
class RobotData(object):
"""
Robot Data class
Attributes
----------
center : Set of integers
The center position of the robot.
orientation : float
Something
updated : bool
The state of the classes attributes. If false, it will allow for
future value updates.
Methods
-------
update(center, angle)
Updates all the attributes of the class.
reset()
Resets the updated boolean attribute for future value updates.
"""
def __init__(self, center, orientation):
self.center = center
self.orientation = orientation
self.updated = True
# center is an (x,y) tuple, orientation is an angle in degrees measured
# from the positive x axis, frame is a number which designates which frame
# the robot is in, and updated is a boolean which tells if that particular
# robot has been updated
def __repr__(self):
new_center = integerize(
(self.center[0] * CMperPIXEL, self.center[1] * CMperPIXEL))
return ("Robot at " + str(new_center) + " with orientation " +
str(self.orientation) + ".")
def update(self, updated_center, updated_angle):
self.center = updated_center
self.orientation = updated_angle
self.updated = True
def reset(self):
self.updated = False
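# Minimal illustration of the RobotData lifecycle (coordinates and angle are
# made-up values):
#
#   r = RobotData((120, 80), 1.57)  # detected at (120, 80), facing ~90 degrees
#   r.update((125, 82), 1.60)       # new center and orientation for this frame
#   r.reset()                       # clear the updated flag before the next pass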
def threshold(src, value=None):
""" Apply a binary threshold; use the global VAL when no explicit value is given. """
ret, thresh = cv2.threshold(src, value if value is not None else VAL, 255, cv2.THRESH_BINARY)
return thresh
def findAprilTags(thresh, img):
# Contouring
contourImage, contours, hierarchy = cv2.findContours(
thresh, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
return list(filter(lambda c: isTag(c, img), contours))
def isTag(c, img):
""" Determines if the image is a tag, based on its area and intensity. """
return (MIN_SIZE < cv2.contourArea(c) < MAX_SIZE) and \
(MIN_VAL < averageValue(c) < MAX_VAL) and \
goodAspectRatio(c)
def goodAspectRatio(c):
_, (width, height), _ = cv2.minAreaRect(c)
aspect_ratio = max([width / height, height / width])
return 1 < aspect_ratio < 2
def averageValue(img):
height, width = img.shape[:2]
val = avg(img.sum(axis=0).sum(axis=0))
return val / (height * width)
def drawTags(tagList, img):
""" Draw all contours in red, with thickness 2 """
cv2.drawContours(img, tagList, -1, DARK_RED, 2)
def drawCorners(bottom, left, top, right, img):
""" Draw contours in varying colors on an OpenCV image. """
# Draw all contours in green, with thickness 2.
cv2.circle(img, tuple(bottom), 1, GREEN, 2)
# Draw all contours in blue, with thickness 2.
cv2.circle(img, tuple(top), 1, BLUE, 2)
# Draw all contours in dark red, with thickness 2.
cv2.circle(img, tuple(left), 1, DARK_RED, 2)
# Draw all contours in a custom color, with thickness 2.
cv2.circle(img, tuple(right), 1, CUSTOM_COLOR, 2)
def drawRobots(img):
""" Marks each robot with its index and angle on an OpenCV image. """
arrow_length = 22
for index in Robots:
new_center = integerize(Robots[index].center)
angle = Robots[index].orientation
if index in MAPPING and MAPPING[index] != 'l':
cv2.circle(img, integerize(new_center), 2, RED, 4)
# Robot's name
cv2.putText(img, str(index), (new_center[0] + 28,
new_center[1]),
cv2.FONT_HERSHEY_SIMPLEX,
.7, CUSTOM_COLOR, 2)
if index in MAPPING and MAPPING[index] != 'l':
p2 = integerize((new_center[0] + arrow_length * math.cos(angle),
new_center[1] - arrow_length * math.sin(angle)))
cv2.line(img, new_center, p2, (255, 255, 0), 2, 2)
def updateDict(tag_list, img, thresh):
global Robots
tag_views = []
for tag in tag_list:
rect = cv2.minAreaRect(tag)
tag_img = getTagImg(tag, rect, img)
id_matrix = identify(tag_img)
# Get's the commands from the message to see if they are for
# the gripper.
if id_matrix is None:
continue
index = matrixToIndex(id_matrix)
if index is None:
continue
tag_views.append(tag_img)
angle = calculateAngle(tag, rect, id_matrix)
Robots[index] = RobotData(rect[0], angle)
# remove any robots from our list that were not updated
Robots = {key: rob for key, rob in Robots.items() if rob.updated}
for r in Robots.values():
r.reset()
return tag_views
def getTagImg(tag, rect, img):
"""
Extracts the image of the tag from the main image, and rotates it
appropriately.
"""
bottom, left, top, right = cv2.boxPoints(rect)
# drawCorners(bottom, left, top, right, imageTrack)
try:
if dist(left, top) < dist(left, bottom):
pos_slope = False
theta = math.atan((left[1] - bottom[1]) / (left[0] - bottom[0]))
else:
pos_slope = True
theta = math.atan((right[1] - bottom[1]) / (right[0] - bottom[0]))
except ZeroDivisionError:
theta = math.atan(float('inf')) # slope is pi/2
height = dist(right, bottom)
width = dist(right, top)
if pos_slope:
width, height = height, width
f_center = rect[0][0], rect[0][1]
return subimage(img, f_center, theta, width, height)
# Developed from code by user xaedes of stack overflow
# http://stackoverflow.com/questions/11627362/how-to-straighten-a-rotated-rectangle-area-of-an-image-using-opencv-in-python
def subimage(image, center, theta, width, height):
v_x = (np.cos(theta), np.sin(theta))
v_y = (-np.sin(theta), np.cos(theta))
s_x = center[0] - v_x[0] * (width / 2) - v_y[0] * (height / 2)
s_y = center[1] - v_x[1] * (width / 2) - v_y[1] * (height / 2)
mapping = np.array([[v_x[0], v_y[0], s_x],
[v_x[1], v_y[1], s_y]])
return cv2.warpAffine(image, mapping, (int(width), int(height)),  # dsize must be integral
flags=cv2.WARP_INVERSE_MAP,
borderMode=cv2.BORDER_REPLICATE)
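# A minimal, hypothetical usage sketch of subimage() (not part of the original
# tracker): cut an axis-aligned 40x20 patch whose centre sits at (100, 80) and
# which is rotated by 10 degrees in the source frame. Assumes the cv2, np and
# math imports already present in this module.
def _demoSubimage(frame):
    """Illustrative only; returns the deskewed patch."""
    return subimage(frame, center=(100, 80), theta=math.radians(10),
                    width=40, height=20)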
def identify(img):
XBUF = 6 # pixels of buffer zone
YBUF = 3
matrix = np.zeros((VCELLS, HCELLS), dtype=bool)
threshed = threshold(cv2.cvtColor(img, cv2.COLOR_BGR2GRAY), 132)
h, w, _ = np.shape(img)
x, y = 1, 1
dx = int((w - 2 * XBUF) / float(HCELLS))
dy = int((h - 2 * YBUF) / float(VCELLS))
for i in range(HCELLS):
for j in range(VCELLS):
# Because we're interested in the white squares now
white = not isBlack(threshed,
(x + XBUF + i * dx, y + YBUF + j * dy),
(x + XBUF + (i + 1) * dx,
y + YBUF + (j + 1) * dy), dx * dy, img)
if white is not None:
matrix[j, i] = white
else:
return None
return matrix
def largestContour(contour_list):
contour = None
size = 0
for current in contour_list:
current_area = cv2.contourArea(current)
if current_area > size:
contour = current
size = current_area
return contour
def isBlack(img, p1, p2, area, defacing):
# dark squares will have an intensity below this percentage
DT = DARKNESS_THRESHOLD / 100.0
intensity = 0
p1, p2 = integerize(p1), integerize(p2)
for x in range(p1[0], p2[0]):
for y in range(p1[1], p2[1]):
intensity += bool(img[y, x])
if x in (p1[0], p2[0] - 1) or y in (p1[1], p2[1] - 1):
defacing[y, x] = RED
if area == 0:
return None # this means that we are picking up some edge motion
filled = (intensity / float((p2[1] - p1[1]) * (p2[0] - p1[0]))) < DT
return filled
def dist(p1, p2):
""" Calculates the cartesian distance between two numpy array points. """
return np.linalg.norm(sub(p1, p2))
def calculateAngle(tag, rect, id_matrix):
bottom, left, top, right = cv2.boxPoints(rect)
drawCorners(bottom, left, top, right, imageTrack)
if dist(left, top) < dist(left, bottom):
if left[0] == bottom[0]:
theta = math.atan(-float('inf')) # avoid division by zero
else:
theta = math.atan2((bottom[1] - left[1]), (left[0] - bottom[0]))
theta -= math.pi / 2
else:
if right[0] == bottom[0]:
theta = math.atan(-float('inf')) # avoid division by zero
else:
theta = math.atan2((bottom[1] - left[1]), (left[0] - bottom[0]))
# Top is light
if id_matrix[0, 0] and id_matrix[1, 0] and id_matrix[2, 0]:
return theta
# Bottom is light
elif id_matrix[0, 2] and id_matrix[1, 2] and id_matrix[2, 2]:
return theta + math.pi
# no else case because any such matrices would already be filtered out by
# matrixToIndex (returns None)
def binaryDigitsToDecimalString(L):
return str(int(''.join([str(int(x)) for x in L]), 2))
def matrixToIndex(matrix):
if np.all(matrix[:, 0]):
index = binaryDigitsToDecimalString(
matrix[:, 2]) + binaryDigitsToDecimalString(matrix[:, 1])
elif np.all(matrix[:, 2]):
index = binaryDigitsToDecimalString(
matrix[:, 0][::-1]) + \
binaryDigitsToDecimalString(matrix[:, 1][::-1])
else:
index = None
return index
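# A hypothetical worked example (not part of the original tracker) of how a
# 3x3 id matrix decodes to a robot index. Column 0 being all white means the
# index is read from columns 2 and 1 as binary digits: 101 -> '5' and
# 010 -> '2', giving '52'. Assumes np is the numpy import used above.
def _demoMatrixToIndex():
    """Illustrative only; returns '52'."""
    m = np.array([[1, 0, 1],
                  [1, 1, 0],
                  [1, 0, 1]], dtype=bool)
    return matrixToIndex(m)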
def fixRobotData(robot_id, tag_center_dist):
global Robots
if robot_id in MAPPING and MAPPING[robot_id] != 'l':
center = Robots[robot_id].center
angle = Robots[robot_id].orientation
true_center = (center[0] + math.cos(angle) * tag_center_dist,
center[1] - math.sin(angle) * tag_center_dist)
Robots[robot_id].center = true_center
#!/usr/bin/env python3
"""Starts the user interface of gmoshui."""
from _version import __version__
from view import mainwindow, progressdialog
from PySide2 import QtCore, QtGui
from PySide2.QtWidgets import *
from PySide2.QtGui import *
from PySide2.QtCore import *
from functools import partial
import workshoputils
import addoninfo
import gmpublish
import gmafile
import sys
import shiboken2 as shiboken
import os
import re
from gmodfolder import GModFolder
class ControlMainWindow(QMainWindow):
"""Spawns the main window"""
def __init__(self, parent=None):
super(ControlMainWindow, self).__init__(parent)
self.ui = mainwindow.Ui_MainWindow()
self.ui.setupUi(self)
self.setWindowTitle("GMosh UI " + __version__)
# Create settings
QtCore.QCoreApplication.setOrganizationName("FPtje")
QtCore.QCoreApplication.setOrganizationDomain("github.com/FPtje/gmosh")
QtCore.QCoreApplication.setApplicationName("gmoshui")
self.ui.settings = QSettings()
def main():
"""Main method"""
app = QApplication(sys.argv)
mysw = ControlMainWindow()
initialiseUI(mysw.ui)
mysw.show()
sys.exit(app.exec_())
def errorMsg(s):
"""Show an error message box"""
msgBox = QMessageBox()
msgBox.setText(s)
msgBox.exec_()
class OutLog:
"""Redirect stdout to ui of program"""
def __init__(self, signal, out=None):
"""
"""
self.signal = signal
self.out = out
def write(self, m):
self.signal.emit(m)
if self.out:
self.out.write(m)
def flush(self):
pass
class WorkBackground(QtCore.QThread):
"""Run something in the background"""
target = id
signal = QtCore.Signal(str)
finished = QtCore.Signal()
def run(self):
oldstdout = sys.stdout
sys.stdout = OutLog(self.signal, sys.stdout)
self.result = self.target()
self.signal.emit("<br /><h3>FINISHED</h3>")
sys.stdout = oldstdout
self.finished.emit()
def createProgressDialog(work, onresult=id):
"""Create progress dialog"""
dialog = QDialog()
ui = progressdialog.Ui_Dialog()
ui.setupUi(dialog)
ui.progressText.clear()
ui.buttonBox.setEnabled(False)
def onThreadOutput(text):
if not shiboken.isValid(ui) or not shiboken.isValid(ui.progressText): return
ui.progressText.moveCursor(QTextCursor.End)
if text[0] == "\r":
#cursor = QTextCursor(ui.progressText.textCursor())
ui.progressText.moveCursor(QTextCursor.StartOfLine, QTextCursor.KeepAnchor)
ui.progressText.moveCursor(QTextCursor.PreviousCharacter, QTextCursor.KeepAnchor)
ui.progressText.insertHtml(text.replace('\n', '<br />'))
def enableButtons():
ui.buttonBox.setEnabled(True)
onresult(thread.result)
thread = WorkBackground()
thread.target = work
thread.signal.connect(onThreadOutput)
thread.finished.connect(enableButtons)
thread.start()
dialog.show()
dialog.exec_()
thread.exit()
del thread
#######
# Addon tools signals
#######
def addRecentAddon(widget, addon):
recentAddons = widget.settings.value("addontools/recentaddons", [])
if not recentAddons: recentAddons = []
if type(recentAddons) is str: recentAddons = [recentAddons]
if addon in recentAddons: return False
recentAddons.insert(0, addon)
widget.settings.setValue("addontools/recentaddons", recentAddons)
return True
def removeRecentAddon(widget, addon):
recentAddons = widget.settings.value("addontools/recentaddons", [])
if type(recentAddons) is str: recentAddons = [recentAddons]
recentAddons.remove(addon)
widget.settings.setValue("addontools/recentaddons", recentAddons)
def moveRecentAddon(widget, fr, to):
recentAddons = widget.settings.value("addontools/recentaddons", [])
if type(recentAddons) is str: recentAddons = [recentAddons]
recentAddons.insert(to, recentAddons.pop(fr))
widget.settings.setValue("addontools/recentaddons", recentAddons)
illegalFilesFoundMessage = """\
<h1>Illegal files found!</h1>
<p>The addon contains some files that are not allowed to be in a GMA file.</p>
<p>These are the illegal files:</p>
%s
<p>It is recommended to either remove those files or to copy paste the above list in the "Files to ignore" box</p>
"""
def addonVerifyClicked(widget, show_ok = True):
verified, badlist = widget.currentAddon.verify_files()
if verified:
if show_ok: errorMsg("No illegal files were found. You're good to go!")
return True
dialog = QDialog()
ui = progressdialog.Ui_Dialog()
ui.setupUi(dialog)
ui.progressText.setText(illegalFilesFoundMessage % '<br />'.join(badlist))
ui.buttonBox.setEnabled(True)
dialog.show()
dialog.exec_()
return False
def addonCreateGMAClicked(widget):
fileName, _ = QFileDialog.getSaveFileName(None,
"Store GMA file", os.path.join(widget.settings.value("addontools/lastgmafolder", ''), 'out.gma'), "GMA files (*.gma)")
if not fileName: return
# Force .gma extension
fileName = os.path.splitext(fileName)[0] + '.gma'
folder, _ = os.path.split(fileName)
# Store last used folder location
widget.settings.setValue("addontools/lastgmafolder", folder)
# Verify the addon
if not addonVerifyClicked(widget, False): return
createProgressDialog(partial(widget.currentAddon.compress, fileName))
def publishNew(widget, publisher):
succeeded, output = publisher.create()
if succeeded:
widget.currentAddon.save_changes()
print("<h1>Upload succeeded!</h1><br />")
print("<p>The addon has been uploaded. Do check it out at </p>")
print('<a href="http://steamcommunity.com/sharedfiles/filedetails/?id=%s">http://steamcommunity.com/sharedfiles/filedetails/?id=%s</a>' % (output, output))
print("<br /><p>Note that you will have to change the visibility of this addon in the above link to make it visible for everyone.</p>")
return
print("<h1>Upload failed!</h1> <br />")
print("<p>The upload has failed! The error message can be read below:</p><br />")
print("<br /><tt>")
print(output)
print("</tt>")
def addonPublishClicked(widget):
if not widget.currentAddon.has_workshop_id() and not widget.currentAddon.getlogo():
errorMsg("Error: When uploading a new addon to the workshop, a 512x512 jpeg image must be given.\n"
"Please either enter a workshop id or provide a 512x512 jpeg image.")
return
if not addonVerifyClicked(widget, False): return
changelog = widget.addonChangelog.toPlainText() or widget.currentAddon.getdefault_changelog() or ''
publisher = gmpublish.GmPublish(widget.currentAddon)
if widget.currentAddon.has_workshop_id():
createProgressDialog(partial(publisher.update, changelog))
return
ok = QMessageBox.warning(None, "Upload new addon", "This will be uploaded as a new addon on the workshop. To update an existing addon, please fill in the workshop ID of that addon. Are you sure you want to upload this addon?", QMessageBox.Yes, QMessageBox.No) == QMessageBox.Yes
if not ok: return
createProgressDialog(partial(publishNew, widget, publisher))
def moveSelectedRecentAddon(widget, direction):
selected = widget.recentAddons.selectedIndexes()
for s in selected:
rowText = s.data()
path = widget.recentAddons.model().itemFromIndex(s).path
item = QStandardItem(rowText)
item.path = path
widget.recentAddons.model().removeRow(s.row())
widget.recentAddons.model().insertRow(s.row() + direction, item)
widget.recentAddons.selectionModel().clearSelection()
widget.recentAddons.selectionModel().select(widget.recentAddons.model().indexFromItem(item),
QItemSelectionModel.Select | QItemSelectionModel.Rows)
moveRecentAddon(widget, s.row(), s.row() + direction)
break
enableRecentAddonsUpDownButtons(widget)
def addonMoveUpClicked(widget):
moveSelectedRecentAddon(widget, -1)
def addonMoveDownClicked(widget):
moveSelectedRecentAddon(widget, 1)
def addRecentFolderClicked(widget):
fileName, _ = QFileDialog.getOpenFileName(None,
"Open addon.json file", widget.settings.value("selectAddonLastFolder", None), "addon.json files (*.json)")
if not fileName: return
folder, _ = os.path.split(fileName)
# Store last used folder location
widget.settings.setValue("selectAddonLastFolder", folder)
try:
addoninfo.get_addon_info(fileName)
except Exception:
errorMsg("%s does not contain valid json!" % fileName)
return
if not addRecentAddon(widget, fileName): return
item = QStandardItem(shortenPath(fileName))
item.path = fileName
widget.recentAddons.model().insertRow(0, item)
widget.recentAddons.selectionModel().clearSelection()
widget.recentAddons.selectionModel().select(widget.recentAddons.model().indexFromItem(item),
QItemSelectionModel.Select | QItemSelectionModel.Rows)
recentFolderSelected(widget, widget.recentAddons.model().indexFromItem(item))
def removeRecentFolderClicked(widget):
selected = widget.recentAddons.selectedIndexes()
for s in selected:
removeRecentAddon(widget, widget.recentAddons.model().itemFromIndex(s).path)
widget.recentAddons.model().removeRow(s.row())
# Select first item
if not widget.recentAddons.model().hasIndex(0, 0): return
widget.recentAddons.selectionModel().clearSelection()
firstItem = widget.recentAddons.model().index(0, 0)
widget.recentAddons.selectionModel().select(firstItem,
QItemSelectionModel.Select | QItemSelectionModel.Rows)
recentFolderSelected(widget, firstItem)
def recentFolderSelected(widget, index):
enableRecentAddonsUpDownButtons(widget)
path = widget.recentAddons.model().itemFromIndex(index).path
try:
addonInfo = addoninfo.get_addon_info(path)
except Exception:
return
widget.currentAddon = addonInfo or addoninfo.GModAddon(dict(), '.')
if not addonInfo: return
widget.addonChangelog.setText(addonInfo.getdefault_changelog())
widget.addonDefaultChangelog.setText(addonInfo.getdefault_changelog())
widget.addonTitle.setText(addonInfo.gettitle())
widget.addonDescription.setText(addonInfo.getdescription())
widget.addonIgnore.setText('\n'.join(addonInfo.getignored()))
tags = addonInfo.gettags()
widget.addonWorkshopid.setValue(addonInfo.getworkshopid())
widget.addonType.setCurrentIndex(widget.addonType.findText(addonInfo.gettype()))
widget.addonTag1.setCurrentIndex(widget.addonTag1.findText(len(tags) > 0 and tags[0] or 'None'))
widget.addonTag2.setCurrentIndex(widget.addonTag2.findText(len(tags) > 1 and tags[1] or 'None'))
widget.addonImage.setText(addonInfo.getlogo())
widget.addonPublish.setEnabled(True)
widget.addonVerify.setEnabled(True)
widget.addonCreateGMA.setEnabled(True)
widget.addonChangelog.setEnabled(True)
widget.addonSave.setEnabled(True)
widget.addonSaveAs.setEnabled(True)
widget.addonReset.setEnabled(True)
def addonSaveClicked(widget):
widget.currentAddon.save_changes()
def addonSaveAsClicked(widget):
fileName, _ = QFileDialog.getSaveFileName(None,
"Store addon.json file", os.path.join(widget.settings.value("addontools/lastsaveasfolder", ''), 'addon.json'), "json files (*.json)")
if not fileName: return
# Force .json extension
fileName = os.path.splitext(fileName)[0] + '.json'
folder, _ = os.path.split(fileName)
# Store last used folder location
widget.settings.setValue("addontools/lastsaveasfolder", folder)
widget.currentAddon.setfile(fileName)
widget.currentAddon.save_changes()
if not addRecentAddon(widget, fileName): return
# Add to recent addons list
item = QStandardItem(shortenPath(fileName))
item.path = fileName
widget.recentAddons.model().insertRow(0, item)
widget.recentAddons.selectionModel().clearSelection()
widget.recentAddons.selectionModel().select(widget.recentAddons.model().indexFromItem(item),
QItemSelectionModel.Select | QItemSelectionModel.Rows)
def addonResetClicked(widget):
selected = widget.recentAddons.selectedIndexes()
for s in selected:
recentFolderSelected(widget, s)
break
def updateAddonInfo(widget, key, target, fnvalue, *args):
value = fnvalue(target) if callable(fnvalue) else fnvalue
if not value:
if key in widget.currentAddon.data:
widget.currentAddon.data.pop(key)
else:
widget.currentAddon.data[key] = value
def updateAddonTags(widget, val):
tag1 = widget.addonTag1.currentText()
tag2 = widget.addonTag2.currentText()
tags = []
if tag1 and tag1 != 'None': tags.append(tag1)
if tag2 and tag2 != 'None': tags.append(tag2)
widget.currentAddon.data['tags'] = tags
def selectAddonImage(widget):
fileName, _ = QFileDialog.getOpenFileName(None,
"Open jpg file", widget.settings.value("addontools/lastlogofolder", None), "jpeg files (*.jpg *.jpeg)")
if not fileName: return
folder, _ = os.path.split(fileName)
# Store last used folder location
widget.settings.setValue("addontools/lastlogofolder", folder)
widget.addonImage.setText(fileName)
widget.currentAddon.data['logo'] = fileName
#######
# GMA tools signals
#######
def split_path(p):
"""Helper to split a path into components"""
a,b = os.path.split(p)
return (split_path(a) if len(a) and len(b) else []) + [b]
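# Illustrative example (hypothetical path): split_path('lua/autorun/foo.lua')
# returns ['lua', 'autorun', 'foo.lua'].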
def folder_hierarchy(files):
"""Helper function that creates a hierarchy of folders and files"""
hierarchy = dict()
hierarchy['name'] = "GMA File" + ' ' * 40
hierarchy['children'] = dict()
hierarchy['size'] = 0
hierarchy['path'] = ''
for f in files:
split = split_path(f['name'])
hierarchy['size'] = hierarchy['size'] + f['puresize']
cur_h = hierarchy # Current hierarchy
i = 0
for sub in split:
i = i + 1
if not sub in cur_h['children']:
cur_h['children'][sub] = dict()
cur_h['children'][sub]['children'] = dict()
cur_h = cur_h['children'][sub]
cur_h['name'] = sub
cur_h['path'] = '/'.join(split[0:i])
cur_h['size'] = 'size' in cur_h and cur_h['size'] + f['puresize'] or f['puresize']
return hierarchy
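# A small, hypothetical sketch (not used by the UI) of the structure
# folder_hierarchy() builds; file names and sizes are made up. Each nested
# 'children' dict accumulates the puresize of every file below it.
def _demo_folder_hierarchy():
    """Illustrative only."""
    files = [{'name': 'lua/autorun/foo.lua', 'puresize': 100},
             {'name': 'lua/bar.lua', 'puresize': 50}]
    h = folder_hierarchy(files)
    # h['size'] == 150
    # h['children']['lua']['size'] == 150
    # h['children']['lua']['children']['autorun']['path'] == 'lua/autorun'
    return h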
def populate(model, hierarchy, root = None):
"""Populates the GMA file tree from a hierarchy created with folder_hierarchy"""
node = QStandardItem(hierarchy['name'])
size = QStandardItem(gmafile.sizeof_simple(hierarchy['size']))
node.filePath = size.filePath = hierarchy['path']
root.appendRow([node, size]) if root else model.appendRow([node, size])
for child in iter(sorted(hierarchy['children'])):
populate(model, hierarchy['children'][child], node)
return node
def openGmaFile(widget, fileName, error = True):
if fileName == '': return
try:
info = gmafile.gmaInfo(fileName)
except Exception:
if error: errorMsg("Could not recognise the format of this file!")
return
widget.settings.setValue("gmatools/lastgmafile", fileName)
widget.gmaName.setText(info['addon_name'])
widget.gmaDescription.setText('description' in info and info['description'] or info['addon_description'])
widget.gmaAuthor.setText(info['addon_author'])
widget.gmaAuthorID.setValue(float(info['steamid']))
widget.gmaTimestamp.setDateTime(QtCore.QDateTime.fromTime_t(info['timestamp']))
widget.gmaTags.setText('tags' in info and ', '.join(info['tags']) or '')
widget.gmaType.setText('type' in info and info['type'] or '')
# Tree view
model = QStandardItemModel()
model.setHorizontalHeaderLabels(['File', 'Size'])
widget.gmaFiles.setModel(model)
# Fill in data
hierarchy = folder_hierarchy(info['files'])
root = populate(model, hierarchy)
rootIndex = model.indexFromItem(root)
widget.gmaFiles.resizeColumnToContents(0)
# Expand the root node
widget.gmaFiles.expand(rootIndex)
# Select root node
widget.gmaFiles.selectionModel().select(rootIndex, QItemSelectionModel.Select)
# Enable the extract button
widget.gmaExtract.setEnabled(True)
widget.gmaOpen.setEnabled(True)
def gmaSelectEdited(widget, fileName):
openGmaFile(widget, fileName, False)
def gmaSelectEditingFinished(widget):
openGmaFile(widget, widget.gmaSelect.text(), True)
def gmaSelectFile(widget):
fileName, _ = QFileDialog.getOpenFileName(None,
"Open GMA file", widget.settings.value("selectGMALastFolder", None), "GMA files (*.gma)")
if not fileName: return
folder, _ = os.path.split(fileName)
# Store last used folder location
widget.settings.setValue("selectGMALastFolder", folder)
widget.gmaSelect.setText(fileName)
openGmaFile(widget, fileName)
def gmaExtract(widget):
selected = widget.gmaFiles.selectedIndexes()
selectedPaths = set()
for i in selected:
if not i.model().itemFromIndex(i).filePath: continue
selectedPaths.add(i.model().itemFromIndex(i).filePath)
[]
r1430_1500_tomorrow = []
r1500_1530_tomorrow = []
r1530_1600_tomorrow = []
r1600_1630_tomorrow = []
r1630_1700_tomorrow = []
r1700_1730_tomorrow = []
r1730_1800_tomorrow = []
r1800_1830_tomorrow = []
r1830_1900_tomorrow = []
r1900_1930_tomorrow = []
r1930_2000_tomorrow = []
r2000_2030_tomorrow = []
r2030_2100_tomorrow = []
r2100_2130_tomorrow = []
r2130_2200_tomorrow = []
r2200_2230_tomorrow = []
r2230_2300_tomorrow = []
r2300_2330_tomorrow = []
r2330_0000_tomorrow = []
# print request.GET
username = user_name
# username = request.GET['user_name']
user = User.objects.get(username=username)
barbershops = Barbershop.objects.filter(user=user)
# barbershop_name = request.GET['barbershop_name']
# barbershops = Barbershop.objects.filter(user=user)
for barbershop in barbershops:
if barbershop.name == barbershop_name:
barbershop = Barbershop.objects.get(name=barbershop_name)
now = datetime.datetime.now()
today = now.date()
yesterday = today - datetime.timedelta(1)
tomorrow = today + datetime.timedelta(1)
start_date = barbershop.start_date
end_date = barbershop.end_date
print yesterday
print today
print tomorrow
reservations = Reservations.objects.filter(barbershop=barbershop, start_date=today)
reservations_yesterday = Reservations.objects.filter(barbershop=barbershop, start_date=yesterday)
reservations_tomorrow = Reservations.objects.filter(barbershop=barbershop, start_date=tomorrow)
for reservation in reservations:
if '00:00' <= reservation.start_time < '00:30':
r0000_0030.append(reservation)
if '00:00' < reservation.end_time <= '00:30' or reservation.start_time < '00:30' < reservation.end_time:
if r0000_0030.count(reservation) == 0:
r0000_0030.append(reservation)
if '00:30' <= reservation.start_time < '01:00':
r0030_0100.append(reservation)
if '00:30' < reservation.end_time <= '01:00' or reservation.start_time < '01:00' < reservation.end_time:
if r0030_0100.count(reservation) == 0:
r0030_0100.append(reservation)
if '01:00' <= reservation.start_time < '01:30':
r0100_0130.append(reservation)
if '01:00' < reservation.end_time <= '01:30' or reservation.start_time < '01:30' < reservation.end_time:
if r0100_0130.count(reservation) == 0:
r0100_0130.append(reservation)
if '01:30' <= reservation.start_time < '02:00':
r0130_0200.append(reservation)
if '01:30' < reservation.end_time <= '02:00' or reservation.start_time < '02:00' < reservation.end_time:
if r0130_0200.count(reservation) == 0:
r0130_0200.append(reservation)
if '02:00' <= reservation.start_time < '02:30':
r0200_0230.append(reservation)
if '02:00' < reservation.end_time <= '02:30' or reservation.start_time < '02:30' < reservation.end_time:
if r0200_0230.count(reservation) == 0:
r0200_0230.append(reservation)
if '02:30' <= reservation.start_time < '03:00':
r0230_0300.append(reservation)
if '02:30' < reservation.end_time <= '03:00' or reservation.start_time < '03:00' < reservation.end_time:
if r0230_0300.count(reservation) == 0:
r0230_0300.append(reservation)
if '03:00' <= reservation.start_time < '03:30':
r0300_0330.append(reservation)
if '03:00' < reservation.end_time <= '03:30' or reservation.start_time < '03:30' < reservation.end_time:
if r0300_0330.count(reservation) == 0:
r0300_0330.append(reservation)
if '03:30' <= reservation.start_time < '04:00':
r0330_0400.append(reservation)
if '03:30' < reservation.end_time <= '04:00' or reservation.start_time < '04:00' < reservation.end_time:
if r0330_0400.count(reservation) == 0:
r0330_0400.append(reservation)
if '04:00' <= reservation.start_time < '04:30':
r0400_0430.append(reservation)
if '04:00' < reservation.end_time <= '04:30' or reservation.start_time < '04:30' < reservation.end_time:
if r0400_0430.count(reservation) == 0:
r0400_0430.append(reservation)
if '04:30' <= reservation.start_time < '05:00':
r0430_0500.append(reservation)
if '04:30' < reservation.end_time <= '05:00' or reservation.start_time < '05:00' < reservation.end_time:
if r0430_0500.count(reservation) == 0:
r0430_0500.append(reservation)
if '05:00' <= reservation.start_time < '05:30':
r0500_0530.append(reservation)
if '05:00' < reservation.end_time <= '05:30' or reservation.start_time < '05:30' < reservation.end_time:
if r0500_0530.count(reservation) == 0:
r0500_0530.append(reservation)
if '05:30' <= reservation.start_time < '06:00':
r0530_0600.append(reservation)
if '05:30' < reservation.end_time <= '06:00' or reservation.start_time < '06:00' < reservation.end_time:
if r0530_0600.count(reservation) == 0:
r0530_0600.append(reservation)
if '06:00' <= reservation.start_time < '06:30':
r0600_0630.append(reservation)
if '06:00' < reservation.end_time <= '06:30' or reservation.start_time < '06:30' < reservation.end_time:
if r0600_0630.count(reservation) == 0:
r0600_0630.append(reservation)
if '06:30' <= reservation.start_time < '07:00':
r0630_0700.append(reservation)
if '06:30' < reservation.end_time <= '07:00' or reservation.start_time < '07:00' < reservation.end_time:
if r0630_0700.count(reservation) == 0:
r0630_0700.append(reservation)
if '07:00' <= reservation.start_time < '07:30':
r0700_0730.append(reservation)
if '07:00' < reservation.end_time <= '07:30' or reservation.start_time < '07:30' < reservation.end_time:
if r0700_0730.count(reservation) == 0:
r0700_0730.append(reservation)
if '07:30' <= reservation.start_time < '08:00':
r0730_0800.append(reservation)
if '07:30' < reservation.end_time <= '08:00' or reservation.start_time < '08:00' < reservation.end_time:
if r0730_0800.count(reservation) == 0:
r0730_0800.append(reservation)
if '08:00' <= reservation.start_time < '08:30':
r0800_0830.append(reservation)
if '08:00' < reservation.end_time <= '08:30' or reservation.start_time < '08:30' < reservation.end_time:
if r0800_0830.count(reservation) == 0:
r0800_0830.append(reservation)
if '08:30' <= reservation.start_time < '09:00':
r0830_0900.append(reservation)
if '08:30' < reservation.end_time <= '09:00' or reservation.start_time < '09:00' < reservation.end_time:
if r0830_0900.count(reservation) == 0:
r0830_0900.append(reservation)
if '09:00' <= reservation.start_time < '09:30':
r0900_0930.append(reservation)
if '09:00' < reservation.end_time <= '09:30' or reservation.start_time < '09:30' < reservation.end_time:
if r0900_0930.count(reservation) == 0:
r0900_0930.append(reservation)
if '09:30' <= reservation.start_time < '10:00':
r0930_1000.append(reservation)
if '09:30' < reservation.end_time <= '10:00' or reservation.start_time < '10:00' < reservation.end_time:
if r0930_1000.count(reservation) == 0:
r0930_1000.append(reservation)
if '10:00' <= reservation.start_time < '10:30':
r1000_1030.append(reservation)
if '10:00' < reservation.end_time <= '10:30' or reservation.start_time < '10:30' < reservation.end_time:
if r1000_1030.count(reservation) == 0:
r1000_1030.append(reservation)
if '10:30' <= reservation.start_time < '11:00':
r1030_1100.append(reservation)
if '10:30' < reservation.end_time <= '11:00' or reservation.start_time < '11:00' < reservation.end_time:
if r1030_1100.count(reservation) == 0:
r1030_1100.append(reservation)
if '11:00' <= reservation.start_time < '11:30':
r1100_1130.append(reservation)
if '11:00' < reservation.end_time <= '11:30' or reservation.start_time < '11:30' < reservation.end_time:
if r1100_1130.count(reservation) == 0:
r1100_1130.append(reservation)
if '11:30' <= reservation.start_time < '12:00':
r1130_1200.append(reservation)
if '11:30' < reservation.end_time <= '12:00' or reservation.start_time < '12:00' < reservation.end_time:
if r1130_1200.count(reservation) == 0:
r1130_1200.append(reservation)
if '12:00' <= reservation.start_time < '12:30':
r1200_1230.append(reservation)
if '12:00' < reservation.end_time <= '12:30' or reservation.start_time < '12:30' < reservation.end_time:
if r1200_1230.count(reservation) == 0:
r1200_1230.append(reservation)
if '12:30' <= reservation.start_time < '13:00':
r1230_1300.append(reservation)
if '12:30' < reservation.end_time <= '13:00' or reservation.start_time < '13:00' < reservation.end_time:
if r1230_1300.count(reservation) == 0:
r1230_1300.append(reservation)
if '13:00' <= reservation.start_time < '13:30':
r1300_1330.append(reservation)
if '13:00' < reservation.end_time <= '13:30' or reservation.start_time < '13:30' < reservation.end_time:
if r1300_1330.count(reservation) == 0:
r1300_1330.append(reservation)
if '13:30' <= reservation.start_time < '14:00':
r1330_1400.append(reservation)
if '13:30' < reservation.end_time <= '14:00' or reservation.start_time < '14:00' < reservation.end_time:
if r1330_1400.count(reservation) == 0:
r1330_1400.append(reservation)
if '14:00' <= reservation.start_time < '14:30':
r1400_1430.append(reservation)
if '14:00' < reservation.end_time <= '14:30' or reservation.start_time < '14:30' < reservation.end_time:
if r1400_1430.count(reservation) == 0:
r1400_1430.append(reservation)
if '14:30' <= reservation.start_time < '15:00':
r1430_1500.append(reservation)
if '14:30' < reservation.end_time <= '15:00' or reservation.start_time < '15:00' < reservation.end_time:
if r1430_1500.count(reservation) == 0:
r1430_1500.append(reservation)
if '15:00' <= reservation.start_time < '15:30':
r1500_1530.append(reservation)
if '15:00' < reservation.end_time <= '15:30' or reservation.start_time < '15:30' < reservation.end_time:
if r1500_1530.count(reservation) == 0:
r1500_1530.append(reservation)
if '15:30' <= reservation.start_time < '16:00':
r1530_1600.append(reservation)
if '15:30' < reservation.end_time <= '16:00' or reservation.start_time < '16:00' < reservation.end_time:
if r1530_1600.count(reservation) == 0:
r1530_1600.append(reservation)
if '16:00' <= reservation.start_time < '16:30':
r1600_1630.append(reservation)
if '16:00' < reservation.end_time <= '16:30' or reservation.start_time < '16:30' < reservation.end_time:
if r1600_1630.count(reservation) == 0:
r1600_1630.append(reservation)
if '16:30' <= reservation.start_time < '17:00':
r1630_1700.append(reservation)
if '16:30' < reservation.end_time <= '17:00' or reservation.start_time < '17:00' < reservation.end_time:
if r1630_1700.count(reservation) == 0:
r1630_1700.append(reservation)
if '17:00' <= reservation.start_time < '17:30':
r1700_1730.append(reservation)
if '17:00' < reservation.end_time <= '17:30' or reservation.start_time < '17:30' < reservation.end_time:
if r1700_1730.count(reservation) == 0:
r1700_1730.append(reservation)
if '17:30' <= reservation.start_time < '18:00':
r1730_1800.append(reservation)
if '17:30' < reservation.end_time <= '18:00' or reservation.start_time < '18:00' < reservation.end_time:
if r1730_1800.count(reservation) == 0:
r1730_1800.append(reservation)
if '18:00' <= reservation.start_time < '18:30':
r1800_1830.append(reservation)
if '18:00' < reservation.end_time <= '18:30' or reservation.start_time < '18:30' < reservation.end_time:
if r1800_1830.count(reservation) == 0:
r1800_1830.append(reservation)
if '18:30' <= reservation.start_time < '19:00':
r1830_1900.append(reservation)
if '18:30' < reservation.end_time <= '19:00' or reservation.start_time < '19:00' < reservation.end_time:
if r1830_1900.count(reservation) == 0:
r1830_1900.append(reservation)
if '19:00' <= reservation.start_time < '19:30':
r1900_1930.append(reservation)
if '19:00' < reservation.end_time <= '19:30' or reservation.start_time < '19:30' < reservation.end_time:
if r1900_1930.count(reservation) == 0:
r1900_1930.append(reservation)
if '19:30' <= reservation.start_time < '20:00':
r1930_2000.append(reservation)
if '19:30' < reservation.end_time <= '20:00' or reservation.start_time < '20:00' < reservation.end_time:
if r1930_2000.count(reservation) == 0:
r1930_2000.append(reservation)
if '20:00' <= reservation.start_time < '20:30':
r2000_2030.append(reservation)
if '20:00' < reservation.end_time <= '20:30' or reservation.start_time < '20:30' < reservation.end_time:
if r2000_2030.count(reservation) == 0:
r2000_2030.append(reservation)
if '20:30' <= reservation.start_time < '21:00':
r2030_2100.append(reservation)
if '20:30' < reservation.end_time <= '21:00' or reservation.start_time < '21:00' < reservation.end_time:
if r2030_2100.count(reservation) == 0:
r2030_2100.append(reservation)
if '21:00' <= reservation.start_time < '21:30':
r2100_2130.append(reservation)
if '21:00' < reservation.end_time <= '21:30' or reservation.start_time < '21:30' < reservation.end_time:
if r2100_2130.count(reservation) == 0:
r2100_2130.append(reservation)
if '21:30' <= reservation.start_time < '22:00':
r2130_2200.append(reservation)
if '21:30' < reservation.end_time <= '22:00' or reservation.start_time < '22:00' < reservation.end_time:
if r2130_2200.count(reservation) == 0:
r2130_2200.append(reservation)
if '22:00' <= reservation.start_time < '22:30':
r2200_2230.append(reservation)
if '22:00' < reservation.end_time <= '22:30' or reservation.start_time < '22:30' < reservation.end_time:
if r2200_2230.count(reservation) == 0:
r2200_2230.append(reservation)
if '22:30' <= reservation.start_time < '23:00':
r2230_2300.append(reservation)
if '22:30' < reservation.end_time <= '23:00' or reservation.start_time < '23:00' < reservation.end_time:
if r2230_2300.count(reservation) == 0:
r2230_2300.append(reservation)
if '23:00' <= reservation.start_time < '23:30':
r2300_2330.append(reservation)
if '23:00' < reservation.end_time <= '23:30' or reservation.start_time < '23:30' < reservation.end_time:
if r2300_2330.count(reservation) == 0:
r2300_2330.append(reservation)
if '23:30' <= reservation.start_time < '00:00':
r2330_0000.append(reservation)
if '23:30' < reservation.end_time <= '00:00' or reservation.start_time < '00:00' < reservation.end_time:
if r2330_0000.count(reservation) == 0:
r2330_0000.append(reservation)
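# The 48 half-hour buckets above could also be generated rather than written
# out by hand. A hedged sketch (not used by this view) of the same bucketing,
# keyed by each slot's "HH:MM" start; the '24:00' sentinel for the last slot
# is an assumption that deviates from the literal '00:00' comparison above.
def bucket_reservations(reservations):
    slots = {}
    for h in range(24):
        for m in (0, 30):
            slots['%02d:%02d' % (h, m)] = []
    bounds = sorted(slots.keys()) + ['24:00']
    for r in reservations:
        for start, end in zip(bounds, bounds[1:]):
            overlaps = (start <= r.start_time < end) or \
                       (start < r.end_time <= end) or \
                       (r.start_time < end < r.end_time)
            if overlaps and r not in slots[start]:
                slots[start].append(r)
    return slots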
# Copyright 2010 New Relic, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import functools
import time
import logging
import warnings
try:
import urlparse
except ImportError:
import urllib.parse as urlparse
from newrelic.api.application import Application, application_instance
from newrelic.api.transaction import Transaction, current_transaction
from newrelic.common.async_proxy import async_proxy, TransactionContext
from newrelic.common.encoding_utils import (obfuscate, json_encode,
decode_newrelic_header, ensure_str)
from newrelic.core.attribute import create_attributes, process_user_attribute
from newrelic.core.attribute_filter import DST_BROWSER_MONITORING, DST_NONE
from newrelic.packages import six
from newrelic.common.object_names import callable_name
from newrelic.common.object_wrapper import FunctionWrapper, wrap_object
_logger = logging.getLogger(__name__)
_js_agent_header_fragment = '<script type="text/javascript">%s</script>'
_js_agent_footer_fragment = '<script type="text/javascript">'\
'window.NREUM||(NREUM={});NREUM.info=%s</script>'
# Seconds since epoch for Jan 1 2000
JAN_1_2000 = time.mktime((2000, 1, 1, 0, 0, 0, 0, 0, 0))
MICROSECOND_MIN = JAN_1_2000 * 1000000.0
MILLISECOND_MIN = JAN_1_2000 * 1000.0
def _parse_time_stamp(time_stamp):
"""
Converts time_stamp to seconds. Input can be microseconds,
milliseconds or seconds
Divide the timestamp by the highest resolution divisor. If
the result is older than Jan 1 2000, then pick a lower
resolution divisor and repeat. It is safe to assume no
requests were queued for more than 10 years.
"""
now = time.time()
if time_stamp > MICROSECOND_MIN:
divisor = 1000000.0
elif time_stamp > MILLISECOND_MIN:
divisor = 1000.0
elif time_stamp > JAN_1_2000:
divisor = 1.0
else:
return 0.0
converted_time = time_stamp / divisor
# If queue_start is in the future, return 0.0.
if converted_time > now:
return 0.0
return converted_time
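# A few illustrative conversions (hypothetical values, assuming "now" is some
# time after 2021, i.e. roughly 1.63e9 seconds since the epoch):
#   _parse_time_stamp(1.63e15) -> 1.63e9   (interpreted as microseconds)
#   _parse_time_stamp(1.63e12) -> 1.63e9   (interpreted as milliseconds)
#   _parse_time_stamp(1.63e9)  -> 1.63e9   (already in seconds)
#   _parse_time_stamp(123.0)   -> 0.0      (older than Jan 1 2000, rejected)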
TRUE_VALUES = {'on', 'true', '1'}
FALSE_VALUES = {'off', 'false', '0'}
def _lookup_environ_setting(environ, name, default=False):
if name not in environ:
return default
flag = environ[name]
if isinstance(flag, six.string_types):
flag = flag.lower()
if flag in TRUE_VALUES:
return True
elif flag in FALSE_VALUES:
return False
return flag
def _parse_synthetics_header(header):
# Return a dictionary of values from Synthetics header
# Returns empty dict, if version is not supported.
synthetics = {}
version = None
try:
if len(header) > 0:
version = int(header[0])
if version == 1:
synthetics['version'] = version
synthetics['account_id'] = int(header[1])
synthetics['resource_id'] = header[2]
synthetics['job_id'] = header[3]
synthetics['monitor_id'] = header[4]
except Exception:
return
return synthetics
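# Illustrative example (hypothetical values): a decoded header of
# [1, 12345, "resource", "job", "monitor"] yields
# {'version': 1, 'account_id': 12345, 'resource_id': 'resource',
#  'job_id': 'job', 'monitor_id': 'monitor'}; an unsupported version returns
# an empty dict, and a malformed header returns None.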
def _remove_query_string(url):
url = ensure_str(url)
out = urlparse.urlsplit(url)
return urlparse.urlunsplit((out.scheme, out.netloc, out.path, '', ''))
def _is_websocket(environ):
return environ.get('HTTP_UPGRADE', '').lower() == 'websocket'
class WebTransaction(Transaction):
unicode_error_reported = False
QUEUE_TIME_HEADERS = ('x-request-start', 'x-queue-start')
def __init__(self, application, name, group=None,
scheme=None, host=None, port=None, request_method=None,
request_path=None, query_string=None, headers=None,
enabled=None):
super(WebTransaction, self).__init__(application, enabled)
# Flags for tracking whether RUM header and footer have been
# generated.
self.rum_header_generated = False
self.rum_footer_generated = False
if not self.enabled:
return
# Inputs
self._request_uri = request_path
self._request_method = request_method
self._request_scheme = scheme
self._request_host = host
self._request_params = {}
self._request_headers = {}
try:
self._port = int(port)
except Exception:
self._port = None
# Response
self._response_headers = {}
self._response_code = None
if headers is not None:
try:
headers = headers.items()
except Exception:
pass
for k, v in headers:
k = ensure_str(k)
if k is not None:
self._request_headers[k.lower()] = v
# Capture query request string parameters, unless we're in
# High Security Mode.
if query_string and not self._settings.high_security:
query_string = ensure_str(query_string)
try:
params = urlparse.parse_qs(
query_string,
keep_blank_values=True)
self._request_params.update(params)
except Exception:
pass
self._process_queue_time()
self._process_synthetics_header()
self._process_context_headers()
if name is not None:
self.set_transaction_name(name, group, priority=1)
elif request_path is not None:
self.set_transaction_name(request_path, 'Uri', priority=1)
def _process_queue_time(self):
for queue_time_header in self.QUEUE_TIME_HEADERS:
value = self._request_headers.get(queue_time_header)
if not value:
continue
value = ensure_str(value)
try:
if value.startswith('t='):
self.queue_start = _parse_time_stamp(float(value[2:]))
else:
self.queue_start = _parse_time_stamp(float(value))
except Exception:
pass
if self.queue_start > 0.0:
break
def _process_synthetics_header(self):
# Check for Synthetics header
settings = self._settings
if settings.synthetics.enabled and \
settings.trusted_account_ids and \
settings.encoding_key:
encoded_header = self._request_headers.get('x-newrelic-synthetics')
encoded_header = encoded_header and ensure_str(encoded_header)
if not encoded_header:
return
decoded_header = decode_newrelic_header(
encoded_header,
settings.encoding_key)
synthetics = _parse_synthetics_header(decoded_header)
if synthetics and \
synthetics['account_id'] in \
settings.trusted_account_ids:
# Save obfuscated header, because we will pass it along
# unchanged in all external requests.
self.synthetics_header = encoded_header
self.synthetics_resource_id = synthetics['resource_id']
self.synthetics_job_id = synthetics['job_id']
self.synthetics_monitor_id = synthetics['monitor_id']
def _process_context_headers(self):
# Process the New Relic cross process ID header and extract
# the relevant details.
if self._settings.distributed_tracing.enabled:
self.accept_distributed_trace_headers(self._request_headers)
else:
client_cross_process_id = \
self._request_headers.get('x-newrelic-id')
txn_header = self._request_headers.get('x-newrelic-transaction')
self._process_incoming_cat_headers(client_cross_process_id,
txn_header)
def process_response(self, status_code, response_headers):
"""Processes response status and headers, extracting any
details required and returning a set of additional headers
to merge into that being returned for the web transaction.
"""
if not self.enabled:
return []
# Extract response headers
if response_headers:
try:
response_headers = response_headers.items()
except Exception:
pass
for header, value in response_headers:
header = ensure_str(header)
if header is not None:
self._response_headers[header.lower()] = value
try:
self._response_code = int(status_code)
# If response code is 304 do not insert CAT headers. See:
# https://www.w3.org/Protocols/rfc2616/rfc2616-sec10.html#sec10.3.5
if self._response_code == 304:
return []
except Exception:
pass
if self.client_cross_process_id is None:
return []
# Generate CAT response headers
try:
read_length = int(self._request_headers.get('content-length'))
except Exception:
read_length = -1
return self._generate_response_headers(read_length)
def _update_agent_attributes(self):
if 'accept' in self._request_headers:
self._add_agent_attribute('request.headers.accept',
self._request_headers['accept'])
try:
content_length = int(self._request_headers['content-length'])
self._add_agent_attribute('request.headers.contentLength',
content_length)
except:
pass
if 'content-type' in self._request_headers:
self._add_agent_attribute('request.headers.contentType',
self._request_headers['content-type'])
if 'host' in self._request_headers:
self._add_agent_attribute('request.headers.host',
self._request_headers['host'])
if 'referer' in self._request_headers:
self._add_agent_attribute('request.headers.referer',
_remove_query_string(self._request_headers['referer']))
if 'user-agent' in self._request_headers:
self._add_agent_attribute('request.headers.userAgent',
self._request_headers['user-agent'])
if self._request_method:
self._add_agent_attribute('request.method', self._request_method)
if self._request_uri:
self._add_agent_attribute('request.uri', self._request_uri)
try:
content_length = int(self._response_headers['content-length'])
self._add_agent_attribute('response.headers.contentLength',
content_length)
except:
pass
if 'content-type' in self._response_headers:
self._add_agent_attribute('response.headers.contentType',
self._response_headers['content-type'])
if self._response_code is not None:
self._add_agent_attribute('response.status',
str(self._response_code))
return super(WebTransaction, self)._update_agent_attributes()
def browser_timing_header(self):
"""Returns the JavaScript header to be included in any HTML
response to perform real user monitoring. This function returns
the header as a native Python string. In Python 2 native strings
are stored as bytes. In Python 3 native strings are stored as
unicode.
"""
if not self.enabled:
return ''
if self._state != self.STATE_RUNNING:
return ''
if self.background_task:
return ''
if self.ignore_transaction:
return ''
if not self._settings:
return ''
if not self._settings.browser_monitoring.enabled:
return ''
if not self._settings.license_key:
return ''
# Don't return the header a second time if it has already
# been generated.
if self.rum_header_generated:
return ''
# Requirement is that the first 13 characters of the account
# license key is used as the key when obfuscating values for
# the RUM footer. Will not be able to perform the obfuscation
# if license key isn't that long for some reason.
if len(self._settings.license_key) < 13:
return ''
# Return the RUM header only if the agent received a valid value
# for js_agent_loader from the data collector. The data
# collector is not meant to send a non empty value for the
# js_agent_loader value if browser_monitoring.loader is set to
# 'none'.
if self._settings.js_agent_loader:
header = _js_agent_header_fragment % self._settings.js_agent_loader
# To avoid any issues with browser encodings, we will make sure
# that the javascript we inject for the browser agent is ASCII
# encodable. Since we obfuscate all agent and user attributes, and
# the transaction name with base 64 encoding, this will preserve
# those strings, if they have values outside of the ASCII character
# set. In the case of Python 2, we actually then use the encoded
# value as we need a native string, which for Python 2 is a byte
# string. If encoding as ASCII fails we will return an empty
# string.
try:
if six.PY2:
header = header.encode('ascii')
else:
header.encode('ascii')
except UnicodeError:
if not WebTransaction.unicode_error_reported:
_logger.error('ASCII encoding of js-agent-header failed: %r',
header)
WebTransaction.unicode_error_reported = True
header = ''
else:
header = ''
# We remember if we have returned a non empty string value and
# if called a second time we will not return it again. The flag
# will also be used to check whether the footer should be
# generated.
if header:
self.rum_header_generated = True
return header
def browser_timing_footer(self):
"""Returns the JavaScript footer to be included in any HTML
response to perform real user monitoring. This function returns
the footer as a native Python string. In Python 2 native strings
are stored as bytes. In Python 3 native strings are stored as
unicode.
"""
# For now, will
# resort to ssh into the host and start a new "rinetd" instance each
# time a new vmattach is issued.
try :
_proc_man = ProcessManagement(username = "root", \
hostname = obj_attr_list["host_cloud_ip"], \
cloud_name = obj_attr_list["cloud_name"])
if operation == "setup" :
_cmd = "echo \"0.0.0.0 " + obj_attr_list["prov_cloud_port"] + ' '
_cmd += obj_attr_list["cloud_ip"] + " 22\" > /tmp/"
_cmd += obj_attr_list["cloud_vm_name"] + ".rinetd.conf; rinetd -c "
_cmd += "/tmp/" + obj_attr_list["cloud_vm_name"] + ".rinetd.conf"
_rexcpt = True
else:
_cmd = "sudo pkill -9 -f 'rinetd -c /tmp/" + obj_attr_list["cloud_vm_name"]
_cmd += ".rinetd.conf" + "'; sudo rm -rf /tmp/"
_cmd += obj_attr_list["cloud_vm_name"] + ".rinetd.conf"
_rexcpt = False
_msg = operation.capitalize() + " port mapping (" + obj_attr_list["prov_cloud_port"]
_msg += " -> 22) for " + obj_attr_list["name"]
_msg += " (cloud-assigned uuid " + obj_attr_list["cloud_vm_uuid"] + ") "
_msg += "running on libvirt host \"" + obj_attr_list["host_name"] + "\""
cbdebug(_msg, True)
_status, _result_stdout, _fmsg = _proc_man.run_os_command(_cmd, raise_exception = _rexcpt)
_status = 0
except ProcessManagement.ProcessManagementException as obj:
_status = obj.status
_fmsg = str(obj.msg)
except Exception as e :
_status = 23
_fmsg = str(e)
finally :
if _status :
_msg = "Error while attempting to " + operation + " port mapping for " + obj_attr_list["name"]
_msg += " (cloud-assigned uuid " + obj_attr_list["cloud_vm_uuid"] + ") "
_msg += "running on LXD host \"" + obj_attr_list["host_name"] + "\""
_msg += " in " + self.get_description() + " \"" + obj_attr_list["cloud_name"] + "\" : "
_msg += _fmsg
cberr(_msg, True)
raise CldOpsException(_msg, _status)
else :
_msg = "Successfully " + operation + " port mapping for " + obj_attr_list["name"]
_msg += " (cloud-assigned uuid " + obj_attr_list["cloud_vm_uuid"] + ") "
_msg += "running on LXD host \"" + obj_attr_list["host_name"] + "\""
_msg += " in " + self.get_description() + " \"" + obj_attr_list["cloud_name"]
_msg += "\"."
cbdebug(_msg)
return _status, _msg
def generate_libvirt_vv_template(self, obj_attr_list, boot = False) :
'''
TBD
'''
_xml_file = ""
_xml_file += "\t<volume>\n"
if boot :
obj_attr_list["cloud_vv_data_name"] = obj_attr_list["cloud_vv_name"]
obj_attr_list["cloud_vv_name"] = obj_attr_list["cloud_vv_name"].replace("-vv","-vbv")
if int(obj_attr_list["boot_volume_snapshot_size"]) > int(self.vhw_config[obj_attr_list["size"]]["vstorage"]) :
_xml_file += "\t<capacity unit=\"M\">" + str(int(obj_attr_list["boot_volume_snapshot_size"])) + "</capacity>\n"
else :
_xml_file += "\t<capacity unit=\"M\">" + str(int(self.vhw_config[obj_attr_list["size"]]["vstorage"])) + "</capacity>\n"
else :
obj_attr_list["cloud_vv_name"] = obj_attr_list["cloud_vv_data_name"]
_xml_file += "\t<capacity unit=\"G\">" + obj_attr_list["cloud_vv"] + "</capacity>\n"
_vol_name = obj_attr_list["cloud_vv_name"]
_xml_file += "\t<name>" + obj_attr_list["cloud_vv_name"] + "</name>\n"
_xml_file += "\t<target>\n"
_xml_file += "\t\t<permissions>\n"
_xml_file += "\t\t\t<mode>0777</mode>\n"
_xml_file += "\t\t</permissions>\n"
_xml_file += "\t\t<path>" + obj_attr_list["pool_path"] + "</path>\n"
if boot :
_vol_format = "qcow2"
else :
_vol_format = "raw"
obj_attr_list["cloud_vv_type"] = _vol_format
_xml_file += "\t\t<format type='" + _vol_format + "'/>\n"
_xml_file += "\t</target>\n"
if boot :
_backing_path = obj_attr_list["boot_volume_snapshot_path"]
_backing_format = obj_attr_list["boot_volume_format"]
else :
_backing_path = "none"
_backing_format = "none"
if _backing_path != "none" :
_xml_file += "\t<backingStore>\n"
_xml_file += "\t\t<path>" + _backing_path + "</path>\n"
_xml_file += "\t\t<format type='" + _backing_format + "'/>\n"
_xml_file += "\t</backingStore>\n"
_xml_file += "\t</volume>\n"
_vol_path = obj_attr_list["pool_path"] + '/' + _vol_name
obj_attr_list["volume_list"] += _vol_name + ':' + _vol_path + ':' + _vol_format + ':' + _backing_path + ':' + _backing_format + ','
return _xml_file
@trace
def generate_mac_addr(self, obj_attr_list) :
'''
This function is designed to pseudo-determinstically generate MAC addresses.
The standard 6-byte MAC address is splitup as follows:
| prefix (X bytes long) | selector byte | suffix (Y bytes long) |
For example:
1. The user sets an X-byte long 'mac_prefix' == '12:34'. This is used to
represent all experiments in a shared cluster controlled by PLMloud.
For each shared cluster, this prefix should never need to change.
This prefix is also used in the DHCP server configuration to ensure
that requests from outside VMs are not answered to VMs that do not
belong to this cluster. If there is more than one private DHCP server
in the cluster, then, this mac_prefix should be changed, otherwise not.
2. The selector byte is generated automatically to provide additional
uniqueness and predictability in the MAC address to prevent
collisions among users of the same shared cluster. It is a hash of
the username of the benchmark combined with the hostname of the VM
running the benchmark.
3. The remaining Y-byte suffix is generated at provisioning time. This is done
by having the datastore maintain a counter that represents the last used
MAC address. An increasing counter ensures that collisions never happen
but only requires a small amount of memory even when the number of Y
bytes in the suffix is very large.
'''
# Form the 1st two parts of the MAC address
_mac_prefix = "52:54:00"
bytes_needed = (17 - len(_mac_prefix)) / 3 - 1
unique_mac_selector_key = obj_attr_list["cloud_vm_name"] + obj_attr_list["experiment_id"]
selector_hd = sha256(unique_mac_selector_key.encode('utf-8')).hexdigest()
selector_pos = randint(0,len(selector_hd)-2)
selector_byte = selector_hd[selector_pos:selector_pos+2]
mac = _mac_prefix + ":" + selector_byte
for x in range(0, int(bytes_needed)) :
byte = ((int(obj_attr_list["counter"]) >> (8 * ((int(bytes_needed) - 1) - x))) & 0xff)
mac += (":%02x" % (byte))
obj_attr_list["cloud_vm_mac"] = mac.replace('-', ':')
return True
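# Worked example (hypothetical counter value): with the fixed prefix
# "52:54:00", bytes_needed = (17 - 8) / 3 - 1 = 2, so two suffix bytes come
# from the provisioning counter. For counter == 5 and a selector byte of,
# say, "ab" taken from the sha256 hash, the generated address is
# "52:54:00:ab:00:05".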
def generate_libvirt_vm_template(self, obj_attr_list) :
'''
TBD
'''
if obj_attr_list["hypervisor"] == "xen" :
_xml_template = "<domain type='xen' "
else :
_xml_template = "<domain type='kvm' "
_xml_template += ">\n"
_xml_template += "\t<name>" + str(obj_attr_list["cloud_vm_name"]) + "</name>\n"
# _xml_template += "\t<uuid>" + str(instance_attr_list["cloud_uuid"]) + "</uuid>\n"
_xml_template += "\t<memory>" + str(int(self.vhw_config[obj_attr_list["size"]]["vmem"]) * 1024) + "</memory>\n"
_xml_template += "\t<currentMemory>" + str(int(self.vhw_config[obj_attr_list["size"]]["vmem"]) * 1024) + "</currentMemory>\n"
if obj_attr_list["arch"] == "ppc64" or obj_attr_list["arch"] == "ppc64le" :
_xml_template += "\t<vcpu placement='static'>" + str(int(self.vhw_config[obj_attr_list["size"]]["vcpus"])) + "</vcpu>\n"
_xml_template += "\t<resource>\n"
_xml_template += "\t\t<partition>/machine</partition>\n"
_xml_template += "\t</resource>\n"
else :
_xml_template += "\t<vcpu>" + str(int(self.vhw_config[obj_attr_list["size"]]["vcpus"])) + "</vcpu>\n"
_xml_template += "\t<os>\n"
if obj_attr_list["hypervisor"] == "xen" :
_xml_template += "\t\t<type arch='x86_64' machine='xenfv'>hvm</type>\n"
else :
if obj_attr_list["arch"] == "ppc64" or obj_attr_list["arch"] == "ppc64le" :
_xml_template += "\t\t<type arch='ppc64' machine='pseries'>hvm</type>\n"
else :
_xml_template += "\t\t<type arch='x86_64' machine='pc'>hvm</type>\n"
if obj_attr_list["hypervisor"] == "xen" :
_xml_template += "\t\t<loader>/usr/lib/xen/boot/hvmloader</loader>\n"
_xml_template += "\t\t<boot dev='hd'/>\n"
_xml_template += "\t</os>\n"
_xml_template += "\t<features>\n"
_xml_template += "\t\t<acpi/>\n"
_xml_template += "\t\t<apic/>\n"
# _xml_template += "\t\t<pae/>\n"
_xml_template += "\t</features>\n"
_xml_template += "\t<cpu mode='host-model'>\n"
_xml_template += "\t<model fallback='allow'/>\n"
_xml_template += "\t</cpu>\n"
_xml_template += "\t<clock offset='utc'>\n"
_xml_template += "\t\t<timer name='rtc' tickpolicy='catchup'/>\n"
_xml_template += "\t\t<timer name='pit' tickpolicy='delay'/>\n"
_xml_template += "\t\t<timer name='hpet' present='no'/>\n"
_xml_template += "\t</clock>\n"
_xml_template += "\t<devices>\n"
_xml_template += "\t\t<emulator>" + obj_attr_list["emulator"] + "</emulator>\n"
_disk_number = 0
for _volume in obj_attr_list["volume_list"].split(',') + [ "cloud-init" + ':' + obj_attr_list["host_remote_dir"] + obj_attr_list["cloud_vm_name"] + ".iso:" + "raw" + ':' + "none" + ':' + "none" ] :
if _volume.count(':') == 4 :
_vol_name, _vol_path, _vol_format, _backing_path, _backing_format = _volume.split(':')
_xml_template += "\t\t<disk type='file' device='disk'>\n"
_xml_template += "\t\t\t<driver name='qemu' type='" + _vol_format + "'/>\n"
_xml_template += "\t\t\t<source file='" + _vol_path + "'/>\n"
if _backing_path != "none" :
_xml_template += "\t\t\t<backingStore type='file'>\n"
_xml_template += "\t\t\t\t<source file='" + _backing_path + "'/>\n"
_xml_template += "\t\t\t\t<format type='" + _backing_format + "'/>\n"
_xml_template += "\t\t\t</backingStore>\n"
_xml_template += "\t\t\t<target dev='"
if obj_attr_list["diskmode"] == "virtio" :
_xml_template += "v"
elif obj_attr_list["diskmode"] == "ide" :
_xml_template += "h"
elif obj_attr_list["diskmode"] == "scsi" :
_xml_template += "s"
_xml_template += "d" + chr(ord('a') + _disk_number) + "' bus='" + obj_attr_list["diskmode"] + "'/>\n"
_xml_template += "\t\t</disk>\n"
_disk_number += 1
if obj_attr_list["arch"] == "ppc64" or obj_attr_list["arch"] == "ppc64le" :
_xml_template += "\t\t<controller type='usb' index='0'>\n"
_xml_template += "\t\t\t<alias name='usb0'/>\n"
_xml_template += "\t\t</controller>\n"
_xml_template += "\t\t<controller type='pci' index='0' model='pci-root'>\n"
_xml_template += "\t\t\t<alias name='pci.0'/>\n"
_xml_template += "\t\t</controller>\n"
_xml_template += "\t\t<controller type='scsi' index='0'>\n"
_xml_template += "\t\t\t<alias name='scsi0'/>\n"
_xml_template += "\t\t\t<address type='spapr-vio' reg='0x2000'/>\n"
_xml_template += "\t\t</controller>\n"
_xml_template += "\t\t<interface type='bridge'>\n"
_xml_template += "\t\t\t<source bridge='" + obj_attr_list["network_bridge_name"] + "'/>\n"
_xml_template += "\t\t\t<mac address='" + str(obj_attr_list["cloud_vm_mac"]) + "'/>\n"
if obj_attr_list["netmode"] == "virtio" :
_xml_template += "\t\t\t<model type='virtio'/>\n"
_xml_template += "\t\t</interface>\n"
for _vnic in obj_attr_list["extra_vnics"] :
_xml_template += "\t\t<interface type='bridge'>\n"
_xml_template += "\t\t\t<source bridge='" + _vnic[1] | |
"""Tests the dependency analyzer and `dump` backend."""
from unittest import TestCase
import os
import tempfile
from plumbum import local
from .common import run_vhdeps
DIR = os.path.realpath(os.path.dirname(__file__))
class TestDump(TestCase):
"""Tests the dependency analyzer and `dump` backend."""
def test_basic(self):
"""Test basic functionality of the dump backend"""
code, out, _ = run_vhdeps('dump', '-i', DIR + '/simple/multiple-ok')
self.assertEqual(code, 0)
self.assertEqual(out, '\n'.join([
'top work 2008 ' + DIR + '/simple/multiple-ok/bar_tc.vhd',
'top work 2008 ' + DIR + '/simple/multiple-ok/baz.vhd',
'top work 2008 ' + DIR + '/simple/multiple-ok/foo_tc.vhd',
]) + '\n')
def test_to_file(self):
"""Test outputting a dependency dump to a file"""
with tempfile.TemporaryDirectory() as tempdir:
code, _, _ = run_vhdeps(
'dump',
'-i', DIR + '/simple/multiple-ok',
'-o', tempdir+'/output')
self.assertEqual(code, 0)
with open(tempdir+'/output', 'r') as fildes:
self.assertEqual(fildes.read(), '\n'.join([
'top work 2008 ' + DIR + '/simple/multiple-ok/bar_tc.vhd',
'top work 2008 ' + DIR + '/simple/multiple-ok/baz.vhd',
'top work 2008 ' + DIR + '/simple/multiple-ok/foo_tc.vhd',
]) + '\n')
def test_default_include(self):
"""Test implicit working directory inclusion"""
with local.cwd(DIR + '/simple/multiple-ok'):
code, out, err = run_vhdeps('dump')
self.assertEqual(code, 0)
self.assertTrue('Including the current working directory recursively by default' in err)
self.assertEqual(out, '\n'.join([
'top work 2008 ' + DIR + '/simple/multiple-ok/bar_tc.vhd',
'top work 2008 ' + DIR + '/simple/multiple-ok/baz.vhd',
'top work 2008 ' + DIR + '/simple/multiple-ok/foo_tc.vhd',
]) + '\n')
def test_default_include_by_file(self):
"""Test including files instead of directories"""
code, out, _ = run_vhdeps(
'dump',
'-i', DIR + '/simple/multiple-ok',
'-i', DIR + '/simple/all-good/test_tc.vhd')
self.assertEqual(code, 0)
self.assertEqual(out, '\n'.join([
'top work 2008 ' + DIR + '/simple/multiple-ok/bar_tc.vhd',
'top work 2008 ' + DIR + '/simple/multiple-ok/baz.vhd',
'top work 2008 ' + DIR + '/simple/multiple-ok/foo_tc.vhd',
'top work 2008 ' + DIR + '/simple/all-good/test_tc.vhd',
]) + '\n')
def test_default_include_by_glob(self):
"""Test including files using glob syntax"""
code, out, _ = run_vhdeps(
'dump',
'-i', DIR + '/simple/multiple-ok/ba*.vhd')
self.assertEqual(code, 0)
self.assertEqual(out, '\n'.join([
'top work 2008 ' + DIR + '/simple/multiple-ok/bar_tc.vhd',
'top work 2008 ' + DIR + '/simple/multiple-ok/baz.vhd',
]) + '\n')
def test_default_filters(self):
"""Test the default version/mode filters"""
code, out, _ = run_vhdeps('dump', '-i', DIR + '/simple/filtering')
self.assertEqual(code, 0)
self.assertEqual(out, '\n'.join([
'top work 2008 ' + DIR + '/simple/filtering/new.08.vhd',
'top work 1993 ' + DIR + '/simple/filtering/old.93.vhd',
'top work 2008 ' + DIR + '/simple/filtering/simulation.sim.vhd',
]) + '\n')
def test_fixed_version_1993(self):
"""Test the required version filter"""
code, out, _ = run_vhdeps('dump', '-i', DIR + '/simple/filtering', '-v93')
self.assertEqual(code, 0)
self.assertEqual(out, '\n'.join([
'top work 1993 ' + DIR + '/simple/filtering/old.93.vhd',
'top work 1993 ' + DIR + '/simple/filtering/simulation.sim.vhd',
]) + '\n')
def test_desired_version(self):
"""Test the desired version filter"""
code, out, _ = run_vhdeps('dump', '-i', DIR + '/simple/filtering', '-d93')
self.assertEqual(code, 0)
self.assertEqual(out, '\n'.join([
'top work 2008 ' + DIR + '/simple/filtering/new.08.vhd',
'top work 1993 ' + DIR + '/simple/filtering/old.93.vhd',
'top work 1993 ' + DIR + '/simple/filtering/simulation.sim.vhd',
]) + '\n')
def test_synthesis(self):
"""Test the synthesis filter"""
code, out, _ = run_vhdeps('dump', '-i', DIR + '/simple/filtering', '-msyn')
self.assertEqual(code, 0)
self.assertEqual(out, '\n'.join([
'top work 2008 ' + DIR + '/simple/filtering/new.08.vhd',
'top work 1993 ' + DIR + '/simple/filtering/old.93.vhd',
'top work 2008 ' + DIR + '/simple/filtering/synthesis.syn.vhd',
]) + '\n')
def test_no_filtering(self):
"""Test all filters disabled"""
code, out, _ = run_vhdeps('dump', '-i', DIR + '/simple/filtering', '-mall')
self.assertEqual(code, 0)
self.assertEqual(out, '\n'.join([
'top work 2008 ' + DIR + '/simple/filtering/new.08.vhd',
'top work 1993 ' + DIR + '/simple/filtering/old.93.vhd',
'top work 2008 ' + DIR + '/simple/filtering/simulation.sim.vhd',
'top work 2008 ' + DIR + '/simple/filtering/synthesis.syn.vhd',
]) + '\n')
def test_selected_entities(self):
"""Test toplevel entity selection"""
code, out, _ = run_vhdeps('dump', 'new', 'old', '-i', DIR + '/simple/filtering')
self.assertEqual(code, 0)
self.assertEqual(out, '\n'.join([
'top work 2008 ' + DIR + '/simple/filtering/new.08.vhd',
'top work 1993 ' + DIR + '/simple/filtering/old.93.vhd',
]) + '\n')
def test_selected_entity_glob(self):
"""Test toplevel entity selection with fnmatch globs"""
code, out, _ = run_vhdeps('dump', 's*', '-i', DIR + '/simple/filtering')
self.assertEqual(code, 0)
self.assertEqual(out, '\n'.join([
'top work 2008 ' + DIR + '/simple/filtering/simulation.sim.vhd',
]) + '\n')
def test_selected_entity_no_match(self):
"""Test toplevel entity selection with globs that don't match
anything"""
code, out, err = run_vhdeps('dump', 's*', 'x*', '-i', DIR + '/simple/filtering')
self.assertEqual(code, 0)
self.assertTrue('Warning: work.x* did not match anything.' in err)
self.assertEqual(out, '\n'.join([
'top work 2008 ' + DIR + '/simple/filtering/simulation.sim.vhd',
]) + '\n')
def test_conflict(self):
"""Test conflicting entities (defined in multiple files)"""
code, _, err = run_vhdeps(
'dump',
'-i', DIR + '/simple/all-good',
'-i', DIR + '/simple/timeout')
self.assertEqual(code, 1)
self.assertTrue('ResolutionError: entity work.test_tc is defined in '
'multiple, ambiguous files:' in err)
def test_ignore_pragmas(self):
"""Test ignore-use pragmas"""
code, _, _ = run_vhdeps('dump', '-i', DIR + '/complex/ignore-use')
self.assertEqual(code, 0)
def test_missing_package(self):
"""Test missing package detection/error"""
code, _, err = run_vhdeps('dump', '-i', DIR + '/complex/vhlib/util/UtilMem64_pkg.vhd')
self.assertEqual(code, 1)
self.assertTrue('complex/vhlib/util/UtilMem64_pkg.vhd' in err)
self.assertTrue('could not find package work.utilstr_pkg' in err)
def test_missing_component(self):
"""Test missing component detection/error"""
code, _, err = run_vhdeps('dump', '-i', DIR + '/complex/missing-component')
self.assertEqual(code, 1)
self.assertTrue('could not find component declaration for missing' in err)
def test_black_box_enforce(self):
"""Test black box detection/error"""
code, _, err = run_vhdeps(
'dump',
'-i', DIR + '/complex/vhlib/util',
'-i', DIR + '/complex/vhlib/stream/Stream_pkg.vhd',
'-i', DIR + '/complex/vhlib/stream/StreamBuffer.vhd')
self.assertEqual(code, 1)
self.assertTrue('complex/vhlib/stream/StreamBuffer.vhd' in err)
self.assertTrue('black box: could not find entity work.streamfifo' in err)
def test_black_box_ignore(self):
"""Test ignoring a black box through the -x flag"""
code, _, _ = run_vhdeps(
'dump',
'-i', DIR + '/complex/vhlib/util',
'-x', DIR + '/complex/vhlib/stream/Stream_pkg.vhd',
'-i', DIR + '/complex/vhlib/stream/StreamBuffer.vhd')
self.assertEqual(code, 0)
def test_missing_filtered(self):
"""Test detection of missing dependencies due to active filters"""
code, _, err = run_vhdeps('dump', '-i', DIR + '/complex/missing-filtered')
self.assertEqual(code, 1)
self.assertTrue('entity work.synth_only is defined, but only in files '
'that were filtered out:' in err)
self.assertTrue('synth_only.syn.vhd is synthesis-only' in err)
def test_libraries(self):
"""Test multiple libraries"""
code, out, _ = run_vhdeps(
'dump',
'-i', DIR + '/simple/all-good',
'-i', 'timeout:' + DIR + '/simple/timeout')
self.assertEqual(code, 0)
self.assertEqual(out, '\n'.join([
'top timeout 2008 ' + DIR + '/simple/timeout/test_tc.vhd',
'top work 2008 ' + DIR + '/simple/all-good/test_tc.vhd',
]) + '\n')
def test_version_override(self):
"""Test version overrides in the include flag"""
code, out, _ = run_vhdeps(
'dump',
'-i', DIR + '/simple/all-good',
'-i', '93:timeout:' + DIR + '/simple/timeout')
self.assertEqual(code, 0)
self.assertEqual(out, '\n'.join([
'top timeout 1993 ' + DIR + '/simple/timeout/test_tc.vhd',
'top work 2008 ' + DIR + '/simple/all-good/test_tc.vhd',
]) + '\n')
def test_ambiguous_08(self):
"""Test disambiguation by default desired version"""
code, out, _ = run_vhdeps('dump', '-i', DIR + '/simple/ambiguous')
self.assertEqual(code, 0)
self.assertEqual(out, '\n'.join([
'top work 2008 ' + DIR + '/simple/ambiguous/test.08.sim.vhd',
]) + '\n')
def test_ambiguous_93(self):
"""Test disambiguation by specific desired version"""
code, out, _ = run_vhdeps('dump', '-i', DIR + '/simple/ambiguous', '-d', '93')
self.assertEqual(code, 0)
self.assertEqual(out, '\n'.join([
'top work 1993 ' + DIR + '/simple/ambiguous/test.93.sim.vhd',
]) + '\n')
def test_ambiguous_syn(self):
"""Test disambiguation by synthesis vs. simulation mode"""
code, out, _ = run_vhdeps('dump', '-i', DIR + '/simple/ambiguous', '-m', 'syn')
self.assertEqual(code, 0)
self.assertEqual(out, '\n'.join([
'top work 2008 ' + DIR + '/simple/ambiguous/test.syn.vhd',
]) + '\n')
def test_component_circle(self):
"""Test recursive instantiation using components"""
code, out, _ = run_vhdeps('dump', '-i', DIR + '/complex/component-circle')
self.assertEqual(code, 0)
self.assertEqual(out, '\n'.join([
'dep work 2008 ' + DIR + '/complex/component-circle/a.vhd',
'dep work 2008 ' + DIR + '/complex/component-circle/b.vhd',
]) + '\n')
def test_component_in_inst(self):
"""Test component keyword in instantiation"""
code, out, _ = run_vhdeps('dump', '-i', DIR + '/complex/component-in-inst')
self.assertEqual(code, 0)
self.assertEqual(out, '\n'.join([
'top work 2008 ' + DIR + '/complex/component-in-inst/a.vhd',
'dep work 2008 ' + DIR + '/complex/component-in-inst/b.vhd',
]) + '\n')
def test_entity_circle(self):
"""Test the error message for a true circular dependency"""
code, _, err = run_vhdeps('dump', '-i', DIR + '/complex/entity-circle')
self.assertEqual(code, 1)
self.assertTrue('ResolutionError: circular dependency:' in err)
def test_multi_unit_circle(self):
"""Test circular dependencies caused by multiple design units per
file"""
code, _, err = run_vhdeps('dump', '-i', DIR + '/complex/multi-unit-circle')
self.assertEqual(code, 1)
self.assertTrue('ResolutionError: circular dependency:' in err)
def test_multi_unit_design(self):
"""Test dependency analysis when multiple entities are defined per
file"""
code, out, _ = run_vhdeps('dump', '-i', DIR + '/complex/multi-unit-design')
self.assertEqual(code, 0)
self.assertEqual(out, '\n'.join([
'dep work 2008 ' + DIR + '/complex/multi-unit-design/ab.vhd',
'dep work 2008 ' + DIR + '/complex/multi-unit-design/cd.vhd',
'top work 2008 ' + DIR + '/complex/multi-unit-design/test_tc.vhd',
]) + '\n')
def test_multi_tc_per_file(self):
"""Test the dump backend with multiple test cases per file"""
code, out, _ = run_vhdeps('dump', '-i', DIR + '/complex/multi-tc-per-file')
self.assertEqual(code, 0)
self.assertEqual(out, '\n'.join([
'top work 2008 ' + | |
# Source repository: Kami-DiscordBot/Predeactor-Cogs
import logging
from contextlib import suppress
from datetime import datetime
from typing import Optional, Union
import discord
from redbot.core import Config, commands
from redbot.core.bot import Red
from redbot.core.utils.chat_formatting import bold, error, humanize_list
from .abc import CompositeMetaClass
from .api import Challenge
from .commands import OwnerCommands, Settings
from .errors import (
AlreadyHaveCaptchaError,
AskedForReload,
DeletedValueError,
LeftServerError,
MissingRequiredValueError,
)
from .events import Listeners
from .informations import (
__author__,
__patchnote__,
__patchnote_version__,
__version__,
)
from .utils import build_kick_embed
DEFAULT_GLOBAL = {"log_level": 50}
DEFAULT_GUILD = {
"channel": None, # The channel where the captcha is sent.
"logschannel": None, # Where logs are sent.
"enabled": False, # If challenges must be activated.
"autoroles": [], # Roles to give.
"temprole": None, # Temporary role to give.
"type": "plain", # Captcha type.
"timeout": 5, # Time in minutes before kicking.
"retry": 3, # The numnber of retry allowed.
}
log = logging.getLogger("red.predeactor.captcha")
class Captcha(
Settings,
OwnerCommands,
Listeners,
commands.Cog,
name="Captcha",
metaclass=CompositeMetaClass,
):
"""A Captcha defensive system. to challenge the new users and protect yourself a bit more of
raids."""
def __init__(self, bot: Red) -> None:
super().__init__()
self.bot: Red = bot
self.data: Config = Config.get_conf(None, identifier=495954056, cog_name="Captcha")
self.data.register_global(**DEFAULT_GLOBAL)
self.data.register_guild(**DEFAULT_GUILD)
self.running = {}
self.version = __version__
self.patchnote = __patchnote__
self.patchnoteconfig = None
async def send_or_update_log_message(
self,
guild: discord.Guild,
message_content: str,
message_to_update: Optional[discord.Message] = None,
*,
allowed_tries: tuple = None,
member: discord.Member = None,
file: discord.File = None,
embed: discord.Embed = None,
ignore_error: bool = True,
) -> Optional[discord.Message]:
"""
Send a message or update one in the log channel.
"""
        time = datetime.now().strftime("%H:%M - %m/%d/%Y")
content = ""
if message_to_update:
content += message_to_update.content + "\n"
content += (
f"{bold(str(time))}{f' {member.mention}' if member else ''}"
f"{f' ({allowed_tries[0]}/{allowed_tries[1]})' if allowed_tries else ''}: "
f"{message_content}"
)
log_channel_id: Union[int, None] = await self.data.guild(guild).logschannel()
if not log_channel_id:
if ignore_error:
return None
raise MissingRequiredValueError("Missing logging channel ID.")
log_channel: discord.TextChannel = self.bot.get_channel(log_channel_id)
if log_channel and message_to_update:
try:
await message_to_update.edit(
content=content,
file=file,
embed=embed,
allowed_mentions=discord.AllowedMentions(users=False),
)
except discord.HTTPException:
if message_to_update.embeds and (
                    message_to_update.embeds[0].title == "Message reached its maximum capacity!"
):
# To avoid edit spam or something... smh
return message_to_update
await message_to_update.edit(
content=message_to_update.content,
file=file,
embed=discord.Embed(
colour=discord.Colour.red().value,
title="Message reached his maximum capacity!",
description=(
"I am unable to log more since the characters limit on this "
"message has been reached."
),
),
allowed_mentions=discord.AllowedMentions(users=False),
)
return message_to_update
if log_channel:
return await log_channel.send(
content,
file=file,
embed=embed,
allowed_mentions=discord.AllowedMentions(users=False),
)
raise DeletedValueError("Logging channel may have been deleted.")
async def basic_check(self, member: discord.Member) -> bool:
"""
        Run basic checks on a member; used when a member joins the server.
"""
if member.bot:
return False
if await self.bot.cog_disabled_in_guild(self, member.guild):
return False
return await self.data.guild(member.guild).enabled()
async def create_challenge_for(self, member: discord.Member) -> Challenge:
"""
        Create a Challenge object for a user and append it to the running challenges.
"""
if member.id in self.running:
            raise AlreadyHaveCaptchaError("The user already has a captcha object running.")
captcha = Challenge(self.bot, member, await self.data.guild(member.guild).all())
self.running[member.id] = captcha
return captcha
async def delete_challenge_for(self, member: discord.Member) -> bool:
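        """
        Remove a member's running challenge, if any.

        Return True if a challenge was removed, False otherwise.
        """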
try:
del self.running[member.id]
return True
except KeyError:
return False
def is_running_challenge(self, member_or_id: Union[discord.Member, int]) -> bool:
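        """Return True if the given member (or member ID) currently has a challenge running."""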
if isinstance(member_or_id, discord.Member):
member_or_id = int(member_or_id.id)
return member_or_id in self.running
def obtain_challenge(self, member_or_id: Union[discord.Member, int]) -> Challenge:
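        """
        Return the running Challenge of the given member (or member ID).

        Raise KeyError if the member is not challenging any captcha.
        """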
if isinstance(member_or_id, discord.Member):
member_or_id = int(member_or_id.id)
if not self.is_running_challenge(member_or_id):
raise KeyError("User is not challenging any Captcha.")
return self.running[member_or_id]
async def give_temprole(self, challenge: Challenge) -> None:
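        """Give the configured temporary role to the challenged member, if one is set."""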
temprole = challenge.config["temprole"]
if temprole:
try:
await challenge.member.add_roles(
challenge.guild.get_role(temprole), reason="Beginning Captcha challenge."
)
except discord.Forbidden:
                raise PermissionError('Bot is missing the "manage_roles" permission.')
async def remove_temprole(self, challenge: Challenge) -> None:
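        """Remove the configured temporary role from the challenged member, if one is set."""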
temprole = challenge.config["temprole"]
if temprole:
try:
await challenge.member.remove_roles(
challenge.guild.get_role(temprole), reason="Finishing Captcha challenge."
)
except discord.Forbidden:
                raise PermissionError('Bot is missing the "manage_roles" permission.')
async def realize_challenge(self, challenge: Challenge) -> bool:
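        """
        Run a challenge from start to finish: give the temporary role, let the member
        try until the retry limit or timeout, then kick on failure or give the
        configured roles on success.
        """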
# Seems to be the last goddamn function I'll be writing...
limit = await self.data.guild(challenge.member.guild).retry()
is_ok = None
timeout = False
await self.give_temprole(challenge)
try:
while is_ok is not True:
if challenge.trynum > limit:
break
try:
this = await challenge.try_challenging()
except TimeoutError:
timeout = True
break
except AskedForReload:
challenge.trynum += 1
continue
except LeftServerError:
return False
except TypeError:
                    # Here the user reacted with an invalid (most probably custom) emoji.
                    # While we expect administrators to restrict that permission, we still
                    # need to handle it, so we're fine if we don't increase trynum.
continue
if this is False:
challenge.trynum += 1
try:
await challenge.messages["answer"].delete()
except discord.Forbidden:
await self.send_or_update_log_message(
challenge.guild,
error(bold("Unable to delete member's answer.")),
challenge.messages.get("logs"),
member=challenge.member,
)
is_ok = False
else:
is_ok = True
failed = challenge.trynum > limit
logmsg = challenge.messages["logs"]
if failed or timeout:
reason = (
"Retried the captcha too many time."
if failed
else "Didn't answer to the challenge."
)
try:
await self.nicely_kick_user_from_challenge(challenge, reason)
await self.send_or_update_log_message(
challenge.guild,
bold(f"User kicked for reason: {reason}"),
logmsg,
member=challenge.member,
)
except PermissionError:
await self.send_or_update_log_message(
challenge.guild,
error(bold("Permission missing for kicking member!")),
logmsg,
member=challenge.member,
)
return True
roles = [
challenge.guild.get_role(role)
for role in await self.data.guild(challenge.guild).autoroles()
]
try:
await self.congratulation(challenge, roles)
await self.remove_temprole(challenge)
await self.send_or_update_log_message(
challenge.guild,
bold("Roles added, Captcha passed."),
logmsg,
member=challenge.member,
)
except PermissionError:
roles_name = [role.name for role in roles]
try:
await challenge.member.send(
f"Please contact the administrator of {challenge.guild.name} for obtaining "
"access of the server, I was unable to add you the roles on the server.\nYou "
f"should have obtained the following roles: "
f"{humanize_list(roles_name) if roles_name else 'None.'}"
)
except discord.Forbidden:
await challenge.channel.send(
challenge.member.mention
+ ": "
+ f"Please contact the administrator of {challenge.guild.name} for obtaining "
"access of the server, I was unable to add you the roles on the server.\nYou "
f"should have obtained the following roles: "
f"{humanize_list(roles_name) if roles_name else 'None.'}",
delete_after=10,
)
await self.send_or_update_log_message(
challenge.guild,
error(bold("Permission missing for giving roles! Member alerted.")),
logmsg,
member=challenge.member,
)
finally:
try:
await challenge.cleanup_messages()
except PermissionError:
await self.send_or_update_log_message(
challenge.guild,
error(bold("Missing permissions for deleting all messages for verification!")),
challenge.messages.get("logs"),
member=challenge.member,
)
return True
async def congratulation(self, challenge: Challenge, roles: list) -> None:
"""
        Congratulate a member who finished the captcha by giving the configured roles.
"""
        # The admin may have set the channel to be a DM; checking for manage_roles there is
        # useless since it always returns False. Instead, we take an arbitrary text channel
        # of the guild to check our permission for managing roles.
channel = (
challenge.channel
if not isinstance(challenge.channel, discord.DMChannel)
else challenge.guild.text_channels[0]
)
if not channel.permissions_for(self.bot.get_guild(challenge.guild.id).me).manage_roles:
            raise PermissionError('Bot is missing the "manage_roles" permission.')
await challenge.member.add_roles(*roles, reason="Passed Captcha successfully.")
async def nicely_kick_user_from_challenge(self, challenge: Challenge, reason: str) -> bool:
# We're gonna check our permission first, to avoid DMing the user for nothing.
        # The admin may have set the channel to be a DM; checking for kick_members there is
        # useless since it always returns False. Instead, we take an arbitrary text channel
        # of the guild to check our permission for kicking.
channel = (
challenge.channel
if not isinstance(challenge.channel, discord.DMChannel)
else challenge.guild.text_channels[0]
)
if not channel.permissions_for(self.bot.get_guild(challenge.guild.id).me).kick_members:
            raise PermissionError('Bot is missing the "kick_members" permission.')
with suppress(discord.Forbidden, discord.HTTPException):
await challenge.member.send(embed=build_kick_embed(challenge.guild, reason))
try:
await challenge.guild.kick(challenge.member, reason=reason)
except discord.Forbidden:
raise PermissionError("Unable to kick member.")
return True
# PLEASE DON'T TOUCH THOSE FUNCTIONS WITH YOUR COG OR EVAL. Thanks. - Pred
# Those should only be used by the cog - 4 bags of None of your business.
def format_help_for_context(self, ctx: commands.Context) -> str:
"""
This will put some text at the top of the main help. ([p]help Captcha)
        Thanks to Sinbad.
"""
pre_processed = super().format_help_for_context(ctx)
return "{pre_processed}\n\nAuthor: {authors}\nVersion: {version}".format(
pre_processed=pre_processed,
authors=humanize_list(__author__),
version=self.version,
)
async def _initialize(self, send_patchnote: bool = True) -> None:
"""
An initializer for the cog.
        It just sets the logging level and sends the patchnote if asked.
"""
log_level = await self.data.log_level()
log.setLevel(log_level)
log.info("Captcha logging level has been set to: {lev}".format(lev=log_level))
log.debug(
"This logging level is reserved for testing and monitoring purpose, set the "
"level to 2 if you prefer to be alerted by less minor events or doesn't want to help "
"debugging this cog."
)
if send_patchnote:
await self._send_patchnote()
async def _send_patchnote(self) -> None:
await self.bot.wait_until_red_ready()
self.patchnoteconfig = notice = Config.get_conf(
None,
identifier=4145125452,
cog_name="PredeactorNews",
)
notice.register_user(version="0")
async with notice.get_users_lock():
old_patchnote_version: str = await notice.user(self.bot.user).version()
if old_patchnote_version != __patchnote_version__:
log.info("New version of patchnote detected! Delivering... (¬‿¬ | |
underworld_graph = {
992: {
"title": "Darkness",
"description": "You are standing on grass and surrounded by darkness.",
"terrain": "NORMAL",
"coordinates": "(75,61)",
"elevation": 0,
"w": 966
},
966: {
"title": "Darkness",
"description": "You are standing on grass and surrounded by darkness.",
"terrain": "NORMAL",
"coordinates": "(74,61)",
"elevation": 0,
"e": 992,
"w": 960
},
960: {
"title": "Darkness",
"description": "You are standing on grass and surrounded by darkness.",
"terrain": "NORMAL",
"coordinates": "(73,61)",
"elevation": 0,
"e": 966,
"w": 956
},
956: {
"title": "Darkness",
"description": "You are standing on grass and surrounded by darkness.",
"terrain": "NORMAL",
"coordinates": "(72,61)",
"elevation": 0,
"e": 960,
"w": 902
},
902: {
"title": "Darkness",
"description": "You are standing on grass and surrounded by darkness.",
"terrain": "NORMAL",
"coordinates": "(71,61)",
"elevation": 0,
"e": 956,
"w": 874
},
874: {
"title": "Darkness",
"description": "You are standing on grass and surrounded by darkness.",
"terrain": "NORMAL",
"coordinates": "(70,61)",
"elevation": 0,
"e": 902,
"w": 762
},
762: {
"title": "Darkness",
"description": "You are standing on grass and surrounded by darkness.",
"terrain": "NORMAL",
"coordinates": "(69,61)",
"elevation": 0,
"e": 874,
"w": 728
},
728: {
"title": "Darkness",
"description": "You are standing on grass and surrounded by darkness.",
"terrain": "NORMAL",
"coordinates": "(68,61)",
"elevation": 0,
"n": 741,
"e": 762,
"w": 724
},
741: {
"title": "Darkness",
"description": "You are standing on grass and surrounded by darkness.",
"terrain": "NORMAL",
"coordinates": "(68,62)",
"elevation": 0,
"s": 728,
"e": 793
},
793: {
"title": "Darkness",
"description": "You are standing on grass and surrounded by darkness.",
"terrain": "NORMAL",
"coordinates": "(69,62)",
"elevation": 0,
"n": 808,
"e": 901,
"w": 741
},
808: {
"title": "Darkness",
"description": "You are standing on grass and surrounded by darkness.",
"terrain": "NORMAL",
"coordinates": "(69,63)",
"elevation": 0,
"n": 821,
"s": 793,
"e": 920
},
821: {
"title": "Darkness",
"description": "You are standing on grass and surrounded by darkness.",
"terrain": "NORMAL",
"coordinates": "(69,64)",
"elevation": 0,
"n": 974,
"s": 808,
"e": 953
},
974: {
"title": "Darkness",
"description": "You are standing on grass and surrounded by darkness.",
"terrain": "NORMAL",
"coordinates": "(69,65)",
"elevation": 0,
"s": 821
},
953: {
"title": "Darkness",
"description": "You are standing on grass and surrounded by darkness.",
"terrain": "NORMAL",
"coordinates": "(70,64)",
"elevation": 0,
"w": 821
},
920: {
"title": "Darkness",
"description": "You are standing on grass and surrounded by darkness.",
"terrain": "NORMAL",
"coordinates": "(70,63)",
"elevation": 0,
"e": 946,
"w": 808
},
946: {
"title": "Darkness",
"description": "You are standing on grass and surrounded by darkness.",
"terrain": "NORMAL",
"coordinates": "(71,63)",
"elevation": 0,
"w": 920
},
901: {
"title": "Darkness",
"description": "You are standing on grass and surrounded by darkness.",
"terrain": "NORMAL",
"coordinates": "(70,62)",
"elevation": 0,
"w": 793
},
724: {
"title": "Darkness",
"description": "You are standing on grass and surrounded by darkness.",
"terrain": "NORMAL",
"coordinates": "(67,61)",
"elevation": 0,
"n": 737,
"s": 748,
"e": 728,
"w": 711
},
737: {
"title": "Darkness",
"description": "You are standing on grass and surrounded by darkness.",
"terrain": "NORMAL",
"coordinates": "(67,62)",
"elevation": 0,
"n": 756,
"s": 724
},
756: {
"title": "Darkness",
"description": "You are standing on grass and surrounded by darkness.",
"terrain": "NORMAL",
"coordinates": "(67,63)",
"elevation": 0,
"s": 737,
"e": 868
},
868: {
"title": "Darkness",
"description": "You are standing on grass and surrounded by darkness.",
"terrain": "NORMAL",
"coordinates": "(68,63)",
"elevation": 0,
"n": 885,
"w": 756
},
885: {
"title": "Darkness",
"description": "You are standing on grass and surrounded by darkness.",
"terrain": "NORMAL",
"coordinates": "(68,64)",
"elevation": 0,
"s": 868
},
748: {
"title": "Darkness",
"description": "You are standing on grass and surrounded by darkness.",
"terrain": "NORMAL",
"coordinates": "(67,60)",
"elevation": 0,
"n": 724,
"s": 772,
"e": 764
},
772: {
"title": "Darkness",
"description": "You are standing on grass and surrounded by darkness.",
"terrain": "NORMAL",
"coordinates": "(67,59)",
"elevation": 0,
"n": 748,
"s": 780
},
780: {
"title": "Darkness",
"description": "You are standing on grass and surrounded by darkness.",
"terrain": "NORMAL",
"coordinates": "(67,58)",
"elevation": 0,
"n": 772,
"s": 818
},
818: {
"title": "Darkness",
"description": "You are standing on grass and surrounded by darkness.",
"terrain": "NORMAL",
"coordinates": "(67,57)",
"elevation": 0,
"n": 780,
"s": 877,
"e": 829
},
877: {
"title": "Darkness",
"description": "You are standing on grass and surrounded by darkness.",
"terrain": "NORMAL",
"coordinates": "(67,56)",
"elevation": 0,
"n": 818,
"s": 997,
"e": 937
},
997: {
"title": "Darkness",
"description": "You are standing on grass and surrounded by darkness.",
"terrain": "NORMAL",
"coordinates": "(67,55)",
"elevation": 0,
"n": 877
},
937: {
"title": "Darkness",
"description": "You are standing on grass and surrounded by darkness.",
"terrain": "NORMAL",
"coordinates": "(68,56)",
"elevation": 0,
"w": 877
},
829: {
"title": "Darkness",
"description": "You are standing on grass and surrounded by darkness.",
"terrain": "NORMAL",
"coordinates": "(68,57)",
"elevation": 0,
"e": 912,
"w": 818
},
912: {
"title": "Darkness",
"description": "You are standing on grass and surrounded by darkness.",
"terrain": "NORMAL",
"coordinates": "(69,57)",
"elevation": 0,
"w": 829
},
764: {
"title": "Darkness",
"description": "You are standing on grass and surrounded by darkness.",
"terrain": "NORMAL",
"coordinates": "(68,60)",
"elevation": 0,
"s": 769,
"e": 848,
"w": 748
},
769: {
"title": "Darkness",
"description": "You are standing on grass and surrounded by darkness.",
"terrain": "NORMAL",
"coordinates": "(68,59)",
"elevation": 0,
"n": 764,
"s": 799,
"e": 847
},
799: {
"title": "Darkness",
"description": "You are standing on grass and surrounded by darkness.",
"terrain": "NORMAL",
"coordinates": "(68,58)",
"elevation": 0,
"n": 769,
"e": 908
},
908: {
"title": "Darkness",
"description": "You are standing on grass and surrounded by darkness.",
"terrain": "NORMAL",
"coordinates": "(69,58)",
"elevation": 0,
"w": 799
},
847: {
"title": "Darkness",
"description": "You are standing on grass and surrounded by darkness.",
"terrain": "NORMAL",
"coordinates": "(69,59)",
"elevation": 0,
"w": 769
},
848: {
"title": "Darkness",
"description": "You are standing on grass and surrounded by darkness.",
"terrain": "NORMAL",
"coordinates": "(69,60)",
"elevation": 0,
"e": 853,
"w": 764
},
853: {
"title": "Darkness",
"description": "You are standing on grass and surrounded by darkness.",
"terrain": "NORMAL",
"coordinates": "(70,60)",
"elevation": 0,
"s": 958,
"e": 939,
"w": 848
},
958: {
"title": "Darkness",
"description": "You are standing on grass and surrounded by darkness.",
"terrain": "NORMAL",
"coordinates": "(70,59)",
"elevation": 0,
"n": 853,
"s": 972
},
972: {
"title": "Darkness",
"description": "You are standing on grass and surrounded by darkness.",
"terrain": "NORMAL",
"coordinates": "(70,58)",
"elevation": 0,
"n": 958
},
939: {
"title": "Darkness",
"description": "You are standing on grass and surrounded by darkness.",
"terrain": "NORMAL",
"coordinates": "(71,60)",
"elevation": 0,
"w": 853
},
711: {
"title": "Darkness",
"description": "You are standing on grass and surrounded by darkness.",
"terrain": "NORMAL",
"coordinates": "(66,61)",
"elevation": 0,
"n": 721,
"e": 724,
"w": 633
},
721: {
"title": "Darkness",
"description": "You are standing on grass and surrounded by darkness.",
"terrain": "NORMAL",
"coordinates": "(66,62)",
"elevation": 0,
"s": 711
},
633: {
"title": "Darkness",
"description": "You are standing on grass and surrounded by darkness.",
"terrain": "NORMAL",
"coordinates": "(65,61)",
"elevation": 0,
"e": 711,
"w": 623
},
623: {
"title": "Darkness",
"description": "You are standing on grass and surrounded by darkness.",
"terrain": "NORMAL",
"coordinates": "(64,61)",
"elevation": 0,
"n": 609,
"e": 633
},
609: {
"title": "Darkness",
"description": "You are standing on grass and surrounded by darkness.",
"terrain": "NORMAL",
"coordinates": "(64,62)",
"elevation": 0,
"n": 603,
"s": 623,
"e": 652
},
603: {
"title": "Darkness",
"description": "You are standing on grass and surrounded by darkness.",
"terrain": "NORMAL",
"coordinates": "(64,63)",
"elevation": 0,
"n": 618,
"s": 609,
"w": 520
},
618: {
"title": "Darkness",
"description": "You are standing on grass and surrounded by darkness.",
"terrain": "NORMAL",
"coordinates": "(64,64)",
"elevation": 0,
"s": 603,
"e": 631
},
631: {
"title": "Darkness",
"description": "You are standing on grass and surrounded by darkness.",
"terrain": "NORMAL",
"coordinates": "(65,64)",
"elevation": 0,
"s": 646,
"w": 618
},
646: {
"title": "Darkness",
"description": "You are standing on grass and surrounded by darkness.",
"terrain": "NORMAL",
"coordinates": "(65,63)",
"elevation": 0,
"n": 631,
"e": 662
},
662: {
"title": "Darkness",
"description": "You are standing on grass and surrounded by darkness.",
"terrain": "NORMAL",
"coordinates": "(66,63)",
"elevation": 0,
"n": 675,
"w": 646
},
| |
# libs/html5lib/filters/optionaltags.py
import _base
class Filter(_base.Filter):
def slider(self):
previous1 = previous2 = None
for token in self.source:
if previous1 is not None:
yield previous2, previous1, token
previous2 = previous1
previous1 = token
yield previous2, previous1, None
def __iter__(self):
for previous, token, next in self.slider():
type = token["type"]
if type == "StartTag":
if (token["data"] or
not self.is_optional_start(token["name"], previous, next)):
yield token
elif type == "EndTag":
if not self.is_optional_end(token["name"], next):
yield token
else:
yield token
def is_optional_start(self, tagname, previous, next):
type = next and next["type"] or None
        if tagname == 'html':
# An html element's start tag may be omitted if the first thing
# inside the html element is not a space character or a comment.
return type not in ("Comment", "SpaceCharacters")
elif tagname == 'head':
# A head element's start tag may be omitted if the first thing
# inside the head element is an element.
# XXX: we also omit the start tag if the head element is empty
if type in ("StartTag", "EmptyTag"):
return True
elif type == "EndTag":
return next["name"] == "head"
elif tagname == 'body':
# A body element's start tag may be omitted if the first thing
# inside the body element is not a space character or a comment,
# except if the first thing inside the body element is a script
# or style element and the node immediately preceding the body
# element is a head element whose end tag has been omitted.
if type in ("Comment", "SpaceCharacters"):
return False
elif type == "StartTag":
# XXX: we do not look at the preceding event, so we never omit
# the body element's start tag if it's followed by a script or
# a style element.
return next["name"] not in ('script', 'style')
else:
return True
elif tagname == 'colgroup':
# A colgroup element's start tag may be omitted if the first thing
# inside the colgroup element is a col element, and if the element
            # is not immediately preceded by another colgroup element whose
# end tag has been omitted.
if type in ("StartTag", "EmptyTag"):
# XXX: we do not look at the preceding event, so instead we never
# omit the colgroup element's end tag when it is immediately
# followed by another colgroup element. See is_optional_end.
return next["name"] == "col"
else:
return False
elif tagname == 'tbody':
# A tbody element's start tag may be omitted if the first thing
# inside the tbody element is a tr element, and if the element is
            # not immediately preceded by a tbody, thead, or tfoot element
# whose end tag has been omitted.
if type == "StartTag":
# omit the thead and tfoot elements' end tag when they are
# immediately followed by a tbody element. See is_optional_end.
if previous and previous['type'] == 'EndTag' and \
previous['name'] in ('tbody','thead','tfoot'):
return False
return next["name"] == 'tr'
else:
return False
return False
def is_optional_end(self, tagname, next):
type = next and next["type"] or None
if tagname in ('html', 'head', 'body'):
# An html element's end tag may be omitted if the html element
# is not immediately followed by a space character or a comment.
return type not in ("Comment", "SpaceCharacters")
elif tagname in ('li', 'optgroup', 'tr'):
# A li element's end tag may be omitted if the li element is
# immediately followed by another li element or if there is
# no more content in the parent element.
# An optgroup element's end tag may be omitted if the optgroup
# element is immediately followed by another optgroup element,
# or if there is no more content in the parent element.
# A tr element's end tag may be omitted if the tr element is
# immediately followed by another tr element, or if there is
# no more content in the parent element.
if type == "StartTag":
return next["name"] == tagname
else:
return type == "EndTag" or type is None
elif tagname in ('dt', 'dd'):
# A dt element's end tag may be omitted if the dt element is
# immediately followed by another dt element or a dd element.
# A dd element's end tag may be omitted if the dd element is
# immediately followed by another dd element or a dt element,
# or if there is no more content in the parent element.
if type == "StartTag":
return next["name"] in ('dt', 'dd')
elif tagname == 'dd':
return type == "EndTag" or type is None
else:
return False
elif tagname == 'p':
# A p element's end tag may be omitted if the p element is
# immediately followed by an address, article, aside,
# blockquote, datagrid, dialog, dir, div, dl, fieldset,
# footer, form, h1, h2, h3, h4, h5, h6, header, hr, menu,
# nav, ol, p, pre, section, table, or ul, element, or if
# there is no more content in the parent element.
if type in ("StartTag", "EmptyTag"):
return next["name"] in ('address', 'article', 'aside',
'blockquote', 'datagrid', 'dialog',
'dir', 'div', 'dl', 'fieldset', 'footer',
'form', 'h1', 'h2', 'h3', 'h4', 'h5', 'h6',
'header', 'hr', 'menu', 'nav', 'ol',
'p', 'pre', 'section', 'table', 'ul')
else:
return type == "EndTag" or type is None
elif tagname == 'option':
# An option element's end tag may be omitted if the option
# element is immediately followed by another option element,
# or if it is immediately followed by an <code>optgroup</code>
# element, or if there is no more content in the parent
# element.
if type == "StartTag":
return next["name"] in ('option', 'optgroup')
else:
return type == "EndTag" or type is None
elif tagname in ('rt', 'rp'):
# An rt element's end tag may be omitted if the rt element is
# immediately followed by an rt or rp element, or if there is
# no more content in the parent element.
# An rp element's end tag may be omitted if the rp element is
# immediately followed by an rt or rp element, or if there is
# no more content in the parent element.
if type == "StartTag":
return next["name"] in ('rt', 'rp')
else:
return type == "EndTag" or type is None
elif tagname == 'colgroup':
# A colgroup element's end tag may be omitted if the colgroup
# element is not immediately followed by a space character or
# a comment.
if type in ("Comment", "SpaceCharacters"):
return False
elif type == "StartTag":
# XXX: we also look for an immediately following colgroup
# element. See is_optional_start.
return next["name"] != 'colgroup'
else:
return True
elif tagname in ('thead', 'tbody'):
# A thead element's end tag may be omitted if the thead element
# is immediately followed by a tbody or tfoot element.
# A tbody element's end tag may be omitted if the tbody element
# is immediately followed by a tbody or tfoot element, or if
# there is no more content in the parent element.
# A tfoot element's end tag may be omitted if the tfoot element
# is immediately followed by a tbody element, or if there is no
# more content in the parent element.
# XXX: we never omit the end tag when the following element is
# a tbody. See is_optional_start.
if type == "StartTag":
return next["name"] in ['tbody', 'tfoot']
elif tagname == 'tbody':
return type == "EndTag" or type is None
else:
return False
elif tagname == 'tfoot':
# A tfoot element's end tag may be omitted if the tfoot element
# is immediately followed by a tbody element, or if there is no
# more content in the parent element.
# XXX: we never omit the end tag when the following element is
# a tbody. See is_optional_start.
if type == "StartTag":
return next["name"] == 'tbody'
else:
return type == | |
# Source repository: ozdanborne/felix
# -*- coding: utf-8 -*-
# Copyright (c) 2015-2016 Tigera, Inc. All rights reserved.
# Copyright 2015 Cisco Systems
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
felix.test.test_frules
~~~~~~~~~~~~~~~~~~~~~~~~~
Tests of iptables rules generation function.
"""
import logging
from mock import Mock, patch, call
from netaddr import IPAddress
from calico.felix import frules
from calico.felix.fiptables import IptablesUpdater
from calico.felix.futils import FailedSystemCall, IPV4
from calico.felix.test.base import BaseTestCase, load_config
_log = logging.getLogger(__name__)
EXPECTED_TOP_LEVEL_DEPS = {
'felix-INPUT': set(['felix-FROM-ENDPOINT', 'felix-FROM-HOST-IF']),
'felix-OUTPUT': set(['felix-TO-HOST-IF']),
'felix-FORWARD': set(['felix-FROM-ENDPOINT', 'felix-TO-ENDPOINT']),
'felix-FAILSAFE-IN': set(), 'felix-FAILSAFE-OUT': set()
}
class TestRules(BaseTestCase):
@patch("calico.felix.futils.check_call", autospec=True)
@patch("calico.felix.frules.devices", autospec=True)
@patch("calico.felix.frules.HOSTS_IPSET_V4", autospec=True)
def test_install_global_rules(self, m_ipset, m_devices, m_check_call):
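        """Check the full set of chains and rules installed for IPv4 and IPv6 with IP-in-IP enabled."""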
m_devices.interface_exists.return_value = False
m_devices.interface_up.return_value = False
m_set_ips = m_devices.set_interface_ips
env_dict = {
"FELIX_ETCDADDR": "localhost:4001",
"FELIX_HOSTNAME": "myhost",
"FELIX_INTERFACEPREFIX": "tap",
"FELIX_METADATAADDR": "192.168.127.12",
"FELIX_METADATAPORT": "1234",
"FELIX_IPINIPENABLED": "True",
"FELIX_IPINIPMTU": "1480",
"FELIX_DEFAULTENDPOINTTOHOSTACTION": "RETURN"
}
config = load_config("felix_missing.cfg", env_dict=env_dict)
config.IP_IN_IP_ADDR = IPAddress("10.0.0.1")
m_v4_upd = Mock(spec=IptablesUpdater)
m_v6_upd = Mock(spec=IptablesUpdater)
m_v6_raw_upd = Mock(spec=IptablesUpdater)
m_v4_nat_upd = Mock(spec=IptablesUpdater)
m_v6_nat_upd = Mock(spec=IptablesUpdater)
frules.install_global_rules(config, m_v4_upd, m_v4_nat_upd,
ip_version=4)
frules.install_global_rules(config, m_v6_upd, m_v6_nat_upd,
ip_version=6, raw_updater=m_v6_raw_upd)
self.assertEqual(
m_v4_nat_upd.ensure_rule_inserted.mock_calls,
[
call("POSTROUTING --out-interface tunl0 "
"-m addrtype ! --src-type LOCAL --limit-iface-out "
"-m addrtype --src-type LOCAL "
"-j MASQUERADE",
async=False),
call("PREROUTING --jump felix-PREROUTING", async=False),
call("POSTROUTING --jump felix-POSTROUTING", async=False)
]
)
m_v4_upd.ensure_rule_inserted.assert_has_calls([
call("INPUT --jump felix-INPUT", async=False),
call("OUTPUT --jump felix-OUTPUT", async=False),
call("FORWARD --jump felix-FORWARD", async=False)
]
)
expected_chains = {
'felix-FIP-DNAT': [],
'felix-FIP-SNAT': [],
'felix-PREROUTING': [
'--append felix-PREROUTING --jump felix-FIP-DNAT',
'--append felix-PREROUTING --protocol tcp --dport 80 --destination '
'169.254.169.254/32 --jump DNAT --to-destination 192.168.127.12:1234'
],
'felix-POSTROUTING': [
'--append felix-POSTROUTING --jump felix-FIP-SNAT'
]
}
m_v4_nat_upd.rewrite_chains.assert_called_once_with(
expected_chains,
{'felix-PREROUTING': set(['felix-FIP-DNAT']),
'felix-POSTROUTING': set(['felix-FIP-SNAT'])},
async=False
)
expected_chains = {
'felix-INPUT': [
'--append felix-INPUT --match conntrack --ctstate INVALID --jump DROP',
'--append felix-INPUT --match conntrack --ctstate RELATED,ESTABLISHED --jump ACCEPT',
'--append felix-INPUT --jump MARK --set-mark 0/0x4000000',
'--append felix-INPUT --in-interface tap+ --jump MARK --set-mark 0x4000000/0x4000000',
'--append felix-INPUT --goto felix-FROM-HOST-IF --match mark --mark 0/0x4000000',
'--append felix-INPUT --protocol tcp --destination 192.168.127.12 --dport 1234 --jump ACCEPT',
'--append felix-INPUT --protocol udp --sport 68 --dport 67 --jump ACCEPT',
'--append felix-INPUT --protocol udp --dport 53 --jump ACCEPT',
'--append felix-INPUT --jump felix-FROM-ENDPOINT'
],
'felix-OUTPUT': [
'--append felix-OUTPUT --match conntrack --ctstate INVALID --jump DROP',
'--append felix-OUTPUT --match conntrack --ctstate RELATED,ESTABLISHED --jump ACCEPT',
'--append felix-OUTPUT --jump MARK --set-mark 0/0x4000000',
'--append felix-OUTPUT --out-interface tap+ --jump MARK --set-mark 0x4000000/0x4000000',
'--append felix-OUTPUT --goto felix-TO-HOST-IF --match mark --mark 0/0x4000000',
],
'felix-FORWARD': [
'--append felix-FORWARD --in-interface tap+ --match conntrack --ctstate INVALID --jump DROP',
'--append felix-FORWARD --out-interface tap+ --match conntrack --ctstate INVALID --jump DROP',
'--append felix-FORWARD --in-interface tap+ --match conntrack --ctstate RELATED,ESTABLISHED --jump ACCEPT',
'--append felix-FORWARD --out-interface tap+ --match conntrack --ctstate RELATED,ESTABLISHED --jump ACCEPT',
'--append felix-FORWARD --jump felix-FROM-ENDPOINT --in-interface tap+',
'--append felix-FORWARD --jump felix-TO-ENDPOINT --out-interface tap+',
'--append felix-FORWARD --jump ACCEPT --in-interface tap+',
'--append felix-FORWARD --jump ACCEPT --out-interface tap+'
],
'felix-FAILSAFE-IN': [
'--append felix-FAILSAFE-IN --protocol tcp --dport 22 --jump ACCEPT'
],
'felix-FAILSAFE-OUT': [
'--append felix-FAILSAFE-OUT --protocol tcp --dport 2379 --jump ACCEPT',
'--append felix-FAILSAFE-OUT --protocol tcp --dport 2380 --jump ACCEPT',
'--append felix-FAILSAFE-OUT --protocol tcp --dport 4001 --jump ACCEPT',
'--append felix-FAILSAFE-OUT --protocol tcp --dport 7001 --jump ACCEPT'
]
}
m_v4_upd.rewrite_chains.assert_called_once_with(
expected_chains,
EXPECTED_TOP_LEVEL_DEPS,
async=False
)
self.assertEqual(
m_v6_nat_upd.ensure_rule_inserted.mock_calls,
[
call("PREROUTING --jump felix-PREROUTING", async=False),
call("POSTROUTING --jump felix-POSTROUTING", async=False),
]
)
m_v6_upd.ensure_rule_inserted.assert_has_calls([
call("INPUT --jump felix-INPUT", async=False),
call("OUTPUT --jump felix-OUTPUT", async=False),
call("FORWARD --jump felix-FORWARD", async=False)
]
)
expected_chains = {
'felix-FIP-DNAT': [],
'felix-FIP-SNAT': [],
'felix-PREROUTING': [
'--append felix-PREROUTING --jump felix-FIP-DNAT'
],
'felix-POSTROUTING': [
'--append felix-POSTROUTING --jump felix-FIP-SNAT'
]
}
m_v6_nat_upd.rewrite_chains.assert_called_once_with(
expected_chains, {
'felix-PREROUTING': set(['felix-FIP-DNAT']),
'felix-POSTROUTING': set(['felix-FIP-SNAT'])
}, async=False
)
m_v6_raw_upd.rewrite_chains.assert_called_once_with(
{'felix-PREROUTING': [
'--append felix-PREROUTING --jump DROP -m comment '
'--comment "IPv6 rpfilter failed"'
]},
{
'felix-PREROUTING': {}
},
async=False
)
m_ipset.ensure_exists.assert_called_once_with()
self.assertEqual(
m_check_call.mock_calls,
[
call(["ip", "tunnel", "add", "tunl0", "mode", "ipip"]),
call(["ip", "link", "set", "tunl0", "mtu", "1480"]),
call(["ip", "link", "set", "tunl0", "up"]),
]
)
self.assertEqual(
m_set_ips.mock_calls,
[call(IPV4, "tunl0", set([IPAddress("10.0.0.1")]))]
)
@patch("calico.felix.futils.check_call", autospec=True)
@patch("calico.felix.frules.devices", autospec=True)
@patch("calico.felix.frules.HOSTS_IPSET_V4", autospec=True)
def test_install_global_ipip_disabled(self, m_ipset, m_devices, m_check_call):
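        """Check that the IP-in-IP tunnel set-up is skipped and the MASQUERADE rule removed when IPIP is disabled."""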
m_devices.interface_exists.return_value = False
m_devices.interface_up.return_value = False
m_set_ips = m_devices.set_interface_ips
env_dict = {
"FELIX_ETCDADDR": "localhost:4001",
"FELIX_HOSTNAME": "myhost",
"FELIX_INTERFACEPREFIX": "tap",
"FELIX_METADATAADDR": "192.168.127.12",
"FELIX_METADATAPORT": "1234",
"FELIX_IPINIPENABLED": "false",
"FELIX_IPINIPMTU": "1480",
"FELIX_DEFAULTENDPOINTTOHOSTACTION": "RETURN"
}
config = load_config("felix_missing.cfg", env_dict=env_dict)
m_v4_upd = Mock(spec=IptablesUpdater)
m_v6_upd = Mock(spec=IptablesUpdater)
m_v6_raw_upd = Mock(spec=IptablesUpdater)
m_v6_nat_upd = Mock(spec=IptablesUpdater)
m_v4_nat_upd = Mock(spec=IptablesUpdater)
frules.install_global_rules(config, m_v4_upd, m_v4_nat_upd,
ip_version=4)
frules.install_global_rules(config, m_v6_upd, m_v6_nat_upd,
ip_version=6, raw_updater=m_v6_raw_upd)
self.assertEqual(
m_v4_nat_upd.ensure_rule_inserted.mock_calls,
[call("PREROUTING --jump felix-PREROUTING", async=False),
call("POSTROUTING --jump felix-POSTROUTING", async=False)]
)
m_v4_upd.ensure_rule_inserted.assert_has_calls([
call("INPUT --jump felix-INPUT", async=False),
call("OUTPUT --jump felix-OUTPUT", async=False),
call("FORWARD --jump felix-FORWARD", async=False)
]
)
self.assertEqual(
m_v4_nat_upd.ensure_rule_removed.mock_calls,
[call("POSTROUTING --out-interface tunl0 "
"-m addrtype ! --src-type LOCAL --limit-iface-out "
"-m addrtype --src-type LOCAL "
"-j MASQUERADE",
async=False)]
)
m_v6_raw_upd.ensure_rule_inserted.assert_called_once_with(
'PREROUTING --in-interface tap+ --match rpfilter --invert --jump '
'felix-PREROUTING',
async=False,
)
m_v6_raw_upd.rewrite_chains.assert_called_once_with(
{'felix-PREROUTING': [
'--append felix-PREROUTING --jump DROP -m comment '
'--comment "IPv6 rpfilter failed"'
]},
{
'felix-PREROUTING': {}
},
async=False
)
self.assertFalse(m_ipset.ensure_exists.called)
self.assertFalse(m_check_call.called)
self.assertFalse(m_set_ips.called)
expected_chains = {
'felix-FIP-DNAT': [],
'felix-FIP-SNAT': [],
'felix-PREROUTING': [
'--append felix-PREROUTING --jump felix-FIP-DNAT',
'--append felix-PREROUTING --protocol tcp --dport 80 --destination '
'169.254.169.254/32 --jump DNAT --to-destination 192.168.127.12:1234'
],
'felix-POSTROUTING': [
'--append felix-POSTROUTING --jump felix-FIP-SNAT'
]
}
m_v4_nat_upd.rewrite_chains.assert_called_once_with(
expected_chains,
{'felix-PREROUTING': set(['felix-FIP-DNAT']),
'felix-POSTROUTING': set(['felix-FIP-SNAT'])},
async=False
)
expected_chains = {
'felix-INPUT': [
'--append felix-INPUT --match conntrack --ctstate INVALID --jump DROP',
'--append felix-INPUT --match conntrack --ctstate RELATED,ESTABLISHED --jump ACCEPT',
'--append felix-INPUT --jump MARK --set-mark 0/0x4000000',
'--append felix-INPUT --in-interface tap+ --jump MARK --set-mark 0x4000000/0x4000000',
'--append felix-INPUT --goto felix-FROM-HOST-IF --match mark --mark 0/0x4000000',
'--append felix-INPUT --protocol tcp --destination 192.168.127.12 --dport 1234 --jump ACCEPT',
'--append felix-INPUT --protocol udp --sport 68 --dport 67 --jump ACCEPT',
'--append felix-INPUT --protocol udp --dport 53 --jump ACCEPT',
'--append felix-INPUT --jump felix-FROM-ENDPOINT'
],
'felix-OUTPUT': [
'--append felix-OUTPUT --match conntrack --ctstate INVALID --jump DROP',
'--append felix-OUTPUT --match conntrack --ctstate RELATED,ESTABLISHED --jump ACCEPT',
'--append felix-OUTPUT --jump MARK --set-mark 0/0x4000000',
'--append felix-OUTPUT --out-interface tap+ --jump MARK --set-mark 0x4000000/0x4000000',
'--append felix-OUTPUT --goto felix-TO-HOST-IF --match mark --mark 0/0x4000000',
],
'felix-FORWARD': [
'--append felix-FORWARD --in-interface tap+ --match conntrack --ctstate INVALID --jump DROP',
'--append felix-FORWARD --out-interface tap+ --match conntrack --ctstate INVALID --jump DROP',
'--append felix-FORWARD --in-interface tap+ --match conntrack --ctstate RELATED,ESTABLISHED --jump ACCEPT',
'--append felix-FORWARD --out-interface tap+ --match conntrack --ctstate RELATED,ESTABLISHED --jump ACCEPT',
'--append felix-FORWARD --jump felix-FROM-ENDPOINT --in-interface tap+',
'--append felix-FORWARD --jump felix-TO-ENDPOINT --out-interface tap+',
'--append felix-FORWARD --jump ACCEPT --in-interface tap+',
'--append felix-FORWARD --jump ACCEPT --out-interface tap+'
],
'felix-FAILSAFE-IN': [
'--append felix-FAILSAFE-IN --protocol tcp --dport 22 --jump ACCEPT'
],
'felix-FAILSAFE-OUT': [
'--append felix-FAILSAFE-OUT --protocol tcp --dport 2379 --jump ACCEPT',
'--append felix-FAILSAFE-OUT --protocol tcp --dport 2380 --jump ACCEPT',
'--append felix-FAILSAFE-OUT --protocol tcp --dport 4001 --jump ACCEPT',
'--append felix-FAILSAFE-OUT --protocol tcp --dport 7001 --jump ACCEPT'
]
}
m_v4_upd.rewrite_chains.assert_called_once_with(
expected_chains,
EXPECTED_TOP_LEVEL_DEPS,
async=False
)
@patch("calico.felix.futils.check_call", autospec=True)
@patch("calico.felix.frules.devices", autospec=True)
@patch("calico.felix.frules.HOSTS_IPSET_V4", autospec=True)
def test_install_global_no_ipv6(self, m_ipset, m_devices, m_check_call):
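        """Check rule installation when only the IPv4 updaters are provided (no IPv6)."""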
m_devices.interface_exists.return_value = False
m_devices.interface_up.return_value = False
m_set_ips = m_devices.set_interface_ips
env_dict = {
"FELIX_ETCDADDR": "localhost:4001",
"FELIX_HOSTNAME": "myhost",
"FELIX_INTERFACEPREFIX": "tap",
"FELIX_METADATAADDR": "192.168.127.12",
"FELIX_METADATAPORT": "1234",
"FELIX_IPINIPENABLED": "false",
"FELIX_IPINIPMTU": "1480",
"FELIX_DEFAULTENDPOINTTOHOSTACTION": "RETURN"
}
config = load_config("felix_missing.cfg", env_dict=env_dict)
m_v4_upd = Mock(spec=IptablesUpdater)
m_v4_nat_upd = Mock(spec=IptablesUpdater)
frules.install_global_rules(config, m_v4_upd, m_v4_nat_upd,
ip_version=4)
self.assertEqual(
m_v4_nat_upd.ensure_rule_inserted.mock_calls,
[call("PREROUTING --jump felix-PREROUTING", async=False),
call("POSTROUTING --jump felix-POSTROUTING", async=False)]
)
m_v4_upd.ensure_rule_inserted.assert_has_calls([
call("INPUT --jump felix-INPUT", async=False),
call("OUTPUT --jump felix-OUTPUT", async=False),
call("FORWARD --jump felix-FORWARD", async=False)
]
)
self.assertEqual(
m_v4_nat_upd.ensure_rule_removed.mock_calls,
[call("POSTROUTING --out-interface tunl0 "
"-m addrtype ! --src-type LOCAL --limit-iface-out "
"-m addrtype --src-type LOCAL "
"-j MASQUERADE",
async=False)]
)
self.assertFalse(m_ipset.ensure_exists.called)
self.assertFalse(m_check_call.called)
self.assertFalse(m_set_ips.called)
expected_chains = {
'felix-FIP-DNAT': [],
'felix-FIP-SNAT': [],
'felix-PREROUTING': [
'--append felix-PREROUTING --jump felix-FIP-DNAT',
'--append felix-PREROUTING --protocol tcp --dport 80 --destination '
'169.254.169.254/32 --jump DNAT --to-destination 192.168.127.12:1234'
],
'felix-POSTROUTING': [
'--append felix-POSTROUTING --jump felix-FIP-SNAT'
]
}
m_v4_nat_upd.rewrite_chains.assert_called_once_with(
expected_chains,
{'felix-PREROUTING': set(['felix-FIP-DNAT']),
'felix-POSTROUTING': set(['felix-FIP-SNAT'])},
async=False
)
expected_chains = {
'felix-INPUT': [
'--append felix-INPUT --match conntrack --ctstate INVALID --jump DROP',
'--append felix-INPUT --match conntrack --ctstate RELATED,ESTABLISHED --jump ACCEPT',
'--append felix-INPUT --jump MARK --set-mark 0/0x4000000',
'--append felix-INPUT --in-interface tap+ --jump MARK --set-mark 0x4000000/0x4000000',
'--append felix-INPUT --goto felix-FROM-HOST-IF --match mark --mark 0/0x4000000',
'--append felix-INPUT --protocol tcp --destination 192.168.127.12 --dport 1234 --jump ACCEPT',
'--append | |
Size of population 2 after split.
b1: Population growth coefficient of population 1
b2: Population growth coefficient of population 2
m12: Migration from pop 2 to pop 1 (2*Na*m12).
m21: Migration from pop 1 to pop 2.
Tam: The scaled time between the split and the end of ancient migration (in units of 2*Na generations).
Ts: The scaled time between the end of ancient migration and present.
n1,n2: Size of fs to generate.
pts: Number of points to use in grid for evaluation.
"""
# Define the grid we'll use
xx = dadi.Numerics.default_grid(pts)
# phi for the equilibrium ancestral population
phi = dadi.PhiManip.phi_1D(xx)
#Now do the population growth event.
phi = dadi.Integration.one_pop(phi, xx, Tp, nu=nuA)
# Now do the divergence event
phi = dadi.PhiManip.phi_1D_to_2D(xx, phi)
    # We start the population size change after the split independently in each population and set the migration rates to m12 and m21
bnu1_func = lambda t: nu1 * b1**(t/Tam)
bnu2_func = lambda t: nu2 * b2**(t/Tam)
phi = dadi.Integration.two_pops(phi, xx, Tam, bnu1_func, bnu2_func, m12=m12, m21=m21)
    # We continue the population size change after ancient migration (until present) independently in each population and set the migration rates to zero
bnu1_func = lambda t: nu1 * b1**(t/Ts)
bnu2_func = lambda t: nu2 * b2**(t/Ts)
phi = dadi.Integration.two_pops(phi, xx, Ts, bnu1_func, bnu2_func, m12=0, m21=0)
###
## Finally, calculate the spectrum.
fs = dadi.Spectrum.from_phi(phi, (n1,n2), (xx,xx))
return fs
def AMA2N(params, (n1,n2), pts):
nuA, nu1, nu2, hrf, m12, m21, Tam, Tp, Ts, Q = params
"""
    Model with split, ancient migration, heterogeneous effective population size (with 2 classes of loci shared by the two populations = Hill-Robertson effects)
nu1: Size of population 1 after split.
nu2: Size of population 2 after split.
hrf: Hill-Robertson factor, i.e. the degree to which Ne is locally reduced due to the effects of background selection and selective sweep effects
m12: Migration from pop 2 to pop 1 (2*Na*m12).
m21: Migration from pop 1 to pop 2.
Tam: The scaled time between the split and the end of ancient migration.
Ts: The scaled time between the end of ancient migration and present (in units of 2*Na generations).
Q: The proportion of the genome with a reduced effective size due to selection at linked sites
n1,n2: Size of fs to generate.
pts: Number of points to use in grid for evaluation.
"""
# Define the grid we'll use
xx = dadi.Numerics.default_grid(pts)
    #### Calculate the spectrum in normally-recombining regions
# phi for the equilibrium ancestral population
phinr = dadi.PhiManip.phi_1D(xx)
#Now do the population growth event.
phinr = dadi.Integration.one_pop(phinr, xx, Tp, nu=nuA)
# Now do the divergence event
phinr = dadi.PhiManip.phi_1D_to_2D(xx, phinr)
# We set the population sizes after the split to nu1 and nu2 and the migration rates to m12 and m21
phinr = dadi.Integration.two_pops(phinr, xx, Tam, nu1, nu2, m12=m12, m21=m21)
# We keep the population sizes after the split to nu1 and nu2 and set the migration rates to zero
phinr = dadi.Integration.two_pops(phinr, xx, Ts, nu1, nu2, m12=0, m21=0)
## calculate the spectrum.
fsnr = dadi.Spectrum.from_phi(phinr, (n1,n2), (xx,xx))
#### Spectrum of low-recombining regions
# phi for the equilibrium ancestral population
philr = dadi.PhiManip.phi_1D(xx)
#Now do the population growth event.
philr = dadi.Integration.one_pop(philr, xx, Tp, nu=nuA)
# Now do the divergence event
philr = dadi.PhiManip.phi_1D_to_2D(xx, philr)
# We set the population sizes after the split to hrf*nu1 and hrf*nu2 and the migration rates to m12 and m21
philr = dadi.Integration.two_pops(philr, xx, Tam, nu1*hrf, nu2*hrf, m12=m12, m21=m21)
# We keep the population sizes after the split to hrf*nu1 and hrf*nu2 and set the migration rates to zero
philr = dadi.Integration.two_pops(philr, xx, Ts, nu1*hrf, nu2*hrf, m12=0, m21=0)
## calculate the spectrum.
fslr = dadi.Spectrum.from_phi(philr, (n1,n2), (xx,xx))
#### Sum the two spectra, weighted by their genomic proportions (1-Q) and Q
fs = (1-Q)*fsnr + Q*fslr
return fs
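# --- Added usage sketch (not part of the original model set) ---
# Illustrates how a model function defined above is typically wrapped for fitting with
# dadi: extrapolate over several grid sizes, build the expected joint SFS, and score it
# against an observed spectrum. The file name 'observed_sfs.fs' and the parameter values
# are placeholders, not values used by the original authors.
def _example_fit_AMA2N(fs_file='observed_sfs.fs'):
    data = dadi.Spectrum.from_file(fs_file)        # observed joint SFS in dadi format
    ns = data.sample_sizes                         # (n1, n2)
    pts_l = [ns[0] + 10, ns[0] + 20, ns[0] + 30]   # grid sizes for extrapolation
    func_ex = dadi.Numerics.make_extrap_log_func(AMA2N)
    # Illustrative values for nuA, nu1, nu2, hrf, m12, m21, Tam, Tp, Ts, Q
    p0 = [1.0, 1.0, 1.0, 0.5, 1.0, 1.0, 0.1, 0.1, 1.0, 0.3]
    model = func_ex(p0, ns, pts_l)
    ll = dadi.Inference.ll_multinom(model, data)
    theta = dadi.Inference.optimal_sfs_scaling(model, data)
    return ll, theta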
def AMA2N2m(params, (n1,n2), pts):
nuA, nu1, nu2, hrf, m12, m21, me12, me21, Tam, Tp, Ts, P, Q = params
"""
Model of semi-permeability with split, ancient migration with 2 migration rates, and heterogeneous effective population size (2 classes, shared by the two populations = background selection)
nuA: Ancestral population size (after the size change that precedes the split).
Tp: The scaled time of the ancestral size change, before the split (in units of 2*Na generations).
nu1: Size of population 1 after split.
nu2: Size of population 2 after split.
hrf: Hill-Robertson factor, i.e. the degree to which Ne is locally reduced by background selection and selective sweeps.
m12: Migration from pop 2 to pop 1 (2*Na*m12).
m21: Migration from pop 1 to pop 2.
me12: Effective migration from pop 2 to pop 1 in genomic islands.
me21: Effective migration from pop 1 to pop 2 in genomic islands.
Tam: The scaled time between the split and the end of ancient migration (in units of 2*Na generations).
Ts: The scaled time between the end of ancient migration and present.
P: The proportion of the genome evolving neutrally
Q: The proportion of the genome with a reduced effective size due to selection at linked sites
n1,n2: Size of fs to generate.
pts: Number of points to use in grid for evaluation.
"""
# Define the grid we'll use
xx = dadi.Numerics.default_grid(pts)
#### Calculate the neutral spectrum
# phi for the equilibrium ancestral population
phiN = dadi.PhiManip.phi_1D(xx)
#Now do the population growth event.
phiN = dadi.Integration.one_pop(phiN, xx, Tp, nu=nuA)
# Now do the divergence event
phiN = dadi.PhiManip.phi_1D_to_2D(xx, phiN)
# We set the population sizes after the split to nu1 and nu2 and the migration rate to m12 and m21
phiN = dadi.Integration.two_pops(phiN, xx, Tam, nu1, nu2, m12=m12, m21=m21)
# We keep the population sizes after the split to nu1 and nu2 and set the migration rates to zero
phiN = dadi.Integration.two_pops(phiN, xx, Ts, nu1, nu2, m12=0, m21=0)
###
## calculate the spectrum.
fsN = dadi.Spectrum.from_phi(phiN, (n1,n2), (xx,xx))
#### Calculate the genomic island spectrum
# phi for the equilibrium ancestral population
phiI = dadi.PhiManip.phi_1D(xx)
#Now do the population growth event.
phiI = dadi.Integration.one_pop(phiI, xx, Tp, nu=nuA)
# Now do the divergence event
phiI = dadi.PhiManip.phi_1D_to_2D(xx, phiI)
# We set the population sizes after the split to nu1 and nu2 and the migration rates to me12 and me21
phiI = dadi.Integration.two_pops(phiI, xx, Ts, nu1, nu2, m12=me12, m21=me21)
# We keep the population sizes after the split to nu1 and nu2 and set the migration rate to zero
phiI = dadi.Integration.two_pops(phiI, xx, Tam, nu1, nu2, m12=0, m21=0)
###
## calculate the spectrum.
fsI = dadi.Spectrum.from_phi(phiI, (n1,n2), (xx,xx))
#### Calculate the spectrum in normally-recombining regions
# phi for the equilibrium ancestral population
phinr = dadi.PhiManip.phi_1D(xx)
#Now do the population growth event.
phinr = dadi.Integration.one_pop(phinr, xx, Tp, nu=nuA)
# Now do the divergence event
phinr = dadi.PhiManip.phi_1D_to_2D(xx, phinr)
# We set the population sizes after the split to nu1 and nu2 and the migration rates to m12 and m21
phinr = dadi.Integration.two_pops(phinr, xx, Tam, nu1, nu2, m12=m12, m21=m21)
# We keep the population sizes after the split to nu1 and nu2 and set the migration rates to zero
phinr = dadi.Integration.two_pops(phinr, xx, Ts, nu1, nu2, m12=0, m21=0)
###
## calculate the spectrum.
# oriented
fsnr = dadi.Spectrum.from_phi(phinr, (n1,n2), (xx,xx))
#### Spectrum of low-recombining regions
# phi for the equilibrium ancestral population
philr = dadi.PhiManip.phi_1D(xx)
#Now do the population growth event.
philr = dadi.Integration.one_pop(philr, xx, Tp, nu=nuA)
# Now do the divergence event
philr = dadi.PhiManip.phi_1D_to_2D(xx, philr)
# We set the population sizes after the split to hrf*nu1 and hrf*nu2 and the migration rates to m12 and m21
philr = dadi.Integration.two_pops(philr, xx, Tam, nu1*hrf, nu2*hrf, m12=m12, m21=m21)
# We keep the population sizes after the split to hrf*nu1 and hrf*nu2 and set the migration rates to zero
philr = dadi.Integration.two_pops(philr, xx, Ts, nu1*hrf, nu2*hrf, m12=0, m21=0)
###
## calculate the spectrum.
fslr = dadi.Spectrum.from_phi(philr, (n1,n2), (xx,xx))
#### Sum the spectra
fs = P*fsN+(1-P)*fsI+(1-Q)*fsnr+Q*fslr
return fs
def AMA2NG(params, (n1,n2), pts):
nuA, nu1, nu2, b1, b2, hrf, m12,
#!/bin/env python3
import sys
import os
import traceback
import ast
from neo4j import GraphDatabase
sys.path.append(os.path.dirname(os.path.abspath(__file__))+"/../../reasoningtool/QuestionAnswering/")
from KGNodeIndex import KGNodeIndex
sys.path.append(os.path.dirname(os.path.abspath(__file__))+"/../../") # code directory
from RTXConfiguration import RTXConfiguration
sys.path.append(os.path.dirname(os.path.abspath(__file__))+"/../../UI/OpenAPI/python-flask-server/")
from swagger_server.models.node import Node
from swagger_server.models.edge import Edge
from swagger_server.models.node_attribute import NodeAttribute
from swagger_server.models.edge_attribute import EdgeAttribute
class KGQuerier:
def __init__(self, response_object, kp_to_use):
self.response = response_object
self.kp = "KG2" if kp_to_use == "ARAX/KG2" else "KG1"
self.query_graph = None
self.cypher_query = None
self.query_results = None
self.edge_to_nodes_map = dict()
self.final_kg = {'nodes': dict(), 'edges': dict()}
def answer_one_hop_query(self, query_graph, qnodes_using_curies_from_prior_step):
"""
This function answers a one-hop (single-edge) query using either KG1 or KG2.
:param query_graph: A Reasoner API standard query graph.
:param qnodes_using_curies_from_prior_step: Set of QNode IDs whose curie is now a list of curies found in a
prior expand step (only for Expand's purposes).
:return: An (almost) Reasoner API standard knowledge graph containing all of the nodes and edges returned as
results for the query. (Dictionary version, organized by QG IDs.)
"""
self.query_graph = query_graph
dsl_parameters = self.response.data['parameters']
kp = self.kp
synonym_usages_dict = dict()
if dsl_parameters['use_synonyms']:
synonym_usages_dict = self.__add_curie_synonyms_to_query_nodes(self.query_graph.nodes, kp,
qnodes_using_curies_from_prior_step)
if not self.response.status == 'OK':
return self.final_kg, self.edge_to_nodes_map
self.__convert_query_graph_to_cypher_query(dsl_parameters['enforce_directionality'])
if not self.response.status == 'OK':
return self.final_kg, self.edge_to_nodes_map
self.__answer_query_using_kg_neo4j(kp, dsl_parameters['continue_if_no_results'])
if not self.response.status == 'OK':
return self.final_kg, self.edge_to_nodes_map
self.__add_answers_to_kg(dsl_parameters['synonym_handling'], synonym_usages_dict, kp, query_graph)
if not self.response.status == 'OK':
return self.final_kg, self.edge_to_nodes_map
return self.final_kg, self.edge_to_nodes_map
def answer_single_node_query(self, qnode):
if not qnode.curie:
self.response.error(f"Cannot expand a single query node if it doesn't have a curie", error_code="InvalidQuery")
else:
# Gather synonyms as appropriate
use_synonyms = self.response.data['parameters'].get('use_synonyms')
synonym_handling = self.response.data['parameters'].get('synonym_handling')
synonym_usages_dict = dict()
if use_synonyms:
synonym_usages_dict = self.__add_curie_synonyms_to_query_nodes([qnode], self.kp)
if not self.response.status == 'OK':
return self.final_kg
# Build and run a cypher query to get this node/nodes
where_clause = f"{qnode.id}.id='{qnode.curie}'" if type(qnode.curie) is str else f"{qnode.id}.id in {qnode.curie}"
cypher_query = f"MATCH {self.__get_cypher_for_query_node(qnode)} WHERE {where_clause} RETURN {qnode.id}"
results = self.__run_cypher_query(cypher_query, self.kp)
# Process the results and add to our answer knowledge graph, handling synonyms as appropriate
for result in results:
neo4j_node = result.get(qnode.id)
swagger_node = self.__convert_neo4j_node_to_swagger_node(neo4j_node, qnode.id, self.kp)
if qnode.id in synonym_usages_dict and synonym_handling == "map_back":
# Only add the original curie (discard synonym nodes)
if swagger_node.id in synonym_usages_dict[qnode.id].keys():
self.__add_node_to_kg(swagger_node)
else:
self.__add_node_to_kg(swagger_node)
return self.final_kg
def __add_curie_synonyms_to_query_nodes(self, query_nodes, kp, qnodes_using_curies_from_prior_step=None):
self.response.debug("Looking for query nodes to use curie synonyms for")
if not qnodes_using_curies_from_prior_step:
qnodes_using_curies_from_prior_step = set()
kgni = KGNodeIndex()
synonym_usages_dict = dict()
for qnode in query_nodes:
if qnode.curie and (qnode.id not in qnodes_using_curies_from_prior_step):
curies_to_use_synonyms_for = qnode.curie if type(qnode.curie) is list else [qnode.curie]
synonyms = []
for curie in curies_to_use_synonyms_for:
original_curie = curie
equivalent_curies = kgni.get_equivalent_curies(original_curie, kg_name=kp)
if len(equivalent_curies) > 1:
synonyms += equivalent_curies
qnode.type = None # Equivalent curie types may be different than the original, so we clear this
if qnode.id not in synonym_usages_dict:
synonym_usages_dict[qnode.id] = dict()
synonym_usages_dict[qnode.id][original_curie] = equivalent_curies
elif len(equivalent_curies) == 1:
self.response.info(f"Could not find any equivalent curies in {kp} for {original_curie}")
synonyms += equivalent_curies
else:
self.response.error(f"{kp} does not contain a node with curie {original_curie}", error_code="UnknownCurie")
# Use our new synonyms list only if we actually found any synonyms
if synonyms != curies_to_use_synonyms_for:
self.response.info(f"Using equivalent curies for qnode {qnode.id} ({qnode.curie}): {synonyms}")
qnode.curie = synonyms
return synonym_usages_dict
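# Note added for clarity: the returned synonym_usages_dict maps each qnode id to the
# original curie(s) and the equivalent curies substituted for them, e.g.
# (illustrative identifiers only):
#   {'n00': {'DOID:9352': ['DOID:9352', 'UMLS:C0011860']}}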
def __convert_query_graph_to_cypher_query(self, enforce_directionality):
if len(self.query_graph.edges) > 1:
self.response.error(f"KGQuerier requires a single-edge query graph", error_code="InvalidQuery")
else:
self.response.debug(f"Generating cypher for edge {self.query_graph.edges[0].id} query graph")
try:
# Build the match clause
edge = self.query_graph.edges[0]
source_node = self.__get_query_node(edge.source_id)
target_node = self.__get_query_node(edge.target_id)
edge_cypher = self.__get_cypher_for_query_edge(edge, enforce_directionality)
source_node_cypher = self.__get_cypher_for_query_node(source_node)
target_node_cypher = self.__get_cypher_for_query_node(target_node)
match_clause = f"MATCH {source_node_cypher}{edge_cypher}{target_node_cypher}"
# Build the where clause
where_fragments = []
for node in [source_node, target_node]:
if node.curie:
if type(node.curie) is str:
where_fragment = f"{node.id}.id='{node.curie}'"
else:
where_fragment = f"{node.id}.id in {node.curie}"
where_fragments.append(where_fragment)
if len(where_fragments):
where_clause = "WHERE "
where_clause += " AND ".join(where_fragments)
else:
where_clause = ""
# Build the with clause
source_node_col_name = f"nodes_{source_node.id}"
target_node_col_name = f"nodes_{target_node.id}"
edge_col_name = f"edges_{edge.id}"
extra_edge_properties = "{.*, " + f"id:ID({edge.id}), {source_node.id}:{source_node.id}.id, {target_node.id}:{target_node.id}.id" + "}"
with_clause = f"WITH collect(distinct {source_node.id}) as {source_node_col_name}, " \
f"collect(distinct {target_node.id}) as {target_node_col_name}, " \
f"collect(distinct {edge.id}{extra_edge_properties}) as {edge_col_name}"
# Build the return clause
return_clause = f"RETURN {source_node_col_name}, {target_node_col_name}, {edge_col_name}"
self.cypher_query = f"{match_clause} {where_clause} {with_clause} {return_clause}"
except:
tb = traceback.format_exc()
error_type, error, _ = sys.exc_info()
self.response.error(f"Problem generating cypher for query. {tb}", error_code=error_type.__name__)
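# Illustrative example (added; not output captured from the original system): for a
# one-hop query graph n00--e00--n01 where n00 is pinned to a single curie, the method
# above assembles a Cypher query of roughly this shape:
#   MATCH (n00)-[e00]-(n01) WHERE n00.id='CHEMBL.COMPOUND:CHEMBL112'
#   WITH collect(distinct n00) as nodes_n00, collect(distinct n01) as nodes_n01,
#        collect(distinct e00{.*, id:ID(e00), n00:n00.id, n01:n01.id}) as edges_e00
#   RETURN nodes_n00, nodes_n01, edges_e00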
def __answer_query_using_kg_neo4j(self, kp, continue_if_no_results):
self.response.info(f"Sending cypher query for edge {self.query_graph.edges[0].id} to {kp} neo4j")
self.query_results = self.__run_cypher_query(self.cypher_query, kp)
if self.response.status == 'OK':
columns_with_lengths = dict()
for column in self.query_results[0]:
columns_with_lengths[column] = len(self.query_results[0].get(column))
if any(length == 0 for length in columns_with_lengths.values()):
if continue_if_no_results:
self.response.warning(f"No paths were found in {kp} satisfying this query graph")
else:
self.response.error(f"No paths were found in {kp} satisfying this query graph", error_code="NoResults")
else:
num_results_string = ", ".join([f"{column.split('_')[1]}: {value}" for column, value in sorted(columns_with_lengths.items())])
self.response.info(f"Query for edge {self.query_graph.edges[0].id} returned results ({num_results_string})")
def __add_answers_to_kg(self, synonym_handling, synonym_usages_dict, kp, query_graph):
self.response.debug(f"Processing query results for edge {self.query_graph.edges[0].id}")
node_uuid_to_curie_dict = self.__build_node_uuid_to_curie_dict(self.query_results[0]) if kp == "KG1" else dict()
results_table = self.query_results[0]
column_names = [column_name for column_name in results_table]
for column_name in column_names:
# Load answer nodes into our knowledge graph
if column_name.startswith('nodes'): # Example column name: 'nodes_n00'
qnode_id = column_name.replace("nodes_", "", 1)
for node in results_table.get(column_name):
swagger_node = self.__convert_neo4j_node_to_swagger_node(node, qnode_id, kp)
if synonym_handling == 'map_back' and qnode_id in synonym_usages_dict:
# Only keep the node corresponding to the original curie (discard synonym nodes)
if swagger_node.id in synonym_usages_dict[qnode_id].keys():
self.__add_node_to_kg(swagger_node)
else:
self.__add_node_to_kg(swagger_node)
# Load answer edges into our knowledge graph
elif column_name.startswith('edges'): # Example column name: 'edges_e01'
qedge_id = column_name.replace("edges_", "", 1)
for edge in results_table.get(column_name):
if kp == "KG2":
swagger_edge = self.__convert_kg2_edge_to_swagger_edge(edge, qedge_id)
else:
swagger_edge = self.__convert_kg1_edge_to_swagger_edge(edge, qedge_id, node_uuid_to_curie_dict)
# Record which of this edge's nodes correspond to which qnode_id
qedge = query_graph.edges[0]
self.edge_to_nodes_map[swagger_edge.id] = dict()
for qnode_id in [qedge.source_id, qedge.target_id]:
node_curie = edge.get(qnode_id)
self.edge_to_nodes_map[swagger_edge.id][qnode_id] = node_curie
if synonym_handling == 'map_back':
# Edit edge to point to original curie, if it uses a synonym
for qnode_id, synonym_usage_mappings in synonym_usages_dict.items():
curie_for_this_qnode_id = self.edge_to_nodes_map[swagger_edge.id].get(qnode_id)
for original_curie, synonyms_used in synonym_usage_mappings.items():
if curie_for_this_qnode_id in synonyms_used:
if swagger_edge.source_id == curie_for_this_qnode_id:
swagger_edge.source_id = original_curie
if swagger_edge.target_id == curie_for_this_qnode_id:
swagger_edge.target_id = original_curie
self.edge_to_nodes_map[swagger_edge.id][qnode_id] = original_curie
self.__add_edge_to_kg(swagger_edge)
# Make sure any original curie that synonyms were used for appears in the answer kg as appropriate
if synonym_handling == 'map_back':
for qnode_id, synonym_usage_mappings in synonym_usages_dict.items():
for original_curie, synonyms_used in synonym_usage_mappings.items():
if qnode_id not in self.final_kg['nodes'] or original_curie not in self.final_kg['nodes'][qnode_id]:
# Get this node from neo4j and add it to the kg
cypher = f"match (n) where n.id='{original_curie}' return n limit 1"
original_node = self.__run_cypher_query(cypher, kp)[0].get('n')
swagger_node = self.__convert_neo4j_node_to_swagger_node(original_node, qnode_id, kp)
self.__add_node_to_kg(swagger_node)
if self.final_kg['edges']:
# Remove any self-edges
edges_to_remove = []
qedge_id = self.query_graph.edges[0]
for qedge_id, edges in self.final_kg['edges'].items():
for edge_key, edge in edges.items():
if edge.source_id == edge.target_id:
edges_to_remove.append(edge_key)
for edge_id in edges_to_remove:
self.final_kg['edges'][qedge_id].pop(edge_id)
# Remove any nodes that may have been orphaned
for qnode_id in [node.id for node in self.query_graph.nodes]:
node_ids_used_by_edges_for_this_qnode_id = set()
for edge in self.final_kg['edges'][qedge_id].values():
node_ids_used_by_edges_for_this_qnode_id.add(self.edge_to_nodes_map[edge.id][qnode_id])
orphan_node_ids_for_this_qnode_id = set(self.final_kg['nodes'][qnode_id].keys()).difference(node_ids_used_by_edges_for_this_qnode_id)
for node_id in orphan_node_ids_for_this_qnode_id:
self.final_kg['nodes'][qnode_id].pop(node_id)
def __convert_neo4j_node_to_swagger_node(self, neo4j_node, qnode_id, kp):
if kp == "KG2":
return self.__convert_kg2_node_to_swagger_node(neo4j_node, qnode_id)
else:
return self.__convert_kg1_node_to_swagger_node(neo4j_node, qnode_id)
def __convert_kg2_node_to_swagger_node(self, neo4j_node, qnode_id):
swagger_node = Node()
swagger_node.qnode_id = qnode_id
swagger_node.id = neo4j_node.get('id')
swagger_node.name = neo4j_node.get('name')
swagger_node.description = neo4j_node.get('description')
swagger_node.uri = neo4j_node.get('iri')
swagger_node.node_attributes = []
node_category = neo4j_node.get('category_label')
swagger_node.type = node_category if type(node_category) is list else [node_category]
# Fill out the 'symbol' property (only really relevant for nodes from UniProtKB)
if swagger_node.symbol is None and swagger_node.id.lower().startswith("uniprot"):
swagger_node.symbol = neo4j_node.get('name')
swagger_node.name = neo4j_node.get('full_name')
# Add all additional properties on KG2 nodes as swagger NodeAttribute objects
additional_kg2_node_properties = ['publications', 'synonym', 'category', 'provided_by', 'deprecated',
'update_date']
node_attributes = self.__create_swagger_attributes("node", additional_kg2_node_properties, neo4j_node)
swagger_node.node_attributes += node_attributes
return swagger_node
def __convert_kg1_node_to_swagger_node(self, neo4j_node, qnode_id):
swagger_node = Node()
swagger_node.qnode_id = qnode_id
swagger_node.id = neo4j_node.get('id')
swagger_node.name = neo4j_node.get('name')
swagger_node.symbol = neo4j_node.get('symbol')
swagger_node.description = neo4j_node.get('description')
swagger_node.uri = neo4j_node.get('uri')
swagger_node.node_attributes = []
node_category = neo4j_node.get('category')
swagger_node.type = node_category if type(node_category) is list else [node_category]
return swagger_node
def __convert_kg2_edge_to_swagger_edge(self, neo4j_edge, qedge_id):
swagger_edge = Edge()
swagger_edge.id = f"KG2:{neo4j_edge.get('id')}"
swagger_edge.qedge_id = qedge_id
swagger_edge.type = neo4j_edge.get('simplified_edge_label')
swagger_edge.source_id = neo4j_edge.get('subject')
swagger_edge.target_id = neo4j_edge.get('object')
swagger_edge.relation = neo4j_edge.get('relation')
swagger_edge.publications = ast.literal_eval(neo4j_edge.get('publications'))
swagger_edge.provided_by = self.__convert_strange_provided_by_field_to_list(neo4j_edge.get('provided_by'))  # Temporary hack until provided_by is fixed in
filter_by(reserved=reserved).\
filter_by(address=address).\
update(params, synchronize_session='evaluate')
if not rows_updated:
LOG.debug('The row was updated in a concurrent transaction, '
'we will fetch another row')
raise exception.FixedIpAssociateFailed(net=network_id)
return fixed_ip_ref
@require_admin_context
@_retry_on_deadlock
@retrying.retry(stop_max_attempt_number=5, retry_on_exception=
lambda exc: isinstance(exc, exception.FixedIpAssociateFailed))
def fixed_ip_associate_pool(context, network_id, instance_uuid=None,
host=None):
if instance_uuid and not uuidutils.is_uuid_like(instance_uuid):
raise exception.InvalidUUID(uuid=instance_uuid)
session = get_session()
with session.begin():
network_or_none = or_(models.FixedIp.network_id == network_id,
models.FixedIp.network_id == null())
fixed_ip_ref = model_query(context, models.FixedIp, session=session,
read_deleted="no").\
filter(network_or_none).\
filter_by(reserved=False).\
filter_by(instance_uuid=None).\
filter_by(host=None).\
first()
if not fixed_ip_ref:
raise exception.NoMoreFixedIps(net=network_id)
params = {}
if fixed_ip_ref['network_id'] is None:
params['network_id'] = network_id
if instance_uuid:
params['instance_uuid'] = instance_uuid
if host:
params['host'] = host
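# Comment added for clarity: the UPDATE below repeats every condition that selected the
# candidate row. If a concurrent transaction already claimed that row, zero rows match,
# rows_updated comes back 0, and FixedIpAssociateFailed triggers the @retrying.retry
# decorator above to try again with another row (an optimistic-locking pattern).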
rows_updated = model_query(context, models.FixedIp, session=session,
read_deleted="no").\
filter_by(id=fixed_ip_ref['id']).\
filter_by(network_id=fixed_ip_ref['network_id']).\
filter_by(reserved=False).\
filter_by(instance_uuid=None).\
filter_by(host=None).\
filter_by(address=fixed_ip_ref['address']).\
update(params, synchronize_session='evaluate')
if not rows_updated:
LOG.debug('The row was updated in a concurrent transaction, '
'we will fetch another row')
raise exception.FixedIpAssociateFailed(net=network_id)
return fixed_ip_ref
@require_context
def fixed_ip_create(context, values):
fixed_ip_ref = models.FixedIp()
fixed_ip_ref.update(values)
try:
fixed_ip_ref.save()
except db_exc.DBDuplicateEntry:
raise exception.FixedIpExists(address=values['address'])
return fixed_ip_ref
@require_context
def fixed_ip_bulk_create(context, ips):
engine = get_engine()
with engine.begin() as conn:
try:
tab = models.FixedIp.__table__
conn.execute(tab.insert(), ips)
except db_exc.DBDuplicateEntry as e:
raise exception.FixedIpExists(address=e.value)
@require_context
def fixed_ip_disassociate(context, address):
session = get_session()
with session.begin():
_fixed_ip_get_by_address(context, address, session=session).\
update({'instance_uuid': None,
'virtual_interface_id': None})
def fixed_ip_disassociate_all_by_timeout(context, host, time):
session = get_session()
# NOTE(vish): only update fixed ips that "belong" to this
# host; i.e. the network host or the instance
# host matches. Two queries necessary because
# join with update doesn't work.
with session.begin():
host_filter = or_(and_(models.Instance.host == host,
models.Network.multi_host == true()),
models.Network.host == host)
result = model_query(context, models.FixedIp, (models.FixedIp.id,),
read_deleted="no", session=session).\
filter(models.FixedIp.allocated == false()).\
filter(models.FixedIp.updated_at < time).\
join((models.Network,
models.Network.id == models.FixedIp.network_id)).\
join((models.Instance,
models.Instance.uuid == models.FixedIp.instance_uuid)).\
filter(host_filter).\
all()
fixed_ip_ids = [fip[0] for fip in result]
if not fixed_ip_ids:
return 0
result = model_query(context, models.FixedIp, session=session).\
filter(models.FixedIp.id.in_(fixed_ip_ids)).\
update({'instance_uuid': None,
'leased': False,
'updated_at': timeutils.utcnow()},
synchronize_session='fetch')
return result
@require_context
def fixed_ip_get(context, id, get_network=False):
query = model_query(context, models.FixedIp).filter_by(id=id)
if get_network:
query = query.options(joinedload('network'))
result = query.first()
if not result:
raise exception.FixedIpNotFound(id=id)
# FIXME(sirp): shouldn't we just use project_only here to restrict the
# results?
if (nova.context.is_user_context(context) and
result['instance_uuid'] is not None):
instance = instance_get_by_uuid(context.elevated(read_deleted='yes'),
result['instance_uuid'])
nova.context.authorize_project_context(context, instance.project_id)
return result
@require_admin_context
def fixed_ip_get_all(context):
result = model_query(context, models.FixedIp, read_deleted="yes").all()
if not result:
raise exception.NoFixedIpsDefined()
return result
@require_context
def fixed_ip_get_by_address(context, address, columns_to_join=None):
return _fixed_ip_get_by_address(context, address,
columns_to_join=columns_to_join)
def _fixed_ip_get_by_address(context, address, session=None,
columns_to_join=None):
if session is None:
session = get_session()
if columns_to_join is None:
columns_to_join = []
with session.begin(subtransactions=True):
try:
result = model_query(context, models.FixedIp, session=session)
for column in columns_to_join:
result = result.options(joinedload_all(column))
result = result.filter_by(address=address).first()
if not result:
raise exception.FixedIpNotFoundForAddress(address=address)
except db_exc.DBError:
msg = _("Invalid fixed IP Address %s in request") % address
LOG.warn(msg)
raise exception.FixedIpInvalid(msg)
# NOTE(sirp): shouldn't we just use project_only here to restrict the
# results?
if (nova.context.is_user_context(context) and
result['instance_uuid'] is not None):
instance = _instance_get_by_uuid(
context.elevated(read_deleted='yes'),
result['instance_uuid'],
session
)
nova.context.authorize_project_context(context,
instance.project_id)
return result
@require_context
def fixed_ip_get_by_floating_address(context, floating_address):
return model_query(context, models.FixedIp).\
join(models.FloatingIp,
models.FloatingIp.fixed_ip_id ==
models.FixedIp.id).\
filter(models.FloatingIp.address == floating_address).\
first()
# NOTE(tr3buchet) please don't invent an exception here, None is fine
@require_context
def fixed_ip_get_by_instance(context, instance_uuid):
if not uuidutils.is_uuid_like(instance_uuid):
raise exception.InvalidUUID(uuid=instance_uuid)
vif_and = and_(models.VirtualInterface.id ==
models.FixedIp.virtual_interface_id,
models.VirtualInterface.deleted == 0)
result = model_query(context, models.FixedIp, read_deleted="no").\
filter_by(instance_uuid=instance_uuid).\
outerjoin(models.VirtualInterface, vif_and).\
options(contains_eager("virtual_interface")).\
options(joinedload('network')).\
options(joinedload('floating_ips')).\
order_by(asc(models.VirtualInterface.created_at),
asc(models.VirtualInterface.id)).\
all()
if not result:
raise exception.FixedIpNotFoundForInstance(instance_uuid=instance_uuid)
return result
@require_admin_context
def fixed_ip_get_by_host(context, host):
session = get_session()
with session.begin():
instance_uuids = _instance_get_all_uuids_by_host(context, host,
session=session)
if not instance_uuids:
return []
return model_query(context, models.FixedIp, session=session).\
filter(models.FixedIp.instance_uuid.in_(instance_uuids)).\
all()
@require_context
def fixed_ip_get_by_network_host(context, network_id, host):
result = model_query(context, models.FixedIp, read_deleted="no").\
filter_by(network_id=network_id).\
filter_by(host=host).\
first()
if not result:
raise exception.FixedIpNotFoundForNetworkHost(network_id=network_id,
host=host)
return result
@require_context
def fixed_ips_by_virtual_interface(context, vif_id):
result = model_query(context, models.FixedIp, read_deleted="no").\
filter_by(virtual_interface_id=vif_id).\
options(joinedload('network')).\
options(joinedload('floating_ips')).\
all()
return result
@require_context
def fixed_ip_update(context, address, values):
session = get_session()
with session.begin():
_fixed_ip_get_by_address(context, address, session=session).\
update(values)
def _fixed_ip_count_by_project(context, project_id, session=None):
nova.context.authorize_project_context(context, project_id)
return model_query(context, models.FixedIp, (models.FixedIp.id,),
read_deleted="no", session=session).\
join((models.Instance,
models.Instance.uuid == models.FixedIp.instance_uuid)).\
filter(models.Instance.project_id == project_id).\
count()
###################
@require_context
def virtual_interface_create(context, values):
"""Create a new virtual interface record in the database.
:param values: = dict containing column values
"""
try:
vif_ref = models.VirtualInterface()
vif_ref.update(values)
vif_ref.save()
except db_exc.DBError:
raise exception.VirtualInterfaceCreateException()
return vif_ref
def _virtual_interface_query(context, session=None, use_slave=False):
return model_query(context, models.VirtualInterface, session=session,
read_deleted="no", use_slave=use_slave)
@require_context
def virtual_interface_get(context, vif_id):
"""Gets a virtual interface from the table.
:param vif_id: = id of the virtual interface
"""
vif_ref = _virtual_interface_query(context).\
filter_by(id=vif_id).\
first()
return vif_ref
@require_context
def virtual_interface_get_by_address(context, address):
"""Gets a virtual interface from the table.
:param address: = the address of the interface you're looking to get
"""
try:
vif_ref = _virtual_interface_query(context).\
filter_by(address=address).\
first()
except db_exc.DBError:
msg = _("Invalid virtual interface address %s in request") % address
LOG.warn(msg)
raise exception.InvalidIpAddressError(msg)
return vif_ref
@require_context
def virtual_interface_get_by_uuid(context, vif_uuid):
"""Gets a virtual interface from the table.
:param vif_uuid: the uuid of the interface you're looking to get
"""
vif_ref = _virtual_interface_query(context).\
filter_by(uuid=vif_uuid).\
first()
return vif_ref
@require_context
@require_instance_exists_using_uuid
def virtual_interface_get_by_instance(context, instance_uuid, use_slave=False):
"""Gets all virtual interfaces for instance.
:param instance_uuid: = uuid of the instance to retrieve vifs for
"""
vif_refs = _virtual_interface_query(context, use_slave=use_slave).\
filter_by(instance_uuid=instance_uuid).\
order_by(asc("created_at"), asc("id")).\
all()
return vif_refs
@require_context
def virtual_interface_get_by_instance_and_network(context, instance_uuid,
network_id):
"""Gets virtual interface for instance that's associated with network."""
vif_ref = _virtual_interface_query(context).\
filter_by(instance_uuid=instance_uuid).\
filter_by(network_id=network_id).\
first()
return vif_ref
@require_context
def virtual_interface_delete_by_instance(context, instance_uuid):
"""Delete virtual interface records that are associated
with the instance given by instance_id.
:param instance_uuid: = uuid of instance
"""
_virtual_interface_query(context).\
filter_by(instance_uuid=instance_uuid).\
soft_delete()
@require_context
def virtual_interface_get_all(context):
"""Get all vifs."""
vif_refs = _virtual_interface_query(context).all()
return vif_refs
###################
def _metadata_refs(metadata_dict, meta_class):
metadata_refs = []
if metadata_dict:
for k, v in metadata_dict.iteritems():
metadata_ref = meta_class()
metadata_ref['key'] = k
metadata_ref['value'] = v
metadata_refs.append(metadata_ref)
return metadata_refs
def _validate_unique_server_name(context, session, name):
if not CONF.osapi_compute_unique_server_name_scope:
return
lowername = name.lower()
base_query = model_query(context, models.Instance, session=session,
read_deleted='no').\
filter(func.lower(models.Instance.hostname) == lowername)
if CONF.osapi_compute_unique_server_name_scope == 'project':
instance_with_same_name = base_query.\
filter_by(project_id=context.project_id).\
count()
elif CONF.osapi_compute_unique_server_name_scope == 'global':
instance_with_same_name = base_query.count()
else:
msg = _('Unknown osapi_compute_unique_server_name_scope value: %s'
' Flag must be empty, "global" or'
' "project"') % CONF.osapi_compute_unique_server_name_scope
LOG.warn(msg)
return
if instance_with_same_name > 0:
raise exception.InstanceExists(name=lowername)
def _handle_objects_related_type_conversions(values):
"""Make sure that certain things in values (which may have come from
an objects.instance.Instance object) are in suitable form for the
database.
"""
# NOTE(danms): Make sure IP addresses are passed as strings to
# the database engine
for key in ('access_ip_v4', 'access_ip_v6'):
if key in values and values[key] is not None:
values[key] = str(values[key])
datetime_keys = ('created_at', 'deleted_at', 'updated_at',
'launched_at', 'terminated_at', 'scheduled_at')
convert_objects_related_datetimes(values, *datetime_keys)
def _check_instance_exists(context, session, instance_uuid):
if not model_query(context, models.Instance, session=session,
read_deleted="no").filter_by(
uuid=instance_uuid).first():
raise exception.InstanceNotFound(instance_id=instance_uuid)
@require_context
def instance_create(context, values):
"""Create a new Instance record in the database.
context - request context object
values - dict containing column values.
"""
# NOTE(rpodolyaka): create the default security group, if it doesn't exist.
# This must be done in a separate transaction, so that this one is not
# aborted in case a concurrent one succeeds first and the unique constraint
# for security group names is violated by a concurrent INSERT
security_group_ensure_default(context)
values = values.copy()
values['metadata'] = _metadata_refs(
values.get('metadata'), models.InstanceMetadata)
values['system_metadata'] = _metadata_refs(
values.get('system_metadata'), models.InstanceSystemMetadata)
_handle_objects_related_type_conversions(values)
instance_ref = models.Instance()
if not values.get('uuid'):
values['uuid'] = str(uuid.uuid4())
instance_ref['info_cache'] = models.InstanceInfoCache()
info_cache = values.pop('info_cache', None)
if info_cache is not None:
instance_ref['info_cache'].update(info_cache)
security_groups = values.pop('security_groups', [])
instance_ref['extra'] = models.InstanceExtra()
instance_ref['extra'].update(
{'numa_topology': None,
'pci_requests': None,
'vcpu_model': None,
})
instance_ref['extra'].update(values.pop('extra', {}))
instance_ref.update(values)
def _get_sec_group_models(session, security_groups):
models = []
default_group = _security_group_ensure_default(context, session)
if 'default' in security_groups:
models.append(default_group)
# Generate a new list, so we don't modify the original
security_groups = [x for x in security_groups if x != 'default']
if security_groups:
models.extend(_security_group_get_by_names(context,
session, context.project_id, security_groups))
return models
session = get_session()
with session.begin():
if 'hostname' in values:
_validate_unique_server_name(context, session, values['hostname'])
instance_ref.security_groups = _get_sec_group_models(session,
security_groups)
session.add(instance_ref)
# create the instance uuid to ec2_id mapping entry for instance
ec2_instance_create(context, instance_ref['uuid'])
return instance_ref
def _instance_data_get_for_user(context, project_id, user_id, session=None):
result = model_query(context,
models.Instance, (
func.count(models.Instance.id),
func.sum(models.Instance.vcpus),
func.sum(models.Instance.memory_mb),
), session=session).\
filter_by(project_id=project_id)
if user_id:
result = result.filter_by(user_id=user_id).first()
else:
result = result.first()
# NOTE(vish): convert None to 0
return (result[0] or 0, result[1] or 0, result[2] or 0)
@require_context
@_retry_on_deadlock
def instance_destroy(context, instance_uuid, constraint=None):
session = get_session()
with session.begin():
if uuidutils.is_uuid_like(instance_uuid):
instance_ref = _instance_get_by_uuid(context, instance_uuid,
session=session)
else:
raise exception.InvalidUUID(instance_uuid)
query = model_query(context, models.Instance, session=session).\
filter_by(uuid=instance_uuid)
if constraint is not None:
query = constraint.apply(models.Instance, query)
count = query.soft_delete()
if count == 0:
raise exception.ConstraintNotMet()
model_query(context, models.SecurityGroupInstanceAssociation,
session=session).\
filter_by(instance_uuid=instance_uuid).\
soft_delete()
model_query(context, models.InstanceInfoCache, session=session).\
filter_by(instance_uuid=instance_uuid).\
soft_delete()
model_query(context, models.InstanceMetadata, session=session).\
filter_by(instance_uuid=instance_uuid).\
soft_delete()
model_query(context, models.InstanceFault, session=session).\
filter_by(instance_uuid=instance_uuid).\
soft_delete()
model_query(context, models.InstanceExtra, session=session).\
filter_by(instance_uuid=instance_uuid).\
soft_delete()
return instance_ref
@require_context
def instance_get_by_uuid(context, uuid, columns_to_join=None,
import os
import dash
import dash_bootstrap_components as dbc
import dash_core_components as dcc
import dash_html_components as html
import geopandas as gpd
import pandas as pd
import plotly.graph_objects as go
import plotly.express as px
from scipy import stats
from peaky_finders.predictor import (
create_load_duration,
ISO_LIST,
get_peak_data,
get_iso_map,
get_forecasts,
)
app_name = os.getenv("APP_NAME", "dash-peaky-finders")
iso_map = get_iso_map()
peak_data = get_peak_data(ISO_LIST)
predictions, load, temperature = get_forecasts(ISO_LIST)
load_duration_curves = create_load_duration(peak_data)
TEMPLATE = "plotly_white"
app = dash.Dash(
external_stylesheets=[dbc.themes.LUX], suppress_callback_exceptions=True
)
server = app.server
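# Comment added for clarity: exposing the underlying Flask instance lets a WSGI server
# run the app in production, e.g. `gunicorn app:server` (module name illustrative).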
"""Homepage"""
app.layout = html.Div(
[dcc.Location(id="url", refresh=False), html.Div(id="page-content"),]
)
index_page = html.Div(
[
html.Br(),
html.Br(),
dbc.Row(
[
dbc.Col(html.H1(children="Welcome to Peaky Finders"), width=5),
dbc.Col(width=5),
],
justify="center",
),
html.Br(),
html.Br(),
dbc.Row(
[
dbc.Col(
html.Div(
[
html.H4(
children="To what extent do weather and weekday determine total electricity demand on the grid? Click an ISO button below to find out."
),
html.Div(
[
dcc.Link(
html.Button(
"HOME", id="home-button", className="mr-1"
),
href=f"/{app_name}/",
),
dcc.Link(
html.Button(
"CAISO", id="caiso-button", className="mr-1"
),
href=f"/{app_name}/caiso",
),
dcc.Link(
html.Button(
"MISO", id="miso-button", className="mr-1"
),
href=f"/{app_name}/miso",
),
dcc.Link(
html.Button(
"PJM", id="pjm-button", className="mr-1"
),
href=f"/{app_name}/pjm",
),
dcc.Link(
html.Button(
"NYISO", id="nyiso-button", className="mr-1"
),
href=f"/{app_name}/nyiso",
),
dcc.Link(
html.Button(
"ISONE", id="isone-button", className="mr-1"
),
href=f"/{app_name}/isone",
),
]
),
]
),
width=7,
),
dbc.Col(width=3),
],
justify="center",
),
html.Br(),
html.Br(),
html.Br(),
html.Br(),
dbc.Row(
[dbc.Col(html.H4(children="ISO Territory Map"), width=4), dbc.Col(width=4)],
justify="center",
),
html.Div(
[
dcc.Graph(
figure=px.choropleth(
iso_map,
geojson=iso_map.geometry,
locations=iso_map.index,
color="NAME",
projection="mercator",
)
.update_geos(fitbounds="locations", visible=False)
.update_layout(height=600, margin={"r": 0, "t": 0, "l": 0, "b": 0},)
)
],
style={"display": "inline-block", "width": "90%"},
),
]
)
"""NYISO LAYOUT"""
nyiso_layout = html.Div(
[
html.Div(id="nyiso-content"),
html.Br(),
dbc.Row(
[
dbc.Col(
html.Div(
[
dcc.Link(
html.Button("HOME", id="home-button", className="mr-1"),
href=f"/{app_name}/",
),
dcc.Link(
html.Button(
"CAISO", id="caiso-button", className="mr-1"
),
href=f"/{app_name}/caiso",
),
dcc.Link(
html.Button("MISO", id="miso-button", className="mr-1"),
href=f"/{app_name}/miso",
),
dcc.Link(
html.Button("PJM", id="pjm-button", className="mr-1"),
href=f"/{app_name}/pjm",
),
dcc.Link(
html.Button(
"NYISO", id="nyiso-button", className="mr-1"
),
href=f"/{app_name}/nyiso",
),
dcc.Link(
html.Button(
"ISONE", id="isone-button", className="mr-1"
),
href=f"/{app_name}/isone",
),
]
),
width=4,
),
dbc.Col(width=7),
],
justify="center",
),
html.Br(),
html.Br(),
dbc.Row(
[
dbc.Col(
html.H1("New York Independent System Operator (NYISO)"), width=9
),
dbc.Col(width=2),
],
justify="center",
),
dbc.Row(
[
dbc.Col(
html.Div(
children="""
"The NYISO is the New York Independent System Operator — the organization responsible for
managing New York’s electric grid and its competitive wholesale electric marketplace." For more information,
visit https://www.nyiso.com/.
"""
),
width=9,
),
dbc.Col(width=2),
],
justify="center",
),
html.Br(),
dbc.Row(
[dbc.Col(html.H3("Model Performance"), width=9), dbc.Col(width=2),],
justify="center",
),
dbc.Row(
[
dbc.Col(
html.Div(
children="""Mean Absolute Error (MAE) for February, 2021: 347.62 (pretty good)"""
),
width=9,
),
dbc.Col(width=2),
],
justify="center",
),
html.Br(),
dbc.Row(
[
dbc.Col(
dcc.Dropdown(
id="nyiso-dropdown",
options=[
{"label": "Actual", "value": "Actual"},
{"label": "Predicted", "value": "Predicted"},
],
value=["Actual", "Predicted"],
multi=True,
),
width=6,
),
dbc.Col(width=5),
],
justify="center",
),
dcc.Graph(id="nyiso-graph"),
html.Br(),
html.Br(),
dbc.Row(
[dbc.Col(html.H3("Training Data"), width=9), dbc.Col(width=2)],
justify="center",
),
dbc.Row(
[
dbc.Col(
html.Div(
children="""
The NYISO forecasting model was trained on historical load and weather data
from 2018-2021. Temperature readings are from New York City.
"""
),
width=9,
),
dbc.Col(width=2),
],
justify="center",
),
html.Br(),
dbc.Row(
[
dbc.Col(
html.Div(
[
dcc.Graph(
figure=px.histogram(
peak_data["NYISO"],
x=peak_data["NYISO"]["load_MW"],
nbins=75,
marginal="rug",
title=f"Distribution of NYISO Daily Peaks",
color_discrete_sequence=["darkturquoise"],
).update_layout(
template=TEMPLATE,
xaxis_title="Peak Load (MW)",
yaxis_title="Number of Days",
)
),
]
),
width=4,
),
dbc.Col(
html.Div(
[
dcc.Graph(
figure=go.Figure()
.add_trace(
go.Scatter(
x=load_duration_curves["NYISO"]
.reset_index()
.index,
y=load_duration_curves["NYISO"].values,
mode="lines",
fill="tozeroy",
line=dict(color="maroon", width=3),
)
)
.update_layout(
title="Peak Load Sorted by Day (Highest to Lowest)",
xaxis_title="Number of Days",
yaxis_title="Load (MW)",
template=TEMPLATE,
),
),
]
),
width=4,
),
dbc.Col(
html.Div(
[
dcc.Dropdown(
id="nyiso-scatter-dropdown",
options=[
{"label": "Day of Week", "value": "weekday"},
{"label": "Season", "value": "season"},
],
value="season",
multi=False,
),
dcc.Graph(id="nyiso-scatter"),
]
),
width=4,
),
]
),
]
)
@app.callback(
dash.dependencies.Output("nyiso-content", "children"),
[dash.dependencies.Input("nyiso-button", "value")],
)
@app.callback(
dash.dependencies.Output("nyiso-graph", "figure"),
[dash.dependencies.Input("nyiso-dropdown", "value")],
)
def plot_nyiso_load_(value):
fig = go.Figure()
if "Actual" in value:
fig.add_trace(
go.Scatter(
x=load["NYISO"].index,
y=load["NYISO"].values,
name="Actual Load",
line=dict(color="maroon", width=3),
)
)
if "Predicted" in value:
fig.add_trace(
go.Scatter(
x=predictions["NYISO"].index,
y=predictions["NYISO"].values,
name="Forecasted Load",
line=dict(color="darkturquoise", width=3, dash="dash"),
)
)
return fig.update_layout(
title="System Load: Actual vs. Predicted",
xaxis_title="Date",
yaxis_title="Load (MW)",
template=TEMPLATE,
)
@app.callback(
dash.dependencies.Output("nyiso-scatter", "figure"),
[dash.dependencies.Input("nyiso-scatter-dropdown", "value")],
)
def nyiso_scatter_plot(value):
fig = px.scatter(peak_data["NYISO"], x="load_MW", y="temperature", color=value)
return fig.update_layout(template=TEMPLATE, title="Peak Load vs. Temperature")
app_name = os.getenv("APP_NAME", "dash-peaky-finders")
"""PJM LAYOUT"""
pjm_layout = html.Div(
[
html.Div(id="pjm-content"),
html.Br(),
dbc.Row(
[
dbc.Col(
html.Div(
[
dcc.Link(
html.Button("HOME", id="home-button", className="mr-1"),
href=f"/{app_name}/",
),
dcc.Link(
html.Button(
"CAISO", id="caiso-button", className="mr-1"
),
href=f"/{app_name}/caiso",
),
dcc.Link(
html.Button("MISO", id="miso-button", className="mr-1"),
href=f"/{app_name}/miso",
),
dcc.Link(
html.Button("PJM", id="pjm-button", className="mr-1"),
href=f"/{app_name}/pjm",
),
dcc.Link(
html.Button(
"NYISO", id="nyiso-button", className="mr-1"
),
href=f"/{app_name}/nyiso",
),
dcc.Link(
html.Button(
"ISONE", id="isone-button", className="mr-1"
),
href=f"/{app_name}/isone",
),
]
),
width=4,
),
dbc.Col(width=7),
],
justify="center",
),
html.Br(),
html.Br(),
dbc.Row(
[
dbc.Col(
html.H1("Pennsylvania, Jersey, Maryland Power Pool (PJM)"), width=9
),
dbc.Col(width=2),
],
justify="center",
),
dbc.Row(
[
dbc.Col(
html.Div(
children="""
"PJM is a regional transmission organization (RTO) that coordinates the
movement of wholesale electricity in all or parts of 13 states and
the District of Columbia." For more information, visit https://www.pjm.com.
"""
),
width=9,
),
dbc.Col(width=2),
],
justify="center",
),
html.Br(),
dbc.Row(
[dbc.Col(html.H3("Model Performance"), width=9), dbc.Col(width=2),],
justify="center",
),
dbc.Row(
[
dbc.Col(
html.Div(
children="""Mean Absolute Error (MAE) for February, 2021: 2,886.66 (not great)"""
),
width=9,
),
dbc.Col(width=2),
],
justify="center",
),
html.Br(),
dbc.Row(
[
dbc.Col(
dcc.Dropdown(
id="pjm-dropdown",
options=[
{"label": "Actual", "value": "Actual"},
{"label": "Predicted", "value": "Predicted"},
],
value=["Actual", "Predicted"],
multi=True,
),
width=6,
),
dbc.Col(width=5),
],
justify="center",
),
dcc.Graph(id="pjm-graph"),
html.Br(),
html.Br(),
dbc.Row(
[dbc.Col(html.H3("Training Data"), width=9), dbc.Col(width=2)],
justify="center",
),
dbc.Row(
[
dbc.Col(
html.Div(
children="""
The PJM forecasting model was trained on historical load and weather data
from 2018-2021. Temperature readings are from Philadelphia.
"""
),
width=9,
),
dbc.Col(width=2),
],
justify="center",
),
html.Br(),
dbc.Row(
[
dbc.Col(
html.Div(
[
dcc.Graph(
figure=px.histogram(
peak_data["PJM"],
x=peak_data["PJM"]["load_MW"],
nbins=75,
marginal="rug",
title=f"Distribution of PJM Daily Peaks",
color_discrete_sequence=["darkturquoise"],
).update_layout(
template=TEMPLATE,
xaxis_title="Peak Load (MW)",
yaxis_title="Number of Days",
)
),
]
),
width=4,
),
dbc.Col(
html.Div(
[
dcc.Graph(
figure=go.Figure()
.add_trace(
go.Scatter(
x=load_duration_curves["PJM"]
.reset_index()
.index,
y=load_duration_curves["PJM"].values,
mode="lines",
fill="tozeroy",
line=dict(color="maroon", width=3),
)
)
.update_layout(
title="Peak Load Sorted by Day (Highest to Lowest)",
xaxis_title="Number of Days",
yaxis_title="Load (MW)",
template=TEMPLATE,
),
),
]
),
width=4,
),
dbc.Col(
html.Div(
[
dcc.Dropdown(
id="pjm-scatter-dropdown",
options=[
{"label": "Day of Week", "value": "weekday"},
{"label": "Season", "value": "season"},
],
value="season",
multi=False,
),
dcc.Graph(id="pjm-scatter"),
]
),
width=4,
),
]
),
]
)
@app.callback(
dash.dependencies.Output("pjm-content", "children"),
[dash.dependencies.Input("pjm-button", "value")],
)
@app.callback(
dash.dependencies.Output("pjm-graph", "figure"),
[dash.dependencies.Input("pjm-dropdown", "value")],
)
def plot_pjm_load_(value):
fig = go.Figure()
if "Actual" in value:
fig.add_trace(
go.Scatter(
x=load["PJM"].index,
y=load["PJM"].values,
name="Actual Load",
line=dict(color="maroon", width=3),
)
)
if "Predicted" in value:
fig.add_trace(
go.Scatter(
x=predictions["PJM"].index,
y=predictions["PJM"].values,
name="Forecasted Load",
line=dict(color="darkturquoise", width=3, dash="dash"),
)
)
return fig.update_layout(
title="System Load: Actual vs. Predicted",
xaxis_title="Date",
yaxis_title="Load (MW)",
template=TEMPLATE,
)
@app.callback(
dash.dependencies.Output("pjm-scatter", "figure"),
[dash.dependencies.Input("pjm-scatter-dropdown", "value")],
)
def pjm_scatter_plot(value):
fig = px.scatter(peak_data["PJM"], x="load_MW", y="temperature", color=value)
return fig.update_layout(template=TEMPLATE, title="Peak Load vs. Temperature")
"""MISO LAYOUT"""
miso_layout = html.Div(
[
html.Div(id="miso-content"),
html.Br(),
dbc.Row(
[
dbc.Col(
html.Div(
[
dcc.Link(
html.Button("HOME", id="home-button", className="mr-1"),
href=f"/{app_name}/",
),
dcc.Link(
html.Button(
"CAISO", id="caiso-button", className="mr-1"
),
href=f"/{app_name}/caiso",
),
dcc.Link(
html.Button("MISO", id="miso-button", className="mr-1"),
href=f"/{app_name}/miso",
),
dcc.Link(
html.Button("PJM", id="pjm-button", className="mr-1"),
href=f"/{app_name}/pjm",
),
dcc.Link(
html.Button(
"NYISO", id="nyiso-button", className="mr-1"
),
href=f"/{app_name}/nyiso",
),
dcc.Link(
html.Button(
"ISONE", id="isone-button", className="mr-1"
),
href=f"/{app_name}/isone",
),
]
),
width=4,
),
dbc.Col(width=7),
],
justify="center",
),
html.Br(),
html.Br(),
dbc.Row(
[
dbc.Col(
html.H1("Midcontinent Independent System Operator (MISO)"), width=9
),
dbc.Col(width=2),
],
justify="center",
),
dbc.Row(
[
dbc.Col(
html.Div(
children="""
"Midcontinent Independent System Operator (MISO) is an independent,
not-for-profit organization that delivers safe, cost-effective
electric power across 15 U.S. states and the Canadian province of
Manitoba." For more information,
visit www.misoenergy.org.
"""
),
width=9,
),
dbc.Col(width=2),
],
justify="center",
),
html.Br(),
dbc.Row(
[dbc.Col(html.H3("Model Performance"), width=9), dbc.Col(width=2),],
justify="center",
),
dbc.Row(
[
dbc.Col(
html.Div(
children="""Mean Absolute Error (MAE) for February, 2021: 2382.66 (not great)"""
),
width=9,
),
dbc.Col(width=2),
],
justify="center",
),
html.Br(),
dbc.Row(
[
dbc.Col(
dcc.Dropdown(
id="miso-dropdown",
options=[
{"label": "Actual", "value": "Actual"},
{"label": "Predicted", "value": "Predicted"},
],
value=["Actual", "Predicted"],
multi=True,
),
width=6,
),
dbc.Col(width=5),
],
justify="center",
),
dcc.Graph(id="miso-graph"),
html.Br(),
html.Br(),
dbc.Row(
[dbc.Col(html.H3("Training Data"), width=9), dbc.Col(width=2)],
justify="center",
),
dbc.Row(
[
dbc.Col(
html.Div(
children="""
The MISO forecasting model was trained on historical load and weather data
from 2018-2021. Temperature readings are from Minneapolis.
"""
),
width=9,
),
dbc.Col(width=2),
],
justify="center",
),
html.Br(),
dbc.Row(
[
dbc.Col(
html.Div(
[
dcc.Graph(
figure=px.histogram(
peak_data["MISO"],
x=peak_data["MISO"]["load_MW"],
nbins=75,
marginal="rug",
title=f"Distribution of MISO Daily Peaks",
color_discrete_sequence=["darkturquoise"],
).update_layout(
template=TEMPLATE,
xaxis_title="Peak Load (MW)",
yaxis_title="Number of Days",
)
),
]
),
width=4,
),
dbc.Col(
html.Div(
[
dcc.Graph(
figure=go.Figure()
.add_trace(
go.Scatter(
x=load_duration_curves["MISO"]
.reset_index()
.index,
y=load_duration_curves["MISO"].values,
mode="lines",
fill="tozeroy",
line=dict(color="maroon", width=3),
)
)
.update_layout(
title="Peak Load Sorted by Day (Highest to Lowest)",
xaxis_title="Number of Days",
yaxis_title="Load (MW)",
template=TEMPLATE,
),
),
]
),
width=4,
),
dbc.Col(
html.Div(
[
dcc.Dropdown(
id="miso-scatter-dropdown",
# myboxplot.py (victorfica/utils)
import numpy as np
import matplotlib as mpl
import matplotlib.pyplot as plt
from numpy.random import permutation, seed
import pandas as pd
import seaborn as sns
from itertools import cycle
from vectools import untangle
try:
import numba as nb
from bootstrap_nb import bootci_nb
NUMBA = True
@nb.njit()
def _keepdims_mean(dat):
return np.array([np.mean(dat[:, 0])])
except ImportError:
from scikits.bootstrap import ci
NUMBA = False
__all__ = ['scatterdots',
'myboxplot',
'manyboxplots',
'swarmbox',
'discrete_boxplot']
def scatterdots(data, x, axh=None, width=0.8, returnx=False, rseed=820, **kwargs):
"""Dots plotted with random x-coordinates and y-coordinates from data array.
Parameters
----------
data : ndarray
x : float
Specifies the center of the dot cloud on the x-axis.
axh : matplotlib figure handle
If None then use plt.gca()
width : float
Specifies the range of the dots along the x-axis.
returnx : bool
If True, return the x-coordinates of the plotted data points.
rseed : float
Random seed. Defaults to a constant so that regenerated figures of
the same data are identical.
Returns
-------
Optionally returns the x-coordinates as plotted."""
if axh is None:
axh = plt.gca()
np.random.seed(rseed)
if data is None or len(data) == 0:
if returnx:
return None
return
if not isinstance(data, np.ndarray):
data = np.array(data)
validi = np.arange(len(data))
if any(np.isnan(data)):
validi = np.where(np.logical_not(np.isnan(data)))[0]
ploty = data[validi]
if len(ploty) == 0:
if returnx:
return None
return
w = width
plotx = np.random.permutation(np.linspace(-w/2., w/2., len(ploty)) + x)
axh.scatter(plotx, ploty, **kwargs)
if returnx:
outx = np.nan * np.ones(data.shape)
outx[validi] = plotx
return outx
def myboxplot(data, x=1, axh=None, width=0.8, boxcolor='black', scatterwidth=0.6, dotcolor='red', returnx=False, subsetInd=None, altDotcolor='gray', violin=False, **kwargs):
"""Make a boxplot with scatterdots overlaid.
Parameters
----------
data : np.ndarray or pd.Series
x : float
Position of box along x-axis.
axh : matplotlib figure handle
If None then use plt.gca()
width : float
Width of the box.
boxcolor : mpl color
scatterwidth : float
Width of the spread of the data points.
dotcolor : mpl color
subsetInd : boolean or int index
Indicates a subset of the data that should be summarized in the boxplot.
However, all data points will be plotted.
altDotcolor : mpl color
Specify the color of the data points that are not in the subset.
returnx : bool
Return the x-coordinates of the data points.
violin : bool
Specify whether the box is a violin plot.
Returns
-------
outx : np.ndarray
Optionally, an array of the x-coordinates as plotted."""
if axh is None:
axh = plt.gca()
if isinstance(data, pd.Series):
data = data.values
if not subsetInd is None:
if not (subsetInd.dtype == np.array([0, 1], dtype=bool).dtype):
tmp = np.zeros(data.shape, dtype=bool)
tmp[subsetInd] = True
subsetInd = tmp
else:
subsetInd = np.ones(data.shape, dtype=bool)
subsetInd = np.asarray(subsetInd)
if not 's' in kwargs:
kwargs['s'] = 20
if not 'marker' in kwargs:
kwargs['marker'] = 'o'
if not 'linewidths' in kwargs:
kwargs['linewidths'] = 0.5
"""Boxplot with dots overlaid"""
outx = np.zeros(data.shape)
if subsetInd.sum() > 0:
if not boxcolor == 'none' and not boxcolor is None:
if violin and False:
sns.violinplot(data[subsetInd], color = boxcolor, positions = [x], alpha = 0.5)
else:
bp = axh.boxplot(data[subsetInd], positions = [x], widths = width, sym = '')
for element in list(bp.keys()):
for b in bp[element]:
b.set_color(boxcolor)
kwargs['c'] = dotcolor
subsetx = scatterdots(data[subsetInd], x = x, axh = axh, width = scatterwidth, returnx = True, **kwargs)
outx[subsetInd] = subsetx
if (~subsetInd).sum() > 0:
kwargs['c'] = altDotcolor
subsetx = scatterdots(data[~subsetInd], x = x, axh = axh, width = scatterwidth, returnx = True, **kwargs)
outx[~subsetInd] = subsetx
if returnx:
return outx
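# Added usage sketch (not part of the original module); the synthetic data and styling
# values are illustrative only.
def _demo_myboxplot(n=50, rseed=820):
    np.random.seed(rseed)
    data = np.random.randn(n)
    plt.figure(figsize=(3, 4))
    outx = myboxplot(data, x=1, boxcolor='black', dotcolor='red', returnx=True)
    plt.xticks([1], ['group A'])
    plt.ylabel('value')
    return outx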
def manyboxplots(df, cols=None, axh=None, colLabels=None, annotation='N', horizontal=False, vRange=None, xRot=0, **kwargs):
"""Series of boxplots along x-axis (or flipped horizontally along y-axis [NOT IMPLEMENTED])
WORK IN PROGRESS
Optionally add annotation for each boxplot with:
(1) "N"
(2) "pctpos" (response rate, by additionally specifying responders)
NOT YET IMPLEMENTED
Parameters
----------
df : pd.DataFrame
cols : list
Column names to be plotted
axh : matplotlib figure handle
If None then use plt.gca()
colLabels : list
Column labels (optional)
annotation : str or None
Specifies what the annotation should be: "N" or "pctpos"
horizontal : bool
Specifies whether boxplots should be vertical (default, False) or horizontal (True)
kwargs : additional arguments
Passed to myboxplot function to specify colors etc."""
if axh is None:
axh = plt.gca()
if cols is None:
cols = df.columns
if colLabels is None:
colLabels = cols
elif len(colLabels) < len(cols):
colLabels += cols[len(colLabels):]
for x, c in enumerate(cols):
myboxplot(df[c].dropna(), x = x, axh = axh, **kwargs)
if not vRange is None:
plt.ylim(vRange)
yl = plt.ylim()
annotationKwargs = dict(xytext = (0, -10), textcoords = 'offset points', ha = 'center', va = 'top', size = 'medium')
for x, c in enumerate(cols):
tmp = df[c].dropna()
if annotation == 'N':
plt.annotate('%d' % len(tmp), xy = (x, yl[1]), **annotationKwargs)
elif annotation == 'pctpos':
pass
plt.xlim((-1, x+1))
plt.xticks(np.arange(x+1))
xlabelsL = axh.set_xticklabels(colLabels, fontsize='large', rotation=xRot, fontname='Consolas')
def swarmbox(x, y, data, hue=None, palette=None, order=None, hue_order=None, connect=False, connect_on=[], legend_loc=0, legend_bbox=None, swarm_alpha=1, swarm_size=5, box_alpha=1, box_edgecolor='k', box_facewhite=False):
"""Based on seaborn boxplots and swarmplots.
Adds the option to connect dots by joining on an identifier columns"""
if palette is None and not hue is None:
palette = sns.color_palette('Set2', n_colors=data[hue].unique().shape[0])
if hue_order is None and not hue is None:
hue_order = sorted(data[hue].unique())
if order is None:
order = sorted(data[x].unique())
params = dict(data=data, x=x, y=y, hue=hue, order=order, hue_order=hue_order)
box_axh = sns.boxplot(**params,
fliersize=0,
linewidth=1,
palette=palette)
for patch in box_axh.artists:
patch.set_edgecolor((0, 0, 0, 1))
r, g, b, a = patch.get_facecolor()
if box_facewhite:
patch.set_facecolor((1, 1, 1, 1))
else:
patch.set_facecolor((r, g, b, box_alpha))
for line in box_axh.lines:
line.set_color(box_edgecolor)
swarm = sns.swarmplot(**params,
linewidth=0.5,
edgecolor='black',
dodge=True,
alpha=swarm_alpha,
size=swarm_size,
palette=palette)
if connect and not hue is None:
for i in range(len(hue_order) - 1):
"""Loop over pairs of hues (i.e. grouped boxes)"""
curHues = hue_order[i:i+2]
"""Pull out just the swarm collections that are needed"""
zipper = [order] + [swarm.collections[i::len(hue_order)], swarm.collections[i+1::len(hue_order)]]
for curx, cA, cB in zip(*zipper):
"""Loop over the x positions (i.e. outer groups)"""
indA = (data[x] == curx) & (data[hue] == curHues[0])
indB = (data[x] == curx) & (data[hue] == curHues[1])
"""Locate the data and match it up with the points plotted for each hue"""
tmpA = data[[x, hue, y] + connect_on].loc[indA].dropna()
tmpB = data[[x, hue, y] + connect_on].loc[indB].dropna()
plottedA = cA.get_offsets() # shaped (n_elements x 2)
plottedB = cB.get_offsets()
"""Merge the data from each hue, including the new detangled x coords,
based on what was plotted"""
tmpA.loc[:, '_untangi'] = untangle(tmpA[y].values, plottedA[:, 1])
tmpB.loc[:, '_untangi'] = untangle(tmpB[y].values, plottedB[:, 1])
tmpA.loc[:, '_newx'] = plottedA[:, 0][tmpA['_untangi'].values]
tmpB.loc[:, '_newx'] = plottedB[:, 0][tmpB['_untangi'].values]
"""Using 'inner' drops the data points that are in one hue grouping and not the other"""
tmp = pd.merge(tmpA, tmpB, left_on=connect_on, right_on=connect_on, suffixes=('_A', '_B'), how='inner')
"""Plot them one by one"""
for rind, r in tmp.iterrows():
plt.plot(r[['_newx_A', '_newx_B']],
r[[y + '_A', y + '_B']],
'-', color='gray', linewidth=0.5)
if hue is not None and legend_loc is not None:
plt.legend([plt.Circle((0, 0), 1, color=c, alpha=1) for c in palette], hue_order, title=hue, loc=legend_loc, bbox_to_anchor=legend_bbox)
if legend_loc is None and plt.gca().legend_ is not None:
plt.gca().legend_.remove()
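# Usage sketch (editor's addition, not in the original module). The column names here
# ('day', 'value', 'group', 'subject') are hypothetical; connect_on names the identifier
# column(s) used to pair each subject's dots across the two hue levels:
#     swarmbox(x='day', y='value', hue='group', data=df,
#              connect=True, connect_on=['subject'])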
def _xspacing(v, mxWidth=0.3, idealNumPoints=4):
xlim = min(mxWidth, (len(v)/idealNumPoints)*mxWidth/2)
x = np.linspace(-xlim, xlim, len(v))
x = np.random.permutation(x)
"""Use v*0 so that it has the right labels for apply"""
return v*0 + x
def _yjitter(v, jitter=0.3):
y = np.linspace(-jitter/2, jitter/2, len(v))
y = np.random.permutation(y)
return y + v
def discrete_boxplot(x, y, hue, data, yjitter=0.3, palette=None, order=None, hue_order=None, IQR=True, mean_df=None, pvalue_df=None):
if order is None:
order = data[x].unique()
if len(order) == 1:
xspacing = 2
else:
xspacing = 1
if hue_order is None:
hue_order = data[hue].unique()
if palette is None:
palette = [c for i,c in zip(range(len(hue_order)), cycle(mpl.cm.Set1.colors))]
yl = (data[y].min() - 0.5, data[y].max() + 0.5)
plotx = 0
xt = []
xtl = []
for xval in order:
xcoords = []
xcoords_labels = {}
for hueval, color in zip(hue_order, palette):
tmp = data.loc[(data[hue] == hueval) & (data[x] == xval), y]
if mean_df is None:
if IQR:
lcl, mu, ucl = np.percentile(tmp.values, [25, 50, 75])
else:
if NUMBA:
"""bootci_nb requires a 2D matrix and will operate along rows. statfunction needs to return a vector"""
mu, lcl, ucl = bootci_nb(tmp.values[:, None], statfunction=_keepdims_mean, alpha=0.05, n_samples=10000, method='bca').ravel()
| |
m.b1108 <= 0)
m.e1739 = Constraint(expr= -m.b1098 + m.b1108 <= 0)
m.e1740 = Constraint(expr= -m.b1099 + m.b1108 <= 0)
m.e1741 = Constraint(expr= -m.b1098 + m.b1109 <= 0)
m.e1742 = Constraint(expr= -m.b1099 + m.b1109 <= 0)
m.e1743 = Constraint(expr= -m.b1100 + m.b1109 <= 0)
m.e1744 = Constraint(expr= -m.b1097 + m.b1110 <= 0)
m.e1745 = Constraint(expr= -m.b1099 + m.b1110 <= 0)
m.e1746 = Constraint(expr= -m.b1100 + m.b1110 <= 0)
m.e1747 = Constraint(expr= -m.b1097 + m.b1111 <= 0)
m.e1748 = Constraint(expr= -m.b1098 + m.b1111 <= 0)
m.e1749 = Constraint(expr= -m.b1100 + m.b1111 <= 0)
m.e1750 = Constraint(expr= -m.b1097 + m.b1112 <= 0)
m.e1751 = Constraint(expr= -m.b1098 + m.b1112 <= 0)
m.e1752 = Constraint(expr= -m.b1099 + m.b1112 <= 0)
m.e1753 = Constraint(expr= -m.b1114 + m.b1117 <= 0)
m.e1754 = Constraint(expr= -m.b1115 + m.b1117 <= 0)
m.e1755 = Constraint(expr= -m.b1116 + m.b1117 <= 0)
m.e1756 = Constraint(expr= -m.b1113 + m.b1118 <= 0)
m.e1757 = Constraint(expr= -m.b1115 + m.b1118 <= 0)
m.e1758 = Constraint(expr= -m.b1116 + m.b1118 <= 0)
m.e1759 = Constraint(expr= -m.b1113 + m.b1119 <= 0)
m.e1760 = Constraint(expr= -m.b1114 + m.b1119 <= 0)
m.e1761 = Constraint(expr= -m.b1116 + m.b1119 <= 0)
m.e1762 = Constraint(expr= -m.b1113 + m.b1120 <= 0)
m.e1763 = Constraint(expr= -m.b1114 + m.b1120 <= 0)
m.e1764 = Constraint(expr= -m.b1115 + m.b1120 <= 0)
m.e1765 = Constraint(expr= -m.b1114 + m.b1121 <= 0)
m.e1766 = Constraint(expr= -m.b1115 + m.b1121 <= 0)
m.e1767 = Constraint(expr= -m.b1116 + m.b1121 <= 0)
m.e1768 = Constraint(expr= -m.b1113 + m.b1122 <= 0)
m.e1769 = Constraint(expr= -m.b1115 + m.b1122 <= 0)
m.e1770 = Constraint(expr= -m.b1116 + m.b1122 <= 0)
m.e1771 = Constraint(expr= -m.b1113 + m.b1123 <= 0)
m.e1772 = Constraint(expr= -m.b1114 + m.b1123 <= 0)
m.e1773 = Constraint(expr= -m.b1116 + m.b1123 <= 0)
m.e1774 = Constraint(expr= -m.b1113 + m.b1124 <= 0)
m.e1775 = Constraint(expr= -m.b1114 + m.b1124 <= 0)
m.e1776 = Constraint(expr= -m.b1115 + m.b1124 <= 0)
m.e1777 = Constraint(expr= -m.b1114 + m.b1125 <= 0)
m.e1778 = Constraint(expr= -m.b1115 + m.b1125 <= 0)
m.e1779 = Constraint(expr= -m.b1116 + m.b1125 <= 0)
m.e1780 = Constraint(expr= -m.b1113 + m.b1126 <= 0)
m.e1781 = Constraint(expr= -m.b1115 + m.b1126 <= 0)
m.e1782 = Constraint(expr= -m.b1116 + m.b1126 <= 0)
m.e1783 = Constraint(expr= -m.b1113 + m.b1127 <= 0)
m.e1784 = Constraint(expr= -m.b1114 + m.b1127 <= 0)
m.e1785 = Constraint(expr= -m.b1116 + m.b1127 <= 0)
m.e1786 = Constraint(expr= -m.b1113 + m.b1128 <= 0)
m.e1787 = Constraint(expr= -m.b1114 + m.b1128 <= 0)
m.e1788 = Constraint(expr= -m.b1115 + m.b1128 <= 0)
m.e1789 = Constraint(expr= -m.b1130 + m.b1133 <= 0)
m.e1790 = Constraint(expr= -m.b1131 + m.b1133 <= 0)
m.e1791 = Constraint(expr= -m.b1132 + m.b1133 <= 0)
m.e1792 = Constraint(expr= -m.b1129 + m.b1134 <= 0)
m.e1793 = Constraint(expr= -m.b1131 + m.b1134 <= 0)
m.e1794 = Constraint(expr= -m.b1132 + m.b1134 <= 0)
m.e1795 = Constraint(expr= -m.b1129 + m.b1135 <= 0)
m.e1796 = Constraint(expr= -m.b1130 + m.b1135 <= 0)
m.e1797 = Constraint(expr= -m.b1132 + m.b1135 <= 0)
m.e1798 = Constraint(expr= -m.b1129 + m.b1136 <= 0)
m.e1799 = Constraint(expr= -m.b1130 + m.b1136 <= 0)
m.e1800 = Constraint(expr= -m.b1131 + m.b1136 <= 0)
m.e1801 = Constraint(expr= -m.b1130 + m.b1137 <= 0)
m.e1802 = Constraint(expr= -m.b1131 + m.b1137 <= 0)
m.e1803 = Constraint(expr= -m.b1132 + m.b1137 <= 0)
m.e1804 = Constraint(expr= -m.b1129 + m.b1138 <= 0)
m.e1805 = Constraint(expr= -m.b1131 + m.b1138 <= 0)
m.e1806 = Constraint(expr= -m.b1132 + m.b1138 <= 0)
m.e1807 = Constraint(expr= -m.b1129 + m.b1139 <= 0)
m.e1808 = Constraint(expr= -m.b1130 + m.b1139 <= 0)
m.e1809 = Constraint(expr= -m.b1132 + m.b1139 <= 0)
m.e1810 = Constraint(expr= -m.b1129 + m.b1140 <= 0)
m.e1811 = Constraint(expr= -m.b1130 + m.b1140 <= 0)
m.e1812 = Constraint(expr= -m.b1131 + m.b1140 <= 0)
m.e1813 = Constraint(expr= -m.b1130 + m.b1141 <= 0)
m.e1814 = Constraint(expr= -m.b1131 + m.b1141 <= 0)
m.e1815 = Constraint(expr= -m.b1132 + m.b1141 <= 0)
m.e1816 = Constraint(expr= -m.b1129 + m.b1142 <= 0)
m.e1817 = Constraint(expr= -m.b1131 + m.b1142 <= 0)
m.e1818 = Constraint(expr= -m.b1132 + m.b1142 <= 0)
m.e1819 = Constraint(expr= -m.b1129 + m.b1143 <= 0)
m.e1820 = Constraint(expr= -m.b1130 + m.b1143 <= 0)
m.e1821 = Constraint(expr= -m.b1132 + m.b1143 <= 0)
m.e1822 = Constraint(expr= -m.b1129 + m.b1144 <= 0)
m.e1823 = Constraint(expr= -m.b1130 + m.b1144 <= 0)
m.e1824 = Constraint(expr= -m.b1131 + m.b1144 <= 0)
m.e1825 = Constraint(expr= -m.b1146 + m.b1149 <= 0)
m.e1826 = Constraint(expr= -m.b1147 + m.b1149 <= 0)
m.e1827 = Constraint(expr= -m.b1148 + m.b1149 <= 0)
m.e1828 = Constraint(expr= -m.b1145 + m.b1150 <= 0)
m.e1829 = Constraint(expr= -m.b1147 + m.b1150 <= 0)
m.e1830 = Constraint(expr= -m.b1148 + m.b1150 <= 0)
m.e1831 = Constraint(expr= -m.b1145 + m.b1151 <= 0)
m.e1832 = Constraint(expr= -m.b1146 + m.b1151 <= 0)
m.e1833 = Constraint(expr= -m.b1148 + m.b1151 <= 0)
m.e1834 = Constraint(expr= -m.b1145 + m.b1152 <= 0)
m.e1835 = Constraint(expr= -m.b1146 + m.b1152 <= 0)
m.e1836 = Constraint(expr= -m.b1147 + m.b1152 <= 0)
m.e1837 = Constraint(expr= -m.b1146 + m.b1153 <= 0)
m.e1838 = Constraint(expr= -m.b1147 + m.b1153 <= 0)
m.e1839 = Constraint(expr= -m.b1148 + m.b1153 <= 0)
m.e1840 = Constraint(expr= -m.b1145 + m.b1154 <= 0)
m.e1841 = Constraint(expr= -m.b1147 + m.b1154 <= 0)
m.e1842 = Constraint(expr= -m.b1148 + m.b1154 <= 0)
m.e1843 = Constraint(expr= -m.b1145 + m.b1155 <= 0)
m.e1844 = Constraint(expr= -m.b1146 + m.b1155 <= 0)
m.e1845 = Constraint(expr= -m.b1148 + m.b1155 <= 0)
m.e1846 = Constraint(expr= -m.b1145 + m.b1156 <= 0)
m.e1847 = Constraint(expr= -m.b1146 + m.b1156 <= 0)
m.e1848 = Constraint(expr= -m.b1147 + m.b1156 <= 0)
m.e1849 = Constraint(expr= -m.b1146 + m.b1157 <= 0)
m.e1850 = Constraint(expr= -m.b1147 + m.b1157 <= 0)
m.e1851 = Constraint(expr= -m.b1148 + m.b1157 <= 0)
m.e1852 = Constraint(expr= -m.b1145 + m.b1158 <= 0)
m.e1853 = Constraint(expr= -m.b1147 + m.b1158 <= 0)
m.e1854 = Constraint(expr= -m.b1148 + m.b1158 <= 0)
m.e1855 = Constraint(expr= -m.b1145 + m.b1159 <= 0)
m.e1856 = Constraint(expr= -m.b1146 + m.b1159 <= 0)
m.e1857 = Constraint(expr= -m.b1148 + m.b1159 <= 0)
m.e1858 = Constraint(expr= -m.b1145 + m.b1160 <= 0)
m.e1859 = Constraint(expr= -m.b1146 + m.b1160 <= 0)
m.e1860 = Constraint(expr= -m.b1147 + m.b1160 <= 0)
m.e1861 = Constraint(expr= -m.b1162 + m.b1165 <= 0)
m.e1862 = Constraint(expr= -m.b1163 + m.b1165 <= 0)
m.e1863 = Constraint(expr= -m.b1164 + m.b1165 <= 0)
m.e1864 = Constraint(expr= -m.b1161 + m.b1166 <= 0)
m.e1865 = Constraint(expr= -m.b1163 + m.b1166 <= 0)
m.e1866 = Constraint(expr= -m.b1164 + m.b1166 <= 0)
m.e1867 = Constraint(expr= -m.b1161 + m.b1167 <= 0)
m.e1868 = Constraint(expr= -m.b1162 + m.b1167 <= 0)
m.e1869 = Constraint(expr= -m.b1164 + m.b1167 <= 0)
m.e1870 = Constraint(expr= -m.b1161 + m.b1168 <= 0)
m.e1871 = Constraint(expr= -m.b1162 + m.b1168 <= 0)
m.e1872 = Constraint(expr= -m.b1163 + m.b1168 <= 0)
m.e1873 = Constraint(expr= -m.b1162 + m.b1169 <= 0)
m.e1874 = Constraint(expr= -m.b1163 + m.b1169 <= 0)
m.e1875 = Constraint(expr= -m.b1164 + m.b1169 <= 0)
m.e1876 = Constraint(expr= -m.b1161 + m.b1170 <= 0)
m.e1877 = Constraint(expr= -m.b1163 + m.b1170 <= 0)
m.e1878 = Constraint(expr= -m.b1164 + m.b1170 <= 0)
m.e1879 = Constraint(expr= -m.b1161 + m.b1171 <= 0)
m.e1880 = Constraint(expr= -m.b1162 + m.b1171 <= 0)
m.e1881 = Constraint(expr= -m.b1164 + m.b1171 <= 0)
m.e1882 = Constraint(expr= -m.b1161 + m.b1172 <= 0)
m.e1883 = Constraint(expr= -m.b1162 + m.b1172 <= 0)
m.e1884 = Constraint(expr= -m.b1163 + m.b1172 <= 0)
m.e1885 = Constraint(expr= -m.b1162 + m.b1173 <= 0)
m.e1886 = Constraint(expr= -m.b1163 + m.b1173 <= 0)
m.e1887 = Constraint(expr= -m.b1164 + m.b1173 <= 0)
m.e1888 = Constraint(expr= -m.b1161 + m.b1174 <= 0)
m.e1889 = Constraint(expr= -m.b1163 + m.b1174 <= 0)
m.e1890 = Constraint(expr= -m.b1164 + m.b1174 <= 0)
m.e1891 = Constraint(expr= -m.b1161 + m.b1175 <= 0)
m.e1892 = Constraint(expr= -m.b1162 + m.b1175 <= 0)
m.e1893 = Constraint(expr= -m.b1164 + m.b1175 <= 0)
m.e1894 = Constraint(expr= -m.b1161 + m.b1176 <= 0)
m.e1895 = Constraint(expr= -m.b1162 + m.b1176 <= 0)
m.e1896 = Constraint(expr= -m.b1163 + m.b1176 <= 0)
m.e1897 = Constraint(expr= -m.b1178 + m.b1181 <= 0)
m.e1898 = Constraint(expr= -m.b1179 + m.b1181 <= 0)
m.e1899 = Constraint(expr= -m.b1180 + m.b1181 <= 0)
m.e1900 = Constraint(expr= -m.b1177 + m.b1182 <= 0)
m.e1901 = Constraint(expr= -m.b1179 + m.b1182 <= 0)
m.e1902 = Constraint(expr= -m.b1180 + m.b1182 <= 0)
m.e1903 = Constraint(expr= -m.b1177 + m.b1183 <= 0)
m.e1904 = Constraint(expr= -m.b1178 + m.b1183 <= 0)
m.e1905 = Constraint(expr= -m.b1180 + m.b1183 <= 0)
m.e1906 = Constraint(expr= -m.b1177 + m.b1184 <= 0)
m.e1907 = Constraint(expr= -m.b1178 + m.b1184 <= 0)
m.e1908 = Constraint(expr= -m.b1179 + m.b1184 <= 0)
m.e1909 = Constraint(expr= -m.b1178 + m.b1185 <= 0)
m.e1910 = Constraint(expr= -m.b1179 + m.b1185 <= 0)
m.e1911 = Constraint(expr= -m.b1180 + m.b1185 <= 0)
m.e1912 = Constraint(expr= -m.b1177 + m.b1186 <= 0)
m.e1913 = Constraint(expr= -m.b1179 + m.b1186 <= 0)
m.e1914 = Constraint(expr= -m.b1180 + m.b1186 <= 0)
m.e1915 = Constraint(expr= -m.b1177 + m.b1187 <= 0)
m.e1916 = Constraint(expr= -m.b1178 + m.b1187 <= 0)
m.e1917 = Constraint(expr= -m.b1180 + m.b1187 <= 0)
m.e1918 = Constraint(expr= -m.b1177 + m.b1188 <= 0)
m.e1919 = Constraint(expr= -m.b1178 + m.b1188 <= 0)
m.e1920 = Constraint(expr= -m.b1179 + m.b1188 <= 0)
m.e1921 = Constraint(expr= -m.b1178 | |
default=-1)
return retval
@property
def hsrp_priority(self):
## For API simplicity, I always assume there is only one hsrp
## group on the interface
if not self.has_ip_hsrp:
return 0 # Return this if there is no hsrp on the interface
retval = self.re_match_iter_typed(r'^\s*standby\s+(\d+\s+)*priority\s+(\d+)',
group=2, result_type=int, default=100)
return retval
@property
def hsrp_hello_timer(self):
## For API simplicity, I always assume there is only one hsrp
## group on the interface
retval = self.re_match_iter_typed(r'^\s*standby\s+(\d+\s+)*timers\s+(\d+)\s+\d+',
group=2, result_type=int, default=0)
return retval
@property
def hsrp_hold_timer(self):
## For API simplicity, I always assume there is only one hsrp
## group on the interface
retval = self.re_match_iter_typed(r'^\s*standby\s+(\d+\s+)*timers\s+\d+\s+(\d+)',
group=2, result_type=int, default=0)
return retval
@property
def has_hsrp_track(self):
return bool(self.hsrp_track)
@property
def hsrp_track(self):
## For API simplicity, I always assume there is only one hsrp
## group on the interface
retval = self.re_match_iter_typed(r'^\s*standby\s+(\d+\s+)*track\s(\S+.+?)\s+\d+\s*',
group=2, result_type=str, default='')
return retval
@property
def has_hsrp_usebia(self):
## For API simplicity, I always assume there is only one hsrp
## group on the interface
retval = self.re_match_iter_typed(r'^\s*standby\s+(\d+\s+)*(use-bia)',
group=2, result_type=bool, default=False)
return retval
@property
def has_hsrp_preempt(self):
## For API simplicity, I always assume there is only one hsrp
## group on the interface
retval = self.re_match_iter_typed(r'^\s*standby\s+(\d+\s+)*(preempt)',
group=2, result_type=bool, default=False)
return retval
@property
def hsrp_authentication_md5_keychain(self):
## For API simplicity, I always assume there is only one hsrp
## group on the interface
retval = self.re_match_iter_typed(r'^\s*standby\s+(\d+\s+)*authentication\s+md5\s+key-chain\s+(\S+)',
group=2, result_type=str, default='')
return retval
@property
def has_hsrp_authentication_md5(self):
keychain = self.hsrp_authentication_md5_keychain
return bool(keychain)
@property
def hsrp_authentication_cleartext(self):
pass
##------------- MAC ACLs
@property
def has_mac_accessgroup_in(self):
if not self.is_switchport:
return False
return bool(self.mac_accessgroup_in)
@property
def has_mac_accessgroup_out(self):
if not self.is_switchport:
return False
return bool(self.mac_accessgroup_out)
@property
def mac_accessgroup_in(self):
retval = self.re_match_iter_typed(r'^\s*mac\saccess-group\s+(\S+)\s+in\s*$',
result_type=str, default='')
return retval
@property
def mac_accessgroup_out(self):
retval = self.re_match_iter_typed(r'^\s*mac\saccess-group\s+(\S+)\s+out\s*$',
result_type=str, default='')
return retval
##------------- IPv4 ACLs
@property
def has_ip_accessgroup_in(self):
return bool(self.ipv4_accessgroup_in)
@property
def has_ip_accessgroup_out(self):
return bool(self.ipv4_accessgroup_out)
@property
def has_ipv4_accessgroup_in(self):
return bool(self.ipv4_accessgroup_in)
@property
def has_ipv4_accessgroup_out(self):
return bool(self.ipv4_accessgroup_out)
@property
def ip_accessgroup_in(self):
return self.ipv4_accessgroup_in
@property
def ip_accessgroup_out(self):
return self.ipv4_accessgroup_out
@property
def ipv4_accessgroup_in(self):
retval = self.re_match_iter_typed(r'^\s*ip\saccess-group\s+(\S+)\s+in\s*$',
result_type=str, default='')
return retval
@property
def ipv4_accessgroup_out(self):
retval = self.re_match_iter_typed(r'^\s*ip\saccess-group\s+(\S+)\s+out\s*$',
result_type=str, default='')
return retval
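## Example (editor's note, not in the original source): given the child line
##     "ip access-group EXTERNAL_IN in"
## ipv4_accessgroup_in returns 'EXTERNAL_IN', and given
##     "mac access-group MAC_FILTER out"
## mac_accessgroup_out returns 'MAC_FILTER'.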
##
##------------- IOS Interface Object
##
class IOSIntfLine(BaseIOSIntfLine):
def __init__(self, *args, **kwargs):
"""Accept an IOS line number and initialize family relationship
attributes
.. warning::
All :class:`~models_cisco.IOSIntfLine` methods are still considered beta-quality, until this notice is removed. The behavior of APIs on this object could change at any time.
"""
super(IOSIntfLine, self).__init__(*args, **kwargs)
@classmethod
def is_object_for(cls, line="", re=re):
intf_regex = r'^interface\s+(\S+.+)'
if re.search(intf_regex, line):
return True
return False
##
##------------- IOS Interface Globals
##
class IOSIntfGlobal(BaseCfgLine):
def __init__(self, *args, **kwargs):
super(IOSIntfGlobal, self).__init__(*args, **kwargs)
self.feature = 'interface global'
def __repr__(self):
return "<%s # %s '%s'>" % (self.classname, self.linenum,
self.text)
@classmethod
def is_object_for(cls, line="", re=re):
if re.search('^(no\s+cdp\s+run)|(logging\s+event\s+link-status\s+global)|(spanning-tree\sportfast\sdefault)|(spanning-tree\sportfast\sbpduguard\sdefault)', line):
return True
return False
@property
def has_cdp_disabled(self):
if self.re_search('^no\s+cdp\s+run\s*'):
return True
return False
@property
def has_intf_logging_def(self):
if self.re_search('^logging\s+event\s+link-status\s+global'):
return True
return False
@property
def has_stp_portfast_def(self):
if self.re_search('^spanning-tree\sportfast\sdefault'):
return True
return False
@property
def has_stp_portfast_bpduguard_def(self):
if self.re_search('^spanning-tree\sportfast\sbpduguard\sdefault'):
return True
return False
@property
def has_stp_mode_rapidpvst(self):
if self.re_search('^spanning-tree\smode\srapid-pvst'):
return True
return False
##
##------------- IOS Hostname Line
##
class IOSHostnameLine(BaseCfgLine):
def __init__(self, *args, **kwargs):
super(IOSHostnameLine, self).__init__(*args, **kwargs)
self.feature = 'hostname'
def __repr__(self):
return "<%s # %s '%s'>" % (self.classname, self.linenum,
self.hostname)
@classmethod
def is_object_for(cls, line="", re=re):
if re.search('^hostname', line):
return True
return False
@property
def hostname(self):
retval = self.re_match_typed(r'^hostname\s+(\S+)',
result_type=str, default='')
return retval
##
##------------- IOS Access Line
##
class IOSAccessLine(BaseCfgLine):
def __init__(self, *args, **kwargs):
super(IOSAccessLine, self).__init__(*args, **kwargs)
self.feature = 'access line'
def __repr__(self):
return "<%s # %s '%s' info: '%s'>" % (self.classname, self.linenum, self.name, self.range_str)
@classmethod
def is_object_for(cls, line="", re=re):
if re.search('^line', line):
return True
return False
@property
def is_accessline(self):
retval = self.re_match_typed(r'^(line\s+\S+)',
result_type=str, default='')
return bool(retval)
@property
def name(self):
retval = self.re_match_typed(r'^line\s+(\S+)',
result_type=str, default='')
# special case for IOS async lines: i.e. "line 33 48"
if re.search('\d+', retval):
return ''
return retval
def reset(self, atomic=True):
# Insert build_reset_string() before this line...
self.insert_before(self.build_reset_string(), atomic=atomic)
def build_reset_string(self):
# IOS interfaces are defaulted like this...
return "default " + self.text
@property
def range_str(self):
return ' '.join(map(str, self.line_range))
@property
def line_range(self):
## Return the access-line's numerical range as a list
## line con 0 => [0]
## line 33 48 => [33, 48]
retval = self.re_match_typed(r'([a-zA-Z]+\s+)*(\d+\s*\d*)$',
group=2, result_type=str, default='')
tmp = list(map(int, retval.strip().split()))
return tmp
def manual_exectimeout_min(self):
tmp = self.parse_exectimeout
return tmp[0]
def manual_exectimeout_sec(self):
tmp = self.parse_exectimeout
if len(tmp) < 2:
return 0
return tmp[1]
@property
def parse_exectimeout(self):
retval = self.re_match_iter_typed(r'^\s*exec-timeout\s+(\d+\s*\d*)\s*$',
group=1, result_type=str, default='')
tmp = list(map(int, retval.strip().split()))
return tmp
##
##------------- Base IOS Route line object
##
class BaseIOSRouteLine(BaseCfgLine):
def __init__(self, *args, **kwargs):
super(BaseIOSRouteLine, self).__init__(*args, **kwargs)
def __repr__(self):
return "<%s # %s '%s' info: '%s'>" % (self.classname, self.linenum, self.network_object, self.routeinfo)
@property
def routeinfo(self):
### Route information for the repr string
if self.tracking_object_name:
return self.nexthop_str+" AD: "+str(self.admin_distance)+" Track: "+self.tracking_object_name
else:
return self.nexthop_str+" AD: "+str(self.admin_distance)
@classmethod
def is_object_for(cls, line="", re=re):
return False
@property
def vrf(self):
raise NotImplementedError
@property
def address_family(self):
## ipv4, ipv6, etc
raise NotImplementedError
@property
def network(self):
raise NotImplementedError
@property
def netmask(self):
raise NotImplementedError
@property
def admin_distance(self):
raise NotImplementedError
@property
def nexthop_str(self):
raise NotImplementedError
@property
def tracking_object_name(self):
raise NotImplementedError
##
##------------- IOS Route line object
##
_RE_IP_ROUTE = re.compile(r"""^ip\s+route
(?:\s+(?:vrf\s+(?P<vrf>\S+)))? # VRF detection
\s+
(?P<prefix>\d+\.\d+\.\d+\.\d+) # Prefix detection
\s+
(?P<netmask>\d+\.\d+\.\d+\.\d+) # Netmask detection
(?:\s+(?P<nh_intf>[^\d]\S+))? # NH intf
(?:\s+(?P<nh_addr>\d+\.\d+\.\d+\.\d+))? # NH addr
(?:\s+(?P<dhcp>dhcp))? # DHCP keyword (FIXME: add unit test)
(?:\s+(?P<global>global))? # Global keyword
(?:\s+(?P<ad>\d+))? # Administrative distance
(?:\s+(?P<mcast>multicast))? # Multicast Keyword (FIXME: add unit test)
(?:\s+name\s+(?P<name>\S+))? # Route name
(?:\s+(?P<permanent>permanent))? # Permanent Keyword (exclusive of track)
(?:\s+track\s+(?P<track>\d+))? # Track object (exclusive of permanent)
(?:\s+tag\s+(?P<tag>\d+))? # Route tag
""", re.VERBOSE)
_RE_IPV6_ROUTE = re.compile(r"""^ipv6\s+route
(?:\s+vrf\s+(?P<vrf>\S+))?
(?:\s+(?P<prefix>{0})\/(?P<masklength>\d+)) # Prefix detection
(?:
(?:\s+(?P<nh_addr1>{1}))
|(?:\s+(?P<nh_intf>\S+(?:\s+\d\S*?\/\S+)?)(?:\s+(?P<nh_addr2>{2}))?)
)
(?:\s+nexthop-vrf\s+(?P<nexthop_vrf>\S+))?
(?:\s+(?P<ad>\d+))? # Administrative distance
(?:\s+(?:(?P<ucast>unicast)|(?P<mcast>multicast)))?
(?:\s+tag\s+(?P<tag>\d+))? # Route tag
""".format(_IPV6_REGEX_STR_COMPRESSED1, _IPV6_REGEX_STR_COMPRESSED2,
_IPV6_REGEX_STR_COMPRESSED3), re.VERBOSE)
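## Example (editor's note, not in the original source): applying _RE_IP_ROUTE to
##     "ip route vrf VOICE 10.0.0.0 255.0.0.0 192.0.2.1 230 name TEST track 10"
## yields groupdict() values vrf='VOICE', prefix='10.0.0.0', netmask='255.0.0.0',
## nh_addr='192.0.2.1', ad='230', name='TEST', track='10'.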
class IOSRouteLine(BaseIOSRouteLine):
def __init__(self, *args, **kwargs):
super(IOSRouteLine, self).__init__(*args, **kwargs)
if 'ipv6' in self.text[0:4]:
self.feature = 'ipv6 route'
self._address_family = "ipv6"
mm = _RE_IPV6_ROUTE.search(self.text)
if not (mm is None):
self.route_info = mm.groupdict()
else:
raise ValueError("Could not parse '{0}'".format(self.text))
else:
self.feature = 'ip route'
self._address_family = "ip"
mm = _RE_IP_ROUTE.search(self.text)
if not (mm is None):
self.route_info = mm.groupdict()
else:
raise ValueError("Could not parse '{0}'".format(self.text))
@classmethod
def is_object_for(cls, line="", re=re):
if (line[0:8]=='ip route') or (line[0:10]=='ipv6 route'):
return True
return False
@property
def vrf(self):
if not (self.route_info['vrf'] is None):
return self.route_info['vrf']
else:
return ''
@property
def address_family(self):
## ipv4, ipv6, etc
return self._address_family
@property
def network(self):
if self._address_family=='ip':
return self.route_info['prefix']
elif self._address_family=='ipv6':
retval = self.re_match_typed(r'^ipv6\s+route\s+(vrf\s+)*(\S+?)\/\d+',
group=2, result_type=str, default='')
return retval
@property
def netmask(self):
if self._address_family=='ip':
return self.route_info['netmask']
elif self._address_family=='ipv6':
return str(self.network_object.netmask)
@property
def masklen(self):
if self._address_family=='ip':
return self.network_object.prefixlen
elif self._address_family=='ipv6':
masklen_str = self.route_info['masklength'] or '128'
return int(masklen_str)
@property
def network_object(self):
try:
if self._address_family=='ip':
return IPv4Obj('%s/%s' % (self.network, self.netmask),
strict=False)
elif self._address_family=='ipv6':
return IPv6Obj('%s/%s' % (self.network, self.masklen))
except:
return None
@property
def nexthop_str(self):
if self._address_family=='ip':
if self.next_hop_interface:
return self.next_hop_interface + " " + self.next_hop_addr
else:
return self.next_hop_addr
elif self._address_family=='ipv6':
retval = self.re_match_typed(r'^ipv6\s+route\s+(vrf\s+)*\S+\s+(\S+)',
group=2, result_type=str, default='')
return retval
@property
def next_hop_interface(self):
if self._address_family=='ip':
if self.route_info['nh_intf']:
return self.route_info['nh_intf']
else:
return ''
elif self._address_family=='ipv6':
if self.route_info['nh_intf']:
return self.route_info['nh_intf']
else:
return ''
@property
def next_hop_addr(self):
if self._address_family=='ip':
return self.route_info['nh_addr'] or ''
elif self._address_family=='ipv6':
return self.route_info['nh_addr1'] or self.route_info['nh_addr2'] \
or ''
@property
def global_next_hop(self):
if self._address_family=='ip' and bool(self.vrf):
return bool(self.route_info['global'])
elif self._address_family=='ip' and not bool(self.vrf):
return True
elif self._address_family=='ipv6':
## ipv6 uses nexthop_vrf
raise ValueError("[FATAL] ipv6 doesn't support a global_next_hop for '{0}'".format(self.text))
else:
raise ValueError("[FATAL] Could not identify global next-hop for '{0}'".format(self.text))
@property
def nexthop_vrf(self):
if self._address_family=='ipv6':
return self.route_info['nexthop_vrf'] or ''
else:
raise ValueError("[FATAL] ip doesn't support a global_next_hop for '{0}'".format(self.text))
@property
def admin_distance(self):
if self.route_info['ad']:
return int(self.route_info['ad'])
else:
return 1
@property
def multicast(self):
"""Return whether the multicast keyword was specified"""
return bool(self.route_info['mcast'])
@property
def unicast(self):
## FIXME It's unclear how to implement this...
raise NotImplementedError
@property
def route_name(self):
if self._address_family=='ip':
if self.route_info['name']:
return self.route_info['name']
else:
return ''
elif self._address_family=='ipv6':
raise NotImplementedError
@property
def permanent(self):
if self._address_family=='ip':
if self.route_info['permanent']:
return bool(self.route_info['permanent'])
else:
return False
elif self._address_family=='ipv6':
raise NotImplementedError
@property
def tracking_object_name(self):
if self._address_family=='ip':
if bool(self.route_info['track']):
return self.route_info['track']
else:
return ''
elif self._address_family=='ipv6':
raise NotImplementedError
@property
def tag(self):
return self.route_info['tag'] or ''
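## Usage sketch (editor's note, not in the original source). Assuming this module is
## loaded through CiscoConfParse with factory=True, a static route can be inspected as:
##     from ciscoconfparse import CiscoConfParse
##     parse = CiscoConfParse(['ip route 0.0.0.0 0.0.0.0 192.0.2.1 250 name DEFAULT'],
##                            factory=True)
##     route = parse.find_objects(r'^ip\s+route')[0]
##     route.network, route.netmask            # -> '0.0.0.0', '0.0.0.0'
##     route.next_hop_addr                     # -> '192.0.2.1'
##     route.admin_distance, route.route_name  # -> 250, 'DEFAULT'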
################################
################################ Groups ###############################
################################
##
##------------- IOS TACACS+ Group
##
class IOSAaaGroupServerLine(BaseCfgLine):
def __init__(self, *args, **kwargs):
super(IOSAaaGroupServerLine, self).__init__(*args, **kwargs)
self.feature = 'aaa | |
# coding=utf-8
from django.test import TestCase
import json
# Create your tests here.
from django.test import override_settings
@override_settings(DEBUG=True)
class TestSimpleCreate(TestCase):
@classmethod
def setUpTestData(cls):
# Set up data for the whole TestCase
cls.user_from = {}
cls.user_from["first_name"] = "Manuel"
cls.user_from["last_name"] = "Gonzalez"
cls.user_from["username"] = "manuggz"
cls.user_from["id"] = "109518141"
cls.chat = {}
cls.chat["first_name"] = "Manuel"
cls.chat["last_name"] = "Gonzalez"
cls.chat["username"] = "manuggz"
cls.chat["type"] = "private"
cls.chat["id"] = "109518141"
cls.consulta = {u'message': {u'text': "", u'from': cls.user_from, u'chat': cls.chat, u'message_id': 905475,
u'date': 1475391962}, u'update_id': 25256647, u'debug': True}
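# Editor's note (not in the original tests): every test below repeats the same POST to
# the webhook URL; a hypothetical helper like the one sketched here could remove that
# duplication, shown only to illustrate the shared pattern:
#     def _post_text(self, text):
#         self.consulta[u'message'][u'text'] = text
#         return self.client.post('/BotTelegram/119646075:AAFsQGgw8IaLwvRZX-IBO9mgV3k048NpuMg/',
#                                 json.dumps(self.consulta), content_type="text/json", secure=True)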
def test_create_sin_imagen_anterior(self):
self.consulta[u'message'][u'text'] = u"/create"
response = self.client.post('/BotTelegram/119646075:AAFsQGgw8IaLwvRZX-IBO9mgV3k048NpuMg/',
json.dumps(self.consulta), content_type="text/json", secure=True)
# Check that the response is 200 OK.
self.assertEqual(response.status_code, 200)
def test_simple_create_space(self):
self.consulta[u'message'][u'text'] = u"/create "
response = self.client.post('/BotTelegram/119646075:AAFsQGgw8IaLwvRZX-IBO9mgV3k048NpuMg/',
json.dumps(self.consulta), content_type="text/json", secure=True)
# Check that the response is 200 OK.
self.assertEqual(response.status_code, 200)
def test_simple_space_create(self):
self.consulta[u'message'][u'text'] = u" /create"
response = self.client.post('/BotTelegram/119646075:AAFsQGgw8IaLwvRZX-IBO9mgV3k048NpuMg/',
json.dumps(self.consulta), content_type="text/json", secure=True)
# Check that the response is 200 OK.
self.assertEqual(response.status_code, 200)
def test_simple_space_create_space(self):
self.consulta[u'message'][u'text'] = u" /create "
response = self.client.post('/BotTelegram/119646075:AAFsQGgw8IaLwvRZX-IBO9mgV3k048NpuMg/',
json.dumps(self.consulta), content_type="text/json", secure=True)
# Check that the response is 200 OK.
self.assertEqual(response.status_code, 200)
def test_simple_spacex100_create_spacex100(self):
self.consulta[u'message'][u'text'] = " " * 100 + u"/create " + " " * 100
response = self.client.post('/BotTelegram/119646075:AAFsQGgw8IaLwvRZX-IBO9mgV3k048NpuMg/',
json.dumps(self.consulta), content_type="text/json", secure=True)
# Check that the response is 200 OK.
self.assertEqual(response.status_code, 200)
def test_simple_spacex100_create_spacex100_create(self):
self.consulta[u'message'][u'text'] = " " * 100 + u"/create " + " " * 100 + " create"
response = self.client.post('/BotTelegram/119646075:AAFsQGgw8IaLwvRZX-IBO9mgV3k048NpuMg/',
json.dumps(self.consulta), content_type="text/json", secure=True)
# Check that the response is 200 OK.
self.assertEqual(response.status_code, 200)
def test_simple_spacex100_create_spacex100_create_space(self):
self.consulta[u'message'][u'text'] = " " * 100 + u"/create " + " " * 100 + " create "
response = self.client.post('/BotTelegram/119646075:AAFsQGgw8IaLwvRZX-IBO9mgV3k048NpuMg/',
json.dumps(self.consulta), content_type="text/json", secure=True)
# Check that the response is 200 OK.
self.assertEqual(response.status_code, 200)
def test_simple_spacex100_create_spacex100_yao_spacex100(self):
self.consulta[u'message'][u'text'] = " " * 100 + u"/create " + " " * 100 + " yao " + " " * 100
response = self.client.post('/BotTelegram/119646075:AAFsQGgw8IaLwvRZX-IBO9mgV3k048NpuMg/',
json.dumps(self.consulta), content_type="text/json", secure=True)
# Check that the response is 200 OK.
self.assertEqual(response.status_code, 200)
def test_simple_spacex100_create_spacex100_create_spacex100_create(self):
self.consulta[u'message'][u'text'] = " " * 100 + u"/create " + " " * 100 + " create " + " " * 100 + " create"
response = self.client.post('/BotTelegram/119646075:AAFsQGgw8IaLwvRZX-IBO9mgV3k048NpuMg/',
json.dumps(self.consulta), content_type="text/json", secure=True)
# Check that the response is 200 OK.
self.assertEqual(response.status_code, 200)
def test_create_forever_alone(self):
self.consulta[u'message'][u'text'] = u"/create Forever Alone"
response = self.client.post('/BotTelegram/119646075:AAFsQGgw8IaLwvRZX-IBO9mgV3k048NpuMg/',
json.dumps(self.consulta), content_type="text/json", secure=True)
# Check that the response is 200 OK.
self.assertEqual(response.status_code, 200)
def test_create_no_existe(self):
self.consulta[u'message'][u'text'] = u"/create 123123123123189876761009123781238712989912992"
response = self.client.post('/BotTelegram/119646075:AAFsQGgw8IaLwvRZX-IBO9mgV3k048NpuMg/',
json.dumps(self.consulta), content_type="text/json", secure=True)
# Check that the response is 200 OK.
self.assertEqual(response.status_code, 200)
def test_double_create(self):
self.consulta[u'message'][u'text'] = u"/create /create"
response = self.client.post('/BotTelegram/119646075:AAFsQGgw8IaLwvRZX-IBO9mgV3k048NpuMg/',
json.dumps(self.consulta), content_type="text/json", secure=True)
# Check that the response is 200 OK.
self.assertEqual(response.status_code, 200)
# ---------- None of the tests above use a previous image
def test_simple_create_im_alone(self):
self.consulta[u'message'][u'text'] = u"forever alone"
response = self.client.post('/BotTelegram/119646075:AAFsQGgw8IaLwvRZX-IBO9mgV3k048NpuMg/',
json.dumps(self.consulta), content_type="text/json", secure=True)
# Check that the response is 200 OK.
self.assertEqual(response.status_code, 200)
self.consulta[u'message'][u'text'] = u"/create Im alone"
response = self.client.post('/BotTelegram/119646075:AAFsQGgw8IaLwvRZX-IBO9mgV3k048NpuMg/',
json.dumps(self.consulta), content_type="text/json", secure=True)
# Check that the response is 200 OK.
self.assertEqual(response.status_code, 200)
def test_simple_create_with_anterior(self):
self.consulta[u'message'][u'text'] = u"forever alone"
response = self.client.post('/BotTelegram/119646075:AAFsQGgw8IaLwvRZX-IBO9mgV3k048NpuMg/',
json.dumps(self.consulta), content_type="text/json", secure=True)
# Check that the response is 200 OK.
self.assertEqual(response.status_code, 200)
self.consulta[u'message'][u'text'] = u"/create"
response = self.client.post('/BotTelegram/119646075:AAFsQGgw8IaLwvRZX-IBO9mgV3k048NpuMg/',
json.dumps(self.consulta), content_type="text/json", secure=True)
# Check that the response is 200 OK.
self.assertEqual(response.status_code, 200)
def test_simple_create_im_alone_but_with_my_dog(self):
self.consulta[u'message'][u'text'] = u"forever alone"
response = self.client.post('/BotTelegram/119646075:AAFsQGgw8IaLwvRZX-IBO9mgV3k048NpuMg/',
json.dumps(self.consulta), content_type="text/json", secure=True)
# Check that the response is 200 OK.
self.assertEqual(response.status_code, 200)
self.consulta[u'message'][u'text'] = u"/create Im alone - but with my dog"
response = self.client.post('/BotTelegram/119646075:AAFsQGgw8IaLwvRZX-IBO9mgV3k048NpuMg/',
json.dumps(self.consulta), content_type="text/json", secure=True)
# Check that the response is 200 OK.
self.assertEqual(response.status_code, 200)
def test_simple_create_im_alone_but_with_my_dog_separated(self):
self.consulta[u'message'][u'text'] = u"forever alone"
response = self.client.post('/BotTelegram/119646075:AAFsQGgw8IaLwvRZX-IBO9mgV3k048NpuMg/',
json.dumps(self.consulta), content_type="text/json", secure=True)
# Check that the response is 200 OK.
self.assertEqual(response.status_code, 200)
self.consulta[u'message'][u'text'] = u"/create Im alone"
response = self.client.post('/BotTelegram/119646075:AAFsQGgw8IaLwvRZX-IBO9mgV3k048NpuMg/',
json.dumps(self.consulta), content_type="text/json", secure=True)
# Check that the response is 200 OK.
self.assertEqual(response.status_code, 200)
self.consulta[u'message'][u'text'] = u"/create Im alone - but with my dog"
response = self.client.post('/BotTelegram/119646075:AAFsQGgw8IaLwvRZX-IBO9mgV3k048NpuMg/',
json.dumps(self.consulta), content_type="text/json", secure=True)
# Check that the response is 200 OK.
self.assertEqual(response.status_code, 200)
def test_simple_create_im_alone_but_with_my_dog_separated_but_with_my_dog_separated_(self):
self.consulta[u'message'][u'text'] = u"forever alone"
response = self.client.post('/BotTelegram/119646075:AAFsQGgw8IaLwvRZX-IBO9mgV3k048NpuMg/',
json.dumps(self.consulta), content_type="text/json", secure=True)
# Check that the response is 200 OK.
self.assertEqual(response.status_code, 200)
self.consulta[u'message'][u'text'] = u"/create Im alone"
response = self.client.post('/BotTelegram/119646075:AAFsQGgw8IaLwvRZX-IBO9mgV3k048NpuMg/',
json.dumps(self.consulta), content_type="text/json", secure=True)
# Check that the response is 200 OK.
self.assertEqual(response.status_code, 200)
self.consulta[u'message'][u'text'] = u"/create Im alone - but with my dog - but with my dog"
response = self.client.post('/BotTelegram/119646075:AAFsQGgw8IaLwvRZX-IBO9mgV3k048NpuMg/',
json.dumps(self.consulta), content_type="text/json", secure=True)
# Check that the response is 200 OK.
self.assertEqual(response.status_code, 200)
def test_simple_create_im_alone_(self):
self.consulta[u'message'][u'text'] = u"forever alone"
response = self.client.post('/BotTelegram/119646075:AAFsQGgw8IaLwvRZX-IBO9mgV3k048NpuMg/',
json.dumps(self.consulta), content_type="text/json", secure=True)
# Check that the response is 200 OK.
self.assertEqual(response.status_code, 200)
self.consulta[u'message'][u'text'] = u"/create Im alone-"
response = self.client.post('/BotTelegram/119646075:AAFsQGgw8IaLwvRZX-IBO9mgV3k048NpuMg/',
json.dumps(self.consulta), content_type="text/json", secure=True)
# Check that the response is 200 OK.
self.assertEqual(response.status_code, 200)
def test_simple_create__im_alone(self):
self.consulta[u'message'][u'text'] = u"forever alone"
response = self.client.post('/BotTelegram/119646075:AAFsQGgw8IaLwvRZX-IBO9mgV3k048NpuMg/',
json.dumps(self.consulta), content_type="text/json", secure=True)
# Check that the response is 200 OK.
self.assertEqual(response.status_code, 200)
self.consulta[u'message'][u'text'] = u"/create -Im alone"
response = self.client.post('/BotTelegram/119646075:AAFsQGgw8IaLwvRZX-IBO9mgV3k048NpuMg/',
json.dumps(self.consulta), content_type="text/json", secure=True)
# Check that the response is 200 OK.
self.assertEqual(response.status_code, 200)
def test_simple_create__im_alone_(self):
self.consulta[u'message'][u'text'] = u"forever alone"
response = self.client.post('/BotTelegram/119646075:AAFsQGgw8IaLwvRZX-IBO9mgV3k048NpuMg/',
json.dumps(self.consulta), content_type="text/json", secure=True)
# Check that the response is 200 OK.
self.assertEqual(response.status_code, 200)
self.consulta[u'message'][u'text'] = u"/create -Im alone-"
response = self.client.post('/BotTelegram/119646075:AAFsQGgw8IaLwvRZX-IBO9mgV3k048NpuMg/',
json.dumps(self.consulta), content_type="text/json", secure=True)
# Check that the response is 200 OK.
self.assertEqual(response.status_code, 200)
def test_simple_create__especiales(self):
self.consulta[u'message'][u'text'] = u"forever alone"
response = self.client.post('/BotTelegram/119646075:AAFsQGgw8IaLwvRZX-IBO9mgV3k048NpuMg/',
json.dumps(self.consulta), content_type="text/json", secure=True)
# Check that the response is 200 OK.
self.assertEqual(response.status_code, 200)
self.consulta[u'message'][u'text'] = u"/create @!#$%&/(()~¨--.,ñ{ñ{ñ@@!-!AQW@ł@ł"
response = self.client.post('/BotTelegram/119646075:AAFsQGgw8IaLwvRZX-IBO9mgV3k048NpuMg/',
json.dumps(self.consulta), content_type="text/json", secure=True)
# Check that the response is 200 OK.
self.assertEqual(response.status_code, 200)
def test_simple_create_im_alone_black(self):
self.consulta[u'message'][u'text'] = u"forever alone"
response = self.client.post('/BotTelegram/119646075:AAFsQGgw8IaLwvRZX-IBO9mgV3k048NpuMg/',
json.dumps(self.consulta), content_type="text/json", secure=True)
# Check that the response is 200 OK.
self.assertEqual(response.status_code, 200)
self.consulta[u'message'][u'text'] = u"/create Im Alone, Black"
response = self.client.post('/BotTelegram/119646075:AAFsQGgw8IaLwvRZX-IBO9mgV3k048NpuMg/',
json.dumps(self.consulta), content_type="text/json", secure=True)
# Check that the response is 200 OK.
self.assertEqual(response.status_code, 200)
def test_simple_create_im_alone_mal_color(self):
self.consulta[u'message'][u'text'] = u"forever alone"
response = self.client.post('/BotTelegram/119646075:AAFsQGgw8IaLwvRZX-IBO9mgV3k048NpuMg/',
json.dumps(self.consulta), content_type="text/json", secure=True)
# Check that the response is 200 OK.
self.assertEqual(response.status_code, 200)
self.consulta[u'message'][u'text'] = u"/create Im Alone, PsyCongree"
response = self.client.post('/BotTelegram/119646075:AAFsQGgw8IaLwvRZX-IBO9mgV3k048NpuMg/',
json.dumps(self.consulta), content_type="text/json", secure=True)
# Check that the response is 200 OK.
self.assertEqual(response.status_code, 200)
def test_simple_create_im_alone__mal_color(self):
self.consulta[u'message'][u'text'] = u"forever alone"
response = self.client.post('/BotTelegram/119646075:AAFsQGgw8IaLwvRZX-IBO9mgV3k048NpuMg/',
json.dumps(self.consulta), content_type="text/json", secure=True)
# Check that the response is 200 OK.
self.assertEqual(response.status_code, 200)
self.consulta[u'message'][u'text'] = u"/create Im Alone-, PsyCongree"
response = self.client.post('/BotTelegram/119646075:AAFsQGgw8IaLwvRZX-IBO9mgV3k048NpuMg/',
json.dumps(self.consulta), content_type="text/json", secure=True)
# Check that the response is 200 OK.
self.assertEqual(response.status_code, 200)
def test_simple_create__im_alone__mal_color(self):
self.consulta[u'message'][u'text'] = u"forever alone"
response = self.client.post('/BotTelegram/119646075:AAFsQGgw8IaLwvRZX-IBO9mgV3k048NpuMg/',
json.dumps(self.consulta), content_type="text/json", secure=True)
# Check that the response is 200 OK.
self.assertEqual(response.status_code, 200)
self.consulta[u'message'][u'text'] = u"/create -Im Alone-, PsyCongree"
response = self.client.post('/BotTelegram/119646075:AAFsQGgw8IaLwvRZX-IBO9mgV3k048NpuMg/',
json.dumps(self.consulta), content_type="text/json", secure=True)
# Check that the response is 200 OK.
self.assertEqual(response.status_code, 200)
def test_simple_create__im_alone_mal_color(self):
self.consulta[u'message'][u'text'] = u"forever alone"
response = self.client.post('/BotTelegram/119646075:AAFsQGgw8IaLwvRZX-IBO9mgV3k048NpuMg/',
json.dumps(self.consulta), content_type="text/json", secure=True)
# Check that the response is 200 OK.
self.assertEqual(response.status_code, 200)
self.consulta[u'message'][u'text'] = u"/create -Im Alone, PsyCongree"
response = self.client.post('/BotTelegram/119646075:AAFsQGgw8IaLwvRZX-IBO9mgV3k048NpuMg/',
json.dumps(self.consulta), content_type="text/json", secure=True)
# Check that the response is 200 OK.
self.assertEqual(response.status_code, 200)
def test_simple_create_im_alone__buen_color(self):
self.consulta[u'message'][u'text'] = u"forever alone"
response = self.client.post('/BotTelegram/119646075:AAFsQGgw8IaLwvRZX-IBO9mgV3k048NpuMg/',
json.dumps(self.consulta), content_type="text/json", secure=True)
# Check that the response is 200 OK.
self.assertEqual(response.status_code, 200)
self.consulta[u'message'][u'text'] = u"/create Im Alone-, Red"
response = self.client.post('/BotTelegram/119646075:AAFsQGgw8IaLwvRZX-IBO9mgV3k048NpuMg/',
json.dumps(self.consulta), content_type="text/json", secure=True)
# Check that the response is 200 OK.
self.assertEqual(response.status_code, 200)
def test_simple_create__im_alone__buen_color(self):
self.consulta[u'message'][u'text'] = u"forever alone"
response = self.client.post('/BotTelegram/119646075:AAFsQGgw8IaLwvRZX-IBO9mgV3k048NpuMg/',
json.dumps(self.consulta), content_type="text/json", secure=True)
# Check that the response is 200 OK.
self.assertEqual(response.status_code, 200)
self.consulta[u'message'][u'text'] = u"/create -Im Alone-, Red"
response = self.client.post('/BotTelegram/119646075:AAFsQGgw8IaLwvRZX-IBO9mgV3k048NpuMg/',
json.dumps(self.consulta), content_type="text/json", secure=True)
# Check that the response is 200 OK.
self.assertEqual(response.status_code, 200)
def test_simple_create__im_alone_buen_color(self):
self.consulta[u'message'][u'text'] = u"forever alone"
response = self.client.post('/BotTelegram/119646075:AAFsQGgw8IaLwvRZX-IBO9mgV3k048NpuMg/',
json.dumps(self.consulta), content_type="text/json", secure=True)
# Check that the response is 200 OK.
self.assertEqual(response.status_code, 200)
self.consulta[u'message'][u'text'] = u"/create -Im Alone, Red"
response = self.client.post('/BotTelegram/119646075:AAFsQGgw8IaLwvRZX-IBO9mgV3k048NpuMg/',
json.dumps(self.consulta), content_type="text/json", secure=True)
# Check that the response is 200 OK.
self.assertEqual(response.status_code, 200)
def test_simple_create__im_alone_but_buen_color(self):
self.consulta[u'message'][u'text'] = u"forever alone"
response = self.client.post('/BotTelegram/119646075:AAFsQGgw8IaLwvRZX-IBO9mgV3k048NpuMg/',
json.dumps(self.consulta), content_type="text/json", secure=True)
# Check that the response is 200 OK.
self.assertEqual(response.status_code, 200)
self.consulta[u'message'][u'text'] = u"/create -Im Alone-but, Red"
response = self.client.post('/BotTelegram/119646075:AAFsQGgw8IaLwvRZX-IBO9mgV3k048NpuMg/',
json.dumps(self.consulta), content_type="text/json", secure=True)
# Check that the response is 200 OK.
self.assertEqual(response.status_code, 200)
def test_simple_create__im_alone__but_buen_color(self):
self.consulta[u'message'][u'text'] = u"forever alone"
response = self.client.post('/BotTelegram/119646075:AAFsQGgw8IaLwvRZX-IBO9mgV3k048NpuMg/',
json.dumps(self.consulta), content_type="text/json", | |
import torch
import os
import argparse
from dataset import DataLoader
from utils import MidtoCorner, IoU
from collections import Counter
from model import YOLOv1
# Argparse to start the YOLO training
ap = argparse.ArgumentParser()
ap.add_argument("-tip", "--test_img_files_path", default="bdd100k/images/100k/val/",
help="path to the test image folder")
ap.add_argument("-ttp", "--test_target_files_path",
default="bdd100k_labels_release/bdd100k/labels/det_v2_val_release.json",
help="path to json file containing the test labels")
ap.add_argument("-bs", "--batch_size", default=10, help="batch size")
ap.add_argument("-ls", "--load_size", default=1000,
help="amount of batches which are being loaded in one take")
ap.add_argument("-nb", "--number_boxes", default=2,
help="amount of bounding boxes which should be predicted")
ap.add_argument("-lc", "--lambda_coord", default=5,
help="hyperparameter penalizeing predicted bounding boxes in the loss function")
ap.add_argument("-ln", "--lambda_noobj", default=0.5,
help="hyperparameter penalizeing prediction confidence scores in the loss function")
ap.add_argument("-lmf", "--load_model_file", default="YOLO_bdd100k.pt",
help="name of the file containing the model weights")
ap.add_argument("-ioutn", "--iou_threshold_nms", default=0.9,
help="threshold for the IoU between the predicted boxes and the ground-truth boxes for NMS")
ap.add_argument("-ioutm", "--iou_threshold_map", default=0.7,
help="threshold for the IoU between the predicted boxes and the ground-truth boxes for mAP")
ap.add_argument("-t", "--threshold", default=0.5,
help="threshold for the confidence score of predicted bounding boxes")
ap.add_argument("-unms", "--use_nms", default=1,
help="1 if non max suppression should be used, else 0")
args = ap.parse_args()
# Dataset parameters
test_img_files_path = args.test_img_files_path
test_target_files_path = args.test_target_files_path
category_list = ["other vehicle", "pedestrian", "traffic light", "traffic sign",
"truck", "train", "other person", "bus", "car", "rider", "motorcycle",
"bicycle", "trailer"]
# Hyperparameters
batch_size = int(args.batch_size)
load_size = int(args.load_size)
split_size = 14
num_boxes = int(args.number_boxes)
lambda_coord = float(args.lambda_coord)
lambda_noobj = float(args.lambda_noobj)
iou_threshold_nms = float(args.iou_threshold_nms)
iou_threshold_map = float(args.iou_threshold_map)
threshold = float(args.threshold)
use_nms = int(args.use_nms)
# Other parameters
cell_dim = int(448/split_size)
num_classes = len(category_list)
load_model_file = args.load_model_file
def validate(test_img_files_path, test_target_files_path, category_list, split_size,
batch_size, load_size, model, cell_dim, num_boxes, num_classes, device,
iou_threshold_nms, iou_threshold_map, threshold, use_nms):
"""
Uses the test dataset to validate the performance of the model. Calculates
the mean Average Precision (mAP) for object detection.
Parameters:
test_img_files_path (str): System path to the image directory containing
the test dataset images.
test_target_files_path (str): System path to the json file containing the
ground-truth labels for the test dataset.
category_list (list): A list containing all classes which should be detected.
split_size (int): Size of the grid which is applied to the image.
batch_size (int): Batch size.
load_size (int): Amount of batches which are loaded in one function call.
model (): The YOLOv1-model.
cell_dim (int): The dimension of a single cell.
num_boxes (int): Amount of bounding boxes which are being predicted by
the model.
num_classes (int): Amount of classes which are being predicted.
device (): Device which is used for training and testing the model.
iou_threshold_nms (float): Threshold for the IoU between the predicted boxes
and the ground-truth boxes for non maximum suppression.
iou_threshold_map (float): Threshold for the IoU between the predicted boxes
and the ground-truth boxes for mean average precision.
threshold (float): Threshold for the confidence score of predicted
bounding boxes.
use_nms (bool): Specifies if non max suppression should be applied to the
bounding box predictions.
"""
model.eval()
print("DATA IS BEING LOADED FOR VALIDATION")
print("")
# Initialize the DataLoader for the test dataset
data = DataLoader(test_img_files_path, test_target_files_path, category_list,
split_size, batch_size, load_size)
data.LoadFiles()
# Here will all predicted and ground-truth bounding boxes for the whole test
# dataset be stored. These two lists will be finally used for evaluation.
# Every element of the list will have the following form:
# [image index, class prediction, confidence score, x1, y1, x2, y2]
# Every element of the list represents a single bounding box.
all_pred_boxes = []
all_target_boxes = []
train_idx = 0 # Tracks the sample index for each image in the test dataset
# This while loop is used to fill the two lists all_pred_boxes and all_target_boxes.
while len(data.img_files) > 0:
print("LOADING NEW VALIDATION BATCHES")
print("Remaining validation files:" + str(len(data.img_files)))
print("")
data.LoadData()
for batch_idx, (img_data, target_data) in enumerate(data.data):
img_data = img_data.to(device)
target_data = target_data.to(device)
with torch.no_grad():
predictions = model(img_data)
print('Extracting bounding boxes')
print('Batch: {}/{} ({:.0f}%)'.format(batch_idx+1, len(data.data),
(batch_idx+1) / len(data.data) * 100.))
print('')
pred_boxes = extract_boxes(predictions, num_classes, num_boxes,
cell_dim, threshold)
target_boxes = extract_boxes(target_data, num_classes, 1, cell_dim,
threshold)
for sample_idx in range(len(pred_boxes)):
if use_nms:
# Applies non max suppression to the bounding box predictions
nms_boxes = non_max_suppression(pred_boxes[sample_idx],
iou_threshold_nms)
else:
# Use the same list without changing anything
nms_boxes = pred_boxes[sample_idx]
for nms_box in nms_boxes:
all_pred_boxes.append([train_idx] + nms_box)
for box in target_boxes[sample_idx]:
all_target_boxes.append([train_idx] + box)
train_idx += 1
# Diagnostic print: count how many predicted boxes fall into each class index
pred = {0:0, 1:0, 2:0, 3:0, 4:0, 5:0, 6:0, 7:0, 8:0, 9:0, 10:0, 11:0, 12:0, 13:0}
for prediction in all_pred_boxes:
cls_idx = prediction[1]
pred[cls_idx] += 1
print(pred)
# Diagnostic print: the same per-class count for the ground-truth boxes
pred = {0:0, 1:0, 2:0, 3:0, 4:0, 5:0, 6:0, 7:0, 8:0, 9:0, 10:0, 11:0, 12:0, 13:0}
for prediction in all_target_boxes:
cls_idx = prediction[1]
pred[cls_idx] += 1
print(pred)
"""
print('')
print('##################### Calculating mAP #####################')
print('')
mean_avg_prec = mean_average_precision(all_pred_boxes, all_target_boxes,
iou_threshold_map, box_format="corner",
num_classes = num_classes)
#print(f"Train mAP: {mean_avg_prec}")
"""
def extract_boxes(yolo_tensor, num_classes, num_boxes, cell_dim, threshold):
"""
Extracts all bounding boxes from a given tensor and transforms them into a list.
Removes all bounding boxes which have a confidence score smaller than the
specified threshold.
Parameters:
yolo_tensor (tensor): The tensor from which the bounding boxes need to
be extracted.
num_classes (int): Amount of classes which are being predicted.
num_boxes (int): Amount of bounding boxes which are being predicted.
cell_dim (int): Dimension of a single cell.
threshold (float): Threshold for the confidence score of predicted
bounding boxes.
Returns:
all_bboxes (list): A list where each element is a list representing one
image from the batch. This inner list contains other lists which represent
the bounding boxes within this image.
The box lists are specified as [class_pred, conf_score, x1, y1, x2, y2]
"""
all_bboxes = [] # Stores the final output
for sample_idx in range(yolo_tensor.shape[0]):
bboxes = [] # Stores all bounding boxes of a single image
for cell_h in range(yolo_tensor.shape[1]):
for cell_w in range(yolo_tensor.shape[2]):
# Used to extract the bounding box with the highest confidence
best_box = 0
max_conf = 0.
for box_idx in range(num_boxes):
if yolo_tensor[sample_idx, cell_h, cell_w, box_idx*5] > max_conf:
max_conf = yolo_tensor[sample_idx, cell_h, cell_w, box_idx*5]
best_box = box_idx
conf = yolo_tensor[sample_idx, cell_h, cell_w, best_box*5]
if conf < threshold:
continue
# Used to extract the class with the highest score
best_class = 0
max_conf = 0.
for class_idx in range(num_classes):
if yolo_tensor[sample_idx, cell_h, cell_w, num_boxes*5+class_idx] > max_conf:
max_conf = yolo_tensor[sample_idx, cell_h, cell_w, num_boxes*5+class_idx]
best_class = class_idx
cords = MidtoCorner(yolo_tensor[sample_idx, cell_h, cell_w,
best_box*5+1:best_box*5+5], cell_h, cell_w, cell_dim)
x1 = cords[0]
y1 = cords[1]
x2 = cords[2]
y2 = cords[3]
bboxes.append([best_class, conf, x1, y1, x2, y2])
all_bboxes.append(bboxes)
return all_bboxes
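# Editor's note (not in the original file): for an input tensor shaped
# (batch, split_size, split_size, num_boxes*5 + num_classes), extract_boxes returns one
# list per image, e.g. [[8, 0.91, 120.0, 80.0, 260.0, 200.0], ...], i.e. class index 8
# ('car' in category_list above) with confidence 0.91 and corner coordinates x1, y1, x2, y2.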
def non_max_suppression(bboxes, iou_threshold):
"""
Applies non maximum suppression to a list of bounding boxes.
Parameters:
bboxes (list): List of lists containing all bboxes with each bboxes
specified as [class_pred, conf_score, x1, y1, x2, y2].
iou_threshold (float): Threshold for the IOU with the ground truth bbox.
Returns:
bboxes_after_nms (list): bboxes after performing NMS given a specific
IoU threshold.
"""
assert type(bboxes) == list
bboxes = sorted(bboxes, key=lambda x: x[1], reverse=True)
bboxes_after_nms = []
while bboxes:
chosen_box = bboxes.pop(0)
bboxes = [
box
for box in bboxes
if box[0] != chosen_box[0]
or IoU(
chosen_box[2:],
box[2:]
)
< iou_threshold
]
bboxes_after_nms.append(chosen_box)
return bboxes_after_nms
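# Editor's sketch (not in the original file, made-up values): two heavily overlapping
# boxes of class 8 ('car') collapse to the higher-confidence one, while the distant
# class-1 ('pedestrian') box is kept, e.g. with iou_threshold=0.5:
#     boxes = [[8, 0.9, 10, 10, 50, 50], [8, 0.8, 12, 12, 52, 52], [1, 0.7, 200, 200, 240, 240]]
#     non_max_suppression(boxes, iou_threshold=0.5)
#     # -> [[8, 0.9, 10, 10, 50, 50], [1, 0.7, 200, 200, 240, 240]]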
def mean_average_precision(pred_boxes, true_boxes, iou_threshold, box_format,
num_classes):
"""
Calculates the mean average precision.
Parameters:
pred_boxes (list): List of lists containing all predicted bboxes with
each bbox specified as [train_idx, class_prediction, prob_score, x1, y1, x2, y2].
true_boxes (list): List of lists containing all ground truth bboxes with
each bbox specified as [train_idx, class_prediction, prob_score, x1, y1, x2, y2].
iou_threshold (float): Threshold for counting a predicted bbox as true positive.
box_format (str): "midpoint" or "corners" used to specify the bbox format.
num_classes (int): Number of classes.
Returns:
float: mAP value across all classes given a specific IoU threshold .
"""
# list storing all AP | |
Description
Based on the class Node and class BinSearchTree given in the starter code, complete the implementation of the insert(key, data) operation, which inserts a new element into a binary search tree.
The insert(key, data) method of class BinSearchTree is already implemented. Use it as-is, and implement the insert(key, data) method of class Node recursively. As mentioned in the lecture, attempting an insertion with a (duplicate) key that already exists in the tree must raise a KeyError exception.
[Note 1] Leave the implementation of the inorder() method as it is. It is used for testing.
[Note 2] Also leave the implementation of the solution() function as it is. Removing it breaks the tests.
[Note 3] Passing when you press "Run code" by itself means nothing.
'''
class Node:
def __init__(self, key, data):
self.key = key
self.data = data
self.left = None
self.right = None
# My solution
def insert(self, key, data):
if key < self.key:
if self.left:
self.left.insert(key,data)
else:
self.left = Node(key,data)
elif key > self.key:
if self.right:
self.right.insert(key,data)
else:
self.right = Node(key,data)
else:
raise KeyError("")
def inorder(self):
traversal = []
if self.left:
traversal += self.left.inorder()
traversal.append(self)
if self.right:
traversal += self.right.inorder()
return traversal
class BinSearchTree:
def __init__(self):
self.root = None
def insert(self, key, data):
if self.root:
self.root.insert(key, data)
else:
self.root = Node(key, data)
def inorder(self):
if self.root:
return self.root.inorder()
else:
return []
def solution(x):
return 0
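# Quick usage sketch (editor's addition, not part of the graded solution): keys come back
# in sorted order from inorder(), and inserting a duplicate key raises KeyError:
#     t = BinSearchTree()
#     for k in [5, 2, 8]:
#         t.insert(k, str(k))
#     [n.key for n in t.inorder()]   # -> [2, 5, 8]
#     t.insert(5, 'dup')             # -> raises KeyError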
'''
(21) Implementing the node deletion operation in a binary search tree
Problem description
Based on the class Node and class BinSearchTree given in the starter code, complete the implementation of the remove(key) operation, which deletes a specified element from a binary search tree.
Do not modify the code already implemented in class Node and class BinSearchTree; it is used to evaluate the correctness of your implementation. Referring to the comments in the starter code, remove the pass inside the BinSearchTree::remove() method and write the correct code in its place.
[Note 1] Leave the implementation of the solution() function as it is. Removing it breaks the tests.
[Note 2] Passing when you press "Run code" by itself means nothing.
[Note 3] If you think about it, the same operations could also be implemented without a binary search tree, by keeping a Python array sorted by key.
Because this exercise does not test efficiency, such an implementation is not treated as wrong, but the array-based and tree-based implementations differ greatly in the complexity of the operations.
Please implement it with a binary search tree to practice writing the code.
'''
class Node:
def __init__(self, key, data):
self.key = key
self.data = data
self.left = None
self.right = None
def insert(self, key, data):
if key < self.key:
if self.left:
self.left.insert(key, data)
else:
self.left = Node(key, data)
elif key > self.key:
if self.right:
self.right.insert(key, data)
else:
self.right = Node(key, data)
else:
raise KeyError('Key %s already exists.' % key)
def lookup(self, key, parent=None):
if key < self.key:
if self.left:
return self.left.lookup(key, self)
else:
return None, None
elif key > self.key:
if self.right:
return self.right.lookup(key, self)
else:
return None, None
else:
return self, parent
def inorder(self):
traversal = []
if self.left:
traversal += self.left.inorder()
traversal.append(self)
if self.right:
traversal += self.right.inorder()
return traversal
def countChildren(self):
count = 0
if self.left:
count += 1
if self.right:
count += 1
return count
class BinSearchTree:
def __init__(self):
self.root = None
def insert(self, key, data):
if self.root:
self.root.insert(key, data)
else:
self.root = Node(key, data)
def lookup(self, key):
if self.root:
return self.root.lookup(key)
else:
return None, None
def remove(self, key):
node, parent = self.lookup(key)
if node:
nChildren = node.countChildren()
# The simplest case of no children
if nChildren == 0:
# If there is a parent,
if parent:
if parent.left.key == node.key:
parent.left = None
else:
parent.right = None
# determine whether node is the left or the right child, and set
# parent.left or parent.right to None, cutting the former leaf node
# off the tree.
# If there is no parent (node is the root),
# set self.root to None to make the tree empty.
else:
self.root = None
# When the node has only one child
elif nChildren == 1:
# Determine whether the single child is on the left or the right,
# and make a variable point to that child.
if node.left:
s = node.left
else:
s = node.right
# If there is a parent,
# determine whether node is the left or the right child
# and put the child referenced above into node's place.
if parent:
if parent.left.key == node.key:
parent.left = s
s = None
else:
parent.right = s
s = None
# If there is no parent (node is the root),
# put the child referenced above into self.root instead.
else:
self.root = s
# When the node has both left and right children
else:
# First attempt (wrong answer) -- kept for reference but commented out so that
# only the second attempt below actually runs: it breaks when node.right has no
# left child and it also calls a nonexistent countChilde(), which matches the
# runtime errors in the test log underneath.
# parent = node.right
# s = parent.left
# while s.left:
#     parent = s
#     s = s.left
# node.key = s.key
# node.data = s.data
# if s.right:
#     parent.left = s.right
# if s.countChilde() == 0:
#     parent.left = None
'''
Test 1 〉 Passed (0.07ms, 16.8MB)
Test 2 〉 Passed (0.04ms, 16.7MB)
Test 3 〉 Passed (0.07ms, 16.7MB)
Test 4 〉 Passed (0.06ms, 16.7MB)
Test 5 〉 Failed (runtime error)
Test 6 〉 Failed (runtime error)
Test 7 〉 Passed (0.06ms, 16.9MB)
Test 8 〉 Failed (runtime error)
Test 9 〉 Failed (runtime error)
'''
# Second attempt (correct answer)
s = node.right
parent = node
while s.left:
parent = s
s = parent.left
node.key = s.key
node.data = s.data
if parent.right == s: # case where parent is still node itself, i.e. the successor s had no left child
parent.right = s.right
if parent.left == s:
parent.left = s.right
'''
Test 1 〉 Passed (0.13ms, 16.8MB)
Test 2 〉 Passed (0.04ms, 16.7MB)
Test 3 〉 Passed (0.07ms, 16.7MB)
Test 4 〉 Passed (0.06ms, 16.8MB)
Test 5 〉 Passed (0.06ms, 16.8MB)
Test 6 〉 Passed (0.06ms, 16.7MB)
Test 7 〉 Passed (0.06ms, 16.8MB)
Test 8 〉 Passed (0.06ms, 16.7MB)
Test 9 〉 Passed (0.07ms, 16.7MB)
'''
return True
else:
return False
def inorder(self):
if self.root:
return self.root.inorder()
else:
return []
def solution(x):
return 0
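# Illustrative check of the remove() implementation above (added here for the
# notes, not part of the graded solution): deleting a node with two children
# splices in its in-order successor.
_t = BinSearchTree()
for _k in [8, 3, 10, 1, 6, 14, 4, 7, 13]:
    _t.insert(_k, str(_k))
_t.remove(3)                               # key 3 has two children
print([_n.key for _n in _t.inorder()])     # [1, 4, 6, 7, 8, 10, 13, 14]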
'''
(22) Inserting a new element into a max heap
Problem description
Complete the implementation of the insert() method, the operation that adds a new element to the max heap, in the class MaxHeap given in the starter code.
[Note 1] Leave the implementation of the solution() function as is. Removing it breaks the tests.
[Note 2] Passing when you press "Run code" by itself means nothing.
'''
class MaxHeap:
def __init__(self):
self.data = [None]
def insert(self, item):
# My solution
a = self.data
a.append(item)
itemIndex = a.index(item)
while itemIndex != 1:
parentIndex = itemIndex // 2
if a[itemIndex] > a[parentIndex]:
a[itemIndex], a[parentIndex] = a[parentIndex], a[itemIndex]
itemIndex = parentIndex  # move up to the parent slot (same as itemIndex // 2)
else:
break
'''
Test 1 〉 Passed (0.06ms, 16.5MB)
'''
# Alternative solution (for reference) -- commented out, because letting it run
# after the code above would append the same item a second time:
# self.data.append(item)
# i = len(self.data) - 1
# while i != 1:
#     if self.data[i] > self.data[(i // 2)]:
#         self.data[i], self.data[(i // 2)] = self.data[(i // 2)], self.data[i]
#         i = i // 2
#     else:
#         break
def solution(x):
return 0
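# Illustrative check of MaxHeap.insert() above (added here for the notes, not
# part of the graded solution): after the inserts, index 1 holds the maximum.
_h = MaxHeap()
for _v in [3, 9, 2, 7, 5]:
    _h.insert(_v)
print(_h.data)    # [None, 9, 7, 2, 3, 5] with this sift-up order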
'''
(23) Removing an element from a max heap
Problem description
Complete the implementation of the maxHeapify() method of class MaxHeap by filling in the blanks scattered through the starter code. Combined with the remove() method that is already given, it forms the element removal operation of the max heap.
[Note 1] The body of the remove() method is already given, so it is better not to modify it.
[Note 2] Leave the implementation of the solution() function as is. Removing it breaks the tests.
[Note 3] Passing when you press "Run code" by itself means nothing.
'''
class MaxHeap:
def __init__(self):
self.data = [None]
def remove(self):
if len(self.data) > 1:
self.data[1], self.data[-1] = self.data[-1], self.data[1]
data = self.data.pop(-1)
self.maxHeapify(1)
else:
data = None
return data
# My solution
def maxHeapify(self, i):
# Compute the index of the left child.
left = 2*i
# Compute the index of the right child.
right = 2*i +1
smallest = i
# Check whether the left child exists, and whether the left child's (key) value is greater (than what?).
if left < len(self.data) and self.data[left] > self.data[i]:
# If the condition holds, smallest takes the index of the left child.
smallest = left
# Check whether the right child exists, and whether the right child's (key) value is greater (than what?).
if right < len(self.data) and self.data[right] > self.data[smallest]:
#### ⭐️⭐️ The point is to find the maximum (smallest) among i, right and left; I first wrote self.data[right] > self.data[i] and got badly stuck...
## I think it was extra confusing because the problem setter named the variable for the maximum smallest instead of something like greatest.
# If the condition holds, smallest takes the index of the right child.
smallest = right
if smallest != i:
# current
import requests
import pandas as pd
import numpy as np
import configparser
from datetime import datetime
from dateutil import relativedelta, parser, rrule
from dateutil.rrule import WEEKLY
class WhoopClient:
'''A class to allow a user to login and store their authorization code,
then perform pulls using the code in order to access different types of data'''
def __init__(self,
auth_code=None,
whoop_id=None,
current_datetime=datetime.utcnow()):
self.auth_code = auth_code
self.whoop_id = whoop_id
self.current_datetime = current_datetime
self.start_datetime = None
self.all_data = None
self.all_activities = None
self.sport_dict = None
self.all_sleep = None
self.all_sleep_events = None
def reset(self):
self.auth_code = None
self.whoop_id = None
self.current_datetime = datetime.utcnow()
self.start_datetime = None
self.all_data = None
self.all_activities = None
self.sport_dict = None
self.all_sleep = None
self.all_sleep_events = None
def pull_api(self, url, df=False):
auth_code = self.auth_code
headers = {'authorization': auth_code}
pull = requests.get(url, headers=headers)
if pull.status_code == 200 and len(pull.content) > 1:
if df:
d = pd.json_normalize(pull.json())
return d
else:
return pull.json()
else:
return "no response"
def pull_sleep_main(self, sleep_id):
athlete_id = self.whoop_id
sleep = self.pull_api(
'https://api-7.whoop.com/users/{}/sleeps/{}'.format(
athlete_id, sleep_id))
main_df = pd.json_normalize(sleep)
return main_df
def pull_sleep_events(self, sleep_id):
athlete_id = self.whoop_id
sleep = self.pull_api(
'https://api-7.whoop.com/users/{}/sleeps/{}'.format(
athlete_id, sleep_id))
events_df = pd.json_normalize(sleep['events'])
events_df['id'] = sleep_id
return events_df
def get_authorization(self, user_ini):
'''
Function to get the authorization token and user id.
This must be completed before a user can query the api
'''
config = configparser.ConfigParser()
config.read(user_ini)
username = config['whoop']['username']
password = config['whoop']['password']
headers = {
"username": username,
"password": password,
"grant_type": "password",
"issueRefresh": False
}
auth = requests.post("https://api-7.whoop.com/oauth/token",
json=headers)
if auth.status_code == 200:
content = auth.json()
user_id = content['user']['id']
token = content['access_token']
start_time = content['user']['profile']['createdAt']
self.whoop_id = user_id
self.auth_code = 'bearer ' + token
self.start_datetime = start_time
print("Whoop: Authentication successful")
else:
print(
"Authentication failed - please double check your credentials")
def get_keydata_all(self):
'''
This function returns a dataframe of WHOOP metrics for each day of WHOOP membership.
In the resulting dataframe, each day is a row and contains strain, recovery, and sleep information
'''
if self.start_datetime:
if self.all_data is not None:
## All data already pulled
return self.all_data
else:
start_date = parser.isoparse(
self.start_datetime).replace(tzinfo=None)
end_time = 'T23:59:59.999Z'
start_time = 'T00:00:00.000Z'
intervals = rrule.rrule(freq=WEEKLY,
interval=1,
until=self.current_datetime,
dtstart=start_date)
date_range = [[
d.strftime('%Y-%m-%d') + start_time,
(d +
relativedelta.relativedelta(weeks=1)).strftime('%Y-%m-%d')
+ end_time
] for d in intervals]
all_data = pd.DataFrame()
for dates in date_range:
cycle_url = 'https://api-7.whoop.com/users/{}/cycles?end={}&start={}'.format(
self.whoop_id, dates[1], dates[0])
data = self.pull_api(cycle_url, df=True)
all_data = pd.concat([all_data, data])
all_data.reset_index(drop=True, inplace=True)
## fixing the day column so it's not a list
all_data['days'] = all_data['days'].map(lambda d: d[0])
all_data.rename(columns={"days": 'day'}, inplace=True)
## Putting all time into minutes instead of milliseconds
sleep_cols = [
'qualityDuration', 'needBreakdown.baseline',
'needBreakdown.debt', 'needBreakdown.naps',
'needBreakdown.strain', 'needBreakdown.total'
]
for sleep_col in sleep_cols:
all_data['sleep.' + sleep_col] = all_data[
'sleep.' + sleep_col].astype(float).apply(
lambda x: np.nan if np.isnan(x) else x / 60000)
## Making nap variable
all_data['nap_duration'] = all_data['sleep.naps'].apply(
lambda x: x[0]['qualityDuration'] / 60000
if len(x) == 1 else (sum([
y['qualityDuration'] for y in x
if y['qualityDuration'] is not None
]) / 60000 if len(x) > 1 else 0))
all_data.drop(['sleep.naps'], axis=1, inplace=True)
## dropping duplicates subsetting because of list columns
all_data.drop_duplicates(subset=['day', 'sleep.id'],
inplace=True)
self.all_data = all_data
return all_data
else:
print("Please run the authorization function first")
def get_activities_all(self):
'''
Activity data is pulled through the get_keydata functions so if the data pull is present, this function
just transforms the activity column into a dataframe of activities, where each activity is a row.
If it has not been pulled, this function runs the key data function then returns the activity dataframe'''
if self.sport_dict:
sport_dict = self.sport_dict
else:
sports = self.pull_api('https://api-7.whoop.com/sports')
sport_dict = {sport['id']: sport['name'] for sport in sports}
self.sport_dict = sport_dict  # cache the id -> name mapping for later calls
if self.start_datetime:
## process activity data
if self.all_data is not None:
## use existing
data = self.all_data
else:
## pull all data to process activities
data = self.get_keydata_all()
## now process activities data
act_data = pd.json_normalize(
data[data['strain.workouts'].apply(len) > 0]
['strain.workouts'].apply(lambda x: x[0]))
act_data[['during.upper', 'during.lower'
]] = act_data[['during.upper',
'during.lower']].apply(pd.to_datetime)
act_data['total_minutes'] = act_data.apply(
lambda x:
(x['during.upper'] - x['during.lower']).total_seconds() / 60.0,
axis=1)
for z in range(0, 6):
act_data['zone{}_minutes'.format(
z + 1)] = act_data['zones'].apply(lambda x: x[z] / 60000.)
act_data['sport_name'] = act_data.sportId.apply(
lambda x: sport_dict[x])
act_data['day'] = act_data['during.lower'].dt.strftime('%Y-%m-%d')
act_data.drop(['zones', 'during.bounds'], axis=1, inplace=True)
act_data.drop_duplicates(inplace=True)
self.all_activities = act_data
return act_data
else:
print("Whoop: Please run the authorization function first")
def get_sleep_all(self):
'''
This function returns all sleep metrics in a data frame, for the duration of user's WHOOP membership.
Each row in the data frame represents one night of sleep
'''
if self.auth_code:
if self.all_data is not None:
## use existing
data = self.all_data
else:
## pull timeframe data
data = self.get_keydata_all()
## getting all the sleep ids
if self.all_sleep is not None:
## All sleep data already pulled
return self.all_sleep
else:
sleep_ids = data['sleep.id'].values.tolist()
sleep_list = [int(x) for x in sleep_ids if pd.isna(x) == False]
all_sleep = pd.DataFrame()
for s in sleep_list:
m = self.pull_sleep_main(s)
all_sleep = pd.concat([all_sleep, m])
## Cleaning sleep data
sleep_update = [
'qualityDuration', 'latency', 'debtPre', 'debtPost',
'needFromStrain', 'sleepNeed', 'habitualSleepNeed',
'timeInBed', 'lightSleepDuration', 'slowWaveSleepDuration',
'remSleepDuration', 'wakeDuration', 'arousalTime',
'noDataDuration', 'creditFromNaps', 'projectedSleep'
]
for col in sleep_update:
all_sleep[col] = all_sleep[col].astype(float).apply(
lambda x: np.nan if np.isnan(x) else x / 60000)
all_sleep.drop(['during.bounds'], axis=1, inplace=True)
self.all_sleep = all_sleep.copy(deep=True)
all_sleep.drop(['events'], axis=1, inplace=True)
return all_sleep
else:
print("Whoop: Please run the authorization function first")
def get_sleep_events_all(self):
'''
This function returns all sleep events in a data frame, for the duration of user's WHOOP membership.
Each row in the data frame represents an individual sleep event within an individual night of sleep.
Sleep events can be joined against the sleep or main datasets by sleep id.
All sleep times are returned in minutes.
'''
if self.auth_code:
if self.all_data is not None:
## use existing
data = self.all_data
else:
## pull timeframe data
data = self.get_keydata_all()
## getting all the sleep ids
if self.all_sleep_events is not None:
## All sleep data already pulled
return self.all_sleep_events
else:
if self.all_sleep is not None:
sleep_events = self.all_sleep[['activityId', 'events']]
all_sleep_events = pd.concat([
pd.concat([
pd.json_normalize(events),
pd.DataFrame({'id': len(events) * [sleep]})
],
axis=1) for events, sleep in
zip(sleep_events['events'], sleep_events['activityId'])
])
else:
sleep_ids = data['sleep.id'].values.tolist()
sleep_list = [
int(x) for x in sleep_ids if pd.isna(x) == False
]
all_sleep_events = pd.DataFrame()
for s in sleep_list:
events = self.pull_sleep_events(s)
all_sleep_events = pd.concat(
[all_sleep_events, events])
## Cleaning sleep events data
all_sleep_events['during.lower'] = pd.to_datetime(
all_sleep_events['during.lower'])
all_sleep_events['during.upper'] = pd.to_datetime(
all_sleep_events['during.upper'])
all_sleep_events.drop(['during.bounds'], axis=1, inplace=True)
all_sleep_events['total_minutes'] = all_sleep_events.apply(
lambda x: (x['during.upper'] - x['during.lower']
).total_seconds() / 60.0,
axis=1)
self.all_sleep_events = all_sleep_events
return all_sleep_events
else:
print("Whoop: Please run the authorization function first")
#returnTYpe = df, json
def get_hr_all(self, returnType=None):
'''
This function will pull every heart rate measurement recorded for the life of WHOOP membership.
The default return for this function is a list of lists, where each "row" contains the date, time, and hr value.
The measurements are spaced out every ~6 seconds on average.
To return a dataframe, set returnType="df" (or returnType="json" for a list of dicts). This will take a bit longer, but will return a data frame.
NOTE: This api pull takes about 6 seconds per week of data ... or about 1 minute for 10 weeks of data,
so be careful when you pull, it may take a while.
'''
if self.start_datetime:
athlete_id = self.whoop_id
start_date = parser.isoparse(
self.start_datetime).replace(tzinfo=None)
end_time = 'T23:59:59.999Z'
start_time = 'T00:00:00.000Z'
intervals = rrule.rrule(freq=WEEKLY,
interval=1,
until=self.current_datetime,
dtstart=start_date)
date_range = [[
d.strftime('%Y-%m-%d') + start_time,
(d + relativedelta.relativedelta(weeks=1)).strftime('%Y-%m-%d')
+ end_time
] for d in intervals]
hr_list = []
for dates in date_range:
start = dates[0]
end = dates[1]
ul = '''https://api-7.whoop.com/users/{}/metrics/heart_rate?end={}&order=t&start={}&step=6'''.format(
athlete_id, end, start)
hr_vals = self.pull_api(ul)['values']
hr_values = [[
datetime.utcfromtimestamp(h['time'] / 1e3).date(),
datetime.utcfromtimestamp(h['time'] / 1e3).time(),
h['data']
] for h in hr_vals]
hr_list.extend(hr_values)
if returnType == "df":
hr_df = pd.DataFrame(hr_list)
hr_df.columns = ['date', 'time', 'hr']
return hr_df
elif returnType == "json":
hr_json = [{
'datetime': str(h[0]) + 'T' + str(h[1]),
'hr': h[2]
} for h in hr_list]
return hr_json
else:
return hr_list
else:
print("Please run the authorization function first")
def get_keydata_timeframe(self,
start,
end=datetime.strftime(datetime.utcnow(),
"%Y-%m-%d")):
'''
This function returns a dataframe of WHOOP metrics for each day in a
_x_max = _df_kde_i[_x_name].iloc[-1]
_x_i = np.extract((_x > _x_min) & (_x < _x_max), _x)
_mean, _std = stats.norm.fit(_x_i)
_df_kde_ex['mean'].loc[_index] = _mean
_df_kde_ex['std'].loc[_index] = _std
_df_kde_ex['range'].loc[_index] = _x_max - _x_min
_df_kde_ex['range_min'].loc[_index] = _x_min
_df_kde_ex['range_max'].loc[_index] = _x_max
_df_kde_ex['value_min'].loc[_index] = _df_kde_i['value'].iloc[0]
_df_kde_ex['value_max'].loc[_index] = _df_kde_i['value'].iloc[-1]
return _df_kde, _df_kde_ex
# wrapper to quickly aggregate df
def qagg(df: pd.DataFrame, groupby, columns=None, agg=None, reset_index=True):
if agg is None:
agg = ['mean', 'std']
if columns is None:
columns = df.select_dtypes(include=np.number).columns
_df_agg = df.groupby(groupby).agg({_: agg for _ in columns})
_df_agg = _df_agg.set_axis(flatten([[_ + '_mean', _ + '_std'] for _ in columns]), axis=1, inplace=False)
if reset_index:
_df_agg = _df_agg.reset_index()
return _df_agg
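# Illustrative qagg() call (a sketch relying on the module's own flatten helper
# and the pandas/numpy imports above): mean and std per group with flat column names.
if __name__ == '__main__':
    _demo = pd.DataFrame({'g': ['a', 'a', 'b'], 'v': [1.0, 3.0, 5.0]})
    print(qagg(_demo, groupby='g'))    # columns: g, v_mean, v_std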
@export
def mahalanobis(point: Union[pd.DataFrame, pd.Series, np.ndarray], df: pd.DataFrame = None, params: List[str] = None,
do_print: bool = True) -> Union[float, List[float]]:
"""
Calculates the Mahalanobis distance for a single point or a DataFrame of points
:param point: The point(s) to calculate the Mahalanobis distance for
:param df: The reference DataFrame against which to calculate the Mahalanobis distance
:param params: The columns to calculate the Mahalanobis distance for
:param do_print: Whether to print intermediate steps to the console
:return: if a single point is passed: Mahalanobis distance as float, else a list of floats
"""
if df is None:
df = point
_df = df.copy()
del df
if params is None:
params = _df.columns
else:
_df = _df[params]
try:
_vi = np.linalg.inv(_df.cov())
except np.linalg.LinAlgError:
return np.nan
_y = _df.mean().values
if isinstance(point, pd.DataFrame):
_out = []
_it = -1
for _index, _row in point.iterrows():
_it += 1
if do_print:
progressbar(_it, point.shape[0])
_x = _row[params].values
_out.append(distance.mahalanobis(_x, _y, _vi))
if do_print:
progressbar()
return _out
elif isinstance(point, pd.Series):
_x = point[params].values
else:
_x = np.array(point)
return distance.mahalanobis(_x, _y, _vi)
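# Illustrative mahalanobis() calls: the reference DataFrame's own mean sits at
# distance ~0, and passing a DataFrame returns one distance per row.
if __name__ == '__main__':
    _demo = pd.DataFrame({'a': [1., 2., 3., 4.], 'b': [2., 1., 4., 3.]})
    print(mahalanobis(_demo.mean(), df=_demo))    # ~0.0
    print(mahalanobis(_demo, do_print=False))     # list of 4 floats, one per row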
def multi_melt(df, cols, suffixes, id_vars, var_name='variable', sep='_', **kwargs):
# for multi melt to work the columns must share common suffixes
_df = df.copy()
del df
_df_out = []
for _col in cols:
_value_vars = ['{}{}{}'.format(_col, sep, _suffix) for _suffix in suffixes]
_df_out_i = _df.melt(id_vars=id_vars, value_vars=_value_vars, value_name=_col, var_name=var_name, **kwargs)
_df_out_i[var_name] = _df_out_i[var_name].str.slice(len(_col) + len(sep))
_df_out_i = _df_out_i.sort_values(by=assert_list(id_vars) + [var_name]).reset_index(drop=True)
_df_out.append(_df_out_i)
_df_out = pd.concat(_df_out, axis=1).pipe(drop_duplicate_cols)
return _df_out
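# Illustrative multi_melt() call: 'x' and 'y' both carry the suffixes 'mean'
# and 'std', which is the common-suffix requirement noted above.
if __name__ == '__main__':
    _wide = pd.DataFrame({'id': [1, 2],
                          'x_mean': [1.0, 2.0], 'x_std': [0.1, 0.2],
                          'y_mean': [3.0, 4.0], 'y_std': [0.3, 0.4]})
    print(multi_melt(_wide, cols=['x', 'y'], suffixes=['mean', 'std'], id_vars='id'))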
# for resampling integer indexes
def resample(df, rule=1, on=None, groupby=None, agg='mean', columns=None, adj_column_names=True, factor=1, **kwargs):
assert isinstance(df, pd.DataFrame), 'df must be a DataFrame'
_df = df.copy()
del df
if on is not None:
_df = _df.set_index(on)
if columns is None:
_columns = _df.select_dtypes(include=np.number).columns
else:
_columns = columns
if groupby is not None:
_columns = [_ for _ in _columns if _ not in assert_list(groupby)]
_df = _df.groupby(groupby)
# convert int to seconds to be able to use .resample
_df.index = pd.to_datetime(_df.index * factor, unit='s')
# resample as time series
_df = _df.resample('{}s'.format(rule), **kwargs)
# agg
_adj_column_names = False
if agg == 'mean':
_df = _df.mean()
elif agg == 'median':
_df = _df.median()
elif agg == 'sum':
_df = _df.sum()
else:
_df = _df.agg({_: agg for _ in _columns})
if adj_column_names:
_adj_column_names = True
# back to int
_df.index = ((_df.index - pd.to_datetime('1970-01-01')).total_seconds() / factor)
if _adj_column_names:
_column_names = []
for _col in _columns:
for _agg in assert_list(agg):
_column_names += ['{}_{}'.format(_col, _agg)]
_df.columns = _column_names
return _df
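# Illustrative resample() call on an integer index: values are averaged over
# buckets of 2 index steps (internally treated as seconds) and the index is
# mapped back to integers afterwards.
if __name__ == '__main__':
    _ts = pd.DataFrame({'t': [0, 1, 2, 3], 'value': [1.0, 2.0, 3.0, 4.0]})
    print(resample(_ts, rule=2, on='t'))    # value -> 1.5 at t=0.0, 3.5 at t=2.0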
@docstr
@export
def df_count(x: str, df: pd.DataFrame, hue: Optional[str] = None, sort_by_count: bool = True, top_nr: int = 5,
x_base: Optional[float] = None, x_min: Optional[float] = None, x_max: Optional[float] = None,
other_name: str = 'other', other_to_na: bool = False, na: Union[bool, str] = 'drop') -> pd.DataFrame:
"""
Create a DataFrame of value counts. Supports hue levels and is therefore useful for plots, for an application
see :func:`~hhpy.plotting.countplot`
:param x: %(x)s
:param df: %(df)s
:param hue: %(hue)s
:param sort_by_count: Whether to sort the DataFrame by value counts [optional]
:param top_nr: %(top_nr)s
:param x_base: if supplied: cast x to integer multiples of x_base, useful when you have float data that would
result in many unique counts for close numbers [optional]
:param x_min: limit the range of valid numeric x values to be greater than or equal to x_min [optional]
:param x_max: limit the range of valid numeric x values to be less than or equal to x_max [optional]
:param other_name: %(other_name)s
:param other_to_na: %(other_to_na)s
:param na: whether to keep (True, 'keep') na values and implicitly cast to string
or drop (False, 'drop') them [optional]
:return: pandas DataFrame containing the counts by x (and by hue if it is supplied)
"""
# -- init
# avoid inplace operations
df = assert_df(df)
# if applicable: drop NaN
if (not na) or (na == 'drop'):
# true NaN
df = df.dropna(subset=[x])
# string NaN
df = df[~df[x].isin(STRING_NAN)]
if hue is not None:
# true NaN
df = df.dropna(subset=[hue])
# string NaN
df = df[~df[hue].isin(STRING_NAN)]
# in case the original column is already called count it is renamed to count_org
if x == 'count':
x = 'count_org'
df = df.rename({'count': 'count_org'}, axis=1)
# -- preprocessing
if x_base:
# round to multiples of x_int
df[x] = np.round(df[x] / x_base) * x_base
if isinstance(x_base, int):
df[x] = df[x].astype(int)
# apply x limits
if x_min is None:
x_min = df[x].min()
if x_max is None:
x_max = df[x].max()
_df_xs = pd.DataFrame({x: range(x_min, x_max, x_base)})
_xs_on = [x]
# init hues
if hue is not None:
_df_hues = df[[hue]].drop_duplicates().reset_index().assign(_dummy=1)
_df_xs = pd.merge(_df_xs.assign(_dummy=1), _df_hues, on='_dummy').drop(['_dummy'], axis=1)
_xs_on = _xs_on + [hue]
else:
# apply x limits (ignored if not numeric)
if x in df.select_dtypes(np.number):
if x_min:
df[x] = df[x].where(lambda _: _ >= x_min, x_min)
if x_max:
df[x] = df[x].where(lambda _: _ <= x_max, x_max)
# to string
df[x] = df[x].astype(str)
if hue is not None:
df[hue] = df[hue].astype(str)
# if applicable: apply top_n_coding (both x and hue)
if top_nr:
df[x] = top_n_coding(s=df[x], n=top_nr, other_name=other_name, other_to_na=other_to_na)
if hue is not None:
df[hue] = top_n_coding(s=df[hue], n=top_nr, other_name=other_name, other_to_na=other_to_na)
# init groupby
_groupby = [x]
if hue is not None:
_groupby = _groupby + [hue]
# we use a dummy column called count and sum over it by group to retain the original x column values
_df_count = df.assign(count=1).groupby(_groupby).agg({'count': 'sum'}).reset_index()
# if applicable: append 0 entries for numerical x inside x_range
if x_base:
# was already called with same if before
# noinspection PyUnboundLocalVariable
_df_count = pd.merge(_df_count, _df_xs, on=_xs_on, how='outer')
_df_count['count'] = _df_count['count'].fillna(0)
# create total count (for perc)
_count_x = 'count_{}'.format(x)
_count_hue = 'count_{}'.format(hue)
if hue is None:
_df_count[_count_hue] = _df_count['count'].sum()
_df_count[_count_x] = _df_count['count']
else:
_df_count[_count_x] = _df_count.groupby(x)['count'].transform(pd.Series.sum)
_df_count[_count_hue] = _df_count.groupby(hue)['count'].transform(pd.Series.sum)
# sort
if sort_by_count:
_df_count = _df_count.sort_values([_count_x], ascending=False).reset_index(drop=True)
# add perc columns
_df_count[f"perc_{x}"] = np.round(_df_count['count'] / _df_count[_count_x] * 100, 2)
_df_count[f"perc_{hue}"] = np.round(_df_count['count'] / _df_count[_count_hue] * 100, 2)
return _df_count
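# Illustrative df_count() call with the default settings: one row per distinct
# value of x, with raw counts and the derived percentage columns.
if __name__ == '__main__':
    _demo = pd.DataFrame({'fruit': ['apple', 'apple', 'pear', 'plum']})
    print(df_count('fruit', df=_demo))    # 'apple' comes first with count 2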
# return prediction accuracy as a fraction between 0 and 1
def get_accuracy(class_true, class_pred):
return np.where(class_true.astype(str) == class_pred.astype(str), 1, 0).sum() / len(class_true)
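# Tiny illustration of get_accuracy(): two of three labels match the truth.
if __name__ == '__main__':
    print(get_accuracy(pd.Series([1, 0, 1]), pd.Series([1, 1, 1])))    # 0.666...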
# takes a numeric pandas series and splits it into groups, the groups are labeled by INTEGER multiples of the step value
def numeric_to_group(pd_series, step=None, outer_limit=4, suffix=None, use_abs=False, use_standard_scaler=True):
# outer limit is given in steps, only INTEGER values allowed
outer_limit = int(outer_limit)
# make a copy to avoid inplace effects
_series = pd.Series(deepcopy(pd_series))
# use standard scaler to center around mean with std +- 1
if use_standard_scaler:
_series = StandardScaler().fit(_series.values.reshape(-1, 1)).transform(_series.values.reshape(-1, 1)).flatten()
# if step is none: use 1 as step
if step is None:
step = 1
if suffix is None:
if use_standard_scaler:
suffix = 'std'
else:
suffix = 'step'
if suffix != '':
suffix = '_' + suffix
# to absolute
if use_abs:
_series = np.abs(_series)
else:
# gather the +0 and -0 group to 0
_series = np.where(np.abs(_series) < step, 0, _series)
# group
# get sign
_series_sign = np.sign(_series)
# divide by step, floor and integer
_series = (np.floor(np.abs(_series) / step)).astype(int) * np.sign(_series).astype(int)
# apply outer limit
if outer_limit is not None:
_series = np.where(_series > outer_limit, outer_limit, _series)
_series = np.where(_series < -outer_limit, -outer_limit, _series)
# make a pretty string
# noinspection PyTypeChecker
_series = pd.Series(_series).apply(lambda x: '{0:n}'.format(x)).astype('str') + suffix
# to cat
_series = _series.astype('category')
return _series
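# Illustrative numeric_to_group() call: values are standardised, bucketed into
# integer multiples of one standard deviation, capped at +/- 4 steps and
# suffixed with '_std'.
if __name__ == '__main__':
    _vals = pd.Series([-3.0, -1.0, 0.0, 1.0, 3.0])
    print(numeric_to_group(_vals).tolist())
    # ['-1_std', '0_std', '0_std', '0_std', '1_std'] with the default settings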
@export
def top_n(s: Sequence, n: Union[int, str] = None, w: Optional[Sequence] = None,
struct OPCITEMHEADER1
{
OPCHANDLE hClient;
DWORD dwValueOffset;
WORD wQuality;
WORD wReserved;
FILETIME ftTimeStampItem;
};
"""
SetLocalType(-1, OPCITEMHEADER1_str, 0)
Til2Idb(-1, "OPCITEMHEADER1")
OPCITEMHEADER2_str = """
struct OPCITEMHEADER2
{
OPCHANDLE hClient;
DWORD dwValueOffset;
WORD wQuality;
WORD wReserved;
};
"""
SetLocalType(-1, OPCITEMHEADER2_str, 0)
Til2Idb(-1, "OPCITEMHEADER2")
OPCGROUPHEADERWRITE_str = """
struct OPCGROUPHEADERWRITE
{
DWORD dwItemCount;
OPCHANDLE hClientGroup;
DWORD dwTransactionID;
HRESULT hrStatus;
};
"""
SetLocalType(-1, OPCGROUPHEADERWRITE_str, 0)
Til2Idb(-1, "OPCGROUPHEADERWRITE")
OPCITEMHEADERWRITE_str = """
struct OPCITEMHEADERWRITE
{
OPCHANDLE hClient;
HRESULT dwError;
};
"""
SetLocalType(-1, OPCITEMHEADERWRITE_str, 0)
Til2Idb(-1, "OPCITEMHEADERWRITE")
OPCITEMSTATE_str = """
struct OPCITEMSTATE
{
OPCHANDLE hClient;
FILETIME ftTimeStamp;
WORD wQuality;
WORD wReserved;
VARIANT vDataValue;
};
"""
SetLocalType(-1, OPCITEMSTATE_str, 0)
Til2Idb(-1, "OPCITEMSTATE")
OPCSERVERSTATUS_str = """
struct OPCSERVERSTATUS
{
FILETIME ftStartTime;
FILETIME ftCurrentTime;
FILETIME ftLastUpdateTime;
OPCSERVERSTATE dwServerState;
DWORD dwGroupCount;
DWORD dwBandWidth;
WORD wMajorVersion;
WORD wMinorVersion;
WORD wBuildNumber;
WORD wReserved;
LPWSTR szVendorInfo;
};
"""
SetLocalType(-1, OPCSERVERSTATUS_str, 0)
Til2Idb(-1, "OPCSERVERSTATUS")
OPCITEMDEF_str = """
struct OPCITEMDEF
{
LPWSTR szAccessPath;
LPWSTR szItemID;
BOOL bActive;
OPCHANDLE hClient;
DWORD dwBlobSize;
BYTE *pBlob;
VARTYPE vtRequestedDataType;
WORD wReserved;
};
"""
SetLocalType(-1, OPCITEMDEF_str, 0)
Til2Idb(-1, "OPCITEMDEF")
OPCITEMATTRIBUTES_str = """
struct OPCITEMATTRIBUTES
{
LPWSTR szAccessPath;
LPWSTR szItemID;
BOOL bActive;
OPCHANDLE hClient;
OPCHANDLE hServer;
DWORD dwAccessRights;
DWORD dwBlobSize;
BYTE *pBlob;
VARTYPE vtRequestedDataType;
VARTYPE vtCanonicalDataType;
OPCEUTYPE dwEUType;
VARIANT vEUInfo;
};
"""
SetLocalType(-1, OPCITEMATTRIBUTES_str, 0)
Til2Idb(-1, "OPCITEMATTRIBUTES")
OPCITEMRESULT_str = """
struct OPCITEMRESULT
{
OPCHANDLE hServer;
VARTYPE vtCanonicalDataType;
WORD wReserved;
DWORD dwAccessRights;
DWORD dwBlobSize;
BYTE *pBlob;
};
"""
SetLocalType(-1, OPCITEMRESULT_str, 0)
Til2Idb(-1, "OPCITEMRESULT")
OPCITEMPROPERTY_str = """
struct OPCITEMPROPERTY
{
VARTYPE vtDataType;
WORD wReserved;
DWORD dwPropertyID;
LPWSTR szItemID;
LPWSTR szDescription;
VARIANT vValue;
HRESULT hrErrorID;
DWORD dwReserved;
};
"""
SetLocalType(-1, OPCITEMPROPERTY_str, 0)
Til2Idb(-1, "OPCITEMPROPERTY")
OPCITEMPROPERTIES_str = """
struct OPCITEMPROPERTIES
{
HRESULT hrErrorID;
DWORD dwNumProperties;
OPCITEMPROPERTY *pItemProperties;
DWORD dwReserved;
};
"""
SetLocalType(-1, OPCITEMPROPERTIES_str, 0)
Til2Idb(-1, "OPCITEMPROPERTIES")
OPCBROWSEELEMENT_str = """
struct OPCBROWSEELEMENT
{
LPWSTR szName;
LPWSTR szItemID;
DWORD dwFlagValue;
DWORD dwReserved;
OPCITEMPROPERTIES ItemProperties;
};
"""
SetLocalType(-1, OPCBROWSEELEMENT_str, 0)
Til2Idb(-1, "OPCBROWSEELEMENT")
OPCITEMVQT_str = """
struct OPCITEMVQT
{
VARIANT vDataValue;
BOOL bQualitySpecified;
WORD wQuality;
WORD wReserved;
BOOL bTimeStampSpecified;
DWORD dwReserved;
FILETIME ftTimeStamp;
};
"""
SetLocalType(-1, OPCITEMVQT_str, 0)
Til2Idb(-1, "OPCITEMVQT")
enum_id = AddEnum(-1, 'OPCBROWSEFILTER', 0)
AddConstEx(enum_id, 'OPC_BROWSE_FILTER_ALL', 1, -1)
AddConstEx(enum_id, 'OPC_BROWSE_FILTER_BRANCHES', 2, -1)
AddConstEx(enum_id, 'OPC_BROWSE_FILTER_ITEMS', 3, -1)
IOPCServer_str = """
struct IOPCServer
{
struct IOPCServerVtbl *lpVtbl;
};
"""
SetLocalType(-1, IOPCServer_str, 0)
Til2Idb(-1, "IOPCServer")
IOPCServerVtbl_str = """
struct IOPCServerVtbl
{
HRESULT (__stdcall *QueryInterface)(IOPCServer * This, IID * riid, void **ppvObject);
ULONG (__stdcall *AddRef)(IOPCServer * This);
ULONG (__stdcall *Release)(IOPCServer * This);
HRESULT (__stdcall *AddGroup)(IOPCServer * This, LPCWSTR szName, BOOL bActive, DWORD dwRequestedUpdateRate, OPCHANDLE hClientGroup, LONG *pTimeBias, FLOAT *pPercentDeadband, DWORD dwLCID, OPCHANDLE *phServerGroup, DWORD *pRevisedUpdateRate, IID * riid, LPUNKNOWN *ppUnk);
HRESULT (__stdcall *GetErrorString)(IOPCServer * This, HRESULT dwError, LCID dwLocale, LPWSTR *ppString);
HRESULT (__stdcall *GetGroupByName)(IOPCServer * This, LPCWSTR szName, IID * riid, LPUNKNOWN *ppUnk);
HRESULT (__stdcall *GetStatus)(IOPCServer * This, OPCSERVERSTATUS **ppServerStatus);
HRESULT (__stdcall *RemoveGroup)(IOPCServer * This, OPCHANDLE hServerGroup, BOOL bForce);
HRESULT (__stdcall *CreateGroupEnumerator)(IOPCServer * This, OPCENUMSCOPE dwScope, IID * riid, LPUNKNOWN *ppUnk);
};
"""
SetLocalType(-1, IOPCServerVtbl_str, 0)
Til2Idb(-1, "IOPCServerVtbl")
IOPCServerPublicGroups_str = """
struct IOPCServerPublicGroups
{
struct IOPCServerPublicGroupsVtbl *lpVtbl;
};
"""
SetLocalType(-1, IOPCServerPublicGroups_str, 0)
Til2Idb(-1, "IOPCServerPublicGroups")
IOPCServerPublicGroupsVtbl_str = """
struct IOPCServerPublicGroupsVtbl
{
HRESULT (__stdcall *QueryInterface)(IOPCServerPublicGroups * This, IID * riid, void **ppvObject);
ULONG (__stdcall *AddRef)(IOPCServerPublicGroups * This);
ULONG (__stdcall *Release)(IOPCServerPublicGroups * This);
HRESULT (__stdcall *GetPublicGroupByName)(IOPCServerPublicGroups * This, LPCWSTR szName, IID * riid, LPUNKNOWN *ppUnk);
HRESULT (__stdcall *RemovePublicGroup)(IOPCServerPublicGroups * This, OPCHANDLE hServerGroup, BOOL bForce);
};
"""
SetLocalType(-1, IOPCServerPublicGroupsVtbl_str, 0)
Til2Idb(-1, "IOPCServerPublicGroupsVtbl")
IOPCBrowseServerAddressSpace_str = """
struct IOPCBrowseServerAddressSpace
{
struct IOPCBrowseServerAddressSpaceVtbl *lpVtbl;
};
"""
SetLocalType(-1, IOPCBrowseServerAddressSpace_str, 0)
Til2Idb(-1, "IOPCBrowseServerAddressSpace")
IOPCBrowseServerAddressSpaceVtbl_str = """
struct IOPCBrowseServerAddressSpaceVtbl
{
HRESULT (__stdcall *QueryInterface)(IOPCBrowseServerAddressSpace * This, IID * riid, void **ppvObject);
ULONG (__stdcall *AddRef)(IOPCBrowseServerAddressSpace * This);
ULONG (__stdcall *Release)(IOPCBrowseServerAddressSpace * This);
HRESULT (__stdcall *QueryOrganization)(IOPCBrowseServerAddressSpace * This, OPCNAMESPACETYPE *pNameSpaceType);
HRESULT (__stdcall *ChangeBrowsePosition)(IOPCBrowseServerAddressSpace * This, OPCBROWSEDIRECTION dwBrowseDirection, LPCWSTR szString);
HRESULT (__stdcall *BrowseOPCItemIDs)(IOPCBrowseServerAddressSpace * This, OPCBROWSETYPE dwBrowseFilterType, LPCWSTR szFilterCriteria, VARTYPE vtDataTypeFilter, DWORD dwAccessRightsFilter, LPENUMSTRING *ppIEnumString);
HRESULT (__stdcall *GetItemID)(IOPCBrowseServerAddressSpace * This, LPWSTR szItemDataID, LPWSTR *szItemID);
HRESULT (__stdcall *BrowseAccessPaths)(IOPCBrowseServerAddressSpace * This, LPCWSTR szItemID, LPENUMSTRING *ppIEnumString);
};
"""
SetLocalType(-1, IOPCBrowseServerAddressSpaceVtbl_str, 0)
Til2Idb(-1, "IOPCBrowseServerAddressSpaceVtbl")
IOPCGroupStateMgt_str = """
struct IOPCGroupStateMgt
{
struct IOPCGroupStateMgtVtbl *lpVtbl;
};
"""
SetLocalType(-1, IOPCGroupStateMgt_str, 0)
Til2Idb(-1, "IOPCGroupStateMgt")
IOPCGroupStateMgtVtbl_str = """
struct IOPCGroupStateMgtVtbl
{
HRESULT (__stdcall *QueryInterface)(IOPCGroupStateMgt * This, IID * riid, void **ppvObject);
ULONG (__stdcall *AddRef)(IOPCGroupStateMgt * This);
ULONG (__stdcall *Release)(IOPCGroupStateMgt * This);
HRESULT (__stdcall *GetState)(IOPCGroupStateMgt * This, DWORD *pUpdateRate, BOOL *pActive, LPWSTR *ppName, LONG *pTimeBias, FLOAT *pPercentDeadband, DWORD *pLCID, OPCHANDLE *phClientGroup, OPCHANDLE *phServerGroup);
HRESULT (__stdcall *SetState)(IOPCGroupStateMgt * This, DWORD *pRequestedUpdateRate, DWORD *pRevisedUpdateRate, BOOL *pActive, LONG *pTimeBias, FLOAT *pPercentDeadband, DWORD *pLCID, OPCHANDLE *phClientGroup);
HRESULT (__stdcall *SetName)(IOPCGroupStateMgt * This, LPCWSTR szName);
HRESULT (__stdcall *CloneGroup)(IOPCGroupStateMgt * This, LPCWSTR szName, IID * riid, LPUNKNOWN *ppUnk);
};
"""
SetLocalType(-1, IOPCGroupStateMgtVtbl_str, 0)
Til2Idb(-1, "IOPCGroupStateMgtVtbl")
IOPCSyncIO_str = """
struct IOPCSyncIO
{
struct IOPCSyncIOVtbl *lpVtbl;
};
"""
SetLocalType(-1, IOPCSyncIO_str, 0)
Til2Idb(-1, "IOPCSyncIO")
IOPCSyncIOVtbl_str = """
struct IOPCSyncIOVtbl
{
HRESULT (__stdcall *QueryInterface)(IOPCSyncIO * This, IID * riid, void **ppvObject);
ULONG (__stdcall *AddRef)(IOPCSyncIO * This);
ULONG (__stdcall *Release)(IOPCSyncIO * This);
HRESULT (__stdcall *Read)(IOPCSyncIO * This, OPCDATASOURCE dwSource, DWORD dwCount, OPCHANDLE *phServer, OPCITEMSTATE **ppItemValues, HRESULT **ppErrors);
HRESULT (__stdcall *Write)(IOPCSyncIO * This, DWORD dwCount, OPCHANDLE *phServer, VARIANT *pItemValues, HRESULT **ppErrors);
};
"""
SetLocalType(-1, IOPCSyncIOVtbl_str, 0)
Til2Idb(-1, "IOPCSyncIOVtbl")
IOPCAsyncIO_str = """
struct IOPCAsyncIO
{
struct IOPCAsyncIOVtbl *lpVtbl;
};
"""
SetLocalType(-1, IOPCAsyncIO_str, 0)
Til2Idb(-1, "IOPCAsyncIO")
IOPCAsyncIOVtbl_str = """
struct IOPCAsyncIOVtbl
{
HRESULT (__stdcall *QueryInterface)(IOPCAsyncIO * This, IID * riid, void **ppvObject);
ULONG (__stdcall *AddRef)(IOPCAsyncIO * This);
ULONG (__stdcall *Release)(IOPCAsyncIO * This);
HRESULT (__stdcall *Read)(IOPCAsyncIO * This, DWORD dwConnection, OPCDATASOURCE dwSource, DWORD dwCount, OPCHANDLE *phServer, DWORD *pTransactionID, HRESULT **ppErrors);
HRESULT (__stdcall *Write)(IOPCAsyncIO * This, DWORD dwConnection, DWORD dwCount, OPCHANDLE *phServer, VARIANT *pItemValues, DWORD *pTransactionID, HRESULT **ppErrors);
HRESULT (__stdcall *Refresh)(IOPCAsyncIO * This, DWORD dwConnection, OPCDATASOURCE dwSource, DWORD *pTransactionID);
HRESULT (__stdcall *Cancel)(IOPCAsyncIO * This, DWORD dwTransactionID);
};
"""
SetLocalType(-1, IOPCAsyncIOVtbl_str, 0)
Til2Idb(-1, "IOPCAsyncIOVtbl")
IOPCItemMgt_str = """
struct IOPCItemMgt
{
struct IOPCItemMgtVtbl *lpVtbl;
};
"""
SetLocalType(-1, IOPCItemMgt_str, 0)
Til2Idb(-1, "IOPCItemMgt")
IOPCItemMgtVtbl_str = """
struct IOPCItemMgtVtbl
{
HRESULT (__stdcall *QueryInterface)(IOPCItemMgt * This, IID * riid, void **ppvObject);
ULONG (__stdcall *AddRef)(IOPCItemMgt * This);
ULONG (__stdcall *Release)(IOPCItemMgt * This);
HRESULT (__stdcall *AddItems)(IOPCItemMgt * This, DWORD dwCount, OPCITEMDEF *pItemArray, OPCITEMRESULT **ppAddResults, HRESULT **ppErrors);
HRESULT (__stdcall *ValidateItems)(IOPCItemMgt * This, DWORD dwCount, OPCITEMDEF *pItemArray, BOOL bBlobUpdate, OPCITEMRESULT **ppValidationResults, HRESULT **ppErrors);
HRESULT (__stdcall *RemoveItems)(IOPCItemMgt * This, DWORD dwCount, OPCHANDLE *phServer, HRESULT **ppErrors);
HRESULT (__stdcall *SetActiveState)(IOPCItemMgt * This, DWORD dwCount, OPCHANDLE *phServer, BOOL bActive, HRESULT **ppErrors);
HRESULT (__stdcall *SetClientHandles)(IOPCItemMgt * This, DWORD dwCount, OPCHANDLE *phServer, OPCHANDLE *phClient, HRESULT **ppErrors);
HRESULT (__stdcall *SetDatatypes)(IOPCItemMgt * This, DWORD dwCount, OPCHANDLE *phServer, VARTYPE *pRequestedDatatypes, HRESULT **ppErrors);
HRESULT (__stdcall *CreateEnumerator)(IOPCItemMgt * This, IID * riid, LPUNKNOWN *ppUnk);
};
"""
SetLocalType(-1, IOPCItemMgtVtbl_str, 0)
Til2Idb(-1, "IOPCItemMgtVtbl")
IEnumOPCItemAttributes_str = """
struct IEnumOPCItemAttributes
{
struct IEnumOPCItemAttributesVtbl *lpVtbl;
};
"""
SetLocalType(-1, IEnumOPCItemAttributes_str, 0)
Til2Idb(-1, "IEnumOPCItemAttributes")
IEnumOPCItemAttributesVtbl_str = """
struct IEnumOPCItemAttributesVtbl
{
HRESULT (__stdcall *QueryInterface)(IEnumOPCItemAttributes * This, IID * riid, void **ppvObject);
ULONG (__stdcall *AddRef)(IEnumOPCItemAttributes * This);
ULONG (__stdcall *Release)(IEnumOPCItemAttributes * This);
HRESULT (__stdcall *Next)(IEnumOPCItemAttributes * This, ULONG celt, OPCITEMATTRIBUTES **ppItemArray, ULONG *pceltFetched);
HRESULT (__stdcall *Skip)(IEnumOPCItemAttributes * This, ULONG celt);
HRESULT (__stdcall *Reset)(IEnumOPCItemAttributes * This);
HRESULT (__stdcall *Clone)(IEnumOPCItemAttributes * This, IEnumOPCItemAttributes **ppEnumItemAttributes);
};
"""
SetLocalType(-1, IEnumOPCItemAttributesVtbl_str, 0)
Til2Idb(-1, "IEnumOPCItemAttributesVtbl")
IOPCDataCallback_str = """
struct IOPCDataCallback
{
struct IOPCDataCallbackVtbl *lpVtbl;
};
"""
SetLocalType(-1, IOPCDataCallback_str, 0)
Til2Idb(-1, "IOPCDataCallback")
IOPCDataCallbackVtbl_str = """
struct IOPCDataCallbackVtbl
{
HRESULT (__stdcall *QueryInterface)(IOPCDataCallback * This, IID * riid, void **ppvObject);
ULONG (__stdcall *AddRef)(IOPCDataCallback * This);
ULONG (__stdcall *Release)(IOPCDataCallback * This);
HRESULT (__stdcall *OnDataChange)(IOPCDataCallback * This, DWORD dwTransid, OPCHANDLE hGroup, HRESULT hrMasterquality, HRESULT hrMastererror, DWORD dwCount, OPCHANDLE *phClientItems, VARIANT *pvValues, WORD *pwQualities, FILETIME *pftTimeStamps, HRESULT *pErrors);
HRESULT (__stdcall *OnReadComplete)(IOPCDataCallback * This, DWORD dwTransid, OPCHANDLE hGroup, HRESULT hrMasterquality, HRESULT hrMastererror, DWORD dwCount, OPCHANDLE *phClientItems, VARIANT *pvValues, WORD *pwQualities, FILETIME *pftTimeStamps, HRESULT *pErrors);
HRESULT (__stdcall *OnWriteComplete )(IOPCDataCallback * This, DWORD dwTransid, OPCHANDLE hGroup, HRESULT hrMastererr, DWORD dwCount, OPCHANDLE *pClienthandles, HRESULT *pErrors);
HRESULT ( __stdcall *OnCancelComplete )(IOPCDataCallback * This, DWORD dwTransid, OPCHANDLE hGroup);
};
"""
SetLocalType(-1, IOPCDataCallbackVtbl_str, 0)
Til2Idb(-1, "IOPCDataCallbackVtbl")
IOPCAsyncIO2_str = """
struct IOPCAsyncIO2
{
struct IOPCAsyncIO2Vtbl *lpVtbl;
};
"""
SetLocalType(-1, IOPCAsyncIO2_str, 0)
Til2Idb(-1, "IOPCAsyncIO2")
IOPCAsyncIO2Vtbl_str = """
struct IOPCAsyncIO2Vtbl
{
HRESULT (__stdcall *QueryInterface)(IOPCAsyncIO2 * This, IID * riid, void **ppvObject);
ULONG (__stdcall *AddRef)(IOPCAsyncIO2 * This);
ULONG (__stdcall *Release)(IOPCAsyncIO2 * This);
HRESULT (__stdcall *Read)(IOPCAsyncIO2 * This, DWORD dwCount, OPCHANDLE *phServer, DWORD dwTransactionID, DWORD *pdwCancelID, HRESULT **ppErrors);
HRESULT (__stdcall *Write)(IOPCAsyncIO2 * This, DWORD dwCount, OPCHANDLE *phServer, VARIANT *pItemValues, DWORD dwTransactionID, DWORD *pdwCancelID, HRESULT **ppErrors);
HRESULT (__stdcall *Refresh2)(IOPCAsyncIO2 * This, OPCDATASOURCE dwSource, DWORD dwTransactionID, DWORD *pdwCancelID);
HRESULT (__stdcall *Cancel2)(IOPCAsyncIO2 * This, DWORD dwCancelID);
HRESULT (__stdcall *SetEnable)(IOPCAsyncIO2 * This, BOOL bEnable);
HRESULT (__stdcall *GetEnable)(IOPCAsyncIO2 * This, BOOL *pbEnable);
};
"""
SetLocalType(-1, IOPCAsyncIO2Vtbl_str, 0)
Til2Idb(-1, "IOPCAsyncIO2Vtbl")
IOPCItemProperties_str = """
struct IOPCItemProperties
{
struct IOPCItemPropertiesVtbl *lpVtbl;
};
"""
SetLocalType(-1, IOPCItemProperties_str, 0)
Til2Idb(-1, "IOPCItemProperties")
IOPCItemPropertiesVtbl_str = """
struct IOPCItemPropertiesVtbl
{
HRESULT (__stdcall *QueryInterface)(IOPCItemProperties * This, IID * riid, void **ppvObject);
ULONG (__stdcall *AddRef)(IOPCItemProperties *
from django.shortcuts import render_to_response, get_object_or_404, redirect
from django.http import HttpResponseRedirect, Http404, HttpResponse, HttpResponseForbidden
from django.template import RequestContext
from django.core.urlresolvers import reverse
from django.contrib.auth.models import User
from django.contrib.auth.decorators import login_required
from django.contrib.auth.forms import UserCreationForm
from django.conf import settings
from django.db.models import Q
from django.forms.formsets import formset_factory
from django.forms.models import modelformset_factory
from django.core.mail import send_mail
from django.core import serializers
from decimal import *
from datetime import datetime, timedelta
from clusters.models import *
from clusters.forms import *
from clusters.utils import *
def cluster_params(cluster):
template_params = {}
linked_efs = []
resources = []
efs = EconomicFunction.objects.filter(cluster=cluster)
for ef in efs:
inputs = ef.inputs()
if inputs:
linked_efs.append(ef)
for inp in inputs:
resources.append(inp.resource_type)
outputs = ef.outputs()
if outputs:
linked_efs.append(ef)
for output in outputs:
resources.append(output.resource_type)
efs = list(set(linked_efs))
resources = list(set(resources))
agents = {}
for ef in efs:
for agent in ef.agents.all():
#agents.setdefault(ef.node_id(), []).append(agent.agent.name)
agents.setdefault(ef, []).append(agent.agent)
root = cluster.root()
frtable = function_resource_table(cluster, "qty")
template_params = {
"cluster": cluster,
"functions": efs,
"resources": resources,
"function_agents": agents,
"root": root,
"frtable": frtable,
}
return template_params
def explore_params(cluster):
template_params = {}
linked_efs = []
resources = []
efs = EconomicFunction.objects.filter(cluster=cluster)
for ef in efs:
inputs = ef.inputs()
if inputs:
linked_efs.append(ef)
for inp in inputs:
resources.append(inp.resource_type)
outputs = ef.outputs()
if outputs:
linked_efs.append(ef)
for output in outputs:
resources.append(output.resource_type)
efs = list(set(linked_efs))
resources = list(set(resources))
agents = {}
for ef in efs:
for agent in ef.agents.all():
agents.setdefault(ef, []).append(agent.agent)
root = cluster.root()
template_params = {
"cluster": cluster,
"functions": efs,
"resources": resources,
"function_agents": agents,
"root": root,
}
return template_params
class FlowResource(object):
def __init__(self, resource_type):
self.resource_type = resource_type
# does not work; FlowResource objects cannot fake it for FunctionResourceTypes
def flow_radial_graph_params(cluster):
template_params = {}
flows = FunctionResourceFlow.objects.filter(
from_function__cluster=cluster)
functions = []
resources = []
edges = []
for flow in flows:
from_fn = flow.from_function
try:
len(from_fn.inputs)
except TypeError:
from_fn.inputs = []
from_fn.inputs.append(FlowResource(flow.resource_type))
to_fn = flow.to_function
try:
len(from_fn.outputs)
except TypeError:
to_fn.outputs = []
to_fn.outputs.append(FlowResource(flow.resource_type))
functions.extend([from_fn, to_fn])
resources.append(flow.resource_type)
functions = list(set(functions))
resources = list(set(resources))
agents = {}
for ef in functions:
for agent in ef.agents.all():
#agents.setdefault(ef.node_id(), []).append(agent.agent.name)
agents.setdefault(ef, []).append(agent.agent)
root = cluster.root()
template_params = {
"cluster": cluster,
"functions": functions,
"resources": resources,
"function_agents": agents,
"root": root,
}
return template_params
def clusters(request):
user = request.user
if user.is_active:
communities = [cm.community for cm in user.communities.all()]
cids = [c.id for c in communities]
communities.extend(list(Community.objects.exclude(id__in=cids)))
else:
communities = Community.objects.all()
return render_to_response("clusters/clusters.html", {
"communities": communities,
}, context_instance=RequestContext(request))
def cluster(request, cluster_id, location="agt"):
cluster = get_object_or_404(Cluster, pk=cluster_id)
community = cluster.community
#import pdb; pdb.set_trace()
location_form = None
if community.agent_geographic_area_name:
init = {"location": location,}
location_form = AgentAreaForm(community=community, initial=init, data=request.POST or None)
if request.method == "POST":
if location_form:
if location_form.is_valid():
location = location_form.cleaned_data["location"]
return HttpResponseRedirect('/%s/%s/%s/'
% ('clusters/cluster', cluster_id, location))
if location == "agt":
agents = [agent for agent in cluster.agents() if agent.latitude]
for agent in agents:
agent.all_functions = agent.functions.filter(
function__cluster=cluster)
#import pdb; pdb.set_trace()
else:
agents = cluster.regions()
color_functions = cluster.function_colors()
map_center = ",".join([str(community.latitude), str(community.longitude)])
map_key = settings.GOOGLE_API_KEY
return render_to_response("clusters/cluster.html", {
"cluster": cluster,
"agents": agents,
"map_center": map_center,
"map_key": map_key,
"zoom_level": community.map_zoom_level,
"location_form": location_form,
"color_functions": color_functions,
}, context_instance=RequestContext(request))
def cluster_agents(request, cluster_id):
cluster = get_object_or_404(Cluster, pk=cluster_id)
#import pdb; pdb.set_trace()
agents = cluster.agents()
for agent in agents:
agent.form = AgentTextForm()
#import pdb; pdb.set_trace()
return render_to_response("clusters/cluster_agents.html", {
"cluster": cluster,
"agents": agents,
}, context_instance=RequestContext(request))
def edit_cluster_agents(request, cluster_id):
cluster = get_object_or_404(Cluster, pk=cluster_id)
#import pdb; pdb.set_trace()
agents = cluster.agents()
for agent in agents:
agent.cluster_functions = agent.functions.filter(function__cluster=cluster)
for cf in agent.cluster_functions:
cf.rsrcs = cf.function.resources.all()
if cf.rsrcs:
for res in cf.rsrcs:
res.agent_resource_list = res.function_resources_for_agent(agent)
else:
cf.agent_resources = cf.function_resources.all()
outliers = []
candidates = cf.function_resources.all()
for c in candidates:
if c.is_outlier():
outliers.append(c)
cf.outliers = outliers
#import pdb; pdb.set_trace()
return render_to_response("clusters/edit_cluster_agents.html", {
"cluster": cluster,
"agents": agents,
}, context_instance=RequestContext(request))
@login_required
def edit_cluster_functions(request, cluster_id):
cluster = get_object_or_404(Cluster, pk=cluster_id)
community = cluster.community
symbol = "$"
try:
symbol = community.unit_of_value.symbol
except:
pass
new_function_form = EconomicFunctionForm(prefix="function")
new_resource_form = EconomicResourceTypeForm(prefix="resource")
functions = cluster.functions.all()
for fun in functions:
fun.form = FunctionResourceTypeForm(community=cluster.community)
resources = cluster.resources()
for res in resources:
res.my_consumers = res.cluster_consumers(cluster)
res.my_producers = res.cluster_producers(cluster)
used = [cr.resource_type.id for cr in community.resources.all()]
resource_names = ';'.join([
res.name for res in EconomicResourceType.objects.all().exclude(id__in=used)])
template_params = network_params(cluster, "qty")
template_params["symbol"] = symbol
template_params["functions"] = functions
template_params["resources"] = resources
template_params["new_function_form"] = new_function_form
template_params["new_resource_form"] = new_resource_form
template_params["resource_names"] = resource_names
function_aspect_name = cluster.function_aspect_name
resource_aspect_name = cluster.community.resource_aspect_name
template_params["function_aspect_name"] = function_aspect_name
template_params["resource_aspect_name"] = resource_aspect_name
return render_to_response("clusters/edit_cluster_functions.html",
template_params,
context_instance=RequestContext(request))
@login_required
def edit_flows(request, cluster_id):
cluster = get_object_or_404(Cluster, pk=cluster_id)
if not cluster.permits("edit", request.user):
return HttpResponseForbidden("Uh-uh, you don't have permission to do that")
new_function_form = EconomicFunctionForm(prefix="function")
new_resource_form = EconomicResourceTypeForm(prefix="resource")
flows = FunctionResourceFlow.objects.filter(
from_function__cluster=cluster)
FlowFormSet = modelformset_factory(
FunctionResourceFlow,
form=FunctionResourceFlowForm,
can_delete=True,
extra=4,
)
formset = FlowFormSet(
queryset=FunctionResourceFlow.objects.filter(
from_function__cluster=cluster),
data=request.POST or None,
)
function_choices = [('', '----------')] + [
(fn.id, fn.name) for fn in cluster.functions.all()
]
resource_choices = [('', '----------')] + [
(cr.resource_type.id, cr.resource_type.name) for cr in cluster.community.resources.all()
]
for form in formset.forms:
form.fields['from_function'].choices = function_choices
form.fields['to_function'].choices = function_choices
form.fields['resource_type'].choices = resource_choices
used = [cr.resource_type.id for cr in cluster.community.resources.all()]
resource_names = ';'.join([
res.name for res in EconomicResourceType.objects.all().exclude(id__in=used)])
if request.method == "POST":
#import pdb; pdb.set_trace()
for form in formset.forms:
if form.is_valid():
delete = form.cleaned_data["DELETE"]
if delete:
#todo: this delete code is odd.
#First, I expected formsets to delete automatically if DELETE is True.
#Second, returning an object when requesting id is nice
#but smells like it might break in the future.
#import pdb; pdb.set_trace()
deleted = form.cleaned_data["id"]
deleted.delete()
else:
form.save()
return HttpResponseRedirect('/%s/%s/'
% ('clusters/editflows', cluster.id))
template_params = flow_params(cluster, "qty")
template_params["new_function_form"] = new_function_form
template_params["new_resource_form"] = new_resource_form
template_params["resource_names"] = resource_names
function_aspect_name = cluster.function_aspect_name
resource_aspect_name = cluster.community.resource_aspect_name
template_params["function_aspect_name"] = function_aspect_name
template_params["resource_aspect_name"] = resource_aspect_name
template_params["formset"] = formset
return render_to_response("clusters/edit_flows.html",
template_params,
context_instance=RequestContext(request))
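# The "#todo" notes above point out that the manual DELETE handling in
# edit_flows()/edit_agent_flows() is awkward. The helper below is a hypothetical
# sketch (not part of the original views) of the more conventional modelformset
# pattern; it assumes formset.is_valid() has already returned True and is
# untested against this project's Django version.
def _save_formset_with_deletes(formset):
    """Save a validated modelformset, honouring can_delete via deleted_forms."""
    instances = formset.save(commit=False)
    # forms whose DELETE checkbox was ticked are exposed as deleted_forms
    for form in formset.deleted_forms:
        if form.instance.pk:
            form.instance.delete()
    for instance in instances:
        instance.save()
    return instances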
@login_required
def edit_agent_flows(request, cluster_id):
cluster = get_object_or_404(Cluster, pk=cluster_id)
if not cluster.permits("edit", request.user):
return HttpResponseForbidden("Uh-uh, you don't have permission to do that")
new_function_form = InlineAgentFunctionForm(cluster=cluster, prefix="function")
new_resource_form = EconomicResourceTypeForm(prefix="resource")
flows = list(AgentResourceFlow.objects.filter(
from_function__function__cluster=cluster))
flows.extend(list(AgentResourceFlow.objects.filter(
to_function__function__cluster=cluster)))
FlowFormSet = modelformset_factory(
AgentResourceFlow,
form=AgentResourceFlowForm,
can_delete=True,
extra=3,
)
formset = FlowFormSet(
queryset=AgentResourceFlow.objects.filter(
from_function__function__cluster=cluster),
data=request.POST or None,
)
agent_functions = AgentFunction.objects.filter(
function__cluster=cluster)
function_choices = [('', '----------')] + [
(fn.id, fn) for fn in agent_functions]
resources = cluster.community.resources.all()
resource_choices = [('', '----------')] + [
(cr.resource_type.id, cr.resource_type.name) for cr in resources
]
for form in formset.forms:
form.fields['from_function'].choices = function_choices
form.fields['to_function'].choices = function_choices
form.fields['resource_type'].choices = resource_choices
used = [cr.resource_type.id for cr in resources]
erts = EconomicResourceType.objects.all().exclude(id__in=used)
resource_names = '~'.join([res.name for res in erts])
function_names = '~'.join([fn.name for fn in cluster.functions.all()])
if request.method == "POST":
#import pdb; pdb.set_trace()
for form in formset.forms:
if form.is_valid():
delete = form.cleaned_data["DELETE"]
if delete:
#todo: this delete code is odd.
#First, I expected formsets to delete automatically id DELETE is True.
#Second, returning an object when requesting id is nice
#but smells like it might break in the future.
#import pdb; pdb.set_trace()
deleted = form.cleaned_data["id"]
deleted.delete()
else:
form.save()
return HttpResponseRedirect('/%s/%s/'
% ('clusters/editagentflows', cluster.id))
template_params = agent_flow_params(cluster, "qty")
template_params["new_function_form"] = new_function_form
template_params["new_resource_form"] = new_resource_form
template_params["resource_names"] = resource_names
template_params["function_names"] = function_names
function_aspect_name = cluster.function_aspect_name
resource_aspect_name = cluster.community.resource_aspect_name
template_params["function_aspect_name"] = function_aspect_name
template_params["resource_aspect_name"] = resource_aspect_name
template_params["formset"] = formset
return render_to_response("clusters/edit_agent_flows.html",
template_params,
context_instance=RequestContext(request))
def featured_cluster(request):
cluster = get_featured_cluster()
template_params = {}
if cluster:
template_params = cluster_params(cluster)
toggle = "qty"
template_params.update(sankey_params(cluster, toggle))
return render_to_response("clusters/featured_cluster.html",
template_params,
context_instance=RequestContext(request))
def radial_graph(request, cluster_id):
cluster = get_object_or_404(Cluster, pk=cluster_id)
template_params = cluster_params(cluster)
return render_to_response("clusters/radial_graph.html",
template_params,
context_instance=RequestContext(request))
class Edge(object):
def __init__(self, from_node, to_node, quantity, label, width=1):
self.from_node = from_node
self.to_node = to_node
self.quantity = quantity
self.label = label
self.width = width
def agent_network_params(cluster, toggle):
template_params = {}
frts = AgentFunctionResourceType.objects.filter(
agent_function__function__cluster=cluster)
symbol = "$"
if toggle == "val" or toggle == "price":
try:
symbol = cluster.community.unit_of_value.symbol
except:
pass
edges = []
rtypes = []
if toggle == "price":
total = Decimal("0.00")
else:
total = 0.0
if frts:
nodes = list(cluster.agents())
for agt in nodes:
for v in agt.function_inputs(cluster):
rtypes.append(v.resource_type)
if toggle == "val":
value = v.get_value()
total += value
val_string = "".join([symbol, split_thousands(value)])
edges.append(Edge(v.resource_type, agt, value, val_string))
elif toggle == "price":
total += v.price
p_string = "".join([symbol, str(v.price.quantize(Decimal(".01")))])
edges.append(Edge(v.resource_type, agt, v.price, p_string))
else:
total +=
c_key.kind() != "Conference":
raise endpoints.NotFoundException(
'No conference found with key: %s' % wsck)
conf = c_key.get()
if email in conf.followedBy:
raise ConflictException(
"You already follow this conference")
if conf.seatsAvailable > 0:
retVal = False
else:
conf.followedBy.append(email)
conf.put()
return BooleanMessage(data=retVal)
# - - - Conference Sessions - - - - - - - - - - - - - - - - - - -
def _createSessionObject(self, request):
""" Create Session Object
If a speaker is specified, check validity and add his key to the session
"""
user = endpoints.get_current_user()
if not user:
raise endpoints.UnauthorizedException('Authorization required')
user_id = getUserId(user)
# copy SessionForm/ProtoRPC Message into dict
data = {field.name: getattr(request, field.name) for field in request.all_fields()}
wsck = request.websafeConferenceKey
del data['websafeKey']
conf_key = ndb.Key(urlsafe=wsck)
conf = conf_key.get()
# check that conference exists
if not conf or conf_key.kind() != 'Conference':
raise endpoints.NotFoundException(
'No conference found with key: %s' % wsck)
# check that user is owner
if user_id != conf.organizerUserId:
raise endpoints.ForbiddenException(
'Only the owner can update the conference.')
# Create the Session Object from Input
# add default values for those missing (both data model & outbound Message)
for df in SESS_DEFAULTS:
if data[df] in (None, []):
data[df] = SESS_DEFAULTS[df]
setattr(request, df, SESS_DEFAULTS[df])
# convert dates from strings to Date and Time objects respectively
if data['date']:
data['date'] = datetime.strptime(data['date'][:10], "%Y-%m-%d").date()
if data['startTime']:
data['startTime'] = datetime.strptime(data['startTime'][:5], "%H:%M").time()
c_key = conf.key
s_id = Session.allocate_ids(size=1, parent=c_key)[0]
s_key = ndb.Key(Session, s_id, parent=c_key)
data['key'] = s_key
# check if speaker is provided and exists
        if data['speaker']:
            speaker = self._getSpeaker(data['speaker'])
            # abort if no speaker; check before dereferencing speaker.key
            if not speaker:
                raise endpoints.NotFoundException('No speaker found with key: %s' % data['speaker'])
            data['speaker'] = speaker.key
# add the task for featured speaker
taskqueue.add(params={'wsck': wsck, 'speaker': speaker.fullName},
url='/tasks/featured_speaker')
del data['websafeConferenceKey']
Session(**data).put()
return self._copySessionToForm(request)
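    # (Editor's note, an assumption based on the taskqueue.add() calls above and
    #  on _cacheFeaturedSpeaker() further below: a push-queue handler mounted at
    #  '/tasks/featured_speaker' is expected to recompute and memcache the
    #  featured speaker for the conference.)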
@ndb.transactional(xg=True)
def _updateSessionObject(self, request):
""" Update Session Object. Only conf owner can update.
If a speaker is specified append the session key (urlsafe)
to speaker's featured sessions.
"""
user = endpoints.get_current_user()
if not user:
raise endpoints.UnauthorizedException('Authorization required')
user_id = getUserId(user)
# copy SessionForm/ProtoRPC Message into dict
wssk = request.websafeSessionKey
data = {field.name: getattr(request, field.name) for field in request.all_fields()}
k = ndb.Key(urlsafe=wssk)
session = k.get()
# check that conference exists
if not session or k.kind() != 'Session':
raise endpoints.NotFoundException(
'No session found with key: %s' % wssk)
conf = session.conference
wsck = conf.key.urlsafe()
# check that user is owner
if user_id != conf.organizerUserId:
raise endpoints.ForbiddenException(
'Only the owner can update the conference.')
# check if speaker is provided and exists
if data['speaker']:
            # abort if no such speaker exists
speaker = self._getSpeaker(data['speaker'])
if not speaker:
raise endpoints.NotFoundException('No speaker found with key: %s' % data['speaker'])
# add the task for featured speaker
taskqueue.add(params={'wsck': wsck, 'speaker': speaker.fullName},
url='/tasks/featured_speaker')
        for field in request.all_fields():
            value = getattr(request, field.name)
            # only copy fields where we get data
            if value not in (None, []):
                # special handling for dates (convert string to Date)
                if field.name == 'startTime':
                    value = datetime.strptime(value, "%H:%M").time()
                if field.name == 'date':
                    value = datetime.strptime(value, "%Y-%m-%d").date()
                if field.name == 'speaker':
                    value = speaker.key
                # write to the Session object
                setattr(session, field.name, value)
session.put()
return self._copySessionToForm(request)
    # Helper to copy relevant fields from Session to SessionForm.
def _copySessionToForm(self, sess):
"""Copy relevant fields from Session to SessionForm."""
sf = SessionForm()
for field in sf.all_fields():
if hasattr(sess, field.name):
# convert Date to date string; just copy others
if field.name == 'date':
setattr(sf, field.name, str(getattr(sess, field.name)))
elif field.name == 'speaker':
sp_key = getattr(sess, field.name)
if sp_key:
setattr(sf, field.name, str(sp_key))
elif field.name == 'startTime':
setattr(sf, field.name, str(getattr(sess, field.name)))
else:
setattr(sf, field.name, getattr(sess, field.name))
elif field.name == "websafeKey":
setattr(sf, field.name, sess.key.urlsafe())
# Checks that all required fields are initialized.
sf.check_initialized()
return sf
# Given a conference, return all sessions
@endpoints.method(CONF_GET_REQUEST, SessionForms,
path='conference/sessions/{websafeConferenceKey}',
http_method='GET', name='getConferenceSessions')
def getConferenceSessions(self, request):
"""Return all sessions by conference (by websafeConferenceKey)."""
# get Conference object from request; bail if not found
wsck = request.websafeConferenceKey
        conf_key = ndb.Key(urlsafe=wsck)
        conf = conf_key.get()
        if not conf or conf_key.kind() != "Conference":
            raise endpoints.NotFoundException(
                'No conference found with key: %s' % wsck)
        sessions = conf.sessions
sessions = sessions.order(Session.date, Session.startTime, Session.name)
# return individual SessionForm object per Session
return SessionForms(
items=[self._copySessionToForm(sess)
for sess in sessions])
# Given a conference, return all sessions of a specified type
@endpoints.method(SESS_GET_REQUEST, SessionForms,
path='conference/sessions/{websafeConferenceKey}/type/{typeOfSession}',
http_method='GET', name='getConferenceSessionsByType')
def getConferenceSessionsByType(self, request):
"""Query sessions for a specified type (by websafeConferenceKey)."""
wsck = request.websafeConferenceKey
        conf_key = ndb.Key(urlsafe=wsck)
        # an ndb.Key is always truthy; check that the Conference entity exists
        if not conf_key.get() or conf_key.kind() != 'Conference':
            raise endpoints.NotFoundException(
                'No conference found with key: %s' % wsck)
        sessions = Session.query(ancestor=conf_key)
sessions = sessions.filter(Session.typeOfSession == request.typeOfSession)
sessions = sessions.order(Session.date, Session.startTime, Session.name)
return SessionForms(
items=[self._copySessionToForm(sess)
for sess in sessions]
)
@endpoints.method(SP_GET_REQUEST, SessionForms,
path='speakers/{websafeSpeakerKey}/sessions',
http_method='GET', name='getSessionsBySpeaker')
def getSessionsBySpeaker(self, request):
"""Given a speaker, return all sessions given by this particular speaker,
across all conferences (by speaker's fullname).
"""
wsspk = request.websafeSpeakerKey
sp_key = ndb.Key(urlsafe=wsspk)
speaker = sp_key.get()
if not speaker or sp_key.kind() != 'Speaker':
raise endpoints.NotFoundException(
'No speaker found by the key: %s' % wsspk)
sessions = speaker.featuredSessions.fetch()
return SessionForms(
items=[self._copySessionToForm(sess)
for sess in sessions]
)
# Update Session Endpoint
@endpoints.method(SESS_PUT_REQUEST, SessionForm,
path='conference/sessions/update/{websafeSessionKey}',
http_method='PUT', name='updateSession')
def updateSession(self, request):
"""Update a session in conference (by websafeConferenceKey, websafeSessionKey)."""
return self._updateSessionObject(request)
@endpoints.method(SESS_POST_REQUEST, SessionForm,
path='conference/sessions/{websafeConferenceKey}',
http_method='POST', name='createSession')
def createSession(self, request):
"""Create new session in conference (by websafeConferenceKey)."""
return self._createSessionObject(request)
    # Return all sessions that are not workshops and start before 7 PM.
@endpoints.method(message_types.VoidMessage, SessionForms,
path='sessions/query',
http_method='GET', name='getSessionsProblematicQuery')
def getSessionsProblematicQuery(self, request):
"""Query sessions with two inequallite filters"""
q = Session.query()
# get time limits
time_up = datetime.strptime('19:00', '%H:%M').time()
        # ndb allows only one inequality filter per query; use it on typeOfSession
q = q.filter(Session.typeOfSession != "workshop")
        # the inequality-filtered property has to be the first sort order
q = q.order(Session.typeOfSession)
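        # (Editor's note: the second constraint, startTime < 19:00, therefore
        #  cannot be a query filter and is applied in Python after fetching.)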
q = q.order(Session.date, Session.startTime, Session.name)
# filter out sessions by time limits
sessions = [sess for sess in q if sess.startTime and sess.startTime < time_up]
return SessionForms(items=[self._copySessionToForm(sess)
for sess in sessions])
@endpoints.method(USER_SESSIONS_POST, SessionForms,
path='sessions/schedule',
http_method='GET', name='getUserSessionsSchedule')
def getUserSessionsSchedule(self, request):
"""query sessions given a date for conferences the user has registered"""
user = endpoints.get_current_user()
if not user:
raise endpoints.UnauthorizedException('Authorization required')
user_id = getUserId(user)
p_key = ndb.Key(Profile, user_id)
profile = p_key.get()
c_keys = [ndb.Key(urlsafe=wsck) for wsck in profile.conferenceKeysToAttend]
confs = ndb.get_multi(c_keys)
if not confs:
raise endpoints.NotFoundException('You haven\'t registered in any conference')
q = Session.query()
date = datetime.strptime(request.date[:10], "%Y-%m-%d").date()
# if given 2 dates search in date range, else only for that specific day
if request.dateTo:
dateTo = datetime.strptime(request.dateTo[:10], "%Y-%m-%d").date()
q = q.filter(Session.date >= date)
q = q.filter(Session.date <= dateTo)
else:
q = q.filter(Session.date == date)
q = q.order(Session.date, Session.startTime, Session.name)
# filter sessions
sessions = [sess for sess in q if sess.key.parent() in c_keys]
return SessionForms(
items=[self._copySessionToForm(sess)
for sess in sessions]
)
# confs = [conf for conf in confs if conf.startDate <= date and conf.endDate >= date]
# - - - Speaker - - - - - - - - - - - - - - - - - - -
# helper used on create session
def _getSpeaker(self, wsspk):
"""Get Speaker from datastore
If the speaker doesn't exist create an entry
return:
Speaker
"""
k = ndb.Key(urlsafe=wsspk)
sp = k.get()
# check if key provided is a Speaker Key
if k.kind() != 'Speaker':
raise endpoints.NotFoundException("No speaker with key %s" % wsspk)
# return Speaker
return sp
# used from PUT and POST speaker endpoints
def _createSpeakerObject(self, request):
"""Create SpeakerObject"""
user = endpoints.get_current_user()
if not user:
raise endpoints.UnauthorizedException('Authorization required')
if not request.fullName:
raise endpoints.BadRequestException("Speaker's 'fullName' field required")
# copy ConferenceForm/ProtoRPC Message into dict
data = {field.name: getattr(request, field.name) for field in request.all_fields()}
del data['websafeKey']
sp_id = Speaker.allocate_ids(size=1)[0]
sp_key = ndb.Key(Speaker, sp_id)
data['key'] = sp_key
        # create the Speaker entity & return (modified) SpeakerForm
Speaker(**data).put()
return self._copySpeakerToForm(request)
def _copySpeakerToForm(self, speaker):
"""Copy relevant fields from Session to SessionForm."""
sf = SpeakerForm()
for field in sf.all_fields():
if hasattr(speaker, field.name):
setattr(sf, field.name, getattr(speaker, field.name))
elif field.name == "websafeKey":
setattr(sf, field.name, speaker.key.urlsafe())
sf.check_initialized()
return sf
@staticmethod
def _cacheFeaturedSpeaker(wsck, speakers_name):
"""Create Featured Speaker & assign to memcache; used by
getFeaturedSpeaker().
"""
# | |
1)
SM.SetFlightDeckUnitLoadout('Kevlavik', 'Shipwreck 2-2', '2 Kh-22MP;1 Kh-22MP;25 Flare-1;25 Chaff-1;')
SM.AddUnitToFlightDeck('Kevlavik', 'Tu-22ME', 'Shipwreck 2-3', 1)
SM.SetFlightDeckUnitLoadout('Kevlavik', 'Shipwreck 2-3', '2 Kh-22MP;1 Kh-22MP;25 Flare-1;25 Chaff-1;')
SM.AddUnitToFlightDeck('Kevlavik', 'Tu-22ME', 'Shipwreck 2-4', 1)
SM.SetFlightDeckUnitLoadout('Kevlavik', 'Shipwreck 2-4', '2 Kh-22MP;1 Kh-22MP;25 Flare-1;25 Chaff-1;')
SM.AddUnitToFlightDeck('Kevlavik', 'Tu-22ME', 'Shipwreck 2-5', 1)
SM.SetFlightDeckUnitLoadout('Kevlavik', 'Shipwreck 2-5', '2 Kh-22MP;1 Kh-22MP;25 Flare-1;25 Chaff-1;')
SM.AddUnitToFlightDeck('Kevlavik', 'Tu-22ME', 'Shipwreck 2-6', 1)
SM.SetFlightDeckUnitLoadout('Kevlavik', 'Shipwreck 2-6', '2 Kh-22MP;1 Kh-22MP;25 Flare-1;25 Chaff-1;')
SM.AddUnitToFlightDeck('Kevlavik', 'Tu-22ME', 'Shipwreck 2-7', 1)
SM.SetFlightDeckUnitLoadout('Kevlavik', 'Shipwreck 2-7', '2 Kh-22MP;1 Kh-22MP;25 Flare-1;25 Chaff-1;')
SM.AddUnitToFlightDeck('Kevlavik', 'Tu-22ME', 'Shipwreck 2-8', 1)
SM.SetFlightDeckUnitLoadout('Kevlavik', 'Shipwreck 2-8', '2 Kh-22MP;1 Kh-22MP;25 Flare-1;25 Chaff-1;')
SM.AddUnitToFlightDeck('Kevlavik', 'Tu-22ME', 'Shipwreck 2-9', 1)
SM.SetFlightDeckUnitLoadout('Kevlavik', 'Shipwreck 2-9', '2 Kh-22MP;1 Kh-22MP;25 Flare-1;25 Chaff-1;')
SM.AddUnitToFlightDeck('Kevlavik', 'Tu-22ME', 'Shipwreck 2-10', 1)
SM.SetFlightDeckUnitLoadout('Kevlavik', 'Shipwreck 2-10', '2 Kh-22MP;1 Kh-22MP;25 Flare-1;25 Chaff-1;')
SM.AddUnitToFlightDeck('Kevlavik', 'Tu-22ME', 'Shipwreck 2-11', 1)
SM.SetFlightDeckUnitLoadout('Kevlavik', 'Shipwreck 2-11', '2 Kh-22MP;1 Kh-22MP;25 Flare-1;25 Chaff-1;')
SM.AddUnitToFlightDeck('Kevlavik', 'Tu-22ME', 'Shipwreck 2-12', 1)
SM.SetFlightDeckUnitLoadout('Kevlavik', 'Shipwreck 2-12', '2 Kh-22MP;1 Kh-22MP;25 Flare-1;25 Chaff-1;')
SM.AddUnitToFlightDeck('Kevlavik', 'Tu-22ME', 'Shipwreck 2-13', 1)
SM.SetFlightDeckUnitLoadout('Kevlavik', 'Shipwreck 2-13', '2 Kh-22MP;1 Kh-22MP;25 Flare-1;25 Chaff-1;')
SM.AddUnitToFlightDeck('Kevlavik', 'Tu-22ME', 'Shipwreck 2-14', 1)
SM.SetFlightDeckUnitLoadout('Kevlavik', 'Shipwreck 2-14', '2 Kh-22MP;1 Kh-22MP;25 Flare-1;25 Chaff-1;')
SM.AddUnitToFlightDeck('Kevlavik', 'Su-27', 'Wall-1', 2)
SM.SetFlightDeckUnitLoadout('Kevlavik', 'Wall-1', '4 R-27R;2 R-77;4 R-27R;25 Flare-1;25 Chaff-1;')
SM.AddUnitToFlightDeck('Kevlavik', 'Su-27', 'Wall-2', 2)
SM.SetFlightDeckUnitLoadout('Kevlavik', 'Wall-2', '3 Kh-31P;2 R-77;4 Kh-31P;25 Flare-1;25 Chaff-1;')
SM.AddUnitToFlightDeck('Kevlavik', 'Su-27', 'Wall-3', 2)
SM.SetFlightDeckUnitLoadout('Kevlavik', 'Wall-3', '3 Kh-31P;2 R-77;4 Kh-31P;25 Flare-1;25 Chaff-1;')
SM.AddUnitToFlightDeck('Kevlavik', 'Su-27', 'Wall-4', 2)
SM.SetFlightDeckUnitLoadout('Kevlavik', 'Wall-4', '3 Kh-31P;2 R-77;4 Kh-31P;25 Flare-1;25 Chaff-1;')
SM.AddUnitToFlightDeck('Kevlavik', 'Su-27', 'Wall-5', 2)
SM.SetFlightDeckUnitLoadout('Kevlavik', 'Wall-5', '3 Kh-31P;2 R-77;4 Kh-31P;25 Flare-1;25 Chaff-1;')
SM.AddUnitToFlightDeck('Kevlavik', 'Su-27', 'Wall-6', 2)
SM.SetFlightDeckUnitLoadout('Kevlavik', 'Wall-6', '3 Kh-31P;2 R-77;4 Kh-31P;25 Flare-1;25 Chaff-1;')
SM.AddUnitToFlightDeck('Kevlavik', 'Su-27', 'Wall-7', 2)
SM.SetFlightDeckUnitLoadout('Kevlavik', 'Wall-7', '3 Kh-31P;2 R-77;4 Kh-31P;25 Flare-1;25 Chaff-1;')
SM.AddUnitToFlightDeck('Kevlavik', 'Su-27', 'Wall-8', 2)
SM.SetFlightDeckUnitLoadout('Kevlavik', 'Wall-8', '3 Kh-31P;2 R-77;4 Kh-31P;25 Flare-1;25 Chaff-1;')
SM.AddUnitToFlightDeck('Kevlavik', 'Su-27', 'Wall-9', 1)
SM.SetFlightDeckUnitLoadout('Kevlavik', 'Wall-9', '3 Kh-31P;2 R-77;4 Kh-31P;25 Flare-1;25 Chaff-1;')
SM.AddUnitToFlightDeck('Kevlavik', 'Su-27', 'Wall-10', 1)
SM.SetFlightDeckUnitLoadout('Kevlavik', 'Wall-10', '3 Kh-31P;2 R-77;4 Kh-31P;25 Flare-1;25 Chaff-1;')
SM.AddUnitToFlightDeck('Kevlavik', 'Su-27', 'Wall-11', 1)
SM.SetFlightDeckUnitLoadout('Kevlavik', 'Wall-11', '3 Kh-31P;2 R-77;4 Kh-31P;25 Flare-1;25 Chaff-1;')
SM.AddUnitToFlightDeck('Kevlavik', 'Su-27', 'Wall-12', 1)
SM.SetFlightDeckUnitLoadout('Kevlavik', 'Wall-12', '3 Kh-31P;2 R-77;4 Kh-31P;25 Flare-1;25 Chaff-1;')
SM.AddUnitToFlightDeck('Kevlavik', 'Su-27', 'Wall-13', 1)
SM.SetFlightDeckUnitLoadout('Kevlavik', 'Wall-13', '3 Kh-31P;2 R-77;4 Kh-31P;25 Flare-1;25 Chaff-1;')
SM.AddUnitToFlightDeck('Kevlavik', 'Su-27', 'Wall-14', 1)
SM.SetFlightDeckUnitLoadout('Kevlavik', 'Wall-14', '3 Kh-31P;2 R-77;4 Kh-31P;25 Flare-1;25 Chaff-1;')
SM.AddUnitToFlightDeck('Kevlavik', 'Su-27', 'Wall-15', 1)
SM.SetFlightDeckUnitLoadout('Kevlavik', 'Wall-15', '3 Kh-31P;2 R-77;4 Kh-31P;25 Flare-1;25 Chaff-1;')
SM.AddUnitToFlightDeck('Kevlavik', 'Su-27', 'Wall-16', 1)
SM.SetFlightDeckUnitLoadout('Kevlavik', 'Wall-16', '3 Kh-31P;2 R-77;4 Kh-31P;25 Flare-1;25 Chaff-1;')
SM.AddUnitToFlightDeck('Kevlavik', 'Su-27', 'Wall-17', 1)
SM.SetFlightDeckUnitLoadout('Kevlavik', 'Wall-17', '3 Kh-31P;2 R-77;4 Kh-31P;25 Flare-1;25 Chaff-1;')
SM.AddUnitToFlightDeck('Kevlavik', 'Su-27', 'Wall-18', 1)
SM.SetFlightDeckUnitLoadout('Kevlavik', 'Wall-18', '3 Kh-31P;2 R-77;4 Kh-31P;25 Flare-1;25 Chaff-1;')
SM.AddUnitToFlightDeck('Kevlavik', 'Su-27', 'Wall-19', 1)
SM.SetFlightDeckUnitLoadout('Kevlavik', 'Wall-19', '3 Kh-31P;2 R-77;4 Kh-31P;25 Flare-1;25 Chaff-1;')
SM.AddUnitToFlightDeck('Kevlavik', 'Su-27', 'Wall-20', 1)
SM.SetFlightDeckUnitLoadout('Kevlavik', 'Wall-20', '3 Kh-31P;2 R-77;4 Kh-31P;25 Flare-1;25 Chaff-1;')
SM.AddUnitToFlightDeck('Kevlavik', 'Su-27', 'Wall-21', 1)
SM.SetFlightDeckUnitLoadout('Kevlavik', 'Wall-21', '3 Kh-31P;2 R-77;4 Kh-31P;25 Flare-1;25 Chaff-1;')
SM.AddUnitToFlightDeck('Kevlavik', 'Su-27', 'Wall-22', 1)
SM.SetFlightDeckUnitLoadout('Kevlavik', 'Wall-22', '3 Kh-31P;2 R-77;4 Kh-31P;25 Flare-1;25 Chaff-1;')
SM.AddUnitToFlightDeck('Kevlavik', 'Su-27', 'Wall-23', 1)
SM.SetFlightDeckUnitLoadout('Kevlavik', 'Wall-23', '3 Kh-31P;2 R-77;4 Kh-31P;25 Flare-1;25 Chaff-1;')
SM.AddUnitToFlightDeck('Kevlavik', 'Su-27', 'Wall-24', 1)
SM.SetFlightDeckUnitLoadout('Kevlavik', 'Wall-24', '3 Kh-31P;2 R-77;4 Kh-31P;25 Flare-1;25 Chaff-1;')
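# (Editor's note, inferred from the FP.* calls below: two air-patrol missions
#  are assembled from the 'Wall' Su-27 flights and two 'Standby-ASuW' missions
#  from the 'Shipwreck' Tu-22ME flights based at Kevlavik.)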
FP = UI.GetFlightPortInfo()
base_track = UI.GetTrackById(UI.GetPlatformId())
mission_id = FP.AddGenericMission()
FP.AddAircraftToMission(mission_id, 'Wall-1')
FP.AddAircraftToMission(mission_id, 'Wall-2')
FP.AddAircraftToMission(mission_id, 'Wall-3')
FP.AddAircraftToMission(mission_id, 'Wall-4')
FP.AddAircraftToMission(mission_id, 'Wall-5')
FP.AddAircraftToMission(mission_id, 'Wall-6')
FP.AddAircraftToMission(mission_id, 'Wall-7')
FP.AddAircraftToMission(mission_id, 'Wall-8')
FP.AddAircraftToMission(mission_id, 'Wall-9')
FP.AddAircraftToMission(mission_id, 'Wall-10')
FP.AddAircraftToMission(mission_id, 'Wall-11')
FP.AddAircraftToMission(mission_id, 'Wall-12')
FP.SetMissionLaunchTime(mission_id, '13:00:08+0m+R2.0')
FP.SetMissionDatum(mission_id, 0.0000000, 0.0000000)
FP.SetMissionLandingTarget(mission_id, '')
FP.SetMissionWaveQuantity(mission_id, 4)
FP.SetMissionType(mission_id, '')
FP.SetMissionPatrolArea(mission_id, '-0.3966131,1.1144481,-0.3817177,1.1139344,-0.3817177,1.1113020,-0.3957142,1.1115075,')
FP.AddMissionWaypointAdvanced(mission_id, -0.3888970, 1.1164531, 2000.0, 200.0)
FP.SetMissionWaypointTasks(mission_id, 0, 'WaitForGroup,EngageAll')
FP.AddMissionWaypointAdvanced(mission_id, -0.3895190, 1.1136150, 3000.0, 300.0)
FP.SetMissionWaypointTasks(mission_id, 1, 'AirPatrolArea,EngageAll')
mission_id = FP.AddGenericMission()
FP.AddAircraftToMission(mission_id, 'Wall-13')
FP.AddAircraftToMission(mission_id, 'Wall-14')
FP.AddAircraftToMission(mission_id, 'Wall-15')
FP.AddAircraftToMission(mission_id, 'Wall-16')
FP.AddAircraftToMission(mission_id, 'Wall-17')
FP.AddAircraftToMission(mission_id, 'Wall-18')
FP.AddAircraftToMission(mission_id, 'Wall-19')
FP.AddAircraftToMission(mission_id, 'Wall-20')
FP.AddAircraftToMission(mission_id, 'Wall-21')
FP.AddAircraftToMission(mission_id, 'Wall-22')
FP.AddAircraftToMission(mission_id, 'Wall-23')
FP.AddAircraftToMission(mission_id, 'Wall-24')
FP.SetMissionLaunchTime(mission_id, '13:00:08+0m+R2.0')
FP.SetMissionDatum(mission_id, 0.0000000, 0.0000000)
FP.SetMissionLandingTarget(mission_id, '')
FP.SetMissionWaveQuantity(mission_id, 4)
FP.SetMissionType(mission_id, '')
FP.SetMissionPatrolArea(mission_id, '0.0035024,0.0001601,0.0035684,-0.0008994,-0.0032615,0.0005575,-0.0024091,0.0014547,')
FP.SetMissionPatrolAnchor(mission_id, 'Kevlavik', 2)
FP.AddMissionWaypointAdvanced(mission_id, -0.3888970, 1.1164531, 2000.0, 200.0)
FP.SetMissionWaypointTasks(mission_id, 0, 'WaitForGroup,EngageAll')
FP.AddMissionWaypointAdvanced(mission_id, -0.3901000, 1.1150531, 3000.0, 300.0)
FP.SetMissionWaypointTasks(mission_id, 1, 'AirPatrolArea,EngageAll')
mission_id = FP.AddGenericMission()
FP.AddAircraftToMission(mission_id, 'Shipwreck 2-15')
FP.AddAircraftToMission(mission_id, 'Shipwreck 1-6')
FP.AddAircraftToMission(mission_id, 'Shipwreck 1-7')
FP.AddAircraftToMission(mission_id, 'Shipwreck 1-8')
FP.AddAircraftToMission(mission_id, 'Shipwreck 1-9')
FP.AddAircraftToMission(mission_id, 'Shipwreck 1-10')
FP.AddAircraftToMission(mission_id, 'Shipwreck 1-11')
FP.AddAircraftToMission(mission_id, 'Shipwreck 1-12')
FP.AddAircraftToMission(mission_id, 'Shipwreck 1-13')
FP.AddAircraftToMission(mission_id, 'Shipwreck 1-14')
FP.AddAircraftToMission(mission_id, 'Shipwreck 1-15')
FP.AddAircraftToMission(mission_id, 'Shipwreck 1-16')
FP.SetMissionLaunchTime(mission_id, '13:00:08+0m+R2.0')
FP.SetMissionDatum(mission_id, 0.0000000, 0.0000000)
FP.SetMissionLandingTarget(mission_id, '')
FP.SetMissionWaveQuantity(mission_id, 4)
FP.SetMissionType(mission_id, 'Standby-ASuW')
mission_id = FP.AddGenericMission()
FP.AddAircraftToMission(mission_id, 'Shipwreck 2-16')
FP.AddAircraftToMission(mission_id, 'Shipwreck 1-20')
FP.AddAircraftToMission(mission_id, 'Shipwreck 2-1')
FP.AddAircraftToMission(mission_id, 'Shipwreck 2-2')
FP.AddAircraftToMission(mission_id, 'Shipwreck 2-3')
FP.AddAircraftToMission(mission_id, 'Shipwreck 2-4')
FP.AddAircraftToMission(mission_id, 'Shipwreck 2-5')
FP.AddAircraftToMission(mission_id, 'Shipwreck 2-6')
FP.AddAircraftToMission(mission_id, 'Shipwreck 2-7')
FP.AddAircraftToMission(mission_id, 'Shipwreck 2-8')
FP.AddAircraftToMission(mission_id, 'Shipwreck 2-9')
FP.AddAircraftToMission(mission_id, 'Shipwreck 2-10')
FP.AddAircraftToMission(mission_id, 'Shipwreck 2-11')
FP.AddAircraftToMission(mission_id, 'Shipwreck 2-12')
FP.AddAircraftToMission(mission_id, 'Shipwreck 2-13')
FP.AddAircraftToMission(mission_id, 'Shipwreck 2-14')
FP.SetMissionLaunchTime(mission_id, '12:00:08+0m+R2.0')
FP.SetMissionDatum(mission_id, 0.0000000, 0.0000000)
FP.SetMissionLandingTarget(mission_id, '')
FP.SetMissionWaveQuantity(mission_id, 8)
FP.SetMissionType(mission_id, 'Standby-ASuW')
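# (Editor's note: the rest of the script spawns already-airborne Tu-22ME
#  'Hammer' units for alliance 2, each with the same Kh-22M loadout, task
#  stack, and a patrol station written to its blackboard.)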
unit = SM.GetDefaultUnit()
unit.className = 'Tu-22ME'
unit.unitName = "Hammer 1"
unit.SetPosition(-32.584050, 55.516823, 14000.0)
unit.heading = 66.29
unit.speed = 877.7
SM.AddUnitToAlliance(unit, 2)
SM.SetUnitLauncherItem(unit.unitName, 0, 'Kh-22M', 2)
SM.SetUnitLauncherItem(unit.unitName, 1, 'Kh-22M', 1)
SM.SetUnitLauncherItem(unit.unitName, 2, 'Flare-1', 25)
SM.SetUnitLauncherItem(unit.unitName, 3, 'Chaff-1', 25)
UI = SM.GetUnitInterface(unit.unitName)
UI.AddTask('AirEvade', 3.000000, 3)
UI.AddTask('EngageAll', 2.000000, 0)
UI.AddTask('PatrolCircle', 1.000000, 0)
UI.AddTask('RTB', 2.000000, 3)
BB = UI.GetBlackboardInterface()
BB.Write('StationLatitude', '1.030556')
BB.Write('StationLongitude', '-0.392433')
UI.SetThrottle(0.338669)
unit = SM.GetDefaultUnit()
unit.className = 'Tu-22ME'
unit.unitName = "Hammer 2"
unit.SetPosition(-32.168371, 55.198724, 14000.0)
unit.heading = 66.29
unit.speed = 877.7
SM.AddUnitToAlliance(unit, 2)
SM.SetUnitLauncherItem(unit.unitName, 0, 'Kh-22M', 2)
SM.SetUnitLauncherItem(unit.unitName, 1, 'Kh-22M', 1)
SM.SetUnitLauncherItem(unit.unitName, 2, 'Flare-1', 25)
SM.SetUnitLauncherItem(unit.unitName, 3, 'Chaff-1', 25)
UI = SM.GetUnitInterface(unit.unitName)
UI.AddTask('AirEvade', 3.000000, 3)
UI.AddTask('EngageAll', 2.000000, 0)
UI.AddTask('PatrolCircle', 1.000000, 0)
UI.AddTask('RTB', 2.000000, 3)
BB = UI.GetBlackboardInterface()
BB.Write('StationLatitude', '1.028579')
BB.Write('StationLongitude', '-0.377806')
UI.SetThrottle(0.338669)
unit = SM.GetDefaultUnit()
unit.className = 'Tu-22ME'
unit.unitName = "Hammer 4"
unit.SetPosition(-32.128469, 55.697659, 14000.0)
unit.heading = 66.29
unit.speed = 877.7
SM.AddUnitToAlliance(unit, 2)
SM.SetUnitLauncherItem(unit.unitName, 0, 'Kh-22M', 2)
SM.SetUnitLauncherItem(unit.unitName, 1, 'Kh-22M', 1)
SM.SetUnitLauncherItem(unit.unitName, 2, 'Flare-1', 25)
SM.SetUnitLauncherItem(unit.unitName, 3, 'Chaff-1', 25)
UI = SM.GetUnitInterface(unit.unitName)
UI.AddTask('AirEvade', 3.000000, 3)
UI.AddTask('EngageAll', 2.000000, 0)
UI.AddTask('PatrolCircle', 1.000000, 0)
UI.AddTask('RTB', 2.000000, 3)
BB = UI.GetBlackboardInterface()
BB.Write('StationLatitude', '1.036880')
BB.Write('StationLongitude', '-0.400791')
UI.SetThrottle(0.338669)
unit = SM.GetDefaultUnit()
unit.className = 'Tu-22ME'
unit.unitName = "Hammer 3"
unit.SetPosition(-31.673336, 55.422092, 14000.0)
unit.heading = 66.29
unit.speed = 877.7
SM.AddUnitToAlliance(unit, 2)
SM.SetUnitLauncherItem(unit.unitName, 0, 'Kh-22M', 2)
SM.SetUnitLauncherItem(unit.unitName, 1, 'Kh-22M', 1)
SM.SetUnitLauncherItem(unit.unitName, 2, 'Flare-1', 25)
SM.SetUnitLauncherItem(unit.unitName, 3, 'Chaff-1', 25)
UI = SM.GetUnitInterface(unit.unitName)
UI.AddTask('AirEvade', 3.000000, 3)
UI.AddTask('EngageAll', 2.000000, 0)
UI.AddTask('PatrolCircle', 1.000000, 0)
UI.AddTask('RTB', 2.000000, 3)
BB = UI.GetBlackboardInterface()
BB.Write('StationLatitude', '1.032137')
BB.Write('StationLongitude', '-0.404274')
UI.SetThrottle(0.338669)
unit = SM.GetDefaultUnit()
unit.className = 'Tu-22ME'
unit.unitName = "Hammer 7"
unit.SetPosition(-16.107389, 55.375940, 14000.0)
unit.heading = 312.62
unit.speed = 877.7
SM.AddUnitToAlliance(unit, 2)
SM.SetUnitLauncherItem(unit.unitName, 0, 'Kh-22M', 2)
SM.SetUnitLauncherItem(unit.unitName, 1, 'Kh-22M', 1)
SM.SetUnitLauncherItem(unit.unitName, 2, 'Flare-1', 25)
SM.SetUnitLauncherItem(unit.unitName, 3, 'Chaff-1', 25)
UI = SM.GetUnitInterface(unit.unitName)
UI.AddTask('AirEvade', 3.000000, 3)
UI.AddTask('EngageAll', 2.000000, 0)
UI.AddTask('PatrolCircle', 1.000000, 0)
UI.AddTask('RTB', 2.000000, 3)
BB = UI.GetBlackboardInterface()
BB.Write('StationLatitude', '1.030348')
BB.Write('StationLongitude', '-0.333093')
UI.SetThrottle(0.338669)
unit = SM.GetDefaultUnit()
unit.className = 'Tu-22ME'
unit.unitName = "<NAME>"
unit.SetPosition(-16.225302, 54.909432, 14000.0)
unit.heading = 312.62
unit.speed = 877.7
SM.AddUnitToAlliance(unit, 2)
SM.SetUnitLauncherItem(unit.unitName, 0, 'Kh-22M', 2)
SM.SetUnitLauncherItem(unit.unitName, 1, 'Kh-22M', 1)
SM.SetUnitLauncherItem(unit.unitName, 2, 'Flare-1', 25)
SM.SetUnitLauncherItem(unit.unitName, 3, 'Chaff-1', 25)
UI = SM.GetUnitInterface(unit.unitName)
UI.AddTask('AirEvade', 3.000000, 3)
UI.AddTask('EngageAll', 2.000000, 0)
UI.AddTask('PatrolCircle', 1.000000, 0)
UI.AddTask('RTB', 2.000000, 3)
BB = UI.GetBlackboardInterface()
BB.Write('StationLatitude', '1.026914')
BB.Write('StationLongitude', '-0.337174')
UI.SetThrottle(0.338669)
unit = SM.GetDefaultUnit()
unit.className = 'Tu-22ME'
unit.unitName = "<NAME>"
unit.SetPosition(-15.432983, 55.408261, 14000.0)
unit.heading = 312.62
unit.speed = 877.7
SM.AddUnitToAlliance(unit, 2)
SM.SetUnitLauncherItem(unit.unitName, 0, 'Kh-22M', 2)
SM.SetUnitLauncherItem(unit.unitName, 1, 'Kh-22M', 1)
SM.SetUnitLauncherItem(unit.unitName, 2, 'Flare-1', 25)
SM.SetUnitLauncherItem(unit.unitName, 3, 'Chaff-1', 25)
UI = SM.GetUnitInterface(unit.unitName)
UI.AddTask('AirEvade', 3.000000, 3)
UI.AddTask('EngageAll', 2.000000, 0)
UI.AddTask('PatrolCircle', 1.000000, 0)
UI.AddTask('RTB', 2.000000, 3)
BB = UI.GetBlackboardInterface()
BB.Write('StationLatitude', '1.033322')
BB.Write('StationLongitude', '-0.301887')
UI.SetThrottle(0.338669)
unit = SM.GetDefaultUnit()
unit.className = 'Tu-22ME'
unit.unitName = "Hammer 6"
unit.SetPosition(-15.233573, 54.814413, 14000.0)
unit.heading = 312.62
unit.speed = 877.7
SM.AddUnitToAlliance(unit, 2)
SM.SetUnitLauncherItem(unit.unitName, 0, 'Kh-22M', 2)
SM.SetUnitLauncherItem(unit.unitName, 1, 'Kh-22M', 1)
SM.SetUnitLauncherItem(unit.unitName, 2, 'Flare-1', 25)
SM.SetUnitLauncherItem(unit.unitName, 3, 'Chaff-1', 25)
UI = SM.GetUnitInterface(unit.unitName)
UI.AddTask('AirEvade', 3.000000, 3)
UI.AddTask('EngageAll', 2.000000, 0)
UI.AddTask('PatrolCircle', 1.000000, 0)
UI.AddTask('RTB', 2.000000, 3)
BB = UI.GetBlackboardInterface()
BB.Write('StationLatitude', '1.025417')
BB.Write('StationLongitude', '-0.294922')
UI.SetThrottle(0.338669)
unit = SM.GetDefaultUnit()
unit.className = 'Tu-22ME'
unit.unitName = "<NAME>"
unit.SetPosition(-9.785666, 64.295426, 14000.0)
unit.heading = 253.54
unit.speed = 877.7
SM.AddUnitToAlliance(unit, 2)
SM.SetUnitLauncherItem(unit.unitName, 0, 'Kh-22M', 2)
SM.SetUnitLauncherItem(unit.unitName, 1, 'Kh-22M', 1)
SM.SetUnitLauncherItem(unit.unitName, 2, 'Flare-1', 25)
SM.SetUnitLauncherItem(unit.unitName, 3, 'Chaff-1', 25)
UI = SM.GetUnitInterface(unit.unitName)
UI.AddTask('AirEvade', 3.000000, 3)
UI.AddTask('EngageAll', 2.000000, 0)
UI.AddTask('PatrolCircle', 1.000000, 0)
UI.AddTask('RTB', 2.000000, 3)
BB = UI.GetBlackboardInterface()
BB.Write('StationLatitude', '1.066212')
BB.Write('StationLongitude', '-0.358760')
UI.SetThrottle(0.338669)
unit = SM.GetDefaultUnit()
unit.className = 'Tu-22ME'
unit.unitName | |
\text{other})
Parameters
----------
input : dragon.vm.torch.Tensor
The input tensor.
other : Union[dragon.vm.torch.Tensor, number]
The tensor to compare.
out : dragon.vm.torch.Tensor, optional
The output tensor.
Returns
-------
dragon.vm.torch.Tensor
The output tensor.
"""
return _binary_func(input, other, 'Equal', out)
def exp(input, out=None):
r"""Compute the exponential of input.
.. math:: \text{out} = \exp(\text{input})
Parameters
----------
input : dragon.vm.torch.Tensor
The input tensor.
out : dragon.vm.torch.Tensor, optional
The output tensor.
Returns
-------
dragon.vm.torch.Tensor
The output tensor.
"""
return _unary_func(input, 'Exp', out)
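# Editor's illustrative usage of exp() above (values rounded):
#   torch.exp(torch.tensor([0., 1., 2.]))  # -> [1., 2.7182817, 7.389056]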
def floor(input, out=None):
r"""Compute the largest integer not greater than input.
.. math:: \text{out} = \lfloor \text{input} \rfloor
Examples:
```python
x = torch.tensor([0.9, 1.4, 1.9])
print(torch.floor(x)) # [0., 1., 1.]
```
Parameters
----------
input : dragon.vm.torch.Tensor
The input tensor.
out : dragon.vm.torch.Tensor, optional
The output tensor.
Returns
-------
dragon.vm.torch.Tensor
The output tensor.
"""
return _unary_func(input, 'Floor', out)
def ge(input, other, out=None):
r"""Compute the element-wise greater-equal comparison.
.. math:: \text{out} = (\text{input} \geq \text{other})
Parameters
----------
input : dragon.vm.torch.Tensor
The input tensor.
other : Union[dragon.vm.torch.Tensor, number]
The tensor to compare.
out : dragon.vm.torch.Tensor, optional
The output tensor.
Returns
-------
dragon.vm.torch.Tensor
The output tensor.
"""
return _binary_func(input, other, 'GreaterEqual', out)
def gt(input, other, out=None):
r"""Compute the element-wise greater comparison.
.. math:: \text{out} = (\text{input} > \text{other})
Parameters
----------
input : dragon.vm.torch.Tensor
The input tensor.
other : Union[dragon.vm.torch.Tensor, number]
The tensor to compare.
out : dragon.vm.torch.Tensor, optional
The output tensor.
Returns
-------
dragon.vm.torch.Tensor
The output byte tensor.
"""
return _binary_func(input, other, 'Greater', out)
def isfinite(input):
r"""Check if the elements of input are finite.
.. math:: \text{out} = \text{isfinite}(\text{input})
Examples:
```python
x = torch.tensor([0., float('nan'), float('inf')])
print(torch.isfinite(x)) # [True, False, False]
```
Parameters
----------
input : dragon.vm.torch.Tensor
The input tensor.
Returns
-------
dragon.vm.torch.Tensor
The output tensor.
"""
return _unary_func(input, 'IsFinite')
def isinf(input):
r"""Check if the elements of input are infinite.
.. math:: \text{out} = \text{isinf}(\text{input})
Examples:
```python
x = torch.tensor([0., 1., float('inf')])
print(torch.isinf(x)) # [False, False, True]
```
Parameters
----------
input : dragon.vm.torch.Tensor
The input tensor.
Returns
-------
dragon.vm.torch.Tensor
The output tensor.
"""
return _unary_func(input, 'IsInf')
def isnan(input):
r"""Check if the elements of input are NaN.
.. math:: \text{out} = \text{isnan}(\text{input})
Examples:
```python
x = torch.tensor([0., 1., float('nan')])
print(torch.isnan(x)) # [False, False, True]
```
Parameters
----------
input : dragon.vm.torch.Tensor
The input tensor.
Returns
-------
dragon.vm.torch.Tensor
The output tensor.
"""
return _unary_func(input, 'IsNaN')
def le(input, other, out=None):
r"""Compute the element-wise less-equal comparison.
.. math:: \text{out} = (\text{input} \leq \text{other})
Parameters
----------
input : dragon.vm.torch.Tensor
The input tensor.
other : Union[dragon.vm.torch.Tensor, number]
The tensor to compare.
out : dragon.vm.torch.Tensor, optional
The output tensor.
Returns
-------
dragon.vm.torch.Tensor
The output byte tensor.
"""
return _binary_func(input, other, 'LessEqual', out)
def log(input, out=None):
r"""Compute the natural logarithm of input.
.. math:: \text{out} = \log(\text{input})
Parameters
----------
input : dragon.vm.torch.Tensor
The input tensor.
out : dragon.vm.torch.Tensor, optional
The output tensor.
Returns
-------
dragon.vm.torch.Tensor
The output tensor.
"""
return _unary_func(input, 'Log', out)
def logical_and(input, other, out=None):
r"""Compute the element-wise AND logical operation.
.. math:: \text{out} = \text{input} \mathbin{\&} \text{other}
Examples:
```python
a = torch.tensor([False, True, False, True])
b = torch.tensor([False, True, True, False])
c = torch.Tensor([0, 1, 0, 2])
d = torch.Tensor([0, 3, 4, 0])
print(torch.logical_and(a, b)) # [False, True, False, False]
print(torch.logical_and(c, d)) # [False, True, False, False]
```
Parameters
----------
input : dragon.vm.torch.Tensor
The first input tensor.
other : dragon.vm.torch.Tensor
The second input tensor.
out : dragon.vm.torch.Tensor, optional
The output tensor.
Returns
-------
dragon.vm.torch.Tensor
The output tensor.
"""
return _binary_func(input, other, 'And', out)
def logical_not(input, out=None):
r"""Compute the element-wise NOT logical operation.
.. math:: \text{out} = \,\,\sim \text{input}
Examples:
```python
a = torch.tensor([False, True, True])
b = torch.tensor([0, 1, 2])
print(torch.logical_not(a)) # [True, False, False]
print(torch.logical_not(b)) # [True, False, False]
```
Parameters
----------
input : dragon.vm.torch.Tensor
The input tensor.
out : dragon.vm.torch.Tensor, optional
The output tensor.
Returns
-------
dragon.vm.torch.Tensor
The output tensor.
"""
return _unary_func(input, 'Not', out)
def logical_or(input, other, out=None):
r"""Compute the element-wise OR logical operation.
.. math:: \text{out} = \text{input} \mathbin{|} \text{other}
Examples:
```python
a = torch.tensor([False, True, False, True])
b = torch.tensor([False, True, True, False])
c = torch.Tensor([0, 1, 0, 2])
d = torch.Tensor([0, 3, 4, 0])
print(torch.logical_or(a, b)) # [False, True, True, True]
print(torch.logical_or(c, d)) # [False, True, True, True]
```
Parameters
----------
input : dragon.vm.torch.Tensor
The first input tensor.
other : dragon.vm.torch.Tensor
The second input tensor.
out : dragon.vm.torch.Tensor, optional
The output tensor.
Returns
-------
dragon.vm.torch.Tensor
The output tensor.
"""
return _binary_func(input, other, 'Or', out)
def logical_xor(input, other, out=None):
r"""Compute the element-wise XOR logical operation.
.. math:: \text{out} = \text{input} \oplus \text{other}
Examples:
```python
a = torch.tensor([False, True, False, True])
b = torch.tensor([False, True, True, False])
c = torch.Tensor([0, 1, 0, 2])
d = torch.Tensor([0, 3, 4, 0])
print(torch.logical_xor(a, b)) # [False, False, True, True]
print(torch.logical_xor(c, d)) # [False, False, True, True]
```
Parameters
----------
input : dragon.vm.torch.Tensor
The first input tensor.
other : dragon.vm.torch.Tensor
The second input tensor.
out : dragon.vm.torch.Tensor, optional
The output tensor.
Returns
-------
dragon.vm.torch.Tensor
The output tensor.
"""
return _binary_func(input, other, 'Xor', out)
def logsumexp(input, dim, keepdim=False, out=None):
r"""Apply the composite of log, sum, and exp to input.
.. math:: \text{out}_{i} = \log\sum_{j}\exp(\text{input}_{ij})
Parameters
----------
input : dragon.vm.torch.Tensor
The input tensor.
dim : Union[int, Sequence[int]]
The dimension(s) to reduce.
keepdim : bool, optional, default=False
Whether the output tensor has dim retained or not.
out : dragon.vm.torch.Tensor, optional
The output tensor.
Returns
-------
dragon.vm.torch.Tensor
The output tensor.
"""
return log(exp(input).sum(dim, keepdim), out)
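# Editor's note on logsumexp() above: evaluating log(sum(exp(x))) directly can
# overflow for large inputs; the standard numerically stable form is
#   m + log(sum(exp(x - m))),  with m = max(x) along the reduced dimension
# (left as a comment sketch, since it assumes broadcasting of the max over
# the reduced dimension on this frontend).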
def lt(input, other, out=None):
r"""Compute the element-wise less comparison.
.. math:: \text{out} = (\text{input} < \text{other})
Parameters
----------
input : dragon.vm.torch.Tensor
The input tensor.
other : Union[dragon.vm.torch.Tensor, number]
The tensor to compare.
out : dragon.vm.torch.Tensor, optional
The output tensor.
Returns
-------
dragon.vm.torch.Tensor
The output byte tensor.
"""
return _binary_func(input, other, 'Less', out)
def matmul(input, other, out=None):
r"""Compute the matrix multiplication.
.. math:: \text{out} = \text{input} \times \text{other}
The behavior depends on the shape of input tensors:
* If both tensors are 1d, computes the vector product.
* If tensors are 1d and >=2d, computes the vector-matrix multiplication.
* If tensors are >=2d and 1d, computes the matrix-vector multiplication.
* If both tensors are >= 2d, computes the matrix-matrix multiplication.
* If one tensor is >= 3d, applies batching and broadcasting to the computation.
Examples:
```python
# Vector x Vector
a = torch.ones(2)
b = torch.ones(2)
print(torch.matmul(a, b))
# Vector x Matrix
a = torch.ones(2)
b = torch.ones(2, 3)
print(torch.matmul(a, b))
# Matrix x Vector
a = torch.ones(3, 2)
b = torch.ones(2)
print(torch.matmul(a, b))
# Matrix x Matrix
a = torch.ones(2, 3)
b = torch.ones(3, 2)
print(torch.matmul(a, b))
```
Parameters
----------
input : dragon.vm.torch.Tensor
The input tensor.
other : dragon.vm.torch.Tensor
The tensor to multiply.
out : dragon.vm.torch.Tensor, optional
The output tensor.
Returns
-------
dragon.vm.torch.Tensor
The output tensor.
"""
return Function.apply(
'MatMul', input.device, [input, other], outputs=[out])
def max(input, dim=None, keepdim=False, out=None):
"""Compute the max value of elements along the given dimension.
:attr:`dim` could be negative or ``None``:
```python
x = torch.tensor([[1, 2, 3], [4, 5, 6]])
# A negative dimension is the last-k dimension
print(torch.max(x, dim=1))
print(torch.max(x, dim=-1)) # Equivalent
# If dimension is None, reduce input as a vector
# and return a scalar result
print(torch.max(x)) # 6
# Also, dimension could be a sequence of integers
print(torch.max(x, (0, 1))) # 6
```
Parameters
----------
input : dragon.vm.torch.Tensor
The input tensor.
dim : Union[int, Sequence[int]], optional
The dimension to reduce.
keepdim : bool, optional, default=False
Keep the reduced dimension or not.
out : dragon.vm.torch.Tensor, optional
The output tensor.
Returns
-------
dragon.vm.torch.Tensor
The output tensor.
"""
keepdim = keepdim if dim is not None else False
dim = nest.flatten(dim) if dim is not None else dim
return Function.apply(
'ReduceMax', input.device, [input], outputs=[out],
axes=dim, keepdims=keepdim)
def maximum(input, other, out=None):
r"""Compute the maximum value of inputs.
.. math:: \text{out} = \max(\text{input}, \text{other})
Parameters
----------
input : Union[dragon.vm.torch.Tensor, number]
The first input tensor.
other : Union[dragon.vm.torch.Tensor, number]
The second input tensor.
out : dragon.vm.torch.Tensor, optional
The output tensor.
Returns
-------
dragon.vm.torch.Tensor
The output tensor.
"""
return _binary_func(input, | |
import argparse
import sys
import pandas as pd
import matplotlib.pyplot as plt
import numpy as np
import re
from collections import defaultdict
from math import log10
import matplotlib.patches as mpatches
import statistics
NO_THREADS_BASELINE = "DEFAULT"
def main():
font = {'font.family': 'normal',
# 'font.weight' : 'bold',
'font.size': 18}
plt.rcParams.update(font)
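    # (Editor's note: 'normal' is a font style, not a font family; matplotlib
    #  emits a findfont warning for it and falls back to the default family.
    #  'sans-serif' is presumably what was intended here.)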
blue_patch = mpatches.Patch(color='blue', label='Original')
x_ticks_labels = ['f', 'fa', 'd', 'da']
    # set parameters for the original-to-implementation comparison, used for all logs below
chosen_delta = 0.01
chosen_alpha = 0.99
chosen_epsilon = 0.01
chosen_k = 0.2
# BPI 2012#
bpi2012_baseline = pd.read_csv("./csv/BPI_Challenge_2012_baseline.csv", sep=';')
bpi2012_fitness = pd.read_csv("./csv/BPI_Challenge_2012_fitness.csv", sep=';')
bpi2012_fitnessApprox = pd.read_csv("./csv/BPI_Challenge_2012_fitnessApprox.csv", sep=';')
bpi2012_deviations = pd.read_csv("./csv/BPI_Challenge_2012_deviations.csv", sep=';')
bpi2012_deviationsApprox = pd.read_csv("./csv/BPI_Challenge_2012_deviationsApprox.csv", sep=';')
bpi2012_fitness = bpi2012_fitness.loc[
(bpi2012_fitness["delta"] == chosen_delta) & (bpi2012_fitness["alpha"] == chosen_alpha) & (
bpi2012_fitness["epsilon"] == chosen_epsilon)]
bpi2012_fitnessApprox = bpi2012_fitnessApprox.loc[
(bpi2012_fitnessApprox["delta"] == chosen_delta) & (bpi2012_fitnessApprox["alpha"] == chosen_alpha) & (
bpi2012_fitnessApprox["epsilon"] == chosen_epsilon) & (bpi2012_fitnessApprox["k"] == chosen_k)]
bpi2012_deviations = bpi2012_deviations.loc[
(bpi2012_deviations["delta"] == chosen_delta) & (bpi2012_deviations["alpha"] == chosen_alpha) & (
bpi2012_deviations["epsilon"] == chosen_epsilon)]
bpi2012_deviationsApprox = bpi2012_deviationsApprox.loc[
(bpi2012_deviationsApprox["delta"] == chosen_delta) & (bpi2012_deviationsApprox["alpha"] == chosen_alpha) & (
bpi2012_deviationsApprox["epsilon"] == chosen_epsilon) & (
bpi2012_deviationsApprox["k"] == chosen_k) & (
bpi2012_deviationsApprox["approximationMode"] == "NONALIGNING_KNOWN")]
# BPI 2014#
bpi2014_baseline = pd.read_csv("./csv/Detail_Incident_Activity_baseline.csv", sep=';')
bpi2014_fitness = pd.read_csv("./csv/Detail_Incident_Activity_fitness.csv", sep=';')
bpi2014_fitnessApprox = pd.read_csv("./csv/Detail_Incident_Activity_fitnessApprox.csv", sep=';')
bpi2014_deviations = pd.read_csv("./csv/Detail_Incident_Activity_deviations.csv", sep=';')
bpi2014_deviationsApprox = pd.read_csv("./csv/Detail_Incident_Activity_deviationsApprox.csv", sep=';')
bpi2014_fitness = bpi2014_fitness.loc[
(bpi2014_fitness["delta"] == chosen_delta) & (bpi2014_fitness["alpha"] == chosen_alpha) & (
bpi2014_fitness["epsilon"] == chosen_epsilon)]
bpi2014_fitnessApprox = bpi2014_fitnessApprox.loc[
(bpi2014_fitnessApprox["delta"] == chosen_delta) & (bpi2014_fitnessApprox["alpha"] == chosen_alpha) & (
bpi2014_fitnessApprox["epsilon"] == chosen_epsilon) & (bpi2014_fitnessApprox["k"] == chosen_k)]
bpi2014_deviations = bpi2014_deviations.loc[
(bpi2014_deviations["delta"] == chosen_delta) & (bpi2014_deviations["alpha"] == chosen_alpha) & (
bpi2014_deviations["epsilon"] == chosen_epsilon)]
bpi2014_deviationsApprox = bpi2014_deviationsApprox.loc[
(bpi2014_deviationsApprox["delta"] == chosen_delta) & (bpi2014_deviationsApprox["alpha"] == chosen_alpha) & (
bpi2014_deviationsApprox["epsilon"] == chosen_epsilon) & (
bpi2014_deviationsApprox["k"] == chosen_k) & (
bpi2014_deviationsApprox["approximationMode"] == "NONALIGNING_KNOWN")]
# road traffic fines
road_traffic_baseline = pd.read_csv("./csv/Road_Traffic_Fines_Management_Process_baseline.csv",
sep=';')
road_traffic_fitness = pd.read_csv("./csv/Road_Traffic_Fines_Management_Process_fitness.csv", sep=';')
road_traffic_fitnessApprox = pd.read_csv(
"./csv/Road_Traffic_Fines_Management_Process_fitnessApprox.csv", sep=';')
road_traffic_deviations = pd.read_csv("./csv/Road_Traffic_Fines_Management_Process_deviations.csv",
sep=';')
road_traffic_deviationsApprox = pd.read_csv(
"./csv/Road_Traffic_Fines_Management_Process_deviationsApprox.csv", sep=';')
road_traffic_fitness = road_traffic_fitness.loc[
(road_traffic_fitness["delta"] == chosen_delta) & (road_traffic_fitness["alpha"] == chosen_alpha) & (
road_traffic_fitness["epsilon"] == chosen_epsilon)]
road_traffic_fitnessApprox = road_traffic_fitnessApprox.loc[
(road_traffic_fitnessApprox["delta"] == chosen_delta) & (
road_traffic_fitnessApprox["alpha"] == chosen_alpha) & (
road_traffic_fitnessApprox["epsilon"] == chosen_epsilon) & (
road_traffic_fitnessApprox["k"] == chosen_k)]
road_traffic_deviations = road_traffic_deviations.loc[
(road_traffic_deviations["delta"] == chosen_delta) & (road_traffic_deviations["alpha"] == chosen_alpha) & (
road_traffic_deviations["epsilon"] == chosen_epsilon)]
road_traffic_deviationsApprox = road_traffic_deviationsApprox.loc[
(road_traffic_deviationsApprox["delta"] == chosen_delta) & (
road_traffic_deviationsApprox["alpha"] == chosen_alpha) & (
road_traffic_deviationsApprox["epsilon"] == chosen_epsilon) & (
road_traffic_deviationsApprox["k"] == chosen_k) & (
road_traffic_deviationsApprox["approximationMode"] == "NONALIGNING_KNOWN")]
    # reference-model Road Traffic Fines (RTFr)
rdRTF_baseline = pd.read_csv("./csv/RTFM_model2_baseline.csv",
sep=';')
rdRTF_fitness = pd.read_csv("./csv/RTFM_model2_fitness.csv", sep=';')
rdRTF_fitnessApprox = pd.read_csv(
"./csv/RTFM_model2_fitnessApprox.csv", sep=';')
rdRTF_deviations = pd.read_csv("./csv/RTFM_model2_deviations.csv",
sep=';')
rdRTF_deviationsApprox = pd.read_csv(
"./csv/RTFM_model2_deviationsApprox.csv", sep=';')
rdRTF_fitness = rdRTF_fitness.loc[
(rdRTF_fitness["delta"] == chosen_delta) & (rdRTF_fitness["alpha"] == chosen_alpha) & (
rdRTF_fitness["epsilon"] == chosen_epsilon)]
rdRTF_fitnessApprox = rdRTF_fitnessApprox.loc[
(rdRTF_fitnessApprox["delta"] == chosen_delta) & (
rdRTF_fitnessApprox["alpha"] == chosen_alpha) & (
rdRTF_fitnessApprox["epsilon"] == chosen_epsilon) & (
rdRTF_fitnessApprox["k"] == chosen_k)]
rdRTF_deviations = rdRTF_deviations.loc[
(rdRTF_deviations["delta"] == chosen_delta) & (rdRTF_deviations["alpha"] == chosen_alpha) & (
rdRTF_deviations["epsilon"] == chosen_epsilon)]
rdRTF_deviationsApprox = rdRTF_deviationsApprox.loc[
(rdRTF_deviationsApprox["delta"] == chosen_delta) & (
rdRTF_deviationsApprox["alpha"] == chosen_alpha) & (
rdRTF_deviationsApprox["epsilon"] == chosen_epsilon) & (
rdRTF_deviationsApprox["k"] == chosen_k) & (
rdRTF_deviationsApprox["approximationMode"] == "NONALIGNING_KNOWN")]
# plot computing time comparisons
bpi2012_orig_mean = bpi2012_baseline["time"].mean()
bpi2012_list = []
bpi2012_list.append(bpi2012_fitness["time"].values / bpi2012_orig_mean)
bpi2012_list.append(bpi2012_fitnessApprox["time"].values / bpi2012_orig_mean)
bpi2012_list.append(bpi2012_deviations["time"].values / bpi2012_orig_mean)
bpi2012_list.append(bpi2012_deviationsApprox["time"].values / bpi2012_orig_mean)
bpi2014_orig_mean = bpi2014_baseline["time"].mean()
bpi2014_list = []
bpi2014_list.append(bpi2014_fitness["time"].values / bpi2014_orig_mean)
bpi2014_list.append(bpi2014_fitnessApprox["time"].values / bpi2014_orig_mean)
bpi2014_list.append(bpi2014_deviations["time"].values / bpi2014_orig_mean)
bpi2014_list.append(bpi2014_deviationsApprox["time"].values / bpi2014_orig_mean)
road_traffic_orig_mean = road_traffic_baseline["time"].mean()
road_traffic_list = []
road_traffic_list.append(road_traffic_fitness["time"].values / road_traffic_orig_mean)
road_traffic_list.append(road_traffic_fitnessApprox["time"].values / road_traffic_orig_mean)
road_traffic_list.append(road_traffic_deviations["time"].values / road_traffic_orig_mean)
road_traffic_list.append(road_traffic_deviationsApprox["time"].values / road_traffic_orig_mean)
f, (ax1, ax2, ax3) = plt.subplots(1, 3, sharey=True)
ax1.set_yscale('log')
ax1.set_ylabel('Runtime (relative)')
ax1.set_ylim(0.0007, 10)
ax1.set_yticks([0.001, 0.01, 0.1, 1.0])
ax1.set_yticklabels(["0.1%", "1%", "10%", "100%"])
ax1.set_title('BPI-12', fontsize=18)
ax1.boxplot(bpi2012_list)
# print(bpi2012_list)
# ax1.axhline(1, color='b', linestyle='--')
ax1.set_xticklabels(x_ticks_labels, rotation=0, fontsize=18)
ax1.tick_params(length=6, width=2)
ax1.tick_params(which='minor', length=4, width=1)
ax2.set_title('BPI-14', fontsize=18)
ax2.boxplot(bpi2014_list)
# ax2.axhline(1, color='b', linestyle='--')
ax2.set_xticklabels(x_ticks_labels, rotation=0, fontsize=18)
ax2.tick_params(length=6, width=2)
ax2.tick_params(which='minor', length=4, width=1)
ax3.set_title('Traffic Fines', fontsize=18)
ax3.boxplot(road_traffic_list)
# ax3.axhline(1, color='b', linestyle='--')
ax3.set_xticklabels(x_ticks_labels, rotation=0, fontsize=18)
ax3.tick_params(length=6, width=2)
ax3.tick_params(which='minor', length=4, width=1)
#f.show()
f.savefig("./real_computing_time.pdf", bbox_inches='tight')
plt.clf()
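    # (Editor's note: plt.clf() clears the current figure but keeps it open;
    #  since each plot below creates a new figure via plt.subplots(),
    #  plt.close(f) would release the old ones and avoid accumulating open
    #  figures.)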
# plot sampled trace comparisons
bpi2012_orig_mean = bpi2012_baseline["logSize"].mean()
bpi2012_list = []
bpi2012_list.append(bpi2012_fitness["logSize"].values / bpi2012_orig_mean)
bpi2012_list.append(bpi2012_fitnessApprox["logSize"].values / bpi2012_orig_mean)
bpi2012_list.append(bpi2012_deviations["logSize"].values / bpi2012_orig_mean)
bpi2012_list.append(bpi2012_deviationsApprox["logSize"].values / bpi2012_orig_mean)
bpi2014_orig_mean = bpi2014_baseline["logSize"].mean()
bpi2014_list = []
bpi2014_list.append(bpi2014_fitness["logSize"].values / bpi2014_orig_mean)
bpi2014_list.append(bpi2014_fitnessApprox["logSize"].values / bpi2014_orig_mean)
bpi2014_list.append(bpi2014_deviations["logSize"].values / bpi2014_orig_mean)
bpi2014_list.append(bpi2014_deviationsApprox["logSize"].values / bpi2014_orig_mean)
road_traffic_orig_mean = road_traffic_baseline["logSize"].mean()
road_traffic_list = []
road_traffic_list.append(road_traffic_fitness["logSize"].values / road_traffic_orig_mean)
road_traffic_list.append(road_traffic_fitnessApprox["logSize"].values / road_traffic_orig_mean)
road_traffic_list.append(road_traffic_deviations["logSize"].values / road_traffic_orig_mean)
road_traffic_list.append(road_traffic_deviationsApprox["logSize"].values / road_traffic_orig_mean)
f, (ax1, ax2, ax3) = plt.subplots(1, 3, sharey=True)
# f.legend(handles=[blue_patch],loc='upper right')
# f.set_size_inches(6,5)
ax1.set_yscale('log')
ax1.set_ylabel('Sampled traces')
ax1.set_ylim(0.0007, 3)
ax1.set_yticks([0.001, 0.01, 0.1, 1.0])
ax1.set_yticklabels(["0.1%", "1%", "10%", "100%"])
ax1.set_title('BPI-12', fontsize=18)
ax1.boxplot(bpi2012_list)
# ax1.axhline(1, color='b', linestyle='--')
ax1.set_xticklabels(x_ticks_labels, rotation='horizontal', fontsize=18)
ax1.tick_params(length=6, width=2)
ax1.tick_params(which='minor', length=4, width=1)
ax2.set_title('BPI-14', fontsize=18)
ax2.boxplot(bpi2014_list)
# ax2.axhline(1, color='b', linestyle='--')
ax2.set_xticklabels(x_ticks_labels, rotation='horizontal', fontsize=18)
ax2.tick_params(length=6, width=2)
ax2.tick_params(which='minor', length=4, width=1)
ax3.set_title('Traffic Fines', fontsize=18)
ax3.boxplot(road_traffic_list)
# ax3.axhline(1, color='b', linestyle='--')
ax3.set_xticklabels(x_ticks_labels, rotation='horizontal', fontsize=18)
ax3.tick_params(length=6, width=2)
ax3.tick_params(which='minor', length=4, width=1)
#f.show()
f.savefig("./real_traces.pdf", bbox_inches='tight')
plt.clf()
# plot fitness comparisons
bpi2012_orig_mean = bpi2012_baseline["fitness"].mean()
bpi2012_list = []
bpi2012_list.append(bpi2012_fitness["fitness"].values)
bpi2012_list.append(bpi2012_fitnessApprox["fitness"].values)
bpi2014_orig_mean = bpi2014_baseline["fitness"].mean()
bpi2014_list = []
bpi2014_list.append(bpi2014_fitness["fitness"].values)
bpi2014_list.append(bpi2014_fitnessApprox["fitness"].values)
road_traffic_orig_mean = road_traffic_baseline["fitness"].mean()
road_traffic_list = []
road_traffic_list.append(road_traffic_fitness["fitness"].values)
road_traffic_list.append(road_traffic_fitnessApprox["fitness"].values)
f, (ax1, ax2, ax3) = plt.subplots(1, 3, sharey=True)
# f.set_size_inches(6,4)
# f.legend(handles=[blue_patch],loc='upper right')
ax1.set_ylabel('Fitness')
ax1.set_yticks([0.5, 0.6, 0.7, 0.8, 0.9, 1.0])
ax1.set_ylim(0.49, 1.01)
ax1.boxplot(bpi2012_list)
ax1.axhline(bpi2012_orig_mean, color='b', linestyle='--')
ax1.set_xticklabels(x_ticks_labels[:2], rotation='horizontal', fontsize=18)
ax1.tick_params(length=6, width=2)
ax1.tick_params(which='minor', length=4, width=1)
ax1.set_title('BPI-12', fontsize=18)
ax2.boxplot(bpi2014_list)
ax2.axhline(bpi2014_orig_mean, color='b', linestyle='--')
ax2.set_xticklabels(x_ticks_labels[:2], rotation='horizontal', fontsize=18)
ax2.tick_params(length=6, width=2)
ax2.tick_params(which='minor', length=4, width=1)
ax2.set_title('BPI-14', fontsize=18)
ax3.boxplot(road_traffic_list)
ax3.axhline(road_traffic_orig_mean, color='b', linestyle='--')
ax3.set_xticklabels(x_ticks_labels[:2], rotation='horizontal', fontsize=18)
ax3.tick_params(length=6, width=2)
ax3.tick_params(which='minor', length=4, width=1)
ax3.set_title('Traffic Fines', fontsize=18)
#f.show()
f.savefig("./real_fitness.pdf", bbox_inches='tight')
plt.clf()
    # construct plots for all 4 datasets
bpi2012_orig_mean = bpi2012_baseline["time"].mean()
bpi2012_list = []
bpi2012_list.append(bpi2012_fitness["time"].values / bpi2012_orig_mean)
bpi2012_list.append(bpi2012_fitnessApprox["time"].values / bpi2012_orig_mean)
bpi2012_list.append(bpi2012_deviations["time"].values / bpi2012_orig_mean)
bpi2012_list.append(bpi2012_deviationsApprox["time"].values / bpi2012_orig_mean)
bpi2014_orig_mean = bpi2014_baseline["time"].mean()
bpi2014_list = []
bpi2014_list.append(bpi2014_fitness["time"].values / bpi2014_orig_mean)
bpi2014_list.append(bpi2014_fitnessApprox["time"].values / bpi2014_orig_mean)
bpi2014_list.append(bpi2014_deviations["time"].values / bpi2014_orig_mean)
bpi2014_list.append(bpi2014_deviationsApprox["time"].values / bpi2014_orig_mean)
road_traffic_orig_mean = road_traffic_baseline["time"].mean()
road_traffic_list = []
road_traffic_list.append(road_traffic_fitness["time"].values / road_traffic_orig_mean)
road_traffic_list.append(road_traffic_fitnessApprox["time"].values / road_traffic_orig_mean)
road_traffic_list.append(road_traffic_deviations["time"].values / road_traffic_orig_mean)
road_traffic_list.append(road_traffic_deviationsApprox["time"].values / road_traffic_orig_mean)
rdRTF_orig_mean = rdRTF_baseline["time"].mean()
rdRTF_list = []
rdRTF_list.append(rdRTF_fitness["time"].values / rdRTF_orig_mean)
rdRTF_list.append(rdRTF_fitnessApprox["time"].values / rdRTF_orig_mean)
rdRTF_list.append(rdRTF_deviations["time"].values / rdRTF_orig_mean)
rdRTF_list.append(rdRTF_deviationsApprox["time"].values / rdRTF_orig_mean)
f, (ax1, ax2, ax3, ax4) = plt.subplots(1, 4, sharey=True)
ax1.set_yscale('log')
ax1.set_ylabel('Runtime (relative)')
ax1.set_ylim(0.0007, 11)
ax1.set_yticks([0.001, 0.01, 0.1, 1.0, 10.0])
ax1.set_yticklabels(["0.1%", "1%", "10%", "100%", "1000%"])
ax1.set_title('BPI-12', fontsize=18)
ax1.boxplot(bpi2012_list)
# print(bpi2012_list)
# ax1.axhline(1, color='b', linestyle='--')
ax1.set_xticklabels(x_ticks_labels, rotation=0, fontsize=18)
ax1.tick_params(length=6, width=2)
ax1.tick_params(which='minor', length=4, width=1)
ax2.set_title('BPI-14', fontsize=18)
ax2.boxplot(bpi2014_list)
# ax2.axhline(1, color='b', linestyle='--')
ax2.set_xticklabels(x_ticks_labels, rotation=0, fontsize=18)
ax2.tick_params(length=6, width=2)
ax2.tick_params(which='minor', length=4, width=1)
ax3.set_title('RTF', fontsize=18)
ax3.boxplot(road_traffic_list)
# ax3.axhline(1, color='b', linestyle='--')
ax3.set_xticklabels(x_ticks_labels, rotation=0, fontsize=18)
ax3.tick_params(length=6, width=2)
ax3.tick_params(which='minor', length=4, width=1)
ax4.set_title('RTFr', fontsize=18)
ax4.boxplot(rdRTF_list)
# ax4.axhline(1, color='b', linestyle='--')
ax4.set_xticklabels(x_ticks_labels, rotation=0, fontsize=18)
ax4.tick_params(length=6, width=2)
ax4.tick_params(which='minor', length=4, width=1)
#f.show()
f.savefig("./real4_computing_time.pdf", bbox_inches='tight')
plt.clf()
bpi2012_orig_mean = bpi2012_baseline["logSize"].mean()
bpi2012_list = []
bpi2012_list.append(bpi2012_fitness["logSize"].values / bpi2012_orig_mean)
bpi2012_list.append(bpi2012_fitnessApprox["logSize"].values / bpi2012_orig_mean)
bpi2012_list.append(bpi2012_deviations["logSize"].values / bpi2012_orig_mean)
bpi2012_list.append(bpi2012_deviationsApprox["logSize"].values / bpi2012_orig_mean)
bpi2014_orig_mean = bpi2014_baseline["logSize"].mean()
bpi2014_list = []
bpi2014_list.append(bpi2014_fitness["logSize"].values / bpi2014_orig_mean)
bpi2014_list.append(bpi2014_fitnessApprox["logSize"].values / bpi2014_orig_mean)
bpi2014_list.append(bpi2014_deviations["logSize"].values / bpi2014_orig_mean)
bpi2014_list.append(bpi2014_deviationsApprox["logSize"].values / bpi2014_orig_mean)
road_traffic_orig_mean = road_traffic_baseline["logSize"].mean()
road_traffic_list = []
road_traffic_list.append(road_traffic_fitness["logSize"].values / road_traffic_orig_mean)
road_traffic_list.append(road_traffic_fitnessApprox["logSize"].values / road_traffic_orig_mean)
road_traffic_list.append(road_traffic_deviations["logSize"].values / road_traffic_orig_mean)
road_traffic_list.append(road_traffic_deviationsApprox["logSize"].values / road_traffic_orig_mean)
rdRTF_orig_mean = rdRTF_baseline["logSize"].mean()
rdRTF_list = []
rdRTF_list.append(rdRTF_fitness["logSize"].values / rdRTF_orig_mean)
rdRTF_list.append(rdRTF_fitnessApprox["logSize"].values / rdRTF_orig_mean)
rdRTF_list.append(rdRTF_deviations["logSize"].values / rdRTF_orig_mean)
rdRTF_list.append(rdRTF_deviationsApprox["logSize"].values / rdRTF_orig_mean)
f, (ax1, ax2, ax3, ax4) = plt.subplots(1, 4, sharey=True)
# f.legend(handles=[blue_patch],loc='upper right')
# f.set_size_inches(6,5)
ax1.set_yscale('log')
ax1.set_ylabel('Sampled traces')
ax1.set_ylim(0.0007, 3)
ax1.set_yticks([0.001, 0.01, 0.1, 1.0])
ax1.set_yticklabels(["0.1%", "1%", "10%", "100%"])
ax1.set_title('BPI-12', fontsize=18)
ax1.boxplot(bpi2012_list)
# ax1.axhline(1, color='b', linestyle='--')
ax1.set_xticklabels(x_ticks_labels, rotation='horizontal', fontsize=18)
ax1.tick_params(length=6, width=2)
ax1.tick_params(which='minor', length=4, width=1)
ax2.set_title('BPI-14', fontsize=18)
ax2.boxplot(bpi2014_list)
# ax2.axhline(1, color='b', linestyle='--')
ax2.set_xticklabels(x_ticks_labels, rotation='horizontal', fontsize=18)
ax2.tick_params(length=6, width=2)
ax2.tick_params(which='minor', length=4, width=1)
ax3.set_title('RTF', fontsize=18)
ax3.boxplot(road_traffic_list)
# ax3.axhline(1, color='b', linestyle='--')
ax3.set_xticklabels(x_ticks_labels, rotation='horizontal', fontsize=18)
ax3.tick_params(length=6, width=2)
ax3.tick_params(which='minor', length=4, width=1)
ax4.set_title('RTFr', fontsize=18)
ax4.boxplot(rdRTF_list)
# ax4.axhline(1, color='b', linestyle='--')
ax4.set_xticklabels(x_ticks_labels, rotation='horizontal', fontsize=18)
ax4.tick_params(length=6, width=2)
#f.show()
f.savefig("./real4_traces.pdf", bbox_inches='tight')
# plot fitness comparisons
bpi2012_orig_mean = bpi2012_baseline["fitness"].mean()
bpi2012_list = []
bpi2012_list.append(bpi2012_fitness["fitness"].values)
bpi2012_list.append(bpi2012_fitnessApprox["fitness"].values)
bpi2014_orig_mean = bpi2014_baseline["fitness"].mean()
bpi2014_list = []
bpi2014_list.append(bpi2014_fitness["fitness"].values)
bpi2014_list.append(bpi2014_fitnessApprox["fitness"].values)
road_traffic_orig_mean = road_traffic_baseline["fitness"].mean()
road_traffic_list = []
road_traffic_list.append(road_traffic_fitness["fitness"].values)
road_traffic_list.append(road_traffic_fitnessApprox["fitness"].values)
rdRTF_orig_mean = rdRTF_baseline["fitness"].mean()
rdRTF_list = []
rdRTF_list.append(rdRTF_fitness["fitness"].values)
rdRTF_list.append(rdRTF_fitnessApprox["fitness"].values)
f, (ax1, ax2, ax3, ax4) = plt.subplots(1, 4, sharey=True)
# f.set_size_inches(6,4)
# f.legend(handles=[blue_patch],loc='upper right')
ax1.set_ylabel('Fitness')
ax1.set_yticks([0.5, 0.6, 0.7, 0.8, 0.9, 1.0])
ax1.set_ylim(0.49, 1.01)
ax1.boxplot(bpi2012_list)
ax1.axhline(bpi2012_orig_mean, color='b', linestyle='--')
ax1.set_xticklabels(x_ticks_labels[:2], rotation='horizontal', fontsize=18)
ax1.tick_params(length=6, width=2)
ax1.tick_params(which='minor', length=4, width=1)
ax1.set_title('BPI-12', fontsize=18)
ax2.boxplot(bpi2014_list)
ax2.axhline(bpi2014_orig_mean, color='b', linestyle='--')
ax2.set_xticklabels(x_ticks_labels[:2], rotation='horizontal', fontsize=18)
ax2.tick_params(length=6, width=2)
ax2.tick_params(which='minor', length=4, width=1)
ax2.set_title('BPI-14', fontsize=18)
ax3.boxplot(road_traffic_list)
ax3.axhline(road_traffic_orig_mean, color='b', linestyle='--')
ax3.set_xticklabels(x_ticks_labels[:2], rotation='horizontal', fontsize=18)
ax3.tick_params(length=6, width=2)
ax3.tick_params(which='minor', length=4, width=1)
ax3.set_title('RTF', fontsize=18)
ax4.boxplot(rdRTF_list)
ax4.axhline(rdRTF_orig_mean, color='b', linestyle='--')
ax4.set_xticklabels(x_ticks_labels[:2], rotation='horizontal', fontsize=18)
ax4.tick_params(length=6, width=2)
ax4.tick_params(which='minor', length=4, width=1)
ax4.set_title('RTFr', fontsize=18)
#f.show()
f.savefig("./real4_fitness.pdf", bbox_inches='tight')
def convert_to_log(input):
to_return | |
"metadataonly",
("prop", "1909:9"): "metadataonly",
("prop", "1909:90"): "metadataonly",
("prop", "1909:91"): "metadataonly",
("prop", "1909:92"): "metadataonly",
("prop", "1909:93"): "metadataonly",
("prop", "1909:94"): "metadataonly",
("prop", "1909:95"): "metadataonly",
("prop", "1909:96"): "metadataonly",
("prop", "1909:97"): "metadataonly",
("prop", "1909:98"): "metadataonly",
("prop", "1909:99"): "metadataonly",
("prop", "1910:1"): "metadataonly",
("prop", "1910:10"): "metadataonly",
("prop", "1910:100"): "metadataonly",
("prop", "1910:101"): "metadataonly",
("prop", "1910:102"): "metadataonly",
("prop", "1910:103"): "metadataonly",
("prop", "1910:104"): "metadataonly",
("prop", "1910:105"): "metadataonly",
("prop", "1910:106"): "metadataonly",
("prop", "1910:107"): "metadataonly",
("prop", "1910:108"): "metadataonly",
("prop", "1910:109"): "metadataonly",
("prop", "1910:11"): "metadataonly",
("prop", "1910:110"): "metadataonly",
("prop", "1910:111"): "metadataonly",
("prop", "1910:112"): "metadataonly",
("prop", "1910:113"): "metadataonly",
("prop", "1910:114"): "metadataonly",
("prop", "1910:115"): "metadataonly",
("prop", "1910:116"): "metadataonly",
("prop", "1910:117"): "metadataonly",
("prop", "1910:118"): "metadataonly",
("prop", "1910:119"): "metadataonly",
("prop", "1910:12"): "metadataonly",
("prop", "1910:120"): "metadataonly",
("prop", "1910:121"): "metadataonly",
("prop", "1910:122"): "metadataonly",
("prop", "1910:123"): "metadataonly",
("prop", "1910:124"): "metadataonly",
("prop", "1910:125"): "metadataonly",
("prop", "1910:126"): "metadataonly",
("prop", "1910:127"): "metadataonly",
("prop", "1910:128"): "metadataonly",
("prop", "1910:129"): "metadataonly",
("prop", "1910:13"): "metadataonly",
("prop", "1910:130"): "metadataonly",
("prop", "1910:131"): "metadataonly",
("prop", "1910:132"): "metadataonly",
("prop", "1910:133"): "metadataonly",
("prop", "1910:134"): "metadataonly",
("prop", "1910:135"): "metadataonly",
("prop", "1910:136"): "metadataonly",
("prop", "1910:137"): "metadataonly",
("prop", "1910:138"): "metadataonly",
("prop", "1910:139"): "metadataonly",
("prop", "1910:14"): "metadataonly",
("prop", "1910:140"): "metadataonly",
("prop", "1910:141"): "metadataonly",
("prop", "1910:142"): "metadataonly",
("prop", "1910:143"): "metadataonly",
("prop", "1910:144"): "metadataonly",
("prop", "1910:145"): "metadataonly",
("prop", "1910:146"): "metadataonly",
("prop", "1910:147"): "metadataonly",
("prop", "1910:148"): "metadataonly",
("prop", "1910:149"): "metadataonly",
("prop", "1910:15"): "metadataonly",
("prop", "1910:150"): "metadataonly",
("prop", "1910:151"): "metadataonly",
("prop", "1910:152"): "metadataonly",
("prop", "1910:153"): "metadataonly",
("prop", "1910:154"): "metadataonly",
("prop", "1910:155"): "metadataonly",
("prop", "1910:156"): "metadataonly",
("prop", "1910:157"): "metadataonly",
("prop", "1910:158"): "metadataonly",
("prop", "1910:159"): "metadataonly",
("prop", "1910:16"): "metadataonly",
("prop", "1910:160"): "metadataonly",
("prop", "1910:161"): "metadataonly",
("prop", "1910:162"): "metadataonly",
("prop", "1910:163"): "metadataonly",
("prop", "1910:164"): "metadataonly",
("prop", "1910:165"): "metadataonly",
("prop", "1910:166"): "metadataonly",
("prop", "1910:167"): "metadataonly",
("prop", "1910:168"): "metadataonly",
("prop", "1910:169"): "metadataonly",
("prop", "1910:17"): "metadataonly",
("prop", "1910:170"): "metadataonly",
("prop", "1910:171"): "metadataonly",
("prop", "1910:172"): "metadataonly",
("prop", "1910:173"): "metadataonly",
("prop", "1910:174"): "metadataonly",
("prop", "1910:175"): "metadataonly",
("prop", "1910:176"): "metadataonly",
("prop", "1910:177"): "metadataonly",
("prop", "1910:178"): "metadataonly",
("prop", "1910:179"): "metadataonly",
("prop", "1910:18"): "metadataonly",
("prop", "1910:180"): "metadataonly",
("prop", "1910:181"): "metadataonly",
("prop", "1910:182"): "metadataonly",
("prop", "1910:183"): "metadataonly",
("prop", "1910:184"): "metadataonly",
("prop", "1910:185"): "metadataonly",
("prop", "1910:186"): "metadataonly",
("prop", "1910:187"): "metadataonly",
("prop", "1910:188"): "metadataonly",
("prop", "1910:189"): "metadataonly",
("prop", "1910:19"): "metadataonly",
("prop", "1910:190"): "metadataonly",
("prop", "1910:191"): "metadataonly",
("prop", "1910:194"): "metadataonly",
("prop", "1910:195"): "metadataonly",
("prop", "1910:197"): "metadataonly",
("prop", "1910:198"): "metadataonly",
("prop", "1910:199"): "metadataonly",
("prop", "1910:2"): "metadataonly",
("prop", "1910:200"): "metadataonly",
("prop", "1910:201"): "metadataonly",
("prop", "1910:202"): "metadataonly",
("prop", "1910:203"): "metadataonly",
("prop", "1910:204"): "metadataonly",
("prop", "1910:205"): "metadataonly",
("prop", "1910:206"): "metadataonly",
("prop", "1910:207"): "metadataonly",
("prop", "1910:208"): "metadataonly",
("prop", "1910:209"): "metadataonly",
("prop", "1910:21"): "metadataonly",
("prop", "1910:210"): "metadataonly",
("prop", "1910:211"): "metadataonly",
("prop", "1910:212"): "metadataonly",
("prop", "1910:213"): "metadataonly",
("prop", "1910:214"): "metadataonly",
("prop", "1910:215"): "metadataonly",
("prop", "1910:216"): "metadataonly",
("prop", "1910:217"): "metadataonly",
("prop", "1910:218"): "metadataonly",
("prop", "1910:219"): "metadataonly",
("prop", "1910:22"): "metadataonly",
("prop", "1910:220"): "metadataonly",
("prop", "1910:221"): "metadataonly",
("prop", "1910:222"): "metadataonly",
("prop", "1910:23"): "metadataonly",
("prop", "1910:24"): "metadataonly",
("prop", "1910:25"): "metadataonly",
("prop", "1910:26"): "metadataonly",
("prop", "1910:27"): "metadataonly",
("prop", "1910:28"): "metadataonly",
("prop", "1910:29"): "metadataonly",
("prop", "1910:3"): "metadataonly",
("prop", "1910:30"): "metadataonly",
("prop", "1910:31"): "metadataonly",
("prop", "1910:32"): "metadataonly",
("prop", "1910:33"): "metadataonly",
("prop", "1910:34"): "metadataonly",
("prop", "1910:35"): "metadataonly",
("prop", "1910:36"): "metadataonly",
("prop", "1910:37"): "metadataonly",
("prop", "1910:38"): "metadataonly",
("prop", "1910:39"): "metadataonly",
("prop", "1910:4"): "metadataonly",
("prop", "1910:40"): "metadataonly",
("prop", "1910:41"): "metadataonly",
("prop", "1910:42"): "metadataonly",
("prop", "1910:43"): "metadataonly",
("prop", "1910:44"): "metadataonly",
("prop", "1910:45"): "metadataonly",
("prop", "1910:46"): "metadataonly",
("prop", "1910:47"): "metadataonly",
("prop", "1910:48"): "metadataonly",
("prop", "1910:49"): "metadataonly",
("prop", "1910:5"): "metadataonly",
("prop", "1910:50"): "metadataonly",
("prop", "1910:51"): "metadataonly",
("prop", "1910:52"): "metadataonly",
("prop", "1910:55"): "metadataonly",
("prop", "1910:56"): "metadataonly",
("prop", "1910:57"): "metadataonly",
("prop", "1910:58"): "metadataonly",
("prop", "1910:59"): "metadataonly",
("prop", "1910:6"): "metadataonly",
("prop", "1910:60"): "metadataonly",
("prop", "1910:61"): "metadataonly",
("prop", "1910:62"): "metadataonly",
("prop", "1910:63"): "metadataonly",
("prop", "1910:64"): "metadataonly",
("prop", "1910:65"): "metadataonly",
("prop", "1910:66"): "metadataonly",
("prop", "1910:67"): "metadataonly",
("prop", "1910:68"): "metadataonly",
("prop", "1910:69"): "metadataonly",
("prop", "1910:7"): "metadataonly",
("prop", "1910:70"): "metadataonly",
("prop", "1910:71"): "metadataonly",
("prop", "1910:72"): "metadataonly",
("prop", "1910:73"): "metadataonly",
("prop", "1910:74"): "metadataonly",
("prop", "1910:75"): "metadataonly",
("prop", "1910:76"): "metadataonly",
("prop", "1910:77"): "metadataonly",
("prop", "1910:78"): "metadataonly",
("prop", "1910:79"): "metadataonly",
("prop", "1910:8"): "metadataonly",
("prop", "1910:80"): "metadataonly",
("prop", "1910:81"): "metadataonly",
("prop", "1910:82"): "metadataonly",
("prop", "1910:83"): "metadataonly",
("prop", "1910:84"): "metadataonly",
("prop", "1910:85"): "metadataonly",
("prop", "1910:86"): "metadataonly",
("prop", "1910:87"): "metadataonly",
("prop", "1910:89"): "metadataonly",
("prop", "1910:9"): "metadataonly",
("prop", "1910:91"): "metadataonly",
("prop", "1910:92"): "metadataonly",
("prop", "1910:93"): "metadataonly",
("prop", "1910:94"): "metadataonly",
("prop", "1910:95"): "metadataonly",
("prop", "1910:97"): "metadataonly",
("prop", "1910:98"): "metadataonly",
("prop", "1910:99"): "metadataonly",
("prop", "1911:1"): "metadataonly",
("prop", "1911:10"): "metadataonly",
("prop", "1911:100"): "metadataonly",
("prop", "1911:101"): "metadataonly",
("prop", "1911:102"): "metadataonly",
("prop", "1911:103"): "metadataonly",
("prop", "1911:104"): "metadataonly",
("prop", "1911:105"): "metadataonly",
("prop", "1911:106"): "metadataonly",
("prop", "1911:107"): "metadataonly",
("prop", "1911:108"): "metadataonly",
("prop", "1911:109"): "metadataonly",
("prop", "1911:11"): "metadataonly",
("prop", "1911:110"): "metadataonly",
("prop", "1911:111"): "metadataonly",
("prop", "1911:112"): "metadataonly",
("prop", "1911:113"): "metadataonly",
("prop", "1911:114"): "metadataonly",
("prop", "1911:115"): "metadataonly",
("prop", "1911:116"): "metadataonly",
("prop", "1911:117"): "metadataonly",
("prop", "1911:118"): "metadataonly",
("prop", "1911:119"): "metadataonly",
("prop", "1911:12"): "metadataonly",
("prop", "1911:120"): "metadataonly",
("prop", "1911:121"): "metadataonly",
("prop", "1911:122"): "metadataonly",
("prop", "1911:123"): "metadataonly",
("prop", "1911:124"): "metadataonly",
("prop", "1911:125"): "metadataonly",
("prop", "1911:126"): "metadataonly",
("prop", "1911:127"): "metadataonly",
("prop", "1911:128"): "metadataonly",
("prop", "1911:129"): "metadataonly",
("prop", "1911:13"): "metadataonly",
("prop", "1911:130"): "metadataonly",
("prop", "1911:131"): "metadataonly",
("prop", "1911:132"): "metadataonly",
("prop", "1911:133"): "metadataonly",
("prop", "1911:134"): "metadataonly",
("prop", "1911:135"): "metadataonly",
("prop", "1911:136"): "metadataonly",
("prop", "1911:137"): "metadataonly",
("prop", "1911:138"): "metadataonly",
("prop", "1911:139"): "metadataonly",
("prop", "1911:14"): "metadataonly",
("prop", "1911:140"): "metadataonly",
("prop", "1911:141"): "metadataonly",
("prop", "1911:142"): "metadataonly",
("prop", "1911:143"): "metadataonly",
("prop", "1911:144"): "metadataonly",
("prop", "1911:145"): "metadataonly",
("prop", "1911:146"): "metadataonly",
("prop", "1911:147"): "metadataonly",
("prop", "1911:148"): "metadataonly",
("prop", "1911:149"): "metadataonly",
("prop", "1911:15"): "metadataonly",
("prop", "1911:150"): "metadataonly",
("prop", "1911:151"): "metadataonly",
("prop", "1911:152"): "metadataonly",
("prop", "1911:153"): "metadataonly",
("prop", "1911:154"): "metadataonly",
("prop", "1911:155"): "metadataonly",
("prop", "1911:156"): "metadataonly",
("prop", "1911:157"): "metadataonly",
("prop", "1911:158"): "metadataonly",
("prop", "1911:159"): "metadataonly",
("prop", "1911:16"): "metadataonly",
("prop", "1911:160"): "metadataonly",
("prop", "1911:161"): "metadataonly",
("prop", "1911:162"): "metadataonly",
("prop", "1911:163"): "metadataonly",
("prop", "1911:164"): "metadataonly",
("prop", "1911:165"): "metadataonly",
("prop", "1911:166"): "metadataonly",
("prop", "1911:167"): "metadataonly",
("prop", "1911:168"): "metadataonly",
("prop", "1911:169"): "metadataonly",
("prop", "1911:17"): "metadataonly",
("prop", "1911:170"): "metadataonly",
("prop", "1911:171"): "metadataonly",
("prop", "1911:172"): "metadataonly",
("prop", "1911:173"): "metadataonly",
("prop", "1911:174"): "metadataonly",
("prop", "1911:175"): "metadataonly",
("prop", "1911:176"): "metadataonly",
("prop", "1911:177"): "metadataonly",
("prop", "1911:178"): "metadataonly",
("prop", "1911:179"): "metadataonly",
("prop", "1911:18"): "metadataonly",
("prop", "1911:180"): "metadataonly",
("prop", "1911:181"): "metadataonly",
("prop", "1911:182"): "metadataonly",
("prop", "1911:183"): "metadataonly",
("prop", "1911:184"): "metadataonly",
("prop", "1911:185"): "metadataonly",
("prop", "1911:186"): "metadataonly",
("prop", "1911:187"): "metadataonly",
("prop", "1911:188"): "metadataonly",
("prop", "1911:189"): "metadataonly",
("prop", "1911:19"): "metadataonly",
("prop", "1911:190"): "metadataonly",
("prop", "1911:191"): "metadataonly",
("prop", "1911:192"): "metadataonly",
("prop", "1911:193"): "metadataonly",
("prop", "1911:194"): "metadataonly",
("prop", "1911:195"): "metadataonly",
("prop", "1911:196"): "metadataonly",
("prop", "1911:197"): "metadataonly",
("prop", "1911:198"): "metadataonly",
("prop", "1911:199"): "metadataonly",
("prop", "1911:2"): "metadataonly",
("prop", "1911:20"): "metadataonly",
("prop", "1911:200"): "metadataonly",
("prop", "1911:201"): "metadataonly",
("prop", "1911:202"): "metadataonly",
("prop", "1911:203"): "metadataonly",
("prop", "1911:204"): "metadataonly",
("prop", "1911:205"): "metadataonly",
("prop", "1911:206"): "metadataonly",
("prop", "1911:207"): "metadataonly",
("prop", "1911:208"): "metadataonly",
("prop", "1911:209"): "metadataonly",
("prop", "1911:21"): "metadataonly",
("prop", "1911:210"): "metadataonly",
("prop", "1911:211"): "metadataonly",
("prop", "1911:212"): "metadataonly",
("prop", "1911:213"): "metadataonly",
("prop", "1911:214"): "metadataonly",
("prop", "1911:215"): "metadataonly",
("prop", "1911:216"): "metadataonly",
("prop", "1911:217"): "metadataonly",
("prop", "1911:218"): "metadataonly",
("prop", "1911:219"): "metadataonly",
("prop", "1911:22"): "metadataonly",
("prop", "1911:220"): "metadataonly",
("prop", "1911:221"): "metadataonly",
("prop", "1911:222"): "metadataonly",
("prop", "1911:223"): "metadataonly",
("prop", "1911:224"): "metadataonly",
("prop", "1911:225"): "metadataonly",
("prop", "1911:226"): "metadataonly",
("prop", "1911:227"): "metadataonly",
("prop", "1911:228"): "metadataonly",
("prop", "1911:229"): "metadataonly",
("prop", "1911:23"): "metadataonly",
("prop", "1911:24"): "metadataonly",
("prop", "1911:25"): "metadataonly",
("prop", "1911:26"): "metadataonly",
("prop", "1911:27"): "metadataonly",
("prop", "1911:28"): "metadataonly",
("prop", "1911:29"): "metadataonly",
("prop", "1911:3"): "metadataonly",
("prop", "1911:30"): "metadataonly",
("prop", "1911:31"): "metadataonly",
("prop", "1911:32"): "metadataonly",
("prop", "1911:33"): "metadataonly",
("prop", "1911:34"): "metadataonly",
("prop", "1911:35"): "metadataonly",
("prop", "1911:36"): "metadataonly",
("prop", "1911:37"): "metadataonly",
("prop", "1911:38"): "metadataonly",
("prop", "1911:39"): "metadataonly",
("prop", "1911:4"): "metadataonly",
("prop", "1911:40"): "metadataonly",
("prop", "1911:41"): "metadataonly",
("prop", "1911:42"): "metadataonly",
("prop", "1911:44"): "metadataonly",
("prop", "1911:45"): "metadataonly",
("prop", "1911:46"): "metadataonly",
("prop", "1911:47"): "metadataonly",
("prop", "1911:49"): "metadataonly",
("prop", "1911:5"): "metadataonly",
("prop", "1911:50"): "metadataonly",
("prop", "1911:51"): "metadataonly",
("prop", "1911:52"): "metadataonly",
("prop", "1911:53"): "metadataonly",
("prop", "1911:54"): "metadataonly",
("prop", "1911:55"): "metadataonly",
("prop", "1911:56"): "metadataonly",
("prop", "1911:57"): "metadataonly",
("prop", "1911:58"): "metadataonly",
("prop", "1911:59"): "metadataonly",
("prop", "1911:6"): "metadataonly",
("prop", "1911:60"): "metadataonly",
("prop", "1911:61"): "metadataonly",
("prop", "1911:62"): "metadataonly",
("prop", "1911:63"): "metadataonly",
("prop", "1911:64"): "metadataonly",
("prop", "1911:65"): "metadataonly",
("prop", "1911:66"): "metadataonly",
("prop", "1911:67"): "metadataonly",
("prop", "1911:68"): "metadataonly",
("prop", "1911:69"): "metadataonly",
("prop", "1911:7"): "metadataonly",
("prop", "1911:70"): "metadataonly",
("prop", "1911:71"): "metadataonly",
("prop", "1911:72"): "metadataonly",
("prop", "1911:73"): "metadataonly",
("prop", "1911:74"): "metadataonly",
("prop", "1911:75"): "metadataonly",
("prop", "1911:76"): "metadataonly",
("prop", | |
"""
This file is part of the TheLMA (THe Laboratory Management Application) project.
See LICENSE.txt for licensing, CONTRIBUTORS.txt for contributor information.
Tools involved in the execution of library creation worklists.
AAB
"""
from everest.entities.utils import get_root_aggregate
from thelma.tools.semiconstants import RESERVOIR_SPECS_NAMES
from thelma.tools.semiconstants import get_384_rack_shape
from thelma.tools.base import BaseTool
from thelma.tools.libcreation.base import LibraryBaseLayoutConverter
from thelma.tools.libcreation.base import LibraryLayout
from thelma.tools.libcreation.base import NUMBER_SECTORS
from thelma.tools.semiconstants import PIPETTING_SPECS_NAMES
from thelma.tools.worklists.series import SampleTransferJob
from thelma.tools.worklists.series import RackSampleTransferJob
from thelma.tools.libcreation.generation \
import LibraryCreationWorklistGenerator
from thelma.tools.libcreation.writer import LibraryCreationWorklistWriter
from thelma.tools.utils.converters import LibraryLayoutConverter
from thelma.tools.utils.racksector import QuadrantIterator
from thelma.tools.utils.racksector import RackSectorTranslator
from thelma.tools.utils.racksector import get_sector_positions
from thelma.interfaces import ITubeRack
from thelma.entities.iso import ISO_STATUS
from thelma.entities.iso import StockSampleCreationIso
from thelma.entities.liquidtransfer import ExecutedWorklist
from thelma.entities.rack import TubeRack
from thelma.entities.user import User
__docformat__ = 'reStructuredText en'
__all__ = ['LibraryCreationExecutor',
'LibraryCreationStockRackVerifier',
'LibraryCreationBufferWorklistTransferJobCreator']
#TODO: create stock samples
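# A minimal usage sketch (hypothetical `iso` and `user` entities; it assumes the
# same BaseTool interface used for the other tools in this module, where
# get_result() runs the tool and returns its return value):
#
#     executor = LibraryCreationExecutor(stock_sample_creation_iso=iso, user=user)
#     updated_iso = executor.get_result()
#     if updated_iso is None:
#         raise RuntimeError('Library creation execution failed.')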
class LibraryCreationExecutor(BaseTool):
"""
Executes the worklist file for a pool stock sample creation ISO (including
tube handler worklists). This comprises:
- tube handler worklist execution (requires file upload)
- execution of all worklist files
:Note: The stock samples for the pool are created externally.
**Return Value:** the updated ISO
"""
NAME = 'LibraryCreationXL20Executor'
def __init__(self, stock_sample_creation_iso, user, parent=None):
"""
Constructor:
:param stock_sample_creation_iso: The pool stock sample creation ISO
for which to execute the worklists.
:type stock_sample_creation_iso:
:class:`thelma.entities.iso.StockSampleCreationIso`
:param user: The user conducting the execution.
:type user: :class:`thelma.entities.user.User`
"""
BaseTool.__init__(self, parent=parent)
#: The stock sample creation ISO for which to execute the worklists.
self.stock_sample_creation_iso = stock_sample_creation_iso
#: The user conducting the execution.
self.user = user
#: The library layout for this ISO.
self.__library_layout = None
#: Maps library position onto sector indices.
self.__library_sectors = None
#: The ISO sample stock racks mapped onto sector indices.
self.__sample_stock_racks = None
#: Maps tube racks (for 1 molecule design stock racks) onto sector
#: indices.
self.__stock_rack_map = None
#: The library source (preparation) plates (plate entities) mapped
#: onto sector indices.
self.__library_source_plates = None
#: The executed stock transfer worklists (mapped onto job indices;
#: refers to transfer from single molecule design to pool stock rack).
#: Required for reporting.
self.__stock_transfer_worklists = None
#: The transfer jobs for the series executor.
self.__transfer_jobs = None
#: The indices for the rack transfer jobs mapped onto the worklist
#: they belong to.
self.__rack_transfer_indices = None
#: Positions with transfers but without a library position (96-well,
#: mapped onto sectors).
self.__ignore_positions_96 = None
def reset(self):
BaseTool.reset(self)
self.__library_layout = None
self.__library_sectors = None
self.__sample_stock_racks = dict()
self.__stock_rack_map = dict()
self.__library_source_plates = dict()
self.__stock_transfer_worklists = dict()
self.__transfer_jobs = dict()
self.__rack_transfer_indices = dict()
self.__ignore_positions_96 = dict()
def run(self):
"""
Executes the library creation worklists.
"""
self.reset()
self.add_info('Start execution ...')
self.__check_input()
if not self.has_errors(): self.__get_library_layout()
if not self.has_errors():
self.__get_sample_stock_racks()
self.__get_library_source_plates()
if not self.has_errors(): self.__verify_single_md_stock_racks()
if not self.has_errors(): self.__find_ignored_positions()
if not self.has_errors(): self.__create_buffer_transfer_jobs()
if not self.has_errors(): self.__create_stock_transfer_jobs()
if not self.has_errors(): self.__create_preparation_transfer_jobs()
if not self.has_errors(): self.__create_aliquot_transfer_jobs()
if not self.has_errors(): self.__execute_transfer_jobs()
if not self.has_errors():
self.stock_sample_creation_iso.status = ISO_STATUS.DONE
self.return_value = self.stock_sample_creation_iso
self.add_info('Transfer execution completed.')
def get_executed_stock_worklists(self):
"""
Returns the executed worklists that *deal with the stock transfer*
(for stock transfer reporting).
"""
if self.return_value is None: return None
return self.__stock_transfer_worklists
def get_working_layout(self):
"""
Returns the working layout containing the molecule design pool ID data
(for reporting).
"""
if self.return_value is None: return None
return self.__library_layout
@property
def entity(self):
"""
Returns the ISO. Required for reporting.
"""
return self.stock_sample_creation_iso
def __check_input(self):
"""
Checks the initialisation values.
"""
self.add_debug('Check input values ...')
if self._check_input_class('stock sample creation ISO',
self.stock_sample_creation_iso, StockSampleCreationIso):
status = self.stock_sample_creation_iso.status
if not status == ISO_STATUS.QUEUED:
msg = 'Unexpected ISO status: "%s"' % (status)
self.add_error(msg)
self._check_input_class('user', self.user, User)
def __get_library_layout(self):
"""
Fetches the library layout and sorts its positions into quadrants.
"""
self.add_debug('Fetch library layout ...')
converter = LibraryLayoutConverter(
self.stock_sample_creation_iso.rack_layout,
parent=self)
self.__library_layout = converter.get_result()
if self.__library_layout is None:
msg = 'Error when trying to convert library layout.'
self.add_error(msg)
else:
self.__library_sectors = QuadrantIterator.sort_into_sectors(
working_layout=self.__library_layout,
number_sectors=NUMBER_SECTORS)
del_sectors = []
for sector_index, positions in self.__library_sectors.iteritems():
if len(positions) < 1: del_sectors.append(sector_index)
for sector_index in del_sectors:
del self.__library_sectors[sector_index]
def __get_sample_stock_racks(self):
"""
Fetches the ISO sample stock racks and the single molecule stock racks
(barcodes are found in the worklist labels).
"""
self.add_debug('Fetch stock racks')
writer_cls = LibraryCreationWorklistWriter
tube_rack_agg = get_root_aggregate(ITubeRack)
not_found = []
for issr in self.stock_sample_creation_iso.iso_sector_stock_racks:
self.__sample_stock_racks[issr.sector_index] = issr
label = issr.planned_worklist.label
starting_index = len(writer_cls.SAMPLE_STOCK_WORKLIST_LABEL)
barcode_str = label[starting_index:]
barcodes = barcode_str.split(writer_cls.\
SAMPLE_STOCK_WORKLIST_DELIMITER)
racks = []
for barcode in barcodes:
rack = tube_rack_agg.get_by_slug(barcode)
if rack is None:
not_found.append(barcode)
else:
racks.append(rack)
self.__stock_rack_map[issr.sector_index] = racks
if len(not_found) > 0:
msg = 'The following single molecule design source stock racks ' \
'have not been found in the DB: %s!' \
% (', '.join(sorted(not_found)))
self.add_error(msg)
def __get_library_source_plates(self):
"""
Fetches the library source plates for this ISO and maps them onto
sector indices.
"""
self.add_debug('Get library source plates ...')
for lsp in self.stock_sample_creation_iso.library_source_plates:
self.__library_source_plates[lsp.sector_index] = lsp.plate
def __verify_single_md_stock_racks(self):
"""
Makes sure we have all the molecule designs present and in the right
positions and no additional tubes in the single molecule design
stock racks.
"""
verifier = LibraryCreationStockRackVerifier(self.__library_layout,
self.__stock_rack_map,
parent=self)
compatible = verifier.get_result()
if compatible is None:
msg = 'Error in the verifier!'
self.add_error(msg)
elif not compatible:
msg = 'The stock racks with the single molecule designs are not ' \
'compatible to the expected layout!'
self.add_error(msg)
def __find_ignored_positions(self):
"""
Finds positions that are planned in the worklists but which are not
in the library layout (because there were not enough pools during
optimization). The positions are found by comparison with the
base layout.
"""
self.add_debug('Find empty plate positions ...')
for sector_index in self.__library_sectors.keys():
self.__ignore_positions_96[sector_index] = []
converter = LibraryBaseLayoutConverter(
self.stock_sample_creation_iso.iso_request.iso_layout,
parent=self)
base_layout = converter.get_result()
if base_layout is None:
msg = 'Error when trying to convert library base layout.'
self.add_error(msg)
elif not len(base_layout) == len(self.__library_layout):
lib_positions = self.__library_layout.get_positions()
ignore_positions_384 = []
for rack_pos in base_layout.get_positions():
if not rack_pos in lib_positions:
ignore_positions_384.append(rack_pos)
self.__find_ignored_sector_positions(ignore_positions_384)
def __find_ignored_sector_positions(self, ignore_positions_384):
"""
Converts the positions in the ignored position list for the 384-well
layout into 96-well positions.
Positions for sectors that are not required (might be the case on the
last plate) are not checked.
"""
for sector_index in range(NUMBER_SECTORS):
if not self.__library_sectors.has_key(sector_index): continue
sector_positions = get_sector_positions(sector_index=sector_index,
rack_shape=get_384_rack_shape(),
number_sectors=NUMBER_SECTORS)
translator = RackSectorTranslator(number_sectors=NUMBER_SECTORS,
source_sector_index=sector_index,
target_sector_index=0,
enforce_type=RackSectorTranslator.ONE_TO_MANY)
for sector_pos in sector_positions:
if sector_pos in ignore_positions_384:
rack_pos_96 = translator.translate(sector_pos)
self.__ignore_positions_96[sector_index].append(rack_pos_96)
def __create_buffer_transfer_jobs(self):
"""
Creates the transfer jobs for the buffer worklists.
"""
self.add_debug('Create buffer transfer jobs ...')
stock_racks = dict()
for sector_index, issr in self.__sample_stock_racks.iteritems():
rack = issr.rack
stock_racks[sector_index] = rack
creator = LibraryCreationBufferWorklistTransferJobCreator(
self.stock_sample_creation_iso,
stock_racks,
self.__ignore_positions_96,
parent=self)
self.__transfer_jobs = creator.get_result()
if self.__transfer_jobs is None:
msg = 'Unable to get buffer transfer jobs!'
self.add_error(msg)
def __create_stock_transfer_jobs(self):
"""
Creates the transfer jobs for the pool creation. We do not need
to regard potential empty (ignored) positions here, because the
worklist creation is already base on the library layout.
"""
self.add_debug('Create pool creation transfer jobs ...')
current_index = max(self.__transfer_jobs.keys())
for sector_index, issr in self.__sample_stock_racks.iteritems():
racks = self.__stock_rack_map[sector_index]
for rack in racks:
current_index += 1
ctj = SampleTransferJob(current_index,
issr.planned_worklist,
issr.rack,
rack)
ctj.min_transfer_volume = 1
self.__transfer_jobs[current_index] = ctj
self.__stock_transfer_worklists[current_index] = None
def __create_preparation_transfer_jobs(self):
"""
Creates the transfer jobs for the rack transfers (transfer from pool
stock racks to preparation (source) plates).
"""
self.add_debug('Create preparation transfer jobs ...')
current_index = max(self.__transfer_jobs.keys())
worklist_series = self.stock_sample_creation_iso.iso_request.worklist_series
marker = LibraryCreationWorklistGenerator.\
STOCK_TO_PREP_TRANSFER_WORKLIST_LABEL[2:]
rt_worklist = None
rack_transfer = None
for worklist in worklist_series:
if not marker in worklist.label: continue
if len(worklist.planned_transfers) != 1:
msg = 'The worklist for the transfer from pool stock ' \
'rack to the preparation plate has an unexpected length: ' \
'%i (expected: 1).' % (len(worklist.planned_transfers))
self.add_error(msg)
else:
rack_transfer = worklist.planned_transfers[0]
rt_worklist = worklist
break
if self.has_errors():
pass
elif rack_transfer is None:
msg = 'Unable to find worklist for the transfer from pool stock ' \
'racks to library source (preparation) plates.'
self.add_error(msg)
else:
job_indices = []
for sector_index, issr in self.__sample_stock_racks.iteritems():
stock_rack = issr.rack
prep_plate = self.__library_source_plates[sector_index]
current_index += 1
rtj = RackSampleTransferJob(current_index,
rack_transfer,
prep_plate,
stock_rack)
self.__transfer_jobs[current_index] = rtj
job_indices.append(current_index)
self.__rack_transfer_indices[rt_worklist] = job_indices
def __create_aliquot_transfer_jobs(self):
"""
Creates the transfer jobs for the rack transfers (transfer from
preparation (source) plates | |
#
# This file is part of Brazil Data Cube Collection Builder.
# Copyright (C) 2019-2020 INPE.
#
# Brazil Data Cube Collection Builder is free software; you can redistribute it and/or modify it
# under the terms of the MIT License; see LICENSE file for more details.
#
"""Define Brazil Data Cube utils."""
# Python Native
import datetime
import logging
import shutil
import tarfile
from json import loads as json_parser
from os import path as resource_path
from os import remove as resource_remove
from pathlib import Path
from tempfile import TemporaryDirectory
from typing import List, Tuple
from zipfile import BadZipfile, ZipFile
from zlib import error as zlib_error
# 3rdparty
import boto3
import numpy
import rasterio
import rasterio.features
import rasterio.warp
import shapely
import shapely.geometry
from bdc_catalog.models import Band, Collection, Provider, db
from bdc_catalog.utils import multihash_checksum_sha256
from bdc_collectors.base import BaseProvider
from botocore.exceptions import ClientError
from flask import current_app
from rasterio.warp import Resampling
from rio_cogeo.cogeo import cog_translate
from rio_cogeo.profiles import cog_profiles
# Builder
from werkzeug.exceptions import abort
from ..config import CURRENT_DIR, Config
def get_or_create_model(model_class, defaults=None, engine=None, **restrictions):
"""Get or create Brazil Data Cube model.
Utility method for looking up an object with the given restrictions, creating one if necessary.
Args:
model_class (BaseModel) - Base Model of Brazil Data Cube DB
defaults (dict) - Values to fill out model instance
restrictions (dict) - Query Restrictions
Returns:
tuple: The model instance and a boolean flag telling whether a new record was created
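Example:
    A minimal sketch of a lookup-or-create call (the model and field values are illustrative, not from the original code):
    >>> provider, created = get_or_create_model(Provider, defaults=dict(description='Example provider'), name='Example-Provider')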
"""
if not engine:
engine = db
instance = engine.session.query(model_class).filter_by(**restrictions).first()
if instance:
return instance, False
params = dict((k, v) for k, v in restrictions.items())
params.update(defaults or {})
instance = model_class(**params)
engine.session.add(instance)
return instance, True
def load_img(img_path):
"""Load an image."""
try:
with rasterio.open(img_path) as dataset:
img = dataset.read(1).flatten()
return img
except Exception:
logging.error('Cannot find {}'.format(img_path))
raise RuntimeError('Cannot find {}'.format(img_path))
def extractall(file, destination=None):
"""Extract zipfile."""
archive = ZipFile(file, 'r')
if destination is None:
destination = resource_path.dirname(file)
archive.extractall(destination)
archive.close()
def get_credentials():
"""Retrieve global secrets with credentials."""
file = resource_path.join(resource_path.dirname(CURRENT_DIR), 'secrets.json')
with open(file) as f:
return json_parser(f.read())
def generate_cogs(input_data_set_path, file_path, profile='deflate', profile_options=None, **options):
"""Generate Cloud Optimized GeoTIFF files (COG).
Example:
>>> tif_file = '/path/to/tif'
>>> generate_cogs(tif_file, '/tmp/cog.tif')
Args:
input_data_set_path (str) - Path to the input data set
file_path (str) - Target data set filename
profile (str) - A COG profile based in `rio_cogeo.profiles`.
profile_options (dict) - Custom options to the profile.
Returns:
Path to COG.
"""
if profile_options is None:
profile_options = dict()
output_profile = cog_profiles.get(profile)
output_profile.update(dict(BIGTIFF="IF_SAFER"))
output_profile.update(profile_options)
# Add option to generate the Cloud Optimized GeoTIFF file in memory instead of an intermediate temp file.
options.setdefault('in_memory', True)
# Dataset Open option (see gdalwarp `-oo` option)
config = dict(
GDAL_NUM_THREADS="ALL_CPUS",
GDAL_TIFF_INTERNAL_MASK=True,
GDAL_TIFF_OVR_BLOCKSIZE="128",
)
cog_translate(
str(input_data_set_path),
str(file_path),
output_profile,
config=config,
quiet=True,
**options,
)
return str(file_path)
def is_valid_compressed(file):
"""Check tar gz or zip is valid."""
try:
archive = ZipFile(file, 'r')
try:
corrupt = archive.testzip()
except zlib_error:
corrupt = True
archive.close()
except BadZipfile:
corrupt = True
return not corrupt
def extract_and_get_internal_name(zip_file_name, extract_to=None):
"""Extract zipfile and return internal folder path."""
# Check if file is valid
valid = is_valid_compressed(zip_file_name)
if not valid:
raise IOError('Invalid zip file "{}"'.format(zip_file_name))
else:
extractall(zip_file_name, destination=extract_to)
# Get extracted zip folder name
with ZipFile(zip_file_name) as zipObj:
listOfiles = zipObj.namelist()
extracted_file_path = listOfiles[0].split('/')[0] if listOfiles[0].endswith('/') else listOfiles[0]
return extracted_file_path
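# A hypothetical call (archive path is illustrative): extract a downloaded scene
# archive and get the name of its top-level entry.
#
#     scene_entry = extract_and_get_internal_name('/tmp/S2A_scene.zip', extract_to='/tmp/scenes')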
def upload_file(file_name, bucket='bdc-ds-datacube', object_name=None):
"""Upload a file to an S3 bucket.
Adapted code from boto3 example.
Args:
file_name (str|_io.TextIO): File to upload
bucket (str): Bucket to upload to
object_name (str): S3 object name. If not specified then file_name is used
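Example:
    A minimal sketch of an upload call (bucket and key names are illustrative):
    >>> uploaded = upload_file('/tmp/scene_B04.tif', bucket='my-bucket', object_name='cogs/scene_B04.tif')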
"""
# If S3 object_name was not specified, use file_name
if object_name is None:
object_name = file_name
# Upload the file
s3_client = boto3.client('s3', region_name=Config.AWS_REGION_NAME, aws_access_key_id=Config.AWS_ACCESS_KEY_ID, aws_secret_access_key=Config.AWS_SECRET_ACCESS_KEY)
try:
s3_client.upload_file(file_name, bucket, object_name)
except ClientError as e:
logging.error(e)
return False
return True
def remove_file(file_path: str):
"""Remove file if exists.
Throws Error when user doesn't have access to the file at given path
"""
if resource_path.exists(file_path):
resource_remove(file_path)
def create_asset_definition(href: str, mime_type: str, role: List[str], absolute_path: str,
created=None, is_raster=False):
"""Create a valid asset definition for collections.
TODO: Generate the asset for `Item` field with all bands
Args:
href - Relative path to the asset
mime_type - Asset Mime type str
role - Asset role. Available values are: ['data'], ['thumbnail']
absolute_path - Absolute path to the asset. Required to generate check_sum
created - Date time str of asset. When not set, use current timestamp.
is_raster - Flag to identify raster. When set, `raster_size` and `chunk_size` will be set to the asset.
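Example:
    A hypothetical asset entry for a COG band file (paths and MIME type are illustrative):
    >>> asset = create_asset_definition(href='/collections/item/B04.tif',
    ...                                 mime_type='image/tiff; application=geotiff; profile=cloud-optimized',
    ...                                 role=['data'],
    ...                                 absolute_path='/data/collections/item/B04.tif',
    ...                                 is_raster=True)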
"""
fmt = '%Y-%m-%dT%H:%M:%S'
_now_str = datetime.datetime.utcnow().strftime(fmt)
if created is None:
created = _now_str
elif isinstance(created, datetime.datetime):
created = created.strftime(fmt)
asset = {
'href': str(href),
'type': mime_type,
'bdc:size': Path(absolute_path).stat().st_size,
'checksum:multihash': multihash_checksum_sha256(str(absolute_path)),
'roles': role,
'created': created,
'updated': _now_str
}
if is_raster:
with rasterio.open(str(absolute_path)) as data_set:
asset['bdc:raster_size'] = dict(
x=data_set.shape[1],
y=data_set.shape[0],
)
chunk_x, chunk_y = data_set.profile.get('blockxsize'), data_set.profile.get('blockysize')
if chunk_x is None or chunk_y is None:
return asset
asset['bdc:chunk_size'] = dict(x=chunk_x, y=chunk_y)
return asset
def raster_extent(file_path: str, epsg='EPSG:4326') -> shapely.geometry.Polygon:
"""Get raster extent in arbitrary CRS.
Args:
file_path (str): Path to image
epsg (str): EPSG Code of result crs
Returns:
dict: geojson-like geometry
"""
with rasterio.open(str(file_path)) as data_set:
_geom = shapely.geometry.mapping(shapely.geometry.box(*data_set.bounds))
return shapely.geometry.shape(rasterio.warp.transform_geom(data_set.crs, epsg, _geom, precision=6))
def raster_convexhull(file_path: str, epsg='EPSG:4326', no_data=None) -> dict:
"""Get raster image footprint.
Args:
file_path (str): image file
epsg (str): geometry EPSG
no_data: Use custom no data value. Default is dataset.nodata
See:
https://rasterio.readthedocs.io/en/latest/topics/masks.html
"""
with rasterio.open(str(file_path)) as data_set:
# Read raster data, masking nodata values
data = data_set.read(1, masked=True)
if no_data is not None:
data[data == no_data] = numpy.ma.masked
data[data != numpy.ma.masked] = 1
data[data == numpy.ma.masked] = 0
data = data.astype(numpy.uint8)
# Create mask, in which 1 represents valid data and 0 nodata
geoms = []
for geom, _ in rasterio.features.shapes(data, mask=data, transform=data_set.transform):
geom = rasterio.warp.transform_geom(data_set.crs, epsg, geom, precision=6)
geoms.append(shapely.geometry.shape(geom))
if len(geoms) == 1:
return geoms[0].convex_hull
multi_polygons = shapely.geometry.MultiPolygon(geoms)
return multi_polygons.convex_hull
def post_processing(quality_file_path: str, collection: Collection, scenes: dict, resample_to=None):
"""Stack the merge bands in order to apply a filter on the quality band.
We have faced some issues regarding the `nodata` value in spectral bands, which resulted
in wrong provenance dates on STACK data cubes, since Fmask reports the pixel as valid (0) but a nodata
value is found in the other bands.
To avoid that, we read all the other bands, looking for the `nodata` value. When found, we set the pixel to
nodata in Fmask output::
Quality (in)   Nir                         Quality (out)
0 0 2 4        702   876   7000  9000  =>  0 0 2 4
0 0 0 0        687   987   1022  1029  =>  0 0 0 0
0 2 2 4       -9999  7100  7322  9564  =>  255 2 2 4
Notes:
It may take too long to execute for a large grid.
Args:
quality_file_path: Path to the cloud masking file.
collection: The collection instance.
scenes: Map of band and file path
resample_to: Resolution to re-sample to. Default is None, which keeps the source resolution.
"""
quality_file_path = Path(quality_file_path)
band_names = [band_name for band_name in scenes.keys() if band_name.lower() not in ('ndvi', 'evi', 'fmask4')]
bands = Band.query().filter(
Band.collection_id == collection.id,
Band.name.in_(band_names)
).all()
options = dict()
with TemporaryDirectory() as tmp:
temp_file = Path(tmp) / quality_file_path.name
# Copy to temp dir
shutil.copyfile(quality_file_path, temp_file)
if resample_to:
with rasterio.open(str(quality_file_path)) as ds:
ds_transform = ds.profile['transform']
options.update(ds.meta.copy())
factor = ds_transform[0] / resample_to
options['width'] = ds.profile['width'] * factor
options['height'] = ds.profile['height'] * factor
transform = ds.transform * ds.transform.scale((ds.width / options['width']), (ds.height / options['height']))
options['transform'] = transform
nodata = options.get('nodata') or 255
options['nodata'] = nodata
raster = ds.read(
out_shape=(
ds.count,
int(options['height']),
int(options['width'])
),
resampling=Resampling.nearest
)
with rasterio.open(str(temp_file), mode='w', **options) as temp_ds:
temp_ds.write_band(1, raster[0])
# Build COG
generate_cogs(str(temp_file), str(temp_file))
with rasterio.open(str(temp_file), **options) as quality_ds:
blocks = list(quality_ds.block_windows())
profile = quality_ds.profile
nodata = profile.get('nodata') or 255
raster_merge = quality_ds.read(1)
for _, block in blocks:
nodata_positions = []
row_offset = block.row_off + block.height
col_offset = block.col_off + block.width
for band in bands:
band_file = scenes[band.name]
with rasterio.open(str(band_file)) as ds:
raster = ds.read(1, window=block)
nodata_found = numpy.where(raster == -9999)
raster_nodata_pos = numpy.ravel_multi_index(nodata_found, raster.shape)
nodata_positions = numpy.union1d(nodata_positions, raster_nodata_pos)
if len(nodata_positions):
raster_merge[block.row_off: row_offset, block.col_off: col_offset][
numpy.unravel_index(nodata_positions.astype(numpy.int64), raster.shape)] = nodata
save_as_cog(str(temp_file), raster_merge, **profile)
# Move right place
shutil.move(str(temp_file), str(quality_file_path))
def save_as_cog(destination: str, raster, mode='w', **profile):
"""Save the raster file as Cloud Optimized GeoTIFF.
See Also:
Cloud Optimized GeoTiff https://gdal.org/drivers/raster/cog.html
Args:
destination: Path to store the data set.
raster: Numpy raster values to | |
in the first r columns, then keep its segment number
cur_segment_id = segmented_img[y, x]
segmented_img[y, x] = cur_segment_id
lowest_active_point_per_col[x] = y
else:
# not a continuous image, but one with borders
# go over all active points
# start with points after height of r and later between 0 and r
for y in range(height):
for x in range(width):
if binary_img[y, x]:
# check if there are connected segments
col_idxs = x + dist_per_row
col_feature_idxs = (col_idxs >= 0) & (col_idxs < width)
col_idxs = col_idxs[col_feature_idxs]
dist_lowest_point_per_col = y - lowest_active_point_per_col[col_idxs]
cols_with_connected_segments = col_idxs[dist_lowest_point_per_col <= max_dist_per_col[col_feature_idxs]]
if len(cols_with_connected_segments) > 0:
segments = segmented_img[lowest_active_point_per_col[cols_with_connected_segments].astype(int), cols_with_connected_segments]
# if the current point was already segmented (e.g. it was in the first r cols), then also keep its original segment number in consideration
if segmented_img[y, x] > 0:
segments = np.hstack((segments, segmented_img[y, x]))
cur_segment_id = segments[0]
segments_unique = set()
for segment_id in segments:
if segment_id not in segments_unique:
segments_unique.add(segment_id)
cur_segment_id = min(cur_segment_id, segment_id)
if len(segments_unique) > 1:
for segment_id in segments_unique:
if segment_id != cur_segment_id:
segmented_img[segmented_img == segment_id] = cur_segment_id
segment_ids.remove(segment_id)
else:
# give it new segment number if no other segment nearby
cur_segment_id = max(segment_ids) + 1
segment_ids.add(cur_segment_id)
segmented_img[y, x] = cur_segment_id
lowest_active_point_per_col[x] = y
sorted_segments = sorted(segment_ids)
# relabel the segment ids to have no jumps
for segment_idx in range(1, len(sorted_segments)):
if segment_idx != sorted_segments[segment_idx]:
segmented_img[segmented_img == sorted_segments[segment_idx]] = segment_idx
return segmented_img, list(range(1, len(sorted_segments)))
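# A minimal usage sketch for the segmentation routine above (assuming it is the
# calc_active_binary_segments helper referenced further below; the input image is
# illustrative random data):
#
#     img = np.random.rand(32, 32)
#     segmented_img, segment_ids = calc_active_binary_segments(img, tol=0.1, r=1,
#                                                               is_continuous_image=True)
#     n_segments = len(segment_ids)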
# class BinaryImageSegmenter:
#
# @staticmethod
# def default_config():
# config = ad.config.Config()
# config.r = 1
# config.tol = 0.1
#
# return config
#
#
# def __init__(self, config=None, **kwargs):
# self.config = ad.config.set_default_config(kwargs, config, self.__class__.default_config())
#
# # holds filter templates for different image sizes if needed
# self.filter_templates = dict()
#
#
# def create_filter_template(self, image_shape):
#
# filter_template = np.zeros(image_shape)
#
# mid_y = int(image_shape[0] / 2)
# mid_x = int(image_shape[1] / 2)
#
# for y in range(mid_y - self.config.r, mid_y + self.config.r + 1):
# for x in range(mid_x - self.config.r, mid_x + self.config.r + 1):
# if np.linalg.norm(np.array([y, x]) - np.array([mid_y, mid_x])) <= self.config.r:
# filter_template[y, x] = 1
#
# mid_point = np.array([mid_y, mid_x])
#
# return filter_template, mid_point
#
#
# def get_filter(self, pos, image_shape):
#
# if image_shape in self.filter_templates:
# [filter_template, mid_point] = self.filter_templates[image_shape]
# else:
# [filter_template, mid_point] = self.create_filter_template(image_shape)
# self.filter_templates[image_shape] = (filter_template, mid_point)
#
# shift = np.array(pos) - mid_point
# return np.roll(filter_template, shift, axis=(0, 1))
#
#
# def calc(self, img):
#
# # create a binary image
# binary_img = np.array(img) >= self.config.tol
#
# # segments
# segmented_img = binary_img.copy() * np.nan
#
# segment_ids = [0]
#
# # go over all active points
# for y in range(binary_img.shape[0]):
# for x in range(binary_img.shape[1]):
# if binary_img[y,x]:
#
# cur_filter = self.get_filter((y, x), binary_img.shape)
#
# # point wise multiplication of filter for current pos and image to identify all the active points that influence the current point
# connected_points = np.multiply(binary_img, cur_filter)
# connected_segments = np.multiply(segmented_img, cur_filter)
#
# # ignore outside segments
# connected_segments[connected_segments == 0] = np.nan
#
# # get sorted existing segments
# min_connected_segment_id = np.nanmin(connected_segments)
#
# if np.isnan(min_connected_segment_id):
# # if the first is nan, then all others are also nan, because of the sorting of unique
#
# # add new segment number because there are no other unseen segments nearby
# new_segment_id = segment_ids[-1] + 1
# segment_ids.append(new_segment_id)
#
# new_points_segment_id = new_segment_id
#
# else:
# # use for all found segments the minimal existing segment number
#
# cur_segment_ids = connected_segments[connected_segments > 0]
#
# removed_segment_ids = []
# for cur_segment_id in cur_segment_ids:
# if cur_segment_id != min_connected_segment_id and cur_segment_id not in removed_segment_ids:
#
# segmented_img[segmented_img == cur_segment_id] = min_connected_segment_id
#
# # remove segment id from list of segments
# segment_ids.remove(cur_segment_id)
# removed_segment_ids.append(cur_segment_id)
#
# new_points_segment_id = min_connected_segment_id
#
# segmented_img[connected_points.astype(bool)] = new_points_segment_id
#
#
# # relabel the segment ids to have no jumps
# segmented_img[np.isnan(segmented_img)] = 0
#
# for segment_idx in range(1, len(segment_ids)):
# if segment_idx != segment_ids[segment_idx]:
# segmented_img[segmented_img == segment_ids[segment_idx]] = segment_idx
#
# return segmented_img
def calc_is_segments_finite(image=None, continuous_segmented_image=None, non_continuous_segmented_image=None, tol=0.2, r=1):
'''This version has a bug. It detects infinite segments as finite if they are connected via different borders: N->E->W->S->N or N->W->E->S->N.'''
warnings.warn(DeprecationWarning('The function calc_is_segments_finite contains a bug which classifies certain infinite segments as finite. Please use calc_is_segments_finite_v2 instead.'))
r = int(r)
if continuous_segmented_image is None:
continuous_segmented_image, continuous_segments = ad.helper.statistics.calc_active_binary_segments(image, tol=tol, r=r, is_continuous_image=True)
else:
(continuous_segmented_image, continuous_segments) = continuous_segmented_image
if non_continuous_segmented_image is None:
non_continuous_segmented_image, non_continuous_segments = ad.helper.statistics.calc_active_binary_segments(image, tol=tol, r=r, is_continuous_image=False)
else:
(non_continuous_segmented_image, non_continuous_segments) = non_continuous_segmented_image
# prepare the distances that have to be taken into account to decide whether an active point is connected to another active point
max_dist_per_col = np.zeros(int(r)*2 + 1, dtype=int)
dist_per_row = np.zeros(int(r) * 2 + 1, dtype=int)
idx1 = 0
idx2 = len(dist_per_row) - 1
for x_d in range(r, -1, -1):
dist_per_row[idx1] = -x_d
dist_per_row[idx2] = x_d
for y_d in range(r, -1, -1):
if np.linalg.norm([x_d, y_d]) <= r:
max_dist_per_col[idx1] = y_d
max_dist_per_col[idx2] = y_d
break
idx1 += 1
idx2 -= 1
height = continuous_segmented_image.shape[0]
width = continuous_segmented_image.shape[1]
# get relevant segments, i.e. upper and lower segments of each side
# identify lowest point per col for first r rows:
default_location_array = np.zeros(width) * np.nan
###########################################
# check upper and lower border
upper_locations_per_segment = dict()
lower_locations_per_segment = dict()
for x in range(width):
# upper area of image
for y in range(r): # search from upper to lower
if continuous_segmented_image[y, x] > 0:
segment_id = continuous_segmented_image[y, x]
if segment_id not in upper_locations_per_segment:
upper_locations_per_segment[segment_id] = default_location_array.copy()
upper_locations_per_segment[segment_id][x] = y
break # found the upper -> go to next column
# lower area of image
for y in range(height-1, height-r-1, -1): # search from lower to upper
if continuous_segmented_image[y, x] > 0:
segment_id = continuous_segmented_image[y, x]
if segment_id not in lower_locations_per_segment:
lower_locations_per_segment[segment_id] = default_location_array.copy()
lower_locations_per_segment[segment_id][x] = y
break # found the lower -> go to next column
# go over the segments in the border regions and check if they are connected in the continuous and non-continuous segmentation
# if they are connected in both then they must be infinite
# only check segments that are in both
check_segment_ids = set(upper_locations_per_segment.keys()).intersection(lower_locations_per_segment.keys())
infinite_segments_ids = []
# check if one of them is infinite
for cur_segment_id in check_segment_ids:
stop = False
for x, y in enumerate(upper_locations_per_segment[cur_segment_id]):
if not np.isnan(y):
y = int(y)
# get all points that are connected in the lower area and check if they are also connected via a direct link inside the image, not via the image borders
# check if there are connected segments
col_idxs = np.mod(x + dist_per_row, width)
dist_lowest_point_per_col = y - (lower_locations_per_segment[cur_segment_id] - height)
cols_with_connected_segments = col_idxs[dist_lowest_point_per_col[col_idxs] <= max_dist_per_col]
rows_with_connected_segments = lower_locations_per_segment[cur_segment_id][cols_with_connected_segments].astype(int)
# check each connected point to see if it is also connected in the non-continuous segmentation
for idx in range(len(cols_with_connected_segments)):
if non_continuous_segmented_image[rows_with_connected_segments[idx], cols_with_connected_segments[idx]] == non_continuous_segmented_image[y, x]:
infinite_segments_ids.append(cur_segment_id)
stop = True
break
if stop:
break
###########################################
# check left and right border
leftest_locations_per_segment = dict()
rightest_locations_per_segment = dict()
for y in range(height):
# upper area of image
for x in range(r): # search from left to right
if continuous_segmented_image[y, x] > 0:
segment_id = continuous_segmented_image[y, x]
if segment_id not in leftest_locations_per_segment:
leftest_locations_per_segment[segment_id] = default_location_array.copy()
leftest_locations_per_segment[segment_id][y] = x
break # found the upper -> go to next column
# lower area of image
for x in range(width - 1, width - r - 1, -1): # search from lower to upper
if continuous_segmented_image[y, x] > 0:
segment_id = continuous_segmented_image[y, x]
if segment_id not in rightest_locations_per_segment:
rightest_locations_per_segment[segment_id] = default_location_array.copy()
rightest_locations_per_segment[segment_id][y] = x
break # found the lower -> go to next column
# go over the segments in the border regions and check if they are connected in the continuous and non-continuous segmentation
# if they are connected in both then they must be infinite
# only check segments that are in both
check_segment_ids = set(leftest_locations_per_segment.keys()).intersection(rightest_locations_per_segment.keys())
# do not check segments that are already found to be infinite
check_segment_ids = check_segment_ids.difference(infinite_segments_ids)
# check if one of them is infinite
for cur_segment_id in check_segment_ids:
stop = False
for y, x in enumerate(leftest_locations_per_segment[cur_segment_id]):
if not np.isnan(x):
x = int(x)
# get all points that are connected in the rightmost area and check if they are also connected via a direct link inside the image, not via the image borders
# check if there are connected segments
row_idxs = np.mod(y + dist_per_row, | |
"""Module alphabet_soup."""
__author__ = '<NAME> (japinol)'
__all__ = ["AlphabetSoup"]
from sys import exit
import logging
from alphabetsoup.utils import remove_diacritics_from_str
# Module Constants
WORDS_MIN_CHARS = 3 # Minimum characters for each word to be searched
WORDS_MIN_SEARCH = 1 # Minimum words to be searched
SOUP_MIN_ROWS = 7 # Minimum rows in the table that represents the alphabet soup
SOUP_MIN_COLS = 7 # Minimum columns in the table that represents the alphabet soup
READ_WORDS_FROM_FILE = "read_from_dictionary" # String used to know if the words to be found will be read from a dictionary in an external file
MORE_INFO_STRING = "more_info" # String used to know if we have to display more info
SAVE_BUFFER_EACH_N_WORDS = 90 # How many words to be found before saving the buffer to the output file
PR_DOT_EACH_N_WORDS = 4000 # How many words searched between progress dots
PR_DOT_NEW_LINE = 50 # How many words written before new line of dots
SOUP_NAME_DEFAULT = "Alphabet Soup"
DIACRITICS_PRESERVE_CHAR_SET = ('ñ', 'ç') # When removing diacritics to normalize a string, the chars in this tuple will be preserved
# Files
IN_FILE_DEFAULT = "files/input_soup.txt"
OUT_FILE_DEFAULT = "output/output_soup.txt"
EXTERNAL_DICT_FILE_DEFAULT = "files/soup_dictionary.txt" # External dictionary file for the words to search
# Errors
ERROR_IN_FILE = "!!! ERROR: Input file: %s. Search aborted !!!"
ERROR_IN_FILE_CONSOLE = "!!! ERROR: Input file: See the output file for more details. Search aborted !!!"
ERROR_OUT_FILE_OPEN = "!!! ERROR: Output file: %s. Search aborted !!!"
ERROR_OUT_FILE_WRITING = "!!! ERROR writing output file: %s. Some information has been lost. Search aborted !!!"
ERROR_OUT_FILE_MAX_TRIES = "!!! ERROR: Too many failed tries writing to the output file: %s. Search aborted !!!"
MAX_ERRORS_OUT_FILE = 5 # Max writing errors when trying to write the buffer to the output file
logging.basicConfig(format='%(asctime)s %(levelname)s %(name)s: %(message)s')
logger = logging.getLogger(__name__)
logger.setLevel(logging.DEBUG)
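# A minimal usage sketch (the input/output paths fall back to the module defaults above):
#
#     soup = AlphabetSoup(name='Demo Soup', more_info=True)
#     soup.read_data()
#     soup.search_words_in_the_soup()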
class AlphabetSoup:
"""Solves an alphabet soup finding a given list of words inside it."""
def __init__(self, name=None, in_file=None, out_file=None, external_dict_file=None,
more_info=None, remove_diacritics=None):
self._name = str(name) if name else SOUP_NAME_DEFAULT
self._in_file = str(in_file) if in_file else IN_FILE_DEFAULT
self._out_file = str(out_file) if out_file else OUT_FILE_DEFAULT
self._external_dict_file = str(external_dict_file) if external_dict_file else EXTERNAL_DICT_FILE_DEFAULT
self._read_words_from_file = True if external_dict_file else False
self._more_info = True if more_info else False # If true, calculates and displays more info, mainly in the summary
self._remove_diacritics = True if remove_diacritics else False # If true, removes diacritics from the words to find
self._num_rows = 0
self._num_cols = 0
self._soup = [] # The alphabet soup
self._words = [] # Words to find
self._num_words = 0 # Number of words to find
self._words_found = set()
self._num_words_too_short = 0
self._str_out = [] # List of strings to save to the output file
self._error_in_file = False # If true, an error in the input file has been detected.
self._num_errors_out_file = 0 # How many errors writing the output file
self._progress_dots_count = 0 # How many progress dots have been displayed
# Put soup's name to the buffer of the output file.
self._write_line('%s\n%s' % (self._name, '-' * len(self._name)))
def read_data(self):
"""Reads the words to find and the alphabet soup."""
self._print_progress_dots(char_str='[')
try:
with open(self._in_file, "r", encoding='utf-8') as in_file:
i = 0
for line_in in in_file:
line = line_in.lower().replace('\n', '').replace(' ', '')
if i == 0:
# Read the list of words to find from the first line if the words are not to be read from a file.
if line != READ_WORDS_FROM_FILE and not self._read_words_from_file:
self._words = line.split(',')
else:
# The words to find will be read from an external file.
self._read_words_from_file = True
elif i >= 2:
# Read the characters of the soup until an empty line is found.
if line == '':
break
self._soup.append(line)
# The first row will determine the number of columns.
if i == 2:
self._num_cols = len(line)
else:
if len(line) != self._num_cols:
break
i += 1
except FileNotFoundError:
self._error_in_file = True
self._write_line(f"Input file not found: {self._in_file}")
except Exception:
self._error_in_file = True
self._num_rows = len(self._soup)
# Read the words to find from an external file if necessary.
if self._read_words_from_file:
self._read_words_to_find_from_file()
else:
# Discard the words to find that have too few characters.
for word in self._words[:]:
if len(word) < WORDS_MIN_CHARS:
self._num_words_too_short += 1
self._words.remove(word)
# Remove diacritics from the words to find
if self._remove_diacritics:
self._words = [remove_diacritics_from_str(word, preserve_char_set=DIACRITICS_PRESERVE_CHAR_SET)
for word in self._words]
# Remove duplicated words to find and sort the list.
self._words = list(set(self._words))
self._words.sort()
self._num_words = len(self._words)
# Check if there is something wrong with the input data.
if not self._error_in_file:
self._validate_input_data()
# Check if error reading an input file.
if self._error_in_file:
self._write_line(ERROR_IN_FILE % self._in_file)
self._write_data_to_file(open_method='w')
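# A sketch of the input file layout that read_data expects (the words and
# letters below are purely illustrative, not taken from the project files):
#
#   cat,dog,bird          <- line 0: comma-separated words, or "read_from_dictionary"
#   (anything; skipped)   <- line 1 is ignored before the soup starts
#   qwertyu               <- lines 2+: soup rows; the first row fixes the column count
#   asdfghj
#   zxcvbnm
#   ...                   <- reading stops at an empty line or a row with a different length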
def search_words_in_the_soup(self):
"""Searches the words in the alphabet soup."""
self._print_progress_dots()
if self._error_in_file:
logger.critical(ERROR_IN_FILE_CONSOLE)
return
self._write_header_of_the_search()
# Search the words in the soup.
self._write_line(self._format_msg_word_found_header())
num_words_searched = 0
num_words_found = 0
for word in self._words:
num_words_found += self._search_word_in_the_soup(word)
num_words_searched += 1
if num_words_found >= SAVE_BUFFER_EACH_N_WORDS:
self._write_data_to_file()
num_words_found = 0
if num_words_searched == PR_DOT_EACH_N_WORDS:
self._print_progress_dots()
num_words_searched = 0
self._print_progress_dots()
# Write the summary of the search.
self._write_summary_of_the_search()
self._print_progress_dots(char_str=']')
# Check if all the buffer has been written to the output file.
if self._str_out and self._num_errors_out_file > 0:
logger.critical(ERROR_OUT_FILE_WRITING % self._out_file)
exit()
def _read_words_to_find_from_file(self):
"""Reads the words to find from an external file. This file must have a word for line."""
try:
self._write_line(f"The words to find will be taken from the file: {self._external_dict_file}")
with open(self._external_dict_file, "r", encoding='utf-8') as in_file:
for line_in in in_file:
line = line_in.strip().replace("\n", "").replace(" ", "").lower()
if len(line) >= WORDS_MIN_CHARS:
self._words.append(line)
else:
self._num_words_too_short += 1
except FileNotFoundError:
self._error_in_file = True
self._write_line(f"Input file not found: {self._external_dict_file}")
except Exception:
self._error_in_file = True
def _validate_input_data(self):
"""Validates the input data."""
# Check the dimensions of the soup.
for wordSoup in self._soup:
if len(wordSoup) != self._num_cols:
self._error_in_file = True
self._write_line(f"!!! ERROR: The first row of the soup has {self._num_cols} columns. "
"Every other row must have this very number of columns !!!")
return
# Check rows, columns and minimum words to search.
if (self._num_rows < SOUP_MIN_ROWS or self._num_cols < SOUP_MIN_COLS
or self._num_words < WORDS_MIN_SEARCH):
self._error_in_file = True
self._write_line("!!! Input data error. Some of the following rules has been violated: !!!")
self._write_line(f" > Minimum rows: {SOUP_MIN_ROWS}")
self._write_line(f" > Minimum columns: {SOUP_MIN_COLS}")
self._write_line(f" > Minimum words to find: {WORDS_MIN_SEARCH}")
def _search_word_in_the_soup(self, word):
"""Searches a word in the alphabet soup."""
word_len = len(word)
num_words_found = 0
for i in range(self._num_rows): # Go through rows
for j in range(self._num_cols): # Go through columns
if word[0] == self._soup[i][j]: # Find the first char. of the current word in the soup
for p in range(word_len): # Check horitz. left->right
if (j + p) >= self._num_cols:
break
elif word[p] != self._soup[i][j + p]:
break
if p == word_len - 1:
self._words_found.add(word)
num_words_found += 1
self._write_line(self._format_msg_word_found(word, i, j, "left--> right"))
for p in range(word_len): # Check horitz. right->left
if (j - p) < 1:
break
elif word[p] != self._soup[i][j - p]:
break
if p == word_len - 1:
self._words_found.add(word)
num_words_found += 1
self._write_line(self._format_msg_word_found(word, i, j, "right--> left"))
for p in range(word_len): # Check vertical top->bottom
if (i + p) >= self._num_rows:
break
elif word[p] != self._soup[i + p][j]:
break
if p == word_len - 1:
self._words_found.add(word)
num_words_found += 1
self._write_line(self._format_msg_word_found(word, i, j, "top--> bottom"))
for p in range(word_len): # Check vertical bottom->top
if (i - p) < 1:
break
elif word[p] != self._soup[i - p][j]:
break
if p == word_len - 1:
self._words_found.add(word)
num_words_found += 1
self._write_line(self._format_msg_word_found(word, i, j, "bottom--> top"))
for p in range(word_len): # Check diagonal top->bottom left->right
if (i + p >= self._num_rows) or (j + p >= self._num_cols):
break
elif word[p] != self._soup[i + p][j + p]:
break
if p == word_len - 1:
self._words_found.add(word)
num_words_found += 1
self._write_line(self._format_msg_word_found(word, i, j, "diag. top->bottom, left->right"))
for p in range(word_len): # Check diagonal bottom->top left->right
if (i - p < 1) or (j + p >= self._num_cols):
break
elif word[p] != self._soup[i - p][j + p]:
break
if p == word_len - 1:
self._words_found.add(word)
num_words_found += 1
self._write_line(self._format_msg_word_found(word, i, j, "diag. bottom->top left->right"))
for p in range(word_len): # Check diagonal top->bottom right->left
if (i + p >= self._num_rows) or (j - p | |
scheduled[1].entry_date == GeneratedDate(2020, 9, 15)
records = [
Transaction(date(2019, 3, 1), "ABC", 1, Amount(100)),
Transaction(date(2019, 6, 1), "ABC", 1, Amount(100)),
Transaction(date(2019, 9, 1), "ABC", 1, Amount(100)),
Transaction(date(2019, 11, 1), "ABC", 1, Amount(100)), # dated in the future
]
scheduled = scheduled_transactions(records, since=date(2019, 10, 1))
assert len(scheduled) == 3
assert scheduled[0].entry_date == GeneratedDate(2020, 3, 15)
assert scheduled[1].entry_date == GeneratedDate(2020, 6, 15)
assert scheduled[2].entry_date == GeneratedDate(2020, 9, 15)
records = [
Transaction(date(2019, 9, 16), "ABC", 1, Amount(100)),
Transaction(date(2019, 11, 18), "ABC", 1, Amount(100)),
Transaction(date(2020, 2, 24), "ABC", 1, Amount(100)),
Transaction(date(2020, 5, 18), "ABC", 1, Amount(100)),
# note, one month earlier than last year
Transaction(date(2020, 8, 17), "ABC", 1, Amount(100)),
]
scheduled = scheduled_transactions(records, since=date(2020, 8, 18))
# todo: issue here is that 2019/9 is projected to 2020/9, but we can clearly tell,
# based on month interval not matching expected frequency (i.e. 3), that we don't
# actually want/expect this projection - it should just be weeded out
assert len(scheduled) == 4
assert scheduled[0].entry_date == GeneratedDate(2020, 11, 30)
assert scheduled[1].entry_date == GeneratedDate(2021, 2, 28)
assert scheduled[2].entry_date == GeneratedDate(2021, 5, 31)
assert scheduled[3].entry_date == GeneratedDate(2021, 8, 31)
records = [
Transaction(date(2020, 3, 13), "ABC", 1, Amount(100)),
Transaction(date(2020, 6, 15), "ABC", 1, Amount(100)),
# preliminary record; e.g. in future, results in projection more than 1 year later
Transaction(date(2020, 9, 15), "ABC", 1, GeneratedAmount(100)),
]
scheduled = scheduled_transactions(records, since=date(2020, 9, 2))
assert len(scheduled) == 4
assert scheduled[0].entry_date == GeneratedDate(2020, 12, 15)
assert scheduled[1].entry_date == GeneratedDate(2021, 3, 15)
assert scheduled[2].entry_date == GeneratedDate(2021, 6, 15)
# note that this one is included though more than 365 days later; see earliest/cutoff in scheduled_transactions
assert scheduled[3].entry_date == GeneratedDate(2021, 9, 15)
def test_scheduled_grace_period():
records = [
Transaction(date(2018, 3, 1), "ABC", 1, Amount(100)),
Transaction(date(2018, 6, 1), "ABC", 1, Amount(100)),
Transaction(date(2018, 9, 1), "ABC", 1, Amount(100)),
]
scheduled = scheduled_transactions(records, since=date(2019, 9, 16))
assert len(scheduled) == 1
records = [
Transaction(date(2018, 3, 1), "ABC", 1, Amount(100)),
Transaction(date(2018, 6, 1), "ABC", 1, Amount(100)),
Transaction(date(2018, 9, 1), "ABC", 1, Amount(100)),
]
scheduled = scheduled_transactions(records, since=date(2019, 9, 30))
assert len(scheduled) == 1
records = [
Transaction(date(2018, 3, 1), "ABC", 1, Amount(100)),
Transaction(date(2018, 6, 1), "ABC", 1, Amount(100)),
Transaction(date(2018, 9, 1), "ABC", 1, Amount(100)),
]
scheduled = scheduled_transactions(records, since=date(2019, 10, 1))
assert len(scheduled) == 0
records = [
Transaction(date(2019, 3, 1), "ABC", 1, Amount(100)),
Transaction(date(2019, 6, 1), "ABC", 1, Amount(100)),
Transaction(date(2019, 9, 1), "ABC", 1, Amount(100))
# a quarterly distribution skipped for december
# this should not prevent forecasts for previous distributions;
# we can't know whether this means distribution stopped completely, or is just a change in frequency;
# require user input
]
scheduled = scheduled_transactions(records, since=date(2020, 1, 20))
assert len(scheduled) == 3
for n in range(0, 14):
# going back 13 days; spanning 2021/03/18 - 2021/04/15; a 28 day period
records = [
Transaction(date(2020, 4, 7), "ABC", 1, Amount(1)),
Transaction(date(2021, 3, 31 - n), "ABC", 1, Amount(1)),
]
scheduled = scheduled_transactions(records, since=date(2021, 3, 31))
assert len(scheduled) == 1
records = [
Transaction(date(2020, 4, 7), "ABC", 1, Amount(1)),
# note that this date is the first date far enough back that
# it is not considered a fit for the april forecast
# i.e. if the date was one day later (2021/03/18), it would be
# considered a fit, and the forecast would be discarded
Transaction(date(2021, 3, 17), "ABC", 1, Amount(1)),
]
scheduled = scheduled_transactions(records, since=date(2021, 3, 31))
assert len(scheduled) == 2
def test_scheduled_transactions_closed_position():
records = [
Transaction(date(2019, 1, 20), "ABC", 1, Amount(100)),
Transaction(date(2020, 1, 19), "ABC", 0),
]
scheduled = scheduled_transactions(records, since=date(2020, 1, 20))
assert len(scheduled) == 0
records = [
Transaction(date(2019, 1, 20), "ABC", 1, Amount(100)),
Transaction(date(2020, 1, 19), "ABC", 0),
Transaction(date(2020, 2, 1), "ABC", 1),
]
scheduled = scheduled_transactions(records, since=date(2020, 1, 20))
assert len(scheduled) == 0
# see example/strategic.journal
records = [
Transaction(date(2019, 1, 20), "ABC", 1, Amount(100)),
Transaction(date(2019, 4, 20), "ABC", 1, Amount(100)),
Transaction(date(2019, 7, 20), "ABC", 1, Amount(100)),
Transaction(date(2019, 10, 20), "ABC", 1, Amount(100)),
Transaction(date(2020, 1, 19), "ABC", 0),
Transaction(date(2020, 2, 1), "ABC", 1),
]
scheduled = scheduled_transactions(records, since=date(2020, 2, 20))
assert len(scheduled) == 4
assert scheduled[0].entry_date == GeneratedDate(2020, 4, 30)
assert scheduled[0].position == 1
assert scheduled[0].amount == GeneratedAmount(100)
# ...
assert scheduled[3].entry_date == GeneratedDate(2021, 1, 31)
assert scheduled[3].position == 1
assert scheduled[3].amount == GeneratedAmount(100)
records = [
Transaction(date(2018, 8, 15), "ABC", 1, Amount(100)),
Transaction(date(2018, 11, 14), "ABC", 1, Amount(100)),
Transaction(date(2019, 2, 20), "ABC", 1, Amount(100)),
Transaction(date(2019, 5, 15), "ABC", 1, Amount(100)),
Transaction(date(2019, 8, 14), "ABC", 1, Amount(100)),
Transaction(date(2019, 11, 20), "ABC", 1, Amount(100)),
# simulate preliminary record, using --by-payout-date (entry_date=ex_date)
Transaction(
date(2020, 3, 12), "ABC", 1, GeneratedAmount(100), ex_date=date(2020, 2, 19)
),
Transaction(date(2020, 2, 28), "ABC", 0),
]
scheduled = scheduled_transactions(records, since=date(2020, 3, 8))
assert len(scheduled) == 0
# for this scenario, assume a user records by payout date, but makes sure to put in
# ex-date when necessary to maintain correct forecasting
records = [
# past dividend transaction; assume semi-annual distribution for scenario
Transaction(date(2018, 10, 5), "ABC", 100, Amount(100)),
# closing position right after passed ex-date
Transaction(date(2019, 1, 16), "ABC", 0),
# opening lower position before reaching payout date
Transaction(date(2019, 1, 26), "ABC", 50),
# payout date; note ex-date set
Transaction(
date(2019, 2, 5), "ABC", 100, Amount(100), ex_date=date(2019, 1, 15)
),
]
scheduled = scheduled_transactions(records, since=date(2019, 2, 16))
assert len(scheduled) == 2
assert scheduled[0].entry_date == date(2019, 10, 15)
assert scheduled[1].entry_date == date(2020, 2, 15)
assert scheduled[0].position == 50
assert scheduled[1].position == 50
# same exact scenario, except in this case, user forgot to set ex-date
from dataclasses import replace
records.append(replace(records[3], ex_date=None))
records.pop(3)
scheduled = scheduled_transactions(records, since=date(2019, 2, 16))
assert len(scheduled) == 2
assert scheduled[0].entry_date == date(2019, 10, 15)
assert scheduled[1].entry_date == date(2020, 2, 15)
assert scheduled[0].position == 100
assert scheduled[1].position == 100
def test_scheduled_transactions_sampling():
records = [
Transaction(date(2019, 3, 10), "ABC", 1, Amount(100)),
Transaction(date(2019, 6, 1), "ABC", 1, Amount(100)),
Transaction(date(2019, 9, 1), "ABC", 1, Amount(100)),
Transaction(date(2019, 12, 1), "ABC", 1, Amount(100)),
# note 5 days earlier than in the past; this leads to an additional projection
# since there's not more than 12m between; e.g. records sampled will range from:
# 2019/03/05 (exclusive) - 2020/03/05 (inclusive)
# e.g. 2019/03/10 => 2020/03/15, but this one will be discarded (as it has been realized)
Transaction(date(2020, 3, 5), "ABC", 1, Amount(100)),
]
scheduled = scheduled_transactions(records, since=date(2020, 3, 12))
assert len(scheduled) == 4
assert scheduled[0].entry_date == date(2020, 6, 15)
records = [
Transaction(date(2019, 3, 5), "ABC", 1, Amount(100)),
Transaction(date(2019, 6, 1), "ABC", 1, Amount(100)),
Transaction(date(2019, 9, 1), "ABC", 1, Amount(100)),
Transaction(date(2019, 12, 1), "ABC", 1, Amount(100)),
# if it was 5 days later, however, then it would be more than 12m and prove no issue
# e.g. records sampled will range from:
# 2019/03/10 (exclusive) - 2020/03/10 (inclusive)
Transaction(date(2020, 3, 10), "ABC", 1, Amount(100)),
]
scheduled = scheduled_transactions(records, since=date(2020, 3, 15))
assert len(scheduled) == 4
assert scheduled[0].entry_date == GeneratedDate(2020, 6, 15)
assert scheduled[3].entry_date == GeneratedDate(2021, 3, 15)
records = [
Transaction(date(2019, 3, 10), "ABC", 1, Amount(100)),
Transaction(date(2019, 6, 1), "ABC", 1, Amount(100)),
Transaction(date(2019, 9, 1), "ABC", 1, Amount(100)),
Transaction(date(2019, 12, 1), "ABC", 1, Amount(100)),
Transaction(date(2020, 3, 5), "ABC", 1, Amount(100)),
]
# no issue whether earliest record was dated later,
# because the earliest record is now out of the 12m period entirely
scheduled = scheduled_transactions(records, since=date(2020, 4, 1))
assert len(scheduled) == 4
assert scheduled[0].entry_date == date(2020, 6, 15)
records = [
Transaction(date(2019, 3, 10), "ABC", 1, Amount(100)),
Transaction(date(2019, 6, 1), "ABC", 1, Amount(100)),
Transaction(date(2019, 9, 1), "ABC", 1, Amount(100)),
Transaction(date(2019, 12, 1), "ABC", 1, Amount(100)),
]
scheduled = scheduled_transactions(records, since=date(2020, 3, 12))
assert len(scheduled) == 4
assert scheduled[0].entry_date == GeneratedDate(2020, 3, 15)
records = [
Transaction(date(2019, 3, 1), "ABC", 1, Amount(100)),
Transaction(date(2019, 6, 1), "ABC", 1, Amount(100)),
Transaction(date(2019, 9, 1), "ABC", 1, Amount(100)),
Transaction(date(2019, 12, 1), "ABC", 1, Amount(100)),
# note february instead of march; i.e. less than 12m between
Transaction(date(2020, 2, 28), "ABC", | |
k, dim in zip(key, self.dims)
if not isinstance(k, integer_types)
)
new_key = []
for k in key:
if isinstance(k, Variable):
k = k.data
if not isinstance(k, BASIC_INDEXING_TYPES):
k = np.asarray(k)
if k.size == 0:
# Slice by empty list; numpy could not infer the dtype
k = k.astype(int)
elif k.dtype.kind == "b":
(k,) = np.nonzero(k)
new_key.append(k)
return dims, OuterIndexer(tuple(new_key)), None
def _nonzero(self):
""" Equivalent numpy's nonzero but returns a tuple of Varibles. """
# TODO we should replace dask's native nonzero
# after https://github.com/dask/dask/issues/1076 is implemented.
nonzeros = np.nonzero(self.data)
return tuple(Variable((dim), nz) for nz, dim in zip(nonzeros, self.dims))
def _broadcast_indexes_vectorized(self, key):
variables = []
out_dims_set = OrderedSet()
for dim, value in zip(self.dims, key):
if isinstance(value, slice):
out_dims_set.add(dim)
else:
variable = (
value
if isinstance(value, Variable)
else as_variable(value, name=dim)
)
if variable.dtype.kind == "b": # boolean indexing case
(variable,) = variable._nonzero()
variables.append(variable)
out_dims_set.update(variable.dims)
variable_dims = set()
for variable in variables:
variable_dims.update(variable.dims)
slices = []
for i, (dim, value) in enumerate(zip(self.dims, key)):
if isinstance(value, slice):
if dim in variable_dims:
# We only convert slice objects to variables if they share
# a dimension with at least one other variable. Otherwise,
# we can equivalently leave them as slices and transpose
# the result. This is significantly faster/more efficient
# for most array backends.
values = np.arange(*value.indices(self.sizes[dim]))
variables.insert(i - len(slices), Variable((dim,), values))
else:
slices.append((i, value))
try:
variables = _broadcast_compat_variables(*variables)
except ValueError:
raise IndexError(f"Dimensions of indexers mismatch: {key}")
out_key = [variable.data for variable in variables]
out_dims = tuple(out_dims_set)
slice_positions = set()
for i, value in slices:
out_key.insert(i, value)
new_position = out_dims.index(self.dims[i])
slice_positions.add(new_position)
if slice_positions:
new_order = [i for i in range(len(out_dims)) if i not in slice_positions]
else:
new_order = None
return out_dims, VectorizedIndexer(tuple(out_key)), new_order
def __getitem__(self: VariableType, key) -> VariableType:
"""Return a new Variable object whose contents are consistent with
getting the provided key from the underlying data.
NB. __getitem__ and __setitem__ implement xarray-style indexing,
where if keys are unlabeled arrays, we index the array orthogonally
with them. If keys are labeled arrays (such as Variables), they are
broadcasted with our usual scheme and then the array is indexed with
the broadcasted key, like numpy's fancy indexing.
If you really want to do indexing like `x[x > 0]`, manipulate the numpy
array `x.values` directly.
"""
dims, indexer, new_order = self._broadcast_indexes(key)
data = as_indexable(self._data)[indexer]
if new_order:
data = duck_array_ops.moveaxis(data, range(len(new_order)), new_order)
return self._finalize_indexing_result(dims, data)
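# A minimal illustration of the orthogonal indexing described in the docstring
# above (a sketch against the public xarray API; values are illustrative):
#
#   >>> import numpy as np
#   >>> import xarray as xr
#   >>> v = xr.Variable(("x", "y"), np.arange(12).reshape(3, 4))
#   >>> v[[0, 2], [1, 3]].shape      # unlabeled arrays index each dimension independently
#   (2, 2)
#   >>> v.values[[0, 2], [1, 3]]     # numpy fancy indexing pairs the indices up instead
#   array([ 1, 11])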
def _finalize_indexing_result(self: VariableType, dims, data) -> VariableType:
"""Used by IndexVariable to return IndexVariable objects when possible."""
return type(self)(dims, data, self._attrs, self._encoding, fastpath=True)
def _getitem_with_mask(self, key, fill_value=dtypes.NA):
"""Index this Variable with -1 remapped to fill_value."""
# TODO(shoyer): expose this method in public API somewhere (isel?) and
# use it for reindex.
# TODO(shoyer): add a sanity check that all other integers are
# non-negative
# TODO(shoyer): add an optimization, remapping -1 to an adjacent value
# that is actually indexed rather than mapping it to the last value
# along each axis.
if fill_value is dtypes.NA:
fill_value = dtypes.get_fill_value(self.dtype)
dims, indexer, new_order = self._broadcast_indexes(key)
if self.size:
if is_duck_dask_array(self._data):
# dask's indexing is faster this way; also vindex does not
# support negative indices yet:
# https://github.com/dask/dask/pull/2967
actual_indexer = indexing.posify_mask_indexer(indexer)
else:
actual_indexer = indexer
data = as_indexable(self._data)[actual_indexer]
mask = indexing.create_mask(indexer, self.shape, data)
# we need to invert the mask in order to pass data first. This helps
# pint to choose the correct unit
# TODO: revert after https://github.com/hgrecco/pint/issues/1019 is fixed
data = duck_array_ops.where(np.logical_not(mask), data, fill_value)
else:
# array cannot be indexed along dimensions of size 0, so just
# build the mask directly instead.
mask = indexing.create_mask(indexer, self.shape)
data = np.broadcast_to(fill_value, getattr(mask, "shape", ()))
if new_order:
data = duck_array_ops.moveaxis(data, range(len(new_order)), new_order)
return self._finalize_indexing_result(dims, data)
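# Sketch of the -1 remapping performed above (illustrative values only):
#
#   >>> v = Variable(("x",), [10.0, 20.0, 30.0])
#   >>> v._getitem_with_mask([0, -1, 2])
#   <xarray.Variable (x: 3)>
#   array([10., nan, 30.])
#
# Ordinary indices are looked up as usual, while -1 entries are replaced by the
# dtype's fill value (NaN for floats) instead of wrapping around to the end.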
def __setitem__(self, key, value):
"""__setitem__ is overloaded to access the underlying numpy values with
orthogonal indexing.
See __getitem__ for more details.
"""
dims, index_tuple, new_order = self._broadcast_indexes(key)
if not isinstance(value, Variable):
value = as_compatible_data(value)
if value.ndim > len(dims):
raise ValueError(
"shape mismatch: value array of shape %s could not be "
"broadcast to indexing result with %s dimensions"
% (value.shape, len(dims))
)
if value.ndim == 0:
value = Variable((), value)
else:
value = Variable(dims[-value.ndim :], value)
# broadcast to become assignable
value = value.set_dims(dims).data
if new_order:
value = duck_array_ops.asarray(value)
value = value[(len(dims) - value.ndim) * (np.newaxis,) + (Ellipsis,)]
value = duck_array_ops.moveaxis(value, new_order, range(len(new_order)))
indexable = as_indexable(self._data)
indexable[index_tuple] = value
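# Sketch of the orthogonal assignment described in the docstring above
# (assumes the same xarray API as the __getitem__ example; values illustrative):
#
#   >>> v = xr.Variable(("x", "y"), np.zeros((3, 4)))
#   >>> v[[0, 2], [1, 3]] = 1
#   # fills the whole 2x2 outer-product block, not just cells (0, 1) and (2, 3)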
@property
def attrs(self) -> Dict[Hashable, Any]:
"""Dictionary of local attributes on this variable."""
if self._attrs is None:
self._attrs = {}
return self._attrs
@attrs.setter
def attrs(self, value: Mapping[Hashable, Any]) -> None:
self._attrs = dict(value)
@property
def encoding(self):
"""Dictionary of encodings on this variable."""
if self._encoding is None:
self._encoding = {}
return self._encoding
@encoding.setter
def encoding(self, value):
try:
self._encoding = dict(value)
except ValueError:
raise ValueError("encoding must be castable to a dictionary")
def copy(self, deep=True, data=None):
"""Returns a copy of this object.
If `deep=True`, the data array is loaded into memory and copied onto
the new object. Dimensions, attributes and encodings are always copied.
Use `data` to create a new object with the same structure as
original but entirely new data.
Parameters
----------
deep : bool, optional
Whether the data array is loaded into memory and copied onto
the new object. Default is True.
data : array_like, optional
Data to use in the new object. Must have same shape as original.
When `data` is used, `deep` is ignored.
Returns
-------
object : Variable
New object with dimensions, attributes, encodings, and optionally
data copied from original.
Examples
--------
Shallow copy versus deep copy
>>> var = xr.Variable(data=[1, 2, 3], dims="x")
>>> var.copy()
<xarray.Variable (x: 3)>
array([1, 2, 3])
>>> var_0 = var.copy(deep=False)
>>> var_0[0] = 7
>>> var_0
<xarray.Variable (x: 3)>
array([7, 2, 3])
>>> var
<xarray.Variable (x: 3)>
array([7, 2, 3])
Changing the data using the ``data`` argument maintains the
structure of the original object, but with the new data. Original
object is unaffected.
>>> var.copy(data=[0.1, 0.2, 0.3])
<xarray.Variable (x: 3)>
array([0.1, 0.2, 0.3])
>>> var
<xarray.Variable (x: 3)>
array([7, 2, 3])
See Also
--------
pandas.DataFrame.copy
"""
if data is None:
data = self._data
if isinstance(data, indexing.MemoryCachedArray):
# don't share caching between copies
data = indexing.MemoryCachedArray(data.array)
if deep:
data = copy.deepcopy(data)
else:
data = as_compatible_data(data)
if self.shape != data.shape:
raise ValueError(
"Data shape {} must match shape of object {}".format(
data.shape, self.shape
)
)
# note:
# dims is already an immutable tuple
# attributes and encoding will be copied when the new Array is created
return self._replace(data=data)
def _replace(
self, dims=_default, data=_default, attrs=_default, encoding=_default
) -> "Variable":
if dims is _default:
dims = copy.copy(self._dims)
if data is _default:
data = copy.copy(self.data)
if attrs is _default:
attrs = copy.copy(self._attrs)
if encoding is _default:
encoding = copy.copy(self._encoding)
return type(self)(dims, data, attrs, encoding, fastpath=True)
def __copy__(self):
return self.copy(deep=False)
def __deepcopy__(self, memo=None):
# memo does nothing but is required for compatibility with
# copy.deepcopy
return self.copy(deep=True)
# mutable objects should not be hashable
# https://github.com/python/mypy/issues/4266
__hash__ = None # type: ignore
@property
def chunks(self):
"""Block dimensions for this array's data or None if it's not a dask
array.
"""
return getattr(self._data, "chunks", None)
_array_counter = itertools.count()
def chunk(self, chunks={}, name=None, lock=False):
"""Coerce this array's data into a dask arrays with the given chunks.
If this variable is a non-dask array, it will be converted to dask
array. If it's a dask array, it will be rechunked to the given chunk
sizes.
If chunks are not provided for one or more dimensions, chunk
sizes along that dimension will not be updated; non-dask arrays will be
converted into dask arrays with a single block.
Parameters
----------
chunks : int, tuple or dict, optional
Chunk sizes along each dimension, e.g., ``5``, ``(5, 5)`` or
``{'x': 5, 'y': 5}``.
name : str, optional
Used to generate the name for this array in the internal dask
graph. Does not | |
},
{ 'type': TYPE_UINT32, 'name': 'eventId' },
{ 'type': TYPE_UINT32, 'name': 'deviceId' },
{ 'type': TYPE_UINT32, 'name': 'eventSecond' },
{ 'type': TYPE_UINT32, 'name': 'impact' },
{ 'type': TYPE_IPV6, 'name': 'sourceIpAddress' },
{ 'type': TYPE_IPV6, 'name': 'destinationIpAddress' },
{ 'block': BLOCK_STRING, 'name': 'description' }],
# 156
BLOCK_CORRELATION_EVENT_54: [
{ 'type': TYPE_UINT32, 'name': 'blockType' },
{ 'type': TYPE_UINT32, 'name': 'blockLength' },
{ 'type': TYPE_UINT32, 'name': 'deviceId' },
{ 'type': TYPE_UINT32, 'name': 'correlationEventSecond' },
{ 'type': TYPE_UINT32, 'name': 'eventId' },
{ 'type': TYPE_UINT32, 'name': 'policyId' },
{ 'type': TYPE_UINT32, 'name': 'ruleId' },
{ 'type': TYPE_UINT32, 'name': 'priority' },
{ 'block': BLOCK_STRING, 'name': 'eventDescription' },
{ 'type': TYPE_BYTE, 'name': 'eventType' },
{ 'type': TYPE_UINT32, 'name': 'eventDeviceId' },
{ 'type': TYPE_UINT32, 'name': 'signatureId' },
{ 'type': TYPE_UINT32, 'name': 'signatureGeneratorId' },
{ 'type': TYPE_UINT32, 'name': 'triggerEventSecond' },
{ 'type': TYPE_UINT32, 'name': 'triggerEventMicrosecond' },
{ 'type': TYPE_UINT32, 'name': 'deviceEventId' },
{ 'type': TYPE_UINT32, 'name': 'eventDefinedMask' },
{ 'type': TYPE_BYTE, 'name': 'eventImpactFlags' },
{ 'type': TYPE_BYTE, 'name': 'ipProtocol' },
{ 'type': TYPE_UINT16, 'name': 'networkProtocol' },
{ 'type': TYPE_UINT32, 'name': 'sourceIp' }, # No longer used
{ 'type': TYPE_BYTE, 'name': 'sourceHostType' },
{ 'type': TYPE_UINT16, 'name': 'sourceVlanId' },
{ 'type': TYPE_UUID, 'name': 'sourceOperatingSystemFingerprintUuid' },
{ 'type': TYPE_UINT16, 'name': 'sourceCriticality' },
{ 'type': TYPE_UINT32, 'name': 'sourceUserId' },
{ 'type': TYPE_UINT16, 'name': 'sourcePort' },
{ 'type': TYPE_UINT32, 'name': 'sourceServerId' },
{ 'type': TYPE_UINT32, 'name': 'destinationIp' }, # No longer used
{ 'type': TYPE_BYTE, 'name': 'destinationHostType' },
{ 'type': TYPE_UINT16, 'name': 'destinationVlanId' },
{ 'type': TYPE_UUID, 'name': 'destinationOperatingSystemFingerprintUuid' },
{ 'type': TYPE_UINT16, 'name': 'destinationCriticality' },
{ 'type': TYPE_UINT32, 'name': 'destinationUserId' },
{ 'type': TYPE_UINT16, 'name': 'destinationPort' },
{ 'type': TYPE_UINT32, 'name': 'destinationServerId' },
{ 'type': TYPE_BYTE, 'name': 'impact' },
{ 'type': TYPE_BYTE, 'name': 'blocked' },
{ 'type': TYPE_UUID, 'name': 'intrusionPolicy' },
{ 'type': TYPE_UINT32, 'name': 'ruleAction' },
{ 'block': BLOCK_STRING, 'name': 'netbios' },
{ 'type': TYPE_UINT32, 'name': 'urlCategory' },
{ 'type': TYPE_UINT32, 'name': 'urlReputation' },
{ 'block': BLOCK_STRING, 'name': 'url' },
{ 'type': TYPE_UINT32, 'name': 'clientId' },
{ 'block': BLOCK_STRING, 'name': 'clientVersion' },
{ 'type': TYPE_UUID, 'name': 'accessControlPolicyRevision' },
{ 'type': TYPE_UINT32, 'name': 'accessControlRuleId' },
{ 'type': TYPE_UUID, 'name': 'ingressIntefaceUuid' },
{ 'type': TYPE_UUID, 'name': 'egressIntefaceUuid' },
{ 'type': TYPE_UUID, 'name': 'ingressZoneUuid' },
{ 'type': TYPE_UUID, 'name': 'egressZoneUuid' },
{ 'type': TYPE_IPV6, 'name': 'sourceIpv6Address' },
{ 'type': TYPE_IPV6, 'name': 'destinationIpv6Address' },
{ 'type': TYPE_UINT16, 'name': 'sourceCountry' },
{ 'type': TYPE_UINT16, 'name': 'destinationCountry' },
{ 'type': TYPE_UUID, 'name': 'securityIntelligenceUuid' },
{ 'type': TYPE_UINT128, 'name': 'securityContext' },
{ 'type': TYPE_UINT128, 'name': 'sslPolicyId' },
{ 'type': TYPE_UINT32, 'name': 'sslRuleId' },
{ 'type': TYPE_UINT32, 'name': 'sslActualAction' },
{ 'type': TYPE_UINT32, 'name': 'sslFlowStatus' },
{ 'type': TYPE_UINT160, 'name': 'sslCertificateFingerprint'}],
# 158
BLOCK_USER_INFORMATION_DATA_60: [
{ 'type': TYPE_UINT32, 'name': 'blockType' },
{ 'type': TYPE_UINT32, 'name': 'blockLength' },
{ 'type': TYPE_UINT32, 'name': 'userId' },
{ 'block': BLOCK_STRING, 'name': 'username' },
{ 'type': TYPE_UINT32, 'name': 'realmId' },
{ 'type': TYPE_UINT32, 'name': 'protocol' },
{ 'block': BLOCK_STRING, 'name': 'firstName' },
{ 'block': BLOCK_STRING, 'name': 'lastName' },
{ 'block': BLOCK_STRING, 'name': 'email' },
{ 'block': BLOCK_STRING, 'name': 'department' },
{ 'block': BLOCK_STRING, 'name': 'phone' },
{ 'type': TYPE_UINT32, 'name': 'endpointProfileID' },
{ 'type': TYPE_UINT32, 'name': 'securityGroupId' },
{ 'type': TYPE_UINT128, 'name': 'locationIpv6Address' }],
# 159
BLOCK_USER_LOGIN_INFORMATION_60: [
{ 'type': TYPE_UINT32, 'name': 'blockType' },
{ 'type': TYPE_UINT32, 'name': 'blockLength' },
{ 'type': TYPE_UINT32, 'name': 'timestamp' },
{ 'type': TYPE_IPV4, 'name': 'ipv4Address' },
{ 'block': BLOCK_STRING, 'name': 'username' },
{ 'block': BLOCK_STRING, 'name': 'domain' },
{ 'type': TYPE_UINT32, 'name': 'userId' },
{ 'type': TYPE_UINT32, 'name': 'realmId' },
{ 'type': TYPE_UINT32, 'name': 'endpointProfileId' },
{ 'type': TYPE_UINT32, 'name': 'securityGroupId' },
{ 'type': TYPE_UINT32, 'name': 'applicationId' },
{ 'type': TYPE_UINT32, 'name': 'protocol' },
{ 'block': BLOCK_STRING, 'name': 'email' },
{ 'type': TYPE_IPV6, 'name': 'ipv6Address' },
{ 'type': TYPE_IPV6, 'name': 'locationIpv6Address' },
{ 'type': TYPE_BYTE, 'name': 'loginType' },
{ 'type': TYPE_BYTE, 'name': 'authType' },
{ 'block': BLOCK_STRING, 'name': 'reportedBy' }],
# 160
BLOCK_CONNECTION_STATISTICS_60: [
# Documentation wrong. Missing @pad below
# and ruleReason incorrectly specified as int16
{ 'type': TYPE_UINT32, 'name': 'blockType' },
{ 'type': TYPE_UINT32, 'name': 'blockLength' },
{ 'type': TYPE_UINT32, 'name': 'deviceId' },
{ 'type': TYPE_UUID, 'name': 'ingressZone' },
{ 'type': TYPE_UUID, 'name': 'egressZone' },
{ 'type': TYPE_UUID, 'name': 'ingressInterface' },
{ 'type': TYPE_UUID, 'name': 'egressInterface' },
{ 'type': TYPE_IPV6, 'name': 'initiatorIpAddress' },
{ 'type': TYPE_IPV6, 'name': 'responderIpAddress' },
{ 'type': TYPE_UUID, 'name': 'policyRevision' },
{ 'type': TYPE_UINT32, 'name': 'ruleId' },
{ 'type': TYPE_UINT16, 'name': 'ruleAction' },
{ 'type': TYPE_UINT32, 'name': 'ruleReason' },
{ 'type': TYPE_UINT16, 'name': 'initiatorPort' },
{ 'type': TYPE_UINT16, 'name': 'responderPort' },
{ 'type': TYPE_UINT16, 'name': 'tcpFlag' },
{ 'type': TYPE_BYTE, 'name': 'protocol' },
{ 'type': TYPE_UUID, 'name': 'netflowSource' },
{ 'type': TYPE_UINT16, 'name': 'instanceId' },
{ 'type': TYPE_UINT16, 'name': 'connectionCounter' },
{ 'type': TYPE_UINT32, 'name': 'firstPacketTimestamp' },
{ 'type': TYPE_UINT32, 'name': 'lastPacketTimestamp' },
{ 'type': TYPE_UINT64, 'name': 'initiatorTransmittedPackets' },
{ 'type': TYPE_UINT64, 'name': 'responderTransmittedPackets' },
{ 'type': TYPE_UINT64, 'name': 'initiatorTransmittedBytes' },
{ 'type': TYPE_UINT64, 'name': 'responderTransmittedBytes' },
{ 'type': TYPE_UINT32, 'name': 'userId' },
{ 'type': TYPE_UINT32, 'name': 'applicationId' }, #applicationProtocolId
{ 'type': TYPE_UINT32, 'name': 'urlCategory' },
{ 'type': TYPE_UINT32, 'name': 'urlReputation' },
{ 'type': TYPE_UINT32, 'name': 'clientApplicationId' },
{ 'type': TYPE_UINT32, 'name': 'webApplicationId' },
{ 'block': BLOCK_STRING, 'name': 'clientUrl' },
{ 'block': BLOCK_STRING, 'name': 'netbios' },
{ 'block': BLOCK_STRING, 'name': 'clientApplicationVersion' },
{ 'type': TYPE_UINT32, 'name': 'monitorRule1' },
{ 'type': TYPE_UINT32, 'name': 'monitorRule2' },
{ 'type': TYPE_UINT32, 'name': 'monitorRule3' },
{ 'type': TYPE_UINT32, 'name': 'monitorRule4' },
{ 'type': TYPE_UINT32, 'name': 'monitorRule5' },
{ 'type': TYPE_UINT32, 'name': 'monitorRule6' },
{ 'type': TYPE_UINT32, 'name': 'monitorRule7' },
{ 'type': TYPE_UINT32, 'name': 'monitorRule8' },
{ 'type': TYPE_BYTE, 'name': 'securityIntelligenceSourceDestination' },
{ 'type': TYPE_BYTE, 'name': 'securityIntelligenceLayer' },
{ 'type': TYPE_UINT16, 'name': 'fileEventCount' },
{ 'type': TYPE_UINT16, 'name': 'intrusionEventCount' },
{ 'type': TYPE_UINT16, 'name': 'initiatorCountry' },
{ 'type': TYPE_UINT16, 'name': 'responderCountry' },
{ 'type': TYPE_UINT16, 'name': 'iocNumber' },
{ 'type': TYPE_UINT32, 'name': 'sourceAutonomousSystem' },
{ 'type': TYPE_UINT32, 'name': 'destinationAutonomousSystem' },
{ 'type': TYPE_UINT16, 'name': 'snmpIn' },
{ 'type': TYPE_UINT16, 'name': 'snmpOut' },
{ 'type': TYPE_BYTE, 'name': 'sourceTos' },
{ 'type': TYPE_BYTE, 'name': 'destinationTos' },
{ 'type': TYPE_BYTE, 'name': 'sourceMask' },
{ 'type': TYPE_BYTE, 'name': 'destinationMask' },
{ 'type': TYPE_UINT128, 'name': 'securityContext' },
{ 'type': TYPE_UINT16, 'name': 'vlanId' },
{ 'block': BLOCK_STRING, 'name': 'referencedHost' },
{ 'block': BLOCK_STRING, 'name': 'userAgent' },
{ 'block': BLOCK_STRING, 'name': 'httpReferrer' },
{ 'type': TYPE_UINT160, 'name': 'sslCertificateFingerprint' },
{ 'type': TYPE_UINT128, 'name': 'sslPolicyId' },
{ 'type': TYPE_UINT32, 'name': 'sslRuleId' },
{ 'type': TYPE_UINT16, 'name': 'sslCipherSuite' },
{ 'type': TYPE_BYTE, 'name': 'sslVersion' },
{ 'type': TYPE_UINT32, 'name': 'sslServerCertificateStatus' },
{ 'type': TYPE_UINT16, 'name': 'sslActualAction' },
{ 'type': TYPE_UINT16, 'name': 'sslExpectedAction' },
{ 'type': TYPE_UINT16, 'name': 'sslFlowStatus' },
{ 'type': TYPE_UINT32, 'name': 'sslFlowError' },
{ 'type': TYPE_UINT32, 'name': 'sslFlowMessages' },
{ 'type': TYPE_UINT64, 'name': 'sslFlowFlags' },
{ 'block': BLOCK_STRING, 'name': 'sslServerName' },
{ 'type': TYPE_UINT32, 'name': 'sslUrlCategory' },
{ 'type': TYPE_UINT256, 'name': 'sslSessionId' },
{ 'type': TYPE_BYTE, 'name': 'sslSessionIdLength' },
{ 'type': TYPE_UINT160, 'name': 'sslTicketId' },
{ 'type': TYPE_BYTE, 'name': 'sslTicketIdLength' },
{ 'type': TYPE_UUID, 'name': 'networkAnalysisPolicyRevision' },
{ 'type': TYPE_UINT32, 'name': 'endpointProfileId' },
{ 'type': TYPE_UINT32, 'name': 'securityGroupId' },
{ 'type': TYPE_IPV6, 'name': 'locationIpv6' },
{ 'type': TYPE_UINT32, 'name': 'httpResponse' },
{ 'block': BLOCK_STRING, 'name': 'dnsQuery' },
{ 'type': TYPE_UINT16, 'name': 'dnsRecordType' },
{ 'type': TYPE_UINT16, 'name': 'dnsResponseType' },
{ 'type': TYPE_UINT32, 'name': 'dnsTtl' },
{ 'type': TYPE_UUID, 'name': 'sinkholeUuid' },
{ 'type': TYPE_UINT32, 'name': 'securityIntelligenceList1' },
{ 'type': TYPE_UINT32, 'name': 'securityIntelligenceList2'}],
# 163
BLOCK_CONNECTION_STATISTICS_61: [
# Documentation wrong. Missing @pad below
{ 'type': TYPE_UINT32, 'name': 'blockType' },
{ 'type': TYPE_UINT32, 'name': 'blockLength' },
{ 'type': TYPE_UINT32, 'name': 'deviceId' },
{ 'type': TYPE_UUID, 'name': 'ingressZone' },
{ 'type': TYPE_UUID, 'name': 'egressZone' },
{ 'type': TYPE_UUID, 'name': 'ingressInterface' },
{ 'type': TYPE_UUID, 'name': 'egressInterface' },
{ 'type': TYPE_IPV6, 'name': 'initiatorIpAddress' },
{ 'type': TYPE_IPV6, 'name': 'responderIpAddress' },
{ 'type': TYPE_IPV6, 'name': 'originalClientIpAddress' },
{ 'type': TYPE_UUID, | |
Default is 0.8.
sampleRateHz : int
Samples rate of the audio for playback.
channels : int
Number of channels for the output.
Returns
-------
AudioClip
"""
samples = squaretone(duration, freqHz, dutyCycle, gain, sampleRateHz)
if channels > 1:
samples = np.tile(samples, (1, channels)).astype(np.float32)
return AudioClip(samples, sampleRateHz=sampleRateHz)
@staticmethod
def sawtooth(duration=1.0, freqHz=440, peak=1.0, gain=0.8,
sampleRateHz=SAMPLE_RATE_48kHz, channels=2):
"""Generate audio samples for a tone with a sawtooth waveform.
Parameters
----------
duration : float or int
Length of the sound in seconds.
freqHz : float or int
Frequency of the tone in Hertz (Hz). Note that this differs from the
`sampleRateHz`.
peak : float
Location of the peak between 0.0 and 1.0. If the peak is at 0.5, the
resulting wave will be triangular. A value of 1.0 will cause the
peak to be located at the very end of a cycle.
gain : float
Gain factor ranging between 0.0 and 1.0. Default is 0.8.
sampleRateHz : int
Samples rate of the audio for playback.
channels : int
Number of channels for the output.
Returns
-------
AudioClip
"""
samples = sawtone(duration, freqHz, peak, gain, sampleRateHz)
if channels > 1:
samples = np.tile(samples, (1, channels)).astype(np.float32)
return AudioClip(samples, sampleRateHz=sampleRateHz)
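# Sketch of using the tone generators above (parameter values are illustrative):
#
#   >>> snd = AudioClip.sawtooth(duration=0.5, freqHz=220, peak=0.5)  # peak=0.5 gives a triangle wave
#   >>> snd.channels
#   2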
# --------------------------------------------------------------------------
# Audio editing methods
#
# Methods related to basic editing of audio samples (operations such as
# splicing clips and signal gain).
#
def __add__(self, other):
"""Concatenate two audio clips."""
assert other.sampleRateHz == self._sampleRateHz
assert other.channels == self.channels
newSamples = np.ascontiguousarray(
np.vstack((self._samples, other.samples)),
dtype=np.float32)
toReturn = AudioClip(
samples=newSamples,
sampleRateHz=self._sampleRateHz)
return toReturn
def __iadd__(self, other):
"""Concatenate two audio clips inplace."""
assert other.sampleRateHz == self._sampleRateHz
assert other.channels == self.channels
self._samples = np.ascontiguousarray(
np.vstack((self._samples, other.samples)),
dtype=np.float32)
return self
def append(self, clip):
"""Append samples from another sound clip to the end of this one.
The `AudioClip` object must have the same sample rate and channels as
this object.
Parameters
----------
clip : AudioClip
Audio clip to append.
Returns
-------
AudioClip
This object with samples from `clip` appended.
Examples
--------
Join two sound clips together::
snd1.append(snd2)
"""
# if either clip is empty, just replace it
if len(self.samples) == 0:
return clip
if len(clip.samples) == 0:
return self
assert self.channels == clip.channels
assert self._sampleRateHz == clip.sampleRateHz
self._samples = np.ascontiguousarray(
np.vstack((self._samples, clip.samples)),
dtype=np.float32)
# recompute the duration of the new clip
self._duration = len(self.samples) / float(self.sampleRateHz)
return self
def copy(self):
"""Create an independent copy of this `AudioClip`.
Returns
-------
AudioClip
"""
return AudioClip(
samples=self._samples.copy(),
sampleRateHz=self._sampleRateHz)
def gain(self, factor, channel=None):
"""Apply gain the audio samples.
This will modify the internal store of samples inplace. Clipping is
automatically applied to samples after applying gain.
Parameters
----------
factor : float or int
Gain factor to multiply audio samples.
channel : int or None
Channel to apply gain to. If `None`, gain will be applied to all
channels.
"""
try:
arrview = self._samples[:, :] \
if channel is None else self._samples[:, channel]
except IndexError:
raise ValueError('Invalid value for `channel`.')
# multiply and clip range
arrview *= float(factor)
arrview.clip(-1, 1, out=arrview)
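# Example use of gain(), as a sketch (assumes an existing AudioClip `snd`):
#
#   >>> snd.gain(0.5)              # halve the level on every channel
#   >>> snd.gain(1.5, channel=0)   # boost only the first channel; samples stay within [-1, 1]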
# --------------------------------------------------------------------------
# Audio analysis methods
#
# Methods related to basic analysis of audio samples, nothing too advanced
# but still useful.
#
def rms(self, channel=None):
"""Compute the root mean square (RMS) of the samples to determine the
average signal level.
Parameters
----------
channel : int or None
Channel to compute RMS (zero-indexed). If `None`, the RMS of all
channels will be computed.
Returns
-------
ndarray or float
An array of RMS values for each channel if ``channel=None`` (even if
there is one channel an array is returned). If `channel` *was*
specified, a `float` will be returned indicating the RMS of that
single channel.
"""
if channel is not None:
assert 0 <= channel < self.channels
arr = self._samples if channel is None else self._samples[:, channel]
rms = np.sqrt(np.mean(np.square(arr), axis=0))
return rms if channel is None else float(rms)
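# Sketch of reading signal levels with rms() (assumes a stereo AudioClip `snd`):
#
#   >>> snd.rms()            # per-channel RMS, e.g. array([0.21, 0.19])
#   >>> snd.rms(channel=0)   # RMS of the first channel as a float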
# --------------------------------------------------------------------------
# Properties
#
@property
def samples(self):
"""Nx1 or Nx2 array of audio samples (`~numpy.ndarray`).
Values must range from -1 to 1. Values outside that range will be
clipped, possibly resulting in distortion.
"""
return self._samples
@samples.setter
def samples(self, value):
self._samples = np.asarray(value, dtype=float) # convert to array
self._samples = self._samples.clip(-1., 1.) # do clipping to keep samples in range
# recompute duration after updating samples
self._duration = len(self._samples) / float(self._sampleRateHz)
@property
def sampleRateHz(self):
"""Sample rate of the audio clip in Hz (`int`). Should be the same
value as the rate `samples` was captured at.
"""
return self._sampleRateHz
@sampleRateHz.setter
def sampleRateHz(self, value):
self._sampleRateHz = int(value)
# recompute duration after updating sample rate
self._duration = len(self._samples) / float(self._sampleRateHz)
@property
def duration(self):
"""The duration of the audio in seconds (`float`).
This value is computed using the specified sampling frequency and number
of samples.
"""
return self._duration
@property
def channels(self):
"""Number of audio channels in the clip (`int`).
If `channels` > 1, the audio clip is in stereo.
"""
return self._samples.shape[1]
@property
def isStereo(self):
"""`True` if there are two channels of audio samples.
Usually one for each ear. The first channel is usually the left ear, and
the second the right.
"""
return not self.isMono # are we moving in stereo? ;)
@property
def isMono(self):
"""`True` if there is only one channel of audio data.
"""
return self._samples.shape[1] == 1
@property
def userData(self):
"""User data associated with this clip (`dict`). Can be used for storing
additional data related to the clip. Note that `userData` is not saved
with audio files!
Example
-------
Adding fields to `userData`. For instance, we want to associated the
start time the clip was recorded at with it::
myClip.userData['date_recorded'] = t_start
We can access that field later by::
thisRecordingStartTime = myClip.userData['date_recorded']
"""
return self._userData
@userData.setter
def userData(self, value):
assert isinstance(value, dict)
self._userData = value
def convertToWAV(self):
"""Get a copy of stored audio samples in WAV PCM format.
Returns
-------
ndarray
Array with the same shapes as `.samples` but in 16-bit WAV PCM
format.
"""
return np.asarray(
self._samples * ((1 << 15) - 1), dtype=np.int16).tobytes()
def asMono(self, copy=True):
"""Convert the audio clip to mono (single channel audio).
Parameters
----------
copy : bool
If `True` an :class:`~psychopy.sound.AudioClip` containing a copy
of the samples will be returned. If `False`, channels will be
mixed inplace, resulting in the same object being returned. User data
is not copied.
Returns
-------
:class:`~psychopy.sound.AudioClip`
Mono version of this object.
"""
samples = np.atleast_2d(self._samples) # enforce 2D
if samples.shape[1] > 1:
samplesMixed = np.atleast_2d(
np.sum(samples, axis=1, dtype=np.float32) / np.float32(2.)).T
else:
samplesMixed = samples.copy()
if copy:
return AudioClip(samplesMixed, self.sampleRateHz)
self._samples = samplesMixed # overwrite
return self
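# Sketch of downmixing with asMono() (assumes a stereo AudioClip `snd`):
#
#   >>> mono = snd.asMono()       # returns a new clip with the two channels averaged
#   >>> mono.isMono, snd.isStereo
#   (True, True)
#   >>> snd.asMono(copy=False)    # mixes in place and returns the same object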
def transcribe(self, engine='sphinx', language='en-US', expectedWords=None,
config=None):
"""Convert speech in audio to text.
This feature passes the audio clip samples to a specified text-to-speech
engine which will attempt to transcribe any speech within. The efficacy
of the transcription depends on the engine selected, audio quality, and
language support. By default, Pocket Sphinx is used which provides
decent transcription capabilities offline for English and a few other
languages. For more robust transcription capabilities with a greater
range of language support, online providers such as Google may be used.
Speech-to-text conversion blocks the main application thread when used
on Python. Don't transcribe audio during time-sensitive parts of your
experiment! This issue is known to the developers and will be fixed in a
later release.
Parameters
----------
engine : str
Speech-to-text engine to use. Can be one of 'sphinx' for CMU Pocket
Sphinx or 'google' for Google Cloud.
language : str
BCP-47 language code (eg., 'en-US'). Note that supported languages
vary between transcription engines.
expectedWords : list or tuple
List of strings representing expected words or phrases. This will
constrain the possible output words to the ones specified. Note not
all engines support this feature (only Sphinx and Google Cloud do at
this time). A warning will be logged if the engine selected does not
support this feature. CMU PocketSphinx has an additional feature
where the | |
<filename>gamemodes/comDifficulty/hard.py
import gamemodes.assets.turnCounter
from random import randint
from tkinter import *
import platform
# Player is O, Computer is X.
def playerVsComputer(ticTacPyApp):
field = [[" ", " ", " "], [" ", " ", " "], [" ", " ", " "]]
def fieldFilled():
notice = Toplevel(gamepvcom)
notice.geometry()
notice.resizable(width=0, height=0)
if platform.system() == "Windows":
notice.iconbitmap("ttpicon.ico")
noticeText = Label(notice, text=ticTacPyApp.fieldFilledWarning, font=("Arial", 16))
noticeText.pack()
noticeButton = Button(notice, text=ticTacPyApp.okay, font=("Arial", 12))
noticeButton["command"] = notice.destroy
noticeButton.pack()
placeholder7 = Label(notice, text="", font=("Arial", 2))
placeholder7.pack()
def playerWon():
def effectOfYes():
playerOnFirstPlace.destroy()
wannaPlayMore()
# Resetting the turns
gamemodes.assets.turnCounter.playerOne = 0
gamemodes.assets.turnCounter.playerTwo = 0
playerOnFirstPlace = Toplevel(gamepvcom)
playerOnFirstPlace.geometry()
playerOnFirstPlace.resizable(width=0, height=0)
if platform.system() == "Windows":
playerOnFirstPlace.iconbitmap("ttpicon.ico")
winnerText = Label(playerOnFirstPlace, text=f"{ticTacPyApp.playerWonText}\n{ticTacPyApp.playAgainQuestion}", font=("Arial", 16))
winnerText.grid(row=0, column=0, columnspan=3)
yesButton = Button(playerOnFirstPlace, text=ticTacPyApp.yes, font=("Arial", 12))
yesButton["command"] = effectOfYes
yesButton.grid(row=1, column=0)
noButton = Button(playerOnFirstPlace, text=ticTacPyApp.no, font=("Arial", 12))
noButton["command"] = doneWithPlaying
noButton.grid(row=1, column=2)
placeholder7 = Label(playerOnFirstPlace, text="", font=("Arial", 2))
placeholder7.grid(row=2)
def playerLost():
def effectOfYes():
playerOnSecondPlace.destroy()
wannaPlayMore()
# Resetting the turns
gamemodes.assets.turnCounter.playerOne = 0
gamemodes.assets.turnCounter.playerTwo = 0
playerOnSecondPlace = Toplevel(gamepvcom)
playerOnSecondPlace.geometry()
playerOnSecondPlace.resizable(width=0, height=0)
if platform.system() == "Windows":
playerOnSecondPlace.iconbitmap("ttpicon.ico")
loserText = Label(playerOnSecondPlace, text=f"{ticTacPyApp.playerLostText}\n{ticTacPyApp.playAgainQuestion}", font=("Arial", 16))
loserText.grid(row=0, column=0, columnspan=3)
yesButton = Button(playerOnSecondPlace, text=ticTacPyApp.yes, font=("Arial", 12))
yesButton["command"] = effectOfYes
yesButton.grid(row=1, column=0)
noButton = Button(playerOnSecondPlace, text=ticTacPyApp.no, font=("Arial", 12))
noButton["command"] = doneWithPlaying
noButton.grid(row=1, column=2)
placeholder7 = Label(playerOnSecondPlace, text="", font=("Arial", 2))
placeholder7.grid(row=2)
def nobodyWon():
def effectOfYes():
playerEqual.destroy()
wannaPlayMore()
# Resetting the turns
gamemodes.assets.turnCounter.playerOne = 0
gamemodes.assets.turnCounter.playerTwo = 0
playerEqual = Toplevel(gamepvcom)
playerEqual.geometry()
playerEqual.resizable(width=0, height=0)
if platform.system() == "Windows":
playerEqual.iconbitmap("ttpicon.ico")
equalText = Label(playerEqual, text=f"{ticTacPyApp.tieText}\n{ticTacPyApp.playAgainQuestion}", font=("Arial", 16))
equalText.grid(row=0, column=0, columnspan=3)
yesButton = Button(playerEqual, text=ticTacPyApp.yes, font=("Arial", 12))
yesButton["command"] = effectOfYes
yesButton.grid(row=1, column=0)
noButton = Button(playerEqual, text=ticTacPyApp.no, font=("Arial", 12))
noButton["command"] = doneWithPlaying
noButton.grid(row=1, column=2)
placeholder7 = Label(playerEqual, text="", font=("Arial", 2))
placeholder7.grid(row=2)
def doneWithPlaying():
gamepvcom.destroy()
# New code
field[0][0] = " "
field[0][1] = " "
field[0][2] = " "
field[1][0] = " "
field[1][1] = " "
field[1][2] = " "
field[2][0] = " "
field[2][1] = " "
field[2][2] = " "
def wannaPlayMore():
# New code
field[0][0] = " "
field[0][1] = " "
field[0][2] = " "
field[1][0] = " "
field[1][1] = " "
field[1][2] = " "
field[2][0] = " "
field[2][1] = " "
field[2][2] = " "
# Hiding player marks
buttonForCellZeroP.grid_forget()
buttonForCellOneP.grid_forget()
buttonForCellTwoP.grid_forget()
buttonForCellThreeP.grid_forget()
buttonForCellFourP.grid_forget()
buttonForCellFiveP.grid_forget()
buttonForCellSixP.grid_forget()
buttonForCellSevenP.grid_forget()
buttonForCellEightP.grid_forget()
# Hiding computer marks
buttonForCellZeroCom.grid_forget()
buttonForCellOneCom.grid_forget()
buttonForCellTwoCom.grid_forget()
buttonForCellThreeCom.grid_forget()
buttonForCellFourCom.grid_forget()
buttonForCellFiveCom.grid_forget()
buttonForCellSixCom.grid_forget()
buttonForCellSevenCom.grid_forget()
buttonForCellEightCom.grid_forget()
# Placing empty cells back
buttonForCellZero.grid(row=1, column=1)
buttonForCellOne.grid(row=1, column=3)
buttonForCellTwo.grid(row=1, column=5)
buttonForCellThree.grid(row=3, column=1)
buttonForCellFour.grid(row=3, column=3)
buttonForCellFive.grid(row=3, column=5)
buttonForCellSix.grid(row=5, column=1)
buttonForCellSeven.grid(row=5, column=3)
buttonForCellEight.grid(row=5, column=5)
def checkState():
# Checking rows
if field[0][0] == field[0][1] == field[0][2]:
if field[0][0] == "O":
playerWon()
elif field[0][0] == "X":
playerLost()
elif field[1][0] == field[1][1] == field[1][2]:
if field[1][0] == "O":
playerWon()
elif field[1][0] == "X":
playerLost()
elif field[2][0] == field[2][1] == field[2][2]:
if field[2][0] == "O":
playerWon()
elif field[2][0] == "X":
playerLost()
# Checking columns
elif field[0][0] == field[1][0] == field[2][0]:
if field[0][0] == "O":
playerWon()
elif field[0][0] == "X":
playerLost()
elif field[0][1] == field[1][1] == field[2][1]:
if field[0][1] == "O":
playerWon()
elif field[0][1] == "X":
playerLost()
elif field[0][2] == field[1][2] == field[2][2]:
if field[0][2] == "O":
playerWon()
elif field[0][2] == "X":
playerLost()
# Checking diagonals (Sorry for my bad English, I primarily speak German. :( )
elif field[0][0] == field[1][1] == field[2][2]:
if field[0][0] == "O":
playerWon()
elif field[0][0] == "X":
playerLost()
elif field[0][2] == field[1][1] == field[2][0]:
if field[0][2] == "O":
playerWon()
elif field[0][2] == "X":
playerLost()
# Checking if full
elif field[0][0] != " " and field[0][1] != " " and field[0][2] != " " and field[1][0] != " ":
if field[1][1] != " " and field[1][2] != " " and field[2][0] != " ":
if field[2][1] != " " and field[2][2] != " ":
nobodyWon()
# Here is the real game
def fieldZeroPlayer():
if field[0][0] == " ":
field[0][0] = "O"
buttonForCellZero.grid_forget()
buttonForCellZeroP.grid(row=1, column=1)
gamemodes.assets.turnCounter.playerOne += 1
computerTurn()
ticTacPyApp.gridCopy = field.copy()
def fieldOnePlayer():
if field[0][1] == " ":
field[0][1] = "O"
buttonForCellOne.grid_forget()
buttonForCellOneP.grid(row=1, column=3)
gamemodes.assets.turnCounter.playerOne += 1
computerTurn()
ticTacPyApp.gridCopy = field.copy()
def fieldTwoPlayer():
if field[0][2] == " ":
field[0][2] = "O"
buttonForCellTwo.grid_forget()
buttonForCellTwoP.grid(row=1, column=5)
gamemodes.assets.turnCounter.playerOne += 1
computerTurn()
ticTacPyApp.gridCopy = field.copy()
def fieldThreePlayer():
if field[1][0] == " ":
field[1][0] = "O"
buttonForCellThree.grid_forget()
buttonForCellThreeP.grid(row=3, column=1)
gamemodes.assets.turnCounter.playerOne += 1
computerTurn()
ticTacPyApp.gridCopy = field.copy()
def fieldFourPlayer():
if field[1][1] == " ":
field[1][1] = "O"
buttonForCellFour.grid_forget()
buttonForCellFourP.grid(row=3, column=3)
gamemodes.assets.turnCounter.playerOne += 1
computerTurn()
ticTacPyApp.gridCopy = field.copy()
def fieldFivePlayer():
if field[1][2] == " ":
field[1][2] = "O"
buttonForCellFive.grid_forget()
buttonForCellFiveP.grid(row=3, column=5)
gamemodes.assets.turnCounter.playerOne += 1
computerTurn()
ticTacPyApp.gridCopy = field.copy()
def fieldSixPlayer():
if field[2][0] == " ":
field[2][0] = "O"
buttonForCellSix.grid_forget()
buttonForCellSixP.grid(row=5, column=1)
gamemodes.assets.turnCounter.playerOne += 1
computerTurn()
ticTacPyApp.gridCopy = field.copy()
def fieldSevenPlayer():
if field[2][1] == " ":
field[2][1] = "O"
buttonForCellSeven.grid_forget()
buttonForCellSevenP.grid(row=5, column=3)
gamemodes.assets.turnCounter.playerOne += 1
computerTurn()
ticTacPyApp.gridCopy = field.copy()
def fieldEightPlayer():
if field[2][2] == " ":
field[2][2] = "O"
buttonForCellEight.grid_forget()
buttonForCellEightP.grid(row=5, column=5)
gamemodes.assets.turnCounter.playerOne += 1
computerTurn()
ticTacPyApp.gridCopy = field.copy()
def fieldZeroComputer():
if field[0][0] == " ":
field[0][0] = "X"
buttonForCellZero.grid_forget()
buttonForCellZeroCom.grid(row=1, column=1)
gamemodes.assets.turnCounter.playerTwo += 1
checkState()
else:
computerTurn()
def fieldOneComputer():
if field[0][1] == " ":
field[0][1] = "X"
buttonForCellOne.grid_forget()
buttonForCellOneCom.grid(row=1, column=3)
gamemodes.assets.turnCounter.playerTwo += 1
checkState()
else:
computerTurn()
def fieldTwoComputer():
if field[0][2] == " ":
field[0][2] = "X"
buttonForCellTwo.grid_forget()
buttonForCellTwoCom.grid(row=1, column=5)
gamemodes.assets.turnCounter.playerTwo += 1
checkState()
else:
computerTurn()
def fieldThreeComputer():
if field[1][0] == " ":
field[1][0] = "X"
buttonForCellThree.grid_forget()
buttonForCellThreeCom.grid(row=3, column=1)
gamemodes.assets.turnCounter.playerTwo += 1
checkState()
else:
computerTurn()
def fieldFourComputer():
if field[1][1] == " ":
field[1][1] = "X"
buttonForCellFour.grid_forget()
buttonForCellFourCom.grid(row=3, column=3)
gamemodes.assets.turnCounter.playerTwo += 1
checkState()
else:
computerTurn()
def fieldFiveComputer():
if field[1][2] == " ":
field[1][2] = "X"
buttonForCellFive.grid_forget()
buttonForCellFiveCom.grid(row=3, column=5)
gamemodes.assets.turnCounter.playerTwo += 1
checkState()
else:
computerTurn()
def fieldSixComputer():
if field[2][0] == " ":
field[2][0] = "X"
buttonForCellSix.grid_forget()
buttonForCellSixCom.grid(row=5, column=1)
gamemodes.assets.turnCounter.playerTwo += 1
checkState()
else:
computerTurn()
def fieldSevenComputer():
if field[2][1] == " ":
field[2][1] = "X"
buttonForCellSeven.grid_forget()
buttonForCellSevenCom.grid(row=5, column=3)
gamemodes.assets.turnCounter.playerTwo += 1
checkState()
else:
computerTurn()
def fieldEightComputer():
if field[2][2] == " ":
field[2][2] = "X"
buttonForCellEight.grid_forget()
buttonForCellEightCom.grid(row=5, column=5)
gamemodes.assets.turnCounter.playerTwo += 1
checkState()
else:
computerTurn()
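# The nine player handlers and nine computer handlers above differ only in the cell
# they touch. A sketch of a single parameterized handler (the CELL_WIDGETS mapping
# and helper name are hypothetical; they would have to be built from the existing
# buttonForCell* widgets):
#
# CELL_WIDGETS = {0: (buttonForCellZero, buttonForCellZeroP, buttonForCellZeroCom, 1, 1),
#                 1: (buttonForCellOne, buttonForCellOneP, buttonForCellOneCom, 1, 3),
#                 ...}
#
# def place_mark_sketch(cell, mark):
#     row, col = divmod(cell, 3)
#     if field[row][col] != " ":
#         return False
#     field[row][col] = mark
#     blank, player_btn, computer_btn, grid_row, grid_col = CELL_WIDGETS[cell]
#     blank.grid_forget()
#     (player_btn if mark == "O" else computer_btn).grid(row=grid_row, column=grid_col)
#     return True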
def computerTurn():
def getMove():
if i == 0:
fieldZeroComputer()
elif i == 1:
fieldOneComputer()
elif i == 2:
fieldTwoComputer()
elif i == 3:
fieldThreeComputer()
elif i == 4:
fieldFourComputer()
elif i == 5:
fieldFiveComputer()
elif i == 6:
fieldSixComputer()
elif i == 7:
fieldSevenComputer()
elif i == 8:
fieldEightComputer()
checkState()
if gamemodes.assets.turnCounter.playerOne != 0 and gamemodes.assets.turnCounter.playerOne > gamemodes.assets.turnCounter.playerTwo:
i = 0
while i < len(ticTacPyApp.gridCopy):
if ticTacPyApp.gridCopy[i] == " ":
                gridWithNewMove = ticTacPyApp.gridCopy.copy()  # copy so the trial move does not leak into the snapshot
gridWithNewMove[i] = "O"
if gridWithNewMove[0] == gridWithNewMove[1] == gridWithNewMove[2] == "O":
getMove()
break
elif gridWithNewMove[4] == gridWithNewMove[3] == gridWithNewMove[5] == "O":
getMove()
break
elif gridWithNewMove[7] == gridWithNewMove[8] == gridWithNewMove[6] == "O":
getMove()
break
elif gridWithNewMove[0] == gridWithNewMove[3] == gridWithNewMove[6] == "O":
getMove()
break
elif gridWithNewMove[1] == gridWithNewMove[4] == gridWithNewMove[7] == "O":
getMove()
break
elif gridWithNewMove[2] == gridWithNewMove[5] == gridWithNewMove[8] == "O":
getMove()
break
elif gridWithNewMove[0] == gridWithNewMove[4] == gridWithNewMove[8] == "O":
getMove()
break
elif gridWithNewMove[2] == gridWithNewMove[4] == gridWithNewMove[6] == "O":
getMove()
break
i += 1
i = 0
if gamemodes.assets.turnCounter.playerOne > gamemodes.assets.turnCounter.playerTwo:
while i < len(ticTacPyApp.gridCopy):
if ticTacPyApp.gridCopy[i] == " ":
                gridWithNewMove = ticTacPyApp.gridCopy.copy()
gridWithNewMove[i] = "X"
if gridWithNewMove[0] == gridWithNewMove[1] == gridWithNewMove[2] == "X":
getMove()
break
elif gridWithNewMove[4] == gridWithNewMove[3] == gridWithNewMove[5] == "X":
getMove()
break
elif gridWithNewMove[7] == gridWithNewMove[8] == gridWithNewMove[6] == "X":
getMove()
break
elif gridWithNewMove[0] == gridWithNewMove[3] == gridWithNewMove[6] == "X":
getMove()
break
elif gridWithNewMove[1] == gridWithNewMove[4] == gridWithNewMove[7] == "X":
getMove()
break
elif gridWithNewMove[2] == gridWithNewMove[5] == gridWithNewMove[8] == "X":
getMove()
break
elif gridWithNewMove[0] == gridWithNewMove[4] == gridWithNewMove[8] == "X":
getMove()
break
elif gridWithNewMove[2] == gridWithNewMove[4] == gridWithNewMove[6] == "X":
getMove()
break
i += 1
if gamemodes.assets.turnCounter.playerOne > gamemodes.assets.turnCounter.playerTwo:
computer = randint(0, 8)
if computer == 0:
fieldZeroComputer()
elif computer == 1:
import numpy as np
from matplotlib import pyplot as plt
from mpl_toolkits import mplot3d
import neural_net
import utils
def plot_lra_random_weight_losses(linear_regressor_ann, input_dim, output_dim, train1_x, train1_nb_examples, train1_y):
min_lra_loss = np.inf
random_weights = np.arange(-10,11)
fig = plt.figure()
fig.set_facecolor('w')
for i in random_weights:
linear_regressor_ann.set_weights(i.reshape((output_dim, input_dim)))
lra_output = linear_regressor_ann.forward(train1_x.reshape((train1_nb_examples, input_dim, output_dim)))
lra_loss = np.mean(linear_regressor_ann.loss(train1_y))
if lra_loss < min_lra_loss:
min_lra_loss = lra_loss
plt.scatter(i, lra_loss, color="blue")
plt.title('Loss for Linear regressor ANN')
plt.xlabel('weights')
plt.ylabel('loss')
print(f"Minimum loss:{min_lra_loss:.2f}")
plt.show()
def plot_tla_random_weight_losses(two_layer_ann, input_dim, output_dim, nb_of_hiddenunits, train1_x, train1_nb_examples, train1_y, randomize_first_layer):
random_weights = np.arange(-10,11)
fig = plt.figure(figsize=(12, 6))
ax = fig.add_subplot(111, projection='3d')
for i in random_weights:
for j in random_weights:
if randomize_first_layer:
two_layer_ann.set_weights_1(np.array([i, j]).reshape((nb_of_hiddenunits, input_dim)))
else:
two_layer_ann.set_weights_2(np.array([i, j]).reshape((output_dim, nb_of_hiddenunits)))
tla_output = two_layer_ann.forward(train1_x.reshape((train1_nb_examples, input_dim, output_dim)))
tla_loss = np.mean(two_layer_ann.loss(train1_y))
ax.scatter(i, j, tla_loss, color="blue")
plt.title('Loss for Two Layer ANN')
    if randomize_first_layer:
        ax.set_xlabel('weights_1[0]')
        ax.set_ylabel('weights_1[1]')
    else:
        ax.set_xlabel('weights_2[0]')
        ax.set_ylabel('weights_2[1]')
ax.set_zlabel('loss')
plt.show()
def train_lra(hyperparams, linear_regressor_ann, train_x, train_y, input_dim, output_dim, train_nb_examples, train_uniform_x_samples, label="train"):
learning_rate, nb_of_epochs, batch_size = hyperparams
fig = plt.figure()
min_lra_loss = np.inf
for epoch in range(nb_of_epochs):
for i in range(train_nb_examples//batch_size):
linear_regressor_ann.forward(train_x[i*batch_size:i*batch_size+batch_size].reshape((batch_size, input_dim, output_dim)))
linear_regressor_ann.loss(train_y[i*batch_size:i*batch_size+batch_size])
linear_regressor_ann.backward(learning_rate)
lra_output = linear_regressor_ann.forward(train_x.reshape((train_nb_examples, input_dim, output_dim)))
lra_loss = np.mean(linear_regressor_ann.loss(train_y))
print(f"Epoch:{epoch+1}, Linear regressor ANN loss:{lra_loss:.4f}")
plt.scatter(train_x, train_y)
lra_output = linear_regressor_ann.forward(train_uniform_x_samples.reshape((train_nb_examples, input_dim, output_dim)))
plt.plot(train_uniform_x_samples, lra_output.reshape((train_nb_examples, 1)), linewidth=3)
plt.title(f'Linear regressor ANN, Epoch:{epoch+1}, Training Set, Loss:{lra_loss:.4f}')
plt.xlabel('x')
plt.ylabel('y')
plt.savefig(f'gif/lra_{epoch+1:03d}.png')
plt.close()
if min_lra_loss - lra_loss > 1e-5:
min_lra_loss = lra_loss
else:
print("Stopped training")
plt.scatter(train_x, train_y)
lra_output = linear_regressor_ann.forward(train_uniform_x_samples.reshape((train_nb_examples, input_dim, output_dim)))
plt.plot(train_uniform_x_samples, lra_output.reshape((train_nb_examples, 1)), linewidth=3)
plt.title(f'Linear regressor ANN, Epoch:{epoch+1}, Training Set, Loss:{lra_loss:.4f}')
plt.xlabel('x')
plt.ylabel('y')
plt.savefig(f'output/lra_{label}.png')
plt.close()
break
def evaluate_lra(linear_regressor_ann, x, nb_examples, input_dim, output_dim, y, mode_str):
lra_output = linear_regressor_ann.forward(x.reshape((nb_examples, input_dim, output_dim)))
lra_loss = np.mean(linear_regressor_ann.loss(y))
lra_loss_std = np.std(linear_regressor_ann.loss(y))
print(f"Linear regressor ANN, {mode_str} set loss:{lra_loss:.4f}, std:{lra_loss_std:.4f}")
return lra_loss
def plot_lra_evaluation(linear_regressor_ann, x, input_dim, output_dim, y, mode_str, lra_loss, train_uniform_x_samples, train_nb_examples, label):
fig = plt.figure()
fig.set_facecolor('w')
plt.scatter(x, y)
lra_output = linear_regressor_ann.forward(train_uniform_x_samples.reshape((train_nb_examples, input_dim, output_dim)))
plt.plot(train_uniform_x_samples, lra_output.reshape((train_nb_examples, 1)), linewidth=3)
plt.title(f'Linear regressor ANN, {mode_str} Set, Loss:{np.mean(lra_loss):.4f}')
plt.xlabel('x')
plt.ylabel('y')
plt.savefig(f'output/lra_{label}.png')
plt.show()
def train_tla(data_ix, train_x, train_y, input_dim, output_dim, train_nb_examples, train_uniform_x_samples):
if data_ix == 1:
lr_config = {2: 8e-3, 4: 1e-2, 8: 1e-2, 16: 5e-3}
epoch_config = {2: 5500, 4: 8000, 8: 7500, 16: 9000}
batchsize_config = {2: 2, 4: 2, 8: 2, 16: 3}
activation_config = {2: "sigmoid", 4: "sigmoid", 8: "sigmoid", 16: "sigmoid"}
loss_config = {2: "mse", 4: "mse", 8: "mse", 16: "mse"}
momentum_config = {2: 0.75, 4: 0.75, 8: 0.9, 16: 0.6}
stop_loss_config = {2: 0.05795, 4: 0.02025, 8: 0.02045, 16: 0.02065}
plot_color = {2: "red", 4: "cyan", 8: "magenta", 16: "black"}
elif data_ix == 2:
lr_config = {2: 9e-3, 4: 9e-3, 8: 1e-2, 16: 9e-3}
epoch_config = {2: 7500, 4: 6500, 8: 8000, 16: 30000}
batchsize_config = {2: 3, 4: 3, 8: 2, 16: 2}
activation_config = {2: "sigmoid", 4: "sigmoid", 8: "sigmoid", 16: "sigmoid"}
loss_config = {2: "mse", 4: "mse", 8: "mse", 16: "mse"}
momentum_config = {2: 0.4, 4: 0.4, 8: 0.5, 16: 0.3}
stop_loss_config = {2: 0.28005, 4: 0.14305, 8: 0.05975, 16: 0.05915}
plot_color = {2: "red", 4: "cyan", 8: "magenta", 16: "black"}
trained_nets = []
anim_files = []
for nb_of_hiddenunits in (2, 4, 8, 16):
np.random.seed(550)
learning_rate = lr_config[nb_of_hiddenunits]
nb_of_epochs = epoch_config[nb_of_hiddenunits]
batch_size = batchsize_config[nb_of_hiddenunits]
two_layer_ann = neural_net.TwoLayerANN(nb_of_hiddenunits,
activation_function=activation_config[nb_of_hiddenunits],
loss_function=loss_config[nb_of_hiddenunits],
use_momentum=True, momentum_factor=momentum_config[nb_of_hiddenunits])
fig = plt.figure()
print(f"Training two layer ANN with {nb_of_hiddenunits} units")
for epoch in range(nb_of_epochs):
for i in range(train_nb_examples//batch_size):
two_layer_ann.forward(train_x[i*batch_size:i*batch_size+batch_size].reshape((batch_size, input_dim, output_dim)))
two_layer_ann.loss(train_y[i*batch_size:i*batch_size+batch_size])
two_layer_ann.backward(learning_rate)
tla_output = two_layer_ann.forward(train_x.reshape((train_nb_examples, input_dim, output_dim)))
tla_loss = np.mean(two_layer_ann.loss(train_y))
            if (data_ix == 1 and (epoch == 0 or (epoch+1) % 500 == 0)) or (data_ix == 2 and (epoch == 0 or (epoch+1) % (1500 if nb_of_hiddenunits == 16 else 500) == 0)):
print(f"Epoch:{epoch+1}, Two layer ANN loss:{tla_loss:.4f}")
plt.scatter(train_x, train_y)
tla_output = two_layer_ann.forward(train_uniform_x_samples.reshape((train_nb_examples, input_dim, output_dim)))
plt.plot(train_uniform_x_samples, tla_output.reshape((train_nb_examples, 1)),
color=plot_color[nb_of_hiddenunits], linewidth=3)
plt.title(f'Two layer ANN ({nb_of_hiddenunits} units), Epoch:{epoch+1}, Training Set, Loss:{tla_loss:.4f}')
plt.xlabel('x')
plt.ylabel('y')
plt.savefig(f'gif/tla_{nb_of_hiddenunits}_{epoch+1:05d}.png')
plt.close()
if tla_loss < stop_loss_config[nb_of_hiddenunits]:
print(f"Stopped training, Epoch:{epoch+1}, Two layer ANN loss:{tla_loss:.4f}")
plt.scatter(train_x, train_y)
tla_output = two_layer_ann.forward(train_uniform_x_samples.reshape((train_nb_examples, input_dim, output_dim)))
plt.plot(train_uniform_x_samples, tla_output.reshape((train_nb_examples, 1)),
color=plot_color[nb_of_hiddenunits], linewidth=3)
plt.title(f'Two layer ANN ({nb_of_hiddenunits} units), Epoch:{epoch+1}, Training Set, Loss:{tla_loss:.4f}')
plt.xlabel('x')
plt.ylabel('y')
plt.savefig(f'output/tla_{nb_of_hiddenunits}_train{"_2" if data_ix == 2 else ""}.png')
plt.close()
break
anim_file = f'gif/tla_{nb_of_hiddenunits}_training{"_2" if data_ix == 2 else ""}.gif'
utils.create_animation(anim_file, f'gif/tla_{nb_of_hiddenunits}_*.png', fps=4 if data_ix == 1 else 8)
trained_nets.append(two_layer_ann)
anim_files.append(anim_file)
return trained_nets, anim_files
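# A minimal end-to-end sketch of how the helpers in this module chain together.
# The synthetic sine data and the function name below are illustrative assumptions,
# not part of the original experiments.
def run_tla_experiment_sketch(data_ix=1):
    rng = np.random.default_rng(550)
    train_x = np.sort(rng.uniform(-1.0, 1.0, size=40))
    train_y = np.sin(3 * train_x) + rng.normal(0.0, 0.1, size=40)
    test_x = np.sort(rng.uniform(-1.0, 1.0, size=20))
    test_y = np.sin(3 * test_x) + rng.normal(0.0, 0.1, size=20)
    uniform_x = np.linspace(-1.0, 1.0, num=train_x.shape[0])
    # Train the four two-layer networks, then report train/test losses and overlay the fitted curves.
    trained_nets, _ = train_tla(data_ix, train_x, train_y, 1, 1, train_x.shape[0], uniform_x)
    evaluate_tla(trained_nets, train_x, train_y, test_x, test_y,
                 train_x.shape[0], test_x.shape[0], uniform_x, 1, 1)
    plot_tla_curves(trained_nets, train_x, train_y, uniform_x, train_x.shape[0], 1, 1)
    return trained_nets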
def evaluate_tla(trained_nets, train_x, train_y, test_x, test_y, train_nb_examples, test_nb_examples, train_uniform_x_samples, input_dim, output_dim, label=""):
ann_hidden_units = [2, 4, 8, 16]
plot_color = {2: "red", 4: "cyan", 8: "magenta", 16: "black"}
for i in range(4):
two_layer_ann = trained_nets[i]
tla_output = two_layer_ann.forward(train_x.reshape((train_nb_examples, input_dim, output_dim)))
tla_loss = np.mean(two_layer_ann.loss(train_y))
tla_loss_std = np.std(two_layer_ann.loss(train_y))
print(f"Two layer ANN, {ann_hidden_units[i]} units, training set loss:{tla_loss:.4f}, std:{tla_loss_std:.4f}")
fig = plt.figure()
fig.set_facecolor('w')
plt.scatter(train_x, train_y)
tla_output = two_layer_ann.forward(train_uniform_x_samples.reshape((train_nb_examples, input_dim, output_dim)))
plt.plot(train_uniform_x_samples, tla_output.reshape((train_nb_examples, 1)),
color=plot_color[ann_hidden_units[i]], linewidth=3)
plt.title(f'Two layer ANN, {ann_hidden_units[i]} units, Training Set, Loss:{np.mean(tla_loss):.4f}')
plt.xlabel('x')
plt.ylabel('y')
plt.savefig(f'output/tla_{ann_hidden_units[i]}_train_curve{label}.png')
plt.show()
tla_output = two_layer_ann.forward(test_x.reshape((test_nb_examples, input_dim, output_dim)))
tla_loss = np.mean(two_layer_ann.loss(test_y))
tla_loss_std = np.std(two_layer_ann.loss(test_y))
print(f"Two layer ANN, {ann_hidden_units[i]} units, test set loss:{tla_loss:.4f}, std:{tla_loss_std:.4f}")
fig = plt.figure()
fig.set_facecolor('w')
plt.scatter(test_x, test_y)
tla_output = two_layer_ann.forward(train_uniform_x_samples.reshape((train_nb_examples, input_dim, output_dim)))
plt.plot(train_uniform_x_samples, tla_output.reshape((train_nb_examples, 1)),
color=plot_color[ann_hidden_units[i]], linewidth=3)
plt.title(f'Two layer ANN, {ann_hidden_units[i]} units, Test Set, Loss:{np.mean(tla_loss):.4f}')
plt.xlabel('x')
plt.ylabel('y')
plt.savefig(f'output/tla_{ann_hidden_units[i]}_test_curve{label}.png')
plt.show()
def plot_tla_curves(trained_nets, train_x, train_y, train_uniform_x_samples, train_nb_examples, input_dim, output_dim, loc="upper left", label=""):
plot_color = {2: "red", 4: "cyan", 8: "magenta", 16: "black"}
fig = plt.figure()
fig.set_facecolor('w')
plt.scatter(train_x, train_y)
two_layer_ann = trained_nets[0]
tla_output = two_layer_ann.forward(train_uniform_x_samples.reshape((train_nb_examples, input_dim, output_dim)))
plt.plot(train_uniform_x_samples, tla_output.reshape((train_nb_examples, 1)), label='2 Units',
color=plot_color[2], linewidth=3)
two_layer_ann = trained_nets[1]
tla_output = two_layer_ann.forward(train_uniform_x_samples.reshape((train_nb_examples, input_dim, output_dim)))
plt.plot(train_uniform_x_samples, tla_output.reshape((train_nb_examples, 1)), label='4 Units',
color=plot_color[4], linewidth=6)
two_layer_ann = trained_nets[2]
tla_output = two_layer_ann.forward(train_uniform_x_samples.reshape((train_nb_examples, input_dim, output_dim)))
plt.plot(train_uniform_x_samples, tla_output.reshape((train_nb_examples, 1)), label='8 Units',
color=plot_color[8], linewidth=3)
two_layer_ann = trained_nets[3]
tla_output = two_layer_ann.forward(train_uniform_x_samples.reshape((train_nb_examples, input_dim, output_dim)))
plt.plot(train_uniform_x_samples, tla_output.reshape((train_nb_examples, 1)), label='16 Units',
color=plot_color[16], linewidth=3)
leg = plt.legend(loc=loc)
for legobj in leg.legendHandles:
legobj.set_linewidth(3)
plt.title(f'Two layer ANNs with different number of hidden units')
plt.xlabel('x')
plt.ylabel('y')
plt.savefig(f'output/tla_all_curves{label}.png')
plt.show()
def train_tla_different_lrs(train2_x, train2_y, train2_nb_examples, train2_uniform_x_samples, input_dim, output_dim):
nb_of_hiddenunits = 8
lr = {0: 1, 1: 1e-1, 2: 1e-2, 3: 1e-3, 4: 1e-4}
nb_epoch = {0: 1, 1: 500, 2: 2500, 3: 25000, 4: 225000}
batch_size = 229
activation = "sigmoid"
loss = "mse"
momentum = 0.99
stop_loss = 0.12
plot_color = "magenta"
for j in lr:
np.random.seed(550)
learning_rate = lr[j]
nb_of_epochs = nb_epoch[j]
two_layer_ann = neural_net.TwoLayerANN(nb_of_hiddenunits,
activation_function=activation,
loss_function=loss,
use_momentum=True, momentum_factor=momentum)
fig = plt.figure()
fig.set_facecolor('w')
print(f"Training two layer ANN with {nb_of_hiddenunits} units, LR:{learning_rate}")
for epoch in range(nb_of_epochs):
for i in range(train2_nb_examples//batch_size):
two_layer_ann.forward(train2_x[i*batch_size:i*batch_size+batch_size].reshape((batch_size, input_dim, output_dim)))
two_layer_ann.loss(train2_y[i*batch_size:i*batch_size+batch_size])
two_layer_ann.backward(learning_rate)
tla_output = two_layer_ann.forward(train2_x.reshape((train2_nb_examples, input_dim, output_dim)))
tla_loss = np.mean(two_layer_ann.loss(train2_y))
if epoch == 0 or (epoch+1) % (1 if j == 0 else (500 if j < 3 else (10000 if j == 3 else 30000))) == 0:
print(f"Epoch:{epoch+1}, Two layer ANN loss:{tla_loss:.4f}")
if tla_loss < stop_loss:
print(f"Stopped training, Epoch:{epoch+1}, Two layer ANN loss:{tla_loss:.4f}")
plt.scatter(train2_x, train2_y)
tla_output = two_layer_ann.forward(train2_uniform_x_samples.reshape((train2_nb_examples, input_dim, output_dim)))
plt.plot(train2_uniform_x_samples, tla_output.reshape((train2_nb_examples, 1)), plot_color,
linewidth=3)
plt.title(f'Two layer ANN ({nb_of_hiddenunits} units), Epoch:{epoch+1}, Training Set, Loss:{tla_loss:.4f}')
plt.xlabel('x')
plt.ylabel('y')
plt.savefig(f'output/tla_{nb_of_hiddenunits}_train_d{j}.png')
plt.show()
break
if epoch == nb_of_epochs - 1:
print(f"Epoch:{epoch+1}, Two layer ANN loss:{tla_loss:.4f}")
plt.scatter(train2_x, train2_y)
tla_output = two_layer_ann.forward(train2_uniform_x_samples.reshape((train2_nb_examples, input_dim, output_dim)))
plt.plot(train2_uniform_x_samples, tla_output.reshape((train2_nb_examples, 1)), plot_color,
linewidth=3)
plt.title(f'Two layer ANN ({nb_of_hiddenunits} units), Epoch:{epoch+1}, Training Set, Loss:{tla_loss:.4f}')
plt.xlabel('x')
plt.ylabel('y')
plt.savefig(f'output/tla_{nb_of_hiddenunits}_train_d{j}.png')
plt.show()
def train_tla_momentum(train2_x, train2_y, train2_nb_examples, train2_uniform_x_samples, input_dim, output_dim):
nb_of_hiddenunits = 8
learning_rate = 1e-2
nb_of_epochs = 300000
batch_size = 229
activation = "sigmoid"
loss = "mse"
mf = {0: 0, 1: 0.99}
stop_loss = 0.12
plot_color = "magenta"
for j in mf:
np.random.seed(550)
momentum = mf[j]
two_layer_ann = neural_net.TwoLayerANN(nb_of_hiddenunits,
activation_function=activation,
loss_function=loss,
use_momentum=True, momentum_factor=momentum)
fig = plt.figure()
fig.set_facecolor('w')
print(f"Training two layer ANN with {nb_of_hiddenunits} units, MF:{momentum}")
for epoch in range(nb_of_epochs):
for i in range(train2_nb_examples//batch_size):
two_layer_ann.forward(train2_x[i*batch_size:i*batch_size+batch_size].reshape((batch_size, input_dim, output_dim)))
two_layer_ann.loss(train2_y[i*batch_size:i*batch_size+batch_size])
two_layer_ann.backward(learning_rate)
tla_output = two_layer_ann.forward(train2_x.reshape((train2_nb_examples, input_dim, output_dim)))
tla_loss = np.mean(two_layer_ann.loss(train2_y))
if epoch == 0 or (epoch+1) % (40000 if j == 0 else 500) == 0:
print(f"Epoch:{epoch+1}, Two layer ANN loss:{tla_loss:.4f}")
if tla_loss < stop_loss:
print(f"Stopped training, Epoch:{epoch+1}, Two layer ANN loss:{tla_loss:.4f}")
plt.scatter(train2_x, train2_y)
tla_output = two_layer_ann.forward(train2_uniform_x_samples.reshape((train2_nb_examples, input_dim, output_dim)))
plt.plot(train2_uniform_x_samples, tla_output.reshape((train2_nb_examples, 1)), plot_color,
linewidth=3)
plt.title(f'Two layer ANN ({nb_of_hiddenunits} units), Epoch:{epoch+1}, Training Set, Loss:{tla_loss:.4f}')
plt.xlabel('x')
plt.ylabel('y')
plt.savefig(f'output/tla_{nb_of_hiddenunits}_train_e{j}.png')
plt.show()
break
def train_tla_batch(train2_x, train2_y, train2_nb_examples, train2_uniform_x_samples, input_dim, output_dim):
nb_of_hiddenunits = 8
    learning_rates
# -*- coding: utf-8 -*-
"""
This is the base file that can serve as a starting point for the Python Adaptor development.
It can also be used as a template for Python modules.
"""
import argparse as ap
import csv
import datetime
import logging
import os
import pandas as pd
from pathlib import Path
import subprocess
import re
from shutil import move
import xarray as xr
import sys
import xml.etree.ElementTree as ET
# For package only #
# uncomment this when building the wheel distribution: python setup.py bdist_wheel
# from epaswmmadaptor import epaswmm
# from epaswmmadaptor import __version__
__author__ = "pbishop,lboutin"
__copyright__ = "pbishop,lboutin"
__license__ = "mit"
# XML namespace dict, needed to find elements
namespace = {"pi": "http://www.wldelft.nl/fews/PI"}
def add_attributes(ds):
"""
Add model specific attributes to make it more CF compliant
"""
ds.time.attrs["standard_name"] = "time"
ds.time.attrs["long_name"] = "time"
ds.time.attrs["axis"] = "T"
ds.station_id.attrs["standard_name"] = "Station Identifier"
ds.station_id.attrs["long_name"] = "EPA_SWMM Station Identifier"
ds.station_id.attrs["axis"] = "XY"
ds.station_id.attrs["cf_role"] = "timeseries_id"
ds = ds.assign_attrs(
Conventions="CF-1.6",
title="Data from simulation outputs",
institution="TRCA",
source="Don River Hydrology Update Project Number 60528844 December 2018",
history=datetime.datetime.utcnow().replace(microsecond=0).isoformat(" ")
+ " EMT: simulation results from EPA SWMM model",
references="https://trca.ca/",
Metadata_Conventions="Unidata Dataset Discovery v1.0",
summary="EPA SWMM simulation output",
date_created=datetime.datetime.utcnow().replace(microsecond=0).isoformat(" ")
+ " EMT",
coordinate_system="WGS 1984",
featureType="timeSeries",
comment="created from Python script EPA-SWMM-Adaptor",
)
return ds
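# A minimal usage sketch for add_attributes. The tiny in-memory dataset, the "flow"
# variable and the helper name are assumptions for illustration only; the adaptor
# itself builds its datasets from the *.rpt parsing below.
def _example_add_attributes():
    times = pd.date_range("2020-01-01", periods=3, freq="H")
    ds = xr.Dataset(
        {"flow": (("time", "station_id"), [[1.0, 2.0], [3.0, 4.0], [5.0, 6.0]])},
        coords={"time": times, "station_id": ["Node_A", "Node_B"]},
    )
    return add_attributes(ds)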
def bytes_to_string(df, col_to_convert):
"""
Decodes columns in a dataframe. When the NetCDF file is read, string columns are encoded.
"""
for x in col_to_convert:
df[x] = df[x].str.decode('utf-8')
return df
def check_properties(key, props, run_info_file):
if key not in props:
main_logger.error("Key (%s) was not specified in the run_info.xml file." % key)
raise KeyError(
f'"{key}" needs to be specified under <properties> in {run_info_file.resolve()}'
)
def create_xarray_dataset(data_dict, swmm_unit_dict):
"""
Creating xarray datasets.
"""
main_logger.debug("Creating DataSet from the results DataFrame.")
list_ds_nodes = []
list_ds_links = []
list_keys_ignored = []
for key in data_dict.keys():
try:
header = data_dict[key]['Header']
units = data_dict[key]['Units']
rename_header = {}
except Exception:
main_logger.error("Failed to get header/units when creating dataset.")
stop_program()
for item in range(0, len(units)):
try:
rename_header[units[item]] = header[item]
temp_df = data_dict[key]['Data'].copy(deep=True)
temp_df = temp_df.rename(rename_header, axis='columns')
temp_df['station_id'] = key
temp_df.set_index(['station_id'], append=True, inplace=True)
ds2 = xr.Dataset.from_dataframe(temp_df)
except Exception:
main_logger.error("Failed to create DataSet for {0}".format(temp_df['station_id']))
stop_program()
for var, unit in data_dict[key]['units_dict'].items():
try:
attributes_info = swmm_unit_dict.get(unit)
for attrs, val in attributes_info.items():
if attrs == 'UDUNITS':
attrs = 'units'
ds2[var].attrs[attrs] = val
except Exception:
main_logger.error(
"Error raised due to EPA SWMM unit --> {0} is not recognized. Please add corresponding information into the UDUNITS_lookup.csv input file.".format(
unit))
stop_program()
raise KeyError(
"Error raised due to EPA SWMM unit --> {0} is not recognized. Please add corresponding information into the UDUNITS_lookup.csv input file.".format(
unit))
try:
if "node" in key.lower():
list_ds_nodes.append(ds2)
elif "link" in key.lower():
list_ds_links.append(ds2)
else:
list_keys_ignored.append(key)
pass
except Exception:
main_logger.error("Failed to append data to dataset for: {0}".format(key))
stop_program()
print("Locations ignored in the resulting output file (i.e. not a node or a link): \n\n" + str(list_keys_ignored))
# Combining Dataset for each station_id with same type
try:
main_logger.debug("Start combining xarray DataSets for nodes ...")
combined_ds_nodes = xr.combine_by_coords(list_ds_nodes)
combined_ds_nodes = add_attributes(combined_ds_nodes)
except Exception:
main_logger.error("Failed to combining xarray DataSets for nodes")
stop_program()
try:
main_logger.debug("Start combining xarray DataSets for links ...")
combined_ds_links = xr.combine_by_coords(list_ds_links)
combined_ds_links = add_attributes(combined_ds_links)
except Exception:
main_logger.error("Failed to combining xarray DataSets for links")
stop_program()
print("\nDone creating xarray DataSet for Nodes and Links.\n")
main_logger.debug("Done creating xarray DataSet for Nodes and Links.")
return combined_ds_nodes, combined_ds_links
def dir_element(elem, exists=True):
"""
Checks if a string or XML element is a directory path, and returns the corresponding path.
"""
if isinstance(elem, str):
# such that this works if the path is in an attribute
path = Path(elem)
else:
path = Path(elem.text)
if exists and not path.is_dir():
main_logger.error(
"The following is expected to exist but was not found: %s" % (os.path.join(os.getcwd(), path)))
stop_program()
raise FileNotFoundError(path.resolve())
return path
def file_element(elem, exists=True):
"""
Checks if a string or XML element is a path, and returns the corresponding path.
"""
if isinstance(elem, str):
# such that this works if the path is in an attribute
if "bin" in elem:
path = Path(elem)
root = Path(os.getcwd())
path = os.path.join(root.parents[0], path)
path = Path(path)
else:
path = Path(elem)
else:
path = Path(elem.text)
if exists and not path.is_file():
print("The following is expected to exist but was not found: %s" % (os.path.join(os.getcwd(), path)))
main_logger.error(
"The following is expected to exist but was not found: %s" % (os.path.join(os.getcwd(), path)))
stop_program()
raise FileNotFoundError(path.resolve())
return path
def make_df(lines, start, nrows, df_header):
"""
Method to create a pandas DataFrame from a subset of lines from the simulation results *.rpt file.
"""
# PERFORMANCE ISSUE: parsers.py from pandas is causing lost in performance.
# Using pandas.DataFrame() method allowed increasing performance.
# df = pd.read_csv(file, delimiter=r"\s+", names=df_header, header=None,
# skiprows=start+2,nrows=nrows-1, parse_dates=[0], dayfirst = False)
try:
df = pd.DataFrame([[i for i in line.strip().split()] for line in lines[start + 2:start + nrows - 1]],
columns=df_header)
df['DateO'] = pd.to_datetime(df['Date'])
df['TimeO'] = pd.to_timedelta(df['Time'])
df['time'] = df['DateO'] + df['TimeO']
df = df.drop(columns=['DateO', 'TimeO', 'Date', 'Time'])
df = df.set_index('time')
df = df.apply(pd.to_numeric)
except Exception:
main_logger.error("Failed to create dataframe for line starting with: {0}".format(lines[start]))
stop_program()
return df
def read_netcdf(netcdf_filename, col_to_convert):
"""
Read a netCDF file and return a pandas DataFrame
"""
ds = xr.open_dataset(netcdf_filename)
try:
df = ds.to_dataframe()
except Exception:
main_logger.error("Failed to create dataframe when reading the NetCDF file.")
stop_program()
try:
df = bytes_to_string(df, col_to_convert)
except Exception:
main_logger.error("Failed to decode following columns when reading NetCDF file: " + ','.join(col_to_convert))
stop_program()
return df
def read_errors_warnings(file_list):
"""
Read errors and warnings from the *.rpt ASCII and Python log file output from the simulation.
"""
main_logger.debug("File list: {0}".format(file_list))
list_warning_error = []
df = None
for f in file_list:
try:
with open(f, "r") as fi:
for ln in fi:
if any(x in ln for x in ["ERROR", "WARNING", "DEBUG", "INFO", "FATAL"]):
list_warning_error.append(ln.strip())
except Exception:
main_logger.error(
"The following is expected to exist but was not found: {0}".format(os.path.join(os.getcwd(), f)))
stop_program()
raise FileNotFoundError(Path(f).resolve())
if len(list_warning_error) > 0:
        df = pd.Series(list_warning_error).str.split(":", n=1, expand=True).rename(
columns={0: "level", 1: "description"})
df["description"] = [x.strip() for x in df["description"]]
df = df.drop_duplicates(["level",
"description"]).reset_index() # SWMM outputs identical warnings sometimes, which does not add any value
else:
df = None
# CONVERT TO FEWS ERROR Numbering
if df is not None and len(df) > 0:
df['description'] = df['level'] + ": " + df['description']
for index, row in df.iterrows():
if "DEBUG" in row["level"]:
df.at[index, "level"] = 4
if "INFO" in row["level"]:
df.at[index, "level"] = 3
if "WARN" in row["level"]:
df.at[index, "level"] = 2
if "ERROR" in row["level"]:
df.at[index, "level"] = 1
if "FATAL" in row["level"]:
df.at[index, "level"] = 0
else:
main_logger.info("No errors, warnings or info messages were detected.")
df = pd.DataFrame(columns=['description', 'level'])
return df
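# The if-chain above maps logger keywords onto FEWS severity codes; the same mapping
# can be written as a lookup table. A sketch (the names below are illustrative and
# not used elsewhere in the adaptor):
FEWS_SEVERITY_SKETCH = {"DEBUG": 4, "INFO": 3, "WARNING": 2, "WARN": 2, "ERROR": 1, "FATAL": 0}

def _fews_level_sketch(level_text):
    """Return the FEWS severity code for the first keyword found in level_text (default: info)."""
    for keyword, code in FEWS_SEVERITY_SKETCH.items():
        if keyword in level_text:
            return code
    return 3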
def read_rpt_file(rpt_input_file):
"""
Read *.rpt ASCII file with timeSeries output from the simulation.
"""
try:
with open(rpt_input_file, "r") as f:
lines = f.readlines()
data_dict = {}
in_data = False
varchange = False
old_name = ""
main_logger.debug("Starting parsing *.rpt file...")
# Parse ASCII *.rpt file into nested Dictionary/DataFrame
for i in range(0, len(lines)):
try:
if "<<<" in lines[i]:
new_name = lines[i].strip().strip("<<<").strip(">>>").lstrip(" ").rstrip(" ").replace(" ", "_")
print("Proceeding with --> ", new_name)
data_dict[new_name] = {'start_line': i + 3}
# Parsing Header and Units information.
# Two potential cases:
# 1) Date and Time located on the Header line (Nodes and Links)
# 2) Date and Time located on the Units line (Subcatchment)
header = lines[i + 2].strip().lstrip(" ").rstrip(" ").rstrip("/").split()
units = lines[i + 3].strip().lstrip(" ").rstrip(" ").rstrip("/").split() # [2::]
if len(header) > len(units):
df_header = header
header = header[2::]
else:
df_header = units[:2] + header
units = units[2::]
units_dict = {}
for item in range(len(header)):
units_dict[header[item]] = units[item]
data_dict[new_name] = {'start_line': i + 3, 'Header': header, 'Units': units,
'df_header': df_header, 'units_dict': units_dict}
if not in_data:
in_data = True
elif varchange:
varchange = False
# Pass when there's a change in variable
else:
data_dict[old_name]['end_line'] = i - 3
df_header = data_dict[old_name]['df_header']
start = data_dict[old_name]['start_line']
nrows = data_dict[old_name]['end_line'] - data_dict[old_name]['start_line']
df = make_df(lines, start, nrows, df_header)
data_dict[old_name]['Data'] = df
elif not in_data:
pass
elif "***" in lines[i]: # Export type/variable change
data_dict[old_name]['end_line'] = i - 5
df_header = data_dict[old_name]['df_header']
start = data_dict[old_name]['start_line']
nrows = | |
from flask import render_template, flash, redirect, url_for, request, Flask, jsonify, send_from_directory
from app import app, db, ImmigrationToolBox, DataWizardTools
from app.models import User, Post
from app.forms import PostForm
from werkzeug.urls import url_parse
from datetime import datetime
import pandas as pd
@app.route("/IOIimmQuarterly", methods=['GET', 'POST'])
def upload_IOIimmQuarterly():
if request.method == 'POST':
print(request.files['file'])
f = request.files['file']
test = pd.read_excel(f)
test.fillna('',inplace=True)
#Cleaning
if test.iloc[0][0] == '':
df = pd.read_excel(f,skiprows=2)
else:
df = pd.read_excel(f)
df.fillna('',inplace=True)
#Create Hyperlinks
df['Hyperlinked Case #'] = df.apply(lambda x : DataWizardTools.Hyperlinker(x['Matter/Case ID#']),axis=1)
df['Assigned Branch/CC'] = df['Assigned Branch/CC'].str.replace('Bronx Legal Services','BxLS')
df['Assigned Branch/CC'] = df['Assigned Branch/CC'].str.replace('Brooklyn Legal Services','BkLS')
df['Assigned Branch/CC'] = df['Assigned Branch/CC'].str.replace('Queens Legal Services','QLS')
df['Assigned Branch/CC'] = df['Assigned Branch/CC'].str.replace('Manhattan Legal Services','MLS')
df['Assigned Branch/CC'] = df['Assigned Branch/CC'].str.replace('Staten Island Legal Services','SILS')
df['Assigned Branch/CC'] = df['Assigned Branch/CC'].str.replace('Legal Support Unit','LSU')
#Determining 'level of service' from 3 fields
df['HRA Level of Service'] = df.apply(lambda x: ImmigrationToolBox.HRA_Level_Service(x['Close Reason'],x['Level of Service']), axis=1)
#HRA Case Coding
#Putting Cases into HRA's Baskets!
df['HRA Case Coding'] = df.apply(lambda x: ImmigrationToolBox.HRA_Case_Coding(x['Legal Problem Code'],x['Special Legal Problem Code'],x['HRA Level of Service'],x['IOI Does Client Have A Criminal History? (IOI 2)']), axis=1)
#Dummy SLPC for Juvenile Cases
def DummySLPC(LPC,SLPC):
LPC = str(LPC)
SLPC = str(SLPC)
if LPC == "44 Minor Guardianship / Conservatorship" or LPC == "42 Neglected/Abused/Dependent":
return 'N/A'
else:
return SLPC
df['Special Legal Problem Code'] = df.apply(lambda x: DummySLPC(x['Legal Problem Code'],x['Special Legal Problem Code']), axis=1)
df['HRA Service Type'] = df['HRA Case Coding'].apply(lambda x: x[:2] if x != 'Hold For Review' else '')
df['HRA Proceeding Type'] = df['HRA Case Coding'].apply(lambda x: x[3:] if x != 'Hold For Review' else '')
#Giving things better names in cleanup sheet
df['DHCI form?'] = df['Has Declaration of Household Composition and Income (DHCI) Form?']
df['Consent form?'] = df['IOI HRA Consent Form? (IOI 2)']
df['Client Name'] = df['Full Person/Group Name (Last First)']
df['Office'] = df['Assigned Branch/CC']
df['Country of Origin'] = df['IOI Country Of Origin (IOI 1 and 2)']
df['Substantial Activity'] = df['IOI Substantial Activity (Choose One)']
df['Date of Substantial Activity'] = df['Custom IOI Date substantial Activity Performed']
#Income Waiver
def Income_Exclude(IncomePct,Waiver,Referral):
IncomePct = int(IncomePct)
Waiver = str(Waiver)
if Referral == 'Action NY':
return ''
elif IncomePct > 200 and Waiver.startswith('2') == False:
return 'Needs Income Waiver'
else:
return ''
df['Exclude due to Income?'] = df.apply(lambda x: Income_Exclude(x['Percentage of Poverty'],x['IOI HRA WAIVER APPROVAL DATE if over 200% of FPL (IOI 2)'],x['IOI Referral Source (IOI 2)']), axis=1)
#Eligibility_Date & Rollovers
def Eligibility_Date(Effective_Date,Date_Opened):
if Effective_Date != '':
return Effective_Date
else:
return Date_Opened
df['Eligibility_Date'] = df.apply(lambda x : Eligibility_Date(x['IOI HRA Effective Date (optional) (IOI 2)'],x['Date Opened']), axis = 1)
#Manipulable Dates
df['Open Month'] = df['Eligibility_Date'].apply(lambda x: str(x)[:2])
df['Open Day'] = df['Eligibility_Date'].apply(lambda x: str(x)[3:5])
df['Open Year'] = df['Eligibility_Date'].apply(lambda x: str(x)[6:])
df['Open Construct'] = df['Open Year'] + df['Open Month'] + df['Open Day']
df['Subs Month'] = df['Date of Substantial Activity'].apply(lambda x: str(x)[:2])
df['Subs Day'] = df['Date of Substantial Activity'].apply(lambda x: str(x)[3:5])
df['Subs Year'] = df['Date of Substantial Activity'].apply(lambda x: str(x)[6:])
df['Subs Construct'] = df['Subs Year'] + df['Subs Month'] + df['Subs Day']
df['Subs Construct'] = df.apply(lambda x : x['Subs Construct'] if x['Subs Construct'] != '' else 0, axis = 1)
df['Outcome1 Month'] = df['IOI Outcome 2 Date (IOI 2)'].apply(lambda x: str(x)[:2])
df['Outcome1 Day'] = df['IOI Outcome 2 Date (IOI 2)'].apply(lambda x: str(x)[3:5])
df['Outcome1 Year'] = df['IOI Outcome 2 Date (IOI 2)'].apply(lambda x: str(x)[6:])
df['Outcome1 Construct'] = df['Outcome1 Year'] + df['Outcome1 Month'] + df['Outcome1 Day']
df['Outcome2 Month'] = df['IOI Secondary Outcome Date 2 (IOI 2)'].apply(lambda x: str(x)[:2])
df['Outcome2 Day'] = df['IOI Secondary Outcome Date 2 (IOI 2)'].apply(lambda x: str(x)[3:5])
df['Outcome2 Year'] = df['IOI Secondary Outcome Date 2 (IOI 2)'].apply(lambda x: str(x)[6:])
df['Outcome2 Construct'] = df['Outcome2 Year'] + df['Outcome2 Month'] + df['Outcome2 Day']
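        #Worked example of the construct convention (illustrative values): an
        #Eligibility_Date of '07/15/2019' yields Open Month '07', Open Day '15',
        #Open Year '2019' and an Open Construct of '20190715', so constructs
        #compare correctly as yyyymmdd strings or integers.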
#DHCI Form
def DHCI_Needed(DHCI,Open_Construct,LoS):
if Open_Construct == '':
return ''
elif LoS.startswith('Advice'):
return ''
elif LoS.startswith('Brief'):
return ''
elif int(Open_Construct) < 20180701:
return ''
elif DHCI != 'Yes':
return 'Needs DHCI Form'
else:
return ''
df['Needs DHCI?'] = df.apply(lambda x: DHCI_Needed(x['Has Declaration of Household Composition and Income (DHCI) Form?'],x['Open Construct'],x['Level of Service']), axis=1)
#Needs Substantial Activity to Rollover into FY'20
def Needs_Rollover(Open_Construct,Substantial_Activity,Substantial_Activity_Date,CaseID,ReportedFY19):
            if Open_Construct == '':
                return ''
            elif int(Open_Construct) >= 20190701:
return ''
elif Substantial_Activity != '' and int(Substantial_Activity_Date) >= 20190701 and int(Substantial_Activity_Date) <= 20200630:
return ''
elif CaseID in ReportedFY19:
return 'Needs Substantial Activity in FY20'
else:
return ''
df['Needs Substantial Activity?'] = df.apply(lambda x: Needs_Rollover(x['Open Construct'],x['Substantial Activity'],x['Subs Construct'],x['Matter/Case ID#'], ImmigrationToolBox.ReportedFY19), axis=1)
#Outcomes
#if there are two outcomes choose which outcome to report based on which one happened more recently
def OutcomeToReport (Outcome1,OutcomeDate1,Outcome2,OutcomeDate2,ServiceLevel,CloseDate):
if OutcomeDate1 == '' and OutcomeDate2 == '' and CloseDate != '' and ServiceLevel == 'Advice':
return 'Advice given'
elif OutcomeDate1 == '' and OutcomeDate2 == '' and CloseDate != '' and ServiceLevel == 'Brief Service':
return 'Advice given'
elif CloseDate != '' and ServiceLevel == 'Full Rep or Extensive Service' and Outcome1 == '' and Outcome2 == '':
return '*Needs Outcome*'
elif OutcomeDate1 >= OutcomeDate2:
return Outcome1
elif OutcomeDate2 > OutcomeDate1:
return Outcome2
else:
return 'no actual outcome'
df['Outcome To Report'] = df.apply(lambda x: OutcomeToReport(x['IOI Outcome 2 (IOI 2)'],x['Outcome1 Construct'],x['IOI Secondary Outcome 2 (IOI 2)'],x['Outcome2 Construct'],x['HRA Level of Service'],x['Date Closed']), axis=1)
#make it add the outcome date as well (or tell you if you need it!)
def OutcomeDateToReport (Outcome1,OutcomeDate1,Outcome2,OutcomeDate2,ServiceLevel,CloseDate,ActualOutcomeDate1,ActualOutcomeDate2):
if OutcomeDate1 == '' and OutcomeDate2 == '' and CloseDate != '' and ServiceLevel == 'Advice':
return CloseDate
elif OutcomeDate1 == '' and OutcomeDate2 == '' and CloseDate != '' and ServiceLevel == 'Brief Service':
return CloseDate
elif OutcomeDate1 >= OutcomeDate2:
return ActualOutcomeDate1
elif OutcomeDate2 > OutcomeDate1:
return ActualOutcomeDate2
else:
return '*Needs Outcome Date*'
df['Outcome Date To Report'] = df.apply(lambda x: OutcomeDateToReport(x['IOI Outcome 2 (IOI 2)'],x['Outcome1 Construct'],x['IOI Secondary Outcome 2 (IOI 2)'],x['Outcome2 Construct'],x['HRA Level of Service'],x['Date Closed'],x['IOI Outcome 2 Date (IOI 2)'],x['IOI Secondary Outcome Date 2 (IOI 2)']), axis=1)
        #Known gap: a case with an outcome date but no outcome is not flagged as *Needs Outcome*
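        #Worked example (illustrative values): with Outcome1 Construct '20190801' and
        #Outcome2 Construct '20190915', the secondary outcome and its date are reported
        #because the later construct wins; an Advice or Brief Service case with no
        #outcome dates but a close date falls back to 'Advice given' on the close date.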
#add LSNYC to start of case numbers
df['Unique_ID'] = 'LSNYC'+df['Matter/Case ID#']
#take second letters of first and last names
df['Last_Initial'] = df['Client Last Name'].str[1]
df['First_Initial'] = df['Client First Name'].str[1]
#Year of birth
df['Year_of_Birth'] = df['Date of Birth'].str[-4:]
#Unique Client ID#
df['Unique Client ID#'] = df['First_Initial'] + df['Last_Initial'] + df['Year_of_Birth']
#Deliverable Categories
def Deliverable_Category(HRA_Coded_Case,Income_Cleanup,Age_at_Intake):
if Income_Cleanup == 'Needs Income Waiver':
return 'Needs Cleanup'
elif HRA_Coded_Case == 'T2-RD' and Age_at_Intake <= 21:
return 'Tier 2 (minor removal)'
elif HRA_Coded_Case == 'T2-RD':
return 'Tier 2 (removal)'
elif HRA_Coded_Case.startswith('T2') == True:
return 'Tier 2 (other)'
elif HRA_Coded_Case.startswith('T1')== True:
return 'Tier 1'
elif HRA_Coded_Case.startswith('B') == True:
return 'Brief'
else:
return 'Needs Cleanup'
df['Deliverable Tally'] = df.apply(lambda x: Deliverable_Category(x['HRA Case Coding'],x['Exclude due to Income?'],x['Age at Intake']), axis=1)
#make all cases for any client that has a minor removal tally, into also being minor removal cases
dfs = df.groupby('Unique Client ID#',sort = False)
tdf = pd.DataFrame()
for x, y in dfs:
for z in y['Deliverable Tally']:
if z == 'Tier 2 (minor removal)':
y['Modified Deliverable Tally'] = 'Tier 2 (minor removal)'
            tdf = pd.concat([tdf, y])
df = tdf
#write function to identify blank 'modified deliverable tallies' and add it back in as the original deliverable tally
df.fillna('',inplace= True)
def fillBlanks(ModifiedTally,Tally):
if ModifiedTally == '':
return Tally
else:
return ModifiedTally
df['Modified Deliverable Tally'] = df.apply(lambda x: fillBlanks(x['Modified Deliverable Tally'],x['Deliverable Tally']),axis=1)
        #TODO: delete any extra 'Brief' cases for clients that have multiple cases
#gender
def HRAGender (gender):
if gender == 'Male' or gender == 'Female':
return gender
else:
return 'Other'
df['Gender'] = df.apply(lambda x: HRAGender(x['Gender']), axis=1)
this test if it completely |
| notContains | surrounds a feature in selectingLayer. No portion of the |
| | contained feature can be outside the containing feature; however, the contained feature is allowed |
| | to touch the containing feature (that is, share a common |
| | point along its boundary). |
| | |
| | contains-If a feature in layer contains a feature in |
| | selectingLayer, the feature in layer is included in the output. |
| | notcontains-If a feature in layer contains a feature in |
| | selectingLayer, the feature in the first layer is excluded from the output. |
+-------------------+----------------------------------------------------------------------------------------+
| within | |within| |
| | |
| | A feature in layer passes this test if it is completely |
| notWithin | surrounded by a feature in selectingLayer. The entire feature |
| | in layer must be within the containing feature; however, the two |
| | features are allowed to touch (that is, share a common point |
| | along its boundary). |
| | |
| | * within-If a feature in layer is completely within a feature in |
| | selectingLayer, the feature in layer is included in the output. |
| | * notwithin-If a feature in layer is completely within a feature |
| | in selectingLayer, the feature in layer is excluded from the |
| | output. |
| | |
| | **Note:** |
| | |
| | You can use the within relationship for points and lines, just as |
| | you can with the contains relationship. For example, your first |
| | layer contains points representing manhole covers and you want |
| | to find the manholes that are on street centerlines (as opposed |
| | to parking lots or other non-street features). You could use |
| | within to find manhole points within street centerlines, but |
| | in order for a point to contain a line, the point must be exactly |
| | on the line (that is, in GIS terms, they are snapped to each |
| | other). If there is any doubt about this, use the withinDistance |
| | relationship with a suitable distance value. |
+-------------------+----------------------------------------------------------------------------------------+
| nearest | |nearest| |
| | |
| | A feature in the first layer passes this test if it is nearest |
| | to a feature in the second layer. |
| | |
| | * nearest-If a feature in the first layer is nearest to a |
| | feature in the second layer, the feature in the first layer |
| | is included in the output. |
+-------------------+----------------------------------------------------------------------------------------+
* ``distance`` is the distance to use for the withinDistance and notWithinDistance spatial relationship.
* ``units`` is the units for distance.
Choice list: ['Meters', 'Kilometers', 'Feet', 'Yards', 'Miles']
An expression may be a list, which denotes a group. The first operator in the group indicates how the group expression
is added to the previous expression. Grouping expressions is only necessary when you need to create two or more distinct
sets of features from the same layer. One way to think of grouping is that without grouping, you would have to execute
``find_existing_locations`` multiple times and merge the results.
------------------------------------- ------------------------------------------------------------------------------------------------------
output_name Optional string. If provided, the method will create a feature layer of the results. You define the name of the layer.
If ``output_name`` is not supplied, the task will return a feature collection.
------------------------------------- ------------------------------------------------------------------------------------------------------
context Optional string. Additional settings such as processing extent and output spatial reference. For ``find_existing_locations``, there are two settings.
#. Extent (``extent``)-a bounding box that defines the analysis area. Only those points in the input_layers that intersect the bounding box will be analyzed.
#. Output Spatial Reference (``outSR``)-the output features will be projected into the output spatial reference.
------------------------------------- ------------------------------------------------------------------------------------------------------
gis Optional, the GIS on which this tool runs. If not specified, the active GIS is used.
------------------------------------- ------------------------------------------------------------------------------------------------------
estimate Optional boolean. Is true, the number of credits needed to run the operation will be returned as a float.
------------------------------------- ------------------------------------------------------------------------------------------------------
future Optional boolean. If True, the result will be a GPJob object and results will be returned asynchronously.
===================================== ======================================================================================================
:Returns: result_layer : feature layer Item if output_name is specified, else Feature Collection.
.. code-block:: python
#USAGE EXAMPLE: To find busy (where SEGMENT_TY is 1 and where ARTERIAL_C is 1) streets from the existing seattle streets layer.
arterial_streets = find_existing_locations(input_layers=[bike_route_streets],
expressions=[{"operator":"","layer":0,"where":"SEGMENT_TY = 1"},
{"operator":"and","layer":0,"where":"ARTERIAL_C = 1"}],
output_name='ArterialStreets')
"""
if input_layers is None:
input_layers = []
if expressions is None:
expressions = []
gis = _arcgis.env.active_gis if gis is None else gis
return gis._tools.featureanalysis.find_existing_locations(
input_layers,
expressions,
output_name,
context,
estimate=estimate,future=future)
def derive_new_locations(
input_layers=[],
expressions=[],
output_name=None,
context=None,
gis=None,
estimate=False,
future=False):
"""
.. image:: _static/images/derive_new_locations/derive_new_locations.png
.. |intersect| image:: _static/images/derive_new_locations/intersect.png
.. |distance| image:: _static/images/derive_new_locations/distance.png
.. |within| image:: _static/images/derive_new_locations/within.png
.. |nearest| image:: _static/images/derive_new_locations/nearest.png
.. |contains| image:: _static/images/derive_new_locations/contains.png
The ``derive_new_locations`` method derives new features from the input layers that meet a query you specify. A query is
made up of one or more expressions. There are two types of expressions: attribute and spatial. An example of an
attribute expression is that a parcel must be vacant, which is an attribute of the Parcels layer
(STATUS = 'VACANT'). An example of a spatial expression is that the parcel must also be within a certain
distance of a river (Parcels within a distance of 0.75 Miles from Rivers).
The ``derive_new_locations`` method is very similar to the ``find_existing_locations`` method, the main difference is that
the result of ``derive_new_locations`` can contain partial features.
* In both methods, the attribute expression ``where`` and the spatial relationships within and contains return the same result.
This is because these relationships return entire features.
* When ``intersects`` or ``within_distance`` is used, ``derive_new_locations`` creates new features
in the result. For example, when intersecting a parcel feature and a flood zone area that partially overlap each other,
``find_existing_locations`` will return the entire parcel whereas ``derive_new_locations`` will return just the portion of
    the parcel that is within the flood zone (see the example below).
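    For example, "vacant parcels within 0.75 miles of rivers" could be expressed as one
    attribute expression plus one spatial expression. The layer variables, output name,
    and spatial-expression keys below are illustrative assumptions rather than a verbatim
    API reference:

    .. code-block:: python

        #USAGE EXAMPLE: To derive the portions of vacant parcels near rivers.
        vacant_near_rivers = derive_new_locations(input_layers=[parcels, rivers],
                                                  expressions=[{"operator":"","layer":0,"where":"STATUS = 'VACANT'"},
                                                               {"operator":"and","layer":0,"selectingLayer":1,
                                                                "spatialRel":"withinDistance","distance":0.75,"units":"Miles"}],
                                                  output_name='VacantParcelsNearRivers')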
===================================== ======================================================================================================
**Argument** **Description**
------------------------------------- ------------------------------------------------------------------------------------------------------
input_layers Required list of feature layers. A list of layers that will be used in the expressions parameter.
Each layer in the list can be:
* a feature service layer with an optional filter to select specific features, or
* a feature collection
------------------------------------- ------------------------------------------------------------------------------------------------------
expressions Required dict. There are two types of expressions, attribute and spatial.
Example attribute expression:
{
"operator": "and",
"layer": 0,
"where": "STATUS = 'VACANT'"
}
**Note**
* operator can be either ``and`` or ``or``
* layer is the index of the layer in the ``input_layers`` parameter.
* The where clause must be surrounded by double quotes.
* When dealing with text fields, values must be single-quoted ('VACANT').
* Date fields support all queries except LIKE. Dates are strings in YYYY:MM:DD hh:mm:ss format. Here's an example using the date field ObsDate:
"where": "ObsDate >= '1998-04-30 13:30:00' "
+----------+------------------------------------------------------------------+
| = | Equal |
+----------+------------------------------------------------------------------+
| > | Greater than |
+----------+------------------------------------------------------------------+
| < | Less than |
+----------+------------------------------------------------------------------+
| >= | Greater than or equal to |
+----------+------------------------------------------------------------------+
| <= | Less than or equal to |
+----------+------------------------------------------------------------------+
| <> | Not equal |
+----------+------------------------------------------------------------------+
| LIKE '% | A percent symbol (%) signifies a wildcard, meaning that |
| <string>'| anything is acceptable in its place-one character, a |
| | hundred characters, or no character. This expression |
| | would select Mississippi and Missouri among USA |
| | state names: STATE_NAME LIKE 'Miss%' |
+----------+------------------------------------------------------------------+
| BETWEEN | Selects a record if it has a value greater than or equal |
| <value1> | to <value1> |
# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License
from .common import DistributedOperator
from .common import DistributedOperatorImpl
from .common import register_distributed_operator
from .common import register_distributed_operator_impl
from ..utils import is_dim_shard
from ..utils import is_dim_replicate
from ..utils import is_valid_list_index
from ..utils import compute_compatible_dim_mapping
from ..utils import compute_compatible_dims_mapping
from ..utils import compute_compatible_and_update_dim_mapping
from paddle.fluid import core, unique_name
from paddle.fluid.framework import in_dygraph_mode
from paddle.fluid.framework import Program, Parameter, Variable, program_guard
from paddle.fluid.data_feeder import check_variable_and_dtype, check_dtype
from ..process import new_process_group
from ..utils import _get_comm_group
def _update_dims_mapping_for_matmul(op_dist_attr):
changed = False
op_desc = op_dist_attr.get_owner_op().desc
x_name = op_desc.input('X')[0]
y_name = op_desc.input('Y')[0]
out_name = op_desc.output('Out')[0]
x_dims_mapping = op_dist_attr.get_input_dims_mapping(x_name)
y_dims_mapping = op_dist_attr.get_input_dims_mapping(y_name)
out_dims_mapping = op_dist_attr.get_output_dims_mapping(out_name)
x_dims_mapping_len = len(x_dims_mapping)
y_dims_mapping_len = len(y_dims_mapping)
out_dims_mapping_len = len(out_dims_mapping)
    # Pad dims_mapping so that each operand has at least 2 dimensions
if x_dims_mapping_len == 1:
x_dims_mapping.insert(0, -1)
if y_dims_mapping_len == 1:
y_dims_mapping.insert(1, -1)
# Deal with dim > 2 and take care of broadcasting
if out_dims_mapping_len > 2:
broadcast_x_dims_mapping = []
broadcast_y_dims_mapping = []
broadcast_out_dims_mapping = []
for i in range(out_dims_mapping_len - x_dims_mapping_len):
broadcast_x_dims_mapping.append(out_dims_mapping[i])
for i in range(x_dims_mapping_len - 2):
broadcast_x_dims_mapping.append(x_dims_mapping[i])
for i in range(out_dims_mapping_len - y_dims_mapping_len):
broadcast_y_dims_mapping.append(out_dims_mapping[i])
for i in range(y_dims_mapping_len - 2):
broadcast_y_dims_mapping.append(y_dims_mapping[i])
for i in range(out_dims_mapping_len - 2):
broadcast_out_dims_mapping.append(out_dims_mapping[i])
compatible_dims_mapping = compute_compatible_dims_mapping([
broadcast_x_dims_mapping, broadcast_y_dims_mapping,
broadcast_out_dims_mapping
])
assert compatible_dims_mapping is not None, "There is no compatible dim mapping."
for i in range(x_dims_mapping_len - 2):
new_idx = i + (out_dims_mapping_len - x_dims_mapping_len)
if x_dims_mapping[i] != compatible_dims_mapping[new_idx]:
x_dims_mapping[i] = compatible_dims_mapping[new_idx]
changed = True
for i in range(y_dims_mapping_len - 2):
new_idx = i + (out_dims_mapping_len - y_dims_mapping_len)
if y_dims_mapping[i] != compatible_dims_mapping[new_idx]:
y_dims_mapping[i] = compatible_dims_mapping[new_idx]
changed = True
for i in range(out_dims_mapping_len - 2):
if out_dims_mapping[i] != compatible_dims_mapping[i]:
out_dims_mapping[i] = compatible_dims_mapping[i]
changed = True
    # The following, which uses negative indices, works both when
    # len(out_dims_mapping) > 2 and when len(out_dims_mapping) <= 2
dim_changed = compute_compatible_and_update_dim_mapping(
[x_dims_mapping, y_dims_mapping], [-1, -2])
if dim_changed:
changed = True
dim_changed = compute_compatible_and_update_dim_mapping(
[x_dims_mapping, out_dims_mapping], [-2, -2])
if dim_changed:
changed = True
dim_changed = compute_compatible_and_update_dim_mapping(
[y_dims_mapping, out_dims_mapping], [-1, -1])
if dim_changed:
changed = True
    # Remove the padded dim mappings so the length of dims_mapping matches its tensor
if x_dims_mapping_len == 1:
x_dims_mapping.pop(0)
if y_dims_mapping_len == 1:
y_dims_mapping.pop(1)
assert len(x_dims_mapping) == x_dims_mapping_len
assert len(y_dims_mapping) == y_dims_mapping_len
assert len(out_dims_mapping) == out_dims_mapping_len
return changed
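# Worked example (an illustrative trace, not executed by the framework): suppose X has
# dims_mapping [-1, 0], Y has [-1, 1] and Out has [-1, -1] for a plain 2-D matmul.
# Aligning x[-1] with y[-2] propagates the shard 0 onto y[-2]; aligning x[-2] with
# out[-2] leaves both replicated (-1); aligning y[-1] with out[-1] propagates the
# shard 1 onto out[-1]. The mappings become x=[-1, 0], y=[0, 1], out=[-1, 1] and the
# function returns changed=True.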
class DistributedMatmul(DistributedOperator):
def __init__(self, name):
super(DistributedMatmul, self).__init__()
self._name = name
register_distributed_operator("matmul", DistributedMatmul("matmul"))
# ColumnParallel
class DistributedMatmulImpl0(DistributedOperatorImpl):
def __init__(self, name):
super(DistributedMatmulImpl0, self).__init__()
self._name = name
self._forward_implemented = True
self._backward_implemented = False
def is_process_mesh_compatible(self, op_dist_attr):
""" No restriction for now. """
return True
def is_input_compatible(self, op_dist_attr):
op_desc = op_dist_attr.get_owner_op().desc
x_name = op_desc.input('X')[0]
y_name = op_desc.input('Y')[0]
x_dims_mapping = op_dist_attr.get_input_dims_mapping(x_name)
y_dims_mapping = op_dist_attr.get_input_dims_mapping(y_name)
if is_dim_shard(x_dims_mapping[-1]):
return False
if is_dim_shard(y_dims_mapping[0]) or is_dim_replicate(y_dims_mapping[
1]):
return False
for mapping in x_dims_mapping[1:-1]:
if is_dim_shard(mapping):
return False
return True
def is_output_compatible(self, op_dist_attr):
op_desc = op_dist_attr.get_owner_op().desc
out_name = op_desc.output('Out')[0]
out_dims_mapping = op_dist_attr.get_output_dims_mapping(out_name)
if is_dim_replicate(out_dims_mapping[-1]):
return False
for mapping in out_dims_mapping[1:-1]:
if is_dim_shard(mapping):
return False
return True
def update_dims_mapping(self, op_dist_attr):
changed = False
dim_changed = _update_dims_mapping_for_matmul(op_dist_attr)
if dim_changed:
changed = True
return changed
def forward(self, serial_op):
def static_handle(dst_block,
src_op,
op_dist_attr,
input_name_mapping,
output_name_mapping,
rank_id=0):
            assert len(
                input_name_mapping
            ) == 2, "col_parallel_linear takes 2 input variables but got {}".format(
                input_name_mapping)
            assert len(
                output_name_mapping
            ) == 1, "col_parallel_linear takes 1 output variable but got {}".format(
                output_name_mapping)
            assert len(
                input_name_mapping['X']
            ) == 1, "col_parallel_linear input X takes 1 variable but got {}".format(
                input_name_mapping['X'])
            assert len(
                input_name_mapping['Y']
            ) == 1, "col_parallel_linear input Y takes 1 variable but got {}".format(
                input_name_mapping['Y'])
            assert len(
                output_name_mapping['Out']
            ) == 1, "col_parallel_linear output Out takes 1 variable but got {}".format(
                output_name_mapping['Out'])
X_var = dst_block.var(input_name_mapping['X'][0])
Weight_var = dst_block.var(input_name_mapping['Y'][0])
Out_var = dst_block.var(output_name_mapping['Out'][0])
# TODO infer logic comm presentation
model_parallel_axis, process_mesh = op_dist_attr.get_owner_context(
)._get_model_parallel_info()
group_ranks = _get_comm_group(process_mesh.process_group,
process_mesh.topology,
model_parallel_axis, rank_id)
group = new_process_group(group_ranks)
intermediate_var_0 = dst_block.create_var(
name=unique_name.generate_with_ignorable_key(".".join(
["c_identity", 'tmp'])),
dtype=X_var.dtype,
shape=X_var.shape,
type=core.VarDesc.VarType.LOD_TENSOR,
persistable=False,
stop_gradient=X_var.stop_gradient)
check_variable_and_dtype(
X_var, 'tensor',
['float16', 'float32', 'float64', 'int32', 'int64'],
'_c_identity')
dst_block.append_op(
type='c_identity',
inputs={'X': [X_var]},
outputs={'Out': intermediate_var_0},
attrs={
'ring_id': group.id,
'use_calc_stream': True,
'use_model_parallel': True,
})
check_variable_and_dtype(intermediate_var_0, 'x',
['float16', 'float32', 'float64'],
'linear')
check_dtype(intermediate_var_0.dtype, 'dtype',
['float16', 'float32', 'float64'], 'linear')
attrs = {
'transpose_X': False,
'transpose_Y': False,
'alpha': 1,
}
inputs = {'X': [intermediate_var_0], 'Y': [Weight_var]}
dst_block.append_op(
type='matmul',
inputs=inputs,
outputs={'Out': Out_var},
attrs=attrs)
if in_dygraph_mode():
raise NotImplementedError(
"Dist op for [{}] with idx [{}] is NOT implemented yet.".format(
"matmul", 0))
else:
return static_handle
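# Sketch of the column-parallel pattern implemented above (illustrative shapes,
# assuming a model-parallel group of size 2): the weight Y[K, N] is stored as
# Y_local[K, N/2] on each rank, X first passes through c_identity (an identity
# in the forward pass that becomes an allreduce of the gradient in the backward
# pass), and the local matmul produces Out_local[..., N/2]; the output stays
# column-sharded, so no forward communication of activations is needed beyond
# the identity op.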
# RowParallel
class DistributedMatmulImpl1(DistributedOperatorImpl):
def __init__(self, name):
super(DistributedMatmulImpl1, self).__init__()
self._name = name
self._forward_implemented = True
self._backward_implemented = False
def is_process_mesh_compatible(self, op_dist_attr):
""" No restriction for now. """
return True
def is_input_compatible(self, op_dist_attr):
op_desc = op_dist_attr.get_owner_op().desc
x_name = op_desc.input('X')[0]
y_name = op_desc.input('Y')[0]
x_dims_mapping = op_dist_attr.get_input_dims_mapping(x_name)
y_dims_mapping = op_dist_attr.get_input_dims_mapping(y_name)
if is_dim_replicate(x_dims_mapping[-1]):
return False
if is_dim_replicate(y_dims_mapping[-2]) or is_dim_shard(y_dims_mapping[
-1]):
return False
# Other dimensions must be replicate except the batch dimension
for mapping in x_dims_mapping[1:-1]:
if is_dim_shard(mapping):
return False
return True
def is_output_compatible(self, op_dist_attr):
op_desc = op_dist_attr.get_owner_op().desc
out_name = op_desc.output('Out')[0]
out_dims_mapping = op_dist_attr.get_output_dims_mapping(out_name)
if is_dim_shard(out_dims_mapping[-1]):
return False
# Other dimensions must be replicate except the batch dimension
for mapping in out_dims_mapping[1:-1]:
if is_dim_shard(mapping):
return False
return True
def update_dims_mapping(self, op_dist_attr):
changed = False
dim_changed = _update_dims_mapping_for_matmul(op_dist_attr)
if dim_changed:
changed = True
return changed
def forward(self, serial_op):
def static_handle(dst_block,
src_op,
op_dist_attr,
input_name_mapping,
output_name_mapping,
rank_id=0):
            assert len(
                input_name_mapping
            ) == 2, "row_parallel_linear takes 2 input variables but got {}".format(
                input_name_mapping)
            assert len(
                output_name_mapping
            ) == 1, "row_parallel_linear takes 1 output variable but got {}".format(
                output_name_mapping)
            assert len(
                input_name_mapping['X']
            ) == 1, "row_parallel_linear input X takes 1 variable but got {}".format(
                input_name_mapping['X'])
            assert len(
                input_name_mapping['Y']
            ) == 1, "row_parallel_linear input Y takes 1 variable but got {}".format(
                input_name_mapping['Y'])
            assert len(
                output_name_mapping['Out']
            ) == 1, "row_parallel_linear output Out takes 1 variable but got {}".format(
                output_name_mapping['Out'])
X_var = dst_block.var(input_name_mapping['X'][0])
Weight_var = dst_block.var(input_name_mapping['Y'][0])
Out_var = dst_block.var(output_name_mapping['Out'][0])
# TODO infer logic comm presentation
model_parallel_axis, process_mesh = op_dist_attr.get_owner_context(
)._get_model_parallel_info()
group_ranks = _get_comm_group(process_mesh.process_group,
process_mesh.topology,
model_parallel_axis, rank_id)
group = new_process_group(group_ranks)
check_variable_and_dtype(
X_var, 'x', ['float16', 'float32', 'float64'], 'linear')
check_dtype(X_var.dtype, 'dtype',
['float16', 'float32', 'float64'], 'linear')
attrs = {
'transpose_X': False,
'transpose_Y': False,
'alpha': 1,
}
inputs = {'X': X_var, 'Y': Weight_var}
intermediate_var_0 = dst_block.create_var(
shape=Out_var.shape,
dtype=Out_var.dtype,
type=Out_var.type,
lod_level=Out_var.lod_level,
persistable=False,
is_data=False,
need_check_feed=Out_var.desc.need_check_feed())
dst_block.append_op(
type='matmul',
inputs=inputs,
outputs={'Out': intermediate_var_0},
attrs=attrs)
dst_block.append_op(
type='c_allreduce_sum',
inputs={'X': intermediate_var_0},
outputs={'Out': Out_var},
attrs={
'ring_id': group.id,
'use_calc_stream': True,
'use_model_parallel': True
})
if in_dygraph_mode():
raise NotImplementedError(
"Dist op for [{}] with idx [{}] is NOT implemented yet.".format(
"matmul", 0))
else:
return static_handle
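# Sketch of the row-parallel pattern implemented above (illustrative shapes,
# assuming a model-parallel group of size 2): X is sharded along K as
# X_local[..., K/2], the weight is stored as Y_local[K/2, N], each rank's local
# matmul yields a partial sum of shape [..., N], and c_allreduce_sum combines
# the partial results so that Out_var holds the full [..., N] product.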
# ReplicateParallel
class DistributedMatmulImpl2(DistributedOperatorImpl):
def __init__(self, name):
super(DistributedMatmulImpl2, self).__init__()
self._name = name
def is_process_mesh_compatible(self, op_dist_attr):
""" No restriction for now. """
return True
def is_input_compatible(self, op_dist_attr):
op_desc = op_dist_attr.get_owner_op().desc
x_name = op_desc.input('X')[0]
y_name = op_desc.input('Y')[0]
x_dims_mapping = op_dist_attr.get_input_dims_mapping(x_name)
y_dims_mapping = op_dist_attr.get_input_dims_mapping(y_name)
if is_dim_shard(x_dims_mapping[-1]):
return False
if is_valid_list_index(x_dims_mapping,
-2) and is_dim_shard(x_dims_mapping[-2]):
return False
if is_dim_shard(y_dims_mapping[-1]):
return False
if is_valid_list_index(y_dims_mapping,
-2) and is_dim_shard(y_dims_mapping[-2]):
return False
return True
def is_output_compatible(self, op_dist_attr):
op_desc = op_dist_attr.get_owner_op().desc
out_name = op_desc.output('Out')[0]
out_dims_mapping = op_dist_attr.get_output_dims_mapping(out_name)
if is_dim_shard(out_dims_mapping[-1]):
return False
if is_valid_list_index(out_dims_mapping,
-2) and is_dim_shard(out_dims_mapping[-2]):
return False
return True
def update_dims_mapping(self, op_dist_attr):
changed = False
dim_changed = _update_dims_mapping_for_matmul(op_dist_attr)
if dim_changed:
changed = True
return changed
register_distributed_operator_impl("matmul",
DistributedMatmulImpl0("column_parallel"))
register_distributed_operator_impl("matmul",
DistributedMatmulImpl1("row_parallel"))
register_distributed_operator_impl("matmul",
DistributedMatmulImpl2("replicate_parallel"))
class DistributedMatmulV2(DistributedOperator):
def __init__(self, name):
super(DistributedMatmulV2, self).__init__()
self._name = name
register_distributed_operator("matmul_v2", DistributedMatmulV2("matmul_v2"))
# ColumnParallel
class DistributedMatmulV2Impl0(DistributedOperatorImpl):
def __init__(self, name):
super(DistributedMatmulV2Impl0, self).__init__()
self._name = name
self._forward_implemented = True
self._backward_implemented = False
def is_process_mesh_compatible(self, op_dist_attr):
""" No restriction for now. """
return True
def is_input_compatible(self, op_dist_attr):
op_desc = op_dist_attr.get_owner_op().desc
x_name = op_desc.input('X')[0]
y_name = op_desc.input('Y')[0]
x_dims_mapping = op_dist_attr.get_input_dims_mapping(x_name)
y_dims_mapping = op_dist_attr.get_input_dims_mapping(y_name)
if is_dim_shard(x_dims_mapping[-1]):
return False
if is_dim_shard(y_dims_mapping[0]) or is_dim_replicate(y_dims_mapping[
1]):
return False
for mapping in x_dims_mapping[1:-1]:
if is_dim_shard(mapping):
return False
return True
def is_output_compatible(self, op_dist_attr):
op_desc = op_dist_attr.get_owner_op().desc
out_name = op_desc.output('Out')[0]
out_dims_mapping = op_dist_attr.get_output_dims_mapping(out_name)
        if is_dim_replicate(out_dims_mapping[-1]):
            return False
        for mapping in out_dims_mapping[1:-1]:
            if is_dim_shard(mapping):
                return False
        return True
# -*- coding: utf-8 -*-
"""
Created on Thu Nov 10 20:21:46 2015
@author: derrick
"""
from __future__ import print_function, absolute_import, unicode_literals, division
import glob
import itertools
import json
import os
import random
import numpy as np
import obspy
import pandas as pd
from six import string_types
import detex
# client imports
import obspy.clients.fdsn
import obspy.clients.neic
import obspy.clients.earthworm
conDirDefault = 'ContinuousWaveForms'
eveDirDefault = 'EventWaveForms'
# extension key to map obspy output type to extension. Add more here
formatKey = {'mseed': 'msd', 'pickle': 'pkl', 'sac': 'sac', 'Q': 'Q'}
def read(path):
"""
    Read a waveform file from a path with obspy. On IOError or TypeError,
    retry with os.sep prepended to the path; return None if that also fails.
"""
try:
st = obspy.read(path)
except (IOError, TypeError):
try:
st = obspy.read(os.path.join(os.path.sep, path))
except (IOError, TypeError):
msg = 'Cannot read %s, the file may be corrupt, skipping it' % path
detex.log(__name__, msg, level='warn', pri=True)
return None
return st
def quickFetch(fetch_arg, **kwargs):
"""
Instantiate a DataFetcher using as little information as possible.
Parameters
----------
fetch_arg : str or DataFetcher instance
fetch_arg can be one of three things:
1. An instance of DataFetcher
2. A valid DataFetcher Method other than dir
3. A path to a directory containing waveform data
fetch_arg is checked in that order, so if you are trying to use a
data directory make sure it does not share names with a valid
DataFetcher method
kwargs are passed to the DataFetcher class, see DataFetcher
docs for details
Returns
-------
An instance of DataFetcher
Notes
--------
    For client methods (e.g. 'uuss', 'iris') removeResponse is assumed to be
    True with the default preliminary filter. If you don't want this, make a
    custom instance of DataFetcher.
"""
if isinstance(fetch_arg, DataFetcher):
dat_fet = fetch_arg
elif isinstance(fetch_arg, string_types):
if fetch_arg in DataFetcher.supMethods:
if fetch_arg == 'dir':
msg = 'If using method dir you must pass a path to directory'
detex.log(__name__, msg, level='error')
dat_fet = DataFetcher(fetch_arg, removeResponse=True, **kwargs)
else:
if not os.path.exists(fetch_arg):
msg = 'Directory %s does not exist' % fetch_arg
detex.log(__name__, msg, level='error')
dat_fet = DataFetcher('dir', directoryName=fetch_arg, **kwargs)
else:
msg = 'Input not understood, read docs and try again'
detex.log(__name__, msg, level='error')
return dat_fet
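# Hedged usage sketch (not executed on import; argument values are illustrative):
#   fetcher = quickFetch('iris')                  # client method, response removal on
#   fetcher = quickFetch('ContinuousWaveForms')   # treated as a local data directory
#   fetcher = quickFetch(DataFetcher('dir', directoryName='EventWaveForms'))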
def makeDataDirectories(templateKey='TemplateKey.csv',
stationKey='StationKey.csv',
fetch='IRIS',
formatOut='mseed',
templateDir=eveDirDefault,
timeBeforeOrigin=1 * 60,
timeAfterOrigin=4 * 60,
conDir=conDirDefault,
secBuf=120,
conDatDuration=3600,
multiPro=False,
getContinuous=True,
getTemplates=True,
removeResponse=True,
opType='VEL',
prefilt=[.05, .1, 15, 20]):
"""
Function designed to fetch data needed for detex and store them in local
directories. StationKey.csv and TemplateKey.csv indicate which events to
download and for which stations. Organizes ContinuousWaveForms and
EventWaveForms directories.
Parameters
------------
    templateKey : str or pd DataFrame
        The path to the TemplateKey csv
    stationKey : str or pd DataFrame
        The path to the station key
    fetch : str or DataFetcher instance
        String for the method argument of the DataFetcher class, or a
        DataFetcher instance
formatOut : str
Seismic data file format, most obspy formats acceptable, options are:
'mseed','sac','GSE2','sacxy','q','sh_asc',' slist', 'tspair','segy',
'su', 'pickle', 'h5' (h5 only if obspyh5 module installed)
    templateDir : str
        The name of the template directory. Using the default is recommended,
        otherwise the templateDir parameter will have to be set when calling
        most other detex functions
timeBeforeOrigin: real number
The time in seconds before the reported origin of each template that
is downloaded.
timeAfterOrigin : real number(int, float, etc.)
The time in seconds to download after the origin time of each template.
conDir : str
The name of the continuous waveform directory. Using the default is
recommended
secBuf : real number (int, float, etc.)
The number of seconds to download after each hour of continuous data.
This might be non-zero in order to capture some detections that would
normally be overlooked if data did not overlap somewhat.
conDatDuration : real number (int, float, etc.)
The duration of the continuous data to download in seconds.
multiPro : bool
If True fork several processes to get data at once, potentially much
faster but a bit inconsiderate on the server hosting the data
getContinuous : bool
If True fetch continuous data with station and date ranges listed in
the station key
getTemplates : bool
If True get template data with stations listed in the station key
and events listed in the template key
removeResponse : bool
If true remove instrument response
opType : str
Output type after removing instrument response. Choices are:
"DISP" (m), "VEL" (m/s), or "ACC" (m/s**2)
prefilt : list 4 real numbers
Pre-filter parameters for removing instrument response, response is
flat from corners 2 to 3.
"""
temkey = detex.util.readKey(templateKey, 'template')
stakey = detex.util.readKey(stationKey, 'station')
# Check output type
if formatOut not in formatKey.keys():
msg = ('%s is not an acceptable format, choices are %s' %
(formatOut, formatKey.keys()))
detex.log(__name__, msg, level='error')
# Configure data fetcher
if isinstance(fetch, detex.getdata.DataFetcher):
fetcher = fetch
# Make sure DataFetcher is on same page as function inputs
fetcher.opType = opType
fetcher.removeResponse = removeResponse
fetcher.prefilt = prefilt
else:
fetcher = detex.getdata.DataFetcher(fetch,
removeResponse=removeResponse,
opType=opType,
prefilt=prefilt)
## Get templates
if getTemplates:
msg = 'Getting template waveforms'
detex.log(__name__, msg, level='info', pri=True)
_getTemData(temkey, stakey, templateDir, formatOut,
fetcher, timeBeforeOrigin, timeAfterOrigin)
## Get continuous data
if getContinuous:
msg = 'Getting continuous data'
detex.log(__name__, msg, level='info', pri=True)
_getConData(fetcher, stakey, conDir, secBuf, opType, formatOut,
duration=conDatDuration)
## Log finish
msg = "finished makeDataDirectories call"
detex.log(__name__, msg, level='info', close=True, pri=True)
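# Hedged usage sketch (assumes TemplateKey.csv and StationKey.csv exist in the
# working directory; not executed on import):
#   makeDataDirectories(fetch='IRIS', formatOut='mseed',
#                       getTemplates=True, getContinuous=True)
# downloads event waveforms into EventWaveForms and hourly continuous data into
# ContinuousWaveForms, then indexes both directories.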
def _getTemData(temkey, stakey, temDir, formatOut, fetcher, tb4, taft):
streamGenerator = fetcher.getTemData(temkey, stakey, tb4, taft,
returnName=True, temDir=temDir,
skipIfExists=True)
for st, name in streamGenerator:
netsta = st[0].stats.network + '.' + st[0].stats.station
fname = netsta + '.' + name + '.' + formatKey[formatOut]
fdir = os.path.join(temDir, name)
if not os.path.exists(fdir):
os.makedirs(fdir)
st.write(os.path.join(fdir, fname), formatOut)
if not os.path.exists(os.path.join(temDir, '.index.db')):
indexDirectory(temDir)
def _getConData(fetcher, stakey, conDir, secBuf, opType, formatOut,
duration=3600):
streamGenerator = fetcher.getConData(stakey,
secBuf,
returnName=True,
conDir=conDir,
skipIfExists=True,
duration=duration)
for st, path, fname in streamGenerator:
if st is not None: # if data were returned
if not os.path.exists(path):
os.makedirs(path)
fname = fname + '.' + formatKey[formatOut]
st.write(os.path.join(path, fname), formatOut)
if not os.path.exists(os.path.join(conDir, '.index.db')):
indexDirectory(conDir)
class DataFetcher(object):
"""
    Class to handle data acquisition.
Parameters
----------
method : str or int
One of the approved methods for getting data as supported by detex
Options are:
"dir" : A data directory as created by makeDataDirectories
"client" : an obspy client can be passed to get data
useful if using an in-network database
"iris" : an iris client is initiated, also uses IRIS for inventory
"uuss" : A client attached to the university of utah
seismograph stations is initiated using CWB for waveforms
and IRIS is used for station inventories
client : An obspy client object
Client object used to get data, from obspy.clients
removeResponse : bool
If True remove response before returning stream.
inventoryArg : None, obspy client object, or obspy Inventory object
        A separate client for station inventories, only used if
removeResponse == True, also supports keyword "iris" for iris client
directoryName : str
A path to the continuous waveforms directory or event waveforms
directory. If None is passed default names are used
(ContinuousWaveForms and EventWaveForms)
opType : str
Output type after removing instrument response. Choices are:
"DISP" (m), "VEL" (m/s), or "ACC" (m/s**2)
prefilt : list of real numbers
Pre-filter parameters for removing instrument response.
conDatDuration : int or float
Duration for continuous data in seconds
conBuff : int or float
The amount of data, in seconds, to download at the end of the
conDatDuration. Ideally should be equal to template length, important
in order to avoid missing potential events at the end of a stream
timeBeforeOrigin : int or float
Seconds before origin of each event to fetch (used in getTemData)
timeAfterOrigin : int or float
Seconds after origin of each event to fetch (used in getTemData)
checkData : bool
If True apply some data checks before returning streams, can be useful
for older data sets.
fillZeros : bool
If True fill data that are not available with 0s (provided some data are
available)
"""
    supMethods = ['dir', 'client', 'iris', 'uuss']
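    # Hedged usage sketch (paths are illustrative, not executed on import):
    #   fetcher = DataFetcher('dir', directoryName='EventWaveForms',
    #                         removeResponse=False)
    #   fetcher = DataFetcher('iris', removeResponse=True, opType='VEL')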
#!/usr/bin/python3
# coding: UTF-8
import argparse
import binascii
import datetime
import glob
import json
import threading
import time
import os
import pickle
import socket
import sys
import RPi.GPIO as gpio
from y3module import Y3Module
from echonet_lite import *
import user_conf
# Constant definitions
Y3RESET_GPIO = 18    # GPIO pin used to reset the Wi-SUN module
LED_GPIO = 4         # GPIO pin for the status LED
# Log file settings
TMP_LOG_DIR = '/tmp/'    # temporary log directory
LOG_DIR = 'sem_app/public/logs/'    # log directory, relative to this script
SOCK_FILE = TMP_LOG_DIR + 'sem.sock'    # UNIX domain socket
TMP_LOG_FILE = TMP_LOG_DIR + 'sem.csv'    # temporary log file
POW_DAYS_JSON_FILE = LOG_DIR + 'pow_days.json'    # power log file in JSON format
POW_DAY_LOG_HEAD = 'pow_day_'    # prefix of the daily log file names
POW_DAY_LOG_FMT = '%Y%m%d'    # date format used in daily log file names
CURR_POW_FILE = TMP_LOG_DIR + 'curr_pow.txt'
# Storage for information about the low-voltage smart energy meter
sem_info = {}
def gpio_init():
"""GPIO初期化"""
gpio.setwarnings(False)
gpio.setmode(gpio.BCM)
gpio.setup(Y3RESET_GPIO, gpio.OUT)
gpio.setup(LED_GPIO, gpio.OUT)
gpio.output(Y3RESET_GPIO, gpio.HIGH)
time.sleep(0.1)
gpio.output(LED_GPIO, gpio.LOW)
class LedThread(threading.Thread):
"""LEDを点滅させるスレッド"""
def __init__(self):
super().__init__()
self._trigger = False
self._termFlag = False
def run(self):
while not self._termFlag:
if self._trigger:
self.ledon(True)
time.sleep(0.3)
self.ledon(False)
self._trigger = False
else:
time.sleep(0.1)
@staticmethod
def ledon(ctl):
if ctl:
gpio.output(LED_GPIO, gpio.HIGH)
else:
gpio.output(LED_GPIO, gpio.LOW)
def oneshot(self):
self._trigger = True
def terminate(self):
self._termFlag = True
self.join()
def y3reset():
"""Wi-Sunモジュールのリセット"""
gpio.output(Y3RESET_GPIO, gpio.LOW) # high -> low -> high
time.sleep(0.5)
gpio.output(Y3RESET_GPIO, gpio.HIGH)
time.sleep(2.0)
class Y3ModuleSub(Y3Module):
"""Y3Module()のサブクラス"""
global sem_inf_list
def __init__(self):
super().__init__()
self.EHD = '1081'
        self.ECV_INF = '73'    # ECHONET ECV code (INF)
    # run() extends the UART receive thread for ECHONET Lite frames
    # UART receive thread
def run(self):
while not self.term_flag:
msg = self.read()
if msg:
msg_list = self.parse_message(msg)
                # debug: UDP (PANA) reception
if msg_list['COMMAND'] == 'ERXUDP' and msg_list['LPORT'] == self.Y3_UDP_PANA_PORT:
sys.stdout.write('[PANA]: {}\n'.format(msg_list['DATA']))
                # Property notifications issued spontaneously by the smart meter
if msg_list['COMMAND'] == 'ERXUDP' and msg_list['DATA'][0:4] == self.EHD \
and msg_list['DATA'][20:22] == self.ECV_INF:
sem_inf_list.append(msg_list)
                elif self.search['search_words']:    # a search is in progress
                    # a search word was received
search_words = self.search['search_words'][0]
if isinstance(search_words, list):
for word in search_words:
if msg_list['COMMAND'].startswith(word):
self.search['found_word_list'].append(msg_list)
self.search['search_words'].pop(0)
break
elif msg_list['COMMAND'].startswith(search_words):
self.search['found_word_list'].append(msg_list)
self.search['search_words'].pop(0)
elif self.search['ignore_intermidiate']:
                        pass    # discard intermediate received data
                    else:    # it was not a search word
self.enqueue_message(msg_list)
                else:    # not currently searching
self.enqueue_message(msg_list)
            elif self.search['timeout']:    # read() timed out while a timeout was set by write()
if time.time() - self.search['start_time'] > self.search['timeout']:
self.search['found_word_list'] = []
self.search['search_words'] = []
self.search['timeout'] = 0
def sem_get(epc):
"""プロパティ値要求 'Get' """
global tid_counter
frame = sem.GET_FRAME_DICT['get_' + epc]
    tid_counter = tid_counter + 1 if tid_counter + 1 != 65536 else 0    # increment TID counter
frame = sem.change_tid_frame(tid_counter, frame)
res = y3.udp_send(1, ip6, True, y3.Y3_UDP_ECHONET_PORT, frame)
def sem_get_getres(epc):
"""プロパティ値要求 'Get', 'GetRes'受信
epc: EHONET Liteプロパティ
"""
    sem_get(epc)    # send 'Get'
start = time.time()
while True:
        if y3.get_queue_size():    # data received
            msg_list = y3.dequeue_message()    # take the received data off the queue
if msg_list['COMMAND'] == 'ERXUDP':
parsed_data = sem.parse_frame(msg_list['DATA'])
if parsed_data:
if parsed_data['tid'] != tid_counter:
errmsg = '[Error]: ECHONET Lite TID mismatch\n'
sys.stdout.write(errmsg)
return False
else:
return msg_list['DATA']
else:
sys.stdout.write('[Error]: ECHONET Lite frame error.\n')
return False
else:
sys.stdout.write('[Error]: Unknown data received.\n')
return False
        else:    # no data received yet
            if time.time() - start > 20:    # 20 s timeout
sys.stdout.write('[Error]: Time out.\n')
return False
time.sleep(0.01)
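# Hedged usage sketch (the EPC key name is hypothetical; the actual keys depend
# on GET_FRAME_DICT defined in echonet_lite.py):
#   frame = sem_get_getres('instant_power')
#   if frame:
#       parsed = sem.parse_frame(frame)    # dict with 'tid' and the EPC/EDT payload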
def sem_seti(epc, edt):
"""プロパティ値書き込み要求(応答要) 'SetI'
---------------------------------
(注)未検証 (注)未検証 (注)未検証
---------------------------------
epc: Echonet Liteプロパティ(bytes)
edt: Echonet Liteプロパティ値データ(bytes)
return: True(成功) / False(失敗)"""
global tid_counter
    tid_counter = tid_counter + 1 if tid_counter + 1 != 65536 else 0    # increment TID counter
ptys = [[epc, edt]]
frame = sem.make_frame(tid_counter, sem.ESV_CODE['setc'], ptys)
res = y3.udp_send(1, ip6, True, y3.Y3_UDP_ECHONET_PORT, frame)
start = time.time()
while True:
        if y3.get_queue_size():    # data received
            msg_list = y3.dequeue_message()    # take the received data off the queue
if msg_list['COMMAND'] == 'ERXUDP':
parsed_data = sem.parse_frame(msg_list['DATA'])
if parsed_data:
if parsed_data['tid'] != tid_counter:
errmsg = '[Error]: ECHONET Lite TID mismatch\n'
sys.stdout.write(errmsg)
return False
else:
return msg_list['DATA']
else:
sys.stdout.write('[Error]: ECHONET Lite frame error.\n')
return False
else:
sys.stdout.write('[Error]: Unknown data received.\n')
return False
        else:    # no data received yet
            if time.time() - start > 20:    # 20 s timeout
sys.stdout.write('[Error]: Time out.\n')
return False
time.sleep(0.01)
def pow_logfile_init(dt):
"""電力ログファイル初期設定"""
f = open(TMP_LOG_FILE , 'w') # 一時ログ初期化
f.close()
if not (os.path.isdir(LOG_DIR) and os.access(LOG_DIR, os.W_OK)): # ログ用ディレクトリ確認
return False
csv_day_files = [] # 10日分のログファイルリスト(CSV)
pkl_day_files = [] # (pickle)
for i in range(10): # 10日分の電力ログ作成
t = dt - datetime.timedelta(days = i) # 対象日のdatetime
# ログファイル名
dt_str = t.strftime(POW_DAY_LOG_FMT)
csv_filename = LOG_DIR + POW_DAY_LOG_HEAD + dt_str + '.csv'
pkl_filename = TMP_LOG_DIR + POW_DAY_LOG_HEAD + dt_str + '.pickle'
csv_day_files.append(csv_filename)
pkl_day_files.append(pkl_filename)
        if not os.path.exists(csv_filename):    # create the power log (CSV) if it does not exist
try:
fcsv = open(csv_filename, 'w')
fcsv.close()
except:
return False
        if not os.path.exists(pkl_filename):    # create the power log (pickle) if it does not exist
result = csv2pickle(csv_filename, pkl_filename, t)
if not result:
return False
    files = glob.glob(LOG_DIR + POW_DAY_LOG_HEAD + '*.csv')    # find existing power logs (CSV)
for f in files:
if f in csv_day_files:
continue
else:
            os.remove(f)    # delete old power logs (CSV)
    files = glob.glob(TMP_LOG_DIR + POW_DAY_LOG_HEAD + '*.pickle')    # find existing power logs (pickle)
for f in files:
if f in pkl_day_files:
continue
else:
            os.remove(f)    # delete old power logs (pickle)
    # Convert the pickle files into a single JSON file
    pickle2json(sorted(pkl_day_files), POW_DAYS_JSON_FILE)
return True
def pow_logfile_maintainance(last_dt, new_dt):
"""電力ログファイル更新"""
if last_dt.minute != new_dt.minute and new_dt.minute % 10 == 0: # 10分毎
dt_str = last_dt.strftime(POW_DAY_LOG_FMT)
today_csv_file = LOG_DIR + POW_DAY_LOG_HEAD + dt_str + '.csv'
today_pkl_file = TMP_LOG_DIR + POW_DAY_LOG_HEAD + dt_str + '.pickle'
file_cat(today_csv_file, TMP_LOG_FILE)
os.remove(TMP_LOG_FILE) # 一時ログファイルを削除
csv2pickle(today_csv_file, today_pkl_file, last_dt) # pickle更新
if last_dt.day != new_dt.day: # 日付変更
pow_logfile_init(new_dt) # 電力ログ初期化
else:
pkl_day_files = glob.glob(TMP_LOG_DIR + POW_DAY_LOG_HEAD + '*.pickle') # 電力ログ(pickle)検索
pickle2json(sorted(pkl_day_files), POW_DAYS_JSON_FILE) # CSVファイルをJSONファイルに変換
def file_cat(file_a, file_b):
"""ファイルを連結する"""
try:
fp_a = open(file_a, 'ab')
fp_b = open(file_b, 'rb')
fp_a.write(fp_b.read())
fp_a.close()
fp_b.close()
return True
except:
return False
def csv2pickle(csvfile, pklfile, dt):
"""csvファイルをpickleファイルに変換"""
try:
fcsv = open(csvfile, 'r')
fpkl = open(pklfile, 'wb')
data = fcsv.readlines()
except:
return False
    if data == []:    # the CSV file is empty because it was newly created
        # build the 00:00 timestamp from the dt argument (datetime)
        ts_origin = datetime.datetime.combine(dt, datetime.time(0, 0)).timestamp()
    else:
        ts = int(data[0].strip().split(',')[0])    # take a timestamp from the log
        dt = datetime.datetime.fromtimestamp(ts)    # then build the 00:00 timestamp from it
        ts_origin = datetime.datetime(dt.year, dt.month, dt.day).timestamp()
    data_work = [[None, []] for row in range(60 * 24)]    # empty working buffer
    for minute in range(60 * 24):
        data_work[minute][0] = ts_origin + 60 * minute    # timestamps at 1-minute intervals
    for row in data:
        row_list = row.strip().split(',')    # [timestamp (s), power]
        if row_list[1] != 'None':
            minute = int((int(row_list[0]) - ts_origin) / 60)    # minutes elapsed since 00:00
            if minute > 0 and minute < 60 * 24:
                data_work[minute][1].append(int(row_list[1]))    # append the power value
    data_summary = [[None, None] for row in range(60 * 24)]    # empty summary buffer
for minute, data in enumerate(data_work):
data_summary[minute][0] = data[0]
if len(data[1]):
            data_summary[minute][1] = round(sum(data[1]) / len(data[1]))    # average power
pickle.dump(data_summary, fpkl)
fcsv.close()
fpkl.close()
return True
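# Illustration of the pickle layout produced above (hypothetical numbers):
# data_summary is a list of 1440 [timestamp, power] pairs, one per minute, e.g.
#   [[1447081200.0, 523], [1447081260.0, None], [1447081320.0, 489], ...]
# where None marks minutes for which no sample was logged.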
def pickle2json(pklfiles, jsonfile):
"""pickleファイルをJSONファイルに変換する"""
data = []
for fpkl in pklfiles:
try:
f = open(fpkl, 'rb')
d = pickle.load(f)
data = data + d
except:
return False
json_data = []
for row in data:
row = [int(row[0])*1000, None if row[1] is None else int(row[1])]
json_data.append(row)
s = json.dumps(json_data)
try:
f = open(jsonfile, 'w')
f.write(s)
f.close()
return True
except:
return False
# Command-line arguments
def arg_parse():
p = argparse.ArgumentParser()
p.add_argument('-d', '--delay', help='This script starts after a delay of [n] seconds.', default=0, type=int)
args = p.parse_args()
return args
if __name__ == '__main__':
args = arg_parse()
    if args.delay:    # wait before starting; delays sem_com.py when it is launched together with sem_app
if isinstance(args.delay, int):
ws = args.delay
sys.stdout.write('Waiting for {} seconds...\n'.format(ws))
time.sleep(ws)
os.chdir(os.path.dirname(os.path.abspath(__file__)))
    sem_inf_list = []    # property notifications from the smart meter
    tid_counter = 0    # TID counter
    pana_ts = 0.0    # timestamp of the last PANA authentication
    saved_dt = datetime.datetime.now()    # remember the current date and time
    sys.stdout.write('Log files setup...\n')
    result = pow_logfile_init(saved_dt)    # initialize the log files
if not result:
sys.stdout.write('[Error]: Log file error\n')
sys.exit(-1)
gpio_init()
led = LedThread()
led.start()
led.oneshot()
y3 = Y3ModuleSub()
y3.uart_open(dev='/dev/ttyAMA0', baud=115200, timeout=1)
y3.start()
sys.stdout.write('Wi-SUN reset...\n')
y3reset()
y3.set_echoback_off()
y3.set_opt(True)
y3.set_password(user_conf.SEM_PASSWORD)
y3.set_routeb_id(user_conf.SEM_ROUTEB_ID)
sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
try:
sock.connect(SOCK_FILE)
except:
sock = None
channel_list = []
sem_exist = False
for i in range(10):
sys.stdout.write('({}/10) Active scan with a duration of {}...\n'.format(i+1, user_conf.SEM_DURATION))
channel_list = y3.active_scan(user_conf.SEM_DURATION)
        if channel_list is False:    # active_scan() was aborted with Ctrl+C
break
if channel_list:
sem_exist = True
break
    if not sem_exist:    # scan failed
sys.stdout.write('[Error]: Can not connect to a smart energy meter\n')
if sem_exist:
ch = channel_list[0]
sys.stdout.write('Energy Meter: [Ch.0x{:02X}, Addr.{}, LQI.{}, PAN.0x{:04X}]\n'.format(ch['Channel'],
ch['Addr'], ch['LQI'], ch['Pan ID']))
        # set the channel
y3.set_channel(ch['Channel'])
sys.stdout.write('Set channel to 0x{:02X}\n'.format(ch['Channel']))
        # IPv6 address of the smart meter
ip6 = y3.get_ip6(ch['Addr'])
sys.stdout.write('IP6 address is \'{}\'\n'.format(ip6))
# PAN ID
y3.set_pan_id(ch['Pan ID'])
sys.stdout.write('Set PAN ID to 0x{:04X}\n'.format(ch['Pan ID']))
        # PANA authentication (PaC)
sem_exist = False
pana_done = False
for i in range(10):
sys.stdout.write('({}/10) PANA connection...\n'.format(i+1))
sem_exist = y3.start_pac(ip6)
            if sem_exist:    # wait for the instance list notification
st = time.time()
while True:
if sem_inf_list:
                        pana_ts = time.time()    # save the timestamp
sys.stdout.write('Successfully done.\n')
time.sleep(3)
pana_done = True
break
                    elif time.time() - st > 15:    # timeout: PANA authentication failed
sys.stdout.write('Fail to connect.\n')
sem_exist = False
pana_done = False
break
else:
time.sleep(0.1)
if pana_done:
break
if sem_exist:
sem = EchonetLiteSmartEnergyMeter()
get_list = ['operation_status', 'location', 'version', 'fault_status',
'manufacturer_code', 'production_no',
                    'current_time', 'current_date',
"""
Copyright 2018 <NAME>
This file was derived from eval.py, implementing RSNA metric calculation.
Copyright 2017-2018 Fizyr (https://fizyr.com)
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from __future__ import print_function
from .anchors import compute_overlap
from .visualization import draw_detections, draw_annotations
import keras
import numpy as np
import os
import cv2
def _compute_ap(recall, precision):
""" Compute the average precision, given the recall and precision curves.
Code originally from https://github.com/rbgirshick/py-faster-rcnn.
# Arguments
recall: The recall curve (list).
precision: The precision curve (list).
# Returns
The average precision as computed in py-faster-rcnn.
"""
# correct AP calculation
# first append sentinel values at the end
mrec = np.concatenate(([0.], recall, [1.]))
mpre = np.concatenate(([0.], precision, [0.]))
# compute the precision envelope
for i in range(mpre.size - 1, 0, -1):
mpre[i - 1] = np.maximum(mpre[i - 1], mpre[i])
# to calculate area under PR curve, look for points
# where X axis (recall) changes value
i = np.where(mrec[1:] != mrec[:-1])[0]
# and sum (\Delta recall) * prec
ap = np.sum((mrec[i + 1] - mrec[i]) * mpre[i + 1])
return ap
# helper function to calculate IoU
def iou(box1, box2):
x11,y11,x12,y12=box1
x21,y21,x22,y22=box2
w1,h1=x12-x11,y12-y11
w2,h2=x22-x21,y22-y21
#x11, y11, w1, h1 = box1
#x21, y21, w2, h2 = box2
#assert w1 * h1 > 0
#assert w2 * h2 > 0
#x12, y12 = x11 + w1, y11 + h1
#x22, y22 = x21 + w2, y21 + h2
area1, area2 = w1 * h1, w2 * h2
xi1, yi1, xi2, yi2 = max([x11, x21]), max([y11, y21]), min([x12, x22]), min([y12, y22])
if xi2 <= xi1 or yi2 <= yi1:
return 0
else:
intersect = (xi2-xi1) * (yi2-yi1)
union = area1 + area2 - intersect
return intersect / union
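# Worked example (hypothetical boxes in (x1, y1, x2, y2) format):
#   iou((0, 0, 10, 10), (5, 5, 15, 15))
# intersection = 5 * 5 = 25, union = 100 + 100 - 25 = 175, so IoU = 25 / 175 ≈ 0.143.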
def map_iou(boxes_true, boxes_pred, scores, thresholds = [0.4, 0.45, 0.5, 0.55, 0.6, 0.65, 0.7, 0.75]):
"""
    Mean average precision at different intersection over union (IoU) thresholds
input:
boxes_true: Mx4 numpy array of ground true bounding boxes of one image.
bbox format: (x1, y1, w, h)
boxes_pred: Nx4 numpy array of predicted bounding boxes of one image.
bbox format: (x1, y1, w, h)
scores: length N numpy array of scores associated with predicted bboxes
thresholds: IoU shresholds to evaluate mean average precision on
output:
map: mean average precision of the image
"""
# According to the introduction, images with no ground truth bboxes will not be
# included in the map score unless there is a false positive detection (?)
# return None if both are empty, don't count the image in final evaluation (?)
if len(boxes_true) == 0 and len(boxes_pred) == 0:
return None
assert boxes_true.shape[1] == 4 or boxes_pred.shape[1] == 4, "boxes should be 2D arrays with shape[1]=4"
if len(boxes_pred):
assert len(scores) == len(boxes_pred), "boxes_pred and scores should be same length"
# sort boxes_pred by scores in decreasing order
boxes_pred = boxes_pred[np.argsort(scores)[::-1], :]
map_total = 0
# loop over thresholds
for t in thresholds:
matched_bt = set()
tp, fn = 0, 0
for i, bt in enumerate(boxes_true):
matched = False
for j, bp in enumerate(boxes_pred):
miou = iou(bt, bp)
if miou >= t and not matched and j not in matched_bt:
matched = True
tp += 1 # bt is matched for the first time, count as TP
matched_bt.add(j)
if not matched:
fn += 1 # bt has no match, count as FN
fp = len(boxes_pred) - len(matched_bt) # FP is the bp that not matched to any bt
m = tp / (tp + fn + fp)
map_total += m
return map_total / len(thresholds)
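# Worked example (hypothetical values): one ground-truth box and one prediction
# that overlaps it with IoU = 0.55. The prediction counts as a TP for the
# thresholds 0.4, 0.45, 0.5 and 0.55 (m = 1) and as an FN plus an FP for the
# other four thresholds (m = 0), so map_iou returns 4 / 8 = 0.5 for that image.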
def nmf(boxes, scores, overlapThresh):
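    # Despite its name, this implements greedy non-maximum suppression (NMS):
    # boxes are visited in decreasing score order and any remaining box whose
    # overlap with a kept box exceeds overlapThresh is discarded. The function
    # returns the indices of the kept boxes.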
# if there are no boxes, return an empty list
if len(boxes) == 0:
return []
if boxes.dtype.kind == "i":
boxes = boxes.astype("float")
pick = []
x1 = boxes[:,0]
y1 = boxes[:,1]
x2 = boxes[:,2]
y2 = boxes[:,3]
# compute the area of the bounding boxes and sort the bounding
# boxes by the bottom-right y-coordinate of the bounding box
area = (x2 - x1 + 1) * (y2 - y1 + 1)
idxs=np.argsort(scores)
#idxs = np.argsort(y2)
# keep looping while some indexes still remain in the indexes
# list
while len(idxs) > 0:
# grab the last index in the indexes list and add the
# index value to the list of picked indexes
last = len(idxs) - 1
i = idxs[last]
pick.append(i)
# find the largest (x, y) coordinates for the start of
# the bounding box and the smallest (x, y) coordinates
# for the end of the bounding box
xx1 = np.maximum(x1[i], x1[idxs[:last]])
yy1 = np.maximum(y1[i], y1[idxs[:last]])
xx2 = np.minimum(x2[i], x2[idxs[:last]])
yy2 = np.minimum(y2[i], y2[idxs[:last]])
# compute the width and height of the bounding box
w = np.maximum(0, xx2 - xx1 + 1)
h = np.maximum(0, yy2 - yy1 + 1)
# compute the ratio of overlap
overlap = (w * h) / area[idxs[:last]]
        # delete all indexes from the index list whose overlap exceeds the threshold
idxs = np.delete(idxs, np.concatenate(([last],
np.where(overlap > overlapThresh)[0])))
    # return the indices of the boxes that were picked (the caller can use
    # them to select boxes[pick] and scores[pick])
return pick #boxes[pick].astype("int")
def _get_detections(generator, model, score_threshold=0.05, max_detections=100, save_path=None):
""" Get the detections from the model using the generator.
The result is a list of lists such that the size is:
all_detections[num_images][num_classes] = detections[num_detections, 4 + num_classes]
# Arguments
generator : The generator used to run images through the model.
model : The model to run on the images.
score_threshold : The score confidence threshold to use.
max_detections : The maximum number of detections to use per image.
save_path : The path to save the images with visualized detections to.
# Returns
A list of lists containing the detections for each image in the generator.
"""
all_detections = []
for i in range(generator.size()):
raw_image = generator.load_image(i)
image = generator.preprocess_image(raw_image.copy())
image, scale = generator.resize_image(image)
if keras.backend.image_data_format() == 'channels_first':
image = image.transpose((2, 0, 1))
# run network
boxes, scores, labels = model.predict_on_batch(np.expand_dims(image, axis=0))[:3]
# correct boxes for image scale
boxes /= scale
# select indices which have a score above the threshold
indices = np.where(scores[0, :] > score_threshold)[0]
# select those scores
scores = scores[0][indices]
# find the order with which to sort the scores
scores_sort = np.argsort(-scores)[:max_detections]
# select detections
image_boxes = boxes[0, indices[scores_sort], :]
image_scores = scores[scores_sort]
all_detections.append([image_boxes, image_scores])
print('{}/{}'.format(i + 1, generator.size()), end='\r')
return all_detections
def _get_annotations(generator):
""" Get the ground truth annotations from the generator.
The result is a list of lists such that the size is:
        all_annotations[num_images][num_classes] = annotations[num_detections, 5]
# Arguments
generator : The generator used to retrieve ground truth annotations.
# Returns
A list of lists containing the annotations for each image in the generator.
"""
all_annotations=[]
for i in range(generator.size()):
# load the annotations
annotation = generator.load_annotations(i)[:,:4]
all_annotations.append(annotation)
print('{}/{}'.format(i + 1, generator.size()), end='\r')
return all_annotations
def evaluate_rsna(generator, model):
detections = _get_detections(generator, model)
annotations = _get_annotations(generator)
max_youden=0
y_se=y_sp=0
y_rsna_score=0
y_thresh=0
nmf_threshold=0
for score_threshold in np.arange(0.05,0.31,0.01):
total_score=0
denominator=0
tp=tn=fp=fn=0
for i, detection in enumerate(detections):
boxes_true=annotations[i]
boxes_pred=detection[0]
scores=detection[1]
indices = np.where(scores > score_threshold)[0]
scores=scores[indices]
boxes_pred=boxes_pred[indices]
idx=nmf(boxes_pred,scores, nmf_threshold)
scores=scores[idx]
boxes_pred=boxes_pred[idx]
score=map_iou(boxes_true, boxes_pred, scores)
if score is not None:
total_score+=score
denominator+=1
if boxes_true.size:
if boxes_pred.size:
tp+=1
else:
fn+=1
else:
if boxes_pred.size:
fp+=1
else:
tn+=1
se=tp/(tp+fn)
sp=tn/(fp+tn)
youden=se+sp-1
rsna_score=total_score/denominator
if youden>max_youden:
max_youden=youden
y_rsna_score=rsna_score
y_se=se
y_sp=sp
y_thresh=score_threshold
# print(f'{score_threshold:.3f} {total_score/denominator:.5f} {se:.5f} {sp:.5f} {se+sp-1:.5f}')
return (y_thresh, y_rsna_score, y_se, y_sp, max_youden)
def evaluate(
generator,
model,
iou_threshold=0.5,
score_threshold=0.05,
max_detections=100,
save_path=None
):
""" Evaluate a given dataset using a given model.
# Arguments
        generator : The generator that represents the dataset to evaluate.
import os
from nipype.interfaces.afni.base import (AFNICommandBase,
AFNICommandOutputSpec,
isdefined)
from nipype.utils.filemanip import split_filename as split_f
from nipype.interfaces.base import (CommandLine, CommandLineInputSpec,
TraitedSpec, traits, File)
def interative_flirt(anat_file, anat_file_BET, template_brain_file,
template_mask_file, n_iter):
"""
    This function, from Regis' script, aims at iteratively building a better
    skull-stripped version of the subject's brain. An already computed
    skull-stripped version is needed to initialize the procedure
    (anat_file_BET).
    The algorithm works this way:
    1) flirt the skull-stripped template to the skull-stripped subject's brain
    2) apply the transform to the template's mask to obtain the mask in the
       subject's space
    3) mask the original anat with the computed mask to obtain a new
       skull-stripped subject's brain. Use this new skull-stripped brain for
       the next iteration.
"""
import os
import nipype.interfaces.fsl as fsl
from nipype.utils.filemanip import split_filename as split_f
path_t, fname_t, ext_t = split_f(template_brain_file)
template_to_anat_file = os.path.abspath(fname_t + "_to_anat.xfm")
path_a, fname_a, ext_a = split_f(anat_file)
anat_file_brain_mask = os.path.abspath(fname_a + "_brain_mask.nii.gz")
anat_file_brain = os.path.abspath(fname_a + "_brain.nii")
flirt_ref_file = anat_file_BET
for i in range(n_iter):
print('Iter flirt {}'.format(i))
# first step = FLIRT: template brain to anat bet
# -> transfo matrix (linear) between the two
flirt = fsl.FLIRT()
flirt.inputs.in_file = template_brain_file
flirt.inputs.reference = flirt_ref_file
flirt.inputs.out_matrix_file = template_to_anat_file
flirt.inputs.cost = "normcorr"
flirt.run()
# second step = apply transfo to template's mask
# -> brain_mask in subject's space
print('Iter apply_flirt {}'.format(i))
apply_flirt = fsl.ApplyXFM()
apply_flirt.inputs.in_file = template_mask_file
apply_flirt.inputs.reference = anat_file_BET
apply_flirt.inputs.in_matrix_file = template_to_anat_file
apply_flirt.inputs.apply_xfm = True
apply_flirt.inputs.interp = "nearestneighbour"
apply_flirt.inputs.out_file = anat_file_brain_mask
apply_flirt.run()
# third step = use the mask in subject's space to mask the build
# a skull-stripped version of the subject's brain
# -> better skullstripped version
print('Iter apply_mask {}'.format(i))
        # apply_mask = fsl.ApplyMask()  # TODO: check whether this would be more appropriate
apply_mask = fsl.BinaryMaths()
apply_mask.inputs.in_file = anat_file
apply_mask.inputs.operation = 'mul'
apply_mask.inputs.operand_file = anat_file_brain_mask
apply_mask.inputs.out_file = anat_file_brain
apply_mask.inputs.output_type = "NIFTI"
apply_mask.run()
flirt_ref_file = anat_file_brain
return anat_file_brain, template_to_anat_file
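# Hedged usage sketch (file names are illustrative): refine a rough BET brain
# with two FLIRT iterations against a skull-stripped template.
#   brain, xfm = interative_flirt('sub-01_T1w.nii', 'sub-01_T1w_BET.nii.gz',
#                                 'NMT_SS.nii.gz', 'NMT_brainmask.nii.gz',
#                                 n_iter=2)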
# IterREGBET
class IterREGBETInputSpec(CommandLineInputSpec):
# mandatory
inw_file = File(
exists=True,
desc='Moving Whole-head image',
mandatory=True, position=0, argstr="-inw %s")
inb_file = File(
exists=True,
desc='Moving brain image',
mandatory=True, position=1, argstr="-inb %s")
refb_file = File(
exists=True,
desc='Fixed reference brain image',
mandatory=True, position=2, argstr="-refb %s")
# optional
xp = traits.String(
genfile=True,
desc="Prefix for the registration outputs (\"in_FLIRT-to_ref\" if not \
specified)",
position=3, argstr="-xp %s", mandatory=False)
bs = traits.String(
"_IRbrain", usedefault=True,
desc="Suffix for the brain files (\"in_IRbrain\" & \"in_IRbrain_mask\"\
if not specified)",
position=3, argstr="-bs %s", mandatory=False)
dof = traits.Enum(
12, 6, 7, desc='FLIRT degrees of freedom (6=rigid body, 7=scale, \
12=affine (default)). Use dof 6 for intra-subject, 12 for \
inter-subject registration',
argstr='-dof %d',
usedefault=True, mandatory=True)
cost = traits.Enum(
'normmi', 'leastsq', 'labeldiff', 'bbr', 'mutualinfo', 'corratio',
'normcorr',
desc='FLIRT cost {mutualinfo,corratio,normcorr,normmi,leastsq,\
labeldiff,bbr} (default is normmi)',
argstr='-cost %s',
usedefault=True, mandatory=False)
# how to assert minimal value in traits def? is now in _parse_args
n = traits.Int(
2, usedefault=True,
desc='n = the number of FLIRT iterations (>=2, default=2).',
argstr="-n %d", mandatory=False)
m = traits.Enum(
'ref', 'union', 'inter', 'mix',
desc='At each new iteration, either use:\
- the reference brain mask, m=ref (default)\
- the union of the reference and input brain masks, \
m=union (use if your input brain is too small)\
- the intersection of the reference and input brain masks, m=inter\
(use if your input brain is too big)\
- a mix between union & intersection, m=mix (give it a try!)',
argstr='-m %s',
usedefault=True, mandatory=False)
refw_file = File(
exists=True,
desc='Do a whole-head non-linear registration (using FNIRT) during\
last iteration (provide reference whole-head image)',
mandatory=False, argstr="-refw %s")
k = traits.Bool(
False, usedefault=True,
position=3, argstr="-k",
desc="Will keep temporary files",
mandatory=False)
p = traits.String(
desc="Prefix for running FSL functions\
(can be a path or just a prefix)",
position=3, argstr="-p %s")
class IterREGBETOutputSpec(TraitedSpec):
brain_file = File(
exists=True,
desc="brain from IterREGBET.sh")
brain_mask_file = File(
exists=True,
desc="masked brain from IterREGBET.sh")
warp_file = File(
exists=True,
desc="warped image from IterREGBET.sh")
transfo_file = File(
exists=True,
desc="transfo_file")
inv_transfo_file = File(
exists=True,
desc="inv_transfo_file")
class IterREGBET(CommandLine):
"""
    Description: Iterative registration of the in-file to the ref file (the
    registered brain mask of the ref image is used at each iteration). Use it
    when the input brain mask is not optimal (e.g. an output of FSL BET).
    Outputs a better brain mask of the in-file.
Inputs:
Mandatory:
inw_file
Moving Whole-head image
inb_file
Moving brain image
refb_file
Fixed reference brain image
Optional:
xp
Prefix for the registration outputs ("in_FLIRT-to_ref" if not
specified)
bs
Suffix for the brain files ("in_IRbrain" & "in_IRbrain_mask"
if not specified)
dof
FLIRT degrees of freedom (6=rigid body, 7=scale,
12=affine (default)). Use dof 6 for intra-subject, 12 for
inter-subject registration
cost
FLIRT cost {mutualinfo,corratio,normcorr,normmi,leastsq,\
labeldiff,bbr} (default is normmi)
n
n = the number of FLIRT iterations (>=2, default=2)
m
At each new iteration, either use:
- the reference brain mask, m=ref (default)
- the union of the reference and input brain masks, m=union \
(use if your input brain is too small)
- the intersection of the reference and input brain masks, \
m=inter (use if your input brain is too big)
- a mix between union & intersection, m=mix (give it a try!)
refw_file
Do a whole-head non-linear registration (using FNIRT) during
last iteration (provide reference whole-head image)
k
Will keep temporary files
p
Prefix for running FSL functions (can be a path or just a
prefix)
Outputs:
brain_file
brain from IterREGBET.sh
brain_mask_file
masked brain from IterREGBET.sh
warp_file
warped image from IterREGBET.sh
transfo_file
transfo_file
inv_transfo_file
inv_transfo_file
"""
input_spec = IterREGBETInputSpec
output_spec = IterREGBETOutputSpec
package_directory = os.path.dirname(os.path.abspath(__file__))
_cmd = 'bash {}/../bash/IterREGBET.sh'.format(package_directory)
def _gen_filename(self, name):
if name == "xp":
return self._gen_outfilename()
else:
return None
def _gen_outfilename(self):
from nipype.utils.filemanip import split_filename as split_f
_, in_brain, _ = split_f(self.inputs.inb_file)
_, ref, _ = split_f(self.inputs.refb_file)
if isdefined(self.inputs.xp):
outname = self.inputs.xp
else:
outname = in_brain + "_FLIRT-to_" + ref
return outname
def _list_outputs(self):
import os
from nipype.utils.filemanip import split_filename as split_f
outputs = self._outputs().get()
path, fname, ext = split_f(self.inputs.inw_file)
outputs["brain_file"] = os.path.abspath(
fname + self.inputs.bs + ".nii.gz")
outputs["brain_mask_file"] = os.path.abspath(
fname + self.inputs.bs + "_mask.nii.gz")
if isdefined(self.inputs.xp):
outfile = self.inputs.xp
else:
outfile = self._gen_outfilename()
outputs["warp_file"] = os.path.abspath(outfile + ".nii.gz")
outputs["transfo_file"] = os.path.abspath(outfile + ".xfm")
outputs["inv_transfo_file"] = os.path.abspath(outfile + "_inverse.xfm")
return outputs
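# Hedged usage sketch (paths are illustrative): run IterREGBET as a standalone
# nipype interface.
#   reg = IterREGBET()
#   reg.inputs.inw_file = 'sub-01_T1w.nii.gz'
#   reg.inputs.inb_file = 'sub-01_T1w_brain.nii.gz'
#   reg.inputs.refb_file = 'NMT_SS.nii.gz'
#   res = reg.run()   # res.outputs.brain_file, res.outputs.brain_mask_file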
# NMTSubjectAlign
class NMTSubjectAlignInputSpec(CommandLineInputSpec):
T1_file = File(
exists=True,
desc='Target file',
mandatory=True, position=0, argstr="%s")
NMT_SS_file = File(
exists=True,
desc='Align to T1',
mandatory=True, position=1, argstr="%s")
class NMTSubjectAlignOutputSpec(TraitedSpec):
aff_file = File(
exists=True,
desc="shft_aff")
warp_file = File(
exists=True,
desc="shft_WARP.nii.gz")
warpinv_file = File(
exists=True,
desc="shft_WARPINV.nii.gz")
transfo_file = File(
exists=True,
desc="composite_linear_to_NMT.1D")
inv_transfo_file = File(
exists=True,
desc="_composite_linear_to_NMT_inv.1D")
class NMTSubjectAlign(CommandLine):
"""
Description: Align NMT subject to template (NMT 1.2)
Inputs:
Mandatory:
T1_file:
File, 'Target file'
NMT_SS_file:
File, 'Align to T1'
Outputs:
aff_file:
File, "aff"
warp_file:
File, "shft_WARP"
warpinv_file:
File, "shft_WARPINV"
transfo_file:
File, "composite_linear_to_NMT"
inv_transfo_file:
File, "_composite_linear_to_NMT_inv"
"""
input_spec = NMTSubjectAlignInputSpec
output_spec = NMTSubjectAlignOutputSpec
package_directory = os.path.dirname(os.path.abspath(__file__))
_cmd = 'tcsh -x {}/../bash/NMT_subject_align.csh'.format(package_directory)
def _list_outputs(self):
from nipype.utils.filemanip import split_filename as split_f
outputs = self._outputs().get()
path, fname, ext = split_f(self.inputs.T1_file)
outputs["aff_file"] = os.path.abspath(fname + "_shft_aff" + ext)
# TODO will require some checks
# outputs["warpinv_file"] = os.path.abspath(
# fname + "_shft_WARPINV.nii")
outputs["warpinv_file"] = os.path.abspath(
fname + "_shft_WARPINV.nii.gz")
outputs["warp_file"] = os.path.abspath(
fname + "_shft_WARP.nii.gz")
# outputs["warpinv_file"] = os.path.abspath(
# fname + "_shft_WARPINV" + ext)
outputs["transfo_file"] = os.path.abspath(
fname + "_composite_linear_to_NMT.1D")
outputs["inv_transfo_file"] = os.path.abspath(
fname + "_composite_linear_to_NMT_inv.1D")
return outputs
# NMTSubjectAlign2
class NMTSubjectAlign2InputSpec(CommandLineInputSpec):
T1_file = File(
exists=True,
desc='Target file',
mandatory=True, position=0, argstr="-i %s")
NMT_SS_file = File(
exists=True,
desc='Align to T1',
mandatory=True, position=1, argstr="-r %s")
afni_ext = traits.Enum(
"tlrc", "orig", exists=True, desc='Afni extension', mandatory=True,
position=2, usedefault=True, argstr="-e %s")
class NMTSubjectAlign2OutputSpec(TraitedSpec):
aff_file = File(
exists=True,
desc="affine (subject image linearly transformed to the NMT template)")
warp_file = File(
exists=True,
desc="WARP.nii.gz")
warpinv_file = File(
exists=True,
desc="WARPINV.nii.gz")
transfo_file = File(
exists=True,
desc="Combined Linear transform from subject to NMT)")
inv_transfo_file = File(
exists=True,
desc="Inverse Linear Transform from NMT to subject")
class NMTSubjectAlign2(CommandLine):
"""
Description: Align NMT subject to template (NMT 1.3)
Inputs:
Mandatory:
T1_file:
File, 'Target file'
NMT_SS_file:
File, 'Align to T1'
Outputs:
aff_file:
File, "subject image linearly transformed to the NMT template"
warp_file:
File, "shft_WARP"
warpinv_file:
File, "shft_WARPINV"
transfo_file:
File, "Combined Linear transform from subject to NMT"
inv_transfo_file:
File, "Inverse | |
expansion? Numeric integer.", default="OFF")
parser.add_option("--query", action="store", type="string", dest="query", help="Query collections of cells whose enrichment will be searched in target cells", default="OFF")
parser.add_option("--source", action="store", type="string", dest="source", help="File source for inputs...", default="OFF")
parser.add_option("--target", action="store", type="string", dest="target", help="Target collections of cells in which enrichment is searched for", default="OFF")
parser.add_option("--domain", action="store", type="string", dest="domain", help="Domain of co-associations for hybrid-type analyses", default="OFF")
parser.add_option("--A", action = "store", type = "string", dest = "a", help = "Paths to files of interest", default="OFF")
parser.add_option("--B", action = "store", type = "string", dest = "b", help = "Files to be hybridized", default="OFF")
parser.add_option("--indexes", action = "store", type = "string", dest = "indexes", help = "Indexes for matrix construction...", default="OFF")
parser.add_option("--values", action = "store", type = "string", dest = "values", help = "Values for matrix construction...", default="OFF")
parser.add_option("--contexts", action = "store", type = "string", dest = "contexts", help = "What contexts of development should I track?", default="OFF")
parser.add_option("--exclude", action="store", type="string", dest="exclude", help="Are there items that should be excluded?", default="")
parser.add_option("--start", action = "store", type = "int", dest = "start", help = "Start development time for cell search", default=1)
parser.add_option("--stop", action = "store", type = "int", dest = "stop", help = "End development time for cell search", default=250)
parser.add_option("--step", action = "store", type = "int", dest = "step", help = "Step size", default=1)
parser.add_option("--total", action = "store", type = "int", dest = "total", help = "Total simulations (indexes) for 'master' operations ", default=1000)
parser.add_option("--threads", action = "store", type = "int", dest = "threads", help = "Parallel processing threads", default=1)
parser.add_option("--chunks", action = "store", type = "int", dest = "chunks", help = "", default=100)
parser.add_option("--module", action = "store", type = "string", dest = "module", help = "", default="md1")
parser.add_option("--qsub", action = "store", type = "string", dest = "qsub", help = "Qsub configuration header", default="OFF")
parser.add_option("--server", action = "store", type = "string", dest = "server", help = "Are we on the server?", default="OFF")
parser.add_option("--job", action = "store", type = "string", dest = "job", help = "Job name for cluster", default="OFF")
parser.add_option("--copy", action = "store", type = "string", dest = "copy", help = "Copy simulated peaks to analysis folder?", default="OFF")
parser.add_option("--tag", action = "store", type = "string", dest = "tag", help = "Add tag to TFBS?", default="")
(option, args) = parser.parse_args()
# import paths:
if option.server == "OFF":
path_dict = modencode.configBuild(option.path + "/input/" + "configure_path.txt")
elif option.server == "ON":
path_dict = modencode.configBuild(option.path + "/input/" + "configure_server.txt")
# specify input and output paths:
inpath = path_dict["input"]
extraspath = path_dict["extras"]
pythonpath = path_dict["python"]
scriptspath = path_dict["scripts"]
downloadpath = path_dict["download"]
fastqpath = path_dict["fastq"]
bowtiepath = path_dict["bowtie"]
bwapath = path_dict["bwa"]
macspath = path_dict["macs"]
memepath = path_dict["meme"]
idrpath = path_dict["idr"]
igvpath = path_dict["igv"]
testpath = path_dict["test"]
processingpath = path_dict["processing"]
annotationspath = path_dict["annotations"]
peakspath = path_dict["peaks"]
gopath = path_dict["go"]
hotpath = path_dict["hot"]
qsubpath = path_dict["qsub"]
coassociationspath = path_dict["coassociations"]
bindingpath = path_dict["binding"]
neuronspath = path_dict["neurons"]
cellspath = path_dict["cells"]
# standardize paths for analysis:
alignerpath = bwapath
indexpath = alignerpath + "index/"
alignmentpath = alignerpath + "alignment/"
qcfilterpath = alignerpath + "qcfilter/"
qcmergepath = alignerpath + "qcmerge/"
# import configuration dictionaries:
source_dict = modencode.configBuild(inpath + "configure_source.txt")
method_dict = modencode.configBuild(inpath + "configure_method.txt")
context_dict = modencode.configBuild(inpath + "configure_context.txt")
# define organism parameters:
if option.organism == "hs" or option.organism == "h.sapiens":
organismTag = "hs"
#organismIGV = "ce6"
elif option.organism == "mm" or option.organism == "m.musculus":
organismTag = "mm"
#organismIGV = "ce6"
elif option.organism == "ce" or option.organism == "c.elegans":
organismTag = "ce"
#organismIGV = "ce6"
elif option.organism == "dm" or option.organism == "d.melanogaster":
organismTag = "dm"
#organismIGV = "dm5"
# specify genome size file:
if option.nuclear == "ON":
chromosomes = metrn.chromosomes[organismTag]["nuclear"]
genome_size_file = option.path + "/input/" + metrn.reference[organismTag]["nuclear_sizes"]
genome_size_dict = general.build_config(genome_size_file, mode="single", separator="\t", spaceReplace=True)
else:
chromosomes = metrn.chromosomes[organismTag]["complete"]
genome_size_file = option.path + "/input/" + metrn.reference[organismTag]["complete_sizes"]
genome_size_dict = general.build_config(genome_size_file, mode="single", separator="\t", spaceReplace=True)
# load gene ID dictionaries:
id2name_dict, name2id_dict = modencode.idBuild(inpath + metrn.reference[organismTag]["gene_ids"], "Sequence Name (Gene)", "Gene Public Name", mode="label", header=True, idUpper=True, nameUpper=True)
# update peaks path:
peakspath = peakspath + option.peaks + "/"
# define input/output folders:
expressionpath = cellspath + "expression/"
correctionpath = cellspath + "correction/"
lineagepath = cellspath + "lineage/"
bindingpath = cellspath + "peaks/"
overlappath = cellspath + "overlap/"
cellsetpath = cellspath + "cellset/"
genesetpath = cellspath + "geneset/"
reportspath = cellspath + "reports/"
comparepath = cellspath + "compare/"
matrixpath = cellspath + "matrix/"
tissuespath = cellspath + "tissues/"
distancepath = cellspath + "distance/"
hybridpath = cellspath + "hybrid/"
dynamicspath = cellspath + "dynamics/"
cubismpath = cellspath + "cubism/"
timepath = cellspath + "time/"
cellnotationspath = cellspath + "annotations/"
general.pathGenerator(expressionpath)
general.pathGenerator(correctionpath)
general.pathGenerator(lineagepath)
general.pathGenerator(bindingpath)
general.pathGenerator(overlappath)
general.pathGenerator(cellsetpath)
general.pathGenerator(genesetpath)
general.pathGenerator(reportspath)
general.pathGenerator(comparepath)
general.pathGenerator(matrixpath)
general.pathGenerator(tissuespath)
general.pathGenerator(distancepath)
general.pathGenerator(timepath)
general.pathGenerator(hybridpath)
general.pathGenerator(dynamicspath)
general.pathGenerator(cubismpath)
general.pathGenerator(cellnotationspath)
# generate expression flag:
if option.measure == "max.expression":
expression_flag = "maxCel_"
elif option.measure == "avg.expression":
expression_flag = "avgExp_"
# check that the index range is coherent:
if option.stop > option.total:
print
print "Error: Range exceeded! Stop index is larger than total."
print
return
# master mode:
if "master" in option.mode:
# capture master mode:
master, mode = option.mode.split(":")
# prepare for qsub:
bash_path = str(option.path + "/data/cells/runs/").replace("//","/")
bash_base = "_".join([mode, option.peaks, option.name]) + "-M"
qsub_base = "_".join([mode, option.peaks, option.name])
general.pathGenerator(bash_path)
if option.qsub != "OFF":
qsub_header = open(qsubpath + option.qsub).read()
qsub = True
else:
qsub_header = ""
qsub = False
if option.job == "QSUB":
qsub_header = qsub_header.replace("qsubRunner", "qsub-" + qsub_base)
elif option.job != "OFF":
qsub_header = qsub_header.replace("qsubRunner", "qsub-" + option.job)
bash_base = option.job + "-M"
# update server path:
if option.qsub != "OFF":
option.path = serverPath(option.path)
# prepare slave modules:
m, steps, modules, commands, sequences, chunks, start, complete = 1, 0, list(), list(), list(), option.chunks, option.start, False
for index in range(option.start, option.stop+1, option.step):
run = "rn" + general.indexTag(index, option.total)
steps += 1
# cellular peak generation mode:
if mode == "cell.peaks":
command = "python <<CODEPATH>>mapCells.py --path <<PATH>> --organism <<ORGANISM>> --mode <<MODE>> --peaks <<PEAKS>> --start <<START>> --stop <<STOP>> --total <<TOTAL>> --expression <<EXPRESSION>> --collection <<COLLECTION>> --times <<TIMES>> --fraction <<FRACTION>> --minimum <<MINIMUM>> --name <<NAME>> --qsub <<QSUB>> --server <<SERVER>> --module <<MODULE>>"
command = command.replace("<<CODEPATH>>", option.path + "/python/")
command = command.replace("<<PATH>>", option.path)
command = command.replace("<<ORGANISM>>", option.organism)
command = command.replace("<<MODE>>", mode)
command = command.replace("<<PEAKS>>", option.peaks)
command = command.replace("<<START>>", str(index))
command = command.replace("<<STOP>>", str(index))
command = command.replace("<<TOTAL>>", str(option.total))
command = command.replace("<<EXPRESSION>>", option.expression)
command = command.replace("<<COLLECTION>>", option.collection)
command = command.replace("<<TIMES>>", option.times)
command = command.replace("<<FRACTION>>", str(option.fraction))
command = command.replace("<<MINIMUM>>", str(option.minimum))
command = command.replace("<<NAME>>", option.name + general.indexTag(index, option.total))
command = command.replace("<<QSUB>>", option.qsub)
command = command.replace("<<SERVER>>", option.server)
command = command.replace("<<MODULE>>", "md" + str(m))
# cellular annotation mode:
if mode == "cell.annotation":
command = "python <<CODEPATH>>mapCells.py --path <<PATH>> --organism <<ORGANISM>> --mode <<MODE>> --peaks <<PEAKS>> --start <<START>> --stop <<STOP>> --total <<TOTAL>> --infile <<INFILE>> --collection <<COLLECTION>> --times <<TIMES>> --name <<NAME>> --qsub <<QSUB>> --server <<SERVER>> --module <<MODULE>>"
command = command.replace("<<CODEPATH>>", option.path + "/python/")
command = command.replace("<<PATH>>", option.path)
command = command.replace("<<ORGANISM>>", option.organism)
command = command.replace("<<MODE>>", mode)
command = command.replace("<<PEAKS>>", option.peaks)
command = command.replace("<<START>>", str(index))
command = command.replace("<<STOP>>", str(index))
command = command.replace("<<TOTAL>>", str(option.total))
command = command.replace("<<INFILE>>", option.infile)
command = command.replace("<<COLLECTION>>", option.collection)
command = command.replace("<<TIMES>>", option.times)
command = command.replace("<<NAME>>", option.name + general.indexTag(index, option.total) + option.nametag)
command = command.replace("<<QSUB>>", option.qsub)
command = command.replace("<<SERVER>>", option.server)
command = command.replace("<<MODULE>>", "md" + str(m))
# cellular overlap mode:
if mode == "cell.overlap":
command = "python <<CODEPATH>>mapCells.py --path <<PATH>> --organism <<ORGANISM>> --mode <<MODE>> --peaks <<PEAKS>> --start <<START>> --stop <<STOP>> --total <<TOTAL>> --expression <<EXPRESSION>> --collection <<COLLECTION>> --times <<TIMES>> --fraction <<FRACTION>> --minimum <<MINIMUM>> --extend <<EXTEND>> --name <<NAME>> --qsub <<QSUB>> --server <<SERVER>> --module <<MODULE>>"
command = command.replace("<<CODEPATH>>", option.path + "/python/")
command = command.replace("<<PATH>>", option.path)
command = command.replace("<<ORGANISM>>", option.organism)
command = command.replace("<<MODE>>", mode)
command = command.replace("<<PEAKS>>", option.peaks)
command = command.replace("<<START>>", str(index))
command = command.replace("<<STOP>>", str(index))
command = command.replace("<<TOTAL>>", str(option.total))
command = command.replace("<<EXPRESSION>>", option.expression)
command = command.replace("<<COLLECTION>>", option.collection + general.indexTag(index, option.total) + option.nametag)
command = command.replace("<<TIMES>>", option.times)
command = command.replace("<<NAME>>", option.name)
command = command.replace("<<FRACTION>>", str(option.fraction))
command = command.replace("<<MINIMUM>>", str(option.minimum))
command = command.replace("<<EXTEND>>", str(option.extend))
command = command.replace("<<QSUB>>", option.qsub)
command = command.replace("<<SERVER>>", option.server)
command = command.replace("<<MODULE>>", "md" + str(m))
# coassociations hybrid mode:
if mode == "cell.hybrid":
collection = option.collection + general.indexTag(index, option.total) + option.nametag
command = "python <<CODEPATH>>mapCells.py --path <<PATH>> --organism <<ORGANISM>> --mode <<MODE>> --A <<A>> --B <<B>> --indexes <<INDEXES>> --values <<VALUES>> --contexts <<CONTEXTS>>"
command = command.replace("<<CODEPATH>>", option.path + "/python/")
command = command.replace("<<PATH>>", option.path)
command = command.replace("<<ORGANISM>>", option.organism)
command = command.replace("<<MODE>>", mode)
command = command.replace("<<A>>", option.a)
command = command.replace("<<B>>", collection + "/mapcells_" + collection + "_matrix_overlap")
command = command.replace("<<INDEXES>>", option.indexes)
command = command.replace("<<VALUES>>", option.values)
command = command.replace("<<CONTEXTS>>", option.contexts)
# is it time to export a chunk?
if index-start+option.step == chunks:
# update start, modules, commands, and module count (m):
start = index + option.step
commands.append(command)
modules.append(commands)
commands = list()
complete = True
m += 1
# store whether the most recent index/command has been stored:
else:
complete = False
# update if there are additional commands:
if not complete:
commands.append(command)
modules.append(commands)
m += 1
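# Minimal standalone sketch of the batching above (illustration only): assuming option.step == 1,
# every index yields one command and the loop groups them into "modules" of option.chunks
# commands each, with a trailing partial module when the total is not a multiple of the chunk
# size. The helper name chunk_commands is hypothetical.
def chunk_commands(commands_list, chunk_size):
    """Group a flat list of commands into lists ("modules") of at most chunk_size each."""
    return [commands_list[i:i + chunk_size] for i in range(0, len(commands_list), chunk_size)]
# chunk_commands(["c1", "c2", "c3"], 2) -> [["c1", "c2"], ["c3"]]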
# launch commands:
print
print "Launching comparisons:", len(modules)
#for module in modules:
# for command in module:
# print command
runCommands(modules, threads=option.threads, mode="module.run", run_mode="verbose", run_path=bash_path, run_base=bash_base, record=True, qsub_header=qsub_header, qsub=qsub)
print "Analyses performed:", len(modules)
print
# filter cells :
elif option.mode == "filter":
# load cells to filter:
filterCells = open(path_dict[option.source] + option.target).read().strip().split("\n")
# generate output file:
f_output = open(path_dict[option.source] + option.name, "w")
# process input lines:
f, k = 0, 0
inlines = open(path_dict[option.source] + option.infile).readlines()
for inline in inlines:
process = True
items = inline.strip().split(",")
for item in items:
if item in filterCells:
process = False
f += 1
if process:
print >>f_output, inline.strip()
k += 1
print
print "Input lines:", len(inlines)
print "Output lines:", k, "(" + str(f) + " filtered)"
print
# close output:
f_output.close()
# simplify cell annotations :
elif option.mode == "simply":
# generate output file:
f_output = open(path_dict[option.source] | |
"""
server.py
Provides classes to create servers and handle their clients.
"""
import socket
from . import logger
from .settings import default_header_size, default_chunk_size, default_port, default_queue
from abc import ABC, abstractmethod
import threading
from typing import Tuple, Any, Callable, List, Union
import pickle
import traceback
from pathlib import Path
TimeoutException = socket.timeout
class Server(ABC):
"""
An Abstract Server object containing the ip and port it is bound to.
Args:
ip (str): The ip address(IPV4) of the server. Defaults to local machine's ip
port(int): The port the server must be bound to. Defaults to tcpsockets.settings.default_port if it is not set
to None else raises Exception.
queue(int): The waiting queue length of the server. Defaults to tcpsockets.settings.default_queue if it is not
set to None else raises Exception.
background(bool): Whether the server runs in background (in separate thread) or not. Defaults to True
Attributes:
ip (str): The ip address(IPV4) of the server.
port(int): The port the server is bound to.
socket(socket.socket): Reference to the socket object.
background(bool): Whether the server runs in separate thread or not.
running(bool): Whether the server is running or not.
server_thread(threading.Thread): The Thread in which the server will run if background is True.
"""
def __init__(self, ip: str = socket.gethostbyname(socket.gethostname()), port: int = None, queue: int = None,
background: bool = True):
self.background: bool = background
self.running: bool = False
self.port: int = port
if self.port is None:
if default_port is None:
raise Exception("Either Server port or Default Port must be set.")
self.port = default_port
self.queue = queue
if self.queue is None:
if default_queue is None:
raise Exception("Either queue parameter or Default Queue must be set.")
self.queue = default_queue
self.ip: str = ip
logger.log("Creating server socket")
self.socket: socket.socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
logger.log(f"Binding socket to {self.ip} at {self.port}")
self.socket.bind((self.ip, self.port))
if self.background:
self.server_thread = threading.Thread(target=self.starter)
self.closing = False
def handler(self, client: "Client") -> None:
"""
The Function called when the client connects to the server. Must be overridden by inheritance or by calling
the client_handler decorator.
Args:
client(Client): The Client object that has connected to the server.
Returns:
None
"""
raise Exception("No Handler set")
def client_handler(self, func: Callable) -> None:
"""
Decorator to set the client handler.
Args:
func(Callable): The User Defined Client Handler function that must take one positional argument of type
tcpsockets.server.Client.
Returns:
None
"""
# noinspection PyAttributeOutsideInit
self.handler = func
def start(self) -> None:
"""
Start the server.
Returns:
None
"""
if self.running:
return
if self.background:
self.server_thread.start()
else:
self.starter()
@abstractmethod
def starter(self):
pass
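# Usage sketch (illustrative, not part of the library API): shows how a concrete subclass such
# as SequentialServer (defined below) is typically wired up. The port number and the handler
# body are assumptions made for this example; nothing runs until _example_usage() is called.
def _example_usage(port=5050):
    server = SequentialServer(port=port, background=True)

    @server.client_handler
    def handle(client):
        message = client.receive()      # length-prefixed, pickled object from the peer
        client.send({"echo": message})  # reply with any picklable object

    server.start()
    return server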
class Client:
"""
A Client Class which represents a client as they connect to the server. Provides methods to send and receive any
python object that can be pickled.
Args:
sckt(socket.socket): reference to the client's socket returned by socket.accept()
address(Tuple[str,int]): a tuple containing the ip and the port.
Attributes:
total_client_connections(int): A Class variable which tracks the total number of client by tracking all its
Objects.
client_connection_id(int): An id uniquely identifying one instance of a connection. It is an int set to the
total number of connections made just after the client joins.
socket(socket.socket): reference to the client's socket returned by socket.accept()
ip(str): The ip address(IPV4) of the client returned by socket.accept()
port(int): The port the client is connected to.
"""
total_client_connections: int = 0
def __init__(self, sckt: socket.socket, address: Tuple[str, int]):
Client.total_client_connections += 1
self.client_connection_id: int = Client.total_client_connections
self.socket: socket.socket = sckt
self.ip: str = address[0]
self.port: int = address[1]
def close(self) -> None:
"""
Closes the socket.
Returns:
None
"""
self.socket.close()
def send(self, obj: Any, byte_converter: Callable[[Any], bytes] = None) -> None:
"""
Send a python object that can be pickled to the client. First sends a fixed length header defined in
tcpsockets.settings giving the size of outgoing message then sends the pickled object. default_header_size
can be set by using tcpsockets.settings.set_default_header_size function.
Args:
obj(Any): The Object that has to be sent to the client that can be pickled.
byte_converter(Callable[[Any], bytes]): Function to convert object to bytes.
Returns:
None
"""
if byte_converter is None:
bytes_obj = pickle.dumps(obj)
else:
bytes_obj = byte_converter(obj)
bytes_obj_size = len(bytes_obj)
header = str(bytes_obj_size).ljust(default_header_size).encode("utf-8")
self.socket.send(header)
self.socket.send(bytes_obj)
def send_to(self, obj: Any, client: "Client", byte_converter: Callable[[Any], bytes] = None):
"""
Send a python object that can be pickled to the client. First sends a fixed length header defined in
tcpsockets.settings giving the size of outgoing message then sends the pickled object. default_header_size
can be set by using tcpsockets.settings.set_default_header_size function.
Args:
client(Client): Which client to send it to
obj(Any): The Object that has to be sent to the client that can be pickled.
byte_converter(Callable[[Any], bytes]): Function to convert object to bytes.
Returns:
None
"""
if byte_converter is None:
bytes_obj = pickle.dumps(obj)
else:
bytes_obj = byte_converter(obj)
bytes_obj_size = len(bytes_obj)
header = str(bytes_obj_size).ljust(default_header_size).encode("utf-8")
self.socket.sendto(header, (client.ip, client.port))
self.socket.sendto(bytes_obj, (client.ip, client.port))
def receive(self, chunk_size: int = None, byte_converter: Callable[[bytes], Any] = None) -> Any:
"""
Receive a python object sent by the client. First receive a fixed length header then receive the pickled object
chunk by chunk using the chunk_size argument.
Args:
chunk_size(int): The amount of bytes to receive at once. Defaults to tcpsockets.settings.default_chunk_size.
byte_converter(Callable[[bytes], Any]): Function to convert bytes to object.
Returns:
Any: The object sent by the server
"""
if chunk_size is None:
chunk_size = default_chunk_size
obj_size_header: str = self.socket.recv(default_header_size).decode("utf-8")
obj_size: int = int(obj_size_header.strip())
bytes_obj = b""
for _ in range(obj_size // chunk_size):
bytes_obj += self.socket.recv(chunk_size)
bytes_obj += self.socket.recv(obj_size % chunk_size)
return pickle.loads(bytes_obj) if byte_converter is None else byte_converter(bytes_obj)
def send_file(self, file_location: Path, chunk_size: int) -> None:
"""
Send a file object in small chunks whose sizes are "chunk_size" each.
Args:
file_location(Path): Path object representing file location of the file to be sent.
chunk_size(int): Size of 1 chunk.
"""
self.send((file_location.name, file_location.stat().st_size))
with open(file_location, "rb") as file:
file_chunk = file.read(chunk_size)
self.socket.send(file_chunk)
while file_chunk:
file_chunk = file.read(chunk_size)
self.socket.send(file_chunk)
def receive_file(self, file_save_location: Path, chunk_size: int) -> None:
"""
Receive a file object in small chunks whose sizes are "chunk_size" each.
Args:
file_save_location(Path): Path object representing the folder where the received file is to be saved.
chunk_size(int): Size of 1 chunk.
"""
name, size = self.receive()
with open(file_save_location / name, "wb") as file:
byte_number = 0
while byte_number < size:
byte_chunk = self.socket.recv(chunk_size)
byte_number += len(byte_chunk)
file.write(byte_chunk)
def set_receive_timeout(self, timeout: int) -> None:
"""
Sets the time out for Client.receive(). Raises socket.timeout after timeout.
Args:
timeout(int): Number of seconds for timeout.
"""
self.socket.settimeout(timeout)
def __eq__(self, other: "Client") -> bool:
"""
Checks if the client objects have same client_connection_id
Args:
other(Client): The other client object to be compared with.
Returns:
bool: bool saying whether both client objects have the same client_connection_id.
"""
return self.client_connection_id == other.client_connection_id
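# Framing sketch (illustrative, not part of the library): Client.send()/Client.receive() use a
# fixed-width ASCII header carrying the length of the pickled payload. A peer written with a
# plain socket could build a compatible frame like this; frame_object and the header_size
# default of 64 bytes are assumptions, the real width comes from settings.default_header_size.
def frame_object(obj, header_size=64):
    import pickle
    payload = pickle.dumps(obj)
    header = str(len(payload)).ljust(header_size).encode("utf-8")
    return header + payload
# frame_object({"msg": "hi"}) returns the padded length header followed by the pickled bytes.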
class SequentialServer(Server):
"""
A Sequential Server for handling clients one by one.
Args:
ip (str): The ip address(IPV4) of the server. Defaults to local machine's ip
port(int): The port the server must be bound to. Defaults to tcpsockets.settings.default_port if
it is not set to None else raises Exception.
queue(int): The waiting queue length of the server. Defaults to tcpsockets.settings.default_queue
if it is not set to None else raises Exception.
background(bool): Whether the server runs in background (in separate thread) or not. Defaults to True
Attributes:
handling(bool): A bool saying whether the server is handling a client or not.
stopper_thread(threading.Thread): A Thread that is responsible for stopping the Server.
current_client(Union[None, Client]): The Client Object of the client currently being handled.
If not handling then it is set to None.
"""
def __init__(self, ip: str = socket.gethostbyname(socket.gethostname()), port: int = None, queue: int = None,
background: bool = True):
super(SequentialServer, self).__init__(ip, port, queue, background)
self.handling: bool = False
self.stopper_thread: threading.Thread = threading.Thread(target=self.stopper)
self.current_client: Union[None, Client] = None
def stopper(self) -> None:
"""
A stopper method running in its own thread to stop the server if it is stuck in the blocking socket.accept.
Creates a new socket to stop socket.accept.
Returns:
None
"""
while self.running or self.handling:
pass
closer_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
try:
closer_socket.connect((self.ip, self.port))
except ConnectionRefusedError:
pass
closer_socket.close()
def starter(self) | |
# Check for connecting lanes. This is pretty much for the roundabouts, but some weird geometries
# make it possible for single junctions to have the same road entering and exiting. Two cases,
# Lanes that exit one junction and enter another (or viceversa)
exit_lane_keys = [get_lane_key(wp) for wp in exit_lane_wps]
entry_lane_keys = [get_lane_key(wp) for wp in entry_lane_wps]
for wp in list(entry_lane_wps):
if get_lane_key(wp) in exit_lane_keys:
entry_lane_wps.remove(wp)
if self.debug:
draw_point(self._world, wp.transform.location, 'small', 'connect', True)
for wp in list(exit_lane_wps):
if get_lane_key(wp) in entry_lane_keys:
exit_lane_wps.remove(wp)
if self.debug:
draw_point(self._world, wp.transform.location, 'small', 'connect', True)
# Lanes with a fake junction in the middle (maps junction exit to fake junction entry and viceversa)
for entry_key, exit_key in self._fake_lane_pair_keys:
entry_wp = None
for wp in entry_lane_wps:
if get_lane_key(wp) == exit_key: # A junction exit is a fake junction entry
entry_wp = wp
break
exit_wp = None
for wp in exit_lane_wps:
if get_lane_key(wp) == entry_key: # A junction entry is a fake junction exit
exit_wp = wp
break
if entry_wp and exit_wp:
entry_lane_wps.remove(entry_wp)
exit_lane_wps.remove(exit_wp)
if self.debug:
draw_point(self._world, entry_wp.transform.location, 'small', 'connect', True)
draw_point(self._world, exit_wp.transform.location, 'small', 'connect', True)
junction_data.entry_wps = entry_lane_wps
junction_data.exit_wps = exit_lane_wps
# Filter the entries and exits that correspond to the route
route_entry_wp = self._route[junction_data.route_entry_index]
# Junction entry
for wp in get_same_dir_lanes(route_entry_wp):
junction_wp = self._get_closest_junction_waypoint(wp, entry_lane_wps)
junction_data.route_entry_keys.append(get_lane_key(junction_wp))
for wp in get_opposite_dir_lanes(route_entry_wp):
junction_wp = self._get_closest_junction_waypoint(wp, exit_lane_wps)
junction_data.opposite_exit_keys.append(get_lane_key(junction_wp))
# Junction exit
if junction_data.route_exit_index: # Can be None if route ends at a junction
route_exit_wp = self._route[junction_data.route_exit_index]
for wp in get_same_dir_lanes(route_exit_wp):
junction_wp = self._get_closest_junction_waypoint(wp, exit_lane_wps)
junction_data.route_exit_keys.append(get_lane_key(junction_wp))
for wp in get_opposite_dir_lanes(route_exit_wp):
junction_wp = self._get_closest_junction_waypoint(wp, entry_lane_wps)
junction_data.opposite_entry_keys.append(get_lane_key(junction_wp))
# Add the entry directions of each lane with respect to the route. Used for scenarios 7 to 9
route_entry_yaw = route_entry_wp.transform.rotation.yaw
for wp in entry_lane_wps:
diff = (wp.transform.rotation.yaw - route_entry_yaw) % 360
if diff > 330.0:
direction = 'ref'
elif diff > 225.0:
direction = 'right'
elif diff > 135.0:
direction = 'opposite'
elif diff > 30.0:
direction = 'left'
else:
direction = 'ref'
junction_data.entry_directions[direction].append(get_lane_key(wp))
# Supposing scenario vehicles go straight, these correspond to the exit lanes of the entry directions
for wp in exit_lane_wps:
diff = (wp.transform.rotation.yaw - route_entry_yaw) % 360
if diff > 330.0:
direction = 'ref'
elif diff > 225.0:
direction = 'right'
elif diff > 135.0:
direction = 'opposite'
elif diff > 30.0:
direction = 'left'
else:
direction = 'ref'
junction_data.exit_directions[direction].append(get_lane_key(wp))
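# Illustrative refactor sketch (not part of the original manager): both loops above bin the
# yaw difference with identical thresholds, so the mapping could live in one helper. The name
# classify_direction is hypothetical; the thresholds are copied verbatim from the code above.
def classify_direction(lane_yaw, route_yaw):
    diff = (lane_yaw - route_yaw) % 360
    if diff > 330.0 or diff <= 30.0:
        return 'ref'        # roughly the same heading as the route
    elif diff > 225.0:
        return 'right'
    elif diff > 135.0:
        return 'opposite'
    return 'left'           # 30 < diff <= 135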
if self.debug:
exit_lane = self._route[junction_data.route_exit_index] if junction_data.route_exit_index else None
print('> R Entry Lane: {}'.format(get_lane_key(self._route[junction_data.route_entry_index])))
print('> R Exit Lane: {}'.format(get_lane_key(exit_lane)))
entry_print = '> J Entry Lanes: '
for entry_wp in entry_lane_wps:
key = get_lane_key(entry_wp)
entry_print += key + ' ' * (6 - len(key))
print(entry_print)
exit_print = '> J Exit Lanes: '
for exit_wp in exit_lane_wps:
key = get_lane_key(exit_wp)
exit_print += key + ' ' * (6 - len(key))
print(exit_print)
route_entry = '> R-J Entry Lanes: '
for entry_key in junction_data.route_entry_keys:
route_entry += entry_key + ' ' * (6 - len(entry_key))
print(route_entry)
route_exit = '> R-J Route Exit Lanes: '
for exit_key in junction_data.route_exit_keys:
route_exit += exit_key + ' ' * (6 - len(exit_key))
print(route_exit)
route_oppo_entry = '> R-J Oppo Entry Lanes: '
for oppo_entry_key in junction_data.opposite_entry_keys:
route_oppo_entry += oppo_entry_key + ' ' * (6 - len(oppo_entry_key))
print(route_oppo_entry)
route_oppo_exit = '> R-J Oppo Exit Lanes: '
for oppo_exit_key in junction_data.opposite_exit_keys:
route_oppo_exit += oppo_exit_key + ' ' * (6 - len(oppo_exit_key))
print(route_oppo_exit)
################################
## Waypoint related functions ##
################################
def _is_junction(self, waypoint):
if not waypoint.is_junction or waypoint.junction_id in self._fake_junction_ids:
return False
return True
################################
## Mode functions ##
################################
def _add_actor_dict_element(self, actor_dict, actor, exit_lane_key='', at_oppo_entry_lane=False):
"""Adds a new actor to the actor dictionary"""
actor_dict[actor] = {
'state': 'junction_entry' if not exit_lane_key else 'junction_exit',
'exit_lane_key': exit_lane_key,
'at_oppo_entry_lane': at_oppo_entry_lane
}
def _switch_to_junction_mode(self, junction):
"""Prepares the junction mode, changing the state of the actors"""
self._ego_state = 'junction'
for actor in list(self._road_actors):
self._add_actor_dict_element(junction.actor_dict, actor)
self._road_actors.remove(actor)
if not self._is_scenario_2_active:
self._tm.vehicle_percentage_speed_difference(actor, 0)
self._road_back_actors.clear()
self._road_extra_front_actors = 0
self._opposite_sources.clear()
def _initialise_junction_scenario(self, direction, remove_entries, remove_exits, remove_middle):
"""
Removes all vehicles in a particular 'direction' as well as all actors inside the junction.
Additionally, activates some flags to ensure the junction is empty at all times
"""
if self._active_junctions:
scenario_junction = self._active_junctions[0]
scenario_junction.scenario_info = {
'direction': direction,
'remove_entries': remove_entries,
'remove_middle': remove_middle,
'remove_exits': remove_exits,
}
entry_direction_keys = scenario_junction.entry_directions[direction]
actor_dict = scenario_junction.actor_dict
if remove_entries:
for entry_source in scenario_junction.entry_sources:
if get_lane_key(entry_source.entry_lane_wp) in entry_direction_keys:
# Source is affected
actors = entry_source.actors
for actor in list(actors):
if actor_dict[actor]['state'] == 'junction_entry':
# Actor is at the entry lane
self._destroy_actor(actor)
if remove_exits:
for exit_dir in scenario_junction.exit_directions[direction]:
for actor in list(scenario_junction.exit_dict[exit_dir]['actors']):
self._destroy_actor(actor)
if remove_middle:
actor_dict = scenario_junction.actor_dict
for actor in list(actor_dict):
if actor_dict[actor]['state'] == 'junction_middle':
self._destroy_actor(actor)
def _handle_junction_scenario_end(self, junction):
"""Ends the junction scenario interaction. This is pretty much useless as the junction
scenario ends at the same time as the active junction, but in the future it might not"""
junction.scenario_info = {
'direction': None,
'remove_entries': False,
'remove_middle': False,
'remove_exits': False,
}
def _monitor_scenario_4_end(self, ego_location):
"""Monitors the ego distance to the junction to know if the scenario 4 has ended"""
if self._ego_exitted_junction:
ref_location = self._start_ego_wp.transform.location
if ego_location.distance(ref_location) > self._crossing_dist:
for actor in self._scenario_4_actors:
self._tm.vehicle_percentage_speed_difference(actor, 0)
self._is_scenario_4_active = False
self._scenario_4_actors.clear()
self._ego_exitted_junction = False
self._crossing_dist = None
def _handle_scenario_4_interaction(self, junction, ego_wp):
"""
Handles the interaction between the scenario 4 of the Leaderboard and the background activity.
This removes all vehicles near the bicycle path, and stops the others so that they don't interfere
"""
if not self._is_scenario_4_active:
return
self._ego_exitted_junction = True
self._start_ego_wp = ego_wp
min_crossing_space = 2
# Actor exiting the junction
exit_dict = junction.exit_dict
for exit_key in exit_dict:
if exit_key not in junction.route_exit_keys:
continue
actors = exit_dict[exit_key]['actors']
exit_lane_wp = exit_dict[exit_key]['ref_wp']
exit_lane_location = exit_lane_wp.transform.location
for actor in list(actors):
actor_location = CarlaDataProvider.get_location(actor)
if not actor_location:
self._destroy_actor(actor)
continue
dist_to_scenario = exit_lane_location.distance(actor_location) - self._crossing_dist
actor_length = actor.bounding_box.extent.x
if abs(dist_to_scenario) < actor_length + min_crossing_space:
self._destroy_actor(actor)
continue
if dist_to_scenario > 0:
continue # Don't stop the actors that have already passed the scenario
if get_lane_key(ego_wp) == get_lane_key(exit_lane_wp):
self._destroy_actor(actor)
continue # Actor at the ego lane and between the ego and scenario
self._scenario_4_actors.append(actor)
# Actor entering the junction
for entry_source in junction.entry_sources:
entry_lane_wp = entry_source.entry_lane_wp
if get_lane_key(entry_lane_wp) in junction.opposite_entry_keys:
# Source is affected
actors = entry_source.actors
entry_lane_location = entry_lane_wp.transform.location
for actor in list(actors):
actor_location = CarlaDataProvider.get_location(actor)
if not actor_location:
self._destroy_actor(actor)
continue
crossing_space = abs(entry_lane_location.distance(actor_location) - self._crossing_dist)
actor_length = actor.bounding_box.extent.x
if crossing_space < actor_length + min_crossing_space:
self._destroy_actor(actor)
continue # Actors blocking the path of the crossing obstacle
self._scenario_4_actors.append(actor)
# Actors entering the next junction
if len(self._active_junctions) > 1:
next_junction = self._active_junctions[1]
actors_dict = next_junction.actor_dict
for actor in list(actors_dict):
if actors_dict[actor]['state'] != 'junction_entry':
continue
actor_location = CarlaDataProvider.get_location(actor)
if not actor_location:
self._destroy_actor(actor)
continue
dist_to_scenario = exit_lane_location.distance(actor_location) - self._crossing_dist
actor_length = actor.bounding_box.extent.x
if abs(dist_to_scenario) < actor_length + min_crossing_space:
self._destroy_actor(actor)
continue
if dist_to_scenario > 0:
continue # Don't stop the actors that have already passed the scenario
actor_wp = self._map.get_waypoint(actor_location)
if get_lane_key(ego_wp) == get_lane_key(actor_wp):
self._destroy_actor(actor)
continue # Actor at the ego lane and between the ego and scenario
self._scenario_4_actors.append(actor)
# Immediately freeze the actors
for actor in self._scenario_4_actors:
try:
actor.set_target_velocity(carla.Vector3D(0, 0, 0))
self._tm.vehicle_percentage_speed_difference(actor, 100)
except RuntimeError:
pass # Just in case the actor is not alive
def _end_junction_behavior(self, ego_wp, junction):
"""
Destroys unneeded actors (those behind the ego), moves the rest to other data structures
and cleans up the variables. If no other junctions are active, starts road mode
"""
actor_dict = junction.actor_dict
route_exit_keys = junction.route_exit_keys
self._active_junctions.pop(0)
for actor in list(actor_dict):
location = CarlaDataProvider.get_location(actor)
if not location or self._is_location_behind_ego(location):
self._destroy_actor(actor)
continue
self._tm.vehicle_percentage_speed_difference(actor, 0)
if actor_dict[actor]['at_oppo_entry_lane']:
self._opposite_actors.append(actor)
self._tm.ignore_lights_percentage(actor, 100)
self._tm.ignore_signs_percentage(actor, 100)
continue
if not self._active_junctions and actor_dict[actor]['exit_lane_key'] in route_exit_keys:
self._road_actors.append(actor)
continue
self._destroy_actor(actor)
self._handle_scenario_4_interaction(junction, ego_wp)
self._handle_junction_scenario_end(junction)
self._switch_junction_road_sources(junction)
if not self._active_junctions:
self._ego_state = 'road'
self._initialise_opposite_sources()
self._initialise_road_checker()
self._road_ego_key = self._get_ego_route_lane_key(ego_wp)
for source in junction.exit_sources:
self._road_back_actors[source.mapped_key] = []
def _switch_junction_road_sources(self, junction):
"""
Removes the sources part of the previous road and gets the ones of the exited junction.
"""
self._road_sources.clear()
new_sources = junction.exit_sources
self._road_sources.extend(new_sources)
def _search_for_next_junction(self):
"""Check if | |
does not exist in the current DB-Structure".format(dbname), exc_info=self._logger_traceback)
return Status(status=None, track_id=self._error_track_id.incr(), func_name=function_name(-2))
### Update Attribute
if "info" in self.tables(dbname=dbname):
try:
#cursor = self._db.cursor()
self._threads_cursors[thread_name].execute(query)
self._commit(write_all_cash=False)
self._update_temp_attributsList_in_instance(thread_name=thread_name)
return Status(status=True)
except Exception as exception:
print_exc_plus() if self._ext_tb else ""
self.logger.error("Something happens while detaching of '{}'-DB: '{}'".format(attached_db_name, repr(exception) ), exc_info=self._logger_traceback)
return Status(status=False, track_id=self._error_track_id.incr(), func_name=function_name(-2))
else:
self.logger.error("Info-Table is wasn't found or not exist. Please initialize the Info Table, bevor you may add any attributes.", exc_info=self._logger_traceback)
return Status(status=False, track_id=self._error_track_id.incr(), func_name=function_name(-2))
def update_attrs(self, inp_dict, dbname="main", thread_name="Thread0"):
### Exception Handling
s = self._check_db_should_exist()
if not s["status"]:
return s
self._commit_if_inserts_was_did()
# Check that the input is a dictionary:
if not isinstance(inp_dict, dict):
self.logger.error("UpdateAttributes: InputDict is not an 'dict'.", exc_info=self._logger_traceback)
return Status(status=False, track_id=self._error_track_id.incr(), func_name=function_name(-2))
inp_dict = copy.deepcopy(inp_dict)
# if given attribute exist in the Info_Table
col_in_info_table = self.col("info", dbname=dbname)
if not all(elem in col_in_info_table for elem in inp_dict.keys()):
missing_attrs = list(set(inp_dict.keys())-set(col_in_info_table))
self.logger.error("Some of the given attributes ('{}') do not exist in this DataBase.".format(missing_attrs), exc_info=self._logger_traceback)
return Status(status=None, track_id=self._error_track_id.incr(), func_name=function_name(-2))
attrib_to_str = ",".join(["{}='{}'".format(k,v) for k,v in inp_dict.iteritems()])
if dbname in self.dbnames:
query = 'UPDATE {}.info \nSET {};'.format(dbname,attrib_to_str)
else:
self.logger.error("Given dbName ('{}') is not exist in the current DB-Structure".format(dbname), exc_info=self._logger_traceback)
return Status(status=None, track_id=self._error_track_id.incr(), func_name=function_name(-2))
### Update Attribute
if "info" in self.tables(dbname=dbname):
try:
#cursor = self._db.cursor()
self._threads_cursors[thread_name].execute(query)
self._commit(write_all_cash=False)
self._update_temp_attributsList_in_instance(thread_name=thread_name)
return Status(status=True)
except Exception as exception:
print_exc_plus() if self._ext_tb else ""
self.logger.error("Something happens while detaching of '{}'-DB: '{}'".format(attached_db_name, repr(exception) ), exc_info=self._logger_traceback)
return Status(status=False, track_id=self._error_track_id.incr(), func_name=function_name(-2))
else:
self.logger.error("Info-Table is wasn't found or not exist. Please initialize the Info Table, bevor you may add any attributes.", exc_info=self._logger_traceback)
return Status(status=False, track_id=self._error_track_id.incr(), func_name=function_name(-2))
def get_attr(self,attributName, dbname="main"):
s = self._check_db_should_exist()
if not s["status"]:
return s
if not isinstance(attributName, (str, unicode)):
self.logger.error("Given AttributName should be an string or unicode object.", exc_info=self._logger_traceback)
#return Status(status=None, track_id=self._error_track_id.incr(), func_name=function_name(-2))
return None
if not self._attributs_dict:
self.logger.warning("Temporary AttributesList is empty")
#return Status(status=None, track_id=self._error_track_id.incr(), func_name=function_name(-2))
return None
# if given attribute exist in the Info_Table
#p((attributName, self._attributs_dict,dbname))
try:
if attributName not in self._attributs_dict[dbname]:
self.logger.error("Given Attribute ('{}') is not exist in the '{}'-DB.".format(attributName,dbname), exc_info=self._logger_traceback)
#return Status(status=None, track_id=self._error_track_id.incr(), func_name=function_name(-2))
return None
except KeyError:
self.logger.error("'{}'-DB is not found.".format(dbname))
return None
if dbname in self.dbnames:
try:
return self._attributs_dict.get(dbname, None).get(attributName, None)
except:
return None
#[dbname][attributName]
else:
self.logger.error("Given dbName ('{}') is not exist in the current DB-Structure".format(dbname), exc_info=self._logger_traceback)
#return Status(status=None, track_id=self._error_track_id.incr(), func_name=function_name(-2))
return None
def get_all_attr(self, dbname="main"):
s = self._check_db_should_exist()
if not s["status"]:
return s
if u"info" in self.tables(dbname=dbname):
if dbname in self.dbnames:
return self._attributs_dict[dbname]
else:
self.logger.error("Given dbName ('{}') is not exist in the current DB-Structure".format(dbname), exc_info=self._logger_traceback)
return Status(status=None, track_id=self._error_track_id.incr(), func_name=function_name(-2))
else:
self.logger.error("Info-Table wasn't found or not exist. Please initialize the Info Table, bevor you may add any attributes.", exc_info=self._logger_traceback)
return Status(status=None, track_id=self._error_track_id.incr(), func_name=function_name(-2))
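# Minimal sketch of the result-object convention used throughout this wrapper (illustration
# only: the real Status class lives elsewhere in the package and carries more fields such as
# track_id, level and action). It only shows the "check s['status'] before using the payload"
# pattern seen in the methods above and below.
class _StatusSketch(dict):
    def __init__(self, status=None, out_obj=None, **extra):
        super(_StatusSketch, self).__init__(status=status, out_obj=out_obj, **extra)
# s = _StatusSketch(status=True, out_obj="cursor-or-data")
# if s["status"]:
#     payload = s["out_obj"]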
##########################DB-Execute Commands#####################
def execute(self, query, values=False, dbname="main", thread_name="Thread0"):
s = self._check_db_should_exist()
if not s["status"]:
return s
self._commit_if_inserts_was_did()
try:
#c =
s = self._execute(query, values=values, dbname=dbname, thread_name=thread_name, new_cursor=True)
#p(s, c="r")
if not s["status"]:
return False
else:
cur = s["out_obj"]
#p(cur, "cur")
self._update_temp_tablesList_in_instance(thread_name=thread_name)
self._update_temp_indexesList_in_instance(thread_name=thread_name)
self._update_database_pragma_list(thread_name=thread_name)
self._update_pragma_table_info(thread_name=thread_name)
return cur
except Exception as exception:
print_exc_plus() if self._ext_tb else ""
self.logger.error("Something happens while execution of the following query: '{}'. See following Exception: '{}'. ".format(query,str(exception)), exc_info=self._logger_traceback)
return Status(status=False, track_id=self._error_track_id.incr(), func_name=function_name(-2))
def executescript(self, query, thread_name="Thread0", dbname="main"):
s = self._check_db_should_exist()
if not s["status"]:
return s
self._commit_if_inserts_was_did()
try:
#cur = self._db.cursor()
self._threads_cursors[thread_name].executescript(query)
self._update_temp_tablesList_in_instance(thread_name=thread_name)
self._update_temp_indexesList_in_instance(thread_name=thread_name)
self._update_database_pragma_list(thread_name=thread_name)
self._update_pragma_table_info(thread_name=thread_name)
return self._threads_cursors[thread_name]
except Exception as exception:
print_exc_plus() if self._ext_tb else ""
self.logger.error("Something happens while execution of the following query: '{}'. See following Exception: '{}'. ".format(query,str(exception)), exc_info=self._logger_traceback)
return Status(status=False, track_id=self._error_track_id.incr(), func_name=function_name(-2))
def executemany(self, query, argument, thread_name="Thread0", dbname="main"):
s = self._check_db_should_exist()
if not s["status"]:
return s
self._commit_if_inserts_was_did()
try:
s = self._executemany(query, values=argument, dbname=dbname, thread_name=thread_name, new_cursor=True)
if not s["status"]:
return s
else:
cur = s["out_obj"]
self._update_temp_tablesList_in_instance(thread_name=thread_name)
self._update_temp_indexesList_in_instance(thread_name=thread_name)
self._update_database_pragma_list(thread_name=thread_name)
self._update_pragma_table_info(thread_name=thread_name)
return cur
except Exception as exception:
print_exc_plus() if self._ext_tb else ""
self.logger.error("Something happens while execution of the following query: '{}'. See following Exception: '{}'. ".format(query,str(exception)), exc_info=self._logger_traceback)
return Status(status=False, track_id=self._error_track_id.incr(), func_name=function_name(-2))
def _executemany(self, query, values=False, dbname="main", thread_name="Thread0", new_cursor = False):
return self._execution(query, values=values, dbname=dbname, many=True, thread_name=thread_name, new_cursor=new_cursor)
def _execute(self, query, values=False, dbname="main", thread_name="Thread0", new_cursor = False):
return self._execution(query, values=values, dbname=dbname, many=False,thread_name=thread_name, new_cursor=new_cursor)
def _execution(self,query, values=False, dbname="main", many=False, thread_name="Thread0", new_cursor = False):
#p("EXECUTION")
try:
try:
cursor = self._threads_cursors[thread_name]
except:
self._threads_cursors[thread_name] = self._db.cursor()
cursor = self._threads_cursors[thread_name]
cursor = self._db.cursor() if new_cursor else cursor
#p(type(cursor))
#p(query,"query")
if many:
if values:
cursor.executemany(query, values)
else:
cursor.executemany(query)
else:
if values:
cursor.execute(query, values)
else:
cursor.execute(query)
if self._thread_safe:
cursor.join()
#time.sleep(5)
return Status(status=True, out_obj=cursor)
except (sqlite.OperationalError, sqlite.IntegrityError) as exception:
track_id = self._error_track_id.incr()
l_query = query if self._log_content else "!!LogContentDisable!!"
l_values = values if self._log_content else "!!LogContentDisable!!"
if "UNIQUE constraint failed:" in str(exception):
msg = "UniquenessERROR: Redundant row was get and was out-sorted. |ErrorTrackID:'{}'| See Exception: '{}'. InpQuery: '{}'. InpValues: '{}'. ".format( track_id, repr(exception), l_query, l_values)
#self.logger.outsorted_corpus()
if self.typ(dbname=dbname) == "corpus":
level_name = "outsorted_corpus"
self.logger.outsorted_corpus(msg)
elif self.typ(dbname=dbname) == "stats":
level_name = "outsorted_stats"
self.logger.outsorted_stats(msg)
else:
level_name = "error_insertion"
self.logger.error_insertion(msg)
self.error_insertion_counter.incr()
return Status(status=False, track_id=track_id,
desc="Redundant row was get.",
level=level_name, action="outsorted",
inp_obj= (query, values,dbname),
error_name="{} (UniquenessERROR)".format(exception.__class__.__name__), exception=exception)
elif "SQL logic error" in str(exception):
msg = "SQL logic error (ThreadsCrash): Probably it is the result of ThreadsCrash. Please use option 'thread_safe' to ensure ThreadSafety and run script again. (Attention: Executed insertions could be inconsistent!) |ErrorTrackID:'{}'| See Exception: '{}'. InpQuery: '{}'. InpValues: '{}'. ".format( track_id, repr(exception), l_query, l_values)
#self.logger.outsorted_corpus()
level_name = "error_insertion"
self.logger.error_insertion(msg)
self.error_insertion_counter.incr()
#if self._raise_exceptions:
# raise ThreadsCrash,
# "SQL logic error: Probably it is the result of ThreadsCrash. Please use option 'thread_safe' to ensure ThreadSafety and run script again. |ErrorTrackID:'{}'| (Attention: Executed insertions could be inconsistent!) ".format(track_id)
return Status(status=False, track_id=track_id,
desc="SQL logic error. ",
level=level_name, action="ThreadsCrash",
inp_obj= (query, values,dbname),
error_name="{} (ErrorBindingParameter)".format(exception.__class__.__name__), exception=exception)
else:
msg = "ExecutionError: '{}'. (current Execution was ignored) |ErrorTrackID:'{}'|".format( repr(exception),track_id )
if "has no column named" in str(exception):
msg = "ExecutionError: '{}'. (current Execution was ignored) Possible Solution: 'Insert failed columns into SQL-DataBase or use Reader-Template to format and outsorte not needed columns. ' |ErrorTrackID:'{}'|".format( repr(exception),track_id )
self.logger.error_insertion(msg, exc_info=self._logger_traceback)
self.error_insertion_counter.incr()
return Status(status=False, track_id=track_id,
desc="It is not possible to insert all got columns into CorpDB. Possible Explanation: 1. Current DB was initialized with wrong and not full number of columns, please reinitialize current CorpusDB with right column names and types. Or use also option precomputed corp types. For this use option 'template_name' on the Corpus Level ( while Initialization) (ex:template_name='twitter',or template_name='blogger' ) or also on the Reader Level 'reader_formatter_name='twitter'.",
level="error_insertion", action="stop_execution",
inp_obj= (query, values,dbname), func_name=function_name(-3),
error_name=exception.__class__.__name__, exception=repr(exception))
except sqlite.InterfaceError as exception:
#p((query, values))
track_id = self._error_track_id.incr()
l_query = query if self._log_content else "!!LogContentDisable!!"
l_values = values if self._log_content else "!!LogContentDisable!!"
#self.error_insertion_counter.incr()
if "Error binding parameter " in str(exception):
msg = "Error binding parameter(ThreadsCrash): Probably it is the result of ThreadsCrash. Please use option 'thread_safe' to ensure ThreadSafety and run script again. (Attention: Executed insertions could be inconsistent!) |ErrorTrackID:'{}'| See Exception: '{}'. InpQuery: '{}'. InpValues: '{}'. ".format( track_id, repr(exception), l_query, l_values)
#self.logger.outsorted_corpus()
level_name = "error_insertion"
self.logger.error_insertion(msg)
#if self._raise_exceptions:
# raise ThreadsCrash, "Error binding parameter: Probably it is the result of ThreadsCrash. Please use option 'thread_safe' to ensure ThreadSafety and run script again. |ErrorTrackID:'{}'| (Attention: Executed insertions could be inconsistent!) ".format(track_id)
self.error_insertion_counter.incr()
return Status(status=False, track_id=track_id,
desc="Error binding parameter",
level=level_name, action="ThreadsCrash",
inp_obj= (query, values,dbname),
error_name="{} (ErrorBindingParameter)".format(exception.__class__.__name__), exception=exception)
else:
self.logger.error_insertion("ExecutionError: '{}'. (current Execution was ignored) |ErrorTrackID:'{}'|".format( repr(exception),track_id ), exc_info=self._logger_traceback)
self.error_insertion_counter.incr()
return Status(status=False, track_id=track_id,
desc=repr(exception),
level="error_insertion", action="ignored",
inp_obj= (query, values,dbname), func_name=function_name(-3),
error_name=exception.__class__.__name__, exception=exception)
except Exception as exception:
track_id = self._error_track_id.incr()
print_exc_plus() if self._ext_tb else ""
self.logger.low_debug("ExecutionError: |ErrorTrackID:'{}'| Following Query could have an Error: '{}'. Track the error in the 'error_insertion'-Level. ".format(track_id, query, ))
l_query = query if self._log_content else "!!LogContentDisable!!"
l_values = values if self._log_content else "!!LogContentDisable!!"
self.error_insertion_counter.incr()
if "has no column named" in str(exception):
self.logger.error_insertion("ExecutionError: One of the columns is not in the Table. See Exception: '{}'. (current Execution was ignored) |ErrorTrackID:'{}'| | |
# -*- coding: utf-8 -*-
"""
Created 5 March 2019
epsc_peak_x.y.z.py
"""
# from __main__ import *
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import elephant
from neo.io import IgorIO
import os
from collections import OrderedDict
import math
def get_metadata(file, data_notes):
'''Takes a filename and parses it for metadata, and returns metadata in an
orderedDict as a pandas DataFrame for saving later
Also takes information from the cell spreadsheet in data_notes'''
# pull out cell id, cell number, date and condition
file_split = file.split('_')
cell_id = file_split[0]+'_'+file_split[1]
cell_num = cell_id[-1:]
date = '20'+cell_id[2:4]+'-'+cell_id[4:6]+'-'+cell_id[6:8]
if 'drug' in file:
condition = 'TTX+4-AP'
else:
condition = 'control'
# grab metadata from data notes spreadsheet
file_data = data_notes[data_notes['Cell name'] == file]
cell_path = file_data['File Path'].tolist()[0]
genotype = file_data['Genotype'].tolist()[0]
cell_type = file_data['Cell type'].tolist()[0]
depol_sweep_start = file_data['Depol sweeps start'].tolist()[0]
depol_sweep_stop = file_data['Depol sweeps stop'].tolist()[0]
# save metadata into an OrderedDict pandas DataFrame
dict = OrderedDict()
dict['Date'] = date
dict['Cell ID'] = cell_id
dict['Cell Number'] = cell_num
dict['Cell Path'] = cell_path
# dict['Condition'] = condition
dict['Genotype'] = genotype
dict['Cell Type'] = cell_type
dict['Exclude Sweep Start'] = depol_sweep_start
dict['Exclude Sweep Stop'] = depol_sweep_stop
metadata = pd.DataFrame(dict, index=range(1))
return metadata
def igor_to_pandas(file, data_dir):
'''This function opens an igor binary file (.ibw), extracts the time
series data, and returns a pandas DataFrame'''
file_path = os.path.join(data_dir, file)
data_raw = IgorIO(filename=file_path)
data_neo = data_raw.read_block()
data_neo_array = data_neo.segments[0].analogsignals[0]
data_df = pd.DataFrame(data_neo_array.as_array())
return data_df
def mean_baseline(data, stim_time, pre_stim=100, sf=10):
'''
Find the mean baseline in a given time series
Parameters
----------
data: pandas.Series or pandas.DataFrame
The time series data for which you want a baseline.
stim_time: int or float
The time in ms when stimulus is triggered.
pre_stim: int or float
Time in ms before the stimulus trigger over which baseline is measured.
sf: int or float
The sampling frequency in kHz.
Returns
-------
baseline: float or pandas.Series
The mean baseline over the defined window
'''
start = (stim_time - pre_stim) * sf
stop = (stim_time - 1) * sf
window = data.iloc[start:stop]
baseline = window.mean()
return baseline
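# Worked example of the index arithmetic above (illustration only; the numbers mirror the
# defaults used later in this script: stim_time=500 ms, pre_stim=100 ms, sf=10 kHz):
_baseline_window_start = (500 - 100) * 10   # -> sample 4000
_baseline_window_stop = (500 - 1) * 10      # -> sample 4990, i.e. the 100 ms before the stimulus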
def epsc_peak(data, baseline, stim_time, polarity='-', post_stim=100, sf=10):
'''
Find the peak EPSC value for a pandas.Series or for each sweep (column) of
a pandas.DataFrame. This finds the absolute peak value of mean baseline
subtracted data.
Parameters:
-----------
data: pandas.Series or pandas.DataFrame
Time series data with stimulated synaptic response triggered at the
same time for each sweep.
baseline: scalar or pandas.Series
Mean baseline values used to subtract for each sweep.
stim_time: int or float
Time in ms at which stimulus is triggered each sweep.
polarity: str
The expected polarity of the EPSC; negative: '-'; positive: '+'.
Default is '-'.
post_stim: int or float
Time in ms that marks the end of the sampling window post stimulus.
Default is 100 ms.
sf: int or float
The sampling frequency in kHz. Default is 10 kHz.
Returns
-------
epsc_peaks: pandas.Series
The absolute peak of mean baseline subtracted time series data.
'''
subtracted_data = data - baseline
start = stim_time * sf
end = (stim_time + post_stim) * sf
peak_window = subtracted_data.iloc[start:end]
if polarity == '-':
epsc_peaks = peak_window.min()
elif polarity == '+':
epsc_peaks = peak_window.max()
else:
raise ValueError(
"polarity must either be + or -"
)
return epsc_peaks
def series_resistance(data, tp_start, vm_jump, sf=10):
'''
Calculate the approximate series resistance (Rs) from a test pulse (tp).
Parameters
----------
data: pandas.Series or pandas.DataFrame
Raw time series data of the v-clamp recording in nA.
tp_start: int or float
Time in ms when test pulse begins.
vm_jump: int or float
Amplitude of the test pulse voltage command in mV.
sf: int or float
Sampling frequency in kHz. Default is 10 kHz.
Returns
-------
rs: pandas.Series of floats
The series resistance for each sweep in MOhms.
'''
# find the baseline 10 ms pre test pulse and subtract from raw data
rs_baseline = mean_baseline(data, stim_time=tp_start, pre_stim=11)
rs_subtracted = data - rs_baseline
# set up indices for starting and ending peak window
start = tp_start * sf
end = (tp_start + 2) * sf
rs_window = rs_subtracted.iloc[start:end]
if vm_jump > 0:
rs_peak = rs_window.max()
else:
rs_peak = rs_window.min()
# calculate Rs via V=IR -> Rs = V/I
rs = ((vm_jump * 10**-3) / (rs_peak * 10**-9)) * 10**-6
return rs
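# Self-contained usage sketch (synthetic data, illustration only): builds one fake sweep with a
# test-pulse artefact and a small negative "EPSC", then runs the three helpers above. All
# amplitudes, times and the function name _demo_epsc_helpers are made up for the example.
def _demo_epsc_helpers():
    sf = 10                                     # kHz, i.e. samples per ms
    sweep = pd.Series(np.zeros(1000 * sf))      # one 1-second sweep of zeros (nA)
    sweep.iloc[50 * sf:52 * sf] = -0.5          # fake test-pulse current at 50 ms
    sweep.iloc[510 * sf:530 * sf] = -0.2        # fake light-evoked EPSC after a 500 ms stimulus
    baseline = mean_baseline(sweep, stim_time=500)
    peak = epsc_peak(sweep, baseline, stim_time=500)         # ~ -0.2 nA for this fake sweep
    rs = series_resistance(sweep, tp_start=50, vm_jump=-5)   # ~ 10 MOhm for this fake pulse
    return baseline, peak, rs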
''' *********************************************************************** '''
''' ################## Define file structure on server #################### '''
# home_dir will depend on the OS, but the rest will not
# query machine identity and set home_dir from there
machine = os.uname()[0]
if machine == 'Darwin':
home_dir = '/Volumes/Urban'
elif machine == 'Linux':
home_dir = '/run/user/1000/gvfs/smb-share:server=172.16.17.32,share=urban'
else:
home_dir = os.path.join('Z:', os.sep)
project_dir = os.path.join(home_dir, 'Huang', 'OSN_OMPvGg8_MTC')
figure_dir = os.path.join(project_dir, 'figures')
table_dir = os.path.join(project_dir, 'tables')
data_dir = os.path.join(project_dir, 'data')
''' ## Open the notes spreadsheet and parse for what we want to analyze ## '''
# open metadata file
data_notes = pd.read_csv(os.path.join(table_dir, 'OSN_Gg8vOMP.csv'))
# pull out cell_id for directory, file name, and make the full path
file_name_list = data_notes['Cell name'].tolist()
cell_id_list = []
for file in file_name_list:
file_split = file.split('_')
cell_id = file_split[0]+'_'+file_split[1]
cell_id_list.append(cell_id)
file_path_list = []
for cell, file in zip(cell_id_list, file_name_list):
file_path = os.path.join(cell, file + '.ibw')
file_path_list.append(file_path)
data_notes = pd.concat([pd.DataFrame({'File Path': file_path_list}), data_notes], axis=1)
# drop cells that didn't save to igor
noigor_list = np.array(data_notes[data_notes['Igor saved?'] == 'No'].index)
data_notes = data_notes.drop(index=noigor_list)
# drop cells that don't have any # of drug sweeps
nodrug_list = np.array(data_notes[data_notes['# of drug sweeps'].isnull() == True].index)
data_notes = data_notes.drop(index=nodrug_list)
# update file name list to have only files you want to analyze after logic
file_name_list = data_notes['Cell name'].tolist()
''' ##########################################################################
This is all the analysis, figures, saving
Read in file metadata, open file from igor, convert to pandas
##############################################################################
'''
# loop through all the files in file_name_list for plots and saving
for file_name in file_name_list:
# set file name from list
file = file_name
# gather metadata and set some key parameters for use later on in loop
metadata = get_metadata(file, data_notes)
file_path = metadata['Cell Path'][0]
cell_id = metadata['Cell ID'][0]
genotype = metadata['Genotype'][0]
exclude_start = metadata['Exclude Sweep Start'][0]
exclude_stop = metadata['Exclude Sweep Stop'][0]
# open igor file and convert to pandas
data = igor_to_pandas(file_path, data_dir)
# process logic and build exclude sweeps list from metadata, and exclude sweeps
if math.isnan(exclude_start) is False:
# need to pull out the end of the excluded sweeps
# if all sweeps after start are excluded
if math.isnan(exclude_stop) is True:
data = data.iloc[:, :int(exclude_start)]
# else only exclude traces in between start and stop
else:
begin = data.iloc[:, :int(exclude_start)]
end = data.iloc[:, int(exclude_stop):]
data = pd.concat([begin, end], axis=1)
else:
pass
'''
Pull out EPSC peak from unfiltered signals
Baseline 100 ms preceding blue light
Peak within 250 ms of blue light
'''
baseline = mean_baseline(data, 500)
peaks = epsc_peak(data, baseline, 500)
'''
Pull out EPSC peaks from filtered signals
Baseline 100 ms preceding blue light
Peak within 250 ms of blue light
'''
# filter signal with butterworth filter at 500 Hz for data
filt_data = elephant.signal_processing.butter(data.T,
lowpass_freq=500.0,
fs=10000.0)
filt_data = pd.DataFrame(filt_data).T
filt_baseline = mean_baseline(filt_data, 500)
filt_peaks = epsc_peak(filt_data, filt_baseline, 500)
''' Calculating Series Resistance (rs) from test pulse (tp) '''
rs = series_resistance(data, 50, -5)
''' Plot EPSC peaks and Rs over time of experiment '''
# set up index markers for data | drug line and drug stimuli
# pull out number of sweeps for both conditions and all
n_control_sweeps = len(peaks)
# set up auto y max for peak plots (min since negative)
y_min = peaks.min()
y_min_lim = y_min * 1.15 * 1000
# set up logic for Rs y scaling: if < 20 MOhms, don't scale, if > scale
if rs.max() <= 20:
rs_y_min = 0
rs_y_max = 20
else:
rs_y_min = rs.min() * 0.5
rs_y_max = rs.max() * 1.2
# make a figure with 2 plots
fig, axs = plt.subplots(2, 2, figsize=(6, 6), constrained_layout=True)
fig.suptitle('Summary for ' + genotype + ' ' + cell_id)
# optional for plotting unfiltered on same graph for comparison
axs[0, 0].plot(peaks*1000, marker='.', color='darkgray', linestyle='', label='raw')
# plot the filtered peak currents NOTE: convert peak values to pA
axs[0, 0].plot(filt_peaks*1000, color='k', marker='.', linestyle='', label='filtered')
axs[0, 0].set_xlabel('Stimulus Number')
axs[0, 0].set_ylabel('EPSC Peak (pA)')
| |
#!python
# Produces the set of factorial implementations (defined by models argument) from input RT-level model
# for given configuration of implementation flow (config.flow in the input configuration argument)
# Author: <NAME>, Universitat Politecnica de Valencia
import sys
import xml.etree.ElementTree as ET
import re
import os
import subprocess
import string
import copy
import glob
import shutil
import datetime
import time
from sys import platform
from multiprocessing import Process, Manager
from multiprocessing.managers import BaseManager
from subprocess import call
from Datamanager import *
sys.path.insert(0, os.path.join(os.getcwd(), './SupportScripts'))
import VendorSpecific
TIMEOUT_IMPL_PHASE = 2500
MAXCONSTRVAL = 220.0
def implement_model(config, model, adjust_constraints, stat, ForceReimplement = False):
os.chdir(config.design_genconf.design_dir)
if (not ForceReimplement) and (update_metrics(model) != None): return
if not os.path.exists(config.design_genconf.tool_log_dir):
os.makedirs(config.design_genconf.tool_log_dir)
log = open(os.path.join(config.design_genconf.tool_log_dir, model.Label+".log"), 'w')
log.write("\nImplementing: " + model.Label + ', started: ' + datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S'))
if stat == None: stat = ProcStatus('Config')
if not 'EvalTime' in model.Metrics: model.Metrics['EvalTime'] = dict()
if not isinstance(model.Metrics['EvalTime'], dict): model.Metrics['EvalTime'] = dict()
    for k,v in model.Metrics['EvalTime'].items():
stat.update(str(k), '{} sec'.format(v), 'ok')
if model.ModelPath == '': model.ModelPath = os.path.join(config.design_genconf.design_dir, model.Label)
#If this configuration has not been implemented previously - implement it, retrieve the results, update statistics for monitoring interface
if ForceReimplement:
if os.path.exists(model.ModelPath):
try:
# #backup_script = 'zip -r {0}_BACKUP {1} > {2}/ziplog_{3}.log'.format(model.Label, cleanup_path(get_relative_path(os.getcwd(), model.ModelPath)), config.design_genconf.tool_log_dir, model.Label)
# #proc = subprocess.Popen(backup_script, shell=True)
# #print('Running backup of previous results: {0}'.format(backup_script))
# #proc.wait()
shutil.rmtree(model.ModelPath)
            except Exception as e:
                print(str(e))
if not os.path.exists(model.ModelPath):
shutil.copytree(os.path.join(config.design_genconf.design_dir, config.design_genconf.template_dir), model.ModelPath)
os.chdir(model.ModelPath)
print("Started Process [" + model.Label + "], workdir: " + os.getcwd())
if not os.path.exists(config.design_genconf.log_dir):
os.makedirs(config.design_genconf.log_dir)
constraint_template = ''
all_constraints = [p.constraint_to_adjust for p in config.flow.get_phase_chain() if p.constraint_to_adjust!=None]
for c in all_constraints:
c.current_value = c.start_value
if os.path.exists(config.design_genconf.constraint_file):
with open(config.design_genconf.constraint_file, 'r') as f:
constraint_template = f.read()
implentability_checked, impl_test = False, False
phase = config.flow.entry_phase
while phase != None:
stat.update('Progress', phase.name, 'wait')
stat.update(phase.name, 'In progress', 'wait')
        # update the constraint file if any constraints are defined
if len(phase.constraints_to_export) > 0:
constraint_content = constraint_template
for ce in all_constraints:
constraint_content = constraint_content.replace(ce.placeholder, str(ce.current_value))
stat.update('Iteration', ce.iteration, 'ok')
ce.iteration += 1
if ce.iteration > 14:
model.Metrics['Implprop']['Error'] = 'ImplError'
model.Metrics['Error']='ImplError'
phase = None
log.write('\n{0}\tImplementation failed: too many iterations (hang detected)'.format(str(datetime.datetime.now().replace(microsecond=0))))
stat.update('Progress', 'Error', 'err')
model.serialize(SerializationFormats.XML, model.std_dumpfile_name)
return
if os.path.exists(config.design_genconf.constraint_file):
with open(config.design_genconf.constraint_file, 'w') as f:
f.write(constraint_content)
completed = False
attempt = 0
while not completed:
attempt+=1
script = getattr(VendorSpecific, phase.script_builder)(phase, config, model)
timestart = datetime.datetime.now().replace(microsecond=0)
start_t = time.time()
log.write('\n{0}\tStarting: {1}, attempt: {2}, script: {{{3}}}'.format(str(timestart), phase.name, attempt, script))
log.flush()
proc = subprocess.Popen(script, shell=True)
time.sleep(1)
while (proc.poll() == None) and (time.time() - start_t < TIMEOUT_IMPL_PHASE):
time.sleep(1)
if proc.poll() == None:
log.write('\n{0}\tTimeout: {1}, attempt: {2}, script: {{{3}}}'.format(str(datetime.datetime.now().replace(microsecond=0)), phase.name, attempt, script))
log.flush()
proc.kill()
success = False
else:
success = getattr(VendorSpecific, phase.postcondition_handler)(phase, config, model)
timetaken = datetime.datetime.now().replace(microsecond=0) - timestart
if success:
if not phase.name in model.Metrics['EvalTime']:
model.Metrics['EvalTime'][phase.name] = int(time_to_seconds(timetaken))
else:
model.Metrics['EvalTime'][phase.name] += int(time_to_seconds(timetaken))
stat.update(phase.name, '{} sec'.format(model.Metrics['EvalTime'][phase.name]), 'ok')
completed = True
else:
if attempt > config.retry_attempts:
#report an error and stop
stat.update(phase.name, 'Error', 'err')
model.Metrics['Implprop']['Error'] = 'ImplError'
model.Metrics['Error']='ImplError'
model.serialize(SerializationFormats.XML, model.std_dumpfile_name)
                    log.write("\nPostcondition/Timeout error at {0}, exiting".format(phase.name))
log.close()
phase = None
return
res = getattr(VendorSpecific, phase.result_handler)(phase, config, model)
if not 'Implprop' in model.Metrics: model.Metrics['Implprop'] = dict()
if not isinstance(model.Metrics['Implprop'], dict): model.Metrics['Implprop'] = dict()
            for k, v in res.items():
model.Metrics['Implprop'][k] = v
stat.update(k, str(v), 'res')
if adjust_constraints and phase.constraint_to_adjust != None:
satisfied = getattr(VendorSpecific, phase.constraint_to_adjust.check_handler)(phase, config, model)
if satisfied:
implentability_checked = True
if impl_test:
                        impl_test = False
phase.constraint_to_adjust.current_value = saved_constraint
log.write('\n{0}\tImplementation test passed'.format(str(timestart)))
if phase.constraint_to_adjust.converged:
phase = phase.next
else:
#strengthen the constraint until not satisfied
if phase.constraint_to_adjust.goal == AdjustGoal.min:
phase.constraint_to_adjust.current_value -= phase.constraint_to_adjust.adjust_step
elif phase.constraint_to_adjust.goal == AdjustGoal.max:
phase.constraint_to_adjust.current_value += phase.constraint_to_adjust.adjust_step
if phase.constraint_to_adjust.current_value > MAXCONSTRVAL:
model.Metrics['Implprop']['Error'] = 'ImplError'
model.Metrics['Error']='ImplError'
phase = None
break
log.write('\n{0}\tConstraint adjusted: {1} = {2}'.format(str(timestart), phase.constraint_to_adjust.placeholder, phase.constraint_to_adjust.current_value))
if phase.constraint_to_adjust.current_value <= 0:
#mask config as non implementable and exit
model.Metrics['Implprop']['Error'] = 'ImplError'
model.Metrics['Error']='ImplError'
completed = True
phase = None
break
phase = phase.constraint_to_adjust.return_to_phase
else:
if (not implentability_checked) and phase.constraint_to_adjust.goal == AdjustGoal.max:
if impl_test:
model.Metrics['Implprop']['Error'] = 'ImplError'
model.Metrics['Error']='ImplError'
completed = True
phase = None
log.write('\n{0}\tImplementation test failed'.format(str(timestart)))
break
else:
saved_constraint = phase.constraint_to_adjust.current_value - phase.constraint_to_adjust.adjust_step
phase.constraint_to_adjust.current_value = 2.0
impl_test = True
phase = phase.constraint_to_adjust.return_to_phase
log.write('\n{0}\tImplementation test started'.format(str(timestart)))
else:
#once not satisfied - converged
phase.constraint_to_adjust.converged = True
#relax the constraint until satisfied
if phase.constraint_to_adjust.goal == AdjustGoal.min:
phase.constraint_to_adjust.current_value += phase.constraint_to_adjust.adjust_step
if phase.constraint_to_adjust.current_value > MAXCONSTRVAL:
model.Metrics['Implprop']['Error'] = 'ImplError'
model.Metrics['Error']='ImplError'
phase = None
break
elif phase.constraint_to_adjust.goal == AdjustGoal.max:
phase.constraint_to_adjust.current_value -= phase.constraint_to_adjust.adjust_step
if phase.constraint_to_adjust.current_value <= 0:
#mask config as non implementable and exit
model.Metrics['Implprop']['Error'] = 'ImplError'
model.Metrics['Error']='ImplError'
completed = True
phase = None
break
log.write('\n{0}\tConstraint adjusted: {1} = {2}'.format(str(timestart), phase.constraint_to_adjust.placeholder, phase.constraint_to_adjust.current_value))
phase = phase.constraint_to_adjust.return_to_phase
else:
phase = phase.next
model.serialize(SerializationFormats.XML, model.std_dumpfile_name)
stat.update('Progress', 'Completed', 'ok')
log.close()
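# Illustrative sketch (not part of the original tool): the constraint-adjustment loop above
# tightens the adjustable constraint while the phase's check still passes and relaxes it again
# once the check fails, converging on the last satisfiable value. The helper below mirrors that
# search in isolation; `satisfied` stands in for the vendor-specific check handler and every name
# here is an assumption for illustration only.
def _sketch_adjust_constraint(start_value, step, satisfied, goal_is_min=True, max_value=MAXCONSTRVAL):
    value = start_value
    while satisfied(value):
        # strengthen the constraint until the check fails or a bound is hit
        value = value - step if goal_is_min else value + step
        if value <= 0 or value > max_value:
            return None
    while not satisfied(value):
        # converged: relax back towards the last satisfiable value
        value = value + step if goal_is_min else value - step
        if value <= 0 or value > max_value:
            return None
    return value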
def power_simulation(dut, davosconf):
os.chdir(davosconf.ExperimentalDesignConfig.design_genconf.design_dir)
dut.ModelPath = os.path.join(davosconf.ExperimentalDesignConfig.design_genconf.design_dir, dut.Label)
if not os.path.exists(dut.ModelPath):
print('power_simulation: path {0} not found, skipping {1}'.format(dut.ModelPath, dut.Label))
return()
if os.path.exists( os.path.join(dut.ModelPath,'pwr.log') ):
print('Activity file exists for config {0}, skipping simulation step'.format(dut.Label))
else:
print('Simulating switching activity for config {0}'.format(dut.Label))
workloadcyles = 14500
clockperiod = 1000.0/dut.Metrics['Implprop']['FREQUENCY']
with open(os.path.join(davosconf.ExperimentalDesignConfig.design_genconf.design_dir, davosconf.ExperimentalDesignConfig.design_genconf.template_dir, 'TbTop.v'), 'r') as src, open(os.path.join(dut.ModelPath, 'TbTop.v'), 'w') as dst:
content = src.read()
            for k,v in {'#CLOCKPERIOD' : '{0:.3f}'.format(clockperiod)}.items(): content=content.replace(k,v)
dst.write(content)
with open(os.path.join(davosconf.ExperimentalDesignConfig.design_genconf.design_dir, davosconf.ExperimentalDesignConfig.design_genconf.template_dir, 'simrun.do'), 'r') as src, open(os.path.join(dut.ModelPath, 'simrun.do'), 'w') as dst:
content = src.read()
            for k,v in {'#PATH':dut.ModelPath.replace("\\","/"), '#WORKLOADTIME':'{0:d}ns'.format(int(workloadcyles*clockperiod))}.items(): content=content.replace(k,v)
dst.write(content)
with open(os.path.join(davosconf.ExperimentalDesignConfig.design_genconf.design_dir, davosconf.ExperimentalDesignConfig.design_genconf.template_dir, 'pwrsim.do'), 'r') as src, open(os.path.join(dut.ModelPath, 'pwrsim.do'), 'w') as dst:
content = src.read()
            for k,v in {'#PATH':dut.ModelPath.replace("\\","/"), '#WORKLOADTIME':'{0:d}ns'.format(int(workloadcyles*clockperiod))}.items(): content=content.replace(k,v)
dst.write(content)
os.chdir(dut.ModelPath)
start_t = time.time()
script='vivado -mode batch -source {0} > {1} 2> {2}'.format(os.path.join(dut.ModelPath, 'pwrsim.do'), davosconf.ExperimentalDesignConfig.design_genconf.log_dir + '/powersim.log', davosconf.ExperimentalDesignConfig.design_genconf.log_dir + '/powersim.err')
proc = subprocess.Popen(script, shell=True)
time.sleep(1)
while (proc.poll() == None) and (time.time() - start_t < 2000):
time.sleep(1)
if proc.poll() == None:
proc.kill()
print('power_simulation : TIMEOUT DUT: {0}'.format(dut.Label))
dut.Metrics['Implprop']['POWER_PL'] = 0.0
return()
        # check and fix the switching activity file
with open(os.path.join(dut.ModelPath,'pwrsim.saif'),'r') as f:
content = f.read()
#content = content.replace('(INSTANCE TbTop', ' (INSTANCE design_1_wrapper\n\t\t(INSTANCE design_1_i') + ')'
content = content.replace('(INSTANCE TbTop', ' (INSTANCE ZynqEnv_wrapper\n\t\t(INSTANCE ZynqEnv_i') + ')'
with open(os.path.join(dut.ModelPath,'pwrsim.saif'),'w') as f:
f.write(content)
script = """open_project AVR_ZC.xpr
open_run [get_runs {1}]
read_saif {{{0}/pwrsim.saif}} -strip_path ZynqEnv_wrapper -out_file rds.log
report_power -file {0}/pwr.log
""".format(dut.ModelPath, 'ImplementationPhase')
proc = subprocess.Popen('vivado -mode tcl'.format(), stdin=subprocess.PIPE, stdout=subprocess.PIPE , shell=True)
out, err = proc.communicate(script.replace('\\','/').encode())
with open(os.path.join(dut.ModelPath, 'pwr.log'), 'r') as f:
content = f.read()
dut.Metrics['Implprop']['POWER_DYNAMIC'] = float(re.findall("Dynamic \(W\).*?([0-9\.]+)", content)[0])
dut.Metrics['Implprop']['POWER_PL'] = float(re.findall(davosconf.ExperimentalDesignConfig.design_genconf.uut_root+".*?([0-9\.]+)", content)[0])
shutil.rmtree(os.path.join(dut.ModelPath, 'AVR_ZC.sim', 'sim_1'), ignore_errors=True, onerror=None)
print('Power simulated for {0}: {1:.4f} W'.format(dut.Label, dut.Metrics['Implprop']['POWER_PL']))
#returns dictionary dict[config_label] = (process_id=None, model_descriptor, statistic_descriptor)
#read from file statfile (xml)
def recover_statistics(model_list, statfile, recover_state = True):
procdict = dict()
recover_stat_tree = None
if recover_state and os.path.exists(statfile):
recover_stat_tree = ET.parse(statfile).getroot()
for i in model_list:
stat = ProcStatus('Config')
if(recover_stat_tree is not None):
stat.from_xml(recover_stat_tree, 'Config', 'Label', i.Label)
procdict[i.Label] = (None, i, stat)
stat.update('Label', i.Label,'')
return(procdict)
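# Illustrative usage (an assumption, not from the original script): recover per-configuration
# statistics from the XML stat file and unpack one entry of the returned dictionary.
#   procdict = recover_statistics(models, config.statfile)
#   process_id, model_desc, stat_desc = procdict[models[0].Label]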
def proclist_stat(proclist):
active_proc = 0
finished_proc = 0
for p in proclist:
if p[0] != None:
if(p[0].is_alive()):
active_proc += 1
else:
finished_proc += 1
return(active_proc, finished_proc)
def allocate_gui_local(config):
os.chdir(config.design_genconf.design_dir)
copy_all_files(os.path.join(config.call_dir,'UserInterface/IMPL'), config.design_genconf.design_dir)
copy_all_files(os.path.join(config.call_dir,'UserInterface/libs'), os.path.join(config.design_genconf.design_dir, 'libs'))
with open(os.path.join(config.design_genconf.design_dir, 'Monitoring.js'), 'r') as f:
content = f.read()
content = content.replace('#FULLSTAT', '\'{0}\''.format(config.statfile)).replace('#MINSTAT', '\'{0}\''.format(config.statfile.replace('.xml','_min.xml')))
theaders = ['Label']
tlogdesc = ['./Logs/@.log']
for c in VendorSpecific.GetResultLabels():
theaders.append(c)
tlogdesc.append('./Logs/@.log')
theaders.append('Progress')
tlogdesc.append('./Logs/@.log')
theaders.append('Iteration')
tlogdesc.append('./Logs/@.log')
for p in config.flow.get_phase_chain():
theaders.append(p.name)
tlogdesc.append('./@/{0}/{1}'.format(config.design_genconf.log_dir, p.logfile))
content = content.replace('#THEADERS', '[{0}]'.format(', '.join(['\'{0}\''.format(c) for c in theaders])))
content = content.replace('#LOGDESC', '[{0}]'.format(', '.join(['\'{0}\''.format(c) for c in tlogdesc])))
content = content.replace('#GLOBALHEADERS', "['Phase', 'Progress', 'Time_Taken', 'Report']")
with open(os.path.join(config.design_genconf.design_dir, 'Monitoring.js'), 'w') as f:
f.write(content)
#Launch monitoring interface
try:
if platform == 'linux' or platform == 'linux2':
subprocess.check_output('xdg-open ' + os.path.join(config.call_dir, config.design_genconf.design_dir, 'Monitoring.html > ./dummylog.txt'), shell=True)
elif platform == 'cygwin':
subprocess.check_output('cygstart ' + os.path.join(config.call_dir, config.design_genconf.design_dir, 'Monitoring.html > ./dummylog.txt'), shell=True)
elif platform == 'win32' or platform == 'win64':
subprocess.check_output('start ' + os.path.join(config.call_dir, config.design_genconf.design_dir, 'Monitoring.html > ./dummylog.txt'), shell=True)
except subprocess.CalledProcessError as e:
        print(e.output)
def update_metrics(m):
if os.path.isfile(os.path.join(m.ModelPath, HDLModelDescriptor.std_dumpfile_name)):
tag = ET.parse(os.path.join(m.ModelPath, HDLModelDescriptor.std_dumpfile_name)).getroot()
res = HDLModelDescriptor.deserialize(SerializationFormats.XML, tag).Metrics
        for k, v in res.items():
m.Metrics[k] = v
return(m.Metrics)
else:
return(None)
def export_results(models, dir):
with open(os.path.join(dir, 'IMPLEMENTATION_RESULTS.xml'), 'w') as f:
f.write('<?xml version="1.0"?>\n<data>\n{0}\n</data>'.format('\n\n'.join([m.serialize(SerializationFormats.XML) for m in models])))
with open(os.path.join(dir, 'IMPLEMENTATION_SUMMARY.xml'), 'w') as f:
f.write('<?xml version="1.0"?>\n<data>\n{0}\n</data>'.format('\n\n'.join([m.log_xml() for m in models])))
def build_summary_page(models, fname):
spage = HtmlPage('Summary')
spage.css_file = 'markupstyle.css'
T = Table('Summary')
T.add_column('Label')
factors = []
for f in models[0].Factors:
factors.append(f.FactorName)
T.add_column(f.FactorName)
for i in range(len(models)):
T.add_row()
T.put(i,0, str(models[i].Label))
for f in range(len(factors)):
insert_xforwarded_for: Optional[pulumi.Input[str]] = None,
lws_separator: Optional[pulumi.Input[str]] = None,
lws_width: Optional[pulumi.Input[int]] = None,
name: Optional[pulumi.Input[str]] = None,
oneconnect_transformations: Optional[pulumi.Input[str]] = None,
proxy_type: Optional[pulumi.Input[str]] = None,
redirect_rewrite: Optional[pulumi.Input[str]] = None,
request_chunking: Optional[pulumi.Input[str]] = None,
response_chunking: Optional[pulumi.Input[str]] = None,
response_headers_permitteds: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
server_agent_name: Optional[pulumi.Input[str]] = None,
tm_partition: Optional[pulumi.Input[str]] = None,
via_host_name: Optional[pulumi.Input[str]] = None,
via_request: Optional[pulumi.Input[str]] = None,
via_response: Optional[pulumi.Input[str]] = None,
xff_alternative_names: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None) -> 'ProfileHttp':
"""
Get an existing ProfileHttp resource's state with the given name, id, and optional extra
properties used to qualify the lookup.
:param str resource_name: The unique name of the resulting resource.
:param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[str] accept_xff: Enables or disables trusting the client IP address, and statistics from the client IP address, based on the request's XFF (X-forwarded-for) headers, if they exist.
:param pulumi.Input[str] app_service: The application service to which the object belongs.
:param pulumi.Input[str] basic_auth_realm: Specifies a quoted string for the basic authentication realm. The system sends this string to a client whenever authorization fails. The default value is `none`
:param pulumi.Input[str] defaults_from: Specifies the profile that you want to use as the parent profile. Your new profile inherits all settings and values from the parent profile specified.
:param pulumi.Input[str] description: Specifies user-defined description.
:param pulumi.Input[str] encrypt_cookie_secret: Type a passphrase for cookie encryption.
:param pulumi.Input[Sequence[pulumi.Input[str]]] encrypt_cookies: Type the cookie names for the system to encrypt.
:param pulumi.Input[str] fallback_host: Specifies an HTTP fallback host. HTTP redirection allows you to redirect HTTP traffic to another protocol identifier, host name, port number
:param pulumi.Input[Sequence[pulumi.Input[str]]] fallback_status_codes: Specifies one or more three-digit status codes that can be returned by an HTTP server,that should trigger a redirection to the fallback host.
:param pulumi.Input[str] head_erase: Specifies the header string that you want to erase from an HTTP request. Default is `none`.
:param pulumi.Input[str] head_insert: Specifies a quoted header string that you want to insert into an HTTP request.Default is `none`.
:param pulumi.Input[str] insert_xforwarded_for: Specifies, when enabled, that the system inserts an X-Forwarded-For header in an HTTP request with the client IP address, to use with connection pooling. The default is `Disabled`.
:param pulumi.Input[str] lws_separator: Specifies the linear white space (LWS) separator that the system inserts when a header exceeds the maximum width you
specify in the LWS Maximum Columns setting.
        :param pulumi.Input[int] lws_width: Specifies the maximum width (LWS Maximum Columns) that a header may reach before the system inserts the LWS separator specified above.
:param pulumi.Input[str] name: Specifies the name of the http profile,name of Profile should be full path. Full path is the combination of the `partition + profile name`,For example `/Common/test-http-profile`.
:param pulumi.Input[str] oneconnect_transformations: Enables the system to perform HTTP header transformations for the purpose of keeping server-side connections open. This feature requires configuration of a OneConnect profile
:param pulumi.Input[str] proxy_type: Specifies the proxy mode for this profile: reverse, explicit, or transparent. The default is `reverse`.
:param pulumi.Input[str] redirect_rewrite: Specifies whether the system rewrites the URIs that are part of HTTP redirect (3XX) responses. The default is `none`.
:param pulumi.Input[str] request_chunking: Specifies how the system handles HTTP content that is chunked by a client. The default is `preserve`.
:param pulumi.Input[str] response_chunking: Specifies how the system handles HTTP content that is chunked by a server. The default is `selective`.
:param pulumi.Input[Sequence[pulumi.Input[str]]] response_headers_permitteds: Specifies headers that the BIG-IP system allows in an HTTP response.If you are specifying more than one header, separate the headers with a blank space.
:param pulumi.Input[str] server_agent_name: Specifies the value of the Server header in responses that the BIG-IP itself generates. The default is BigIP. If no
string is specified, then no Server header will be added to such responses
:param pulumi.Input[str] tm_partition: Displays the administrative partition within which this profile resides.
:param pulumi.Input[str] via_host_name: Specifies the hostname to include into Via header
:param pulumi.Input[str] via_request: Specifies whether to append, remove, or preserve a Via header in an HTTP request
        :param pulumi.Input[str] via_response: Specifies whether to append, remove, or preserve a Via header in an HTTP response
:param pulumi.Input[Sequence[pulumi.Input[str]]] xff_alternative_names: Specifies alternative XFF headers instead of the default X-forwarded-for header.
"""
opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
__props__ = _ProfileHttpState.__new__(_ProfileHttpState)
__props__.__dict__["accept_xff"] = accept_xff
__props__.__dict__["app_service"] = app_service
__props__.__dict__["basic_auth_realm"] = basic_auth_realm
__props__.__dict__["defaults_from"] = defaults_from
__props__.__dict__["description"] = description
__props__.__dict__["encrypt_cookie_secret"] = encrypt_cookie_secret
__props__.__dict__["encrypt_cookies"] = encrypt_cookies
__props__.__dict__["fallback_host"] = fallback_host
__props__.__dict__["fallback_status_codes"] = fallback_status_codes
__props__.__dict__["head_erase"] = head_erase
__props__.__dict__["head_insert"] = head_insert
__props__.__dict__["insert_xforwarded_for"] = insert_xforwarded_for
__props__.__dict__["lws_separator"] = lws_separator
__props__.__dict__["lws_width"] = lws_width
__props__.__dict__["name"] = name
__props__.__dict__["oneconnect_transformations"] = oneconnect_transformations
__props__.__dict__["proxy_type"] = proxy_type
__props__.__dict__["redirect_rewrite"] = redirect_rewrite
__props__.__dict__["request_chunking"] = request_chunking
__props__.__dict__["response_chunking"] = response_chunking
__props__.__dict__["response_headers_permitteds"] = response_headers_permitteds
__props__.__dict__["server_agent_name"] = server_agent_name
__props__.__dict__["tm_partition"] = tm_partition
__props__.__dict__["via_host_name"] = via_host_name
__props__.__dict__["via_request"] = via_request
__props__.__dict__["via_response"] = via_response
__props__.__dict__["xff_alternative_names"] = xff_alternative_names
return ProfileHttp(resource_name, opts=opts, __props__=__props__)
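    # Illustrative usage (a sketch, not part of the generated provider code; the resource and
    # profile names are made-up placeholders): importing an existing profile's state by its ID.
    #   existing = ProfileHttp.get(
    #       "imported-http-profile",
    #       id="/Common/test-http-profile",
    #   )
    #   pulumi.export("proxyType", existing.proxy_type)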
@property
@pulumi.getter(name="acceptXff")
def accept_xff(self) -> pulumi.Output[str]:
"""
Enables or disables trusting the client IP address, and statistics from the client IP address, based on the request's XFF (X-forwarded-for) headers, if they exist.
"""
return pulumi.get(self, "accept_xff")
@property
@pulumi.getter(name="appService")
def app_service(self) -> pulumi.Output[Optional[str]]:
"""
The application service to which the object belongs.
"""
return pulumi.get(self, "app_service")
@property
@pulumi.getter(name="basicAuthRealm")
def basic_auth_realm(self) -> pulumi.Output[str]:
"""
Specifies a quoted string for the basic authentication realm. The system sends this string to a client whenever authorization fails. The default value is `none`
"""
return pulumi.get(self, "basic_auth_realm")
@property
@pulumi.getter(name="defaultsFrom")
def defaults_from(self) -> pulumi.Output[str]:
"""
Specifies the profile that you want to use as the parent profile. Your new profile inherits all settings and values from the parent profile specified.
"""
return pulumi.get(self, "defaults_from")
@property
@pulumi.getter
def description(self) -> pulumi.Output[str]:
"""
Specifies user-defined description.
"""
return pulumi.get(self, "description")
@property
@pulumi.getter(name="encryptCookieSecret")
def encrypt_cookie_secret(self) -> pulumi.Output[Optional[str]]:
"""
Type a passphrase for cookie encryption.
"""
return pulumi.get(self, "encrypt_cookie_secret")
@property
@pulumi.getter(name="encryptCookies")
def encrypt_cookies(self) -> pulumi.Output[Optional[Sequence[str]]]:
"""
Type the cookie names for the system to encrypt.
"""
return pulumi.get(self, "encrypt_cookies")
@property
@pulumi.getter(name="fallbackHost")
def fallback_host(self) -> pulumi.Output[str]:
"""
Specifies an HTTP fallback host. HTTP redirection allows you to redirect HTTP traffic to another protocol identifier, host name, port number
"""
return pulumi.get(self, "fallback_host")
@property
@pulumi.getter(name="fallbackStatusCodes")
def fallback_status_codes(self) -> pulumi.Output[Optional[Sequence[str]]]:
"""
Specifies one or more three-digit status codes that can be returned by an HTTP server,that should trigger a redirection to the fallback host.
"""
return pulumi.get(self, "fallback_status_codes")
@property
@pulumi.getter(name="headErase")
def head_erase(self) -> pulumi.Output[str]:
"""
Specifies the header string that you want to erase from an HTTP request. Default is `none`.
"""
return pulumi.get(self, "head_erase")
@property
@pulumi.getter(name="headInsert")
def head_insert(self) -> pulumi.Output[str]:
"""
Specifies a quoted header string that you want to insert into an HTTP request.Default is `none`.
"""
return pulumi.get(self, "head_insert")
@property
@pulumi.getter(name="insertXforwardedFor")
def insert_xforwarded_for(self) -> pulumi.Output[str]:
"""
Specifies, when enabled, that the system inserts an X-Forwarded-For header in an HTTP request with the client IP address, to use with connection pooling. The default is `Disabled`.
"""
return pulumi.get(self, "insert_xforwarded_for")
@property
@pulumi.getter(name="lwsSeparator")
def lws_separator(self) -> pulumi.Output[str]:
"""
Specifies the linear white space (LWS) separator that the system inserts when a header exceeds the maximum width you
specify in the LWS Maximum Columns setting.
"""
return pulumi.get(self, "lws_separator")
@property
@pulumi.getter(name="lwsWidth")
def lws_width(self) -> pulumi.Output[int]:
"""
        Specifies the maximum width (LWS Maximum Columns) that a header may reach before the system inserts the LWS separator.
"""
return pulumi.get(self, "lws_width")
@property
@pulumi.getter
def name(self) -> pulumi.Output[str]:
"""
Specifies the name of the http profile,name of Profile should be full path. Full path is the combination of the `partition + profile name`,For example `/Common/test-http-profile`.
"""
return pulumi.get(self, "name")
@property
@pulumi.getter(name="oneconnectTransformations")
def oneconnect_transformations(self) -> pulumi.Output[str]:
"""
Enables the system to perform HTTP header transformations for the purpose of keeping server-side connections open. This feature requires configuration of a OneConnect profile
"""
return pulumi.get(self, "oneconnect_transformations")
@property
@pulumi.getter(name="proxyType")
def proxy_type(self) -> pulumi.Output[str]:
"""
Specifies the proxy mode for this profile: reverse, explicit, or transparent. The | |
from __future__ import annotations
import datetime as _datetime
import logging as _logging
import os
import pathlib
import re
from contextlib import contextmanager
from dataclasses import dataclass
from enum import Enum
from typing import Any, Dict, Generator, List, Optional
from flytekit.clients import friendly as friendly_client # noqa
from flytekit.common.core.identifier import WorkflowExecutionIdentifier as _SdkWorkflowExecutionIdentifier
from flytekit.common.tasks.sdk_runnable import ExecutionParameters
from flytekit.configuration import images, internal
from flytekit.configuration import sdk as _sdk_config
from flytekit.engines.unit import mock_stats as _mock_stats
from flytekit.interfaces.data import data_proxy as _data_proxy
from flytekit.models.core import identifier as _identifier
_DEFAULT_FLYTEKIT_ENTRYPOINT_FILELOC = "bin/entrypoint.py"
@dataclass(init=True, repr=True, eq=True, frozen=True)
class Image(object):
name: str
fqn: str
tag: str
@dataclass(init=True, repr=True, eq=True, frozen=True)
class ImageConfig(object):
default_image: Image
images: List[Image] = None
def find_image(self, name) -> Optional[Image]:
for i in self.images:
if i.name == name:
return i
return None
_IMAGE_FQN_TAG_REGEX = re.compile(r"([^:]+)(?=:.+)?")
def look_up_image_info(name: str, tag: str, optional_tag: bool = False) -> Image:
"""
Looks up the image tag from environment variable (should be set from the Dockerfile).
FLYTE_INTERNAL_IMAGE should be the environment variable.
This function is used when registering tasks/workflows with Admin.
When using the canonical Python-based development cycle, the version that is used to register workflows
and tasks with Admin should be the version of the image itself, which should ideally be something unique
like the sha of the latest commit.
:param optional_tag:
:param name:
:param Text tag: e.g. somedocker.com/myimage:someversion123
:rtype: Text
"""
if tag is None or tag == "":
raise Exception("Bad input for image tag {}".format(tag))
matches = _IMAGE_FQN_TAG_REGEX.findall(tag)
if matches is not None:
if len(matches) == 1 and optional_tag:
return Image(name=name, fqn=matches[0], tag=None)
elif len(matches) == 2:
return Image(name=name, fqn=matches[0], tag=matches[1])
else:
raise AssertionError(f"Incorrectly formatted image {tag}, missing tag value")
raise Exception("Could not parse given image and version from configuration.")
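# Minimal sketch of the parsing behaviour documented above (the image reference is a made-up
# example, not a real registry): a fully qualified reference is split into fqn and tag, while a
# bare fqn is only accepted when optional_tag is True.
def _example_image_lookup() -> Image:
    # returns Image(name="default", fqn="somedocker.com/myimage", tag="someversion123")
    return look_up_image_info("default", "somedocker.com/myimage:someversion123")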
def get_image_config(img_name: str = None) -> ImageConfig:
image_name = img_name if img_name else internal.IMAGE.get()
default_img = look_up_image_info("default", image_name)
other_images = [look_up_image_info(k, tag=v, optional_tag=True) for k, v in images.get_specified_images().items()]
other_images.append(default_img)
return ImageConfig(default_image=default_img, images=other_images)
@dataclass
class InstanceVar(object):
module: str
name: str
o: Any
@dataclass
class EntrypointSettings(object):
path: str = None
command: str = None
version: int = 0
class SerializationSettings(object):
def __init__(
self,
project: str,
domain: str,
version: str,
image_config: ImageConfig,
env: Optional[Dict[str, str]],
flytekit_virtualenv_root: str = None,
entrypoint_settings: EntrypointSettings = None,
):
self._project = project
self._domain = domain
self._version = version
self._image_config = image_config
self._env = env or {}
self._instance_lookup = {}
self._flytekit_virtualenv_root = flytekit_virtualenv_root
self._entrypoint_settings = entrypoint_settings
@property
def project(self) -> str:
return self._project
@property
def domain(self) -> str:
return self._domain
@property
def version(self) -> str:
return self._version
@property
def image_config(self) -> ImageConfig:
return self._image_config
@property
def env(self) -> Dict[str, str]:
return self._env
@property
def flytekit_virtualenv_root(self) -> str:
return self._flytekit_virtualenv_root
@property
def entrypoint_settings(self) -> EntrypointSettings:
return self._entrypoint_settings
def add_instance_var(self, var: InstanceVar):
self._instance_lookup[var.o] = var
def get_instance_var(self, o: Any) -> InstanceVar:
if o in self._instance_lookup:
return self._instance_lookup[o]
raise KeyError(f"Instance Variable not found for object id {o}")
class CompilationState(object):
def __init__(self, prefix: str):
"""
:param prefix: This is because we may one day want to be able to have subworkflows inside other workflows. If
users choose to not specify their node names, then we can end up with multiple "n0"s. This prefix allows
us to give those nested nodes a distinct name, as well as properly identify them in the workflow.
# TODO: Ketan to revisit this whole concept when we re-organize the new structure
"""
from flytekit.annotated.node import Node
self._nodes: List[Node] = []
self._old_prefix = ""
self._prefix = prefix
self.mode = 1 # TODO: Turn into enum in the future, or remove if only one mode.
# TODO Branch mode should just be a new Compilation state context. But for now we are just
# storing the nodes separately
self._branch = False
self._branch_nodes: List[Node] = []
@property
def prefix(self) -> str:
return self._prefix
def add_node(self, n: Node):
if self._branch:
self._branch_nodes.append(n)
else:
self._nodes.append(n)
@property
def nodes(self):
if self._branch:
return self._branch_nodes
return self._nodes
def enter_conditional_section(self):
"""
We cannot use a context manager here, so we will mimic the context manager API
"""
self._branch = True
self._old_prefix = self._prefix
self._prefix = self._prefix + "branch"
def exit_conditional_section(self):
"""
Disables that we are in a branch
"""
self._branch = False
self._branch_nodes = []
self._prefix = self._old_prefix
def is_in_a_branch(self) -> bool:
return self._branch
class BranchEvalMode(Enum):
BRANCH_ACTIVE = "branch active"
BRANCH_SKIPPED = "branch skipped"
class ExecutionState(object):
class Mode(Enum):
# This is the mode that is used when a task execution mimics the actual runtime environment.
# NOTE: This is important to understand the difference between TASK_EXECUTION and LOCAL_TASK_EXECUTION
# LOCAL_TASK_EXECUTION, is the mode that is run purely locally and in some cases the difference between local
# and runtime environment may be different. For example for Dynamic tasks local_task_execution will just run it
# as a regular function, while task_execution will extract a runtime spec
TASK_EXECUTION = 1
# This represents when flytekit is locally running a workflow. The behavior of tasks differs in this case
# because instead of running a task's user defined function directly, it'll need to wrap the return values in
# NodeOutput
LOCAL_WORKFLOW_EXECUTION = 2
        # This is the mode that is used to indicate a purely local task execution - i.e. running without a container
# or propeller.
LOCAL_TASK_EXECUTION = 3
def __init__(
self, mode: Mode, working_dir: os.PathLike, engine_dir: os.PathLike, additional_context: Dict[Any, Any] = None
):
self._mode = mode
self._working_dir = working_dir
self._engine_dir = engine_dir
self._additional_context = additional_context
self._branch_eval_mode = None
@property
def working_dir(self) -> os.PathLike:
return self._working_dir
@property
def engine_dir(self) -> os.PathLike:
return self._engine_dir
@property
def additional_context(self) -> Dict[Any, Any]:
return self._additional_context
@property
def mode(self) -> Mode:
return self._mode
@property
def branch_eval_mode(self) -> Optional[BranchEvalMode]:
return self._branch_eval_mode
def enter_conditional_section(self):
"""
We cannot use a context manager here, so we will mimic the context manager API
        The reason we cannot is that branch is a functional API and the context block is not well defined
TODO we might want to create a new node manager here, as we want to capture all nodes in this branch
context
"""
self._branch_eval_mode = BranchEvalMode.BRANCH_SKIPPED
def take_branch(self):
"""
Indicates that we are within an if-else block and the current branch has evaluated to true.
Useful only in local execution mode
"""
self._branch_eval_mode = BranchEvalMode.BRANCH_ACTIVE
def branch_complete(self):
"""
Indicates that we are within a conditional / ifelse block and the active branch is not done.
Default to SKIPPED
"""
self._branch_eval_mode = BranchEvalMode.BRANCH_SKIPPED
def exit_conditional_section(self):
"""
Removes any current branch logic
"""
self._branch_eval_mode = None
class FlyteContext(object):
OBJS = []
def __init__(
self,
parent=None,
file_access: _data_proxy.FileAccessProvider = None,
compilation_state: CompilationState = None,
execution_state: ExecutionState = None,
flyte_client: friendly_client.SynchronousFlyteClient = None,
user_space_params: ExecutionParameters = None,
serialization_settings: SerializationSettings = None,
):
# TODO: Should we have this auto-parenting feature?
if parent is None and len(FlyteContext.OBJS) > 0:
parent = FlyteContext.OBJS[-1]
if compilation_state is not None and execution_state is not None:
raise Exception("Can't specify both")
self._parent: FlyteContext = parent
self._file_access = file_access
self._compilation_state = compilation_state
self._execution_state = execution_state
self._flyte_client = flyte_client
self._user_space_params = user_space_params
self._serialization_settings = serialization_settings
def __enter__(self):
# Should we auto-assign the parent here?
# Or detect if self's parent is not [-1]?
FlyteContext.OBJS.append(self)
return self
def __exit__(self, exc_type, exc_val, exc_tb):
FlyteContext.OBJS.pop()
@classmethod
def current_context(cls) -> FlyteContext:
if len(cls.OBJS) == 0:
raise Exception("There should pretty much always be a base context object.")
return cls.OBJS[-1]
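    # Illustrative usage (an assumption, not from the original module): contexts form a stack via
    # FlyteContext.OBJS, so the most recently entered context is the current one.
    #   with FlyteContext(file_access=some_file_access) as ctx:
    #       assert FlyteContext.current_context() is ctx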
@property
def file_access(self) -> _data_proxy.FileAccessProvider:
if self._file_access is not None:
return self._file_access
elif self._parent is not None:
return self._parent.file_access
else:
raise Exception("No file_access initialized")
@contextmanager
def new_file_access_context(self, file_access_provider: _data_proxy.FileAccessProvider):
new_ctx = FlyteContext(parent=self, file_access=file_access_provider)
FlyteContext.OBJS.append(new_ctx)
try:
yield new_ctx
finally:
FlyteContext.OBJS.pop()
@property
def user_space_params(self) -> Optional[ExecutionParameters]:
if self._user_space_params is not None:
return self._user_space_params
elif self._parent is not None:
return self._parent.user_space_params
else:
raise Exception("No user_space_params initialized")
@property
def execution_state(self) -> Optional[ExecutionState]:
return self._execution_state
@contextmanager
def new_execution_context(
self,
mode: ExecutionState.Mode,
additional_context: Dict[Any, Any] = None,
execution_params: Optional[ExecutionParameters] = None,
working_dir: Optional[str] = None,
) -> Generator[FlyteContext, None, None]:
# Create a working directory for the execution to use
working_dir = working_dir or self.file_access.get_random_local_directory()
engine_dir = os.path.join(working_dir, "engine_dir")
pathlib.Path(engine_dir).mkdir(parents=True, exist_ok=True)
exec_state = ExecutionState(
mode=mode, working_dir=working_dir, engine_dir=engine_dir, additional_context=additional_context
)
# | |
        if self.printLevel >= 2: print("==== Current state: ", self.zt, ". Location: ", loc)
def evaluateCost(self, t, xt, bt):
return np.max(np.dot(self.V[t][xt].T, bt))
def updateBelief(self, xt, xNext, at, ot, bt):
return np.dot( self.M[at][xt][xNext][ot], bt )/np.sum(np.dot( self.M[at][xt][xNext][ot], bt ))
def getObservation(self, xt, obstOpt):
# obstacle observation options:
# - obstOpt = 0 ---> always measure 0 unless perfect measurement
# - obstOpt = 1 ---> always measure 1 unless perfect measurement
# - obstOpt = 2 ---> perfect measurement
# - obstOpt = 3 ---> random unless perfect measurement
if obstOpt == 2:
meas = self.zt
else:
locMeas = list(self.loc)
row, col = np.where(xt == self.stateMap)
for i in range(0, self.numObs):
                dist = np.abs(row-self.row_obs[i]) + np.abs(col-self.col_obs[i]) # compute distance from obstacle number i
                if dist >= 2: # if dist >= 2 --> random measurement otherwise exact one
if obstOpt < 2:
locMeas[i] = obstOpt
else:
locMeas[i] = np.round(random.uniform(0, 1))
if self.unGoal == True:
for i in range(0, self.numUGoal):
                    dist = np.abs(row-self.row_goal[i]) + np.abs(col-self.col_goal[i]) # compute distance from uncertain goal number i
                    if dist >= 1: # if dist >= 1 --> random measurement otherwise exact one
if obstOpt < 2:
locMeas[self.numObs + i] = obstOpt
else:
locMeas[self.numObs + i] = np.round(random.uniform(0, 1))
locMeas = tuple(locMeas)
for i in range(0,len(self.comb)):
if self.comb[i]==locMeas:
meas = i
return meas
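# Minimal numeric sketch (not part of the original classes) of the belief update performed by
# MOMDP.updateBelief above: a Bayes-filter step b' = M b / ||M b||_1 over the hidden part of the
# state, with a toy 2x2 matrix standing in for self.M[at][xt][xNext][ot].
def _belief_update_demo():
    import numpy as np  # local import so the sketch stays self-contained
    M = np.array([[0.9, 0.2],
                  [0.1, 0.8]])
    bt = np.array([0.5, 0.5])
    Mb = np.dot(M, bt)
    return Mb / np.sum(Mb)  # -> array([0.55, 0.45])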
class MOMDP_TOQ(MOMDP):
def __init__(self, gridVar, totTimeSteps, printLevel, policy, discOpt, unGoal = False):
super().__init__(gridVar, totTimeSteps, printLevel, policy, discOpt, unGoal)
def backupLoopBeliefPoint(self, t, xt):
J_out = np.zeros((np.shape(self.Belief)[0],np.shape(self.Belief)[1]))
V_out = np.zeros((np.shape(self.Belief)[0],np.shape(self.Belief)[1]))
for j in range(0, np.shape(self.Belief)[1]): # loop over belief points (this for loop can be parallelized)
# for each belief point we are going to compute a cost vector -->
# initialize cost and value function vector for all action. Afterwards,
# we are going to take the max
V_alpha_a = np.zeros((np.shape(self.Belief)[0], self.numA))
V_cost_alpha_a = np.zeros(self.numA)
J_alpha_a = np.zeros((np.shape(self.Belief)[0], self.numA))
J_cost_alpha_a = np.zeros( self.numA)
bt = self.Belief[:, j]
for a in range(0, self.numA):
for xNext in self.sucessorStatesList[xt][a]:
Mbt = np.dot(self.M[a,xt,xNext], bt)
J_lambda_kax = np.dot(self.J[t+1,xNext].T, Mbt.T)
V_lambda_kax = np.dot(self.V[t+1,xNext].T, Mbt.T)
idxOptList = []
for o in range(0, self.numZ):
J_lambda_kaxo = J_lambda_kax[:, o]
idxToChange = J_lambda_kaxo < (J_lambda_kaxo.max()-self.toll)
                        J_lambda_kaxo[idxToChange] = -1000 # Set non-max values to a low number
sumVec = J_lambda_kaxo + V_lambda_kax[:, o]
idxOptList.append(np.argmax(sumVec))
V_alpha_a[:, a] += np.tensordot(self.V[t+1,xNext,:,idxOptList], self.M[a,xt,xNext], 2)
J_alpha_a[:, a] += np.tensordot(self.J[t+1,xNext,:,idxOptList], self.M[a,xt,xNext], 2)
# Select Cost
if xt in self.goal:
V_cost_alpha_a[a] = 1 + np.dot(V_alpha_a[:, a].T, bt)
J_cost_alpha_a[a] = 1
else:
V_cost_alpha_a[a] = np.dot(V_alpha_a[:, a].T, bt)
J_cost_alpha_a[a] = np.dot(J_alpha_a[:, a].T, bt)
idxToChange = J_cost_alpha_a < (J_cost_alpha_a.max()-self.toll) # Where values are low
            J_cost_alpha_a[idxToChange] = -1000 # Set non-max values to a low number
sumVec = J_cost_alpha_a + V_cost_alpha_a
idxOpt = np.argmax(sumVec)
if xt in self.goal:
V_out[:,j] = np.ones(np.shape(self.Belief)[0]) + V_alpha_a[:,idxOpt]
J_out[:,j] = np.ones(np.shape(self.Belief)[0])
else:
V_out[:,j] = V_alpha_a[:,idxOpt]
J_out[:,j] = J_alpha_a[:,idxOpt]
self.V[t,xt] = V_out
self.J[t,xt] = J_out
def evaluatePolicy(self, t, xt, bt):
V_alpha_a = np.zeros((self.numZ, self.numA))
J_alpha_a = np.zeros((self.numZ, self.numA))
actionCost = []
actionSpec = []
for a in range(0, self.numA):
for xNext in self.sucessorStatesList[xt][a]:
for o in range(0, self.numZ):
# Cost Value Function Update
J_lambda_kaxo = np.dot(self.J[t+1,xNext].T, np.dot( self.M[a,xt,xNext,o], bt ))
V_lambda_kaxo = np.dot(self.V[t+1,xNext].T, np.dot( self.M[a,xt,xNext,o], bt ))
idx = np.where( (J_lambda_kaxo >= np.max(J_lambda_kaxo)-self.toll) )
possOpt = -np.inf * np.ones(np.shape(V_lambda_kaxo)[0])
possOpt[idx] = V_lambda_kaxo[idx]
idxOpt = np.argmax(possOpt)
V_alpha_a[:, a] = V_alpha_a[:, a] + np.dot(self.M[a,xt,xNext,o].T, self.V[t+1,xNext,:, idxOpt])
J_alpha_a[:, a] = J_alpha_a[:, a] + np.dot(self.M[a,xt,xNext,o].T, self.J[t+1,xNext,:, idxOpt])
# Select Cost
if xt in self.goal: stageCost = 1;
else: stageCost = 0
actionCost.append(np.round(stageCost + np.dot(V_alpha_a[:, a], bt), self.digitPrecision))
actionSpec.append(np.round(np.dot(J_alpha_a[:, a], bt), self.digitPrecision))
# Pick best action
probability = max(actionSpec)
cost = max(actionCost)
if self.printLevel > 2: print("Constraint vector: ", actionSpec)
if self.printLevel > 2: print("Cost Vector: ", actionCost)
if probability == 0:
if self.printLevel >= 1: print("Abort Mission")
probability = 0
selectedAction = 0
cost = np.inf
else:
# Pick action with highest prob sat specs
action = np.where(np.array(actionSpec) >= probability-self.toll)
# Among the one with highest prob sat specs pick best cost
possOpt = -np.inf * np.ones(self.numA)
possOpt[action] = np.array(actionCost)[action]
actionSel = np.where(possOpt==np.max(possOpt))
            # Print to screen
if self.printLevel > 2: print("Possible Moves ", self.actVec[action])
if self.printLevel > 2: print("Same Cost Moves: ", self.actVec[actionSel])
if self.printLevel > 2: print("Selected Action: ", self.actVec[actionSel[0][0]])
selectedAction = actionSel[0][0]
        if self.printLevel >= 2: print("Probability of satisfying spec: ", probability, ". Expected Cost: ", cost)
return selectedAction, probability, cost
class MOMDP_TO(MOMDP):
def __init__(self, gridVar, totTimeSteps, printLevel, policy, discOpt):
super().__init__(gridVar, totTimeSteps, printLevel, policy, discOpt)
def backupLoopBeliefPoint(self, t, xt):
V_out = np.zeros((np.shape(self.Belief)[0],np.shape(self.Belief)[1]))
for j in range(0, np.shape(self.Belief)[1]): # loop over belief points
# for each belief point we are going to compute a cost vector -->
# initialize cost and value function vector for all action. Afterwards,
# we are going to take the max
V_alpha_a = np.zeros((np.shape(self.Belief)[0], self.numA))
V_cost_alpha_a = np.zeros(self.numA)
bt = self.Belief[:, j]
for a in range(0, self.numA):
for xNext in self.sucessorStatesList[xt][a]:
Mbt = np.dot(self.M[a,xt,xNext], bt)
V_lambda_kax = np.dot(self.V[t+1,xNext].T, Mbt.T)
idxOptList = []
for o in range(0, self.numZ):
idxOptList.append(np.argmax(V_lambda_kax[:, o]))
V_alpha_a[:, a] += np.tensordot(self.V[t+1,xNext,:,idxOptList], self.M[a,xt,xNext], 2)
# Select Cost
if xt in self.goal:
V_cost_alpha_a[a] = 1 + np.dot(V_alpha_a[:, a].T, bt)
else:
V_cost_alpha_a[a] = np.dot(V_alpha_a[:, a].T, bt)
idxOpt = np.argmax(V_cost_alpha_a)
if xt in self.goal:
V_out[:,j] = np.ones(np.shape(self.Belief)[0]) + V_alpha_a[:,idxOpt]
else:
V_out[:,j] = V_alpha_a[:,idxOpt]
self.V[t,xt] = V_out
def evaluatePolicy(self, t, xt, bt):
V_alpha_a = np.zeros((self.numZ, self.numA))
actionCost = []
for a in range(0, self.numA):
for xNext in self.sucessorStatesList[xt][a]:
for o in range(0, self.numZ):
# Cost Value Function Update
V_lambda_kaxo = np.dot(self.V[t+1,xNext].T, np.dot( self.M[a,xt,xNext,o], bt ))
idxOpt = np.argmax(V_lambda_kaxo)
V_alpha_a[:, a] = V_alpha_a[:, a] + np.dot(self.M[a,xt,xNext,o].T, self.V[t+1,xNext,:, idxOpt])
# Select Cost
if xt in self.goal: stageCost = 1;
else: stageCost = 0
actionCost.append(stageCost + np.dot(V_alpha_a[:, a], bt))
# Pick best action
cost = max(actionCost)
probability = np.inf
if self.printLevel > 2: print("Cost Vector: ", actionCost)
if cost == 0:
if self.printLevel >= 1: print("Run out of time abort the mission")
probability = 0
selectedAction = 0
cost = np.inf
else:
actionSel = np.where(actionCost==np.max(actionCost))
            # Print to screen
if self.printLevel > 2: print("Same Cost Moves: ", self.actVec[actionSel])
if self.printLevel > 2: print("Selected Action: ", self.actVec[actionSel[0][0]])
selectedAction = actionSel[0][0]
probability = np.inf # the probability cannot be computed in TO, unless we run out of time --> failure
        if self.printLevel >= 2: print("Probability of satisfying spec: ", probability, ". Expected Cost: ", cost)
return selectedAction, probability, cost
class MOMDP_Q(MOMDP):
def __init__(self, gridVar, totTimeSteps, printLevel, policy, discOpt):
super().__init__(gridVar, totTimeSteps, printLevel, policy, discOpt)
def backupLoopBeliefPoint(self, t, xt):
J_out = np.zeros((np.shape(self.Belief)[0],np.shape(self.Belief)[1]))
for j in range(0, np.shape(self.Belief)[1]): # loop over belief points
# for each belief point we are going to compute a cost vector -->
# initialize cost and value function vector for all action. Afterwards,
# we are going to take the max
J_alpha_a = np.zeros((np.shape(self.Belief)[0], self.numA))
J_cost_alpha_a = np.zeros( self.numA)
J_cost_alpha_a_vec = np.zeros( self.numA)
J_alpha_a_vec = np.zeros((np.shape(self.Belief)[0], self.numA))
bt = self.Belief[:, j]
for a in range(0, self.numA):
for xNext in self.sucessorStatesList[xt][a]:
Mbt_vec = np.dot(self.M[a,xt,xNext], bt)
J_lambda_kax = np.dot(self.J[t+1,xNext].T, Mbt_vec.T)
idxOptList = []
for o in range(0, self.numZ):
idxOptList.append(np.argmax(J_lambda_kax[:, o] ))
J_alpha_a[:, a] += np.tensordot(self.J[t+1,xNext,:,idxOptList], self.M[a,xt,xNext], 2)
# Select Cost
if xt in self.goal:
J_cost_alpha_a[a] = 1
else:
J_cost_alpha_a[a] = np.round(np.dot(J_alpha_a[:, a].T, bt), self.digitPrecision)
idxOpt = np.argmax( J_cost_alpha_a )
if xt in self.goal:
J_out[:,j] = np.ones(np.shape(self.Belief)[0])
else:
J_out[:,j] = J_alpha_a[:,idxOpt]
self.J[t,xt] = J_out
def evaluatePolicy(self, t, xt, bt):
J_alpha_a = np.zeros((self.numZ, self.numA))
actionSpec = []
for a in range(0, self.numA):
for xNext in self.sucessorStatesList[xt][a]:
for o in range(0, self.numZ):
# Cost Value Function Update
J_lambda_kaxo = np.dot(self.J[t+1][xNext].T, np.dot( self.M[a,xt,xNext,o], bt ))
idxOpt = np.argmax(J_lambda_kaxo)
J_alpha_a[:, a] = J_alpha_a[:, a] + np.dot(self.M[a,xt,xNext,o].T, self.J[t+1,xNext,:, idxOpt])
# Select Cost
actionSpec.append(np.round(np.dot(J_alpha_a[:, a], bt), self.digitPrecision))
# Pick best action
probability = max(actionSpec)
cost = np.inf # set cost to inf because no cost is computed for the Quantitative policy
# pdb.set_trace()
if self.printLevel > 2: print("Max spec vector: ", actionSpec)
if probability == 0:
if self.printLevel >= 1: print("Abort Mission")
probability = 0
selectedAction = 0
else:
actionSel = np.where(actionSpec==np.max(actionSpec))
            # Print to screen
if self.printLevel > 2: print("Possible Moves ", self.actVec[actionSel])
if self.printLevel > 2: print("Selected Action: ", self.actVec[actionSel[0][0]])
selectedAction = actionSel[0][0]
        if self.printLevel >= 2: print("Probability of satisfying spec: ", probability, ". Expected Cost: ", cost)
return selectedAction, probability, cost
class MOMDP_TOQ_notVectorized(MOMDP):
def __init__(self, gridVar, totTimeSteps, printLevel, policy, discOpt):
super().__init__(gridVar, totTimeSteps, printLevel, policy, discOpt)
def backupLoopBeliefPoint(self, t, xt):
J_out = np.zeros((np.shape(self.Belief)[0],np.shape(self.Belief)[1]))
V_out = np.zeros((np.shape(self.Belief)[0],np.shape(self.Belief)[1]))
for j in range(0, np.shape(self.Belief)[1]): # loop over belief points
# for each belief point we are going to compute a cost vector -->
# initialize cost and value function vector for all action. Afterwards,
# we are going to take the max
V_alpha_a = np.zeros((np.shape(self.Belief)[0], self.numA))
V_cost_alpha_a = np.zeros(self.numA)
J_alpha_a = np.zeros((np.shape(self.Belief)[0], self.numA))
J_cost_alpha_a = np.zeros( self.numA)
bt = self.Belief[:, j]
for a in range(0, self.numA):
for xNext in self.sucessorStatesList[xt][a]:
for o in range(0, self.numZ):
J_lambda_kaxo = np.dot(self.J[t+1, xNext].T, np.dot(self.M[a,xt,xNext,o], bt))
V_lambda_kaxo = np.dot(self.V[t+1, xNext].T, np.dot(self.M[a,xt,xNext,o], bt))
idx = np.where( (J_lambda_kaxo >= np.max(J_lambda_kaxo)-self.toll) & (J_lambda_kaxo <= np.max(J_lambda_kaxo)+self.toll) )
possOpt = -np.inf * np.ones(np.shape(V_lambda_kaxo)[0])
possOpt[idx] = V_lambda_kaxo[idx]
idxOpt = np.argmax(possOpt)
V_alpha_a[:, a] += np.dot(self.M[a,xt,xNext,o].T, self.V[t+1,xNext,:, idxOpt])
J_alpha_a[:, a] += np.dot(self.M[a,xt,xNext,o].T, self.J[t+1,xNext,:, idxOpt])
# Select Cost
if xt in self.goal:
V_cost_alpha_a[a] = 1 + np.dot(V_alpha_a[:, a].T, bt)
J_cost_alpha_a[a] = 1
else:
V_cost_alpha_a[a] = np.dot(V_alpha_a[:, a].T, bt)
J_cost_alpha_a[a] = np.dot(J_alpha_a[:, a].T, bt)
# take the max to compute the vector at the belief point
optVector = -np.inf * np.ones(self.numA);
idx = np.where( (J_cost_alpha_a >= np.max(J_cost_alpha_a)-self.toll) & (J_cost_alpha_a <= np.max(J_cost_alpha_a)+self.toll) )
optVector[idx] = V_cost_alpha_a[idx]
idxOpt = np.argmax(optVector)
if xt in self.goal:
V_out[:,j] = np.ones(np.shape(self.Belief)[0]) + V_alpha_a[:,idxOpt]
J_out[:,j] = np.ones(np.shape(self.Belief)[0])
else:
V_out[:,j] = V_alpha_a[:,idxOpt]
J_out[:,j] = J_alpha_a[:,idxOpt]
self.V[t][xt] = V_out
self.J[t][xt] = J_out
def evaluatePolicy(self, t, xt, bt):
V_alpha_a = np.zeros((self.numZ, self.numA))
J_alpha_a = np.zeros((self.numZ, self.numA))
actionCost = []
actionSpec = | |
from enum import IntEnum
from ctypes import (
Union,
c_char, c_uint16,
c_uint8,
c_uint32,
c_uint64,
c_int32,
)
from DyldExtractor.structure import Structure
class LoadCommands(IntEnum):
"""An Enum for all the load commands.
"""
"""
After MacOS X 10.1 when a new load command is added that is required to be
understood by the dynamic linker for the image to execute properly the
LC_REQ_DYLD bit will be or'ed into the load command constant. If the dynamic
    linker sees such a load command that it does not understand, it will issue an
"unknown load command required for execution" error and refuse to use the
image. Other load commands without this bit that are not understood will
simply be ignored.
"""
LC_REQ_DYLD = 0x80000000
# Constants for the cmd field of all load commands, the type
LC_SEGMENT = 0x1 # segment of this file to be mapped
LC_SYMTAB = 0x2 # link-edit stab symbol table info
LC_SYMSEG = 0x3 # link-edit gdb symbol table info (obsolete)
LC_THREAD = 0x4 # thread
LC_UNIXTHREAD = 0x5 # unix thread (includes a stack)
LC_LOADFVMLIB = 0x6 # load a specified fixed VM shared library
LC_IDFVMLIB = 0x7 # fixed VM shared library identification
LC_IDENT = 0x8 # object identification info (obsolete)
LC_FVMFILE = 0x9 # fixed VM file inclusion (internal use)
LC_PREPAGE = 0xa # prepage command (internal use)
LC_DYSYMTAB = 0xb # dynamic link-edit symbol table info
LC_LOAD_DYLIB = 0xc # load a dynamically linked shared library
LC_ID_DYLIB = 0xd # dynamically linked shared lib ident
LC_LOAD_DYLINKER = 0xe # load a dynamic linker
LC_ID_DYLINKER = 0xf # dynamic linker identification
LC_PREBOUND_DYLIB = 0x10 # modules prebound for a dynamically
# linked shared library
LC_ROUTINES = 0x11 # image routines
LC_SUB_FRAMEWORK = 0x12 # sub framework
LC_SUB_UMBRELLA = 0x13 # sub umbrella
LC_SUB_CLIENT = 0x14 # sub client
LC_SUB_LIBRARY = 0x15 # sub library
LC_TWOLEVEL_HINTS = 0x16 # two-level namespace lookup hints
LC_PREBIND_CKSUM = 0x17 # prebind checksum
"""
load a dynamically linked shared library that is allowed to be missing
(all symbols are weak imported).
"""
LC_LOAD_WEAK_DYLIB = (0x18 | LC_REQ_DYLD)
LC_SEGMENT_64 = 0x19 # 64-bit segment of this file to be
# mapped
LC_ROUTINES_64 = 0x1a # 64-bit image routines
LC_UUID = 0x1b # the uuid
LC_RPATH = (0x1c | LC_REQ_DYLD) # runpath additions
LC_CODE_SIGNATURE = 0x1d # local of code signature
LC_SEGMENT_SPLIT_INFO = 0x1e # local of info to split segments
LC_REEXPORT_DYLIB = (0x1f | LC_REQ_DYLD) # load and re-export dylib
LC_LAZY_LOAD_DYLIB = 0x20 # delay load of dylib until first use
LC_ENCRYPTION_INFO = 0x21 # encrypted segment information
LC_DYLD_INFO = 0x22 # compressed dyld information
LC_DYLD_INFO_ONLY = (0x22 | LC_REQ_DYLD) # compressed dyld information only
LC_LOAD_UPWARD_DYLIB = (0x23 | LC_REQ_DYLD) # load upward dylib
LC_VERSION_MIN_MACOSX = 0x24 # build for MacOSX min OS version
LC_VERSION_MIN_IPHONEOS = 0x25 # build for iPhoneOS min OS version
LC_FUNCTION_STARTS = 0x26 # compressed table of function start addresses
LC_DYLD_ENVIRONMENT = 0x27 # string for dyld to treat
# like environment variable
LC_MAIN = (0x28 | LC_REQ_DYLD) # replacement for LC_UNIXTHREAD
LC_DATA_IN_CODE = 0x29 # table of non-instructions in __text
LC_SOURCE_VERSION = 0x2A # source version used to build binary
LC_DYLIB_CODE_SIGN_DRS = 0x2B # Code signing DRs copied from linked dylibs
LC_ENCRYPTION_INFO_64 = 0x2C # 64-bit encrypted segment information
LC_LINKER_OPTION = 0x2D # linker options in MH_OBJECT files
LC_LINKER_OPTIMIZATION_HINT = 0x2E # optimization hints in MH_OBJECT files
LC_VERSION_MIN_TVOS = 0x2F # build for AppleTV min OS version
LC_VERSION_MIN_WATCHOS = 0x30 # build for Watch min OS version
LC_NOTE = 0x31 # arbitrary data included within a Mach-O file
LC_BUILD_VERSION = 0x32 # build for platform min OS version
LC_DYLD_EXPORTS_TRIE = (0x33 | LC_REQ_DYLD) # used with linkedit_data_command, payload is trie
LC_DYLD_CHAINED_FIXUPS = (0x34 | LC_REQ_DYLD) # used with linkedit_data_command
LC_FILESET_ENTRY = (0x35 | LC_REQ_DYLD) # used with fileset_entry_command
class mach_header_64(Structure):
SIZE = 32
magic: int # mach magic number identifier
cputype: int # cpu specifier
cpusubtype: int # machine specifier
filetype: int # type of file
ncmds: int # number of load commands
sizeofcmds: int # the size of all the load commands
flags: int # flags
reserved: int # reserved
_fields_ = [
("magic", c_uint32),
("cputype", c_uint32),
("cpusubtype", c_uint32),
("filetype", c_uint32),
("ncmds", c_uint32),
("sizeofcmds", c_uint32),
("flags", c_uint32),
("reserved", c_uint32),
]
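# Illustrative usage sketch (not part of the original module): read the 64-bit Mach-O
# header with the ctypes structure above. It assumes a little-endian host, so native
# ctypes byte order matches the on-disk layout of current Mach-O binaries, and uses the
# standard 64-bit magic number.
def read_mach_header_64(path: str) -> "mach_header_64":
    MH_MAGIC_64 = 0xFEEDFACF  # standard magic number for 64-bit Mach-O files
    with open(path, "rb") as fp:
        raw = fp.read(mach_header_64.SIZE)
    header = mach_header_64.from_buffer_copy(raw)
    if header.magic != MH_MAGIC_64:
        raise ValueError(f"not a 64-bit Mach-O image: magic=0x{header.magic:08x}")
    return header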
class load_command(Structure):
cmd: int # type of load command
cmdsize: int # total size of command in bytes
_fields_ = [
("cmd", c_uint32),
("cmdsize", c_uint32),
]
class UnknownLoadCommand(load_command):
pass
class segment_command_64(Structure):
"""
The 64-bit segment load command indicates that a part of this file is to be
mapped into a 64-bit task's address space. If the 64-bit segment has
sections then section_64 structures directly follow the 64-bit segment
command and their size is reflected in cmdsize.
for 64-bit architectures
"""
SIZE = 72
cmd: int # LC_SEGMENT_64
cmdsize: int # includes sizeof section_64 structs
segname: bytes # segment name
vmaddr: int # memory address of this segment
vmsize: int # memory size of this segment
fileoff: int # file offset of this segment
filesize: int # amount to map from the file
maxprot: int # maximum VM protection
initprot: int # initial VM protection
nsects: int # number of sections in segment
flags: int # flags
_fields_ = [
("cmd", c_uint32),
("cmdsize", c_uint32),
("segname", c_char * 16),
("vmaddr", c_uint64),
("vmsize", c_uint64),
("fileoff", c_uint64),
("filesize", c_uint64),
("maxprot", c_int32),
("initprot", c_int32),
("nsects", c_uint32),
("flags", c_uint32),
]
class section_64(Structure):
# for 64-bit architectures
SIZE = 80
sectname: bytes # name of this section
segname: bytes # segment this section goes in
addr: int # memory address of this section
size: int # size in bytes of this section
offset: int # file offset of this section
align: int # section alignment (power of 2)
reloff: int # file offset of relocation entries
nreloc: int # number of relocation entries
    flags: int  # flags (section type and attributes)
reserved1: int # reserved (for offset or index)
reserved2: int # reserved (for count or sizeof)
reserved3: int # reserved
_fields_ = [
("sectname", c_char * 16),
("segname", c_char * 16),
("addr", c_uint64),
("size", c_uint64),
("offset", c_uint32),
("align", c_uint32),
("reloff", c_uint32),
("nreloc", c_uint32),
("flags", c_uint32),
("reserved1", c_uint32),
("reserved2", c_uint32),
("reserved3", c_uint32),
]
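# Hypothetical helper (not in the original module): walk the load commands that follow the
# header and collect the names of 64-bit segments. It assumes `raw` holds the whole image
# in memory; cmdsize already covers any trailing section_64 records, so advancing by it
# lands on the next load command.
def list_segment_names_64(raw: bytes) -> list:
    header = mach_header_64.from_buffer_copy(raw)
    offset = mach_header_64.SIZE
    names = []
    for _ in range(header.ncmds):
        cmd = load_command.from_buffer_copy(raw, offset)
        if cmd.cmd == LC_SEGMENT_64:
            seg = segment_command_64.from_buffer_copy(raw, offset)
            names.append(seg.segname.decode("ascii", "replace"))
        offset += cmd.cmdsize
    return names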
class lc_str(Structure):
offset: int # offset to the string
_fields_ = [
("offset", c_uint32),
]
class Fvmlib(Structure):
name: lc_str # library's target pathname
minor_version: int # library's minor version number
header_addr: int # library's header address
_fields_ = [
("name", lc_str),
("minor_version", c_uint32),
("header_addr", c_uint32),
]
class fvmlib_command(Structure):
cmd: int # LC_IDFVMLIB or LC_LOADFVMLIB
cmdsize: int # includes pathname string
fvmlib: Fvmlib # the library identification
_fields_ = [
("cmd", c_uint32),
("cmdsize", c_uint32),
("fvmlib", Fvmlib),
]
class Dylib(Structure):
name: lc_str # library's path name
timestamp: int # library's build time stamp
current_version: int # library's current version number
compatibility_version: int # library's compatibility vers number
_fields_ = [
("name", lc_str),
("timestamp", c_uint32),
("current_version", c_uint32),
("compatibility_version", c_uint32),
]
class dylib_command(Structure):
cmd: int # LC_ID_DYLIB, LC_LOAD_{,WEAK_}DYLIB,
# LC_REEXPORT_DYLIB
cmdsize: int # includes pathname string
dylib: Dylib # the library identification
_fields_ = [
("cmd", c_uint32),
("cmdsize", c_uint32),
("dylib", Dylib),
]
class sub_framework_command(Structure):
cmd: int # LC_SUB_FRAMEWORK
cmdsize: int # includes umbrella string
umbrella: lc_str # the umbrella framework name
_fields_ = [
("cmd", c_uint32),
("cmdsize", c_uint32),
("umbrella", lc_str),
]
class sub_client_command(Structure):
cmd: int # LC_SUB_CLIENT
cmdsize: int # includes client string
client: lc_str # the client name
_fields_ = [
("cmd", c_uint32),
("cmdsize", c_uint32),
("client", lc_str),
]
class sub_umbrella_command(Structure):
cmd: int # LC_SUB_UMBRELLA
cmdsize: int # includes sub_umbrella string
sub_umbrella: lc_str # the sub_umbrella framework name
_fields_ = [
("cmd", c_uint32),
("cmdsize", c_uint32),
("sub_umbrella", lc_str),
]
class sub_library_command(Structure):
cmd: int # LC_SUB_LIBRARY
cmdsize: int # includes sub_library string
sub_library: lc_str # the sub_library name
_fields_ = [
("cmd", c_uint32),
("cmdsize", c_uint32),
("sub_library", lc_str),
]
class prebound_dylib_command(Structure):
cmd: int # LC_PREBOUND_DYLIB
cmdsize: int # includes strings
name: lc_str # library's path name
nmodules: int # number of modules in library
linked_modules: lc_str # bit vector of linked modules
_fields_ = [
("cmd", c_uint32),
("cmdsize", c_uint32),
("name", lc_str),
("nmodules", c_uint32),
("linked_modules", lc_str),
]
class dylinker_command(Structure):
cmd: int # LC_ID_DYLINKER, LC_LOAD_DYLINKER or
# LC_DYLD_ENVIRONMENT
cmdsize: int # includes pathname string
name: lc_str # dynamic linker's path name
_fields_ = [
("cmd", c_uint32),
("cmdsize", c_uint32),
("name", lc_str),
]
class routines_command_64(Structure):
# for 64-bit architectures
cmd: int # LC_ROUTINES_64
cmdsize: int # total size of this command
init_address: int # address of initialization routine
init_module: int # index into the module table that
# the init routine is defined in
reserved1: int
reserved2: int
reserved3: int
reserved4: int
reserved5: int
reserved6: int
_fields_ = [
("cmd", c_uint32),
("cmdsize", c_uint32),
("init_address", c_uint64),
("init_module", c_uint64),
("reserved1", c_uint64),
("reserved2", c_uint64),
("reserved3", c_uint64),
("reserved4", c_uint64),
("reserved5", c_uint64),
("reserved6", c_uint64),
]
class symtab_command(Structure):
cmd: int # LC_SYMTAB
cmdsize: int # sizeof(struct symtab_command)
symoff: int # symbol table offset
nsyms: int # number of symbol table entries
stroff: int # string table offset
strsize: int # string table size in bytes
_fields_ = [
("cmd", c_uint32),
("cmdsize", c_uint32),
("symoff", c_uint32),
("nsyms", c_uint32),
("stroff", c_uint32),
("strsize", c_uint32),
]
class N_un(Union):
n_strx: int # index into the string table
_fields_ = [
("n_strx", c_uint32)
]
class nlist_64(Structure):
SIZE: int = 16
n_un: N_un
n_type: int # type flag, see below
n_sect: int # section number or NO_SECT
n_desc: int # see <mach-o/stab.h>
n_value: int # value of this symbol (or stab offset)
_fields_ = [
("n_un", N_un),
("n_type", c_uint8),
("n_sect", c_uint8),
("n_desc", c_uint16),
("n_value", c_uint64),
]
class dysymtab_command(Structure):
cmd: int # LC_DYSYMTAB
cmdsize: int # sizeof(struct dysymtab_command)
ilocalsym: int # index to local symbols
nlocalsym: int # number of local symbols
iextdefsym: int # index to externally defined symbols
nextdefsym: int # number of externally defined symbols
iundefsym: int # index to undefined symbols
nundefsym: int # number of undefined symbols
tocoff: int # file offset to table of contents
ntoc: int # number of entries in table of contents
    modtaboff: int # file offset to module table
from scoring import scoring
from objects import Spectrum, SequenceAlignment, HybridSequenceAlignment, Database, Alignments, DEVFallOffEntry
from alignment import alignment_utils, hybrid_alignment
import objects
import utils
import database
import gen_spectra
import math
import re
import time
FIRST_ALIGN_TIME = 0
AMBIGUOUS_REMOVAL_TIME = 0
PRECURSOR_MASS_TIME = 0
OBJECTIFY_TIME = 0
FIRST_ALIGN_COUNT = 0
AMBIGUOUS_REMOVAL_COUNT = 0
PRECURSOR_MASS_COUNT = 0
OBJECTIFY_COUNT = 0
OUT_OF_RANGE_SEQS = 0
TOTAL_ITERATIONS = 0
global extension_times
extension_times = []
global initial_alignment_times
initial_alignment_times = []
global Non_hybrid_refine_time
Non_hybrid_refine_time = []
global non_hybrid_scoring_times
non_hybrid_scoring_times = []
global Hybrid_refine_times
Hybrid_refine_times = []
global hybrid_scoring_times
hybrid_scoring_times = []
def same_protein_alignment(seq1: str, seq2: str, parent_sequence: str):
if seq1 == seq2:
return (seq1, None)
if seq1 in seq2:
return (seq2, None)
if seq2 in seq1:
return (seq1, None)
gap_aa = max(1, len(parent_sequence) // 100)
left_start = [m.start() for m in re.finditer(seq1, parent_sequence)]
right_start = [m.start() for m in re.finditer(seq2, parent_sequence)]
if all([r < l for r in right_start for l in left_start]):
return hybrid_alignment.hybrid_alignment(seq1, seq2)
nonhybrid_alignments = []
for l in left_start:
for r in right_start:
if r < l:
continue
if r - (l + len(seq1)) <= gap_aa:
overlapped = parent_sequence[l: r + len(seq2)]
nonhybrid_alignments.append(overlapped)
if len(nonhybrid_alignments) == 0:
return hybrid_alignment.hybrid_alignment(seq1, seq2)
nonhybrid_alignments.sort(key=lambda x: len(x))
return (nonhybrid_alignments[0], None)
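# Illustrative note (hypothetical sequences): when both fragments map to the same parent
# within `gap_aa` residues, same_protein_alignment returns the overlapped parent substring,
# e.g. same_protein_alignment("PEPT", "TIDE", "XXPEPTIDEXX") -> ("PEPTIDE", None); when no
# such placement exists, the pair falls back to hybrid_alignment.hybrid_alignment(seq1, seq2).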
def align_b_y(b_kmers: list, y_kmers: list, spectrum: Spectrum, db: Database):
spec_alignments = []
for b_kmer in b_kmers:
b_seq = b_kmer[2][2]
b_proteins = database.get_proteins_with_subsequence(db, b_seq)
for y_kmer in y_kmers:
y_seq = y_kmer[2][2]
y_proteins = database.get_proteins_with_subsequence(db, y_seq)
if any([x in y_proteins for x in b_proteins]):
shared_prots = [x for x in y_proteins if x in b_proteins]
for sp in shared_prots:
prot_seqs = database.get_entry_by_name(db, sp)
for prot_entry in prot_seqs:
spec_alignments.append(
same_protein_alignment(b_seq, y_seq, prot_entry.sequence)
)
spec_alignments.append((f'{b_seq}{y_seq}', f'{b_seq}-{y_seq}'))
else:
spec_alignments.append(hybrid_alignment.hybrid_alignment(b_seq, y_seq))
return list(set([x for x in spec_alignments if x is not None]))
def extend_base_kmers(b_kmers: list, y_kmers: list, spectrum: Spectrum, db: Database):
extended_b = []
extended_y = []
for seq in b_kmers:
extended_b += [x for x in alignment_utils.extend_non_hybrid(seq, spectrum, 'b', db)]
for seq in y_kmers:
extended_y += [x for x in alignment_utils.extend_non_hybrid(seq, spectrum, 'y', db)]
return extended_b, extended_y
def refine_alignments(spectrum: Spectrum, db: Database, alignments: list, precursor_tolerance: int = 10,
DEV: bool = False, truth: dict = None, fall_off: dict = None):
global PRECURSOR_MASS_COUNT, AMBIGUOUS_REMOVAL_COUNT, OUT_OF_RANGE_SEQS
global PRECURSOR_MASS_TIME, AMBIGUOUS_REMOVAL_TIME
predicted_len = utils.predicted_len(spectrum.precursor_mass, spectrum.precursor_charge)
allowed_gap = math.ceil(predicted_len * .25)
st = time.time()
precursor_matches = []
for sequence_pairs in alignments:
sequence = sequence_pairs[0] if sequence_pairs[1] is None else sequence_pairs[1]
p_ms = [
x for x in \
alignment_utils.match_precursor(spectrum, sequence, db, gap=allowed_gap, tolerance=precursor_tolerance)
]
if len(p_ms) and p_ms[0] is None:
OUT_OF_RANGE_SEQS += 1
continue
precursor_matches += p_ms
PRECURSOR_MASS_COUNT += len(alignments)
PRECURSOR_MASS_TIME += time.time() - st
if DEV:
_id = spectrum.id
is_hybrid = truth[_id]['hybrid']
truth_seq = truth[_id]['sequence']
if not utils.DEV_contains_truth_exact(truth_seq, is_hybrid, precursor_matches):
metadata = {
'sequences_before_precursor_filling': alignments,
'sequences_after_precursor_filling': precursor_matches,
'observed_precursor_mass': spectrum.precursor_mass,
                'observed_precursor_charge': spectrum.precursor_charge,
'allowed_gap': allowed_gap
}
fall_off[_id] = DEVFallOffEntry(
is_hybrid,
truth_seq,
'precursor_filling',
metadata
)
return Alignments(spectrum, [])
nonhyba, hyba = [], []
for p_m in precursor_matches:
if '-' in p_m or '(' in p_m or ')' in p_m:
hyba.append((p_m.replace('-', '').replace('(', '').replace(')', ''), p_m))
else:
nonhyba.append((p_m, None))
st = time.time()
updated_hybrids = [] if len(hyba) == 0 else hybrid_alignment.replace_ambiguous_hybrids(hyba, db, spectrum)
if DEV and truth[spectrum.id]['hybrid']:
_id = spectrum.id
is_hybrid = truth[_id]['hybrid']
truth_seq = truth[_id]['sequence']
if not utils.DEV_contains_truth_exact(truth_seq, is_hybrid, [x[0] for x in updated_hybrids]):
metadata = {
'before_ambiguous_removal': hyba,
'after_ambiguous_removal': updated_hybrids
}
fall_off[_id] = DEVFallOffEntry(
is_hybrid,
truth_seq,
'removing_ambiguous_hybrids',
metadata
)
return Alignments(spectrum, [])
AMBIGUOUS_REMOVAL_COUNT += len(hyba)
AMBIGUOUS_REMOVAL_TIME += time.time() - st
return nonhyba + updated_hybrids
def attempt_alignment_dev(spectrum: Spectrum, truth: bool = None, fall_off: bool = None,a: list = None):
b_seqs = [x[0] for x in a]
y_seqs = [x[0] for x in a]
_id = spectrum.id
is_hybrid = truth[_id]['hybrid']
truth_seq = truth[_id]['sequence']
if not utils.DEV_contains_truth_parts(truth_seq, is_hybrid, b_seqs, y_seqs):
metadata = {
'alignments': a,
'before_alignments_b': b_seqs,
'before_alignments_y': y_seqs
}
fall_off[_id] = DEVFallOffEntry(
is_hybrid,
truth_seq,
'first_alignment_round',
metadata
)
return Alignments(spectrum, [], None)
def attempt_alignment_first_pass(spectrum: Spectrum, db: Database, n: int = 3, ppm_tolerance: int = 20,
precursor_tolerance: int = 10,digest_type: str = '',truth: bool = None, fall_off: bool = None,
DEV: bool = False,OBJECTIFY_COUNT: int = 0,OBJECTIFY_TIME: int = 0,a: list = None,is_last: bool = False):
refine_start = time.time()
non_hybrid_refined = refine_alignments(spectrum, db, [x for x in a if x[1] is None],
precursor_tolerance=precursor_tolerance, DEV=DEV, truth=truth, fall_off=fall_off)
refine_time = time.time() - refine_start
Non_hybrid_refine_time.append(refine_time)
non_hybrid_alignments = []
tracker = {}
st = time.time()
for nhr, _ in non_hybrid_refined:
if nhr in tracker:
continue
tracker[nhr] = True
scoring_start = time.time()
p_d = scoring.precursor_distance(
spectrum.precursor_mass,
gen_spectra.get_precursor(nhr, spectrum.precursor_charge)
)
b_score = scoring.score_sequence(
spectrum.mz_values,
sorted(gen_spectra.gen_spectrum(nhr, ion='b')['spectrum']),
ppm_tolerance
)
y_score = scoring.score_sequence(
spectrum.mz_values,
sorted(gen_spectra.gen_spectrum(nhr, ion='y')['spectrum']),
ppm_tolerance
)
total_error = scoring.total_mass_error(spectrum, nhr, ppm_tolerance)
t_score = b_score + y_score + scoring.digest_score(nhr, db, digest_type)
non_hybrid_scoring_times.append(time.time() - scoring_start)
parents = alignment_utils.get_parents(nhr, db)
non_hybrid_alignments.append(
SequenceAlignment(
parents[0],
nhr,
b_score,
y_score,
t_score,
p_d,
total_error
)
)
OBJECTIFY_COUNT += len(non_hybrid_refined)
OBJECTIFY_TIME += time.time() - st
if any([x.total_score >= 1.5 * len(x.sequence) for x in non_hybrid_alignments]):
sorted_alignments = sorted(
non_hybrid_alignments,
key=lambda x: (
x.total_score,
math.inf if x.total_mass_error <= 0 else 1/x.total_mass_error,
math.inf if x.precursor_distance <= 0 else 1/x.precursor_distance,
x.b_score,
x.y_score
),
reverse=True
)
top_n_alignments = sorted_alignments[:n]
return top_n_alignments
else:
return None, non_hybrid_alignments
def attempt_alignment_second_pass(spectrum: Spectrum, db: Database, n: int = 3,
ppm_tolerance: int = 20, precursor_tolerance: int = 10,digest_type: str = '',truth: bool = None,
fall_off: bool = None,DEV: bool = False,OBJECTIFY_COUNT: int = 0,OBJECTIFY_TIME: int = 0,
a: list = [],non_hybrid_alignments: list = []):
refine_start = time.time()
hybrid_refined = refine_alignments(
spectrum,
db,
[x for x in a if x[1] is not None],
precursor_tolerance=precursor_tolerance,
DEV=DEV,
truth=truth,
fall_off=fall_off
)
refine_time = time.time() - refine_start
Hybrid_refine_times.append(refine_time)
hybrid_alignments = []
tracker = {}
st = time.time()
for hr, special_hr in hybrid_refined:
if hr in tracker:
continue
tracker[hr] = True
scoring_start = time.time()
p_d = scoring.precursor_distance(
spectrum.precursor_mass,
gen_spectra.get_precursor(hr, spectrum.precursor_charge)
)
b_score = scoring.score_sequence(
spectrum.mz_values,
sorted(gen_spectra.gen_spectrum(hr, ion='b')['spectrum']),
ppm_tolerance
)
y_score = scoring.score_sequence(
spectrum.mz_values,
sorted(gen_spectra.gen_spectrum(hr, ion='y')['spectrum']),
ppm_tolerance
)
total_error = scoring.total_mass_error(spectrum, hr, ppm_tolerance)
hybrid_scoring_times.append(time.time() - scoring_start)
parents = alignment_utils.get_parents(hr, db)
t_score = None
if special_hr is None:
t_score = b_score + y_score + scoring.digest_score(hr, db, digest_type)
non_hybrid_alignments.append(
SequenceAlignment(
parents[0],
hr,
b_score,
y_score,
t_score,
p_d,
total_error
)
)
i = 0
avg_b_score = 0
avg_y_score = 0
avg_total_score = 0
for score in non_hybrid_alignments:
avg_b_score = avg_b_score + score[2]
avg_y_score = avg_y_score + score[3]
avg_total_score = avg_total_score + score[4]
i = i + 1
alignment_instrumentation = objects.Alignment_Instrumentation(
avg_b_score = avg_b_score/i,
avg_y_score = avg_y_score/i,
avg_total_score = avg_total_score/i
)
else:
t_score = scoring.hybrid_score(spectrum, special_hr, ppm_tolerance)\
+ scoring.digest_score(special_hr, db, digest_type)
hybrid_alignments.append(
HybridSequenceAlignment(
parents[0],
parents[1],
hr,
special_hr,
b_score,
y_score,
t_score,
p_d,
total_error
)
)
i = 0
avg_b_score = 0
avg_y_score = 0
avg_total_score = 0
for score in hybrid_alignments:
avg_b_score = avg_b_score + score[4]
avg_y_score = avg_y_score + score[5]
avg_total_score = avg_total_score + score[6]
i = i + 1
alignment_instrumentation = objects.Alignment_Instrumentation(
avg_b_score = avg_b_score/i,
avg_y_score = avg_y_score/i,
avg_total_score = avg_total_score/i
)
OBJECTIFY_COUNT += len(hybrid_refined)
OBJECTIFY_TIME += time.time() - st
all_alignments = non_hybrid_alignments + hybrid_alignments
sorted_alignments = sorted(
all_alignments,
key=lambda x: (
x.total_score,
math.inf if x.total_mass_error <= 0 else 1/x.total_mass_error,
math.inf if x.precursor_distance <= 0 else 1/x.precursor_distance,
1/len(x),
x.b_score,
x.y_score
),
reverse=True
)
top_n_alignments = sorted_alignments[:n]
return Alignments(spectrum, top_n_alignments)
def attempt_alignment(spectrum: Spectrum, db: Database, b_hits: list,y_hits: list, n: int = 3,
ppm_tolerance: int = 20, precursor_tolerance: int = 10,digest_type: str = '',DEBUG: bool = False,
truth: bool = None, fall_off: bool = None):
global FIRST_ALIGN_TIME, AMBIGUOUS_REMOVAL_TIME, PRECURSOR_MASS_TIME, OBJECTIFY_TIME
global FIRST_ALIGN_COUNT, AMBIGUOUS_REMOVAL_COUNT, PRECURSOR_MASS_COUNT, OBJECTIFY_COUNT
global TOTAL_ITERATIONS
TOTAL_ITERATIONS += 1
DEV = truth is not None and fall_off is not None
b_non_hybrids, y_non_hybrids = extend_base_kmers(b_hits, y_hits, spectrum, db)
non_hybrids = b_non_hybrids + y_non_hybrids
    a = align_b_y(b_hits, y_hits, spectrum, db)
from __future__ import annotations
import dataclasses
import enum
import io
import secrets
import struct
from typing import Callable, Generator, Optional, Sequence, Tuple
from . import exceptions, extensions
from .typing import Data
try:
from .speedups import apply_mask
except ImportError: # pragma: no cover
from .utils import apply_mask
__all__ = [
"Opcode",
"OP_CONT",
"OP_TEXT",
"OP_BINARY",
"OP_CLOSE",
"OP_PING",
"OP_PONG",
"DATA_OPCODES",
"CTRL_OPCODES",
"Frame",
"prepare_data",
"prepare_ctrl",
"Close",
]
class Opcode(enum.IntEnum):
"""Opcode values for WebSocket frames."""
CONT, TEXT, BINARY = 0x00, 0x01, 0x02
CLOSE, PING, PONG = 0x08, 0x09, 0x0A
OP_CONT = Opcode.CONT
OP_TEXT = Opcode.TEXT
OP_BINARY = Opcode.BINARY
OP_CLOSE = Opcode.CLOSE
OP_PING = Opcode.PING
OP_PONG = Opcode.PONG
DATA_OPCODES = OP_CONT, OP_TEXT, OP_BINARY
CTRL_OPCODES = OP_CLOSE, OP_PING, OP_PONG
# See https://www.iana.org/assignments/websocket/websocket.xhtml
CLOSE_CODES = {
1000: "OK",
1001: "going away",
1002: "protocol error",
1003: "unsupported type",
# 1004 is reserved
1005: "no status code [internal]",
1006: "connection closed abnormally [internal]",
1007: "invalid data",
1008: "policy violation",
1009: "message too big",
1010: "extension required",
1011: "unexpected error",
1012: "service restart",
1013: "try again later",
1014: "bad gateway",
1015: "TLS failure [internal]",
}
# Close code that are allowed in a close frame.
# Using a set optimizes `code in EXTERNAL_CLOSE_CODES`.
EXTERNAL_CLOSE_CODES = {
1000,
1001,
1002,
1003,
1007,
1008,
1009,
1010,
1011,
1012,
1013,
1014,
}
OK_CLOSE_CODES = {1000, 1001}
BytesLike = bytes, bytearray, memoryview
@dataclasses.dataclass
class Frame:
"""
WebSocket frame.
Attributes:
opcode: Opcode.
data: Payload data.
fin: FIN bit.
rsv1: RSV1 bit.
rsv2: RSV2 bit.
rsv3: RSV3 bit.
Only these fields are needed. The MASK bit, payload length and masking-key
are handled on the fly when parsing and serializing frames.
"""
opcode: Opcode
data: bytes
fin: bool = True
rsv1: bool = False
rsv2: bool = False
rsv3: bool = False
def __str__(self) -> str:
"""
        Return a human-readable representation of a frame.
"""
coding = None
length = f"{len(self.data)} byte{'' if len(self.data) == 1 else 's'}"
non_final = "" if self.fin else "continued"
if self.opcode is OP_TEXT:
# Decoding only the beginning and the end is needlessly hard.
# Decode the entire payload then elide later if necessary.
data = repr(self.data.decode())
elif self.opcode is OP_BINARY:
# We'll show at most the first 16 bytes and the last 8 bytes.
# Encode just what we need, plus two dummy bytes to elide later.
binary = self.data
if len(binary) > 25:
binary = binary[:16] + b"\x00\x00" + binary[-8:]
data = " ".join(f"{byte:02x}" for byte in binary)
elif self.opcode is OP_CLOSE:
data = str(Close.parse(self.data))
elif self.data:
# We don't know if a Continuation frame contains text or binary.
# Ping and Pong frames could contain UTF-8. Attempt to decode as
# UTF-8 and display it as text; fallback to binary.
try:
data = repr(self.data.decode())
coding = "text"
except UnicodeDecodeError:
binary = self.data
if len(binary) > 25:
binary = binary[:16] + b"\x00\x00" + binary[-8:]
data = " ".join(f"{byte:02x}" for byte in binary)
coding = "binary"
else:
data = "''"
if len(data) > 75:
data = data[:48] + "..." + data[-24:]
metadata = ", ".join(filter(None, [coding, length, non_final]))
return f"{self.opcode.name} {data} [{metadata}]"
@classmethod
def parse(
cls,
read_exact: Callable[[int], Generator[None, None, bytes]],
*,
mask: bool,
max_size: Optional[int] = None,
extensions: Optional[Sequence[extensions.Extension]] = None,
) -> Generator[None, None, Frame]:
"""
Parse a WebSocket frame.
This is a generator-based coroutine.
Args:
read_exact: generator-based coroutine that reads the requested
bytes or raises an exception if there isn't enough data.
mask: whether the frame should be masked i.e. whether the read
happens on the server side.
max_size: maximum payload size in bytes.
extensions: list of extensions, applied in reverse order.
Raises:
PayloadTooBig: if the frame's payload size exceeds ``max_size``.
ProtocolError: if the frame contains incorrect values.
"""
# Read the header.
data = yield from read_exact(2)
head1, head2 = struct.unpack("!BB", data)
# While not Pythonic, this is marginally faster than calling bool().
fin = True if head1 & 0b10000000 else False
rsv1 = True if head1 & 0b01000000 else False
rsv2 = True if head1 & 0b00100000 else False
rsv3 = True if head1 & 0b00010000 else False
try:
opcode = Opcode(head1 & 0b00001111)
except ValueError as exc:
raise exceptions.ProtocolError("invalid opcode") from exc
if (True if head2 & 0b10000000 else False) != mask:
raise exceptions.ProtocolError("incorrect masking")
length = head2 & 0b01111111
if length == 126:
data = yield from read_exact(2)
(length,) = struct.unpack("!H", data)
elif length == 127:
data = yield from read_exact(8)
(length,) = struct.unpack("!Q", data)
if max_size is not None and length > max_size:
raise exceptions.PayloadTooBig(
f"over size limit ({length} > {max_size} bytes)"
)
if mask:
mask_bytes = yield from read_exact(4)
# Read the data.
data = yield from read_exact(length)
if mask:
data = apply_mask(data, mask_bytes)
frame = cls(opcode, data, fin, rsv1, rsv2, rsv3)
if extensions is None:
extensions = []
for extension in reversed(extensions):
frame = extension.decode(frame, max_size=max_size)
frame.check()
return frame
def serialize(
self,
*,
mask: bool,
extensions: Optional[Sequence[extensions.Extension]] = None,
) -> bytes:
"""
Serialize a WebSocket frame.
Args:
mask: whether the frame should be masked i.e. whether the write
happens on the client side.
extensions: list of extensions, applied in order.
Raises:
ProtocolError: if the frame contains incorrect values.
"""
self.check()
if extensions is None:
extensions = []
for extension in extensions:
self = extension.encode(self)
output = io.BytesIO()
# Prepare the header.
head1 = (
(0b10000000 if self.fin else 0)
| (0b01000000 if self.rsv1 else 0)
| (0b00100000 if self.rsv2 else 0)
| (0b00010000 if self.rsv3 else 0)
| self.opcode
)
head2 = 0b10000000 if mask else 0
length = len(self.data)
if length < 126:
output.write(struct.pack("!BB", head1, head2 | length))
elif length < 65536:
output.write(struct.pack("!BBH", head1, head2 | 126, length))
else:
output.write(struct.pack("!BBQ", head1, head2 | 127, length))
if mask:
mask_bytes = secrets.token_bytes(4)
output.write(mask_bytes)
# Prepare the data.
if mask:
data = apply_mask(self.data, mask_bytes)
else:
data = self.data
output.write(data)
return output.getvalue()
def check(self) -> None:
"""
Check that reserved bits and opcode have acceptable values.
Raises:
ProtocolError: if a reserved bit or the opcode is invalid.
"""
if self.rsv1 or self.rsv2 or self.rsv3:
raise exceptions.ProtocolError("reserved bits must be 0")
if self.opcode in CTRL_OPCODES:
if len(self.data) > 125:
raise exceptions.ProtocolError("control frame too long")
if not self.fin:
raise exceptions.ProtocolError("fragmented control frame")
def prepare_data(data: Data) -> Tuple[int, bytes]:
"""
Convert a string or byte-like object to an opcode and a bytes-like object.
This function is designed for data frames.
If ``data`` is a :class:`str`, return ``OP_TEXT`` and a :class:`bytes`
object encoding ``data`` in UTF-8.
If ``data`` is a bytes-like object, return ``OP_BINARY`` and a bytes-like
object.
Raises:
TypeError: if ``data`` doesn't have a supported type.
"""
if isinstance(data, str):
return OP_TEXT, data.encode("utf-8")
elif isinstance(data, BytesLike):
return OP_BINARY, data
else:
raise TypeError("data must be str or bytes-like")
def prepare_ctrl(data: Data) -> bytes:
"""
Convert a string or byte-like object to bytes.
This function is designed for ping and pong frames.
If ``data`` is a :class:`str`, return a :class:`bytes` object encoding
``data`` in UTF-8.
If ``data`` is a bytes-like object, return a :class:`bytes` object.
Raises:
TypeError: if ``data`` doesn't have a supported type.
"""
if isinstance(data, str):
return data.encode("utf-8")
elif isinstance(data, BytesLike):
return bytes(data)
else:
raise TypeError("data must be str or bytes-like")
@dataclasses.dataclass
class Close:
"""
Code and reason for WebSocket close frames.
Attributes:
code: Close code.
reason: Close reason.
"""
code: int
reason: str
def __str__(self) -> str:
"""
        Return a human-readable representation of a close code and reason.
"""
if 3000 <= self.code < 4000:
explanation = "registered"
elif 4000 <= self.code < 5000:
explanation = "private use"
else:
explanation = CLOSE_CODES.get(self.code, "unknown")
result = f"{self.code} ({explanation})"
if self.reason:
result = f"{result} {self.reason}"
return result
@classmethod
def parse(cls, data: bytes) -> Close:
"""
Parse the payload of a close frame.
Args:
data: payload of the close frame.
Raises:
ProtocolError: if data is ill-formed.
UnicodeDecodeError: if the reason isn't valid UTF-8.
"""
if len(data) >= 2:
(code,) = struct.unpack("!H", data[:2])
reason = data[2:].decode("utf-8")
close = cls(code, reason)
close.check()
return close
elif len(data) == 0:
return cls(1005, "")
else:
raise exceptions.ProtocolError("close frame too short")
def serialize(self) -> bytes:
"""
        Serialize the payload of a close frame.
bid=_bid)
saliva_sample_data = self._get_usable_saliva_sample(pid=participant_matrix.pids[i],
bid=_bid)
# Determine which sample ID to use
sample_data = self._determine_best_sample(blood_sample_data, saliva_sample_data)
# update the sample id, collected site, and biobank order
if sample_data is not None:
participant_matrix.sample_ids[i] = sample_data[0]
participant_matrix.site_ids[i] = sample_data[1]
participant_matrix.order_ids[i] = sample_data[2]
else:
logging.info(f'No valid samples for pid {participant_matrix.pids[i]}.')
# insert new members and make the manifest
return self.process_samples_into_manifest(
participant_matrix,
cohort=cohort,
saliva=saliva,
local=local
)
def genomic_members_insert(self, *, members, session, set_id, bids):
"""
Bulk save of member for genomic_set_member as well as PDR
batch updating of members
:param: members
:param: session
:param: set_id
:param: bids
"""
try:
session.bulk_save_objects(members)
session.commit()
members = self.member_dao.get_members_from_set_id(set_id, bids=bids)
member_ids = [m.id for m in members]
bq_genomic_set_member_batch_update(member_ids, project_id=self.controller.bq_project_id)
genomic_set_member_batch_update(member_ids)
except Exception as e:
raise Exception("Error occurred on genomic member insert: {0}".format(e))
def _get_new_biobank_samples(self, from_date):
"""
Retrieves BiobankStoredSample objects with `rdr_created`
after the last run of the new participant workflow job.
The query filters out participants that do not match the
genomic validation requirements.
:param: from_date
:return: list of tuples (bid, pid, biobank_identifier.value, collected_site_id)
"""
_new_samples_sql = self.query.new_biobank_samples()
params = {
"sample_status_param": SampleStatus.RECEIVED.__int__(),
"dob_param": GENOMIC_VALID_AGE,
"general_consent_param": QuestionnaireStatus.SUBMITTED.__int__(),
"ai_param": Race.AMERICAN_INDIAN_OR_ALASKA_NATIVE.__int__(),
"from_date_param": from_date.strftime("%Y-%m-%d"),
"withdrawal_param": WithdrawalStatus.NOT_WITHDRAWN.__int__(),
"suspension_param": SuspensionStatus.NOT_SUSPENDED.__int__(),
"cohort_3_param": ParticipantCohort.COHORT_3.__int__(),
"ignore_param": GenomicWorkflowState.IGNORE.__int__(),
}
with self.samples_dao.session() as session:
result = session.execute(_new_samples_sql, params).fetchall()
result = self._prioritize_samples_by_participant(result)
return list(zip(*result))[:-2] # Slicing to remove the last two columns retrieved for prioritization
def _prioritize_samples_by_participant(self, sample_results):
preferred_samples = {}
for sample in sample_results:
preferred_sample = sample
previously_found_sample = preferred_samples.get(sample.participant_id, None)
if previously_found_sample is not None:
preferred_sample = self._determine_best_sample(previously_found_sample, sample)
preferred_samples[sample.participant_id] = preferred_sample
return list(preferred_samples.values())
@staticmethod
def _determine_best_sample(sample_one, sample_two):
if sample_one is None:
return sample_two
if sample_two is None:
return sample_one
# Return the usable sample (status less than NOT_RECEIVED) if one is usable and the other isn't
if sample_one.status < int(SampleStatus.SAMPLE_NOT_RECEIVED) <= sample_two.status:
return sample_one
        elif sample_two.status < int(SampleStatus.SAMPLE_NOT_RECEIVED) <= sample_one.status:
return sample_two
elif sample_one.status >= int(SampleStatus.SAMPLE_NOT_RECEIVED) \
and sample_two.status >= int(SampleStatus.SAMPLE_NOT_RECEIVED):
return None
# Both are usable
# Return the sample by the priority of the code: 1ED04, then 1ED10, and 1SAL2 last
test_codes_by_preference = ['1ED04', '1ED10', '1SAL2'] # most desirable first
samples_by_code = {}
for sample in [sample_one, sample_two]:
samples_by_code[sample.test] = sample
for test_code in test_codes_by_preference:
if samples_by_code.get(test_code):
return samples_by_code[test_code]
logging.error(f'Should have been able to select between '
f'{sample_one.biobank_stored_sample_id} and {sample_two.biobank_stored_sample_id}')
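    # Illustrative ordering (hypothetical statuses): a sample whose status is below
    # SAMPLE_NOT_RECEIVED always beats one at or above it; when both are usable the test
    # code breaks the tie, preferring 1ED04 over 1ED10 over 1SAL2; when neither is usable
    # the method returns None.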
def _get_new_c2_participants(self, from_date):
"""
Retrieves C2 participants and validation data.
Broken out so that DNA samples' business logic is handled separately
:param from_date:
:return:
"""
_c2_participant_sql = self.query.new_c2_participants()
params = {
"sample_status_param": SampleStatus.RECEIVED.__int__(),
"dob_param": GENOMIC_VALID_AGE,
"general_consent_param": QuestionnaireStatus.SUBMITTED.__int__(),
"ai_param": Race.AMERICAN_INDIAN_OR_ALASKA_NATIVE.__int__(),
"from_date_param": from_date.strftime("%Y-%m-%d"),
"withdrawal_param": WithdrawalStatus.NOT_WITHDRAWN.__int__(),
"suspension_param": SuspensionStatus.NOT_SUSPENDED.__int__(),
"cohort_2_param": ParticipantCohort.COHORT_2.__int__(),
"ignore_param": GenomicWorkflowState.IGNORE.__int__(),
}
with self.ps_dao.session() as session:
result = session.execute(_c2_participant_sql, params).fetchall()
return list([list(r) for r in zip(*result)])
def _get_remaining_c2_participants(self):
_c2_participant_sql = self.query.remaining_c2_participants()
params = {
"sample_status_param": SampleStatus.RECEIVED.__int__(),
"dob_param": GENOMIC_VALID_AGE,
"general_consent_param": QuestionnaireStatus.SUBMITTED.__int__(),
"ai_param": Race.AMERICAN_INDIAN_OR_ALASKA_NATIVE.__int__(),
"withdrawal_param": WithdrawalStatus.NOT_WITHDRAWN.__int__(),
"suspension_param": SuspensionStatus.NOT_SUSPENDED.__int__(),
"cohort_2_param": ParticipantCohort.COHORT_2.__int__(),
"ignore_param": GenomicWorkflowState.IGNORE.__int__(),
}
with self.ps_dao.session() as session:
result = session.execute(_c2_participant_sql, params).fetchall()
return list([list(r) for r in zip(*result)])
def _get_new_c1_participants(self, from_date):
"""
Retrieves C1 participants and validation data.
:param from_date:
:return:
"""
_c1_participant_sql = self.query.new_c1_participants()
params = {
"sample_status_param": SampleStatus.RECEIVED.__int__(),
"dob_param": GENOMIC_VALID_AGE,
"general_consent_param": QuestionnaireStatus.SUBMITTED.__int__(),
"ai_param": Race.AMERICAN_INDIAN_OR_ALASKA_NATIVE.__int__(),
"from_date_param": from_date.strftime("%Y-%m-%d"),
"withdrawal_param": WithdrawalStatus.NOT_WITHDRAWN.__int__(),
"suspension_param": SuspensionStatus.NOT_SUSPENDED.__int__(),
"cohort_1_param": ParticipantCohort.COHORT_1.__int__(),
"c1_reconsent_param": COHORT_1_REVIEW_CONSENT_YES_CODE,
"ignore_param": GenomicWorkflowState.IGNORE.__int__(),
}
with self.ps_dao.session() as session:
result = session.execute(_c1_participant_sql, params).fetchall()
return list([list(r) for r in zip(*result)])
def _get_long_read_participants(self, limit=None):
"""
        Retrieves participants based on filters that have
        been designated for use in the long-read pilot program
"""
with self.member_dao.session() as session:
gsm_alias = aliased(GenomicSetMember)
result = session.query(GenomicSetMember).join(
ParticipantSummary,
GenomicSetMember.participantId == ParticipantSummary.participantId,
).join(
ParticipantRaceAnswers,
ParticipantRaceAnswers.participantId == ParticipantSummary.participantId,
).join(
Code,
ParticipantRaceAnswers.codeId == Code.codeId,
).join(
GenomicGCValidationMetrics,
GenomicSetMember.id == GenomicGCValidationMetrics.genomicSetMemberId,
).outerjoin(
gsm_alias,
sqlalchemy.and_(
gsm_alias.participantId == ParticipantSummary.participantId,
gsm_alias.genomeType == 'long_read'
)
).filter(
Code.value == 'WhatRaceEthnicity_Black',
GenomicSetMember.genomeType.in_(['aou_wgs']),
GenomicSetMember.genomicWorkflowState != GenomicWorkflowState.IGNORE,
GenomicGCValidationMetrics.ignoreFlag == 0,
GenomicGCValidationMetrics.contamination <= 0.01,
ParticipantSummary.participantOrigin == 'vibrent',
ParticipantSummary.ehrUpdateTime.isnot(None),
gsm_alias.id.is_(None),
).distinct(gsm_alias.biobankId)
if limit:
result = result.limit(limit)
return result.all()
def _get_usable_blood_sample(self, pid, bid):
"""
Select 1ED04 or 1ED10 based on max collected date
:param pid: participant_id
:param bid: biobank_id
:return: tuple(blood_collected date, blood sample, blood site, blood order)
"""
_samples_sql = self.query.usable_blood_sample()
params = {
"pid_param": pid,
"bid_param": bid,
}
with self.samples_dao.session() as session:
result = session.execute(_samples_sql, params).first()
return result
def _get_usable_saliva_sample(self, pid, bid):
"""
Select 1SAL2 based on max collected date
:param pid: participant_id
:param bid: biobank_id
:return: tuple(saliva date, saliva sample, saliva site, saliva order)
"""
_samples_sql = self.query.usable_saliva_sample()
params = {
"pid_param": pid,
"bid_param": bid,
}
with self.samples_dao.session() as session:
result = session.execute(_samples_sql, params).first()
return result
def _get_remaining_saliva_participants(self, config):
_saliva_sql = self.query.remaining_saliva_participants(config)
params = {
"sample_status_param": SampleStatus.RECEIVED.__int__(),
"dob_param": GENOMIC_VALID_AGE,
"general_consent_param": QuestionnaireStatus.SUBMITTED.__int__(),
"ai_param": Race.AMERICAN_INDIAN_OR_ALASKA_NATIVE.__int__(),
"withdrawal_param": WithdrawalStatus.NOT_WITHDRAWN.__int__(),
"suspension_param": SuspensionStatus.NOT_SUSPENDED.__int__(),
"ignore_param": GenomicWorkflowState.IGNORE.__int__(),
}
with self.samples_dao.session() as session:
result = session.execute(_saliva_sql, params).fetchall()
return list([list(r) for r in zip(*result)])
def _create_new_genomic_set(self):
"""Inserts a new genomic set for this run"""
attributes = {
'genomicSetName': f'new_participant_workflow_{self.run_id}',
'genomicSetCriteria': '.',
'genomicSetVersion': 1,
'genomicSetStatus': GenomicSetStatus.VALID,
}
new_set_obj = GenomicSet(**attributes)
inserted_set = self.set_dao.insert(new_set_obj)
# Insert new set for PDR
bq_genomic_set_update(inserted_set.id, project_id=self.controller.bq_project_id)
genomic_set_update(inserted_set.id)
return inserted_set
def _create_new_set_member(self, **kwargs):
"""Inserts new GenomicSetMember object"""
new_member_obj = GenomicSetMember(**kwargs)
return self.member_dao.insert(new_member_obj)
def _get_new_york_flag_from_site(self, collected_site_id):
"""
Looks up whether a collected site's state is NY
:param collected_site_id: the id of the site
:return: int (1 or 0 for NY or Not)
"""
return int(self.site_dao.get(collected_site_id).state == 'NY')
def _get_new_york_flag_from_state_id(self, state_id):
"""
Looks up whether a collected site's state is NY
:param state_id: the code ID for the state
:return: int (1 or 0 for NY or Not)
"""
return int(self.code_dao.get(state_id).value.split('_')[1] == 'NY')
def _calculate_validation_flags(self, validation_criteria):
"""
Determines validation and flags for genomic sample
:param validation_criteria:
:return: list of validation flags
"""
# Process validation flags for inserting into genomic_set_member
flags = [flag for (passing, flag) in
zip(validation_criteria, self._VALIDATION_FLAGS)
if not passing]
return flags
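        # For example (hypothetical inputs): with validation_criteria == (True, False, True)
        # and _VALIDATION_FLAGS == [FLAG_A, FLAG_B, FLAG_C], only FLAG_B is returned,
        # because only its criterion failed.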
class ManifestDefinitionProvider:
"""
Helper class to produce the definitions for each manifest
"""
# Metadata for the various manifests
ManifestDef = namedtuple('ManifestDef', ["job_run_field",
"source_data",
"destination_bucket",
"output_filename",
"columns",
"signal"])
def __init__(
self,
job_run_id=None,
bucket_name=None,
**kwargs
):
# Attributes
self.job_run_id = job_run_id
self.bucket_name = bucket_name
self.kwargs = kwargs
self.query = GenomicQueryClass(
input_manifest=self.kwargs['kwargs'].get('input_manifest')
)
self.manifest_columns_config = {
GenomicManifestTypes.CVL_W1: (
"genomic_set_name",
"biobank_id",
"sample_id",
"sex_at_birth",
"ny_flag",
"site_id",
"secondary_validation",
"date_submitted",
"test_name",
),
GenomicManifestTypes.AW3_ARRAY: (
"chipwellbarcode",
"biobank_id",
"sample_id",
"biobankidsampleid",
"sex_at_birth",
"site_id",
"red_idat_path",
"red_idat_md5_path",
"green_idat_path",
"green_idat_md5_path",
"vcf_path",
"vcf_index_path",
"vcf_md5_path",
"callrate",
"sex_concordance",
"contamination",
"processing_status",
"research_id",
"sample_source",
"pipeline_id"
),
GenomicManifestTypes.GEM_A1: (
'biobank_id',
'sample_id',
"sex_at_birth",
"consent_for_ror",
"date_of_consent_for_ror",
"chipwellbarcode",
"genome_center",
),
GenomicManifestTypes.GEM_A3: (
'biobank_id',
'sample_id',
'date_of_consent_removal',
),
GenomicManifestTypes.CVL_W3: (
"value",
"sample_id",
"biobank_id",
"collection_tubeid",
"sex_at_birth",
"genome_type",
"ny_flag",
"request_id",
"package_id",
"ai_an",
"site_id",
),
GenomicManifestTypes.AW3_WGS: (
"biobank_id",
"sample_id",
"biobankidsampleid",
"sex_at_birth",
"site_id",
"vcf_hf_path",
"vcf_hf_index_path",
"vcf_hf_md5_path",
"cram_path",
"cram_md5_path",
"crai_path",
"gvcf_path",
"gvcf_md5_path",
"contamination",
"sex_concordance",
"processing_status",
"mean_coverage",
"research_id",
"sample_source",
"mapped_reads_pct",
"sex_ploidy"
),
GenomicManifestTypes.AW2F: (
"PACKAGE_ID",
"BIOBANKID_SAMPLEID",
"BOX_STORAGEUNIT_ID",
"BOX_ID/PLATE_ID",
"WELL_POSITION",
"SAMPLE_ID",
"PARENT_SAMPLE_ID",
"COLLECTION_TUBE_ID",
"MATRIX_ID",
"COLLECTION_DATE",
"BIOBANK_ID",
"SEX_AT_BIRTH",
"AGE",
"NY_STATE_(Y/N)",
"SAMPLE_TYPE",
"TREATMENTS",
"QUANTITY_(uL)",
"TOTAL_CONCENTRATION_(ng/uL)",
"TOTAL_DNA(ng)",
"VISIT_DESCRIPTION",
"SAMPLE_SOURCE",
"STUDY",
"TRACKING_NUMBER",
"CONTACT",
"EMAIL",
"STUDY_PI",
"TEST_NAME",
"FAILURE_MODE",
"FAILURE_MODE_DESC",
"PROCESSING_STATUS",
"CONTAMINATION",
"CONTAMINATION_CATEGORY",
"CONSENT_FOR_ROR",
),
}
def _get_source_data_query(self, manifest_type):
"""
Returns the query to use for manifest's source data
:param manifest_type:
:return: query object
"""
try:
return self.query.genomic_data_config[manifest_type]
except KeyError:
logging.warning(f"Manifest type {manifest_type} does not resolve query")
def get_def(self, manifest_type):
"""
Returns the manifest definition based on manifest_type
:param manifest_type:
:return: ManifestDef()
"""
now_formatted = clock.CLOCK.now().strftime("%Y-%m-%d-%H-%M-%S")
def_config = {
GenomicManifestTypes.CVL_W1: {
'job_run_field': 'cvlW1ManifestJobRunId',
'output_filename': f'{CVL_W1_MANIFEST_SUBFOLDER}/AoU_CVL_Manifest_{now_formatted}.csv',
'signal': 'manifest-generated'
},
GenomicManifestTypes.GEM_A1: {
'job_run_field': 'gemA1ManifestJobRunId',
'output_filename': f'{GENOMIC_GEM_A1_MANIFEST_SUBFOLDER}/AoU_GEM_A1_manifest_{now_formatted}.csv',
'signal': 'manifest-generated'
},
GenomicManifestTypes.GEM_A3: {
'job_run_field': 'gemA3ManifestJobRunId',
'output_filename': f'{GENOMIC_GEM_A3_MANIFEST_SUBFOLDER}/AoU_GEM_A3_manifest_{now_formatted}.csv',
'signal': 'manifest-generated'
},
GenomicManifestTypes.CVL_W3: {
'job_run_field': 'cvlW3ManifestJobRunID',
'output_filename': f'{CVL_W3_MANIFEST_SUBFOLDER}/AoU_CVL_W1_{now_formatted}.csv',
'signal': 'manifest-generated'
},
GenomicManifestTypes.AW3_ARRAY: {
'job_run_field': 'aw3ManifestJobRunID',
'output_filename': f'{GENOMIC_AW3_ARRAY_SUBFOLDER}/AoU_DRCV_GEN_{now_formatted}.csv',
'signal': 'bypass'
},
GenomicManifestTypes.AW3_WGS: {
'job_run_field': 'aw3ManifestJobRunID',
'output_filename': f'{GENOMIC_AW3_WGS_SUBFOLDER}/AoU_DRCV_SEQ_{now_formatted}.csv',
'signal': 'bypass'
},
GenomicManifestTypes.AW2F: {
'job_run_field': 'aw2fManifestJobRunID',
'output_filename': f'{BIOBANK_AW2F_SUBFOLDER}/GC_AoU_DataType_PKG-YYMM-xxxxxx_contamination.csv',
'signal': 'bypass'
}
}
return self.ManifestDef(
job_run_field=def_config[manifest_type]['job_run_field'],
source_data=self._get_source_data_query(manifest_type),
destination_bucket=f'{self.bucket_name}',
output_filename=def_config[manifest_type]['output_filename'],
columns=self.manifest_columns_config[manifest_type],
signal=def_config[manifest_type]['signal'],
)
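        # For example, get_def(GenomicManifestTypes.GEM_A1) returns a ManifestDef whose
        # output_filename sits under GENOMIC_GEM_A1_MANIFEST_SUBFOLDER with the current
        # timestamp and whose columns come from manifest_columns_config for that type.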
class ManifestCompiler:
"""
This component compiles Genomic manifests
based on definitions provided by ManifestDefinitionProvider
"""
def __init__(
self,
run_id,
bucket_name=None,
max_num=None,
controller=None
):
self.run_id = run_id
self.bucket_name = bucket_name
self.max_num = max_num
self.controller = controller
self.output_file_name = None
self.manifest_def = None
self.def_provider = None
# Dao components
self.member_dao = GenomicSetMemberDao()
self.metrics_dao = GenomicGCValidationMetricsDao()
def generate_and_transfer_manifest(self, manifest_type, genome_type, version=None, **kwargs):
"""
Main execution method for ManifestCompiler
        :return: result
# -*- coding: utf-8 -*-
"""
modify and save the xml file for icc profile.
============================================
"""
# import standard libraries
import os
import xml.etree.ElementTree as ET
from datetime import datetime
# import third-party libraries
import numpy as np
# import my libraries
import icc_profile_calc_param as ipcp
import color_space as cs
# information
__author__ = '<NAME>'
__copyright__ = 'Copyright (C) 2020 - <NAME>'
__license__ = 'New BSD License - https://opensource.org/licenses/BSD-3-Clause'
__maintainer__ = '<NAME>'
__email__ = 'toru.ver.11 at-sign gmail.com'
__all__ = []
RENDERING_INTENT_ABSOLUTE = "Absolute Colorimetric"
RENDERING_INTENT_RELATIVE = "Relative Colorimetric"
RENDERING_INTENT_PERCEPTUAL = "Perceptual"
RENDERING_INTENT_SATURATION = "Saturation"
def get_value_from_specific_header_tag(root, header_name):
"""
Example
-------
>>> tree = ET.parse("./aces_ap0_gm30.xml")
>>> root = tree.getroot()
>>> get_value_from_specific_header_tag(root, "RenderingIntent")
"Perceptual"
"""
header_value = None
for element in root.iter(header_name):
header_value = element.text
return header_value
def set_value_to_specific_header_tag(root, header_name, value):
"""
Example
-------
>>> tree = ET.parse("./aces_ap0_gm30.xml")
>>> root = tree.getroot()
>>> set_value_to_specific_header_tag(
... root, "RenderingIntent", "Absolute Colorimetric")
>>> get_value_from_specific_header_tag(root, "RenderingIntent")
"Absolute Colorimetric"
"""
for element in root.iter(header_name):
element.text = value
def create_current_date_str():
"""
create "YYYY-MM-DDTHH:MM:SS" format str.
"""
return datetime.now().strftime("%Y-%m-%dT%H:%M:%S")
def get_icc_tag_element(
root, parent_tag='s15Fixed16ArrayType',
key_tag="TagSignature", key_text='chad',
target_tag='Array'):
icc_tag_element = None
for parent_element in root.iter(parent_tag):
if parent_element.find(key_tag).text == key_text:
icc_tag_element = parent_element.find(target_tag)
return icc_tag_element
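# Usage note: get_icc_tag_element walks every <parent_tag> element, matches the one whose
# <key_tag> child text equals key_text, and returns its <target_tag> child; the wrappers
# below (chad, desc, cprt, wtpt, lumi) only differ in those three names.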
def get_icc_header_element(root, tag="ProfileVersion"):
header_element = None
for element in root.iter(tag):
header_element = element
break
return header_element
def get_chad_mtx_element(root):
return get_icc_tag_element(
root, parent_tag="s15Fixed16ArrayType",
key_tag="TagSignature", key_text="chad", target_tag="Array")
def get_desc_element(root):
return get_icc_tag_element(
root, parent_tag="multiLocalizedUnicodeType",
key_tag="TagSignature", key_text="desc", target_tag="LocalizedText")
def get_cprt_element(root):
return get_icc_tag_element(
root, parent_tag="multiLocalizedUnicodeType",
key_tag="TagSignature", key_text="cprt", target_tag="LocalizedText")
def get_wtpt_element(root):
return get_icc_tag_element(
root, parent_tag="XYZType",
key_tag="TagSignature", key_text="wtpt", target_tag="XYZNumber")
def get_lumi_element(root):
return get_icc_tag_element(
root, parent_tag="XYZType",
key_tag="TagSignature", key_text="lumi", target_tag="XYZNumber")
def get_parametric_curve_element(root):
parametric_curve_element = None
parent_tag = 'parametricCurveType'
key_tag = 'ParametricCurve'
for parent_element in root.iter(parent_tag):
if parent_element.find(key_tag) is not None:
parametric_curve_element = parent_element.find(key_tag)
return parametric_curve_element
def get_rgbXYZ_element_list(root):
rgbXYZ_element_list = [None, None, None]
parent_tag = 'XYZType'
key_tag = 'TagSignature'
target_tag = 'XYZNumber'
    key_text_list = ['rXYZ', 'gXYZ', 'bXYZ']  # standard ICC red/green/blue colorant tag signatures
for idx, key_text in enumerate(key_text_list):
for parent_element in root.iter(parent_tag):
if parent_element.find(key_tag).text == key_text:
rgbXYZ_element_list[idx] = parent_element.find(target_tag)
return rgbXYZ_element_list
def get_chad_matrix_from_chad_mtx_element(chad_mtx_element):
"""
Parameters
----------
chad_mtx_element : xml.etree.ElementTree.Element
An instance of the Element class. It indicate the chad tag.
Returns
-------
ndarray
chromatic adaptation matrix.
Examples
--------
>>> tree = ET.parse("./aces_ap0_gm30.xml")
>>> root = tree.getroot()
>>> chad_mtx_element = get_chad_mtx_element(root)
>>> get_chad_matrix_from_chad_mtx_element(chad_mtx_element)
[[ 1.04788208 0.02958679 -0.00924683]
[ 0.0229187 0.99047852 0.01507568]
[-0.05021667 -0.01707458 0.75167847]]
"""
text_data = chad_mtx_element.text
text_mtx_list = text_data.strip().replace("\n", "").split()
mtx = np.array([float(x) for x in text_mtx_list]).reshape((3, 3))
return mtx
def set_chad_matrix_to_chad_mtx_element(mtx, chad_mtx_element):
"""
Parameters
----------
mtx : ndarray
chromatic adaptation matrix
chad_mtx_element : xml.etree.ElementTree.Element
An instance of the Element class. It indicate the chad tag.
Returns
-------
None
Examples
--------
>>> tree = ET.parse("./aces_ap0_gm30.xml")
>>> root = tree.getroot()
>>> chad_mtx_element = get_chad_mtx_element(root)
>>> print(chad_mtx_element.text)
1.04788208 0.02291870 -0.05021667
0.02958679 0.99047852 -0.01707458
-0.00924683 0.01507568 0.75167847
>>> mtx = np.array([
... [0.1, 0.2, 0.3],
... [0.4, 0.5, 0.6],
... [0.7, 0.8, 0.9]])
>>> set_chad_matrix_to_chad_mtx_element(mtx, chad_mtx_element)
>>> print(chad_mtx_element.text)
0.10000000 0.20000000 0.30000000
0.40000000 0.50000000 0.60000000
0.70000000 0.80000000 0.90000000
"""
head_space = " " * 12
foot_space = " " * 6
buf = "\n"
buf += f"{head_space}{mtx[0][0]:.8f} {mtx[0][1]:.8f} {mtx[0][2]:.8f}\n"
buf += f"{head_space}{mtx[1][0]:.8f} {mtx[1][1]:.8f} {mtx[1][2]:.8f}\n"
buf += f"{head_space}{mtx[2][0]:.8f} {mtx[2][1]:.8f} {mtx[2][2]:.8f}\n"
buf += foot_space
chad_mtx_element.text = buf
def get_parametric_curve_params_from_element(parameteric_curve_element):
"""
Parameters
----------
parameteric_curve_element : xml.etree.ElementTree.Element
An instance of the Element class. It indicate the ParametricCurve tag.
Returns
-------
function_type_str : str
A string of function type.
params : ndarray
A one-dimensional parameter array.
Examples
--------
>>> tree = ET.parse("./p3-2.xml")
>>> root = tree.getroot()
>>> parameteric_curve_element = get_parametric_curve_element(root)
>>> function_type, params = get_parametric_curve_params_from_element(
... parameteric_curve_element)
>>> print(function_type)
3
>>> print(params)
[ 2.39999390 0.94786072 0.05213928 0.07739258 0.04045105 ]
"""
function_type_str = parameteric_curve_element.attrib['FunctionType']
text_data = parameteric_curve_element.text
param_str_list = text_data.strip().replace("\n", "").split()
param_ndarray = np.array([float(x) for x in param_str_list])
return function_type_str, param_ndarray
def set_parametric_curve_params_to_element(
function_type_str, params, parameteric_curve_element):
"""
Parameters
----------
function_type_str : str
A function type.
'0' : simple power function
'3' : sRGB like function
params : ndarray
An array of the function parameters.
parameteric_curve_element : xml.etree.ElementTree.Element
An instance of the Element class. It indicate the ParametricCurve tag.
Returns
-------
None
Examples
--------
>>> tree = ET.parse("./p3-2.xml")
>>> root = tree.getroot()
>>> parameteric_curve_element = get_parametric_curve_element(root)
>>> function_type = "2"
>>> params = np.array([2.4, 0.1, 0.2, 0.3])
>>> set_parametric_curve_params_to_element(
... function_type, params, parameteric_curve_element)
>>> print(parameteric_curve_element.attrib)
{'FunctionType': '2'}
>>> print(parameteric_curve_element.text)
2.39999390 0.10000000 0.20000000 0.30000000
"""
parameteric_curve_element.attrib['FunctionType'] = function_type_str
head_space = '\n' + " " * 8
foot_space = " " * 6
buf = head_space
param_str_array = [f"{x:.8f}" for x in params]
buf += " ".join(param_str_array) + '\n' + foot_space
parameteric_curve_element.text = buf
def get_rgbXYZ_params_from_element(rgbXYZ_element_list):
"""
Parameters
----------
rgbXYZ_element_list : list
list of xml.etree.ElementTree.Element.
Returns
-------
ndarray
A 3x3 matrix that convert from src color space to the PCS.
Examples
--------
>>> tree = ET.parse("./p3-2.xml")
>>> root = tree.getroot()
>>> rgb_XYZ_element_list = get_rgbXYZ_element_list(root)
>>> get_rgbXYZ_params_from_element(rgbXYZ_element_list)
[[ 0.51512146 0.29197693 0.15710449]
[ 0.24119568 0.69224548 0.0665741 ]
[-0.00105286 0.04188538 0.78407288]]
"""
src2pcs_mtx = np.zeros((3, 3))
for idx, rgb_XYZ_element in enumerate(rgbXYZ_element_list):
src2pcs_mtx[0][idx] = float(rgb_XYZ_element.attrib['X'])
src2pcs_mtx[1][idx] = float(rgb_XYZ_element.attrib['Y'])
src2pcs_mtx[2][idx] = float(rgb_XYZ_element.attrib['Z'])
return np.array(src2pcs_mtx)
def set_rgbXYZ_params_to_element(src2pcs_mtx, rgb_XYZ_element_list):
"""
Parameters
----------
src2pcs_mtx : ndarray
A 3x3 matrix that convert from src color space to the PCS.
rgbXYZ_element_list : list
list of xml.etree.ElementTree.Element.
Returns
-------
ndarray
A 3x3 matrix that convert from src color space to the PCS.
Examples
--------
>>> tree = ET.parse("./p3-2.xml")
>>> root = tree.getroot()
>>> rgb_XYZ_element_list = get_rgbXYZ_element_list(root)
>>> src2pcs_mtx = np.array([[0, 1, 2], [3, 4, 5], [6, 7, 8]])
>>> set_rgbXYZ_params_to_element(src2pcs_mtx, rgb_XYZ_element_list)
>>> for rgb_XYZ_element in rgb_XYZ_element_list:
... print(rgb_XYZ_element.attrib)
{'X': '0.00000000', 'Y': '3.00000000', 'Z': '6.00000000'}
{'X': '1.00000000', 'Y': '4.00000000', 'Z': '7.00000000'}
{'X': '2.00000000', 'Y': '5.00000000', 'Z': '8.00000000'}
"""
for idx, rgb_XYZ_element in enumerate(rgb_XYZ_element_list):
rgb_XYZ_element.attrib['X'] = f"{src2pcs_mtx[0][idx]:.08f}"
rgb_XYZ_element.attrib['Y'] = f"{src2pcs_mtx[1][idx]:.08f}"
rgb_XYZ_element.attrib['Z'] = f"{src2pcs_mtx[2][idx]:.08f}"
def get_lumi_params_from_element(lumi_element):
"""
Parameters
----------
lumi_element : xml.etree.ElementTree.Element
        element of the lumi tag.
Returns
-------
float
luminance of the reference white.
Examples
--------
>>> tree = ET.parse("./p3-2.xml")
>>> root = tree.getroot()
>>> lumi_element = get_lumi_element(root)
>>> get_lumi_params_from_element(lumi_element)
100.0
"""
return float(lumi_element.attrib['Y'])
def set_lumi_params_to_element(luminance, lumi_element):
"""
Parameters
----------
luminance : float
luminance of the reference white.
lumi_element : xml.etree.ElementTree.Element
        element of the lumi tag.
Returns
-------
None
Examples
--------
>>> tree = ET.parse("./p3-2.xml")
>>> root = tree.getroot()
>>> lumi_element = get_lumi_element(root)
>>> set_lumi_params_to_element(333.3, lumi_element)
>>> get_lumi_params_from_element(lumi_element)
333.3
"""
lumi_element.attrib['Y'] = f"{luminance:.8f}"
def get_wtpt_params_from_element(wtpt_element):
"""
Parameters
----------
wtpt_element : xml.etree.ElementTree.Element
        element of the wtpt tag.
Returns
-------
ndarray
white point in large XYZ format.
Examples
--------
>>> tree = ET.parse("./p3-2.xml")
>>> root = tree.getroot()
>>> wtpt_element = get_wtpt_element(root)
>>> get_wtpt_params_from_element(wtpt_element)
[ 0.96420288 1. 0.8249054 ]
"""
large_xyz = np.array(
[float(wtpt_element.attrib['X']),
float(wtpt_element.attrib['Y']),
float(wtpt_element.attrib['Z'])])
return large_xyz
def set_wtpt_params_to_element(wtpt, wtpt_element):
"""
Parameters
----------
wtpt : ndarray
white point in large XYZ format.
wtpt_element : xml.etree.ElementTree.Element
        element of the wtpt tag.
Returns
-------
None
Examples
--------
>>> tree = ET.parse("./p3-2.xml")
>>> root = tree.getroot()
>>> wtpt_element = get_wtpt_element(root)
>>> wtpt = np.array([0.9, 1.0, 0.8])
>>> set_wtpt_params_to_element(wtpt, wtpt_element)
>>> get_wtpt_params_from_element(wtpt_element)
[ 0.90000000 1.0000000 0.80000000 ]
"""
wtpt_element.attrib['X'] = f"{wtpt[0]:.8f}"
wtpt_element.attrib['Y'] = f"{wtpt[1]:.8f}"
wtpt_element.attrib['Z'] = f"{wtpt[2]:.8f}"
def xml_parse_test():
tree = ET.parse("./icc_profile_sample/p3-2.xml")
root = tree.getroot()
chad_mtx_element = get_chad_mtx_element(root)
print(chad_mtx_element.text)
get_chad_matrix_from_chad_mtx_element(chad_mtx_element)
set_chad_matrix_to_chad_mtx_element(
mtx=np.ones((3, 3)), chad_mtx_element=chad_mtx_element)
print(chad_mtx_element.text)
desc_element = get_desc_element(root)
print(desc_element.text)
cprt_element = get_cprt_element(root)
print(cprt_element.text)
profile_ver_element = get_icc_header_element(root, tag="ProfileVersion")
print(profile_ver_element.text)
color_space_element = get_icc_header_element(root, tag="DataColourSpace")
print(color_space_element.text)
parameteric_curve_element = get_parametric_curve_element(root)
print(parameteric_curve_element.text)
function_type, params = get_parametric_curve_params_from_element(
parameteric_curve_element)
print(function_type)
print(params)
function_type = "2"
params = np.array([2.4, 0.1, 0.2, 0.3])
set_parametric_curve_params_to_element(
function_type, params, parameteric_curve_element)
print(parameteric_curve_element.attrib)
print(parameteric_curve_element.text)
rgb_XYZ_element_list = get_rgbXYZ_element_list(root)
for rgb_XYZ_element in rgb_XYZ_element_list:
print(rgb_XYZ_element.attrib)
src2pcs_mtx = get_rgbXYZ_params_from_element(rgb_XYZ_element_list)
print(src2pcs_mtx)
src2pcs_mtx = np.array([[0, 1, 2], [3, 4, 5], [6, 7, 8]])
set_rgbXYZ_params_to_element(src2pcs_mtx, rgb_XYZ_element_list)
for rgb_XYZ_element in rgb_XYZ_element_list:
print(rgb_XYZ_element.attrib)
ri = get_value_from_specific_header_tag(root, "RenderingIntent")
print(ri)
set_value_to_specific_header_tag(
root, "RenderingIntent", RENDERING_INTENT_PERCEPTUAL)
current_time_str = create_current_date_str()
print(current_time_str)
wtpt_element = get_wtpt_element(root)
print(wtpt_element.attrib)
lumi_element = get_lumi_element(root)
print(get_lumi_params_from_element(lumi_element))
set_lumi_params_to_element(1000.0, lumi_element)
wtpt_element = get_wtpt_element(root)
print(wtpt_element.attrib)
print(get_wtpt_params_from_element(wtpt_element))
wtpt = np.array([0.123, 0.46, 0.88])
set_wtpt_params_to_element(wtpt, wtpt_element)
tree.write("test_out.xml")
def create_profle_header(root):
"""
create the profile header.
"""
set_value_to_specific_header_tag(
root, "PreferredCMMType", "ADBE")
set_value_to_specific_header_tag(
root, "ProfileVersion", "4.30")
set_value_to_specific_header_tag(
root, "CreationDateTime", create_current_date_str())
set_value_to_specific_header_tag(
root, "PrimaryPlatform", "MSFT")
set_value_to_specific_header_tag(
root, "DeviceManufacturer", "")
set_value_to_specific_header_tag(
root, "DeviceModel", "")
set_value_to_specific_header_tag(
root, "ProfileCreator", "")
set_value_to_specific_header_tag(
root, "ProfileID", "")
def create_simple_power_gamma_profile(
gamma=2.4, src_white=np.array([0.3127, 0.3290]),
src_primaries=np.array([[0.680, 0.320], [0.265, 0.690], [0.15, 0.06]]),
desc_str="Gamam=2.4_DCI-P3_D65",
cprt_str="Copyright 2020 HOGEHOGE Corp.",
output_name="Gamam=2.4_DCI-P3_D65.xml"):
"""
    Create a simple ICC profile.
    The tone curve must be a pure power function of the form "y = x ** gamma".
"""
tree = ET.parse("./icc_profile_sample/base_profile_v4.xml")
root = tree.getroot()
        logger.info("La lista de asistencias de mes son {0}".format(asistencias_curso_mes))
matriculados_aula = AulaMatricula.objects.filter(aula=aula).order_by('matricula__alumno__apellido_pa')
alumnos = []
for matriculado in matriculados_aula:
alumnos.append(matriculado.matricula.alumno)
num_alumnos = len(alumnos)
logger.info("El número de alumnos matriculados en esta aula es {0}".format(num_alumnos))
fechas = []
lista_asistencias_dia = []
for dia in range(0, num_dias):
asistencias_curso_dia = asistencias_curso_mes.filter(fecha__day=dia+1)
logger.info("La lista de asistencias del día {0} mes son {1}".format(dia+1, asistencias_curso_dia))
n = 0
for asistencias_dias in asistencias_curso_dia:
lista_asistencias_dia.append(asistencias_dias.estado_asistencia)
if n == 0:
fechas.append(asistencias_dias.fecha)
n = n + 1
num_horizontal = len(fechas)
num_vertical = len(alumnos)
aula_selected = Aula.objects.get(id_aula=aula)
logger.info("La lista de asistencias de mes son {0}".format(lista_asistencias_dia))
logger.info("La lista de fechas de mes son {0}".format(fechas))
contexto = self.VisualizarAsistenciaform(request)
contexto['asistencias'] = asistencias_curso_mes
contexto['num_hor'] = num_horizontal
contexto['num_ver'] = num_vertical
contexto['fechas'] = fechas
contexto['alumnos'] = alumnos
contexto['mes_selected'] = mes
contexto['aula_selected'] = aula_selected
return render(request, template_name=self.template_name, context=contexto)
#################################################
##### NOTAS ALUMNOS #####
#################################################
#################################################
##### EVENTOS #####
#################################################
class EventoCreateView(CreateView):
model = Evento
form_class = EventoForm
template_name = "evento_form.html"
success_url = reverse_lazy("academic:evento_list")
def form_valid(self, form):
form.instance.colegio = Colegio.objects.get(pk=get_current_colegio())
return super(EventoCreateView, self).form_valid(form)
def get(self, request, *args, **kwargs):
roles = ['promotor', 'director', 'administrativo']
if validar_roles(roles=roles):
personalcolegio = PersonalColegio.objects.filter(colegio_id=get_current_colegio(), activo=True)
personal = []
for personalcol in personalcolegio:
personal.append(personalcol.personal)
return render(request, template_name=self.template_name, context={
'form': self.form_class,
'empleados': personal,
})
else:
return HttpResponseRedirect(settings.REDIRECT_PERMISOS)
class EventoListView(ListView):
model = Evento
form_class = EventoForm
template_name = "evento_list.html"
def get(self, request, *args, **kwargs):
roles = ['promotor', 'director', 'administrativo']
if validar_roles(roles=roles):
evento = Evento.objects.filter(colegio_id=get_current_colegio())
return render(request, template_name=self.template_name, context={
'eventos':evento,
})
else:
return HttpResponseRedirect(settings.REDIRECT_PERMISOS)
class EventoDetailView(DetailView):
model = Evento
form_class = EventoForm
template_name = "evento_detail.html"
def get(self, request, *args, **kwargs):
roles = ['promotor', 'director', 'administrativo']
if validar_roles(roles=roles):
evento = Evento.objects.get(id_evento=request.GET['evento'])
return render(request, template_name=self.template_name, context={
'evento':evento,
})
else:
return HttpResponseRedirect(settings.REDIRECT_PERMISOS)
#################################################
##### CRUD DE MATRICULA AULA #####
#################################################
class AulaMatriculaCreateView(TemplateView):
model = AulaMatricula
success_url = reverse_lazy('academic:aula_list')
template_name = 'aulamatricula_form.html'
def get(self, request, *args, **kwargs):
roles = ['promotor', 'director', 'administrativo']
if validar_roles(roles=roles):
no_matriculados_aulas = []
aula_actual = Aula.objects.get(id_aula=request.GET['aula'])
matriculas = Matricula.objects.filter(tipo_servicio=aula_actual.tipo_servicio,colegio_id=get_current_colegio(), activo=True)
for matricula in matriculas:
alumno_aula = AulaMatricula.objects.filter(matricula=matricula, activo = True)
if alumno_aula:
logger.info("El alumno {0} ya tiene aula".format(matricula.alumno))
else:
no_matriculados_aulas.append(matricula)
return render(request, template_name=self.template_name, context={
'aula': aula_actual,
'matriculas': no_matriculados_aulas,
})
else:
return HttpResponseRedirect(settings.REDIRECT_PERMISOS)
def post(self, request, *args, **kwargs):
logger.info(request.POST)
aula_actual = Aula.objects.get(id_aula=request.POST['aula'])
matriculas = Matricula.objects.filter(tipo_servicio=aula_actual.tipo_servicio, colegio_id=get_current_colegio(),
activo=True)
data_form = request.POST
for matricula in matriculas:
try:
text = "item{0}".format(matricula.id_matricula)
if data_form[text]:
aulamatricula = self.model(
aula=aula_actual,
matricula=matricula,
)
aulamatricula.save()
logger.info("se creo un registro")
            except Exception:
logger.info("hay un error")
return HttpResponseRedirect(reverse('academic:aula_list'))
class AulaMatriculaDeleteView(TemplateView):
model = AulaMatricula
template_name = "aulamatricula_form.html"
def get(self, request, *args, **kwargs):
roles = ['promotor', 'director', 'administrativo']
if validar_roles(roles=roles):
print(request.GET['alumno'])
matricula = Matricula.objects.get(id_matricula=int(request.GET['alumno']))
alumno = AulaMatricula.objects.get(matricula=matricula)
alumno.activo = False
alumno.save()
return HttpResponseRedirect(reverse('academic:aula_list'))
else:
return HttpResponseRedirect(settings.REDIRECT_PERMISOS)
#################################################
##### CRUD DE PERIODO ACADEMICO #####
#################################################
class PeriodoAcademicoListView(MyLoginRequiredMixin, ListView):
model = PeriodoAcademico
template_name = 'periodo_list.html'
def get(self, request, *args, **kwargs):
roles = ['promotor', 'director', 'administrativo']
if validar_roles(roles=roles):
return super(PeriodoAcademicoListView, self).get(request, *args, **kwargs)
else:
return HttpResponseRedirect(settings.REDIRECT_PERMISOS)
def get_context_data(self, **kwargs):
context = super(PeriodoAcademicoListView, self).get_context_data(**kwargs)
request = get_current_request()
if request.session.get('colegio'):
id = request.session.get('colegio')
context['idcolegio'] = id
return context
class PeriodoAcademicoDetailView(UpdateView):
model = PeriodoAcademico
form_class = PeriodoAcademicoForm
template_name = 'periodo_detail.html'
def get(self, request, *args, **kwargs):
roles = ['promotor', 'director', 'administrativo']
if validar_roles(roles=roles):
return super(PeriodoAcademicoDetailView, self).get(request, *args, **kwargs)
else:
return HttpResponseRedirect(settings.REDIRECT_PERMISOS)
class PeriodoAcademicoCreationView(CreateView):
model = PeriodoAcademico
form_class = PeriodoAcademicoForm
success_url = reverse_lazy('academic:periodo_list')
template_name = 'periodo_form.html'
def form_valid(self, form):
form.instance.colegio = Colegio.objects.get(pk=get_current_colegio())
return super(PeriodoAcademicoCreationView, self).form_valid(form)
def get(self, request, *args, **kwargs):
roles = ['promotor', 'director', 'administrativo']
if validar_roles(roles=roles):
return super(PeriodoAcademicoCreationView, self).get(request, *args, **kwargs)
else:
return HttpResponseRedirect(settings.REDIRECT_PERMISOS)
class PeriodoAcademicoUpdateView(UpdateView):
model = PeriodoAcademico
form_class = PeriodoAcademicoForm
success_url = reverse_lazy('academic:periodo_list')
template_name = 'periodo_form.html'
#####################################################
##### NOTAS DE ALUMNOS #####
#####################################################
class RegistrarNotasAlumnosView(TemplateView):
model = Notas
template_name = 'notas_registrar.html'
success_url = reverse_lazy('academic:asistencia_registrar_dia')
form2 = RegistrarNotas2Form
def RegistrarNotas1Form(self, request):
roles = ['promotor', 'director']
if validar_roles(roles=roles):
            # Load the academic periods of the current school
id_colegio = get_current_colegio()
periodos_colegio = PeriodoAcademico.objects.filter(colegio=id_colegio).order_by('nombre')
if periodos_colegio.count() == 0:
periodos_colegio = ["No hay periodos registrados"]
            # Load the classrooms associated with the school
id_colegio = get_current_colegio()
aulas_colegio = Aula.objects.filter(tipo_servicio__colegio=id_colegio).order_by('nombre')
if aulas_colegio.count() == 0:
aulas_colegio = ["No hay aulas registradas"]
cursos =[]
cursos_aula = AulaCurso.objects.filter(curso__colegio=id_colegio)
for curso_aula in cursos_aula:
cursos.append(curso_aula.curso)
return {'aulas_colegio': aulas_colegio, 'periodos_colegio': periodos_colegio, 'cursos_aula': cursos}
else:
mensaje_error = "No tienes acceso a esta vista"
return {'mensaje_error': mensaje_error} # return context
def get(self, request, *args, **kwargs):
super(RegistrarNotasAlumnosView, self).get(request, *args, **kwargs)
contexto = self.RegistrarNotas1Form(request)
contexto2 ={'form2': self.form2}
contexto.update(contexto2)
if 'mensaje_error' in contexto.keys():
return HttpResponseRedirect(settings.REDIRECT_PERMISOS)
else:
return render(request, self.template_name, contexto) # return context
def post(self, request, *args, **kwargs):
logger.info("Estoy en el POST")
logger.info("Los datos de llegada son {0}".format(request.POST))
if 'aula' in request.POST.keys():
aula = request.POST["aula"]
periodo = request.POST["periodo_academico"]
curso = request.POST["curso"]
aula_selected = Aula.objects.get(id_aula=aula)
periodo_selected = PeriodoAcademico.objects.get(id_periodo_academico=periodo)
curso_selected = Curso.objects.get(id_curso=curso)
matriculadosaula = AulaMatricula.objects.filter(aula=aula).order_by('matricula__alumno__apellido_pa')
logger.info("Datos son {0}".format(matriculadosaula))
alumnos = []
for matriculado in matriculadosaula:
alumnos.append(matriculado.matricula.alumno)
contexto = self.RegistrarNotas1Form(request)
contexto2 = {'form2': self.form2}
contexto.update(contexto2)
contexto['alumnos'] = alumnos
contexto['aula_selected'] = aula_selected
contexto['periodo_selected'] = periodo_selected
contexto['curso_selected'] = curso_selected
return render(request, template_name=self.template_name, context=contexto)
else:
logger.info("Estoy en el POST")
logger.info("Los datos de llegada son {0}".format(request.POST))
data_post = request.POST
alumnos_id = data_post.getlist('id')
notas = data_post.getlist('nota')
curso_id = data_post['curso']
periodo_id = data_post['periodo']
colegio_id = get_current_colegio()
curso = Curso.objects.get(id_curso=curso_id)
periodo = PeriodoAcademico.objects.get(id_periodo_academico=periodo_id)
colegio = Colegio.objects.get(id_colegio=colegio_id)
num = len(alumnos_id)
for n in range(0, num):
alumno = Alumno.objects.get(id_alumno=alumnos_id[n])
nota = Notas(alumno=alumno, curso=curso, periodo_academico=periodo, nota=notas[n], colegio=colegio)
nota.save()
return redirect('academic:notas_ver')
class VisualizarNotasView(TemplateView):
model = Notas
template_name = "notas_ver.html"
def VisualizarNotasform(self, request):
roles = ['promotor', 'director']
if validar_roles(roles=roles):
            # Load the months
meses_todos = ["Enero", "Febrero", "Marzo", "Abril", "Mayo", "Junio", "Julio", "Agosto",
"Setiembre", "Octubre", "Noviembre", "Diciembre"]
num_mes = datetime.date.today().month
meses = []
for i in range(0, num_mes + 1):
meses.append(meses_todos[i])
id_colegio = get_current_colegio()
aulas = Aula.objects.filter(tipo_servicio__colegio=id_colegio).order_by('nombre')
return {'meses': meses_todos, 'aulas': aulas}
else:
mensaje_error = "No tienes acceso a esta vista"
return {'mensaje_error': mensaje_error} # return context
def get(self, request, *args, **kwargs):
        super(VisualizarNotasView, self).get(request, *args, **kwargs)
        contexto = self.VisualizarNotasform(request)
if 'mensaje_error' in contexto.keys():
return HttpResponseRedirect(settings.REDIRECT_PERMISOS)
else:
return render(request, self.template_name, contexto) # return context
def post(self, request, *args, **kwargs):
mes = request.POST["mes"]
aula = request.POST["aula"]
num_mes = obtener_mes(mes)
logger.info("Estoy en el Post, datos de llegada son {0}".format(request.POST))
meses_dias = [31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31]
num_dias = meses_dias[num_mes - 1]
# id_curso =
asistencias_curso = self.model.objects.filter()
        # Filter by year
anio = datetime.date.today().year
asistencias_curso_anio = asistencias_curso.filter(fecha__year=anio)
        # Filter by month
asistencias_curso_mes = asistencias_curso_anio.filter(fecha__month=num_mes)
logger.info("La lista de asistencias de mes son {0}".format(asistencias_curso_mes))
matriculados_aula = AulaMatricula.objects.filter(aula=aula).order_by('matricula__alumno__apellido_pa')
alumnos = []
for matriculado in matriculados_aula:
alumnos.append(matriculado.matricula.alumno)
num_alumnos = len(alumnos)
logger.info("El número de alumnos matriculados en esta aula es {0}".format(num_alumnos))
fechas = []
lista_asistencias_dia = []
for dia in range(0, num_dias):
asistencias_curso_dia = asistencias_curso_mes.filter(fecha__day=dia + 1)
logger.info(
"La lista de asistencias del día {0} mes son {1}".format(dia + 1, asistencias_curso_dia))
n = 0
for asistencias_dias in asistencias_curso_dia:
lista_asistencias_dia.append(asistencias_dias.estado_asistencia)
if n == 0:
fechas.append(asistencias_dias.fecha)
n = n + 1
num_horizontal = len(fechas)
num_vertical = len(alumnos)
aula_selected = Aula.objects.get(id_aula=aula)
logger.info("La lista de asistencias de mes son {0}".format(lista_asistencias_dia))
logger.info("La lista de fechas de mes son {0}".format(fechas))
        contexto = self.VisualizarNotasform(request)
contexto['asistencias'] = asistencias_curso_mes
contexto['num_hor'] = num_horizontal
contexto['num_ver'] = num_vertical
contexto['fechas'] = fechas
contexto['alumnos'] = alumnos
contexto['mes_selected'] = mes
contexto['aula_selected'] = aula_selected
return render(request, template_name=self.template_name, context=contexto)
#################################################
##### HORARIOS DE CURSOS #####
#################################################
class HorarioAulaCreateView(CreateView):
model = HorarioAula
form_class = HorarioAulaForm
success_url = reverse_lazy('academic:aula_list')
template_name = 'horarios_aula.html'
"""
def get(self, request, *args, **kwargs):
roles = ['promotor', 'director', 'administrativo', 'tesorero']
if validar_roles(roles=roles):
curso = CursoDocente.objects.filter(curso=request.GET["curso"], activo=True)
return render(request, template_name=self.template_name, context={
'form': self.form_class,
'curso': curso,
})
else:
return HttpResponseRedirect(settings.REDIRECT_PERMISOS)
"""
def get_cursos(request):
if request.is_ajax():
id_aula = request.GET.get("id_aula", " ")
aula_cursos = AulaCurso.objects.filter(aula__id_aula=int(id_aula))
results = []
for aula_curso in aula_cursos:
aula_curso_json = {}
aula_curso_json['id'] = aula_curso.curso.id_curso
aula_curso_json['value'] = aula_curso.curso.nombre
results.append(aula_curso_json)
data = json.dumps(results)
else:
data = 'fail'
mimetype = 'application/json'
return HttpResponse(data, mimetype)
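# Illustrative response shape for the AJAX lookup above (ids and course names are made
# up); when the request is not an AJAX call the plain string 'fail' is returned instead:
#   [{"id": 3, "value": "Matematica"}, {"id": 7, "value": "Comunicacion"}]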
#########################################
###### RECORDATORIOS TAREAS Y PCS #######
#########################################
class RecordatorioAulaCreateView(CreateView):
model = RecordatorioAula
success_url = reverse_lazy('academic:aula_list')
template_name = 'recordatorio_aula_form.html'
form_class = RecordatorioAulaForm
def get(self, request, *args, **kwargs):
roles = ['promotor', 'director', 'administrativo', 'tesorero']
if validar_roles(roles=roles):
aula_actual = Aula.objects.get(id_aula=request.GET['aula'])
return render(request, template_name=self.template_name, context={
'aula': aula_actual,
'form': self.form_class,
})
else:
return HttpResponseRedirect(settings.REDIRECT_PERMISOS)
def post(self, request, *args, **kwargs):
data_form = request.POST
aula = Aula.objects.get(id_aula=request.POST['aula'])
nombre = data_form['nombre']
fecha = data_form['fecha_programacion']
        estado = 1  # This means the event was created
        descripcion = data_form['descripcion']
import xarray as xr
import numpy as np
import datetime
import healpy as hp
import time
from torch.utils.data import Dataset, DataLoader
from modules.data import WeatherBenchDatasetIterative
# Utils
def _inner(x, y):
result = np.matmul(x[..., np.newaxis, :], y[..., :, np.newaxis])
return result[..., 0, 0]
def inner_product(x, y, dim):
return xr.apply_ufunc(_inner, x, y, input_core_dims=[[dim], [dim]])
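# Minimal sketch, not part of the original module: inner_product reduces the named core
# dimension to a dot product; here two 3-vectors stored along 'level'.
def _example_inner_product():
    a = xr.DataArray(np.array([1.0, 2.0, 3.0]), dims=['level'])
    b = xr.DataArray(np.array([4.0, 5.0, 6.0]), dims=['level'])
    return inner_product(a, b, dim='level')  # 0-d DataArray equal to 32.0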
def create_iterative_observations_healpix(ds, lead_time, max_lead_time, nb_timesteps, test_years, nodes):
lead_times = np.arange(lead_time, max_lead_time + lead_time, lead_time)
data = ds.to_array(dim='level', name='Dataset').transpose('time', 'node', 'level')
n_samples = data.isel(time=slice(0, -nb_timesteps*lead_time)).shape[0] - max_lead_time
obs_list = []
for lead in lead_times:
obs_list.append(data.isel(time=slice(lead, lead + n_samples)).isel(level=slice(0, 2)).values)
observations_numpy = np.array(obs_list)
# Lat lon coordinates
nside = int(np.sqrt(nodes/12))
out_lon, out_lat = hp.pix2ang(nside, np.arange(nodes), lonlat=True)
# Actual times
start = np.datetime64(test_years[0], 'h') + np.timedelta64(lead_time, 'h')
stop = start + np.timedelta64(n_samples, 'h')
times = np.arange(start, stop)
# Variables
var_dict_out = {var: None for var in ['z', 't']}
das = [];
lev_idx = 0
for var, levels in var_dict_out.items():
if levels is None:
das.append(xr.DataArray(
observations_numpy[:, :, :, lev_idx],
dims=['lead_time', 'time', 'node'],
coords={'lead_time': lead_times, 'time': times, 'node': np.arange(nodes)},
name=var
))
lev_idx += 1
else:
nlevs = len(levels)
das.append(xr.DataArray(
observations_numpy[:, :, :, lev_idx:lev_idx+nlevs],
dims=['lead_time', 'time', 'node', 'level'],
                coords={'lead_time': lead_times, 'time': times, 'node': np.arange(nodes), 'level': levels},
name=var
))
lev_idx += nlevs
observation_ds = xr.merge(das)
observation_ds = observation_ds.assign_coords({'lat': out_lat, 'lon': out_lon})
return observation_ds
def create_iterative_observations_hp(input_dir, test_years, lead_time, max_lead_time, nside, nb_timesteps=2):
z500 = xr.open_mfdataset(f'{input_dir}geopotential_500/*.nc', combine='by_coords').sel(time=slice(*test_years))
t850 = xr.open_mfdataset(f'{input_dir}temperature_850/*.nc', combine='by_coords').sel(time=slice(*test_years))
test_data = xr.merge([z500, t850], compat='override')
n_samples = test_data.isel(time=slice(0, -nb_timesteps*lead_time)).dims['time'] - max_lead_time
nb_iter = max_lead_time // lead_time
n_pixels = 12*(nside**2)
# Lead times
lead_times = np.arange(lead_time, max_lead_time + lead_time, lead_time)
# Lat lon coordinates
out_lon, out_lat = hp.pix2ang(nside, np.arange(n_pixels), lonlat=True)
# Actual times
start = np.datetime64(test_years[0], 'h') + np.timedelta64(lead_time, 'h')
stop = start + np.timedelta64(n_samples, 'h')
times = np.arange(start, stop, 1)
# Variables
data_vars = ['z', 't']
var_dict_out = {var: None for var in data_vars}
data = np.zeros((2, nb_iter, n_samples, 32, 64))
for i in range(nb_iter):
data[0, i, :, :, :] = test_data.z.isel(time=slice(lead_time*(i+1), lead_time*(i+1) + n_samples)).values
data[1, i, :, :, :] = test_data.t.isel(time=slice(lead_time*(i+1), lead_time*(i+1) + n_samples)).values
das = [];
lev_idx = 0
for var in data_vars:
das.append(xr.DataArray(
data[lev_idx, :, :, :],
dims=['lead_time', 'time', 'node'],
coords={'lead_time': lead_times, 'time': times, 'node': np.arange(n_pixels)},
name=var
))
lev_idx += 1
observations = xr.merge(das)
observations = observations.assign_coords({'lat': out_lat, 'lon': out_lon})
return observations
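# Usage sketch (directory, years and resolution are illustrative): build the iterative
# "observation" dataset against which unfolded forecasts are later compared.
#   obs = create_iterative_observations_hp('/path/to/weatherbench/', ('2017', '2018'),
#                                          lead_time=6, max_lead_time=120, nside=16)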
def create_iterative_observations_eq(input_dir, test_years, lead_time, max_lead_time, nside, nb_timesteps=2):
z500 = xr.open_mfdataset(f'{input_dir}geopotential_500/*.nc', combine='by_coords').sel(time=slice(*test_years))
t850 = xr.open_mfdataset(f'{input_dir}temperature_850/*.nc', combine='by_coords').sel(time=slice(*test_years))
test_data = xr.merge([z500, t850], compat='override')
n_samples = test_data.isel(time=slice(0, -nb_timesteps*lead_time)).dims['time'] - max_lead_time
nb_iter = max_lead_time // lead_time
n_pixels = 12*(nside**2)
# Lead times
lead_times = np.arange(lead_time, max_lead_time + lead_time, lead_time)
# Actual times
start = np.datetime64(test_years[0], 'h') + np.timedelta64(lead_time, 'h')
stop = start + np.timedelta64(n_samples, 'h')
times = np.arange(start, stop, 1)
# Variables
data_vars = ['z', 't']
var_dict_out = {var: None for var in data_vars}
data = np.zeros((2, nb_iter, n_samples, 32, 64))
for i in range(nb_iter):
data[0, i, :, :, :] = test_data.z.isel(time=slice(lead_time*(i+1), lead_time*(i+1) + n_samples)).values
data[1, i, :, :, :] = test_data.t.isel(time=slice(lead_time*(i+1), lead_time*(i+1) + n_samples)).values
das = [];
lev_idx = 0
for var in data_vars:
das.append(xr.DataArray(
data[lev_idx, :, :, :, :],
dims=['lead_time', 'time', 'lat', 'lon'],
coords={'lead_time': lead_times, 'time': times, 'lat': test_data.lat.values, 'lon': test_data.lon.values},
name=var
))
lev_idx += 1
observations = xr.merge(das)
return observations
# Predict
def create_iterative_predictions_healpix(model, device, dg):
batch_size = dg.batch_size
delta_t = dg.dataset.lead_time
max_lead_time = dg.dataset.max_lead_time
initial_lead_time = delta_t
nodes = dg.dataset.nodes
nside = int(np.sqrt(nodes/12))
n_samples = dg.dataset.n_samples
in_feat = dg.dataset.in_features
out_feat = dg.dataset.out_features
data_vars = dg.dataset.mean.level.values.tolist()[:out_feat]
train_std = dg.dataset.std.values[:out_feat]
train_mean = dg.dataset.mean.values[:out_feat]
# Lead times
lead_times = np.arange(delta_t, max_lead_time + delta_t, delta_t)
# Lat lon coordinates
out_lon, out_lat = hp.pix2ang(nside, np.arange(nodes), lonlat=True)
# Actual times
start = np.datetime64(dg.dataset.years[0], 'h') + np.timedelta64(initial_lead_time, 'h')
stop = start + np.timedelta64(dg.dataset.n_samples, 'h')
times = np.arange(start, stop)
# Variables
var_dict_out = {var: None for var in data_vars}
# Radiation
constants = np.array(dg.dataset.data.isel(level=slice(out_feat, None)).values)
dataloader = dg
predictions = []
model.eval()
for lead in lead_times:
outputs = []
state = []
states = np.empty((n_samples, nodes, in_feat))
time1 = time.time()
for i, (sample, _) in enumerate(dataloader):
inputs = sample[0].to(device)
output = model(inputs)
outputs.append(output.detach().cpu().clone().numpy()[:, :, :out_feat])
state.append(output.detach().cpu().clone().numpy())
preds = np.concatenate(outputs)
states[:, :, :out_feat] = np.concatenate(state)
states[:, :, out_feat:] = constants[lead:n_samples+lead, :]
predictions.append(preds * train_std + train_mean)
new_set = WeatherBenchDatasetIterative(states)
dataloader = DataLoader(new_set, batch_size=batch_size, shuffle=False, num_workers=10)
time2 = time.time()
predictions = np.array(predictions)
das = [];
lev_idx = 0
for var in data_vars:
das.append(xr.DataArray(
predictions[:, :, :, lev_idx],
dims=['lead_time', 'time', 'node'],
coords={'lead_time': lead_times, 'time': times, 'node': np.arange(nodes)},
name=var
))
lev_idx += 1
prediction_ds = xr.merge(das)
prediction_ds = prediction_ds.assign_coords({'lat': out_lat, 'lon': out_lon})
return prediction_ds
def create_predictions(model, device, dg):
"""Create direct predictions for models using 1D signals (eg GCNN)
Parameters
----------
model : torch.nn.Module
Trained model
device
GPU / CPU where model is running
dg : DataLoader
Test data
Returns
-------
predictions : xr.Dataset
Model predictions
"""
lats = np.arange(-90+dg.dataset.res/2, 90+dg.dataset.res/2, dg.dataset.res)
lons = np.arange(0, 360, dg.dataset.res)
lat = xr.DataArray(lats, coords=[lats], dims=['lat'], name='lat')
lon = xr.DataArray(lons, coords=[lons], dims=['lon'], name='lon')
start = np.datetime64(dg.dataset.years[0], 'h') + np.timedelta64(dg.dataset.lead_time, 'h')
stop = np.datetime64(str(int(dg.dataset.years[1])+1), 'h')
times = np.arange(start, stop)
valid_time = xr.DataArray(times, coords=[times], dims='time', name='time', attrs={'long_name': 'time'})
out_features = dg.dataset.out_features
output_dim = (len(lats), len(lons), dg.dataset.out_features)
outputs = []
for i, (sample, _) in enumerate(dg):
sample = sample.to(device)
output = model(sample).detach().cpu().clone().numpy().reshape((-1, *output_dim))
outputs.append(output)
preds = np.concatenate(outputs)
# Unnormalize
preds = preds * dg.dataset.std.values[:out_features] + dg.dataset.mean.values[:out_features]
das = []
lev_idx = 0
for var, levels in dg.dataset.var_dict_out.items():
if levels is None:
das.append(xr.DataArray(
preds[:, :, :, lev_idx],
dims=['time', 'lat', 'lon'],
coords={'time': valid_time, 'lat': lat, 'lon': lon},
name=var
))
lev_idx += 1
else:
nlevs = len(levels)
das.append(xr.DataArray(
preds[:, :, :, lev_idx:lev_idx+nlevs],
dims=['time', 'lat', 'lon', 'level'],
coords={'time': valid_time, 'lat': lat, 'lon': lon, 'level': levels},
name=var
))
lev_idx += nlevs
return xr.merge(das)
def create_predictions_2D(model, dg, mean, std):
"""Create direct predictions for models using 2D signals (images)
Parameters
----------
model : torch.nn.Module
Trained model
dg : DataLoader
Test data
mean : np.ndarray
Training set mean
std : np.ndarray
Training set std
Returns
-------
predictions : xr.Dataset
Model predictions
"""
    device = next(model.parameters()).device  # run inference on the same device as the model
    outputs = []
    for i, (sample, _) in enumerate(dg):
        sample = sample.to(device)
output = model(sample).detach().cpu().clone().permute(0, 2, 3, 1).numpy()
outputs.append(output)
preds = np.concatenate(outputs)
# Unnormalize
preds = preds * std.values + mean.values
das = []
lev_idx = 0
for var, levels in dg.dataset.var_dict.items():
if levels is None:
das.append(xr.DataArray(
preds[:, :, :, lev_idx],
dims=['time', 'lat', 'lon'],
coords={'time': dg.dataset.valid_time, 'lat': dg.dataset.data.lat, 'lon': dg.dataset.data.lon},
name=var
))
lev_idx += 1
else:
nlevs = len(levels)
das.append(xr.DataArray(
preds[:, :, :, lev_idx:lev_idx+nlevs],
dims=['time', 'lat', 'lon', 'level'],
coords={'time': dg.dataset.valid_time, 'lat': dg.dataset.data.lat, 'lon': dg.dataset.data.lon, 'level': levels},
name=var
))
lev_idx += nlevs
return xr.merge(das)
def compute_anomalies(ds, mean):
""" Computes anomalies by removing relevant average to data
Parameters
----------
ds : xr.Dataset
        Dataset from which to compute the anomalies
    mean : string
        Which mean to remove from the data. Options are {"monthly", "weekly"}
Returns
-------
anomalies : xr.Dataset
Demeaned dataset
"""
assert mean in ["monthly", "weekly"], "Parameter mean should be either 'monthly' or 'weekly'"
    if mean == "monthly":
anomalies = ds.groupby('time.month') - ds.groupby('time.month').mean()
else:
anomalies = ds.groupby('time.week') - ds.groupby('time.week').mean()
return anomalies
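# Usage sketch, not part of the original module: a toy daily series spanning two months,
# so the monthly climatology removed by compute_anomalies is easy to inspect.
def _example_compute_anomalies():
    times = np.arange(np.datetime64('2000-01-01'), np.datetime64('2000-03-01'))
    ds = xr.Dataset({'t': ('time', np.arange(times.size, dtype=float))},
                    coords={'time': times})
    return compute_anomalies(ds, mean='monthly')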
# Metrics for healpix iterative predictions
def compute_rmse_healpix(pred, obs, dims=('node', 'time')):
error = pred - obs
rmse = np.sqrt(((error)**2).mean(dims))
return rmse.drop('lat').drop('lon').load()
def compute_relBIAS_map_healpix(pred, obs):
""" Compute the relative bias from two xr.DataArrays given some dimensions
Parameters
----------
pred : xr.DataArray
Forecast. Time coordinate must be validation time.
obs_mean : xr.DataArray
Mean of observations across "time" and "lead_time" dimensions
Returns
-------
relBIAS : xr.DataArray
Relative bias map
"""
error = pred - obs
rbias = error.mean(('time')).load() / obs.mean(('time')).load()
return rbias
def compute_weighted_rmse(da_fc, da_true, dims=xr.ALL_DIMS):
""" Compute the root mean squared error (RMSE) with latitude weighting from two xr.DataArrays.
Parameters
----------
da_fc : xr.DataArray
Forecast
da_true : xr.DataArray
Labels
dims (str):
Dimensions over which to compute the metric
Returns
-------
rmse : xr.DataArray
        Latitude weighted root mean squared error
    """
#!/usr/bin/env python
from __future__ import division
import os
import argparse
import itertools
from functools import partial
import numpy as np
import pandas as pd
from sklearn.model_selection import train_test_split
import matplotlib.pyplot as plt
import matplotlib.gridspec as gridspec
import seaborn as sns
from dask import delayed, get, multiprocessing
from dask.diagnostics import ProgressBar
import warnings
import comptools as comp
from pyunfold import iterative_unfold
warnings.filterwarnings("ignore", category=UserWarning, module="matplotlib")
warnings.filterwarnings("ignore", category=DeprecationWarning, module="sklearn")
def get_test_counts(case, composition, num_groups, energy_midpoints,
flux_to_counts_scaling):
log_energy_midpoints = np.log10(energy_midpoints)
scale = 1.0 / num_groups
if case == 'constant':
counts = np.array([1000]*len(log_energy_midpoints))
elif case == 'simple_power_law':
comp_flux = comp.broken_power_law_flux(energy_midpoints,
gamma_before=-2.7,
# gamma_before=-3.0,
energy_break=3e12)
comp_flux = scale * comp_flux
counts = flux_to_counts_scaling * comp_flux
elif case == 'broken_power_law_0':
comp_flux = comp.broken_power_law_flux(energy_midpoints,
energy_break=10**7.0)
comp_flux = scale * comp_flux
counts = flux_to_counts_scaling * comp_flux
elif case in ['broken_power_law_1', 'broken_power_law_2']:
if composition in ['PPlus', 'He4Nucleus', 'light']:
gamma_after = -3.0 if case == 'broken_power_law_1' else -4.0
else:
gamma_after = -4.0 if case == 'broken_power_law_1' else -3.0
comp_flux = comp.broken_power_law_flux(energy_midpoints,
energy_break=10**7.0,
gamma_before=-2.7,
gamma_after=gamma_after)
comp_flux = scale * comp_flux
counts = flux_to_counts_scaling * comp_flux
elif case in ['H4a', 'H3a']:
model_flux = comp.model_flux(model=case,
energy=energy_midpoints,
num_groups=num_groups)
counts = model_flux['flux_{}'.format(composition)].values * flux_to_counts_scaling
else:
raise ValueError('Invalid case, "{}", entered'.format(case))
return counts
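# Minimal sketch, not in the original script: the 'constant' case ignores the flux
# scaling entirely and returns 1000 counts per energy bin, which makes it handy for
# quick checks of the unfolding machinery.
def _example_constant_counts():
    energy_midpoints = np.logspace(6.5, 7.7, 10)
    return get_test_counts(case='constant', composition='light', num_groups=2,
                           energy_midpoints=energy_midpoints,
                           flux_to_counts_scaling=1.0)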
def calculate_ratio(flux, flux_err_stat, flux_err_sys,
true_flux, true_flux_err_stat, true_flux_err_sys):
diff = flux - true_flux
# Error bar calculation
diff_err_sys = np.sqrt(flux_err_sys**2 + true_flux_err_sys**2)
diff_err_stat = np.sqrt(flux_err_stat**2 + true_flux_err_stat**2)
frac_diff, frac_diff_sys = comp.ratio_error(num=diff,
num_err=diff_err_sys,
den=true_flux,
den_err=true_flux_err_sys)
frac_diff, frac_diff_stat = comp.ratio_error(num=diff,
num_err=diff_err_stat,
den=true_flux,
den_err=true_flux_err_stat)
return frac_diff, frac_diff_stat, frac_diff_sys
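# Toy example, not in the original script, of the ratio calculation above; it relies on
# comp.ratio_error from the comptools package imported at the top of this file.
def _example_calculate_ratio():
    flux = np.array([1.1e3, 9.0e2])
    true_flux = np.array([1.0e3, 1.0e3])
    stat_err = np.array([50.0, 50.0])
    sys_err = np.array([20.0, 20.0])
    return calculate_ratio(flux, stat_err, sys_err, true_flux, stat_err, sys_err)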
def main(config, num_groups, prior, ts_stopping, case, response, response_err, p=None):
figures_dir = os.path.join(comp.paths.figures_dir, 'unfolding', config,
'datachallenge', '{}_case'.format(case),
'{}_prior'.format(prior),
'ts_stopping_{}'.format(ts_stopping))
# Calculate desired counts distribution for test case
counts_true = pd.DataFrame(index=range(num_ebins),
columns=comp_list)
for composition in comp_list:
flux_to_counts_scaling = eff_area[composition] * livetime * solid_angle * energybins.energy_bin_widths
counts_true[composition] = get_test_counts(case,
composition,
num_groups,
energybins.energy_midpoints,
flux_to_counts_scaling)
counts_true['total'] = counts_true.sum(axis=1)
# Plot true flux and H4a flux (as a visual reference)
fig, axarr = plt.subplots(nrows=1, ncols=num_groups + 1,
sharex=True, sharey=True, figsize=(15, 5))
for idx, composition in enumerate(comp_list + ['total']):
ax = axarr[idx]
model_flux = comp.model_flux(model='H4a',
energy=energybins.energy_midpoints,
num_groups=num_groups)
model_comp_flux = model_flux['flux_{}'.format(composition)].values
ax.plot(energybins.log_energy_midpoints,
energybins.energy_midpoints**2.7 * model_comp_flux,
color=color_dict[composition],
ls='-.',
lw=2,
marker='None',
label='H4a')
comp_flux, _ = counts_to_flux(counts_true[composition],
composition=composition)
ax.plot(energybins.log_energy_midpoints, comp_flux,
color=color_dict[composition],
ls='-',
lw=2,
marker='None',
label='Test case')
ax.set_yscale("log", nonposy='clip')
ax.set_xlabel('$\mathrm{\log_{10}(E/GeV)}$')
if idx == 0:
ax.set_ylabel('$\mathrm{ E^{2.7} \ J(E) \ [GeV^{1.7} m^{-2} sr^{-1} s^{-1}]}$')
ax.set_title(composition)
ax.grid(lw=1, which='both')
ax.legend()
true_flux_outfile = os.path.join(
figures_dir,
'true_flux_{}-groups_{}-case.png'.format(num_groups, case))
comp.check_output_dir(true_flux_outfile)
plt.savefig(true_flux_outfile)
# Run analysis pipeline on simulation
counts_observed = pd.DataFrame(0, index=range(num_ebins),
columns=comp_list)
counts_observed_err = pd.DataFrame(0, index=range(num_ebins),
columns=comp_list)
weights = pd.DataFrame(0, index=range(num_ebins),
columns=comp_list)
# Construct mask for energy bin
energy_bins = np.digitize(df_sim_data['MC_log_energy'],
bins=energybins.log_energy_bins) - 1
for idx_log_energy, composition in itertools.product(
range(len(energybins.log_energy_midpoints)),
comp_list):
# Filter out events that don't pass composition & energy mask
comp_mask = df_sim_data['comp_group_{}'.format(num_groups)] == composition
energy_mask = energy_bins == idx_log_energy
df_sim_bin = df_sim_data.loc[comp_mask & energy_mask, :]
# Reweight simulation events to get desired number of events
weight = counts_true[composition][idx_log_energy] / df_sim_bin.shape[0]
# weight = counts_true.loc[log_energy, composition] / df_sim_bin.shape[0]
weights.loc[idx_log_energy, composition] = weight
# Get predicted composition
y_test = df_sim_bin['comp_target_{}'.format(num_groups)].values
X_test = df_sim_bin[feature_list].values
if p is not None:
pred_target = custom_predict(y_test, p=p, num_groups=num_groups)
else:
pred_target = pipeline.predict(X_test)
pred_comp = np.array(comp.decode_composition_groups(
pred_target, num_groups=num_groups))
assert len(pred_comp) == df_sim_bin.shape[0]
for p_comp in np.unique(pred_comp):
pred_comp_mask = pred_comp == p_comp
comp_counts, _ = np.histogram(df_sim_bin.loc[pred_comp_mask, 'reco_log_energy'],
bins=energybins.log_energy_bins)
counts_observed[p_comp] += weight * comp_counts
counts_observed_err[p_comp] += [sum(weight**2 for _ in range(c)) for c in comp_counts]
# Square root the sum of squares of the weight errors
for composition in comp_list:
counts_observed_err[composition] = np.sqrt(counts_observed_err[composition])
counts_observed_err['total'] = np.sqrt(np.sum(counts_observed_err[composition]**2 for composition in comp_list))
# Calculate total counts
counts_observed['total'] = counts_observed.sum(axis=1)
# Plot weights for each composition and energy bin
fig, ax = plt.subplots()
for composition in comp_list:
weights[composition].plot(ls=':', label=composition,
color=color_dict[composition], ax=ax)
ax.set_xlabel('$\mathrm{\log_{10}(E/GeV)}$')
ax.set_ylabel('Weights')
ax.set_yscale("log", nonposy='clip')
ax.grid(lw=1)
ax.legend()
weights_outfile = os.path.join(
figures_dir, 'weights_{}-groups_{}.png'.format(num_groups, case))
comp.check_output_dir(weights_outfile)
plt.savefig(weights_outfile)
# Format observed counts, detection efficiencies, and priors for PyUnfold use
counts_pyunfold = np.empty(num_groups * len(energybins.energy_midpoints))
counts_err_pyunfold = np.empty(num_groups * len(energybins.energy_midpoints))
efficiencies = np.empty(num_groups * len(energybins.energy_midpoints))
efficiencies_err = np.empty(num_groups * len(energybins.energy_midpoints))
for idx, composition in enumerate(comp_list):
counts_pyunfold[idx::num_groups] = counts_observed[composition]
counts_err_pyunfold[idx::num_groups] = counts_observed_err[composition]
efficiencies[idx::num_groups] = df_eff['eff_median_{}'.format(composition)]
efficiencies_err[idx::num_groups] = df_eff['eff_err_low_{}'.format(composition)]
if prior == 'Jeffreys':
prior_pyunfold = 'Jeffreys'
print('Jeffreys prior')
else:
model_flux = comp.model_flux(model=prior,
energy=energybins.energy_midpoints,
num_groups=num_groups)
prior_pyunfold = np.empty(num_groups * len(energybins.energy_midpoints))
for idx, composition in enumerate(comp_list):
prior_pyunfold[idx::num_groups] = model_flux['flux_{}'.format(composition)]
# Want to ensure prior_pyunfold are probabilities (i.e. they add to 1)
prior_pyunfold = prior_pyunfold / np.sum(prior_pyunfold)
df_unfolding_iter = iterative_unfold(data=counts_pyunfold,
data_err=counts_err_pyunfold,
                                         response=response,
                                         response_err=response_err,
efficiencies=efficiencies,
efficiencies_err=efficiencies_err,
prior=prior_pyunfold,
ts='ks',
ts_stopping=ts_stopping,
max_iter=100,
return_iterations=True)
# print('\n{} case (prior {}): {} iterations'.format(case, prior, df_unfolding_iter.shape[0]))
output = {'prior': prior, 'ts_stopping': ts_stopping, 'case': case}
counts, counts_sys_err, counts_stat_err = comp.unfolded_counts_dist(
df_unfolding_iter,
iteration=-1,
num_groups=num_groups)
for idx, composition in enumerate(comp_list + ['total']):
# Pre-unfolding flux plot
initial_counts = counts_observed[composition].values
initial_counts_err = counts_observed_err[composition].values
# initial_counts_err = np.sqrt(initial_counts)
initial_flux, initial_flux_err_stat = counts_to_flux(
initial_counts,
initial_counts_err,
composition=composition)
initial_flux_err_sys = np.zeros_like(initial_flux)
# Unfolded flux plot
flux, flux_err_sys = unfolded_counts_to_flux(
counts[composition],
counts_sys_err[composition])
flux, flux_err_stat = unfolded_counts_to_flux(
counts[composition],
counts_stat_err[composition])
# True flux
true_counts = counts_true[composition].values
true_counts_err = np.sqrt(true_counts)
true_flux, true_flux_err_stat = counts_to_flux(
true_counts,
true_counts_err,
composition=composition)
true_flux_err_sys = np.zeros_like(true_flux)
output['initial_counts_{}'.format(composition)] = initial_counts
output['initial_counts_err_{}'.format(composition)] = initial_counts_err
output['flux_{}'.format(composition)] = flux
output['flux_err_stat_{}'.format(composition)] = flux_err_stat
output['flux_err_sys_{}'.format(composition)] = flux_err_sys
output['true_flux_{}'.format(composition)] = true_flux
output['true_flux_err_stat_{}'.format(composition)] = true_flux_err_stat
output['true_flux_err_sys_{}'.format(composition)] = true_flux_err_sys
output['initial_flux_{}'.format(composition)] = initial_flux
output['initial_flux_err_stat_{}'.format(composition)] = initial_flux_err_stat
output['initial_flux_err_sys_{}'.format(composition)] = initial_flux_err_sys
# Don't want to consume too much memory by keeping too many figures open
plt.close('all')
return output
def save_flux_plot(group, config, case, ts_stopping, num_groups):
"""Saves flux comparison plot
"""
comp_list = comp.get_comp_list(num_groups=num_groups)
energybins = comp.get_energybins(config)
# Get plotting axis
figures_dir = os.path.join(comp.paths.figures_dir, 'unfolding', config,
'datachallenge', '{}_case'.format(case),
'prior_comparisons',
'ts_stopping_{}'.format(ts_stopping))
# Make initial counts (pre-unfolding) plot
fig_counts, ax_counts = plt.subplots()
fig = plt.figure(figsize=(12, 5))
gs = gridspec.GridSpec(nrows=2, ncols=num_groups+1, hspace=0.1)
# gs = gridspec.GridSpec(nrows=2, ncols=num_groups+1, hspace=0.075)
axs_flux, axs_ratio = {}, {}
for idx, composition in enumerate(comp_list + ['total']):
if idx == 0:
axs_flux[composition] = fig.add_subplot(gs[0, idx])
else:
axs_flux[composition] = fig.add_subplot(gs[0, idx], sharey=axs_flux[comp_list[0]])
axs_ratio[composition] = fig.add_subplot(gs[1, idx], sharex=axs_flux[composition])
prior_groupby = group.groupby('prior')
marker_iter = iter('.^x*')
ls_iter = iter(['-', ':', '-.', '--'])
fontsize = 18
initial_flux_test, initial_flux_err_stat_test, initial_flux_err_sys_test = {}, {}, {}
for prior_idx, (prior, df_group) in enumerate(prior_groupby):
marker = next(marker_iter)
ls = next(ls_iter)
label = priors_labels[priors.index(prior)]
for idx, composition in enumerate(comp_list + ['total']):
ax_flux = axs_flux[composition]
ax_ratio = axs_ratio[composition]
color = sns.color_palette(comp.get_colormap(composition), len(priors)+3).as_hex()[-1*(prior_idx + 2)]
true_color = sns.color_palette(comp.get_colormap(composition), len(priors)+3).as_hex()[-1]
# True flux
true_flux = df_group['true_flux_{}'.format(composition)].values[0]
true_flux_err_stat = df_group['true_flux_err_stat_{}'.format(composition)].values[0]
true_flux_err_sys = df_group['true_flux_err_sys_{}'.format(composition)].values[0]
if prior_idx == 0:
ax_flux.errorbar(energybins.log_energy_midpoints, true_flux, yerr=true_flux_err_stat,
color=true_color, ls='None', marker='*',
label='True flux', alpha=0.8)
# Unfolded flux
flux = df_group['flux_{}'.format(composition)].values[0]
flux_err_stat = df_group['flux_err_stat_{}'.format(composition)].values[0]
flux_err_sys = df_group['flux_err_sys_{}'.format(composition)].values[0]
if not plot_initial_flux:
comp.plot_steps(energybins.log_energy_bins, flux, yerr=flux_err_sys,
ax=ax_flux, alpha=0.4, fillalpha=0.4,
color=color, ls=ls)
ax_flux.errorbar(energybins.log_energy_midpoints, flux, yerr=flux_err_stat,
color=color, ls='None', marker=marker,
label='Unfolded ({})'.format(label), alpha=0.8)
# Initial (pre-unfolding) flux
initial_flux = df_group['initial_flux_{}'.format(composition)].values[0]
initial_flux_err_stat = df_group['initial_flux_err_stat_{}'.format(composition)].values[0]
initial_flux_err_sys = df_group['initial_flux_err_sys_{}'.format(composition)].values[0]
# Sanity check that all the initial_flux (what goes into the unfolding)
# are the same for each prior.
if prior_idx == 0:
initial_flux_test[composition] = initial_flux
initial_flux_err_stat_test[composition] = initial_flux_err_stat
initial_flux_err_sys_test[composition] = initial_flux_err_sys
initial_counts = df_group['initial_counts_{}'.format(composition)].values[0]
initial_counts_err = df_group['initial_counts_err_{}'.format(composition)].values[0]
comp.plot_steps(energybins.log_energy_bins, initial_counts, yerr=initial_counts_err,
ax=ax_counts, alpha=0.8, fillalpha=0.4,
color=color_dict[composition], label=composition)
ax_counts.set_yscale("log", nonposy='clip')
ax_counts.set_ylabel('Observed counts from BDT')
# ax_counts.set_ylabel('$\mathrm{ E^{2.7} \ J(E) \ [GeV^{1.7} m^{-2} sr^{-1} s^{-1}]}$')
ax_counts.set_xlabel('$\mathrm{\log_{10}(E/GeV)}$')
ax_counts.set_xlim(6.4, 7.8)
ax_counts.grid()
ax_counts.legend()
else:
np.testing.assert_allclose(initial_flux_test[composition], initial_flux)
np.testing.assert_allclose(initial_flux_err_stat_test[composition], initial_flux_err_stat)
np.testing.assert_allclose(initial_flux_err_sys_test[composition], initial_flux_err_sys)
if plot_initial_flux:
comp.plot_steps(energybins.log_energy_bins, initial_flux, yerr=initial_flux_err_sys,
ax=ax_flux, alpha=0.4, fillalpha=0.4,
color=color, ls=ls)
ax_flux.errorbar(energybins.log_energy_midpoints, initial_flux, yerr=initial_flux_err_stat,
color=color, ls='None', marker=marker,
label='Initial ({})'.format(label), alpha=0.8)
ax_flux.set_yscale("log", nonposy='clip')
ax_flux.set_xlim(6.4, 7.8)
ax_flux.set_ylim(1e3, 1e5)
ax_flux.grid(linestyle='dotted', which="both", lw=1)
ax_flux.set_title(composition, fontsize=14)
if composition == 'total':
ax_flux.legend(fontsize=10, loc='lower left')
if idx == 0:
ax_flux.set_ylabel('$\mathrm{ E^{2.7} \ J(E)}$' +'\n' + '$\mathrm{[GeV^{1.7} m^{-2} sr^{-1} s^{-1}]}$',
fontsize=18)
else:
plt.setp(ax_flux.get_yticklabels(), visible=False)
plt.setp(ax_flux.get_xticklabels(), visible=False)
ax_flux.tick_params(axis='both', which='major', labelsize=14)
if plot_initial_flux:
frac_diff, frac_diff_stat, frac_diff_sys = calculate_ratio(
initial_flux, initial_flux_err_stat, initial_flux_err_sys,
true_flux, true_flux_err_stat, true_flux_err_sys)
else:
frac_diff, frac_diff_stat, frac_diff_sys = calculate_ratio(
flux, flux_err_stat, flux_err_sys,
true_flux, true_flux_err_stat, true_flux_err_sys)
comp.plot_steps(energybins.log_energy_bins, frac_diff, yerr=frac_diff_sys,
ax=ax_ratio, alpha=0.4, fillalpha=0.4,
color=color, ls=ls)
ax_ratio.errorbar(energybins.log_energy_midpoints, frac_diff, yerr=frac_diff_stat,
color=color, ls='None', marker=marker,
label='Unfolded ({})'.format(label), alpha=0.8)
ax_ratio.axhline(0, ls='-.', lw=1, marker='None', color='k')
ax_ratio.grid(linestyle='dotted', which="both", lw=1)
ax_ratio.set_yticks(np.arange(-1, 1.5, 0.5))
# ax_ratio.set_yticks(np.arange(-1, 1.5, 0.25))
ax_ratio.set_ylim(-1, 1)
if idx == 0:
ax_ratio.set_ylabel('$\mathrm{(J - J_{true}) / J_{true}}$',
fontsize=18)
else:
plt.setp(ax_ratio.get_yticklabels(), visible=False)
ax_ratio.set_xlabel('$\mathrm{\log_{10}(E/GeV)}$', fontsize=18)
ax_ratio.tick_params(axis='both', which='major', labelsize=14)
plt.tight_layout()
flux_outfile = os.path.join(figures_dir,
'flux_ratio_{}-groups_{}-case.png'.format(num_groups, case))
comp.check_output_dir(flux_outfile)
plt.savefig(flux_outfile)
counts_outfile = os.path.join(figures_dir,
'counts_{}-groups_{}-case.png'.format(num_groups, case))
comp.check_output_dir(counts_outfile)
fig_counts.savefig(counts_outfile)
    # Don't want to consume too much memory by keeping too many figures open
#!/usr/bin/python3
# -*- coding: utf-8 -*-
"""This module contains the tools required for generating Microsoft Office
documents for reporting. The Reportwriter class accepts data and produces a
docx, xlsx, pptx, and json using the provided data.
"""
import re
import io
import os
import json
from PIL import Image
from PIL import ImageOps
from xlsxwriter.workbook import Workbook
from docx import Document
from docx.oxml import parse_xml
from docx.oxml.shared import OxmlElement, qn
from docx.oxml.ns import nsdecls
from docx.shared import RGBColor, Inches, Pt
from docx.enum.style import WD_STYLE_TYPE
from docx.enum.text import WD_ALIGN_PARAGRAPH
import pptx
from pptx import Presentation
from pptx.enum.text import PP_ALIGN
from pptx.enum.text import MSO_ANCHOR
from django.conf import settings
from django.core.serializers.json import DjangoJSONEncoder
class Reportwriter():
"""Class for generating documents for the provided findings."""
# Color codes used for finding severity
# Blue
informational_color = '8eaadb'
informational_color_hex = [0x83, 0xaa, 0xdb]
# Green
low_color = 'a8d08d'
low_color_hex = [0xa8, 0xd0, 0x8d]
# Orange
medium_color = 'f4b083'
medium_color_hex = [0xf4, 0xb0, 0x83]
# Red
high_color = 'ff7e79'
high_color_hex = [0xff, 0x7e, 0x79]
# Purple
critical_color = '7030a0'
critical_color_hex = [0x70, 0x30, 0xa0]
# Picture border color - this one needs the # in front
border_color = '#2d2b6b'
border_color_hex = [0x45, 0x43, 0x107]
# Extensions allowed for evidence
image_extensions = ['png', 'jpeg', 'jpg']
text_extensions = ['txt', 'ps1', 'py', 'md', 'log']
def __init__(self, report_queryset, output_path, evidence_path,
template_loc=None):
"""Everything that must be initialized is setup here."""
self.output_path = output_path
self.template_loc = template_loc
self.evidence_path = evidence_path
self.report_queryset = report_queryset
def generate_json(self):
"""Export report as a JSON dictionary."""
project_name = str(self.report_queryset.project)
# Client data
report_dict = {}
report_dict['client'] = {}
report_dict['client']['id'] = self.report_queryset.project.client.id
report_dict['client']['full_name'] = \
self.report_queryset.project.client.name
report_dict['client']['short_name'] = \
self.report_queryset.project.client.short_name
report_dict['client']['codename'] = \
self.report_queryset.project.client.codename
# Client points of contact data
report_dict['client']['poc'] = {}
for poc in self.report_queryset.project.client.clientcontact_set.all():
report_dict['client']['poc'][poc.id] = {}
report_dict['client']['poc'][poc.id]['id'] = poc.id
report_dict['client']['poc'][poc.id]['name'] = poc.name
report_dict['client']['poc'][poc.id]['job_title'] = poc.job_title
report_dict['client']['poc'][poc.id]['email'] = poc.email
report_dict['client']['poc'][poc.id]['phone'] = poc.phone
report_dict['client']['poc'][poc.id]['note'] = poc.note
# Project data
report_dict['project'] = {}
report_dict['project']['id'] = self.report_queryset.project.id
report_dict['project']['name'] = project_name
report_dict['project']['start_date'] = \
self.report_queryset.project.start_date
report_dict['project']['end_date'] = \
self.report_queryset.project.end_date
report_dict['project']['codename'] = \
self.report_queryset.project.codename
report_dict['project']['project_type'] = \
self.report_queryset.project.project_type.project_type
report_dict['project']['note'] = self.report_queryset.project.note
# Finding data
report_dict['findings'] = {}
for finding in self.report_queryset.reportfindinglink_set.all():
report_dict['findings'][finding.title] = {}
report_dict['findings'][finding.title]['id'] = finding.id
report_dict['findings'][finding.title]['title'] = finding.title
report_dict['findings'][finding.title]['severity'] = \
finding.severity.severity
report_dict['findings'][finding.title]['affected_entities'] = \
finding.affected_entities
report_dict['findings'][finding.title]['description'] = \
finding.description
report_dict['findings'][finding.title]['impact'] = finding.impact
report_dict['findings'][finding.title]['recommendation'] = \
finding.mitigation
report_dict['findings'][finding.title]['replication_steps'] = \
finding.replication_steps
report_dict['findings'][finding.title][
'host_detection_techniques'] = \
finding.host_detection_techniques
report_dict['findings'][finding.title][
'network_detection_techniques'] = \
finding.network_detection_techniques
report_dict['findings'][finding.title]['references'] = \
finding.references
# Get any evidence
report_dict['findings'][finding.title]['evidence'] = {}
for evidence_file in finding.evidence_set.all():
evidence_name = evidence_file.friendly_name
report_dict['findings'][finding.title][
'evidence'][evidence_name] = {}
report_dict['findings'][finding.title][
'evidence'][evidence_name]['id'] = evidence_file.id
report_dict['findings'][finding.title][
'evidence'][evidence_name]['friendly_name'] = \
evidence_file.friendly_name
report_dict['findings'][finding.title][
'evidence'][evidence_name]['uploaded_by'] = \
evidence_file.uploaded_by.username
report_dict['findings'][finding.title][
'evidence'][evidence_name]['upload_date'] = \
evidence_file.upload_date
report_dict['findings'][finding.title][
'evidence'][evidence_name]['description'] = \
evidence_file.description
report_dict['findings'][finding.title][
'evidence'][evidence_name]['caption'] = \
evidence_file.caption
report_dict['findings'][finding.title][
'evidence'][evidence_name]['url'] = \
evidence_file.document.url
report_dict['findings'][finding.title][
'evidence'][evidence_name]['file_path'] = \
str(evidence_file.document)
# Infrastructure data
report_dict['infrastructure'] = {}
report_dict['infrastructure']['domains'] = {}
report_dict['infrastructure']['servers'] = {}
report_dict['infrastructure']['servers']['static'] = {}
report_dict['infrastructure']['servers']['cloud'] = {}
report_dict['infrastructure']['domains_and_servers'] = {}
for domain in self.report_queryset.project.history_set.all():
report_dict['infrastructure']['domains'][domain.domain.id] = {}
report_dict['infrastructure']['domains'][domain.domain.id][
'id'] = domain.domain.id
report_dict['infrastructure']['domains'][domain.domain.id][
'name'] = domain.domain.name
report_dict['infrastructure']['domains'][domain.domain.id][
'activity'] = domain.activity_type.activity
report_dict['infrastructure']['domains'][domain.domain.id][
'operator'] = domain.operator.username
report_dict['infrastructure']['domains'][domain.domain.id][
'start_date'] = domain.start_date
report_dict['infrastructure']['domains'][domain.domain.id][
'end_date'] = domain.end_date
report_dict['infrastructure']['domains'][domain.domain.id][
'note'] = domain.note
for server in self.report_queryset.project.serverhistory_set.all():
report_dict['infrastructure']['servers']['static'][
server.server.id] = {}
report_dict['infrastructure']['servers']['static'][
server.server.id]['id'] = server.server.id
report_dict['infrastructure']['servers']['static'][
server.server.id]['ip_address'] = server.server.ip_address
report_dict['infrastructure']['servers']['static'][
server.server.id]['activity'] = server.activity_type.activity
report_dict['infrastructure']['servers']['static'][
server.server.id]['role'] = server.server_role.server_role
report_dict['infrastructure']['servers']['static'][
server.server.id]['operator'] = server.operator.username
report_dict['infrastructure']['servers']['static'][
server.server.id]['start_date'] = server.start_date
report_dict['infrastructure']['servers']['static'][
server.server.id]['end_date'] = server.end_date
report_dict['infrastructure']['servers']['static'][
server.server.id]['note'] = server.note
for server in self.report_queryset.project.transientserver_set.all():
report_dict['infrastructure']['servers']['cloud'][server.id] = {}
report_dict['infrastructure']['servers']['cloud'][server.id][
'id'] = server.id
report_dict['infrastructure']['servers']['cloud'][server.id][
'ip_address'] = server.ip_address
report_dict['infrastructure']['servers']['cloud'][server.id][
'activity'] = server.activity_type.activity
report_dict['infrastructure']['servers']['cloud'][server.id][
'role'] = server.server_role.server_role
report_dict['infrastructure']['servers']['cloud'][server.id][
'operator'] = server.operator.username
report_dict['infrastructure']['servers']['cloud'][server.id][
'note'] = server.note
# Hold all domain/server associations in a temporary dictionary
temp = {}
for connection in self.report_queryset.project.domainserverconnection_set.all():
# Handle one-to-many relationships by combining everything into
# a domain and list of servers
            if connection.subdomain != "*":
domain_name = connection.subdomain + "." + connection.domain.domain.name
else:
domain_name = connection.domain.domain.name
report_dict['infrastructure']['domains_and_servers'][connection.id] = {}
report_dict['infrastructure']['domains_and_servers'][
connection.id]['domain'] = domain_name
if connection.static_server:
server = connection.static_server.server.ip_address
else:
server = connection.transient_server.ip_address
if domain_name in temp:
server_list = [server]
for val in temp[domain_name]:
server_list.append(val)
# Remove any duplicates from server_list
server = list(set(server_list))
# Now add the temporary dictionary's data to the report JSON
report_dict['infrastructure']['domains_and_servers'][
connection.id]['servers'] = server
if connection.endpoint:
report_dict['infrastructure']['domains_and_servers'][
connection.id]['cdn_endpoint'] = connection.endpoint
else:
report_dict['infrastructure']['domains_and_servers'][
connection.id]['cdn_endpoint'] = "None"
# Operator assignments
report_dict['team'] = {}
for operator in self.report_queryset.project.projectassignment_set.all():
report_dict['team'][operator.operator.id] = {}
report_dict['team'][operator.operator.id][
'id'] = operator.operator.id
report_dict['team'][operator.operator.id][
'name'] = operator.operator.name
report_dict['team'][operator.operator.id][
'project_role'] = operator.role.project_role
report_dict['team'][operator.operator.id][
'email'] = operator.operator.email
report_dict['team'][operator.operator.id][
'start_date'] = operator.start_date
report_dict['team'][operator.operator.id][
'end_date'] = operator.end_date
report_dict['team'][operator.operator.id][
'note'] = operator.note
return json.dumps(report_dict, indent=2, cls=DjangoJSONEncoder)
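    # Hedged sketch of the JSON emitted above (values illustrative, keys mirror
    # the assignments in this method):
    #   {"infrastructure": {
    #        "domains": {1: {"id": 1, "name": "example.com", "activity": "...",
    #                        "operator": "...", "start_date": "...",
    #                        "end_date": "...", "note": "..."}},
    #        "servers": {"static": {...}, "cloud": {...}},
    #        "domains_and_servers": {1: {"domain": "mail.example.com",
    #                                    "servers": "192.0.2.10",
    #                                    "cdn_endpoint": "None"}}},
    #    "team": {1: {"id": 1, "name": "...", "project_role": "...",
    #                 "email": "...", "start_date": "...", "end_date": "...",
    #                 "note": "..."}}}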
def create_newline(self):
"""Create a blank line to act as a separator between document elements.
A paragraph must be added and then a run in order to use an
`add_break()`. This creates the appropriate <w:r> in the docx
document's XML.
"""
p = self.spenny_doc.add_paragraph()
run = p.add_run()
run.add_break()
def make_figure(self, paragraph):
"""Make the specified paragraph an auto-incrementing Figure in the
Word document.
Code from: https://github.com/python-openxml/python-docx/issues/359
"""
        run = paragraph.add_run()
r = run._r
fldChar = OxmlElement('w:fldChar')
fldChar.set(qn('w:fldCharType'), 'begin')
r.append(fldChar)
instrText = OxmlElement('w:instrText')
instrText.text = ' SEQ Figure \\* ARABIC'
r.append(instrText)
fldChar = OxmlElement('w:fldChar')
fldChar.set(qn('w:fldCharType'), 'end')
r.append(fldChar)
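    # The field elements appended to the run above assemble what Word treats as
    # the field { SEQ Figure \* ARABIC }, so figure numbers are recalculated
    # whenever fields are updated in the generated document.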
def list_number(self, par, prev=None, level=None, num=True):
"""Makes the specified paragraph a list item with a specific level and
optional restart.
An attempt will be made to retrieve an abstract numbering style that
corresponds to the style of the paragraph. If that is not possible,
the default numbering or bullet style will be used based on the
``num`` parameter.
Parameters
----------
par : docx.paragraph.Paragraph
The paragraph to turn into a list item.
prev : docx.paragraph.Paragraph or None
The previous paragraph in the list. If specified, the numbering
and styles will be taken as a continuation of this paragraph.
If omitted, a new numbering scheme will be started.
level : int or None
The level of the paragraph within the outline. If ``prev`` is
set, defaults to the same level as in ``prev``. Otherwise,
defaults to zero.
num : bool
If ``prev`` is :py:obj:`None` and the style of the paragraph
does not correspond to an existing numbering style, this will
            determine whether the list will be numbered or bulleted.
The result is not guaranteed, but is fairly safe for most Word
templates.
Code from:
https://github.com/python-openxml/python-docx/issues/25#issuecomment-400787031
"""
xpath_options = {
True: {'single': 'count(w:lvl)=1 and ', 'level': 0},
False: {'single': '', 'level': level},
}
def style_xpath(prefer_single=True):
"""
            The style ID comes from the outer-scope paragraph ``par`` (``par.style.style_id``).
"""
style = par.style.style_id
return (
'w:abstractNum['
'{single}w:lvl[@w:ilvl="{level}"]/w:pStyle[@w:val="{style}"]'
']/@w:abstractNumId'
).format(style=style, **xpath_options[prefer_single])
def type_xpath(prefer_single=True):
"""
The type is from the outer-scope variable ``num``.
"""
type = 'decimal' if num else 'bullet'
return (
'w:abstractNum['
'{single}w:lvl[@w:ilvl="{level}"]/w:numFmt[@w:val="{type}"]'
']/@w:abstractNumId'
).format(type=type, **xpath_options[prefer_single])
def get_abstract_id():
"""Select as follows:
1. Match single-level by style (get min ID)
2. Match exact style and level (get min ID)
3. Match single-level decimal/bullet types (get min ID)
4. Match decimal/bullet in requested level (get min ID)
            5. Fall back to 0
"""
for fn in (style_xpath, type_xpath):
for prefer_single in (True, False):
xpath = fn(prefer_single)
ids = numbering.xpath(xpath)
if ids:
return min(int(x) for x in ids)
return 0
if (prev is None or
prev._p.pPr is None or
prev._p.pPr.numPr is None or
prev._p.pPr.numPr.numId is None):
if level is None:
level = 0
numbering = self.spenny_doc.part.numbering_part.\
numbering_definitions._numbering
# Compute the abstract ID first by style, then by num
abstract = get_abstract_id()
# Set the concrete numbering based on the abstract numbering ID
num = numbering.add_num(abstract)
# Make sure to override the abstract continuation property
num.add_lvlOverride(ilvl=level).add_startOverride(1)
# Extract the newly-allocated concrete numbering ID
num = num.numId
else:
if level is None:
level = prev._p.pPr.numPr.ilvl.val
# Get the previous concrete numbering ID
num = prev._p.pPr.numPr.numId.val
par._p.get_or_add_pPr().get_or_add_numPr().\
get_or_add_numId().val = num
par._p.get_or_add_pPr().get_or_add_numPr().\
get_or_add_ilvl().val = level
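    # Hedged usage sketch for list_number(); the paragraph text and the
    # "List Number" style name are illustrative and may differ per template:
    #   first = self.spenny_doc.add_paragraph("first item", style="List Number")
    #   self.list_number(first, level=0, num=True)
    #   second = self.spenny_doc.add_paragraph("second item", style="List Number")
    #   self.list_number(second, prev=first)  # continues the same numbering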
def process_text(self, text, finding, report_json):
"""Process the provided text from the specified finding to parse
keywords for evidence placement and formatting.
"""
numbered_list = False
bulleted_list = False
code_block = False
inline_code = False
italic_text = False
bold_text = False
p = None
prev_p = None
regex = r'\{\{\.(.*?)\}\}'
for line in text.split('\n'):
line = line.strip()
# Repository: edgardmota/vent
import datetime
import docker
import json
import math
import multiprocessing
import os
import pkg_resources
import platform
import re
import requests
from subprocess import check_output, Popen, PIPE
from vent.api.templates import Template
from vent.helpers.paths import PathDirs
from vent.helpers.logs import Logger
logger = Logger(__name__)
def Version():
""" Get Vent version """
version = ''
try:
version = pkg_resources.require("vent")[0].version
if not version.startswith('v'):
version = 'v' + version
except Exception as e: # pragma: no cover
version = "Error: " + str(e)
return version
def System():
""" Get system operating system """
return platform.system()
def Docker():
""" Get Docker setup information """
docker_info = {'server': {}, 'env': '', 'type': '', 'os': ''}
# get docker server version
try:
d_client = docker.from_env()
docker_info['server'] = d_client.version()
except Exception as e: # pragma: no cover
logger.error("Can't get docker info " + str(e))
# get operating system
system = System()
docker_info['os'] = system
# check if native or using docker-machine
if 'DOCKER_MACHINE_NAME' in os.environ:
# using docker-machine
docker_info['env'] = os.environ['DOCKER_MACHINE_NAME']
docker_info['type'] = 'docker-machine'
elif 'DOCKER_HOST' in os.environ:
# not native
docker_info['env'] = os.environ['DOCKER_HOST']
docker_info['type'] = 'remote'
else:
# using "local" server
docker_info['type'] = 'native'
return docker_info
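# Hedged sketch of the dict returned by Docker() (values illustrative):
#   {'server': {'Version': '...', 'ApiVersion': '...'},
#    'env': '', 'type': 'native', 'os': 'Linux'}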
def Containers(vent=True, running=True):
"""
Get containers that are created, by default limit to vent containers that
are running
"""
containers = []
try:
d_client = docker.from_env()
if vent:
c = d_client.containers.list(all=not running,
filters={'label': 'vent'})
else:
c = d_client.containers.list(all=not running)
for container in c:
containers.append((container.name, container.status))
except Exception as e: # pragma: no cover
logger.error("Docker problem " + str(e))
return containers
def Cpu():
""" Get number of available CPUs """
cpu = "Unknown"
try:
cpu = str(multiprocessing.cpu_count())
except Exception as e: # pragma: no cover
logger.error("Can't access CPU count' " + str(e))
return cpu
def Gpu(pull=False):
""" Check for support of GPUs, and return what's available """
gpu = (False, "")
try:
image = 'nvidia/cuda:8.0-runtime'
image_name, tag = image.split(":")
d_client = docker.from_env()
nvidia_image = d_client.images.list(name=image)
if pull and len(nvidia_image) == 0:
try:
d_client.images.pull(image_name, tag=tag)
nvidia_image = d_client.images.list(name=image)
except Exception as e: # pragma: no cover
logger.error("Something with the GPU went wrong " + str(e))
if len(nvidia_image) > 0:
cmd = 'nvidia-docker run --rm ' + image + ' nvidia-smi -L'
proc = Popen([cmd],
stdout=PIPE,
stderr=PIPE,
shell=True,
close_fds=True)
gpus = proc.stdout.read()
err = proc.stderr.read()
if gpus:
gpu_str = ""
for line in gpus.strip().split("\n"):
gpu_str += line.split(" (UUID: ")[0] + ", "
gpu = (True, gpu_str[:-2])
else:
if err:
gpu = (False, "Unknown", str(err))
else:
gpu = (False, "None")
else:
gpu = (False, "None")
except Exception as e: # pragma: no cover
gpu = (False, "Unknown", str(e))
return gpu
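# Gpu() return shapes, derived from the branches above (device name illustrative):
#   (True, "GPU 0: Tesla K80")     - GPUs listed by `nvidia-smi -L`
#   (False, "None")                - no nvidia image available or no GPUs listed
#   (False, "Unknown", "<error>")  - nvidia-smi reported an error or an exception occurred
# Note that the error cases return a 3-tuple while the others return a 2-tuple.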
def GpuUsage(**kargs):
""" Get the current GPU usage of available GPUs """
usage = (False, None)
gpu_status = {'vent_usage': {'dedicated': [], 'mem_mb': {}}}
path_dirs = PathDirs(**kargs)
path_dirs.host_config()
template = Template(template=path_dirs.cfg_file)
# get running jobs using gpus
try:
d_client = docker.from_env()
c = d_client.containers.list(all=False,
filters={'label': 'vent-plugin'})
for container in c:
if ('vent.gpu' in container.attrs['Config']['Labels'] and
container.attrs['Config']['Labels']['vent.gpu'] == 'yes'):
device = container.attrs['Config']['Labels']['vent.gpu.device']
if ('vent.gpu.dedicated' in container.attrs['Config']['Labels'] and
container.attrs['Config']['Labels']['vent.gpu.dedicated'] == 'yes'):
gpu_status['vent_usage']['dedicated'].append(device)
elif 'vent.gpu.mem_mb' in container.attrs['Config']['Labels']:
if device not in gpu_status['vent_usage']['mem_mb']:
gpu_status['vent_usage']['mem_mb'][device] = 0
gpu_status['vent_usage']['mem_mb'][device] += int(container.attrs['Config']['Labels']['vent.gpu.mem_mb'])
except Exception as e: # pragma: no cover
logger.error("Could not get running jobs " + str(e))
port = '3476'
# default docker gateway
host = '172.17.0.1'
result = template.option('nvidia-docker-plugin', 'port')
if result[0]:
port = result[1]
result = template.option('nvidia-docker-plugin', 'host')
if result[0]:
host = result[1]
else:
try:
# now just requires ip, ifconfig
route = check_output(('ip', 'route')).split('\n')
default = ''
# grab the default network device.
for device in route:
if 'default' in device:
default = device.split()[4]
break
# grab the IP address for the default device
ip_addr = check_output(('ifconfig', default))
ip_addr = ip_addr.split('\n')[1].split()[1]
host = ip_addr
except Exception as e: # pragma: no cover
logger.error("Something with the ip addresses"
"went wrong " + str(e))
    # have to get the info separately to determine how much memory is available
nd_url = 'http://' + host + ':' + port + '/v1.0/gpu/info/json'
try:
r = requests.get(nd_url)
if r.status_code == 200:
status = r.json()
for i, device in enumerate(status['Devices']):
gm = int(round(math.log(int(device['Memory']['Global']), 2)))
gpu_status[i] = {'global_memory': 2**gm,
'cores': device['Cores']}
else:
usage = (False, "Unable to get GPU usage request error code: " +
str(r.status_code))
except Exception as e: # pragma: no cover
usage = (False, "Error: " + str(e))
# get actual status of each gpu
nd_url = 'http://' + host + ':' + port + '/v1.0/gpu/status/json'
try:
r = requests.get(nd_url)
if r.status_code == 200:
status = r.json()
for i, device in enumerate(status['Devices']):
if i not in gpu_status:
gpu_status[i] = {}
gpu_status[i]['utilization'] = device['Utilization']
gpu_status[i]['memory'] = device['Memory']
gpu_status[i]['processes'] = device['Processes']
usage = (True, gpu_status)
else:
usage = (False, "Unable to get GPU usage request error code: " +
str(r.status_code))
except Exception as e: # pragma: no cover
usage = (False, "Error: " + str(e))
return usage
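# Hedged sketch of GpuUsage() output when both nvidia-docker-plugin endpoints
# respond (indices and numbers illustrative):
#   (True, {'vent_usage': {'dedicated': ['0'], 'mem_mb': {'1': 1024}},
#           0: {'global_memory': 4096, 'cores': 1664, 'utilization': {...},
#               'memory': {...}, 'processes': [...]}})
# On any request failure it returns (False, "<error message>").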
def Images(vent=True):
""" Get images that are build, by default limit to vent images """
images = []
# TODO needs to also check images in the manifest that couldn't have the
# label added
try:
d_client = docker.from_env()
if vent:
i = d_client.images.list(filters={'label': 'vent'})
else:
i = d_client.images.list()
for image in i:
images.append((image.tags[0], image.short_id))
except Exception as e: # pragma: no cover
logger.error("Something with the Images went wrong " + str(e))
return images
def Jobs():
"""
Get the number of jobs that are running and finished, and the number of
total tools running and finished for those jobs
"""
jobs = [0, 0, 0, 0]
# get running jobs
try:
d_client = docker.from_env()
c = d_client.containers.list(all=False,
filters={'label': 'vent-plugin'})
files = []
for container in c:
jobs[1] += 1
if 'file' in container.attrs['Config']['Labels']:
if container.attrs['Config']['Labels']['file'] not in files:
files.append(container.attrs['Config']['Labels']['file'])
jobs[0] = len(files)
except Exception as e: # pragma: no cover
logger.error("Could not get running jobs " + str(e))
# get finished jobs
try:
d_client = docker.from_env()
c = d_client.containers.list(all=True,
filters={'label': 'vent-plugin',
'status': 'exited'})
file_names = []
tool_names = []
finished_jobs = []
path_dirs = PathDirs()
manifest = os.path.join(path_dirs.meta_dir, "status.json")
if os.path.exists(manifest):
file_status = 'a'
else:
file_status = 'w'
# get a list of past jobs' file names if status.json exists
if file_status == 'a':
with open(manifest, 'r') as infile:
for line in infile:
finished_jobs.append(json.loads(line))
# get a list of file names so we can check against each container
file_names = [d['FileName'] for d in finished_jobs]
# multiple tools can run on 1 file. Use a tuple to status check
tool_names = [(d['FileName'], d['VentPlugin'])
for d in finished_jobs]
for container in c:
jobs[3] += 1
if 'file' in container.attrs['Config']['Labels']:
# make sure the file name and the tool tup exists because
# multiple tools can run on 1 file.
if (container.attrs['Config']['Labels']['file'],
container.attrs['Config']['Labels']['vent.name']) not in \
tool_names:
# TODO figure out a nicer way of getting desired values
# from containers.attrs.
new_file = {}
new_file['FileName'] = \
container.attrs['Config']['Labels']['file']
new_file['VentPlugin'] = \
container.attrs['Config']['Labels']['vent.name']
new_file['StartedAt'] = \
container.attrs['State']['StartedAt']
new_file['FinishedAt'] = \
container.attrs['State']['FinishedAt']
new_file['ID'] = \
container.attrs['Id'][:12]
# create/append a json file with all wanted information
with open(manifest, file_status) as outfile:
json.dump(new_file, outfile)
outfile.write("\n")
            # delete any containers labeled with 'vent-plugin'
if 'vent-plugin' in container.attrs['Config']['Labels']:
container.remove()
        # add one extra to account for the file that just finished if status.json
        # was just created, since file_names is collected near the beginning
if file_status == 'w' and len(file_names) == 1:
jobs[2] = len(set(file_names)) + 1
else:
jobs[2] = len(set(file_names))
jobs[3] = jobs[3] - jobs[1]
except Exception as e: # pragma: no cover
logger.error("Could not get finished jobs " + str(e))
return tuple(jobs)
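# Hedged note on the 4-tuple returned by Jobs(); based on the counters above it
# appears to be (unique files across running jobs, running tool containers,
# unique files across finished jobs, finished tool containers minus running ones).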
def Tools(**kargs):
""" Get tools that exist in the manifest """
path_dirs = PathDirs(**kargs)
manifest = os.path.join(path_dirs.meta_dir, "plugin_manifest.cfg")
template = Template(template=manifest)
tools = template.sections()
return tools[1]
def Services(core, vent=True, external=False, **kargs):
"""
    Get services that have exposed ports. The param core is expected to be True
    or False and selects which type of services to return. By default, results
    are limited to vent containers and to processes not running externally; if
    not limited to vent containers, core is ignored.
"""
    services =
== 3 and self.dim_L == 6
self.T_min = self.T_initial
self.L_min = self.L_initial
self.S_min = self.S_initial
self.x = self.pack(self.T_min, self.L_min, self.S_min)
self.n = self.x.size()
self.minimizer = lbfgs.run(
target_evaluator = self,
termination_params = lbfgs.termination_parameters(
max_iterations = max_iterations,
max_calls = int(max_iterations*1.5)),
exception_handling_params =
lbfgs.exception_handling_parameters(
ignore_line_search_failed_step_at_lower_bound = True,
ignore_line_search_failed_step_at_upper_bound = True,
ignore_line_search_failed_maxfev = True)
)
self.compute_functional_and_gradients()
del self.x
def pack(self, T, L, S):
v = []
if (self.refine_T): v += list(flex.double([T]))
if (self.refine_L): v += list(L)
if (self.refine_S): v += list(S)
return flex.double(tuple(v))
def unpack_x(self):
i = 0
if (self.refine_T):
self.T_min = tuple(self.x)[i:self.dim_T]
i = self.dim_T
if (self.refine_L):
self.L_min = tuple(self.x)[i:i+self.dim_L]
i += self.dim_L
if (self.refine_S):
self.S_min = tuple(self.x)[i:i+self.dim_S]
def compute_functional_and_gradients(self):
self.unpack_x()
manager = tls_from_uiso_target_and_grads(self.T_min[0],
self.L_min,
self.S_min,
self.origin,
self.sites,
self.uiso)
self.f = manager.target()
self.g = self.pack(manager.grad_T(), manager.grad_L(), manager.grad_S())
return self.f, self.g
#######
class tls_xray_target_minimizer(object):
def __init__(self,
fmodel,
tlsos_initial,
refine_T,
refine_L,
refine_S,
selections,
selections_1d,
max_iterations,
run_finite_differences_test = False,
correct_adp = True):
adopt_init_args(self, locals())
fmodel.xray_structure.scatterers().flags_set_grads(state=False)
xray.set_scatterer_grad_flags(scatterers = fmodel.xray_structure.scatterers(),
u_aniso = True)
if(self.run_finite_differences_test): self.correct_adp = False
self.fmodel_copy = self.fmodel.deep_copy()
self.target_functor = self.fmodel_copy.target_functor()
self.run_finite_differences_test_counter = 0
self.T_initial = []
self.L_initial = []
self.S_initial = []
self.origins = []
for tlso_ in tlsos_initial:
self.T_initial.append(tlso_.t)
self.L_initial.append(tlso_.l)
self.S_initial.append(tlso_.s)
self.origins.append(tlso_.origin)
self.counter = 0
self.n_groups = len(self.T_initial)
self.dim_T = len(self.T_initial[0])
self.dim_L = len(self.L_initial[0])
self.dim_S = len(self.S_initial[0])
self.T_min = self.T_initial
self.L_min = self.L_initial
self.S_min = self.S_initial
self.x = self.pack(self.T_min, self.L_min, self.S_min)
self.minimizer = lbfgs.run(
target_evaluator = self,
core_params = lbfgs.core_parameters(maxfev = 10),
termination_params = lbfgs.termination_parameters(
min_iterations = max_iterations,
max_calls = int(max_iterations*1.5)),
exception_handling_params = lbfgs.exception_handling_parameters(
ignore_line_search_failed_step_at_lower_bound = True,
ignore_line_search_failed_step_at_upper_bound = True,
ignore_line_search_failed_maxfev = True))
self.compute_functional_and_gradients()
del self.x
self.tlsos_result = generate_tlsos(
selections = self.selections,
xray_structure = self.fmodel.xray_structure,
T = self.T_min,
L = self.L_min,
S = self.S_min)
def pack(self, T, L, S):
v = []
for Ti,Li,Si in zip(T,L,S):
if (self.refine_T): v += list(Ti)
if (self.refine_L): v += list(Li)
if (self.refine_S): v += list(Si)
return flex.double(tuple(v))
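  # Packed layout sketch: for each TLS group the refined values are concatenated
  # as T (6), then L (6), then S (9), so with all three flags enabled the LBFGS
  # parameter vector holds 21 values per group.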
def unpack_x(self):
i = 0
T_min = []
L_min = []
S_min = []
for j in xrange(self.n_groups):
if (self.refine_T):
self.T_min[j] = tuple(self.x)[i:i+self.dim_T]
i += self.dim_T
if (self.refine_L):
self.L_min[j] = tuple(self.x)[i:i+self.dim_L]
i += self.dim_L
if (self.refine_S):
self.S_min[j] = tuple(self.x)[i:i+self.dim_S]
i += self.dim_S
def compute_functional_and_gradients(self):
self.counter += 1
self.unpack_x()
tlsos = generate_tlsos(selections = self.selections,
xray_structure = self.fmodel_copy.xray_structure,
T = self.T_min,
L = self.L_min,
S = self.S_min)
update_xray_structure_with_tls(
xray_structure = self.fmodel_copy.xray_structure,
selections = self.selections,
selections_1d = self.selections_1d,
tlsos = tlsos,
correct_adp = self.correct_adp)
self.fmodel_copy.update_xray_structure(update_f_calc=True)
t_r = self.target_functor(compute_gradients=True)
self.f = t_r.target_work()
grad_manager = tls_xray_grads(
target_result=t_r,
selections=self.selections,
tlsos=tlsos)
self.g = self.pack(grad_manager.grad_T,
grad_manager.grad_L,
grad_manager.grad_S)
if(self.run_finite_differences_test and
self.run_finite_differences_test_counter < 2):
tolerance = 1.e-3
self.run_finite_differences_test_counter += 1
GT,GL,GS = finite_differences_grads_of_xray_target_wrt_tls(
target_functor=self.target_functor,
T=self.T_min,
L=self.L_min,
S=self.S_min,
origins=self.origins,
selections=self.selections,
delta=0.00001)
format = "%10.6f %10.6f %10.6f %10.6f %10.6f %10.6f"
formats="%10.6f %10.6f %10.6f %10.6f %10.6f %10.6f %10.6f %10.6f %10.6f"
for m1,m2 in zip(grad_manager.grad_T, GT):
if(0):
print "T1=" + format % (m1[0],m1[1],m1[2],m1[3],m1[4],m1[5])
print "T2=" + format % (m2[0],m2[1],m2[2],m2[3],m2[4],m2[5])
assert approx_equal(m1,m2,tolerance)
for m1,m2 in zip(grad_manager.grad_L, GL):
if(0):
print "L1=" + format % (m1[0],m1[1],m1[2],m1[3],m1[4],m1[5])
print "L2=" + format % (m2[0],m2[1],m2[2],m2[3],m2[4],m2[5])
assert approx_equal(m1,m2,tolerance)
for m1,m2 in zip(grad_manager.grad_S, GS):
if(0):
print "S1=" + formats %\
(m1[0],m1[1],m1[2],m1[3],m1[4],m1[5],m1[6],m1[7],m1[8])
print "S2=" + formats %\
(m2[0],m2[1],m2[2],m2[3],m2[4],m2[5],m2[6],m2[7],m2[8])
assert approx_equal(m1,m2,tolerance)
return self.f, self.g
class tls_xray_grads(object):
def __init__(self, target_result, selections, tlsos):
self.grad_T = []
self.grad_L = []
self.grad_S = []
d_target_d_uaniso = target_result.gradients_wrt_atomic_parameters(
u_aniso=True)
for sel, tlso in zip(selections, tlsos):
d_target_d_tls_manager = d_target_d_tls(
sites=target_result.manager.xray_structure.sites_cart().select(sel),
origin = tlso.origin,
d_target_d_uaniso = d_target_d_uaniso.select(sel),
        scale_l_and_s = True,  # False will break f.d. test
use_trace_s_zero_constraint = True)
self.grad_T.append(list(d_target_d_tls_manager.grad_T()))
self.grad_L.append(list(d_target_d_tls_manager.grad_L()))
self.grad_S.append(list(d_target_d_tls_manager.grad_S()))
def update_xray_structure_with_tls(xray_structure,
selections,
tlsos,
selections_1d = None,
correct_adp = True):
global time_update_xray_structure_with_tls
timer = user_plus_sys_time()
u_cart_from_tls_ = u_cart_from_tls(sites_cart = xray_structure.sites_cart(),
selections = selections,
tlsos = tlsos)
xray_structure.set_u_cart(u_cart=u_cart_from_tls_, selection = selections_1d)
if(correct_adp): xray_structure.tidy_us(u_min = 1.e-6)
time_update_xray_structure_with_tls += timer.elapsed()
def split_u(xray_structure, tls_selections, offset):
global time_split_u
timer = user_plus_sys_time()
uc = xray_structure.unit_cell()
u_iso = xray_structure.scatterers().extract_u_iso()
u_eq_1 = xray_structure.extract_u_iso_or_u_equiv()
for tls_selection in tls_selections:
u_iso_sel = u_iso.select(tls_selection)
u_iso_min = flex.min(u_iso_sel)
if(offset):
offset_ = adptbx.b_as_u(5.0)
else: offset_ = 0.0
if u_iso_min >= offset_:
u_iso_min = u_iso_min - offset_
t = adptbx.u_iso_as_u_star(uc, u_iso_min)
for i_seq in tls_selection:
sc = xray_structure.scatterers()[i_seq]
assert sc.u_iso == u_iso[i_seq]
u_iso_new = sc.u_iso - u_iso_min
assert u_iso_new >= 0.0
sc.u_iso = u_iso_new
assert sc.flags.use_u_aniso()
assert sc.flags.use_u_iso()
if(sc.u_star == (-1.0,-1.0,-1.0,-1.0,-1.0,-1.0)):
sc.u_star = t
else:
x = flex.double(sc.u_star)
y = flex.double(t)
z = list(x + y)
sc.u_star = z
u_iso = xray_structure.scatterers().extract_u_iso().select(
xray_structure.use_u_iso())
assert (u_iso < 0.0).count(True) == 0
u_eq_2 = xray_structure.extract_u_iso_or_u_equiv()
assert approx_equal(u_eq_1, u_eq_2)
time_split_u += timer.elapsed()
def tls_from_u_cart(xray_structure,
tlsos_initial,
tls_selections,
number_of_macro_cycles = 100,
max_iterations = 100):
global time_tls_from_u_cart
timer = user_plus_sys_time()
uc = xray_structure.unit_cell()
xray_structure.tidy_us(u_min = 1.e-6)
ueq = xray_structure.extract_u_iso_or_u_equiv()
assert (ueq < 0.0).count(True) == 0
u_cart = xray_structure.scatterers().extract_u_cart(uc)
for tls_selection in tls_selections:
u_cart_selected = u_cart.select(tls_selection)
assert adptbx.is_positive_definite(u_cart_selected,1.e-6).count(False)==0
xray_structure.tidy_us(u_min = 1.e-6)
t = []
l = []
s = []
lim_l = 10.0
for tls_selection, tlso in zip(tls_selections, tlsos_initial):
t.append( tlso.t )
#if(abs(tlso.t[0]) < eps and abs(tlso.t[1]) < eps and abs(tlso.t[2]) < eps and
# abs(tlso.t[3]) < eps and abs(tlso.t[4]) < eps and abs(tlso.t[5]) < eps):
# t.append( t_from_u_cart(u_cart.select(tls_selection), 1.e-6) )
#else:
# t.append( tlso.t )
#l.append( [0,0,0,0,0,0] )
if abs(tlso.l[0])>lim_l: l1 = tlso.l[0]/5.
else: l1 = tlso.l[0]
if abs(tlso.l[1])>lim_l: l2 = tlso.l[1]/5.
else: l2 = tlso.l[1]
if abs(tlso.l[2])>lim_l: l3 = tlso.l[2]/5.
else: l3 = tlso.l[2]
if abs(tlso.l[3])>lim_l: l4 = tlso.l[3]/5.
else: l4 = tlso.l[3]
if abs(tlso.l[4])>lim_l: l5 = tlso.l[4]/5.
else: l5 = tlso.l[4]
if abs(tlso.l[5])>lim_l: l6 = tlso.l[5]/5.
else: l6 = tlso.l[5]
l.append( [l1,l2,l3,l4,l5,l6] )
s.append( tlso.s )
tlsos = generate_tlsos(selections = tls_selections,
xray_structure = xray_structure,
T = t, L = l, S = s)
#for rt,rl,rs in [[0,1,1],[1,0,0]]*3:
for rt,rl,rs in [[0,1,1],[1,0,0],[1,1,1]]:
tlsos_ = tls_from_uanisos(xray_structure = xray_structure,
selections = tls_selections,
tlsos_initial = tlsos,
number_of_macro_cycles = number_of_macro_cycles,
max_iterations = max_iterations,
refine_T = rt,
refine_L = rl,
refine_S = rs,
verbose = -1,
out = None)
tlsos = tlsos_
t = []
l = []
s = []
for tlso in tlsos:
t.append( tlso.t )
if abs(tlso.l[0])>lim_l: l1 = lim_l/5.
else: l1 = tlso.l[0]
if abs(tlso.l[1])>lim_l: l2 = lim_l/5.
else: l2 = tlso.l[1]
if abs(tlso.l[2])>lim_l: l3 = lim_l/5.
else: l3 = tlso.l[2]
if abs(tlso.l[3])>lim_l: l4 = lim_l/5.
else: l4 = tlso.l[3]
if abs(tlso.l[4])>lim_l: l5 = lim_l/5.
else: l5 = tlso.l[4]
if abs(tlso.l[5])>lim_l: l6 = lim_l/5.
else: l6 = tlso.l[5]
l.append( [l1,l2,l3,l4,l5,l6] )
s.append( tlso.s )
tlsos = generate_tlsos(selections = tls_selections,
xray_structure = xray_structure,
T = t, L = l, S = s)
time_tls_from_u_cart += timer.elapsed()
return tlsos
class tls_refinement(object):
def __init__(self,
fmodel,
model,
selections,
selections_1d,
refine_T,
refine_L,
refine_S,
number_of_macro_cycles,
max_number_of_iterations,
start_tls_value = None,
run_finite_differences_test = False,
eps = 1.e-6,
out = None,
macro_cycle = None,
verbose = True):
global time_tls_total
timer = user_plus_sys_time()
if(out is None): out = sys.stdout
prefix = "TLS refinement:"
fmodel.info().show_targets(text = prefix+" start model", out = out)
fmodel.xray_structure.show_u_statistics(text = prefix+" start model",
out = out)
xrs = fmodel.xray_structure
xrs.tidy_us(u_min = 1.e-6)
if(start_tls_value is not None):
try:
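      # abs(... + 0) raises unless start_tls_value is numeric; the except branch
      # below then assumes start_tls_value is already a list of TLS objects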
crash_or_not = abs(start_tls_value + 0)
tlsos = generate_tlsos(value = start_tls_value,
selections = selections,
xray_structure = xrs)
except Exception:
tlsos = start_tls_value
else:
tlsos = tls_from_u_cart(xray_structure = xrs,
tlsos_initial = model.tls_groups.tlsos,
tls_selections = selections,
number_of_macro_cycles = 100,
max_iterations = 100)
    if (verbose):
show_tls(tlsos = tlsos, text = prefix+" start parameters",out = out)
for macro_cycle in range(1, number_of_macro_cycles+1):
print >> out
prefix = "TLS refinement: after macrocycle "+str(macro_cycle)
minimized = tls_xray_target_minimizer(
fmodel = fmodel,
tlsos_initial = tlsos,
refine_T = refine_T,
refine_L = refine_L,
refine_S = refine_S,
selections = selections,
selections_1d = selections_1d,
max_iterations = max_number_of_iterations,
run_finite_differences_test = run_finite_differences_test)
xrs = minimized.fmodel_copy.xray_structure
xrs.show_u_statistics(text = prefix, out = out)
if(verbose):
show_tls(tlsos = minimized.tlsos_result, text = prefix, out = out)
fmodel.update_xray_structure(xray_structure = xrs,
update_f_calc = True)
fmodel.info().show_targets(text = prefix, out = out)
if(xrs.is_positive_definite_u().count(False) > 0):
xrs.tidy_us(u_min = 1.e-6)
xrs.show_u_statistics(
text = prefix+": after making positive definite",
out = out)
fmodel.update_xray_structure(xray_structure = xrs,
update_f_calc = True)
fmodel.info().show_targets(text=prefix+": after making positive definite",
out = out)
tlsos = make_tlso_compatible_with_u_positive_definite(
tlsos = minimized.tlsos_result,
xray_structure = xrs.deep_copy_scatterers(),
selections = selections,
max_iterations = 10,
number_of_u_nonpositive_definite = 0,
eps = eps,
refine_T = refine_T,
refine_L = refine_L,
refine_S = refine_S,
      out =
# Repository: chunmk/modeldb
# -*- coding: utf-8 -*-
from __future__ import print_function
import ast
import copy
import glob
import os
import pathlib2
import pprint
import shutil
import sys
import tempfile
import time
import warnings
import zipfile
import requests
from .entity import _ModelDBEntity, _MODEL_ARTIFACTS_ATTR_KEY
from .deployable_entity import _DeployableEntity
from .._protos.public.common import CommonService_pb2 as _CommonCommonService
from .._protos.public.modeldb import CommonService_pb2 as _CommonService
from .._protos.public.modeldb import ExperimentRunService_pb2 as _ExperimentRunService
from ..external import six
from ..external.six.moves import cPickle as pickle # pylint: disable=import-error, no-name-in-module
from .._internal_utils import (
_artifact_utils,
_pip_requirements_utils,
_request_utils,
_utils,
importer,
)
from .._dataset_versioning import (
dataset as _dataset,
dataset_version as _dataset_version,
)
from .. import _repository
from .._repository import commit as commit_module
from .. import data_types
from .. import deployment
from .. import utils
from ..environment import _Environment, Python
class ExperimentRun(_DeployableEntity):
"""
Object representing a machine learning Experiment Run.
This class provides read/write functionality for Experiment Run metadata.
There should not be a need to instantiate this class directly; please use
:meth:`Client.set_experiment_run() <verta.client.Client.set_experiment_run>`.
Attributes
----------
id : str
ID of this Experiment Run.
name : str
Name of this Experiment Run.
has_environment : bool
Whether there is an environment associated with this Experiment Run.
"""
def __init__(self, conn, conf, msg):
super(ExperimentRun, self).__init__(conn, conf,
_ExperimentRunService, "experiment-run", msg)
def __repr__(self):
self._refresh_cache()
run_msg = self._msg
return '\n'.join((
"name: {}".format(run_msg.name),
"url: {}://{}/{}/projects/{}/exp-runs/{}".format(
self._conn.scheme, self._conn.socket, self.workspace, run_msg.project_id, self.id),
"date created: {}".format(
_utils.timestamp_to_str(int(run_msg.date_created))),
"date updated: {}".format(
_utils.timestamp_to_str(int(run_msg.date_updated))),
"description: {}".format(run_msg.description),
"tags: {}".format(run_msg.tags),
"attributes: {}".format(
_utils.unravel_key_values(run_msg.attributes)),
"id: {}".format(run_msg.id),
"experiment id: {}".format(run_msg.experiment_id),
"project id: {}".format(run_msg.project_id),
"hyperparameters: {}".format(
_utils.unravel_key_values(run_msg.hyperparameters)),
"observations: {}".format(
_utils.unravel_observations(run_msg.observations)),
"metrics: {}".format(_utils.unravel_key_values(run_msg.metrics)),
"artifact keys: {}".format(
_utils.unravel_artifacts(run_msg.artifacts)),
))
def _update_cache(self):
self._hyperparameters = _utils.unravel_key_values(
self._msg.hyperparameters)
self._metrics = _utils.unravel_key_values(self._msg.metrics)
@property
def workspace(self):
self._refresh_cache()
proj_id = self._msg.project_id
response = _utils.make_request(
"GET",
"{}://{}/api/v1/modeldb/project/getProjectById".format(
self._conn.scheme, self._conn.socket),
self._conn, params={'id': proj_id},
)
_utils.raise_for_http_error(response)
project_json = _utils.body_to_json(response)['project']
if 'workspace_id' not in project_json:
# workspace is OSS default
return self._conn._OSS_DEFAULT_WORKSPACE
else:
return self._conn.get_workspace_name_from_legacy_id(project_json['workspace_id'])
@property
def name(self):
self._refresh_cache()
return self._msg.name
@property
def has_environment(self):
self._refresh_cache()
return self._msg.environment.HasField("python") or self._msg.environment.HasField("docker")
@classmethod
def _generate_default_name(cls):
return "Run {}".format(_utils.generate_default_name())
@classmethod
def _get_proto_by_id(cls, conn, id):
Message = _ExperimentRunService.GetExperimentRunById
msg = Message(id=id)
response = conn.make_proto_request("GET",
"/api/v1/modeldb/experiment-run/getExperimentRunById",
params=msg)
return conn.maybe_proto_response(response, Message.Response).experiment_run
@classmethod
def _get_proto_by_name(cls, conn, name, expt_id):
Message = _ExperimentRunService.GetExperimentRunByName
msg = Message(experiment_id=expt_id, name=name)
response = conn.make_proto_request("GET",
"/api/v1/modeldb/experiment-run/getExperimentRunByName",
params=msg)
return conn.maybe_proto_response(response, Message.Response).experiment_run
@classmethod
def _create_proto_internal(cls, conn, ctx, name, desc=None, tags=None, attrs=None, date_created=None):
Message = _ExperimentRunService.CreateExperimentRun
msg = Message(project_id=ctx.proj.id, experiment_id=ctx.expt.id, name=name,
description=desc, tags=tags, attributes=attrs,
date_created=date_created, date_updated=date_created)
response = conn.make_proto_request("POST",
"/api/v1/modeldb/experiment-run/createExperimentRun",
body=msg)
expt_run = conn.must_proto_response(
response, Message.Response).experiment_run
print("created new ExperimentRun: {}".format(expt_run.name))
return expt_run
def _log_artifact(self, key, artifact, artifact_type, extension=None, method=None, overwrite=False):
"""
Logs an artifact to this Experiment Run.
Parameters
----------
key : str
Name of the artifact.
artifact : str or file-like or object
Artifact or some representation thereof.
- If str, then it will be interpreted as a filesystem path, its contents read as bytes,
and uploaded as an artifact.
- If file-like, then the contents will be read as bytes and uploaded as an artifact.
- Otherwise, the object will be serialized and uploaded as an artifact.
artifact_type : int
Variant of `_CommonCommonService.ArtifactTypeEnum`.
extension : str, optional
Filename extension associated with the artifact.
method : str, optional
Serialization method used to produce the bytestream, if `artifact` was already serialized by verta.
overwrite : bool, default False
Whether to allow overwriting an existing artifact with key `key`.
"""
if isinstance(artifact, six.string_types):
            artifact = os.path.expanduser(artifact)
artifact = open(artifact, 'rb')
if hasattr(artifact, 'read') and method is not None: # already a verta-produced stream
artifact_stream = artifact
else:
artifact_stream, method = _artifact_utils.ensure_bytestream(
artifact)
if extension is None:
extension = _artifact_utils.ext_from_method(method)
# calculate checksum
artifact_hash = _artifact_utils.calc_sha256(artifact_stream)
artifact_stream.seek(0)
# determine basename
# The key might already contain the file extension, thanks to our hard-coded deployment
# keys e.g. "model.pkl" and "model_api.json".
if extension is None:
basename = key
elif key.endswith(os.extsep + extension):
basename = key
else:
basename = key + os.extsep + extension
# build upload path from checksum and basename
artifact_path = os.path.join(artifact_hash, basename)
# TODO: incorporate into config
VERTA_ARTIFACT_DIR = os.environ.get('VERTA_ARTIFACT_DIR', "")
VERTA_ARTIFACT_DIR = os.path.expanduser(VERTA_ARTIFACT_DIR)
if VERTA_ARTIFACT_DIR:
print("set artifact directory from environment:")
print(" " + VERTA_ARTIFACT_DIR)
artifact_path = os.path.join(VERTA_ARTIFACT_DIR, artifact_path)
pathlib2.Path(artifact_path).parent.mkdir(
parents=True, exist_ok=True)
# log key to ModelDB
Message = _ExperimentRunService.LogArtifact
artifact_msg = _CommonCommonService.Artifact(key=key,
path=artifact_path,
path_only=True if VERTA_ARTIFACT_DIR else False,
artifact_type=artifact_type,
filename_extension=extension)
msg = Message(id=self.id, artifact=artifact_msg)
data = _utils.proto_to_json(msg)
if overwrite:
response = _utils.make_request("DELETE",
"{}://{}/api/v1/modeldb/experiment-run/deleteArtifact".format(
self._conn.scheme, self._conn.socket),
self._conn, json={'id': self.id, 'key': key})
_utils.raise_for_http_error(response)
response = _utils.make_request("POST",
"{}://{}/api/v1/modeldb/experiment-run/logArtifact".format(
self._conn.scheme, self._conn.socket),
self._conn, json=data)
if not response.ok:
if response.status_code == 409:
raise ValueError("artifact with key {} already exists;"
" consider setting overwrite=True".format(key))
else:
_utils.raise_for_http_error(response)
if VERTA_ARTIFACT_DIR:
print("logging artifact")
with open(artifact_path, 'wb') as f:
shutil.copyfileobj(artifact_stream, f)
print("log complete; file written to {}".format(artifact_path))
else:
self._upload_artifact(key, artifact_stream)
self._clear_cache()
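    # Hedged usage sketch; the enum value is an assumption about
    # _CommonCommonService.ArtifactTypeEnum and the key/path are illustrative:
    #   run._log_artifact("report.csv", "/tmp/report.csv",
    #                     _CommonCommonService.ArtifactTypeEnum.BLOB, overwrite=True)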
def _upload_artifact(self, key, artifact_stream, part_size=_artifact_utils._64MB):
"""
Uploads `artifact_stream` to ModelDB artifact store.
Parameters
----------
key : str
artifact_stream : file-like
part_size : int, default 64 MB
If using multipart upload, number of bytes to upload per part.
"""
# TODO: add to Client config
env_part_size = os.environ.get('VERTA_ARTIFACT_PART_SIZE', "")
try:
part_size = int(float(env_part_size))
except ValueError: # not an int
pass
else:
print("set artifact part size {} from environment".format(part_size))
artifact_stream.seek(0)
if self._conf.debug:
print("[DEBUG] uploading {} bytes ({})".format(
_artifact_utils.get_stream_length(artifact_stream), key))
artifact_stream.seek(0)
# check if multipart upload ok
url_for_artifact = self._get_url_for_artifact(key, "PUT", part_num=1)
if url_for_artifact.multipart_upload_ok:
# TODO: parallelize this
file_parts = iter(lambda: artifact_stream.read(part_size), b'')
for part_num, file_part in enumerate(file_parts, start=1):
print("uploading part {}".format(part_num), end='\r')
# get presigned URL
url = self._get_url_for_artifact(
key, "PUT", part_num=part_num).url
# wrap file part into bytestream to avoid OverflowError
# Passing a bytestring >2 GB (num bytes > max val of int32) directly to
# ``requests`` will overwhelm CPython's SSL lib when it tries to sign the
# payload. But passing a buffered bytestream instead of the raw bytestring
# indicates to ``requests`` that it should perform a streaming upload via
# HTTP/1.1 chunked transfer encoding and avoid this issue.
# https://github.com/psf/requests/issues/2717
part_stream = six.BytesIO(file_part)
# upload part
response = _utils.make_request(
"PUT", url, self._conn, data=part_stream)
_utils.raise_for_http_error(response)
# commit part
url = "{}://{}/api/v1/modeldb/experiment-run/commitArtifactPart".format(
self._conn.scheme,
self._conn.socket,
)
msg = _CommonService.CommitArtifactPart(id=self.id, key=key)
msg.artifact_part.part_number = part_num
msg.artifact_part.etag = response.headers['ETag']
data = _utils.proto_to_json(msg)
# TODO: increase retries
response = _utils.make_request(
"POST", url, self._conn, json=data)
_utils.raise_for_http_error(response)
print()
# complete upload
url = "{}://{}/api/v1/modeldb/experiment-run/commitMultipartArtifact".format(
self._conn.scheme,
self._conn.socket,
)
msg = _CommonService.CommitMultipartArtifact(id=self.id, key=key)
data = _utils.proto_to_json(msg)
response = _utils.make_request("POST", url, self._conn, json=data)
_utils.raise_for_http_error(response)
else:
# upload full artifact
if url_for_artifact.fields:
# if fields were returned by backend, make a POST request and supply them as form fields
response = _utils.make_request(
"POST", url_for_artifact.url, self._conn,
# requests uses the `files` parameter for sending multipart/form-data POSTs.
# https://stackoverflow.com/a/12385661/8651995
# the file contents must be the final form field
# https://docs.aws.amazon.com/AmazonS3/latest/dev/HTTPPOSTForms.html#HTTPPOSTFormFields
files=list(url_for_artifact.fields.items()) + \
[('file', artifact_stream)],
)
else:
response = _utils.make_request(
"PUT", url_for_artifact.url, self._conn, data=artifact_stream)
_utils.raise_for_http_error(response)
print("upload complete ({})".format(key))
def _log_artifact_path(self, key, artifact_path, artifact_type, overwrite=False):
"""
Logs the filesystem path of an artifact to this Experiment Run.
Parameters
----------
key : str
Name of the artifact.
artifact_path : str
Filesystem path of the artifact.
artifact_type : int
Variant of `_CommonCommonService.ArtifactTypeEnum`.
overwrite : bool, default False
Whether to allow overwriting an existing artifact with key `key`.
"""
# log key-path to ModelDB
Message = _ExperimentRunService.LogArtifact
artifact_msg = _CommonCommonService.Artifact(key=key,
path=artifact_path,
path_only=True,
artifact_type=artifact_type)
msg = Message(id=self.id, artifact=artifact_msg)
data = _utils.proto_to_json(msg)
if overwrite:
response = _utils.make_request("DELETE",
"{}://{}/api/v1/modeldb/experiment-run/deleteArtifact".format(
self._conn.scheme, self._conn.socket),
self._conn, json={'id': self.id, 'key': key})
_utils.raise_for_http_error(response)
response = _utils.make_request("POST",
"{}://{}/api/v1/modeldb/experiment-run/logArtifact".format(
self._conn.scheme, self._conn.socket),
self._conn, json=data)
if not response.ok:
if response.status_code == 409:
raise ValueError("artifact with key {} already exists;"
" consider setting overwrite=True".format(key))
else:
_utils.raise_for_http_error(response)
self._clear_cache()
def _get_artifact(self, key):
"""
Gets the artifact with name `key` from this Experiment Run.
If the artifact was originally logged as just a filesystem path, that path will be returned.
Otherwise, bytes representing the artifact object will be returned.
Parameters
----------
key : str
Name of the artifact.
Returns
-------
str or bytes
Filesystem path or bytes representing the artifact.
bool
True if the artifact was only logged as its filesystem path.
"""
# get key-path from ModelDB
Message = _CommonService.GetArtifacts
msg = Message(id=self.id, key=key)
data = _utils.proto_to_json(msg)
response = _utils.make_request("GET",
"{}://{}/api/v1/modeldb/experiment-run/getArtifacts".format(
self._conn.scheme, self._conn.socket),
self._conn, params=data)
_utils.raise_for_http_error(response)
        response_msg =
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =============================================================================
"""Metrics computation."""
import sys
import logging
from collections import Counter
from functools import reduce
from operator import add
from pprint import pformat
from typing import List
import torch
import wandb
logging.basicConfig(
format="%(asctime)s | %(levelname)s | %(name)s | %(message)s",
datefmt="%Y-%m-%d %H:%M:%S",
level=logging.INFO,
stream=sys.stdout,
)
logger = logging.getLogger(__name__)
LBR = "["
RBR = "]"
IN = "IN:"
SL = "SL:"
class Tree:
"""TOP format tree object."""
def __init__(self, entity, subtrees: List = None):
"""Makes a tree node with value entity and subtrees.
        If subtrees is None, the .subtrees attribute will be an empty list.
Args:
entity: intent/slot value, e.g. IN:INTENT
subtrees: list of Tree objects
"""
self.entity = entity
self.subtrees = subtrees
if subtrees is None:
self.subtrees = []
# for per-class metrics
self._counts = Counter([entity])
self._len = 1
if len(self.subtrees) > 0:
self._len += sum(map(len, self.subtrees))
self._counts += reduce(add, (s._counts for s in self.subtrees))
# for __repr__ and __eq__
self._dict_repr = {self.entity: [s._dict_repr for s in self.subtrees]}
def __repr__(self):
return pformat(self._dict_repr)
def __eq__(self, other):
if isinstance(other, dict):
return self._dict_repr == other
if isinstance(other, Tree):
return self._dict_repr == other._dict_repr
raise ValueError(type(other))
def __len__(self):
return self._len
def get_size(self, classes=None):
"""Gets the number of nodes with node.entity in classes.
Args:
classes: optional, a iterable of classes to consider when computing the size
Returns:
if classes argument is not specified, returns the total number of nodes
(same as __len__)
if classes argument is specified, returns only the count of nodes corresponding to classes
"""
if classes is None:
return self.__len__()
_size = sum(self._counts.get(c, 0) for c in classes)
return _size
@property
def counts(self):
return self._counts
@classmethod
def from_tokens(cls, tokens, return_index=False, inside_slot=False):
"""Builds a parsing tree for labeled bracketing score computation.
        The tree is built until the last ] symbol; everything after it is ignored.
Args:
tokens: list of tokens
            return_index: used in recursion to provide token index
            inside_slot: used in recursion to track whether the subtree is inside an SL: bracket
Returns:
Tree object, if return_index == False
tuple (Tree, index), if return_index == True
Raises:
ValueError, if tokens do not represent a valid tree
"""
# every tree should start with
# [ ENTITY_TYPE: ENTITY
if len(tokens) < 3 or tokens[0] != LBR:
raise ValueError(f"Tree starts with {tokens[:4]}")
entity_type = tokens[1]
# ignore invalid subtrees
if entity_type not in [IN, SL]:
raise ValueError(f"Tree starts with {tokens[:4]}")
entity = entity_type + tokens[2] # e.g. IN:INTENT
subtrees = []
slot_value_tokens = []
safety = 1000
i = 3
inside_slot = inside_slot or entity_type == SL
while i < len(tokens):
if safety <= 0:
raise RuntimeError(
"The sequence is too long (>1000 tokens) or "
"there is a bug in the tree building function."
)
safety -= 1
token = tokens[i]
# ignore non-slot values
            # e.g. ignore "Do stuff" in [IN:STUFF Do stuff]
if not inside_slot and token not in [LBR, RBR]:
i += 1
continue
# LBR starts a new subtree
if token == LBR:
subtree, j = cls.from_tokens(
tokens[i:], return_index=True, inside_slot=inside_slot
)
if slot_value_tokens:
subtrees.append(Tree(" ".join(slot_value_tokens)))
slot_value_tokens = []
subtrees.append(subtree)
i += j
continue
# RBR ends the tree, merge slot values into a single leaf if any
# e.g. "stuff value" becomes a single leaf in [IN:GET_STUFF [SL:STUF_VALUE stuff value]]
if token == RBR:
if slot_value_tokens:
subtrees.append(Tree(" ".join(slot_value_tokens)))
slot_value_tokens = []
i += 1
break
# if the token is not a special symbol and inside SL: bracket (probably, nested)
slot_value_tokens.append(token)
i += 1
tree = Tree(entity, subtrees)
if return_index:
return tree, i
return tree
def to_tokens(self):
if not self.subtrees:
return self.entity
return f"[{self.entity} {self.subtrees_to_tokens()}]"
def subtrees_to_tokens(self):
return " ".join([s.to_tokens() for s in self.subtrees])
# Main function
def get_metrics(
pred_tokens,
true_tokens,
monitor_classes,
prefix,
schema_tokenizer,
do_each=False,
verbose=False,
compute_macro=False,
):
"""Computes exact_match and tree-based metrics
Apply prefix to all keys.
    The main purpose of this function is to unify evaluation in PointerModule and cli_utils.evaluate_model()
Args:
pred_tokens: List[List[str]]
true_tokens: List[List[str]]
monitor_classes: List[str]
prefix: str, will be appended to all return dict keys
schema_tokenizer: TopSchemaTokenizer
do_each: bool, if False compute tree path metrics only for monitor_classes[0] and overall
if True compute tree path metrics for all monitor_classes and overall
compute_macro: adds macro-averaged metrics to the return dictionary
Returns:
dictionary with keys
{prefix}_{score_name}
{prefix}_new_classes_{score_name}
cls/{prefix}_{monitor_classes[i]}_{score_name}; if do_each=False then only i == 0, else for each class
for each score_name - key from get_tree_path_scores output dictionary
"""
if verbose:
logger.info(f"Getting metrics for classes {monitor_classes}")
exact_match = sum(int(str(p) == str(l)) for p, l in zip(pred_tokens, true_tokens))
exact_match /= len(true_tokens)
tree_metrics = get_tree_path_metrics(
pred_tokens,
true_tokens,
monitor_classes,
prefix,
do_each,
verbose=verbose,
compute_macro=compute_macro,
)
pred_strs = [schema_tokenizer.detokenize(p) for p in pred_tokens]
true_strs = [schema_tokenizer.detokenize(p) for p in true_tokens]
exact_match_str = sum(int(p == t) for p, t in zip(pred_strs, true_strs)) / len(true_strs)
log_dict = {
f"{prefix}_exact_match": exact_match,
f"{prefix}_exact_match_str": exact_match_str,
**tree_metrics,
}
return log_dict
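# Hedged sketch of the keys get_metrics() produces for prefix="eval" and
# monitor_classes=["SL:LOCATION"] (values omitted):
#   eval_exact_match, eval_exact_match_str,
#   eval_tree_path_precision, eval_tree_path_recall, eval_tree_path_f1,
#   eval_new_classes_tree_path_{precision,recall,f1},
#   cls/eval_SL:LOCATION_tree_path_{precision,recall,f1},
#   plus eval_{precision,recall,f1}_macro when compute_macro=True.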
# Tree path scores
def get_tree_path_metrics(
pred_tokens, true_tokens, monitor_classes, prefix, do_each=False, verbose=False, compute_macro=False,
):
"""Gets metrics for all classes, for monitor classes and for monitor_classes[0].
Apply prefix to all keys.
Args:
pred_tokens: List[List[str]]
true_tokens: List[List[str]]
monitor_classes: List[str]
prefix: str, will be appended to all return dict keys
do_each: bool, if False compute tree path metrics only for monitor_classes[0] and overall
if True compute tree path metrics for all monitor_classes and overall
compute_macro: adds macro-averaged metrics to the return dictionary
Returns:
dictionary with keys
{prefix}_{score_name}
{prefix}_new_classes_{score_name}
cls/{prefix}_{monitor_classes[i]}_{score_name}, if do_each=False then i is only == 0
for each score_name - key from get_tree_path_scores output dictionary
"""
if verbose:
logger.info("Computing tree path scores.")
tree_path_scores = get_tree_path_scores(
pred_tokens=pred_tokens, true_tokens=true_tokens, verbose=verbose
)
tree_path_scores = {f"{prefix}_{k}": v for k, v in tree_path_scores.items()}
if verbose:
logger.info(
"Finished computing tree path scores overall. Starting to compute per-class scores."
)
if monitor_classes is not None:
if verbose:
logger.info(f"Computing scores for classes {monitor_classes}")
_new_classes_scores = get_tree_path_scores(
pred_tokens=pred_tokens, true_tokens=true_tokens, classes=monitor_classes
)
_new_classes_scores = {
f"{prefix}_new_classes_{k}": v for k, v in _new_classes_scores.items()
}
tree_path_scores.update(_new_classes_scores)
sum_p, sum_r, sum_f1 = 0, 0, 0
for i, class_ in enumerate(monitor_classes):
if verbose:
logger.info(f"Computing scores for the class {class_}")
if i > 0 and not do_each:
break
_class_score = get_tree_path_scores(
pred_tokens=pred_tokens, true_tokens=true_tokens, classes=[class_]
)
_class_score_prefixed = {f"cls/{prefix}_{class_}_{k}": v for k, v in _class_score.items()}
tree_path_scores.update(_class_score_prefixed)
sum_p += _class_score["tree_path_precision"]
sum_r += _class_score["tree_path_recall"]
sum_f1 += _class_score["tree_path_f1"]
if compute_macro:
p = sum_p / len(monitor_classes)
r = sum_r / len(monitor_classes)
f1 = sum_f1 / len(monitor_classes)
tree_path_scores[f"{prefix}_precision_macro"] = p
tree_path_scores[f"{prefix}_recall_macro"] = r
tree_path_scores[f"{prefix}_f1_macro"] = f1
return tree_path_scores
def get_tree_path_scores(pred_tokens, true_tokens, classes=None, verbose=False):
"""
Args:
pred_tokens: list of lists of tokens
true_tokens: list of lists of tokens
Returns:
dictionary with keys
predicted_paths
expected_paths
tree_path_precision
tree_path_recall
tree_path_f1
"""
if verbose:
logger.info("Computing tree path scores")
pred_paths_lst, true_paths_lst = [], []
for pred, true in zip(pred_tokens, true_tokens):
if verbose:
logger.info(f"(get_tree_path_scores) Predicted: {pred}")
logger.info(f"(get_tree_path_scores) Expected: {true}")
try:
pred_paths = _get_paths_with_values(Tree.from_tokens(pred))
except ValueError:
pred_paths = dict()
# we need to build true tree even if pred tree is not valid to compute correct n_expected
true_paths = _get_paths_with_values(Tree.from_tokens(true))
pred_paths_lst.append(pred_paths)
true_paths_lst.append(true_paths)
true_positives = 0
n_predicted = 0
n_expected = 0
for pred_paths, true_paths in zip(pred_paths_lst, true_paths_lst):
if classes is None:
n_expected += len(true_paths)
n_predicted += len(pred_paths)
else:
n_expected += len([p for p in true_paths.keys() if any(class_in_path(c, p) for c in classes)])
n_predicted += len([p for p in pred_paths.keys() if any(class_in_path(c, p) for c in classes)])
true_positives += _get_tree_path_matches(pred_paths, true_paths, classes)
precision = 0
if n_predicted > 0:
precision = true_positives / n_predicted
recall = 0
if n_expected > 0:
recall = true_positives / n_expected
f1 = 0
if precision + recall > 0:
f1 = 2 * precision * recall / (precision + recall)
return {
"tree_path_precision": precision,
"tree_path_recall": recall,
"tree_path_f1": f1,
}
def _get_paths_with_values(tree, max_depth=10) -> dict:
"""Goes over the | |
# ecm_7_008 R0(C0R1(Q0R2))(C1R3)
# def RaCRaQRbbaCRb(w, R0, C0, R1, Q0_pair, R2, C1, R3 ):
R0, C0, R1 = para_list[:3]
Q0_pair = para_list[3:5]
        R2, C1, R3 = para_list[5:]
z_list = [RaCRaQRbbaCRb(i, R0, C0, R1, Q0_pair, R2, C1, R3) for i in w]
elif serial == 9:
# ecm_7_009 R0(C0R1)(C1R2)(C2R3)
# def RaCRbaCRbaCRb(w, R0, C0, R1, C1, R2, C2, R3):
R0, C0, R1, C1, R2, C2, R3 = para_list
z_list = [RaCRbaCRbaCRb(i, R0, C0, R1, C1, R2, C2, R3) for i in w]
elif serial == 10:
# ecm_7_010 R0(C0R1)(Q0R2)(C1R3)
# def RaCRbaQRbaCRb(w, R0, C0, R1, Q0_pair, R2, C1, R3):
R0, C0, R1 = para_list[:3]
Q0_pair = para_list[3:5]
R2, C1, R3 = para_list[5:]
z_list = [RaCRbaQRbaCRb(i, R0, C0, R1, Q0_pair, R2, C1, R3) for i in w]
elif serial == 11:
# ecm_7_011 R0(Q0R1(C0R2)(O0R3))
# def RaQRaCRbaORbb(w, R0, Q0_pair, R1, C0, R2, O0, R3):
R0 = para_list[0]
Q0_pair = para_list[1:3]
R1 = para_list[3]
C0, R2, O0, R3 = para_list[4:]
z_list = [RaQRaCRbaORbb(i, R0, Q0_pair, R1, C0, R2, O0, R3) for i in w]
elif serial == 12:
# ecm_7_012 R0(Q0R1(C0R2)(L0R3))
# def RaQRaCRbaLRbb(w, R0, Q0_pair, R1, C0, R2, L0, R3):
R0 = para_list[0]
Q0_pair = para_list[1:3]
R1, C0, R2, L0, R3 = para_list[3:]
z_list = [RaQRaCRbaLRbb(i, R0, Q0_pair, R1, C0, R2, L0, R3) for i in w]
elif serial == 13:
# ecm_7_013 R0(Q0R1)(Q1R2)(C0R3)
# def RaQRbaQRbaCRb(w, R0, Q0_pair, R1, Q1_pair, R2, C0, R3):
R0 = para_list[0]
Q0_pair = para_list[1:3]
R1 = para_list[3]
Q1_pair = para_list[4:6]
R2, C0, R3 = para_list[6:]
z_list = [RaQRbaQRbaCRb(i, R0, Q0_pair, R1, Q1_pair, R2, C0, R3) for i in w]
elif serial == 14:
# ecm_7_014 R0(Q0R1(L0R2)(L1R3))
# def RaQRaLRbaLRbb(w, R0, Q0_pair, R1, L0, R2, L1, R3):
R0 = para_list[0]
Q0_pair = para_list[1:3]
R1, L0, R2, L1, R3 = para_list[3:]
z_list = [RaQRaLRbaLRbb(i, R0, Q0_pair, R1, L0, R2, L1, R3) for i in w]
elif serial == 15:
# ecm_7_015 R0(Q0R1(Q1R2)(C0R3))
# def RaQRaQRbaCRbb(w, R0, Q0_pair, R1, Q1_pair, R2, C0, R3):
R0 = para_list[0]
Q0_pair = para_list[1:3]
R1 = para_list[3]
Q1_pair = para_list[4:6]
R2, C0, R3 = para_list[6:]
z_list = [RaQRaQRbaCRbb(i, R0, Q0_pair, R1, Q1_pair, R2, C0, R3) for i in w]
elif serial == 16:
# ecm_7_016 R0(Q0R1(Q1R2)(O0R3))
# def RaQRaQRbaORbb(w, R0, Q0_pair, R1, Q1_pair, R2, O0, R3):
R0 = para_list[0]
Q0_pair = para_list[1:3]
R1 = para_list[3]
Q1_pair = para_list[4:6]
R2, O0, R3 = para_list[6:]
z_list = [RaQRaQRbaORbb(i, R0, Q0_pair, R1, Q1_pair, R2, O0, R3) for i in w]
elif serial == 17:
# ecm_7_017 R0(Q0R1(C0R2)(C1R3))
# def RaQRaCRbaCRbb(w, R0, Q0_pair, R1, C0, R2, C1, R3):
R0 = para_list[0]
Q0_pair = para_list[1:3]
R1, C0, R2, C1, R3 = para_list[3:]
z_list = [RaQRaCRbaCRbb(i, R0, Q0_pair, R1, C0, R2, C1, R3) for i in w]
elif serial == 18:
# 7_018 R(CR(RW))(QR)
# DPFC: ECM-11 R0(C0R1(R2W0))(Q0R3)
# def RaCRaRWbbaQRb(w, R0, C0, R1, R2, W0, Q0_pair, R3):
R0, C0, R1, R2, W0 = para_list[:5]
Q0_pair = para_list[5:7]
R3 = para_list[7]
z_list = [RaCRaRWbbaQRb(i, R0, C0, R1, R2, W0, Q0_pair, R3) for i in w]
elif element_num == 8:
if serial == 1:
# ecm_8_001 (C0R0(C1R1)(C2R2)(C3R3))
# def aCRaCRbaCRbaCRbb(w, C0, R0, C1, R1, C2, R2, C3, R3):
C0, R0, C1, R1, C2, R2, C3, R3 = para_list
z_list = [aCRaCRbaCRbaCRbb(i, C0, R0, C1, R1, C2, R2, C3, R3) for i in w]
elif serial == 2:
# ecm_8_002 (C0R0(L0R1)(L1R2)(L2R3))
# def aCRaLRbaLRbaLRbb(w, C0, R0, L0, R1, L1, R2, L2, R3):
C0, R0, L0, R1, L1, R2, L2, R3 = para_list
z_list = [aCRaLRbaLRbaLRbb(i, C0, R0, L0, R1, L1, R2, L2, R3) for i in w]
elif serial == 3:
# ecm_8_003 L0R0(C0(R1(Q0R2)))(C1R3)
# def LRaCaRaQRbbbaCRb(w, L0, R0, C0, R1, Q0_pair, R2, C1, R3):
L0, R0, C0, R1 = para_list[:4]
Q0_pair = para_list[4:6]
R2, C1, R3 = para_list[6:]
z_list = [LRaCaRaQRbbbaCRb(i, L0, R0, C0, R1, Q0_pair, R2, C1, R3) for i in w]
elif serial == 4:
# ecm_8_004 L0R0(C0R1(Q0R2)(R3W0))
# def LRaCRaQRbaRWbb(w, L0, R0, C0, R1, Q0_pair, R2, R3, W0):
L0, R0, C0, R1 = para_list[:4]
Q0_pair = para_list[4:6]
R2, R3, W0 = para_list[6:]
z_list = [LRaCRaQRbaRWbb(i, L0, R0, C0, R1, Q0_pair, R2, R3, W0) for i in w]
elif serial == 5:
# ecm_8_005 L0R0(C0R1(Q0R2))(C1R3)
# def LRaCRaQRbbaCRb(w, L0, R0, C0, R1, Q0_pair, R2, C1, R3):
L0, R0, C0, R1 = para_list[:4]
Q0_pair = para_list[4:6]
R2, C1, R3 = para_list[6:]
z_list = [LRaCRaQRbbaCRb(i, L0, R0, C0, R1, Q0_pair, R2, C1, R3) for i in w]
elif serial == 6:
# ecm_8_006 L0R0(Q0(R1(L1R2)(C0R3)))
# def LRaQaRaLRbaCRbbb(w, L0, R0, Q0_pair, R1, L1, R2, C0, R3):
L0, R0 = para_list[:2]
Q0_pair = para_list[2:4]
R1, L1, R2, C0, R3 = para_list[4:]
z_list = [LRaQaRaLRbaCRbbb(i, L0, R0, Q0_pair, R1, L1, R2, C0, R3) for i in w]
elif serial == 7:
# ecm_8_007 LR(QR(LR)(CR)) --> L0R0(Q0R1(L1R2)(C0R3))
# def LRaQRaLRbaCRbb(w, L0, R0, Q0_pair, R1, L1, R2, C0, R3):
L0, R0 = para_list[:2]
Q0_pair = para_list[2:4]
R1, L1, R2, C0, R3 = para_list[4:]
z_list = [LRaQRaLRbaCRbb(i, L0, R0, Q0_pair, R1, L1, R2, C0, R3) for i in w]
elif serial == 8:
# ecm_8_008 R0(C0(R1(Q0(R2(C1(R3W0))))))
# def RaCaRaQaRaCaRWbbbbbb(w, R0, C0, R1, Q0_pair, R2, C1, R3, W0):
R0, C0, R1 = para_list[:3]
Q0_pair = para_list[3:5]
R2, C1, R3, W0 = para_list[5:]
z_list = [RaCaRaQaRaCaRWbbbbbb(i, R0, C0, R1, Q0_pair, R2, C1, R3, W0) for i in w]
elif serial == 9:
# ecm_8_009 R(C(R(Q(RW))))(CR) --> R0(C0(R1(Q0(R2W0))))(C1R3)
# def RaCaRaQaRWbbbbaCRb(w, R0, C0, R1, Q0_pair, R2, W0, C1, R3):
R0, C0, R1 = para_list[:3]
Q0_pair = para_list[3:5]
R2, W0, C1, R3 = para_list[5:]
z_list = [RaCaRaQaRWbbbbaCRb(i, R0, C0, R1, Q0_pair, R2, W0, C1, R3) for i in w]
elif serial == 10:
# ecm_8_010 L0R0(C0(R1(Q0(R2(C1R3)))))
# def LRaCaRaQaRaCRbbbbb(w, L0, R0, C0, R1, Q0_pair, R2, C1, R3):
L0, R0, C0, R1 = para_list[:4]
Q0_pair = para_list[4:6]
R2, C1, R3 = para_list[6:]
z_list = [LRaCaRaQaRaCRbbbbb(i, L0, R0, C0, R1, Q0_pair, R2, C1, R3) for i in w]
elif serial == 11:
# ecm_8_011 R0(Q0(R1(Q1R2)(Q2(R3W0))))
# def RaQaRaQRbaQaRWbbbb(w, R0, Q0_pair, R1, Q1_pair, R2, Q2_pair, R3, W0):
R0 = para_list[0]
Q0_pair = para_list[1:3]
R1 = para_list[3]
Q1_pair = para_list[4:6]
R2 = para_list[6]
Q2_pair = para_list[7:9]
R3, W0 = para_list[9:]
z_list = [RaQaRaQRbaQaRWbbbb(i, R0, Q0_pair, R1, Q1_pair, R2, Q2_pair, R3, W0) for i in w]
elif element_num == 9:
if serial == 1:
# ecm_9_001 R0(Q0R1(O0R2)(L0R3)(L1R4))
# def RaQRaORbaLRbaLRbb(w, R0, Q0_pair, R1, O0, R2, L0, R3, L1, R4):
R0 = para_list[0]
Q0_pair = para_list[1:3]
R1, O0, R2, L0, R3, L1, R4 = para_list[3:]
z_list = [RaQRaORbaLRbaLRbb(i, R0, Q0_pair, R1, O0, R2, L0, R3, L1, R4) for i in w]
elif serial == 2:
# ecm_9_002 L0R0(C0(R1(Q0(R2(C1(R3W0))))))
# def LRaCaRaQaRaCaRWbbbbbb(w, L0, R0, C0, R1, Q0_pair, R2, C1, R3, W0):
L0, R0, C0, R1 = para_list[:4]
Q0_pair = para_list[4:6]
R2, C1, R3, W0 = para_list[6:]
z_list = [LRaCaRaQaRaCaRWbbbbbb(i, L0, R0, C0, R1, Q0_pair, R2, C1, R3, W0) for i in w]
elif serial == 3:
# ecm_9_003 R0(C0(R1(Q0(R2(L0R3)(C1R4)))))
# def RaCaRaQaRaLRbaCRbbbbb(w, R0, C0, R1, Q0_pair, R2, L0, R3, C1, R4):
R0, C0, R1 = para_list[:3]
Q0_pair = para_list[3:5]
R2, L0, R3, C1, R4 = para_list[5:]
z_list = [RaCaRaQaRaLRbaCRbbbbb(i, R0, C0, R1, Q0_pair, R2, L0, R3, C1, R4) for i in w]
elif serial == 4:
# ecm_9_004 R0(C0R1(L0R2)(L1R3)(O0R4))
# def RaCRaLRbaLRbaORbb(w, R0, C0, R1, L0, R2, L1, R3, O0, R4):
R0, C0, R1, L0, R2, L1, R3, O0, R4 = para_list
z_list = [RaCRaLRbaLRbaORbb(i, R0, C0, R1, L0, R2, L1, R3, O0, R4) for i in w]
elif serial == 5:
# ecm_9_005 R0(C0R1(O0R2)(L0R3)(L1R4))
# def RaCRaORbaLRbaLRbb(w, R0, C0, R1, O0, R2, L0, R3, L1, R4):
R0, C0, R1, O0, R2, L0, R3, L1, R4 = para_list
z_list = [RaCRaORbaLRbaLRbb(i, R0, C0, R1, O0, R2, L0, R3, L1, R4) for i in w]
elif serial == 6:
# ecm_9_006 R0(C0R1(Q0R2)(L0R3)(C1R4))
# def RaCRaQRbaLRbaCRbb(w, R0, C0, R1, Q0_pair, | |
field name that ends with ``__ext`` is not part of the
Resource StructureDefinition; instead it is used to enable the Extensibility
feature for FHIR Primitive Data Types.
Links this graph makes rules about.
"""
resource_type = Field("GraphDefinitionLink", const=True)
description: fhirtypes.String = Field(
None,
alias="description",
title="Why this link is specified",
description=(
"Information about why this link is of interest in this graph "
"definition."
),
# if property is element of this resource.
element_property=True,
)
description__ext: fhirtypes.FHIRPrimitiveExtensionType = Field(
None, alias="_description", title="Extension field for ``description``."
)
max: fhirtypes.String = Field(
None,
alias="max",
title="Maximum occurrences for this link",
description=None,
# if property is element of this resource.
element_property=True,
)
max__ext: fhirtypes.FHIRPrimitiveExtensionType = Field(
None, alias="_max", title="Extension field for ``max``."
)
min: fhirtypes.Integer = Field(
None,
alias="min",
title="Minimum occurrences for this link",
description=None,
# if property is element of this resource.
element_property=True,
)
min__ext: fhirtypes.FHIRPrimitiveExtensionType = Field(
None, alias="_min", title="Extension field for ``min``."
)
path: fhirtypes.String = Field(
None,
alias="path",
title="Path in the resource that contains the link",
description=None,
# if property is element of this resource.
element_property=True,
element_required=True,
)
path__ext: fhirtypes.FHIRPrimitiveExtensionType = Field(
None, alias="_path", title="Extension field for ``path``."
)
sliceName: fhirtypes.String = Field(
None,
alias="sliceName",
title="Which slice (if profiled)",
description=None,
# if property is element of this resource.
element_property=True,
)
sliceName__ext: fhirtypes.FHIRPrimitiveExtensionType = Field(
None, alias="_sliceName", title="Extension field for ``sliceName``."
)
target: typing.List[fhirtypes.GraphDefinitionLinkTargetType] = Field(
...,
alias="target",
title="Potential target for the link",
description=None,
# if property is element of this resource.
element_property=True,
)
@classmethod
def elements_sequence(cls):
"""returning all elements names from
``GraphDefinitionLink`` according specification,
with preserving original sequence order.
"""
return [
"id",
"extension",
"modifierExtension",
"path",
"sliceName",
"min",
"max",
"description",
"target",
]
@root_validator(pre=True, allow_reuse=True)
def validate_required_primitive_elements_2107(
cls, values: typing.Dict[str, typing.Any]
) -> typing.Dict[str, typing.Any]:
"""https://www.hl7.org/fhir/extensibility.html#Special-Case
In some cases, implementers might find that they do not have appropriate data for
an element with minimum cardinality = 1. In this case, the element must be present,
but unless the resource or a profile on it has made the actual value of the primitive
data type mandatory, it is possible to provide an extension that explains why
the primitive value is not present.
"""
required_fields = [("path", "path__ext")]
_missing = object()
def _fallback():
return ""
errors: typing.List["ErrorWrapper"] = []
for name, ext in required_fields:
field = cls.__fields__[name]
ext_field = cls.__fields__[ext]
value = values.get(field.alias, _missing)
if value not in (_missing, None):
continue
ext_value = values.get(ext_field.alias, _missing)
missing_ext = True
if ext_value not in (_missing, None):
if isinstance(ext_value, dict):
missing_ext = len(ext_value.get("extension", [])) == 0
elif (
getattr(ext_value.__class__, "get_resource_type", _fallback)()
== "FHIRPrimitiveExtension"
):
if ext_value.extension and len(ext_value.extension) > 0:
missing_ext = False
else:
validate_pass = True
for validator in ext_field.type_.__get_validators__():
try:
ext_value = validator(v=ext_value)
except ValidationError as exc:
errors.append(ErrorWrapper(exc, loc=ext_field.alias))
validate_pass = False
if not validate_pass:
continue
if ext_value.extension and len(ext_value.extension) > 0:
missing_ext = False
if missing_ext:
if value is _missing:
errors.append(ErrorWrapper(MissingError(), loc=field.alias))
else:
errors.append(
ErrorWrapper(NoneIsNotAllowedError(), loc=field.alias)
)
if len(errors) > 0:
raise ValidationError(errors, cls) # type: ignore
return values
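# A minimal sketch of the special case handled by the validator above. The
# payload shape is illustrative; the extension URL is assumed to be the
# standard FHIR data-absent-reason extension and is not taken from this file.
#
#   link = GraphDefinitionLink.parse_obj({
#       "target": [{"type": "Patient"}],
#       "_path": {
#           "extension": [{
#               "url": "http://hl7.org/fhir/StructureDefinition/data-absent-reason",
#               "valueCode": "unknown",
#           }]
#       },
#   })
#
# The required primitive ``path`` may be omitted as long as ``_path`` carries
# at least one extension; omitting both would make the validator raise a
# ValidationError (MissingError on ``path``).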
class GraphDefinitionLinkTarget(backboneelement.BackboneElement):
"""Disclaimer: Any field name ends with ``__ext`` doesn't part of
Resource StructureDefinition, instead used to enable Extensibility feature
for FHIR Primitive Data Types.
Potential target for the link.
"""
resource_type = Field("GraphDefinitionLinkTarget", const=True)
compartment: typing.List[
fhirtypes.GraphDefinitionLinkTargetCompartmentType
] = Field(
None,
alias="compartment",
title="Compartment Consistency Rules",
description=None,
# if property is element of this resource.
element_property=True,
)
link: typing.List[fhirtypes.GraphDefinitionLinkType] = Field(
None,
alias="link",
title="Additional links from target resource",
description=None,
# if property is element of this resource.
element_property=True,
)
profile: fhirtypes.Uri = Field(
None,
alias="profile",
title="Profile for the target resource",
description=None,
# if property is element of this resource.
element_property=True,
)
profile__ext: fhirtypes.FHIRPrimitiveExtensionType = Field(
None, alias="_profile", title="Extension field for ``profile``."
)
type: fhirtypes.Code = Field(
None,
alias="type",
title="Type of resource this link refers to",
description=None,
# if property is element of this resource.
element_property=True,
element_required=True,
)
type__ext: fhirtypes.FHIRPrimitiveExtensionType = Field(
None, alias="_type", title="Extension field for ``type``."
)
@classmethod
def elements_sequence(cls):
"""returning all elements names from
``GraphDefinitionLinkTarget`` according specification,
with preserving original sequence order.
"""
return [
"id",
"extension",
"modifierExtension",
"type",
"profile",
"compartment",
"link",
]
@root_validator(pre=True, allow_reuse=True)
def validate_required_primitive_elements_2731(
cls, values: typing.Dict[str, typing.Any]
) -> typing.Dict[str, typing.Any]:
"""https://www.hl7.org/fhir/extensibility.html#Special-Case
In some cases, implementers might find that they do not have appropriate data for
an element with minimum cardinality = 1. In this case, the element must be present,
but unless the resource or a profile on it has made the actual value of the primitive
data type mandatory, it is possible to provide an extension that explains why
the primitive value is not present.
"""
required_fields = [("type", "type__ext")]
_missing = object()
def _fallback():
return ""
errors: typing.List["ErrorWrapper"] = []
for name, ext in required_fields:
field = cls.__fields__[name]
ext_field = cls.__fields__[ext]
value = values.get(field.alias, _missing)
if value not in (_missing, None):
continue
ext_value = values.get(ext_field.alias, _missing)
missing_ext = True
if ext_value not in (_missing, None):
if isinstance(ext_value, dict):
missing_ext = len(ext_value.get("extension", [])) == 0
elif (
getattr(ext_value.__class__, "get_resource_type", _fallback)()
== "FHIRPrimitiveExtension"
):
if ext_value.extension and len(ext_value.extension) > 0:
missing_ext = False
else:
validate_pass = True
for validator in ext_field.type_.__get_validators__():
try:
ext_value = validator(v=ext_value)
except ValidationError as exc:
errors.append(ErrorWrapper(exc, loc=ext_field.alias))
validate_pass = False
if not validate_pass:
continue
if ext_value.extension and len(ext_value.extension) > 0:
missing_ext = False
if missing_ext:
if value is _missing:
errors.append(ErrorWrapper(MissingError(), loc=field.alias))
else:
errors.append(
ErrorWrapper(NoneIsNotAllowedError(), loc=field.alias)
)
if len(errors) > 0:
raise ValidationError(errors, cls) # type: ignore
return values
class GraphDefinitionLinkTargetCompartment(backboneelement.BackboneElement):
"""Disclaimer: Any field name ends with ``__ext`` doesn't part of
Resource StructureDefinition, instead used to enable Extensibility feature
for FHIR Primitive Data Types.
Compartment Consistency Rules.
"""
resource_type = Field("GraphDefinitionLinkTargetCompartment", const=True)
code: fhirtypes.Code = Field(
None,
alias="code",
title="Identifies the compartment",
description=None,
# if property is element of this resource.
element_property=True,
element_required=True,
)
code__ext: fhirtypes.FHIRPrimitiveExtensionType = Field(
None, alias="_code", title="Extension field for ``code``."
)
description: fhirtypes.String = Field(
None,
alias="description",
title="Documentation for FHIRPath expression",
description=None,
# if property is element of this resource.
element_property=True,
)
description__ext: fhirtypes.FHIRPrimitiveExtensionType = Field(
None, alias="_description", title="Extension field for ``description``."
)
expression: fhirtypes.String = Field(
None,
alias="expression",
title="Custom rule, as a FHIRPath expression",
description=None,
# if property is element of this resource.
element_property=True,
)
expression__ext: fhirtypes.FHIRPrimitiveExtensionType = Field(
None, alias="_expression", title="Extension field for ``expression``."
)
rule: fhirtypes.Code = Field(
None,
alias="rule",
title="identical | matching | different | custom",
description="identical | matching | different | no-rule | custom.",
# if property is element of this resource.
element_property=True,
element_required=True,
# note: Enum values can be used in validation,
# but use in your own responsibilities, read official FHIR documentation.
enum_values=["identical", "matching", "different", "custom"],
)
rule__ext: fhirtypes.FHIRPrimitiveExtensionType = Field(
None, alias="_rule", title="Extension field for ``rule``."
)
@classmethod
def elements_sequence(cls):
"""returning all elements names from
``GraphDefinitionLinkTargetCompartment`` according specification,
with preserving original sequence order.
"""
return [
"id",
"extension",
"modifierExtension",
"code",
"rule",
"expression",
"description",
]
@root_validator(pre=True, allow_reuse=True)
def validate_required_primitive_elements_3893(
cls, values: typing.Dict[str, typing.Any]
) -> typing.Dict[str, typing.Any]:
"""https://www.hl7.org/fhir/extensibility.html#Special-Case
In some cases, implementers might find that they do not have appropriate data for
an element with minimum cardinality = 1. In this case, the element must be present,
but unless the resource or a profile on it has made the actual value of the primitive
data type mandatory, it is possible to provide an extension that explains why
the primitive value is not present.
"""
required_fields = [("code", "code__ext"), ("rule", "rule__ext")]
_missing = object()
def _fallback():
return ""
errors: typing.List["ErrorWrapper"] = []
for name, ext in required_fields:
field = cls.__fields__[name]
ext_field = cls.__fields__[ext]
value = values.get(field.alias, _missing)
if value not in (_missing, None):
continue
ext_value = values.get(ext_field.alias, _missing)
missing_ext = True
if ext_value not in (_missing, None):
if isinstance(ext_value, dict):
missing_ext = len(ext_value.get("extension", [])) == 0
elif (
getattr(ext_value.__class__, "get_resource_type", _fallback)()
== "FHIRPrimitiveExtension"
):
if ext_value.extension and len(ext_value.extension) > 0:
missing_ext = False
else:
validate_pass = True
for validator in ext_field.type_.__get_validators__():
try:
ext_value = validator(v=ext_value)
except ValidationError as exc:
errors.append(ErrorWrapper(exc, loc=ext_field.alias))
validate_pass = False
if not validate_pass:
continue
if ext_value.extension and len(ext_value.extension) > 0:
| |
<reponame>GaffaSnobb/kshell-utilities
import sys, time, warnings
from typing import Union, Tuple, Optional
from fractions import Fraction
from itertools import chain
import numpy as np
import matplotlib.pyplot as plt
from scipy.stats import chi2
from scipy.optimize import curve_fit
from .parameters import flags
def create_spin_parity_list(
spins: np.ndarray,
parities: np.ndarray
) -> list:
"""
Pair up input spins and parities in a list of lists.
Parameters
----------
spins : np.ndarray
Array of spins for each energy level.
parities : np.ndarray
Array of corresponding parities for each energy level.
Returns
-------
spin_parity_list : list
A nested list of [spin, parity] pairs, containing one entry per unique
(spin, parity) combination, in the order they first appear in 'spins'
(typically sorted by spin).
Examples
--------
Example list:
``` python
[[1, +1], [3, +1], [5, +1], [7, +1], [9, +1], [11, +1], [13, +1]]
```
"""
spin_parity_list = []
for i in range(len(spins)):
if (tmp := [int(spins[i]), int(parities[i])]) in spin_parity_list:
continue
spin_parity_list.append(tmp)
return spin_parity_list
def div0(numerator, denominator):
"""
Suppress ZeroDivisionError, set x/0 to 0, and set inf, -inf and nan
to 0. Author <NAME>.
Examples
--------
>>> div0([1, 1, 1], [1, 2, 0])
array([1. , 0.5, 0. ])
"""
with np.errstate(divide='ignore', invalid='ignore'):
res = np.true_divide(numerator, denominator)
res[~np.isfinite(res)] = 0 # -inf inf NaN
return res
def gamma_strength_function_average(
levels: np.ndarray,
transitions: np.ndarray,
bin_width: Union[float, int],
Ex_min: Union[float, int],
Ex_max: Union[float, int],
multipole_type: str,
prefactor_E1: Union[None, float] = None,
prefactor_M1: Union[None, float] = None,
prefactor_E2: Union[None, float] = None,
initial_or_final: str = "initial",
partial_or_total: str = "partial",
include_only_nonzero_in_average: bool = True,
include_n_levels: Union[None, int] = None,
filter_spins: Union[None, list] = None,
filter_parities: str = "both",
return_n_transitions: bool = False,
# plot: bool = False,
# save_plot: bool = False
) -> Tuple[np.ndarray, np.ndarray, Optional[np.ndarray]]:
"""
Calculate the gamma strength function averaged over total angular
momenta, parities, and initial excitation energies.
Author: <NAME>.
Modified by: GaffaSnobb.
TODO: Figure out the pre-factors.
TODO: Use numpy.logical_or to filter levels and transitions to avoid
using many if statements in the loops.
TODO: Make res.transitions_BXL.ji, res.transitions_BXL.pii, etc.
class attributes (properties).
Parameters
----------
levels : np.ndarray
Array containing energy, spin, and parity for each excited
state. [[E, 2*spin, parity, idx], ...]. idx counts how many
times a state of that given spin and parity has occurred. The
first 0+ state will have an idx of 1, the second 0+ will have an
idx of 2, etc.
transitions : np.ndarray
Array containing transition data for the specified
multipolarity.
OLD:
Mx8 array containing [2*spin_final, parity_initial, Ex_final,
2*spin_initial, parity_initial, Ex_initial, E_gamma, B(.., i->f)]
OLD NEW:
[2*spin_initial, parity_initial, Ex_initial, 2*spin_final,
parity_final, Ex_final, E_gamma, B(.., i->f), B(.., f<-i)]
NEW:
[2*spin_initial, parity_initial, idx_initial, Ex_initial,
2*spin_final, parity_final, idx_final, Ex_final, E_gamma,
B(.., i->f), B(.., f<-i)]
bin_width : Union[float, int]
The width of the energy bins. A bin width of 0.2 contains 20
states of uniform spacing of 0.01.
Ex_min : Union[float, int]
Lower limit for initial level excitation energy, usually in MeV.
Ex_max : Union[float, int]
Upper limit for initial level excitation energy, usually in MeV.
multipole_type : str
Choose whether to calculate for 'E1', 'M1' or 'E2'. NOTE:
Currently only M1 and E1 are implemented.
prefactor_E1 : Union[None, float]
E1 pre-factor from the definition of the GSF. Defaults to a
standard value if None.
prefactor_M1 : Union[None, float]
M1 pre-factor from the definition of the GSF. Defaults to a
standard value if None.
prefactor_E2 : Union[None, float]
E2 pre-factor from the definition of the GSF. Defaults to a
standard value if None.
initial_or_final : str
Choose whether to use the energy of the initial or final state
for the transition calculations. NOTE: This may be removed in
a future release since the correct alternative is to use the
initial energy.
partial_or_total : str
Choose whether to use the partial level density
rho(E_i, J_i, pi_i) or the total level density rho(E_i) for
calculating the gamma strength function. Note that the partial
level density, the default value, is probably the correct
alternative. Using the total level density will introduce an
arbitrary scaling depending on how many (J, pi) combinations
were included in the calculations.
This argument is included for easy comparison between the two
densities. See the appendix of PhysRevC.98.064321 for details.
include_only_nonzero_in_average : bool
If True (default) only non-zero values are included in the final
averaging of the gamma strength function. The correct
alternative is to use only the non-zero values, so setting this
parameter to False should be done with care.
include_n_levels : Union[None, int]
The number of states per spin to include. Example:
include_n_levels = 100 will include only the 100 lowest laying
states for each spin.
filter_spins : Union[None, list]
Which spins to include in the GSF. If None, all spins are
included. TODO: Make int valid input too.
filter_parities : str
Which parities to include in the GSF. 'both', '+', '-' are
allowed.
return_n_transitions : bool
Count the number of transitions, as a function of gamma energy,
involved in the GSF calculation and return this number as a
third return value. For calculating Porter-Thomas fluctuations
in the GSF by
r(E_gamma) = sqrt(2/n(E_gamma))
where n is the number of transitions for each gamma energy, used
to calculate the GSF. The value n is called n_transitions_array
in the code. See for example DOI: 10.1103/PhysRevC.98.054303 for
details.
plot : bool
Toggle plotting on / off.
save_plot : bool
Toggle saving of the plot (as .png with dpi=300) on / off.
Variables
---------
Ex : np.ndarray
The excitation energy of all levels.
Ex_initial : np.ndarray
The excitation energy of the initial state of a transition.
spins : np.ndarray
The spins of all levels.
parities : np.ndarray
The parities of all levels.
Returns
-------
bins : np.ndarray
The bins corresponding to gSF_ExJpiavg (x values for plot).
gSF_ExJpiavg : np.ndarray
The gamma strength function.
"""
skip_counter = { # Debug.
"Transit: Energy range": 0,
"Transit: Number of levels": 0,
"Transit: Parity": 0,
"Level density: Energy range": 0,
"Level density: Number of levels": 0,
"Level density: Parity": 0
}
total_gsf_time = time.perf_counter()
allowed_filter_parities = ["+", "-", "both"]
if filter_parities not in allowed_filter_parities:
msg = f"filter_parities must be {allowed_filter_parities}"
raise TypeError(msg)
if filter_parities == "both":
filter_parities = [-1, +1]
elif filter_parities == "-":
filter_parities = [-1]
elif filter_parities == "+":
filter_parities = [+1]
if include_n_levels is None:
include_n_levels = np.inf # Include all states.
if (Ex_min < 0) or (Ex_max < 0):
msg = "Ex_min and Ex_max cannot be negative!"
raise ValueError(msg)
if Ex_max < Ex_min:
msg = "Ex_max cannot be smaller than Ex_min!"
raise ValueError(msg)
prefactors = { # Factor from the def. of the GSF.
"M1": 11.5473e-9, # [1/(mu_N**2*MeV**2)].
# "E1": 1.047e-6,
"E1": 3.4888977e-7
}
if prefactor_E1 is not None:
"""
Override the E1 prefactor.
"""
prefactors["E1"] = prefactor_E1
if prefactor_M1 is not None:
"""
Override the M1 prefactor.
"""
prefactors["M1"] = prefactor_M1
if prefactor_E2 is not None:
"""
Override the E2 prefactor.
"""
prefactors["E2"] = prefactor_E2
prefactor = prefactors[multipole_type]
# Extract data to a more readable form:
n_transitions = len(transitions[:, 0])
n_levels = len(levels[:, 0])
E_ground_state = levels[0, 0] # Read out the absolute ground state energy so we can get relative energies later.
try:
Ex, spins, parities, level_counter = np.copy(levels[:, 0]), levels[:, 1], levels[:, 2], levels[:, 3]
except IndexError as err:
msg = f"{err.__str__()}\n"
msg += "Error probably due to old tmp files. Use loadtxt parameter"
msg += " load_and_save_to_file = 'overwrite' to re-read data from the"
msg += " summary file and generate new tmp files."
raise Exception(msg) from err
if initial_or_final == "initial":
Ex_initial_or_final = np.copy(transitions[:, 3]) # To avoid altering the raw data.
spin_initial_or_final_idx = 0
parity_initial_or_final_idx = 1
elif initial_or_final == "final":
Ex_initial_or_final = np.copy(transitions[:, 7]) # To avoid altering the raw data.
spin_initial_or_final_idx = 4
parity_initial_or_final_idx = 5
msg = "Using final states for the energy limits is not correct"
msg += " and should only be | |
# Copyright 2016, 2017, 2018 IBM Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Implementation of hypervisor interface for Zvm
"""
#
# IMPORTS
#
from tempfile import NamedTemporaryFile
from tessia.baselib.common.logger import get_logger
from tessia.baselib.common.params_validators.utils import validate_params
from tessia.baselib.guests.cms.cms import GuestCms, ERROR_REGEX
from tessia.baselib.hypervisors.base import HypervisorBase
#
# CONSTANTS AND DEFINITIONS
#
#
# CODE
#
class HypervisorZvm(HypervisorBase):
"""
This class implements the driver to support the ZVM hypervisor type
"""
# the identifier for this hypervisor class
HYP_ID = 'zvm'
# names of files uploaded to zvm during netboot process
NETBOOT_CMDLINE_FILE = 'PARMFILE PARM T'
NETBOOT_KERNEL_FILE = 'KERNEL IMG T'
NETBOOT_INITRD_FILE = 'INITRD IMG T'
@validate_params
def __init__(self, system_name, host_name, user, passwd, parameters):
"""
Constructor
Args:
system_name (string): string containing the hypervisor name
host_name (string): hostname or ip address of system
user (string): user to login to system
passwd (string): <PASSWORD> system
parameters (dict): a dictionary containing values specific to each
hypervisor type
Raises:
None
"""
# make sure here and noipl are set for the connection
if not parameters:
parameters = {}
parameters['here'] = True
parameters['noipl'] = True
user = user.upper()
super().__init__(system_name, host_name, user,
passwd, parameters)
self._logger = get_logger(__name__)
self._logger.debug(
"create HypervisorZvm: name='%s' host_name='%s' user='%s' "
"parameters='%s'",
self.name,
self.host_name,
self.user,
str(self.parameters)
)
# use the cms class as the connection to the zVM guest
self._cms = GuestCms(user, host_name, user, passwd, parameters)
# __init__()
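# A hedged construction sketch; the host name and credentials below are made
# up. Passing None (or {}) for 'parameters' is fine: __init__ replaces it with
# a dict and forces 'here' and 'noipl' before handing it to GuestCms.
#
#   hyp = HypervisorZvm("vmguest01", "zvm.example.com", "vmguest01",
#                       "secret", None)
#   hyp.login()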
def _netboot(self, params):
"""
Upload the netboot files to the zVM and perform ipl of the guest via
the reader device.
Args:
params (dict): netboot parameters, see json schema for details
Raises:
RuntimeError: if any zVM command fails
"""
_, re_match = self._cms.run(
r'i cms\naccess (noprof', wait_for=['Ready;'])
if not re_match:
raise RuntimeError('Failed to initialize CMS')
# make sure terminal waits before clearing the screen to prevent
# missing content
self._cms.run('term more 50 10', use_cp=True)
# prepare a vdisk where we can upload the files to
vdisk_dev = 'ffff'
exists_msg = '(?i) {} ON '.format(vdisk_dev)
_, re_match = self._cms.run(
"q v {}".format(vdisk_dev),
wait_for=[ERROR_REGEX, exists_msg], timeout=10)
if not re_match:
raise RuntimeError(
'Query device {} returned unexpected output'.format(vdisk_dev))
# no error message detected: device exists and must be detached
if re_match.re.pattern == exists_msg:
detached_msg = '(?i){} detached'.format(vdisk_dev)
_, re_match = self._cms.run(
'detach {}'.format(vdisk_dev),
wait_for=[ERROR_REGEX, detached_msg], timeout=5)
if not re_match:
raise RuntimeError(
'Detach {} returned unexpected output'.format(vdisk_dev))
if re_match.re.pattern == ERROR_REGEX:
raise RuntimeError('Detach {} failed with: {}'.format(
vdisk_dev, re_match.group()))
# create the vdisk with approx. 100 MB size
defined_msg = '(?i){} defined'.format(vdisk_dev)
_, re_match = self._cms.run(
'define vfb-512 as ffff blk 200000',
wait_for=[ERROR_REGEX, defined_msg], timeout=5)
if not re_match:
raise RuntimeError('Define vdisk returned unexpected output')
if re_match.re.pattern == ERROR_REGEX:
raise RuntimeError('Define vdisk failed with: {}'.format(
re_match.group()))
# format VDISK as file mode t
_, re_match = self._cms.run(
# confirm operation by typing '1' and set disk label as 'tmpdsk'
r'format ffff t\n1\ntmpdsk', wait_for=r'(?i)Ready;',
timeout=10)
if not re_match:
raise RuntimeError('Format vdisk returned unexpected output')
# upload the kernel file
self._cms.push_file(params['kernel_uri'], self.NETBOOT_KERNEL_FILE)
# initrd file specified: upload it
if params.get('initrd_uri'):
self._cms.push_file(params['initrd_uri'], self.NETBOOT_INITRD_FILE)
# create a temp file to hold the kernel args and upload it
with NamedTemporaryFile(mode='w') as file_fd:
file_fd.write(params['cmdline'])
file_fd.flush()
self._cms.push_file('file://{}'.format(file_fd.name),
self.NETBOOT_CMDLINE_FILE)
# commands to prepare the reader device and punch the files to it
cmds = [
'spool punch * rdr',
'close reader',
'purge reader all',
'punch {} (noh'.format(self.NETBOOT_KERNEL_FILE),
'punch {} (noh'.format(self.NETBOOT_CMDLINE_FILE),
]
if params.get('initrd_uri'):
cmds.append('punch {} (noh'.format(self.NETBOOT_INITRD_FILE))
cmds.append('change reader all keep')
# reset puncher to defaults
cmds.append('spool pun off')
# execute all commands, abort in case of error
wait_prompts = [r'Ready;', r'Ready\(\d+\);']
for cmd in cmds:
_, re_match = self._cms.run(
cmd, wait_for=wait_prompts, timeout=300)
if not re_match:
raise RuntimeError(
"Command '{}' returned unexpected output".format(cmd))
if re_match.re.pattern == wait_prompts[1]:
raise RuntimeError("Command '{}' failed with: {}".format(
cmd, re_match.group()))
# IPL the reader
_, re_match = self._cms.run(
'ipl 00c clear',
wait_for=['Kernel command line: '], timeout=600)
if not re_match:
raise RuntimeError('Failed to IPL downloaded kernel')
# _netboot()
@staticmethod
def _split_chars(string, size):
"""
Split a string into space-separated sub-strings (chunks), each of length
determined by 'size'.
Args:
string (str): string to split
size (int): size of each sub-string
Returns:
str: resulting string containing sub-strings
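Example:
An illustrative call, splitting a string into chunks of two characters:
>>> HypervisorZvm._split_chars('abcdef', 2)
'ab cd ef'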
"""
index = 0
result = []
while index < len(string):
result.append(string[index:index+size])
index += size
return ' '.join(result)
# _split_chars()
def login(self, timeout=60):
"""
Execute the login to the hypervisor system using the credentials
provided.
Args:
timeout (int): how many seconds to wait for connection
Raises:
None
"""
self._logger.debug(
"performing LOGIN HypervisorZvm: name='%s' host_name='%s' "
"user='%s' parameters='%s'",
self.name,
self.host_name,
self.user,
str(self.parameters)
)
# login to the zVM guest
self._cms.login()
# login()
def logoff(self):
"""
Close an active connection to the hypervisor system
Args:
None
Raises:
RuntimeError: In case of fail during logoff
"""
self._logger.debug("performing LOGOFF HypervisorZvm")
# disconnect from zVM guest, keep the guest running
self._cms.logoff()
# logoff()
def set_boot_device(self, guest_name, boot_device):
"""
Set boot device for next load
For ZVM it is a no-op
Args:
guest_name (str): guest to operate on
boot_device (dict): boot device config
"""
self._logger.debug(
"performing SET_BOOT_DEVICE HypervisorZvm: name='%s', guest='%s' "
"boot_device='%s'",
self.name,
guest_name,
str(boot_device)
)
@validate_params
def start(self, guest_name, cpu, memory, parameters):
"""
Attach the given resources and IPL the guest using the method and
device specified.
Args:
guest_name (str): Name of the guest as known by hypervisor
cpu (int): Number of CPUs to assign
memory (int): Amount of memory to assign in megabytes
parameters (dict): additional parameters, see json schema for
details
Raises:
RuntimeError: if any zVM command fails
ValueError: - if boot method is 'disk' but no boot dev was defined
- if guest name is different from the username provided
for login
"""
# guest to ipl is different from username: cannot continue
if guest_name.upper() != self.user:
msg = ('On z/VM the guest name provided must be the same as the '
'username specified for login')
raise ValueError(msg)
boot_dev = None
if parameters['boot_method'] == 'disk':
try:
boot_dev = [vol for vol in parameters['storage_volumes']
if vol.get('boot_device')][0]
except IndexError:
raise ValueError("Boot method 'disk' requires a boot device")
elif parameters['boot_method'] == 'network':
if not 'netboot' in parameters:
raise ValueError(
"Boot method 'network' requires netboot parameters")
# put guest in a clean state
self._cms.stop()
self._cms.login()
if cpu != 0:
# clear possible attached cpus
reset_msg = r'(?i)storage cleared - system reset'
_, re_match = self._cms.run("detach cpu all",
wait_for=[reset_msg, ERROR_REGEX],
use_cp=True, timeout=10)
# command timed out waiting for a valid output: abort as we don't
# know the new guest state
if not re_match:
raise RuntimeError('Detach CPU(s) returned unexpected output')
# HCPCPU1456E is ok because base cpu cannot be detached,
# but with unknown error cannot continue
if (re_match.re.pattern != reset_msg and
not re_match.group().startswith('HCPCPU1456E')):
raise RuntimeError(
'Detach CPU(s) failed with: {}'.format(re_match.group()))
if memory != 0:
# attach defined memory
stor_msg = r'STORAGE = \d+'
_, re_match = self._cms.run("define storage {}M".format(memory),
wait_for=[stor_msg, ERROR_REGEX],
use_cp=True, timeout=10)
if not re_match:
raise RuntimeError(
'Define storage (memory) returned unexpected output')
if re_match.re.pattern != stor_msg:
raise RuntimeError('Define storage (memory) failed with: {}'
.format(re_match.group()))
# attach additional cpus and devices
self._cms.hotplug(
cpu=cpu-1,
vols=parameters['storage_volumes'],
extensions={'ifaces': parameters['ifaces']})
# boot method 'cms': enter ipl command
if parameters['boot_method'] == 'cms':
_, re_match = self._cms.run(
r'i cms\naccess (noprof', wait_for=['Ready;'])
if not re_match:
raise RuntimeError('Failed to IPL CMS')
# make sure terminal waits before clearing the screen to prevent
# missing content
self._cms.run('term more 50 10', use_cp=True)
return
# boot method 'network' - presence of netboot parameters was already
# checked
if parameters['boot_method'] == 'network':
self._netboot(parameters['netboot'])
# boot device defined: perform 'disk' based ipl
elif boot_dev:
if boot_dev['type'] != 'fcp':
devno = boot_dev['devno']
# fcp device: set | |
<gh_stars>0
#!/usr/bin/env python3
# coding: utf-8
"""
4oBe4e Console Client
Usage:
console_client.py <server_address> <player_name> [--logger=<level>]
console_client.py (-h | --help)
console_client.py --version
Options:
-h --help Show this screen
--version Show version
--logger=<level> Log level [default: warning]
"""
import logging
from time import sleep
from typing import Any, Dict, List, Set, Tuple
from docopt import docopt # type: ignore
from colorama import Back, Fore, Style # type: ignore
import requests
from const import HOME_ZONE, END_PROGRESS, LAST_ON_PATH, PLAYER_COLOURS
from util import progress_to_position
# Players' board attributes
players: List[Dict[str, Any]] = [
{
"colour": PLAYER_COLOURS[0],
"home": [[5, [2, 3]], [6, [2, 3]]],
"target": [[7, [6, 7]], [8, [6, 7]]],
"finish": [[9, [*range(3, 8)]], None],
},
{
"colour": PLAYER_COLOURS[1],
"home": [[2, [12, 13]], [3, [12, 13]]],
"target": [[6, [10, 11]], [7, [10, 11]]],
"finish": [[3, [9]], [4, [9]], [5, [9]], [6, [9]], [7, [9]]],
},
{
"colour": PLAYER_COLOURS[2],
"home": [[12, [15, 16]], [13, [15, 16]]],
"target": [[10, [11, 12]], [11, [11, 12]]],
"finish": [[9, [*range(11, 16)]], None],
},
{
"colour": PLAYER_COLOURS[3],
"home": [[15, [5, 6]], [16, [5, 6]]],
"target": [[11, [7, 8]], [12, [7, 8]]],
"finish": [[11, [9]], [12, [9]], [13, [9]], [14, [9]], [15, [9]]],
},
]
def _colour(name: str = "WHITE") -> str:
return eval(f"Fore.{name}")
def draw_board() -> List[List[Any]]:
"""Draw an ASCII board with the current pieces."""
ROWS = 19
COLS = 19
HOME_SHAPE = "[ ]"
TARGET_SHAPE = "{ }"
FOOTPATH_SHAPE = "( )"
FINISH_SHAPE = " + "
# Init board
board = [[Style.RESET_ALL + " "] * COLS for row in range(ROWS)]
# Fill board frame
for i in range(len(board)):
board[i][:: len(board[i]) - 1] = [Fore.MAGENTA + " . ", Fore.MAGENTA + " . "]
for i in 0, -1:
board[i][:] = (Fore.CYAN + " . ") * len(board[i])
# Fill player areas
for p in players:
for h in p["home"]:
for c in h[1]:
board[h[0]][c] = _colour(p["colour"]) + HOME_SHAPE
for t in p["target"]:
for c in t[1]:
board[t[0]][c] = _colour(p["colour"]) + TARGET_SHAPE
for f in p["finish"]:
if f:
for c in f[1]:
board[f[0]][c] = _colour(p["colour"]) + FINISH_SHAPE
# Fill footpath
footpath: List[List[List[int]]] = [
[[2, 16], [8, 9, 10]],
[[3, 4, 14, 15], [8, 10]],
[[5, 13], [5, 6, 7, 8, 10, 11, 12, 13]],
[[6, 7, 11, 12], [5, 13]],
[[8, 10], [2, 3, 4, 5, 13, 14, 15, 16]],
[[9], [2, 16]],
]
for fp in footpath:
for r in fp[0]:
for c in fp[1]:
board[r][c] = Fore.WHITE + FOOTPATH_SHAPE
# Fill Trophies
board[9][9] = ">|<"
board[-1][-1] = Style.RESET_ALL + "🏳️🌈"
return board
def redraw(pieces: List[Dict]) -> None:
"""The screen update function. Do not modify this for now."""
board = draw_board()
draw_pieces_on_board(board, pieces)
print()
for row in board:
print("".join(row))
def _cant_overlap(piece_number: int, player_number: int, piece_progress=0) -> bool:
"""Even though this is piece-related logic, it has to do only with visualisation.
Thus in view, rather than object logic"""
return not (HOME_ZONE < piece_progress < END_PROGRESS)
def draw_pieces_on_board(board: List[List[Any]], pieces: List[Dict]) -> List[List[Any]]:
"""It is not part of a job of this method to resolve game logic,
such as collision of pieces of different players on the path"""
for piece in pieces:
(x, y) = put_piece_on_board(piece["number"], piece["player"], piece["progress"])
player_progress = [
p["progress"] for p in pieces if p["player"] == piece["player"]
]
count = player_progress.count(piece["progress"])
val = (
str(piece["number"])
if _cant_overlap(piece["number"], piece["player"], piece["progress"])
or count == 1
else str(count)
)
board[x][y] = f"{_colour(players[piece['player']]['colour'])}.{val}."
return board
def put_piece_on_board(
piece_number: int, player_number: int, piece_progress=0
) -> Tuple[int, int]:
"""Currently player is in [1..4], piece is in [0..3]. Do we need to change this?
TODO: Refactor to implement the strategy pattern
"""
coords = (0, 0)
progress = piece_progress
if progress == 0:
coords = __coord_in_home(piece_number, player_number, piece_progress)
elif 0 < progress <= LAST_ON_PATH:
coords = __coord_on_path(piece_number, player_number, piece_progress)
elif LAST_ON_PATH < progress < END_PROGRESS:
coords = __coord_on_finish(piece_number, player_number, piece_progress)
elif progress == END_PROGRESS:
coords = __coord_in_target(piece_number, player_number, piece_progress)
else:
raise NotImplementedError()
return coords
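# Illustrative progress-to-cell mapping, consistent with the helper doctests
# below and assuming END_PROGRESS == 62 as those doctests suggest:
#   put_piece_on_board(0, 0, 0)  -> (5, 2)   piece still at home
#   put_piece_on_board(1, 0, 1)  -> (8, 2)   first cell on the path
#   put_piece_on_board(0, 0, 62) -> (7, 6)   piece parked in the target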
def get_state(session: requests.sessions.Session, server_address: str) -> Dict:
req = session.get(f"{server_address}/state")
return req.json()
def get_players(session: requests.sessions.Session, server_address: str) -> Dict:
req = session.get(f"{server_address}/players")
return req.json()
def join_player(
session: requests.sessions.Session, server_address: str, player_name: str
) -> Tuple:
req = session.get(f"{server_address}/join/{player_name}")
res = req.json()
return (res["player_num"], res["player_token"])
def roll_dice(session: requests.sessions.Session, server_address: str) -> Dict:
req = session.get(f"{server_address}/play/roll")
return req.json()
def move_piece(
session: requests.sessions.Session,
server_address: str,
piece_number: int,
dice: int,
) -> Dict:
req = session.get(f"{server_address}/play/move/{piece_number}/{dice}")
return req.json()
def put_piece(
session: requests.sessions.Session,
server_address: str,
piece_number: int,
dice: int,
) -> Dict:
req = session.get(f"{server_address}/play/out/{piece_number}/{dice}")
return req.json()
def __coord_in_home(
piece_number: int, player_number: int, piece_progress=0
) -> Tuple[int, int]:
"""Draw in home positions: each piece has its location. Progress is always same, thus irrelevant
>>> __coord_in_home(0, 0)
(5, 2)
>>> __coord_in_home(1, 1)
(2, 13)
>>> __coord_in_home(2, 2)
(13, 15)
>>> __coord_in_home(3, 3)
(16, 6)
"""
assert piece_progress == 0
zones = [(5, 2), (2, 12), (12, 15), (15, 5)]
shift = [(0, 0), (0, 1), (1, 0), (1, 1)]
return (
zones[player_number][0] + shift[piece_number][0],
zones[player_number][1] + shift[piece_number][1],
)
def __coord_on_path(
piece_number: int, player_number: int, piece_progress=0
) -> Tuple[int, int]:
"""Draws on path: if two or more pieces on same cell, instead of number,
draws a placeholder, which does not need to show piece number
Logic split this in 4 different cases, determined by player offset.
Parameter piece does't influence logic.
Player Progress to Position conversion:
P0 1..56: (pos)
P1 1..42: (p_num * shift + pos)
43..56: (p_num * shift + pos) % end_progress
P2 1..28: (p_num * shift + pos)
29..56: (p_num * shift + pos) % end_progress
P3 1..14: (p_num * shift + pos)
15..56: (p_num * shift + pos) % end_progress
Test player 1:
>>> __coord_on_path(player_number=0, piece_number=1, piece_progress=1)
(8, 2)
Test player 2:
>>> __coord_on_path(player_number=1, piece_number=1, piece_progress=1)
(2, 10)
Test player 3:
>>> __coord_on_path(player_number=2, piece_number=1, piece_progress=1)
(10, 16)
Test player 4:
>>> __coord_on_path(player_number=3, piece_number=1, piece_progress=1)
(16, 8)
Test path wrap:
>>> __coord_on_path(player_number=3, piece_number=1, piece_progress=56)
(16, 9)
Test overlap:
>>> __coord_on_path(player_number=1, piece_number=1, piece_progress=17)
(10, 14)
"""
assert 1 <= piece_progress <= LAST_ON_PATH and 0 <= player_number <= 3
POSITION_TO_ROWCOL: Tuple[Tuple[int, int], ...] = (
(0, 0),
(8, 2),
(8, 3),
(8, 4),
(8, 5),
(7, 5),
(6, 5),
(5, 5),
(5, 6),
(5, 7),
(5, 8),
(4, 8),
(3, 8),
(2, 8),
(2, 9),
(2, 10),
(3, 10),
(4, 10),
(5, 10),
(5, 11),
(5, 12),
(5, 13),
(6, 13),
(7, 13),
(8, 13),
(8, 14),
(8, 15),
(8, 16),
(9, 16),
(10, 16),
(10, 15),
(10, 14),
(10, 13),
(11, 13),
(12, 13),
(13, 13),
(13, 12),
(13, 11),
(13, 10),
(14, 10),
(15, 10),
(16, 10),
(16, 9),
(16, 8),
(15, 8),
(14, 8),
(13, 8),
(13, 7),
(13, 6),
(13, 5),
(12, 5),
(11, 5),
(10, 5),
(10, 4),
(10, 3),
(10, 2),
(9, 2),
)
return POSITION_TO_ROWCOL[progress_to_position(player_number, piece_progress)]
def __coord_on_finish(
piece_number: int, player_number: int, piece_progress=0
) -> Tuple[int, int]:
"""Piece number is irrelevant
>>> __coord_on_finish(1, 0, 57)
(9, 3)
>>> __coord_on_finish(1, 0, 61)
(9, 7)
>>> __coord_on_finish(1, 1, 57)
(3, 9)
>>> __coord_on_finish(1, 2, 58)
(9, 14)
>>> __coord_on_finish(1, 3, 59)
(13, 9)
>>> __coord_on_finish(1, 3, 61)
(11, 9)
"""
pos = piece_progress - LAST_ON_PATH
assert 0 < pos < 6
player = player_number
(x, y) = (0, 0)
if player in [0, 2]:
x = 9
y = pos + 2 if player == 0 else 15 - (pos - 1)
elif player in [1, 3]:
x = pos + 2 if player == 1 else 15 - (pos - 1)
y = 9
else:
raise NotImplementedError()
return (x, y)
def __coord_in_target(
piece_number: int, player_number: int, piece_progress=0
) -> Tuple[int, int]:
"""Draw in target positions: each piece has its location.
Progress is always the same, thus irrelevant.
>>> __coord_in_target(0, 0, 62)
(7, 6)
>>> __coord_in_target(1, 1, 62)
(6, 11)
>>> __coord_in_target(2, 2, 62)
(11, 11)
>>> __coord_in_target(3, 3, 62)
(12, 8)
"""
assert piece_progress == 62
zones = [(7, 6), (6, 10), (10, 11), (11, 7)]
shift = [(0, 0), (0, 1), (1, 0), (1, 1)]
return (
zones[player_number][0] + shift[piece_number][0],
zones[player_number][1] + shift[piece_number][1],
)
def main():
"""Main loop."""
# Parse args
args = | |
values into sets in case these are already templates.
values1 = set(lel1.attrib[key].split(cls.VALUES_SEPARATOR)) if key in lel1.attrib else set()
values2 = set(lel2.attrib[key].split(cls.VALUES_SEPARATOR)) if key in lel2.attrib else set()
if values1 != values2:
if not overwrite or key in unstable_attributes_in_lel1 or key in unstable_attributes_in_lel2:
# If we're not overwriting, or we are overwriting but this attribute is already unstable,
# we should combine the values and mark it unstable.
el_result.attrib[key] = cls.VALUES_SEPARATOR.join(sorted(values1 | values2))
unstable_attributes.add(key)
else:
# We are overwriting and this is not an unstable attribute in lel2, so just overwrite from lel2.
el_result.attrib[key] = cls.VALUES_SEPARATOR.join(values2)
else:
# The values are the same, just copy from one or the other.
el_result.attrib[key] = lel2.attrib[key]
# Now put the special attributes back.
cls._set_unstable_attributes_for_element(el_result, unstable_attributes)
if "unstable_element" in lel1.attrib or "unstable_element" in lel2.attrib:
el_result.attrib["unstable_element"] = "true"
if "unstable_text" in lel1.attrib or "unstable_text" in lel2.attrib:
el_result.attrib["unstable_text"] = "true"
@classmethod
def _helper_combine_text(cls, lel1, lel2, el_result, overwrite=False):
"""Combines text from lel1 and lel2.
Args:
lel1, lel2: elements from two different lxml trees / templates
el_result: the element that is serving as the combination result of lel1 and lel2
overwrite: If true, any incompatibilities will be resolved by taking from lel2. If false,
incompatibilities will be resolved by marking the text as unstable.
"""
unstable_text_in_lel1 = "unstable_text" in lel1.attrib
unstable_text_in_lel2 = "unstable_text" in lel2.attrib
values1 = set(lel1.text.split(cls.VALUES_SEPARATOR)) \
if lel1.text is not None and lel1.text.strip() != '' else set()
values2 = set(lel2.text.split(cls.VALUES_SEPARATOR)) \
if lel2.text is not None and lel2.text.strip() != '' else set()
if values1 != values2:
if not overwrite or unstable_text_in_lel1 or unstable_text_in_lel2:
# If we're not overwriting, or we are overwriting but the text is already unstable in either element,
# we should combine the values and mark it unstable.
el_result.text = cls.VALUES_SEPARATOR.join(sorted(values1 | values2))
el_result.attrib["unstable_text"] = "true"
else:
# We are overwriting and this is not unstable, so just overwrite from lel2.
el_result.text = lel2.text
else:
# The values are the same, just copy from one or the other.
el_result.text = lel2.text
@classmethod
def _helper_combine_children(cls, lel1, lel2, el_result, overwrite=False):
"""Combines children from lel1 and lel2.
Args:
lel1, lel2: elements from two different lxml trees / templates
el_result: the element that is serving as the combination result of lel1 and lel2
overwrite: If true, any incompatibilities will be resolved by taking from lel2. If false,
incompatibilities will be resolved by marking the text as unstable.
Returns:
Nothing
"""
# Ideally there should be a one-to-one correspondence between children. We step one by one through each
# child list recursively trying to match the children.
# Watch out!: cnn.com orders some children randomly! For instance, batBeacon and kxhead will appear in both
# versions of the page but in different positions. These will be marked "unstable_element" in the template.
# When we fail to match two children at the same index and one of them is unstable, we can advance one
# anyway, adding it to the result and to a "unmatched children" list.
# If we have unmatched children, we try to "catch up" by matching to them first before going on with the
# other children.
# TODO: Sometimes a child in one tree matches multiple children in the other tree and it's hard to know
# which ones actually correspond. We can improve the child matching if our match functions return a score.
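# A rough, hedged example of the intent (exact matching depends on
# _get_merged_tree, which is defined elsewhere in this class):
#   lel1 children: [head, body]
#   lel2 children: [head, batBeacon*, body]   (* marked unstable_element)
# 'head' matches 'head'; 'batBeacon' then fails to match 'body' at the same
# position, so it is appended to the result (and to the unmatched list), and
# 'body' later matches 'body' normally.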
def match_el_to_list_and_remove(el, els_list):
# If el matches an item in els_list, removes that item from the list and returns the
# matching element and the result of running the match function.
if len(els_list) > 0:
lel1_unstable = "unstable_element" in el.attrib \
or "unstable_text" in el.attrib \
or "unstable_attributes" in el.attrib
child_result = None
# Go backwards so we'll match the most recently added child first.
subindex = len(els_list)-1
while subindex >= 0:
cur_el = els_list[subindex]
# One of the elements must be marked unstable.
lel2_unstable = "unstable_element" in cur_el.attrib \
or "unstable_text" in cur_el.attrib \
or "unstable_attributes" in cur_el.attrib
if lel1_unstable or lel2_unstable:
child_result = cls._get_merged_tree(cur_el, el, overwrite)
if child_result is not None:
break
subindex -= 1
if child_result is not None:
# The element matched an item from the list. Remove that item from the list.
match = els_list[subindex]
del els_list[subindex]
return match, child_result
return None, None
# Go through the children one by one trying to match them to one another.
index1 = index2 = prev_index1 = prev_index2 = 0
unmatched_children1 = list()
unmatched_children2 = list()
while index1 < len(lel1) and index2 < len(lel2):
# First try to match the child in lel1 to the child in the corresponding position in lel2.
child_result = cls._get_merged_tree(lel1[index1], lel2[index2], overwrite)
if child_result is not None:
# This element in lel2 matches the element in lel1. Definitely add it.
el_result.append(child_result)
if "unstable_element" in lel1[index1].attrib or "unstable_element" in lel2[index2].attrib:
child_result.attrib["unstable_element"] = "true"
prev_index1 = index1
prev_index2 = index2
index1 += 1
index2 += 1
continue
# That didn't work, so let's try matching the elements to any unmatched children.
# If one of the children finds a match, advance that child index
# and restart the loop with the new children.
# Check against prev_index to make sure this index has advanced. Otherwise no need
# to do this again.
if index2 > prev_index2:
skipped_match, child_result = match_el_to_list_and_remove(lel2[index2], unmatched_children1)
if child_result is not None:
# Replace the child with the new result.
el_result.insert(el_result.index(skipped_match), child_result)
el_result.remove(skipped_match)
prev_index1 = index1
prev_index2 = index2
index2 += 1
continue
if index1 > prev_index1:
# If we've advanced within lel1, check the new child against all skipped children from lel2.
skipped_match, child_result = match_el_to_list_and_remove(lel1[index1], unmatched_children2)
if child_result is not None:
# Replace the child with the new result.
el_result.insert(el_result.index(skipped_match), child_result)
el_result.remove(skipped_match)
prev_index1 = index1
prev_index2 = index2
index1 += 1
continue
# The children didn't match each other or any as-of-yet unmatched elements.
prev_index1 = index1
prev_index2 = index2
# Let's add the child to the result and also to a "unmatched_children" list.
# - If we are NOT overwriting, mark it unstable and add it to the result.
# - If we are overwriting, do not mark it unstable unless it was already marked unstable.
# An unmatched child may end up matching a child from the other element down the line, in which case
# it will be combined with that child in the final result.
# If we are overwriting, when we're all done, any remaining unmatched children from lel1 that are
# not marked unstable will be removed.
# If one index is lagging behind, skip that one to catch up.
if index1 < index2:
cchild = deepcopy(lel1[index1])
if not overwrite:
cchild.attrib["unstable_element"] = "true"
unmatched_children1.append(cchild)
el_result.append(cchild)
index1 += 1
elif index1 > index2:
cchild = deepcopy(lel2[index2])
if not overwrite:
cchild.attrib["unstable_element"] = "true"
unmatched_children2.append(cchild)
el_result.append(cchild)
index2 += 1
# If there are more children in one list than the other, skip the child in the bigger list.
elif len(lel1) < len(lel2):
cchild = deepcopy(lel2[index2])
if not overwrite:
cchild.attrib["unstable_element"] = "true"
unmatched_children2.append(cchild)
el_result.append(cchild)
index2 += 1
else:
cchild = deepcopy(lel1[index1])
if not overwrite:
cchild.attrib["unstable_element"] = "true"
unmatched_children1.append(cchild)
el_result.append(cchild)
index1 += 1
# We've exhausted one or the other of the element lists.
# Check the remaining children against unmatched children from the other tree.
while index1 < len(lel1):
# Does the lel1 child correspond with an unmatched child from lel2?
skipped_match, child_result = match_el_to_list_and_remove(lel1[index1], unmatched_children2)
if child_result is not None:
# Replace the child with the new result.
el_result.insert(el_result.index(skipped_match), child_result)
el_result.remove(skipped_match)
elif not overwrite or "unstable_element" in lel1[index1].attrib:
# As long as we're not overwriting we can add it and mark it unstable.
cchild = deepcopy(lel1[index1])
cchild.attrib["unstable_element"] = "true"
el_result.append(cchild)
else:
# The child from lel1 couldn't be added. Keep checking the other children.
pass
index1 += 1
while index2 < len(lel2):
# Does the lel2 | |
<filename>routeTracker/UI.py
import tkinter as tk
import ctypes
import time
import math
import pickle
import winsound
from tkinter.filedialog import askopenfilename, askdirectory
from tkinter import messagebox
import os
import pyperclip
import webbrowser
import traceback
import datetime
import sys
if getattr(sys, 'frozen', False): # Running as compiled
running_dir = sys._MEIPASS + "/files/" # Same path name as the pyinstaller option
else:
running_dir = "./" # Path name when run with Python interpreter
ICON = running_dir + "carrier.ico"
class POINT(ctypes.Structure):
_fields_ = [("x", ctypes.c_ulong), ("y", ctypes.c_ulong)]
def mousePosition():
pt = POINT()
ctypes.windll.user32.GetCursorPos(ctypes.byref(pt))
return int(pt.x), int(pt.y)
class UserInterface():
def __init__(self, reader, debug=False):
user32 = ctypes.windll.user32
width, height = user32.GetSystemMetrics(0), user32.GetSystemMetrics(1)
dataTemplate = {'window position': [width / 2 - 250, height / 4],
'route positions': {},
'showType': 'show',
'topmost': 1,
'alarm': True,
'logLocation': '',
'shipCargo': 0,
'carrierCargo': 0,
'more': False,
'jumpStart': '00:00',
'jumpEnd': '23:59'
}
self.exiting = False
self.debug = debug
# self.logReader=reader
self.maxCountdown = 60 * 21
self.logCheck = 5
self.logReader = reader
self.scroll = 0
self.dragOffset = [0, 0]
self.scrolling = False
self.stopLocations = []
self.pristineRings = []
self.countdown = self.maxCountdown
self.countdownStart = time.time()
self.logStart = 0
self.currentFileDataKeys = {}
self.currentFileData = [['unknown']]
self.system = None
self.nextSystem = 'unknown'
self.currentFile = None
self.position = 0
self.dragging = False
self.draggingPos = [width / 2 - 250, height / 4]
self.hovering = False
self.scrollTop = [0, 0]
self.scrollBottom = [0, 0]
try:
with open("trackerData.txt", "rb") as f:
self.data = pickle.load(f)
except FileNotFoundError:
self.data = dataTemplate
with open("trackerData.txt", "wb") as f:
pickle.dump(self.data, f)
added = False
dataKeys = list(self.data.keys())
for i in list(dataTemplate.keys()):
if i not in dataKeys:
self.data[i] = dataTemplate[i]
added = True
if added:
with open("trackerData.txt", "wb") as f:
pickle.dump(self.data, f)
if "current file" in list(self.data.keys()):
self.currentFile = self.data["current file"]
self.openFile(dialogue=False)
if self.data['logLocation'] != '':
self.logReader.folderLocation = self.data['logLocation']
self.createWindow()
def mainLoop(self):
timeLoop = time.time()
while True:
time.sleep(0.01)
try:
pyperclip.paste()
if self.exiting:
self.saveData()
self.window.destroy()
self.root.destroy()
try:
self.settingsWindow.destroy()
except AttributeError:
pass
except tk._tkinter.TclError:
pass
break
# self.menu.update()
currentTime = time.time()
if currentTime - self.logStart > self.logCheck and self.currentFileData != None:
self.logStart = currentTime
self.logReader.updateLog()
# print(self.logReader.oldSystem,self.logReader.currentSystem)
if self.logReader.oldSystem != self.logReader.currentSystem:
# print("Jumped to "+self.logReader.currentSystem)
self.nextSystem = 'unknown'
for i in range(self.position, len(self.currentFileData) - 1):
##print(i)
##print(ui.currentFileData[i])
if self.currentFileData[i][self.currentFileDataKeys['System Name']] == self.logReader.currentSystem:
# print('copied ' + self.nextSystem + ' to clipboard')
if self.currentFileData[i + 1][self.currentFileDataKeys['System Name']] == \
self.currentFileData[i][self.currentFileDataKeys['System Name']]:
self.position = i + 1
# print('double')
else:
self.position = i
self.nextSystem = self.currentFileData[self.position + 1][
self.currentFileDataKeys['System Name']]
pyperclip.copy(self.nextSystem)
self.data['route positions'][self.currentFile] = self.position
self.saveData()
# try:
self.clear()
"""
except Exception as e:
#print(e)"""
break
# try:
self.root.update()
x, y = mousePosition()
if self.hovering:
self.clear()
self.hovering = False
if self.dragging:
self.data['window position'] = [x - self.dragOffset[0], y - self.dragOffset[1]]
self.clear()
elif self.scrolling and self.scrollLength < len(self.currentFileData):
proportion = (y - self.barCentre - self.scrollTop[1]) / self.scrollHeight
self.scroll = round(proportion * len(self.currentFileData) - self.position)
self.limitScroll()
self.clear()
elif currentTime - timeLoop > 1:
self.clear()
timeLoop = currentTime
"""
if self.data['topmost'] == 0:
if not self.window.focus_displayof():
if topSet != 0:
self.window.attributes('-topmost', 0)
topSet=0
elif topSet != 1:
self.window.attributes('-topmost', 1)
topSet=1
##print(topSet)
"""
"""
except Exception as e:
if e == SystemExit:
break
else:
self.exiting=True
#print(e)"""
try:
self.settingsWindow.update()
except AttributeError:
pass
except tk._tkinter.TclError:
pass
except pyperclip.PyperclipWindowsException:
time.sleep(2)
def openFile(self, dialogue=True):
self.scroll = 0
if dialogue:
self.currentFile = askopenfilename()
self.data["current file"] = self.currentFile
if self.currentFile != '':
# print(self.currentFile)
# print(self.data)
if self.currentFile in list(self.data['route positions'].keys()):
self.position = self.data['route positions'][self.currentFile]
else:
self.position = 0
self.data['route positions'][self.currentFile] = self.position
self.saveData()
try:
with open(self.currentFile, 'r') as f:
self.currentFileData = f.read()
self.currentFileData = "".join(self.currentFileData.split("\""))
self.currentFileData = self.currentFileData.split("\n")
self.currentFileData = [i.split(",") for i in self.currentFileData]
##print(currentFileData)
self.currentFileDataKeys = {}
for i in range(len(self.currentFileData[0])):
self.currentFileDataKeys[self.currentFileData[0][i]] = i
del self.currentFileData[0]
if [''] in self.currentFileData:
self.currentFileData.remove([''])
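                # Pre-compute route markers: a system name repeated on consecutive
                # rows marks a planned stop, and rows flagged 'Pristine' mark
                # pristine rings.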
self.stopLocations = []
                self.pristineRings = []
for i in range(len(self.currentFileData) - 1):
if self.currentFileData[i][self.currentFileDataKeys['System Name']] == self.currentFileData[i + 1][
self.currentFileDataKeys['System Name']]:
self.stopLocations.append(i)
if self.currentFileData[i][self.currentFileDataKeys['Pristine']] == 'Yes':
self.pristineRings.append(i)
##print(self.currentFileData[i])
##print(self.stopLocations)
except FileNotFoundError as e:
messagebox.showerror("Import Error", e)
if self.data['showType'] == 'show':
self.logReader.resetValues()
self.logStart = 0
self.createWindow()
def saveData(self, values=None):
with open("trackerData.txt", "wb") as f:
pickle.dump(self.data, f)
# overlay functions
def clear(self):
# all to change with new UI
try:
self.canvas.destroy()
        except Exception:
pass
clip = pyperclip.paste()
x, y = self.data['window position'][0], self.data['window position'][1]
self.canvas = tk.Canvas(self.window, bg="pink", bd=0, highlightthickness=0, relief='ridge')
self.canvas.pack(fill="both", expand=True)
self.canvas.create_rectangle(x, y, x + 520, y + 30, fill='black')
if self.logReader.currentSystem == clip:
self.canvas.create_text(x + 5, y + 5, text=self.logReader.currentSystem, font="Ebrima 13 bold",
fill='green', anchor='nw')
else:
self.canvas.create_text(x + 5, y + 5, text=self.logReader.currentSystem, font="Ebrima 13 bold",
fill='orange', anchor='nw')
self.canvas.create_rectangle(x + 150, y, x + 500, y + 30, fill='black')
self.canvas.create_text(x + 158, y + 5, text='>> ', font="Ebrima 13 bold", fill='orange', anchor='nw')
if self.nextSystem == clip:
self.canvas.create_text(x + 190, y + 5, text=self.nextSystem, font="Ebrima 13 bold", fill='green',
anchor='nw')
else:
self.canvas.create_text(x + 190, y + 5, text=self.nextSystem, font="Ebrima 13 bold", fill='orange',
anchor='nw')
self.canvas.create_rectangle(x + 340, y, x + 500, y + 30, fill='black')
timeSince = time.time() - self.logReader.lastJumpRequest
timeSince = self.maxCountdown - timeSince
if timeSince > 0:
if timeSince < 10 and self.data['alarm']:
winsound.Beep(3000, 100)
mins = str(round(timeSince // 60))
seconds = str(math.floor(timeSince % 60))
if len(mins) == 1:
mins = '0' + mins
if len(seconds) == 1:
seconds = '0' + seconds
text = mins + ':' + seconds
else:
text = 'Ready'
text = '| ' + text + ' |'
self.canvas.create_text(x + 350, y + 5, text=text, font="Ebrima 13 bold", fill='orange', anchor='nw')
self.canvas.create_text(x + 420, y + 5, text='☰', font="Ebrima 13 bold", fill='orange', anchor='nw')
self.canvas.create_text(x + 440, y + 5, text='📁', font="Ebrima 13 bold", fill='orange', anchor='nw')
self.canvas.create_text(x + 463, y + 5, text='⚙', font="Ebrima 13 bold", fill='orange', anchor='nw')
if self.data['topmost'] == 1:
self.canvas.create_text(x + 485, y + 5, text='⮝', font="Ebrima 13 bold", fill='orange', anchor='nw')
else:
self.canvas.create_text(x + 485, y + 5, text='⮟', font="Ebrima 13 bold", fill='orange', anchor='nw')
self.canvas.create_text(x + 500, y + 5, text='✘', font="Ebrima 13 bold", fill='orange', anchor='nw')
self.canvas.create_line(x, y, x + 520, y, fill='orange')
self.canvas.create_line(x, y + 30, x + 520, y + 30, fill='orange')
if self.data['more']:
self.createDashboard()
def createDashboard(self):
mouseX, mouseY = mousePosition()
x, y = self.data['window position'][0], self.data['window position'][1]
try:
self.canvas.create_rectangle(x, y + 35, x + 520, y + 600, fill='black', outline='orange')
# pannel backgrounds
self.canvas.create_rectangle(x + 10, y + 40, x + 510, y + 150, fill='#111111', outline='#333333')
self.canvas.create_rectangle(x + 10, y + 160, x + 510, y + 270, fill='#111111', outline='#333333')
self.canvas.create_rectangle(x + 10, y + 280, x + 510, y + 540, fill='#111111', outline='#333333')
horPos = self.position / len(self.currentFileData) * 480 + 20
self.canvas.create_line(x + 20, y + 80, x + horPos, y + 80, fill='orange', width=2, dash=10)
self.canvas.create_line(x + horPos, y + 80, x + 500, y + 80, fill='orange', width=2)
above = False
for i in [0] + self.stopLocations:
horPos = i / len(self.currentFileData) * 480 + 20
if i in self.pristineRings:
colour = '#a1c5ff'
else:
colour = 'orange'
if i in self.stopLocations:
size=3
else:
size=1
if above and (mouseX - (x + horPos)) ** 2 + (mouseY - (y + 80)) ** 2 < size ** 2:
if horPos < 250:
anchor = 'w'
else:
anchor = 'e'
self.canvas.create_line(x + horPos, y + 70, x + horPos, y + 80, fill=colour)
jumps=i - self.position
if jumps > 0:
eta = ' | ' + self.getETA(jumps=jumps)
else:
eta=''
self.canvas.create_text(x + horPos, y + 60,
text=self.currentFileData[i][self.currentFileDataKeys['System Name']] + eta,
font="Ebrima 10 bold", fill=colour, anchor=anchor)
self.canvas.create_oval(x + horPos - size*1.5, y + 80 - size*1.5, x + horPos + size*1.5, y + 80 + size*1.5, fill=colour,
outline=colour)
elif not above:
self.canvas.create_rectangle(x + horPos - 8, y + 80, x + 500, y + 120, fill='#111111',
outline='#111111')
self.canvas.create_line(x + horPos, y + 80, x + horPos, y + 90, fill='orange')
self.canvas.create_text(x + horPos, y + 95,
text=self.currentFileData[i][self.currentFileDataKeys['System Name']],
font="Ebrima 10 bold", fill='orange', anchor='w')
self.canvas.create_oval(x + horPos - size, y + 80 - size, x + horPos + 5, y + 80 + size, fill=colour, outline=colour)
above = True
horPos = 500
jumps = len(self.currentFileData) - 1 - self.position
            if jumps > 0:
                eta = ' | ' + self.getETA(jumps=jumps)
            else:
                eta = ''
# cf_xarray/accessor.py
import functools
import inspect
import itertools
import warnings
from collections import ChainMap
from typing import (
Any,
Callable,
Dict,
Hashable,
Iterable,
List,
Mapping,
MutableMapping,
Set,
Tuple,
Union,
)
import xarray as xr
from xarray import DataArray, Dataset
from .helpers import bounds_to_vertices
from .utils import parse_cell_methods_attr
#: Classes wrapped by cf_xarray.
_WRAPPED_CLASSES = (
xr.core.resample.Resample,
xr.core.groupby.GroupBy,
xr.core.rolling.Rolling,
xr.core.rolling.Coarsen,
xr.core.weighted.Weighted,
)
#: `axis` names understood by cf_xarray
_AXIS_NAMES = ("X", "Y", "Z", "T")
#: `coordinate` types understood by cf_xarray.
_COORD_NAMES = ("longitude", "latitude", "vertical", "time")
#: Cell measures understood by cf_xarray.
_CELL_MEASURES = ("area", "volume")
# Define the criteria for coordinate matches
# Copied from metpy
# Internally we only use X, Y, Z, T
coordinate_criteria: MutableMapping[str, MutableMapping[str, Tuple]] = {
"standard_name": {
"X": ("projection_x_coordinate",),
"Y": ("projection_y_coordinate",),
"T": ("time",),
"time": ("time",),
"vertical": (
"air_pressure",
"height",
"depth",
"geopotential_height",
# computed dimensional coordinate name
"altitude",
"height_above_geopotential_datum",
"height_above_reference_ellipsoid",
"height_above_mean_sea_level",
),
"Z": (
"model_level_number",
"atmosphere_ln_pressure_coordinate",
"atmosphere_sigma_coordinate",
"atmosphere_hybrid_sigma_pressure_coordinate",
"atmosphere_hybrid_height_coordinate",
"atmosphere_sleve_coordinate",
"ocean_sigma_coordinate",
"ocean_s_coordinate",
"ocean_s_coordinate_g1",
"ocean_s_coordinate_g2",
"ocean_sigma_z_coordinate",
"ocean_double_sigma_coordinate",
),
"latitude": ("latitude",),
"longitude": ("longitude",),
},
"_CoordinateAxisType": {
"T": ("Time",),
"Z": ("GeoZ", "Height", "Pressure"),
"Y": ("GeoY",),
"latitude": ("Lat",),
"X": ("GeoX",),
"longitude": ("Lon",),
},
"axis": {"T": ("T",), "Z": ("Z",), "Y": ("Y",), "X": ("X",)},
"cartesian_axis": {"T": ("T",), "Z": ("Z",), "Y": ("Y",), "X": ("X",)},
"positive": {"vertical": ("up", "down")},
"units": {
"latitude": (
"degree_north",
"degree_N",
"degreeN",
"degrees_north",
"degrees_N",
"degreesN",
),
"longitude": (
"degree_east",
"degree_E",
"degreeE",
"degrees_east",
"degrees_E",
"degreesE",
),
},
}
# "long_name" and "standard_name" criteria are the same. For convenience.
coordinate_criteria["long_name"] = coordinate_criteria["standard_name"]
#: regular expressions for guess_coord_axis
regex = {
"time": "time[0-9]*|min|hour|day|week|month|year",
"vertical": (
"(lv_|bottom_top|sigma|h(ei)?ght|altitude|depth|isobaric|pres|"
"isotherm)[a-z_]*[0-9]*"
),
"Y": "y",
"latitude": "y?lat[a-z0-9]*",
"X": "x",
"longitude": "x?lon[a-z0-9]*",
}
regex["Z"] = regex["vertical"]
regex["T"] = regex["time"]
attrs = {
"X": {"axis": "X"},
"T": {"axis": "T", "standard_name": "time"},
"Y": {"axis": "Y"},
"Z": {"axis": "Z"},
"latitude": {"units": "degrees_north", "standard_name": "latitude"},
"longitude": {"units": "degrees_east", "standard_name": "longitude"},
}
attrs["time"] = attrs["T"]
attrs["vertical"] = attrs["Z"]
def _is_datetime_like(da: DataArray) -> bool:
import numpy as np
if np.issubdtype(da.dtype, np.datetime64) or np.issubdtype(
da.dtype, np.timedelta64
):
return True
try:
import cftime
if isinstance(da.data[0], cftime.datetime):
return True
except ImportError:
pass
return False
# Type for Mapper functions
Mapper = Callable[[Union[DataArray, Dataset], str], List[str]]
def apply_mapper(
mappers: Union[Mapper, Tuple[Mapper, ...]],
obj: Union[DataArray, Dataset],
key: str,
error: bool = True,
default: Any = None,
) -> List[Any]:
"""
Applies a mapping function; does error handling / returning defaults.
Expects the mapper function to raise an error if passed a bad key.
It should return a list in all other cases including when there are no
results for a good key.
"""
if default is None:
default = []
def _apply_single_mapper(mapper):
try:
results = mapper(obj, key)
except Exception as e:
if error:
raise e
else:
results = []
return results
if not isinstance(mappers, Iterable):
mappers = (mappers,)
# apply a sequence of mappers
# if the mapper fails, it *should* return an empty list
# if the mapper raises an error, that is processed based on `error`
results = []
for mapper in mappers:
results.append(_apply_single_mapper(mapper))
nresults = sum([bool(v) for v in results])
if nresults > 1:
raise KeyError(
f"Multiple mappers succeeded with key {key!r}.\nI was using mappers: {mappers!r}."
f"I received results: {results!r}.\nPlease open an issue."
)
if nresults == 0:
if error:
raise KeyError(
f"cf-xarray cannot interpret key {key!r}. Perhaps some needed attributes are missing."
)
else:
# none of the mappers worked. Return the default
return default
return list(itertools.chain(*results))
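# --- Illustrative sketch (not part of the original module) -------------------
# apply_mapper() funnels a single key through one or more mapper functions and
# merges their results; with error=False an uninterpretable key falls back to
# `default`. The helper name below is hypothetical.
def _example_apply_mapper():
    import numpy as np

    ds = xr.Dataset(
        {"air": (("lat",), np.arange(3.0))},
        coords={"lat": ("lat", [10.0, 20.0, 30.0], {"units": "degrees_north"})},
    )
    # "latitude" resolves to the "lat" coordinate through its units attribute.
    names = apply_mapper((_get_axis_coord,), ds["air"], "latitude")
    # An unknown key raises with error=True (the default) but can fall back.
    fallback = apply_mapper(
        (_get_axis_coord,), ds["air"], "not-a-cf-key", error=False, default=["air"]
    )
    return names, fallback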
def _get_axis_coord_single(var: Union[DataArray, Dataset], key: str) -> List[str]:
""" Helper method for when we really want only one result per key. """
results = _get_axis_coord(var, key)
if len(results) > 1:
raise KeyError(
f"Multiple results for {key!r} found: {results!r}. I expected only one."
)
elif len(results) == 0:
raise KeyError(f"No results found for {key!r}.")
return results
def _get_axis_coord_time_accessor(
var: Union[DataArray, Dataset], key: str
) -> List[str]:
"""
Helper method for when our key name is of the nature "T.month" and we want to
isolate the "T" for coordinate mapping
Parameters
----------
var: DataArray, Dataset
DataArray belonging to the coordinate to be checked
key: str, [e.g. "T.month"]
key to check for.
Returns
-------
List[str], Variable name(s) in parent xarray object that matches axis or coordinate `key` appended by the frequency extension (e.g. ".month")
Notes
-----
Returns an empty list if there is no frequency extension specified.
"""
if "." in key:
key, ext = key.split(".", 1)
results = _get_axis_coord_single(var, key)
return [v + "." + ext for v in results]
else:
return []
def _get_axis_coord(var: Union[DataArray, Dataset], key: str) -> List[str]:
"""
Translate from axis or coord name to variable name
Parameters
----------
var: DataArray, Dataset
DataArray belonging to the coordinate to be checked
key: str, ["X", "Y", "Z", "T", "longitude", "latitude", "vertical", "time"]
key to check for.
Returns
-------
List[str], Variable name(s) in parent xarray object that matches axis or coordinate `key`
Notes
-----
This functions checks for the following attributes in order
- `standard_name` (CF option)
- `_CoordinateAxisType` (from THREDDS)
- `axis` (CF option)
- `positive` (CF standard for non-pressure vertical coordinate)
References
----------
MetPy's parse_cf
"""
valid_keys = _COORD_NAMES + _AXIS_NAMES
if key not in valid_keys:
raise KeyError(
f"cf_xarray did not understand key {key!r}. Expected one of {valid_keys!r}"
)
search_in = set()
if "coordinates" in var.encoding:
search_in.update(var.encoding["coordinates"].split(" "))
if "coordinates" in var.attrs:
search_in.update(var.attrs["coordinates"].split(" "))
if not search_in:
search_in = set(var.coords)
# maybe only do this for key in _AXIS_NAMES?
search_in.update(var.indexes)
results: Set = set()
for coord in search_in:
for criterion, valid_values in coordinate_criteria.items():
if key in valid_values:
expected = valid_values[key]
if (
coord in var.coords
and var.coords[coord].attrs.get(criterion, None) in expected
):
results.update((coord,))
return list(results)
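# --- Illustrative sketch (not part of the original module) -------------------
# _get_axis_coord() matches on CF-style metadata rather than variable names:
# an "axis" attribute alone is enough to locate the X coordinate, whatever it
# happens to be called. The helper name below is hypothetical.
def _example_get_axis_coord():
    ds = xr.Dataset(coords={"xi": ("xi", [0.0, 1.0, 2.0], {"axis": "X"})})
    return _get_axis_coord(ds, "X")  # expected: ["xi"]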
def _get_measure_variable(
da: DataArray, key: str, error: bool = True, default: str = None
) -> List[DataArray]:
""" tiny wrapper since xarray does not support providing str for weights."""
varnames = apply_mapper(_get_measure, da, key, error, default)
if len(varnames) > 1:
raise ValueError(f"Multiple measures found for key {key!r}: {varnames!r}.")
return [da[varnames[0]]]
def _get_measure(obj: Union[DataArray, Dataset], key: str) -> List[str]:
"""
Translate from cell measures to appropriate variable name.
This function interprets the ``cell_measures`` attribute on DataArrays.
Parameters
----------
obj: DataArray, Dataset
DataArray belonging to the coordinate to be checked
key: str
key to check for.
Returns
-------
List[str], Variable name(s) in parent xarray object that matches axis or coordinate `key`
"""
if isinstance(obj, DataArray):
obj = obj._to_temp_dataset()
results = set()
for var in obj.variables:
da = obj[var]
if "cell_measures" in da.attrs:
attr = da.attrs["cell_measures"]
measures = parse_cell_methods_attr(attr)
if key in measures:
results.update([measures[key]])
if isinstance(results, str):
return [results]
return list(results)
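# Illustrative attribute this parser understands (hypothetical names): with
#   ds["air"].attrs["cell_measures"] = "area: cell_area"
# _get_measure(ds, "area") should return ["cell_area"], assuming
# parse_cell_methods_attr() splits the attribute into {"area": "cell_area"}.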
#: Default mappers for common keys.
_DEFAULT_KEY_MAPPERS: Mapping[str, Tuple[Mapper, ...]] = {
"dim": (_get_axis_coord,),
"dims": (_get_axis_coord,), # transpose
"dimensions": (_get_axis_coord,), # stack
"dims_dict": (_get_axis_coord,), # swap_dims, rename_dims
"shifts": (_get_axis_coord,), # shift, roll
"pad_width": (_get_axis_coord,), # shift, roll
# "names": something_with_all_valid_keys? # set_coords, reset_coords
"coords": (_get_axis_coord,), # interp
"indexers": (_get_axis_coord,), # sel, isel, reindex
# "indexes": (_get_axis_coord,), # set_index
"dims_or_levels": (_get_axis_coord,), # reset_index
"window": (_get_axis_coord,), # rolling_exp
"coord": (_get_axis_coord_single,), # differentiate, integrate
"group": (_get_axis_coord_single, _get_axis_coord_time_accessor),
"indexer": (_get_axis_coord_single,), # resample
"variables": (_get_axis_coord,), # sortby
"weights": (_get_measure_variable,), # type: ignore
}
def _get_with_standard_name(ds: Dataset, name: Union[str, List[str]]) -> List[str]:
""" returns a list of variable names with standard name == name. """
varnames = []
for vname, var in ds.variables.items():
stdname = var.attrs.get("standard_name", None)
if stdname == name:
varnames.append(str(vname))
return varnames
def _guess_bounds_dim(da):
"""
Guess bounds values given a 1D coordinate variable.
Assumes equal spacing on either side of the coordinate label.
"""
assert da.ndim == 1
dim = da.dims[0]
diff = da.diff(dim)
lower = da - diff / 2
upper = da + diff / 2
bounds = xr.concat([lower, upper], dim="bounds")
first = (bounds.isel({dim: 0}) - diff[0]).assign_coords({dim: da[dim][0]})
result = xr.concat([first, bounds], dim=dim)
return result
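# --- Illustrative sketch (not part of the original module) -------------------
# For an evenly spaced coordinate the guessed bounds sit halfway between
# neighbouring labels: [10, 20, 30] gives (5, 15), (15, 25), (25, 35). The
# helper name below is hypothetical.
def _example_guess_bounds() -> DataArray:
    lat = xr.DataArray(
        [10.0, 20.0, 30.0], dims="lat", coords={"lat": [10.0, 20.0, 30.0]}
    )
    return _guess_bounds_dim(lat)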
def _build_docstring(func):
"""
Builds a nice docstring for wrapped functions, stating what key words
can be used for arguments.
"""
# this list will need to be updated any time a new mapper is added
mapper_docstrings = {
_get_axis_coord: f"One or more of {(_AXIS_NAMES + _COORD_NAMES)!r}",
_get_axis_coord_single: f"One of {(_AXIS_NAMES + _COORD_NAMES)!r}",
# _get_measure_variable: f"One of {_CELL_MEASURES!r}",
# tempest/scenario/manager.py
# Copyright 2012 OpenStack Foundation
# Copyright 2013 IBM Corp.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import logging
import os
import six
import subprocess
from ironicclient import exc as ironic_exceptions
import netaddr
from neutronclient.common import exceptions as exc
from novaclient import exceptions as nova_exceptions
from tempest.api.network import common as net_common
from tempest import clients
from tempest.common import isolated_creds
from tempest.common.utils import data_utils
from tempest.common.utils.linux import remote_client
from tempest import config
from tempest import exceptions
from tempest.openstack.common import log
import tempest.test
CONF = config.CONF
LOG = log.getLogger(__name__)
# NOTE(afazekas): Workaround for the stdout logging
LOG_nova_client = logging.getLogger('novaclient.client')
LOG_nova_client.addHandler(log.NullHandler())
LOG_cinder_client = logging.getLogger('cinderclient.client')
LOG_cinder_client.addHandler(log.NullHandler())
class OfficialClientTest(tempest.test.BaseTestCase):
"""
Official Client test base class for scenario testing.
Official Client tests are tests that have the following characteristics:
* Test basic operations of an API, typically in an order that
a regular user would perform those operations
* Test only the correct inputs and action paths -- no fuzz or
random input data is sent, only valid inputs.
* Use only the default client tool for calling an API
"""
@classmethod
def setUpClass(cls):
super(OfficialClientTest, cls).setUpClass()
cls.isolated_creds = isolated_creds.IsolatedCreds(
cls.__name__, tempest_client=False,
network_resources=cls.network_resources)
username, password, tenant_name = cls.credentials()
cls.manager = clients.OfficialClientManager(
username, password, tenant_name)
cls.compute_client = cls.manager.compute_client
cls.image_client = cls.manager.image_client
cls.baremetal_client = cls.manager.baremetal_client
cls.identity_client = cls.manager.identity_client
cls.network_client = cls.manager.network_client
cls.volume_client = cls.manager.volume_client
cls.object_storage_client = cls.manager.object_storage_client
cls.orchestration_client = cls.manager.orchestration_client
cls.data_processing_client = cls.manager.data_processing_client
cls.resource_keys = {}
cls.os_resources = []
@classmethod
def _get_credentials(cls, get_creds, prefix):
if CONF.compute.allow_tenant_isolation:
username, tenant_name, password = get_creds()
else:
username = getattr(CONF.identity, prefix + 'username')
password = getattr(CONF.identity, prefix + 'password')
tenant_name = getattr(CONF.identity, prefix + 'tenant_name')
return username, password, tenant_name
@classmethod
def credentials(cls):
return cls._get_credentials(cls.isolated_creds.get_primary_creds, '')
@classmethod
def alt_credentials(cls):
return cls._get_credentials(cls.isolated_creds.get_alt_creds, 'alt_')
@classmethod
def admin_credentials(cls):
return cls._get_credentials(cls.isolated_creds.get_admin_creds,
'admin_')
@staticmethod
def cleanup_resource(resource, test_name):
LOG.debug("Deleting %r from shared resources of %s" %
(resource, test_name))
try:
# OpenStack resources are assumed to have a delete()
# method which destroys the resource...
resource.delete()
except Exception as e:
# If the resource is already missing, mission accomplished.
# add status code as workaround for bug 1247568
if (e.__class__.__name__ == 'NotFound' or
(hasattr(e, 'status_code') and e.status_code == 404)):
return
raise
def is_deletion_complete():
# Deletion testing is only required for objects whose
# existence cannot be checked via retrieval.
if isinstance(resource, dict):
return True
try:
resource.get()
except Exception as e:
# Clients are expected to return an exception
# called 'NotFound' if retrieval fails.
if e.__class__.__name__ == 'NotFound':
return True
raise
return False
# Block until resource deletion has completed or timed-out
tempest.test.call_until_true(is_deletion_complete, 10, 1)
@classmethod
def tearDownClass(cls):
# NOTE(jaypipes): Because scenario tests are typically run in a
# specific order, and because test methods in scenario tests
# generally create resources in a particular order, we destroy
# resources in the reverse order in which resources are added to
# the scenario test class object
while cls.os_resources:
thing = cls.os_resources.pop()
cls.cleanup_resource(thing, cls.__name__)
cls.isolated_creds.clear_isolated_creds()
super(OfficialClientTest, cls).tearDownClass()
@classmethod
def set_resource(cls, key, thing):
LOG.debug("Adding %r to shared resources of %s" %
(thing, cls.__name__))
cls.resource_keys[key] = thing
cls.os_resources.append(thing)
@classmethod
def get_resource(cls, key):
return cls.resource_keys[key]
@classmethod
def remove_resource(cls, key):
thing = cls.resource_keys[key]
cls.os_resources.remove(thing)
del cls.resource_keys[key]
def status_timeout(self, things, thing_id, expected_status,
error_status='ERROR',
not_found_exception=nova_exceptions.NotFound):
"""
Given a thing and an expected status, do a loop, sleeping
for a configurable amount of time, checking for the
expected status to show. At any time, if the returned
status of the thing is ERROR, fail out.
"""
self._status_timeout(things, thing_id,
expected_status=expected_status,
error_status=error_status,
not_found_exception=not_found_exception)
def delete_timeout(self, things, thing_id,
error_status='ERROR',
not_found_exception=nova_exceptions.NotFound):
"""
Given a thing, do a loop, sleeping
for a configurable amount of time, checking for the
deleted status to show. At any time, if the returned
status of the thing is ERROR, fail out.
"""
self._status_timeout(things,
thing_id,
allow_notfound=True,
error_status=error_status,
not_found_exception=not_found_exception)
def _status_timeout(self,
things,
thing_id,
expected_status=None,
allow_notfound=False,
error_status='ERROR',
not_found_exception=nova_exceptions.NotFound):
log_status = expected_status if expected_status else ''
if allow_notfound:
log_status += ' or NotFound' if log_status != '' else 'NotFound'
def check_status():
# python-novaclient has resources available to its client
# that all implement a get() method taking an identifier
# for the singular resource to retrieve.
try:
thing = things.get(thing_id)
except not_found_exception:
if allow_notfound:
return True
else:
raise
new_status = thing.status
# Some components are reporting error status in lower case
# so case sensitive comparisons can really mess things
# up.
if new_status.lower() == error_status.lower():
message = ("%s failed to get to expected status (%s). "
"In %s state.") % (thing, expected_status,
new_status)
raise exceptions.BuildErrorException(message,
server_id=thing_id)
elif new_status == expected_status and expected_status is not None:
return True # All good.
LOG.debug("Waiting for %s to get to %s status. "
"Currently in %s status",
thing, log_status, new_status)
if not tempest.test.call_until_true(
check_status,
CONF.compute.build_timeout,
CONF.compute.build_interval):
message = ("Timed out waiting for thing %s "
"to become %s") % (thing_id, log_status)
raise exceptions.TimeoutException(message)
def _create_loginable_secgroup_rule_nova(self, client=None,
secgroup_id=None):
if client is None:
client = self.compute_client
if secgroup_id is None:
sgs = client.security_groups.list()
for sg in sgs:
if sg.name == 'default':
secgroup_id = sg.id
# These rules are intended to permit inbound ssh and icmp
# traffic from all sources, so no group_id is provided.
# Setting a group_id would only permit traffic from ports
# belonging to the same security group.
rulesets = [
{
# ssh
'ip_protocol': 'tcp',
'from_port': 22,
'to_port': 22,
'cidr': '0.0.0.0/0',
},
{
# ping
'ip_protocol': 'icmp',
'from_port': -1,
'to_port': -1,
'cidr': '0.0.0.0/0',
}
]
rules = list()
for ruleset in rulesets:
sg_rule = client.security_group_rules.create(secgroup_id,
**ruleset)
self.set_resource(sg_rule.id, sg_rule)
rules.append(sg_rule)
return rules
    def create_server(self, client=None, name=None, image=None, flavor=None,
                      wait=True, create_kwargs=None):
        # Use a fresh dict per call: create_kwargs is mutated below when a NIC
        # is chosen, so a shared mutable default would leak between calls.
        if create_kwargs is None:
            create_kwargs = {}
        if client is None:
            client = self.compute_client
if name is None:
name = data_utils.rand_name('scenario-server-')
if image is None:
image = CONF.compute.image_ref
if flavor is None:
flavor = CONF.compute.flavor_ref
fixed_network_name = CONF.compute.fixed_network_name
if 'nics' not in create_kwargs and fixed_network_name:
networks = client.networks.list()
# If several networks found, set the NetID on which to connect the
# server to avoid the following error "Multiple possible networks
# found, use a Network ID to be more specific."
# See Tempest #1250866
if len(networks) > 1:
for network in networks:
if network.label == fixed_network_name:
create_kwargs['nics'] = [{'net-id': network.id}]
break
                # If we didn't find the network we were looking for:
else:
msg = ("The network on which the NIC of the server must "
"be connected can not be found : "
"fixed_network_name=%s. Starting instance without "
"specifying a network.") % fixed_network_name
LOG.info(msg)
LOG.debug("Creating a server (name: %s, image: %s, flavor: %s)",
name, image, flavor)
server = client.servers.create(name, image, flavor, **create_kwargs)
self.assertEqual(server.name, name)
self.set_resource(name, server)
if wait:
self.status_timeout(client.servers, server.id, 'ACTIVE')
# The instance retrieved on creation is missing network
# details, necessitating retrieval after it becomes active to
# ensure correct details.
server = client.servers.get(server.id)
self.set_resource(name, server)
LOG.debug("Created server: %s", server)
return server
def create_volume(self, client=None, size=1, name=None,
snapshot_id=None, imageRef=None):
if client is None:
client = self.volume_client
if name is None:
name = data_utils.rand_name('scenario-volume-')
LOG.debug("Creating a volume (size: %s, name: %s)", size, name)
volume = client.volumes.create(size=size, display_name=name,
snapshot_id=snapshot_id,
imageRef=imageRef)
self.set_resource(name, volume)
self.assertEqual(name, volume.display_name)
self.status_timeout(client.volumes, volume.id, 'available')
LOG.debug("Created volume: %s", volume)
return volume
def create_server_snapshot(self, server, compute_client=None,
image_client=None, name=None):
if compute_client is None:
compute_client = self.compute_client
if image_client is None:
image_client = self.image_client
if name is None:
name = data_utils.rand_name('scenario-snapshot-')
LOG.debug("Creating a snapshot image for server: %s", server.name)
image_id = compute_client.servers.create_image(server, name)
self.addCleanup(image_client.images.delete, image_id)
self.status_timeout(image_client.images, image_id, 'active')
snapshot_image = image_client.images.get(image_id)
self.assertEqual(name, snapshot_image.name)
LOG.debug("Created snapshot image %s for server %s",
snapshot_image.name, server.name)
return snapshot_image
def create_keypair(self, client=None, name=None):
if client is None:
client = self.compute_client
if name is None:
name = data_utils.rand_name('scenario-keypair-')
keypair = client.keypairs.create(name)
self.assertEqual(keypair.name, name)
self.set_resource(name, keypair)
return keypair
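# --- Illustrative sketch (not part of the original file) ---------------------
# A minimal scenario built on the helpers above might look like the
# hypothetical class below; every resource registered through set_resource()
# is cleaned up automatically by tearDownClass().
class ExampleMinimalScenario(OfficialClientTest):
    """Hypothetical example, not an actual tempest scenario test."""

    def test_boot_and_snapshot(self):
        self.create_keypair()
        server = self.create_server(wait=True)
        self.create_server_snapshot(server)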
# seidelma/id3c
"""
Datastore abstraction for our database.
"""
import logging
import psycopg2
from functools import wraps
from psycopg2 import DataError, DatabaseError, IntegrityError, ProgrammingError, sql
from psycopg2.errors import InsufficientPrivilege
from typing import Any
from uuid import UUID
from werkzeug.exceptions import Forbidden, NotFound, Conflict
from .. import db
from ..db import find_identifier, upsert_sample
from ..db.session import DatabaseSession
from .exceptions import AuthenticationRequired, BadRequest
from .utils import export
LOG = logging.getLogger(__name__)
def catch_permission_denied(function):
"""
Decorator to catch :class:`psycopg2.ProgrammingError` exceptions with the
``INSUFFICIENT_PRIVILEGE`` error code and rethrow them as
:class:`~werkzeug.exceptions.Forbidden` exceptions instead.
"""
@wraps(function)
def decorated(*args, **kwargs):
try:
return function(*args, **kwargs)
except InsufficientPrivilege as error:
LOG.error("Forbidden: %s", error)
raise Forbidden()
return decorated
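# --- Illustrative sketch (not part of the original module) -------------------
# A datastore function can simply let psycopg2's "permission denied" errors
# propagate; the decorator above rewrites them as HTTP 403 Forbidden for the
# API layer. The function name and query below are hypothetical.
@catch_permission_denied
def _example_privileged_read(session: DatabaseSession) -> Any:
    with session, session.cursor() as cursor:
        cursor.execute("select 1")
        return cursor.fetchone()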
@export
def login(username: str, password: str) -> DatabaseSession:
"""
Creates a new database session authenticated as the given user.
Returns an opaque session object which other functions in this module
require.
"""
LOG.debug(f"Logging into PostgreSQL database as '{username}'")
try:
return DatabaseSession(username = username, password = password)
except DatabaseError as error:
raise AuthenticationRequired() from None
@export
@catch_permission_denied
def store_enrollment(session: DatabaseSession, document: str) -> None:
"""
Store the given enrollment JSON *document* (a **string**) in the backing
database using *session*.
Raises a :class:`BadRequestDatabaseError` exception if the given *document*
isn't valid and a :class:`Forbidden` exception if the database reports a
`permission denied` error.
"""
with session, session.cursor() as cursor:
try:
cursor.execute(
"INSERT INTO receiving.enrollment (document) VALUES (%s)",
(document,))
except (DataError, IntegrityError) as error:
raise BadRequestDatabaseError(error) from None
@export
@catch_permission_denied
def store_presence_absence(session: DatabaseSession, document: str) -> None:
"""
Store the given presence/absence *document* (a **string**) in the backing
database using *session*.
Raises a :class:`BadRequestDatabaseError` exception if the given *document*
isn't valid and a :class:`Forbidden` exception if the database reports a
`permission denied` error.
"""
with session, session.cursor() as cursor:
try:
cursor.execute(
"insert into receiving.presence_absence (document) VALUES (%s)",
(document,))
except (DataError, IntegrityError) as error:
raise BadRequestDatabaseError(error) from None
@export
@catch_permission_denied
def store_sequence_read_set(session: DatabaseSession, document: str) -> None:
"""
Store the given sequence read set *document* (a **string**) in the backing
database using *session*.
Raises a :class:`BadRequestDatabaseError` exception if the given *document*
isn't valid and a :class:`Forbidden` exception if the database reports a
`permission denied` error.
"""
with session, session.cursor() as cursor:
try:
cursor.execute(
"insert into receiving.sequence_read_set (document) values (%s)",
(document,))
except (DataError, IntegrityError) as error:
raise BadRequestDatabaseError(error) from None
@export
@catch_permission_denied
def store_consensus_genome(session: DatabaseSession, document: str) -> None:
"""
Store the given consensus genome *document* (a **string**) in the backing
database using *session*.
Raises a :class:`BadRequestDatabaseError` exception if the given *document*
isn't valid and a :class:`Forbidden` exception if the database reports a
`permission denied` error.
"""
with session, session.cursor() as cursor:
try:
cursor.execute(
"insert into receiving.consensus_genome (document) values (%s)",
(document,))
except (DataError, IntegrityError) as error:
raise BadRequestDatabaseError(error) from None
@export
@catch_permission_denied
def store_redcap_det(session: DatabaseSession, document: str) -> None:
"""
Store the given REDCap DET *document* (a **string**) in the backing
database using *session*.
Raises a :class:`BadRequestDatabaseError` exception if the given *document*
isn't valid and a :class:`Forbidden` exception if the database reports a
`permission denied` error.
"""
with session, session.cursor() as cursor:
try:
cursor.execute(
"insert into receiving.redcap_det (document) values (%s)",
(document,))
except (DataError, IntegrityError) as error:
raise BadRequestDatabaseError(error) from None
@export
@catch_permission_denied
def store_fhir(session: DatabaseSession, document: str) -> None:
"""
Store the given FHIR *document* (a **string**) in the backing
database using *session*.
Raises a :class:`BadRequestDatabaseError` exception if the given *document*
isn't valid and a :class:`Forbidden` exception if the database reports a
`permission denied` error.
"""
with session, session.cursor() as cursor:
try:
cursor.execute(
"insert into receiving.fhir (document) values (%s)",
(document,))
except (DataError, IntegrityError) as error:
raise BadRequestDatabaseError(error) from None
@export
@catch_permission_denied
def verify_barcode_use_list(session: DatabaseSession, barcode_use_list: list) -> Any:
"""
Check the given *barcode_use_list* containing objects with ``barcode`` and ``use``
keys and values to verify that each barcode exists in the backing database and that the
given use matches the stored use.
Returns a list of objects in the same order as the input, with each object including the
``barcode`` (string) and ``use`` (string) being verified, ``barcode_found`` (boolean)
indicating whether the given barcode exists, and ``use_match`` (boolean) indicating whether
the given use matches the stored use. The ``use_match`` value will be `null` if the barcode
does not exist.
"""
barcode_use_tuples = [(bu["barcode"],bu["use"]) for bu in barcode_use_list]
args_str = ','.join(['%s'] * len(barcode_use_tuples))
sql = "select q.barcode, q.use, \
case \
when identifier.barcode is not null then true else false \
end as barcode_found, \
case \
when identifier_set.use IS NULL then null \
when q.use::citext=identifier_set.use then true \
else false \
end as use_match \
from (values {}) as q (barcode, use) \
left join warehouse.identifier on q.barcode::citext = identifier.barcode \
left join warehouse.identifier_set using (identifier_set_id)".format(args_str)
result = session.fetch_all(sql, tuple(barcode_use_tuples))
return result
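# Illustrative call (hypothetical values): passing
#   [{"barcode": "aaaaaaaa", "use": "sample"}]
# returns one row per input entry carrying barcode, use, barcode_found and
# use_match, e.g. barcode_found=True with use_match=False for a barcode that
# exists but is registered under a different use.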
@export
@catch_permission_denied
def fetch_identifier(session: DatabaseSession, id: str) -> Any:
"""
Fetch the identifier *id* from the backing database using *session*.
*id* may be a full UUID or shortened barcode.
Returns a named tuple with ``uuid``, ``barcode``, ``generated``, ``set``,
and ``use`` attributes. If the identifier doesn't exist, raises a
:class:`~werkzeug.exceptions.NotFound` exception.
"""
try:
uuid = UUID(id)
id_field = "uuid"
except ValueError:
id_field = "barcode"
with session:
identifier = session.fetch_row(f"""
select uuid, barcode, generated, identifier_set.name as set, identifier_set.use
from warehouse.identifier
join warehouse.identifier_set using (identifier_set_id)
where {id_field} = %s
""", (id,))
if not identifier:
LOG.info(f"Identifier {id_field} «{id}» not found")
raise NotFound(f"Identifier {id_field} «{id}» not found")
return identifier
@export
@catch_permission_denied
def fetch_identifier_sets(session: DatabaseSession) -> Any:
"""
Fetch all identifier sets from the backing database using *session*.
Returns a list of named tuples with ``name``, ``description``, and ``use``
attributes.
"""
with session, session.cursor() as cursor:
cursor.execute("""
select name, description, use
from warehouse.identifier_set
""")
return list(cursor)
@export
@catch_permission_denied
def fetch_identifier_set(session: DatabaseSession, name: str) -> Any:
"""
Fetch the identifier set *name* from the backing database using *session*.
Returns a named tuple with ``name``, ``description``, and ``use`` attributes.
If the set doesn't exist, raises a :class:`~werkzeug.exceptions.NotFound`
exception.
"""
with session:
set = session.fetch_row("""
select name, description, use
from warehouse.identifier_set
where name = %s
""", (name,))
if not set:
LOG.info(f"Identifier set «{name}» not found")
raise NotFound(f"Identifier set «{name}» not found")
return set
@export
@catch_permission_denied
def make_identifier_set(session: DatabaseSession, name: str, **fields) -> bool:
"""
Create a new identifier set *name* in the backing database using *session*
if it doesn't already exist, or update if it does exist.
If *use* and/or *description* are provided as keyword arguments, their values
    are set in the database. Because *use* is a required field in the target table,
if it is not provided as a keyword argument the query will attempt to retrieve
its value from an existing record.
Returns ``True`` if the set was created or updated and ``False`` if it
already existed and was not updated.
Raises a :class:`BadRequestDatabaseError` exception if the database reports a
`constraint` error and a :class:`Forbidden` exception if the database reports a
`permission denied` error.
"""
with session, session.cursor() as cursor:
if "use" in fields and "description" in fields:
try:
cursor.execute("""
insert into warehouse.identifier_set (name, use, description)
values (%s, %s, nullif(%s, ''))
on conflict (name) do update
set use = excluded.use,
description = excluded.description
where identifier_set.use <> excluded.use
or coalesce(identifier_set.description,'') <> coalesce(excluded.description,'')
""", (name, fields["use"], fields["description"]))
except (DataError, IntegrityError) as error:
raise BadRequestDatabaseError(error) from None
elif "use" in fields:
try:
cursor.execute("""
insert into warehouse.identifier_set (name, use)
values (%s, %s)
on conflict (name) do update
set use = excluded.use
where identifier_set.use <> excluded.use
""", (name, fields["use"]))
except (DataError, IntegrityError) as error:
raise BadRequestDatabaseError(error) from None
elif "description" in fields:
try:
cursor.execute("""
insert into warehouse.identifier_set (name, use, description)
select s.name, t.use, s.description
from (values(%s, nullif(%s,''))) s(name, description)
left join (
select name, use
FROM warehouse.identifier_set WHERE name = %s
) t using (name)
on conflict (name) do update
set use = excluded.use, description = excluded.description
where identifier_set.use <> excluded.use
or coalesce(identifier_set.description,'') <> coalesce(excluded.description,'')
""", (name, fields["description"], name))
except (DataError, IntegrityError) as error:
raise BadRequestDatabaseError(error) from None
else:
try:
cursor.execute("""
insert into warehouse.identifier_set (name, use)
select s.name, t.use
from (values(%s)) s(name)
left join (
select name, use
FROM warehouse.identifier_set WHERE name = %s
) t using (name)
on conflict (name) do update
set use = excluded.use
where identifier_set.use <> excluded.use
""", (name, name))
except (DataError, IntegrityError) as error:
                raise BadRequestDatabaseError(error) from None
Leakage': 0.00611897,
'Renaming Unit/Int Front End RAT/Subthreshold Leakage with power gating': 0.00348781,
'Renaming Unit/Peak Dynamic': 4.56169,
'Renaming Unit/Runtime Dynamic': 0.329955,
'Renaming Unit/Subthreshold Leakage': 0.070483,
'Renaming Unit/Subthreshold Leakage with power gating': 0.0362779,
'Runtime Dynamic': 4.07746,
'Subthreshold Leakage': 6.21877,
'Subthreshold Leakage with power gating': 2.58311},
{'Area': 32.0201,
'Execution Unit/Area': 7.68434,
'Execution Unit/Complex ALUs/Area': 0.235435,
'Execution Unit/Complex ALUs/Gate Leakage': 0.0132646,
'Execution Unit/Complex ALUs/Peak Dynamic': 0.0277673,
'Execution Unit/Complex ALUs/Runtime Dynamic': 0.224498,
'Execution Unit/Complex ALUs/Subthreshold Leakage': 0.20111,
'Execution Unit/Complex ALUs/Subthreshold Leakage with power gating': 0.0754163,
'Execution Unit/Floating Point Units/Area': 4.6585,
'Execution Unit/Floating Point Units/Gate Leakage': 0.0656156,
'Execution Unit/Floating Point Units/Peak Dynamic': 0.14255,
'Execution Unit/Floating Point Units/Runtime Dynamic': 0.304033,
'Execution Unit/Floating Point Units/Subthreshold Leakage': 0.994829,
'Execution Unit/Floating Point Units/Subthreshold Leakage with power gating': 0.373061,
'Execution Unit/Gate Leakage': 0.120359,
'Execution Unit/Instruction Scheduler/Area': 1.66526,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Area': 0.275653,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Gate Leakage': 0.000977433,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Peak Dynamic': 1.04181,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Runtime Dynamic': 0.0699257,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Subthreshold Leakage': 0.0143453,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Subthreshold Leakage with power gating': 0.00810519,
'Execution Unit/Instruction Scheduler/Gate Leakage': 0.00568913,
'Execution Unit/Instruction Scheduler/Instruction Window/Area': 0.805223,
'Execution Unit/Instruction Scheduler/Instruction Window/Gate Leakage': 0.00414562,
'Execution Unit/Instruction Scheduler/Instruction Window/Peak Dynamic': 1.6763,
'Execution Unit/Instruction Scheduler/Instruction Window/Runtime Dynamic': 0.112788,
'Execution Unit/Instruction Scheduler/Instruction Window/Subthreshold Leakage': 0.0625755,
'Execution Unit/Instruction Scheduler/Instruction Window/Subthreshold Leakage with power gating': 0.0355964,
'Execution Unit/Instruction Scheduler/Peak Dynamic': 3.82262,
'Execution Unit/Instruction Scheduler/ROB/Area': 0.584388,
'Execution Unit/Instruction Scheduler/ROB/Gate Leakage': 0.00056608,
'Execution Unit/Instruction Scheduler/ROB/Peak Dynamic': 1.10451,
'Execution Unit/Instruction Scheduler/ROB/Runtime Dynamic': 0.0569314,
'Execution Unit/Instruction Scheduler/ROB/Subthreshold Leakage': 0.00906853,
'Execution Unit/Instruction Scheduler/ROB/Subthreshold Leakage with power gating': 0.00364446,
'Execution Unit/Instruction Scheduler/Runtime Dynamic': 0.239645,
'Execution Unit/Instruction Scheduler/Subthreshold Leakage': 0.0859892,
'Execution Unit/Instruction Scheduler/Subthreshold Leakage with power gating': 0.047346,
'Execution Unit/Integer ALUs/Area': 0.47087,
'Execution Unit/Integer ALUs/Gate Leakage': 0.0265291,
'Execution Unit/Integer ALUs/Peak Dynamic': 0.0581209,
'Execution Unit/Integer ALUs/Runtime Dynamic': 0.101344,
'Execution Unit/Integer ALUs/Subthreshold Leakage': 0.40222,
'Execution Unit/Integer ALUs/Subthreshold Leakage with power gating': 0.150833,
'Execution Unit/Peak Dynamic': 4.19185,
'Execution Unit/Register Files/Area': 0.570804,
'Execution Unit/Register Files/Floating Point RF/Area': 0.208131,
'Execution Unit/Register Files/Floating Point RF/Gate Leakage': 0.000232788,
'Execution Unit/Register Files/Floating Point RF/Peak Dynamic': 0.0269307,
'Execution Unit/Register Files/Floating Point RF/Runtime Dynamic': 0.002933,
'Execution Unit/Register Files/Floating Point RF/Subthreshold Leakage': 0.00399698,
'Execution Unit/Register Files/Floating Point RF/Subthreshold Leakage with power gating': 0.00176968,
'Execution Unit/Register Files/Gate Leakage': 0.000622708,
'Execution Unit/Register Files/Integer RF/Area': 0.362673,
'Execution Unit/Register Files/Integer RF/Gate Leakage': 0.00038992,
'Execution Unit/Register Files/Integer RF/Peak Dynamic': 0.0319061,
'Execution Unit/Register Files/Integer RF/Runtime Dynamic': 0.0216913,
'Execution Unit/Register Files/Integer RF/Subthreshold Leakage': 0.00614175,
'Execution Unit/Register Files/Integer RF/Subthreshold Leakage with power gating': 0.00246675,
'Execution Unit/Register Files/Peak Dynamic': 0.0588368,
'Execution Unit/Register Files/Runtime Dynamic': 0.0246243,
'Execution Unit/Register Files/Subthreshold Leakage': 0.0101387,
'Execution Unit/Register Files/Subthreshold Leakage with power gating': 0.00423643,
'Execution Unit/Results Broadcast Bus/Area Overhead': 0.0390912,
'Execution Unit/Results Broadcast Bus/Gate Leakage': 0.00537402,
'Execution Unit/Results Broadcast Bus/Peak Dynamic': 0.0741606,
'Execution Unit/Results Broadcast Bus/Runtime Dynamic': 0.196406,
'Execution Unit/Results Broadcast Bus/Subthreshold Leakage': 0.081478,
'Execution Unit/Results Broadcast Bus/Subthreshold Leakage with power gating': 0.0305543,
'Execution Unit/Runtime Dynamic': 1.09055,
'Execution Unit/Subthreshold Leakage': 1.79543,
'Execution Unit/Subthreshold Leakage with power gating': 0.688821,
'Gate Leakage': 0.368936,
'Instruction Fetch Unit/Area': 5.85939,
'Instruction Fetch Unit/Branch Predictor/Area': 0.138516,
'Instruction Fetch Unit/Branch Predictor/Chooser/Area': 0.0435221,
'Instruction Fetch Unit/Branch Predictor/Chooser/Gate Leakage': 0.000278362,
'Instruction Fetch Unit/Branch Predictor/Chooser/Peak Dynamic': 0.0168831,
'Instruction Fetch Unit/Branch Predictor/Chooser/Runtime Dynamic': 0.000210663,
'Instruction Fetch Unit/Branch Predictor/Chooser/Subthreshold Leakage': 0.00759719,
'Instruction Fetch Unit/Branch Predictor/Chooser/Subthreshold Leakage with power gating': 0.0039236,
'Instruction Fetch Unit/Branch Predictor/Gate Leakage': 0.000757657,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Area': 0.0435221,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Gate Leakage': 0.000278362,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Peak Dynamic': 0.0168831,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Runtime Dynamic': 0.000210663,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Subthreshold Leakage': 0.00759719,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Subthreshold Leakage with power gating': 0.0039236,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Area': 0.0257064,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Gate Leakage': 0.000154548,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Peak Dynamic': 0.0142575,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Runtime Dynamic': 0.000183811,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Subthreshold Leakage': 0.00384344,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Subthreshold Leakage with power gating': 0.00198631,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Area': 0.0151917,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Gate Leakage': 8.00196e-05,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Peak Dynamic': 0.00527447,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Runtime Dynamic': 7.1333e-05,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Subthreshold Leakage': 0.00181347,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Subthreshold Leakage with power gating': 0.000957045,
'Instruction Fetch Unit/Branch Predictor/Peak Dynamic': 0.0597838,
'Instruction Fetch Unit/Branch Predictor/RAS/Area': 0.0105732,
'Instruction Fetch Unit/Branch Predictor/RAS/Gate Leakage': 4.63858e-05,
'Instruction Fetch Unit/Branch Predictor/RAS/Peak Dynamic': 0.0117602,
'Instruction Fetch Unit/Branch Predictor/RAS/Runtime Dynamic': 0.000311598,
'Instruction Fetch Unit/Branch Predictor/RAS/Subthreshold Leakage': 0.000932505,
'Instruction Fetch Unit/Branch Predictor/RAS/Subthreshold Leakage with power gating': 0.000494733,
'Instruction Fetch Unit/Branch Predictor/Runtime Dynamic': 0.000916735,
'Instruction Fetch Unit/Branch Predictor/Subthreshold Leakage': 0.0199703,
'Instruction Fetch Unit/Branch Predictor/Subthreshold Leakage with power gating': 0.0103282,
'Instruction Fetch Unit/Branch Target Buffer/Area': 0.64954,
'Instruction Fetch Unit/Branch Target Buffer/Gate Leakage': 0.00272758,
'Instruction Fetch Unit/Branch Target Buffer/Peak Dynamic': 0.177867,
'Instruction Fetch Unit/Branch Target Buffer/Runtime Dynamic': 0.00200827,
'Instruction Fetch Unit/Branch Target Buffer/Subthreshold Leakage': 0.0811682,
'Instruction Fetch Unit/Branch Target Buffer/Subthreshold Leakage with power gating': 0.0435357,
'Instruction Fetch Unit/Gate Leakage': 0.0589979,
'Instruction Fetch Unit/Instruction Buffer/Area': 0.0226323,
'Instruction Fetch Unit/Instruction Buffer/Gate Leakage': 6.83558e-05,
'Instruction Fetch Unit/Instruction Buffer/Peak Dynamic': 0.606827,
'Instruction Fetch Unit/Instruction Buffer/Runtime Dynamic': 0.0208524,
'Instruction Fetch Unit/Instruction Buffer/Subthreshold Leakage': 0.00151885,
'Instruction Fetch Unit/Instruction Buffer/Subthreshold Leakage with power gating': 0.000701682,
'Instruction Fetch Unit/Instruction Cache/Area': 3.14635,
'Instruction Fetch Unit/Instruction Cache/Gate Leakage': 0.029931,
'Instruction Fetch Unit/Instruction Cache/Peak Dynamic': 1.32639,
'Instruction Fetch Unit/Instruction Cache/Runtime Dynamic': 0.0576799,
'Instruction Fetch Unit/Instruction Cache/Subthreshold Leakage': 0.367022,
'Instruction Fetch Unit/Instruction Cache/Subthreshold Leakage with power gating': 0.180386,
'Instruction Fetch Unit/Instruction Decoder/Area': 1.85799,
'Instruction Fetch Unit/Instruction Decoder/Gate Leakage': 0.0222493,
'Instruction Fetch Unit/Instruction Decoder/Peak Dynamic': 1.37404,
'Instruction Fetch Unit/Instruction Decoder/Runtime Dynamic': 0.0708242,
'Instruction Fetch Unit/Instruction Decoder/Subthreshold Leakage': 0.442943,
'Instruction Fetch Unit/Instruction Decoder/Subthreshold Leakage with power gating': 0.166104,
'Instruction Fetch Unit/Peak Dynamic': 3.60928,
'Instruction Fetch Unit/Runtime Dynamic': 0.152282,
'Instruction Fetch Unit/Subthreshold Leakage': 0.932286,
'Instruction Fetch Unit/Subthreshold Leakage with power gating': 0.40843,
'L2/Area': 4.53318,
'L2/Gate Leakage': 0.015464,
'L2/Peak Dynamic': 0.0451777,
'L2/Runtime Dynamic': 0.0132532,
'L2/Subthreshold Leakage': 0.834142,
'L2/Subthreshold Leakage with power gating': 0.401066,
'Load Store Unit/Area': 8.80901,
'Load Store Unit/Data Cache/Area': 6.84535,
'Load Store Unit/Data Cache/Gate Leakage': 0.0279261,
'Load Store Unit/Data Cache/Peak Dynamic': 2.06334,
'Load Store Unit/Data Cache/Runtime Dynamic': 0.419427,
'Load Store Unit/Data Cache/Subthreshold Leakage': 0.527675,
'Load Store Unit/Data Cache/Subthreshold Leakage with power gating': 0.25085,
'Load Store Unit/Gate Leakage': 0.0350888,
'Load Store Unit/LoadQ/Area': 0.0836782,
'Load Store Unit/LoadQ/Gate Leakage': 0.00059896,
'Load Store Unit/LoadQ/Peak Dynamic': 0.0267302,
'Load Store Unit/LoadQ/Runtime Dynamic': 0.0267302,
'Load Store Unit/LoadQ/Subthreshold Leakage': 0.00941961,
'Load Store Unit/LoadQ/Subthreshold Leakage with power gating': 0.00536918,
'Load Store Unit/Peak Dynamic': 2.18957,
'Load Store Unit/Runtime Dynamic': 0.577981,
'Load Store Unit/StoreQ/Area': 0.322079,
'Load Store Unit/StoreQ/Gate Leakage': 0.00329971,
'Load Store Unit/StoreQ/Peak Dynamic': 0.0659122,
'Load Store Unit/StoreQ/Runtime Dynamic': 0.131824,
'Load Store Unit/StoreQ/Subthreshold Leakage': 0.0345621,
'Load Store Unit/StoreQ/Subthreshold Leakage with power gating': 0.0197004,
'Load Store Unit/Subthreshold Leakage': 0.591321,
'Load Store Unit/Subthreshold Leakage with power gating': 0.283293,
'Memory Management Unit/Area': 0.4339,
'Memory Management Unit/Dtlb/Area': 0.0879726,
'Memory Management Unit/Dtlb/Gate Leakage': 0.00088729,
'Memory Management Unit/Dtlb/Peak Dynamic': 0.0233924,
'Memory Management Unit/Dtlb/Runtime Dynamic': 0.0240613,
'Memory Management Unit/Dtlb/Subthreshold Leakage': 0.0155699,
'Memory Management Unit/Dtlb/Subthreshold Leakage with power gating': 0.00887485,
'Memory Management Unit/Gate Leakage': 0.00808595,
'Memory Management Unit/Itlb/Area': 0.301552,
'Memory Management Unit/Itlb/Gate Leakage': 0.00393464,
'Memory Management Unit/Itlb/Peak Dynamic': 0.0824701,
'Memory Management Unit/Itlb/Runtime Dynamic': 0.00948431,
'Memory Management Unit/Itlb/Subthreshold Leakage': 0.0413758,
'Memory Management Unit/Itlb/Subthreshold Leakage with power gating': 0.0235842,
'Memory Management Unit/Peak Dynamic': 0.278763,
'Memory Management Unit/Runtime Dynamic': 0.0335456,
'Memory Management Unit/Subthreshold Leakage': 0.0766103,
'Memory Management Unit/Subthreshold Leakage with power gating': 0.0398333,
'Peak Dynamic': 13.9041,
'Renaming Unit/Area': 0.303608,
'Renaming Unit/FP Front End RAT/Area': 0.131045,
'Renaming Unit/FP Front End RAT/Gate Leakage': 0.00351123,
'Renaming Unit/FP Front End RAT/Peak Dynamic': 2.51468,
'Renaming Unit/FP Front End RAT/Runtime Dynamic': 0.0708425,
'Renaming Unit/FP Front End RAT/Subthreshold Leakage': 0.0308571,
'Renaming Unit/FP Front End RAT/Subthreshold Leakage with power gating': 0.0175885,
'Renaming Unit/Free List/Area': 0.0340654,
'Renaming Unit/Free List/Gate Leakage': 2.5481e-05,
'Renaming Unit/Free List/Peak Dynamic': 0.0306032,
'Renaming Unit/Free List/Runtime Dynamic': 0.004017,
'Renaming Unit/Free List/Subthreshold Leakage': 0.000370144,
'Renaming Unit/Free List/Subthreshold Leakage with power gating': 0.000201064,
'Renaming Unit/Gate Leakage': 0.00708398,
'Renaming Unit/Int Front End RAT/Area': 0.0941223,
'Renaming Unit/Int Front End RAT/Gate Leakage': 0.000283242,
'Renaming Unit/Int Front End RAT/Peak Dynamic': 0.731965,
'Renaming Unit/Int Front End RAT/Runtime Dynamic': 0.0345831,
 'Renaming Unit/Int Front End RAT/Subthreshold Leakage':
elif wordNext == "this" and wordNextNext == "evening":
remainder = "pm"
used = 2
daySpecified = True
elif wordNext == "at" and wordNextNext == "night":
if strHH and int(strHH) > 5:
remainder = "pm"
else:
remainder = "am"
used += 2
else:
if timeQualifier != "":
military = True
if strHH and int(strHH) <= 12 and \
(timeQualifier in timeQualifiersPM):
                                strHH = str(int(strHH) + 12)  # add 12 h for a PM qualifier
else:
# try to parse numbers without colons
# 5 hours, 10 minutes etc.
length = len(word)
strNum = ""
remainder = ""
for i in range(length):
if word[i].isdigit():
strNum += word[i]
else:
remainder += word[i]
if remainder == "":
remainder = wordNext.replace(".", "").lstrip().rstrip()
if (
remainder == "pm" or
wordNext == "pm" or
remainder == "p.m." or
wordNext == "p.m."):
strHH = strNum
remainder = "pm"
used = 1
elif (
remainder == "am" or
wordNext == "am" or
remainder == "a.m." or
wordNext == "a.m."):
strHH = strNum
remainder = "am"
used = 1
else:
if (
int(strNum) > 100 and
(
wordPrev == "o" or
wordPrev == "oh"
)):
# 0800 hours (pronounced oh-eight-hundred)
strHH = str(int(strNum) // 100)
strMM = str(int(strNum) % 100)
military = True
if wordNext == "hours":
used += 1
elif (
(wordNext == "hours" or wordNext == "hour" or
remainder == "hours" or remainder == "hour") and
word[0] != '0' and
(
int(strNum) < 100 or
int(strNum) > 2400
)):
# ignores military time
# "in 3 hours"
hrOffset = int(strNum)
used = 2
isTime = False
hrAbs = -1
minAbs = -1
elif wordNext == "minutes" or wordNext == "minute" or \
remainder == "minutes" or remainder == "minute":
# "in 10 minutes"
minOffset = int(strNum)
used = 2
isTime = False
hrAbs = -1
minAbs = -1
elif wordNext == "seconds" or wordNext == "second" \
or remainder == "seconds" or remainder == "second":
# in 5 seconds
secOffset = int(strNum)
used = 2
isTime = False
hrAbs = -1
minAbs = -1
elif int(strNum) > 100:
# military time, e.g. "1300 hours"
strHH = str(int(strNum) // 100)
strMM = str(int(strNum) % 100)
military = True
if wordNext == "hours" or wordNext == "hour" or \
remainder == "hours" or remainder == "hour":
used += 1
elif wordNext and wordNext[0].isdigit():
# military time, e.g. "04 38 hours"
strHH = strNum
strMM = wordNext
military = True
used += 1
if (wordNextNext == "hours" or
wordNextNext == "hour" or
remainder == "hours" or remainder == "hour"):
used += 1
elif (
wordNext == "" or wordNext == "o'clock" or
(
wordNext == "in" and
(
wordNextNext == "the" or
wordNextNext == timeQualifier
)
)):
strHH = strNum
strMM = "00"
if wordNext == "o'clock":
used += 1
if wordNext == "in" or wordNextNext == "in":
used += (1 if wordNext == "in" else 2)
wordNextNextNext = words[idx + 3] \
if idx + 3 < len(words) else ""
if (wordNextNext and
(wordNextNext in timeQualifier or
wordNextNextNext in timeQualifier)):
if (wordNextNext in timeQualifiersPM or
wordNextNextNext in timeQualifiersPM):
remainder = "pm"
used += 1
if (wordNextNext in timeQualifiersAM or
wordNextNextNext in timeQualifiersAM):
remainder = "am"
used += 1
if timeQualifier != "":
used += 1 # TODO: Unsure if this is 100% accurate
military = True
else:
isTime = False
HH = int(strHH) if strHH else 0
MM = int(strMM) if strMM else 0
HH = HH + 12 if remainder == "pm" and HH < 12 else HH
HH = HH - 12 if remainder == "am" and HH >= 12 else HH
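# e.g. "at 7 pm" -> HH=7, remainder="pm" -> HH becomes 19; "12 am" -> HH=12, remainder="am" -> HH becomes 0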
if (not military and
remainder not in ['am', 'pm', 'hours', 'minutes'] and
((not daySpecified) or dayOffset < 1)):
# ambiguous time, detect whether they mean this evening or
# the next morning based on whether it has already passed
if dateNow.hour < HH:
pass # No modification needed
elif dateNow.hour < HH + 12:
HH += 12
else:
# has passed, assume the next morning
dayOffset += 1
if timeQualifier in timeQualifiersPM and HH < 12:
HH += 12
if HH > 24 or MM > 59:
isTime = False
used = 0
if isTime:
hrAbs = HH
minAbs = MM
used += 1
if used > 0:
# remove the parsed words from the sentence
for i in range(used):
if idx + i >= len(words):
break
words[idx + i] = ""
if wordPrev == "o" or wordPrev == "oh":
words[idx - 1] = ""
if wordPrev == "early":
hrOffset = -1
words[idx - 1] = ""
idx -= 1
elif wordPrev == "late":
hrOffset = 1
words[idx - 1] = ""
idx -= 1
if idx > 0 and wordPrev in markers:
words[idx - 1] = ""
if wordPrev == "this":
daySpecified = True
if idx > 1 and wordPrevPrev in markers:
words[idx - 2] = ""
if wordPrevPrev == "this":
daySpecified = True
idx += used - 1
found = True
# check that we found a date
if not date_found:
return None
if dayOffset is False:
dayOffset = 0
# perform date manipulation
extractedDate = dateNow.replace(microsecond=0)
if datestr != "":
# date included an explicit date, e.g. "june 5" or "june 2, 2017"
try:
temp = datetime.strptime(datestr, "%B %d")
except ValueError:
# Try again, allowing the year
temp = datetime.strptime(datestr, "%B %d %Y")
extractedDate = extractedDate.replace(hour=0, minute=0, second=0)
if not hasYear:
temp = temp.replace(year=extractedDate.year,
tzinfo=extractedDate.tzinfo)
if extractedDate < temp:
extractedDate = extractedDate.replace(
year=int(currentYear),
month=int(temp.strftime("%m")),
day=int(temp.strftime("%d")),
tzinfo=extractedDate.tzinfo)
else:
extractedDate = extractedDate.replace(
year=int(currentYear) + 1,
month=int(temp.strftime("%m")),
day=int(temp.strftime("%d")),
tzinfo=extractedDate.tzinfo)
else:
extractedDate = extractedDate.replace(
year=int(temp.strftime("%Y")),
month=int(temp.strftime("%m")),
day=int(temp.strftime("%d")),
tzinfo=extractedDate.tzinfo)
else:
# ignore the current HH:MM:SS if relative using days or greater
if hrOffset == 0 and minOffset == 0 and secOffset == 0:
extractedDate = extractedDate.replace(hour=0, minute=0, second=0)
if yearOffset != 0:
extractedDate = extractedDate + relativedelta(years=yearOffset)
if monthOffset != 0:
extractedDate = extractedDate + relativedelta(months=monthOffset)
if dayOffset != 0:
extractedDate = extractedDate + relativedelta(days=dayOffset)
if hrAbs != -1 and minAbs != -1:
# If no time was supplied in the string set the time to default
# time if it's available
if hrAbs is None and minAbs is None and default_time is not None:
hrAbs, minAbs = default_time.hour, default_time.minute
else:
hrAbs = hrAbs or 0
minAbs = minAbs or 0
extractedDate = extractedDate + relativedelta(hours=hrAbs,
minutes=minAbs)
if (hrAbs != 0 or minAbs != 0) and datestr == "":
if not daySpecified and dateNow > extractedDate:
extractedDate = extractedDate + relativedelta(days=1)
if hrOffset != 0:
extractedDate = extractedDate + relativedelta(hours=hrOffset)
if minOffset != 0:
extractedDate = extractedDate + relativedelta(minutes=minOffset)
if secOffset != 0:
extractedDate = extractedDate + relativedelta(seconds=secOffset)
for idx, word in enumerate(words):
if words[idx] == "and" and \
words[idx - 1] == "" and words[idx + 1] == "":
words[idx] = ""
resultStr = " ".join(words)
resultStr = ' '.join(resultStr.split())
return [extractedDate, resultStr]
def isFractional_cnh(input_str, short_scale=True):
"""
This function takes the given text and checks if it is a fraction.
Args:
input_str (str): the string to check if fractional
short_scale (bool): use short scale if True, long scale if False
Returns:
(bool) or (float): False if not a fraction, otherwise the fraction
"""
if input_str.endswith('s', -1):
input_str = input_str[:len(input_str) - 1] # e.g. "fifths"
fracts = {"whole": 1, "half": 2, "halve": 2, "quarter": 4}
if short_scale:
for num in SHORT_ORDINAL_STRING_EN:
if num > 2:
fracts[SHORT_ORDINAL_STRING_EN[num]] = num
else:
for num in LONG_ORDINAL_STRING_EN:
if num > 2:
fracts[LONG_ORDINAL_STRING_EN[num]] = num
if input_str.lower() in fracts:
return 1.0 / fracts[input_str.lower()]
return False
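# Illustrative usage (example values; assumes SHORT_ORDINAL_STRING_EN maps 5 -> "fifth"):
#
#   isFractional_cnh("half")    # -> 0.5
#   isFractional_cnh("quarter") # -> 0.25
#   isFractional_cnh("fifths")  # -> 0.2 (the trailing "s" is stripped first)
#   isFractional_cnh("banana")  # -> False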
def normalize_cnh(text, remove_articles):
""" English string normalization """
words = text.split()  # this also removes extra spaces
normalized = ""
for word in words:
if remove_articles and word in ["the", "a", "an"]:
continue
# Expand common contractions, e.g. "isn't" -> "is not"
contraction = ["ain't", "aren't", "can't", "could've", "couldn't",
"didn't", "doesn't", "don't", "gonna", "gotta",
"hadn't", "hasn't", "haven't", "he'd", "he'll", "he's",
"how'd", "how'll", "how's", | |
# src/evaluation/experiment.py
'''
Created on Nov 1, 2016
@author: <NAME>
'''
import logging
import pickle
import pandas as pd
import datetime
import configparser
import os, subprocess
import sklearn.metrics as skm
import numpy as np
import glob
#import lample_lstm_tagger.lstm_wrapper as lstm_wrapper
import evaluation.metrics as metrics
from baselines.dawid_and_skene import ds, ibccvb
from evaluation.plots import SCORE_NAMES, plot_results
from baselines.hmm import HMM_crowd
from baselines.util import crowd_data, crowdlab, instance
from baselines import ibcc, clustering, majority_voting
from bsc import bsc
from data import data_utils
from evaluation.span_level_f1 import precision, recall, f1, strict_span_metrics_2
logging.basicConfig(level=logging.DEBUG)
def calculate_sample_metrics(nclasses, agg, gt, probs, doc_starts, print_per_class_results=False):
result = -np.ones(len(SCORE_NAMES) - 3)
gt = gt.astype(int)
probs[np.isnan(probs)] = 0
probs[np.isinf(probs)] = 0
# token-level metrics
result[0] = skm.accuracy_score(gt, agg)
# the results are undefined if some classes are not present in the gold labels
prec_by_class = skm.precision_score(gt, agg, average=None, labels=range(nclasses))
rec_by_class = skm.recall_score(gt, agg, average=None, labels=range(nclasses))
f1_by_class = skm.f1_score(gt, agg, average=None, labels=range(nclasses))
if print_per_class_results:
print('Token Precision:')
print(prec_by_class)
print('Token Recall:')
print(rec_by_class)
print('Token F1:')
print(f1_by_class)
result[1] = np.mean(prec_by_class[np.unique(gt)])
result[2] = np.mean(rec_by_class[np.unique(gt)])
result[3] = np.mean(f1_by_class[np.unique(gt)])
# span-level metrics - strict
p, r, f = strict_span_metrics_2(agg, gt, doc_starts)
result[6] = p # precision(agg, gt, True, doc_starts)
result[7] = r # recall(agg, gt, True, doc_starts)
result[8] = f # f1(agg, gt, True, doc_starts)
# span-level metrics -- relaxed
result[9] = precision(agg, gt, False, doc_starts)
result[10] = recall(agg, gt, False, doc_starts)
result[11] = f1(agg, gt, False, doc_starts)
auc_score = 0
total_weights = 0
for i in range(probs.shape[1]):
if not np.any(gt == i) or np.all(gt == i) or np.any(np.isnan(probs[:, i])) or np.any(np.isinf(probs[:, i])):
print('Could not evaluate AUC for class %i -- all data points have same value.' % i)
continue
auc_i = skm.roc_auc_score(gt == i, probs[:, i])
# print 'AUC for class %i: %f' % (i, auc_i)
auc_score += auc_i * np.sum(gt == i)
total_weights += np.sum(gt == i)
if print_per_class_results:
print('AUC for class %i = %f' % (i, auc_i))
result[4] = auc_score / float(total_weights) if total_weights > 0 else 0
result[5] = skm.log_loss(gt, probs, eps=1e-100, labels=np.arange(nclasses))
return result
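# Layout of the score vector filled above and extended by calculate_scores below
# (exact positions depend on SCORE_NAMES): 0 token accuracy, 1-3 macro
# precision/recall/F1 over the classes present in the gold labels, 4 support-weighted
# one-vs-rest AUC, 5 cross-entropy, 6-8 strict span precision/recall/F1,
# 9-11 relaxed span precision/recall/F1, followed by count error, number of invalid
# labels and mean length error appended in calculate_scores.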
def calculate_scores(nclasses, postprocess, agg, gt, probs, doc_start, bootstrapping=True, print_per_class_results=False):
result = -np.ones((len(SCORE_NAMES), 1))
# exclude data points with missing ground truth
agg = agg[gt != -1]
probs = probs[gt != -1]
doc_start = doc_start[gt != -1]
gt = gt[gt != -1]
print('unique ground truth values: ')
print(np.unique(gt))
print('unique prediction values: ')
print(np.unique(agg))
if postprocess:
agg = data_utils.postprocess(agg, doc_start)
print('Plotting confusion matrix for errors: ')
nclasses = probs.shape[1]
conf = np.zeros((nclasses, nclasses))
for i in range(nclasses):
for j in range(nclasses):
conf[i, j] = np.sum((gt == i).flatten() & (agg == j).flatten())
# print 'Acc for class %i: %f' % (i, skm.accuracy_score(gt==i, agg==i))
print(np.round(conf))
gold_doc_start = np.copy(doc_start)
gold_doc_start[gt == -1] = 0
Ndocs = int(np.sum(gold_doc_start))
if Ndocs < 100 and bootstrapping:
print('Using bootstrapping with small test set size')
# use bootstrap resampling with small datasets
nbootstraps = 100
nmetrics = len(SCORE_NAMES) - 3
resample_results = np.zeros((nmetrics, nbootstraps))
for i in range(nbootstraps):
print('Bootstrapping the evaluation: %i of %i' % (i, nbootstraps))
not_sampled = True
nsample_attempts = 0
while not_sampled:
sampleidxs = np.random.choice(Ndocs, Ndocs, replace=True)
sampleidxs = np.in1d(np.cumsum(gold_doc_start) - 1, sampleidxs)
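# np.cumsum(gold_doc_start) - 1 assigns each token the 0-based index of its document,
# so this turns the sampled document indices into a boolean mask over tokens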
if len(np.unique(gt[sampleidxs])) >= 2:
not_sampled = False
# if we have at least two class labels in the sample ground truth, we can use this sample
nsample_attempts += 1
if nsample_attempts >= Ndocs:
not_sampled = False
if nsample_attempts <= Ndocs:
resample_results[:, i] = calculate_sample_metrics(nclasses,
agg[sampleidxs],
gt[sampleidxs],
probs[sampleidxs],
doc_start[sampleidxs],
print_per_class_results)
else: # can't find enough valid samples for the bootstrap, let's just use what we got so far.
resample_results = resample_results[:, :i]
break
sample_res = np.mean(resample_results, axis=1)
result[:len(sample_res), 0] = sample_res
std_result = np.std(resample_results, axis=1)
else:
sample_res = calculate_sample_metrics(nclasses, agg, gt, probs, doc_start, print_per_class_results)
result[:len(sample_res), 0] = sample_res
std_result = None
result[len(sample_res)] = metrics.count_error(agg, gt)
result[len(sample_res) + 1] = metrics.num_invalid_labels(agg, doc_start)
result[len(sample_res) + 2] = metrics.mean_length_error(agg, gt, doc_start)
print('CEE tokens = %f' % result[5])
print('F1 score tokens = %f' % result[3])
print('F1 score spans strict = %f' % result[8])
print('F1 score spans relaxed = %f' % result[11])
return result, std_result
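# Minimal usage sketch (illustrative values, not taken from a real run):
#
#   agg = np.array([0, 1, 1, 0])        # aggregated predictions per token
#   gt = np.array([0, 1, 0, -1])        # gold labels; -1 marks missing ground truth
#   probs = np.full((4, 2), 0.5)        # per-class probabilities per token
#   doc_start = np.array([1, 0, 1, 0])  # 1 marks the first token of each document
#   scores, stds = calculate_scores(2, False, agg, gt, probs, doc_start, bootstrapping=False)
#   # stds is None unless the bootstrap branch is used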
class Experiment(object):
'''
Runs a set of annotation-aggregation methods (e.g. majority voting, IBCC, HMM_crowd,
BSC) over a dataset and evaluates them with the metrics defined above. Configuration
comes either from a config file or from the constructor arguments.
'''
generator = None
config_file = None
param_values = None
param_idx = None
param_fixed = None
acc_bias = None
miss_bias = None
short_bias = None
generate_data = False
methods = None
alpha0 = None
nu0 = None
num_runs = None
doc_length = None
group_sizes = None
num_docs = None
num_classes = None
save_results = False
save_plots = False
show_plots = False
postprocess = False # previous papers did not use this so we leave out to make results comparable.
# Only simple IOB2 is implemented so far.
opt_hyper = False
crf_probs = False
random_sampling = False
def __init__(self, generator, nclasses=None, nannotators=None, config=None, alpha0_factor=1.0, alpha0_diags=1.0,
beta0_factor=0.1, max_iter=20, crf_probs=False, rep=0, max_internal_iter=20):
self.output_dir = '~/data/bayesian_sequence_combination/output/'
self.crf_probs = crf_probs
self.generator = generator
if config is not None:
self.config_file = config
self.read_config_file()
else:
self.num_classes = nclasses
self.nannotators = nannotators
self.output_dir = os.path.expanduser(self.output_dir)
self.alpha0_factor = alpha0_factor
self.alpha0_diags = alpha0_diags
self.nu0_factor = beta0_factor
self.max_internal_iter = max_internal_iter
# save results from methods here. If we use compound methods, we can reuse these results in different
# combinations of methods.
self.aggs = {}
self.probs = {}
self.max_iter = max_iter # allow all methods to use a maximum no. iterations
np.random.seed(3849)
self.seed = np.random.randint(1, 1000, 100)[rep] # seeds for AL
def read_config_file(self):
print('Reading experiment config file...')
parser = configparser.ConfigParser()
parser.read(self.config_file)
# set up parameters
parameters = dict(parser.items('parameters'))
self.param_idx = int(parameters['idx'].split('#')[0].strip())
self.param_values = np.array(eval(parameters['values'].split('#')[0].strip()))
self.acc_bias = np.array(eval(parameters['acc_bias'].split('#')[0].strip()))
self.miss_bias = np.array(eval(parameters['miss_bias'].split('#')[0].strip()))
self.short_bias = np.array(eval(parameters['short_bias'].split('#')[0].strip()))
self.methods = eval(parameters['methods'].split('#')[0].strip())
self.postprocess = eval(parameters['postprocess'].split('#')[0].strip())
print(self.methods)
self.num_runs = int(parameters['num_runs'].split('#')[0].strip())
self.doc_length = int(parameters['doc_length'].split('#')[0].strip())
self.group_sizes = np.array(eval(parameters['group_sizes'].split('#')[0].strip()))
self.num_classes = int(parameters['num_classes'].split('#')[0].strip())
self.num_docs = int(parameters['num_docs'].split('#')[0].strip())
self.generate_data = eval(parameters['generate_data'].split('#')[0].strip())
# set up output
parameters = dict(parser.items('output'))
self.output_dir = parameters['output_dir']
self.save_results = eval(parameters['save_results'].split('#')[0].strip())
self.save_plots = eval(parameters['save_plots'].split('#')[0].strip())
self.show_plots = eval(parameters['show_plots'].split('#')[0].strip())
def tune_alpha0(self, alpha0diag_proposals, alpha0factor_proposals, nu0factor_proposals, method,
annotations, ground_truth, doc_start,
outputdir, text, tune_lstm=False, metric_idx_to_optimise=8,
ground_truth_val=None, doc_start_val=None, text_val=None, new_data=False):
if outputdir is not None:
if not os.path.exists(outputdir):
os.mkdir(outputdir)
self.methods = [method]
scores = np.zeros((len(nu0factor_proposals) * len(alpha0diag_proposals), len(alpha0factor_proposals)))
best_scores = np.zeros(4)
best_idxs = np.zeros(4)
for h, nu0factor in enumerate(nu0factor_proposals):
self.nu0_factor = nu0factor
for i, alpha0diag in enumerate(alpha0diag_proposals):
if tune_lstm:
self.alpha0_diags_lstm = alpha0diag
else:
self.alpha0_diags = alpha0diag
for j, alpha0factor in enumerate(alpha0factor_proposals):
# reset saved data so that models are run again.
self.aggs = {}
self.probs = {}
outputdir_ij = outputdir + ('_%i_%i_%i_' % (h, i, j)) + method + '/'
if tune_lstm:
self.alpha0_factor_lstm = alpha0factor
else:
self.alpha0_factor = alpha0factor
all_scores, _, _, _, _, _ = self.run_methods(annotations, ground_truth, doc_start,
outputdir_ij,
text,
ground_truth_val=ground_truth_val,
doc_start_val=doc_start_val,
text_val=text_val,
bootstrapping=False,
new_data=new_data)
scores[(h*len(alpha0diag_proposals)) + i, j] = all_scores[metric_idx_to_optimise, :] # 3 is F1score
print('Scores for %f, %f, %f: %f' % (nu0factor, alpha0diag, alpha0factor,
scores[(h*len(alpha0diag_proposals)) + i, j]))
if scores[(h*len(alpha0diag_proposals)) + i, j] > best_scores[0]:
best_scores[0] = scores[(h*len(alpha0diag_proposals)) + i, j]
best_scores[1] = nu0factor
best_scores[2] = alpha0diag
best_scores[3] = alpha0factor
best_idxs[0] = scores[(h*len(alpha0diag_proposals)) + i, j]
best_idxs[1] = h
best_idxs[2] = i
best_idxs[3] = j
print('Saving scores for this setting to %s' % (outputdir + '/%s_scores.csv' % method))
np.savetxt(outputdir + '/%s_scores.csv' % method, scores, fmt='%s', delimiter=',',
header=str(self.methods).strip('[]'))
np.savetxt(outputdir + '/%s_bestscores.csv' % method, best_scores, fmt='%s', delimiter=',',
header=str(self.methods).strip('[]'))
return best_idxs
def tune_acc_bias(self, acc_bias_proposals, method,
annotations, ground_truth, doc_start,
outputdir, text, tune_lstm=False, metric_idx_to_optimise=8,
ground_truth_val=None, doc_start_val=None, text_val=None):
self.methods = [method]
scores = np.zeros(len(acc_bias_proposals))
best_score = -np.inf
best_idx = 0
for i, acc_bias in enumerate(acc_bias_proposals):
self.alpha0_acc_bias = acc_bias
# reset saved data so that models are run again.
self.aggs = {}
self.probs = {}
outputdir_i = outputdir + ('_acc_bias_%i' % (i)) + method + '/'
all_scores, _, _, _, _, _ = self.run_methods(annotations, ground_truth, doc_start,
outputdir_i,
text,
ground_truth_val=ground_truth_val,
doc_start_val=doc_start_val,
text_val=text_val,
bootstrapping=False)
scores[i] = all_scores[metric_idx_to_optimise, :] # 3 is F1score
print('Scores for %f: %f' % (acc_bias, scores[i]))
if scores[i] > best_score:
best_score = scores[i]
best_idx = i
print('Saving scores for this setting to %s' % (outputdir + '/%s_scores.csv' % method))
np.savetxt(outputdir + '/%s_acc_bias_scores.csv' % method, scores, fmt='%s', delimiter=',',
header=str(self.methods).strip('[]'))
return best_score, best_idx
def _run_best_worker(self, annos, gt, doc_start):
# choose the best classifier by f1-score
f1scores = np.zeros_like(annos) - 1.0
print('F1 scores for individual workers:')
individual_scores = []
for w in range(annos.shape[1]):
valididxs = annos[:, | |
# %% md
# Data Loading
# %%
import os
os.environ['CUDA_VISIBLE_DEVICES'] = '0,1,2,3' # specify GPUs locally
import pandas as pd
from matplotlib import pyplot as plt
# import seaborn as sns
# %%
#os.listdir('../input/cassava-leaf-disease-classification')
# %%
train = pd.read_csv('/root/disk/csy/cassava/data/data/csv/2020_csv_5folds/train.csv')
train_merged = pd.read_csv('/root/disk/csy/cassava/data/data/merged19_20.csv')
test = pd.read_csv('/root/disk/csy/cassava/data/data/sample_submission.csv')
label_map = pd.read_json('/root/disk/csy/cassava/data/data/label_num_to_disease_map.json',
orient='index')
# %%
# Directory settings
# %%
# ====================================================
# Directory settings
# ====================================================
import os
OUTPUT_DIR = './results'
if not os.path.exists(OUTPUT_DIR):
os.makedirs(OUTPUT_DIR)
# TRAIN_PATH = '../input/cassava-leaf-disease-classification/train_images'
TRAIN_PATH = '/root/disk/csy/cassava/data/data/train_images'
TEST_PATH = '/root/disk/csy/cassava/data/data/test_images'
# %% md
# CFG
# %%
# ====================================================
# CFG
# ====================================================
class CFG:
debug = False
apex = False
print_freq = 100
num_workers = 4
model_name = 'resnext50_32x4d'
#model_name = 'tf_efficientnet_b4_ns'
size = 500 # 256
scheduler = 'CosineAnnealingWarmRestarts' # ['ReduceLROnPlateau', 'CosineAnnealingLR', 'CosineAnnealingWarmRestarts']
criterion = 'TaylorCrossEntropyLoss' # ['CrossEntropyLoss', LabelSmoothing', 'FocalLoss' 'FocalCosineLoss', 'SymmetricCrossEntropyLoss', 'BiTemperedLoss', 'TaylorCrossEntropyLoss']
epochs = 12
# factor=0.2 # ReduceLROnPlateau
# patience=4 # ReduceLROnPlateau
# eps=1e-6 # ReduceLROnPlateau
# T_max=10 # CosineAnnealingLR
T_0 = 10 # CosineAnnealingWarmRestarts
lr = 1e-4
min_lr = 1e-6
batch_size = 32
weight_decay = 1e-6
gradient_accumulation_steps = 1
max_grad_norm = 1000
seed = 42
target_size = 5
target_col = 'label'
n_fold = 5
trn_fold = [0, 1, 2, 3, 4]
train = True
smoothing = 0.05
t1 = 0.3 # bi-tempered-loss https://www.kaggle.com/c/cassava-leaf-disease-classification/discussion/202017
t2 = 1.0 # bi-tempered-loss https://www.kaggle.com/c/cassava-leaf-disease-classification/discussion/202017
if CFG.debug:
CFG.epochs = 1
train = train.sample(n=1000, random_state=CFG.seed).reset_index(drop=True)
# %% md
# Library
# %%
# ====================================================
# Library
# ====================================================
import sys
sys.path.append('../pytorch-image-models-master')
import os
import math
import time
import random
import shutil
from pathlib import Path
from contextlib import contextmanager
from collections import defaultdict, Counter
import scipy as sp
import numpy as np
import pandas as pd
from sklearn import preprocessing
from sklearn.metrics import accuracy_score
from sklearn.model_selection import StratifiedKFold
from tqdm.auto import tqdm
from functools import partial
import cv2
from PIL import Image
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.optim import Adam, SGD
import torchvision.models as models
from torch.nn.parameter import Parameter
from torch.utils.data import DataLoader, Dataset, WeightedRandomSampler
from torch.optim.lr_scheduler import CosineAnnealingWarmRestarts, CosineAnnealingLR, ReduceLROnPlateau
import albumentations as A
from albumentations.pytorch import ToTensorV2
from albumentations import ImageOnlyTransform
import timm
import warnings
warnings.filterwarnings('ignore')
if CFG.apex:
# from apex import amp
from torch.cuda.amp import autocast, GradScaler
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
# %% md
# Utils
# %%
# ====================================================
# Utils
# ====================================================
def get_score(y_true, y_pred):
return accuracy_score(y_true, y_pred)
@contextmanager
def timer(name):
t0 = time.time()
LOGGER.info(f'[{name}] start')
yield
LOGGER.info(f'[{name}] done in {time.time() - t0:.0f} s.')
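# Example (illustrative): wrap any block to log its wall-clock time via LOGGER, e.g.
#
#   with timer('fold 0 training'):
#       ...  # train/validate one fold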
def init_logger(log_file=os.path.join(OUTPUT_DIR, 'train.log')):
from logging import getLogger, INFO, FileHandler, Formatter, StreamHandler
logger = getLogger(__name__)
logger.setLevel(INFO)
handler1 = StreamHandler()
handler1.setFormatter(Formatter("%(message)s"))
handler2 = FileHandler(filename=log_file)
handler2.setFormatter(Formatter("%(message)s"))
logger.addHandler(handler1)
logger.addHandler(handler2)
return logger
LOGGER = init_logger()
def seed_torch(seed=42):
random.seed(seed)
os.environ['PYTHONHASHSEED'] = str(seed)
np.random.seed(seed)
torch.manual_seed(seed)
torch.cuda.manual_seed(seed)
torch.backends.cudnn.deterministic = True
seed_torch(seed=CFG.seed)
# %% md
# CV split
# %%
#folds = train.copy()
folds = train_merged.copy()
Fold = StratifiedKFold(n_splits=CFG.n_fold, shuffle=True, random_state=CFG.seed)
for n, (train_index, val_index) in enumerate(Fold.split(folds, folds[CFG.target_col])):
folds.loc[val_index, 'fold'] = int(n)
folds['fold'] = folds['fold'].astype(int)
print(folds.groupby(['fold', CFG.target_col]).size())
# %% md
# Dataset
# %%
# ====================================================
# Dataset
# ====================================================
class TrainDataset(Dataset):
def __init__(self, df, transform=None):
self.df = df
self.file_names = df['image_id'].values
self.labels = df['label'].values
self.transform = transform
def __len__(self):
return len(self.df)
def __getitem__(self, idx):
file_name = self.file_names[idx]
file_path = f'{TRAIN_PATH}/{file_name}'
image = cv2.imread(file_path)
image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
if self.transform:
augmented = self.transform(image=image)
image = augmented['image']
label = torch.tensor(self.labels[idx]).long()
return image, label
class TestDataset(Dataset):
def __init__(self, df, transform=None):
self.df = df
self.file_names = df['image_id'].values
self.transform = transform
def __len__(self):
return len(self.df)
def __getitem__(self, idx):
file_name = self.file_names[idx]
file_path = f'{TEST_PATH}/{file_name}'
image = cv2.imread(file_path)
image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
if self.transform:
augmented = self.transform(image=image)
image = augmented['image']
return image
# %%
train_dataset = TrainDataset(train, transform=None)
# for i in range(1):
# image, label = train_dataset[i]
# plt.imshow(image)
# plt.title(f'label: {label}')
# plt.show()
#
# %% md
# Transforms
# %%
# ====================================================
# Transforms
# ====================================================
def get_transforms(*, data):
if data == 'train':
return A.Compose([
#A.Resize(CFG.size, CFG.size),
A.RandomResizedCrop(CFG.size, CFG.size),
A.Transpose(p=0.5),
A.HorizontalFlip(p=0.5),
A.HueSaturationValue(hue_shift_limit=0.2, sat_shift_limit=0.2, val_shift_limit=0.2, p=0.5),
A.RandomBrightnessContrast(brightness_limit=(-0.1,0.1), contrast_limit=(-0.1, 0.1), p=0.5),
A.VerticalFlip(p=0.5),
A.ShiftScaleRotate(p=0.5),
A.CoarseDropout(p=0.5),
A.Cutout(p=0.5),
A.Normalize(
mean=[0.485, 0.456, 0.406],
std=[0.229, 0.224, 0.225],
),
ToTensorV2(),
])
elif data == 'valid':
return A.Compose([
A.CenterCrop(CFG.size, CFG.size),
A.Resize(CFG.size, CFG.size),
#A.CenterCrop(CFG.size, CFG.size),
A.Normalize(
mean=[0.485, 0.456, 0.406],
std=[0.229, 0.224, 0.225],
),
ToTensorV2(),
])
# %%
train_dataset = TrainDataset(train, transform=get_transforms(data='train'))
# for i in range(1):
# image, label = train_dataset[i]
# plt.imshow(image[0])
# plt.title(f'label: {label}')
# plt.show()
#
# %%
# ====================================================
# MODEL
# ====================================================
class CustomEfficientNet(nn.Module):
def __init__(self, model_name=CFG.model_name, pretrained=False):
super().__init__()
self.model = timm.create_model(CFG.model_name, pretrained=pretrained)
n_features = self.model.classifier.in_features
self.model.classifier = nn.Linear(n_features, CFG.target_size)
def forward(self, x):
x = self.model(x)
return x
class CustomResNext(nn.Module):
def __init__(self, model_name='resnext50_32x4d', pretrained=False):
super().__init__()
self.model = timm.create_model(model_name, pretrained=pretrained)
n_features = self.model.fc.in_features
self.model.fc = nn.Linear(n_features, CFG.target_size)
def forward(self, x):
x = self.model(x)
return x
# %%
# model = CustomResNext(model_name=CFG.model_name, pretrained=True)
# # model = CustomEfficientNet(model_name=CFG.model_name, pretrained=False)
# train_dataset = TrainDataset(train, transform=get_transforms(data='train'))
# train_loader = DataLoader(train_dataset, batch_size=4, shuffle=True,
# num_workers=4, pin_memory=True, drop_last=True)
#
# for image, label in train_loader:
# output = model(image)
# print(output)
# break
# %% md
# Criterion
# %% md
## Label Smoothing
# %%
# ====================================================
# Label Smoothing
# ====================================================
class LabelSmoothingLoss(nn.Module):
def __init__(self, classes=5, smoothing=0.0, dim=-1):
super(LabelSmoothingLoss, self).__init__()
self.confidence = 1.0 - smoothing
self.smoothing = smoothing
self.cls = classes
self.dim = dim
def forward(self, pred, target):
pred = pred.log_softmax(dim=self.dim)
with torch.no_grad():
true_dist = torch.zeros_like(pred)
true_dist.fill_(self.smoothing / (self.cls - 1))
true_dist.scatter_(1, target.data.unsqueeze(1), self.confidence)
return torch.mean(torch.sum(-true_dist * pred, dim=self.dim))
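# Example (illustrative): with smoothing=0.05 and 5 classes, the target class gets
# probability 0.95 and each other class gets 0.05 / 4 = 0.0125 in the soft target.
#
#   criterion = LabelSmoothingLoss(classes=CFG.target_size, smoothing=CFG.smoothing)
#   loss = criterion(y_preds, labels)  # y_preds: (batch, 5) logits, labels: (batch,) int64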
# %% md
## Focal Loss
# %%
class FocalLoss(nn.Module):
def __init__(self, alpha=1, gamma=2, reduce=True):
super(FocalLoss, self).__init__()
self.alpha = alpha
self.gamma = gamma
self.reduce = reduce
def forward(self, inputs, targets):
BCE_loss = nn.CrossEntropyLoss()(inputs, targets)
pt = torch.exp(-BCE_loss)
F_loss = self.alpha * (1 - pt) ** self.gamma * BCE_loss
if self.reduce:
return torch.mean(F_loss)
else:
return F_loss
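# Example (illustrative): this variant scales the batch-mean cross-entropy by
# alpha * (1 - pt) ** gamma rather than weighting each sample individually.
#
#   criterion = FocalLoss(alpha=1, gamma=2)
#   loss = criterion(y_preds, labels)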
# %% md
## FocalCosineLoss
# %%
class FocalCosineLoss(nn.Module):
def __init__(self, alpha=1, gamma=2, xent=.1):
super(FocalCosineLoss, self).__init__()
self.alpha = alpha
self.gamma = gamma
self.xent = xent
self.y = torch.Tensor([1]).cuda()
def forward(self, input, target, reduction="mean"):
cosine_loss = F.cosine_embedding_loss(input, F.one_hot(target, num_classes=input.size(-1)), self.y,
reduction=reduction)
cent_loss = F.cross_entropy(F.normalize(input), target, reduction="none")
pt = torch.exp(-cent_loss)
focal_loss = self.alpha * (1 - pt) ** self.gamma * cent_loss
if reduction == "mean":
focal_loss = torch.mean(focal_loss)
return cosine_loss + self.xent * focal_loss
# %% md
## SymmetricCrossEntropy
# %%
class SymmetricCrossEntropy(nn.Module):
def __init__(self, alpha=0.1, beta=1.0, num_classes=5):
super(SymmetricCrossEntropy, self).__init__()
self.alpha = alpha
self.beta = beta
self.num_classes = num_classes
def forward(self, logits, targets, reduction='mean'):
onehot_targets = torch.eye(self.num_classes)[targets].cuda()
ce_loss = F.cross_entropy(logits, targets, reduction=reduction)
rce_loss = (-onehot_targets * logits.softmax(1).clamp(1e-7, 1.0).log()).sum(1)
if reduction == 'mean':
rce_loss = rce_loss.mean()
elif reduction == 'sum':
rce_loss = rce_loss.sum()
return self.alpha * ce_loss + self.beta * rce_loss
# %% md
# Bi-Tempered-Loss
# %%
def log_t(u, t):
"""Compute log_t for `u'."""
if t == 1.0:
return u.log()
else:
return (u.pow(1.0 - t) - 1.0) / (1.0 - t)
def exp_t(u, t):
"""Compute exp_t for `u'."""
if t == 1:
return u.exp()
else:
return (1.0 + (1.0 - t) * u).relu().pow(1.0 / (1.0 - t))
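# log_t and exp_t are inverses for u > 0 and reduce to torch.log / torch.exp at t == 1,
# e.g. (illustrative check):
#
#   u = torch.tensor([0.5, 1.0, 2.0])
#   torch.allclose(exp_t(log_t(u, 0.3), 0.3), u)  # -> True up to float error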
def compute_normalization_fixed_point(activations, t, num_iters):
"""Returns the normalization value for each example (t > 1.0).
Args:
activations: A multi-dimensional tensor with last dimension `num_classes`.
t: Temperature 2 (> 1.0 for tail heaviness).
num_iters: Number of iterations to run the method.
Return: A tensor of same shape as activation with the last dimension being 1.
"""
mu, _ = torch.max(activations, -1, keepdim=True)
normalized_activations_step_0 = activations - mu
normalized_activations = normalized_activations_step_0
for _ in range(num_iters):
logt_partition = torch.sum(
exp_t(normalized_activations, t), -1, keepdim=True)
normalized_activations = normalized_activations_step_0 * \
logt_partition.pow(1.0 - t)
logt_partition = torch.sum(
exp_t(normalized_activations, t), -1, keepdim=True)
normalization_constants = - log_t(1.0 / logt_partition, t) + mu
return normalization_constants
def compute_normalization_binary_search(activations, t, num_iters):
"""Returns the normalization value for each example (t < 1.0).
Args:
activations: A multi-dimensional tensor with last dimension `num_classes`.
t: Temperature 2 (< 1.0 for finite support).
num_iters: Number of iterations to run the method.
Return: A tensor of same rank as activation with the last dimension being 1.
"""
mu, _ = torch.max(activations, -1, keepdim=True)
normalized_activations = activations - mu
effective_dim = \
torch.sum(
(normalized_activations > -1.0 / (1.0 - t)).to(torch.int32),
dim=-1, keepdim=True).to(activations.dtype)
shape_partition = activations.shape[:-1] + (1,)
lower = torch.zeros(shape_partition, dtype=activations.dtype, device=activations.device)
upper = -log_t(1.0 / effective_dim, t) * torch.ones_like(lower)
for _ in range(num_iters):
logt_partition = (upper + lower) / 2.0
sum_probs = torch.sum(
exp_t(normalized_activations - logt_partition, t),
dim=-1, keepdim=True)
update = (sum_probs < 1.0).to(activations.dtype)
lower = torch.reshape(
lower * update + (1.0 - update) * logt_partition,
shape_partition)
upper = torch.reshape(
upper * (1.0 - update) + update * logt_partition,
shape_partition)
logt_partition = (upper + lower) / 2.0
return logt_partition + mu
class ComputeNormalization(torch.autograd.Function):
"""
Class implementing custom backward pass for compute_normalization. See compute_normalization.
"""
@staticmethod
def forward(ctx, activations, t, num_iters):
if t < 1.0:
normalization_constants = compute_normalization_binary_search(activations, t, num_iters)
else:
normalization_constants = compute_normalization_fixed_point(activations, t, num_iters)
ctx.save_for_backward(activations, normalization_constants)
ctx.t = t
return normalization_constants
@staticmethod
def backward(ctx, grad_output):
activations, normalization_constants = ctx.saved_tensors
t = ctx.t
normalized_activations = | |
# ex:ts=4:sw=4:sts=4:et
# -*- tab-width: 4; c-basic-offset: 4; indent-tabs-mode: nil -*-
#
# BitBake Tests for the Data Store (data.py/data_smart.py)
#
# Copyright (C) 2010 <NAME>
# Copyright (C) 2012 <NAME>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 2 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
import unittest
import bb
import bb.data
import bb.parse
import logging
class LogRecord():
def __enter__(self):
logs = []
class LogHandler(logging.Handler):
def emit(self, record):
logs.append(record)
logger = logging.getLogger("BitBake")
handler = LogHandler()
self.handler = handler
logger.addHandler(handler)
return logs
def __exit__(self, type, value, traceback):
logger = logging.getLogger("BitBake")
logger.removeHandler(self.handler)
return
def logContains(item, logs):
for l in logs:
m = l.getMessage()
if item in m:
return True
return False
class DataExpansions(unittest.TestCase):
def setUp(self):
self.d = bb.data.init()
self.d["foo"] = "value_of_foo"
self.d["bar"] = "value_of_bar"
self.d["value_of_foo"] = "value_of_'value_of_foo'"
def test_one_var(self):
val = self.d.expand("${foo}")
self.assertEqual(str(val), "value_of_foo")
def test_indirect_one_var(self):
val = self.d.expand("${${foo}}")
self.assertEqual(str(val), "value_of_'value_of_foo'")
def test_indirect_and_another(self):
val = self.d.expand("${${foo}} ${bar}")
self.assertEqual(str(val), "value_of_'value_of_foo' value_of_bar")
def test_python_snippet(self):
val = self.d.expand("${@5*12}")
self.assertEqual(str(val), "60")
def test_expand_in_python_snippet(self):
val = self.d.expand("${@'boo ' + '${foo}'}")
self.assertEqual(str(val), "boo value_of_foo")
def test_python_snippet_getvar(self):
val = self.d.expand("${@d.getVar('foo') + ' ${bar}'}")
self.assertEqual(str(val), "value_of_foo value_of_bar")
def test_python_unexpanded(self):
self.d.setVar("bar", "${unsetvar}")
val = self.d.expand("${@d.getVar('foo') + ' ${bar}'}")
self.assertEqual(str(val), "${@d.getVar('foo') + ' ${unsetvar}'}")
def test_python_snippet_syntax_error(self):
self.d.setVar("FOO", "${@foo = 5}")
self.assertRaises(bb.data_smart.ExpansionError, self.d.getVar, "FOO", True)
def test_python_snippet_runtime_error(self):
self.d.setVar("FOO", "${@int('test')}")
self.assertRaises(bb.data_smart.ExpansionError, self.d.getVar, "FOO", True)
def test_python_snippet_error_path(self):
self.d.setVar("FOO", "foo value ${BAR}")
self.d.setVar("BAR", "bar value ${@int('test')}")
self.assertRaises(bb.data_smart.ExpansionError, self.d.getVar, "FOO", True)
def test_value_containing_value(self):
val = self.d.expand("${@d.getVar('foo') + ' ${bar}'}")
self.assertEqual(str(val), "value_of_foo value_of_bar")
def test_reference_undefined_var(self):
val = self.d.expand("${undefinedvar} meh")
self.assertEqual(str(val), "${undefinedvar} meh")
def test_double_reference(self):
self.d.setVar("BAR", "bar value")
self.d.setVar("FOO", "${BAR} foo ${BAR}")
val = self.d.getVar("FOO")
self.assertEqual(str(val), "bar value foo bar value")
def test_direct_recursion(self):
self.d.setVar("FOO", "${FOO}")
self.assertRaises(bb.data_smart.ExpansionError, self.d.getVar, "FOO", True)
def test_indirect_recursion(self):
self.d.setVar("FOO", "${BAR}")
self.d.setVar("BAR", "${BAZ}")
self.d.setVar("BAZ", "${FOO}")
self.assertRaises(bb.data_smart.ExpansionError, self.d.getVar, "FOO", True)
def test_recursion_exception(self):
self.d.setVar("FOO", "${BAR}")
self.d.setVar("BAR", "${${@'FOO'}}")
self.assertRaises(bb.data_smart.ExpansionError, self.d.getVar, "FOO", True)
def test_incomplete_varexp_single_quotes(self):
self.d.setVar("FOO", "sed -i -e 's:IP{:I${:g' $pc")
val = self.d.getVar("FOO")
self.assertEqual(str(val), "sed -i -e 's:IP{:I${:g' $pc")
def test_nonstring(self):
self.d.setVar("TEST", 5)
val = self.d.getVar("TEST")
self.assertEqual(str(val), "5")
def test_rename(self):
self.d.renameVar("foo", "newfoo")
self.assertEqual(self.d.getVar("newfoo", False), "value_of_foo")
self.assertEqual(self.d.getVar("foo", False), None)
def test_deletion(self):
self.d.delVar("foo")
self.assertEqual(self.d.getVar("foo", False), None)
def test_keys(self):
keys = list(self.d.keys())
self.assertCountEqual(keys, ['value_of_foo', 'foo', 'bar'])
def test_keys_deletion(self):
newd = bb.data.createCopy(self.d)
newd.delVar("bar")
keys = list(newd.keys())
self.assertCountEqual(keys, ['value_of_foo', 'foo'])
class TestNestedExpansions(unittest.TestCase):
def setUp(self):
self.d = bb.data.init()
self.d["foo"] = "foo"
self.d["bar"] = "bar"
self.d["value_of_foobar"] = "187"
def test_refs(self):
val = self.d.expand("${value_of_${foo}${bar}}")
self.assertEqual(str(val), "187")
#def test_python_refs(self):
# val = self.d.expand("${@${@3}**2 + ${@4}**2}")
# self.assertEqual(str(val), "25")
def test_ref_in_python_ref(self):
val = self.d.expand("${@'${foo}' + 'bar'}")
self.assertEqual(str(val), "foobar")
def test_python_ref_in_ref(self):
val = self.d.expand("${${@'f'+'o'+'o'}}")
self.assertEqual(str(val), "foo")
def test_deep_nesting(self):
depth = 100
val = self.d.expand("${" * depth + "foo" + "}" * depth)
self.assertEqual(str(val), "foo")
#def test_deep_python_nesting(self):
# depth = 50
# val = self.d.expand("${@" * depth + "1" + "+1}" * depth)
# self.assertEqual(str(val), str(depth + 1))
def test_mixed(self):
val = self.d.expand("${value_of_${@('${foo}'+'bar')[0:3]}${${@'BAR'.lower()}}}")
self.assertEqual(str(val), "187")
def test_runtime(self):
val = self.d.expand("${${@'value_of' + '_f'+'o'+'o'+'b'+'a'+'r'}}")
self.assertEqual(str(val), "187")
class TestMemoize(unittest.TestCase):
def test_memoized(self):
d = bb.data.init()
d.setVar("FOO", "bar")
self.assertTrue(d.getVar("FOO", False) is d.getVar("FOO", False))
def test_not_memoized(self):
d1 = bb.data.init()
d2 = bb.data.init()
d1.setVar("FOO", "bar")
d2.setVar("FOO", "bar2")
self.assertTrue(d1.getVar("FOO", False) is not d2.getVar("FOO", False))
def test_changed_after_memoized(self):
d = bb.data.init()
d.setVar("foo", "value of foo")
self.assertEqual(str(d.getVar("foo", False)), "value of foo")
d.setVar("foo", "second value of foo")
self.assertEqual(str(d.getVar("foo", False)), "second value of foo")
def test_same_value(self):
d = bb.data.init()
d.setVar("foo", "value of")
d.setVar("bar", "value of")
self.assertEqual(d.getVar("foo", False),
d.getVar("bar", False))
class TestConcat(unittest.TestCase):
def setUp(self):
self.d = bb.data.init()
self.d.setVar("FOO", "foo")
self.d.setVar("VAL", "val")
self.d.setVar("BAR", "bar")
def test_prepend(self):
self.d.setVar("TEST", "${VAL}")
self.d.prependVar("TEST", "${FOO}:")
self.assertEqual(self.d.getVar("TEST"), "foo:val")
def test_append(self):
self.d.setVar("TEST", "${VAL}")
self.d.appendVar("TEST", ":${BAR}")
self.assertEqual(self.d.getVar("TEST"), "val:bar")
def test_multiple_append(self):
self.d.setVar("TEST", "${VAL}")
self.d.prependVar("TEST", "${FOO}:")
self.d.appendVar("TEST", ":val2")
self.d.appendVar("TEST", ":${BAR}")
self.assertEqual(self.d.getVar("TEST"), "foo:val:val2:bar")
class TestConcatOverride(unittest.TestCase):
def setUp(self):
self.d = bb.data.init()
self.d.setVar("FOO", "foo")
self.d.setVar("VAL", "val")
self.d.setVar("BAR", "bar")
def test_prepend(self):
self.d.setVar("TEST", "${VAL}")
self.d.setVar("TEST_prepend", "${FOO}:")
self.assertEqual(self.d.getVar("TEST"), "foo:val")
def test_append(self):
self.d.setVar("TEST", "${VAL}")
self.d.setVar("TEST_append", ":${BAR}")
self.assertEqual(self.d.getVar("TEST"), "val:bar")
def test_multiple_append(self):
self.d.setVar("TEST", "${VAL}")
self.d.setVar("TEST_prepend", "${FOO}:")
self.d.setVar("TEST_append", ":val2")
self.d.setVar("TEST_append", ":${BAR}")
self.assertEqual(self.d.getVar("TEST"), "foo:val:val2:bar")
def test_append_unset(self):
self.d.setVar("TEST_prepend", "${FOO}:")
self.d.setVar("TEST_append", ":val2")
self.d.setVar("TEST_append", ":${BAR}")
self.assertEqual(self.d.getVar("TEST"), "foo::val2:bar")
def test_remove(self):
self.d.setVar("TEST", "${VAL} ${BAR}")
self.d.setVar("TEST_remove", "val")
self.assertEqual(self.d.getVar("TEST"), " bar")
def test_remove_cleared(self):
self.d.setVar("TEST", "${VAL} ${BAR}")
self.d.setVar("TEST_remove", "val")
self.d.setVar("TEST", "${VAL} ${BAR}")
self.assertEqual(self.d.getVar("TEST"), "val bar")
# Ensure the value is unchanged if we have an inactive remove override
# (including that whitespace is preserved)
def test_remove_inactive_override(self):
self.d.setVar("TEST", "${VAL} ${BAR} 123")
self.d.setVar("TEST_remove_inactiveoverride", "val")
self.assertEqual(self.d.getVar("TEST"), "val bar 123")
def test_doubleref_remove(self):
self.d.setVar("TEST", "${VAL} ${BAR}")
self.d.setVar("TEST_remove", "val")
self.d.setVar("TEST_TEST", "${TEST} ${TEST}")
self.assertEqual(self.d.getVar("TEST_TEST"), " bar bar")
def test_empty_remove(self):
self.d.setVar("TEST", "")
self.d.setVar("TEST_remove", "val")
self.assertEqual(self.d.getVar("TEST"), "")
def test_remove_expansion(self):
self.d.setVar("BAR", "Z")
self.d.setVar("TEST", "${BAR}/X Y")
self.d.setVar("TEST_remove", "${BAR}/X")
self.assertEqual(self.d.getVar("TEST"), " Y")
def test_remove_expansion_items(self):
self.d.setVar("TEST", "A B C D")
self.d.setVar("BAR", "B D")
self.d.setVar("TEST_remove", "${BAR}")
self.assertEqual(self.d.getVar("TEST"), "A C ")
def test_remove_preserve_whitespace(self):
# When the removal isn't active, the original value should be preserved
self.d.setVar("TEST", " A B")
self.d.setVar("TEST_remove", "C")
self.assertEqual(self.d.getVar("TEST"), " A B")
def test_remove_preserve_whitespace2(self):
# When the removal is active preserve the whitespace
self.d.setVar("TEST", " A B")
self.d.setVar("TEST_remove", "B")
self.assertEqual(self.d.getVar("TEST"), " A ")
class TestOverrides(unittest.TestCase):
def setUp(self):
self.d = bb.data.init()
self.d.setVar("OVERRIDES", "foo:bar:local")
self.d.setVar("TEST", "testvalue")
def test_no_override(self):
self.assertEqual(self.d.getVar("TEST"), "testvalue")
def test_one_override(self):
self.d.setVar("TEST_bar", "testvalue2")
self.assertEqual(self.d.getVar("TEST"), "testvalue2")
def test_one_override_unset(self):
self.d.setVar("TEST2_bar", "testvalue2")
self.assertEqual(self.d.getVar("TEST2"), "testvalue2")
self.assertCountEqual(list(self.d.keys()), ['TEST', 'TEST2', 'OVERRIDES', 'TEST2_bar'])
def test_multiple_override(self):
self.d.setVar("TEST_bar", "testvalue2")
self.d.setVar("TEST_local", "testvalue3")
self.d.setVar("TEST_foo", "testvalue4")
self.assertEqual(self.d.getVar("TEST"), "testvalue3")
self.assertCountEqual(list(self.d.keys()), ['TEST', 'TEST_foo', 'OVERRIDES', 'TEST_bar', 'TEST_local'])
def test_multiple_combined_overrides(self):
self.d.setVar("TEST_local_foo_bar", "testvalue3")
self.assertEqual(self.d.getVar("TEST"), "testvalue3")
def test_multiple_overrides_unset(self):
self.d.setVar("TEST2_local_foo_bar", "testvalue3")
self.assertEqual(self.d.getVar("TEST2"), "testvalue3")
def test_keyexpansion_override(self):
self.d.setVar("LOCAL", "local")
self.d.setVar("TEST_bar", "testvalue2")
self.d.setVar("TEST_${LOCAL}", "testvalue3")
self.d.setVar("TEST_foo", "testvalue4")
bb.data.expandKeys(self.d)
self.assertEqual(self.d.getVar("TEST"), "testvalue3")
def test_rename_override(self):
self.d.setVar("ALTERNATIVE_ncurses-tools_class-target", "a")
self.d.setVar("OVERRIDES", "class-target")
self.d.renameVar("ALTERNATIVE_ncurses-tools", "ALTERNATIVE_lib32-ncurses-tools")
self.assertEqual(self.d.getVar("ALTERNATIVE_lib32-ncurses-tools"), "a")
def test_underscore_override(self):
self.d.setVar("TEST_bar", "testvalue2")
self.d.setVar("TEST_some_val", "testvalue3")
self.d.setVar("TEST_foo", "testvalue4")
self.d.setVar("OVERRIDES", "foo:bar:some_val")
self.assertEqual(self.d.getVar("TEST"), "testvalue3")
def test_remove_with_override(self):
self.d.setVar("TEST_bar", "testvalue2")
self.d.setVar("TEST_some_val", "testvalue3 testvalue5")
self.d.setVar("TEST_some_val_remove", "testvalue3")
self.d.setVar("TEST_foo", "testvalue4")
self.d.setVar("OVERRIDES", "foo:bar:some_val")
self.assertEqual(self.d.getVar("TEST"), " testvalue5")
class TestKeyExpansion(unittest.TestCase):
def setUp(self):
self.d = bb.data.init()
self.d.setVar("FOO", "foo")
self.d.setVar("BAR", "foo")
def test_keyexpand(self):
self.d.setVar("VAL_${FOO}", "A")
self.d.setVar("VAL_${BAR}", "B")
with LogRecord() as logs:
bb.data.expandKeys(self.d)
self.assertTrue(logContains("Variable key VAL_${FOO} (A) replaces original key VAL_foo (B)", logs))
self.assertEqual(self.d.getVar("VAL_foo"), "A")
class TestFlags(unittest.TestCase):
def setUp(self):
self.d = bb.data.init()
self.d.setVar("foo", "value of foo")
self.d.setVarFlag("foo", "flag1", "value of flag1")
self.d.setVarFlag("foo", "flag2", "value of flag2")
def test_setflag(self):
self.assertEqual(self.d.getVarFlag("foo", "flag1", False), "value of flag1")
self.assertEqual(self.d.getVarFlag("foo", "flag2", False), "value of flag2")
def test_delflag(self):
self.d.delVarFlag("foo", "flag2")
self.assertEqual(self.d.getVarFlag("foo", "flag1", False), "value of flag1")
self.assertEqual(self.d.getVarFlag("foo", "flag2", False), None)
class Contains(unittest.TestCase):
def setUp(self):
self.d = bb.data.init()
self.d.setVar("SOMEFLAG", "a b c")
def test_contains(self):
self.assertTrue(bb.utils.contains("SOMEFLAG", "a", True, False, self.d))
self.assertTrue(bb.utils.contains("SOMEFLAG", "b", True, False, self.d))
self.assertTrue(bb.utils.contains("SOMEFLAG", "c", True, False, self.d))
self.assertTrue(bb.utils.contains("SOMEFLAG", "a b", True, False, self.d))
self.assertTrue(bb.utils.contains("SOMEFLAG", "b c", True, False, self.d))
self.assertTrue(bb.utils.contains("SOMEFLAG", "c a", True, False, self.d))
self.assertTrue(bb.utils.contains("SOMEFLAG", "a b c", True, False, self.d))
self.assertTrue(bb.utils.contains("SOMEFLAG", "c b a", True, False, self.d))
self.assertFalse(bb.utils.contains("SOMEFLAG", "x", True, False, self.d))
self.assertFalse(bb.utils.contains("SOMEFLAG", "a x", True, False, self.d))
self.assertFalse(bb.utils.contains("SOMEFLAG", "x c b", True, False, self.d))
self.assertFalse(bb.utils.contains("SOMEFLAG", "x c b a", True, False, self.d))
def test_contains_any(self):
self.assertTrue(bb.utils.contains_any("SOMEFLAG", "a", True, False, self.d))
self.assertTrue(bb.utils.contains_any("SOMEFLAG", "b", True, False, self.d))
self.assertTrue(bb.utils.contains_any("SOMEFLAG", "c", True, False, self.d))
self.assertTrue(bb.utils.contains_any("SOMEFLAG", "a b", True, False, self.d))
self.assertTrue(bb.utils.contains_any("SOMEFLAG", "b c", True, False, self.d))
self.assertTrue(bb.utils.contains_any("SOMEFLAG", "c a", True, False, self.d))
self.assertTrue(bb.utils.contains_any("SOMEFLAG", "a x", True, False, self.d))
self.assertTrue(bb.utils.contains_any("SOMEFLAG", "x c", True, False, self.d))
self.assertFalse(bb.utils.contains_any("SOMEFLAG", "x", True, False, self.d))
self.assertFalse(bb.utils.contains_any("SOMEFLAG", "x y z", True, False, self.d))
class TaskHash(unittest.TestCase):
def test_taskhashes(self):
def gettask_bashhash(taskname, d):
tasklist, gendeps, lookupcache = bb.data.generate_dependencies(d)
taskdeps, basehash = bb.data.generate_dependency_hash(tasklist, gendeps, lookupcache, set(), "somefile")
bb.warn(str(lookupcache))
return basehash["somefile." + taskname]
d = bb.data.init()
d.setVar("__BBTASKS", ["mytask"])
d.setVar("__exportlist", [])
d.setVar("mytask", "${MYCOMMAND}")
d.setVar("MYCOMMAND", "${VAR}; foo; bar; exit 0")
d.setVar("VAR", "val")
orighash = gettask_bashhash("mytask", d)
# Changing a variable should change the hash
d.setVar("VAR", "val2")
nexthash = gettask_bashhash("mytask", d)
self.assertNotEqual(orighash, nexthash)
d.setVar("VAR", "val")
# Adding an inactive removal shouldn't change the hash
d.setVar("BAR", "notbar")
d.setVar("MYCOMMAND_remove", "${BAR}")
nexthash = gettask_bashhash("mytask", d)
self.assertEqual(orighash, nexthash)
# Adding an active removal should change the hash
d.setVar("BAR", "bar;")
nexthash = gettask_bashhash("mytask", d)
self.assertNotEqual(orighash, nexthash)
# Setup | |
# osid.resource.ResourceAdminSession.can_update_resources
return (self._can('update') or
bool(self._get_overriding_catalog_ids('update')))
@raise_null_argument
def get_qualifier_form_for_update(self, qualifier_id):
# Implemented from azosid template for -
# osid.resource.ResourceAdminSession.get_resource_form_for_update
if not self._can_for_qualifier('update', qualifier_id):
raise PermissionDenied()
return self._provider_session.get_qualifier_form_for_update(qualifier_id)
def duplicate_qualifier(self, qualifier_id):
if not self._can('update'):
raise PermissionDenied()
return self._provider_session.duplicate_qualifier(qualifier_id)
@raise_null_argument
def update_qualifier(self, qualifier_form):
# Implemented from azosid template for -
# osid.resource.ResourceAdminSession.update_resource
if not self._can('update'):
raise PermissionDenied()
return self._provider_session.update_qualifier(qualifier_form)
def can_delete_qualifiers(self):
# Implemented from azosid template for -
# osid.resource.ResourceAdminSession.can_delete_resources
return (self._can('delete') or
bool(self._get_overriding_catalog_ids('delete')))
@raise_null_argument
def delete_qualifier(self, qualifier_id):
# Implemented from azosid template for -
# osid.resource.ResourceAdminSession.delete_resource
if not self._can_for_qualifier('delete', qualifier_id):
raise PermissionDenied()
return self._provider_session.delete_qualifier(qualifier_id)
def can_manage_qualifier_aliases(self):
# Implemented from azosid template for -
# osid.resource.ResourceAdminSession.can_manage_resource_aliases
return (self._can('manage') or
bool(self._get_overriding_catalog_ids('manage')))
@raise_null_argument
def alias_qualifier(self, qualifier_id, alias_id):
# Implemented from azosid template for -
# osid.resource.ResourceAdminSession.alias_resources
if not self._can_for_qualifier('alias', qualifier_id):
raise PermissionDenied()
return self._provider_session.alias_qualifier(qualifier_id, alias_id)
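# The sessions below follow the same adapter pattern as above: each method either
# performs an authorization check via self._can(...) (optionally widened by overriding
# catalog ids), raising PermissionDenied on failure, or delegates directly to the
# underlying self._provider_session.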
class QualifierNotificationSession(abc_authorization_sessions.QualifierNotificationSession, osid_sessions.OsidSession):
"""Adapts underlying QualifierNotificationSession methodswith authorization checks."""
def __init__(self, *args, **kwargs):
osid_sessions.OsidSession.__init__(self, *args, **kwargs)
self._qualifier_id = self._provider_session.get_vault_id()
self._id_namespace = 'authorization.Qualifier'
def get_vault_id(self):
# Implemented from azosid template for -
# osid.resource.ResourceLookupSession.get_bin_id_template
return self._provider_session.get_vault_id()
vault_id = property(fget=get_vault_id)
def get_vault(self):
# Implemented from azosid template for -
# osid.resource.ResourceLookupSession.get_bin_template
if not self._can('lookup'):
raise PermissionDenied()
else:
return self._provider_session.get_vault()
vault = property(fget=get_vault)
def can_register_for_qualifier_notifications(self):
# Implemented from azosid template for -
# osid.resource.ResourceNotificationSession.can_register_for_resource_notifications
return self._can('register')
def use_federated_vault_view(self):
# Implemented from azosid template for -
# osid.resource.ResourceLookupSession.use_federated_bin_view_template
self._use_federated_catalog_view()
self._provider_session.use_federated_vault_view()
if self._query_session:
self._query_session.use_federated_vault_view()
def use_isolated_vault_view(self):
# Implemented from azosid template for -
# osid.resource.ResourceLookupSession.use_isolated_bin_view_template
self._use_isolated_catalog_view()
self._provider_session.use_isolated_vault_view()
if self._query_session:
self._query_session.use_isolated_vault_view()
def reliable_qualifier_notifications(self):
# Implemented from azosid template for -
# osid.resource.ResourceNotificationSession.register_for_new_resources
self._provider_session.reliable_qualifier_notifications()
def unreliable_qualifier_notifications(self):
# Implemented from azosid template for -
# osid.resource.ResourceNotificationSession.register_for_new_resources
self._provider_session.unreliable_qualifier_notifications()
@raise_null_argument
def acknowledge_qualifier_notification(self, notification_id):
raise Unimplemented()
def register_for_new_qualifiers(self):
# Implemented from azosid template for -
# osid.resource.ResourceNotificationSession.register_for_new_resources
if not self._can('register'):
raise PermissionDenied()
self._provider_session.register_for_new_qualifiers()
def register_for_changed_qualifiers(self):
# Implemented from azosid template for -
# osid.resource.ResourceNotificationSession.register_for_changed_resources
if not self._can('register'):
raise PermissionDenied()
self._provider_session.register_for_changed_qualifiers()
@raise_null_argument
def register_for_changed_qualifier(self, qualifier_id):
# Implemented from azosid template for -
# osid.resource.ResourceNotificationSession.register_for_changed_resource
if not self._can('register'):
raise PermissionDenied()
self._provider_session.register_for_changed_qualifier(qualifier_id)
def register_for_deleted_qualifiers(self):
# Implemented from azosid template for -
# osid.resource.ResourceNotificationSession.register_for_deleted_resources
if not self._can('register'):
raise PermissionDenied()
self._provider_session.register_for_deleted_qualifiers()
@raise_null_argument
def register_for_deleted_qualifier(self, qualifier_id):
# Implemented from azosid template for -
# osid.resource.ResourceNotificationSession.register_for_deleted_resource
if not self._can('register'):
raise PermissionDenied()
self._provider_session.register_for_deleted_qualifier(qualifier_id)
def register_for_changed_qualifier_hierarchy(self):
# Implemented from azosid template for -
# osid.resource.ResourceNotificationSession.register_for_changed_resources
if not self._can('register'):
raise PermissionDenied()
self._provider_session.register_for_changed_qualifier_hierarchy()
@raise_null_argument
def register_for_changed_qualifier_hierarchy_for_ancestors(self, qualifier_id):
# Implemented from azosid template for -
# osid.resource.ResourceNotificationSession.register_for_changed_resource
if not self._can('register'):
raise PermissionDenied()
self._provider_session.register_for_changed_qualifier_hierarchy_for_ancestors(qualifier_id)
@raise_null_argument
def register_for_changed_qualifier_hierarchy_for_descendants(self, qualifier_id):
# Implemented from azosid template for -
# osid.resource.ResourceNotificationSession.register_for_changed_resource
if not self._can('register'):
raise PermissionDenied()
self._provider_session.register_for_changed_qualifier_hierarchy_for_descendants(qualifier_id)
class QualifierHierarchySession(abc_authorization_sessions.QualifierHierarchySession, osid_sessions.OsidSession):
"""Adapts underlying QualifierHierarchySession methodswith authorization checks."""
def get_qualifier_hierarchy_id(self):
raise Unimplemented()
qualifier_hierarchy_id = property(fget=get_qualifier_hierarchy_id)
def get_qualifier_hierarchy(self):
raise Unimplemented()
qualifier_hierarchy = property(fget=get_qualifier_hierarchy)
def can_access_qualifier_hierarchy(self):
raise Unimplemented()
def use_comparative_qualifier_view(self):
# Implemented from azosid template for -
# osid.resource.ResourceLookupSession.use_comparative_resource_view_template
self._use_comparative_object_view()
self._provider_session.use_comparative_qualifier_view()
def use_plenary_qualifier_view(self):
# Implemented from azosid template for -
# osid.resource.ResourceLookupSession.use_plenary_resource_view_template
self._use_plenary_object_view()
self._provider_session.use_plenary_qualifier_view()
def get_root_qualifier_ids(self):
raise Unimplemented()
root_qualifier_ids = property(fget=get_root_qualifier_ids)
def get_root_qualifiers(self):
# From azosid_templates/ontology.py::SubjectHierarchySession::get_root_subjects_template
if not self._can('lookup'):
raise PermissionDenied()
return self._provider_session.get_root_qualifiers()
root_qualifiers = property(fget=get_root_qualifiers)
@raise_null_argument
def has_parent_qualifiers(self, qualifier_id):
raise Unimplemented()
@raise_null_argument
def is_parent_of_qualifier(self, id_, qualifier_id):
raise Unimplemented()
@raise_null_argument
def get_parent_qualifier_ids(self, qualifier_id):
raise Unimplemented()
@raise_null_argument
def get_parent_qualifiers(self, qualifier_id):
raise Unimplemented()
@raise_null_argument
def is_ancestor_of_qualifier(self, id_, qualifier_id):
raise Unimplemented()
@raise_null_argument
def has_child_qualifiers(self, qualifier_id):
raise Unimplemented()
@raise_null_argument
def is_child_of_qualifier(self, id_, qualifier_id):
raise Unimplemented()
@raise_null_argument
def get_child_qualifier_ids(self, qualifier_id):
raise Unimplemented()
@raise_null_argument
def get_child_qualifiers(self, qualifier_id):
# From azosid_templates/ontology.py::SubjectHierarchySession::get_child_subjects_template
if not self._can('lookup'):
raise PermissionDenied()
return self._provider_session.get_child_qualifiers(qualifier_id)
@raise_null_argument
def is_descendant_of_qualifier(self, id_, qualifier_id):
raise Unimplemented()
@raise_null_argument
def get_qualifier_node_ids(self, qualifier_id, ancestor_levels, descendant_levels, include_siblings):
raise Unimplemented()
@raise_null_argument
def get_qualifier_nodes(self, qualifier_id, ancestor_levels, descendant_levels, include_siblings):
raise Unimplemented()
class QualifierHierarchyDesignSession(abc_authorization_sessions.QualifierHierarchyDesignSession, osid_sessions.OsidSession):
"""Adapts underlying QualifierHierarchyDesignSession methodswith authorization checks."""
def get_qualifier_hierarchy_id(self):
raise Unimplemented()
qualifier_hierarchy_id = property(fget=get_qualifier_hierarchy_id)
def get_qualifier_hierarchy(self):
raise Unimplemented()
qualifier_hierarchy = property(fget=get_qualifier_hierarchy)
def can_modify_qualifier_hierarchy(self):
# From azosid_templates/ontology.py::SubjectHierarchyDesignSession::can_modify_subject_hierarchy_template
return self._can('modify')
@raise_null_argument
def add_root_qualifier(self, qualifier_id):
# From azosid_templates/ontology.py::SubjectHierarchyDesignSession::add_root_subject_template
if not self._can('modify'):
raise PermissionDenied()
return self._provider_session.add_root_qualifier(qualifier_id)
@raise_null_argument
def remove_root_qualifier(self, qualifier_id):
# From azosid_templates/ontology.py::SubjectHierarchyDesignSession::remove_root_subject_template
if not self._can('modify'):
raise PermissionDenied()
return self._provider_session.remove_root_qualifier(qualifier_id)
@raise_null_argument
def add_child_qualifier(self, qualifier_id, child_id):
# From azosid_templates/ontology.py::SubjectHierarchyDesignSession::add_child_subject_template
if not self._can('modify'):
raise PermissionDenied()
return self._provider_session.add_child_qualifier(qualifier_id, child_id)
@raise_null_argument
def remove_child_qualifier(self, qualifier_id, child_id):
# From azosid_templates/ontology.py::SubjectHierarchyDesignSession::remove_child_subject_template
if not self._can('modify'):
raise PermissionDenied()
return self._provider_session.remove_child_qualifier(qualifier_id, child_id)
@raise_null_argument
def remove_child_qualifiers(self, qualifier_id):
# From azosid_templates/ontology.py::SubjectHierarchyDesignSession::remove_child_subjects_template
if not self._can('modify'):
raise PermissionDenied()
return self._provider_session.remove_child_qualifiers(qualifier_id)
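# Illustrative note (not part of the generated classes above or below): every
# adapter method in these sessions follows the same authorization pattern: it
# consults the authorization service via self._can('<function>'), raises
# PermissionDenied on failure, and otherwise delegates to the wrapped provider
# session. A minimal sketch of that shape, using a hypothetical method name:
#
#     def some_operation(self, some_id):
#         if not self._can('modify'):
#             raise PermissionDenied()
#         return self._provider_session.some_operation(some_id)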
class QualifierVaultSession(abc_authorization_sessions.QualifierVaultSession, osid_sessions.OsidSession):
"""Adapts underlying QualifierVaultSession methodswith authorization checks."""
def __init__(self, *args, **kwargs):
osid_sessions.OsidSession.__init__(self, *args, **kwargs)
self._qualifier_id = Id('authorization.Vault%3AROOT%40ODL.MIT.EDU') # This could be better
self._id_namespace = 'authorization.QualifierVault'
def can_lookup_qualifier_vault_mappings(self):
# Implemented from azosid template for -
# osid.resource.ResourceBinSession.can_lookup_resource_bin_mappings
return self._can('lookup')
def use_comparative_vault_view(self):
# Implemented from azosid template for -
# osid.resource.BinLookupSession.use_comparative_bin_view_template
self._provider_session.use_comparative_vault_view()
def use_plenary_vault_view(self):
# Implemented from azosid template for -
# osid.resource.BinLookupSession.use_plenary_bin_view_template
self._provider_session.use_plenary_vault_view()
@raise_null_argument
def get_qualifier_ids_by_vault(self, vault_id):
# Implemented from azosid template for -
# osid.resource.ResourceBinSession.get_resource_ids_by_bin
if not self._can('lookup'):
raise PermissionDenied()
return self._provider_session.get_qualifier_ids_by_vault(vault_id)
@raise_null_argument
def get_qualifiers_by_vault(self, vault_id):
# Implemented from azosid template for -
# osid.resource.ResourceBinSession.get_resources_by_bin_template
if not self._can('lookup'):
raise PermissionDenied()
return self._provider_session.get_qualifiers_by_vault(vault_id)
@raise_null_argument
def get_qualifier_ids_by_vaults(self, vault_ids):
# Implemented from azosid template for -
# osid.resource.ResourceBinSession.get_resource_ids_by_bins
if not self._can('lookup'):
raise PermissionDenied()
return self._provider_session.get_qualifier_ids_by_vaults(vault_ids)
@raise_null_argument
def get_qualifiers_by_vaults(self, vault_ids):
# Implemented from azosid template for -
# osid.resource.ResourceBinSession.get_resources_by_bins
if not self._can('lookup'):
raise PermissionDenied()
return self._provider_session.get_qualifiers_by_vaults(vault_ids)
@raise_null_argument
def get_vault_ids_by_qualifier(self, qualifier_id):
# Implemented from azosid template for -
# osid.resource.ResourceBinSession.get_bin_ids_by_resource
if not self._can('lookup'):
raise PermissionDenied()
return self._provider_session.get_vault_ids_by_qualifier(qualifier_id)
@raise_null_argument
def get_vaults_by_qualifier(self, qualifier_id):
# Implemented from azosid template for -
# osid.resource.ResourceBinSession.get_bins_by_resource
if not self._can('lookup'):
raise PermissionDenied()
return self._provider_session.get_vaults_by_qualifier(qualifier_id)
class QualifierVaultAssignmentSession(abc_authorization_sessions.QualifierVaultAssignmentSession, osid_sessions.OsidSession):
"""Adapts underlying QualifierVaultAssignmentSession methodswith authorization checks."""
def __init__(self, *args, **kwargs):
osid_sessions.OsidSession.__init__(self, *args, **kwargs)
self._qualifier_id = Id('authorization.Vault%3AROOT%40ODL.MIT.EDU') # This could be better
self._id_namespace = 'authorization.QualifierVault'
def can_assign_qualifiers(self):
# Implemented from azosid template for -
# osid.resource.ResourceBinAssignmentSession.can_assign_resources
return self._can('assign')
@raise_null_argument
def can_assign_qualifiers_to_vault(self, vault_id):
# Implemented from azosid template for -
# osid.resource.ResourceBinAssignmentSession.can_assign_resources_to_bin
return self._can('assign', qualifier_id=vault_id)
@raise_null_argument
def get_assignable_vault_ids(self, vault_id):
# Implemented from azosid template for -
# osid.resource.ResourceBinAssignmentSession.get_assignable_bin_ids
if not self._can('assign'):
raise PermissionDenied()
return self._provider_session.get_assignable_vault_ids(vault_id)
@raise_null_argument
def get_assignable_vault_ids_for_qualifier(self, vault_id, qualifier_id):
# Implemented from azosid template for -
# osid.resource.ResourceBinAssignmentSession.get_assignable_bin_ids_for_resource
if not self._can('assign'):
raise PermissionDenied()
return self._provider_session.get_assignable_vault_ids_for_qualifier(vault_id, qualifier_id)
@raise_null_argument
def assign_qualifier_to_vault(self, qualifier_id, vault_id):
# Implemented from azosid template for -
# osid.resource.ResourceBinAssignmentSession.assign_resource_to_bin
if not self._can('assign'):
raise PermissionDenied()
return self._provider_session.assign_qualifier_to_vault(qualifier_id, vault_id)
@raise_null_argument
def unassign_qualifier_from_vault(self, qualifier_id, vault_id):
# Implemented from azosid template for -
# osid.resource.ResourceBinAssignmentSession.assign_resource_to_bin
if not self._can('assign'):
raise PermissionDenied()
return self._provider_session.unassign_qualifier_from_vault(qualifier_id, vault_id)
@raise_null_argument
def reassign_qualifier_to_vault(self, qualifier_id, from_vault_id, to_vault_id):
raise Unimplemented()
class QualifierSmartVaultSession(abc_authorization_sessions.QualifierSmartVaultSession, osid_sessions.OsidSession):
"""Adapts underlying QualifierSmartVaultSession methodswith authorization checks."""
def get_vault_id(self):
# Implemented from azosid template for -
# osid.resource.ResourceLookupSession.get_bin_id_template
return self._provider_session.get_vault_id()
vault_id = property(fget=get_vault_id)
def get_vault(self):
# Implemented from azosid template for -
# osid.resource.ResourceLookupSession.get_bin_template
if not self._can('lookup'):
raise PermissionDenied()
else:
return self._provider_session.get_vault()
vault = property(fget=get_vault)
def can_manage_smart_vaults(self):
raise Unimplemented()
def get_qualifier_query(self):
raise Unimplemented()
qualifier_query = property(fget=get_qualifier_query)
def get_qualifier_search_order(self):
raise Unimplemented()
qualifier_search_order = property(fget=get_qualifier_search_order)
@raise_null_argument
def apply_qualifier_query(self, qualifier_query):
raise Unimplemented()
def inspect_qualifier_query(self):
raise Unimplemented()
@raise_null_argument
def apply_qualifier_sequencing(self, qualifier_search_order):
raise Unimplemented()
@raise_null_argument
def get_qualifier_query_from_inspector(self, qualifier_query_inspector):
raise Unimplemented()
class VaultLookupSession(abc_authorization_sessions.VaultLookupSession, osid_sessions.OsidSession):
"""Adapts underlying VaultLookupSession methodswith authorization checks."""
def __init__(self, *args, **kwargs):
osid_sessions.OsidSession.__init__(self, *args, **kwargs)
# This needs to be done right
# Build from authority in config
self._qualifier_id = Id('authorization.Vault%3AROOT%40ODL.MIT.EDU')
self._id_namespace = 'authorization.Vault'
def can_lookup_vaults(self):
# Implemented from azosid template for -
# osid.resource.BinLookupSession.can_lookup_bins_template
return self._can('lookup')
def use_comparative_vault_view(self):
# Implemented from azosid template for -
# osid.resource.BinLookupSession.use_comparative_bin_view_template
self._provider_session.use_comparative_vault_view()
def use_plenary_vault_view(self):
# Implemented from azosid template for -
# osid.resource.BinLookupSession.use_plenary_bin_view_template
self._provider_session.use_plenary_vault_view()
@raise_null_argument
def get_vault(self, vault_id):
# Implemented from azosid template for -
# osid.resource.BinLookupSession.get_bin_template
if not self._can('lookup'):
raise PermissionDenied()
return self._provider_session.get_vault(vault_id)
@raise_null_argument
def get_vaults_by_ids(self, vault_ids):
# Implemented from azosid template for -
# osid.resource.BinLookupSession.get_bins_by_ids_template
if not self._can('lookup'):
raise PermissionDenied()
return self._provider_session.get_vaults_by_ids(vault_ids)
@raise_null_argument
def get_vaults_by_genus_type(self, vault_genus_type):
if not self._can('lookup'):
raise PermissionDenied()
return self._provider_session.get_vaults_by_genus_type(vault_genus_type)
@raise_null_argument
def get_vaults_by_parent_genus_type(self, vault_genus_type):
raise Unimplemented()
@raise_null_argument
def get_vaults_by_record_type(self, vault_record_type):
raise Unimplemented()
@raise_null_argument
def get_vaults_by_provider(self, resource_id):
raise Unimplemented()
def get_vaults(self):
# Implemented from azosid template for -
# osid.resource.BinLookupSession.get_bins_template
if not self._can('lookup'):
raise PermissionDenied()
return self._provider_session.get_vaults()
vaults = property(fget=get_vaults)
# Copyright (c) 2012 Amazon.com, Inc. or its affiliates. All Rights Reserved
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
#
import copy
import pickle
import os
from tests.compat import unittest, mock
from tests.unit import MockServiceWithConfigTestCase
from boto.auth import HmacAuthV4Handler
from boto.auth import S3HmacAuthV4Handler
from boto.auth import detect_potential_s3sigv4
from boto.auth import detect_potential_sigv4
from boto.connection import HTTPRequest
from boto.provider import Provider
from boto.regioninfo import RegionInfo
class TestSigV4Handler(unittest.TestCase):
def setUp(self):
self.provider = mock.Mock()
self.provider.access_key = 'access_key'
self.provider.secret_key = 'secret_key'
self.request = HTTPRequest(
'POST', 'https', 'glacier.us-east-1.amazonaws.com', 443,
'/-/vaults/foo/archives', None, {},
{'x-amz-glacier-version': '2012-06-01'}, '')
def test_not_adding_empty_qs(self):
self.provider.security_token = None
auth = HmacAuthV4Handler('glacier.us-east-1.amazonaws.com', mock.Mock(), self.provider)
req = copy.copy(self.request)
auth.add_auth(req)
self.assertEqual(req.path, '/-/vaults/foo/archives')
def test_inner_whitespace_is_collapsed(self):
auth = HmacAuthV4Handler('glacier.us-east-1.amazonaws.com',
mock.Mock(), self.provider)
self.request.headers['x-amz-archive-description'] = 'two spaces'
self.request.headers['x-amz-quoted-string'] = ' "a b c" '
headers = auth.headers_to_sign(self.request)
self.assertEqual(headers, {'Host': 'glacier.us-east-1.amazonaws.com',
'x-amz-archive-description': 'two spaces',
'x-amz-glacier-version': '2012-06-01',
'x-amz-quoted-string': ' "a b c" '})
# Note the single space between the "two spaces".
self.assertEqual(auth.canonical_headers(headers),
'host:glacier.us-east-1.amazonaws.com\n'
'x-amz-archive-description:two spaces\n'
'x-amz-glacier-version:2012-06-01\n'
'x-amz-quoted-string:"a b c"')
def test_canonical_query_string(self):
auth = HmacAuthV4Handler('glacier.us-east-1.amazonaws.com',
mock.Mock(), self.provider)
request = HTTPRequest(
'GET', 'https', 'glacier.us-east-1.amazonaws.com', 443,
'/-/vaults/foo/archives', None, {},
{'x-amz-glacier-version': '2012-06-01'}, '')
request.params['Foo.1'] = 'aaa'
request.params['Foo.10'] = 'zzz'
query_string = auth.canonical_query_string(request)
self.assertEqual(query_string, 'Foo.1=aaa&Foo.10=zzz')
def test_query_string(self):
auth = HmacAuthV4Handler('sns.us-east-1.amazonaws.com',
mock.Mock(), self.provider)
params = {
'Message': u'We \u2665 utf-8'.encode('utf-8'),
}
request = HTTPRequest(
'POST', 'https', 'sns.us-east-1.amazonaws.com', 443,
'/', None, params, {}, '')
query_string = auth.query_string(request)
self.assertEqual(query_string, 'Message=We%20%E2%99%A5%20utf-8')
def test_canonical_uri(self):
auth = HmacAuthV4Handler('glacier.us-east-1.amazonaws.com',
mock.Mock(), self.provider)
request = HTTPRequest(
'GET', 'https', 'glacier.us-east-1.amazonaws.com', 443,
'x/./././x .html', None, {},
{'x-amz-glacier-version': '2012-06-01'}, '')
canonical_uri = auth.canonical_uri(request)
# This should be both normalized & urlencoded.
self.assertEqual(canonical_uri, 'x/x%20.html')
auth = HmacAuthV4Handler('glacier.us-east-1.amazonaws.com',
mock.Mock(), self.provider)
request = HTTPRequest(
'GET', 'https', 'glacier.us-east-1.amazonaws.com', 443,
'x/./././x/html/', None, {},
{'x-amz-glacier-version': '2012-06-01'}, '')
canonical_uri = auth.canonical_uri(request)
# Trailing slashes should be preserved.
self.assertEqual(canonical_uri, 'x/x/html/')
request = HTTPRequest(
'GET', 'https', 'glacier.us-east-1.amazonaws.com', 443,
'/', None, {},
{'x-amz-glacier-version': '2012-06-01'}, '')
canonical_uri = auth.canonical_uri(request)
# There should not be two-slashes.
self.assertEqual(canonical_uri, '/')
# Make sure Windows-style slashes are converted properly
request = HTTPRequest(
'GET', 'https', 'glacier.us-east-1.amazonaws.com', 443,
'\\x\\x.html', None, {},
{'x-amz-glacier-version': '2012-06-01'}, '')
canonical_uri = auth.canonical_uri(request)
self.assertEqual(canonical_uri, '/x/x.html')
def test_credential_scope(self):
# test the AWS standard regions IAM endpoint
auth = HmacAuthV4Handler('iam.amazonaws.com',
mock.Mock(), self.provider)
request = HTTPRequest(
'POST', 'https', 'iam.amazonaws.com', 443,
'/', '/',
{'Action': 'ListAccountAliases', 'Version': '2010-05-08'},
{
'Content-Length': '44',
'Content-Type': 'application/x-www-form-urlencoded; charset=UTF-8',
'X-Amz-Date': '20130808T013210Z'
},
'Action=ListAccountAliases&Version=2010-05-08')
credential_scope = auth.credential_scope(request)
region_name = credential_scope.split('/')[1]
self.assertEqual(region_name, 'us-east-1')
# test the AWS GovCloud region IAM endpoint
auth = HmacAuthV4Handler('iam.us-gov.amazonaws.com',
mock.Mock(), self.provider)
request = HTTPRequest(
'POST', 'https', 'iam.us-gov.amazonaws.com', 443,
'/', '/',
{'Action': 'ListAccountAliases', 'Version': '2010-05-08'},
{
'Content-Length': '44',
'Content-Type': 'application/x-www-form-urlencoded; charset=UTF-8',
'X-Amz-Date': '20130808T013210Z'
},
'Action=ListAccountAliases&Version=2010-05-08')
credential_scope = auth.credential_scope(request)
region_name = credential_scope.split('/')[1]
self.assertEqual(region_name, 'us-gov-west-1')
        # iam.us-west-1.amazonaws.com does not exist; however, this covers the
        # remaining region_name control structure for a different region name
auth = HmacAuthV4Handler('iam.us-west-1.amazonaws.com',
mock.Mock(), self.provider)
request = HTTPRequest(
'POST', 'https', 'iam.us-west-1.amazonaws.com', 443,
'/', '/',
{'Action': 'ListAccountAliases', 'Version': '2010-05-08'},
{
'Content-Length': '44',
'Content-Type': 'application/x-www-form-urlencoded; charset=UTF-8',
'X-Amz-Date': '20130808T013210Z'
},
'Action=ListAccountAliases&Version=2010-05-08')
credential_scope = auth.credential_scope(request)
region_name = credential_scope.split('/')[1]
self.assertEqual(region_name, 'us-west-1')
# Test connections to custom locations, e.g. localhost:8080
auth = HmacAuthV4Handler('localhost', mock.Mock(), self.provider,
service_name='iam')
request = HTTPRequest(
'POST', 'http', 'localhost', 8080,
'/', '/',
{'Action': 'ListAccountAliases', 'Version': '2010-05-08'},
{
'Content-Length': '44',
'Content-Type': 'application/x-www-form-urlencoded; charset=UTF-8',
'X-Amz-Date': '20130808T013210Z'
},
'Action=ListAccountAliases&Version=2010-05-08')
credential_scope = auth.credential_scope(request)
timestamp, region, service, v = credential_scope.split('/')
self.assertEqual(region, 'localhost')
self.assertEqual(service, 'iam')
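    # As the assertions above show, a SigV4 credential scope has the form
    # <YYYYMMDD>/<region>/<service>/aws4_request; the region is derived from
    # the endpoint host name unless it is explicitly overridden.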
def test_headers_to_sign(self):
auth = HmacAuthV4Handler('glacier.us-east-1.amazonaws.com',
mock.Mock(), self.provider)
request = HTTPRequest(
'GET', 'http', 'glacier.us-east-1.amazonaws.com', 80,
'x/./././x .html', None, {},
{'x-amz-glacier-version': '2012-06-01'}, '')
headers = auth.headers_to_sign(request)
# Port 80 & not secure excludes the port.
self.assertEqual(headers['Host'], 'glacier.us-east-1.amazonaws.com')
request = HTTPRequest(
'GET', 'https', 'glacier.us-east-1.amazonaws.com', 443,
'x/./././x .html', None, {},
{'x-amz-glacier-version': '2012-06-01'}, '')
headers = auth.headers_to_sign(request)
# SSL port excludes the port.
self.assertEqual(headers['Host'], 'glacier.us-east-1.amazonaws.com')
request = HTTPRequest(
'GET', 'https', 'glacier.us-east-1.amazonaws.com', 8080,
'x/./././x .html', None, {},
{'x-amz-glacier-version': '2012-06-01'}, '')
headers = auth.headers_to_sign(request)
# URL should include port.
self.assertEqual(headers['Host'], 'glacier.us-east-1.amazonaws.com:8080')
def test_region_and_service_can_be_overriden(self):
auth = HmacAuthV4Handler('queue.amazonaws.com',
mock.Mock(), self.provider)
self.request.headers['X-Amz-Date'] = '20121121000000'
auth.region_name = 'us-west-2'
auth.service_name = 'sqs'
scope = auth.credential_scope(self.request)
self.assertEqual(scope, '20121121/us-west-2/sqs/aws4_request')
def test_pickle_works(self):
provider = Provider('aws', access_key='access_key',
secret_key='secret_key')
auth = HmacAuthV4Handler('queue.amazonaws.com', None, provider)
# Pickle it!
pickled = pickle.dumps(auth)
# Now restore it
auth2 = pickle.loads(pickled)
self.assertEqual(auth.host, auth2.host)
def test_bytes_header(self):
auth = HmacAuthV4Handler('glacier.us-east-1.amazonaws.com',
mock.Mock(), self.provider)
request = HTTPRequest(
'GET', 'http', 'glacier.us-east-1.amazonaws.com', 80,
'x/./././x .html', None, {},
{'x-amz-glacier-version': '2012-06-01', 'x-amz-hash': b'f00'}, '')
canonical = auth.canonical_request(request)
self.assertIn('f00', canonical)
class TestS3HmacAuthV4Handler(unittest.TestCase):
def setUp(self):
self.provider = mock.Mock()
self.provider.access_key = 'access_key'
self.provider.secret_key = 'secret_key'
self.provider.security_token = '<PASSWORD>'
self.request = HTTPRequest(
'GET', 'https', 's3-us-west-2.amazonaws.com', 443,
'/awesome-bucket/?max-keys=0', None, {},
{}, ''
)
self.awesome_bucket_request = HTTPRequest(
method='GET',
protocol='https',
host='awesome-bucket.s3-us-west-2.amazonaws.com',
port=443,
path='/',
auth_path=None,
params={
'max-keys': 0,
},
headers={
'User-Agent': 'Boto',
'X-AMZ-Content-sha256': 'e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855',
'X-AMZ-Date': '20130605T193245Z',
},
body=''
)
self.auth = S3HmacAuthV4Handler(
host='awesome-bucket.s3-us-west-2.amazonaws.com',
config=mock.Mock(),
provider=self.provider,
region_name='s3-us-west-2'
)
def test_clean_region_name(self):
# Untouched.
cleaned = self.auth.clean_region_name('us-west-2')
self.assertEqual(cleaned, 'us-west-2')
# Stripped of the ``s3-`` prefix.
cleaned = self.auth.clean_region_name('s3-us-west-2')
self.assertEqual(cleaned, 'us-west-2')
# Untouched (classic).
cleaned = self.auth.clean_region_name('s3.amazonaws.com')
self.assertEqual(cleaned, 's3.amazonaws.com')
# Untouched.
cleaned = self.auth.clean_region_name('something-s3-us-west-2')
self.assertEqual(cleaned, 'something-s3-us-west-2')
def test_region_stripping(self):
auth = S3HmacAuthV4Handler(
host='s3-us-west-2.amazonaws.com',
config=mock.Mock(),
provider=self.provider
)
self.assertEqual(auth.region_name, None)
# What we wish we got.
auth = S3HmacAuthV4Handler(
host='s3-us-west-2.amazonaws.com',
config=mock.Mock(),
provider=self.provider,
region_name='us-west-2'
)
self.assertEqual(auth.region_name, 'us-west-2')
# What we actually get (i.e. ``s3-us-west-2``).
self.assertEqual(self.auth.region_name, 'us-west-2')
def test_determine_region_name(self):
name = self.auth.determine_region_name('s3-us-west-2.amazonaws.com')
self.assertEqual(name, 'us-west-2')
def test_canonical_uri(self):
request = HTTPRequest(
'GET', 'https', 's3-us-west-2.amazonaws.com', 443,
'x/./././x .html', None, {},
{}, ''
)
canonical_uri = self.auth.canonical_uri(request)
# S3 doesn't canonicalize the way other SigV4 services do.
        # This is just urlencoded, with no normalization of the path.
self.assertEqual(canonical_uri, 'x/./././x%20.html')
def test_determine_service_name(self):
# What we wish we got.
name = self.auth.determine_service_name(
's3.us-west-2.amazonaws.com'
)
self.assertEqual(name, 's3')
# What we actually get.
name = self.auth.determine_service_name(
's3-us-west-2.amazonaws.com'
)
self.assertEqual(name, 's3')
# What we wish we got with virtual hosting.
name = self.auth.determine_service_name(
'bucket.s3.us-west-2.amazonaws.com'
)
self.assertEqual(name, 's3')
# What we actually get with virtual hosting.
name = self.auth.determine_service_name(
'bucket.s3-us-west-2.amazonaws.com'
)
self.assertEqual(name, 's3')
def test_add_auth(self):
# The side-effects sideshow.
self.assertFalse('x-amz-content-sha256' in self.request.headers)
self.auth.add_auth(self.request)
self.assertTrue('x-amz-content-sha256' in self.request.headers)
the_sha = self.request.headers['x-amz-content-sha256']
self.assertEqual(
the_sha,
'e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855'
)
def test_host_header(self):
host = self.auth.host_header(
self.awesome_bucket_request.host,
self.awesome_bucket_request
)
self.assertEqual(host, 'awesome-bucket.s3-us-west-2.amazonaws.com')
def test_canonical_query_string(self):
qs = self.auth.canonical_query_string(self.awesome_bucket_request)
self.assertEqual(qs, 'max-keys=0')
def test_correct_handling_of_plus_sign(self):
request = HTTPRequest(
'GET', 'https', 's3-us-west-2.amazonaws.com', 443,
'hello+world.txt', None, {},
{}, ''
)
canonical_uri = self.auth.canonical_uri(request)
# Ensure that things are properly quoted.
self.assertEqual(canonical_uri, 'hello%2Bworld.txt')
request = HTTPRequest(
'GET', 'https', 's3-us-west-2.amazonaws.com', 443,
'hello%2Bworld.txt', None, {},
{}, ''
)
canonical_uri = self.auth.canonical_uri(request)
# Verify double escaping hasn't occurred.
self.assertEqual(canonical_uri, 'hello%2Bworld.txt')
def test_mangle_path_and_params(self):
request = HTTPRequest(
method='GET',
protocol='https',
host='awesome-bucket.s3-us-west-2.amazonaws.com',
port=443,
# LOOK AT THIS PATH. JUST LOOK AT IT.
path='/?delete&max-keys=0',
auth_path=None,
params={
'key': 'why hello there',
# This gets overwritten, to make sure back-compat is maintained.
'max-keys': 1,
},
headers={
'User-Agent': 'Boto',
'X-AMZ-Content-sha256': 'e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855',
'X-AMZ-Date': '20130605T193245Z',
},
body=''
)
mod_req = self.auth.mangle_path_and_params(request)
self.assertEqual(mod_req.path, '/?delete&max-keys=0')
self.assertEqual(mod_req.auth_path, '/')
self.assertEqual(mod_req.params, {
'max-keys': '0',
'key': 'why hello there',
'delete': ''
})
def test_canonical_request(self):
expected = """GET
/
max-keys=0
host:awesome-bucket.s3-us-west-2.amazonaws.com
user-agent:Boto
x-amz-content-sha256:e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855
x-amz-date:20130605T193245Z
host;user-agent;x-amz-content-sha256;x-amz-date
e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855"""
authed_req = self.auth.canonical_request(self.awesome_bucket_request)
self.assertEqual(authed_req, expected)
# Now the way ``boto.s3`` actually sends data.
request = copy.copy(self.awesome_bucket_request)
request.path = request.auth_path = '/?max-keys=0'
request.params = {}
expected = """GET
/
max-keys=0
host:awesome-bucket.s3-us-west-2.amazonaws.com
user-agent:Boto
x-amz-content-sha256:e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855
x-amz-date:20130605T193245Z
host;user-agent;x-amz-content-sha256;x-amz-date
e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855"""
# Pre-mangle it. In practice, this happens as part of ``add_auth``,
# but that's a side-effect that's hard to test.
request = self.auth.mangle_path_and_params(request)
authed_req = self.auth.canonical_request(request)
self.assertEqual(authed_req, expected)
. target_eid , I11i1i1i1iii . rloc , to_etr = True )
if 60 - 60: I1ii11iIi11i * iII111i / OoOoOO00 . o0oOOo0O0Ooo / iIii1I11I1II1
return ( [ ooOOOo0o0oo . eid , ooOOOo0o0oo . group , LISP_DDT_ACTION_MS_ACK ] )
def lisp_ddt_process_map_request(lisp_sockets, map_request, ecm_source, port):

    #
    # Pull the lookup key and nonce out of the Map-Request.
    #
    eid = map_request.target_eid
    group = map_request.target_group
    eid_str = lisp_print_eid_tuple(eid, group)
    nonce = map_request.nonce
    action = LISP_DDT_ACTION_NULL

    #
    # Decide on the Map-Referral action and TTL.
    #
    ddt_entry = None
    if (lisp_i_am_ms):
        site_eid = lisp_site_eid_lookup(eid, group, False)
        if (site_eid == None): return

        if (site_eid.registered):
            action = LISP_DDT_ACTION_MS_ACK
            ttl = 1440
        else:
            eid, group, action = lisp_ms_compute_neg_prefix(eid, group)
            action = LISP_DDT_ACTION_MS_NOT_REG
            ttl = 1
    else:
        ddt_entry = lisp_ddt_cache_lookup(eid, group, False)
        if (ddt_entry == None):
            action = LISP_DDT_ACTION_NOT_AUTH
            ttl = 0
            lprint("DDT delegation entry not found for EID {}".format(
                green(eid_str, False)))
        elif (ddt_entry.is_auth_prefix()):

            #
            # Found an authoritative prefix but no delegation below it.
            #
            action = LISP_DDT_ACTION_DELEGATION_HOLE
            ttl = 15
            auth_prefix_str = ddt_entry.print_eid_tuple()
            lprint(("DDT delegation entry not found but auth-prefix {} " +
                "found for EID {}").format(auth_prefix_str,
                green(eid_str, False)))

            if (group.is_null()):
                eid = lisp_ddt_compute_neg_prefix(eid, ddt_entry,
                    lisp_ddt_cache)
            else:
                group = lisp_ddt_compute_neg_prefix(group, ddt_entry,
                    lisp_ddt_cache)
                eid = lisp_ddt_compute_neg_prefix(eid, ddt_entry,
                    ddt_entry.source_cache)
            ddt_entry = None
        else:
            delegation_str = ddt_entry.print_eid_tuple()
            lprint("DDT delegation entry {} found for EID {}".format(
                delegation_str, green(eid_str, False)))
            ttl = 1440

    #
    # Build the Map-Referral and send it back to the requester.
    #
    packet = lisp_build_map_referral(eid, group, ddt_entry, action, ttl, nonce)
    nonce = map_request.nonce >> 32
    if (map_request.nonce != 0 and nonce != 0xdfdf0e1d): port = LISP_CTRL_PORT
    lisp_send_map_referral(lisp_sockets, packet, ecm_source, port)
    return
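# Referral decision summary for lisp_ddt_process_map_request() above, taken
# directly from its branches (the numbers are the TTL values handed to
# lisp_build_map_referral):
#
#   Map-Server, site EID registered       -> LISP_DDT_ACTION_MS_ACK,          TTL 1440
#   Map-Server, site EID not registered   -> LISP_DDT_ACTION_MS_NOT_REG,      TTL 1
#   DDT node, no delegation entry         -> LISP_DDT_ACTION_NOT_AUTH,        TTL 0
#   DDT node, auth-prefix but no child    -> LISP_DDT_ACTION_DELEGATION_HOLE, TTL 15
#   DDT node, delegation entry found      -> referral from that entry,        TTL 1440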
def lisp_find_negative_mask_len(eid, entry_prefix, neg_prefix):
    diff = eid.hash_address(entry_prefix)
    addr_length = eid.addr_length() * 8
    mask_len = 0

    #
    # Find the first (most significant) bit where the two addresses differ.
    #
    for mask_len in range(addr_length):
        bit_test = 1 << (addr_length - mask_len - 1)
        if (diff & bit_test): break

    if (mask_len > neg_prefix.mask_len): neg_prefix.mask_len = mask_len
    return
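# Worked example for lisp_find_negative_mask_len() above: with a 32-bit
# address family, addr_length is 32. If eid.hash_address(entry_prefix)
# returns 0x00400000 (the first differing bit is bit index 9 from the left),
# the loop stops at mask_len == 9, because 1 << (32 - 9 - 1) == 0x00400000.
# neg_prefix.mask_len is then raised to 9 if it was smaller, so the negative
# prefix becomes just long enough not to cover that entry.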
def lisp_neg_prefix_walk(entry, parms):
    eid, auth_prefix, neg_prefix = parms

    if (auth_prefix == None):
        if (entry.eid.instance_id != eid.instance_id):
            return([True, parms])
        if (entry.eid.afi != eid.afi): return([True, parms])
    else:
        if (entry.eid.is_more_specific(auth_prefix) == False):
            return([True, parms])

    #
    # Update the negative prefix mask-length so it does not cover this entry.
    #
    lisp_find_negative_mask_len(eid, entry.eid, neg_prefix)
    return([True, parms])
def lisp_ddt_compute_neg_prefix ( eid , ddt_entry | |
all_params = ['id']  # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
for key, val in six.iteritems(local_var_params['kwargs']):
if key not in all_params:
raise ApiTypeError(
"Got an unexpected keyword argument '%s'"
" to method get_folder" % key
)
local_var_params[key] = val
del local_var_params['kwargs']
# verify the required parameter 'id' is set
if ('id' not in local_var_params or
local_var_params['id'] is None):
raise ApiValueError("Missing the required parameter `id` when calling `get_folder`") # noqa: E501
if ('id' in local_var_params and
len(local_var_params['id']) > 40):
raise ApiValueError("Invalid value for parameter `id` when calling `get_folder`, length must be less than or equal to `40`") # noqa: E501
if ('id' in local_var_params and
len(local_var_params['id']) < 30):
raise ApiValueError("Invalid value for parameter `id` when calling `get_folder`, length must be greater than or equal to `30`") # noqa: E501
if 'id' in local_var_params and not re.search(r'^[a-zA-Z0-9\-]+$', local_var_params['id']): # noqa: E501
raise ApiValueError("Invalid value for parameter `id` when calling `get_folder`, must conform to the pattern `/^[a-zA-Z0-9\-]+$/`") # noqa: E501
collection_formats = {}
path_params = {}
if 'id' in local_var_params:
path_params['id'] = local_var_params['id'] # noqa: E501
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['text/plain', 'application/json', 'text/json']) # noqa: E501
# Authentication setting
auth_settings = ['oauth2'] # noqa: E501
# set the LUSID header
header_params['X-LUSID-SDK-Language'] = 'Python'
header_params['X-LUSID-SDK-Version'] = '0.1.185'
return self.api_client.call_api(
'/api/folders/{id}', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='StorageObject', # noqa: E501
auth_settings=auth_settings,
async_req=local_var_params.get('async_req'),
_return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
_preload_content=local_var_params.get('_preload_content', True),
_request_timeout=local_var_params.get('_request_timeout'),
collection_formats=collection_formats)
def get_folder_contents(self, id, **kwargs): # noqa: E501
"""[BETA] List contents of a folder # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_folder_contents(id, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param str id: Unique ID of the folder (required)
:param str page: The pagination token to use to continue listing contents from a previous call to list contents. This value is returned from the previous call. If a pagination token is provided the sortBy and filter fields must not have changed since the original request. Also, if set, a start value cannot be provided.
:param list[str] sort_by: Order the results by these fields. Use the '-' sign to denote descending order.
:param int start: When paginating, skip this number of results.
:param int limit: When paginating, limit the number of returned results to this many.
:param str filter: Expression to filter the result set.
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: PagedResourceListOfStorageObject
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
return self.get_folder_contents_with_http_info(id, **kwargs) # noqa: E501
def get_folder_contents_with_http_info(self, id, **kwargs): # noqa: E501
"""[BETA] List contents of a folder # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_folder_contents_with_http_info(id, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param str id: Unique ID of the folder (required)
:param str page: The pagination token to use to continue listing contents from a previous call to list contents. This value is returned from the previous call. If a pagination token is provided the sortBy and filter fields must not have changed since the original request. Also, if set, a start value cannot be provided.
:param list[str] sort_by: Order the results by these fields. Use the '-' sign to denote descending order.
:param int start: When paginating, skip this number of results.
:param int limit: When paginating, limit the number of returned results to this many.
:param str filter: Expression to filter the result set.
:param _return_http_data_only: response data without head status code
and headers
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: tuple(PagedResourceListOfStorageObject, status_code(int), headers(HTTPHeaderDict))
If the method is called asynchronously,
returns the request thread.
"""
local_var_params = locals()
all_params = ['id', 'page', 'sort_by', 'start', 'limit', 'filter'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
for key, val in six.iteritems(local_var_params['kwargs']):
if key not in all_params:
raise ApiTypeError(
"Got an unexpected keyword argument '%s'"
" to method get_folder_contents" % key
)
local_var_params[key] = val
del local_var_params['kwargs']
# verify the required parameter 'id' is set
if ('id' not in local_var_params or
local_var_params['id'] is None):
raise ApiValueError("Missing the required parameter `id` when calling `get_folder_contents`") # noqa: E501
if ('id' in local_var_params and
len(local_var_params['id']) > 40):
raise ApiValueError("Invalid value for parameter `id` when calling `get_folder_contents`, length must be less than or equal to `40`") # noqa: E501
if ('id' in local_var_params and
len(local_var_params['id']) < 30):
raise ApiValueError("Invalid value for parameter `id` when calling `get_folder_contents`, length must be greater than or equal to `30`") # noqa: E501
if 'id' in local_var_params and not re.search(r'^[a-zA-Z0-9\-]+$', local_var_params['id']): # noqa: E501
raise ApiValueError("Invalid value for parameter `id` when calling `get_folder_contents`, must conform to the pattern `/^[a-zA-Z0-9\-]+$/`") # noqa: E501
if ('page' in local_var_params and
len(local_var_params['page']) > 200):
raise ApiValueError("Invalid value for parameter `page` when calling `get_folder_contents`, length must be less than or equal to `200`") # noqa: E501
if ('page' in local_var_params and
len(local_var_params['page']) < 1):
raise ApiValueError("Invalid value for parameter `page` when calling `get_folder_contents`, length must be greater than or equal to `1`") # noqa: E501
if 'page' in local_var_params and not re.search(r'^[a-zA-Z0-9\+\/]*={0,3}$', local_var_params['page']): # noqa: E501
raise ApiValueError("Invalid value for parameter `page` when calling `get_folder_contents`, must conform to the pattern `/^[a-zA-Z0-9\+\/]*={0,3}$/`") # noqa: E501
if ('filter' in local_var_params and
len(local_var_params['filter']) > 2147483647):
raise ApiValueError("Invalid value for parameter `filter` when calling `get_folder_contents`, length must be less than or equal to `2147483647`") # noqa: E501
if ('filter' in local_var_params and
len(local_var_params['filter']) < 0):
raise ApiValueError("Invalid value for parameter `filter` when calling `get_folder_contents`, length must be greater than or equal to `0`") # noqa: E501
if 'filter' in local_var_params and not re.search(r'(?s).*', local_var_params['filter']): # noqa: E501
raise ApiValueError("Invalid value for parameter `filter` when calling `get_folder_contents`, must conform to the pattern `/(?s).*/`") # noqa: E501
collection_formats = {}
path_params = {}
if 'id' in local_var_params:
path_params['id'] = local_var_params['id'] # noqa: E501
query_params = []
if 'page' in local_var_params:
query_params.append(('page', local_var_params['page'])) # noqa: E501
if 'sort_by' in local_var_params:
query_params.append(('sortBy', local_var_params['sort_by'])) # noqa: E501
collection_formats['sortBy'] = 'multi' # noqa: E501
if 'start' in local_var_params:
query_params.append(('start', local_var_params['start'])) # noqa: E501
if 'limit' in local_var_params:
query_params.append(('limit', local_var_params['limit'])) # noqa: E501
if 'filter' in local_var_params:
query_params.append(('filter', local_var_params['filter'])) # noqa: E501
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['text/plain', 'application/json', 'text/json']) # noqa: E501
# Authentication setting
auth_settings = ['oauth2'] # noqa: E501
# set the LUSID header
header_params['X-LUSID-SDK-Language'] = 'Python'
header_params['X-LUSID-SDK-Version'] = '0.1.185'
return self.api_client.call_api(
'/api/folders/{id}/contents', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='PagedResourceListOfStorageObject', # noqa: E501
auth_settings=auth_settings,
async_req=local_var_params.get('async_req'),
_return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
_preload_content=local_var_params.get('_preload_content', True),
_request_timeout=local_var_params.get('_request_timeout'),
collection_formats=collection_formats)
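# Usage sketch for the pagination parameters documented above (illustrative
# only: `api` follows the docstring's own placeholder, `folder_id` is a
# placeholder value, and the `values` and `next_page` attribute names are
# assumptions about the paged response model):
#
#   contents = api.get_folder_contents(folder_id, limit=100)
#   while True:
#       for item in contents.values:
#           print(item.name)
#       if not getattr(contents, 'next_page', None):
#           break
#       contents = api.get_folder_contents(folder_id, limit=100,
#                                          page=contents.next_page)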
def get_root_folder(self, **kwargs): # noqa: E501
"""[BETA] List contents of root folder # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_root_folder(async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param str page: The pagination token to use to continue listing contents from a previous call to list contents. This value is returned from the previous call. If a pagination token is provided the sortBy and filter fields must not have changed since the original request. Also, if set, | |
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from ... import _utilities
from . import outputs
from ._enums import *
from ._inputs import *
__all__ = ['MigratingVmArgs', 'MigratingVm']
@pulumi.input_type
class MigratingVmArgs:
def __init__(__self__, *,
migrating_vm_id: pulumi.Input[str],
source_id: pulumi.Input[str],
compute_engine_target_defaults: Optional[pulumi.Input['ComputeEngineTargetDefaultsArgs']] = None,
description: Optional[pulumi.Input[str]] = None,
display_name: Optional[pulumi.Input[str]] = None,
labels: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
location: Optional[pulumi.Input[str]] = None,
policy: Optional[pulumi.Input['SchedulePolicyArgs']] = None,
project: Optional[pulumi.Input[str]] = None,
request_id: Optional[pulumi.Input[str]] = None,
source_vm_id: Optional[pulumi.Input[str]] = None):
"""
The set of arguments for constructing a MigratingVm resource.
:param pulumi.Input['ComputeEngineTargetDefaultsArgs'] compute_engine_target_defaults: Details of the target VM in Compute Engine.
:param pulumi.Input[str] description: The description attached to the migrating VM by the user.
:param pulumi.Input[str] display_name: The display name attached to the MigratingVm by the user.
:param pulumi.Input[Mapping[str, pulumi.Input[str]]] labels: The labels of the migrating VM.
:param pulumi.Input['SchedulePolicyArgs'] policy: The replication schedule policy.
:param pulumi.Input[str] source_vm_id: The unique ID of the VM in the source. The VM's name in vSphere can be changed, so this is not the VM's name but rather its moRef id. This id is of the form vm-.
"""
pulumi.set(__self__, "migrating_vm_id", migrating_vm_id)
pulumi.set(__self__, "source_id", source_id)
if compute_engine_target_defaults is not None:
pulumi.set(__self__, "compute_engine_target_defaults", compute_engine_target_defaults)
if description is not None:
pulumi.set(__self__, "description", description)
if display_name is not None:
pulumi.set(__self__, "display_name", display_name)
if labels is not None:
pulumi.set(__self__, "labels", labels)
if location is not None:
pulumi.set(__self__, "location", location)
if policy is not None:
pulumi.set(__self__, "policy", policy)
if project is not None:
pulumi.set(__self__, "project", project)
if request_id is not None:
pulumi.set(__self__, "request_id", request_id)
if source_vm_id is not None:
pulumi.set(__self__, "source_vm_id", source_vm_id)
@property
@pulumi.getter(name="migratingVmId")
def migrating_vm_id(self) -> pulumi.Input[str]:
return pulumi.get(self, "migrating_vm_id")
@migrating_vm_id.setter
def migrating_vm_id(self, value: pulumi.Input[str]):
pulumi.set(self, "migrating_vm_id", value)
@property
@pulumi.getter(name="sourceId")
def source_id(self) -> pulumi.Input[str]:
return pulumi.get(self, "source_id")
@source_id.setter
def source_id(self, value: pulumi.Input[str]):
pulumi.set(self, "source_id", value)
@property
@pulumi.getter(name="computeEngineTargetDefaults")
def compute_engine_target_defaults(self) -> Optional[pulumi.Input['ComputeEngineTargetDefaultsArgs']]:
"""
Details of the target VM in Compute Engine.
"""
return pulumi.get(self, "compute_engine_target_defaults")
@compute_engine_target_defaults.setter
def compute_engine_target_defaults(self, value: Optional[pulumi.Input['ComputeEngineTargetDefaultsArgs']]):
pulumi.set(self, "compute_engine_target_defaults", value)
@property
@pulumi.getter
def description(self) -> Optional[pulumi.Input[str]]:
"""
The description attached to the migrating VM by the user.
"""
return pulumi.get(self, "description")
@description.setter
def description(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "description", value)
@property
@pulumi.getter(name="displayName")
def display_name(self) -> Optional[pulumi.Input[str]]:
"""
The display name attached to the MigratingVm by the user.
"""
return pulumi.get(self, "display_name")
@display_name.setter
def display_name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "display_name", value)
@property
@pulumi.getter
def labels(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]:
"""
The labels of the migrating VM.
"""
return pulumi.get(self, "labels")
@labels.setter
def labels(self, value: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]):
pulumi.set(self, "labels", value)
@property
@pulumi.getter
def location(self) -> Optional[pulumi.Input[str]]:
return pulumi.get(self, "location")
@location.setter
def location(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "location", value)
@property
@pulumi.getter
def policy(self) -> Optional[pulumi.Input['SchedulePolicyArgs']]:
"""
The replication schedule policy.
"""
return pulumi.get(self, "policy")
@policy.setter
def policy(self, value: Optional[pulumi.Input['SchedulePolicyArgs']]):
pulumi.set(self, "policy", value)
@property
@pulumi.getter
def project(self) -> Optional[pulumi.Input[str]]:
return pulumi.get(self, "project")
@project.setter
def project(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "project", value)
@property
@pulumi.getter(name="requestId")
def request_id(self) -> Optional[pulumi.Input[str]]:
return pulumi.get(self, "request_id")
@request_id.setter
def request_id(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "request_id", value)
@property
@pulumi.getter(name="sourceVmId")
def source_vm_id(self) -> Optional[pulumi.Input[str]]:
"""
The unique ID of the VM in the source. The VM's name in vSphere can be changed, so this is not the VM's name but rather its moRef id. This id is of the form vm-.
"""
return pulumi.get(self, "source_vm_id")
@source_vm_id.setter
def source_vm_id(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "source_vm_id", value)
class MigratingVm(pulumi.CustomResource):
@overload
def __init__(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
compute_engine_target_defaults: Optional[pulumi.Input[pulumi.InputType['ComputeEngineTargetDefaultsArgs']]] = None,
description: Optional[pulumi.Input[str]] = None,
display_name: Optional[pulumi.Input[str]] = None,
labels: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
location: Optional[pulumi.Input[str]] = None,
migrating_vm_id: Optional[pulumi.Input[str]] = None,
policy: Optional[pulumi.Input[pulumi.InputType['SchedulePolicyArgs']]] = None,
project: Optional[pulumi.Input[str]] = None,
request_id: Optional[pulumi.Input[str]] = None,
source_id: Optional[pulumi.Input[str]] = None,
source_vm_id: Optional[pulumi.Input[str]] = None,
__props__=None):
"""
Creates a new MigratingVm in a given Source.
Auto-naming is currently not supported for this resource.
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[pulumi.InputType['ComputeEngineTargetDefaultsArgs']] compute_engine_target_defaults: Details of the target VM in Compute Engine.
:param pulumi.Input[str] description: The description attached to the migrating VM by the user.
:param pulumi.Input[str] display_name: The display name attached to the MigratingVm by the user.
:param pulumi.Input[Mapping[str, pulumi.Input[str]]] labels: The labels of the migrating VM.
:param pulumi.Input[pulumi.InputType['SchedulePolicyArgs']] policy: The replication schedule policy.
:param pulumi.Input[str] source_vm_id: The unique ID of the VM in the source. The VM's name in vSphere can be changed, so this is not the VM's name but rather its moRef id. This id is of the form vm-.
"""
...
@overload
def __init__(__self__,
resource_name: str,
args: MigratingVmArgs,
opts: Optional[pulumi.ResourceOptions] = None):
"""
Creates a new MigratingVm in a given Source.
Auto-naming is currently not supported for this resource.
:param str resource_name: The name of the resource.
:param MigratingVmArgs args: The arguments to use to populate this resource's properties.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
...
def __init__(__self__, resource_name: str, *args, **kwargs):
resource_args, opts = _utilities.get_resource_args_opts(MigratingVmArgs, pulumi.ResourceOptions, *args, **kwargs)
if resource_args is not None:
__self__._internal_init(resource_name, opts, **resource_args.__dict__)
else:
__self__._internal_init(resource_name, *args, **kwargs)
def _internal_init(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
compute_engine_target_defaults: Optional[pulumi.Input[pulumi.InputType['ComputeEngineTargetDefaultsArgs']]] = None,
description: Optional[pulumi.Input[str]] = None,
display_name: Optional[pulumi.Input[str]] = None,
labels: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
location: Optional[pulumi.Input[str]] = None,
migrating_vm_id: Optional[pulumi.Input[str]] = None,
policy: Optional[pulumi.Input[pulumi.InputType['SchedulePolicyArgs']]] = None,
project: Optional[pulumi.Input[str]] = None,
request_id: Optional[pulumi.Input[str]] = None,
source_id: Optional[pulumi.Input[str]] = None,
source_vm_id: Optional[pulumi.Input[str]] = None,
__props__=None):
if opts is None:
opts = pulumi.ResourceOptions()
if not isinstance(opts, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
if opts.version is None:
opts.version = _utilities.get_version()
if opts.id is None:
if __props__ is not None:
raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
__props__ = MigratingVmArgs.__new__(MigratingVmArgs)
__props__.__dict__["compute_engine_target_defaults"] = compute_engine_target_defaults
__props__.__dict__["description"] = description
__props__.__dict__["display_name"] = display_name
__props__.__dict__["labels"] = labels
__props__.__dict__["location"] = location
if migrating_vm_id is None and not opts.urn:
raise TypeError("Missing required property 'migrating_vm_id'")
__props__.__dict__["migrating_vm_id"] = migrating_vm_id
__props__.__dict__["policy"] = policy
__props__.__dict__["project"] = project
__props__.__dict__["request_id"] = request_id
if source_id is None and not opts.urn:
raise TypeError("Missing required property 'source_id'")
__props__.__dict__["source_id"] = source_id
__props__.__dict__["source_vm_id"] = source_vm_id
__props__.__dict__["create_time"] = None
__props__.__dict__["current_sync_info"] = None
__props__.__dict__["error"] = None
__props__.__dict__["group"] = None
__props__.__dict__["last_sync"] = None
__props__.__dict__["name"] = None
__props__.__dict__["recent_clone_jobs"] = None
__props__.__dict__["recent_cutover_jobs"] = None
__props__.__dict__["state"] = None
__props__.__dict__["state_time"] = None
__props__.__dict__["update_time"] = None
super(MigratingVm, __self__).__init__(
'google-native:vmmigration/v1alpha1:MigratingVm',
resource_name,
__props__,
opts)
@staticmethod
def get(resource_name: str,
id: pulumi.Input[str],
opts: Optional[pulumi.ResourceOptions] = None) -> 'MigratingVm':
"""
Get an existing MigratingVm resource's state with the given name, id, and optional extra
properties used to qualify the lookup.
:param str resource_name: The unique name of the resulting resource.
:param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
__props__ = MigratingVmArgs.__new__(MigratingVmArgs)
__props__.__dict__["compute_engine_target_defaults"] = None
__props__.__dict__["create_time"] = None
__props__.__dict__["current_sync_info"] = None
__props__.__dict__["description"] = None
__props__.__dict__["display_name"] = None
__props__.__dict__["error"] = None
__props__.__dict__["group"] = None
__props__.__dict__["labels"] = None
__props__.__dict__["last_sync"] = None
__props__.__dict__["name"] = None
__props__.__dict__["policy"] = None
__props__.__dict__["recent_clone_jobs"] = None
__props__.__dict__["recent_cutover_jobs"] = None
__props__.__dict__["source_vm_id"] = None
__props__.__dict__["state"] = None
__props__.__dict__["state_time"] = None
__props__.__dict__["update_time"] = None
return MigratingVm(resource_name, opts=opts, __props__=__props__)
@property
@pulumi.getter(name="computeEngineTargetDefaults")
def compute_engine_target_defaults(self) -> pulumi.Output['outputs.ComputeEngineTargetDefaultsResponse']:
"""
Details of the target VM in Compute Engine.
"""
return pulumi.get(self, "compute_engine_target_defaults")
@property
@pulumi.getter(name="createTime")
def create_time(self) -> pulumi.Output[str]:
"""
The time the migrating VM was created (this refers to this resource and not to the time it was installed in the source).
"""
return pulumi.get(self, "create_time")
@property
@pulumi.getter(name="currentSyncInfo")
def current_sync_info(self) -> pulumi.Output['outputs.ReplicationCycleResponse']:
"""
The percentage progress of the current running replication cycle.
"""
return pulumi.get(self, "current_sync_info")
@property
@pulumi.getter
def description(self) -> pulumi.Output[str]:
"""
The description attached to the migrating VM by the user.
"""
return pulumi.get(self, "description")
@property
@pulumi.getter(name="displayName")
def display_name(self) -> pulumi.Output[str]:
"""
The display name attached to the MigratingVm by the user.
"""
return pulumi.get(self, "display_name")
@property
@pulumi.getter
def error(self) -> pulumi.Output['outputs.StatusResponse']:
"""
        Provides details on the state of the Migrating VM in case of an error in replication.
        """
        return pulumi.get(self, "error")
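# ---------------------------------------------------------------------------
# Hedged usage sketch (added for illustration; not part of the generated SDK).
# Assuming a vmmigration Source named "my-source" already exists, a
# MigratingVm could be declared in a Pulumi program roughly as below; the
# import path, project, location and id values are placeholders:
#
#     import pulumi_google_native.vmmigration.v1alpha1 as vmmigration
#
#     migrating_vm = vmmigration.MigratingVm(
#         "example-migrating-vm",
#         migrating_vm_id="example-migrating-vm",
#         source_id="my-source",
#         project="my-project",
#         location="us-central1",
#         display_name="Example VM")
# ---------------------------------------------------------------------------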
# regraph/category_op.py
"""Category operations used by graph rewriting tool."""
import networkx as nx
import copy
from regraph.primitives import (add_node,
add_edge,
set_edge,
add_node_attrs,
get_edge,
add_edge_attrs,
clone_node,
merge_nodes,
update_node_attrs,
remove_node,
remove_edge,
remove_node_attrs,
remove_edge_attrs,
unique_node_id,
subtract,
print_graph)
from regraph.utils import (keys_by_value,
merge_attributes,
restrict_mapping,
dict_sub,
id_of,
valid_attributes,
attrs_intersection)
from regraph.exceptions import (InvalidHomomorphism, ReGraphError)
def subgraph(graph, nodes):
"""Get a subgraph induced by a set nodes.
:param graph:
:param nodes:
:return:
"""
subgraph = copy.deepcopy(graph)
for node in graph.nodes():
if node not in nodes:
remove_node(subgraph, node)
return subgraph
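# Hedged example (added for illustration, not part of the original module):
# inducing a subgraph on a subset of nodes with the helper above.
def _example_subgraph():
    """Keep only the nodes 'a' and 'b' of a small directed path."""
    g = nx.DiGraph()
    g.add_edges_from([("a", "b"), ("b", "c")])
    # 'c' is removed, the edge ('a', 'b') survives.
    return subgraph(g, {"a", "b"})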
def compose(d1, d2):
"""Compose two homomorphisms given by dicts."""
res = dict()
for key, value in d1.items():
if value in d2.keys():
res[key] = d2[value]
return res
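# Hedged example (illustration only): homomorphisms are plain dicts here, so
# composition is just dictionary chaining.
def _example_compose():
    """Compose A -> B with B -> C into A -> C."""
    a_b = {"x": 1, "y": 2}    # homomorphism A -> B
    b_c = {1: "n", 2: "m"}    # homomorphism B -> C
    return compose(a_b, b_c)  # {'x': 'n', 'y': 'm'}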
def is_total_homomorphism(elements, mapping):
"""Return True if mapping is total."""
return set(elements) == set(mapping.keys())
def check_totality(elements, dictionary):
"""Check that a mapping is total."""
if set(elements) != set(dictionary.keys()):
raise InvalidHomomorphism(
"Invalid homomorphism: Mapping is not "
"covering all the nodes of source graph! "
"domain: {}, domain of definition: {}"
.format(set(elements), set(dictionary.keys())))
def check_homomorphism(source, target, dictionary, total=True):
"""Check if the homomorphism is valid.
    A valid homomorphism preserves edges,
    and attributes if required.
"""
# check if there is mapping for all the nodes of source graph
if total:
check_totality(source.nodes(), dictionary)
if not set(dictionary.values()).issubset(target.nodes()):
raise InvalidHomomorphism(
"Some of the image nodes in mapping %s do not "
"exist in target graph (target graph nodes %s) "
"namely %s" %
(dictionary.values(),
target.nodes(),
set(dictionary.values()) - set(target.nodes()))
)
# check connectivity
for s, t in source.edges():
try:
if (s in dictionary.keys() and
t in dictionary.keys() and
not (dictionary[s], dictionary[t])
in target.edges()):
if not target.is_directed():
if not (dictionary[t], dictionary[s]) in target.edges():
raise InvalidHomomorphism(
"Connectivity is not preserved!"
" Was expecting an edge '%s' and '%s'" %
(dictionary[t], dictionary[s]))
else:
raise InvalidHomomorphism(
"Connectivity is not preserved!"
" Was expecting an edge between '%s' and '%s'" %
(dictionary[s], dictionary[t]))
except KeyError:
pass
for s, t in dictionary.items():
# check sets of attributes of nodes (here homomorphism = set
# inclusion)
if not valid_attributes(source.node[s], target.node[t]):
raise InvalidHomomorphism(
"Attributes of nodes source:'%s' %s and "
"target:'%s' %s do not match!" %
(s, source.node[s], t, target.node[t])
)
# check sets of attributes of edges (homomorphism = set inclusion)
for s1, s2 in source.edges():
try:
if (s1 in dictionary.keys() and s2 in dictionary.keys() and
not valid_attributes(
source.edge[s1][s2],
target.edge[dictionary[s1]][dictionary[s2]])):
raise InvalidHomomorphism(
"Attributes of edges (%s)-(%s) (%s) and "
"(%s)-(%s) (%s) do not match!" %
(s1, s2, source.edge[s1][s2], dictionary[s1],
dictionary[s2],
target.edge[dictionary[s1]][dictionary[s2]]))
except KeyError:
pass
return True
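# Hedged example (illustration only): a total homomorphism collapsing a
# two-node path onto a single looped node, validated by the checker above.
def _example_check_homomorphism():
    """Map a -> b onto the loop x -> x."""
    source = nx.DiGraph()
    source.add_edge("a", "b")
    target = nx.DiGraph()
    target.add_edge("x", "x")
    mapping = {"a": "x", "b": "x"}
    # Raises InvalidHomomorphism if edges or attributes are not preserved.
    return check_homomorphism(source, target, mapping)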
def compose_chain(chain):
"""Compose a chain of homomorphisms."""
homomorphism = chain[0]
for i in range(1, len(chain)):
homomorphism = compose(
homomorphism,
chain[i]
)
return homomorphism
def get_unique_map_to_pullback(p, p_a, p_b, z_a, z_b):
"""Find a unique map to pullback."""
z_p = dict()
for value in p:
z_keys_from_a = set()
if value in p_a.keys():
a_value = p_a[value]
z_keys_from_a = set(keys_by_value(z_a, a_value))
z_keys_from_b = set()
if value in p_b.keys():
b_value = p_b[value]
z_keys_from_b.update(keys_by_value(z_b, b_value))
z_keys = z_keys_from_a.intersection(z_keys_from_b)
for z_key in z_keys:
z_p[z_key] = value
return z_p
def get_unique_map_from_pushout(p, a_p, b_p, a_z, b_z):
"""Find a unique map to pushout."""
p_z = dict()
for value in p:
z_values = set()
a_values = set(keys_by_value(a_p, value))
for a_value in a_values:
if a_value in a_z.keys():
z_values.add(a_z[a_value])
b_values = set(keys_by_value(b_p, value))
for b_value in b_values:
if b_value in b_z.keys():
z_values.add(b_z[b_value])
if len(z_values) > 0:
if len(z_values) > 1:
raise ReGraphError("Cannot construct a unique map!")
p_z[value] = z_values.pop()
return p_z
def get_unique_map(a, b, c, d, a_b, b_d, c_d):
"""Get a map a->c that makes a square commute."""
a_c = dict()
for node in b.nodes():
a_keys = keys_by_value(a_b, node)
if len(a_keys) > 0:
# node stayed in the rule
if node in b_d.keys():
d_node = b_d[node]
c_keys = keys_by_value(
c_d,
d_node
)
if len(a_keys) != len(c_keys):
raise ReGraphError("Map is not unique!")
else:
for i, a_key in enumerate(a_keys):
a_c[a_key] = c_keys[i]
return a_c
def identity(a, b):
"""Return identity homomorphism from a to b."""
dic = {}
for n in a.nodes():
if n in b.nodes():
dic[n] = n
else:
raise ReGraphError(
"Cannot construct morphism by names: "
"node '%s' not found in the second graph!" % n
)
return dic
def is_monic(f):
"""Check if the homomorphism is monic."""
return len(set(f.keys())) ==\
len(set(f.values()))
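# Hedged example (illustration only): monomorphisms are exactly the
# injective node mappings.
def _example_is_monic():
    """Contrast an injective and a non-injective mapping."""
    return is_monic({"a": 1, "b": 2}), is_monic({"a": 1, "b": 1})  # (True, False)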
def nary_pullback(b, cds, total=True):
"""Find a pullback with multiple conspans."""
# 1. find individual pullbacks
pullbacks = []
for c_name, (c, d, b_d, c_d) in cds.items():
if total:
pb = pullback(b, c, d, b_d, c_d)
else:
pb = partial_pullback(b, c, d, b_d, c_d)
pullbacks.append((
c_name, pb
))
# 2. find pullbacks of pullbacks
if len(pullbacks) > 1:
c_name1, (a1, a_b1, a_c1) = pullbacks[0]
a_c = dict([(c_name1, a_c1)])
for i in range(1, len(pullbacks)):
c_name2, (a2, a_b2, a_c2) = pullbacks[i]
if total:
a1, a1_old_a1, a1_a2 = pullback(
a1, a2, b, a_b1, a_b2)
else:
a1, a1_old_a1, a1_a2 = partial_pullback(
a1, a2, b, a_b1, a_b2)
a_b1 = compose(a1_old_a1, a_b1)
# update a_c
for c_name, old_a_c in a_c.items():
a_c[c_name] = compose(a1_old_a1, old_a_c)
a_c[c_name2] = compose(a1_a2, a_c2)
# at the end of pullback iterations assign right a and a_b
a_b = a_b1
a = a1
check_homomorphism(a, b, a_b, total=False)
for c_name, a_c_guy in a_c.items():
check_homomorphism(a, cds[c_name][0], a_c_guy, total=False)
return (a, a_b, a_c)
def partial_pullback(b, c, d, b_d, c_d):
"""Find partail pullback."""
check_homomorphism(b, d, b_d, total=False)
check_homomorphism(c, d, c_d, total=False)
bd_dom = subgraph(b, b_d.keys())
cd_dom = subgraph(c, c_d.keys())
bd_b = {n: n for n in bd_dom.nodes()}
cd_c = {n: n for n in cd_dom.nodes()}
(tmp, tmp_bddom, tmp_cddom) = pullback(bd_dom, cd_dom, d, b_d, c_d)
(b2, tmp_b2, b2_b) = pullback_complement(
tmp, bd_dom, b, tmp_bddom, bd_b)
(c2, tmp_c2, c2_c) = pullback_complement(
tmp, cd_dom, c, tmp_cddom, cd_c)
(new, b2_new, c2_new) = pushout(tmp, b2, c2, tmp_b2, tmp_c2)
hom1 = {v: b2_b[k] for (k, v) in b2_new.items()}
hom2 = {v: c2_c[k] for (k, v) in c2_new.items()}
    return (new, hom1, hom2)
def pullback(b, c, d, b_d, c_d, inplace=False):
"""Find the pullback from b -> d <- c.
Given h1 : B -> D; h2 : C -> D returns A, rh1, rh2
with rh1 : A -> B; rh2 : A -> C and A the pullback.
"""
if inplace is True:
a = b
else:
a = type(b)()
# Check homomorphisms
check_homomorphism(b, d, b_d)
check_homomorphism(c, d, c_d)
hom1 = {}
hom2 = {}
f = b_d
g = c_d
for n1 in b.nodes():
for n2 in c.nodes():
if f[n1] == g[n2]:
new_attrs = merge_attributes(b.node[n1],
c.node[n2],
'intersection')
if n1 not in a.nodes():
add_node(a, n1, new_attrs)
hom1[n1] = n1
hom2[n1] = n2
else:
i = 1
new_name = str(n1) + str(i)
while new_name in a.nodes():
i += 1
new_name = str(n1) + str(i)
# if n2 not in a.nodes():
add_node(a, new_name, new_attrs)
hom1[new_name] = n1
hom2[new_name] = n2
for n1 in a.nodes():
for n2 in a.nodes():
if (hom1[n1], hom1[n2]) in b.edges() or \
((not a.is_directed()) and (hom1[n2], hom1[n1]) in b.edges()):
if (hom2[n1], hom2[n2]) in c.edges() or \
                    ((not a.is_directed()) and (hom2[n2], hom2[n1]) in c.edges()):
add_edge(a, n1, n2)
set_edge(
a,
n1,
n2,
merge_attributes(
get_edge(b, hom1[n1], hom1[n2]),
get_edge(c, hom2[n1], hom2[n2]),
'intersection'))
check_homomorphism(a, b, hom1)
check_homomorphism(a, c, hom2)
return (a, hom1, hom2)
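# Hedged example (illustration only): the pullback of a span whose legs both
# hit the single node of d pairs up the matching nodes of b and c.
def _example_pullback():
    """Pull back b -> d <- c for one-node graphs."""
    b = nx.DiGraph()
    b.add_node("b1")
    c = nx.DiGraph()
    c.add_node("c1")
    d = nx.DiGraph()
    d.add_node("d1")
    # The result has a single node projecting to 'b1' in b and 'c1' in c.
    return pullback(b, c, d, {"b1": "d1"}, {"c1": "d1"})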
def partial_pushout(a, b, c, a_b, a_c):
"""Find the partial pushout."""
check_homomorphism(a, b, a_b, total=False)
check_homomorphism(a, c, a_c, total=False)
if a.is_directed():
ab_dom = nx.DiGraph(a.subgraph(a_b.keys()))
ac_dom = nx.DiGraph(a.subgraph(a_c.keys()))
else:
ab_dom = nx.Graph(a.subgraph(a_b.keys()))
ac_dom = nx.Graph(a.subgraph(a_c.keys()))
ac_a = {n: n for n in ac_dom.nodes()}
ab_a = {n: n for n in ab_dom.nodes()}
(c2, a_c2, c_c2) = pushout(ac_dom, a, c, ac_a, a_c)
(b2, a_b2, b_b2) = pushout(ab_dom, a, b, ab_a, a_b)
(d, b2_d, c2_d) = pushout(a, b2, c2, a_b2, a_c2)
b_d = compose(b_b2, b2_d)
c_d = compose(c_c2, c2_d)
    return (d, b_d, c_d)
def pushout(a, b, c, a_b, a_c, inplace=False):
"""Find the pushour of the span b <- a -> c."""
check_homomorphism(a, b, a_b)
check_homomorphism(a, c, a_c)
if inplace is True:
d = b
else:
d = copy.deepcopy(b)
b_d = id_of(b.nodes())
c_d = dict()
# Add/merge nodes
for c_n in c.nodes():
a_keys = keys_by_value(a_c, c_n)
# Add nodes
if len(a_keys) == 0:
add_node(d, c_n, c.node[c_n])
c_d[c_n] = c_n
# Keep nodes
elif len(a_keys) == 1:
c_d[a_c[a_keys[0]]] = a_b[a_keys[0]]
# Merge nodes
else:
|